#
#
# add_file "benchmark.py"
#  content [6ee2557df30f2cdae0fcbe4794e573996848a506]
#
# add_file "mtn_benchmark/cmdline.py"
#  content [dd2f3a48bc0c2e1cee6dcabf8bcf3e0077aead16]
#
============================================================
--- benchmark.py  6ee2557df30f2cdae0fcbe4794e573996848a506
+++ benchmark.py  6ee2557df30f2cdae0fcbe4794e573996848a506
@@ -0,0 +1,4 @@
+import sys
+import mtn_benchmark.cmdline
+
+mtn_benchmark.cmdline.main(sys.argv[0], sys.argv[1:])
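benchmark.py above is a thin wrapper that forwards argv to mtn_benchmark.cmdline.main(). The file below resolves NAME=CODE descriptors by eval'ing CODE against the names collected in namespace(). A minimal sketch of that convention, using a hypothetical Mtn stub rather than the real mtn_benchmark.mtn.Mtn:

    # Illustration only: the NAME=CODE descriptor convention parsed below.
    # This Mtn is a stand-in stub, not the real mtn_benchmark.mtn.Mtn.
    class Mtn:
        def __init__(self, path):
            self.path = path

    descriptor = "mtn=Mtn('/usr/bin/mtn')"
    name, code = descriptor.split("=", 1)
    value = eval(code, {}, {"Mtn": Mtn})
    print name, value.path        # -> mtn /usr/bin/mtn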
to run") + + instrumenters = {} + process_descriptors(options.instrumenters, instrumenters, parser.error) + if not instrumenters: + print "No instrumenters given, using default 'time' instrumenter" + instrumenters["time"] = TimingInstrumenter() + + cache_clearer = mtn_benchmark.util.CacheClearer(options.clear_cache) + + driver = mtn_benchmark.driver.Driver(scratch, results, + testables, benchmarks, instrumenters, + debug, cache_clearer) + driver.run() + + +if __name__ == "__main__": + import sys + main(sys.argv[0], sys.argv[1:]) + +def tryit(): + scratch = "scratch" + results = "results" + shutil.rmtree(scratch, True) + shutil.rmtree(results, True) + + testables = {"mtn": Mtn("/home/njs/src/monotone/opt/mtn")} + benchmarks = {"pull": PullBenchmark(ExistingRepo("/home/njs/src/monotone/benchmark/test.mtn"))} + instrumenters = {"time": TimingInstrumenter(2)} + debug = 1 + + cache_clearer = CacheClearer("/home/njs/src/monotone/benchmark/drop_caches") + +