Commits

Maciej Fijalkowski committed fed114f

An ability to store raw results

  • Participants
  • Parent commits a2f7681

Comments (0)

Files changed (2)

         
 def run_and_store(benchmark_set, result_filename, pypy_c_path, revision=0,
                   options='', branch='trunk', args='', upload=False,
-                  force_host=None, fast=False, baseline=sys.executable):
+                  force_host=None, fast=False, baseline=sys.executable,
+                  full_store=False):
     funcs = perf.BENCH_FUNCS.copy()
     funcs.update(perf._FindAllBenchmarks(benchmarks.__dict__))
     opts = ['-b', ','.join(benchmark_set), '--inherit_env=PATH',
         opts += ['--fast']
     if args:
         opts += ['--args', args]
+    if full_store:
+        opts.append('--no_statistics')
     opts += [baseline, pypy_c_path]
     results = perf.main(opts, funcs)
     f = open(str(result_filename), "w")
                       help="Force the hostname")
     parser.add_option("--fast", default=False, action="store_true",
                       help="Run shorter benchmark runs")
+    parser.add_option("--full-store", default=False, action="store_true",
+                      help="Store full raw results instead of computed statistics")
     options, args = parser.parse_args(argv)
     benchmarks = options.benchmarks.split(',')
     for benchmark in benchmarks:
     run_and_store(benchmarks, options.output_filename, options.pypy_c,
                   options.revision, args=options.args, upload=options.upload,
                   force_host=options.force_host, fast=options.fast,
-                  baseline=options.baseline)
+                  baseline=options.baseline, full_store=options.full_store)
 
 if __name__ == '__main__':
     main(sys.argv[1:])

unladen_swallow/perf.py

         return ("%(base_time)f -> %(changed_time)f: %(time_delta)s"
                 % self.__dict__)
 
+class RawResult(object):
+    """Holder for raw benchmark timings when statistics are skipped.
+
+    Returned instead of a computed-stats result when the --no_statistics
+    option is given; it simply carries the unprocessed timing lists.
+    """
+
+    def __init__(self, base_times, changed_times):
+        # Per-run timings for the baseline and the changed interpreter.
+        self.base_times = base_times
+        self.changed_times = changed_times
+
+    def string_representation(self):
+        """Return a human-readable dump of both raw timing lists."""
+        return "Raw results: %s %s" % (self.base_times, self.changed_times)
+
 def CompareMemoryUsage(base_usage, changed_usage, options):
     """Like CompareMultipleRuns, but for memory usage."""
     max_base, max_changed = max(base_usage), max(changed_usage)
         human consumption.
     """
     assert len(base_times) == len(changed_times)
+    if options.no_statistics:
+        return RawResult(base_times, changed_times)
     if len(base_times) == 1:
         # With only one data point, we can't do any of the interesting stats
         # below.
     parser.add_option("--no_charts", default=False, action="store_true",
                       help=("Don't use google charts for displaying the"
                             " graph outcome"))
+    parser.add_option("--no_statistics", default=False, action="store_true",
+                      help=("Don't perform statistics - return raw data"))
 
     options, args = parser.parse_args(argv)
     if len(args) != 2: