[pypy-svn] r77601 - in pypy/benchmarks: . unladen_swallow

fijal at codespeak.net
Tue Oct 5 14:28:34 CEST 2010


Author: fijal
Date: Tue Oct  5 14:28:32 2010
New Revision: 77601

Modified:
   pypy/benchmarks/runner.py
   pypy/benchmarks/unladen_swallow/perf.py
Log:
Add the ability to store raw results
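
In short: runner.py grows a --full-store option which is forwarded to
unladen_swallow/perf.py as --no_statistics; perf.py then returns the
per-run timings wrapped in a RawResult instead of computed statistics.
A hedged usage sketch of the Python entry point (the keyword arguments
follow the run_and_store signature in the diff below; the benchmark
name, paths and revision number are placeholders):

    # assumes pypy/benchmarks is on sys.path and its dependencies import
    from runner import run_and_store

    run_and_store(['html5lib'],          # benchmark_set (placeholder name)
                  'results.json',        # result_filename (placeholder path)
                  '/path/to/pypy-c',     # pypy_c_path (placeholder path)
                  revision=77601,
                  fast=True,
                  full_store=True)       # store raw per-run results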


Modified: pypy/benchmarks/runner.py
==============================================================================
--- pypy/benchmarks/runner.py	(original)
+++ pypy/benchmarks/runner.py	Tue Oct  5 14:28:32 2010
@@ -11,7 +11,8 @@
         
 def run_and_store(benchmark_set, result_filename, pypy_c_path, revision=0,
                   options='', branch='trunk', args='', upload=False,
-                  force_host=None, fast=False, baseline=sys.executable):
+                  force_host=None, fast=False, baseline=sys.executable,
+                  full_store=False):
     funcs = perf.BENCH_FUNCS.copy()
     funcs.update(perf._FindAllBenchmarks(benchmarks.__dict__))
     opts = ['-b', ','.join(benchmark_set), '--inherit_env=PATH',
@@ -20,6 +21,8 @@
         opts += ['--fast']
     if args:
         opts += ['--args', args]
+    if full_store:
+        opts.append('--no_statistics')
     opts += [baseline, pypy_c_path]
     results = perf.main(opts, funcs)
     f = open(str(result_filename), "w")
@@ -93,6 +96,8 @@
                       help="Force the hostname")
     parser.add_option("--fast", default=False, action="store_true",
                       help="Run shorter benchmark runs")
+    parser.add_option("--full-store", default=False, action="store_true",
+                      help="Store raw results instead of computed statistics")
     options, args = parser.parse_args(argv)
     benchmarks = options.benchmarks.split(',')
     for benchmark in benchmarks:
@@ -101,7 +106,7 @@
     run_and_store(benchmarks, options.output_filename, options.pypy_c,
                   options.revision, args=options.args, upload=options.upload,
                   force_host=options.force_host, fast=options.fast,
-                  baseline=options.baseline)
+                  baseline=options.baseline, full_store=options.full_store)
 
 if __name__ == '__main__':
     main(sys.argv[1:])
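
One detail the runner.py hunk relies on: optparse derives the attribute
name from the long option by replacing dashes with underscores, so the
--full-store flag is read back as options.full_store when run_and_store
is called. A minimal, self-contained check (illustrative only):

    import optparse

    parser = optparse.OptionParser()
    parser.add_option("--full-store", default=False, action="store_true",
                      help="Store raw results instead of computed statistics")
    options, args = parser.parse_args(["--full-store"])
    print(options.full_store)   # prints True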

Modified: pypy/benchmarks/unladen_swallow/perf.py
==============================================================================
--- pypy/benchmarks/unladen_swallow/perf.py	(original)
+++ pypy/benchmarks/unladen_swallow/perf.py	Tue Oct  5 14:28:32 2010
@@ -406,6 +406,14 @@
         return ("%(base_time)f -> %(changed_time)f: %(time_delta)s"
                 % self.__dict__)
 
+class RawResult(object):
+    def __init__(self, base_times, changed_times):
+        self.base_times = base_times
+        self.changed_times = changed_times
+
+    def string_representation(self):
+        return "Raw results: %s %s" % (self.base_times, self.changed_times)
+
 def CompareMemoryUsage(base_usage, changed_usage, options):
     """Like CompareMultipleRuns, but for memory usage."""
     max_base, max_changed = max(base_usage), max(changed_usage)
@@ -660,6 +668,8 @@
         human consumption.
     """
     assert len(base_times) == len(changed_times)
+    if options.no_statistics:
+        return RawResult(base_times, changed_times)
     if len(base_times) == 1:
         # With only one data point, we can't do any of the interesting stats
         # below.
@@ -1564,6 +1574,8 @@
     parser.add_option("--no_charts", default=False, action="store_true",
                       help=("Don't use google charts for displaying the"
                             " graph outcome"))
+    parser.add_option("--no_statistics", default=False, action="store_true",
+                      help=("Don't compute statistics; return the raw data instead"))
 
     options, args = parser.parse_args(argv)
     if len(args) != 2:
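
Because a RawResult keeps both lists of per-run timings, the statistics
that perf.py would otherwise compute can still be derived after the fact.
A hedged sketch of such post-processing (the class mirrors the RawResult
added above; the timings and the min-based comparison are only an example
of what a consumer of the stored raw data might do):

    class RawResult(object):
        # same shape as the class added to perf.py above
        def __init__(self, base_times, changed_times):
            self.base_times = base_times
            self.changed_times = changed_times

        def string_representation(self):
            return "Raw results: %s %s" % (self.base_times, self.changed_times)

    # made-up timings; real ones come out of a perf.py run
    result = RawResult([2.10, 2.05, 2.08], [1.52, 1.49, 1.50])
    print(result.string_representation())
    speedup = min(result.base_times) / min(result.changed_times)
    print("best-case speedup: %.2fx" % speedup)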


