[pypy-commit] benchmarks default: merge heads

arigo noreply at buildbot.pypy.org
Wed May 1 11:16:23 CEST 2013


Author: Armin Rigo <arigo at tunes.org>
Branch: 
Changeset: r210:18190ecf74c9
Date: 2013-05-01 11:16 +0200
http://bitbucket.org/pypy/benchmarks/changeset/18190ecf74c9/

Log:	merge heads

diff --git a/unladen_swallow/perf.py b/unladen_swallow/perf.py
--- a/unladen_swallow/perf.py
+++ b/unladen_swallow/perf.py
@@ -448,7 +448,7 @@
         base_python: path to the reference Python binary.
         changed_python: path to the experimental Python binary.
         options: optparse.Values instance.
-        *args, **kwargs: will be passed through to benchmark_function. 
+        *args, **kwargs: will be passed through to benchmark_function.
 
     Returns:
         An object representing differences between the two benchmark runs.
@@ -671,7 +671,12 @@
         A string summarizing the difference between the runs, suitable for
         human consumption.
     """
-    assert len(base_times) == len(changed_times)
+    if len(base_times) != len(changed_times):
+        print "Base:"
+        print base_times
+        print "Changed:"
+        print changed_times
+        raise Exception("length did not match")
     if options.no_statistics:
         return RawResult(base_times, changed_times)
     if len(base_times) == 1:
@@ -746,7 +751,7 @@
     Returns:
         (stdout, mem_usage), where stdout is the captured stdout as a string;
         mem_usage is a list of memory usage samples in kilobytes (if
-        track_memory is False, mem_usage is None). 
+        track_memory is False, mem_usage is None).
 
     Raises:
         RuntimeError: if the command failed. The value of the exception will
@@ -761,7 +766,9 @@
         future = MemoryUsageFuture(subproc.pid)
     result, err = subproc.communicate()
     if subproc.returncode != 0:
-        raise RuntimeError("Benchmark died: " + err)
+        print result
+        raise RuntimeError("Benchmark died (returncode: %d): %s" %
+                           (subproc.returncode, err))
     if track_memory:
         mem_usage = future.GetMemoryUsage()
     return result, mem_usage
@@ -1443,7 +1450,7 @@
 BENCH_FUNCS = _FindAllBenchmarks(globals())
 
 # Benchmark groups. The "default" group is what's run if no -b option is
-# specified. 
+# specified.
 # If you update the default group, be sure to update the module docstring, too.
 # An "all" group which includes every benchmark perf.py knows about is generated
 # automatically.
@@ -1571,7 +1578,7 @@
                             " Valid benchmarks are: " +
                             ", ".join(bench_groups.keys() + all_benchmarks)))
     parser.add_option("--inherit_env", metavar="ENVVARS", type="string", action="callback",
-                      callback=ParseEnvVars, default=[],                      
+                      callback=ParseEnvVars, default=[],
                       help=("Comma-separated list of environment variable names"
                             " that are inherited from the parent environment"
                             " when running benchmarking subprocesses."))

