[Python-checkins] benchmarks: Closes #19079: add VersionRange decorator to benchmark suite to mark compatible Python versions.

georg.brandl python-checkins at python.org
Mon Oct 14 21:31:38 CEST 2013


http://hg.python.org/benchmarks/rev/e40042a7788c
changeset:   214:e40042a7788c
user:        Georg Brandl <georg at python.org>
date:        Mon Oct 14 21:32:30 2013 +0200
summary:
  Closes #19079: add VersionRange decorator to benchmark suite to mark compatible Python versions.

The selected set of benchmarks is intersected with the set of benchmarks
compatible with both selected Pythons before running.
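
For illustration, a minimal self-contained sketch of how the decorator and
the filtering step fit together; the benchmark functions and the version
strings passed to filter_benchmarks() below are made up for the example,
while the real logic lives in VersionRange() and FilterBenchmarks() in the
diff:

    # Benchmark functions carry a (minver, maxver) range set by the
    # VersionRange decorator; anything without a range is assumed to be
    # compatible with all (reasonably new) Python versions.
    def VersionRange(minver=None, maxver=None):
        def deco(func):
            func._range = minver or '2.0', maxver or '4.0'
            return func
        return deco

    @VersionRange(None, '2.7')      # hypothetical 2.x-only benchmark
    def BM_Legacy(*args, **kwargs):
        pass

    @VersionRange()                 # compatible with 2.x and 3.x
    def BM_Portable(*args, **kwargs):
        pass

    def filter_benchmarks(benchmarks, bench_funcs, base_ver, changed_ver):
        """Keep only benchmarks whose range contains both "X.Y" versions."""
        for bm in list(benchmarks):
            minver, maxver = getattr(bench_funcs[bm], '_range', ('2.0', '4.0'))
            if not (minver <= base_ver <= maxver and
                    minver <= changed_ver <= maxver):
                benchmarks.discard(bm)
        return benchmarks

    funcs = {'legacy': BM_Legacy, 'portable': BM_Portable}
    print(filter_benchmarks({'legacy', 'portable'}, funcs, '2.7', '3.3'))
    # only 'portable' survives the 2.7/3.3 interpreter pair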

The "2n3" set of benchmarks is now calculated automatically.

files:
  perf.py |  177 +++++++++++++++++++++++++++++++------------
  1 files changed, 125 insertions(+), 52 deletions(-)


diff --git a/perf.py b/perf.py
--- a/perf.py
+++ b/perf.py
@@ -88,8 +88,8 @@
 info = logging.info
 
 
-def ported_lib(python, _cache={}):
-    """Return the 3rd-party library path for the given Python interpreter.
+def interpreter_version(python, _cache={}):
+    """Return the interpreter version for the given Python interpreter.
     *python* is the base command (as a list) to execute the interpreter.
     """
     key = tuple(python)
@@ -97,20 +97,27 @@
         return _cache[key]
     except KeyError:
         pass
-    code = """import sys; print(sys.version_info[0])"""
+    code = """import sys; print('.'.join(map(str, sys.version_info[:2])))"""
     subproc = subprocess.Popen(python + ['-c', code],
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
     out, err = subproc.communicate()
     if subproc.returncode != 0:
         raise RuntimeError("Child interpreter died: " + err.decode())
-    major = int(out.strip())
-    if major == 2:
-        result = 'lib'
-    else:
-        result = 'lib3'
-    _cache[key] = result
-    return result
+    version = out.strip()
+    if len(version) != 3:
+        raise RuntimeError("Strange version printed: %s" % version)
+    _cache[key] = version
+    return version
+
+
+def ported_lib(python):
+    """Return the 3rd-party library path for the given Python interpreter.
+    *python* is the base command (as a list) to execute the interpreter.
+    """
+    if interpreter_version(python)[0] == '3':
+        return 'lib3'
+    return 'lib'
 
 
 def avg(seq):
@@ -1134,6 +1141,17 @@
 
 ### Benchmarks
 
+# Decorators for giving ranges of supported Python versions.
+# Benchmarks without a range applied are assumed to be compatible with all
+# (reasonably new) Python versions.
+
+def VersionRange(minver=None, maxver=None):
+    def deco(func):
+        func._range = minver or '2.0', maxver or '4.0'
+        return func
+    return deco
+
+
 class PyBenchBenchmarkResult(object):
 
     def __init__(self, min_base, min_changed, delta_min,
@@ -1172,6 +1190,7 @@
     return BenchmarkError(line)
 
 
+@VersionRange(None, '2.7')
 def BM_PyBench(base_python, changed_python, options):
     if options.track_memory:
         return BenchmarkError("Benchmark does not report memory usage yet")
@@ -1310,7 +1329,7 @@
     command = python + [two_to_three_bin, "-f", "all", target]
     return MeasureCommand(command, trials, env, options.track_memory)
 
-
+@VersionRange()
 def BM_2to3(*args, **kwargs):
     return SimpleBenchmark(Measure2to3, *args, **kwargs)
 
@@ -1331,7 +1350,7 @@
     command = python + [hg_bin, "help"]
     return MeasureCommand(command, trials, hg_env, options.track_memory)
 
-
+@VersionRange(None, '2.7')
 def BM_hg_startup(*args, **kwargs):
     return SimpleBenchmark(MeasureHgStartup, *args, **kwargs)
 
@@ -1350,7 +1369,7 @@
     command = python + [bzr_bin, "help"]
     return MeasureCommand(command, trials, bzr_env, options.track_memory)
 
-
+@VersionRange(None, '2.7')
 def BM_bzr_startup(*args, **kwargs):
     return SimpleBenchmark(MeasureBzrStartup, *args, **kwargs)
 
@@ -1362,7 +1381,7 @@
     bm_env = {"PYTHONPATH": lib_path}
     return MeasureGeneric(python, options, bm_path, bm_env, iteration_scaling=3)
 
-
+@VersionRange(None, '3.3')
 def BM_Chameleon(*args, **kwargs):
     return SimpleBenchmark(MeasureChameleon, *args, **kwargs)
 
@@ -1375,7 +1394,7 @@
     bm_env = {"PYTHONPATH": DJANGO_DIR}
     return MeasureGeneric(python, options, bm_path, bm_env)
 
-
+@VersionRange(None, '2.7')
 def BM_Django(*args, **kwargs):
     return SimpleBenchmark(MeasureDjango, *args, **kwargs)
 
@@ -1386,6 +1405,7 @@
     bm_env = {"PYTHONPATH": django_path}
     return MeasureGeneric(python, options, bm_path, bm_env)
 
+@VersionRange()
 def BM_Django_v2(*args, **kwargs):
     return SimpleBenchmark(MeasureDjangoV2, *args, **kwargs)
 
@@ -1394,6 +1414,7 @@
     bm_path = Relative("performance/bm_float.py")
     return MeasureGeneric(python, options, bm_path)
 
+@VersionRange()
 def BM_Float(*args, **kwargs):
     return SimpleBenchmark(MeasureFloat, *args, **kwargs)
 
@@ -1413,7 +1434,7 @@
 
     return MeasureGeneric(python, options, bm_path, bm_env)
 
-
+@VersionRange(None, '2.7')
 def BM_Rietveld(*args, **kwargs):
     return SimpleBenchmark(MeasureRietveld, *args, **kwargs)
 
@@ -1505,10 +1526,12 @@
             pass
 
 
+@VersionRange(None, '2.7')
 def BM_Spitfire(*args, **kwargs):
     return SimpleBenchmark(MeasureSpitfireWithPsyco, *args, **kwargs)
 
 
+@VersionRange(None, '2.7')
 def BM_SlowSpitfire(base_python, changed_python, options):
     extra_args = ["--disable_psyco"]
     spitfire_env = {"PYTHONPATH": Relative("lib/spitfire")}
@@ -1530,7 +1553,7 @@
     bm_env = BuildEnv({"PYTHONPATH": mako_path}, options.inherit_env)
     return MeasureGeneric(python, options, bm_path, bm_env, iteration_scaling=5)
 
-
+@VersionRange()
 def BM_mako(*args, **kwargs):
     return SimpleBenchmark(MeasureMako, *args, **kwargs)
 
@@ -1542,7 +1565,7 @@
     return MeasureGeneric(python, options, bm_path, bm_env,
                           iteration_scaling=10)
 
-
+@VersionRange()
 def BM_mako_v2(*args, **kwargs):
     return SimpleBenchmark(MeasureMakoV2, *args, **kwargs)
 
@@ -1554,7 +1577,7 @@
     return MeasureGeneric(python, options, bm_path, bm_env,
                           iteration_scaling=10)
 
-
+@VersionRange()
 def BM_pathlib(*args, **kwargs):
     return SimpleBenchmark(MeasurePathlib, *args, **kwargs)
 
@@ -1592,30 +1615,36 @@
     return SimpleBenchmark(MeasurePickle,
                            base_python, changed_python, options, extra_args)
 
-
+@VersionRange()
 def BM_FastPickle(base_python, changed_python, options):
     args = ["--use_cpickle", "pickle"]
     return _PickleBenchmark(base_python, changed_python, options, args)
 
+@VersionRange()
 def BM_FastUnpickle(base_python, changed_python, options):
     args = ["--use_cpickle", "unpickle"]
     return _PickleBenchmark(base_python, changed_python, options, args)
 
+@VersionRange()
 def BM_Pickle_List(base_python, changed_python, options):
     args = ["--use_cpickle", "pickle_list"]
     return _PickleBenchmark(base_python, changed_python, options, args)
 
+@VersionRange()
 def BM_Unpickle_List(base_python, changed_python, options):
     args = ["--use_cpickle", "unpickle_list"]
     return _PickleBenchmark(base_python, changed_python, options, args)
 
+@VersionRange()
 def BM_Pickle_Dict(base_python, changed_python, options):
     args = ["--use_cpickle", "pickle_dict"]
     return _PickleBenchmark(base_python, changed_python, options, args)
 
+@VersionRange(None, '2.7')   # 3.x doesn't have slow pickle
 def BM_SlowPickle(base_python, changed_python, options):
     return _PickleBenchmark(base_python, changed_python, options, ["pickle"])
 
+@VersionRange(None, '2.7')
 def BM_SlowUnpickle(base_python, changed_python, options):
     return _PickleBenchmark(base_python, changed_python, options, ["unpickle"])
 
@@ -1634,25 +1663,25 @@
     bm_path = Relative("performance/bm_elementtree.py")
     return MeasureGeneric(python, options, bm_path, extra_args=extra_args)
 
-
+@VersionRange()
 def BM_ETree_Parse(base_python, changed_python, options):
     extra_args = ['parse']
     return SimpleBenchmark(MeasureEtree,
                            base_python, changed_python, options, extra_args)
 
-
+@VersionRange()
 def BM_ETree_IterParse(base_python, changed_python, options):
     extra_args = ['iterparse']
     return SimpleBenchmark(MeasureEtree,
                            base_python, changed_python, options, extra_args)
 
-
+@VersionRange()
 def BM_ETree_Generate(base_python, changed_python, options):
     extra_args = ['generate']
     return SimpleBenchmark(MeasureEtree,
                            base_python, changed_python, options, extra_args)
 
-
+@VersionRange()
 def BM_ETree_Process(base_python, changed_python, options):
     extra_args = ['process']
     return SimpleBenchmark(MeasureEtree,
@@ -1692,11 +1721,12 @@
     return SimpleBenchmark(MeasureJSON,
                            base_python, changed_python, options, extra_args)
 
-
+@VersionRange()
 def BM_JSON_Dump(base_python, changed_python, options):
     args = ["json_dump"]
     return _JSONBenchmark(base_python, changed_python, options, args)
 
+@VersionRange()
 def BM_JSON_Load(base_python, changed_python, options):
     args = ["json_load"]
     return _JSONBenchmark(base_python, changed_python, options, args)
@@ -1706,9 +1736,11 @@
     bm_path = Relative("performance/bm_json_v2.py")
     return MeasureGeneric(python, options, bm_path)
 
+@VersionRange()
 def BM_JSON_Dump_V2(*args, **kwargs):
     return SimpleBenchmark(MeasureJSONDumpV2, *args, **kwargs)
 
+
 def MeasureNQueens(python, options):
     """Test the performance of an N-Queens solver.
 
@@ -1722,6 +1754,7 @@
     bm_path = Relative("performance/bm_nqueens.py")
     return MeasureGeneric(python, options, bm_path)
 
+@VersionRange()
 def BM_NQueens(*args, **kwargs):
     return SimpleBenchmark(MeasureNQueens, *args, **kwargs)
 
@@ -1730,6 +1763,7 @@
     bm_path = Relative("performance/bm_chaos.py")
     return MeasureGeneric(python, options, bm_path)
 
+@VersionRange()
 def BM_Chaos(*args, **kwargs):
     return SimpleBenchmark(MeasureChaos, *args, **kwargs)
 
@@ -1738,6 +1772,7 @@
     bm_path = Relative("performance/bm_fannkuch.py")
     return MeasureGeneric(python, options, bm_path)
 
+@VersionRange()
 def BM_Fannkuch(*args, **kwargs):
     return SimpleBenchmark(MeasureFannkuch, *args, **kwargs)
 
@@ -1746,6 +1781,7 @@
     bm_path = Relative("performance/bm_go.py")
     return MeasureGeneric(python, options, bm_path)
 
+@VersionRange()
 def BM_Go(*args, **kwargs):
     return SimpleBenchmark(MeasureGo, *args, **kwargs)
 
@@ -1754,6 +1790,7 @@
     bm_path = Relative("performance/bm_meteor_contest.py")
     return MeasureGeneric(python, options, bm_path)
 
+@VersionRange()
 def BM_Meteor_Contest(*args, **kwargs):
     return SimpleBenchmark(MeasureMeteorContest, *args, **kwargs)
 
@@ -1762,6 +1799,7 @@
     bm_path = Relative("performance/bm_spectral_norm.py")
     return MeasureGeneric(python, options, bm_path)
 
+@VersionRange()
 def BM_Spectral_Norm(*args, **kwargs):
     return SimpleBenchmark(MeasureSpectralNorm, *args, **kwargs)
 
@@ -1770,6 +1808,7 @@
     bm_path = Relative("performance/bm_telco.py")
     return MeasureGeneric(python, options, bm_path)
 
+@VersionRange()
 def BM_Telco(*args, **kwargs):
     return SimpleBenchmark(MeasureTelco, *args, **kwargs)
 
@@ -1778,6 +1817,7 @@
     bm_path = Relative("performance/bm_hexiom2.py")
     return MeasureGeneric(python, options, bm_path, iteration_scaling=0.04)
 
+@VersionRange()
 def BM_Hexiom2(*args, **kwargs):
     return SimpleBenchmark(MeasureHexiom2, *args, **kwargs)
 
@@ -1786,6 +1826,7 @@
     bm_path = Relative("performance/bm_raytrace.py")
     return MeasureGeneric(python, options, bm_path)
 
+@VersionRange()
 def BM_Raytrace(*args, **kwargs):
     return SimpleBenchmark(MeasureRaytrace, *args, **kwargs)
 
@@ -1823,15 +1864,17 @@
     return SimpleBenchmark(MeasureLogging,
                            base_python, changed_python, options, extra_args)
 
-
+@VersionRange()
 def BM_Silent_Logging(base_python, changed_python, options):
     args = ["no_output"]
     return _LoggingBenchmark(base_python, changed_python, options, args)
 
+@VersionRange()
 def BM_Simple_Logging(base_python, changed_python, options):
     args = ["simple_output"]
     return _LoggingBenchmark(base_python, changed_python, options, args)
 
+@VersionRange()
 def BM_Formatted_Logging(base_python, changed_python, options):
     args = ["formatted_output"]
     return _LoggingBenchmark(base_python, changed_python, options, args)
@@ -1888,7 +1931,7 @@
       mem_usage = None
     return RawData(times, mem_usage)
 
-
+@VersionRange()
 def BM_normal_startup(base_python, changed_python, options):
     if options.rigorous:
         num_loops = 100
@@ -1905,7 +1948,7 @@
 
     return CompareBenchmarkData(base_data, changed_data, options)
 
-
+@VersionRange()
 def BM_startup_nosite(base_python, changed_python, options):
     if options.rigorous:
         num_loops = 200
@@ -1941,17 +1984,17 @@
     return SimpleBenchmark(MeasureRegexPerformance,
                            base_python, changed_python, options, bm_path)
 
-
+@VersionRange()
 def BM_regex_v8(base_python, changed_python, options):
     bm_path = "performance/bm_regex_v8.py"
     return RegexBenchmark(base_python, changed_python, options, bm_path)
 
-
+@VersionRange()
 def BM_regex_effbot(base_python, changed_python, options):
     bm_path = "performance/bm_regex_effbot.py"
     return RegexBenchmark(base_python, changed_python, options, bm_path)
 
-
+@VersionRange()
 def BM_regex_compile(base_python, changed_python, options):
     bm_path = "performance/bm_regex_compile.py"
     return RegexBenchmark(base_python, changed_python, options, bm_path)
@@ -1976,12 +2019,12 @@
     return SimpleBenchmark(MeasureThreading,
                            base_python, changed_python, options, bm_name)
 
-
+@VersionRange()
 def BM_threaded_count(base_python, changed_python, options):
     bm_name = "threaded_count"
     return ThreadingBenchmark(base_python, changed_python, options, bm_name)
 
-
+@VersionRange()
 def BM_iterative_count(base_python, changed_python, options):
     bm_name = "iterative_count"
     return ThreadingBenchmark(base_python, changed_python, options, bm_name)
@@ -2000,7 +2043,7 @@
     bm_path = Relative("performance/bm_unpack_sequence.py")
     return MeasureGeneric(python, options, bm_path, iteration_scaling=1000)
 
-
+@VersionRange()
 def BM_unpack_sequence(*args, **kwargs):
     return SimpleBenchmark(MeasureUnpackSequence, *args, **kwargs)
 
@@ -2009,7 +2052,7 @@
     bm_path = Relative("performance/bm_call_simple.py")
     return MeasureGeneric(python, options, bm_path, iteration_scaling=3)
 
-
+@VersionRange()
 def BM_call_simple(*args, **kwargs):
     return SimpleBenchmark(MeasureCallSimple, *args, **kwargs)
 
@@ -2018,7 +2061,7 @@
     bm_path = Relative("performance/bm_call_method.py")
     return MeasureGeneric(python, options, bm_path, iteration_scaling=3)
 
-
+@VersionRange()
 def BM_call_method(*args, **kwargs):
     return SimpleBenchmark(MeasureCallMethod, *args, **kwargs)
 
@@ -2027,7 +2070,7 @@
     bm_path = Relative("performance/bm_call_method_unknown.py")
     return MeasureGeneric(python, options, bm_path, iteration_scaling=3)
 
-
+@VersionRange()
 def BM_call_method_unknown(*args, **kwargs):
     return SimpleBenchmark(MeasureCallMethodUnknown, *args, **kwargs)
 
@@ -2036,7 +2079,7 @@
     bm_path = Relative("performance/bm_call_method_slots.py")
     return MeasureGeneric(python, options, bm_path, iteration_scaling=3)
 
-
+@VersionRange()
 def BM_call_method_slots(*args, **kwargs):
     return SimpleBenchmark(MeasureCallMethodSlots, *args, **kwargs)
 
@@ -2054,7 +2097,7 @@
     bm_path = Relative("performance/bm_nbody.py")
     return MeasureGeneric(python, options, bm_path)
 
-
+@VersionRange()
 def BM_nbody(*args, **kwargs):
     return SimpleBenchmark(MeasureNbody, *args, **kwargs)
 
@@ -2074,7 +2117,7 @@
     bm_env = BuildEnv({"PYTHONPATH": pypath}, options.inherit_env)
     return MeasureGeneric(python, options, bm_path, bm_env)
 
-
+@VersionRange(None, '2.7')
 def BM_spambayes(*args, **kwargs):
     return SimpleBenchmark(MeasureSpamBayes, *args, **kwargs)
 
@@ -2095,7 +2138,7 @@
     return MeasureGeneric(python, options, bm_path, bm_env,
                           iteration_scaling=0.10)
 
-
+@VersionRange(None, '2.7')
 def BM_html5lib_warmup(*args, **kwargs):
     return SimpleBenchmark(MeasureHtml5libWarmup, *args, **kwargs)
 
@@ -2114,7 +2157,7 @@
     command = python + [bm_path, "-n", "1"]
     return MeasureCommand(command, trials, bm_env, options.track_memory)
 
-
+@VersionRange(None, '2.7')
 def BM_html5lib(*args, **kwargs):
     return SimpleBenchmark(MeasureHtml5lib, *args, **kwargs)
 
@@ -2123,6 +2166,7 @@
     bm_path = Relative("performance/bm_richards.py")
     return MeasureGeneric(python, options, bm_path)
 
+@VersionRange()
 def BM_richards(*args, **kwargs):
     return SimpleBenchmark(MeasureRichards, *args, **kwargs)
 
@@ -2141,7 +2185,7 @@
     bm_path = Relative("performance/bm_pidigits.py")
     return MeasureGeneric(python, options, bm_path)
 
-
+@VersionRange()
 def BM_pidigits(*args, **kwargs):
     return SimpleBenchmark(MeasurePiDigits, *args, **kwargs)
 
@@ -2180,17 +2224,16 @@
                 "template" : ["slowspitfire", "django_v2", "mako_v2"],
                 "logging": ["silent_logging", "simple_logging",
                             "formatted_logging"],
-                # Benchmarks natively 2.x- and 3.x-compatible
-                "2n3": ["2to3", "calls", "chameleon", "chaos", "django_v2",
-                        "etree", "fannkuch", "fastpickle", "fastunpickle",
-                        "go", "hexiom2", "json_dump_v2", "json_load",
-                        "mako", "mako_v2", "math", "logging",
-                        "meteor_contest", "normal_startup", "nqueens",
-                        "pathlib", "raytrace", "regex", "richards",
-                        "spectral_norm", "startup_nosite", "telco",
-                        "threading", "unpack_sequence"],
                 }
 
+# Calculate set of 2-and-3 compatible benchmarks.
+group2n3 = BENCH_GROUPS["2n3"] = []
+for bm, func in BENCH_FUNCS.items():
+    minver, maxver = getattr(func, '_range', ('2.0', '4.0'))
+    if minver <= '2.7' and '3.2' <= maxver:
+        group2n3.append(bm)
+
+
 SLOW_BENCHMARKS = ["hexiom2"]
 
 
@@ -2257,6 +2300,33 @@
     return should_run
 
 
+def FilterBenchmarks(benchmarks, bench_funcs, base_python, changed_python):
+    """Filters out benchmarks not supported by both Pythons.
+
+    Args:
+        benchmarks: a set() of benchmark names
+        bench_funcs: dict mapping benchmark names to functions
+        base_python, changed_python: the interpreter commands (as lists)
+
+    Returns:
+        The filtered set of benchmark names
+    """
+    basever = interpreter_version(base_python)
+    changedver = interpreter_version(changed_python)
+    for bm in list(benchmarks):
+        minver, maxver = getattr(bench_funcs[bm], '_range', ('2.0', '4.0'))
+        if not minver <= basever <= maxver:
+            benchmarks.discard(bm)
+            logging.info("Skipping benchmark %s; not compatible with "
+                         "Python %s" % (bm, basever))
+            continue
+        if not minver <= changedver <= maxver:
+            benchmarks.discard(bm)
+            logging.info("Skipping benchmark %s; not compatible with "
+                         "Python %s" % (bm, changedver))
+    return benchmarks
+
+
 def ParsePythonArgsOption(python_args_opt):
     """Parses the --args option.
 
@@ -2407,6 +2477,9 @@
     should_run = ParseBenchmarksOption(options.benchmarks, bench_groups,
                                        options.fast)
 
+    should_run = FilterBenchmarks(should_run, bench_funcs,
+                                  base_cmd_prefix, changed_cmd_prefix)
+
     results = []
     for name in sorted(should_run):
         func = bench_funcs[name]

-- 
Repository URL: http://hg.python.org/benchmarks

