[Python-checkins] benchmarks: Tweak some iteration scaling values so that using the same interpreter doesn't lead to any perf difference with -f

brett.cannon python-checkins at python.org
Sat Sep 15 00:12:37 CEST 2012


http://hg.python.org/benchmarks/rev/8b252c8b8457
changeset:   181:8b252c8b8457
user:        Brett Cannon <brett at python.org>
date:        Fri Sep 14 18:11:51 2012 -0400
summary:
  Tweak some iteration scaling values so that using the same interpreter doesn't lead to any perf difference with -f.

files:
  perf.py                   |  26 ++++++++++++++------------
  performance/bm_hexiom2.py |   7 +++----
  2 files changed, 17 insertions(+), 16 deletions(-)
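
The perf.py hunks below drop the explicit iteration_scaling=1 arguments
(relying on the keyword's default), double mako_v2's scaling from 5 to 10,
and give hexiom2 a fractional scaling of 0.04. MeasureGeneric's body is not
part of this diff, so the following is only a sketch of how
iteration_scaling plausibly feeds into the trial count; the base counts and
the options.fast attribute are assumptions inferred from the call sites and
the -f flag mentioned in the summary:

    # Hypothetical sketch, not the verbatim perf.py implementation.
    def ScaledTrials(options, iteration_scaling=1):
        base = 5 if options.fast else 50  # assumed: -f shrinks the base count
        # Fractional scaling shortens a slow benchmark; values > 1 lengthen
        # a fast one. At least one trial always runs.
        return max(1, int(base * iteration_scaling))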


diff --git a/perf.py b/perf.py
--- a/perf.py
+++ b/perf.py
@@ -1344,7 +1344,7 @@
 
 def MeasureFloat(python, options):
     bm_path = Relative("performance/bm_float.py")
-    return MeasureGeneric(python, options, bm_path, iteration_scaling=1)
+    return MeasureGeneric(python, options, bm_path)
 
 def BM_Float(*args, **kwargs):
     return SimpleBenchmark(MeasureFloat, *args, **kwargs)
@@ -1491,7 +1491,8 @@
     bm_path = Relative("performance/bm_mako_v2.py", python, options)
     mako_path = Relative("lib/mako-0.7.2", python, options)
     bm_env = BuildEnv({"PYTHONPATH": mako_path}, options.inherit_env)
-    return MeasureGeneric(python, options, bm_path, bm_env, iteration_scaling=5)
+    return MeasureGeneric(python, options, bm_path, bm_env,
+                          iteration_scaling=10)
 
 
 def BM_mako_v2(*args, **kwargs):
@@ -1502,7 +1503,8 @@
     bm_path = Relative("performance/bm_pathlib.py")
     pathlib_path = Relative("lib/pathlib")
     bm_env = BuildEnv({"PYTHONPATH": pathlib_path}, options.inherit_env)
-    return MeasureGeneric(python, options, bm_path, bm_env, iteration_scaling=10)
+    return MeasureGeneric(python, options, bm_path, bm_env,
+                          iteration_scaling=10)
 
 
 def BM_pathlib(*args, **kwargs):
@@ -1615,7 +1617,7 @@
 
 def MeasureJSONDumpV2(python, options):
     bm_path = Relative("performance/bm_json_v2.py")
-    return MeasureGeneric(python, options, bm_path, iteration_scaling=1)
+    return MeasureGeneric(python, options, bm_path)
 
 def BM_JSON_Dump_V2(*args, **kwargs):
     return SimpleBenchmark(MeasureJSONDumpV2, *args, **kwargs)
@@ -1639,7 +1641,7 @@
 
 def MeasureChaos(python, options):
     bm_path = Relative("performance/bm_chaos.py")
-    return MeasureGeneric(python, options, bm_path, iteration_scaling=1)
+    return MeasureGeneric(python, options, bm_path)
 
 def BM_Chaos(*args, **kwargs):
     return SimpleBenchmark(MeasureChaos, *args, **kwargs)
@@ -1647,7 +1649,7 @@
 
 def MeasureFannkuch(python, options):
     bm_path = Relative("performance/bm_fannkuch.py")
-    return MeasureGeneric(python, options, bm_path, iteration_scaling=1)
+    return MeasureGeneric(python, options, bm_path)
 
 def BM_Fannkuch(*args, **kwargs):
     return SimpleBenchmark(MeasureFannkuch, *args, **kwargs)
@@ -1655,7 +1657,7 @@
 
 def MeasureGo(python, options):
     bm_path = Relative("performance/bm_go.py")
-    return MeasureGeneric(python, options, bm_path, iteration_scaling=1)
+    return MeasureGeneric(python, options, bm_path)
 
 def BM_Go(*args, **kwargs):
     return SimpleBenchmark(MeasureGo, *args, **kwargs)
@@ -1663,7 +1665,7 @@
 
 def MeasureMeteorContest(python, options):
     bm_path = Relative("performance/bm_meteor_contest.py")
-    return MeasureGeneric(python, options, bm_path, iteration_scaling=1)
+    return MeasureGeneric(python, options, bm_path)
 
 def BM_Meteor_Contest(*args, **kwargs):
     return SimpleBenchmark(MeasureMeteorContest, *args, **kwargs)
@@ -1671,7 +1673,7 @@
 
 def MeasureSpectralNorm(python, options):
     bm_path = Relative("performance/bm_spectral_norm.py")
-    return MeasureGeneric(python, options, bm_path, iteration_scaling=1)
+    return MeasureGeneric(python, options, bm_path)
 
 def BM_Spectral_Norm(*args, **kwargs):
     return SimpleBenchmark(MeasureSpectralNorm, *args, **kwargs)
@@ -1679,7 +1681,7 @@
 
 def MeasureTelco(python, options):
     bm_path = Relative("performance/bm_telco.py")
-    return MeasureGeneric(python, options, bm_path, iteration_scaling=1)
+    return MeasureGeneric(python, options, bm_path)
 
 def BM_Telco(*args, **kwargs):
     return SimpleBenchmark(MeasureTelco, *args, **kwargs)
@@ -1687,7 +1689,7 @@
 
 def MeasureHexiom2(python, options):
     bm_path = Relative("performance/bm_hexiom2.py")
-    return MeasureGeneric(python, options, bm_path, iteration_scaling=1)
+    return MeasureGeneric(python, options, bm_path, iteration_scaling=0.04)
 
 def BM_Hexiom2(*args, **kwargs):
     return SimpleBenchmark(MeasureHexiom2, *args, **kwargs)
@@ -1695,7 +1697,7 @@
 
 def MeasureRaytrace(python, options):
     bm_path = Relative("performance/bm_raytrace.py")
-    return MeasureGeneric(python, options, bm_path, iteration_scaling=1)
+    return MeasureGeneric(python, options, bm_path)
 
 def BM_Raytrace(*args, **kwargs):
     return SimpleBenchmark(MeasureRaytrace, *args, **kwargs)
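
Assuming the max(1, int(...)) rounding sketched above, the new scaling
values work out as follows:

    >>> max(1, int(50 * 0.04))  # hexiom2, default run: two full solves
    2
    >>> max(1, int(5 * 0.04))   # hexiom2 under -f: still one measured solve
    1
    >>> max(1, int(50 * 10))    # mako_v2 and pathlib: 500 quick trials
    500

The pathlib hunk keeps its value at 10 and only rewraps the call, and
(assuming iteration_scaling defaults to 1) dropping the explicit argument
elsewhere is behavior-preserving, so the only behavioral changes in perf.py
are mako_v2 (5 -> 10) and hexiom2 (1 -> 0.04).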
diff --git a/performance/bm_hexiom2.py b/performance/bm_hexiom2.py
--- a/performance/bm_hexiom2.py
+++ b/performance/bm_hexiom2.py
@@ -523,10 +523,9 @@
     # with the default n=50 from runner.py, this means twice.
     l = []
     for i in xrange(n):
-        if (i % 25) == 0:
-            t0 = time.time()
-            run_level36()
-            time_elapsed = time.time() - t0
+        t0 = time.time()
+        run_level36()
+        time_elapsed = time.time() - t0
         l.append(time_elapsed)
     return l
 
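The bm_hexiom2.py hunk fixes the measurement loop itself. Previously
run_level36() was only re-timed when i % 25 == 0, yet l.append() ran on
every iteration, so with the default n=50 the returned list held 50 entries
built from just two real measurements; under -f, with its smaller trial
count, a single measurement was presumably repeated. Repeating one value
gives the sample near-zero variance, which plausibly explains the spurious
perf differences the summary mentions when an interpreter is compared
against itself. The fix times every iteration and moves the "run the solver
rarely" intent into perf.py's iteration_scaling=0.04. A condensed sketch of
the fixed loop (Python 2, matching the xrange in the source; the bench name
is illustrative):

    import time

    def bench(n, run_level36):
        l = []
        for _ in xrange(n):
            t0 = time.time()
            run_level36()               # one full solve per trial
            l.append(time.time() - t0)  # every trial is actually measured
        return l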

-- 
Repository URL: http://hg.python.org/benchmarks

