[Python-checkins] benchmarks: Port PyPy's fannkuch benchmark.

brett.cannon python-checkins at python.org
Sat Sep 15 00:12:09 CEST 2012


http://hg.python.org/benchmarks/rev/5ebfddd6a81e
changeset:   167:5ebfddd6a81e
user:        Brett Cannon <brett at python.org>
date:        Fri Sep 14 10:49:27 2012 -0400
summary:
  Port PyPy's fannkuch benchmark.

files:
  perf.py                    |  10 +++-
  performance/bm_fannkuch.py |  72 ++++++++++++++++++++++++++
  2 files changed, 81 insertions(+), 1 deletions(-)


diff --git a/perf.py b/perf.py
--- a/perf.py
+++ b/perf.py
@@ -1626,6 +1626,17 @@
     return SimpleBenchmark(MeasureChaos, *args, **kwargs)
 
 
+def MeasureFannkuch(python, options):
+    """Benchmark performance/bm_fannkuch.py under `python`; returns raw timings."""
+    bm_path = Relative("performance/bm_fannkuch.py")
+    return MeasureGeneric(python, options, bm_path, iteration_scaling=1)
+
+def BM_Fannkuch(*args, **kwargs):
+    # Fixed: this originally wrapped MeasureChaos (a copy/paste slip), which
+    # made the "fannkuch" benchmark silently measure bm_chaos instead.
+    return SimpleBenchmark(MeasureFannkuch, *args, **kwargs)
+
+
 def MeasureLogging(python, options, extra_args):
     """Test the performance of Python's logging module.
 
@@ -2016,7 +2024,7 @@
                         "json_dump", "json_load", "regex", "threading",
                         "nqueens", "unpack_sequence", "richards",
                         "logging", "normal_startup", "startup_nosite",
-                        "pathlib"],
+                        "pathlib", "fannkuch"],
                 # After 2to3-conversion
                 "py3k": ["2to3", "2n3", "mako"]
                }
diff --git a/performance/bm_fannkuch.py b/performance/bm_fannkuch.py
new file mode 100644
--- /dev/null
+++ b/performance/bm_fannkuch.py
@@ -0,0 +1,85 @@
+# -*- coding: utf-8 -*-
+# The Computer Language Benchmarks Game
+# http://shootout.alioth.debian.org/
+#
+# contributed by Sokolov Yura
+# modified by Tupteq
+
+from compat import xrange  # NOTE(review): unused — range is used throughout
+import optparse
+import time
+import util
+
+def fannkuch(n):
+    """Return the maximum number of prefix reversals ("flips") observed
+    over the permutations of 0..n-1 (the fannkuch-redux kernel).
+
+    Each candidate permutation is flipped by reversing its leading
+    perm[0]+1 elements until element 0 is in front; the largest flip
+    count seen is returned.
+    """
+    count = list(range(1, n+1))
+    max_flips = 0
+    m = n-1
+    r = n
+    check = 0
+    perm1 = list(range(n))
+    perm = list(range(n))
+    # Bind bound methods to locals: they run on every permutation step.
+    perm1_ins = perm1.insert
+    perm1_pop = perm1.pop
+
+    while 1:
+        if check < 30:
+            #print "".join(str(i+1) for i in perm1)
+            check += 1
+
+        while r != 1:
+            count[r-1] = r
+            r -= 1
+
+        # Pruning from the reference implementation: a permutation starting
+        # with 0 needs no flips; one with n-1 fixed last is skipped as well.
+        if perm1[0] != 0 and perm1[m] != m:
+            perm = perm1[:]
+            flips_count = 0
+            k = perm[0]
+            while k:
+                perm[:k+1] = perm[k::-1]  # reverse the leading k+1 elements
+                flips_count += 1
+                k = perm[0]
+
+            if flips_count > max_flips:
+                max_flips = flips_count
+
+        # Generate the next permutation; the while-else fires only once
+        # every permutation has been visited, ending the benchmark.
+        while r != n:
+            perm1_ins(r, perm1_pop(0))
+            count[r] -= 1
+            if count[r] > 0:
+                break
+            r += 1
+        else:
+            return max_flips
+
+DEFAULT_ARG = 9
+
+def main(n):
+    """Time n runs of fannkuch(DEFAULT_ARG); return the list of timings."""
+    times = []
+    for i in range(n):
+        t0 = time.time()
+        fannkuch(DEFAULT_ARG)
+        tk = time.time()
+        times.append(tk - t0)
+    return times
+
+if __name__ == "__main__":
+    parser = optparse.OptionParser(
+        usage="%prog [options]",
+        # Fixed copy/paste: this previously said "Float benchmark".
+        description="Test the performance of the Fannkuch benchmark")
+    util.add_standard_options_to(parser)
+    options, args = parser.parse_args()
+
+    util.run_benchmark(options, options.num_runs, main)

-- 
Repository URL: http://hg.python.org/benchmarks


More information about the Python-checkins mailing list