[pypy-svn] r71129 - in pypy/benchmarks: . own

fijal at codespeak.net
Sat Feb 6 18:29:59 CET 2010


Author: fijal
Date: Sat Feb  6 18:29:57 2010
New Revision: 71129

Added:
   pypy/benchmarks/own/spitfire.py   (contents, props changed)
Modified:
   pypy/benchmarks/benchmarks.py
Log:
Add runs of the original version of spitfire, registered as two benchmarks:
spitfire (spitfire_o4) and spitfire_cstringio (python_cstringio).
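
The benchmarks.py change below splits _register_new_bm's single name argument
into a script name plus a benchmark name, so one script under own/ can back
several BM_* entries. A minimal standalone sketch of that pattern (the BM body
here is only a placeholder for the real MeasureGeneric/SimpleBenchmark wiring):

    import os

    def relative(*args):
        return os.path.join(os.path.dirname(os.path.abspath(__file__)), *args)

    def _register_new_bm(name, bm_name, d, **opts):
        # 'name' selects the script under own/, 'bm_name' names the exported
        # BM_* function, so the same script can be registered more than once.
        def BM(python, options):
            bm_path = relative('own', name + '.py')
            return (python, bm_path, opts)  # placeholder for the real measurement call
        BM.func_name = 'BM_' + bm_name      # Python 2 function attribute, as in benchmarks.py
        d[BM.func_name] = BM

    _register_new_bm('spitfire', 'spitfire', globals(),
                     extra_args=['--benchmark=spitfire_o4'])
    _register_new_bm('spitfire', 'spitfire_cstringio', globals(),
                     extra_args=['--benchmark=python_cstringio'])
    # globals() now holds BM_spitfire and BM_spitfire_cstringio, both running
    # own/spitfire.py but with different --benchmark flags.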


Modified: pypy/benchmarks/benchmarks.py
==============================================================================
--- pypy/benchmarks/benchmarks.py	(original)
+++ pypy/benchmarks/benchmarks.py	Sat Feb  6 18:29:57 2010
@@ -5,7 +5,7 @@
 def relative(*args):
     return os.path.join(os.path.dirname(os.path.abspath(__file__)), *args)
 
-def _register_new_bm(name, d, **opts):
+def _register_new_bm(name, bm_name, d, **opts):
     def Measure(python, options):
         bm_path = relative('own', name + '.py')
         return MeasureGeneric(python, options, bm_path, **opts)
@@ -13,7 +13,7 @@
 
     def BM(*args, **kwds):
         return SimpleBenchmark(Measure, *args, **kwds)
-    BM.func_name = 'BM_' + name
+    BM.func_name = 'BM_' + bm_name
 
     d[BM.func_name] = BM
 
@@ -23,4 +23,9 @@
 
 for name in ['float', 'nbody_modified', 'meteor-contest', 'fannkuch',
              'spectral-norm', 'chaos', 'telco', 'gcbench']:
-    _register_new_bm(name, globals(), **opts.get(name, {}))
+    _register_new_bm(name, name, globals(), **opts.get(name, {}))
+_register_new_bm('spitfire', 'spitfire', globals(),
+    extra_args=['--benchmark=spitfire_o4'])
+_register_new_bm('spitfire', 'spitfire_cstringio', globals(),
+    extra_args=['--benchmark=python_cstringio'])
+

Added: pypy/benchmarks/own/spitfire.py
==============================================================================
--- (empty file)
+++ pypy/benchmarks/own/spitfire.py	Sat Feb  6 18:29:57 2010
@@ -0,0 +1,43 @@
+
+import sys
+import os
+import util
+import time
+import optparse
+from StringIO import StringIO
+
+def relative(*args):
+    return os.path.join(os.path.dirname(os.path.abspath(__file__)), *args)
+
+class FakePsyco(object):
+    def bind(self, *args, **kwargs):
+        pass
+sys.modules["psyco"] = FakePsyco()
+
+testdir = relative('..', 'unladen_swallow', 'lib', 'spitfire', 'tests', 'perf')
+sys.path.insert(0, testdir)
+sys.path.insert(0, relative('..', 'unladen_swallow', 'lib', 'spitfire'))
+import bigtable
+# bummer, timeit module is stupid (it pulls the test functions from __main__, hence this import)
+from bigtable import test_python_cstringio, test_spitfire_o4, test_spitfire
+
+def runtest(n, benchmark):
+    times = []
+    for i in range(n):
+        sys.stdout = StringIO()
+        bigtable.run([benchmark], 100)
+        times.append(float(sys.stdout.getvalue().split(" ")[-2]))
+        sys.stdout = sys.__stdout__
+    return times
+
+if __name__ == '__main__':
+    parser = optparse.OptionParser(
+        usage="%prog [options]",
+        description="Test the performance of the spitfire benchmark")
+    parser.add_option('--benchmark', type="choice",
+                      choices=['python_cstringio', 'spitfire_o4'],
+                      default="spitfire_o4",
+                      help="choose between cstringio and spitfire_o4")
+    util.add_standard_options_to(parser)
+    options, args = parser.parse_args(sys.argv)
+    util.run_benchmark(options, options.num_runs, runtest, options.benchmark)
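
For reference, runtest() above captures bigtable.run's printed summary and
assumes the elapsed time is the second-to-last whitespace-separated token of
that output. A standalone sketch of the same capture-and-parse pattern, with a
made-up print standing in for bigtable's output:

    import sys
    from StringIO import StringIO

    def timed_call():
        # stand-in for bigtable.run: prints a summary ending in "<secs> secs"
        print "ran 100 rows in 1.234 secs"

    sys.stdout = StringIO()
    try:
        timed_call()
        elapsed = float(sys.stdout.getvalue().split(" ")[-2])
    finally:
        sys.stdout = sys.__stdout__
    print elapsed  # 1.234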


