[pypy-svn] r71560 - in pypy/benchmarks: . test

fijal at codespeak.net
Sun Feb 28 19:21:59 CET 2010


Author: fijal
Date: Sun Feb 28 19:21:58 2010
New Revision: 71560

Added:
   pypy/benchmarks/saveresults.py   (contents, props changed)
   pypy/benchmarks/test/test_saveresults.py   (contents, props changed)
Modified:
   pypy/benchmarks/runner.py
Log:
Add the ability to save results to speed.pypy.org


Modified: pypy/benchmarks/runner.py
==============================================================================
--- pypy/benchmarks/runner.py	(original)
+++ pypy/benchmarks/runner.py	Sun Feb 28 19:21:58 2010
@@ -7,9 +7,11 @@
 import sys
 from unladen_swallow import perf
 import benchmarks
-
+import socket
+
 def run_and_store(benchmark_set, result_filename, pypy_c_path, revision=0,
-                  options='', branch='trunk', args=''):
+                  options='', branch='trunk', args='', upload=False,
+                  force_host=None):
     funcs = perf.BENCH_FUNCS.copy()
     funcs.update(perf._FindAllBenchmarks(benchmarks.__dict__))
     opts = ['-f', '-b', ','.join(benchmark_set), '--inherit_env=PATH',
@@ -28,6 +30,18 @@
         'branch'  : branch,
         }))
     f.close()
+    if upload:
+        from saveresults import save
+        if "--jit threshold" in options:
+            name = "pypy-c"
+        else:
+            name = "pypy-c-jit"
+        if force_host is not None:
+            host = force_host
+        else:
+            host = socket.gethostname()
+        save('pypy', revision, res, options, branch, name, "gc=hybrid", host)
 
 BENCHMARK_SET = ['richards', 'slowspitfire', 'django', 'spambayes',
                  'rietveld', 'html5lib', 'ai']
@@ -65,13 +78,18 @@
                             " python, and the arguments after are passed to the"
                             " changed python. If there's no comma, the same"
                             " options are passed to both."))
+    parser.add_option("--upload", default=False, action="store_true",
+                      help="Upload results to speed.pypy.org")
+    parser.add_option("--force-host", default=None, action="store",
+                      help="Force the hostname")
     options, args = parser.parse_args(argv)
     benchmarks = options.benchmarks.split(',')
     for benchmark in benchmarks:
         if benchmark not in BENCHMARK_SET:
             raise WrongBenchmark(benchmark)
     run_and_store(benchmarks, options.output_filename, options.pypy_c,
-                  options.revision, args=options.args)
+                  options.revision, args=options.args, upload=options.upload,
+                  force_host=options.force_host)
 
 if __name__ == '__main__':
     main(sys.argv[1:])
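
At the Python level the new arguments thread straight through run_and_store();
a minimal usage sketch, assuming it is run from the benchmarks directory
(paths, revision and host name below are made up):

    from runner import run_and_store

    # run two benchmarks and upload the results, reporting them under a
    # fixed host name instead of the local socket.gethostname()
    run_and_store(['richards', 'ai'], 'result.json', '/tmp/pypy-c',
                  revision=71560, upload=True, force_host='bench-box')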

Added: pypy/benchmarks/saveresults.py
==============================================================================
--- (empty file)
+++ pypy/benchmarks/saveresults.py	Sun Feb 28 19:21:58 2010
@@ -0,0 +1,64 @@
+# -*- coding: utf-8 -*-
+import urllib, urllib2
+from datetime import datetime
+
+SPEEDURL = "http://speed.pypy.org/"
+
+def save(project, revision, results, options, branch, interpreter,
+         int_options, host, testing=False):
+    testparams = []
+    # parse the benchmark results into payload dicts
+    data = {}
+    current_date = datetime.today()
+    if branch != "" and branch != "trunk":
+        interpreter = branch
+        int_options = ""
+
+    for b in results:
+        bench_name = b[0]
+        res_type = b[1]
+        bench_data = b[2]
+        value = 0
+        if res_type == "SimpleComparisonResult":
+            value = bench_data['changed_time']
+        elif res_type == "ComparisonResult":
+            value = bench_data['avg_changed']
+        else:
+            print("ERROR: unknown result type " + res_type)
+            return 1
+        data = {
+            'revision_number': revision,
+            'revision_project': project,
+            'interpreter_name': interpreter,
+            'interpreter_coptions': int_options,
+            'benchmark_name': bench_name,
+            'environment': host,
+            'result_value': value,
+            'result_date': current_date,
+        }
+        if testing: testparams.append(data)
+        else: send(data)
+    if testing: return testparams
+    else: return 0
+
+def send(data):
+    # POST a single result record to the speed site
+    params = urllib.urlencode(data)
+    f = None
+    response = "None"
+    info = str(datetime.today()) + ": Saving result for " + data['interpreter_name'] + " revision "
+    info += str(data['revision_number']) + ", benchmark " + data['benchmark_name']
+    print(info)
+    try:
+        f = urllib2.urlopen(SPEEDURL + 'result/add/', params)
+        response = f.read()
+        f.close()
+    except urllib2.URLError, e:
+        if hasattr(e, 'reason'):
+            response = '\n  We failed to reach a server\n'
+            response += '  Reason: ' + str(e.reason)
+        elif hasattr(e, 'code'):
+            response = '\n  The server couldn\'t fulfill the request\n'
+            response += '  Error code: ' + str(e.code)
+        print("Server (%s) response: %s\n" % (SPEEDURL, response))
+        return 1
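
Since save() takes a testing flag, the payloads it would POST can be inspected
without any network traffic; a minimal sketch (timings invented):

    from saveresults import save

    results = [['richards', 'SimpleComparisonResult',
                {'base_time': 2.0, 'changed_time': 0.5,
                 'time_delta': '4.0x faster'}]]
    # with testing=True the payload dicts are returned instead of sent
    payloads = save('pypy', 71560, results, '', 'trunk', 'pypy-c-jit',
                    'gc=hybrid', 'localhost', testing=True)
    assert payloads[0]['result_value'] == 0.5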

Added: pypy/benchmarks/test/test_saveresults.py
==============================================================================
--- (empty file)
+++ pypy/benchmarks/test/test_saveresults.py	Sun Feb 28 19:21:58 2010
@@ -0,0 +1,46 @@
+# -*- coding: utf-8 -*-
+import saveresults
+
+class TestSaveresults(object):
+    '''Tests Saveresults script for saving data to speed.pypy.org'''
+    fixture = [
+        ['ai', 'ComparisonResult', {'avg_base': 0.42950453758219992, 'timeline_link': None, 'avg_changed': 0.43322672843939997, 'min_base': 0.42631793022199999, 'delta_min': '1.0065x faster', 'delta_avg': '1.0087x slower', 'std_changed': 0.0094009621054567376, 'min_changed': 0.423564910889, 'delta_std': '2.7513x larger', 'std_base': 0.0034169249420902843, 't_msg': 'Not significant\n'}],
+        ['chaos', 'ComparisonResult', {'avg_base': 0.41804099082939999, 'timeline_link': None, 'avg_changed': 0.11744904518135998, 'min_base': 0.41700506210299998, 'delta_min': '9.0148x faster', 'delta_avg': '3.5593x faster', 'std_changed': 0.14350186143481433, 'min_changed': 0.046257972717299999, 'delta_std': '108.8162x larger', 'std_base': 0.0013187546718754512, 't_msg': 'Significant (t=4.683672, a=0.95)\n'}],
+        ['django', 'ComparisonResult', {'avg_base': 0.83651852607739996, 'timeline_link': None, 'avg_changed': 0.48571481704719999, 'min_base': 0.82990884780899998, 'delta_min': '1.7315x faster', 'delta_avg': '1.7222x faster', 'std_changed': 0.006386606999421761, 'min_changed': 0.47929787635799997, 'delta_std': '1.7229x smaller', 'std_base': 0.011003382690633789, 't_msg': 'Significant (t=61.655971, a=0.95)\n'}],
+        ['fannkuch', 'ComparisonResult', {'avg_base': 1.8561528205879998, 'timeline_link': None, 'avg_changed': 0.38401727676399999, 'min_base': 1.84801197052, 'delta_min': '5.0064x faster', 'delta_avg': '4.8335x faster', 'std_changed': 0.029594360755246251, 'min_changed': 0.36913013458299998, 'delta_std': '3.2353x larger', 'std_base': 0.0091472519207758066, 't_msg': 'Significant (t=106.269998, a=0.95)\n'}],
+        ['float', 'ComparisonResult', {'avg_base': 0.50523018836940004, 'timeline_link': None, 'avg_changed': 0.15490598678593998, 'min_base': 0.49911379814099999, 'delta_min': '6.2651x faster', 'delta_avg': '3.2615x faster', 'std_changed': 0.057739598339608837, 'min_changed': 0.079665899276699995, 'delta_std': '7.7119x larger', 'std_base': 0.007487037523761327, 't_msg': 'Significant (t=13.454285, a=0.95)\n'}],
+        ['gcbench', 'SimpleComparisonResult', {'base_time': 27.236408948899999, 'changed_time': 5.3500790595999996, 'time_delta': '5.0908x faster'}],
+        ['html5lib', 'SimpleComparisonResult', {'base_time': 11.666918992999999, 'changed_time': 12.6703209877, 'time_delta': '1.0860x slower'}],
+        ['richards', 'ComparisonResult', {'avg_base': 0.29083266258220003, 'timeline_link': None, 'avg_changed': 0.029299402236939998, 'min_base': 0.29025602340700002, 'delta_min': '10.7327x faster', 'delta_avg': '9.9262x faster', 'std_changed': 0.0033452973342946888, 'min_changed': 0.027044057846099999, 'delta_std': '5.6668x larger', 'std_base': 0.00059033067516221327, 't_msg': 'Significant (t=172.154488, a=0.95)\n'}],
+        ['rietveld', 'ComparisonResult', {'avg_base': 0.46909418106079998, 'timeline_link': None, 'avg_changed': 1.312631273269, 'min_base': 0.46490097045899997, 'delta_min': '2.1137x slower', 'delta_avg': '2.7982x slower', 'std_changed': 0.44401595627955542, 'min_changed': 0.98267102241500004, 'delta_std': '76.0238x larger', 'std_base': 0.0058404831974135556, 't_msg': 'Significant (t=-4.247692, a=0.95)\n'}],
+        ['slowspitfire', 'ComparisonResult', {'avg_base': 0.66740002632140005, 'timeline_link': None, 'avg_changed': 1.6204295635219998, 'min_base': 0.65965509414699997, 'delta_min': '1.9126x slower', 'delta_avg': '2.4280x slower', 'std_changed': 0.27415559151786589, 'min_changed': 1.26167798042, 'delta_std': '20.1860x larger', 'std_base': 0.013581457669479846, 't_msg': 'Significant (t=-7.763579, a=0.95)\n'}],
+        ['spambayes', 'ComparisonResult', {'avg_base': 0.279049730301, 'timeline_link': None, 'avg_changed': 1.0178018569945999, 'min_base': 0.27623891830399999, 'delta_min': '3.3032x slower', 'delta_avg': '3.6474x slower', 'std_changed': 0.064953583956645466, 'min_changed': 0.91246294975300002, 'delta_std': '28.9417x larger', 'std_base': 0.0022442880892229711, 't_msg': 'Significant (t=-25.416839, a=0.95)\n'}],
+        ['spectral-norm', 'ComparisonResult', {'avg_base': 0.48315834999099999, 'timeline_link': None, 'avg_changed': 0.066225481033300004, 'min_base': 0.476922035217, 'delta_min': '8.0344x faster', 'delta_avg': '7.2957x faster', 'std_changed': 0.013425108838933627, 'min_changed': 0.059360027313200003, 'delta_std': '1.9393x larger', 'std_base': 0.0069225510731835901, 't_msg': 'Significant (t=61.721418, a=0.95)\n'}],
+        ['spitfire', 'ComparisonResult', {'avg_base': 7.1179999999999994, 'timeline_link': None, 'avg_changed': 7.2780000000000005, 'min_base': 7.04, 'delta_min': '1.0072x faster', 'delta_avg': '1.0225x slower', 'std_changed': 0.30507376157250898, 'min_changed': 6.9900000000000002, 'delta_std': '3.4948x larger', 'std_base': 0.08729261137118062, 't_msg': 'Not significant\n'}],
+        ['twisted_iteration', 'SimpleComparisonResult', {'base_time': 0.148289627437, 'changed_time': 0.035354803126799998, 'time_delta': '4.1943x faster'}],
+        ['twisted_web', 'SimpleComparisonResult', {'base_time': 0.11312217194599999, 'changed_time': 0.625, 'time_delta': '5.5250x slower'}]
+    ]
+
+    def test_good_input(self):
+        '''Given correct result data, check that every result being saved has the right parameters'''
+        for resultparams in saveresults.save("pypy", 71212, self.fixture, "", "trunk", "pypy-c-jit", "gc=hybrid", 'host', True):
+            assert resultparams['revision_project'] == "pypy"
+            assert resultparams['revision_number'] == 71212
+            assert resultparams['interpreter_name'] == "pypy-c-jit"
+            assert resultparams['interpreter_coptions'] == "gc=hybrid"
+            # get dict with correct data for this benchmark
+            fixturedata = []
+            benchfound = False
+            for res in self.fixture:
+                if res[0] == resultparams['benchmark_name']:
+                    fixturedata = res
+                    benchfound = True
+                    break
+            assert benchfound
+            # get correct result value depending on the type of result
+            fixturevalue = 0
+            if fixturedata[1] == "SimpleComparisonResult":
+                fixturevalue = fixturedata[2]['changed_time']
+            else:
+                fixturevalue = fixturedata[2]['avg_changed']
+            assert resultparams['result_value'] == fixturevalue
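
For poking at the server side by hand, send() can be aimed at a local codespeed
instance instead of the production site (the URL and all field values below are
hypothetical):

    from datetime import datetime
    import saveresults

    saveresults.SPEEDURL = 'http://localhost:8000/'  # hypothetical local instance
    saveresults.send({'revision_number': 71560,
                      'revision_project': 'pypy',
                      'interpreter_name': 'pypy-c-jit',
                      'interpreter_coptions': 'gc=hybrid',
                      'benchmark_name': 'richards',
                      'environment': 'bench-box',
                      'result_value': 0.5,
                      'result_date': datetime.today()})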


