[pypy-svn] r73918 - in pypy/benchmarks: . test
tobami at codespeak.net
tobami at codespeak.net
Tue Apr 20 19:15:59 CEST 2010
Author: tobami
Date: Tue Apr 20 19:15:58 2010
New Revision: 73918
Modified:
pypy/benchmarks/saveresults.py
pypy/benchmarks/test/test_saveresults.py
Log:
changes to accommodate the new codespeed DB Schema
Modified: pypy/benchmarks/saveresults.py
==============================================================================
--- pypy/benchmarks/saveresults.py (original)
+++ pypy/benchmarks/saveresults.py Tue Apr 20 19:15:58 2010
@@ -1,4 +1,8 @@
# -*- coding: utf-8 -*-
+#######################################################
+# This script saves result data #
+# It expects the format of unladen swallow's perf.py #
+#######################################################
import urllib, urllib2
from datetime import datetime
@@ -10,9 +14,7 @@
#Parse data
data = {}
current_date = datetime.today()
- if branch == "":
- print("ERROR: No branch defined")
- return 1
+ if branch == "": branch = 'trunk'
for b in results:
bench_name = b[0]
@@ -27,16 +29,18 @@
print("ERROR: result type unknown " + b[1])
return 1
data = {
- 'revision_number': revision,
- 'revision_project': project,
- 'revision_branch': branch,
- 'interpreter_name': interpreter,
- 'interpreter_coptions': int_options,
- 'benchmark_name': bench_name,
+ 'commitid': revision,
+ 'project': project,
+ 'branch': branch,
+ 'executable_name': interpreter,
+ 'executable_coptions': int_options,
+ 'benchmark': bench_name,
'environment': host,
'result_value': value,
'result_date': current_date,
}
+ if res_type == "ComparisonResult":
+ data['std_dev'] = results['std_changed']
if testing: testparams.append(data)
else: send(data)
if testing: return testparams
@@ -47,8 +51,8 @@
params = urllib.urlencode(data)
f = None
response = "None"
- info = str(datetime.today()) + ": Saving result for " + data['interpreter_name'] + " revision "
- info += str(data['revision_number']) + ", benchmark " + data['benchmark_name']
+ info = str(datetime.today()) + ": Saving result for " + data['executable_name'] + " revision "
+ info += str(data['commitid']) + ", benchmark " + data['benchmark']
print(info)
try:
f = urllib2.urlopen(SPEEDURL + 'result/add/', params)
@@ -62,4 +66,7 @@
response = '\n The server couldn\'t fulfill the request\n'
response += ' Error code: ' + str(e)
print("Server (%s) response: %s\n" % (SPEEDURL, response))
- return 1
+ return 1
+ print "saved correctly!\n"
+ return 0
+
Modified: pypy/benchmarks/test/test_saveresults.py
==============================================================================
--- pypy/benchmarks/test/test_saveresults.py (original)
+++ pypy/benchmarks/test/test_saveresults.py Tue Apr 20 19:15:58 2010
@@ -23,16 +23,16 @@
def test_good_input(self):
'''Given correct result data, check that every result being saved has the right parameters'''
for resultparams in saveresults.save("pypy", 71212, self.fixture, "", "experimental", "pypy-c-jit", "gc=hybrid", 'host', True):
- assert resultparams['revision_project'] == "pypy"
- assert resultparams['revision_number'] == 71212
- assert resultparams['revision_branch'] == "experimental"
- assert resultparams['interpreter_name'] == "pypy-c-jit"
- assert resultparams['interpreter_coptions'] == "gc=hybrid"
+ assert resultparams['project'] == "pypy"
+ assert resultparams['commitid'] == 71212
+ assert resultparams['branch'] == "experimental"
+ assert resultparams['executable_name'] == "pypy-c-jit"
+ assert resultparams['executable_coptions'] == "gc=hybrid"
# get dict with correct data for this benchmark
fixturedata = []
benchfound = False
for res in self.fixture:
- if res[0] == resultparams['benchmark_name']:
+ if res[0] == resultparams['benchmark']:
fixturedata = res
benchfound = True
break
More information about the Pypy-commit
mailing list