[pypy-svn] r38664 - pypy/dist/pypy/translator/benchmark
mwh at codespeak.net
Tue Feb 13 10:46:00 CET 2007
Author: mwh
Date: Tue Feb 13 10:46:00 2007
New Revision: 38664
Modified:
pypy/dist/pypy/translator/benchmark/bench-custom.py
pypy/dist/pypy/translator/benchmark/result.py
Log:
tweaks, including actually saving the results in the nominated pickle file.
Modified: pypy/dist/pypy/translator/benchmark/bench-custom.py
==============================================================================
--- pypy/dist/pypy/translator/benchmark/bench-custom.py (original)
+++ pypy/dist/pypy/translator/benchmark/bench-custom.py Tue Feb 13 10:46:00 2007
@@ -35,16 +35,24 @@
refs = {}
- exes = full_pythons+exes
+ exes = full_pythons + exes
for i in range(int(options.runcount)):
- for exe in full_pythons+exes:
+ for exe in exes:
for b in benchmarks:
benchmark_result.result(exe).run_benchmark(b, verbose=True)
- stats = ['stat:st_mtime', 'exe_name', 'bench:richards', 'pypy_rev', 'bench:pystone']
+ pickle.dump(benchmark_result, open(options.picklefile, 'wb'))
+
+ stats = ['stat:st_mtime', 'exe_name', 'pypy_rev']
+ for b in benchmarks:
+ stats.append('bench:'+b.name)
+ if options.relto:
+ relto = options.relto
+ else:
+ relto = full_pythons[0]
for row in benchmark_result.txt_summary(stats,
- relto=full_pythons[0],
+ relto=relto,
filteron=lambda r: r.exe_name in exes):
print row
@@ -63,5 +71,9 @@
'--runcount', dest='runcount',
default='1',
)
+ parser.add_option(
+ '--relto', dest='relto',
+ default=None,
+ )
options, args = parser.parse_args(sys.argv[1:])
main(options, args)
Modified: pypy/dist/pypy/translator/benchmark/result.py
==============================================================================
--- pypy/dist/pypy/translator/benchmark/result.py (original)
+++ pypy/dist/pypy/translator/benchmark/result.py Tue Feb 13 10:46:00 2007
@@ -84,8 +84,11 @@
if self.run_counts.get(benchmark.name, 0) > self.max_results:
return
if verbose:
- print 'running', benchmark.name, 'for', self.exe_name
+ print 'running', benchmark.name, 'for', self.exe_name,
+ sys.stdout.flush()
new_result = benchmark.run(self.exe_name)
+ if verbose:
+ print new_result
self.benchmarks.setdefault(benchmark.name, []).append(new_result)
if benchmark.name in self.best_benchmarks:
old_result = self.best_benchmarks[benchmark.name]
@@ -124,10 +127,11 @@
return time.ctime(statvalue), -1
elif stat == 'exe_name':
return os.path.basename(statvalue), -1
- elif stat == 'bench:richards':
- return "%8.2f%s"%(statvalue, 'ms'), 1
- elif stat == 'bench:pystone':
- return "%8.2f"%(statvalue,), 1
+ elif stat.startswith('bench:'):
+ from pypy.translator.benchmark import benchmarks
+ statkind, statdetail = stat.split(':', 1)
+ b = benchmarks.BENCHMARKS_BY_NAME[statdetail]
+ return "%8.2f%s"%(statvalue, b.units), 1
elif stat == 'pypy_rev':
return str(statvalue), 1
else:
More information about the Pypy-commit mailing list