[Python-checkins] benchmarks: Port PyPy's json_bench benchmark. Currently only includes a new version of json_dump and not json_load.
brett.cannon
python-checkins at python.org
Sat Sep 15 00:12:28 CEST 2012
http://hg.python.org/benchmarks/rev/96c9c9ffb2ef
changeset: 175:96c9c9ffb2ef
user: Brett Cannon <brett at python.org>
date: Fri Sep 14 13:33:04 2012 -0400
summary:
Port PyPy's json_bench benchmark. Currently only includes a new version of json_dump and not json_load.
files:
perf.py | 12 ++++++-
performance/bm_json_v2.py | 39 +++++++++++++++++++++++++++
2 files changed, 49 insertions(+), 2 deletions(-)
diff --git a/perf.py b/perf.py
--- a/perf.py
+++ b/perf.py
@@ -1612,6 +1612,14 @@
args = ["json_load"]
return _JSONBenchmark(base_python, changed_python, options, args)
+
def MeasureJSONDumpV2(python, options):
    """Benchmark json.dumps via performance/bm_json_v2.py.

    Thin wrapper: resolves the benchmark script's path and delegates to
    MeasureGeneric with no iteration scaling.
    """
    script = Relative("performance/bm_json_v2.py")
    return MeasureGeneric(python, options, script, iteration_scaling=1)
+
def BM_JSON_Dump_V2(*args, **kwargs):
    """Benchmark entry point for the v2 json_dump benchmark."""
    measure = MeasureJSONDumpV2
    return SimpleBenchmark(measure, *args, **kwargs)
+
def MeasureNQueens(python, options):
"""Test the performance of an N-Queens solver.
@@ -2047,7 +2055,7 @@
"threading": ["threaded_count", "iterative_count"],
"serialize": ["slowpickle", "slowunpickle",
"fastpickle", "fastunpickle",
- "json_dump", "json_load"],
+ "json_dump_v2", "json_load"],
"apps": ["2to3", "html5lib", "rietveld", "spambayes"],
"calls": ["call_simple", "call_method", "call_method_slots",
"call_method_unknown"],
@@ -2056,7 +2064,7 @@
"logging": ["silent_logging", "simple_logging", "formatted_logging"],
# Benchmarks natively 2.x- and 3.x-compatible
"2n3": ["calls", "chaos", "fannkuch", "fastpickle",
- "fastunpickle", "json_dump", "json_load", "math",
+ "fastunpickle", "json_dump_v2", "json_load", "math",
"logging", "meteor_contest", "normal_startup",
"nqueens", "pathlib", "regex", "spectral_norm",
"startup_nosite", "richards", "threading",
diff --git a/performance/bm_json_v2.py b/performance/bm_json_v2.py
new file mode 100644
--- /dev/null
+++ b/performance/bm_json_v2.py
@@ -0,0 +1,39 @@
+import time
+import json
+
+from compat import u_lit, xrange
+
# execution runtime per test case
# NOTE(review): TARGET_RUNTIME is not referenced anywhere in this file —
# presumably carried over from the PyPy original; confirm before removing.
TARGET_RUNTIME = 10

# Each case is a (payload, repetitions) pair; heavier payloads get fewer
# repetitions so every case does comparable total work.
EMPTY = ({}, 200000)
SIMPLE_DATA = {
    'key1': 0,
    'key2': True,
    'key3': 'value',
    'key4': 'foo',
    'key5': 'string',
}
SIMPLE = (SIMPLE_DATA, 100000)
NESTED_DATA = {
    'key1': 0,
    'key2': SIMPLE_DATA,
    'key3': 'value',
    'key4': SIMPLE_DATA,
    'key5': SIMPLE_DATA,
    u_lit('key'): u_lit('\u0105\u0107\u017c'),
}
NESTED = (NESTED_DATA, 100000)
HUGE = ([NESTED_DATA] * 1000, 100)

# Names of the case tuples above, looked up via globals() in main().
cases = ['EMPTY', 'SIMPLE', 'NESTED', 'HUGE']
+
def main(n):
    """Run the json.dumps benchmark *n* times.

    Each iteration serializes every dataset named in ``cases`` the
    configured number of repetitions and records the iteration's
    wall-clock time.

    Returns a list of n per-iteration timings in seconds.
    """
    times = []
    for _ in xrange(n):
        t0 = time.time()
        for case in cases:
            data, count = globals()[case]
            # Fix: the original reused loop variable ``i`` here, shadowing
            # the outer iteration counter; use a distinct throwaway name.
            for _ in xrange(count):
                json.dumps(data)
        times.append(time.time() - t0)
    return times
+
if __name__ == '__main__':
    import optparse
    import util

    parser = optparse.OptionParser(
        usage="%prog [options]",
        description="Test the performance of the JSON benchmark")
    util.add_standard_options_to(parser)
    # NOTE(review): positional args are parsed but never used.
    options, args = parser.parse_args()

    util.run_benchmark(options, options.num_runs, main)
--
Repository URL: http://hg.python.org/benchmarks
More information about the Python-checkins
mailing list