[Python-checkins] benchmarks: Port PyPy's spectral_norm benchmark.
brett.cannon
python-checkins at python.org
Sat Sep 15 00:12:15 CEST 2012
http://hg.python.org/benchmarks/rev/0d00cf856dc8
changeset: 171:0d00cf856dc8
user: Brett Cannon <brett at python.org>
date: Fri Sep 14 11:23:41 2012 -0400
summary:
Port PyPy's spectral_norm benchmark.
files:
perf.py | 18 ++++-
performance/bm_spectral_norm.py | 68 +++++++++++++++++++++
2 files changed, 81 insertions(+), 5 deletions(-)
diff --git a/perf.py b/perf.py
--- a/perf.py
+++ b/perf.py
@@ -1641,6 +1641,13 @@
def BM_Meteor_Contest(*args, **kwargs):
    # NOTE(review): this wrapper dispatches to MeasureChaos rather than a
    # meteor_contest-specific measure function — looks like a copy-paste
    # slip carried over from BM_Chaos; confirm against the rest of perf.py.
    return SimpleBenchmark(MeasureChaos, *args, **kwargs)
def MeasureSpectral_Norm(python, options):
    """Measure running the spectral_norm benchmark (bm_spectral_norm.py).

    Args:
        python: prefix of a command line for the Python binary.
        options: optparse.Values instance of benchmark-run options.

    Returns:
        Whatever MeasureGeneric returns (raw per-iteration timings).
    """
    bm_path = Relative("performance/bm_spectral_norm.py")
    return MeasureGeneric(python, options, bm_path, iteration_scaling=1)

# Backward-compatible alias: this measure function was originally committed
# under a name copy-pasted from the meteor_contest benchmark.
MeasureMeteor_Contest = MeasureSpectral_Norm


def BM_Spectral_Norm(*args, **kwargs):
    # Fix copy-paste error: benchmark the spectral_norm measure function,
    # not MeasureChaos.
    return SimpleBenchmark(MeasureSpectral_Norm, *args, **kwargs)
def MeasureLogging(python, options, extra_args):
"""Test the performance of Python's logging module.
@@ -2028,11 +2035,12 @@
"template" : ["slowspitfire", "django", "mako"],
"logging": ["silent_logging", "simple_logging", "formatted_logging"],
# Benchmarks natively 2.x- and 3.x-compatible
- "2n3": ["calls", "chaos", "math", "fastpickle", "fastunpickle",
- "json_dump", "json_load", "regex", "threading",
- "nqueens", "unpack_sequence", "richards",
- "logging", "normal_startup", "startup_nosite",
- "pathlib", "fannkuch", "meteor_contest"],
+ "2n3": ["calls", "chaos", "fannkuch", "fastpickle",
+ "fastunpickle", "json_dump", "json_load", "math",
+ "logging", "meteor_contest", "normal_startup",
+ "nqueens", "pathlib", "regex", "spectral_norm",
+ "startup_nosite", "richards", "threading",
+ "unpack_sequence"],
# After 2to3-conversion
"py3k": ["2to3", "2n3", "mako"]
}
diff --git a/performance/bm_spectral_norm.py b/performance/bm_spectral_norm.py
new file mode 100644
--- /dev/null
+++ b/performance/bm_spectral_norm.py
@@ -0,0 +1,68 @@
+# The Computer Language Benchmarks Game
+# http://shootout.alioth.debian.org/
+# Contributed by Sebastien Loisel
+# Fixed by Isaac Gouy
+# Sped up by Josh Goldfoot
+# Dirtily sped up by Simon Descarpentries
+# Concurrency by Jason Stitt
+
+from math import sqrt
+import time
+import itertools
+import optparse
+
+from compat import izip, xrange
+import util
+
def eval_A(i, j):
    """Return entry (i, j) of the infinite matrix A of the spectral-norm task.

    A[i][j] = 1 / ((i+j)(i+j+1)/2 + i + 1), i.e. the reciprocal of an
    entry laid out along the anti-diagonals.
    """
    diag = i + j
    return 1.0 / (diag * (diag + 1) / 2 + i + 1)
+
def eval_times_u(func, u):
    """Apply *func* to each pair (i, u) for i in range(len(u)).

    Returns a list — what Python 2's map() returned — so callers (e.g.
    main's final izip(u, v) pass) can iterate the result more than once.
    """
    # Materialize u once up front.  The previous len(list(u)) silently
    # exhausted iterator inputs under Python 3 (such as the lazy map object
    # produced by a chained call in eval_AtA_times_u), so func then saw an
    # empty, unsubscriptable vector.
    u = list(u)
    return [func((i, u)) for i in range(len(u))]
+
def eval_AtA_times_u(u):
    """Compute (A^T A) u — one step of the power method on A^T A."""
    Au = eval_times_u(part_A_times_u, u)
    return eval_times_u(part_At_times_u, Au)
+
def part_A_times_u(i_u):
    """Return component i of A*u, given the packed pair (i, u)."""
    row, vec = i_u
    return sum(eval_A(row, col) * elem for col, elem in enumerate(vec))
+
def part_At_times_u(i_u):
    """Return component i of A^T*u, given the packed pair (i, u)."""
    row, vec = i_u
    # Same as part_A_times_u but with the matrix indices transposed.
    return sum(eval_A(col, row) * elem for col, elem in enumerate(vec))
+
# Size of the u/v vectors (shootout workload size) for each timed iteration.
DEFAULT_N = 130
+
def main(n):
    """Run the spectral-norm workload *n* times; return per-run wall times."""
    timer = time.time  # hoist attribute lookup out of the timing loop
    run_times = []
    for _ in xrange(n):
        start = timer()
        u = [1] * DEFAULT_N

        # Ten power-method iterations on A^T A.
        for _unused in xrange(10):
            v = eval_AtA_times_u(u)
            u = eval_AtA_times_u(v)

        # Accumulate u.Bv and v.v (the shootout result would be
        # sqrt(vBv / vv)); the values are discarded — only timing matters.
        vBv = vv = 0
        for ue, ve in izip(u, v):
            vBv += ue * ve
            vv += ve * ve

        run_times.append(timer() - start)
    return run_times
+
if __name__ == "__main__":
    parser = optparse.OptionParser(
        usage="%prog [options]",
        # Fix copy-paste error: the description named the Float benchmark.
        description="Test the performance of the spectral norm benchmark")
    util.add_standard_options_to(parser)
    options, args = parser.parse_args()

    util.run_benchmark(options, options.num_runs, main)
--
Repository URL: http://hg.python.org/benchmarks
More information about the Python-checkins
mailing list