[Python-checkins] benchmarks: Issue #17317: Have the benchmark count from -h properly reflect the total benchmarks per target.
brett.cannon
python-checkins at python.org
Mon Mar 25 22:29:32 CET 2013
http://hg.python.org/benchmarks/rev/3be9e07a2df4
changeset: 194:3be9e07a2df4
user: Brett Cannon <brett at python.org>
date: Mon Mar 25 17:29:26 2013 -0400
summary:
Issue #17317: Have the benchmark count from -h properly reflect the
total benchmarks per target.
Initial patch by Anuj Gupta.
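Background: before this change, -h reported len(group) for each benchmark
group, which miscounts groups whose members are themselves groups (the
"py3k" group, for instance, lists "2n3", which is itself a group name).
The fix expands each group down to its leaf benchmarks before counting.
A minimal, self-contained sketch of that idea (the group table below is
illustrative, not the repository's full BENCH_GROUPS):

    def expand(name, groups):
        # Depth-first expansion: a name that is not a group key is a
        # leaf benchmark and is yielded as-is.
        if name in groups:
            for member in groups[name]:
                for leaf in expand(member, groups):
                    yield leaf
        else:
            yield name

    groups = {"py3k": ["2to3", "2n3"], "2n3": ["django", "nbody"]}
    counts = dict((g, sum(1 for _ in expand(g, groups))) for g in groups)
    assert counts["py3k"] == 3  # 2to3, django, nbody; len() would say 2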
files:
  perf.py      |  20 ++++++++++++++++----
  test_perf.py |  35 ++++++++++++++++++++++++-----------
  2 files changed, 40 insertions(+), 15 deletions(-)
diff --git a/perf.py b/perf.py
--- a/perf.py
+++ b/perf.py
@@ -2109,7 +2109,7 @@
"regex", "richards", "spectral_norm", "startup_nosite",
"telco", "threading", "unpack_sequence"],
# After 2to3-conversion
- "py3k": ["2to3", "2n3", "chameleon", "mako_v2"]
+ "py3k": ["2to3", "2n3", "chameleon", "mako_v2"],
}
SLOW_BENCHMARKS = ["hexiom2"]
@@ -2138,6 +2138,7 @@
Args:
benchmarks_opt: the string passed to the -b option on the command line.
+ bench_groups: the collection of benchmark groups to pull from
Returns:
A set() of the names of the benchmarks to run.
@@ -2215,19 +2216,30 @@
parser.values.output_style = value
-def main(argv, bench_funcs=BENCH_FUNCS, bench_groups=BENCH_GROUPS):
+def CreateBenchGroups(bench_funcs=BENCH_FUNCS, bench_groups=BENCH_GROUPS):
bench_groups = bench_groups.copy()
all_benchmarks = bench_funcs.keys()
bench_groups["all"] = all_benchmarks
+ return bench_groups
+
+
+def main(argv, bench_funcs=BENCH_FUNCS, bench_groups=BENCH_GROUPS):
+ bench_groups = CreateBenchGroups(bench_funcs, bench_groups)
+
+ # Calculate the lengths of expanded benchmark names for all groups
+ bench_counts = {}
+ for name in bench_groups:
+ bench_counts[name] = sum(1 for _ in
+ _ExpandBenchmarkName(name, bench_groups))
# Prettify the displayed benchmark list: first the benchmark groups by
# decreasing number of benches, then individual benchmarks by
# lexicographic order.
pretty_benchmarks = ["%s(%d)" % (name, nbenchs)
for nbenchs, name in sorted(
- ((len(v), k) for (k, v) in bench_groups.items()),
+ ((v, k) for (k, v) in bench_counts.items()),
reverse=True)]
- pretty_benchmarks.extend(sorted(all_benchmarks))
+ pretty_benchmarks.extend(sorted(bench_groups["all"]))
parser = optparse.OptionParser(
usage="%prog [options] baseline_python changed_python",
diff --git a/test_perf.py b/test_perf.py
--- a/test_perf.py
+++ b/test_perf.py
@@ -112,36 +112,49 @@
def testParseBenchmarksOption(self):
# perf.py, no -b option.
- should_run = perf.ParseBenchmarksOption("")
- self.assertEqual(should_run, set(["2to3", "django", "slowpickle",
- "slowspitfire", "slowunpickle"]))
+ bench_groups = perf.CreateBenchGroups()
+ should_run = perf.ParseBenchmarksOption("", bench_groups)
+ self.assertEqual(should_run, set(["2to3", "django", "nbody",
+ "slowpickle", "slowspitfire",
+ "slowunpickle", "spambayes"]))
# perf.py -b 2to3
- should_run = perf.ParseBenchmarksOption("2to3")
+ should_run = perf.ParseBenchmarksOption("2to3", bench_groups)
self.assertEqual(should_run, set(["2to3"]))
# perf.py -b 2to3,pybench
- should_run = perf.ParseBenchmarksOption("2to3,pybench")
+ should_run = perf.ParseBenchmarksOption("2to3,pybench", bench_groups)
self.assertEqual(should_run, set(["2to3", "pybench"]))
# perf.py -b -2to3
- should_run = perf.ParseBenchmarksOption("-2to3")
- self.assertEqual(should_run, set(["django", "slowspitfire",
- "slowpickle", "slowunpickle"]))
+ should_run = perf.ParseBenchmarksOption("-2to3", bench_groups)
+ self.assertEqual(should_run, set(["django", "nbody", "slowspitfire",
+ "slowpickle", "slowunpickle",
+ "spambayes"]))
# perf.py -b all
- should_run = perf.ParseBenchmarksOption("all")
+ should_run = perf.ParseBenchmarksOption("all", bench_groups)
self.assertTrue("django" in should_run, should_run)
self.assertTrue("pybench" in should_run, should_run)
# perf.py -b -2to3,all
- should_run = perf.ParseBenchmarksOption("-2to3,all")
+ should_run = perf.ParseBenchmarksOption("-2to3,all", bench_groups)
self.assertTrue("django" in should_run, should_run)
self.assertTrue("pybench" in should_run, should_run)
self.assertFalse("2to3" in should_run, should_run)
# Error conditions
- self.assertRaises(ValueError, perf.ParseBenchmarksOption, "-all")
+ self.assertRaises(ValueError, perf.ParseBenchmarksOption, "-all",
+ bench_groups)
+
+
+ def testBenchmarkCounts(self):
+ bench_groups = {"top": ["middle1", "middle2"],
+ "middle1": ["bottom1", "bottom2"],
+ "middle2": ["bottom3"]}
+ found = list(perf._ExpandBenchmarkName("top", bench_groups))
+ self.assertEqual(["bottom1", "bottom2", "bottom3"], found)
+
if __name__ == "__main__":
unittest.main()
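The refactoring also makes the group table buildable outside main(),
which is what the updated tests rely on. A short usage sketch of the
resulting API, as exercised by the tests above:

    import perf

    bench_groups = perf.CreateBenchGroups()  # copies BENCH_GROUPS, adds "all"
    should_run = perf.ParseBenchmarksOption("-2to3,all", bench_groups)
    assert "django" in should_run and "2to3" not in should_run

    # _ExpandBenchmarkName flattens nested groups depth-first:
    nested = {"top": ["middle1", "middle2"],
              "middle1": ["bottom1", "bottom2"],
              "middle2": ["bottom3"]}
    assert list(perf._ExpandBenchmarkName("top", nested)) == [
        "bottom1", "bottom2", "bottom3"]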
--
Repository URL: http://hg.python.org/benchmarks