[Python-checkins] benchmarks: Issue #25721: Fixes for pybench

victor.stinner python-checkins at python.org
Wed Nov 25 05:23:48 EST 2015


https://hg.python.org/benchmarks/rev/e77ed7d18a68
changeset:   235:e77ed7d18a68
user:        Victor Stinner <victor.stinner at gmail.com>
date:        Wed Nov 25 11:17:24 2015 +0100
summary:
  Issue #25721: Fixes for pybench

* Write a base class for the benchmark result classes to ensure that the
  always_display attribute is always set (see the sketch below)
* Implement a table output format for pybench
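
For context, a minimal sketch of the pattern this base class establishes.
The class names and the always_display attribute are taken from the diff
below; the reporting loop is a simplified assumption about how perf.py
consumes the attribute, not the actual code:

    class BaseBenchmarkResult(object):
        # Display every result by default; subclasses may override this
        # and are expected to implement __str__() and as_csv().
        always_display = True

        def __str__(self):
            raise NotImplementedError

        def as_csv(self):
            raise NotImplementedError

    class BenchmarkError(BaseBenchmarkResult):
        """Inherits always_display instead of redeclaring it."""

        def __init__(self, e):
            self.msg = str(e)

        def __str__(self):
            return self.msg

    # Hypothetical reporting loop: every result type derives from
    # BaseBenchmarkResult, so always_display can be read directly.
    some_results = [("example", BenchmarkError(ValueError("build failed")))]
    for name, result in some_results:
        if result.always_display:
            print("%s: %s" % (name, result))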

files:
  perf.py |  63 +++++++++++++++++++++++++++++++++++----------
  1 files changed, 49 insertions(+), 14 deletions(-)


diff --git a/perf.py b/perf.py
--- a/perf.py
+++ b/perf.py
@@ -420,7 +420,17 @@
         self.inst_output = inst_output
 
 
-class BenchmarkResult(object):
+class BaseBenchmarkResult(object):
+    always_display = True
+
+    def __str__(self):
+        raise NotImplementedError
+
+    def as_csv(self):
+        raise NotImplementedError
+
+
+class BenchmarkResult(BaseBenchmarkResult):
     """An object representing data from a succesful benchmark run."""
 
     def __init__(self, min_base, min_changed, delta_min, avg_base,
@@ -458,11 +468,9 @@
         return ["%f" % self.min_base, "%f" % self.min_changed]
 
 
-class BenchmarkError(object):
+class BenchmarkError(BaseBenchmarkResult):
     """Object representing the error from a failed benchmark run."""
 
-    always_display = True
-
     def __init__(self, e):
         self.msg = str(e)
 
@@ -470,11 +478,9 @@
         return self.msg
 
 
-class MemoryUsageResult(object):
+class MemoryUsageResult(BaseBenchmarkResult):
     """Memory usage data from a successful benchmark run."""
 
-    always_display = True
-
     def __init__(self, max_base, max_changed, delta_max, timeline_link):
         self.max_base      = max_base
         self.max_changed   = max_changed
@@ -496,11 +502,9 @@
         return ["%.3f" % self.max_base, "%.3f" % self.max_changed]
 
 
-class SimpleBenchmarkResult(object):
+class SimpleBenchmarkResult(BaseBenchmarkResult):
     """Object representing result data from a successful benchmark run."""
 
-    always_display = True
-
     def __init__(self, base_time, changed_time, time_delta):
         self.base_time    = base_time
         self.changed_time = changed_time
@@ -515,11 +519,9 @@
         return ["%f" % self.base_time, "%f" % self.changed_time]
 
 
-class InstrumentationResult(object):
+class InstrumentationResult(BaseBenchmarkResult):
     """Object respresenting a --diff_instrumentation result."""
 
-    always_display = True
-
     def __init__(self, inst_diff, options):
         self.inst_diff = inst_diff
         self._control_label = options.control_label
@@ -599,6 +601,33 @@
     return table
 
 
+def _FormatPyBenchDataForTable(base_label, changed_label, results):
+    """Prepare PyBench performance data for tabular output.
+
+    Args:
+        base_label: label for the control binary.
+        changed_label: label for the experimental binary.
+        results: iterable of (bench_name, result) 2-tuples where bench_name is
+            the name of the benchmark being reported; and result is a
+            PyBenchBenchmarkResult object.
+
+    Returns:
+        A list of 4-tuples, where each tuple corresponds to a row in the output
+        table, and each item in the tuples corresponds to a cell in the output
+        table.
+    """
+    table = [("Benchmark", base_label, changed_label, "Change")]
+
+    for (bench_name, result) in results:
+        table.append((bench_name,
+                      # Limit the precision for conciseness in the table.
+                      str(round(result.avg_base, 2)),
+                      str(round(result.avg_changed, 2)),
+                      result.delta_avg))
+
+    return table
+
+
 def _FormatMemoryUsageForTable(base_label, changed_label, results):
     """Prepare memory usage data for tabular output.
 
@@ -644,6 +673,8 @@
         table = _FormatPerfDataForTable(base_label, changed_label, results)
     elif isinstance(results[0][1], MemoryUsageResult):
         table = _FormatMemoryUsageForTable(base_label, changed_label, results)
+    elif isinstance(results[0][1], PyBenchBenchmarkResult):
+        table = _FormatPyBenchDataForTable(base_label, changed_label, results)
     else:
         raise TypeError("Unknown result type: %r" % type(results[0][1]))
 
@@ -1183,7 +1214,7 @@
     return deco
 
 
-class PyBenchBenchmarkResult(object):
+class PyBenchBenchmarkResult(BaseBenchmarkResult):
 
     def __init__(self, min_base, min_changed, delta_min,
                  avg_base, avg_changed, delta_avg):
@@ -1199,6 +1230,10 @@
                  "Avg: %(avg_base)d -> %(avg_changed)d: %(delta_avg)s")
                 % self.__dict__)
 
+    def as_csv(self):
+        # Min base, min changed
+        return ["%f" % self.min_base, "%f" % self.min_changed]
+
 
 _PY_BENCH_TOTALS_LINE = re.compile("""
     Totals:\s+(?P<min_base>\d+)ms\s+

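For reference, a usage sketch of the new PyBench table path. The function and
class names and their signatures come from the changeset above; the import,
the labels and the timing values are made-up example data, assuming the
repository root (containing perf.py) is on sys.path:

    from perf import PyBenchBenchmarkResult, _FormatPyBenchDataForTable

    results = [
        ("BuiltinFunctionCalls",
         PyBenchBenchmarkResult(min_base=122, min_changed=119, delta_min="-2.5%",
                                avg_base=125.0, avg_changed=121.3, delta_avg="-3.0%")),
    ]
    table = _FormatPyBenchDataForTable("base 3.5", "patched 3.6", results)
    # table[0] -> ("Benchmark", "base 3.5", "patched 3.6", "Change")
    # table[1] -> ("BuiltinFunctionCalls", "125.0", "121.3", "-3.0%")
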
-- 
Repository URL: https://hg.python.org/benchmarks
