[Python-checkins] benchmarks: Add a logging benchmark
antoine.pitrou
python-checkins at python.org
Thu Dec 15 13:17:35 CET 2011
http://hg.python.org/benchmarks/rev/135d44d0c02a
changeset: 152:135d44d0c02a
user: Antoine Pitrou <solipsis at pitrou.net>
date: Thu Dec 15 13:17:14 2011 +0100
summary:
Add a logging benchmark
files:
 perf.py                   |   49 ++++++++++-
 performance/bm_logging.py |  119 ++++++++++++++++++++++++++
 2 files changed, 167 insertions(+), 1 deletions(-)
diff --git a/perf.py b/perf.py
--- a/perf.py
+++ b/perf.py
@@ -1597,6 +1597,52 @@
 def BM_NQueens(*args, **kwargs):
     return SimpleBenchmark(MeasureNQueens, *args, **kwargs)
+def MeasureLogging(python, options, extra_args):
+    """Test the performance of Python's logging module.
+
+    Args:
+        python: prefix of a command line for the Python binary.
+        options: optparse.Values instance.
+        extra_args: list of arguments to append to the command line.
+
+    Returns:
+        RawData instance.
+    """
+    bm_path = Relative("performance/bm_logging.py")
+    return MeasureGeneric(python, options, bm_path, extra_args=extra_args)
+
+
+def _LoggingBenchmark(base_python, changed_python, options, extra_args):
+    """Test the performance of Python's logging module.
+
+    Args:
+        base_python: prefix of a command line for the reference
+            Python binary.
+        changed_python: prefix of a command line for the
+            experimental Python binary.
+        options: optparse.Values instance.
+        extra_args: list of arguments to append to the command line.
+
+    Returns:
+        Summary of whether the experimental Python is better/worse than the
+        baseline.
+    """
+    return SimpleBenchmark(MeasureLogging,
+                           base_python, changed_python, options, extra_args)
+
+
+def BM_Silent_Logging(base_python, changed_python, options):
+    args = ["no_output"]
+    return _LoggingBenchmark(base_python, changed_python, options, args)
+
+def BM_Simple_Logging(base_python, changed_python, options):
+    args = ["simple_output"]
+    return _LoggingBenchmark(base_python, changed_python, options, args)
+
+def BM_Formatted_Logging(base_python, changed_python, options):
+    args = ["formatted_output"]
+    return _LoggingBenchmark(base_python, changed_python, options, args)
+
 def _StartupPython(command, mem_usage, track_memory, inherit_env):
     startup_env = BuildEnv(inherit_env=inherit_env)
@@ -1935,11 +1981,12 @@
"call_method_unknown"],
"math": ["nbody", "float", "pidigits"],
"template" : ["slowspitfire", "django", "mako"],
+ "logging": ["silent_logging", "simple_logging", "formatted_logging"],
# Benchmarks natively 2.x- and 3.x-compatible
"2n3": ["calls", "math", "fastpickle", "fastunpickle",
"json_dump", "json_load", "regex", "threading",
"nqueens", "unpack_sequence", "richards",
- "normal_startup", "startup_nosite"],
+ "logging", "normal_startup", "startup_nosite"],
# After 2to3-conversion
"py3k": ["2to3", "2n3", "mako"]
}
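
With the benchmark group table updated, the three new benchmarks become selectable by name from the perf.py command line (for example via its -b/--benchmarks option, as in "perf.py -b logging baseline_python changed_python"). The snippet below is a rough sketch, not part of the patch, of what a single measurement boils down to: it assumes MeasureGeneric's usual pattern of running the bm_* script in a child process with an iteration count passed via -n and one timing printed per line on stdout; neither of those details appears in this diff.

    # Hypothetical sketch of the child-process call behind MeasureLogging.
    # The "-n" flag and the one-timing-per-line output format are assumptions
    # based on the other bm_*.py scripts, not something shown in this diff.
    import subprocess

    def measure_logging(python_cmd, num_runs, mode):
        # mode is the extra argument appended by BM_Silent_Logging,
        # BM_Simple_Logging or BM_Formatted_Logging.
        cmd = python_cmd + ["performance/bm_logging.py",
                            "-n", str(num_runs), mode]
        out = subprocess.check_output(cmd, universal_newlines=True)
        # One floating-point time per line on stdout.
        return [float(line) for line in out.split()]

    # e.g.: times = measure_logging(["python"], 5, "no_output")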
diff --git a/performance/bm_logging.py b/performance/bm_logging.py
new file mode 100644
--- /dev/null
+++ b/performance/bm_logging.py
@@ -0,0 +1,119 @@
+#!/usr/bin/env python
+
+"""Script for testing the performance of logging simple messages.
+"""
+
+# Python imports
+import io
+import logging
+import optparse
+import sys
+import time
+
+# Local imports
+import util
+from compat import xrange
+
+# A simple format for parameterized logging
+FORMAT = 'important: %s'
+MESSAGE = 'some important information to be logged'
+
+
+def test_no_output(iterations, logger):
+    times = []
+    m = MESSAGE
+    for _ in xrange(iterations):
+        t0 = time.time()
+        for _ in xrange(10000):
+            logger.debug(m)
+            logger.debug(m)
+            logger.debug(m)
+            logger.debug(m)
+            logger.debug(m)
+            logger.debug(m)
+            logger.debug(m)
+            logger.debug(m)
+            logger.debug(m)
+            logger.debug(m)
+        t1 = time.time()
+        times.append(t1 - t0)
+    return times
+
+
+def test_simple_output(iterations, logger):
+    times = []
+    m = MESSAGE
+    for _ in xrange(iterations):
+        t0 = time.time()
+        for _ in xrange(1000):
+            logger.warn(m)
+            logger.warn(m)
+            logger.warn(m)
+            logger.warn(m)
+            logger.warn(m)
+            logger.warn(m)
+            logger.warn(m)
+            logger.warn(m)
+            logger.warn(m)
+            logger.warn(m)
+        t1 = time.time()
+        times.append(t1 - t0)
+    return times
+
+
+def test_formatted_output(iterations, logger):
+    times = []
+    f = FORMAT
+    m = MESSAGE
+    for _ in xrange(iterations):
+        t0 = time.time()
+        for _ in xrange(1000):
+            logger.warn(f, m)
+            logger.warn(f, m)
+            logger.warn(f, m)
+            logger.warn(f, m)
+            logger.warn(f, m)
+            logger.warn(f, m)
+            logger.warn(f, m)
+            logger.warn(f, m)
+            logger.warn(f, m)
+            logger.warn(f, m)
+        t1 = time.time()
+        times.append(t1 - t0)
+    return times
+
+
+if __name__ == "__main__":
+    parser = optparse.OptionParser(
+        usage="%prog [no_output|simple_output|formatted_output] [options]",
+        description=("Test the performance of logging."))
+    util.add_standard_options_to(parser)
+    options, args = parser.parse_args()
+
+    benchmarks = ["no_output", "simple_output", "formatted_output"]
+    for bench_name in benchmarks:
+        if bench_name in args:
+            benchmark = globals()["test_" + bench_name]
+            break
+    else:
+        raise RuntimeError("Need to specify one of %s" % benchmarks)
+
+    # NOTE: StringIO performance will impact the results...
+    if sys.version_info >= (3,):
+        sio = io.StringIO()
+    else:
+        sio = io.BytesIO()
+    handler = logging.StreamHandler(stream=sio)
+    logger = logging.getLogger("benchlogger")
+    logger.propagate = False
+    logger.addHandler(handler)
+    logger.setLevel(logging.WARNING)
+
+    util.run_benchmark(options, options.num_runs, benchmark, logger)
+
+    if benchmark is not test_no_output:
+        assert len(sio.getvalue()) > 0
+    else:
+        assert len(sio.getvalue()) == 0
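
For anyone who wants to try the measurement outside the harness, here is a minimal standalone approximation of the "simple_output" case above. It is not part of the changeset: util.run_benchmark and compat.xrange are replaced with plain Python 3 code, the ten unrolled calls are folded into a single loop, and logger.warning() is used rather than the warn alias, so absolute numbers will not match a perf.py run.

    # Standalone sketch of test_simple_output (illustrative only).
    import io
    import logging
    import time

    def bench_simple_output(iterations):
        # Log to an in-memory stream, as bm_logging.py does.
        sio = io.StringIO()
        logger = logging.getLogger("benchlogger")
        logger.propagate = False
        logger.addHandler(logging.StreamHandler(stream=sio))
        logger.setLevel(logging.WARNING)
        msg = "some important information to be logged"
        times = []
        for _ in range(iterations):
            t0 = time.time()
            # 1000 loop iterations x 10 unrolled calls in the original.
            for _ in range(10000):
                logger.warning(msg)
            t1 = time.time()
            times.append(t1 - t0)
        return times

    if __name__ == "__main__":
        results = bench_simple_output(5)
        print("per-run times: %s" % ", ".join("%.4f" % t for t in results))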
--
Repository URL: http://hg.python.org/benchmarks