[pypy-commit] benchmarks default: Merge csenger benchmark runner changes, IMO makes sense

fijal noreply at buildbot.pypy.org
Mon Jan 30 18:56:32 CET 2012


Author: Maciej Fijalkowski <fijall at gmail.com>
Branch: 
Changeset: r172:012ff551c081
Date: 2012-01-30 19:55 +0200
http://bitbucket.org/pypy/benchmarks/changeset/012ff551c081/

Log:	Merge csenger benchmark runner changes, IMO makes sense

diff --git a/nullpython.py b/nullpython.py
new file mode 100755
--- /dev/null
+++ b/nullpython.py
@@ -0,0 +1,20 @@
+#!/usr/bin/python
+'''This is a dummy interpreter that does nothing except return 1
+second for every round of the benchmark.
+
+You can use this as the baseline interpreter if you are only
+interested in the time of the changed interpreter, but not
+in the difference from a baseline interpreter.
+'''
+from own import util
+import optparse
+
+if __name__ == '__main__':
+    parser = optparse.OptionParser(
+        usage="%prog [options]",
+        description="Dummy interpreter that reports a fixed time per benchmark round")
+    util.add_standard_options_to(parser)
+    options, args = parser.parse_args()
+
+    main = lambda n: [0.0001 for x in range(options.num_runs)]
+    util.run_benchmark(options, options.num_runs, main)
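
For reference, a minimal sketch of how the dummy baseline can be wired in
with the new runner options below (assuming runner.py, nullpython.py and a
pypy-c binary all sit in the current directory):

    import runner

    # Use the dummy as the baseline so only the timings of the changed
    # interpreter matter; no upload options are given, so nothing is
    # sent to codespeed.
    runner.main(['-b', 'richards',
                 '--changed', './pypy-c',
                 '--baseline', './nullpython.py',
                 '-o', 'result.json'])
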
diff --git a/runner.py b/runner.py
--- a/runner.py
+++ b/runner.py
@@ -3,130 +3,294 @@
 """
 
 import json
+import socket
 import sys
+
+import benchmarks
+from saveresults import save
 from unladen_swallow import perf
-import benchmarks
-import socket
 
-def perform_upload(pypy_c_path, args, force_host, options, res, revision,
-                   changed=True, postfix='', branch='default'):
-    from saveresults import save
-    project = 'PyPy'
-    if "--jit" in args:
-        name = "pypy-c" + postfix
-    else:
-        name = "pypy-c-jit" + postfix
-    if "psyco.sh" in pypy_c_path:
-        name = "cpython psyco-profile"
-        revision = 100
-        project = 'cpython'
-    if force_host is not None:
-        host = force_host
-    else:
-        host = socket.gethostname()
-    print save(project, revision, res, options, name, host, changed=changed, branch=branch)
+BENCHMARK_SET = ['richards', 'slowspitfire', 'django', 'spambayes',
+                 'rietveld', 'html5lib', 'ai']
+BENCHMARK_SET += perf._FindAllBenchmarks(benchmarks.__dict__).keys()
 
-        
-def run_and_store(benchmark_set, result_filename, pypy_c_path, revision=0,
+CHANGED = 'changed'
+BASELINE = 'baseline'
+
+
+class WrongBenchmark(Exception):
+    pass
+
+
+def run_and_store(benchmark_set, result_filename, changed_path, revision=0,
                   options='', branch='default', args='', upload=False,
-                  force_host=None, fast=False, baseline=sys.executable,
-                  full_store=False, postfix=''):
+                  fast=False, baseline_path=sys.executable, full_store=False):
     funcs = perf.BENCH_FUNCS.copy()
     funcs.update(perf._FindAllBenchmarks(benchmarks.__dict__))
-    opts = ['-b', ','.join(benchmark_set), '--inherit_env=PATH',
+    opts = ['-b', ','.join(benchmark_set),
+            '--inherit_env=PATH',
             '--no_charts']
     if fast:
         opts += ['--fast']
     if args:
         opts += ['--args', args]
     if full_store:
-        opts.append('--no_statistics')
-    opts += [baseline, pypy_c_path]
+        opts += ['--no_statistics']
+    opts += [baseline_path, changed_path]
     results = perf.main(opts, funcs)
     f = open(str(result_filename), "w")
-    res = [(name, result.__class__.__name__, result.__dict__)
+    results = [(name, result.__class__.__name__, result.__dict__)
            for name, result in results]
     f.write(json.dumps({
-        'revision' : revision,
-        'results' : res,
-        'options' : options,
-        'branch'  : branch,
+        'revision': revision,
+        'results': results,
+        'options': options,
+        'branch': branch,
         }))
     f.close()
-    if upload:
-        if ',' in args:
-            argsbase, argschanged = args.split(',')
-        else:
-            argsbase, argschanged = args, args
-        if 'pypy' in baseline:
-            perform_upload(pypy_c_path, argsbase, force_host, options, res,
-                           revision, changed=False, postfix=postfix, branch=branch)
-        perform_upload(pypy_c_path, argschanged, force_host, options, res,
-                       revision, changed=True, postfix=postfix, branch=branch)
+    return results
 
-BENCHMARK_SET = ['richards', 'slowspitfire', 'django', 'spambayes',
-                 'rietveld', 'html5lib', 'ai']
-BENCHMARK_SET += perf._FindAllBenchmarks(benchmarks.__dict__).keys()
 
-class WrongBenchmark(Exception):
-    pass
+def get_upload_options(options):
+    '''
+    Returns a dict with two keys, CHANGED and BASELINE. The values are
+    dicts with the keys:
+    * 'upload' (boolean)
+    * 'project' (string)
+    * 'executable' (string)
+    * 'urls' (list of strings)
+    * 'branch' (string)
+    * 'revision' (string)
+
+    These correspond to the --upload* and --upload-baseline*
+    options.
+
+    raises: AssertionError if upload is specified, but not the
+    corresponding executable or revision.
+    '''
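+    # A sketch of the structure returned for a typical run; all values
+    # below are illustrative examples, not defaults:
+    #
+    #   {CHANGED:  {'upload': True, 'project': 'PyPy',
+    #               'executable': 'pypy-c-jit',
+    #               'urls': ['http://speed.pypy.org/'],
+    #               'branch': 'default', 'revision': '45757:fabe4fc0dc08'},
+    #    BASELINE: {'upload': None, 'project': 'PyPy', 'executable': None,
+    #               'urls': ['http://speed.pypy.org/'],
+    #               'branch': 'default', 'revision': '45757:fabe4fc0dc08'}}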
+
+    if options.upload_baseline_revision is None:
+        options.upload_baseline_revision = options.upload_revision
+
+    upload_options = {}
+
+    for run in [CHANGED, BASELINE]:
+
+        def get_upload_option(name):
+            attr_name = 'upload'
+            if run == BASELINE:
+                attr_name = '%s_baseline' % attr_name
+            if name:
+                attr_name = '%s_%s' % (attr_name, name)
+            return getattr(options, attr_name)
+
+        urls = get_upload_option('urls')
+        urls = [url.strip() for url in urls.split(',') if url.strip()]
+        upload = get_upload_option(None)
+        project = get_upload_option('project')
+        executable = get_upload_option('executable')
+        branch = get_upload_option('branch')
+        revision = get_upload_option('revision')
+        if upload:
+            if executable is None:
+                raise AssertionError('If you want to --upload[-baseline] you '
+                                     'have to specify the corresponding '
+                                     '--upload[-baseline]-executable')
+            if revision is None:
+                raise AssertionError('If you want to upload the result you '
+                                     'have to specify a --revision (or '
+                                     '--upload-baseline-revision if you '
+                                     'want to upload the baseline result).')
+            if ((run == BASELINE and 'nullpython.py' in options.baseline) or
+                (run == CHANGED and 'nullpython.py' in options.changed)):
+                raise AssertionError("Don't upload data from the nullpython "
+                                     "dummy interpreter. It won't run any "
+                                     "real benchmarks.")
+
+        upload_options[run] = {
+            'upload': upload,
+            'project': project,
+            'executable': executable,
+            'urls': urls,
+            'branch': branch,
+            'revision': revision}
+    return upload_options
+
 
 def main(argv):
     import optparse
     parser = optparse.OptionParser(
         usage="%prog [options]",
         description="Run benchmarks and dump json")
-    parser.add_option("-b", "--benchmarks", metavar="BM_LIST",
-                      default=','.join(BENCHMARK_SET),
-                      help=("Comma-separated list of benchmarks to run"
-                            " Valid benchmarks are: " +
-                            ", ".join(sorted(BENCHMARK_SET))))
-    parser.add_option('-p', '--pypy-c', default=sys.executable,
-                      help='pypy-c or other modified python to run against')
-    parser.add_option('-r', '--revision', default=0, action="store",
-                      help='specify revision of pypy-c')
-    parser.add_option('-o', '--output-filename', default="result.json",
-                      action="store",
-                      help='specify output filename to store resulting json')
-    parser.add_option('--options', default='', action='store',
-                      help='a string describing picked options, no spaces')
-    parser.add_option('--branch', default='default', action='store',
-                      help="pypy's branch")
-    parser.add_option('--baseline', default=sys.executable, action='store',
-                      help='baseline interpreter, defaults to host one')
-    parser.add_option("-a", "--args", default="",
-                      help=("Pass extra arguments to the python binaries."
-                            " If there is a comma in this option's value, the"
-                            " arguments before the comma (interpreted as a"
-                            " space-separated list) are passed to the baseline"
-                            " python, and the arguments after are passed to the"
-                            " changed python. If there's no comma, the same"
-                            " options are passed to both."))
-    parser.add_option("--upload", default=False, action="store_true",
-                      help="Upload results to speed.pypy.org")
-    parser.add_option("--force-host", default=None, action="store",
-                      help="Force the hostname")
-    parser.add_option("--fast", default=False, action="store_true",
-                      help="Run shorter benchmark runs")
-    parser.add_option("--full-store", default=False, action="store_true",
-                      help="")
-    parser.add_option('--postfix', default='', action='store',
-                      help='Append a postfix to uploaded executable')
+
+    # benchmark options
+    benchmark_group = optparse.OptionGroup(
+        parser, 'Benchmark options',
+        ('Options affecting the benchmark runs and the resulting output '
+         'json file.'))
+    benchmark_group.add_option(
+        "-b", "--benchmarks", metavar="BM_LIST",
+        default=','.join(BENCHMARK_SET),
+        help=("Comma-separated list of benchmarks to run"
+              " Valid benchmarks are: %s"
+              ". (default: Run all listed benchmarks)"
+              ) % ", ".join(sorted(BENCHMARK_SET)))
+    benchmark_group.add_option(
+        '-c', '--changed', default=sys.executable,
+        help=('pypy-c or another modified python interpreter to run against. '
+              'Also named the "changed" interpreter. (default: the python '
+              'used to run this script)'))
+    benchmark_group.add_option(
+        '--baseline', default=sys.executable, action='store',
+        help=('Baseline interpreter. (default: the python used to '
+              'run this script)'))
+    benchmark_group.add_option(
+        '-o', '--output-filename', default="result.json",
+        action="store",
+        help=('Specify the output filename to store resulting json. '
+              '(default: result.json)'))
+    benchmark_group.add_option(
+        '--options', default='', action='store',
+        help='A string describing picked options, no spaces.')
+    benchmark_group.add_option(
+        '--branch', default='default', action='store',
+        dest='upload_branch',
+        help=("The branch the 'changed' interpreter was compiled from. This "
+              'will be store in the result json and used for the upload. '
+              "(default: 'default')"))
+    benchmark_group.add_option(
+        '-r', '--revision', action="store",
+        dest='upload_revision',
+        help=("Specify the revision of the 'changed' interpreter. "
+              'This will be store in the '
+              'result json and used for the upload. (default: None)'))
+    benchmark_group.add_option(
+        "-a", "--args", default="",
+        help=("Pass extra arguments to the python binaries."
+              " If there is a comma in this option's value, the"
+              " arguments before the comma (interpreted as a"
+              " space-separated list) are passed to the baseline"
+              " python, and the arguments after are passed to"
+              " the changed python. If there's no comma, the"
+              " same options are passed to both."))
+    benchmark_group.add_option(
+        "--fast", default=False, action="store_true",
+        help="Run shorter benchmark runs.")
+    benchmark_group.add_option(
+        "--full-store", default=False, action="store_true",
+        help="Run the benchmarks with the --no-statistics flag.")
+    parser.add_option_group(benchmark_group)
+
+    # upload changed options
+    upload_group = optparse.OptionGroup(
+        parser, 'Upload Options',
+        ('Options for uploading the result of the "changed" python to '
+         'codespeed. The information about revision and branch will '
+         'be taken from the options --revision and --branch.'))
+    upload_group.add_option(
+        "--upload", default=None, action="store_true",
+        help=("Upload results to speed.pypy.org (unless "
+              "--upload-url is given)."))
+    upload_group.add_option(
+        "--upload-urls", default="http://speed.pypy.org/",
+        help=("Comma seperated urls of the codespeed instances "
+              "to upload to. (default: http://speed.pypy.org/)"))
+    upload_group.add_option(
+        "--upload-project", default="PyPy",
+        help="The project name in codespeed. (default: PyPy)")
+    upload_group.add_option(
+        "--upload-executable", default=None,
+        help=("The executable name in codespeed. (required if --upload "
+              "is given)"))
+    parser.add_option_group(upload_group)
+    parser.add_option(
+        "--force-host", default=None, action="store",
+        help=("Force the hostname. This option will also be used when "
+              "uploading the baseline result."))
+
+    # upload baseline group
+    upload_baseline_group = optparse.OptionGroup(
+        parser, 'Upload Baseline Options',
+        ('Options for uploading the result of the "baseline" python to '
+         'codespeed. The hostname of the --force-host option will be used '
+         'in the baseline upload too.'))
+    upload_baseline_group.add_option(
+        "--upload-baseline", default=None, action="store_true",
+        help=("Also upload results or the baseline benchmark "
+              "to speed.pypy.org (unless "
+              "--upload-baseline-url is given)."))
+    upload_baseline_group.add_option(
+        "--upload-baseline-urls",
+        default="http://speed.pypy.org/",
+        help=("Comma seperated urls of the codespeed instances "
+              "to upload to. (default: http://speed.pypy.org/)"))
+    upload_baseline_group.add_option(
+        "--upload-baseline-project", default="PyPy",
+        help="The project name in codespeed (default: PyPy).")
+    upload_baseline_group.add_option(
+        "--upload-baseline-executable", default=None,
+        help=("The executable name in codespeed. (required if "
+              "--upload-baseline is given)"))
+    upload_baseline_group.add_option(
+        '--upload-baseline-branch', default='default',
+        action='store',
+        help=("The name of the branch used for the baseline "
+              "run. (default: 'default'"))
+    upload_baseline_group.add_option(
+        '--upload-baseline-revision', action='store',
+        default=None,
+        help=("The revision of the baseline. (required if --upload-baseline "
+              "is given)"))
+    parser.add_option_group(upload_baseline_group)
+
+    # Backward compatibility options
+    deprecated_group = optparse.OptionGroup(
+        parser, 'Deprecated Options',
+        'Still here for backward compatibility.')
+    deprecated_group.add_option(
+        '-p', '--pypy-c', default=sys.executable,
+        dest='changed', help='Deprecated alias for -c/--changed')
+    parser.add_option_group(deprecated_group)
+
     options, args = parser.parse_args(argv)
-    #
-    # use 'default' if the branch is empty
-    if not options.branch:
-        options.branch = 'default'
-    
+
+    upload_options = get_upload_options(options)
     benchmarks = options.benchmarks.split(',')
     for benchmark in benchmarks:
         if benchmark not in BENCHMARK_SET:
             raise WrongBenchmark(benchmark)
-    run_and_store(benchmarks, options.output_filename, options.pypy_c,
-                  options.revision, args=options.args, upload=options.upload,
-                  force_host=options.force_host, fast=options.fast,
-                  baseline=options.baseline, full_store=options.full_store,
-                  postfix=options.postfix, branch=options.branch)
+
+    changed_path = options.changed
+    baseline_path = options.baseline
+    fast = options.fast
+    args = options.args
+    full_store = options.full_store
+    output_filename = options.output_filename
+
+    branch = options.upload_branch
+    revision = options.upload_revision
+    force_host = options.force_host
+
+    results = run_and_store(benchmarks, output_filename, changed_path,
+                            revision, args=args, fast=fast,
+                            baseline_path=baseline_path,
+                            full_store=full_store, branch=branch)
+
+    for run in [CHANGED, BASELINE]:
+        upload = upload_options[run]['upload']
+        urls = upload_options[run]['urls']
+        project = upload_options[run]['project']
+        executable = upload_options[run]['executable']
+        branch = upload_options[run]['branch']
+        revision = upload_options[run]['revision']
+
+        if upload:
+            # nullpython dummy uploads are rejected in get_upload_options
+            host = force_host if force_host else socket.gethostname()
+            for url in urls:
+                print save(project, revision, results, executable, host, url,
+                           changed=(run == CHANGED), branch=branch)
+
 
 if __name__ == '__main__':
     main(sys.argv[1:])
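
For reference, a sketch of a full invocation that uploads both the changed
and the baseline results; the executables, revisions and URLs are made-up
examples, and the codespeed instances are assumed to be reachable:

    import runner

    runner.main([
        '-b', 'richards,ai',
        '--changed', './pypy-c',
        '--baseline', '/usr/bin/python',
        '-r', '45757:fabe4fc0dc08',
        '--upload', '--upload-executable', 'pypy-c-jit',
        '--upload-urls', 'http://localhost:8000/',
        '--upload-baseline', '--upload-baseline-executable', 'cpython',
        '--upload-baseline-revision', '100',
        '--upload-baseline-urls', 'http://localhost:8000/',
    ])

The baseline results are uploaded with changed=False, which is how codespeed
tells the two runs apart.
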
diff --git a/saveresults.py b/saveresults.py
--- a/saveresults.py
+++ b/saveresults.py
@@ -12,28 +12,30 @@
 
 Example usage:
 
-  $ ./saveresults.py result.json -r '45757:fabe4fc0dc08' -n pypy-c-jit -H tannit
-  
+  $ ./saveresults.py result.json -r '45757:fabe4fc0dc08' -n pypy-c-jit \
+    -H tannit
+
   OR
-  
-  $ ./saveresults.py result.json -r '45757:fabe4fc0dc08' -n pypy-c-jit-64 -H tannit
 
+  $ ./saveresults.py result.json -r '45757:fabe4fc0dc08' -n pypy-c-jit-64 \
+    -H tannit
 """
 
-import sys
-import urllib, urllib2, time
 from datetime import datetime
 import optparse
+import sys
+import time
+import urllib
+import urllib2
 
-SPEEDURL = "http://speed.pypy.org/"
 
-def save(project, revision, results, options, interpreter, host, testing=False,
+def save(project, revision, results, executeable, host, url, testing=False,
          changed=True, branch='default'):
     testparams = []
     #Parse data
     data = {}
     error = 0
-        
+
     for b in results:
         bench_name = b[0]
         res_type = b[1]
@@ -63,7 +65,7 @@
         data = {
             'commitid': revision,
             'project': project,
-            'executable': interpreter,
+            'executable': executeable,
             'benchmark': bench_name,
             'environment': host,
             'result_value': value,
@@ -77,26 +79,33 @@
                 data['std_dev'] = results['std_changed']
             else:
                 data['std_dev'] = results['std_base']
-        if testing: testparams.append(data)
-        else: error |= send(data)
+        if testing:
+            testparams.append(data)
+        else:
+            error |= send(data, url)
+
     if error:
         raise IOError("Saving failed.  See messages above.")
-    if testing: return testparams
-    else: return 0
-    
-def send(data):
+    if testing:
+        return testparams
+    else:
+        return 0
+
+
+def send(data, url):
     #save results
     params = urllib.urlencode(data)
     f = None
     response = "None"
-    info = str(datetime.today()) + ": Saving result for " + data['executable'] + " revision "
-    info += str(data['commitid']) + ", benchmark " + data['benchmark']
+    info = ("%s: Saving result for %s revision %s, benchmark %s" %
+            (str(datetime.today()), data['executable'],
+             str(data['commitid']), data['benchmark']))
     print(info)
     try:
         retries = [1, 2, 3, 6]
         while True:
             try:
-                f = urllib2.urlopen(SPEEDURL + 'result/add/', params)
+                f = urllib2.urlopen(url + 'result/add/', params)
                 response = f.read()
                 f.close()
                 break
@@ -116,7 +125,7 @@
         print response
         with open('error.html', 'w') as error_file:
             error_file.write(response)
-        print("Server (%s) response written to error.html" % (SPEEDURL,))
+        print("Server (%s) response written to error.html" % (url,))
         print('  Error code: %s\n' % (e,))
         return 1
     print "saved correctly!\n"
@@ -129,24 +138,36 @@
         data = simplejson.load(f)
     results = data['results']
     print 'uploading results...',
-    save('PyPy', options.revision, results, '', options.name, options.host,
-         changed=options.changed)
+    save(options.project, options.revision, results, options.executable,
+                options.host, options.url, changed=options.changed)
     print 'done'
 
 
 if __name__ == '__main__':
     parser = optparse.OptionParser(usage="%prog result.json [options]")
-    parser.add_option('-r', '--revision', dest='revision', default=None, type=str)
-    parser.add_option('-n', '--name', dest='name', default=None, type=str)
+    parser.add_option('-r', '--revision', dest='revision',
+                      default=None, type=str, help='VCS revision (required)')
+    parser.add_option('-n', '--name', dest='executable',
+                      default=None, type=str,
+                      help=('Name of the executable for codespeed. '
+                            'Deprecated; use -e/--executable instead.'))
+    parser.add_option('-e', '--executable', dest='executable',
+                      default=None, type=str,
+                      help='Name of the executable for codespeed (required).')
     parser.add_option('-H', '--host', dest='host', default=None, type=str)
     parser.add_option('-b', '--baseline', dest='changed', default=True,
                       action='store_false',
                       help='upload the results as baseline instead of changed')
+    parser.add_option('-P', '--project', dest='project', default='PyPy')
+    parser.add_option('-u', '--url', dest='url',
+                      default="http://speed.pypy.org/",
+                      help=('URL of the codespeed instance '
+                            '(default: http://speed.pypy.org/)'))
     parser.format_description = lambda fmt: __doc__
     parser.description = __doc__
     options, args = parser.parse_args()
-    if options.revision is None or options.name is None or options.host is None or \
-            len(args) != 1:
+    if (options.revision is None or options.executable is None or
+        options.host is None or len(args) != 1):
         parser.print_help()
         sys.exit(2)
     main(args[0], options)
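
The new save() signature takes the target URL explicitly instead of the old
SPEEDURL module constant. It can be exercised without hitting the network via
the testing flag, roughly as follows; results stands for the list of
(benchmark_name, class_name, result_dict) tuples produced by the runner:

    from saveresults import save

    # testing=True collects the POST parameters instead of sending them,
    # so nothing is uploaded; the concrete values are examples only.
    params = save('PyPy', '45757:fabe4fc0dc08', results, 'pypy-c-jit',
                  'tannit', 'http://speed.pypy.org/', testing=True)
    for p in params:
        print p['benchmark'], p['result_value']
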
diff --git a/test/test_runner.py b/test/test_runner.py
--- a/test/test_runner.py
+++ b/test/test_runner.py
@@ -1,9 +1,9 @@
-
 import py
 import json
 import sys
 from runner import run_and_store
 
+
 def test_run_and_store():
     tmpdir = py.test.ensuretemp('bench_runner')
     resfile = tmpdir.join('results')
diff --git a/test/test_saveresults.py b/test/test_saveresults.py
--- a/test/test_saveresults.py
+++ b/test/test_saveresults.py
@@ -22,7 +22,8 @@
     
     def test_good_input(self):
         '''Given correct result data, check that every result being saved has the right parameters'''
-        for resultparams in saveresults.save("PyPy", 71212, self.fixture, "", "pypy-c-jit", 'host', True):
+        for resultparams in saveresults.save("PyPy", 71212, self.fixture, "pypy-c-jit", 'host', 'url',
+                                             testing=True):
             assert resultparams['project'] == "PyPy"
             assert resultparams['commitid'] == 71212
             assert resultparams['executable'] == "pypy-c-jit"

