[pypy-svn] pypy commit 58f56bdf5c4b: Merge heads

Bitbucket commits-noreply at bitbucket.org
Tue Dec 14 11:47:23 CET 2010


# HG changeset patch -- Bitbucket.org
# Project pypy
# URL http://bitbucket.org/pypy/pypy/overview
# User Antonio Cuni <anto.cuni at gmail.com>
# Date 1292323601 -3600
# Node ID 58f56bdf5c4bf4ca311b0f6a794ed8a79a1a85a8
# Parent  e45b4f1a2e50ef1b44a602033a2dd2b297bc144f
# Parent  53e05faa128ca5a08b1dd95f706226694fc7245b
Merge heads

--- a/pypy/translator/benchmark/autopath.py
+++ /dev/null
@@ -1,134 +0,0 @@
-"""
-self cloning, automatic path configuration 
-
-copy this into any subdirectory of pypy from which scripts need 
-to be run, typically all of the test subdirs. 
-The idea is that any such script simply issues
-
-    import autopath
-
-and this will make sure that the parent directory containing "pypy"
-is in sys.path. 
-
-If you modify the master "autopath.py" version (in pypy/tool/autopath.py) 
-you can directly run it which will copy itself on all autopath.py files
-it finds under the pypy root directory. 
-
-This module always provides these attributes:
-
-    pypydir    pypy root directory path 
-    this_dir   directory where this autopath.py resides 
-
-"""
-
-def __dirinfo(part):
-    """ return (partdir, this_dir) and insert parent of partdir
-    into sys.path.  If the parent directories don't have the part
-    an EnvironmentError is raised."""
-
-    import sys, os
-    try:
-        head = this_dir = os.path.realpath(os.path.dirname(__file__))
-    except NameError:
-        head = this_dir = os.path.realpath(os.path.dirname(sys.argv[0]))
-
-    error = None
-    while head:
-        partdir = head
-        head, tail = os.path.split(head)
-        if tail == part:
-            checkfile = os.path.join(partdir, os.pardir, 'pypy', '__init__.py')
-            if not os.path.exists(checkfile):
-                error = "Cannot find %r" % (os.path.normpath(checkfile),)
-            break
-    else:
-        error = "Cannot find the parent directory %r of the path %r" % (
-            partdir, this_dir)
-    if not error:
-        # check for bogus end-of-line style (e.g. files checked out on
-        # Windows and moved to Unix)
-        f = open(__file__.replace('.pyc', '.py'), 'r')
-        data = f.read()
-        f.close()
-        if data.endswith('\r\n') or data.endswith('\r'):
-            error = ("Bad end-of-line style in the .py files. Typically "
-                     "caused by a zip file or a checkout done on Windows and "
-                     "moved to Unix or vice-versa.")
-    if error:
-        raise EnvironmentError("Invalid source tree - bogus checkout! " +
-                               error)
-    
-    pypy_root = os.path.join(head, '')
-    try:
-        sys.path.remove(head)
-    except ValueError:
-        pass
-    sys.path.insert(0, head)
-
-    munged = {}
-    for name, mod in sys.modules.items():
-        if '.' in name:
-            continue
-        fn = getattr(mod, '__file__', None)
-        if not isinstance(fn, str):
-            continue
-        newname = os.path.splitext(os.path.basename(fn))[0]
-        if not newname.startswith(part + '.'):
-            continue
-        path = os.path.join(os.path.dirname(os.path.realpath(fn)), '')
-        if path.startswith(pypy_root) and newname != part:
-            modpaths = os.path.normpath(path[len(pypy_root):]).split(os.sep)
-            if newname != '__init__':
-                modpaths.append(newname)
-            modpath = '.'.join(modpaths)
-            if modpath not in sys.modules:
-                munged[modpath] = mod
-
-    for name, mod in munged.iteritems():
-        if name not in sys.modules:
-            sys.modules[name] = mod
-        if '.' in name:
-            prename = name[:name.rfind('.')]
-            postname = name[len(prename)+1:]
-            if prename not in sys.modules:
-                __import__(prename)
-                if not hasattr(sys.modules[prename], postname):
-                    setattr(sys.modules[prename], postname, mod)
-
-    return partdir, this_dir
-
-def __clone():
-    """ clone master version of autopath.py into all subdirs """
-    from os.path import join, walk
-    if not this_dir.endswith(join('pypy','tool')):
-        raise EnvironmentError("can only clone master version "
-                               "'%s'" % join(pypydir, 'tool',_myname))
-
-
-    def sync_walker(arg, dirname, fnames):
-        if _myname in fnames:
-            fn = join(dirname, _myname)
-            f = open(fn, 'rwb+')
-            try:
-                if f.read() == arg:
-                    print "checkok", fn
-                else:
-                    print "syncing", fn
-                    f = open(fn, 'w')
-                    f.write(arg)
-            finally:
-                f.close()
-    s = open(join(pypydir, 'tool', _myname), 'rb').read()
-    walk(pypydir, sync_walker, s)
-
-_myname = 'autopath.py'
-
-# set guaranteed attributes
-
-pypydir, this_dir = __dirinfo('pypy')
-import py # note: py is imported only AFTER the path has been set
-libpythondir = str(py.path.local(pypydir).dirpath().join('lib-python', '2.5.2'))
-libpythonmodifieddir = str(py.path.local(libpythondir).dirpath().join('modified-2.5.2'))
-
-if __name__ == '__main__':
-    __clone()
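
The module deleted above implemented the path bootstrap described in its docstring: find the enclosing "pypy" package and put its parent directory on sys.path. A minimal sketch of that effect, assuming a standard checkout layout (the real module additionally checked line endings and aliased already-imported submodules):

    import os, sys

    this_dir = os.path.realpath(os.path.dirname(__file__))
    head = this_dir
    # walk upwards until we hit the 'pypy' package directory
    while os.path.basename(head) != 'pypy':
        parent = os.path.dirname(head)
        if parent == head:
            raise EnvironmentError("not inside a pypy checkout")
        head = parent
    root = os.path.dirname(head)       # parent of the 'pypy' package
    if root in sys.path:
        sys.path.remove(root)
    sys.path.insert(0, root)           # 'import pypy' now works from anywhere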

--- a/pypy/translator/benchmark/bench-custom.py
+++ /dev/null
@@ -1,127 +0,0 @@
-# benchmarks on a unix machine.
-
-import autopath
-from pypy.translator.benchmark.result import BenchmarkResultSet
-from pypy.translator.benchmark.benchmarks import BENCHMARKS
-import os, sys, time, pickle, re, py
-
-SPLIT_TABLE = True      # useful when executable names are very long
-
-def get_executables(args):  #sorted by revision number (highest first)
-    exes = sorted(args, key=os.path.getmtime)
-    r = []
-    for exe in exes:
-        if '/' not in exe:
-            r.append('./' + exe)
-        else:
-            r.append(exe)
-    return r
-
-def main(options, args):
-    if os.path.exists(options.picklefile):
-        benchmark_result = pickle.load(open(options.picklefile, 'rb'))
-    else:
-        benchmark_result = BenchmarkResultSet()
-
-    benchmarks = []
-    for b in BENCHMARKS:
-        if b.name in options.benchmarks:
-            if not b.check():
-                print "can't run %s benchmark for some reason"%(b.name,)
-            else:
-                if int(options.sizefactor) > 1:
-                    b = b * int(options.sizefactor)
-                benchmarks.append(b)
-
-    exes = get_executables(args)
-    pythons = 'python2.6 python2.5 python2.4'.split()
-    full_pythons = []
-    for python in pythons:
-        full_python = py.path.local.sysfind(python)
-        if full_python:
-            full_pythons.append(str(full_python))
-
-    sys.stdout.flush()
-
-    refs = {}
-    final_error_count = 0
-
-    if not options.nocpython:
-        exes = full_pythons + exes
-
-    for i in range(int(options.runcount)) or [None]:
-        if i is not None:
-            for exe in exes:
-                for b in benchmarks:
-                    br = benchmark_result.result(exe, allowcreate=True)
-                    result = br.run_benchmark(b, verbose=options.verbose)
-                    if not result:
-                        final_error_count += 1
-
-        if options.relto:
-            relto = options.relto
-        else:
-            relto = full_pythons[0]
-        if relto not in benchmark_result.benchmarks:
-            continue
-
-        pickle.dump(benchmark_result, open(options.picklefile, 'wb'))
-
-        exe_stats = ['stat:st_mtime', 'exe_name', 'pypy_rev']
-        if not SPLIT_TABLE:
-            stats = exe_stats[:]
-        else:
-            stats = ['exe']
-        for b in benchmarks:
-            stats.append('bench:'+b.name)
-        kwds = {'relto': relto,
-                'filteron' :lambda r: r.exe_name in exes,
-                }
-        for row in benchmark_result.txt_summary(stats, **kwds):
-            print row
-        if SPLIT_TABLE:
-            print
-            print 'Reference:'
-            for row in benchmark_result.txt_summary(['exe'] + exe_stats,
-                                                    **kwds):
-                print row
-            print
-
-    if final_error_count:
-        raise SystemExit("%d benchmark run(s) failed (see -FAILED- above)"
-                         % final_error_count)
-
-if __name__ == '__main__':
-    from optparse import OptionParser
-    parser = OptionParser()
-    default_benches = ','.join([b.name for b in BENCHMARKS if b.check()])
-    parser.add_option(
-        '--benchmarks', dest='benchmarks',
-        default=default_benches,
-        )
-    parser.add_option(
-        '--pickle', dest='picklefile',
-        default='bench-custom.benchmark_result'
-        )
-    parser.add_option(
-        '--runcount', dest='runcount',
-        default='1',
-        )
-    parser.add_option(
-        '--relto', dest='relto',
-        default=None,
-        )
-    parser.add_option(
-        '-v', '--verbose', action='store_true', dest='verbose',
-        default=None,
-        )
-    parser.add_option(
-        '--no-cpython', action='store_true', dest='nocpython',
-        default=None,
-        )
-    parser.add_option(
-        '--size-factor', dest='sizefactor',
-        default='1',
-        )
-    options, args = parser.parse_args(sys.argv[1:])
-    main(options, args)
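
For reference, the driver deleted above was invoked with the executables to benchmark as positional arguments (sorted by modification time) plus the options registered above; results accumulate in the pickle file, and timings are printed relative to --relto, defaulting to the first CPython found on the path. A hedged usage example, with hypothetical executable names:

    python bench-custom.py --benchmarks=richards,pystone --runcount=3 \
           --size-factor=2 ./pypy-c-old ./pypy-c-new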

--- a/pypy/translator/benchmark/__init__.py
+++ /dev/null
@@ -1,1 +0,0 @@
-#

--- a/pypy/translator/benchmark/benchmarks.py
+++ /dev/null
@@ -1,197 +0,0 @@
-import os, sys, time, pickle, re, py
-import yaml
-
-class BenchmarkFailed(Exception):
-    pass
-
-PYSTONE_CMD = 'from test import pystone;pystone.main(%s)'
-PYSTONE_PATTERN = 'This machine benchmarks at'
-
-RICHARDS_CMD = 'from richards import *;main(iterations=%d)'
-RICHARDS_PATTERN = 'Average time per iteration:'
-
-TIME_FMT = 'max mem used: %Mk\nelapsed time: %e\nsystem time:  %S\nuser time:    %U\nCPU use:      %P'
-
-def get_result(txt, pattern):
-    for line in txt.split('\n'):
-        if line.startswith(pattern):
-            break
-    else:
-        raise BenchmarkFailed
-    return float(line.split()[len(pattern.split())])
-
-class Benchmark(object):
-    def __init__(self, name, runner, asc_good, units,
-                 check=lambda:True, sizefactor=1):
-        if sizefactor > 1:
-            self.name = name + '*%d' % sizefactor
-        else:
-            self.name = name
-        self._basename = name
-        self._run = runner
-        self.asc_good = asc_good
-        self.units = units
-        self.check = check
-        self.sizefactor = sizefactor
-    def __mul__(self, n):
-        return Benchmark(self._basename, self._run, self.asc_good, self.units,
-                         self.check, self.sizefactor * n)
-    def run(self, exe):
-        self.latest_output = ''
-        try:
-            result, latest_output = self._run(exe, self.sizefactor)
-            self.latest_output = latest_output
-        except BenchmarkFailed, e:
-            result = '-FAILED-'
-        return result
-
-def external_dependency(dirname, svnurl, revision=None):
-    directory = py.path.local(__file__).dirpath().join(dirname)
-    wc = py.path.svnwc(directory)
-    wc.checkout(svnurl, rev=revision)
-    return True
-
-def run_cmd(cmd):
-    pipe = os.popen(cmd + ' 2>&1')
-    r = pipe.read()
-    status = pipe.close()
-    if status:
-        raise BenchmarkFailed(status)
-    return r
-
-def run_pystone(executable, sizefactor=1):
-    from pypy.tool import autopath
-    distdir = py.path.local(autopath.pypydir).dirpath()
-    pystone = py.path.local(autopath.libpythondir).join('test', 'pystone.py')
-    txt = run_cmd('"%s" "%s" %d' % (executable, pystone, 50000 * sizefactor))
-    return get_result(txt, PYSTONE_PATTERN), txt
-
-def run_richards(executable, sizefactor=1):
-    richards = py.path.local(__file__).dirpath().dirpath().join('goal').join('richards.py')
-    txt = run_cmd('"%s" %s %d' % (executable, richards, 5 * sizefactor))
-    return get_result(txt, RICHARDS_PATTERN), txt
-
-def run_translate(executable):
-    translate = py.path.local(__file__).dirpath().dirpath().join('goal').join('translate.py')
-    target = py.path.local(__file__).dirpath().dirpath().join('goal').join('targetrpystonedalone.py')
-    argstr = '%s %s --batch --backendopt --no-compile %s > /dev/null 2> /dev/null'
-    T = time.time()
-    status = os.system(argstr%(executable, translate, target))
-    r = time.time() - T
-    if status:
-        raise BenchmarkFailed(status)
-    return r
-
-def run_templess(executable, sizefactor=1):
-    """ run some script in the templess package
-
-        templess is some simple templating language.
-        We have a copy at
-        'http://codespeak.net/svn/user/arigo/hack/pypy-hack/templess'
-    """
-    here = py.path.local(__file__).dirpath()
-    pypath = os.path.dirname(os.path.dirname(py.__file__))
-    templessdir = here.join('templess')
-    testscript = templessdir.join('test/oneshot.py')
-    command = 'PYTHONPATH="%s:%s" "%s" "%s" %d' % (here, pypath,
-                                                   executable, testscript,
-                                                   100 * sizefactor)
-    txt = run_cmd(command)
-    for line in txt.split('\n'):
-        if '.' in line:
-            try:
-                return float(line) / sizefactor, txt
-            except ValueError:
-                pass
-    else:
-        raise BenchmarkFailed
-
-def check_templess():
-    return external_dependency('templess',
-                 'http://codespeak.net/svn/user/arigo/hack/pypy-hack/templess')
-
-def run_gadfly(executable, sizefactor=1):
-    """ run some tests in the gadfly pure Python database """
-    here = py.path.local(__file__).dirpath()
-    gadfly = here.join('gadfly')
-    testscript = gadfly.join('test', 'testsubset.py')
-    command = 'PYTHONPATH="%s" "%s" "%s" %d' % (gadfly, executable, testscript,
-                                                sizefactor)
-    txt = run_cmd(command)
-    return get_result(txt, 'Total running time:') / sizefactor, txt
-
-def check_gadfly():
-    return external_dependency('gadfly',
-              'http://codespeak.net/svn/user/arigo/hack/pypy-hack/gadflyZip',
-              70117)
-
-def run_mako(executable, sizefactor=1):
-    """ run some tests in the mako templating system """
-    here = py.path.local(__file__).dirpath()
-    mako = here.join('mako')
-    testscript = mako.join('examples', 'bench', 'basic.py')
-    command = 'PYTHONPATH="%s" "%s" "%s" -n%d mako' % (mako.join('lib'),
-                                                       executable, testscript,
-                                                       2000 * sizefactor)
-    txt = run_cmd(command)
-    return get_result(txt, 'Mako:'), txt
-
-def check_mako():
-    return external_dependency('mako',
-              'http://codespeak.net/svn/user/arigo/hack/pypy-hack/mako',
-              70118)    
-
-def check_translate():
-    return False   # XXX what should we do about the dependency on ctypes?
-
-class LanguageShootoutBenchmark(Benchmark):
-    def __init__(self, name, sizefactor=1, test=False):
-        self.test = test
-        self.basename = name
-        Benchmark.__init__(self, name, self.runner, False, 'ms',
-                           self.check, sizefactor)
-
-    def __mul__(self, i):
-        return LanguageShootoutBenchmark(self.name, self.sizefactor * i,
-                                         self.test)
-
-    def runner(self, executable, sizefactor=1):
-        shootout = py.path.local(__file__).dirpath().join(
-            'shootout_benchmarks')
-        argsfile = shootout.join('tests.yml')
-        if self.test:
-            kind = 'test'
-        else:
-            kind = 'run'
-        args = yaml.load(argsfile.read())[self.basename][kind]['args']
-        progname = str(shootout.join(self.basename)) + '.py'
-        cmd = 'time -f "%s" %s %s %s %d' % (TIME_FMT, executable, progname,
-                                         " ".join(args), sizefactor)
-        txt = run_cmd(cmd)
-        return get_result(txt, 'elapsed time:'), txt
-
-    def check(self):
-        return external_dependency('shootout_benchmarks',
-              'http://codespeak.net/svn/pypy/benchmarks/shootout')
-
-BENCHMARKS = [Benchmark('richards', run_richards, False, 'ms'),
-              Benchmark('pystone', run_pystone, True, ''),
-              Benchmark('translate', run_translate, False, 'ms',
-                        check_translate),
-              Benchmark('templess', run_templess, False,
-                        's', check_templess),
-              Benchmark('gadfly2', run_gadfly, False,
-                        's', check_gadfly),
-              Benchmark('mako', run_mako, False,
-                        's', check_mako),
-             ]
-
-SHOOTOUT_NAMES = ['binary-trees', 'fannkuch', 'fasta', 'float',
-                  'meteor-contest', 'nbody', 'spectral-norm']
-
-#for name in SHOOTOUT_NAMES:
-#    BENCHMARKS.append(LanguageShootoutBenchmark(name))
-
-BENCHMARKS_BY_NAME = {}
-for _b in BENCHMARKS:
-    BENCHMARKS_BY_NAME[_b.name] = _b
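
Note the sizefactor mechanism in the deleted Benchmark class: multiplying a benchmark returns a renamed copy with a scaled workload, which is what bench-custom.py's --size-factor option relied on. A small sketch of the behavior, using only the definitions above:

    b = BENCHMARKS_BY_NAME['richards'] * 3
    print b.name         # prints 'richards*3'
    print b.sizefactor   # prints 3
    # run_richards() then passes 5 * sizefactor iterations to richards.py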

--- a/pypy/translator/benchmark/result.py
+++ /dev/null
@@ -1,188 +0,0 @@
-import os, pickle, sys, time, re
-
-STAT2TITLE = {
-    'stat:st_mtime':  "date",
-    'exe_name':       "executable",
-}
-
-def stat2title(s):
-    if s.startswith('bench:'):
-        return s[6:]
-    else:
-        return STAT2TITLE.get(s, s)
-
-
-class BenchmarkResultSet(object):
-    def __init__(self, max_results=10):
-        self.benchmarks = {}
-        self.max_results = max_results
-
-    def result(self, exe, allowcreate=False):
-        if exe in self.benchmarks or not allowcreate:
-            return self.benchmarks[exe]
-        else:
-            r = self.benchmarks[exe] = BenchmarkResult(exe, self.max_results)
-            return r
-
-    def txt_summary(self, stats, **kw):
-        sortkey = kw.get('sortby', 'stat:st_mtime')
-        lst = self.benchmarks.values()
-        lst.sort(key=lambda x:x.getstat(sortkey, None), reverse=kw.get('reverse', False))
-        if 'filteron' in kw:
-            filteron = kw['filteron']
-            lst = [r for r in lst if filteron(r)]
-        relto = kw.get('relto', None)
-        table = [[(stat2title(s),0) for s in stats]]
-        for r in lst:
-            row = []
-            for stat in stats:
-                if stat.startswith('bench:'):
-                    benchname = stat[6:]
-                    if r.getstat(stat, None) is None:
-                        row.append(('XXX',-1))
-                    elif relto:
-                        factor = self.result(relto).getstat(stat)/r.getstat(stat)
-                        if not r.asc_goods[benchname]:
-                            factor = 1/factor
-                        s, f = r.fmtstat(stat)
-                        row.append((s + ' (%6.2fx)'%factor, f))
-                    else:
-                        row.append(r.fmtstat(stat))
-                else:
-                    row.append(r.fmtstat(stat))
-            table.append(row)
-        widths = [0 for thing in stats]
-        for row in table:
-            for i, cell in enumerate(row):
-                widths[i] = max(len(cell[0]), widths[i])
-        concretetable = []
-        concreterow = []
-        for w, cell in zip(widths, table[0]):
-            concreterow.append(cell[0].center(w))
-        concretetable.append(' '.join(concreterow))
-        for row in table[1:]:
-            concreterow = []
-            for w, cell in zip(widths, row):
-                concreterow.append("%*s"%(cell[1]*w, cell[0]))
-            concretetable.append(' '.join(concreterow))
-        return concretetable
-
-class BenchmarkResult(object):
-    IDS = {}
-
-    def __init__(self, exe, max_results=10):
-        self.max_results = max_results
-        self.exe_stat = os.stat(exe)
-        self.exe_name = exe
-        self.codesize = os.popen('size "%s" | tail -n1 | cut -f1'%(exe,)).read().strip()
-        try:
-            self.pypy_rev = int(os.popen(
-                exe + ' -c "import sys; print sys.pypy_version_info[-1]" 2>/dev/null').read().strip())
-        except ValueError:
-            self.pypy_rev = -1
-        self.best_benchmarks = {}
-        self.benchmarks = {}
-        self.asc_goods = {}
-        self.run_counts = {}
-
-    def run_benchmark(self, benchmark, verbose=False):
-        self.asc_goods[benchmark.name] = benchmark.asc_good
-        if self.run_counts.get(benchmark.name, 0) > self.max_results:
-            return -1
-        print 'running', benchmark.name, 'for', self.exe_name,
-        if verbose and self.pypy_rev > 0:
-            print '[rev %d]' % self.pypy_rev,
-        sys.stdout.flush()
-        new_result = benchmark.run(self.exe_name)
-        print new_result
-        if verbose:
-            print '{'
-            lines = benchmark.latest_output.splitlines(False)
-            for line in lines[:80]:
-                print '\t' + line
-            if len(lines) > 80:
-                print '\t....'
-            print '}'
-        self.run_counts[benchmark.name] = self.run_counts.get(benchmark.name, 0) + 1
-        if new_result == '-FAILED-':
-            return 0
-        self.benchmarks.setdefault(benchmark.name, []).append(new_result)
-        if benchmark.name in self.best_benchmarks:
-            old_result = self.best_benchmarks[benchmark.name]
-            if benchmark.asc_good:
-                new_result = max(new_result, old_result)
-            else:
-                new_result = min(new_result, old_result)
-        self.best_benchmarks[benchmark.name] = new_result
-        return 1
-
-    def getstat(self, *args):
-        # oh for supplied-p!
-        return_default = False
-        if len(args) == 1:
-            stat, = args
-        else:
-            stat, default = args
-            return_default = True
-        if hasattr(self, stat):
-            return getattr(self, stat)
-        if stat == 'exe':
-            myid = len(BenchmarkResult.IDS)
-            myid = BenchmarkResult.IDS.setdefault(self, myid)
-            return '[%s]' % myid
-        statkind, statdetail = stat.split(':')
-        if statkind == 'stat':
-            return getattr(self.exe_stat, statdetail)
-        elif statkind == 'bench':
-            if return_default:
-                return self.best_benchmarks.get(statdetail, default)
-            else:
-                return self.best_benchmarks[statdetail]
-        else:
-            1/0
-
-    def fmtstat(self, *args):
-        stat = args[0]
-        statvalue = self.getstat(*args)
-        if stat == 'stat:st_mtime':
-            return time.ctime(statvalue), -1
-        elif stat == 'exe_name':
-            return os.path.basename(statvalue), -1
-        elif stat.startswith('bench:'):
-            from pypy.translator.benchmark import benchmarks
-            statkind, statdetail = stat.split(':', 1)
-            if '*' in statdetail:
-                statdetail = statdetail.split('*')[0]
-            b = benchmarks.BENCHMARKS_BY_NAME[statdetail]
-            return "%8.2f%s"%(statvalue, b.units), 1
-        elif stat == 'pypy_rev':
-            return str(statvalue), 1
-        else:
-            return str(statvalue), -1
-
-    def summary(self, stats):
-        return [self.getstat(stat) for stat in stats]
-
-    def is_stable(self, name):
-        try:
-            return self.n_results[name] >= self.max_results
-        except:
-            return False
-
-if __name__ == '__main__':
-    import autopath
-    from pypy.translator.benchmark import benchmarks, result
-    import cPickle
-    if os.path.exists('foo.pickle'):
-        s = cPickle.load(open('foo.pickle', 'rb'))
-    else:
-        s = result.BenchmarkResultSet(4)
-    for exe in sys.argv[1:]:
-        r = s.result(exe)
-        r.run_benchmark(benchmarks.BENCHMARKS_BY_NAME['richards'])
-        r.run_benchmark(benchmarks.BENCHMARKS_BY_NAME['pystone'])
-    cPickle.dump(s, open('foo.pickle', 'wb'))
-    stats = ['stat:st_mtime', 'exe_name', 'bench:richards', 'bench:pystone']
-    
-    for row in s.txt_summary(stats, sortby="exe_name", reverse=True, relto="/usr/local/bin/python2.4"):
-        print row
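
The getstat() method deleted above dispatches on a small key convention: a plain name is read as an attribute of the BenchmarkResult, 'stat:<field>' reads a field of os.stat() on the executable, and 'bench:<name>' returns the best recorded result for that benchmark (optionally with a default). A hedged illustration, with a hypothetical executable path:

    r = BenchmarkResult('/usr/local/bin/python2.4')
    print r.getstat('exe_name')              # plain attribute lookup
    print r.getstat('stat:st_mtime')         # field of os.stat()
    print r.getstat('bench:richards', None)  # best result, or the default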

--- a/pypy/translator/benchmark/conftest.py
+++ /dev/null
@@ -1,4 +0,0 @@
-import py
-
-def pytest_ignore_collect(path):
-    return path.basename == "test"

--- a/pypy/translator/benchmark/jitbench.py
+++ /dev/null
@@ -1,30 +0,0 @@
-import sys, os
-from optparse import OptionParser
-
-parser = OptionParser()
-parser.add_option(
-    '--size-factor-list', dest='sizefactorlist',
-    default='1,2,5,20,1,2,5,20,1,2,5,20',
-    )
-options, args = parser.parse_args(sys.argv[1:])
-args = args or [sys.executable]
-executables = [os.path.abspath(executable) for executable in args]
-sizefactors = [int(s) for s in options.sizefactorlist.split(',')]
-
-os.chdir(os.path.dirname(sys.argv[0]) or '.')
-
-errors = []
-
-for sizefactor in sizefactors:
-    for executable in executables:
-        sys.argv[1:] = [executable, '--pickle=jitbench.benchmark_result',
-                        '-v', '--no-cpython',
-                        '--size-factor=%d' % sizefactor]
-        try:
-            execfile('bench-custom.py')
-        except SystemExit, e:
-            errors.append('%s:*%s: %s' % (executable, sizefactor, e))
-
-if errors:
-    print '\n'.join(errors)
-    sys.exit(1)
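
The deleted jitbench.py simply re-ran bench-custom.py for each executable at a series of size factors (by default 1, 2, 5 and 20, repeated three times), accumulating results in the jitbench.benchmark_result pickle. A hedged usage example, assuming a pypy-c binary in the current directory:

    python jitbench.py ./pypy-c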


