[pypy-svn] r59273 - in pypy/build/benchmem: . testing

fijal at codespeak.net fijal at codespeak.net
Mon Oct 20 18:06:48 CEST 2008


Author: fijal
Date: Mon Oct 20 18:06:48 2008
New Revision: 59273

Modified:
   pypy/build/benchmem/runbench.py
   pypy/build/benchmem/testing/test_benchtool.py
Log:
add benchmarks for app profiles: introduce a BenchRunnerAppProfiles runner
that records smaps snapshots while an application-profile benchmark runs,
hoist the common runner setup into the BenchRunner base class, and dispatch
result parsing through a benchtype2class registry


Modified: pypy/build/benchmem/runbench.py
==============================================================================
--- pypy/build/benchmem/runbench.py	(original)
+++ pypy/build/benchmem/runbench.py	Mon Oct 20 18:06:48 2008
@@ -20,17 +20,26 @@
 class BenchRunner:
     SEPBENCH = "=" * 80
 
-    def log(self, *args):
-        print " ".join(map(str, args))
-
-
-class BenchRunnerBaseSize(BenchRunner):
     def __init__(self, executable, logpath, options):
         self.executable = executable
         self.executable_full_path = py.path.local.sysfind(executable)
         self.logpath = py.path.local(logpath)
         self.logstream = self.logpath.open("a")
         self.options = options
+        self.tmpdir = py.path.local.make_numbered_dir(prefix="bench")
+
+    def getnames(self):
+        l = []
+        for name, obj in vars(self.benchpath.pyimport()).items():
+            if name.startswith("bench") and callable(obj):
+                l.append(name)
+        l.sort()
+        return l
+
+    def log(self, *args):
+        print " ".join(map(str, args))
+
+class BenchRunnerBaseSize(BenchRunner):
 
     def write_benchheader(self):
         print >>self.logstream, self.SEPBENCH 
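
getnames() now lives on the BenchRunner base class, so every runner
discovers its benchmarks the same way: it imports self.benchpath and
collects any module-global callable whose name starts with "bench".
A minimal sketch of a module it would pick up (the function name and
body here are hypothetical):

    # hypothetical benchmark; getnames() would return ["bench_list_growth"]
    def bench_list_growth(iter1, iter2):
        for i in range(iter1):
            l = [None] * iter2
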
@@ -57,12 +66,9 @@
 
 class BenchRunnerBaseTime(BenchRunner):
     def __init__(self, executable, logpath, options):
-        self.executable = executable
-        self.executable_full_path = py.path.local.sysfind(executable)
-        self.logpath = py.path.local(logpath)
-        self.logstream = self.logpath.open("a")
+        BenchRunner.__init__(self, executable, logpath, options)
         self.numiter = options.basetime_numiter
-
+
     def write_benchheader(self):
         print >>self.logstream, self.SEPBENCH
         print >>self.logstream, "#benchtype=basetime"
@@ -98,13 +104,9 @@
 
 class BenchRunnerObjsize(BenchRunner):
     def __init__(self, executable, logpath, options):
-        self.executable = executable
+        BenchRunner.__init__(self, executable, logpath, options)
         self.benchpath = benchmarkdir.join("sizes.py")
         assert self.benchpath.check()
-        self.logpath = py.path.local(logpath)
-        self.logstream = self.logpath.open("a")
-        self.tmpdir = py.path.local.make_numbered_dir(prefix="bench")
-        self.options = options
 
     def makebench(self, name):
         arglist = str(self.options.numiter)
@@ -143,14 +145,6 @@
         print >>self.logstream, "#benchargs=%s" %(args,)
         print >>self.logstream
 
-    def getnames(self):
-        l = []
-        for name, obj in vars(self.benchpath.pyimport()).items():
-            if name.startswith("bench") and callable(obj):
-                l.append(name)
-        l.sort()
-        return l 
-
     def run(self):
         for name in self.getnames():
             self.run_checkpointed(name)
@@ -182,6 +176,64 @@
             #sys.stdout.write(".")
             #sys.stdout.flush()
 
+class BenchRunnerAppProfiles(BenchRunner):
+    ITER2 = 100000
+
+    def __init__(self, *args):
+        BenchRunner.__init__(self, *args)
+        self.benchpath = benchmarkdir.join("appprofiles.py")
+
+    def write_benchheader(self, benchname, args):
+        print >>self.logstream, self.SEPBENCH
+        print >>self.logstream, "#benchtype=appprofiles"
+        print >>self.logstream, "#executable=%s" %(str(self.executable),)
+        print >>self.logstream, "#benchpath=%s" %(self.benchpath.basename,)
+        print >>self.logstream, "#benchname=%s" %(benchname,)
+        print >>self.logstream, "#benchargs=%s" %(args,)
+        print >>self.logstream
+
+    def run(self):
+        for name in self.getnames():
+            self.run_once(name)
+
+    def run_once(self, name):
+        benchpyfile = self.makebench(name)
+        #self.log("created", benchpyfile)
+        cmd = "%s -u %s" %(self.executable, benchpyfile)
+        self.log("running %s(%s)" %(name, self.options.numiter))
+        child_in, child_out = os.popen2(cmd)
+        pid = int(child_out.readline())
+        self.write_benchheader(name, self.options.numiter)
+        rec = smaps.SmapsRecorder(pid, self.logstream)
+        self.interact_with_child(rec, child_in, child_out)
+
+    def makebench(self, name):
+        arglist = (int(self.options.numiter)/3000, self.ITER2)
+        source = py.code.Source(self.benchpath.read(), """
+            import sys, gc
+            def write(c):
+                sys.stdout.write(c)
+                sys.stdout.flush()
+
+            if __name__ == "__main__":
+                import os, sys, gc
+                pid = os.getpid()
+                write(str(pid) + "\\n")
+                %s %s
+                sys.stdin.close()
+        """ %(name, arglist))
+        p = self.tmpdir.join(self.benchpath.basename)
+        p.write(source)
+        return p
+
+    def interact_with_child(self, rec, child_in, child_out):
+        while not child_in.closed:
+            try:
+                rec.snapshot()
+            except py.error.ENOENT:
+                break
+
+
 #
 # ================ reading a benchmark log file =======================
 #
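
Two details of the new runner are easy to miss. In makebench(), arglist
is a tuple and a tuple renders with parentheses, so "%s %s" % (name,
arglist) expands into a valid call; for a hypothetical bench_list_growth
with numiter=3000 the generated tail of the script would read:

    if __name__ == "__main__":
        import os, sys, gc
        pid = os.getpid()
        write(str(pid) + "\n")
        bench_list_growth (1, 100000)
        sys.stdin.close()

In interact_with_child(), the loop ends when SmapsRecorder.snapshot()
raises py.error.ENOENT, i.e. once /proc/<pid>/smaps disappears because
the child has exited.
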
@@ -238,16 +290,9 @@
             yield parse_result(stream_iter, kw)
 
 def parse_result(stream, kw):
-    chosen_cls = None
-    benchtype = kw.pop('benchtype')
-    for scls in Result.__subclasses__():
-        if scls.benchtype == benchtype:
-            chosen_cls = scls
-            break
-    assert chosen_cls is not None, 'Unknown benchtype ' + repr(benchtype)
+    chosen_cls = benchtype2class[kw.pop('benchtype')]
     return chosen_cls.parse(stream, kw)
 
-
 class Result(object):
     @classmethod
     def parse(cls, lnstream, kw):
@@ -265,9 +310,7 @@
                 continue
         return cls(snapshots, **kw)
 
-
-class ObjsizeResult(Result):
-    benchtype = "objsizes"
+class CommonResult(Result):
     def __init__(self, snapshots, executable, benchpath, benchname, benchargs):
         assert snapshots
         self.snapshots = snapshots
@@ -276,6 +319,9 @@
         self.benchname = benchname
         self.benchargs = benchargs
 
+class ObjsizeResult(CommonResult):
+    benchtype = "objsizes"
+
     def max(self, attrname):
         maxvalue = 0
         for snap in self.snapshots:
@@ -289,6 +335,8 @@
                 dirty = mapping.private_dirty + mapping.shared_dirty 
                 assert mapping.rss == dirty + clean
 
+class AppprofileResult(CommonResult):
+    benchtype = "appprofiles"
 
 class BasesizeResult(Result):
     benchtype = 'basesize'
@@ -297,7 +345,6 @@
         assert len(snapshots) == 1
         self.executable = executable
 
-
 class BasetimeResult(Result):
     benchtype = 'basetime'
     def __init__(self, timings, executable):
@@ -318,6 +365,14 @@
             timings.append((name, times))
         return cls(timings, **kw)
 
+benchtype2class = {}
+
+def _update_benchtype2class():
+    for cls in globals().values():
+        if Result in getattr(cls, '__mro__', []) and getattr(cls, 'benchtype', None):
+            benchtype2class[cls.benchtype] = cls
+
+_update_benchtype2class()
 
 class Mappings(object):
     HEAP, CODE, DATA = object(), object(), object()
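
The registry is filled once at import time, so parse_result() reduces to
a dictionary lookup. A minimal sketch of the dispatch, with hypothetical
header values:

    kw = {"benchtype": "appprofiles", "executable": "python2.5",
          "benchpath": "appprofiles.py", "benchname": "bench_list_growth",
          "benchargs": "10"}
    cls = benchtype2class[kw.pop("benchtype")]   # -> AppprofileResult

An unknown benchtype now surfaces as a KeyError instead of the old
assert message.
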
@@ -402,11 +457,14 @@
 def getrunnerclass(benchtype):
     if benchtype == "objsizes":
         return BenchRunnerObjsize
-    if benchtype == "basesize":
+    elif benchtype == "basesize":
         return BenchRunnerBaseSize
-    if benchtype == "basetime":
+    elif benchtype == "basetime":
         return BenchRunnerBaseTime
-    assert 0,benchtype
+    elif benchtype == "appprofiles":
+        return BenchRunnerAppProfiles
+    else:
+        raise NotImplementedError("Unknown benchmark type: %s" % (benchtype,))
 
 if __name__ == '__main__':
     (options, args) = parser.parse_args()
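
getrunnerclass() remains the single construction point for runners; a
sketch of driving the new benchtype by hand, assuming the optparse
options object built in __main__:

    runner_cls = getrunnerclass("appprofiles")  # BenchRunnerAppProfiles
    runner = runner_cls("python2.5", "bench.log", options)
    runner.run()  # writes one #benchtype=appprofiles section per benchmark
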

Modified: pypy/build/benchmem/testing/test_benchtool.py
==============================================================================
--- pypy/build/benchmem/testing/test_benchtool.py	(original)
+++ pypy/build/benchmem/testing/test_benchtool.py	Mon Oct 20 18:06:48 2008
@@ -37,8 +37,27 @@
     names = runner.getnames()
     assert len(resultset.getname2results()) == len(names)
     for name, results in resultset.getname2results():
-        assert len(results) ==1
-        assert len(results[0].snapshots) == 2 + 1
+        assert len(results) == 1
+        assert len(results[0].snapshots) == 3
+
+
+def test_apps_runbench_and_read_results():
+    tmpdir = py.test.ensuretemp("benchrunner")
+    benchlog = tmpdir.join("log_apps")
+
+    class options:
+        numiter = 10
+    runner = runbench.BenchRunnerAppProfiles("python2.5", benchlog, options)
+    assert runner.benchpath.basename == "appprofiles.py"
+    runner.run()
+    resultset = runbench.ResultSet()
+    resultset.parse(benchlog)
+
+    names = runner.getnames()
+    assert len(resultset.getname2results()) == len(names)
+    for name, results in resultset.getname2results():
+        assert len(results) == 1
+        assert len(results[0].snapshots)
 
 def test_runbench_functional():
     script = py.path.local(runbench.__file__).dirpath("runbench.py")
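
The new test can be run on its own with py.test's keyword selection,
for example:

    py.test testing/test_benchtool.py -k test_apps_runbench_and_read_results
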


