[Python-checkins] r47088 - sandbox/trunk/pybch/pybch.py

sean.reifschneider python-checkins at python.org
Sat Jun 24 02:27:49 CEST 2006


Author: sean.reifschneider
Date: Sat Jun 24 02:27:48 2006
New Revision: 47088

Modified:
   sandbox/trunk/pybch/pybch.py
Log:
Finally worked out the math, I think it's functional now.  Need to run more
tests, and tests against different versions.


Modified: sandbox/trunk/pybch/pybch.py
==============================================================================
--- sandbox/trunk/pybch/pybch.py	(original)
+++ sandbox/trunk/pybch/pybch.py	Sat Jun 24 02:27:48 2006
@@ -42,7 +42,7 @@
 ########################
 def shortifyTestName(s):
     #if s[:6] == 'Tests.': s = s[6:]
-    s = s.split('.')[-1]
+    s = '.'.join(s.split('.')[-2:])
     return(s)
 
 
@@ -75,33 +75,39 @@
                 / len(testSource[2]))
         compareAverage = (reduce(lambda x,y: x+y, testCompare[2][1], 0)
                 / len(testCompare[2]))
+        sourceBest = min(testSource[2][1])
+        compareBest = min(testCompare[2][1])
 
         #  calculate normalization
         normalizationFactor = float(testCompare[2][0]) / float(testSource[2][0])
-        print normalizationFactor  #@@@
 
-        #  calculate averages
-        sourceAverages = []
-        for n in testSource[2][1]:
-            sourceAverages.append(n / sourceAverage * 100.0)
-        compareAverages = []
-        for n in testCompare[2][1]:
-            compareAverages.append(n / compareAverage * 100.0)
-
-        sourceAveragesStr = ' '.join(map(lambda x: '%5.1f%%'
-                % x, sourceAverages))
-        compareAveragesStr = ' '.join(map(lambda x: '%5.1f%%'
-                % x, compareAverages))
-
-        difference = min(testCompare[2][1]) - min(testSource[2][1])
-        overallDiff = overallDiff + difference
-        if difference > 0:
-            overallSlowdowns = overallSlowdowns + difference
-        if difference < 0:
-            overallSpeedups = overallSpeedups + difference
+        #  compare
+        difference = (compareBest -
+                (normalizationFactor * sourceBest)) / compareBest
+        differenceStr = '%7.1f' % difference
+        if differenceStr.strip() == '-0.0': differenceStr = '    0.0'
+        differenceShorter = float(differenceStr.strip())
+
+        #  debugging
+        if verbose > 2:
+            print 'Source Laps: ', testSource[2][0]
+            print 'Compare Laps: ', testCompare[2][0]
+            print 'Source Best:', sourceBest
+            print 'Compare Best:', compareBest
+            print 'Source Average:', sourceAverage
+            print 'Compare Average:', compareAverage
+            print 'Normalization Factor:', normalizationFactor
+            print 'Difference:', difference
+            print 'Difference Str:', differenceStr
+            print 'Difference Shorter:', differenceShorter
+
+        overallDiff = overallDiff + differenceShorter
+        if differenceShorter > 0:
+            overallSlowdowns = overallSlowdowns + differenceShorter
+        if differenceShorter < 0:
+            overallSpeedups = overallSpeedups + differenceShorter
 
-        print '%-30s %s' % ( testSource[0], sourceAveragesStr )
-        print '%-30s %s -> %5.1f%%' % ( '', compareAveragesStr, difference )
+        print '%-50s -> %s%%' % ( testSource[0], differenceStr )
 
     print '=' * 78
     print '%68s %5.1f%%' % ( 'Overall difference:', overallDiff )
@@ -131,11 +137,26 @@
         metavar = 'TEST_REGEX')
 parser.add_option('-v', '--verbose', dest = 'verbose', action = 'count',
         default = 0, help = 'Increase verbosity level.')
+parser.add_option('-l', '--list-tests', dest = 'listTests', action = 'count',
+        default = 0, help = 'List available tests.')
 options, args = parser.parse_args()
 
+#  list tests
+if options.listTests:
+    import Tests
+    for moduleName in Tests.testModules:
+        exec('from Tests import %s' % moduleName)
+        module = eval(moduleName)
+        for testClass in module.__dict__.values():
+            if (not hasattr(testClass, 'is_a_test')
+                    or 'TestHelpers' in str(testClass)):
+                continue
+            print shortifyTestName(str(testClass))
+    sys.exit(0)
+
 if options.compareDestFileLoad:
     #  load results from a file
-    testResults = pickle.load(open(options.compareDestFileLoad, 'r'))
+    testData = pickle.load(open(options.compareDestFileLoad, 'r'))
 else:
     #  run tests locally
     import Tests


More information about the Python-checkins mailing list