[Python-checkins] r47089 - sandbox/trunk/pybch/pybch.py

sean.reifschneider python-checkins at python.org
Sat Jun 24 12:19:51 CEST 2006


Author: sean.reifschneider
Date: Sat Jun 24 12:19:49 2006
New Revision: 47089

Modified:
   sandbox/trunk/pybch/pybch.py
Log:
More math changes.  Now I'm seeing all kinds of skew in the results.  Ugh.


Modified: sandbox/trunk/pybch/pybch.py
==============================================================================
--- sandbox/trunk/pybch/pybch.py	(original)
+++ sandbox/trunk/pybch/pybch.py	Sat Jun 24 12:19:49 2006
@@ -46,15 +46,15 @@
     return(s)
 
 
-#########################################################
-def compareResults(testResults, verbose, compareResults):
+###########################################################
+def compareResults(testResults, verbose, compareAgainst):
     #  display comparison results
     print ('Comparing %(version)s (%(build)s)'
-            % compareResults['environment'])
+            % compareAgainst['environment'])
     print ('       to %(version)s (%(build)s)'
             % testResults['environment'])
     print ('Comparing [%(environment)s] on %(host)s'
-            % compareResults['environment'])
+            % compareAgainst['environment'])
     print ('       to [%(environment)s] on %(host)s'
             % testResults['environment'])
     print
@@ -69,21 +69,18 @@
     overallSpeedups = 0.0
     overallSlowdowns = 0.0
     for testSource in testList:
-        compareResults = compareResults['results'][testSource[1]]
-        testCompare = [ None, None, [compareResults[0], compareResults[1]] ]
-        sourceAverage = (reduce(lambda x,y: x+y, testSource[2][1], 0)
-                / len(testSource[2]))
-        compareAverage = (reduce(lambda x,y: x+y, testCompare[2][1], 0)
-                / len(testCompare[2]))
+        compareData = compareAgainst['results'][testSource[1]]
+        testCompare = [ None, None, [compareData[0], compareData[1]] ]
         sourceBest = min(testSource[2][1])
         compareBest = min(testCompare[2][1])
 
         #  calculate normalization
-        normalizationFactor = float(testCompare[2][0]) / float(testSource[2][0])
+        normalizationFactor = compareBest / sourceBest
+        sourceLapsNormalized = testSource[2][0] * normalizationFactor
 
         #  compare
-        difference = (compareBest -
-                (normalizationFactor * sourceBest)) / compareBest
+        difference = 100.0 - ((sourceLapsNormalized / testCompare[2][0])
+                * 100.0)
         differenceStr = '%7.1f' % difference
         if differenceStr.strip() == '-0.0': differenceStr = '    0.0'
         differenceShorter = float(differenceStr.strip())
@@ -91,11 +88,10 @@
         #  debugging
         if verbose > 2:
             print 'Source Laps: ', testSource[2][0]
+            print 'Source Laps Normalized: ', sourceLapsNormalized
             print 'Compare Laps: ', testCompare[2][0]
             print 'Source Best:', sourceBest
             print 'Compare Best:', compareBest
-            print 'Source Average:', sourceAverage
-            print 'Compare Average:', compareAverage
             print 'Normalization Factor:', normalizationFactor
             print 'Difference:', difference
             print 'Difference Str:', differenceStr
@@ -107,12 +103,12 @@
         if differenceShorter < 0:
             overallSpeedups = overallSpeedups + differenceShorter
 
-        print '%-50s -> %s%%' % ( testSource[0], differenceStr )
+        print '%-63s -> %s%%' % ( testSource[0], differenceStr )
 
     print '=' * 78
     print '%68s %5.1f%%' % ( 'Overall difference:', overallDiff )
-    print '%68s %5.1f%%' % ( 'Overall speedups:', overallSpeedups )
-    print '%68s %5.1f%%' % ( 'Overall slowdowns:', overallSlowdowns )
+    print '%68s %5.1f%%' % ( 'Total speedups:', overallSpeedups )
+    print '%68s %5.1f%%' % ( 'Total slowdowns:', overallSlowdowns )
 
 
 ##################################
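
For reference, a minimal standalone sketch of the difference calculation
this revision introduces, assuming each results entry is a [laps, timings]
pair as the hunks above suggest; percent_difference is an illustrative
name, not part of pybch. A negative return value corresponds to a speedup
and a positive one to a slowdown, matching how the loop above accumulates
overallSpeedups and overallSlowdowns.

    def percent_difference(source_result, compare_result):
        # Unpack assumed [laps, timings] structure from the diff.
        source_laps, source_times = source_result
        compare_laps, compare_times = compare_result
        source_best = min(source_times)
        compare_best = min(compare_times)
        # Scale the source lap count by the ratio of best times, then
        # express the remaining gap as a percentage of the compare laps.
        normalization = compare_best / source_best
        source_laps_normalized = source_laps * normalization
        return 100.0 - (source_laps_normalized / compare_laps) * 100.0

    # e.g. source best 0.50s vs. compare best 0.55s at 1000 laps each
    print percent_difference([1000, [0.52, 0.50]], [1000, [0.57, 0.55]])
    # -> -10.0 (a 10% speedup)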

