[Scipy-svn] r2106 - trunk/Lib/sandbox/svm

scipy-svn at scipy.org
Fri Jul 14 18:45:11 EDT 2006


Author: fullung
Date: 2006-07-14 17:45:02 -0500 (Fri, 14 Jul 2006)
New Revision: 2106

Modified:
   trunk/Lib/sandbox/svm/classification.py
   trunk/Lib/sandbox/svm/dataset.py
   trunk/Lib/sandbox/svm/kernel.py
   trunk/Lib/sandbox/svm/regression.py
Log:
More cleanups to conform to PEP 8.


Modified: trunk/Lib/sandbox/svm/classification.py
===================================================================
--- trunk/Lib/sandbox/svm/classification.py	2006-07-14 22:26:16 UTC (rev 2105)
+++ trunk/Lib/sandbox/svm/classification.py	2006-07-14 22:45:02 UTC (rev 2106)
@@ -61,7 +61,7 @@
             count = 0
             d = {}
             for i in range(len(self.labels)):
-                for j in range(i+1, len(self.labels)):
+                for j in range(i + 1, len(self.labels)):
                     d[self.labels[i], self.labels[j]] = v[count]
                     d[self.labels[j], self.labels[i]] = -v[count]
                     count += 1

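The loop touched by this hunk builds the one-vs-one decision-value table: entry (i, j) holds the decision value for that label pair and (j, i) its negation. A minimal standalone sketch, where labels and decision_values are made-up stand-ins for self.labels and the per-sample values returned by libsvm:

    labels = [0, 1, 2]
    decision_values = [0.7, -0.2, 1.1]   # len == n*(n-1)//2 for n labels

    d = {}
    count = 0
    for i in range(len(labels)):
        for j in range(i + 1, len(labels)):
            # value for the (i, j) pair, and its negation for (j, i)
            d[labels[i], labels[j]] = decision_values[count]
            d[labels[j], labels[i]] = -decision_values[count]
            count += 1

    assert d[0, 1] == 0.7 and d[1, 0] == -0.7
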
Modified: trunk/Lib/sandbox/svm/dataset.py
===================================================================
--- trunk/Lib/sandbox/svm/dataset.py	2006-07-14 22:26:16 UTC (rev 2105)
+++ trunk/Lib/sandbox/svm/dataset.py	2006-07-14 22:45:02 UTC (rev 2106)
@@ -41,7 +41,7 @@
         # Create Gram matrix as a list of vectors with an extra entry
         # for the id field.
         n = len(origdata)
-        grammat = [N.empty((n+1,), dtype=libsvm.svm_node_dtype)
+        grammat = [N.empty((n + 1,), dtype=libsvm.svm_node_dtype)
                    for i in range(n)]
         self.grammat = grammat
 
@@ -150,7 +150,7 @@
         self.data = map(lambda x: convert_to_svm_node(x), origdata)
 
 def convert_to_svm_node(x):
-    y = N.empty(len(x)+1, dtype=libsvm.svm_node_dtype)
+    y = N.empty(len(x) + 1, dtype=libsvm.svm_node_dtype)
     y[-1] = -1, 0.
     if isinstance(x, dict):
         x = x.items()
@@ -158,7 +158,7 @@
         x.sort(cmp=lambda x,y: cmp(x[0],y[0]))
         y[:-1] = x
     else:
-        y['index'][:-1] = N.arange(1,len(x)+1)
+        y['index'][:-1] = N.arange(1, len(x) + 1)
         y['value'][:-1] = x
     assert N.alltrue(y[:-1]['index'] >= 1), \
         'indexes must be positive'
@@ -176,5 +176,5 @@
     for j in indexes:
         if j in xidx and j in yidx:
             # dot if index is present in both vectors
-            z += x['value'][xidx[j]]*y['value'][yidx[j]]
+            z += x['value'][xidx[j]] * y['value'][yidx[j]]
     return z

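The dataset.py hunks deal with the svm_node record layout ((index, value) pairs terminated by an entry with index -1) and a sparse dot product that only multiplies values whose index appears in both vectors. A rough standalone sketch of both; the svm_node_dtype literal and the example vectors below are made up for illustration (the real dtype lives in the libsvm wrapper module):

    import numpy as N

    svm_node_dtype = N.dtype([('index', N.intc), ('value', N.float64)])

    def to_nodes(pairs):
        # pairs: list of (1-based index, value); append the -1 terminator
        y = N.empty(len(pairs) + 1, dtype=svm_node_dtype)
        y[:-1] = pairs
        y[-1] = -1, 0.
        return y

    def sparse_dot(x, y):
        # multiply values only where an index occurs in both vectors
        xidx = dict(zip(x['index'][:-1], x['value'][:-1]))
        yidx = dict(zip(y['index'][:-1], y['value'][:-1]))
        return sum(v * yidx[i] for i, v in xidx.items() if i in yidx)

    a = to_nodes([(1, 2.0), (3, 1.5)])
    b = to_nodes([(1, 4.0), (2, -1.0), (3, 2.0)])
    assert sparse_dot(a, b) == 2.0 * 4.0 + 1.5 * 2.0
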
Modified: trunk/Lib/sandbox/svm/kernel.py
===================================================================
--- trunk/Lib/sandbox/svm/kernel.py	2006-07-14 22:26:16 UTC (rev 2105)
+++ trunk/Lib/sandbox/svm/kernel.py	2006-07-14 22:45:02 UTC (rev 2106)
@@ -26,7 +26,7 @@
         self.coef0 = coef0
 
     def __call__(self, x, y, dot):
-        base = self.gamma*dot(x, y) + self.coef0
+        base = self.gamma * dot(x, y) + self.coef0
         tmp = base
         ret = 1.0
         t = self.degree
@@ -42,8 +42,8 @@
         self.gamma = gamma
 
     def __call__(self, x, y, dot):
-        z = dot(x, x) + dot(y, y) - 2*dot(x, y)
-        return N.exp(-self.gamma*z)
+        z = dot(x, x) + dot(y, y) - 2 * dot(x, y)
+        return N.exp(-self.gamma * z)
 
 class SigmoidKernel:
     def __init__(self, gamma, coef0):
@@ -52,7 +52,7 @@
         self.coef0 = coef0
 
     def __call__(self, x, y, dot):
-        return N.tanh(self.gamma*dot(x, y)+self.coef0)
+        return N.tanh(self.gamma * dot(x, y) + self.coef0)
 
 class CustomKernel:
     def __init__(self, f):

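RBFKernel above relies on the identity ||x - y||^2 = dot(x, x) + dot(y, y) - 2*dot(x, y), so the kernel can be evaluated from dot products alone. A quick check of that identity; the arrays and gamma below are made up for illustration (numpy is imported as N in the module):

    import numpy as N

    x = N.array([1.0, 2.0, 3.0])
    y = N.array([0.5, 2.0, 4.0])
    gamma = 0.1

    z = N.dot(x, x) + N.dot(y, y) - 2 * N.dot(x, y)
    assert N.allclose(z, N.sum((x - y) ** 2))
    rbf = N.exp(-gamma * z)   # the value RBFKernel.__call__ would return
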
Modified: trunk/Lib/sandbox/svm/regression.py
===================================================================
--- trunk/Lib/sandbox/svm/regression.py	2006-07-14 22:26:16 UTC (rev 2105)
+++ trunk/Lib/sandbox/svm/regression.py	2006-07-14 22:45:02 UTC (rev 2106)
@@ -80,14 +80,14 @@
             sumvv = sumvv + v * v
             sumyy = sumyy + y * y
             sumvy = sumvy + v * y
-            total_error = total_error + (v-y) * (v-y)
+            total_error = total_error + (v - y) * (v - y)
 
         # mean squared error
         mse = total_error / len(dataset.data)
         # squared correlation coefficient
         l = len(dataset.data)
-        scc = ((l*sumvy - sumv*sumy) * (l*sumvy - sumv*sumy)) / \
-            ((l*sumvv - sumv*sumv) * (l*sumyy - sumy*sumy))
+        scc = ((l * sumvy - sumv * sumy) * (l * sumvy - sumv * sumy)) / \
+            ((l * sumvv - sumv * sumv) * (l * sumyy - sumy * sumy))
 
         return mse, scc
 
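The scc computed in this hunk is the squared Pearson correlation between the predicted values v and the targets y, written with running sums. A short sketch with made-up data relating that formula to N.corrcoef:

    import numpy as N

    v = N.array([1.0, 2.1, 2.9, 4.2])   # predictions
    y = N.array([1.0, 2.0, 3.0, 4.0])   # targets
    l = len(y)

    sumv, sumy = v.sum(), y.sum()
    sumvv, sumyy, sumvy = (v * v).sum(), (y * y).sum(), (v * y).sum()
    scc = ((l * sumvy - sumv * sumy) ** 2) / \
          ((l * sumvv - sumv ** 2) * (l * sumyy - sumy ** 2))

    mse = ((v - y) ** 2).mean()          # the mean squared error above
    assert N.allclose(scc, N.corrcoef(v, y)[0, 1] ** 2)
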

More information about the Scipy-svn mailing list