[Scipy-svn] r2122 - in trunk/Lib/sandbox/svm: . tests

scipy-svn at scipy.org
Tue Jul 25 03:48:48 EDT 2006


Author: fullung
Date: 2006-07-25 02:48:25 -0500 (Tue, 25 Jul 2006)
New Revision: 2122

Modified:
   trunk/Lib/sandbox/svm/classification.py
   trunk/Lib/sandbox/svm/model.py
   trunk/Lib/sandbox/svm/oneclass.py
   trunk/Lib/sandbox/svm/predict.py
   trunk/Lib/sandbox/svm/regression.py
   trunk/Lib/sandbox/svm/tests/test_classification.py
   trunk/Lib/sandbox/svm/tests/test_oneclass.py
   trunk/Lib/sandbox/svm/tests/test_precomputed.py
   trunk/Lib/sandbox/svm/tests/test_regression.py
Log:
Collapse predictor classes to yield a nicer API.
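
In practice this changes the fit() call: each model class now carries its own ResultsType, and the predictor defaults to LibSvmPredictor with an optional override. A minimal before/after usage sketch, based on the test changes in this revision (dataset and kernel setup abbreviated as in test_classification.py):

    # before r2122: result and predictor classes passed explicitly
    model = LibSvmCClassificationModel(RBFKernel(traindata.gamma))
    results = model.fit(traindata, LibSvmClassificationResults, LibSvmPredictor)

    # after r2122: the model supplies its ResultsType; the predictor
    # defaults to LibSvmPredictor and can still be overridden
    results = model.fit(traindata)
    results = model.fit(traindata, LibSvmPythonPredictor)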


Modified: trunk/Lib/sandbox/svm/classification.py
===================================================================
--- trunk/Lib/sandbox/svm/classification.py	2006-07-24 22:07:41 UTC (rev 2121)
+++ trunk/Lib/sandbox/svm/classification.py	2006-07-25 07:48:25 UTC (rev 2122)
@@ -89,6 +89,8 @@
       Machines.
     """
 
+    ResultsType = LibSvmClassificationResults
+
     def __init__(self, kernel, weights, **kwargs):
         LibSvmModel.__init__(self, kernel, **kwargs)
         if weights is not None:

Modified: trunk/Lib/sandbox/svm/model.py
===================================================================
--- trunk/Lib/sandbox/svm/model.py	2006-07-24 22:07:41 UTC (rev 2121)
+++ trunk/Lib/sandbox/svm/model.py	2006-07-25 07:48:25 UTC (rev 2122)
@@ -1,6 +1,7 @@
 from ctypes import POINTER, c_double, c_int
 
 from kernel import *
+from predict import *
 import libsvm
 
 __all__ = [
@@ -43,12 +44,12 @@
 
         self.param = param
 
-    def fit(self, dataset, ResultType, PredictorType):
+    def fit(self, dataset, PredictorType=LibSvmPredictor):
         problem = dataset._create_svm_problem()
         dataset._update_svm_parameter(self.param)
         self._check_problem_param(problem, self.param)
         model = libsvm.svm_train(problem, self.param)
-        return ResultType(model, dataset, self.kernel, PredictorType)
+        return self.ResultsType(model, dataset, self.kernel, PredictorType)
 
     def _check_problem_param(self, problem, param):
         error_msg = libsvm.svm_check_parameter(problem, param)

Modified: trunk/Lib/sandbox/svm/oneclass.py
===================================================================
--- trunk/Lib/sandbox/svm/oneclass.py	2006-07-24 22:07:41 UTC (rev 2121)
+++ trunk/Lib/sandbox/svm/oneclass.py	2006-07-25 07:48:25 UTC (rev 2122)
@@ -43,6 +43,8 @@
     High-Dimensional Distribution.
     """
 
+    ResultsType = LibSvmOneClassResults
+
     def __init__(self, kernel, nu=0.5, **kwargs):
         """
         Parameters:

Modified: trunk/Lib/sandbox/svm/predict.py
===================================================================
--- trunk/Lib/sandbox/svm/predict.py	2006-07-24 22:07:41 UTC (rev 2121)
+++ trunk/Lib/sandbox/svm/predict.py	2006-07-25 07:48:25 UTC (rev 2122)
@@ -6,57 +6,28 @@
 
 __all__ = [
     'LibSvmPredictor',
-    'LibSvmPrecomputedPredictor',
     'LibSvmPythonPredictor'
     ]
 
 class LibSvmPredictor:
     def __init__(self, model, dataset, kernel):
         self.model = model
+        self.kernel = kernel
         modelc = model.contents
         if modelc.param.kernel_type == libsvm.PRECOMPUTED:
-            raise TypeError, '%s is for non-precomputed problems' % \
-                str(self.__class__)
+            ids = [int(modelc.SV[i][0].value) for i in range(modelc.l)]
+            support_vectors = [dataset[id] for id in ids]
+            self.support_vectors = support_vectors
+            # fix support vector ids in precomputed data
+            for i in range(modelc.l):
+                modelc.SV[i][0].value = i
+            self._transform_input = self._create_gramvec
+        else:
+            self._transform_input = lambda x: x
 
     def __del__(self):
         libsvm.svm_destroy_model(self.model)
 
-    def predict(self, x):
-        xptr = cast(x.ctypes.data, POINTER(libsvm.svm_node))
-        return libsvm.svm_predict(self.model, xptr)
-
-    def predict_values(self, x, n):
-        xptr = cast(x.ctypes.data, POINTER(libsvm.svm_node))
-        v = N.empty((n,), dtype=N.float64)
-        vptr = cast(v.ctypes.data, POINTER(c_double))
-        libsvm.svm_predict_values(self.model, xptr, vptr)
-        return v
-
-    def predict_probability(self, x, n):
-        xptr = cast(x.ctypes.data, POINTER(libsvm.svm_node))
-        pe = N.empty((n,), dtype=N.float64)
-        peptr = cast(pe.ctypes.data, POINTER(c_double))
-        label = libsvm.svm_predict_probability(self.model, xptr, peptr)
-        return label, pe
-
-class LibSvmPrecomputedPredictor:
-    def __init__(self, model, dataset, kernel):
-        self.kernel = kernel
-        self.model = model
-        modelc = model.contents
-        if modelc.param.kernel_type != libsvm.PRECOMPUTED:
-            raise TypeError, '%s is for precomputed problems' % \
-                str(self.__class__)
-        ids = [int(modelc.SV[i][0].value) for i in range(modelc.l)]
-        support_vectors = [dataset[id] for id in ids]
-        self.support_vectors = support_vectors
-        # fix support vector ids in precomputed data
-        for i in range(modelc.l):
-            modelc.SV[i][0].value = i
-
-    def __del__(self):
-        libsvm.svm_destroy_model(self.model)
-
     def _create_gramvec(self, x):
         gramvec = N.zeros((self.model.contents.l,),
                           dtype=libsvm.svm_node_dtype)
@@ -65,24 +36,24 @@
         return gramvec
 
     def predict(self, x):
-        g = self._create_gramvec(x)
-        gptr = cast(g.ctypes.data, POINTER(libsvm.svm_node))
-        return libsvm.svm_predict(self.model, gptr)
+        x = self._transform_input(x)
+        xptr = cast(x.ctypes.data, POINTER(libsvm.svm_node))
+        return libsvm.svm_predict(self.model, xptr)
 
     def predict_values(self, x, n):
-        g = self._create_gramvec(x)
-        gptr = cast(g.ctypes.data, POINTER(libsvm.svm_node))
+        x = self._transform_input(x)
+        xptr = cast(x.ctypes.data, POINTER(libsvm.svm_node))
         v = N.empty((n,), dtype=N.float64)
         vptr = cast(v.ctypes.data, POINTER(c_double))
-        libsvm.svm_predict(self.model, gptr, vptr)
+        libsvm.svm_predict_values(self.model, xptr, vptr)
         return v
 
     def predict_probability(self, x, n):
-        g = self._create_gramvec(x)
-        gptr = cast(g.ctypes.data, POINTER(libsvm.svm_node))
+        x = self._transform_input(x)
+        xptr = cast(x.ctypes.data, POINTER(libsvm.svm_node))
         pe = N.empty((n,), dtype=N.float64)
         peptr = cast(pe.ctypes.data, POINTER(c_double))
-        label = libsvm.svm_predict_probability(self.model, gptr, peptr)
+        label = libsvm.svm_predict_probability(self.model, xptr, peptr)
         return label, pe
 
 class LibSvmPythonPredictor:

Modified: trunk/Lib/sandbox/svm/regression.py
===================================================================
--- trunk/Lib/sandbox/svm/regression.py	2006-07-24 22:07:41 UTC (rev 2121)
+++ trunk/Lib/sandbox/svm/regression.py	2006-07-25 07:48:25 UTC (rev 2122)
@@ -45,6 +45,8 @@
         return self.sigma
 
 class LibSvmRegressionModel(LibSvmModel):
+    ResultsType = LibSvmRegressionResults
+
     def __init__(self, kernel, **kwargs):
         LibSvmModel.__init__(self, kernel, **kwargs)
 

Modified: trunk/Lib/sandbox/svm/tests/test_classification.py
===================================================================
--- trunk/Lib/sandbox/svm/tests/test_classification.py	2006-07-24 22:07:41 UTC (rev 2121)
+++ trunk/Lib/sandbox/svm/tests/test_classification.py	2006-07-25 07:48:25 UTC (rev 2122)
@@ -5,7 +5,6 @@
 from svm.classification import *
 from svm.dataset import LibSvmClassificationDataSet, LibSvmTestDataSet
 from svm.kernel import *
-from svm.predict import *
 restore_path()
 
 class test_classification(NumpyTestCase):
@@ -27,8 +26,6 @@
 
     def check_c_basics(self):
         ModelType = LibSvmCClassificationModel
-        ResultType = LibSvmClassificationResults
-        PredictorType = LibSvmPredictor
 
         labels = [0, 1, 1, 2]
         x = [N.array([0, 0]),
@@ -37,15 +34,13 @@
              N.array([1, 1])]
         traindata = LibSvmClassificationDataSet(zip(labels, x))
         model = ModelType(RBFKernel(traindata.gamma))
-        results = model.fit(traindata, ResultType, PredictorType)
+        results = model.fit(traindata)
         testdata = LibSvmTestDataSet(x)
         results.predict(testdata)
         results.predict_values(testdata)
 
     def check_c_more(self):
         ModelType = LibSvmCClassificationModel
-        ResultType = LibSvmClassificationResults
-        PredictorType = LibSvmPredictor
 
         labels = [0, 1, 1, 2]
         x = [N.array([0, 0]),
@@ -72,7 +67,7 @@
         for kernel, expected_rho, expected_error in \
             zip(kernels, expected_rhos, expected_errors):
             model = ModelType(kernel, cost, weights)
-            results = model.fit(traindata, ResultType, PredictorType)
+            results = model.fit(traindata)
 
             self.assertEqual(results.labels, [0, 1, 2])
             self.assertEqual(results.nSV, [1, 2, 1])
@@ -87,8 +82,6 @@
 
     def check_c_probability(self):
         ModelType = LibSvmCClassificationModel
-        ResultType = LibSvmClassificationResults
-        PredictorType = LibSvmPredictor
 
         labels = [0, 1, 1, 2]
         x = [N.array([0, 0]),
@@ -108,7 +101,7 @@
 
         for kernel in kernels:
             model = ModelType(kernel, cost, weights)
-            results = model.fit(traindata, ResultType, PredictorType)
+            results = model.fit(traindata)
             results.predict_probability(testdata)
 
     def check_cross_validate(self):

Modified: trunk/Lib/sandbox/svm/tests/test_oneclass.py
===================================================================
--- trunk/Lib/sandbox/svm/tests/test_oneclass.py	2006-07-24 22:07:41 UTC (rev 2121)
+++ trunk/Lib/sandbox/svm/tests/test_oneclass.py	2006-07-25 07:48:25 UTC (rev 2122)
@@ -5,7 +5,6 @@
 from svm.dataset import LibSvmOneClassDataSet, LibSvmTestDataSet
 from svm.kernel import *
 from svm.oneclass import *
-from svm.predict import *
 restore_path()
 
 class test_oneclass(NumpyTestCase):
@@ -17,24 +16,19 @@
 
     def check_train(self):
         ModelType = LibSvmOneClassModel
-        ResultType = LibSvmOneClassResults
-        PredictorType = LibSvmPredictor
-
         x = [N.array([0, 0]),
              N.array([0, 1]),
              N.array([1, 0]),
              N.array([1, 1])]
         traindata = LibSvmOneClassDataSet(x)
         model = ModelType(LinearKernel())
-        results = model.fit(traindata, ResultType, PredictorType)
+        results = model.fit(traindata)
         testdata = LibSvmTestDataSet(x)
         results.predict(testdata)
         results.predict_values(testdata)
 
     def check_more(self):
         ModelType = LibSvmOneClassModel
-        ResultType = LibSvmOneClassResults
-        PredictorType = LibSvmPredictor
 
         x = [N.array([0, 0]),
              N.array([0, 1]),
@@ -57,7 +51,7 @@
 
         for kernel, expected_pred in zip(kernels, expected_preds):
             model = ModelType(kernel, nu)
-            results = model.fit(traindata, ResultType, PredictorType)
+            results = model.fit(traindata)
             pred = results.predict(testdata)
             self.assertEqual(results.predict(testdata), expected_pred)
             values = results.predict_values(testdata)

Modified: trunk/Lib/sandbox/svm/tests/test_precomputed.py
===================================================================
--- trunk/Lib/sandbox/svm/tests/test_precomputed.py	2006-07-24 22:07:41 UTC (rev 2121)
+++ trunk/Lib/sandbox/svm/tests/test_precomputed.py	2006-07-25 07:48:25 UTC (rev 2122)
@@ -12,7 +12,6 @@
 class test_precomputed(NumpyTestCase):
     def xcheck_precomputed_classification(self):
         ModelType = LibSvmCClassificationModel
-        ResultType = LibSvmClassificationResults
         kernel = LinearKernel()
 
         labels1 = ([0] * 10) + ([1] * 10) + ([2] * 10)
@@ -26,13 +25,13 @@
 
         pcdata12 = pcdata1.combine(data2)
         model = LibSvmCClassificationModel(kernel)
-        results = model.fit(pcdata12, ResultType,LibSvmPrecomputedPredictor)
+        results = model.fit(pcdata12)
 
         reflabels = labels1 + labels2
         refx = N.vstack([x1, x2])
         refdata = LibSvmClassificationDataSet(zip(reflabels, refx))
         model = ModelType(kernel)
-        refresults = model.fit(refdata, ResultType, LibSvmPredictor)
+        refresults = model.fit(refdata)
 
         assert_array_almost_equal(results.rho, refresults.rho)
         assert_array_almost_equal(results.sv_coef, refresults.sv_coef)
@@ -56,7 +55,6 @@
 
     def check_precomputed_regression(self):
         ModelType = LibSvmEpsilonRegressionModel
-        ResultType = LibSvmRegressionResults
 
         kernel = LinearKernel()
 
@@ -74,7 +72,7 @@
 
         pcdata12 = pcdata1.combine(data2)
         model = LibSvmEpsilonRegressionModel(kernel)
-        results = model.fit(pcdata12, ResultType, LibSvmPrecomputedPredictor)
+        results = model.fit(pcdata12, LibSvmPredictor)
 
         # reference model, calculated without involving the
         # precomputed Gram matrix
@@ -82,7 +80,7 @@
         refx = N.vstack([x1, x2])
         refdata = LibSvmRegressionDataSet(zip(refy, refx))
         model = ModelType(kernel)
-        refresults = model.fit(refdata, ResultType, LibSvmPredictor)
+        refresults = model.fit(refdata, LibSvmPredictor)
 
         self.assertAlmostEqual(results.rho, refresults.rho)
         assert_array_almost_equal(results.sv_coef, refresults.sv_coef)

Modified: trunk/Lib/sandbox/svm/tests/test_regression.py
===================================================================
--- trunk/Lib/sandbox/svm/tests/test_regression.py	2006-07-24 22:07:41 UTC (rev 2121)
+++ trunk/Lib/sandbox/svm/tests/test_regression.py	2006-07-25 07:48:25 UTC (rev 2122)
@@ -27,8 +27,6 @@
 
     def check_epsilon_train(self):
         ModelType = LibSvmEpsilonRegressionModel
-        ResultType = LibSvmRegressionResults
-        PredictorType = LibSvmPredictor
 
         y = [10., 20., 30., 40.]
         x = [N.array([0, 0]),
@@ -38,14 +36,12 @@
         traindata = LibSvmRegressionDataSet(zip(y, x))
         testdata = LibSvmTestDataSet(x)
         model = ModelType(LinearKernel())
-        results = model.fit(traindata, ResultType, PredictorType)
+        results = model.fit(traindata)
         results.predict(testdata)
         results.get_svr_probability()
 
     def check_epsilon_more(self):
         ModelType = LibSvmEpsilonRegressionModel
-        ResultType = LibSvmRegressionResults
-        PredictorType = LibSvmPredictor
 
         y = [0.0, 1.0, 1.0, 2.0]
         x = [N.array([0, 0]),
@@ -70,7 +66,7 @@
 
         for kernel, expected_y in zip(kernels, expected_ys):
             model = ModelType(kernel, epsilon, cost)
-            results = model.fit(traindata, ResultType, PredictorType)
+            results = model.fit(traindata)
             predictions = results.predict(testdata)
             # look at differences instead of using assertAlmostEqual
             # due to slight differences between answers obtained on
@@ -121,7 +117,6 @@
         return kernels
 
     def check_all(self):
-        ResultType = LibSvmRegressionResults
         trndata, trndata1, trndata2, testdata = self._make_datasets()
         kernels = self._make_kernels()
         for kernel in kernels:
@@ -132,10 +127,10 @@
                 LibSvmNuRegressionModel(kernel, 0.4, 0.5)
                 ]
             fitargs = [
-                (trndata, ResultType, LibSvmPredictor),
-                (trndata, ResultType, LibSvmPythonPredictor),
-                #(pctrndata, ResultType, LibSvmPrecomputedPredictor),
-                (pctrndata, ResultType, LibSvmPythonPredictor)
+                (trndata, LibSvmPredictor),
+                (trndata, LibSvmPythonPredictor),
+                #(pctrndata, LibSvmPredictor),
+                (pctrndata, LibSvmPythonPredictor)
                 ]
             for model in models:
                 refresults = model.fit(*fitargs[0])



