[Scipy-svn] r2142 - in trunk/Lib/sandbox/svm: . tests
scipy-svn at scipy.org
Thu Jul 27 20:10:58 EDT 2006
Author: fullung
Date: 2006-07-27 19:10:27 -0500 (Thu, 27 Jul 2006)
New Revision: 2142
Modified:
trunk/Lib/sandbox/svm/classification.py
trunk/Lib/sandbox/svm/dataset.py
trunk/Lib/sandbox/svm/oneclass.py
trunk/Lib/sandbox/svm/predict.py
trunk/Lib/sandbox/svm/regression.py
trunk/Lib/sandbox/svm/tests/test_all.py
trunk/Lib/sandbox/svm/tests/test_classification.py
Log:
Classification in Python in preparation for some optimization.
Modified: trunk/Lib/sandbox/svm/classification.py
===================================================================
--- trunk/Lib/sandbox/svm/classification.py 2006-07-27 19:59:34 UTC (rev 2141)
+++ trunk/Lib/sandbox/svm/classification.py 2006-07-28 00:10:27 UTC (rev 2142)
@@ -74,6 +74,9 @@
return int(label), prob_estimates
return [p(x) for x in dataset]
+ def compact(self):
+ self.predictor.compact()
+
class LibSvmClassificationModel(LibSvmModel):
"""
A model for support vector classification.
Modified: trunk/Lib/sandbox/svm/dataset.py
===================================================================
--- trunk/Lib/sandbox/svm/dataset.py 2006-07-27 19:59:34 UTC (rev 2141)
+++ trunk/Lib/sandbox/svm/dataset.py 2006-07-28 00:10:27 UTC (rev 2142)
@@ -16,13 +16,9 @@
for y, x in data:
key = x.__array_interface__['data'][0]
self.iddatamap[key] = x
+ self.__len__ = self.data.__len__
+ self.__iter__ = self.data.__iter__
- def __iter__(self):
- return self.data.__iter__()
-
- def __len__(self):
- return len(self.data)
-
def getgamma(self):
maxlen = 0
for y, x in self.data:
@@ -164,13 +160,10 @@
class LibSvmTestDataSet:
def __init__(self, origdata):
self.data = map(lambda x: convert_to_svm_node(x), origdata)
+ self.__len__ = self.data.__len__
+ self.__iter__ = self.data.__iter__
+ self.__getitem__ = self.data.__getitem__
- def __len__(self):
- return len(self.data)
-
- def __iter__(self):
- return self.data.__iter__()
-
def convert_to_svm_node(x):
y = N.empty(len(x) + 1, dtype=libsvm.svm_node_dtype)
y[-1] = -1, 0.
Modified: trunk/Lib/sandbox/svm/oneclass.py
===================================================================
--- trunk/Lib/sandbox/svm/oneclass.py 2006-07-27 19:59:34 UTC (rev 2141)
+++ trunk/Lib/sandbox/svm/oneclass.py 2006-07-28 00:10:27 UTC (rev 2142)
@@ -35,6 +35,9 @@
"""
return [self.predictor.predict_values(x, 1)[0] for x in dataset]
+ def compact(self):
+ self.predictor.compact()
+
class LibSvmOneClassModel(LibSvmModel):
"""
A model for distribution estimation (one-class SVM).
Modified: trunk/Lib/sandbox/svm/predict.py
===================================================================
--- trunk/Lib/sandbox/svm/predict.py 2006-07-27 19:59:34 UTC (rev 2141)
+++ trunk/Lib/sandbox/svm/predict.py 2006-07-28 00:10:27 UTC (rev 2142)
@@ -56,14 +56,29 @@
label = libsvm.svm_predict_probability(self.model, xptr, peptr)
return label, pe
+ def compact(self):
+ raise NotImplementedError
+
class LibSvmPythonPredictor:
def __init__(self, model, dataset, kernel):
self.kernel = kernel
modelc = model.contents
-
- self.rho = modelc.rho[0]
- self.sv_coef = modelc.sv_coef[0][:modelc.l]
self.svm_type = modelc.param.svm_type
+ if self.svm_type in [libsvm.C_SVC, libsvm.NU_SVC]:
+ self.nr_class = modelc.nr_class
+ self.labels = modelc.labels[:self.nr_class]
+ nrho = self.nr_class * (self.nr_class - 1) / 2
+ self.rho = modelc.rho[:nrho]
+ self.sv_coef = [modelc.sv_coef[i][:modelc.l]
+ for i in range(self.nr_class - 1)]
+ self.nSV = [modelc.nSV[i] for i in range(self.nr_class)]
+ start = N.zeros((self.nr_class,), N.intc)
+ for i in range(1, self.nr_class):
+ start[i] = start[i - 1] + modelc.nSV[i - 1]
+ self.start = start
+ else:
+ self.rho = modelc.rho[0]
+ self.sv_coef = modelc.sv_coef[0][:modelc.l]
if modelc.param.kernel_type != libsvm.PRECOMPUTED:
svptrs = [modelc.SV[i] for i in range(modelc.l)]
@@ -73,21 +88,55 @@
ids = [int(modelc.SV[i][0].value) for i in range(modelc.l)]
support_vectors = [dataset[id] for id in ids]
self.support_vectors = support_vectors
-
libsvm.svm_destroy_model(model)
def predict(self, x):
if self.svm_type in [libsvm.C_SVC, libsvm.NU_SVC]:
- raise NotImplementedError
+ n = self.nr_class * (self.nr_class - 1) / 2
+ dec_values = self.predict_values(x, n)
+ vote = N.zeros((self.nr_class,), N.intc)
+ pos = 0
+ for i in range(self.nr_class):
+ for j in range(i + 1, self.nr_class):
+ if dec_values[pos] > 0:
+ vote[i] += 1
+ else:
+ vote[j] += 1
+ pos += 1
+ return self.labels[vote.argmax()]
else:
return self.predict_values(x, 1)
def predict_values(self, x, n):
- z = -self.rho
- # XXX possible optimization: izip
- for sv_coef, sv in zip(self.sv_coef, self.support_vectors):
- z += sv_coef * self.kernel(x, sv, svm_node_dot)
- return z
+ if self.svm_type in [libsvm.C_SVC, libsvm.NU_SVC]:
+ kvalue = N.empty((len(self.support_vectors),))
+ for i, sv in enumerate(self.support_vectors):
+ kvalue[i] = self.kernel(x, sv, svm_node_dot)
+ p = 0
+ dec_values = N.empty((n,))
+ for i in range(self.nr_class):
+ for j in range(i + 1, self.nr_class):
+ sum = 0
+ si, sj = self.start[i], self.start[j]
+ ci, cj = self.nSV[i], self.nSV[j]
+ coef1 = self.sv_coef[j - 1]
+ coef2 = self.sv_coef[i]
+ sum = -self.rho[p]
+ for k in range(ci):
+ sum += coef1[si + k] * kvalue[si + k]
+ for k in range(cj):
+ sum += coef2[sj + k] * kvalue[sj + k]
+ dec_values[p] = sum
+ p += 1
+ return dec_values
+ else:
+ z = -self.rho
+ for sv_coef, sv in zip(self.sv_coef, self.support_vectors):
+ z += sv_coef * self.kernel(x, sv, svm_node_dot)
+ return z
def predict_probability(self, x, n):
raise NotImplementedError
+
+ def compact(self):
+ raise NotImplementedError
Modified: trunk/Lib/sandbox/svm/regression.py
===================================================================
--- trunk/Lib/sandbox/svm/regression.py 2006-07-27 19:59:34 UTC (rev 2141)
+++ trunk/Lib/sandbox/svm/regression.py 2006-07-28 00:10:27 UTC (rev 2142)
@@ -44,6 +44,9 @@
"""
return self.sigma
+ def compact(self):
+ self.predictor.compact()
+
class LibSvmRegressionModel(LibSvmModel):
ResultsType = LibSvmRegressionResults
Modified: trunk/Lib/sandbox/svm/tests/test_all.py
===================================================================
--- trunk/Lib/sandbox/svm/tests/test_all.py 2006-07-27 19:59:34 UTC (rev 2141)
+++ trunk/Lib/sandbox/svm/tests/test_all.py 2006-07-28 00:10:27 UTC (rev 2142)
@@ -1,8 +1,8 @@
from test_classification import *
from test_dataset import *
+from test_kernel import *
from test_libsvm import *
from test_oneclass import *
-from test_precomputed import *
from test_regression import *
if __name__ == '__main__':
Modified: trunk/Lib/sandbox/svm/tests/test_classification.py
===================================================================
--- trunk/Lib/sandbox/svm/tests/test_classification.py 2006-07-27 19:59:34 UTC (rev 2141)
+++ trunk/Lib/sandbox/svm/tests/test_classification.py 2006-07-28 00:10:27 UTC (rev 2142)
@@ -205,5 +205,26 @@
except NotImplementedError:
self.assert_(fitargs[-1] is LibSvmPythonPredictor)
+ def check_python_predict(self):
+ traindata, testdata = self._make_basic_datasets()
+ kernel = LinearKernel()
+ cost = 10.0
+ weights = [(1, 10.0)]
+ model = LibSvmCClassificationModel(kernel, cost, weights)
+
+ refresults = model.fit(traindata)
+ results = model.fit(traindata, LibSvmPythonPredictor)
+
+ refv = refresults.predict_values(testdata)
+ v = results.predict_values(testdata)
+ self.assertEqual(len(refv), len(v))
+ for pred, refpred in zip(v, refv):
+ for key, value in refpred.iteritems():
+ assert_array_almost_equal(value, pred[key])
+
+ refp = refresults.predict(testdata)
+ p = results.predict(testdata)
+ assert_array_equal(p, refp)
+
if __name__ == '__main__':
NumpyTest().run()
More information about the Scipy-svn
mailing list