[Scipy-svn] r2309 - in trunk/Lib/sandbox/models: . tests

scipy-svn at scipy.org scipy-svn at scipy.org
Tue Nov 7 17:07:29 EST 2006


Author: jonathan.taylor
Date: 2006-11-07 16:04:53 -0600 (Tue, 07 Nov 2006)
New Revision: 2309

Modified:
   trunk/Lib/sandbox/models/__init__.py
   trunk/Lib/sandbox/models/glm.py
   trunk/Lib/sandbox/models/regression.py
   trunk/Lib/sandbox/models/rlm.py
   trunk/Lib/sandbox/models/tests/test_formula.py
   trunk/Lib/sandbox/models/tests/test_glm.py
   trunk/Lib/sandbox/models/tests/test_regression.py
   trunk/Lib/sandbox/models/tests/test_utils.py
Log:
more name change fixes, changed names in tests, too


Modified: trunk/Lib/sandbox/models/__init__.py
===================================================================
--- trunk/Lib/sandbox/models/__init__.py	2006-11-07 21:50:20 UTC (rev 2308)
+++ trunk/Lib/sandbox/models/__init__.py	2006-11-07 22:04:53 UTC (rev 2309)
@@ -3,11 +3,10 @@
 import regression
 import robust
 import family
-from glm import Model as glm
-from rlm import Model as rlm
+from glm import model as glm
+from rlm import model as rlm
 
 
-
 import unittest
 def suite():
     return unittest.TestSuite([tests.suite()])

Modified: trunk/Lib/sandbox/models/glm.py
===================================================================
--- trunk/Lib/sandbox/models/glm.py	2006-11-07 21:50:20 UTC (rev 2308)
+++ trunk/Lib/sandbox/models/glm.py	2006-11-07 22:04:53 UTC (rev 2309)
@@ -1,8 +1,8 @@
 import numpy as N
 from scipy.sandbox.models import family
-from scipy.sandbox.models.regression import WLSModel
+from scipy.sandbox.models.regression import wls_model
 
-class Model(WLSModel):
+class model(wls_model):
 
     niter = 10
     
@@ -20,7 +20,7 @@
         """
         Return (unnormalized) log-likelihood for glm.
 
-        Note that self.scale is interpreted as a variance in OLSModel, so
+        Note that self.scale is interpreted as a variance in ols_model, so
         we divide the residuals by its sqrt.
         """
         if results is None:
@@ -32,7 +32,7 @@
         self.weights = self.family.weights(results.mu)
         self.initialize(self.design)
         Z = results.predict + self.family.link.deriv(results.mu) * (Y - results.mu)
-        newresults = WLSModel.fit(self, Z)
+        newresults = wls_model.fit(self, Z)
         newresults.mu = self.family.link.inverse(newresults.predict)
         self.iter += 1
         return newresults
@@ -41,7 +41,7 @@
         """
         Continue iterating, or has convergence been obtained?
         """
-        if self.iter >= Model.niter:
+        if self.iter >= model.niter:
             return False
 
         curdev = self.deviance(results=results)
@@ -67,7 +67,7 @@
 
         self.Y = N.asarray(Y, N.float64)
         iter(self)
-        self.results = WLSModel.fit(self, self.family.link(Y, initialize=True))
+        self.results = wls_model.fit(self, self.family.link.initialize(Y))
         self.results.mu = self.family.link.inverse(self.results.predict)
         self.scale = self.results.scale = self.estimate_scale()
         

Modified: trunk/Lib/sandbox/models/regression.py
===================================================================
--- trunk/Lib/sandbox/models/regression.py	2006-11-07 21:50:20 UTC (rev 2308)
+++ trunk/Lib/sandbox/models/regression.py	2006-11-07 22:04:53 UTC (rev 2309)
@@ -4,7 +4,7 @@
 from scipy.sandbox.models.model import LikelihoodModel, LikelihoodModelResults
 from scipy.sandbox.models import utils
 
-class OLSModel(LikelihoodModel):
+class ols_model(LikelihoodModel):
     
     """
     A simple ordinary least squares model.
@@ -62,7 +62,7 @@
         
         return lfit
 
-class ARModel(OLSModel):
+class ar_model(ols_model):
     """
     A regression model with an AR(1) covariance structure.
 
@@ -79,7 +79,7 @@
         factor = 1. / N.sqrt(1 - self.rho**2)
         return N.concatenate([[X[0]], (X[1:] - self.rho * X[0:-1]) * factor])
 
-class WLSModel(ARModel):
+class wls_model(ar_model):
     """
 
     A regression model with diagonal but non-identity covariance

Modified: trunk/Lib/sandbox/models/rlm.py
===================================================================
--- trunk/Lib/sandbox/models/rlm.py	2006-11-07 21:50:20 UTC (rev 2308)
+++ trunk/Lib/sandbox/models/rlm.py	2006-11-07 22:04:53 UTC (rev 2309)
@@ -1,9 +1,9 @@
 import numpy as N
 
-from scipy.sandbox.models.regression import WLSModel
+from scipy.sandbox.models.regression import wls_model
 from scipy.sandbox.models.robust import norms, scale
 
-class Model(WLSModel):
+class model(wls_model):
 
     niter = 20
     scale_est = 'MAD'
@@ -22,7 +22,7 @@
         """
         Return (unnormalized) log-likelihood from M estimator.
 
-        Note that self.scale is interpreted as a variance in OLSModel, so
+        Note that self.scale is interpreted as a variance in ols_model, so
         we divide the residuals by its sqrt.
         """
         if results is None:
@@ -32,7 +32,7 @@
     def next(self, results):
         self.weights = self.M.weights((results.Y - results.predict) / N.sqrt(results.scale))
         self.initialize(self.design)
-        results = WLSModel.fit(self, results.Y)
+        results = wls_model.fit(self, results.Y)
         self.scale = results.scale = self.estimate_scale(results)
         self.iter += 1
         return results
@@ -41,7 +41,7 @@
         """
         Continue iterating, or has convergence been obtained?
         """
-        if self.iter >= Model.niter:
+        if self.iter >= model.niter:
             return False
 
         curdev = self.deviance(results)
@@ -53,7 +53,7 @@
 
     def estimate_scale(self, results):
         """
-        Note that self.scale is interpreted as a variance in OLSModel, so
+        Note that self.scale is interpreted as a variance in ols_model, so
         we return MAD(resid)**2 by default. 
         """
         resid = results.Y - results.predict
@@ -67,7 +67,7 @@
     def fit(self, Y, **keywords):
         
         iter(self)
-        self.results = WLSModel.fit(self, Y)
+        self.results = wls_model.fit(self, Y)
         self.scale = self.results.scale = self.estimate_scale(self.results)
         
         while self.cont(self.results):

Modified: trunk/Lib/sandbox/models/tests/test_formula.py
===================================================================
--- trunk/Lib/sandbox/models/tests/test_formula.py	2006-11-07 21:50:20 UTC (rev 2308)
+++ trunk/Lib/sandbox/models/tests/test_formula.py	2006-11-07 22:04:53 UTC (rev 2309)
@@ -7,42 +7,42 @@
 
 from scipy.sandbox.models import utils, formula, contrast
 
-class test_Term(ScipyTestCase):
+class test_term(unittest.TestCase):
 
     def test_init(self):
-        t1 = formula.Term("trivial")
+        t1 = formula.term("trivial")
         sqr = lambda x: x*x
 
-        t2 = formula.Term("not_so_trivial", sqr, "sqr")
+        t2 = formula.term("not_so_trivial", sqr, "sqr")
 
-        self.assertRaises(ValueError, formula.Term, "name", termname=0)
+        self.assertRaises(ValueError, formula.term, "name", termname=0)
 
     def test_str(self):
-        t = formula.Term("name")
+        t = formula.term("name")
         s = str(t)
 
     def test_add(self):
-        t1 = formula.Term("t1")
-        t2 = formula.Term("t2")
+        t1 = formula.term("t1")
+        t2 = formula.term("t2")
         f = t1 + t2
-        self.assert_(isinstance(f, formula.Formula))
+        self.assert_(isinstance(f, formula.formula))
         self.assert_(f.hasterm(t1))
         self.assert_(f.hasterm(t2))
 
     def test_mul(self):
-        t1 = formula.Term("t1")
-        t2 = formula.Term("t2")
+        t1 = formula.term("t1")
+        t2 = formula.term("t2")
         f = t1 * t2
-        self.assert_(isinstance(f, formula.Formula))
+        self.assert_(isinstance(f, formula.formula))
 
-        intercept = formula.Term("intercept")
+        intercept = formula.term("intercept")
         f = t1 * intercept
-        self.assertEqual(str(f), str(formula.Formula(t1)))
+        self.assertEqual(str(f), str(formula.formula(t1)))
 
         f = intercept * t1
-        self.assertEqual(str(f), str(formula.Formula(t1)))
+        self.assertEqual(str(f), str(formula.formula(t1)))
 
-class test_Formula(ScipyTestCase):
+class test_formula(ScipyTestCase):
 
     def setUp(self):
         self.X = R.standard_normal((40,10))
@@ -51,7 +51,7 @@
         for i in range(10):
             name = '%s' % string.uppercase[i]
             self.namespace[name] = self.X[:,i]
-            self.terms.append(formula.Term(name))
+            self.terms.append(formula.term(name))
 
         self.formula = self.terms[0]
         for i in range(1, 10):
@@ -86,7 +86,7 @@
 
     def test_contrast2(self):
     
-        dummy = formula.Term('zero')
+        dummy = formula.term('zero')
         self.namespace['zero'] = N.zeros((40,), N.float64)
         term = dummy + self.terms[2]
         c = contrast.Contrast(term, self.formula)
@@ -99,7 +99,7 @@
         X = self.formula.design(namespace=self.namespace)
         P = N.dot(X, L.pinv(X))
         
-        dummy = formula.Term('noise')
+        dummy = formula.term('noise')
         resid = N.identity(40) - P
         self.namespace['noise'] = N.transpose(N.dot(resid, R.standard_normal((40,5))))
         term = dummy + self.terms[2]
@@ -120,9 +120,9 @@
         self.assertEquals(estimable, False)
 
 def suite():
-    suite = unittest.makeSuite(FormulaTest)
+    suite = unittest.makeSuite(formulaTest)
     return suite
 
 
 if __name__ == '__main__':
-    ScipyTest.run()
+    unittest.main()

Modified: trunk/Lib/sandbox/models/tests/test_glm.py
===================================================================
--- trunk/Lib/sandbox/models/tests/test_glm.py	2006-11-07 21:50:20 UTC (rev 2308)
+++ trunk/Lib/sandbox/models/tests/test_glm.py	2006-11-07 22:04:53 UTC (rev 2309)
@@ -3,7 +3,7 @@
 import numpy.random as R
 import numpy as N
 from numpy.testing import *
-from scipy.sandbox.models.glm import Model
+from scipy.sandbox.models.glm import model
 
 W = R.standard_normal
 
@@ -14,8 +14,8 @@
         X = W((40,10))
         Y = N.greater(W((40,)), 0)
         family = S.family.Binomial()
-        model = Model(design=X, family=S.family.Binomial())
-        results = model.fit(Y)
+        cmodel = model(design=X, family=S.family.Binomial())
+        results = cmodel.fit(Y)
         self.assertEquals(results.df_resid, 30)
 
     def check_Logisticdegenerate(self):
@@ -23,8 +23,8 @@
         X[:,0] = X[:,1] + X[:,2]
         Y = N.greater(W((40,)), 0)
         family = S.family.Binomial()
-        model = Model(design=X, family=S.family.Binomial())
-        results = model.fit(Y)
+        cmodel = model(design=X, family=S.family.Binomial())
+        results = cmodel.fit(Y)
         self.assertEquals(results.df_resid, 31)
 
 

Modified: trunk/Lib/sandbox/models/tests/test_regression.py
===================================================================
--- trunk/Lib/sandbox/models/tests/test_regression.py	2006-11-07 21:50:20 UTC (rev 2308)
+++ trunk/Lib/sandbox/models/tests/test_regression.py	2006-11-07 22:04:53 UTC (rev 2309)
@@ -5,7 +5,7 @@
 
 W = standard_normal
 
-class test_Regression(ScipyTestCase):
+class test_Regression(unittest.TestCase):
 
     def testOLS(self):
         X = W((40,10))
@@ -39,4 +39,4 @@
 
 
 if __name__ == '__main__':
-    ScipyTest.run()
+    unittest.main()

Modified: trunk/Lib/sandbox/models/tests/test_utils.py
===================================================================
--- trunk/Lib/sandbox/models/tests/test_utils.py	2006-11-07 21:50:20 UTC (rev 2308)
+++ trunk/Lib/sandbox/models/tests/test_utils.py	2006-11-07 22:04:53 UTC (rev 2309)
@@ -5,7 +5,7 @@
 from numpy.testing import *
 from scipy.sandbox.models import utils
 
-class test_Utils(ScipyTestCase):
+class test_Utils(unittest.TestCase):
 
     def test_recipr(self):
         X = N.array([[2,1],[-1,0]])
@@ -52,4 +52,4 @@
         self.assertRaises(ValueError, utils.StepFunction, x, y)
 
 if __name__ == '__main__':
-    ScipyTest.run()
+    unittest.main()




More information about the Scipy-svn mailing list