[Scipy-svn] r6797 - trunk/scipy/optimize/tests

scipy-svn at scipy.org
Sun Sep 12 17:26:17 EDT 2010


Author: warren.weckesser
Date: 2010-09-12 16:26:17 -0500 (Sun, 12 Sep 2010)
New Revision: 6797

Modified:
   trunk/scipy/optimize/tests/test_cobyla.py
   trunk/scipy/optimize/tests/test_linesearch.py
   trunk/scipy/optimize/tests/test_minpack.py
   trunk/scipy/optimize/tests/test_nnls.py
   trunk/scipy/optimize/tests/test_nonlin.py
   trunk/scipy/optimize/tests/test_optimize.py
   trunk/scipy/optimize/tests/test_slsqp.py
   trunk/scipy/optimize/tests/test_zeros.py
Log:
TST: optimize: Don't use 'import *'. Don't use plain 'assert'.
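
For context: a plain 'assert' statement is stripped out when Python runs with
optimization enabled ('python -O'), so a test written that way can silently stop
checking anything; numpy.testing.assert_ is an ordinary function call, is never
stripped, and still reports a message on failure. Likewise, 'from numpy.testing
import *' pulls an unspecified set of names into each test module, which the
explicit imports in the diff below avoid. A minimal illustrative sketch (the test
name and data are hypothetical, not taken from the patch):

    import numpy as np
    from numpy.testing import assert_

    def test_residual_is_small():
        x = np.linspace(0.0, 1.0, 5)
        residual = np.abs(x - x).max()
        # Removed entirely under 'python -O'; the check silently disappears.
        assert residual < 1e-12, residual
        # Always executed; raises AssertionError with the message on failure.
        assert_(residual < 1e-12, repr(residual))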

Modified: trunk/scipy/optimize/tests/test_cobyla.py
===================================================================
--- trunk/scipy/optimize/tests/test_cobyla.py	2010-09-12 21:25:13 UTC (rev 6796)
+++ trunk/scipy/optimize/tests/test_cobyla.py	2010-09-12 21:26:17 UTC (rev 6797)
@@ -1,6 +1,6 @@
 import math
 
-from numpy.testing import *
+from numpy.testing import assert_almost_equal, TestCase, run_module_suite
 
 from scipy.optimize import cobyla as co
 

Modified: trunk/scipy/optimize/tests/test_linesearch.py
===================================================================
--- trunk/scipy/optimize/tests/test_linesearch.py	2010-09-12 21:25:13 UTC (rev 6796)
+++ trunk/scipy/optimize/tests/test_linesearch.py	2010-09-12 21:26:17 UTC (rev 6797)
@@ -2,7 +2,7 @@
 Tests for line search routines
 """
 
-from numpy.testing import *
+from numpy.testing import assert_, assert_equal
 import scipy.optimize.linesearch as ls
 import numpy as np
 
@@ -127,7 +127,7 @@
             assert_equal(phi1, phi(s), name)
             assert_wolfe(s, phi, derphi, err_msg=name)
 
-        assert c > 3 # check that the iterator really works...
+        assert_(c > 3) # check that the iterator really works...
 
     def test_scalar_search_wolfe2(self):
         for name, phi, derphi, old_phi0 in self.scalar_iter():
@@ -167,7 +167,7 @@
                 c += 1
                 assert_line_wolfe(x, p, s, f, fprime, err_msg=name)
 
-        assert c > 3 # check that the iterator really works...
+        assert_(c > 3) # check that the iterator really works...
 
     def test_line_search_wolfe2(self):
         c = 0
@@ -187,7 +187,7 @@
             if s < smax:
                 c += 1
                 assert_line_wolfe(x, p, s, f, fprime, err_msg=name)
-        assert c > 3 # check that the iterator really works...
+        assert_(c > 3) # check that the iterator really works...
 
     def test_line_search_armijo(self):
         c = 0
@@ -200,7 +200,7 @@
             assert_equal(self.fcount, fc)
             assert_equal(fv, f(x + s*p))
             assert_line_armijo(x, p, s, f, err_msg=name)
-        assert c >= 9
+        assert_(c >= 9)
 
     # -- More specific tests
 
@@ -231,6 +231,6 @@
         for func in [ls.scalar_search_wolfe1, ls.scalar_search_wolfe2]:
             count = [0]
             r = func(phi, derphi, phi(0), None, derphi(0))
-            assert r[0] is not None, (r, func)
-            assert count[0] <= 2 + 2, (count, func)
+            assert_(r[0] is not None, (r, func))
+            assert_(count[0] <= 2 + 2, (count, func))
             assert_wolfe(r[0], phi, derphi, err_msg=str(func))

Modified: trunk/scipy/optimize/tests/test_minpack.py
===================================================================
--- trunk/scipy/optimize/tests/test_minpack.py	2010-09-12 21:25:13 UTC (rev 6796)
+++ trunk/scipy/optimize/tests/test_minpack.py	2010-09-12 21:26:17 UTC (rev 6797)
@@ -2,12 +2,13 @@
 Unit tests for optimization routines from minpack.py.
 """
 
-from numpy.testing import *
+from numpy.testing import assert_, assert_almost_equal, assert_array_equal, \
+        assert_array_almost_equal, TestCase, run_module_suite
 import numpy as np
 from numpy import array, float64
 
 from scipy import optimize
-from scipy.optimize.minpack import fsolve, leastsq, curve_fit
+from scipy.optimize.minpack import leastsq, curve_fit
 
 class TestFSolve(object):
     def pressure_network(self, flow_rates, Qtot, k):
@@ -131,8 +132,8 @@
         def func(x,a):
             return x**a
         popt, pcov = curve_fit(func, self.x, self.y)
-        assert len(popt)==1
-        assert pcov.shape==(1,1)
+        assert_(len(popt) == 1)
+        assert_(pcov.shape == (1,1))
         assert_almost_equal(popt[0], 1.9149, decimal=4)
         assert_almost_equal(pcov[0,0], 0.0016, decimal=4)
 
@@ -140,8 +141,8 @@
         def func(x, a, b):
             return b*x**a
         popt, pcov = curve_fit(func, self.x, self.y)
-        assert len(popt)==2
-        assert pcov.shape==(2,2)
+        assert_(len(popt) == 2)
+        assert_(pcov.shape == (2,2))
         assert_array_almost_equal(popt, [1.7989, 1.1642], decimal=4)
         assert_array_almost_equal(pcov, [[0.0852, -0.1260],[-0.1260, 0.1912]], decimal=4)
 

Modified: trunk/scipy/optimize/tests/test_nnls.py
===================================================================
--- trunk/scipy/optimize/tests/test_nnls.py	2010-09-12 21:25:13 UTC (rev 6796)
+++ trunk/scipy/optimize/tests/test_nnls.py	2010-09-12 21:26:17 UTC (rev 6797)
@@ -3,7 +3,7 @@
 Sep 2008
 """
 
-from numpy.testing import *
+from numpy.testing import assert_, TestCase, run_module_suite
 
 from scipy.optimize import nnls
 from numpy import arange, dot
@@ -13,12 +13,12 @@
 class TestNNLS(TestCase):
 
     def test_nnls(self):
-        a=arange(25.0).reshape(-1,5)
-        x=arange(5.0)
-        y=dot(a,x)
-        x, res= nnls(a,y)
-        assert res<1e-7
-        assert norm(dot(a,x)-y)<1e-7
+        a = arange(25.0).reshape(-1,5)
+        x = arange(5.0)
+        y = dot(a,x)
+        x, res = nnls(a,y)
+        assert_(res < 1e-7)
+        assert_(norm(dot(a,x)-y) < 1e-7)
 
 if __name__ == "__main__":
     run_module_suite()

Modified: trunk/scipy/optimize/tests/test_nonlin.py
===================================================================
--- trunk/scipy/optimize/tests/test_nonlin.py	2010-09-12 21:25:13 UTC (rev 6796)
+++ trunk/scipy/optimize/tests/test_nonlin.py	2010-09-12 21:26:17 UTC (rev 6797)
@@ -3,7 +3,7 @@
 May 2007
 """
 
-from numpy.testing import *
+from numpy.testing import assert_, dec, TestCase, run_module_suite
 
 from scipy.optimize import nonlin
 from numpy import matrix, diag, dot
@@ -80,7 +80,7 @@
 
     def _check_func(self, f, func, f_tol=1e-2):
         x = func(f, f.xin, f_tol=f_tol, maxiter=200, verbose=0)
-        assert np.absolute(f(x)).max() < f_tol
+        assert_(np.absolute(f(x)).max() < f_tol)
 
     @dec.knownfailureif(True)
     def _check_func_fail(self, *a, **kw):
@@ -122,13 +122,13 @@
             for k in xrange(min(npoints, j+1)):
                 dx = self.xs[j-k+1] - self.xs[j-k]
                 df = self.fs[j-k+1] - self.fs[j-k]
-                assert np.allclose(dx, jac.solve(df))
+                assert_(np.allclose(dx, jac.solve(df)))
 
             # Check that the `npoints` secant bound is strict
             if j >= npoints:
                 dx = self.xs[j-npoints+1] - self.xs[j-npoints]
                 df = self.fs[j-npoints+1] - self.fs[j-npoints]
-                assert not np.allclose(dx, jac.solve(df))
+                assert_(not np.allclose(dx, jac.solve(df)))
 
     def test_broyden1(self):
         self._check_secant(nonlin.BroydenFirst)
@@ -148,7 +148,7 @@
             dx = x - self.xs[last_j]
             B += (df - dot(B, dx))[:,None] * dx[None,:] / dot(dx, dx)
             jac.update(x, f)
-            assert np.allclose(jac.todense(), B, rtol=1e-10, atol=1e-13)
+            assert_(np.allclose(jac.todense(), B, rtol=1e-10, atol=1e-13))
 
     def test_broyden2_update(self):
         # Check that BroydenSecond update works as for a dense matrix
@@ -162,7 +162,7 @@
             dx = x - self.xs[last_j]
             H += (dx - dot(H, df))[:,None] * df[None,:] / dot(df, df)
             jac.update(x, f)
-            assert np.allclose(jac.todense(), inv(H), rtol=1e-10, atol=1e-13)
+            assert_(np.allclose(jac.todense(), inv(H), rtol=1e-10, atol=1e-13))
             
     def test_anderson(self):
         # Anderson mixing (with w0=0) satisfies secant conditions
@@ -190,7 +190,7 @@
 
         sol = nonlin.nonlin_solve(func, b*0, jac, maxiter=maxiter,
                                   f_tol=1e-6, line_search=None, verbose=0)
-        assert np.allclose(dot(A, sol), b, atol=1e-6)
+        assert_(np.allclose(dot(A, sol), b, atol=1e-6))
 
     def test_broyden1(self):
         # Broyden methods solve linear systems exactly in 2*N steps
@@ -314,32 +314,32 @@
 
     def test_broyden1(self):
         x= nonlin.broyden1(F,F.xin,iter=12,alpha=1)
-        assert nonlin.norm(x)<1e-9
-        assert nonlin.norm(F(x))<1e-9
+        assert_(nonlin.norm(x) < 1e-9)
+        assert_(nonlin.norm(F(x)) < 1e-9)
 
     def test_broyden2(self):
         x= nonlin.broyden2(F,F.xin,iter=12,alpha=1)
-        assert nonlin.norm(x)<1e-9
-        assert nonlin.norm(F(x))<1e-9
+        assert_(nonlin.norm(x) < 1e-9)
+        assert_(nonlin.norm(F(x)) < 1e-9)
 
     def test_anderson(self):
         x= nonlin.anderson(F,F.xin,iter=12,alpha=0.03,M=5)
-        assert nonlin.norm(x)<0.33
+        assert_(nonlin.norm(x) < 0.33)
 
     def test_linearmixing(self):
         x = nonlin.linearmixing(F,F.xin,iter=60,alpha=0.5)
-        assert nonlin.norm(x)<1e-7
-        assert nonlin.norm(F(x))<1e-7
+        assert_(nonlin.norm(x) < 1e-7)
+        assert_(nonlin.norm(F(x)) < 1e-7)
 
     def test_exciting(self):
         x= nonlin.excitingmixing(F,F.xin,iter=20,alpha=0.5)
-        assert nonlin.norm(x)<1e-5
-        assert nonlin.norm(F(x))<1e-5
+        assert_(nonlin.norm(x) < 1e-5)
+        assert_(nonlin.norm(F(x)) < 1e-5)
 
     def test_diagbroyden(self):
         x= nonlin.diagbroyden(F,F.xin,iter=11,alpha=1)
-        assert nonlin.norm(x)<1e-8
-        assert nonlin.norm(F(x))<1e-8
+        assert_(nonlin.norm(x) < 1e-8)
+        assert_(nonlin.norm(F(x)) < 1e-8)
 
 if __name__ == "__main__":
     run_module_suite()

Modified: trunk/scipy/optimize/tests/test_optimize.py
===================================================================
--- trunk/scipy/optimize/tests/test_optimize.py	2010-09-12 21:25:13 UTC (rev 6796)
+++ trunk/scipy/optimize/tests/test_optimize.py	2010-09-12 21:26:17 UTC (rev 6797)
@@ -10,10 +10,10 @@
 
 """
 
-from numpy.testing import *
+from numpy.testing import assert_raises, assert_almost_equal, \
+        assert_, TestCase, run_module_suite
 
 from scipy import optimize
-from scipy.optimize import leastsq
 from numpy import array, zeros, float64, dot, log, exp, inf, sin, cos
 import numpy as np
 from scipy.optimize.tnc import RCSTRINGS, MSG_NONE
@@ -66,19 +66,19 @@
 
         err = abs(self.func(params) - self.func(self.solution))
         #print "CG: Difference is: " + str(err)
-        assert err < 1e-6
+        assert_(err < 1e-6)
         print self.funccalls, self.gradcalls
 
         # Ensure that function call counts are 'known good'; these are from
         # Scipy 0.7.0. Don't allow them to increase.
-        assert self.funccalls == 9, self.funccalls
-        assert self.gradcalls == 7, self.gradcalls
+        assert_(self.funccalls == 9, self.funccalls)
+        assert_(self.gradcalls == 7, self.gradcalls)
 
         # Ensure that the function behaves the same; this is from Scipy 0.7.0
-        assert np.allclose(self.trace[2:4],
+        assert_(np.allclose(self.trace[2:4],
                            [[0, -0.5, 0.5],
                             [0, -5.05700028e-01, 4.95985862e-01]],
-                           atol=1e-14, rtol=1e-7), self.trace[2:4]
+                           atol=1e-14, rtol=1e-7), self.trace[2:4])
 
 
     def test_bfgs(self):
@@ -92,18 +92,18 @@
 
         err = abs(self.func(params) - self.func(self.solution))
         #print "BFGS: Difference is: " + str(err)
-        assert err < 1e-6
+        assert_(err < 1e-6)
 
         # Ensure that function call counts are 'known good'; these are from
         # Scipy 0.7.0. Don't allow them to increase.
-        assert self.funccalls == 10, self.funccalls
-        assert self.gradcalls == 8, self.gradcalls
+        assert_(self.funccalls == 10, self.funccalls)
+        assert_(self.gradcalls == 8, self.gradcalls)
 
         # Ensure that the function behaves the same; this is from Scipy 0.7.0
-        assert np.allclose(self.trace[6:8],
+        assert_(np.allclose(self.trace[6:8],
                            [[0, -5.25060743e-01,   4.87748473e-01],
                             [0, -5.24885582e-01,   4.87530347e-01]],
-                           atol=1e-14, rtol=1e-7), self.trace[6:8]
+                           atol=1e-14, rtol=1e-7), self.trace[6:8])
 
 
     def test_powell(self):
@@ -117,21 +117,21 @@
 
         err = abs(self.func(params) - self.func(self.solution))
         #print "Powell: Difference is: " + str(err)
-        assert err < 1e-6
+        assert_(err < 1e-6)
 
         # Ensure that function call counts are 'known good'; these are from
         # Scipy 0.7.0. Don't allow them to increase.
-        assert self.funccalls == 116, self.funccalls
-        assert self.gradcalls == 0, self.gradcalls
+        assert_(self.funccalls == 116, self.funccalls)
+        assert_(self.gradcalls == 0, self.gradcalls)
 
         # Ensure that the function behaves the same; this is from Scipy 0.7.0
-        assert np.allclose(self.trace[34:39],
+        assert_(np.allclose(self.trace[34:39],
                            [[ 0.72949016, -0.44156936,  0.47100962],
                             [ 0.72949016, -0.44156936,  0.48052496],
                             [ 1.45898031, -0.88313872,  0.95153458],
                             [ 0.72949016, -0.44156936,  0.47576729],
                             [ 1.72949016, -0.44156936,  0.47576729]],
-                           atol=1e-14, rtol=1e-7), self.trace[34:39]
+                           atol=1e-14, rtol=1e-7), self.trace[34:39])
 
     def test_neldermead(self):
         """ Nelder-Mead simplex algorithm
@@ -144,18 +144,18 @@
 
         err = abs(self.func(params) - self.func(self.solution))
         #print "Nelder-Mead: Difference is: " + str(err)
-        assert err < 1e-6
+        assert_(err < 1e-6)
 
         # Ensure that function call counts are 'known good'; these are from
         # Scipy 0.7.0. Don't allow them to increase.
-        assert self.funccalls == 167, self.funccalls
-        assert self.gradcalls == 0, self.gradcalls
+        assert_(self.funccalls == 167, self.funccalls)
+        assert_(self.gradcalls == 0, self.gradcalls)
 
         # Ensure that the function behaves the same; this is from Scipy 0.7.0
-        assert np.allclose(self.trace[76:78],
+        assert_(np.allclose(self.trace[76:78],
                            [[0.1928968 , -0.62780447,  0.35166118],
                             [0.19572515, -0.63648426,  0.35838135]],
-                           atol=1e-14, rtol=1e-7), self.trace[76:78]
+                           atol=1e-14, rtol=1e-7), self.trace[76:78])
 
     def test_ncg(self):
         """ line-search Newton conjugate gradient optimization routine
@@ -169,19 +169,19 @@
 
         err = abs(self.func(params) - self.func(self.solution))
         #print "NCG: Difference is: " + str(err)
-        assert err < 1e-6
+        assert_(err < 1e-6)
 
         # Ensure that function call counts are 'known good'; these are from
         # Scipy 0.7.0. Don't allow them to increase.
-        assert self.funccalls == 7, self.funccalls
-        assert self.gradcalls == 18, self.gradcalls # 0.8.0
-        #assert self.gradcalls == 22, self.gradcalls # 0.7.0
+        assert_(self.funccalls == 7, self.funccalls)
+        assert_(self.gradcalls == 18, self.gradcalls) # 0.8.0
+        #assert_(self.gradcalls == 22, self.gradcalls) # 0.7.0
 
         # Ensure that the function behaves the same; this is from Scipy 0.7.0
-        assert np.allclose(self.trace[3:5],
+        assert_(np.allclose(self.trace[3:5],
                            [[-4.35700753e-07, -5.24869435e-01, 4.87527480e-01],
                             [-4.35700753e-07, -5.24869401e-01, 4.87527774e-01]],
-                           atol=1e-6, rtol=1e-7), self.trace[:5]
+                           atol=1e-6, rtol=1e-7), self.trace[:5])
 
 
     def test_l_bfgs_b(self):
@@ -195,18 +195,18 @@
 
         err = abs(self.func(params) - self.func(self.solution))
         #print "LBFGSB: Difference is: " + str(err)
-        assert err < 1e-6
+        assert_(err < 1e-6)
 
         # Ensure that function call counts are 'known good'; these are from
         # Scipy 0.7.0. Don't allow them to increase.
-        assert self.funccalls == 7, self.funccalls
-        assert self.gradcalls == 5, self.gradcalls
+        assert_(self.funccalls == 7, self.funccalls)
+        assert_(self.gradcalls == 5, self.gradcalls)
 
         # Ensure that the function behaves the same; this is from Scipy 0.7.0
-        assert np.allclose(self.trace[3:5],
+        assert_(np.allclose(self.trace[3:5],
                            [[0.        , -0.52489628,  0.48753042],
                             [0.        , -0.52489628,  0.48753042]],
-                           atol=1e-14, rtol=1e-7), self.trace[3:5]
+                           atol=1e-14, rtol=1e-7), self.trace[3:5])
 
     def test_brent(self):
         """ brent algorithm
@@ -220,19 +220,19 @@
         x = optimize.brent(lambda x: (x-1.5)**2-0.8, brack = (-15,-1,15))
         err4 = abs(x - 1.5)
 
-        assert max((err1,err2,err3,err4)) < 1e-6
+        assert_(max((err1,err2,err3,err4)) < 1e-6)
 
 
     def test_fminbound(self):
         """Test fminbound
         """
         x = optimize.fminbound(lambda x: (x - 1.5)**2 - 0.8, 0, 1)
-        assert abs(x - 1) < 1e-5
+        assert_(abs(x - 1) < 1e-5)
         x = optimize.fminbound(lambda x: (x - 1.5)**2 - 0.8, 1, 5)
-        assert abs(x - 1.5) < 1e-6
+        assert_(abs(x - 1.5) < 1e-6)
         x = optimize.fminbound(lambda x: (x - 1.5)**2 - 0.8,
                                numpy.array([1]), numpy.array([5]))
-        assert abs(x - 1.5) < 1e-6
+        assert_(abs(x - 1.5) < 1e-6)
         assert_raises(ValueError,
                 optimize.fminbound, lambda x: (x - 1.5)**2 - 0.8, 5, 1)
 

Modified: trunk/scipy/optimize/tests/test_slsqp.py
===================================================================
--- trunk/scipy/optimize/tests/test_slsqp.py	2010-09-12 21:25:13 UTC (rev 6796)
+++ trunk/scipy/optimize/tests/test_slsqp.py	2010-09-12 21:26:17 UTC (rev 6797)
@@ -1,4 +1,4 @@
-from numpy.testing import *
+from numpy.testing import assert_array_almost_equal, TestCase, run_module_suite
 import numpy as np
 
 from scipy.optimize import  fmin_slsqp

Modified: trunk/scipy/optimize/tests/test_zeros.py
===================================================================
--- trunk/scipy/optimize/tests/test_zeros.py	2010-09-12 21:25:13 UTC (rev 6796)
+++ trunk/scipy/optimize/tests/test_zeros.py	2010-09-12 21:26:17 UTC (rev 6797)
@@ -3,7 +3,7 @@
 from math import sqrt
 
 from numpy.testing import TestCase, assert_almost_equal, assert_warns, \
-                            run_module_suite
+                            assert_, run_module_suite
 
 from scipy.optimize import zeros as cc
 
@@ -16,7 +16,7 @@
         b = sqrt(3)
         for function, fname in zip(functions, fstrings):
             zero, r = method(function, a, b, xtol=0.1e-12, full_output=True)
-            assert r.converged
+            assert_(r.converged)
             assert_almost_equal(zero, 1.0, decimal=12,
                 err_msg='method %s, function %s' % (name, fname))
 
