From scipy-svn at scipy.org Fri Feb 1 14:41:43 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Fri, 1 Feb 2008 13:41:43 -0600 (CST) Subject: [Scipy-svn] r3886 - in trunk/scipy: sandbox/multigrid sparse Message-ID: <20080201194143.D439939C06E@new.scipy.org> Author: wnbell Date: 2008-02-01 13:41:26 -0600 (Fri, 01 Feb 2008) New Revision: 3886 Modified: trunk/scipy/sandbox/multigrid/sa.py trunk/scipy/sparse/spfuncs.py Log: accept options to SA solver Modified: trunk/scipy/sandbox/multigrid/sa.py =================================================================== --- trunk/scipy/sandbox/multigrid/sa.py 2008-02-01 03:13:28 UTC (rev 3885) +++ trunk/scipy/sandbox/multigrid/sa.py 2008-02-01 19:41:26 UTC (rev 3886) @@ -1,3 +1,5 @@ +"""Functions for Smoothed Aggregation AMG""" + from numpy import array, arange, ones, zeros, sqrt, asarray, empty, diff from scipy.sparse import csr_matrix, isspmatrix_csr, bsr_matrix, isspmatrix_bsr @@ -24,6 +26,7 @@ return A if isspmatrix_csr(A): + #TODO rework this Sp,Sj,Sx = multigridtools.sa_strong_connections(A.shape[0],epsilon,A.indptr,A.indices,A.data) return csr_matrix((Sx,Sj,Sp),shape=A.shape) elif ispmatrix_bsr(A): @@ -198,14 +201,51 @@ return P +def sa_prolongator(A, B, strength='standard', aggregate='standard', smooth='standard'): -def smoothed_aggregation_solver(A, B=None, - max_levels = 10, - max_coarse = 500, - strength = sa_strong_connections, - aggregate = sa_standard_aggregation, - tentative = sa_fit_candidates, - smooth = sa_smoothed_prolongator): + def unpack_arg(v): + if isinstance(v,tuple): + return v[0],v[1] + else: + return v,{} + + # strength of connection + fn, kwargs = unpack_arg(strength) + if fn == 'standard': + C = sa_strong_connections(A,**kwargs) + elif fn == 'ode': + C = sa_ode_strong_connections(A,B,**kwargs) + else: + raise ValueError('unrecognized strength of connection method: %s' % fn) + + # aggregation + fn, kwargs = unpack_arg(aggregate) + if fn == 'standard': + AggOp = sa_standard_aggregation(C,**kwargs) + else: + raise ValueError('unrecognized aggregation method' % fn ) + + # tentative prolongator + T,B = sa_fit_candidates(AggOp,B) + + # tentative prolongator smoother + fn, kwargs = unpack_arg(smooth) + if fn == 'standard': + P = sa_smoothed_prolongator(A,T,**kwargs) + elif fn == 'energy_min': + P = sa_energy_min(A,T,C,B,**kwargs) + else: + raise ValueError('unrecognized prolongation smoother method % ' % fn) + + return P,B + + + + + + +def smoothed_aggregation_solver(A, B=None, max_levels = 10, max_coarse = 500, + solver = multilevel_solver, **kwargs): """Create a multilevel solver using Smoothed Aggregation (SA) *Parameters*: @@ -219,19 +259,23 @@ Maximum number of levels to be used in the multilevel solver. max_coarse: {integer} : default 500 Maximum number of variables permitted on the coarse grid. 
- strength : - Function that computes the strength of connection matrix C - strength(A) -> C - aggregate : - Function that computes an aggregation operator - aggregate(C) -> AggOp - tentative: - Function that computes a tentative prolongator - tentative(AggOp,B) -> T,B_coarse - smooth : - Function that smooths the tentative prolongator - smooth(A,C,T) -> P + + *Optional Parameters*: + strength : strength of connection method + Possible values are: + 'standard' + 'ode' + + aggregate : aggregation method + Possible values are: + 'standard' + + smooth : prolongation smoother + Possible values are: + 'standard' + 'energy_min' + Unused Parameters epsilon: {float} : default 0.0 Strength of connection parameter used in aggregation. @@ -287,10 +331,7 @@ Rs = [] while len(As) < max_levels and A.shape[0] > max_coarse: - C = strength(A) - AggOp = aggregate(C) - T,B = tentative(AggOp,B) - P = smooth(A,T) + P,B = sa_prolongator(A,B,**kwargs) R = P.T.asformat(P.format) @@ -301,6 +342,6 @@ Ps.append(P) - return multilevel_solver(As,Ps,Rs=Rs,preprocess=pre,postprocess=post) + return solver(As,Ps,Rs=Rs,preprocess=pre,postprocess=post) Modified: trunk/scipy/sparse/spfuncs.py =================================================================== --- trunk/scipy/sparse/spfuncs.py 2008-02-01 03:13:28 UTC (rev 3885) +++ trunk/scipy/sparse/spfuncs.py 2008-02-01 19:41:26 UTC (rev 3886) @@ -88,7 +88,7 @@ def count_blocks(A,blocksize): """For a given blocksize=(r,c) count the number of occupied - blocks in a sparse matrix A using + blocks in a sparse matrix A """ r,c = blocksize if r < 1 or c < 1: From scipy-svn at scipy.org Fri Feb 1 14:49:34 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Fri, 1 Feb 2008 13:49:34 -0600 (CST) Subject: [Scipy-svn] r3887 - trunk/scipy/splinalg/isolve Message-ID: <20080201194934.E4C0639C3CE@new.scipy.org> Author: wnbell Date: 2008-02-01 13:49:30 -0600 (Fri, 01 Feb 2008) New Revision: 3887 Modified: trunk/scipy/splinalg/isolve/minres.py Log: fixed missing import Modified: trunk/scipy/splinalg/isolve/minres.py =================================================================== --- trunk/scipy/splinalg/isolve/minres.py 2008-02-01 19:41:26 UTC (rev 3886) +++ trunk/scipy/splinalg/isolve/minres.py 2008-02-01 19:49:30 UTC (rev 3887) @@ -1,4 +1,4 @@ -from numpy import sqrt, inner, finfo +from numpy import sqrt, inner, finfo, asarray, zeros from numpy.linalg import norm def psolve(x): return x @@ -277,10 +277,11 @@ if __name__ == '__main__': - from scipy import * - from scipy.sparse import * - from scipy.splinalg import * - from scipy.sandbox.multigrid import * + from scipy import ones, arange + from scipy.linalg import norm + from scipy.sparse import spdiags + from scipy.splinalg import cg + #from scipy.sandbox.multigrid import * n = 100 @@ -292,6 +293,6 @@ #A = poisson((10,),format='csr') A = spdiags( [arange(1,n+1,dtype=float)], [0], n, n, format='csr') b = ones( A.shape[0] ) - #x = minres(A,b,tol=1e-12,maxiter=None,callback=cb) - x = cg(A,b,x0=b,tol=1e-12,maxiter=None,callback=cb)[0] + x = minres(A,b,tol=1e-12,maxiter=None,callback=cb) + #x = cg(A,b,x0=b,tol=1e-12,maxiter=None,callback=cb)[0] From scipy-svn at scipy.org Fri Feb 1 18:29:41 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Fri, 1 Feb 2008 17:29:41 -0600 (CST) Subject: [Scipy-svn] r3888 - in trunk/scipy/splinalg/isolve: . 
tests Message-ID: <20080201232941.ECEC339C088@new.scipy.org> Author: wnbell Date: 2008-02-01 17:29:37 -0600 (Fri, 01 Feb 2008) New Revision: 3888 Added: trunk/scipy/splinalg/isolve/utils.py Modified: trunk/scipy/splinalg/isolve/__init__.py trunk/scipy/splinalg/isolve/minres.py trunk/scipy/splinalg/isolve/tests/test_iterative.py Log: updated MINRES code abstracted iterative solver setup code Modified: trunk/scipy/splinalg/isolve/__init__.py =================================================================== --- trunk/scipy/splinalg/isolve/__init__.py 2008-02-01 19:49:30 UTC (rev 3887) +++ trunk/scipy/splinalg/isolve/__init__.py 2008-02-01 23:29:37 UTC (rev 3888) @@ -2,6 +2,7 @@ #from info import __doc__ from iterative import * +from minres import minres __all__ = filter(lambda s:not s.startswith('_'),dir()) from scipy.testing.pkgtester import Tester Modified: trunk/scipy/splinalg/isolve/minres.py =================================================================== --- trunk/scipy/splinalg/isolve/minres.py 2008-02-01 19:49:30 UTC (rev 3887) +++ trunk/scipy/splinalg/isolve/minres.py 2008-02-01 23:29:37 UTC (rev 3888) @@ -1,11 +1,10 @@ -from numpy import sqrt, inner, finfo, asarray, zeros +from numpy import ndarray, matrix, sqrt, inner, finfo, asarray, zeros from numpy.linalg import norm -def psolve(x): return x -def check_sizes(A,x,b): pass +from utils import make_system def minres(A, b, x0=None, shift=0.0, tol=1e-5, maxiter=None, xtype=None, - precond=None, callback=None, show=False, check=False): + M=None, callback=None, show=False, check=True): """Use the Minimum Residual Method (MINRES) to solve Ax=b MINRES minimizes norm(A*x - b) for the symmetric matrix A. Unlike @@ -30,23 +29,19 @@ http://www.stanford.edu/group/SOL/software/minres/matlab/ """ + A,M,x,b,postprocess = make_system(A,M,x0,b,xtype) - show = True #TODO remove - check = True #TODO remove + matvec = A.matvec + psolve = M.matvec first = 'Enter minres. ' last = 'Exit minres. ' - assert(A.shape[0] == A.shape[1]) - assert(A.shape[1] == len(b)) - - b = asarray(b).ravel() n = A.shape[0] if maxiter is None: maxiter = 5 * n - matvec = A.matvec msg =[' beta2 = 0. If M = I, b and x are eigenvectors ', # -1 ' beta1 = 0. 
The exact solution is x = 0 ', # 0 @@ -56,9 +51,9 @@ ' x has converged to an eigenvector ', # 4 ' acond has exceeded 0.1/eps ', # 5 ' The iteration limit was reached ', # 6 - ' Aname does not define a symmetric matrix ', # 7 - ' Mname does not define a symmetric matrix ', # 8 - ' Mname does not define a pos-def preconditioner '] # 9 + ' A does not define a symmetric matrix ', # 7 + ' M does not define a symmetric matrix ', # 8 + ' M does not define a pos-def preconditioner '] # 9 if show: @@ -90,7 +85,7 @@ if beta1 < 0: raise ValueError('indefinite preconditioner') elif beta1 == 0: - return x + return (postprocess(x), 0) beta1 = sqrt( beta1 ) @@ -262,7 +257,7 @@ if callback is not None: callback(x) - if istop > 0: break + if istop != 0: break #TODO check this if show: @@ -273,7 +268,7 @@ print last + ' Arnorm = %12.4e' % (Arnorm,) print last + msg[istop+1] - return x + return (postprocess(x),0) if __name__ == '__main__': @@ -283,7 +278,7 @@ from scipy.splinalg import cg #from scipy.sandbox.multigrid import * - n = 100 + n = 10 residuals = [] @@ -292,7 +287,9 @@ #A = poisson((10,),format='csr') A = spdiags( [arange(1,n+1,dtype=float)], [0], n, n, format='csr') - b = ones( A.shape[0] ) + M = spdiags( [1.0/arange(1,n+1,dtype=float)], [0], n, n, format='csr') + A.psolve = M.matvec + b = 0*ones( A.shape[0] ) x = minres(A,b,tol=1e-12,maxiter=None,callback=cb) #x = cg(A,b,x0=b,tol=1e-12,maxiter=None,callback=cb)[0] Modified: trunk/scipy/splinalg/isolve/tests/test_iterative.py =================================================================== --- trunk/scipy/splinalg/isolve/tests/test_iterative.py 2008-02-01 19:49:30 UTC (rev 3887) +++ trunk/scipy/splinalg/isolve/tests/test_iterative.py 2008-02-01 23:29:37 UTC (rev 3888) @@ -9,7 +9,7 @@ from scipy.linalg import norm from scipy.sparse import spdiags -from scipy.splinalg.isolve import cg, cgs, bicg, bicgstab, gmres, qmr +from scipy.splinalg.isolve import cg, cgs, bicg, bicgstab, gmres, qmr, minres #def callback(x): # global A, b @@ -36,7 +36,7 @@ self.solvers.append( (bicgstab, False, False) ) self.solvers.append( (gmres, False, False) ) self.solvers.append( (qmr, False, False) ) - #self.solvers.append( (minres, True, False) ) + self.solvers.append( (minres, True, False) ) # list of tuples (A, symmetric, positive_definite ) self.cases = [] @@ -91,7 +91,7 @@ if req_pos and not pos: continue M,N = A.shape - D = spdiags( [1.0/A.diagonal()], [0], M, N) + D = spdiags( [abs(1.0/A.diagonal())], [0], M, N) def precond(b,which=None): return D*b Added: trunk/scipy/splinalg/isolve/utils.py =================================================================== --- trunk/scipy/splinalg/isolve/utils.py 2008-02-01 19:49:30 UTC (rev 3887) +++ trunk/scipy/splinalg/isolve/utils.py 2008-02-01 23:29:37 UTC (rev 3888) @@ -0,0 +1,79 @@ +from numpy import asanyarray, asmatrix, array, matrix, zeros + +from scipy.splinalg.interface import aslinearoperator, LinearOperator + +_coerce_rules = {('f','f'):'f', ('f','d'):'d', ('f','F'):'F', + ('f','D'):'D', ('d','f'):'d', ('d','d'):'d', + ('d','F'):'D', ('d','D'):'D', ('F','f'):'F', + ('F','d'):'D', ('F','F'):'F', ('F','D'):'D', + ('D','f'):'D', ('D','d'):'D', ('D','F'):'D', + ('D','D'):'D'} + +def coerce(x,y): + if x not in 'fdFD': + x = 'd' + if y not in 'fdFD': + y = 'd' + return _coerce_rules[x,y] + +def id(x): + return x + +def make_system(A, M, x0, b, xtype=None): + A_ = A + A = aslinearoperator(A) + + if A.shape[0] != A.shape[1]: + raise ValueError('expected square matrix (shape=%s)' % shape) + + N = A.shape[0] + + b = 
asanyarray(b) + + if not (b.shape == (N,1) or b.shape == (N,)): + raise ValueError('A and b have incompatible dimensions') + + def postprocess(x): + if isinstance(b,matrix): + x = asmatrix(x) + return x.reshape(b.shape) + + + if xtype is None: + if hasattr(A,'dtype'): + xtype = A.dtype.char + else: + xtype = A.matvec(b).dtype.char + xtype = coerce(xtype, b.dtype.char) + elif xtype == 0: + xtype = b.dtype.char + else: + if xtype not in 'fdFD': + raise ValueError, "xtype must be 'f', 'd', 'F', or 'D'" + + if x0 is None: + x = zeros(N, dtype=xtype) + else: + x = array(x0, dtype=xtype) + if not (x.shape == (N,1) or x.shape == (N,)): + raise ValueError('A and x have incompatible dimensions') + x = x.ravel() + + # process preconditioner + if M is None: + if hasattr(A_,'psolve'): + psolve = A_.psolve + else: + psolve = id + if hasattr(A_,'rpsolve'): + rpsolve = A_.rpsolve + else: + rpsolve = id + M = LinearOperator(A.shape, matvec=psolve, rmatvec=rpsolve, dtype=A.dtype) + else: + if A.shape != M.shape: + raise ValueError('matrix and preconditioner have different shapes') + + return A, M, x, b, postprocess + + From scipy-svn at scipy.org Fri Feb 1 23:26:53 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Fri, 1 Feb 2008 22:26:53 -0600 (CST) Subject: [Scipy-svn] r3889 - trunk/scipy/splinalg/isolve Message-ID: <20080202042653.A691639C2C0@new.scipy.org> Author: wnbell Date: 2008-02-01 22:26:51 -0600 (Fri, 01 Feb 2008) New Revision: 3889 Modified: trunk/scipy/splinalg/isolve/minres.py Log: disable checking by default Modified: trunk/scipy/splinalg/isolve/minres.py =================================================================== --- trunk/scipy/splinalg/isolve/minres.py 2008-02-01 23:29:37 UTC (rev 3888) +++ trunk/scipy/splinalg/isolve/minres.py 2008-02-02 04:26:51 UTC (rev 3889) @@ -4,7 +4,7 @@ from utils import make_system def minres(A, b, x0=None, shift=0.0, tol=1e-5, maxiter=None, xtype=None, - M=None, callback=None, show=False, check=True): + M=None, callback=None, show=False, check=False): """Use the Minimum Residual Method (MINRES) to solve Ax=b MINRES minimizes norm(A*x - b) for the symmetric matrix A. Unlike @@ -90,6 +90,18 @@ beta1 = sqrt( beta1 ) if check: + # are these too strict? 
+ + # see if A is symmetric + w = matvec(y) + r2 = matvec(w) + s = inner(w,w) + t = inner(y,r2) + z = abs( s - t ) + epsa = (s + eps) * eps**(1.0/3.0) + if z > epsa: + raise ValueError('non-symmetric matrix') + # see if M is symmetric r2 = psolve(y) s = inner(y,y) @@ -99,16 +111,7 @@ if z > epsa: raise ValueError('non-symmetric preconditioner') - # see if A is symmetric - w = matvec(y) - r2 = matvec(w) - s = inner(w,w) - t = inner(y,r2) - epsa = (s + eps) * eps**(1.0/3.0) - if z > epsa: - raise ValueError('non-symmetric matrix') - # Initialize other quantities oldb = 0; beta = beta1; dbar = 0; epsln = 0; qrnorm = beta1; phibar = beta1; rhs1 = beta1; From scipy-svn at scipy.org Sat Feb 2 13:17:43 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Sat, 2 Feb 2008 12:17:43 -0600 (CST) Subject: [Scipy-svn] r3890 - in trunk/scipy: sandbox/arpack/tests sparse splinalg splinalg/isolve splinalg/isolve/tests splinalg/tests Message-ID: <20080202181743.1A50D39C2C5@new.scipy.org> Author: wnbell Date: 2008-02-02 12:17:25 -0600 (Sat, 02 Feb 2008) New Revision: 3890 Modified: trunk/scipy/sandbox/arpack/tests/test_speigs.py trunk/scipy/sparse/compressed.py trunk/scipy/splinalg/interface.py trunk/scipy/splinalg/isolve/iterative.py trunk/scipy/splinalg/isolve/minres.py trunk/scipy/splinalg/isolve/tests/test_iterative.py trunk/scipy/splinalg/isolve/utils.py trunk/scipy/splinalg/tests/test_interface.py Log: iterative solvers now use LinearOperator added M argument for preconditioners Modified: trunk/scipy/sandbox/arpack/tests/test_speigs.py =================================================================== --- trunk/scipy/sandbox/arpack/tests/test_speigs.py 2008-02-02 04:26:51 UTC (rev 3889) +++ trunk/scipy/sandbox/arpack/tests/test_speigs.py 2008-02-02 18:17:25 UTC (rev 3890) @@ -22,8 +22,8 @@ vals = vals[uv_sortind] vecs = vecs[:,uv_sortind] - from scipy.splinalg.isolve.iterative import get_matvec - matvec = get_matvec(A) + from scipy.splinalg.interface import aslinearoperator + matvec = aslinearoperator(A).matvec #= lambda x: N.asarray(A*x)[0] nev=4 eigvs = ARPACK_eigs(matvec, A.shape[0], nev=nev) Modified: trunk/scipy/sparse/compressed.py =================================================================== --- trunk/scipy/sparse/compressed.py 2008-02-02 04:26:51 UTC (rev 3889) +++ trunk/scipy/sparse/compressed.py 2008-02-02 18:17:25 UTC (rev 3890) @@ -365,10 +365,19 @@ raise TypeError, "need a dense vector" def rmatvec(self, other, conjugate=True): + """Multiplies the vector 'other' by the sparse matrix, returning a + dense vector as a result. + + If 'conjugate' is True: + - returns A.transpose().conj() * other + Otherwise: + - returns A.transpose() * other. 
+ + """ if conjugate: - return transpose( self.transpose().conj().matvec(transpose(other)) ) + return self.transpose().conj().matvec( other ) else: - return transpose( self.transpose().matvec(transpose(other)) ) + return self.transpose().matvec( other ) def getdata(self, ind): return self.data[ind] @@ -376,6 +385,7 @@ def diagonal(self): """Returns the main diagonal of the matrix """ + #TODO support k-th diagonal fn = getattr(sparsetools, self.format + "_diagonal") y = empty( min(self.shape), dtype=upcast(self.dtype) ) fn(self.shape[0], self.shape[1], self.indptr, self.indices, self.data, y) Modified: trunk/scipy/splinalg/interface.py =================================================================== --- trunk/scipy/splinalg/interface.py 2008-02-02 04:26:51 UTC (rev 3889) +++ trunk/scipy/splinalg/interface.py 2008-02-02 18:17:25 UTC (rev 3890) @@ -1,4 +1,5 @@ -import numpy as np +import numpy +from numpy import matrix, ndarray, asarray, dot, atleast_2d from scipy.sparse.sputils import isshape from scipy.sparse import isspmatrix @@ -7,7 +8,42 @@ class LinearOperator: def __init__( self, shape, matvec, rmatvec=None, dtype=None ): """Common interface for performing matrix vector products + + Many iterative methods (e.g. cg, gmres) do not need to know the + individual entries of a matrix to solve a linear system A*x=b. + Such solvers only require the computation of matrix vector + products, A*v where v is a dense vector. This class serves as + an abstract interface between iterative solvers and matrix-like + objects. + + Required Parameters: + shape : tuple of matrix dimensions (M,N) + matvec(x) : function that returns A * x + + Optional Parameters: + rmatvec(x) : function that returns A^H * x where A^H represents + the Hermitian (conjugate) transpose of A + dtype : data type of the matrix + + + See Also: + aslinearoperator() : Construct LinearOperators for SciPy classes + + Example: + + >>> from scipy.splinalg import LinearOperator + >>> from scipy import * + >>> def mv(x): + ... return array([ 2*x[0], 3*x[1]]) + ... 
+ >>> A = LinearOperator( (2,2), matvec=mv ) + >>> A + <2x2 LinearOperator with unspecified dtype> + >>> A.matvec( ones(2) ) + array([ 2., 3.]) + """ + shape = tuple(shape) if not isshape(shape): @@ -24,7 +60,7 @@ self.rmatvec = rmatvec if dtype is not None: - self.dtype = np.dtype(dtype) + self.dtype = numpy.dtype(dtype) def __repr__(self): M,N = self.shape @@ -60,11 +96,16 @@ if isinstance(A, LinearOperator): return A - elif isinstance(A, np.ndarray) or isinstance(A,np.matrix): + elif isinstance(A, ndarray) or isinstance(A, matrix): + if len(A.shape) > 2: + raise ValueError('array must have rank <= 2') + + A = atleast_2d(asarray(A)) + def matvec(x): - return np.dot(np.asarray(A),x) + return dot(A,x) def rmatvec(x): - return np.dot(x,np.asarray(A)) + return dot(A.conj().transpose(),x) return LinearOperator( A.shape, matvec, rmatvec=rmatvec, dtype=A.dtype ) elif isspmatrix(A): Modified: trunk/scipy/splinalg/isolve/iterative.py =================================================================== --- trunk/scipy/splinalg/isolve/iterative.py 2008-02-02 04:26:51 UTC (rev 3889) +++ trunk/scipy/splinalg/isolve/iterative.py 2008-02-02 18:17:25 UTC (rev 3890) @@ -15,102 +15,12 @@ import numpy as sb import copy -try: - False, True -except NameError: - False, True = 0, 1 +from scipy.splinalg.interface import LinearOperator +from utils import make_system _type_conv = {'f':'s', 'd':'d', 'F':'c', 'D':'z'} -_coerce_rules = {('f','f'):'f', ('f','d'):'d', ('f','F'):'F', - ('f','D'):'D', ('d','f'):'d', ('d','d'):'d', - ('d','F'):'D', ('d','D'):'D', ('F','f'):'F', - ('F','d'):'D', ('F','F'):'F', ('F','D'):'D', - ('D','f'):'D', ('D','d'):'D', ('D','F'):'D', - ('D','D'):'D'} - -class get_matvec: - methname = 'matvec' - def __init__(self, obj, *args): - self.obj = obj - self.args = args - if isinstance(obj, sb.matrix): - self.callfunc = self.type1m - return - if isinstance(obj, sb.ndarray): - self.callfunc = self.type1 - return - meth = getattr(obj,self.methname,None) - if not callable(meth): - raise ValueError, "Object must be an array "\ - "or have a callable %s attribute." % (self.methname,) - - self.obj = meth - self.callfunc = self.type2 - - def __call__(self, x): - return self.callfunc(x) - - def type1(self, x): - return sb.dot(self.obj, x) - - def type1m(self, x): - return sb.dot(self.obj.A, x) - - def type2(self, x): - return self.obj(x,*self.args) - -class get_rmatvec(get_matvec): - methname = 'rmatvec' - def type1(self, x): - return sb.dot(x, self.obj) - def type1m(self, x): - return sb.dot(x, self.obj.A) - -class get_psolve: - methname = 'psolve' - def __init__(self, obj, *args): - self.obj = obj - self.args = args - meth = getattr(obj,self.methname,None) - if meth is None: # no preconditiong available - self.callfunc = self.type1 - return - - if not callable(meth): - raise ValueError, "Preconditioning method %s "\ - "must be callable." 
% (self.methname,) - - self.obj = meth - self.callfunc = self.type2 - - def __call__(self, x): - return self.callfunc(x) - - def type1(self, x): - return x - - def type2(self, x): - return self.obj(x,*self.args) - -class get_rpsolve(get_psolve): - methname = 'rpsolve' - -class get_psolveq(get_psolve): - - def __call__(self, x, which): - return self.callfunc(x, which) - - def type1(self, x, which): - return x - - def type2(self, x, which): - return self.obj(x,which,*self.args) - -class get_rpsolveq(get_psolveq): - methname = 'rpsolve' - -def bicg(A, b, x0=None, tol=1e-5, maxiter=None, xtype=None, callback=None): +def bicg(A, b, x0=None, tol=1e-5, maxiter=None, xtype=None, M=None, callback=None): """Use BIConjugate Gradient iteration to solve A x = b Inputs: @@ -145,43 +55,22 @@ iteration. It is called as callback(xk), where xk is the current parameter vector. """ - b = sb.asarray(b)+0.0 + A,M,x,b,postprocess = make_system(A,M,x0,b,xtype) + n = len(b) if maxiter is None: maxiter = n*10 - if x0 is None: - x = sb.zeros(n) - else: - x = copy.copy(x0) + matvec, rmatvec = A.matvec, A.rmatvec + psolve, rpsolve = M.matvec, M.rmatvec + ltr = _type_conv[x.dtype.char] + revcom = getattr(_iterative, ltr + 'bicgrevcom') + stoptest = getattr(_iterative, ltr + 'stoptest2') - if xtype is None: - try: - atyp = A.dtype.char - except AttributeError: - atyp = None - if atyp is None: - atyp = A.matvec(x).dtype.char - typ = _coerce_rules[b.dtype.char,atyp] - elif xtype == 0: - typ = b.dtype.char - else: - typ = xtype - if typ not in 'fdFD': - raise ValueError, "xtype must be 'f', 'd', 'F', or 'D'" - - x = sb.asarray(x,typ) - b = sb.asarray(b,typ) - - matvec, psolve, rmatvec, rpsolve = (None,)*4 - ltr = _type_conv[typ] - revcom = _iterative.__dict__[ltr+'bicgrevcom'] - stoptest = _iterative.__dict__[ltr+'stoptest2'] - resid = tol ndx1 = 1 ndx2 = -1 - work = sb.zeros(6*n,typ) + work = sb.zeros(6*n,dtype=x.dtype) ijob = 1 info = 0 ftflag = True @@ -198,26 +87,16 @@ if (ijob == -1): break elif (ijob == 1): - if matvec is None: - matvec = get_matvec(A) work[slice2] *= sclr2 work[slice2] += sclr1*matvec(work[slice1]) elif (ijob == 2): - if rmatvec is None: - rmatvec = get_rmatvec(A) work[slice2] *= sclr2 work[slice2] += sclr1*rmatvec(work[slice1]) elif (ijob == 3): - if psolve is None: - psolve = get_psolve(A) work[slice1] = psolve(work[slice2]) elif (ijob == 4): - if rpsolve is None: - rpsolve = get_rpsolve(A) work[slice1] = rpsolve(work[slice2]) elif (ijob == 5): - if matvec is None: - matvec = get_matvec(A) work[slice2] *= sclr2 work[slice2] += sclr1*matvec(x) elif (ijob == 6): @@ -227,10 +106,10 @@ bnrm2, resid, info = stoptest(work[slice1], b, bnrm2, tol, info) ijob = 2 - return x, info + return postprocess(x), info -def bicgstab(A, b, x0=None, tol=1e-5, maxiter=None, xtype=None, callback=None): +def bicgstab(A, b, x0=None, tol=1e-5, maxiter=None, xtype=None, M=None, callback=None): """Use BIConjugate Gradient STABilized iteration to solve A x = b Inputs: @@ -264,44 +143,22 @@ iteration. It is called as callback(xk), where xk is the current parameter vector. 
""" - b = sb.asarray(b)+0.0 + A,M,x,b,postprocess = make_system(A,M,x0,b,xtype) + n = len(b) if maxiter is None: maxiter = n*10 - if x0 is None: - x = sb.zeros(n) - else: - x = copy.copy(x0) + matvec = A.matvec + psolve = M.matvec + ltr = _type_conv[x.dtype.char] + revcom = getattr(_iterative, ltr + 'bicgstabrevcom') + stoptest = getattr(_iterative, ltr + 'stoptest2') - - if xtype is None: - try: - atyp = A.dtype.char - except AttributeError: - atyp = None - if atyp is None: - atyp = A.matvec(x).dtype.char - typ = _coerce_rules[b.dtype.char,atyp] - elif xtype == 0: - typ = b.dtype.char - else: - typ = xtype - if typ not in 'fdFD': - raise ValueError, "xtype must be 'f', 'd', 'F', or 'D'" - - x = sb.asarray(x,typ) - b = sb.asarray(b,typ) - - matvec, psolve = (None,)*2 - ltr = _type_conv[typ] - revcom = _iterative.__dict__[ltr+'bicgstabrevcom'] - stoptest = _iterative.__dict__[ltr+'stoptest2'] - resid = tol ndx1 = 1 ndx2 = -1 - work = sb.zeros(7*n,typ) + work = sb.zeros(7*n,dtype=x.dtype) ijob = 1 info = 0 ftflag = True @@ -338,10 +195,10 @@ bnrm2, resid, info = stoptest(work[slice1], b, bnrm2, tol, info) ijob = 2 - return x, info + return postprocess(x), info -def cg(A, b, x0=None, tol=1e-5, maxiter=None, xtype=None, callback=None): +def cg(A, b, x0=None, tol=1e-5, maxiter=None, xtype=None, M=None, callback=None): """Use Conjugate Gradient iteration to solve A x = b (A^H = A) Inputs: @@ -376,44 +233,22 @@ iteration. It is called as callback(xk), where xk is the current parameter vector. """ - b = sb.asarray(b)+0.0 + A,M,x,b,postprocess = make_system(A,M,x0,b,xtype) + n = len(b) if maxiter is None: maxiter = n*10 - if x0 is None: - x = sb.zeros(n) - else: - x = copy.copy(x0) + matvec = A.matvec + psolve = M.matvec + ltr = _type_conv[x.dtype.char] + revcom = getattr(_iterative, ltr + 'cgrevcom') + stoptest = getattr(_iterative, ltr + 'stoptest2') - - if xtype is None: - try: - atyp = A.dtype.char - except AttributeError: - atyp = None - if atyp is None: - atyp = A.matvec(x).dtype.char - typ = _coerce_rules[b.dtype.char,atyp] - elif xtype == 0: - typ = b.dtype.char - else: - typ = xtype - if typ not in 'fdFD': - raise ValueError, "xtype must be 'f', 'd', 'F', or 'D'" - - x = sb.asarray(x,typ) - b = sb.asarray(b,typ) - - matvec, psolve = (None,)*2 - ltr = _type_conv[typ] - revcom = _iterative.__dict__[ltr+'cgrevcom'] - stoptest = _iterative.__dict__[ltr+'stoptest2'] - resid = tol ndx1 = 1 ndx2 = -1 - work = sb.zeros(4*n,typ) + work = sb.zeros(4*n,dtype=x.dtype) ijob = 1 info = 0 ftflag = True @@ -430,17 +265,11 @@ if (ijob == -1): break elif (ijob == 1): - if matvec is None: - matvec = get_matvec(A) work[slice2] *= sclr2 work[slice2] += sclr1*matvec(work[slice1]) elif (ijob == 2): - if psolve is None: - psolve = get_psolve(A) work[slice1] = psolve(work[slice2]) elif (ijob == 3): - if matvec is None: - matvec = get_matvec(A) work[slice2] *= sclr2 work[slice2] += sclr1*matvec(x) elif (ijob == 4): @@ -450,10 +279,10 @@ bnrm2, resid, info = stoptest(work[slice1], b, bnrm2, tol, info) ijob = 2 - return x, info + return postprocess(x), info -def cgs(A, b, x0=None, tol=1e-5, maxiter=None, xtype=None, callback=None): +def cgs(A, b, x0=None, tol=1e-5, maxiter=None, xtype=None, M=None, callback=None): """Use Conjugate Gradient Squared iteration to solve A x = b Inputs: @@ -488,43 +317,22 @@ iteration. It is called as callback(xk), where xk is the current parameter vector. 
""" - b = sb.asarray(b) + 0.0 + A,M,x,b,postprocess = make_system(A,M,x0,b,xtype) + n = len(b) if maxiter is None: maxiter = n*10 + + matvec = A.matvec + psolve = M.matvec + ltr = _type_conv[x.dtype.char] + revcom = getattr(_iterative, ltr + 'cgsrevcom') + stoptest = getattr(_iterative, ltr + 'stoptest2') - if x0 is None: - x = sb.zeros(n) - else: - x = copy.copy(x0) - - if xtype is None: - try: - atyp = A.dtype.char - except AttributeError: - atyp = None - if atyp is None: - atyp = A.matvec(x).dtype.char - typ = _coerce_rules[b.dtype.char,atyp] - elif xtype == 0: - typ = b.dtype.char - else: - typ = xtype - if typ not in 'fdFD': - raise ValueError, "xtype must be 'f', 'd', 'F', or 'D'" - - x = sb.asarray(x,typ) - b = sb.asarray(b,typ) - - matvec, psolve = (None,)*2 - ltr = _type_conv[typ] - revcom = _iterative.__dict__[ltr+'cgsrevcom'] - stoptest = _iterative.__dict__[ltr+'stoptest2'] - resid = tol ndx1 = 1 ndx2 = -1 - work = sb.zeros(7*n,typ) + work = sb.zeros(7*n,dtype=x.dtype) ijob = 1 info = 0 ftflag = True @@ -541,17 +349,11 @@ if (ijob == -1): break elif (ijob == 1): - if matvec is None: - matvec = get_matvec(A) work[slice2] *= sclr2 work[slice2] += sclr1*matvec(work[slice1]) elif (ijob == 2): - if psolve is None: - psolve = get_psolve(A) work[slice1] = psolve(work[slice2]) elif (ijob == 3): - if matvec is None: - matvec = get_matvec(A) work[slice2] *= sclr2 work[slice2] += sclr1*matvec(x) elif (ijob == 4): @@ -561,10 +363,10 @@ bnrm2, resid, info = stoptest(work[slice1], b, bnrm2, tol, info) ijob = 2 - return x, info + return postprocess(x), info -def gmres(A, b, x0=None, tol=1e-5, restrt=None, maxiter=None, xtype=None, callback=None): +def gmres(A, b, x0=None, tol=1e-5, restrt=None, maxiter=None, xtype=None, M=None, callback=None): """Use Generalized Minimal RESidual iteration to solve A x = b Inputs: @@ -601,44 +403,25 @@ iteration. It is called as callback(xk), where xk is the current parameter vector. 
""" - b = sb.asarray(b)+0.0 + A,M,x,b,postprocess = make_system(A,M,x0,b,xtype) + n = len(b) if maxiter is None: maxiter = n*10 - if x0 is None: - x = sb.zeros(n) - else: - x = copy.copy(x0) + matvec = A.matvec + psolve = M.matvec + ltr = _type_conv[x.dtype.char] + revcom = getattr(_iterative, ltr + 'gmresrevcom') + stoptest = getattr(_iterative, ltr + 'stoptest2') - if xtype is None: - try: - atyp = A.dtype.char - except AttributeError: - atyp = A.matvec(x).dtype.char - typ = _coerce_rules[b.dtype.char,atyp] - elif xtype == 0: - typ = b.dtype.char - else: - typ = xtype - if typ not in 'fdFD': - raise ValueError, "xtype must be 'f', 'd', 'F', or 'D'" - - x = sb.asarray(x,typ) - b = sb.asarray(b,typ) - - matvec, psolve = (None,)*2 - ltr = _type_conv[typ] - revcom = _iterative.__dict__[ltr+'gmresrevcom'] - stoptest = _iterative.__dict__[ltr+'stoptest2'] - if restrt is None: restrt = n resid = tol ndx1 = 1 ndx2 = -1 - work = sb.zeros((6+restrt)*n,typ) - work2 = sb.zeros((restrt+1)*(2*restrt+2),typ) + work = sb.zeros((6+restrt)*n,dtype=x.dtype) + work2 = sb.zeros((restrt+1)*(2*restrt+2),dtype=x.dtype) ijob = 1 info = 0 ftflag = True @@ -655,17 +438,11 @@ if (ijob == -1): break elif (ijob == 1): - if matvec is None: - matvec = get_matvec(A) work[slice2] *= sclr2 work[slice2] += sclr1*matvec(x) elif (ijob == 2): - if psolve is None: - psolve = get_psolve(A) work[slice1] = psolve(work[slice2]) elif (ijob == 3): - if matvec is None: - matvec = get_matvec(A) work[slice2] *= sclr2 work[slice2] += sclr1*matvec(work[slice1]) elif (ijob == 4): @@ -675,10 +452,10 @@ bnrm2, resid, info = stoptest(work[slice1], b, bnrm2, tol, info) ijob = 2 - return x, info + return postprocess(x), info -def qmr(A, b, x0=None, tol=1e-5, maxiter=None, xtype=None, callback=None): +def qmr(A, b, x0=None, tol=1e-5, maxiter=None, xtype=None, M1=None, M2=None, callback=None): """Use Quasi-Minimal Residual iteration to solve A x = b Inputs: @@ -714,44 +491,39 @@ iteration. It is called as callback(xk), where xk is the current parameter vector. 
""" - b = sb.asarray(b)+0.0 + A_ = A + A,M,x,b,postprocess = make_system(A,None,x0,b,xtype) + + if M1 is None and M2 is None: + if hasattr(A_,'psolve'): + def left_psolve(b): + return A_.psolve(b,'left') + def right_psolve(b): + return A_.psolve(b,'right') + def left_rpsolve(b): + return A_.rpsolve(b,'left') + def right_rpsolve(b): + return A_.rpsolve(b,'right') + M1 = LinearOperator(A.shape, matvec=left_psolve, rmatvec=left_rpsolve) + M2 = LinearOperator(A.shape, matvec=right_psolve, rmatvec=right_rpsolve) + else: + def id(b): + return b + M1 = LinearOperator(A.shape, matvec=id, rmatvec=id) + M2 = LinearOperator(A.shape, matvec=id, rmatvec=id) + n = len(b) if maxiter is None: maxiter = n*10 - if x0 is None: - x = sb.zeros(n) - else: - x = copy.copy(x0) + ltr = _type_conv[x.dtype.char] + revcom = getattr(_iterative, ltr + 'qmrrevcom') + stoptest = getattr(_iterative, ltr + 'stoptest2') - if xtype is None: - try: - atyp = A.dtype.char - except AttributeError: - atyp = None - if atyp is None: - atyp = A.matvec(x).dtype.char - typ = _coerce_rules[b.dtype.char,atyp] - elif xtype == 0: - typ = b.dtype.char - else: - typ = xtype - if typ not in 'fdFD': - raise ValueError, "xtype must be 'f', 'd', 'F', or 'D'" - - x = sb.asarray(x,typ) - b = sb.asarray(b,typ) - - - matvec, psolve, rmatvec, rpsolve = (None,)*4 - ltr = _type_conv[typ] - revcom = _iterative.__dict__[ltr+'qmrrevcom'] - stoptest = _iterative.__dict__[ltr+'stoptest2'] - resid = tol ndx1 = 1 ndx2 = -1 - work = sb.zeros(11*n,typ) + work = sb.zeros(11*n,x.dtype) ijob = 1 info = 0 ftflag = True @@ -768,36 +540,22 @@ if (ijob == -1): break elif (ijob == 1): - if matvec is None: - matvec = get_matvec(A) work[slice2] *= sclr2 - work[slice2] += sclr1*matvec(work[slice1]) + work[slice2] += sclr1*A.matvec(work[slice1]) elif (ijob == 2): - if rmatvec is None: - rmatvec = get_rmatvec(A) work[slice2] *= sclr2 - work[slice2] += sclr1*rmatvec(work[slice1]) + work[slice2] += sclr1*A.rmatvec(work[slice1]) elif (ijob == 3): - if psolve is None: - psolve = get_psolveq(A) - work[slice1] = psolve(work[slice2],'left') + work[slice1] = M1.matvec(work[slice2]) elif (ijob == 4): - if psolve is None: - psolve = get_psolveq(A) - work[slice1] = psolve(work[slice2],'right') + work[slice1] = M2.matvec(work[slice2]) elif (ijob == 5): - if rpsolve is None: - rpsolve = get_rpsolveq(A) - work[slice1] = rpsolve(work[slice2],'left') + work[slice1] = M1.rmatvec(work[slice2]) elif (ijob == 6): - if rpsolve is None: - rpsolve = get_rpsolveq(A) - work[slice1] = rpsolve(work[slice2],'right') + work[slice1] = M2.rmatvec(work[slice2]) elif (ijob == 7): - if matvec is None: - matvec = get_matvec(A) work[slice2] *= sclr2 - work[slice2] += sclr1*matvec(x) + work[slice2] += sclr1*A.matvec(x) elif (ijob == 8): if ftflag: info = -1 @@ -805,4 +563,4 @@ bnrm2, resid, info = stoptest(work[slice1], b, bnrm2, tol, info) ijob = 2 - return x, info + return postprocess(x), info Modified: trunk/scipy/splinalg/isolve/minres.py =================================================================== --- trunk/scipy/splinalg/isolve/minres.py 2008-02-02 04:26:51 UTC (rev 3889) +++ trunk/scipy/splinalg/isolve/minres.py 2008-02-02 18:17:25 UTC (rev 3890) @@ -3,6 +3,8 @@ from utils import make_system +__all__ = ['minres'] + def minres(A, b, x0=None, shift=0.0, tol=1e-5, maxiter=None, xtype=None, M=None, callback=None, show=False, check=False): """Use the Minimum Residual Method (MINRES) to solve Ax=b Modified: trunk/scipy/splinalg/isolve/tests/test_iterative.py 
=================================================================== --- trunk/scipy/splinalg/isolve/tests/test_iterative.py 2008-02-02 04:26:51 UTC (rev 3889) +++ trunk/scipy/splinalg/isolve/tests/test_iterative.py 2008-02-02 18:17:25 UTC (rev 3890) @@ -17,6 +17,10 @@ # #print "||A.x - b|| = " + str(norm(dot(A,x)-b)) +#TODO check that method preserve shape and type +#TODO test complex matrices +#TODO test both preconditioner methods + data = ones((3,10)) data[0,:] = 2 data[1,:] = -1 Modified: trunk/scipy/splinalg/isolve/utils.py =================================================================== --- trunk/scipy/splinalg/isolve/utils.py 2008-02-02 04:26:51 UTC (rev 3889) +++ trunk/scipy/splinalg/isolve/utils.py 2008-02-02 18:17:25 UTC (rev 3890) @@ -1,5 +1,7 @@ -from numpy import asanyarray, asmatrix, array, matrix, zeros +from warnings import warn +from numpy import asanyarray, asarray, asmatrix, array, matrix, zeros + from scipy.splinalg.interface import aslinearoperator, LinearOperator _coerce_rules = {('f','f'):'f', ('f','d'):'d', ('f','F'):'F', @@ -20,6 +22,31 @@ return x def make_system(A, M, x0, b, xtype=None): + """Make a linear system Ax=b + + Parameters: + A - LinearOperator + - sparse or dense matrix (or any valid input to aslinearoperator) + M - LinearOperator or None + - preconditioner + - sparse or dense matrix (or any valid input to aslinearoperator) + x0 - array_like or None + - initial guess to iterative method + b - array_like + - right hand side + xtype - None or one of 'fdFD' + - dtype of the x vector + + Returns: + (A, M, x, b, postprocess) where: + - A is a LinearOperator + - M is a LinearOperator + - x is the initial guess (rank 1 array) + - b is the rhs (rank 1 array) + - postprocess is a function that converts the solution vector + to the appropriate type and dimensions (e.g. (N,1) matrix) + + """ A_ = A A = aslinearoperator(A) @@ -33,24 +60,32 @@ if not (b.shape == (N,1) or b.shape == (N,)): raise ValueError('A and b have incompatible dimensions') + if b.dtype.char not in 'fdFD': + b = b.astype('d') # upcast non-FP types to double + def postprocess(x): if isinstance(b,matrix): x = asmatrix(x) return x.reshape(b.shape) - if xtype is None: if hasattr(A,'dtype'): xtype = A.dtype.char else: xtype = A.matvec(b).dtype.char xtype = coerce(xtype, b.dtype.char) - elif xtype == 0: - xtype = b.dtype.char else: - if xtype not in 'fdFD': - raise ValueError, "xtype must be 'f', 'd', 'F', or 'D'" + warn('Use of xtype argument is deprecated. '\ + 'Use LinearOperator( ... 
, dtype=xtype) instead.',\ + DeprecationWarning) + if xtype == 0: + xtype = b.dtype.char + else: + if xtype not in 'fdFD': + raise ValueError, "xtype must be 'f', 'd', 'F', or 'D'" + b = asarray(b,dtype=xtype) #make b the same type as x + if x0 is None: x = zeros(N, dtype=xtype) else: @@ -71,6 +106,7 @@ rpsolve = id M = LinearOperator(A.shape, matvec=psolve, rmatvec=rpsolve, dtype=A.dtype) else: + M = aslinearoperator(M) if A.shape != M.shape: raise ValueError('matrix and preconditioner have different shapes') Modified: trunk/scipy/splinalg/tests/test_interface.py =================================================================== --- trunk/scipy/splinalg/tests/test_interface.py 2008-02-02 04:26:51 UTC (rev 3889) +++ trunk/scipy/splinalg/tests/test_interface.py 2008-02-02 18:17:25 UTC (rev 3890) @@ -31,18 +31,9 @@ return y def rmatvec(self,x): - if len(x.shape) == 1: - y = array([ 1*x[0] + 4*x[1], - 2*x[0] + 5*x[1], - 3*x[0] + 6*x[1]]) - return y - else: - y = array([ 1*x[0,0] + 4*x[0,1], - 2*x[0,0] + 5*x[0,1], - 3*x[0,0] + 6*x[0,1]]) - return y.reshape(1,-1) - - return y + return array([ 1*x[0] + 4*x[1], + 2*x[0] + 5*x[1], + 3*x[0] + 6*x[1]]) cases.append( matlike() ) @@ -55,7 +46,7 @@ assert_equal(A.matvec(array([[1],[2],[3]])),[[14],[32]]) assert_equal(A.rmatvec(array([1,2])), [9,12,15]) - assert_equal(A.rmatvec(array([[1,2]])),[[9,12,15]]) + assert_equal(A.rmatvec(array([[1],[2]])),[[9],[12],[15]]) if hasattr(M,'dtype'): assert_equal(A.dtype, M.dtype) From scipy-svn at scipy.org Sat Feb 2 14:30:45 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Sat, 2 Feb 2008 13:30:45 -0600 (CST) Subject: [Scipy-svn] r3891 - trunk/scipy/splinalg/isolve/tests Message-ID: <20080202193045.081A039C2D0@new.scipy.org> Author: wnbell Date: 2008-02-02 13:30:41 -0600 (Sat, 02 Feb 2008) New Revision: 3891 Modified: trunk/scipy/splinalg/isolve/tests/test_iterative.py Log: add more interesting QMR test Modified: trunk/scipy/splinalg/isolve/tests/test_iterative.py =================================================================== --- trunk/scipy/splinalg/isolve/tests/test_iterative.py 2008-02-02 18:17:25 UTC (rev 3890) +++ trunk/scipy/splinalg/isolve/tests/test_iterative.py 2008-02-02 19:30:41 UTC (rev 3891) @@ -112,5 +112,42 @@ assert( norm(b - A*x) < tol*norm(b) ) +class TestQMR(TestCase): + def test_leftright_precond(self): + """Check that QMR works with left and right preconditioners""" + + from scipy.splinalg.dsolve import splu + from scipy.splinalg.interface import LinearOperator + + n = 100 + + dat = ones(n) + A = spdiags([-2*dat, 4*dat, -dat], [-1,0,1] ,n,n) + b = arange(n,dtype='d') + + L = spdiags([-dat/2, dat], [-1,0], n, n) + U = spdiags([4*dat, -dat], [ 0,1], n, n) + + L_solver = splu(L) + U_solver = splu(U) + + def L_solve(b): + return L_solver.solve(b) + def U_solve(b): + return U_solver.solve(b) + def LT_solve(b): + return L_solver.solve(b,'T') + def UT_solve(b): + return U_solver.solve(b,'T') + + M1 = LinearOperator( (n,n), matvec=L_solve, rmatvec=LT_solve ) + M2 = LinearOperator( (n,n), matvec=U_solve, rmatvec=UT_solve ) + + x,info = qmr(A, b, tol=1e-8, maxiter=15, M1=M1, M2=M2) + + assert_equal(info,0) + assert( norm(b - A*x) < 1e-8*norm(b) ) + + if __name__ == "__main__": nose.run(argv=['', __file__]) From scipy-svn at scipy.org Sun Feb 3 14:25:31 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Sun, 3 Feb 2008 13:25:31 -0600 (CST) Subject: [Scipy-svn] r3892 - trunk/scipy/sparse Message-ID: <20080203192531.1981739C15C@new.scipy.org> Author: wnbell Date: 
2008-02-03 13:25:28 -0600 (Sun, 03 Feb 2008) New Revision: 3892 Modified: trunk/scipy/sparse/coo.py Log: make coo_matrix.nnz a property Modified: trunk/scipy/sparse/coo.py =================================================================== --- trunk/scipy/sparse/coo.py 2008-02-02 19:30:41 UTC (rev 3891) +++ trunk/scipy/sparse/coo.py 2008-02-03 19:25:28 UTC (rev 3892) @@ -5,9 +5,8 @@ from itertools import izip from warnings import warn -from numpy import array, asarray, empty, intc, zeros, bincount, \ - unique, searchsorted, atleast_2d, lexsort, cumsum, concatenate, \ - empty_like, arange +from numpy import array, asarray, empty, intc, zeros, \ + unique, searchsorted, atleast_2d, empty_like, rank from sparsetools import coo_tocsr, coo_tocsc, coo_todense from base import isspmatrix @@ -175,16 +174,22 @@ self._check() - - def _check(self): - """ Checks for consistency and stores the number of non-zeros as - self.nnz. - """ + def getnnz(self): nnz = len(self.data) if (nnz != len(self.row)) or (nnz != len(self.col)): raise ValueError, "row, column, and data array must all be "\ "the same length" + if rank(self.data) != 1 or rank(self.row) != 1 or rank(self.col) != 1: + raise ValueError, "row, column, and data arrays must have rank 1" + + return nnz + nnz = property(fget=getnnz) + + def _check(self): + """ Checks data structure for consistency """ + nnz = self.nnz + # index arrays should have integer data types if self.row.dtype.kind != 'i': warn("row index array has non-integer dtype (%s) " \ @@ -210,7 +215,6 @@ # some functions pass floats self.shape = tuple([int(x) for x in self.shape]) - self.nnz = nnz def rowcol(self, num): return (self.row[num], self.col[num]) @@ -305,12 +309,7 @@ dok = dok_matrix((self.shape),dtype=self.dtype) - try: - dok.update( izip(izip(self.row,self.col),self.data) ) - except AttributeError: - # the dict() call is for Python 2.3 compatibility - # ideally dok_matrix would accept an iterator - dok.update( dict( izip(izip(self.row,self.col),self.data) ) ) + dok.update( izip(izip(self.row,self.col),self.data) ) return dok From scipy-svn at scipy.org Sun Feb 3 14:44:44 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Sun, 3 Feb 2008 13:44:44 -0600 (CST) Subject: [Scipy-svn] r3893 - trunk/scipy/sparse Message-ID: <20080203194444.1A9D639C07F@new.scipy.org> Author: wnbell Date: 2008-02-03 13:44:39 -0600 (Sun, 03 Feb 2008) New Revision: 3893 Modified: trunk/scipy/sparse/coo.py trunk/scipy/sparse/csr.py trunk/scipy/sparse/dok.py trunk/scipy/sparse/lil.py Log: add nnz properties to all sparse matrices ensure that coo indices are ints Modified: trunk/scipy/sparse/coo.py =================================================================== --- trunk/scipy/sparse/coo.py 2008-02-03 19:25:28 UTC (rev 3892) +++ trunk/scipy/sparse/coo.py 2008-02-03 19:44:39 UTC (rev 3893) @@ -121,8 +121,8 @@ except TypeError: raise TypeError, "invalid input format" - self.row = array(ij[0],copy=copy) - self.col = array(ij[1],copy=copy) + self.row = array(ij[0],copy=copy,dtype=intc) + self.col = array(ij[1],copy=copy,dtype=intc) self.data = array(obj,copy=copy) if shape is None: Modified: trunk/scipy/sparse/csr.py =================================================================== --- trunk/scipy/sparse/csr.py 2008-02-03 19:25:28 UTC (rev 3892) +++ trunk/scipy/sparse/csr.py 2008-02-03 19:44:39 UTC (rev 3893) @@ -117,8 +117,8 @@ self.sort_indices() #lil_matrix needs sorted rows - rows,data = lil.rows,lil.data ptr,ind,dat = self.indptr,self.indices,self.data + rows, data = lil.rows, 
lil.data for n in xrange(self.shape[0]): start = ptr[n] Modified: trunk/scipy/sparse/dok.py =================================================================== --- trunk/scipy/sparse/dok.py 2008-02-03 19:25:28 UTC (rev 3892) +++ trunk/scipy/sparse/dok.py 2008-02-03 19:44:39 UTC (rev 3893) @@ -54,26 +54,26 @@ def getnnz(self): return dict.__len__(self) + nnz = property(fget=getnnz) def __len__(self): return dict.__len__(self) def __str__(self): val = '' - nnz = self.getnnz() keys = self.keys() keys.sort() #TODO why does dok_matrix wipe out .maxprint? - if nnz > self.maxprint: + if self.nnz > self.maxprint: for k in xrange(self.maxprint / 2): key = keys[k] val += " %s\t%s\n" % (str(key), str(self[key])) val = val + " : \t :\n" - for k in xrange(nnz-self.maxprint/2, nnz): + for k in xrange(self.nnz - self.maxprint/2, self.nnz): key = keys[k] val += " %s\t%s\n" % (str(key), str(self[key])) else: - for k in xrange(nnz): + for k in xrange(self.nnz): key = keys[k] val += " %s\t%s\n" % (str(key), str(self[key])) return val[:-1] @@ -536,7 +536,7 @@ def tocoo(self): """ Return a copy of this matrix in COOrdinate format""" from coo import coo_matrix - if self.getnnz() == 0: + if self.nnz == 0: return coo_matrix(self.shape, dtype=self.dtype) else: data = asarray(self.values(), dtype=self.dtype) Modified: trunk/scipy/sparse/lil.py =================================================================== --- trunk/scipy/sparse/lil.py 2008-02-03 19:25:28 UTC (rev 3892) +++ trunk/scipy/sparse/lil.py 2008-02-03 19:44:39 UTC (rev 3893) @@ -126,6 +126,7 @@ def getnnz(self): return sum([len(rowvals) for rowvals in self.data]) + nnz = property(fget=getnnz) def __str__(self): val = '' @@ -134,12 +135,6 @@ val += " %s\t%s\n" % (str((i, j)), str(self.data[i][pos])) return val[:-1] - #def __repr__(self): - # format = self.getformat() - # return "<%dx%d sparse matrix with %d stored "\ - # "elements in %s format>" % \ - # (self.shape + (self.getnnz(), _formats[format][1])) - def getrowview(self, i): """Returns a view of the 'i'th row (without copying). 
""" From scipy-svn at scipy.org Tue Feb 5 20:16:58 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Tue, 5 Feb 2008 19:16:58 -0600 (CST) Subject: [Scipy-svn] r3894 - trunk/scipy/ndimage/register Message-ID: <20080206011658.72A3539C02A@new.scipy.org> Author: tom.waite Date: 2008-02-05 19:16:48 -0600 (Tue, 05 Feb 2008) New Revision: 3894 Modified: trunk/scipy/ndimage/register/Register_EXT.c Log: Bug fix and added resampler Modified: trunk/scipy/ndimage/register/Register_EXT.c =================================================================== --- trunk/scipy/ndimage/register/Register_EXT.c 2008-02-03 19:44:39 UTC (rev 3893) +++ trunk/scipy/ndimage/register/Register_EXT.c 2008-02-06 01:16:48 UTC (rev 3894) @@ -37,8 +37,8 @@ goto exit; /* check in the Python code that F and G are the same dims, type */ - imageG = (unsigned char *)PyArray_DATA(imgArray1); - imageF = (unsigned char *)PyArray_DATA(imgArray2); + imageF = (unsigned char *)PyArray_DATA(imgArray1); + imageG = (unsigned char *)PyArray_DATA(imgArray2); nd = PyArray_NDIM(imgArray1); /* reads dims as 0 = layers, 1 = rows, 2 = cols */ dimsF = PyArray_DIMS(imgArray1); @@ -106,8 +106,8 @@ goto exit; /* check in the Python code that F and G are the same dims, type */ - imageG = (unsigned char *)PyArray_DATA(imgArray1); - imageF = (unsigned char *)PyArray_DATA(imgArray2); + imageF = (unsigned char *)PyArray_DATA(imgArray1); + imageG = (unsigned char *)PyArray_DATA(imgArray2); /* reads dims as 0 = layers, 1 = rows, 2 = cols */ nd = PyArray_NDIM(imgArray1); dimsF = PyArray_DIMS(imgArray1); @@ -136,15 +136,69 @@ exit: /* return the 2D histogram */ - return PyErr_Occurred() ? NULL : (PyObject*)Py_BuildValue("O", hArray); + return PyErr_Occurred() ? NULL : (PyObject*)Py_BuildValue(""); } +static PyObject *Register_LinearResample(PyObject *self, PyObject *args) +{ + int num; + int nd; + int type; + int itype; + int nd_rotmatrix; + int nd_S; + npy_intp *dimsF; + npy_intp *dimsG; + npy_intp *dims_rotmatrix; + npy_intp *dims_S; + unsigned char *imageG; + unsigned char *imageF; + double *M; + int *S; + PyObject *imgArray1 = NULL; + PyObject *imgArray2 = NULL; + PyObject *rotArray = NULL; + PyObject *SArray = NULL; + + if(!PyArg_ParseTuple(args, "OOOO", &imgArray1, &imgArray2, &rotArray, &SArray)) + goto exit; + + /* check in the Python code that F and G are the same dims, type */ + imageF = (unsigned char *)PyArray_DATA(imgArray1); + imageG = (unsigned char *)PyArray_DATA(imgArray2); + /* reads dims as 0 = layers, 1 = rows, 2 = cols */ + nd = PyArray_NDIM(imgArray1); + dimsF = PyArray_DIMS(imgArray1); + dimsG = PyArray_DIMS(imgArray2); + type = PyArray_TYPE(imgArray1); + num = PyArray_SIZE(imgArray1); + + M = (double *)PyArray_DATA(rotArray); + nd_rotmatrix = PyArray_NDIM(rotArray); + dims_rotmatrix = PyArray_DIMS(rotArray); + + S = (int *)PyArray_DATA(SArray); + nd_S = PyArray_NDIM(SArray); + dims_S = PyArray_DIMS(SArray); + + if(!NI_LinearResample((int)dimsF[0], (int)dimsF[1], (int)dimsF[2], + (int)dimsG[0], (int)dimsG[1], (int)dimsG[2], + S, M, imageG, imageF)) + goto exit; + +exit: + + return PyErr_Occurred() ? 
NULL : (PyObject*)Py_BuildValue(""); + +} + static PyMethodDef RegisterMethods[] = { - { "register_histogram", Register_Histogram, METH_VARARGS, NULL }, - { "register_histogram_lite", Register_HistogramLite, METH_VARARGS, NULL }, + { "register_histogram", Register_Histogram, METH_VARARGS, NULL }, + { "register_histogram_lite", Register_HistogramLite, METH_VARARGS, NULL }, + { "register_linear_resample", Register_LinearResample, METH_VARARGS, NULL }, { NULL, NULL, 0, NULL}, }; From scipy-svn at scipy.org Tue Feb 5 20:17:42 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Tue, 5 Feb 2008 19:17:42 -0600 (CST) Subject: [Scipy-svn] r3895 - trunk/scipy/ndimage/register Message-ID: <20080206011742.5D13A39C02A@new.scipy.org> Author: tom.waite Date: 2008-02-05 19:17:39 -0600 (Tue, 05 Feb 2008) New Revision: 3895 Modified: trunk/scipy/ndimage/register/Register_IMPL.c Log: Bug fix and added new resample routine Modified: trunk/scipy/ndimage/register/Register_IMPL.c =================================================================== --- trunk/scipy/ndimage/register/Register_IMPL.c 2008-02-06 01:16:48 UTC (rev 3894) +++ trunk/scipy/ndimage/register/Register_IMPL.c 2008-02-06 01:17:39 UTC (rev 3895) @@ -165,7 +165,8 @@ rx = xp - (int)xp; ry = yp - (int)yp; rz = zp - (int)zp; - vf = trilinear_A(imageF, (int)dx, (int)dy, (int)dz, rx, ry, rz, dims_F); + //vf = trilinear_A(imageF, (int)dx, (int)dy, (int)dz, rx, ry, rz, dims_F); + vf = trilinear_A(imageF, (int)xp, (int)yp, (int)zp, rx, ry, rz, dims_F); /* floor */ ivf = (int)vf; delta = vf - ivf; @@ -173,7 +174,8 @@ rx = dx - (int)dx; ry = dy - (int)dy; rz = dz - (int)dz; - ivg = (int)trilinear_A(imageG, (int)xp, (int)yp, (int)zp, rx, ry, rz, dims_G); + ivg = (int)trilinear_A(imageG, (int)dx, (int)dy, (int)dz, rx, ry, rz, dims_G); + //ivg = (int)trilinear_A(imageG, (int)xp, (int)yp, (int)zp, rx, ry, rz, dims_G); /* ivf will be < 255 as 8 bit data and trilinear doesn't ring */ H[ivf+256*ivg] += 1.0 - delta; if(ivf < 255){ @@ -191,7 +193,6 @@ } - int NI_Histogram2DLite(int layersF, int rowsF, int colsF, int layersG, int rowsG, int colsG, int *dimSteps, double *M, unsigned char *imageG, unsigned char *imageF, double *H) { @@ -303,3 +304,113 @@ } +int NI_LinearResample(int layersF, int rowsF, int colsF, int layersG, int rowsG, int colsG, + int *dimSteps, double *M, unsigned char *imageG, unsigned char *imageF) +{ + + int i; + int status; + int sliceG; + int rowG; + int sliceSizeG; + int dimsF[3]; + int dimsG[3]; + int dims[2]; + int ivf, ivg; + float vf, delta; + float x, y, z; + float xp, yp, zp; + float dx, dy, dz; + + int ptr_x0; + int ptr_y0; + int ptr_z0; + int ptr_x1; + int ptr_y1; + int ptr_z1; + // + // Vxyz for [0,1] values of x, y, z + // + int V000; + int V100; + int V010; + int V001; + int V011; + int V101; + int V110; + int V111; + float valueXYZ; + + // + // G is fixed; F is rotated + // + sliceSizeG = rowsG * colsG; + dimsF[0] = colsF; + dimsF[1] = rowsF; + dimsF[2] = layersF; + dimsG[0] = colsG; + dimsG[1] = rowsG; + dimsG[2] = layersG; + + dims[0] = dimsF[0]; + dims[1] = dimsF[0]*dimsF[1]; + + for(z = 0.0; z < layersG-dimSteps[2]-1; z += dimSteps[2]){ + sliceG = (int)z * sliceSizeG; + for(y = 0.0; y < rowsG-dimSteps[1]-1; y += dimSteps[1]){ + rowG = (int)y * colsG; + for(x = 0.0; x < colsG-dimSteps[0]-1; x += dimSteps[0]){ + // get the 'from' coordinates + xp = M[0]*x + M[1]*y + M[2]*z + M[3]; + yp = M[4]*x + M[5]*y + M[6]*z + M[7]; + zp = M[8]*x + M[9]*y + M[10]*z + M[11]; + // clip the resample window + if((zp >= 0.0 && zp < 
layersF-dimSteps[2]) && + (yp >= 0.0 && yp < rowsF-dimSteps[1]) && + (xp >= 0.0 && xp < colsF-dimSteps[0])){ + + // corners of the 3D unit volume cube + ptr_z0 = (int)zp * dims[1]; + ptr_z1 = ptr_z0 + dims[1]; + ptr_y0 = (int)yp * dims[0]; + ptr_y1 = ptr_y0 + dims[0]; + ptr_x0 = (int)xp; + ptr_x1 = ptr_x0 + 1; + dx = xp - (int)xp; + dy = yp - (int)yp; + dz = zp - (int)zp; + + // imageF IS rotated. sample the rotated xp,yp,zp + // and stored in imageG + V000 = imageF[ptr_x0+ptr_y0+ptr_z0]; + V100 = imageF[ptr_x1+ptr_y0+ptr_z0]; + V010 = imageF[ptr_x0+ptr_y1+ptr_z0]; + V001 = imageF[ptr_x0+ptr_y0+ptr_z1]; + V011 = imageF[ptr_x0+ptr_y1+ptr_z1]; + V101 = imageF[ptr_x1+ptr_y0+ptr_z1]; + V110 = imageF[ptr_x1+ptr_y1+ptr_z0]; + V111 = imageF[ptr_x1+ptr_y1+ptr_z1]; + + vf = V000 * (1.0-dx) * (1.0 - dy) * (1.0 - dz) + + V100 * (dx) * (1.0 - dy) * (1.0 - dz) + + V010 * (1.0-dx) * (dy) * (1.0 - dz) + + V001 * (1.0-dx) * (1.0 - dy) * (dz) + + V101 * (dx) * (1.0 - dy) * (dz) + + V011 * (1.0-dx) * (dy) * (dz) + + V110 * (dx) * (dy) * (1.0 - dz) + + V111 * (dx) * (dy) * (dz); + + imageG[sliceG+rowG+(int)x] = (int)vf; + + } + } + } + } + + status = 1; + + return status; + +} + + From scipy-svn at scipy.org Tue Feb 5 20:18:30 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Tue, 5 Feb 2008 19:18:30 -0600 (CST) Subject: [Scipy-svn] r3896 - trunk/scipy/ndimage Message-ID: <20080206011830.79BAB39C0CF@new.scipy.org> Author: tom.waite Date: 2008-02-05 19:18:26 -0600 (Tue, 05 Feb 2008) New Revision: 3896 Modified: trunk/scipy/ndimage/registration.py Log: New functionality for NIPY registration. Modified: trunk/scipy/ndimage/registration.py =================================================================== --- trunk/scipy/ndimage/registration.py 2008-02-06 01:17:39 UTC (rev 3895) +++ trunk/scipy/ndimage/registration.py 2008-02-06 01:18:26 UTC (rev 3896) @@ -8,36 +8,50 @@ import time # anatomical MRI to test with -# test registration on same image (optimal vector is (0,0,0,0,0,0) inputname = 'ANAT1_V0001.img' filename = os.path.join(os.path.split(__file__)[0], inputname) -def python_coreg(ftype=2, smimage=0, lite=1, smhist=0, method='mi', opt_method='powell'): - # get_images is testing with 2 copies of anatomical MRI - image1, image2, imdata = get_images(ftype, smimage) +def get_mappings(parm_vector): + # get the inverse mapping to rotate the G matrix to F space following registration + M_foreward = build_rotate_matrix(parm_vector) + M_inverse = N.linalg.inv(M_foreward) + return M_foreward, M_inverse + +def python_coreg(image1, image2, imdata, ftype=1, smimage=0, lite=0, smhist=0, + method='nmi', opt_method='powell'): + # image1 is imageF and image2 is imageG in SPM lingo + # get these from get_test_images for the debug work start = time.time() + # smooth of the images + if smimage: + image_F_xyz1 = filter_image_3D(image1['data'], image1['fwhm'], ftype) + image1['data'] = image_F_xyz1 + image_F_xyz2 = filter_image_3D(image2['data'], image2['fwhm'], ftype) + image2['data'] = image_F_xyz2 parm_vector = multires_registration(image1, image2, imdata, lite, smhist, method, opt_method) stop = time.time() print 'Total Optimizer Time is ', (stop-start) - return parm_vector -def get_images(ftype, smimage): +def get_test_images(alpha=0.0, beta=0.0, gamma=0.0): image1 = load_image() - image2 = load_image() - imdata = build_structs() + image2 = load_blank_image() + imdata = build_structs(step=1) + # allow the G image to be rotated for testing + imdata['parms'][0] = alpha + imdata['parms'][1] = beta + 
imdata['parms'][2] = gamma image1['fwhm'] = build_fwhm(image1['mat'], imdata['step']) image2['fwhm'] = build_fwhm(image2['mat'], imdata['step']) - if smimage: - image_F_xyz1 = filter_image_3D(image1['data'], image1['fwhm'], ftype) - image1['data'] = image_F_xyz1 - image_F_xyz2 = filter_image_3D(image2['data'], image2['fwhm'], ftype) - image2['data'] = image_F_xyz2 - + M = build_rotate_matrix(imdata['parms']) + R.register_linear_resample(image1['data'], image2['data'], M, imdata['step']) return image1, image2, imdata def multires_registration(image1, image2, imdata, lite, smhist, method, opt_method): ret_histo=0 + # zero out the start parameter; but this may be set to large values + # if the head is out of range and well off the optimal alignment skirt + imdata['parms'][0:5] = 0.0 # make the step a scalar to can put in a multi-res loop loop = range(imdata['sample'].size) x = imdata['parms'] @@ -54,35 +68,31 @@ print 'CG multi-res registration step size ', step print 'vector ', x x = OPT.fmin_cg(optimize_function, x, args=p_args, callback=callback_cg) + elif opt_method=='hybrid': + if i==0: + print 'Hybrid POWELL multi-res registration step size ', step + print 'vector ', x + lite = 0 + optfunc_args = (image1, image2, imdata['step'], imdata['fwhm'], lite, smhist, method, ret_histo) + p_args = (optfunc_args,) + x = OPT.fmin_powell(optimize_function, x, args=p_args, callback=callback_powell) + elif i==1: + print 'Hybrid CG multi-res registration step size ', step + print 'vector ', x + lite = 1 + optfunc_args = (image1, image2, imdata['step'], imdata['fwhm'], lite, smhist, method, ret_histo) + p_args = (optfunc_args,) + x = OPT.fmin_cg(optimize_function, x, args=p_args, callback=callback_cg) return x def test_image_filter(image, imdata, ftype=2): + # test the 3D image filter on an image. 
ftype 1 is SPM, ftype 2 is simple Gaussian image['fwhm'] = build_fwhm(image['mat'], imdata['step']) filt_image = filter_image_3D(image['data'], image['fwhm'], ftype) return filt_image -def test_optimizers(step=2, smooth=0, shist=0): - opt_stats = {} - print 'powell with stochastic resampling' - x_0, p_time_0 = optimizer_powell(lite=0, smimage=smooth, smhist=shist, stepsize=step) - opt_stats[0] = {'parms' : x_0, 'time' : p_time_0, - 'label' : 'powell with stochastic resampling'} - print 'powell without stochastic resampling' - x_1, p_time_1 = optimizer_powell(lite=1, smimage=smooth, smhist=shist, stepsize=step) - opt_stats[1] = {'parms' : x_1, 'time' : p_time_1, - 'label' : 'powell without stochastic resampling'} - print 'conjugate gradient with stochastic resampling' - xcg_0, cg_time_0 = optimizer_cg(lite=0, smimage=smooth, smhist=shist, stepsize=step) - opt_stats[2] = {'parms' : xcg_0, 'time' : cg_time_0, - 'label' : 'conjugate gradient with stochastic resampling'} - print 'conjugate gradient without stochastic resampling' - xcg_1, cg_time_1 = optimizer_cg(lite=1, smimage=smooth, smhist=shist, stepsize=step) - opt_stats[3] = {'parms' : xcg_1, 'time' : cg_time_1, - 'label' : 'conjugate gradient without stochastic resampling'} - return opt_stats - def callback_powell(x): print 'Parameter Vector from Powell: - ' print x @@ -93,74 +103,15 @@ print x return -def optimizer_powell(lite=0, smhist=0, smimage=1, method='mi', ftype=2, stepsize=2): - # test the Powell registration on the anatomical MRI volume - image1 = load_image() - image2 = load_image() - imdata = build_structs(step=stepsize) - image1['fwhm'] = build_fwhm(image1['mat'], imdata['step']) - image2['fwhm'] = build_fwhm(image2['mat'], imdata['step']) - M = build_rotate_matrix(imdata['parms']) - if smimage: - image_F_xyz1 = filter_image_3D(image1['data'], image1['fwhm'], ftype) - image1['data'] = image_F_xyz1 - image_F_xyz2 = filter_image_3D(image2['data'], image2['fwhm'], ftype) - image2['data'] = image_F_xyz2 - - ret_histo=0 - optfunc_args = (image1, image2, imdata['step'], imdata['fwhm'], lite, smhist, method, ret_histo) - p_args = (optfunc_args,) - start = time.time() - x = OPT.fmin_powell(optimize_function, imdata['parms'], args=p_args, callback=callback_powell) - stop = time.time() - return x, (stop-start) - - -def optimizer_cg(lite=0, smhist=0, smimage=1, method='mi', ftype=2, stepsize=2): - # test the CG registration on the anatomical MRI volume - image1 = load_image() - image2 = load_image() - imdata = build_structs(step=stepsize) - image1['fwhm'] = build_fwhm(image1['mat'], imdata['step']) - image2['fwhm'] = build_fwhm(image2['mat'], imdata['step']) - M = build_rotate_matrix(imdata['parms']) - if smimage: - image_F_xyz1 = filter_image_3D(image1['data'], image1['fwhm'], ftype) - image1['data'] = image_F_xyz1 - image_F_xyz2 = filter_image_3D(image2['data'], image2['fwhm'], ftype) - image2['data'] = image_F_xyz2 - - ret_histo=0 - optfunc_args = (image1, image2, imdata['step'], imdata['fwhm'], lite, smhist, method, ret_histo) - p_args = (optfunc_args,) - start = time.time() - x = OPT.fmin_cg(optimize_function, imdata['parms'], args=p_args, callback=callback_cg) - stop = time.time() - return x, (stop-start) - - -def reg_single_pass(lite=0, smhist=0, smimage=0, method='mi', ftype=2, alpha=0.0, - beta=0.0, gamma=0.0, Tx=0.0, Ty=0.0, Tz=0.0, ret_histo=0, stepsize=2): - image1 = load_image() - image2 = load_image() - imdata = build_structs(step=stepsize) +def test_alignment(image1, image2, imdata, method='ncc', lite=0, smhist=0, + 
alpha=0.0, beta=0.0, gamma=0.0, ret_histo=0): + + # to test the cost function and view the joint histogram + # for 2 images. used for debug imdata['parms'][0] = alpha imdata['parms'][1] = beta imdata['parms'][2] = gamma - imdata['parms'][3] = Tx - imdata['parms'][4] = Ty - imdata['parms'][5] = Tz - image1['fwhm'] = build_fwhm(image1['mat'], imdata['step']) - image2['fwhm'] = build_fwhm(image2['mat'], imdata['step']) - print 'image1[fwhm] ', image1['fwhm'] - print 'image2[fwhm] ', image2['fwhm'] M = build_rotate_matrix(imdata['parms']) - if smimage: - image_F_xyz1 = filter_image_3D(image1['data'], image1['fwhm'], ftype) - image1['data'] = image_F_xyz1 - image_F_xyz2 = filter_image_3D(image2['data'], image2['fwhm'], ftype) - image2['data'] = image_F_xyz2 - optfunc_args = (image1, image2, imdata['step'], imdata['fwhm'], lite, smhist, method, ret_histo) if ret_histo: @@ -210,6 +161,34 @@ image_F_xyz = NDI.correlate1d(image_F_xy, kernel_z, axis, output) return image_F_xyz + +def resample_image(smimage=0, ftype=2, alpha=0.0, beta=0.0, gamma=0.0, + Tx=0.0, Ty=0.0, Tz=0.0, stepsize=1): + + # takes an image and 3D rotate using trilinear interpolation + image1 = load_image() + image2 = load_blank_image() + imdata = build_structs(step=stepsize) + imdata['parms'][0] = alpha + imdata['parms'][1] = beta + imdata['parms'][2] = gamma + imdata['parms'][3] = Tx + imdata['parms'][4] = Ty + imdata['parms'][5] = Tz + image1['fwhm'] = build_fwhm(image1['mat'], imdata['step']) + image2['fwhm'] = build_fwhm(image2['mat'], imdata['step']) + M = build_rotate_matrix(imdata['parms']) + if smimage: + image_F_xyz1 = filter_image_3D(image1['data'], image1['fwhm'], ftype) + image1['data'] = image_F_xyz1 + image_F_xyz2 = filter_image_3D(image2['data'], image2['fwhm'], ftype) + image2['data'] = image_F_xyz2 + + R.register_linear_resample(image1['data'], image2['data'], M, imdata['step']) + + return image2 + + def build_fwhm(M, S): view_3x3 = N.square(M[0:3, 0:3]) vxg = N.sqrt(view_3x3.sum(axis=0)) @@ -243,6 +222,22 @@ image = {'data' : ImageVolume, 'mat' : M, 'dim' : D, 'fwhm' : F} return image +def load_blank_image(rows=256, cols=256, layers=90): + ImageVolume = N.zeros(layers*rows*cols, dtype=N.uint16).reshape(layers, rows, cols); + # voxel to pixel is identity for this simulation using anatomical MRI volume + # 4x4 matrix + M = N.eye(4, dtype=N.float64); + # dimensions + D = N.zeros(3, dtype=N.int32); + # Gaussian kernel - fill in with build_fwhm() + F = N.zeros(3, dtype=N.float64); + D[0] = rows + D[1] = cols + D[2] = layers + # make sure the data type is uchar + ImageVolume = ImageVolume.astype(N.uint8) + image = {'data' : ImageVolume, 'mat' : M, 'dim' : D, 'fwhm' : F} + return image def optimize_function(x, optfunc_args): image_F = optfunc_args[0] @@ -257,8 +252,8 @@ rot_matrix = build_rotate_matrix(x) cost = 0.0 epsilon = 2.2e-16 - # image_F is base image - # image_G is the rotated image + # image_G is base image + # image_F is the to-be-rotated image # rot_matrix is the 4x4 constructed (current angles and translates) transform matrix # sample_vector is the subsample vector for x-y-z @@ -274,6 +269,7 @@ else: R.register_histogram(image_F['data'], image_G['data'], composite, sample_vector, joint_histogram) + # smooth the histogram if smooth: p = N.ceil(2*fwhm[0]).astype(int) x = N.array(range(-p, p+1)) @@ -366,9 +362,9 @@ T[3] = 0.001 T[4] = 0.001 T[5] = 0.001 - # P[0] = alpha <=> pitch - # P[1] = beta <=> roll - # P[2] = gamma <=> yaw + # P[0] = alpha <=> pitch. 
+ alpha is moving back in the sagittal plane + # P[1] = beta <=> roll. + beta is moving right in the coronal plane + # P[2] = gamma <=> yaw. + gamma is right turn in the transverse plane # P[3] = Tx # P[4] = Ty # P[5] = Tz From scipy-svn at scipy.org Wed Feb 6 12:57:19 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Wed, 6 Feb 2008 11:57:19 -0600 (CST) Subject: [Scipy-svn] r3897 - in trunk/scipy: sandbox splinalg splinalg/eigen splinalg/eigen/arpack splinalg/eigen/arpack/tests Message-ID: <20080206175719.50DF839C350@new.scipy.org> Author: hagberg Date: 2008-02-06 11:56:43 -0600 (Wed, 06 Feb 2008) New Revision: 3897 Added: trunk/scipy/splinalg/eigen/arpack/ trunk/scipy/splinalg/eigen/arpack/ARPACK/ trunk/scipy/splinalg/eigen/arpack/README trunk/scipy/splinalg/eigen/arpack/__init__.py trunk/scipy/splinalg/eigen/arpack/arpack.py trunk/scipy/splinalg/eigen/arpack/arpack.pyf.src trunk/scipy/splinalg/eigen/arpack/info.py trunk/scipy/splinalg/eigen/arpack/setup.py trunk/scipy/splinalg/eigen/arpack/speigs.py trunk/scipy/splinalg/eigen/arpack/tests/ Removed: trunk/scipy/sandbox/arpack/ trunk/scipy/splinalg/eigen/arpack/ARPACK/ trunk/scipy/splinalg/eigen/arpack/README trunk/scipy/splinalg/eigen/arpack/__init__.py trunk/scipy/splinalg/eigen/arpack/arpack.py trunk/scipy/splinalg/eigen/arpack/arpack.pyf.src trunk/scipy/splinalg/eigen/arpack/info.py trunk/scipy/splinalg/eigen/arpack/setup.py trunk/scipy/splinalg/eigen/arpack/speigs.py trunk/scipy/splinalg/eigen/arpack/tests/ Modified: trunk/scipy/splinalg/__init__.py trunk/scipy/splinalg/eigen/arpack/tests/test_arpack.py trunk/scipy/splinalg/setup.py Log: Move arpack sparse eigenvalue solver from sandbox to splinalg.eigen Also: Fixes #554 and addresses #231 (the part related to the patch for returning wrong number of eigenvalues). Improves docstrings, code comments, implementation notes Uses new LinearOperator Modified: trunk/scipy/splinalg/__init__.py =================================================================== --- trunk/scipy/splinalg/__init__.py 2008-02-06 01:18:26 UTC (rev 3896) +++ trunk/scipy/splinalg/__init__.py 2008-02-06 17:56:43 UTC (rev 3897) @@ -5,7 +5,9 @@ from isolve import * from dsolve import * from interface import * +from eigen import * + __all__ = filter(lambda s:not s.startswith('_'),dir()) from scipy.testing.pkgtester import Tester test = Tester().test Copied: trunk/scipy/splinalg/eigen/arpack (from rev 3876, trunk/scipy/sandbox/arpack) Copied: trunk/scipy/splinalg/eigen/arpack/ARPACK (from rev 3896, trunk/scipy/sandbox/arpack/ARPACK) Deleted: trunk/scipy/splinalg/eigen/arpack/README =================================================================== --- trunk/scipy/sandbox/arpack/README 2008-01-30 17:05:38 UTC (rev 3876) +++ trunk/scipy/splinalg/eigen/arpack/README 2008-02-06 17:56:43 UTC (rev 3897) @@ -1,98 +0,0 @@ -This is the ARPACK package from -http://www.caam.rice.edu/software/ARPACK/ - -Specifically the files are from -http://www.caam.rice.edu/software/ARPACK/SRC/arpack96.tar.gz -with the patch -http://www.caam.rice.edu/software/ARPACK/SRC/patch.tar.gz - -The ARPACK README is at -http://www.caam.rice.edu/software/ARPACK/SRC/readme.arpack - ---- - -ARPACK is a collection of Fortran77 subroutines designed to solve large -scale eigenvalue problems. - -The package is designed to compute a few eigenvalues and corresponding -eigenvectors of a general n by n matrix A. 
It is most appropriate for large -sparse or structured matrices A where structured means that a matrix-vector -product w <- Av requires order n rather than the usual order n**2 floating -point operations. This software is based upon an algorithmic variant of the -Arnoldi process called the Implicitly Restarted Arnoldi Method (IRAM). When -the matrix A is symmetric it reduces to a variant of the Lanczos process -called the Implicitly Restarted Lanczos Method (IRLM). These variants may be -viewed as a synthesis of the Arnoldi/Lanczos process with the Implicitly -Shifted QR technique that is suitable for large scale problems. For many -standard problems, a matrix factorization is not required. Only the action -of the matrix on a vector is needed. ARPACK software is capable of solving -large scale symmetric, nonsymmetric, and generalized eigenproblems from -significant application areas. The software is designed to compute a few (k) -eigenvalues with user specified features such as those of largest real part -or largest magnitude. Storage requirements are on the order of n*k locations. -No auxiliary storage is required. A set of Schur basis vectors for the desired -k-dimensional eigen-space is computed which is numerically orthogonal to working -precision. Numerically accurate eigenvectors are available on request. - -Important Features: - - o Reverse Communication Interface. - o Single and Double Precision Real Arithmetic Versions for Symmetric, - Non-symmetric, Standard or Generalized Problems. - o Single and Double Precision Complex Arithmetic Versions for Standard - or Generalized Problems. - o Routines for Banded Matrices - Standard or Generalized Problems. - o Routines for The Singular Value Decomposition. - o Example driver routines that may be used as templates to implement - numerous Shift-Invert strategies for all problem types, data types - and precision. - ---- - -The ARPACK license is BSD-like. -http://www.caam.rice.edu/software/ARPACK/RiceBSD.doc - ---- - -Rice BSD Software License -Permits source and binary redistribution of the software ARPACK and -P_ARPACK for both non-commercial and commercial use. - - Copyright (?) 2001, Rice University - Developed by D.C. Sorensen, R.B. Lehoucq, C. Yang, and K. Maschhoff. - All rights reserved. - - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - . Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. - . Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - . If you modify the source for these routines we ask that you change the - name of the routine and comment the changes made to the original. - . Written notification is provided to the developers of intent to use - this software. Also, we ask that use of ARPACK is properly cited in - any resulting publications or software documentation. - . Neither the name of Rice University (RICE) nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - - -THIS SOFTWARE IS PROVIDED BY RICE AND CONTRIBUTORS "AS IS" AND ANY EXPRESS -OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. 
IN NO EVENT SHALL RICE OR CONTRIBUTORS BE LIABLE FOR ANY -DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY -OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH -DAMAGE. - - - - Copied: trunk/scipy/splinalg/eigen/arpack/README (from rev 3896, trunk/scipy/sandbox/arpack/README) Deleted: trunk/scipy/splinalg/eigen/arpack/__init__.py =================================================================== --- trunk/scipy/sandbox/arpack/__init__.py 2008-01-30 17:05:38 UTC (rev 3876) +++ trunk/scipy/splinalg/eigen/arpack/__init__.py 2008-02-06 17:56:43 UTC (rev 3897) @@ -1,3 +0,0 @@ -from info import __doc__ -from arpack import * -import speigs Copied: trunk/scipy/splinalg/eigen/arpack/__init__.py (from rev 3896, trunk/scipy/sandbox/arpack/__init__.py) Deleted: trunk/scipy/splinalg/eigen/arpack/arpack.py =================================================================== --- trunk/scipy/sandbox/arpack/arpack.py 2008-01-30 17:05:38 UTC (rev 3876) +++ trunk/scipy/splinalg/eigen/arpack/arpack.py 2008-02-06 17:56:43 UTC (rev 3897) @@ -1,379 +0,0 @@ -""" -arpack - Scipy module to find a few eigenvectors and eigenvalues of a matrix - -Uses ARPACK: http://www.caam.rice.edu/software/ARPACK/ - -""" -__all___=['eigen','eigen_symmetric'] - -import _arpack -import numpy as sb -import warnings - -# inspired by iterative.py -# so inspired, in fact, that some of it was copied directly -try: - False, True -except NameError: - False, True = 0, 1 - -_type_conv = {'f':'s', 'd':'d', 'F':'c', 'D':'z'} - -class get_matvec: - methname = 'matvec' - def __init__(self, obj, *args): - self.obj = obj - self.args = args - if isinstance(obj, sb.matrix): - self.callfunc = self.type1m - return - if isinstance(obj, sb.ndarray): - self.callfunc = self.type1 - return - meth = getattr(obj,self.methname,None) - if not callable(meth): - raise ValueError, "Object must be an array "\ - "or have a callable %s attribute." % (self.methname,) - - self.obj = meth - self.callfunc = self.type2 - - def __call__(self, x): - return self.callfunc(x) - - def type1(self, x): - return sb.dot(self.obj, x) - - def type1m(self, x): - return sb.dot(self.obj.A, x) - - def type2(self, x): - return self.obj(x,*self.args) - - -def eigen(A,k=6,M=None,ncv=None,which='LM', - maxiter=None,tol=0, return_eigenvectors=True): - """ Return k eigenvalues and eigenvectors of the matrix A. - - Solves A * x[i] = w[i] * x[i], the standard eigenvalue problem for - w[i] eigenvalues with corresponding eigenvectors x[i]. - - Inputs: - - A -- A matrix, array or an object with matvec(x) method to perform - the matrix vector product A * x. The sparse matrix formats - in scipy.sparse are appropriate for A. - - k -- The number of eigenvalue/eigenvectors desired - - M -- (Not implemented) - A symmetric positive-definite matrix for the generalized - eigenvalue problem A * x = w * M * x - - Outputs: - - w -- An array of k eigenvalues - - v -- An array of k eigenvectors, k[i] is the eigenvector corresponding - to the eigenvector w[i] - - Optional Inputs: - - ncv -- Number of Lanczos vectors generated, ncv must be greater than k - and is recommended to be ncv > 2*k - - which -- String specifying which eigenvectors to compute. 
- Compute the k eigenvalues of: - 'LM' - largest magnitude. - 'SM' - smallest magnitude. - 'LR' - largest real part. - 'SR' - smallest real part. - 'LI' - largest imaginary part. - 'SI' - smallest imaginary part. - - maxiter -- Maximum number of Arnoldi update iterations allowed - - tol -- Relative accuracy for eigenvalues (stopping criterion) - - return_eigenvectors -- True|False, return eigenvectors - - """ - try: - n,ny=A.shape - n==ny - except: - raise AttributeError("matrix is not square") - if M is not None: - raise NotImplementedError("generalized eigenproblem not supported yet") - - # some defaults - if ncv is None: - ncv=2*k+1 - ncv=min(ncv,n) - if maxiter==None: - maxiter=n*10 - - # guess type - resid = sb.zeros(n,'f') - try: - typ = A.dtype.char - except AttributeError: - typ = A.matvec(resid).dtype.char - if typ not in 'fdFD': - raise ValueError("matrix type must be 'f', 'd', 'F', or 'D'") - - # some sanity checks - if k <= 0: - raise ValueError("k must be positive, k=%d"%k) - if k == n: - raise ValueError("k must be less than rank(A), k=%d"%k) - if maxiter <= 0: - raise ValueError("maxiter must be positive, maxiter=%d"%maxiter) - whiches=['LM','SM','LR','SR','LI','SI'] - if which not in whiches: - raise ValueError("which must be one of %s"%' '.join(whiches)) - if ncv > n or ncv < k: - raise ValueError("ncv must be k<=ncv<=n, ncv=%s"%ncv) - - # assign solver and postprocessor - ltr = _type_conv[typ] - eigsolver = _arpack.__dict__[ltr+'naupd'] - eigextract = _arpack.__dict__[ltr+'neupd'] - matvec = get_matvec(A) - - v = sb.zeros((n,ncv),typ) # holds Ritz vectors - resid = sb.zeros(n,typ) # residual - workd = sb.zeros(3*n,typ) # workspace - workl = sb.zeros(3*ncv*ncv+6*ncv,typ) # workspace - iparam = sb.zeros(11,'int') # problem parameters - ipntr = sb.zeros(14,'int') # pointers into workspaces - info = 0 - ido = 0 - - if typ in 'FD': - rwork = sb.zeros(ncv,typ.lower()) - - # only supported mode is 1: Ax=lx - ishfts = 1 - mode1 = 1 - bmat = 'I' - iparam[0] = ishfts - iparam[2] = maxiter - iparam[6] = mode1 - - while True: - if typ in 'fd': - ido,resid,v,iparam,ipntr,info =\ - eigsolver(ido,bmat,which,k,tol,resid,v,iparam,ipntr, - workd,workl,info) - else: - ido,resid,v,iparam,ipntr,info =\ - eigsolver(ido,bmat,which,k,tol,resid,v,iparam,ipntr, - workd,workl,rwork,info) - - if (ido == -1 or ido == 1): - # compute y = A * x - xslice = slice(ipntr[0]-1, ipntr[0]-1+n) - yslice = slice(ipntr[1]-1, ipntr[1]-1+n) - workd[yslice]=matvec(workd[xslice]) - else: # done - break - - if info < -1 : - raise RuntimeError("Error info=%d in arpack"%info) - return None - if info == -1: - warnings.warn("Maximum number of iterations taken: %s"%iparam[2]) -# if iparam[3] != k: -# warnings.warn("Only %s eigenvalues converged"%iparam[3]) - - - # now extract eigenvalues and (optionally) eigenvectors - rvec = return_eigenvectors - ierr = 0 - howmny = 'A' # return all eigenvectors - sselect = sb.zeros(ncv,'int') # unused - sigmai = 0.0 # no shifts, not implemented - sigmar = 0.0 # no shifts, not implemented - workev = sb.zeros(3*ncv,typ) - - if typ in 'fd': - dr=sb.zeros(k+1,typ) - di=sb.zeros(k+1,typ) - zr=sb.zeros((n,k+1),typ) - dr,di,z,info=\ - eigextract(rvec,howmny,sselect,sigmar,sigmai,workev, - bmat,which,k,tol,resid,v,iparam,ipntr, - workd,workl,info) - - # make eigenvalues complex - d=dr+1.0j*di - # futz with the eigenvectors: - # complex are stored as real,imaginary in consecutive columns - z=zr.astype(typ.upper()) - for i in range(k): # fix c.c. 
pairs - if di[i] > 0 : - z[:,i]=zr[:,i]+1.0j*zr[:,i+1] - z[:,i+1]=z[:,i].conjugate() - - else: - d,z,info =\ - eigextract(rvec,howmny,sselect,sigmar,workev, - bmat,which,k,tol,resid,v,iparam,ipntr, - workd,workl,rwork,ierr) - - - - if ierr != 0: - raise RuntimeError("Error info=%d in arpack"%info) - return None - if return_eigenvectors: - return d,z - return d - - -def eigen_symmetric(A,k=6,M=None,ncv=None,which='LM', - maxiter=None,tol=0, return_eigenvectors=True): - """ Return k eigenvalues and eigenvectors of the real symmetric matrix A. - - Solves A * x[i] = w[i] * x[i], the standard eigenvalue problem for - w[i] eigenvalues with corresponding eigenvectors x[i]. - A must be real and symmetric. - See eigen() for nonsymmetric or complex symmetric (Hermetian) matrices. - - Inputs: - - A -- A symmetric matrix, array or an object with matvec(x) method - to perform the matrix vector product A * x. - The sparse matrix formats in scipy.sparse are appropriate for A. - - k -- The number of eigenvalue/eigenvectors desired - - M -- (Not implemented) - A symmetric positive-definite matrix for the generalized - eigenvalue problem A * x = w * M * x - - Outputs: - - w -- An real array of k eigenvalues - - v -- An array of k real eigenvectors, k[i] is the eigenvector corresponding - to the eigenvector w[i] - - Optional Inputs: - - ncv -- Number of Lanczos vectors generated, ncv must be greater than k - and is recommended to be ncv > 2*k - - which -- String specifying which eigenvectors to compute. - Compute the k - 'LA' - largest (algebraic) eigenvalues. - 'SA' - smallest (algebraic) eigenvalues. - 'LM' - largest (in magnitude) eigenvalues. - 'SM' - smallest (in magnitude) eigenvalues. - 'BE' - eigenvalues, half from each end of the - spectrum. When NEV is odd, compute one more from the - high end than from the low end. 
- - maxiter -- Maximum number of Arnoldi update iterations allowed - - tol -- Relative accuracy for eigenvalues (stopping criterion) - - return_eigenvectors -- True|False, return eigenvectors - - """ - try: - n,ny=A.shape - n==ny - except: - raise AttributeError("matrix is not square") - if M is not None: - raise NotImplementedError("generalized eigenproblem not supported yet") - if ncv is None: - ncv=2*k+1 - ncv=min(ncv,n) - if maxiter==None: - maxiter=n*10 - - - # guess type - resid = sb.zeros(n,'f') - try: - typ = A.dtype.char - except AttributeError: - typ = A.matvec(resid).dtype.char - if typ not in 'fd': - raise ValueError("matrix type must be 'f' or 'd'") - - # some sanity checks - if k <= 0: - raise ValueError("k must be positive, k=%d"%k) - if k == n: - raise ValueError("k must be less than rank(A), k=%d"%k) - if maxiter <= 0: - raise ValueError("maxiter must be positive, maxiter=%d"%maxiter) - whiches=['LM','SM','LA','SA','BE'] - if which not in whiches: - raise ValueError("which must be one of %s"%' '.join(whiches)) - if ncv > n or ncv < k: - raise ValueError("ncv must be k<=ncv<=n, ncv=%s"%ncv) - - # assign solver and postprocessor - ltr = _type_conv[typ] - eigsolver = _arpack.__dict__[ltr+'saupd'] - eigextract = _arpack.__dict__[ltr+'seupd'] - matvec = get_matvec(A) - - v = sb.zeros((n,ncv),typ) - resid = sb.zeros(n,typ) - workd = sb.zeros(3*n,typ) - workl = sb.zeros(ncv*(ncv+8),typ) - iparam = sb.zeros(11,'int') - ipntr = sb.zeros(11,'int') - info = 0 - ido = 0 - - # only supported mode is 1: Ax=lx - ishfts = 1 - mode1 = 1 - bmat='I' - iparam[0] = ishfts - iparam[2] = maxiter - iparam[6] = mode1 - - - while True: - ido,resid,v,iparam,ipntr,info =\ - eigsolver(ido,bmat,which,k,tol,resid,v,iparam,ipntr, - workd,workl,info) - if (ido == -1 or ido == 1): - xslice = slice(ipntr[0]-1, ipntr[0]-1+n) - yslice = slice(ipntr[1]-1, ipntr[1]-1+n) - workd[yslice]=matvec(workd[xslice]) - else: - break - - if info < -1 : - raise RuntimeError("Error info=%d in arpack"%info) - return None - if info == -1: - warnings.warn("Maximum number of iterations taken: %s"%iparam[2]) - - # now extract eigenvalues and (optionally) eigenvectors - rvec = return_eigenvectors - ierr = 0 - howmny = 'A' # return all eigenvectors - sselect = sb.zeros(ncv,'int') # unused - sigma = 0.0 # no shifts, not implemented - - d,z,info =\ - eigextract(rvec,howmny,sselect,sigma, - bmat,which, k,tol,resid,v,iparam[0:7],ipntr, - workd[0:2*n],workl,ierr) - - if ierr != 0: - raise RuntimeError("Error info=%d in arpack"%info) - return None - if return_eigenvectors: - return d,z - return d Copied: trunk/scipy/splinalg/eigen/arpack/arpack.py (from rev 3896, trunk/scipy/sandbox/arpack/arpack.py) =================================================================== --- trunk/scipy/sandbox/arpack/arpack.py 2008-02-06 01:18:26 UTC (rev 3896) +++ trunk/scipy/splinalg/eigen/arpack/arpack.py 2008-02-06 17:56:43 UTC (rev 3897) @@ -0,0 +1,474 @@ +""" +Find a few eigenvectors and eigenvalues of a matrix. + + +Uses ARPACK: http://www.caam.rice.edu/software/ARPACK/ + +""" +# Wrapper implementation notes +# +# ARPACK Entry Points +# ------------------- +# The entry points to ARPACK are +# - (s,d)seupd : single and double precision symmetric matrix +# - (s,d,c,z)neupd: single,double,complex,double complex general matrix +# This wrapper puts the *neupd (general matrix) interfaces in eigen() +# and the *seupd (symmetric matrix) in eigen_symmetric(). +# There is no Hermetian complex/double complex interface. 
+# To find eigenvalues of a Hermetian matrix you +# must use eigen() and not eigen_symmetric() +# It might be desirable to handle the Hermetian case differently +# and, for example, return real eigenvalues. + +# Number of eigenvalues returned and complex eigenvalues +# ------------------------------------------------------ +# The ARPACK nonsymmetric real and double interface (s,d)naupd return +# eigenvalues and eigenvectors in real (float,double) arrays. +# Since the eigenvalues and eigenvectors are, in general, complex +# ARPACK puts the real and imaginary parts in consecutive entries +# in real-valued arrays. This wrapper puts the real entries +# into complex data types and attempts to return the requested eigenvalues +# and eigenvectors. + + +# Solver modes +# ------------ +# ARPACK and handle shifted and shift-inverse computations +# for eigenvalues by providing a shift (sigma) and a solver. +# This is currently not implemented + +__docformat__ = "restructuredtext en" + +__all___=['eigen','eigen_symmetric'] + +import warnings + +import _arpack +import numpy as np +from scipy.splinalg.interface import aslinearoperator + +_type_conv = {'f':'s', 'd':'d', 'F':'c', 'D':'z'} +_ndigits = {'f':5, 'd':12, 'F':5, 'D':12} + + +def eigen(A, k=6, M=None, sigma=None, which='LM', + ncv=None, maxiter=None, tol=0, + return_eigenvectors=True): + """Find k eigenvalues and eigenvectors of the square matrix A. + + Solves A * x[i] = w[i] * x[i], the standard eigenvalue problem for + w[i] eigenvalues with corresponding eigenvectors x[i]. + + + Parameters + ---------- + A : A : matrix, array, or object with matvec(x) method + An N x N matrix, array, or an object with matvec(x) method to perform + the matrix vector product A * x. The sparse matrix formats + in scipy.sparse are appropriate for A. + + k : integer + The number of eigenvalues and eigenvectors desired + + Returns + ------- + w : array + Array of k eigenvalues + + v : array + An array of k eigenvectors + The v[i] is the eigenvector corresponding to the eigenvector w[i] + + Other Parameters + ---------------- + + M : matrix or array + (Not implemented) + A symmetric positive-definite matrix for the generalized + eigenvalue problem A * x = w * M * x + + sigma : real or complex + (Not implemented) + Find eigenvalues near sigma. Shift spectrum by sigma. 
+ + + ncv : integer + The number of Lanczos vectors generated + ncv must be greater than k; it is recommended that ncv > 2*k + + which : string + Which k eigenvectors and eigenvalues to find: + - 'LM' : largest magnitude + - 'SM' : smallest magnitude + - 'LR' : largest real part + - 'SR' : smallest real part + - 'LI' : largest imaginary part + - 'SI' : smallest imaginary part + + maxiter : integer + Maximum number of Arnoldi update iterations allowed + + tol : float + Relative accuracy for eigenvalues (stopping criterion) + + return_eigenvectors : boolean + Return eigenvectors (True) in addition to eigenvalues + + See Also + -------- + eigen_symmetric : eigenvalues and eigenvectors for symmetric matrix A + + Notes + ----- + + Examples + -------- + + """ + A = aslinearoperator(A) + + if M is not None: + raise NotImplementedError("generalized eigenproblem not supported yet") + + if A.shape[0] != A.shape[1]: + raise ValueError('expected square matrix (shape=%s)' % shape) + + n = A.shape[0] + + if M is not None: + raise NotImplementedError("generalized eigenproblem not supported yet") + + if sigma is not None: + raise NotImplementedError("shifted eigenproblem not supported yet") + + + # some defaults + if ncv is None: + ncv=2*k+1 + ncv=min(ncv,n) + if maxiter==None: + maxiter=n*10 + + # guess type + resid = np.zeros(n,'f') + try: + typ = A.dtype.char + except AttributeError: + typ = A.matvec(resid).dtype.char + if typ not in 'fdFD': + raise ValueError("matrix type must be 'f', 'd', 'F', or 'D'") + + # some sanity checks + if k <= 0: + raise ValueError("k must be positive, k=%d"%k) + if k == n: + raise ValueError("k must be less than rank(A), k=%d"%k) + if maxiter <= 0: + raise ValueError("maxiter must be positive, maxiter=%d"%maxiter) + whiches=['LM','SM','LR','SR','LI','SI'] + if which not in whiches: + raise ValueError("which must be one of %s"%' '.join(whiches)) + if ncv > n or ncv < k: + raise ValueError("ncv must be k<=ncv<=n, ncv=%s"%ncv) + + # assign solver and postprocessor + ltr = _type_conv[typ] + eigsolver = _arpack.__dict__[ltr+'naupd'] + eigextract = _arpack.__dict__[ltr+'neupd'] + matvec = A.matvec + + v = np.zeros((n,ncv),typ) # holds Ritz vectors + resid = np.zeros(n,typ) # residual + workd = np.zeros(3*n,typ) # workspace + workl = np.zeros(3*ncv*ncv+6*ncv,typ) # workspace + iparam = np.zeros(11,'int') # problem parameters + ipntr = np.zeros(14,'int') # pointers into workspaces + info = 0 + ido = 0 + + if typ in 'FD': + rwork = np.zeros(ncv,typ.lower()) + + # only supported mode is 1: Ax=lx + ishfts = 1 + mode1 = 1 + bmat = 'I' + iparam[0] = ishfts + iparam[2] = maxiter + iparam[6] = mode1 + + while True: + if typ in 'fd': + ido,resid,v,iparam,ipntr,info =\ + eigsolver(ido,bmat,which,k,tol,resid,v,iparam,ipntr, + workd,workl,info) + else: + ido,resid,v,iparam,ipntr,info =\ + eigsolver(ido,bmat,which,k,tol,resid,v,iparam,ipntr, + workd,workl,rwork,info) + + if (ido == -1 or ido == 1): + # compute y = A * x + xslice = slice(ipntr[0]-1, ipntr[0]-1+n) + yslice = slice(ipntr[1]-1, ipntr[1]-1+n) + workd[yslice]=matvec(workd[xslice]) + else: # done + break + + if info < -1 : + raise RuntimeError("Error info=%d in arpack"%info) + return None + if info == -1: + warnings.warn("Maximum number of iterations taken: %s"%iparam[2]) +# if iparam[3] != k: +# warnings.warn("Only %s eigenvalues converged"%iparam[3]) + + + # now extract eigenvalues and (optionally) eigenvectors + rvec = return_eigenvectors + ierr = 0 + howmny = 'A' # return all eigenvectors + sselect = np.zeros(ncv,'int') # unused 
+ sigmai = 0.0 # no shifts, not implemented + sigmar = 0.0 # no shifts, not implemented + workev = np.zeros(3*ncv,typ) + + if typ in 'fd': + dr=np.zeros(k+1,typ) + di=np.zeros(k+1,typ) + zr=np.zeros((n,k+1),typ) + dr,di,zr,info=\ + eigextract(rvec,howmny,sselect,sigmar,sigmai,workev, + bmat,which,k,tol,resid,v,iparam,ipntr, + workd,workl,info) + + # The ARPACK nonsymmetric real and double interface (s,d)naupd return + # eigenvalues and eigenvectors in real (float,double) arrays. + + # Build complex eigenvalues from real and imaginary parts + d=dr+1.0j*di + + # Arrange the eigenvectors: complex eigenvectors are stored as + # real,imaginary in consecutive columns + z=zr.astype(typ.upper()) + eps=np.finfo(typ).eps + i=0 + while i<=k: + # check if complex + if abs(d[i].imag)>eps: + # assume this is a complex conjugate pair with eigenvalues + # in consecutive columns + z[:,i]=zr[:,i]+1.0j*zr[:,i+1] + z[:,i+1]=z[:,i].conjugate() + i+=1 + i+=1 + + # Now we have k+1 eigenvalues and eigenvectors + # Return the ones specified by the keyword "which" + + # cut at approx precision for sorting + rd=np.round(d,decimals=_ndigits[typ]) + if which in ['LR','SR']: + ind=np.argsort(rd.real) + elif which in ['LI','SI']: + # for LI,SI ARPACK returns largest,smallest abs(imaginary) why? + ind=np.argsort(abs(rd.imag)) + else: + ind=np.argsort(abs(rd)) + if which in ['LR','LM','LI']: + d=d[ind[-k:]] + z=z[:,ind[-k:]] + if which in ['SR','SM','SI']: + d=d[ind[:k]] + z=z[:,ind[:k]] + + + else: + # complex is so much simpler... + d,z,info =\ + eigextract(rvec,howmny,sselect,sigmar,workev, + bmat,which,k,tol,resid,v,iparam,ipntr, + workd,workl,rwork,ierr) + + + + if ierr != 0: + raise RuntimeError("Error info=%d in arpack"%info) + return None + if return_eigenvectors: + return d,z + return d + + +def eigen_symmetric(A, k=6, M=None, sigma=None, which='LM', + ncv=None, maxiter=None, tol=0, v0=None, + return_eigenvectors=True): + """Find k eigenvalues and eigenvectors of the real symmetric + square matrix A. + + Solves A * x[i] = w[i] * x[i], the standard eigenvalue problem for + w[i] eigenvalues with corresponding eigenvectors x[i]. + + + Parameters + ---------- + A : matrix or array with real entries or object with matvec(x) method + An N x N real symmetric matrix or array or an object with matvec(x) + method to perform the matrix vector product A * x. The sparse + matrix formats in scipy.sparse are appropriate for A. + + k : integer + The number of eigenvalues and eigenvectors desired + + Returns + ------- + w : array + Array of k eigenvalues + + v : array + An array of k eigenvectors + The v[i] is the eigenvector corresponding to the eigenvector w[i] + + Other Parameters + ---------------- + M : matrix or array + (Not implemented) + A symmetric positive-definite matrix for the generalized + eigenvalue problem A * x = w * M * x + + + sigma : real + (Not implemented) + Find eigenvalues near sigma. Shift spectrum by sigma. 
+ + + ncv : integer + The number of Lanczos vectors generated + ncv must be greater than k; it is recommended that ncv > 2*k + + which : string + Which k eigenvectors and eigenvalues to find: + - 'LA' : Largest (algebraic) eigenvalues + - 'SA' : Smallest (algebraic) eigenvalues + - 'LM' : Largest (in magnitude) eigenvalues + - 'SM' : Smallest (in magnitude) eigenvalues + - 'BE' : Half (k/2) from each end of the spectrum + When k is odd, return one more (k/2+1) from the high end + + maxiter : integer + Maximum number of Arnoldi update iterations allowed + + tol : float + Relative accuracy for eigenvalues (stopping criterion) + + return_eigenvectors : boolean + Return eigenvectors (True) in addition to eigenvalues + + See Also + -------- + eigen : eigenvalues and eigenvectors for a general (nonsymmetric) matrix A + + Notes + ----- + + Examples + -------- + """ + A = aslinearoperator(A) + + if A.shape[0] != A.shape[1]: + raise ValueError('expected square matrix (shape=%s)' % shape) + + n = A.shape[0] + + if M is not None: + raise NotImplementedError("generalized eigenproblem not supported yet") + if sigma is not None: + raise NotImplementedError("shifted eigenproblem not supported yet") + if ncv is None: + ncv=2*k+1 + ncv=min(ncv,n) + if maxiter==None: + maxiter=n*10 + + + # guess type + resid = np.zeros(n,'f') + try: + typ = A.dtype.char + except AttributeError: + typ = A.matvec(resid).dtype.char + if typ not in 'fd': + raise ValueError("matrix must be real valued (type must be 'f' or 'd')") + + # some sanity checks + if k <= 0: + raise ValueError("k must be positive, k=%d"%k) + if k == n: + raise ValueError("k must be less than rank(A), k=%d"%k) + if maxiter <= 0: + raise ValueError("maxiter must be positive, maxiter=%d"%maxiter) + whiches=['LM','SM','LA','SA','BE'] + if which not in whiches: + raise ValueError("which must be one of %s"%' '.join(whiches)) + if ncv > n or ncv < k: + raise ValueError("ncv must be k<=ncv<=n, ncv=%s"%ncv) + + # assign solver and postprocessor + ltr = _type_conv[typ] + eigsolver = _arpack.__dict__[ltr+'saupd'] + eigextract = _arpack.__dict__[ltr+'seupd'] + matvec = A.matvec + + v = np.zeros((n,ncv),typ) + resid = np.zeros(n,typ) + workd = np.zeros(3*n,typ) + workl = np.zeros(ncv*(ncv+8),typ) + iparam = np.zeros(11,'int') + ipntr = np.zeros(11,'int') + info = 0 + ido = 0 + + # only supported mode is 1: Ax=lx + ishfts = 1 + mode1 = 1 + bmat='I' + iparam[0] = ishfts + iparam[2] = maxiter + iparam[6] = mode1 + + while True: + ido,resid,v,iparam,ipntr,info =\ + eigsolver(ido,bmat,which,k,tol,resid,v,iparam,ipntr, + workd,workl,info) + if (ido == -1 or ido == 1): + xslice = slice(ipntr[0]-1, ipntr[0]-1+n) + yslice = slice(ipntr[1]-1, ipntr[1]-1+n) + workd[yslice]=matvec(workd[xslice]) + else: + break + + if info < -1 : + raise RuntimeError("Error info=%d in arpack"%info) + return None + if info == -1: + warnings.warn("Maximum number of iterations taken: %s"%iparam[2]) + + # now extract eigenvalues and (optionally) eigenvectors + rvec = return_eigenvectors + ierr = 0 + howmny = 'A' # return all eigenvectors + sselect = np.zeros(ncv,'int') # unused + sigma = 0.0 # no shifts, not implemented + + d,z,info =\ + eigextract(rvec,howmny,sselect,sigma, + bmat,which, k,tol,resid,v,iparam[0:7],ipntr, + workd[0:2*n],workl,ierr) + + if ierr != 0: + raise RuntimeError("Error info=%d in arpack"%info) + return None + if return_eigenvectors: + return d,z + return d Deleted: trunk/scipy/splinalg/eigen/arpack/arpack.pyf.src 
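As a purely illustrative aside on the symmetric driver whose new docstring above leaves its Examples section empty, a minimal sketch of a call might look like the following; the import path is assumed from this revision's splinalg layout and the tridiagonal test matrix is made up for demonstration, neither is part of the patch:

from numpy import ones
from scipy.sparse import spdiags
from scipy.splinalg.eigen.arpack import eigen_symmetric   # assumed import path

n = 100
# symmetric tridiagonal (1D Laplacian-like) matrix, illustration only
A = spdiags([-ones(n), 2*ones(n), -ones(n)], [-1, 0, 1], n, n, format='csr')
w, v = eigen_symmetric(A, k=4, which='LA')   # four largest (algebraic) eigenvalues
print w
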
=================================================================== --- trunk/scipy/sandbox/arpack/arpack.pyf.src 2008-01-30 17:05:38 UTC (rev 3876) +++ trunk/scipy/splinalg/eigen/arpack/arpack.pyf.src 2008-02-06 17:56:43 UTC (rev 3897) @@ -1,207 +0,0 @@ -! -*- f90 -*- -! Note: the context of this file is case sensitive. - -python module _arpack ! in - <_rd=real,double precision> - <_cd=complex,double complex> - interface ! in :_arpack - subroutine saupd(ido,bmat,n,which,nev,tol,resid,ncv,v,ldv,iparam,ipntr,workd,workl,lworkl,info) ! in :_arpack:src/ssaupd.f - integer intent(in,out):: ido - character*1 :: bmat - integer optional,check(len(resid)>=n),depend(resid) :: n=len(resid) - character*2 :: which - integer :: nev - <_rd> :: tol - <_rd> dimension(n),intent(in,out) :: resid - integer optional,check(shape(v,1)==ncv),depend(v) :: ncv=shape(v,1) - <_rd> dimension(ldv,ncv),intent(in,out) :: v - integer optional,check(shape(v,0)==ldv),depend(v) :: ldv=shape(v,0) - integer dimension(11),intent(in,out) :: iparam - integer dimension(11),intent(in,out) :: ipntr - <_rd> dimension(3 * n),depend(n),intent(inout) :: workd - <_rd> dimension(lworkl),intent(inout) :: workl - integer optional,check(len(workl)>=lworkl),depend(workl) :: lworkl=len(workl) - integer intent(in,out):: info - end subroutine saupd - - subroutine seupd(rvec,howmny,select,d,z,ldz,sigma,bmat,n,which,nev,tol,resid,ncv,v,ldv,iparam,ipntr,workd,workl,lworkl,info) ! in :_arpack:src/sseupd.f - logical :: rvec - character :: howmny - logical dimension(ncv) :: select - <_rd> dimension(nev),intent(out),depend(nev) :: d - <_rd> dimension(n,nev),intent(out),depend(nev) :: z - integer optional,check(shape(z,0)==ldz),depend(z) :: ldz=shape(z,0) - <_rd> :: sigma - character :: bmat - integer optional,check(len(resid)>=n),depend(resid) :: n=len(resid) - character*2 :: which - integer :: nev - <_rd> :: tol - <_rd> dimension(n) :: resid - integer optional,check(len(select)>=ncv),depend(select) :: ncv=len(select) - <_rd> dimension(ldv,ncv),depend(ncv) :: v - integer optional,check(shape(v,0)==ldv),depend(v) :: ldv=shape(v,0) - integer dimension(7) :: iparam - integer dimension(11) :: ipntr - <_rd> dimension(2 * n),depend(n) :: workd - <_rd> dimension(lworkl) :: workl - integer optional,check(len(workl)>=lworkl),depend(workl) :: lworkl=len(workl) - integer intent(in,out):: info - end subroutine seupd - - subroutine naupd(ido,bmat,n,which,nev,tol,resid,ncv,v,ldv,iparam,ipntr,workd,workl,lworkl,info) ! in :_arpack:src/snaupd.f - integer intent(in,out):: ido - character*1 :: bmat - integer optional,check(len(resid)>=n),depend(resid) :: n=len(resid) - character*2 :: which - integer :: nev - <_rd> :: tol - <_rd> dimension(n),intent(in,out) :: resid - integer optional,check(shape(v,1)==ncv),depend(v) :: ncv=shape(v,1) - <_rd> dimension(ldv,ncv),intent(in,out) :: v - integer optional,check(shape(v,0)==ldv),depend(v) :: ldv=shape(v,0) - integer dimension(11),intent(in,out) :: iparam - integer dimension(14),intent(in,out) :: ipntr - <_rd> dimension(3 * n),depend(n),intent(inout) :: workd - <_rd> dimension(lworkl),intent(inout) :: workl - integer optional,check(len(workl)>=lworkl),depend(workl) :: lworkl=len(workl) - integer intent(in,out):: info - end subroutine naupd - - subroutine neupd(rvec,howmny,select,dr,di,z,ldz,sigmar,sigmai,workev,bmat,n,which,nev,tol,resid,ncv,v,ldv,iparam,ipntr,workd,workl,lworkl,info) ! 
in ARPACK/SRC/sneupd.f - logical :: rvec - character :: howmny - logical dimension(ncv) :: select - <_rd> dimension(nev + 1),depend(nev),intent(out) :: dr - <_rd> dimension(nev + 1),depend(nev),intent(out) :: di - <_rd> dimension(n,nev+1),depend(n,nev),intent(out) :: z - integer optional,check(shape(z,0)==ldz),depend(z) :: ldz=shape(z,0) - <_rd> :: sigmar - <_rd> :: sigmai - <_rd> dimension(3 * ncv),depend(ncv) :: workev - character :: bmat - integer optional,check(len(resid)>=n),depend(resid) :: n=len(resid) - character*2 :: which - integer :: nev - <_rd> :: tol - <_rd> dimension(n) :: resid - integer optional,check(len(select)>=ncv),depend(select) :: ncv=len(select) - <_rd> dimension(n,ncv),depend(n,ncv) :: v - integer optional,check(shape(v,0)==ldv),depend(v) :: ldv=shape(v,0) - integer dimension(11) :: iparam - integer dimension(14) :: ipntr - <_rd> dimension(3 * n),depend(n):: workd - <_rd> dimension(lworkl) :: workl - integer optional,check(len(workl)>=lworkl),depend(workl) :: lworkl=len(workl) - integer intent(in,out):: info - end subroutine neupd - - subroutine naupd(ido,bmat,n,which,nev,tol,resid,ncv,v,ldv,iparam,ipntr,workd,workl,lworkl,rwork,info) ! in :_arpack:src/snaupd.f - integer intent(in,out):: ido - character*1 :: bmat - integer optional,check(len(resid)>=n),depend(resid) :: n=len(resid) - character*2 :: which - integer :: nev - <_rd> :: tol - <_cd> dimension(n),intent(in,out) :: resid - integer optional,check(shape(v,1)==ncv),depend(v) :: ncv=shape(v,1) - <_cd> dimension(ldv,ncv),intent(in,out) :: v - integer optional,check(shape(v,0)==ldv),depend(v) :: ldv=shape(v,0) - integer dimension(11),intent(in,out) :: iparam - integer dimension(14),intent(in,out) :: ipntr - <_cd> dimension(3 * n),depend(n),intent(inout) :: workd - <_cd> dimension(lworkl),intent(inout) :: workl - integer optional,check(len(workl)>=lworkl),depend(workl) :: lworkl=len(workl) - <_rd> dimension(ncv),depend(ncv),intent(inout) :: rwork - integer intent(in,out):: info - end subroutine naupd - - subroutine neupd(rvec,howmny,select,d,z,ldz,sigma,workev,bmat,n,which,nev,tol,resid,ncv,v,ldv,iparam,ipntr,workd,workl,lworkl,rwork,info) ! 
in :_arpack:src/sneupd.f - logical :: rvec - character :: howmny - logical dimension(ncv) :: select - <_cd> dimension(nev),depend(nev),intent(out) :: d - <_cd> dimension(n,nev), depend(nev),intent(out) :: z - integer optional,check(shape(z,0)==ldz),depend(z) :: ldz=shape(z,0) - <_cd> :: sigma - <_cd> dimension(3 * ncv),depend(ncv) :: workev - character :: bmat - integer optional,check(len(resid)>=n),depend(resid) :: n=len(resid) - character*2 :: which - integer :: nev - <_rd> :: tol - <_cd> dimension(n) :: resid - integer optional,check(len(select)>=ncv),depend(select) :: ncv=len(select) - <_cd> dimension(ldv,ncv),depend(ncv) :: v - integer optional,check(shape(v,0)==ldv),depend(v) :: ldv=shape(v,0) - integer dimension(11) :: iparam - integer dimension(14) :: ipntr - <_cd> dimension(3 * n),depend(n) :: workd - <_cd> dimension(lworkl) :: workl - integer optional,check(len(workl)>=lworkl),depend(workl) :: lworkl=len(workl) - <_rd> dimension(ncv),depend(ncv) :: rwork - integer intent(in,out):: info - end subroutine neupd - integer :: logfil - integer :: ndigit - integer :: mgetv0 - integer :: msaupd - integer :: msaup2 - integer :: msaitr - integer :: mseigt - integer :: msapps - integer :: msgets - integer :: mseupd - integer :: mnaupd - integer :: mnaup2 - integer :: mnaitr - integer :: mneigh - integer :: mnapps - integer :: mngets - integer :: mneupd - integer :: mcaupd - integer :: mcaup2 - integer :: mcaitr - integer :: mceigh - integer :: mcapps - integer :: mcgets - integer :: mceupd - integer :: nopx - integer :: nbx - integer :: nrorth - integer :: nitref - integer :: nrstrt - real :: tsaupd - real :: tsaup2 - real :: tsaitr - real :: tseigt - real :: tsgets - real :: tsapps - real :: tsconv - real :: tnaupd - real :: tnaup2 - real :: tnaitr - real :: tneigh - real :: tngets - real :: tnapps - real :: tnconv - real :: tcaupd - real :: tcaup2 - real :: tcaitr - real :: tceigh - real :: tcgets - real :: tcapps - real :: tcconv - real :: tmvopx - real :: tmvbx - real :: tgetv0 - real :: titref - real :: trvec - common /debug/ logfil,ndigit,mgetv0,msaupd,msaup2,msaitr,mseigt,msapps,msgets,mseupd,mnaupd,mnaup2,mnaitr,mneigh,mnapps,mngets,mneupd,mcaupd,mcaup2,mcaitr,mceigh,mcapps,mcgets,mceupd - common /timing/ nopx,nbx,nrorth,nitref,nrstrt,tsaupd,tsaup2,tsaitr,tseigt,tsgets,tsapps,tsconv,tnaupd,tnaup2,tnaitr,tneigh,tngets,tnapps,tnconv,tcaupd,tcaup2,tcaitr,tceigh,tcgets,tcapps,tcconv,tmvopx,tmvbx,tgetv0,titref,trvec - - end interface -end python module _arpack - -! This file was auto-generated with f2py (version:2_3198). -! See http://cens.ioc.ee/projects/f2py2e/ Copied: trunk/scipy/splinalg/eigen/arpack/arpack.pyf.src (from rev 3896, trunk/scipy/sandbox/arpack/arpack.pyf.src) Deleted: trunk/scipy/splinalg/eigen/arpack/info.py =================================================================== --- trunk/scipy/sandbox/arpack/info.py 2008-01-30 17:05:38 UTC (rev 3876) +++ trunk/scipy/splinalg/eigen/arpack/info.py 2008-02-06 17:56:43 UTC (rev 3897) @@ -1,14 +0,0 @@ -""" -Eigenvalue solver using iterative methods. - -Find k eigenvectors and eigenvalues of a matrix A using the -Arnoldi/Lanczos iterative methods from ARPACK. - -These methods are most useful for large sparse matrices. 
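For the general (nonsymmetric) driver, a minimal, purely illustrative call might look like the sketch below; the import path follows the new layout introduced in this revision and the bidiagonal test matrix is an assumption chosen only because its eigenvalues are easy to check (they are simply 1..n):

from numpy import ones, arange
from scipy.sparse import spdiags
from scipy.splinalg.eigen.arpack import eigen   # assumed import path

n = 100
# upper-bidiagonal (triangular) test matrix; eigenvalues are the diagonal 1..n
A = spdiags([arange(1, n+1, dtype=float), ones(n)], [0, 1], n, n, format='csr')
w, v = eigen(A, k=6, which='LM')   # six largest-magnitude eigenvalues, ~95..100
print w
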
- - - eigen(A,k) - - eigen_symmetric(A,k) - -""" -global_symbols = [] -postpone_import = 1 Copied: trunk/scipy/splinalg/eigen/arpack/info.py (from rev 3896, trunk/scipy/sandbox/arpack/info.py) =================================================================== --- trunk/scipy/sandbox/arpack/info.py 2008-02-06 01:18:26 UTC (rev 3896) +++ trunk/scipy/splinalg/eigen/arpack/info.py 2008-02-06 17:56:43 UTC (rev 3897) @@ -0,0 +1,21 @@ +""" +Eigenvalue solver using iterative methods. + +Find k eigenvectors and eigenvalues of a matrix A using the +Arnoldi/Lanczos iterative methods from ARPACK. + +These methods are most useful for large sparse matrices. + + - eigen(A,k) + - eigen_symmetric(A,k) + +Reference +--------- + - http://www.caam.rice.edu/ +software/ARPACK/ + - http://www.caam.rice.edu/software/ARPACK/UG/ug.html + - http://books.google.com/books?hl=en&id=4E9PY7NT8a0C&dq=arpack+users+guide + +""" +global_symbols = [] +postpone_import = 1 Deleted: trunk/scipy/splinalg/eigen/arpack/setup.py =================================================================== --- trunk/scipy/sandbox/arpack/setup.py 2008-01-30 17:05:38 UTC (rev 3876) +++ trunk/scipy/splinalg/eigen/arpack/setup.py 2008-02-06 17:56:43 UTC (rev 3897) @@ -1,36 +0,0 @@ -#!/usr/bin/env python - -from os.path import join - -def configuration(parent_package='',top_path=None): - from numpy.distutils.system_info import get_info, NotFoundError - from numpy.distutils.misc_util import Configuration - - lapack_opt = get_info('lapack_opt') - - if not lapack_opt: - raise NotFoundError,'no lapack/blas resources found' - - config = Configuration('arpack', parent_package, top_path) - - arpack_sources=[join('ARPACK','SRC', '*.f')] - arpack_sources.extend([join('ARPACK','UTIL', '*.f')]) -# arpack_sources.extend([join('ARPACK','BLAS', '*.f')]) - arpack_sources.extend([join('ARPACK','LAPACK', '*.f')]) - - config.add_library('arpack', sources=arpack_sources, - include_dirs=[join('ARPACK', 'SRC')]) - - - config.add_extension('_arpack', - sources='arpack.pyf.src', - libraries=['arpack'], - extra_info = lapack_opt - ) - - config.add_data_dir('tests') - return config - -if __name__ == '__main__': - from numpy.distutils.core import setup - setup(**configuration(top_path='').todict()) Copied: trunk/scipy/splinalg/eigen/arpack/setup.py (from rev 3896, trunk/scipy/sandbox/arpack/setup.py) =================================================================== --- trunk/scipy/sandbox/arpack/setup.py 2008-02-06 01:18:26 UTC (rev 3896) +++ trunk/scipy/splinalg/eigen/arpack/setup.py 2008-02-06 17:56:43 UTC (rev 3897) @@ -0,0 +1,37 @@ +#!/usr/bin/env python + +from os.path import join + +def configuration(parent_package='',top_path=None): + from numpy.distutils.system_info import get_info, NotFoundError + from numpy.distutils.misc_util import Configuration + + config = Configuration('arpack',parent_package,top_path) + + lapack_opt = get_info('lapack_opt') + + if not lapack_opt: + raise NotFoundError,'no lapack/blas resources found' + + config = Configuration('arpack', parent_package, top_path) + + arpack_sources=[join('ARPACK','SRC', '*.f')] + arpack_sources.extend([join('ARPACK','UTIL', '*.f')]) + arpack_sources.extend([join('ARPACK','LAPACK', '*.f')]) + + config.add_library('arpack', sources=arpack_sources, + include_dirs=[join('ARPACK', 'SRC')]) + + + config.add_extension('_arpack', + sources='arpack.pyf.src', + libraries=['arpack'], + extra_info = lapack_opt + ) + + config.add_data_dir('tests') + return config + +if __name__ == '__main__': + from 
numpy.distutils.core import setup + setup(**configuration(top_path='').todict()) Deleted: trunk/scipy/splinalg/eigen/arpack/speigs.py =================================================================== --- trunk/scipy/sandbox/arpack/speigs.py 2008-01-30 17:05:38 UTC (rev 3876) +++ trunk/scipy/splinalg/eigen/arpack/speigs.py 2008-02-06 17:56:43 UTC (rev 3897) @@ -1,225 +0,0 @@ -import numpy as N -import _arpack -import warnings - -__all___=['ArpackException','ARPACK_eigs', 'ARPACK_gen_eigs'] - -class ArpackException(RuntimeError): - ARPACKErrors = { 0: """Normal exit.""", - 3: """No shifts could be applied during a cycle of the - Implicitly restarted Arnoldi iteration. One possibility - is to increase the size of NCV relative to NEV.""", - -1: """N must be positive.""", - -2: """NEV must be positive.""", - -3: """NCV-NEV >= 2 and less than or equal to N.""", - -4: """The maximum number of Arnoldi update iteration - must be greater than zero.""", - -5: """WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'""", - -6: """BMAT must be one of 'I' or 'G'.""", - -7: """Length of private work array is not sufficient.""", - -8: """Error return from LAPACK eigenvalue calculation;""", - -9: """Starting vector is zero.""", - -10: """IPARAM(7) must be 1,2,3,4.""", - -11: """IPARAM(7) = 1 and BMAT = 'G' are incompatable.""", - -12: """IPARAM(1) must be equal to 0 or 1.""", - -9999: """Could not build an Arnoldi factorization. - IPARAM(5) returns the size of the current Arnoldi - factorization.""", - } - def __init__(self, info): - self.info = info - def __str__(self): - try: return self.ARPACKErrors[self.info] - except KeyError: return "Unknown ARPACK error" - -def check_init(n, nev, ncv): - assert(nev <= n-4) # ARPACK seems to cause a segfault otherwise - if ncv is None: - ncv = min(2*nev+1, n-1) - maxitr = max(n, 1000) # Maximum number of iterations - return ncv, maxitr - -def init_workspaces(n,nev,ncv): - ipntr = N.zeros(14, N.int32) # Pointers into memory structure used by F77 calls - d = N.zeros((ncv, 3), N.float64, order='FORTRAN') # Temp workspace - # Temp workspace/error residuals upon iteration completion - resid = N.zeros(n, N.float64) - workd = N.zeros(3*n, N.float64) # workspace - workl = N.zeros(3*ncv*ncv+6*ncv, N.float64) # more workspace - # Storage for the Arnoldi basis vectors - v = N.zeros((n, ncv), dtype=N.float64, order='FORTRAN') - return (ipntr, d, resid, workd, workl, v) - -def init_debug(): - # Causes various debug info to be printed by ARPACK - _arpack.debug.ndigit = -3 - _arpack.debug.logfil = 6 - _arpack.debug.mnaitr = 0 - _arpack.debug.mnapps = 0 - _arpack.debug.mnaupd = 1 - _arpack.debug.mnaup2 = 0 - _arpack.debug.mneigh = 0 - _arpack.debug.mneupd = 1 - -def init_postproc_workspace(n, nev, ncv): - # Used as workspace and to return eigenvectors if requested. 
Not touched if - # eigenvectors are not requested - workev = N.zeros(3*ncv, N.float64) # More workspace - select = N.zeros(ncv, N.int32) # Used as internal workspace since dneupd - # parameter HOWMNY == 'A' - return (workev, select) - -def postproc(n, nev, ncv, sigmar, sigmai, bmat, which, - tol, resid, v, iparam, ipntr, workd, workl, info): - workev, select = init_postproc_workspace(n, nev, ncv) - ierr = 0 - # Postprocess the Arnouldi vectors to extract eigenvalues/vectors - # If dneupd's first paramter is 'True' the eigenvectors are also calculated, - # 'False' only the eigenvalues - dr,di,z,info = _arpack.dneupd( - True, 'A', select, sigmar, sigmai, workev, bmat, which, nev, tol, resid, v, - iparam, ipntr, workd, workl, info) - - if N.abs(di[:-1]).max() == 0: dr = dr[:-1] - else: dr = dr[:-1] + 1j*di[:-1] - return (dr, z[:,:-1]) - - -def ARPACK_eigs(matvec, n, nev, which='SM', ncv=None, tol=1e-14): - """ - Calculate eigenvalues for system with matrix-vector product matvec, dimension n - - Arguments - ========= - matvec -- Function that provides matrix-vector product, i.e. matvec(x) -> A*x - n -- Matrix dimension of the problem - nev -- Number of eigenvalues to calculate - which -- Spectrum selection. See details below. Defaults to 'SM' - ncv -- Number of Arnoldi basisvectors to use. If None, default to 2*nev+1 - tol -- Numerical tollerance for Arnouldi iteration convergence. Defaults to 1e-14 - - Spectrum Selection - ================== - which can take one of several values: - - 'LM' -> Request eigenvalues with largest magnitude. - 'SM' -> Request eigenvalues with smallest magnitude. - 'LR' -> Request eigenvalues with largest real part. - 'SR' -> Request eigenvalues with smallest real part. - 'LI' -> Request eigenvalues with largest imaginary part. - 'SI' -> Request eigenvalues with smallest imaginary part. - - Return Values - ============= - (eig_vals, eig_vecs) where eig_vals are the requested eigenvalues and - eig_vecs the corresponding eigenvectors. If all the eigenvalues are real, - eig_vals is a real array but if some eigenvalues are complex it is a - complex array. - - """ - bmat = 'I' # Standard eigenproblem - ncv, resid, iparam, ipntr, v, workd, workl, info = ARPACK_iteration( - matvec, lambda x: x, n, bmat, which, nev, tol, ncv, mode=1) - return postproc(n, nev, ncv, 0., 0., bmat, which, tol, - resid, v, iparam, ipntr, workd, workl, info) - -def ARPACK_gen_eigs(matvec, sigma_solve, n, sigma, nev, which='LR', ncv=None, tol=1e-14): - """ - Calculate eigenvalues close to sigma for generalised eigen system - - Given a system [A]x = k_i*[M]x where [A] and [M] are matrices and k_i are - eigenvalues, nev eigenvalues close to sigma are calculated. The user needs - to provide routines that calculate [M]*x and solve [A]-sigma*[M]*x = b for x. - - Arguments - ========= - matvec -- Function that provides matrix-vector product, i.e. matvec(x) -> [M]*x - sigma_solve -- sigma_solve(b) -> x, where [A]-sigma*[M]*x = b - n -- Matrix dimension of the problem - sigma -- Eigenvalue spectral shift real value - nev -- Number of eigenvalues to calculate - which -- Spectrum selection. See details below. Defaults to 'LR' - ncv -- Number of Arnoldi basisvectors to use. If None, default to 2*nev+1 - tol -- Numerical tollerance for Arnouldi iteration convergence. Defaults to 1e-14 - - Spectrum Shift - ============== - - The spectrum of the orignal system is shifted by sigma. This transforms the - original eigenvalues to be 1/(original_eig-sigma) in the shifted - system. 
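As a quick numeric illustration of that transform (made-up eigenvalues, chosen only to show the effect): with sigma = 5, originals close to sigma map to the largest shifted magnitudes and so converge first.

# illustrative numbers only
sigma = 5.0
for lam in (4.9, 5.5, 100.0):
    print lam, '->', 1.0 / (lam - sigma)
# 4.9 -> -10.0, 5.5 -> 2.0, 100.0 -> ~0.0105
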
ARPACK then operates on the shifted system, transforming it back to - the original system in a postprocessing step. - - The spectrum shift causes eigenvalues close to sigma to become very large - in the transformed system. This allows quick convergence for these - eigenvalues. This is particularly useful if a system has a number of - trivial zero-eigenvalues that are to be ignored. - - Spectrum Selection - ================== - which can take one of several values: - - 'LM' -> Request spectrum shifted eigenvalues with largest magnitude. - 'SM' -> Request spectrum shifted eigenvalues with smallest magnitude. - 'LR' -> Request spectrum shifted eigenvalues with largest real part. - 'SR' -> Request spectrum shifted eigenvalues with smallest real part. - 'LI' -> Request spectrum shifted eigenvalues with largest imaginary part. - 'SI' -> Request spectrum shifted eigenvalues with smallest imaginary part. - - The effect on the actual system is: - 'LM' -> Eigenvalues closest to sigma on the complex plane - 'LR' -> Eigenvalues with real part > sigma, provided they exist - - - Return Values - ============= - (eig_vals, eig_vecs) where eig_vals are the requested eigenvalues and - eig_vecs the corresponding eigenvectors. If all the eigenvalues are real, - eig_vals is a real array but if some eigenvalues are complex it is a - complex array. The eigenvalues and vectors correspond to the original - system, not the shifted system. The shifted system is only used interally. - - """ - bmat = 'G' # Generalised eigenproblem - ncv, resid, iparam, ipntr, v, workd, workl, info = ARPACK_iteration( - matvec, sigma_solve, n, bmat, which, nev, tol, ncv, mode=3) - sigmar = sigma - sigmai = 0. - return postproc(n, nev, ncv, sigmar, sigmai, bmat, which, tol, - resid, v, iparam, ipntr, workd, workl, info) - -def ARPACK_iteration(matvec, sigma_solve, n, bmat, which, nev, tol, ncv, mode): - ncv, maxitr = check_init(n, nev, ncv) - ipntr, d, resid, workd, workl, v = init_workspaces(n,nev,ncv) - init_debug() - ishfts = 1 # Some random arpack parameter - # Some random arpack parameter (I think it tells ARPACK to solve the - # general eigenproblem using shift-invert - iparam = N.zeros(11, N.int32) # Array with assorted extra paramters for F77 call - iparam[[0,2,6]] = ishfts, maxitr, mode - ido = 0 # Communication variable used by ARPACK to tell the user what to do - info = 0 # Used for error reporting - # Arnouldi iteration. 
- while True: - ido,resid,v,iparam,ipntr,info = _arpack.dnaupd( - ido, bmat, which, nev, tol, resid, v, iparam, ipntr, workd, workl, info) - if ido == -1 or ido == 1 and mode not in (3,4): - # Perform y = inv[A - sigma*M]*M*x - x = workd[ipntr[0]-1:ipntr[0]+n-1] - Mx = matvec(x) # Mx = [M]*x - workd[ipntr[1]-1:ipntr[1]+n-1] = sigma_solve(Mx) - elif ido == 1: # Perform y = inv[A - sigma*M]*M*x using saved M*x - # Mx = [M]*x where it was saved by ARPACK - Mx = workd[ipntr[2]-1:ipntr[2]+n-1] - workd[ipntr[1]-1:ipntr[1]+n-1] = sigma_solve(Mx) - elif ido == 2: # Perform y = M*x - x = workd[ipntr[0]-1:ipntr[0]+n-1] - workd[ipntr[1]-1:ipntr[1]+n-1] = matvec(x) - else: # Finished, or error - break - if info == 1: - warn.warn("Maximum number of iterations taken: %s"%iparam[2]) - elif info != 0: - raise ArpackException(info) - - return (ncv, resid, iparam, ipntr, v, workd, workl, info) Copied: trunk/scipy/splinalg/eigen/arpack/speigs.py (from rev 3896, trunk/scipy/sandbox/arpack/speigs.py) =================================================================== --- trunk/scipy/sandbox/arpack/speigs.py 2008-02-06 01:18:26 UTC (rev 3896) +++ trunk/scipy/splinalg/eigen/arpack/speigs.py 2008-02-06 17:56:43 UTC (rev 3897) @@ -0,0 +1,225 @@ +import numpy as np +import _arpack +import warnings + +__all___=['ArpackException','ARPACK_eigs', 'ARPACK_gen_eigs'] + +class ArpackException(RuntimeError): + ARPACKErrors = { 0: """Normal exit.""", + 3: """No shifts could be applied during a cycle of the + Implicitly restarted Arnoldi iteration. One possibility + is to increase the size of NCV relative to NEV.""", + -1: """N must be positive.""", + -2: """NEV must be positive.""", + -3: """NCV-NEV >= 2 and less than or equal to N.""", + -4: """The maximum number of Arnoldi update iteration + must be greater than zero.""", + -5: """WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'""", + -6: """BMAT must be one of 'I' or 'G'.""", + -7: """Length of private work array is not sufficient.""", + -8: """Error return from LAPACK eigenvalue calculation;""", + -9: """Starting vector is zero.""", + -10: """IPARAM(7) must be 1,2,3,4.""", + -11: """IPARAM(7) = 1 and BMAT = 'G' are incompatable.""", + -12: """IPARAM(1) must be equal to 0 or 1.""", + -9999: """Could not build an Arnoldi factorization. 
+ IPARAM(5) returns the size of the current Arnoldi + factorization.""", + } + def __init__(self, info): + self.info = info + def __str__(self): + try: return self.ARPACKErrors[self.info] + except KeyError: return "Unknown ARPACK error" + +def check_init(n, nev, ncv): + assert(nev <= n-4) # ARPACK seems to cause a segfault otherwise + if ncv is None: + ncv = min(2*nev+1, n-1) + maxitr = max(n, 1000) # Maximum number of iterations + return ncv, maxitr + +def init_workspaces(n,nev,ncv): + ipntr = np.zeros(14, np.int32) # Pointers into memory structure used by F77 calls + d = np.zeros((ncv, 3), np.float64, order='FORTRAN') # Temp workspace + # Temp workspace/error residuals upon iteration completion + resid = np.zeros(n, np.float64) + workd = np.zeros(3*n, np.float64) # workspace + workl = np.zeros(3*ncv*ncv+6*ncv, np.float64) # more workspace + # Storage for the Arnoldi basis vectors + v = np.zeros((n, ncv), dtype=np.float64, order='FORTRAN') + return (ipntr, d, resid, workd, workl, v) + +def init_debug(): + # Causes various debug info to be printed by ARPACK + _arpack.debug.ndigit = -3 + _arpack.debug.logfil = 6 + _arpack.debug.mnaitr = 0 + _arpack.debug.mnapps = 0 + _arpack.debug.mnaupd = 1 + _arpack.debug.mnaup2 = 0 + _arpack.debug.mneigh = 0 + _arpack.debug.mneupd = 1 + +def init_postproc_workspace(n, nev, ncv): + # Used as workspace and to return eigenvectors if requested. Not touched if + # eigenvectors are not requested + workev = np.zeros(3*ncv, np.float64) # More workspace + select = np.zeros(ncv, np.int32) # Used as internal workspace since dneupd + # parameter HOWMNY == 'A' + return (workev, select) + +def postproc(n, nev, ncv, sigmar, sigmai, bmat, which, + tol, resid, v, iparam, ipntr, workd, workl, info): + workev, select = init_postproc_workspace(n, nev, ncv) + ierr = 0 + # Postprocess the Arnouldi vectors to extract eigenvalues/vectors + # If dneupd's first paramter is 'True' the eigenvectors are also calculated, + # 'False' only the eigenvalues + dr,di,z,info = _arpack.dneupd( + True, 'A', select, sigmar, sigmai, workev, bmat, which, nev, tol, resid, v, + iparam, ipntr, workd, workl, info) + + if np.abs(di[:-1]).max() == 0: dr = dr[:-1] + else: dr = dr[:-1] + 1j*di[:-1] + return (dr, z[:,:-1]) + + +def ARPACK_eigs(matvec, n, nev, which='SM', ncv=None, tol=1e-14): + """ + Calculate eigenvalues for system with matrix-vector product matvec, dimension n + + Arguments + ========= + matvec -- Function that provides matrix-vector product, i.e. matvec(x) -> A*x + n -- Matrix dimension of the problem + nev -- Number of eigenvalues to calculate + which -- Spectrum selection. See details below. Defaults to 'SM' + ncv -- Number of Arnoldi basisvectors to use. If None, default to 2*nev+1 + tol -- Numerical tollerance for Arnouldi iteration convergence. Defaults to 1e-14 + + Spectrum Selection + ================== + which can take one of several values: + + 'LM' -> Request eigenvalues with largest magnitude. + 'SM' -> Request eigenvalues with smallest magnitude. + 'LR' -> Request eigenvalues with largest real part. + 'SR' -> Request eigenvalues with smallest real part. + 'LI' -> Request eigenvalues with largest imaginary part. + 'SI' -> Request eigenvalues with smallest imaginary part. + + Return Values + ============= + (eig_vals, eig_vecs) where eig_vals are the requested eigenvalues and + eig_vecs the corresponding eigenvectors. If all the eigenvalues are real, + eig_vals is a real array but if some eigenvalues are complex it is a + complex array. 
+ + """ + bmat = 'I' # Standard eigenproblem + ncv, resid, iparam, ipntr, v, workd, workl, info = ARPACK_iteration( + matvec, lambda x: x, n, bmat, which, nev, tol, ncv, mode=1) + return postproc(n, nev, ncv, 0., 0., bmat, which, tol, + resid, v, iparam, ipntr, workd, workl, info) + +def ARPACK_gen_eigs(matvec, sigma_solve, n, sigma, nev, which='LR', ncv=None, tol=1e-14): + """ + Calculate eigenvalues close to sigma for generalised eigen system + + Given a system [A]x = k_i*[M]x where [A] and [M] are matrices and k_i are + eigenvalues, nev eigenvalues close to sigma are calculated. The user needs + to provide routines that calculate [M]*x and solve [A]-sigma*[M]*x = b for x. + + Arguments + ========= + matvec -- Function that provides matrix-vector product, i.e. matvec(x) -> [M]*x + sigma_solve -- sigma_solve(b) -> x, where [A]-sigma*[M]*x = b + n -- Matrix dimension of the problem + sigma -- Eigenvalue spectral shift real value + nev -- Number of eigenvalues to calculate + which -- Spectrum selection. See details below. Defaults to 'LR' + ncv -- Number of Arnoldi basisvectors to use. If None, default to 2*nev+1 + tol -- Numerical tollerance for Arnouldi iteration convergence. Defaults to 1e-14 + + Spectrum Shift + ============== + + The spectrum of the orignal system is shifted by sigma. This transforms the + original eigenvalues to be 1/(original_eig-sigma) in the shifted + system. ARPACK then operates on the shifted system, transforming it back to + the original system in a postprocessing step. + + The spectrum shift causes eigenvalues close to sigma to become very large + in the transformed system. This allows quick convergence for these + eigenvalues. This is particularly useful if a system has a number of + trivial zero-eigenvalues that are to be ignored. + + Spectrum Selection + ================== + which can take one of several values: + + 'LM' -> Request spectrum shifted eigenvalues with largest magnitude. + 'SM' -> Request spectrum shifted eigenvalues with smallest magnitude. + 'LR' -> Request spectrum shifted eigenvalues with largest real part. + 'SR' -> Request spectrum shifted eigenvalues with smallest real part. + 'LI' -> Request spectrum shifted eigenvalues with largest imaginary part. + 'SI' -> Request spectrum shifted eigenvalues with smallest imaginary part. + + The effect on the actual system is: + 'LM' -> Eigenvalues closest to sigma on the complex plane + 'LR' -> Eigenvalues with real part > sigma, provided they exist + + + Return Values + ============= + (eig_vals, eig_vecs) where eig_vals are the requested eigenvalues and + eig_vecs the corresponding eigenvectors. If all the eigenvalues are real, + eig_vals is a real array but if some eigenvalues are complex it is a + complex array. The eigenvalues and vectors correspond to the original + system, not the shifted system. The shifted system is only used interally. + + """ + bmat = 'G' # Generalised eigenproblem + ncv, resid, iparam, ipntr, v, workd, workl, info = ARPACK_iteration( + matvec, sigma_solve, n, bmat, which, nev, tol, ncv, mode=3) + sigmar = sigma + sigmai = 0. 
+ return postproc(n, nev, ncv, sigmar, sigmai, bmat, which, tol, + resid, v, iparam, ipntr, workd, workl, info) + +def ARPACK_iteration(matvec, sigma_solve, n, bmat, which, nev, tol, ncv, mode): + ncv, maxitr = check_init(n, nev, ncv) + ipntr, d, resid, workd, workl, v = init_workspaces(n,nev,ncv) + init_debug() + ishfts = 1 # Some random arpack parameter + # Some random arpack parameter (I think it tells ARPACK to solve the + # general eigenproblem using shift-invert + iparam = np.zeros(11, np.int32) # Array with assorted extra paramters for F77 call + iparam[[0,2,6]] = ishfts, maxitr, mode + ido = 0 # Communication variable used by ARPACK to tell the user what to do + info = 0 # Used for error reporting + # Arnouldi iteration. + while True: + ido,resid,v,iparam,ipntr,info = _arpack.dnaupd( + ido, bmat, which, nev, tol, resid, v, iparam, ipntr, workd, workl, info) + if ido == -1 or ido == 1 and mode not in (3,4): + # Perform y = inv[A - sigma*M]*M*x + x = workd[ipntr[0]-1:ipntr[0]+n-1] + Mx = matvec(x) # Mx = [M]*x + workd[ipntr[1]-1:ipntr[1]+n-1] = sigma_solve(Mx) + elif ido == 1: # Perform y = inv[A - sigma*M]*M*x using saved M*x + # Mx = [M]*x where it was saved by ARPACK + Mx = workd[ipntr[2]-1:ipntr[2]+n-1] + workd[ipntr[1]-1:ipntr[1]+n-1] = sigma_solve(Mx) + elif ido == 2: # Perform y = M*x + x = workd[ipntr[0]-1:ipntr[0]+n-1] + workd[ipntr[1]-1:ipntr[1]+n-1] = matvec(x) + else: # Finished, or error + break + if info == 1: + warn.warn("Maximum number of iterations taken: %s"%iparam[2]) + elif info != 0: + raise ArpackException(info) + + return (ncv, resid, iparam, ipntr, v, workd, workl, info) Copied: trunk/scipy/splinalg/eigen/arpack/tests (from rev 3896, trunk/scipy/sandbox/arpack/tests) Modified: trunk/scipy/splinalg/eigen/arpack/tests/test_arpack.py =================================================================== --- trunk/scipy/sandbox/arpack/tests/test_arpack.py 2008-02-06 01:18:26 UTC (rev 3896) +++ trunk/scipy/splinalg/eigen/arpack/tests/test_arpack.py 2008-02-06 17:56:43 UTC (rev 3897) @@ -1,33 +1,26 @@ #!/usr/bin/env python -__usage__ = """ -First ensure that scipy core modules are installed. 
-Build interface to arpack - python setup.py build -Run tests locally: - python tests/test_arpack.py [-l] [-v] - -""" - from scipy.testing import * -from scipy.sandbox.arpack import * -import numpy +from scipy.splinalg.eigen.arpack import eigen_symmetric,eigen from scipy.linalg import eig,eigh,norm +from numpy import array,abs,take,concatenate,dot + + class TestEigenNonsymmetric(TestCase): def get_a1(self,typ): - mat=numpy.array([[-2., -8., 1., 2., -5.], - [ 6., 6., 0., 2., 1.], - [ 0., 4., -2., 11., 0.], - [ 1., 6., 1., 0., -4.], - [ 2., -6., 4., 9., -3]],typ) + mat=array([[-2., -8., 1., 2., -5.], + [ 6., 6., 0., 2., 1.], + [ 0., 4., -2., 11., 0.], + [ 1., 6., 1., 0., -4.], + [ 2., -6., 4., 9., -3]],typ) - w=numpy.array([-2.21691+8.59661*1j,-2.21691-8.59661*1j,\ - 4.45961+3.80078*1j, 4.45961-3.80078*1j,\ - -5.48541+0j],typ.upper()) + w=array([-2.21691+8.59661*1j,-2.21691-8.59661*1j,\ + 4.45961+3.80078*1j, 4.45961-3.80078*1j,\ + -5.48541+0j],typ.upper()) return mat,w @@ -35,9 +28,9 @@ a,aw = self.get_a1(typ) w,v = eigen(a,k,which='LM') for i in range(k): - assert_array_almost_equal(sb.dot(a,v[:,i]),w[i]*v[:,i],decimal=5) - exact=numpy.abs(aw) - num=numpy.abs(w) + assert_array_almost_equal(dot(a,v[:,i]),w[i]*v[:,i],decimal=5) + exact=abs(aw) + num=abs(w) exact.sort() num.sort() assert_array_almost_equal(num[-k:],exact[-k:],decimal=5) @@ -46,9 +39,9 @@ a,aw = self.get_a1(typ) w,v = eigen(a,k,which='SM') for i in range(k): - assert_array_almost_equal(sb.dot(a,v[:,i]),w[i]*v[:,i],decimal=5) - exact=numpy.abs(aw) - num=numpy.abs(w) + assert_array_almost_equal(dot(a,v[:,i]),w[i]*v[:,i],decimal=5) + exact=abs(aw) + num=abs(w) exact.sort() num.sort() assert_array_almost_equal(num[:k],exact[:k],decimal=5) @@ -58,9 +51,9 @@ a,aw = self.get_a1(typ) w,v = eigen(a,k,which='LR') for i in range(k): - assert_array_almost_equal(sb.dot(a,v[:,i]),w[i]*v[:,i],decimal=5) - exact=numpy.real(aw) - num=numpy.real(w) + assert_array_almost_equal(dot(a,v[:,i]),w[i]*v[:,i],decimal=5) + exact=aw.real + num=w.real exact.sort() num.sort() assert_array_almost_equal(num[-k:],exact[-k:],decimal=5) @@ -69,9 +62,9 @@ a,aw = self.get_a1(typ) w,v = eigen(a,k,which='SR') for i in range(k): - assert_array_almost_equal(sb.dot(a,v[:,i]),w[i]*v[:,i],decimal=5) - exact=numpy.real(aw) - num=numpy.real(w) + assert_array_almost_equal(dot(a,v[:,i]),w[i]*v[:,i],decimal=5) + exact=aw.real + num=w.real exact.sort() num.sort() assert_array_almost_equal(num[:k],exact[:k],decimal=5) @@ -81,11 +74,11 @@ a,aw = self.get_a1(typ) w,v = eigen(a,k,which='LI') for i in range(k): - assert_array_almost_equal(sb.dot(a,v[:,i]),w[i]*v[:,i],decimal=5) + assert_array_almost_equal(dot(a,v[:,i]),w[i]*v[:,i],decimal=5) print w print aw - exact=numpy.imag(aw) - num=numpy.imag(w) + exact=aw.imag + num=w.imag exact.sort() num.sort() assert_array_almost_equal(num[-k:],exact[-k:],decimal=5) @@ -94,9 +87,9 @@ a,aw = self.get_a1(typ) w,v = eigen(a,k,which='SI') for i in range(k): - assert_array_almost_equal(sb.dot(a,v[:,i]),w[i]*v[:,i],decimal=5) - exact=numpy.imag(aw) - num=numpy.imag(w) + assert_array_almost_equal(dot(a,v[:,i]),w[i]*v[:,i],decimal=5) + exact=aw.imag + num=w.imag exact.sort() num.sort() print num @@ -121,15 +114,15 @@ class TestEigenComplexNonsymmetric(TestCase): def get_a1(self,typ): - mat=numpy.array([[-2., -8., 1., 2., -5.], - [ 6., 6., 0., 2., 1.], - [ 0., 4., -2., 11., 0.], - [ 1., 6., 1., 0., -4.], - [ 2., -6., 4., 9., -3]],typ) + mat=array([[-2., -8., 1., 2., -5.], + [ 6., 6., 0., 2., 1.], + [ 0., 4., -2., 11., 0.], + [ 1., 6., 1., 0., -4.], 
+ [ 2., -6., 4., 9., -3]],typ) - w=numpy.array([-2.21691+8.59661*1j,-2.21691-8.59661*1j,\ - 4.45961+3.80078*1j, 4.45961-3.80078*1j,\ - -5.48541+0j],typ.upper()) + w=array([-2.21691+8.59661*1j,-2.21691-8.59661*1j,\ + 4.45961+3.80078*1j, 4.45961-3.80078*1j,\ + -5.48541+0j],typ.upper()) return mat,w @@ -137,9 +130,9 @@ a,aw = self.get_a1(typ) w,v = eigen(a,k,which='LM') for i in range(k): - assert_array_almost_equal(sb.dot(a,v[:,i]),w[i]*v[:,i],decimal=5) - exact=numpy.abs(aw) - num=numpy.abs(w) + assert_array_almost_equal(dot(a,v[:,i]),w[i]*v[:,i],decimal=5) + exact=abs(aw) + num=abs(w) exact.sort() num.sort() assert_array_almost_equal(num,exact[-k:],decimal=5) @@ -148,9 +141,9 @@ a,aw = self.get_a1(typ) w,v = eigen(a,k,which='SM') for i in range(k): - assert_array_almost_equal(sb.dot(a,v[:,i]),w[i]*v[:,i],decimal=5) - exact=numpy.abs(aw) - num=numpy.abs(w) + assert_array_almost_equal(dot(a,v[:,i]),w[i]*v[:,i],decimal=5) + exact=abs(aw) + num=abs(w) exact.sort() num.sort() assert_array_almost_equal(num,exact[:k],decimal=5) @@ -160,9 +153,9 @@ a,aw = self.get_a1(typ) w,v = eigen(a,k,which='LR') for i in range(k): - assert_array_almost_equal(sb.dot(a,v[:,i]),w[i]*v[:,i],decimal=5) - exact=numpy.real(aw) - num=numpy.real(w) + assert_array_almost_equal(dot(a,v[:,i]),w[i]*v[:,i],decimal=5) + exact=aw.real + num=w.real exact.sort() num.sort() assert_array_almost_equal(num,exact[-k:],decimal=5) @@ -171,9 +164,9 @@ a,aw = self.get_a1(typ) w,v = eigen(a,k,which='SR') for i in range(k): - assert_array_almost_equal(sb.dot(a,v[:,i]),w[i]*v[:,i],decimal=5) - exact=numpy.real(aw) - num=numpy.real(w) + assert_array_almost_equal(dot(a,v[:,i]),w[i]*v[:,i],decimal=5) + exact=aw.real + num=w.real exact.sort() num.sort() assert_array_almost_equal(num,exact[:k],decimal=5) @@ -183,9 +176,9 @@ a,aw = self.get_a1(typ) w,v = eigen(a,k,which='LI') for i in range(k): - assert_array_almost_equal(sb.dot(a,v[:,i]),w[i]*v[:,i],decimal=5) - exact=numpy.imag(aw) - num=numpy.imag(w) + assert_array_almost_equal(dot(a,v[:,i]),w[i]*v[:,i],decimal=5) + exact=aw.imag + num=w.imag exact.sort() num.sort() assert_array_almost_equal(num,exact[-k:],decimal=5) @@ -194,23 +187,23 @@ a,aw = self.get_a1(typ) w,v = eigen(a,k,which='SI') for i in range(k): - assert_array_almost_equal(sb.dot(a,v[:,i]),w[i]*v[:,i],decimal=5) - exact=numpy.imag(aw) - num=numpy.imag(w) + assert_array_almost_equal(dot(a,v[:,i]),w[i]*v[:,i],decimal=5) + exact=aw.imag + num=w.imag exact.sort() num.sort() assert_array_almost_equal(num,exact[:k],decimal=5) - def test_type(self): - k=2 - for typ in 'FD': - self.large_magnitude(typ,k) - self.small_magnitude(typ,k) - self.large_real(typ,k) - self.small_real(typ,k) - self.large_imag(typ,k) - self.small_imag(typ,k) +# def test_type(self): +# k=2 +# for typ in 'FD': +# self.large_magnitude(typ,k) +# self.small_magnitude(typ,k) +# self.large_real(typ,k) +# self.small_real(typ,k) +# self.large_imag(typ,k) +# self.small_imag(typ,k) @@ -218,13 +211,13 @@ class TestEigenSymmetric(TestCase): def get_a1(self,typ): - mat_a1=numpy.array([[ 2., 0., 0., -1., 0., -1.], - [ 0., 2., 0., -1., 0., -1.], - [ 0., 0., 2., -1., 0., -1.], - [-1., -1., -1., 4., 0., -1.], - [ 0., 0., 0., 0., 1., -1.], - [-1., -1., -1., -1., -1., 5.]], - typ) + mat_a1=array([[ 2., 0., 0., -1., 0., -1.], + [ 0., 2., 0., -1., 0., -1.], + [ 0., 0., 2., -1., 0., -1.], + [-1., -1., -1., 4., 0., -1.], + [ 0., 0., 0., 0., 1., -1.], + [-1., -1., -1., -1., -1., 5.]], + typ) w = [0,1,2,2,5,6] # eigenvalues of a1 return mat_a1,w @@ -249,28 +242,28 @@ w,v = 
eigen_symmetric(a,k,which='LM') ew,ev = eigh(a) ind=ew.argsort() - assert_array_almost_equal(w,numpy.take(ew,ind[-k:])) + assert_array_almost_equal(w,take(ew,ind[-k:])) for i in range(k): - assert_array_almost_equal(sb.dot(a,v[:,i]),w[i]*v[:,i]) + assert_array_almost_equal(dot(a,v[:,i]),w[i]*v[:,i]) def small_eigenvectors(self,typ,k): a,aw = self.get_a1(typ) w,v = eigen_symmetric(a,k,which='SM',tol=1e-7) ew,ev = eigh(a) ind=ew.argsort() - assert_array_almost_equal(w,numpy.take(ew,ind[:k])) + assert_array_almost_equal(w,take(ew,ind[:k])) for i in range(k): - assert_array_almost_equal(sb.dot(a,v[:,i]),w[i]*v[:,i]) + assert_array_almost_equal(dot(a,v[:,i]),w[i]*v[:,i]) def end_eigenvectors(self,typ,k): a,aw = self.get_a1(typ) w,v = eigen_symmetric(a,k,which='BE') ew,ev = eigh(a) ind=ew.argsort() - exact=numpy.concatenate(([ind[:k/2],ind[-k/2:]])) - assert_array_almost_equal(w,numpy.take(ew,exact)) + exact=concatenate(([ind[:k/2],ind[-k/2:]])) + assert_array_almost_equal(w,take(ew,exact)) for i in range(k): - assert_array_almost_equal(sb.dot(a,v[:,i]),w[i]*v[:,i]) + assert_array_almost_equal(dot(a,v[:,i]),w[i]*v[:,i]) def test_eigenvectors(self): k=2 @@ -290,21 +283,21 @@ class TestEigenComplexSymmetric(TestCase): def get_a1(self,typ): - mat_a1=numpy.array([[ 2., 0., 0., -1., 0., -1.], - [ 0., 2., 0., -1., 0., -1.], - [ 0., 0., 2., -1., 0., -1.], - [-1., -1., -1., 4., 0., -1.], - [ 0., 0., 0., 0., 1., -1.], - [-1., -1., -1., -1., -1., 5.]], - typ) - w = numpy.array([0+0j,1+0j,2+0j,2+0j,5+0j,6+0j]) # eigenvalues of a1 + mat_a1=array([[ 2., 0., 0., -1., 0., -1.], + [ 0., 2., 0., -1., 0., -1.], + [ 0., 0., 2., -1., 0., -1.], + [-1., -1., -1., 4., 0., -1.], + [ 0., 0., 0., 0., 1., -1.], + [-1., -1., -1., -1., -1., 5.]], + typ) + w = array([0+0j,1+0j,2+0j,2+0j,5+0j,6+0j]) # eigenvalues of a1 return mat_a1,w def large_magnitude(self,typ,k): a,aw = self.get_a1(typ) w,v = eigen(a,k,which='LM') for i in range(k): - assert_array_almost_equal(sb.dot(a,v[:,i]),w[i]*v[:,i],decimal=5) + assert_array_almost_equal(dot(a,v[:,i]),w[i]*v[:,i],decimal=5) aw.real.sort() w.real.sort() assert_array_almost_equal(w,aw[-k:]) @@ -314,7 +307,7 @@ a,aw = self.get_a1(typ) w,v = eigen(a,k,which='SM') for i in range(k): - assert_array_almost_equal(sb.dot(a,v[:,i]),w[i]*v[:,i]) + assert_array_almost_equal(dot(a,v[:,i]),w[i]*v[:,i]) aw.real.sort() w.real.sort() assert_array_almost_equal(w,aw[:k]) @@ -323,7 +316,7 @@ a,aw = self.get_a1(typ) w,v = eigen(a,k,which='LR') for i in range(k): - assert_array_almost_equal(sb.dot(a,v[:,i]),w[i]*v[:,i],decimal=5) + assert_array_almost_equal(dot(a,v[:,i]),w[i]*v[:,i],decimal=5) aw.real.sort() w.real.sort() assert_array_almost_equal(w,aw[-k:],decimal=5) @@ -333,18 +326,18 @@ a,aw = self.get_a1(typ) w,v = eigen(a,k,which='SR') for i in range(k): - assert_array_almost_equal(sb.dot(a,v[:,i]),w[i]*v[:,i]) + assert_array_almost_equal(dot(a,v[:,i]),w[i]*v[:,i]) aw.real.sort() w.real.sort() assert_array_almost_equal(w,aw[:k]) - def test_complex_symmetric(self): - k=2 - for typ in 'FD': - self.large_magnitude(typ,k) - self.small_magnitude(typ,k) - self.large_real(typ,k) - self.small_real(typ,k) +# def test_complex_symmetric(self): +# k=2 +# for typ in 'FD': +# self.large_magnitude(typ,k) +# self.small_magnitude(typ,k) +# self.large_real(typ,k) +# self.small_real(typ,k) Modified: trunk/scipy/splinalg/setup.py =================================================================== --- trunk/scipy/splinalg/setup.py 2008-02-06 01:18:26 UTC (rev 3896) +++ trunk/scipy/splinalg/setup.py 2008-02-06 17:56:43 
UTC (rev 3897) @@ -7,6 +7,7 @@ config.add_subpackage(('isolve')) config.add_subpackage(('dsolve')) + config.add_subpackage(('eigen')) config.add_data_dir('tests') From scipy-svn at scipy.org Wed Feb 6 13:25:10 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Wed, 6 Feb 2008 12:25:10 -0600 (CST) Subject: [Scipy-svn] r3898 - trunk/scipy/splinalg/eigen/arpack/tests Message-ID: <20080206182510.866BC39C360@new.scipy.org> Author: hagberg Date: 2008-02-06 12:25:05 -0600 (Wed, 06 Feb 2008) New Revision: 3898 Modified: trunk/scipy/splinalg/eigen/arpack/tests/test_speigs.py Log: Adjust imports and use LinearOperator in test_speigs Modified: trunk/scipy/splinalg/eigen/arpack/tests/test_speigs.py =================================================================== --- trunk/scipy/splinalg/eigen/arpack/tests/test_speigs.py 2008-02-06 17:56:43 UTC (rev 3897) +++ trunk/scipy/splinalg/eigen/arpack/tests/test_speigs.py 2008-02-06 18:25:05 UTC (rev 3898) @@ -2,8 +2,10 @@ from scipy.testing import * -from scipy.sandbox.arpack.speigs import * +from scipy.splinalg.interface import aslinearoperator +from scipy.splinalg.eigen.arpack.speigs import * + import numpy as N class TestEigs(TestCase): @@ -22,8 +24,8 @@ vals = vals[uv_sortind] vecs = vecs[:,uv_sortind] - from scipy.splinalg.interface import aslinearoperator - matvec = aslinearoperator(A).matvec + A=aslinearoperator(A) + matvec = A.matvec #= lambda x: N.asarray(A*x)[0] nev=4 eigvs = ARPACK_eigs(matvec, A.shape[0], nev=nev) From scipy-svn at scipy.org Wed Feb 6 13:28:15 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Wed, 6 Feb 2008 12:28:15 -0600 (CST) Subject: [Scipy-svn] r3899 - trunk/scipy/splinalg/eigen Message-ID: <20080206182815.413E239C2CF@new.scipy.org> Author: hagberg Date: 2008-02-06 12:28:08 -0600 (Wed, 06 Feb 2008) New Revision: 3899 Added: trunk/scipy/splinalg/eigen/__init__.py trunk/scipy/splinalg/eigen/info.py trunk/scipy/splinalg/eigen/setup.py Log: Add info, __init__, and setup to splinalg.eigen Added: trunk/scipy/splinalg/eigen/__init__.py =================================================================== --- trunk/scipy/splinalg/eigen/__init__.py 2008-02-06 18:25:05 UTC (rev 3898) +++ trunk/scipy/splinalg/eigen/__init__.py 2008-02-06 18:28:08 UTC (rev 3899) @@ -0,0 +1,10 @@ +"Sparse eigenvalue solvers" + +from info import __doc__ + +from arpack import * + +__all__ = filter(lambda s:not s.startswith('_'),dir()) +from scipy.testing.pkgtester import Tester +test = Tester().test +bench = Tester().bench Added: trunk/scipy/splinalg/eigen/info.py =================================================================== --- trunk/scipy/splinalg/eigen/info.py 2008-02-06 18:25:05 UTC (rev 3898) +++ trunk/scipy/splinalg/eigen/info.py 2008-02-06 18:28:08 UTC (rev 3899) @@ -0,0 +1,18 @@ +""" +Sparse Eigenvalue Solvers +========================= + +There are submodules of splinalg: + 1. 
arpack: spare eigenvalue solver using iterative methods + + +Examples +======== + + + +""" + +#TODO show examples + +postpone_import = 1 Added: trunk/scipy/splinalg/eigen/setup.py =================================================================== --- trunk/scipy/splinalg/eigen/setup.py 2008-02-06 18:25:05 UTC (rev 3898) +++ trunk/scipy/splinalg/eigen/setup.py 2008-02-06 18:28:08 UTC (rev 3899) @@ -0,0 +1,14 @@ +#!/usr/bin/env python + +def configuration(parent_package='',top_path=None): + from numpy.distutils.misc_util import Configuration + + config = Configuration('eigen',parent_package,top_path) + + config.add_subpackage(('arpack')) + + return config + +if __name__ == '__main__': + from numpy.distutils.core import setup + setup(**configuration(top_path='').todict()) Property changes on: trunk/scipy/splinalg/eigen/setup.py ___________________________________________________________________ Name: svn:executable + * From scipy-svn at scipy.org Wed Feb 6 13:30:28 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Wed, 6 Feb 2008 12:30:28 -0600 (CST) Subject: [Scipy-svn] r3900 - trunk/scipy/splinalg/eigen/arpack/tests Message-ID: <20080206183028.B68AF39C2CF@new.scipy.org> Author: hagberg Date: 2008-02-06 12:30:22 -0600 (Wed, 06 Feb 2008) New Revision: 3900 Modified: trunk/scipy/splinalg/eigen/arpack/tests/test_arpack.py Log: Improved arpack tests Modified: trunk/scipy/splinalg/eigen/arpack/tests/test_arpack.py =================================================================== --- trunk/scipy/splinalg/eigen/arpack/tests/test_arpack.py 2008-02-06 18:28:08 UTC (rev 3899) +++ trunk/scipy/splinalg/eigen/arpack/tests/test_arpack.py 2008-02-06 18:30:22 UTC (rev 3900) @@ -1,345 +1,255 @@ #!/usr/bin/env python +__usage__ = """ +To run tests locally: + python tests/test_arpack.py [-l] [-v] +""" + from scipy.testing import * - +from numpy import array,real,imag,finfo,concatenate,\ + column_stack,argsort,dot,round,conj,sort from scipy.splinalg.eigen.arpack import eigen_symmetric,eigen -from scipy.linalg import eig,eigh,norm -from numpy import array,abs,take,concatenate,dot +def assert_almost_equal_cc(actual,desired,decimal=7,err_msg='',verbose=True): + # almost equal or complex conjugates almost equal + try: + assert_almost_equal(actual,desired,decimal,err_msg,verbose) + except: + assert_almost_equal(actual,conj(desired),decimal,err_msg,verbose) -class TestEigenNonsymmetric(TestCase): - def get_a1(self,typ): - mat=array([[-2., -8., 1., 2., -5.], - [ 6., 6., 0., 2., 1.], - [ 0., 4., -2., 11., 0.], - [ 1., 6., 1., 0., -4.], - [ 2., -6., 4., 9., -3]],typ) +def assert_array_almost_equal_cc(actual,desired,decimal=7, + err_msg='',verbose=True): + # almost equal or complex conjugates almost equal + try: + assert_array_almost_equal(actual,desired,decimal,err_msg,verbose) + except: + assert_array_almost_equal(actual,conj(desired),decimal,err_msg,verbose) - w=array([-2.21691+8.59661*1j,-2.21691-8.59661*1j,\ - 4.45961+3.80078*1j, 4.45961-3.80078*1j,\ - -5.48541+0j],typ.upper()) - return mat,w - def large_magnitude(self,typ,k): - a,aw = self.get_a1(typ) - w,v = eigen(a,k,which='LM') - for i in range(k): - assert_array_almost_equal(dot(a,v[:,i]),w[i]*v[:,i],decimal=5) - exact=abs(aw) - num=abs(w) - exact.sort() - num.sort() - assert_array_almost_equal(num[-k:],exact[-k:],decimal=5) +# precision for tests +_ndigits = {'f':5, 'd':12, 'F':5, 'D':12} - def small_magnitude(self,typ,k): - a,aw = self.get_a1(typ) - w,v = eigen(a,k,which='SM') - for i in range(k): - 
assert_array_almost_equal(dot(a,v[:,i]),w[i]*v[:,i],decimal=5) - exact=abs(aw) - num=abs(w) - exact.sort() - num.sort() - assert_array_almost_equal(num[:k],exact[:k],decimal=5) +class TestArpack(TestCase): + def setUp(self): + self.symmetric=[] + self.nonsymmetric=[] - def large_real(self,typ,k): - a,aw = self.get_a1(typ) - w,v = eigen(a,k,which='LR') - for i in range(k): - assert_array_almost_equal(dot(a,v[:,i]),w[i]*v[:,i],decimal=5) - exact=aw.real - num=w.real - exact.sort() - num.sort() - assert_array_almost_equal(num[-k:],exact[-k:],decimal=5) + S1={} + S1['mat']=\ + array([[ 2., 0., 0., -1., 0., -1.], + [ 0., 2., 0., -1., 0., -1.], + [ 0., 0., 2., -1., 0., -1.], + [-1., -1., -1., 4., 0., -1.], + [ 0., 0., 0., 0., 1., -1.], + [-1., -1., -1., -1., -1., 5.]]) - def small_real(self,typ,k): - a,aw = self.get_a1(typ) - w,v = eigen(a,k,which='SR') - for i in range(k): - assert_array_almost_equal(dot(a,v[:,i]),w[i]*v[:,i],decimal=5) - exact=aw.real - num=w.real - exact.sort() - num.sort() - assert_array_almost_equal(num[:k],exact[:k],decimal=5) + S1['eval']=array([0,1,2,2,5,6]) + self.symmetric.append(S1) - - def large_imag(self,typ,k): - a,aw = self.get_a1(typ) - w,v = eigen(a,k,which='LI') - for i in range(k): - assert_array_almost_equal(dot(a,v[:,i]),w[i]*v[:,i],decimal=5) - print w - print aw - exact=aw.imag - num=w.imag - exact.sort() - num.sort() - assert_array_almost_equal(num[-k:],exact[-k:],decimal=5) - - def small_imag(self,typ,k): - a,aw = self.get_a1(typ) - w,v = eigen(a,k,which='SI') - for i in range(k): - assert_array_almost_equal(dot(a,v[:,i]),w[i]*v[:,i],decimal=5) - exact=aw.imag - num=w.imag - exact.sort() - num.sort() - print num - assert_array_almost_equal(num[:k],exact[:k],decimal=5) - - - def test_type(self): - k=2 - for typ in 'fd': - self.large_magnitude(typ,k) - self.small_magnitude(typ,k) - self.large_real(typ,k) - self.small_real(typ,k) -# Maybe my understanding of small imaginary and large imaginary -# isn't too keen. 
I don't understand why these return -# different answers than in the complex case (the latter seems correct) -# self.large_imag(typ,k) -# self.small_imag(typ,k) - - - -class TestEigenComplexNonsymmetric(TestCase): - - def get_a1(self,typ): - mat=array([[-2., -8., 1., 2., -5.], + N1={} + N1['mat']=\ + array([[-2., -8., 1., 2., -5.], [ 6., 6., 0., 2., 1.], [ 0., 4., -2., 11., 0.], [ 1., 6., 1., 0., -4.], - [ 2., -6., 4., 9., -3]],typ) + [ 2., -6., 4., 9., -3]]) - w=array([-2.21691+8.59661*1j,-2.21691-8.59661*1j,\ - 4.45961+3.80078*1j, 4.45961-3.80078*1j,\ - -5.48541+0j],typ.upper()) - return mat,w + N1['eval']=\ + array([ -5.4854094033782888+0.0j, + -2.2169058544873783+8.5966096591588261j, + -2.2169058544873783-8.5966096591588261j, + 4.4596105561765107+3.8007839204319454j, + 4.4596105561765107-3.8007839204319454j],'D') - def large_magnitude(self,typ,k): - a,aw = self.get_a1(typ) - w,v = eigen(a,k,which='LM') - for i in range(k): - assert_array_almost_equal(dot(a,v[:,i]),w[i]*v[:,i],decimal=5) - exact=abs(aw) - num=abs(w) - exact.sort() - num.sort() - assert_array_almost_equal(num,exact[-k:],decimal=5) - def small_magnitude(self,typ,k): - a,aw = self.get_a1(typ) - w,v = eigen(a,k,which='SM') - for i in range(k): - assert_array_almost_equal(dot(a,v[:,i]),w[i]*v[:,i],decimal=5) - exact=abs(aw) - num=abs(w) - exact.sort() - num.sort() - assert_array_almost_equal(num,exact[:k],decimal=5) + self.nonsymmetric.append(N1) + +class TestEigenSymmetric(TestArpack): - def large_real(self,typ,k): - a,aw = self.get_a1(typ) - w,v = eigen(a,k,which='LR') - for i in range(k): - assert_array_almost_equal(dot(a,v[:,i]),w[i]*v[:,i],decimal=5) - exact=aw.real - num=w.real - exact.sort() - num.sort() - assert_array_almost_equal(num,exact[-k:],decimal=5) + def get_exact_eval(self,d,typ,k,which): + eval=d['eval'].astype(typ) + ind=argsort(eval) + eval=eval[ind] + if which=='LM': + return eval[-k:] + if which=='SM': + return eval[:k] + if which=='BE': + # one ev from each end - if k is odd, extra ev on high end + l=k/2 + h=k/2+k%2 + low=range(len(eval))[:l] + high=range(len(eval))[-h:] + return eval[low+high] - def small_real(self,typ,k): - a,aw = self.get_a1(typ) - w,v = eigen(a,k,which='SR') + def eval_evec(self,d,typ,k,which): + a=d['mat'].astype(typ) + exact_eval=self.get_exact_eval(d,typ,k,which) + eval,evec=eigen_symmetric(a,k,which=which) + # check eigenvalues + assert_array_almost_equal(eval,exact_eval,decimal=_ndigits[typ]) + # check eigenvectors A*evec=eval*evec for i in range(k): - assert_array_almost_equal(dot(a,v[:,i]),w[i]*v[:,i],decimal=5) - exact=aw.real - num=w.real - exact.sort() - num.sort() - assert_array_almost_equal(num,exact[:k],decimal=5) + assert_array_almost_equal(dot(a,evec[:,i]), + eval[i]*evec[:,i], + decimal=_ndigits[typ]) + def test_symmetric(self): + k=2 + for typ in 'fd': + for which in ['LM','SM','BE']: + self.eval_evec(self.symmetric[0],typ,k,which) - def large_imag(self,typ,k): - a,aw = self.get_a1(typ) - w,v = eigen(a,k,which='LI') - for i in range(k): - assert_array_almost_equal(dot(a,v[:,i]),w[i]*v[:,i],decimal=5) - exact=aw.imag - num=w.imag - exact.sort() - num.sort() - assert_array_almost_equal(num,exact[-k:],decimal=5) + +class TestEigenComplexSymmetric(TestArpack): - def small_imag(self,typ,k): - a,aw = self.get_a1(typ) - w,v = eigen(a,k,which='SI') + def sort_choose(self,eval,typ,k,which): + # sort and choose the eigenvalues and eigenvectors + # both for the exact answer and that returned from ARPACK + reval=round(eval,decimals=_ndigits[typ]) + ind=argsort(reval) + if 
which=='LM' or which=='LR': + return ind[-k:] + if which=='SM' or which=='SR': + return ind[:k] + + def eval_evec(self,d,typ,k,which): + a=d['mat'].astype(typ) + # get exact eigenvalues + exact_eval=d['eval'].astype(typ) + ind=self.sort_choose(exact_eval,typ,k,which) + exact_eval=exact_eval[ind] + # compute eigenvalues + eval,evec=eigen(a,k,which=which) + ind=self.sort_choose(eval,typ,k,which) + eval=eval[ind] + evec=evec[:,ind] + + # check eigenvalues + assert_array_almost_equal(eval,exact_eval,decimal=_ndigits[typ]) + # check eigenvectors A*evec=eval*evec for i in range(k): - assert_array_almost_equal(dot(a,v[:,i]),w[i]*v[:,i],decimal=5) - exact=aw.imag - num=w.imag - exact.sort() - num.sort() - assert_array_almost_equal(num,exact[:k],decimal=5) + assert_array_almost_equal(dot(a,evec[:,i]), + eval[i]*evec[:,i], + decimal=_ndigits[typ]) -# def test_type(self): +# def test_complex_symmetric(self): # k=2 # for typ in 'FD': -# self.large_magnitude(typ,k) -# self.small_magnitude(typ,k) -# self.large_real(typ,k) -# self.small_real(typ,k) -# self.large_imag(typ,k) -# self.small_imag(typ,k) +# for which in ['LM','SM','LR','SR']: +# self.eval_evec(self.symmetric[0],typ,k,which) + +class TestEigenNonSymmetric(TestArpack): -class TestEigenSymmetric(TestCase): + def sort_choose(self,eval,typ,k,which): + reval=round(eval,decimals=_ndigits[typ]) + if which in ['LR','SR']: + ind=argsort(reval.real) + elif which in ['LI','SI']: + # for LI,SI ARPACK returns largest,smallest abs(imaginary) why? + ind=argsort(abs(reval.imag)) + else: + ind=argsort(abs(reval)) - def get_a1(self,typ): - mat_a1=array([[ 2., 0., 0., -1., 0., -1.], - [ 0., 2., 0., -1., 0., -1.], - [ 0., 0., 2., -1., 0., -1.], - [-1., -1., -1., 4., 0., -1.], - [ 0., 0., 0., 0., 1., -1.], - [-1., -1., -1., -1., -1., 5.]], - typ) - w = [0,1,2,2,5,6] # eigenvalues of a1 - return mat_a1,w + if which in ['LR','LM','LI']: + return ind[-k:] + if which in ['SR','SM','SI']: + return ind[:k] - def large_eigenvalues(self,typ,k): - a,aw = self.get_a1(typ) - w,v = eigen_symmetric(a,k,which='LM',tol=1e-7) - assert_array_almost_equal(w,aw[-k:]) - def small_eigenvalues(self,typ,k): - a,aw = self.get_a1(typ) - w,v = eigen_symmetric(a,k,which='SM') - assert_array_almost_equal(w,aw[:k]) - - def end_eigenvalues(self,typ,k): - a,aw = self.get_a1(typ) - w,v = eigen_symmetric(a,k,which='BE') - exact=[aw[0],aw[-1]] - assert_array_almost_equal(w,exact) - - def large_eigenvectors(self,typ,k): - a,aw = self.get_a1(typ) - w,v = eigen_symmetric(a,k,which='LM') - ew,ev = eigh(a) - ind=ew.argsort() - assert_array_almost_equal(w,take(ew,ind[-k:])) + def eval_evec(self,d,typ,k,which): + a=d['mat'].astype(typ) + # get exact eigenvalues + exact_eval=d['eval'].astype(typ.upper()) + ind=self.sort_choose(exact_eval,typ,k,which) + exact_eval=exact_eval[ind] + # compute eigenvalues + eval,evec=eigen(a,k,which=which) + ind=self.sort_choose(eval,typ,k,which) + eval=eval[ind] + evec=evec[:,ind] + # check eigenvalues + # check eigenvectors A*evec=eval*evec for i in range(k): - assert_array_almost_equal(dot(a,v[:,i]),w[i]*v[:,i]) + assert_almost_equal_cc(eval[i],exact_eval[i],decimal=_ndigits[typ]) + assert_array_almost_equal_cc(dot(a,evec[:,i]), + eval[i]*evec[:,i], + decimal=_ndigits[typ]) - def small_eigenvectors(self,typ,k): - a,aw = self.get_a1(typ) - w,v = eigen_symmetric(a,k,which='SM',tol=1e-7) - ew,ev = eigh(a) - ind=ew.argsort() - assert_array_almost_equal(w,take(ew,ind[:k])) - for i in range(k): - assert_array_almost_equal(dot(a,v[:,i]),w[i]*v[:,i]) - def 
end_eigenvectors(self,typ,k): - a,aw = self.get_a1(typ) - w,v = eigen_symmetric(a,k,which='BE') - ew,ev = eigh(a) - ind=ew.argsort() - exact=concatenate(([ind[:k/2],ind[-k/2:]])) - assert_array_almost_equal(w,take(ew,exact)) - for i in range(k): - assert_array_almost_equal(dot(a,v[:,i]),w[i]*v[:,i]) - - def test_eigenvectors(self): + def test_nonsymmetric(self): k=2 for typ in 'fd': - self.large_eigenvectors(typ,k) - self.small_eigenvectors(typ,k) - self.end_eigenvectors(typ,k) + for which in ['LI','LR','LM','SM','SR','SI']: + for m in self.nonsymmetric: + self.eval_evec(m,typ,k,which) - def test_type(self): - k=2 - for typ in 'fd': - self.large_eigenvalues(typ,k) - self.small_eigenvalues(typ,k) - self.end_eigenvalues(typ,k) -class TestEigenComplexSymmetric(TestCase): - def get_a1(self,typ): - mat_a1=array([[ 2., 0., 0., -1., 0., -1.], - [ 0., 2., 0., -1., 0., -1.], - [ 0., 0., 2., -1., 0., -1.], - [-1., -1., -1., 4., 0., -1.], - [ 0., 0., 0., 0., 1., -1.], - [-1., -1., -1., -1., -1., 5.]], - typ) - w = array([0+0j,1+0j,2+0j,2+0j,5+0j,6+0j]) # eigenvalues of a1 - return mat_a1,w - def large_magnitude(self,typ,k): - a,aw = self.get_a1(typ) - w,v = eigen(a,k,which='LM') - for i in range(k): - assert_array_almost_equal(dot(a,v[:,i]),w[i]*v[:,i],decimal=5) - aw.real.sort() - w.real.sort() - assert_array_almost_equal(w,aw[-k:]) +class TestEigenComplexNonSymmetric(TestArpack): + def sort_choose(self,eval,typ,k,which): + eps=finfo(typ).eps + reval=round(eval,decimals=_ndigits[typ]) + if which in ['LR','SR']: + ind=argsort(reval) + elif which in ['LI','SI']: + ind=argsort(reval.imag) + else: + ind=argsort(abs(reval)) - def small_magnitude(self,typ,k): - a,aw = self.get_a1(typ) - w,v = eigen(a,k,which='SM') - for i in range(k): - assert_array_almost_equal(dot(a,v[:,i]),w[i]*v[:,i]) - aw.real.sort() - w.real.sort() - assert_array_almost_equal(w,aw[:k]) + if which in ['LR','LI','LM']: + return ind[-k:] + if which in ['SR','SI','SM']: + return ind[:k] - def large_real(self,typ,k): - a,aw = self.get_a1(typ) - w,v = eigen(a,k,which='LR') - for i in range(k): - assert_array_almost_equal(dot(a,v[:,i]),w[i]*v[:,i],decimal=5) - aw.real.sort() - w.real.sort() - assert_array_almost_equal(w,aw[-k:],decimal=5) + def eval_evec(self,d,typ,k,which): + a=d['mat'].astype(typ) + # get exact eigenvalues + exact_eval=d['eval'].astype(typ.upper()) + ind=self.sort_choose(exact_eval,typ,k,which) + exact_eval=exact_eval[ind] + print "exact" + print exact_eval - def small_real(self,typ,k): - a,aw = self.get_a1(typ) - w,v = eigen(a,k,which='SR') + # compute eigenvalues + eval,evec=eigen(a,k,which=which) + ind=self.sort_choose(eval,typ,k,which) + eval=eval[ind] + evec=evec[:,ind] + print eval + # check eigenvalues + # check eigenvectors A*evec=eval*evec for i in range(k): - assert_array_almost_equal(dot(a,v[:,i]),w[i]*v[:,i]) - aw.real.sort() - w.real.sort() - assert_array_almost_equal(w,aw[:k]) + assert_almost_equal_cc(eval[i],exact_eval[i],decimal=_ndigits[typ]) + assert_array_almost_equal_cc(dot(a,evec[:,i]), + eval[i]*evec[:,i], + decimal=_ndigits[typ]) -# def test_complex_symmetric(self): + +# def test_complex_nonsymmetric(self): # k=2 # for typ in 'FD': -# self.large_magnitude(typ,k) -# self.small_magnitude(typ,k) -# self.large_real(typ,k) -# self.small_real(typ,k) +# for which in ['LI','LR','LM','SI','SR','SM']: +# for m in self.nonsymmetric: +# self.eval_evec(m,typ,k,which) + if __name__ == "__main__": nose.run(argv=['', __file__]) From scipy-svn at scipy.org Wed Feb 6 16:11:46 2008 From: scipy-svn at scipy.org 
(scipy-svn at scipy.org) Date: Wed, 6 Feb 2008 15:11:46 -0600 (CST) Subject: [Scipy-svn] r3901 - trunk/scipy/ndimage Message-ID: <20080206211146.2FDC439C4C2@new.scipy.org> Author: tom.waite Date: 2008-02-06 15:11:37 -0600 (Wed, 06 Feb 2008) New Revision: 3901 Modified: trunk/scipy/ndimage/registration.py Log: Added first cut image G remap using trilinear interpolation. Soon to be replaced with spline. Modified: trunk/scipy/ndimage/registration.py =================================================================== --- trunk/scipy/ndimage/registration.py 2008-02-06 18:30:22 UTC (rev 3900) +++ trunk/scipy/ndimage/registration.py 2008-02-06 21:11:37 UTC (rev 3901) @@ -11,11 +11,27 @@ inputname = 'ANAT1_V0001.img' filename = os.path.join(os.path.split(__file__)[0], inputname) -def get_mappings(parm_vector): +def remap_image(image, parm_vector): + M_inverse = get_inverse_mappings(parm_vector) + # allocate the zero image + remaped_image = load_blank_image() + imdata = build_structs(step=1) + # trilinear interpolation mapping. to be replaced with splines + R.register_linear_resample(image['data'], remaped_image['data'], M_inverse, imdata['step']) + return remaped_image + +def get_inverse_mappings(parm_vector): # get the inverse mapping to rotate the G matrix to F space following registration - M_foreward = build_rotate_matrix(parm_vector) - M_inverse = N.linalg.inv(M_foreward) - return M_foreward, M_inverse + imdata = build_structs(step=1) + # inverse angles and translations + imdata['parms'][0] = -parm_vector[0] + imdata['parms'][1] = -parm_vector[1] + imdata['parms'][2] = -parm_vector[2] + imdata['parms'][3] = -parm_vector[3] + imdata['parms'][4] = -parm_vector[4] + imdata['parms'][5] = -parm_vector[5] + M_inverse = build_rotate_matrix(imdata['parms']) + return M_inverse def python_coreg(image1, image2, imdata, ftype=1, smimage=0, lite=0, smhist=0, method='nmi', opt_method='powell'): From scipy-svn at scipy.org Wed Feb 6 18:53:43 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Wed, 6 Feb 2008 17:53:43 -0600 (CST) Subject: [Scipy-svn] r3902 - in trunk/scipy/splinalg/eigen/arpack: . tests Message-ID: <20080206235343.BCEFA39C0C9@new.scipy.org> Author: hagberg Date: 2008-02-06 17:53:38 -0600 (Wed, 06 Feb 2008) New Revision: 3902 Modified: trunk/scipy/splinalg/eigen/arpack/arpack.py trunk/scipy/splinalg/eigen/arpack/tests/test_arpack.py Log: Allow starting vector for arpack eigensolver. Clean up type handling. Modified: trunk/scipy/splinalg/eigen/arpack/arpack.py =================================================================== --- trunk/scipy/splinalg/eigen/arpack/arpack.py 2008-02-06 21:11:37 UTC (rev 3901) +++ trunk/scipy/splinalg/eigen/arpack/arpack.py 2008-02-06 23:53:38 UTC (rev 3902) @@ -51,7 +51,7 @@ _ndigits = {'f':5, 'd':12, 'F':5, 'D':12} -def eigen(A, k=6, M=None, sigma=None, which='LM', +def eigen(A, k=6, M=None, sigma=None, which='LM', v0=None, ncv=None, maxiter=None, tol=0, return_eigenvectors=True): """Find k eigenvalues and eigenvectors of the square matrix A. @@ -91,6 +91,8 @@ (Not implemented) Find eigenvalues near sigma. Shift spectrum by sigma. + v0 : array + Starting vector for iteration. 
ncv : integer The number of Lanczos vectors generated @@ -126,18 +128,17 @@ """ A = aslinearoperator(A) - - if M is not None: - raise NotImplementedError("generalized eigenproblem not supported yet") - if A.shape[0] != A.shape[1]: raise ValueError('expected square matrix (shape=%s)' % shape) - n = A.shape[0] + # guess type + typ = A.dtype.char + if typ not in 'fdFD': + raise ValueError("matrix type must be 'f', 'd', 'F', or 'D'") + if M is not None: raise NotImplementedError("generalized eigenproblem not supported yet") - if sigma is not None: raise NotImplementedError("shifted eigenproblem not supported yet") @@ -148,15 +149,14 @@ ncv=min(ncv,n) if maxiter==None: maxiter=n*10 + # assign starting vector + if v0 is not None: + resid=v0 + info=1 + else: + resid = np.zeros(n,typ) + info=0 - # guess type - resid = np.zeros(n,'f') - try: - typ = A.dtype.char - except AttributeError: - typ = A.matvec(resid).dtype.char - if typ not in 'fdFD': - raise ValueError("matrix type must be 'f', 'd', 'F', or 'D'") # some sanity checks if k <= 0: @@ -175,20 +175,18 @@ ltr = _type_conv[typ] eigsolver = _arpack.__dict__[ltr+'naupd'] eigextract = _arpack.__dict__[ltr+'neupd'] - matvec = A.matvec v = np.zeros((n,ncv),typ) # holds Ritz vectors - resid = np.zeros(n,typ) # residual workd = np.zeros(3*n,typ) # workspace workl = np.zeros(3*ncv*ncv+6*ncv,typ) # workspace iparam = np.zeros(11,'int') # problem parameters ipntr = np.zeros(14,'int') # pointers into workspaces - info = 0 ido = 0 if typ in 'FD': rwork = np.zeros(ncv,typ.lower()) + # set solver mode and parameters # only supported mode is 1: Ax=lx ishfts = 1 mode1 = 1 @@ -207,12 +205,15 @@ eigsolver(ido,bmat,which,k,tol,resid,v,iparam,ipntr, workd,workl,rwork,info) - if (ido == -1 or ido == 1): - # compute y = A * x - xslice = slice(ipntr[0]-1, ipntr[0]-1+n) - yslice = slice(ipntr[1]-1, ipntr[1]-1+n) - workd[yslice]=matvec(workd[xslice]) - else: # done + xslice = slice(ipntr[0]-1, ipntr[0]-1+n) + yslice = slice(ipntr[1]-1, ipntr[1]-1+n) + if ido == -1: + # initialization + workd[yslice]=A.matvec(workd[xslice]) + elif ido == 1: + # compute y=Ax + workd[yslice]=A.matvec(workd[xslice]) + else: break if info < -1 : @@ -300,8 +301,8 @@ return d -def eigen_symmetric(A, k=6, M=None, sigma=None, which='LM', - ncv=None, maxiter=None, tol=0, v0=None, +def eigen_symmetric(A, k=6, M=None, sigma=None, which='LM', v0=None, + ncv=None, maxiter=None, tol=0, return_eigenvectors=True): """Find k eigenvalues and eigenvectors of the real symmetric square matrix A. @@ -341,6 +342,8 @@ (Not implemented) Find eigenvalues near sigma. Shift spectrum by sigma. + v0 : array + Starting vector for iteration. 
ncv : integer The number of Lanczos vectors generated @@ -375,32 +378,33 @@ -------- """ A = aslinearoperator(A) - if A.shape[0] != A.shape[1]: raise ValueError('expected square matrix (shape=%s)' % shape) - n = A.shape[0] + # guess type + typ = A.dtype.char + if typ not in 'fd': + raise ValueError("matrix must be real valued (type must be 'f' or 'd')") + if M is not None: raise NotImplementedError("generalized eigenproblem not supported yet") if sigma is not None: raise NotImplementedError("shifted eigenproblem not supported yet") + if ncv is None: ncv=2*k+1 ncv=min(ncv,n) if maxiter==None: maxiter=n*10 + # assign starting vector + if v0 is not None: + resid=v0 + info=1 + else: + resid = np.zeros(n,typ) + info=0 - - # guess type - resid = np.zeros(n,'f') - try: - typ = A.dtype.char - except AttributeError: - typ = A.matvec(resid).dtype.char - if typ not in 'fd': - raise ValueError("matrix must be real valued (type must be 'f' or 'd')") - # some sanity checks if k <= 0: raise ValueError("k must be positive, k=%d"%k) @@ -418,17 +422,16 @@ ltr = _type_conv[typ] eigsolver = _arpack.__dict__[ltr+'saupd'] eigextract = _arpack.__dict__[ltr+'seupd'] - matvec = A.matvec + # set output arrays, parameters, and workspace v = np.zeros((n,ncv),typ) - resid = np.zeros(n,typ) workd = np.zeros(3*n,typ) workl = np.zeros(ncv*(ncv+8),typ) iparam = np.zeros(11,'int') ipntr = np.zeros(11,'int') - info = 0 ido = 0 + # set solver mode and parameters # only supported mode is 1: Ax=lx ishfts = 1 mode1 = 1 @@ -439,12 +442,17 @@ while True: ido,resid,v,iparam,ipntr,info =\ - eigsolver(ido,bmat,which,k,tol,resid,v,iparam,ipntr, - workd,workl,info) - if (ido == -1 or ido == 1): - xslice = slice(ipntr[0]-1, ipntr[0]-1+n) - yslice = slice(ipntr[1]-1, ipntr[1]-1+n) - workd[yslice]=matvec(workd[xslice]) + eigsolver(ido,bmat,which,k,tol,resid,v, + iparam,ipntr,workd,workl,info) + + xslice = slice(ipntr[0]-1, ipntr[0]-1+n) + yslice = slice(ipntr[1]-1, ipntr[1]-1+n) + if ido == -1: + # initialization + workd[yslice]=A.matvec(workd[xslice]) + elif ido == 1: + # compute y=Ax + workd[yslice]=A.matvec(workd[xslice]) else: break Modified: trunk/scipy/splinalg/eigen/arpack/tests/test_arpack.py =================================================================== --- trunk/scipy/splinalg/eigen/arpack/tests/test_arpack.py 2008-02-06 21:11:37 UTC (rev 3901) +++ trunk/scipy/splinalg/eigen/arpack/tests/test_arpack.py 2008-02-06 23:53:38 UTC (rev 3902) @@ -8,7 +8,7 @@ from scipy.testing import * from numpy import array,real,imag,finfo,concatenate,\ - column_stack,argsort,dot,round,conj,sort + column_stack,argsort,dot,round,conj,sort,random from scipy.splinalg.eigen.arpack import eigen_symmetric,eigen @@ -89,10 +89,10 @@ high=range(len(eval))[-h:] return eval[low+high] - def eval_evec(self,d,typ,k,which): + def eval_evec(self,d,typ,k,which,**kwds): a=d['mat'].astype(typ) exact_eval=self.get_exact_eval(d,typ,k,which) - eval,evec=eigen_symmetric(a,k,which=which) + eval,evec=eigen_symmetric(a,k,which=which,**kwds) # check eigenvalues assert_array_almost_equal(eval,exact_eval,decimal=_ndigits[typ]) # check eigenvectors A*evec=eval*evec @@ -101,12 +101,20 @@ eval[i]*evec[:,i], decimal=_ndigits[typ]) - def test_symmetric(self): + def test_symmetric_modes(self): k=2 for typ in 'fd': for which in ['LM','SM','BE']: self.eval_evec(self.symmetric[0],typ,k,which) + def test_starting_vector(self): + k=2 + for typ in 'fd': + A=self.symmetric[0]['mat'] + n=A.shape[0] + v0 = random.rand(n).astype(typ) + 
self.eval_evec(self.symmetric[0],typ,k,which='LM',v0=v0) + class TestEigenComplexSymmetric(TestArpack): @@ -141,7 +149,7 @@ decimal=_ndigits[typ]) -# def test_complex_symmetric(self): +# def test_complex_symmetric_modes(self): # k=2 # for typ in 'FD': # for which in ['LM','SM','LR','SR']: @@ -168,14 +176,14 @@ return ind[:k] - def eval_evec(self,d,typ,k,which): + def eval_evec(self,d,typ,k,which,**kwds): a=d['mat'].astype(typ) # get exact eigenvalues exact_eval=d['eval'].astype(typ.upper()) ind=self.sort_choose(exact_eval,typ,k,which) exact_eval=exact_eval[ind] # compute eigenvalues - eval,evec=eigen(a,k,which=which) + eval,evec=eigen(a,k,which=which,**kwds) ind=self.sort_choose(eval,typ,k,which) eval=eval[ind] evec=evec[:,ind] @@ -188,7 +196,7 @@ decimal=_ndigits[typ]) - def test_nonsymmetric(self): + def test_nonsymmetric_modes(self): k=2 for typ in 'fd': for which in ['LI','LR','LM','SM','SR','SI']: @@ -197,8 +205,17 @@ + def test_starting_vector(self): + k=2 + for typ in 'fd': + A=self.symmetric[0]['mat'] + n=A.shape[0] + v0 = random.rand(n).astype(typ) + self.eval_evec(self.symmetric[0],typ,k,which='LM',v0=v0) + + class TestEigenComplexNonSymmetric(TestArpack): def sort_choose(self,eval,typ,k,which): @@ -241,7 +258,7 @@ decimal=_ndigits[typ]) -# def test_complex_nonsymmetric(self): +# def test_complex_nonsymmetric_modes(self): # k=2 # for typ in 'FD': # for which in ['LI','LR','LM','SI','SR','SM']: From scipy-svn at scipy.org Thu Feb 7 01:09:21 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Thu, 7 Feb 2008 00:09:21 -0600 (CST) Subject: [Scipy-svn] r3903 - in trunk/scipy/splinalg: . dsolve dsolve/SuperLU dsolve/SuperLU/SRC dsolve/tests dsolve/umfpack dsolve/umfpack/tests eigen eigen/arpack eigen/arpack/ARPACK eigen/arpack/ARPACK/LAPACK eigen/arpack/ARPACK/SRC eigen/arpack/ARPACK/UTIL eigen/arpack/tests isolve isolve/iterative isolve/tests tests Message-ID: <20080207060921.AFC1039C263@new.scipy.org> Author: wnbell Date: 2008-02-07 00:09:12 -0600 (Thu, 07 Feb 2008) New Revision: 3903 Modified: trunk/scipy/splinalg/ trunk/scipy/splinalg/dsolve/ trunk/scipy/splinalg/dsolve/SuperLU/ trunk/scipy/splinalg/dsolve/SuperLU/SRC/ trunk/scipy/splinalg/dsolve/tests/ trunk/scipy/splinalg/dsolve/umfpack/ trunk/scipy/splinalg/dsolve/umfpack/tests/ trunk/scipy/splinalg/eigen/ trunk/scipy/splinalg/eigen/arpack/ trunk/scipy/splinalg/eigen/arpack/ARPACK/ trunk/scipy/splinalg/eigen/arpack/ARPACK/LAPACK/ trunk/scipy/splinalg/eigen/arpack/ARPACK/SRC/ trunk/scipy/splinalg/eigen/arpack/ARPACK/UTIL/ trunk/scipy/splinalg/eigen/arpack/tests/ trunk/scipy/splinalg/isolve/ trunk/scipy/splinalg/isolve/iterative/ trunk/scipy/splinalg/isolve/tests/ trunk/scipy/splinalg/tests/ Log: set ignore on splinalg Property changes on: trunk/scipy/splinalg ___________________________________________________________________ Name: svn:ignore - *.pyc + # Run with the following command: #svn -R propset svn:ignore -F .svnignore . *.pyc *.bak *.so *.swp Property changes on: trunk/scipy/splinalg/dsolve ___________________________________________________________________ Name: svn:ignore - *.pyc + # Run with the following command: #svn -R propset svn:ignore -F .svnignore . *.pyc *.bak *.so *.swp Property changes on: trunk/scipy/splinalg/dsolve/SuperLU ___________________________________________________________________ Name: svn:ignore - *.pyc + # Run with the following command: #svn -R propset svn:ignore -F .svnignore . 
*.pyc *.bak *.so *.swp Property changes on: trunk/scipy/splinalg/dsolve/SuperLU/SRC ___________________________________________________________________ Name: svn:ignore - *.pyc + # Run with the following command: #svn -R propset svn:ignore -F .svnignore . *.pyc *.bak *.so *.swp Property changes on: trunk/scipy/splinalg/dsolve/tests ___________________________________________________________________ Name: svn:ignore - *.pyc + # Run with the following command: #svn -R propset svn:ignore -F .svnignore . *.pyc *.bak *.so *.swp Property changes on: trunk/scipy/splinalg/dsolve/umfpack ___________________________________________________________________ Name: svn:ignore - *.pyc + # Run with the following command: #svn -R propset svn:ignore -F .svnignore . *.pyc *.bak *.so *.swp Property changes on: trunk/scipy/splinalg/dsolve/umfpack/tests ___________________________________________________________________ Name: svn:ignore - *.pyc + # Run with the following command: #svn -R propset svn:ignore -F .svnignore . *.pyc *.bak *.so *.swp Property changes on: trunk/scipy/splinalg/eigen ___________________________________________________________________ Name: svn:ignore - *.pyc + # Run with the following command: #svn -R propset svn:ignore -F .svnignore . *.pyc *.bak *.so *.swp Property changes on: trunk/scipy/splinalg/eigen/arpack ___________________________________________________________________ Name: svn:ignore + # Run with the following command: #svn -R propset svn:ignore -F .svnignore . *.pyc *.bak *.so *.swp Property changes on: trunk/scipy/splinalg/eigen/arpack/ARPACK ___________________________________________________________________ Name: svn:ignore + # Run with the following command: #svn -R propset svn:ignore -F .svnignore . *.pyc *.bak *.so *.swp Property changes on: trunk/scipy/splinalg/eigen/arpack/ARPACK/LAPACK ___________________________________________________________________ Name: svn:ignore + # Run with the following command: #svn -R propset svn:ignore -F .svnignore . *.pyc *.bak *.so *.swp Property changes on: trunk/scipy/splinalg/eigen/arpack/ARPACK/SRC ___________________________________________________________________ Name: svn:ignore + # Run with the following command: #svn -R propset svn:ignore -F .svnignore . *.pyc *.bak *.so *.swp Property changes on: trunk/scipy/splinalg/eigen/arpack/ARPACK/UTIL ___________________________________________________________________ Name: svn:ignore + # Run with the following command: #svn -R propset svn:ignore -F .svnignore . *.pyc *.bak *.so *.swp Property changes on: trunk/scipy/splinalg/eigen/arpack/tests ___________________________________________________________________ Name: svn:ignore + # Run with the following command: #svn -R propset svn:ignore -F .svnignore . *.pyc *.bak *.so *.swp Property changes on: trunk/scipy/splinalg/isolve ___________________________________________________________________ Name: svn:ignore - *.pyc + # Run with the following command: #svn -R propset svn:ignore -F .svnignore . *.pyc *.bak *.so *.swp Property changes on: trunk/scipy/splinalg/isolve/iterative ___________________________________________________________________ Name: svn:ignore - *.pyc + # Run with the following command: #svn -R propset svn:ignore -F .svnignore . *.pyc *.bak *.so *.swp Property changes on: trunk/scipy/splinalg/isolve/tests ___________________________________________________________________ Name: svn:ignore - *.pyc + # Run with the following command: #svn -R propset svn:ignore -F .svnignore . 
*.pyc *.bak *.so *.swp Property changes on: trunk/scipy/splinalg/tests ___________________________________________________________________ Name: svn:ignore + # Run with the following command: #svn -R propset svn:ignore -F .svnignore . *.pyc *.bak *.so *.swp From scipy-svn at scipy.org Fri Feb 8 09:14:29 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Fri, 8 Feb 2008 08:14:29 -0600 (CST) Subject: [Scipy-svn] r3904 - trunk/scipy/splinalg/eigen/arpack Message-ID: <20080208141429.A1EB639C08E@new.scipy.org> Author: hagberg Date: 2008-02-08 08:14:23 -0600 (Fri, 08 Feb 2008) New Revision: 3904 Modified: trunk/scipy/splinalg/eigen/arpack/arpack.py Log: More careful handling on which eigenvalues and eigenvectors returned for arpack nonsymmetric case. Modified: trunk/scipy/splinalg/eigen/arpack/arpack.py =================================================================== --- trunk/scipy/splinalg/eigen/arpack/arpack.py 2008-02-07 06:09:12 UTC (rev 3903) +++ trunk/scipy/splinalg/eigen/arpack/arpack.py 2008-02-08 14:14:23 UTC (rev 3904) @@ -264,26 +264,30 @@ i+=1 i+=1 - # Now we have k+1 eigenvalues and eigenvectors + # Now we have k+1 possible eigenvalues and eigenvectors # Return the ones specified by the keyword "which" + nreturned=iparam[4] # number of good eigenvalues returned + if nreturned==k: # we got exactly how many eigenvalues we wanted + d=d[:k] + z=z[:,:k] + else: # we got one extra eigenvalue (likely a cc pair, but which?) + # cut at approx precision for sorting + rd=np.round(d,decimals=_ndigits[typ]) + if which in ['LR','SR']: + ind=np.argsort(rd.real) + elif which in ['LI','SI']: + # for LI,SI ARPACK returns largest,smallest abs(imaginary) why? + ind=np.argsort(abs(rd.imag)) + else: + ind=np.argsort(abs(rd)) + if which in ['LR','LM','LI']: + d=d[ind[-k:]] + z=z[:,ind[-k:]] + if which in ['SR','SM','SI']: + d=d[ind[:k]] + z=z[:,ind[:k]] - # cut at approx precision for sorting - rd=np.round(d,decimals=_ndigits[typ]) - if which in ['LR','SR']: - ind=np.argsort(rd.real) - elif which in ['LI','SI']: - # for LI,SI ARPACK returns largest,smallest abs(imaginary) why? - ind=np.argsort(abs(rd.imag)) - else: - ind=np.argsort(abs(rd)) - if which in ['LR','LM','LI']: - d=d[ind[-k:]] - z=z[:,ind[-k:]] - if which in ['SR','SM','SI']: - d=d[ind[:k]] - z=z[:,ind[:k]] - else: # complex is so much simpler... 
d,z,info =\ From scipy-svn at scipy.org Fri Feb 8 11:07:16 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Fri, 8 Feb 2008 10:07:16 -0600 (CST) Subject: [Scipy-svn] r3905 - trunk/scipy/splinalg/eigen/arpack Message-ID: <20080208160716.C750939C284@new.scipy.org> Author: hagberg Date: 2008-02-08 10:07:14 -0600 (Fri, 08 Feb 2008) New Revision: 3905 Modified: trunk/scipy/splinalg/eigen/arpack/arpack.py Log: arpack: return eigenvectors as a matrix instead of array Modified: trunk/scipy/splinalg/eigen/arpack/arpack.py =================================================================== --- trunk/scipy/splinalg/eigen/arpack/arpack.py 2008-02-08 14:14:23 UTC (rev 3904) +++ trunk/scipy/splinalg/eigen/arpack/arpack.py 2008-02-08 16:07:14 UTC (rev 3905) @@ -301,7 +301,7 @@ raise RuntimeError("Error info=%d in arpack"%info) return None if return_eigenvectors: - return d,z + return d,np.asmatrix(z) return d @@ -482,5 +482,5 @@ raise RuntimeError("Error info=%d in arpack"%info) return None if return_eigenvectors: - return d,z + return d,np.asmatrix(z) return d From scipy-svn at scipy.org Fri Feb 8 12:57:54 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Fri, 8 Feb 2008 11:57:54 -0600 (CST) Subject: [Scipy-svn] r3906 - branches Message-ID: <20080208175754.7CC3539C4FE@new.scipy.org> Author: chris.burns Date: 2008-02-08 11:57:38 -0600 (Fri, 08 Feb 2008) New Revision: 3906 Removed: branches/ndimage-register/ Log: Remove unused branch From scipy-svn at scipy.org Sat Feb 9 00:27:57 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Fri, 8 Feb 2008 23:27:57 -0600 (CST) Subject: [Scipy-svn] r3907 - trunk/scipy/sandbox Message-ID: <20080209052757.0D0C139C4B5@new.scipy.org> Author: wnbell Date: 2008-02-08 23:27:55 -0600 (Fri, 08 Feb 2008) New Revision: 3907 Removed: trunk/scipy/sandbox/multigrid/ Log: removed multigrid from the sandbox the code is now available as PyAMG: http://code.google.com/p/pyamg/ From scipy-svn at scipy.org Sat Feb 9 02:25:30 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Sat, 9 Feb 2008 01:25:30 -0600 (CST) Subject: [Scipy-svn] r3908 - in trunk/scipy/sparse: . 
tests Message-ID: <20080209072530.415B139C012@new.scipy.org> Author: wnbell Date: 2008-02-09 01:25:21 -0600 (Sat, 09 Feb 2008) New Revision: 3908 Modified: trunk/scipy/sparse/construct.py trunk/scipy/sparse/tests/test_construct.py Log: added sparse.bmat partially addresses ticket #602 Modified: trunk/scipy/sparse/construct.py =================================================================== --- trunk/scipy/sparse/construct.py 2008-02-09 05:27:55 UTC (rev 3907) +++ trunk/scipy/sparse/construct.py 2008-02-09 07:25:21 UTC (rev 3908) @@ -2,14 +2,17 @@ """ -__all__ = [ 'spdiags','speye','spidentity','spkron', 'lil_eye', 'lil_diags' ] +__all__ = [ 'spdiags','speye','spidentity','spkron', 'bmat', 'lil_eye', 'lil_diags' ] from itertools import izip from warnings import warn import numpy -from numpy import ones, clip, array, arange, intc +from numpy import ones, clip, array, arange, intc, asarray, rank, zeros, \ + cumsum, concatenate, empty +from sputils import upcast + from csr import csr_matrix, isspmatrix_csr from csc import csc_matrix, isspmatrix_csc from bsr import bsr_matrix @@ -228,3 +231,104 @@ return out +def bmat( blocks, format=None, dtype=None ): + """ + Build a sparse matrix from sparse sub-blocks + + Parameters + ========== + + blocks -- grid of sparse matrices with compatible shapes + - an entry of None implies an all-zero matrix + format -- sparse format of the result (e.g. "csr") + - by default an appropriate sparse matrix format is returned. + This choice is subject to change. + + + Example + ======= + + >>> from scipy.sparse import coo_matrix, bmat + >>> A = coo_matrix([[1,2],[3,4]]) + >>> B = coo_matrix([[5],[6]]) + >>> C = coo_matrix([[7]]) + >>> bmat( [[A,B],[None,C]] ).todense() + matrix([[1, 2, 5], + [3, 4, 6], + [0, 0, 7]]) + + >>> bmat( [[A,None],[None,C]] ).todense() + matrix([[1, 2, 0], + [3, 4, 0], + [0, 0, 7]]) + + + """ + + blocks = asarray(blocks, dtype='object') + + if rank(blocks) != 2: + raise ValueError('blocks must have rank 2') + + M,N = blocks.shape + + block_mask = zeros( blocks.shape, dtype='bool' ) + brow_lengths = zeros( blocks.shape[0], dtype=int ) + bcol_lengths = zeros( blocks.shape[1], dtype=int ) + + # convert everything to COO format + for i in range(M): + for j in range(N): + if blocks[i,j] is not None: + A = coo_matrix(blocks[i,j]) + blocks[i,j] = A + block_mask[i,j] = True + + if brow_lengths[i] == 0: + brow_lengths[i] = A.shape[0] + else: + if brow_lengths[i] != A.shape[0]: + raise ValueError('blocks[%d,:] has incompatible row dimensions' % i) + + if bcol_lengths[j] == 0: + bcol_lengths[j] = A.shape[1] + else: + if bcol_lengths[j] != A.shape[0]: + raise ValueError('blocks[:,%d] has incompatible column dimensions' % j) + + + # ensure that at least one value in each row and col is not None + if brow_lengths.min() == 0: + raise ValueError('blocks[%d,:] is all None' % brow_lengths.argmin() ) + if bcol_lengths.min() == 0: + raise ValueError('blocks[:,%d] is all None' % bcol_lengths.argmin() ) + + nnz = sum([ A.nnz for A in blocks[block_mask] ]) + if dtype is None: + dtype = upcast( *tuple([A.dtype for A in blocks[block_mask]]) ) + + row_offsets = concatenate(([0],cumsum(brow_lengths))) + col_offsets = concatenate(([0],cumsum(bcol_lengths))) + + data = empty(nnz, dtype=dtype) + row = empty(nnz, dtype=intc) + col = empty(nnz, dtype=intc) + + nnz = 0 + for i in range(M): + for j in range(N): + if blocks[i,j] is not None: + A = blocks[i,j] + data[nnz:nnz + A.nnz] = A.data + row[nnz:nnz + A.nnz] = A.row + col[nnz:nnz + A.nnz] = A.col + + row[nnz:nnz + 
A.nnz] += row_offsets[i] + col[nnz:nnz + A.nnz] += col_offsets[j] + + nnz += A.nnz + + shape = (sum(brow_lengths),sum(bcol_lengths)) + return coo_matrix( (data, (row, col)), shape=shape ) + + Modified: trunk/scipy/sparse/tests/test_construct.py =================================================================== --- trunk/scipy/sparse/tests/test_construct.py 2008-02-09 05:27:55 UTC (rev 3907) +++ trunk/scipy/sparse/tests/test_construct.py 2008-02-09 07:25:21 UTC (rev 3908) @@ -1,13 +1,13 @@ """test sparse matrix construction functions""" -from numpy import array, kron +from numpy import array, matrix, kron from scipy.testing import * -from scipy.sparse import csr_matrix, \ - spidentity, speye, spkron, spdiags, \ - lil_eye, lil_diags +from scipy.sparse import csr_matrix, coo_matrix +from scipy.sparse.construct import * + #TODO check whether format=XXX is respected class TestConstructUtils(TestCase): @@ -101,6 +101,30 @@ assert_array_equal(result,expected) + def test_bmat(self): + + A = coo_matrix([[1,2],[3,4]]) + B = coo_matrix([[5],[6]]) + C = coo_matrix([[7]]) + + expected = matrix([[1, 2, 5], + [3, 4, 6], + [0, 0, 7]]) + assert_equal( bmat( [[A,B],[None,C]] ).todense(), expected ) + + + expected = matrix([[1, 2, 0], + [3, 4, 0], + [0, 0, 7]]) + assert_equal( bmat( [[A,None],[None,C]] ).todense(), expected ) + + expected = matrix([[0, 5], + [0, 6], + [7, 0]]) + assert_equal( bmat( [[None,B],[C,None]] ).todense(), expected ) + + #TODO test failure cases + def test_lil_diags(self): assert_array_equal(lil_diags([[1,2,3],[4,5],[6]], [0,1,2],(3,3)).todense(), From scipy-svn at scipy.org Sat Feb 9 02:26:53 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Sat, 9 Feb 2008 01:26:53 -0600 (CST) Subject: [Scipy-svn] r3909 - trunk/scipy/sparse Message-ID: <20080209072653.E13E639C012@new.scipy.org> Author: wnbell Date: 2008-02-09 01:26:50 -0600 (Sat, 09 Feb 2008) New Revision: 3909 Modified: trunk/scipy/sparse/construct.py Log: added missing asformat to bmat() Modified: trunk/scipy/sparse/construct.py =================================================================== --- trunk/scipy/sparse/construct.py 2008-02-09 07:25:21 UTC (rev 3908) +++ trunk/scipy/sparse/construct.py 2008-02-09 07:26:50 UTC (rev 3909) @@ -329,6 +329,6 @@ nnz += A.nnz shape = (sum(brow_lengths),sum(bcol_lengths)) - return coo_matrix( (data, (row, col)), shape=shape ) + return coo_matrix( (data, (row, col)), shape=shape ).asformat(format) From scipy-svn at scipy.org Sat Feb 9 16:02:39 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Sat, 9 Feb 2008 15:02:39 -0600 (CST) Subject: [Scipy-svn] r3910 - in trunk/scipy/sparse: . 
tests Message-ID: <20080209210239.0A56339C073@new.scipy.org> Author: wnbell Date: 2008-02-09 15:02:27 -0600 (Sat, 09 Feb 2008) New Revision: 3910 Modified: trunk/scipy/sparse/construct.py trunk/scipy/sparse/tests/test_base.py trunk/scipy/sparse/tests/test_construct.py Log: added sparse.kronsum and sparse.kron deprecated sparse.spkron Modified: trunk/scipy/sparse/construct.py =================================================================== --- trunk/scipy/sparse/construct.py 2008-02-09 07:26:50 UTC (rev 3909) +++ trunk/scipy/sparse/construct.py 2008-02-09 21:02:27 UTC (rev 3910) @@ -2,7 +2,7 @@ """ -__all__ = [ 'spdiags','speye','spidentity','spkron', 'bmat', 'lil_eye', 'lil_diags' ] +__all__ = [ 'spdiags','speye','spidentity', 'spkron', 'kron', 'kronsum', 'bmat', 'lil_eye', 'lil_diags' ] from itertools import izip from warnings import warn @@ -81,7 +81,7 @@ diags = ones((1, m), dtype = dtype) return spdiags(diags, k, m, n).asformat(format) -def spkron(A, B, format=None): +def kron(A, B, format=None): """kronecker product of sparse matrices A and B Parameters @@ -100,13 +100,13 @@ >>> A = csr_matrix(array([[0,2],[5,0]])) >>> B = csr_matrix(array([[1,2],[3,4]])) - >>> spkron(A,B).todense() + >>> kron(A,B).todense() matrix([[ 0, 0, 2, 4], [ 0, 0, 6, 8], [ 5, 10, 0, 0], [15, 20, 0, 0]]) - >>> spkron(A,[[1,2],[3,4]]).todense() + >>> kron(A,[[1,2],[3,4]]).todense() matrix([[ 0, 0, 2, 4], [ 0, 0, 6, 8], [ 5, 10, 0, 0], @@ -159,78 +159,49 @@ return coo_matrix((data,(row,col)), shape=output_shape).asformat(format) +def kronsum(A, B, format=None): + """kronecker sum of sparse matrices A and B + Kronecker sum of two sparse matrices is a sum of two Kronecker + products kron(I_n,A) + kron(B,I_m) where A has shape (m,m) + and B has shape (n,n) and I_m and I_n are identity matrices + of shape (m,m) and (n,n) respectively. - -def lil_eye((r,c), k=0, dtype='d'): - """Generate a lil_matrix of dimensions (r,c) with the k-th - diagonal set to 1. - Parameters ========== - - r,c : int - - row and column-dimensions of the output. - - k : int - - diagonal offset. In the output matrix, - - out[m,m+k] == 1 for all m. - - dtype : dtype - - data-type of the output array. + A,B : squared dense or sparse matrices + format : format of the result (e.g. "csr") + - By default (format=None) an appropriate sparse matrix + format is returned. This choice is subject to change. - """ - warn("lil_eye is deprecated. use speye(... , format='lil') instead", \ - DeprecationWarning) - return speye(r,c,k,dtype=dtype,format='lil') + Returns + ======= + kronecker sum in a sparse matrix format + Examples + ======== + + """ + A = coo_matrix(A) + B = coo_matrix(B) -#TODO remove this function -def lil_diags(diags,offsets,(m,n),dtype='d'): - """Generate a lil_matrix with the given diagonals. + if A.shape[0] != A.shape[1]: + raise ValueError('A is not square') + + if B.shape[0] != B.shape[1]: + raise ValueError('B is not square') - Parameters - ========== - - diags : list of list of values e.g. [[1,2,3],[4,5]] - - values to be placed on each indicated diagonal. - - offsets : list of ints - - diagonal offsets. This indicates the diagonal on which - the given values should be placed. - - (r,c) : tuple of ints - - row and column dimensions of the output. - - dtype : dtype - - output data-type. 
+ dtype = upcast(A.dtype,B.dtype) - Example - ======= + L = kron(spidentity(B.shape[0],dtype=dtype), A, format=format) + R = kron(B, spidentity(A.shape[0],dtype=dtype), format=format) - >>> lil_diags([[1,2,3],[4,5],[6]],[0,1,2],(3,3)).todense() - matrix([[ 1., 4., 6.], - [ 0., 2., 5.], - [ 0., 0., 3.]]) + return (L+R).asformat(format) #since L + R is not always same format - """ - offsets_unsorted = list(offsets) - diags_unsorted = list(diags) - if len(diags) != len(offsets): - raise ValueError("Number of diagonals provided should " - "agree with offsets.") - sort_indices = numpy.argsort(offsets_unsorted) - diags = [diags_unsorted[k] for k in sort_indices] - offsets = [offsets_unsorted[k] for k in sort_indices] - for i,k in enumerate(offsets): - if len(diags[i]) < m-abs(k): - raise ValueError("Not enough values specified to fill " - "diagonal %s." % k) - out = lil_matrix((m,n),dtype=dtype) - for k,diag in izip(offsets,diags): - for ix,c in enumerate(xrange(clip(k,0,n),clip(m+k,0,n))): - out.rows[c-k].append(c) - out.data[c-k].append(diag[ix]) - return out - - def bmat( blocks, format=None, dtype=None ): """ Build a sparse matrix from sparse sub-blocks @@ -332,3 +303,79 @@ return coo_matrix( (data, (row, col)), shape=shape ).asformat(format) + +################################# +# Deprecated functions +################################ +from numpy import deprecate + +spkron = deprecate(kron, oldname='spkron', newname='kron') + +def lil_eye((r,c), k=0, dtype='d'): + """Generate a lil_matrix of dimensions (r,c) with the k-th + diagonal set to 1. + + Parameters + ========== + - r,c : int + - row and column-dimensions of the output. + - k : int + - diagonal offset. In the output matrix, + - out[m,m+k] == 1 for all m. + - dtype : dtype + - data-type of the output array. + + """ + warn("lil_eye is deprecated. use speye(... , format='lil') instead", \ + DeprecationWarning) + return speye(r,c,k,dtype=dtype,format='lil') + + + +#TODO remove this function +def lil_diags(diags,offsets,(m,n),dtype='d'): + """Generate a lil_matrix with the given diagonals. + + Parameters + ========== + - diags : list of list of values e.g. [[1,2,3],[4,5]] + - values to be placed on each indicated diagonal. + - offsets : list of ints + - diagonal offsets. This indicates the diagonal on which + the given values should be placed. + - (r,c) : tuple of ints + - row and column dimensions of the output. + - dtype : dtype + - output data-type. + + Example + ======= + + >>> lil_diags([[1,2,3],[4,5],[6]],[0,1,2],(3,3)).todense() + matrix([[ 1., 4., 6.], + [ 0., 2., 5.], + [ 0., 0., 3.]]) + + """ + offsets_unsorted = list(offsets) + diags_unsorted = list(diags) + if len(diags) != len(offsets): + raise ValueError("Number of diagonals provided should " + "agree with offsets.") + + sort_indices = numpy.argsort(offsets_unsorted) + diags = [diags_unsorted[k] for k in sort_indices] + offsets = [offsets_unsorted[k] for k in sort_indices] + + for i,k in enumerate(offsets): + if len(diags[i]) < m-abs(k): + raise ValueError("Not enough values specified to fill " + "diagonal %s." 
% k) + + out = lil_matrix((m,n),dtype=dtype) + for k,diag in izip(offsets,diags): + for ix,c in enumerate(xrange(clip(k,0,n),clip(m+k,0,n))): + out.rows[c-k].append(c) + out.data[c-k].append(diag[ix]) + return out + Modified: trunk/scipy/sparse/tests/test_base.py =================================================================== --- trunk/scipy/sparse/tests/test_base.py 2008-02-09 07:26:50 UTC (rev 3909) +++ trunk/scipy/sparse/tests/test_base.py 2008-02-09 21:02:27 UTC (rev 3910) @@ -17,14 +17,15 @@ import numpy from numpy import arange, zeros, array, dot, ones, matrix, asmatrix, \ - asarray, vstack, ndarray, kron, transpose, diag + asarray, vstack, ndarray, transpose, diag import random from scipy.testing import * +import scipy.sparse as sparse from scipy.sparse import csc_matrix, csr_matrix, dok_matrix, \ coo_matrix, lil_matrix, dia_matrix, bsr_matrix, \ - speye, spkron, SparseEfficiencyWarning + speye, SparseEfficiencyWarning from scipy.sparse.sputils import supported_dtypes from scipy.splinalg import splu @@ -84,12 +85,12 @@ mats.append( [[0,1],[0,2],[0,3]] ) mats.append( [[0,0,1],[0,0,2],[0,3,0]] ) - mats.append( kron(mats[0],[[1,2]]) ) - mats.append( kron(mats[0],[[1],[2]]) ) - mats.append( kron(mats[1],[[1,2],[3,4]]) ) - mats.append( kron(mats[2],[[1,2],[3,4]]) ) - mats.append( kron(mats[3],[[1,2],[3,4]]) ) - mats.append( kron(mats[3],[[1,2,3,4]]) ) + mats.append( numpy.kron(mats[0],[[1,2]]) ) + mats.append( numpy.kron(mats[0],[[1],[2]]) ) + mats.append( numpy.kron(mats[1],[[1,2],[3,4]]) ) + mats.append( numpy.kron(mats[2],[[1,2],[3,4]]) ) + mats.append( numpy.kron(mats[3],[[1,2],[3,4]]) ) + mats.append( numpy.kron(mats[3],[[1,2,3,4]]) ) for m in mats: assert_equal(self.spmatrix(m).diagonal(),diag(m)) @@ -345,7 +346,7 @@ assert_equal( result, dot(a,b) ) def test_formatconversions(self): - A = spkron([[1,0,1],[0,1,1],[1,0,0]], [[1,1],[0,1]] ) + A = sparse.kron([[1,0,1],[0,1,1],[1,0,0]], [[1,1],[0,1]] ) D = A.todense() A = self.spmatrix(A) @@ -371,7 +372,7 @@ def test_tocompressedblock(self): x = array([[1,0,2,0],[0,0,0,0],[0,0,4,5]]) y = array([[0,1,2],[3,0,5]]) - A = kron(x,y) + A = numpy.kron(x,y) Asp = self.spmatrix(A) for format in ['bsr']: fn = getattr(Asp, 'to' + format ) @@ -1277,7 +1278,7 @@ data[3] = array([[ 0, 5, 10], [15, 0, 25]]) - A = kron( [[1,0,2,0],[0,0,0,0],[0,0,4,5]], [[0,1,2],[3,0,5]] ) + A = numpy.kron( [[1,0,2,0],[0,0,0,0],[0,0,4,5]], [[0,1,2],[3,0,5]] ) Asp = bsr_matrix((data,indices,indptr),shape=(6,12)) assert_equal(Asp.todense(),A) @@ -1296,7 +1297,7 @@ assert_equal(bsr_matrix(A,blocksize=(2,2)).todense(),A) assert_equal(bsr_matrix(A,blocksize=(2,3)).todense(),A) - A = kron( [[1,0,2,0],[0,0,0,0],[0,0,4,5]], [[0,1,2],[3,0,5]] ) + A = numpy.kron( [[1,0,2,0],[0,0,0,0],[0,0,4,5]], [[0,1,2],[3,0,5]] ) assert_equal(bsr_matrix(A).todense(),A) assert_equal(bsr_matrix(A,shape=(6,12)).todense(),A) assert_equal(bsr_matrix(A,blocksize=(1,1)).todense(),A) @@ -1306,11 +1307,11 @@ assert_equal(bsr_matrix(A,blocksize=(3,12)).todense(),A) assert_equal(bsr_matrix(A,blocksize=(6,12)).todense(),A) - A = kron( [[1,0,2,0],[0,1,0,0],[0,0,0,0]], [[0,1,2],[3,0,5]] ) + A = numpy.kron( [[1,0,2,0],[0,1,0,0],[0,0,0,0]], [[0,1,2],[3,0,5]] ) assert_equal(bsr_matrix(A,blocksize=(2,3)).todense(),A) def test_eliminate_zeros(self): - data = kron([1, 0, 0, 0, 2, 0, 3, 0], [[1,1],[1,1]]).T + data = numpy.kron([1, 0, 0, 0, 2, 0, 3, 0], [[1,1],[1,1]]).T data = data.reshape(-1,2,2) indices = array( [1, 2, 3, 4, 5, 6, 7, 8] ) indptr = array( [0, 3, 8] ) Modified: 
trunk/scipy/sparse/tests/test_construct.py =================================================================== --- trunk/scipy/sparse/tests/test_construct.py 2008-02-09 07:26:50 UTC (rev 3909) +++ trunk/scipy/sparse/tests/test_construct.py 2008-02-09 21:02:27 UTC (rev 3910) @@ -1,6 +1,7 @@ """test sparse matrix construction functions""" -from numpy import array, matrix, kron +import numpy +from numpy import array, matrix from scipy.testing import * @@ -77,7 +78,7 @@ b = array([[1, 0, 0], [0, 1, 0], [0, 0, 1]], dtype='d') assert_array_equal(a.toarray(), b) - def test_spkron(self): + def test_kron(self): cases = [] cases.append(array([[ 0]])) @@ -96,11 +97,30 @@ for a in cases: for b in cases: - result = spkron(csr_matrix(a),csr_matrix(b)).todense() - expected = kron(a,b) + result = kron(csr_matrix(a),csr_matrix(b)).todense() + expected = numpy.kron(a,b) + assert_array_equal(result,expected) + def test_kronsum(self): + cases = [] + + cases.append(array([[ 0]])) + cases.append(array([[-1]])) + cases.append(array([[ 4]])) + cases.append(array([[10]])) + cases.append(array([[1,2],[3,4]])) + cases.append(array([[0,2],[5,0]])) + cases.append(array([[0,2,-6],[8,0,14],[0,3,0]])) + cases.append(array([[1,0,0],[0,5,-1],[4,-2,8]])) + + for a in cases: + for b in cases: + result = kronsum(csr_matrix(a),csr_matrix(b)).todense() + expected = numpy.kron(numpy.eye(len(b)), a) + \ + numpy.kron(b, numpy.eye(len(a))) assert_array_equal(result,expected) + def test_bmat(self): A = coo_matrix([[1,2],[3,4]]) From scipy-svn at scipy.org Sat Feb 9 16:28:47 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Sat, 9 Feb 2008 15:28:47 -0600 (CST) Subject: [Scipy-svn] r3911 - in trunk/scipy/sparse: . tests Message-ID: <20080209212847.99E3039C21C@new.scipy.org> Author: wnbell Date: 2008-02-09 15:28:39 -0600 (Sat, 09 Feb 2008) New Revision: 3911 Modified: trunk/scipy/sparse/base.py trunk/scipy/sparse/construct.py trunk/scipy/sparse/tests/test_base.py trunk/scipy/sparse/tests/test_construct.py Log: deprecated speye and spidentity in favor of sparse.eye and sparse.identity Modified: trunk/scipy/sparse/base.py =================================================================== --- trunk/scipy/sparse/base.py 2008-02-09 21:02:27 UTC (rev 3910) +++ trunk/scipy/sparse/base.py 2008-02-09 21:28:39 UTC (rev 3911) @@ -267,8 +267,8 @@ raise ValueError,'exponent must be >= 0' if other == 0: - from construct import spidentity - return spidentity( self.shape[0], dtype=self.dtype ) + from construct import identity + return identity( self.shape[0], dtype=self.dtype ) elif other == 1: return self.copy() else: Modified: trunk/scipy/sparse/construct.py =================================================================== --- trunk/scipy/sparse/construct.py 2008-02-09 21:02:27 UTC (rev 3910) +++ trunk/scipy/sparse/construct.py 2008-02-09 21:28:39 UTC (rev 3911) @@ -1,8 +1,8 @@ """ Functions to construct sparse matrices """ +__all__ = [ 'spdiags', 'eye', 'identity', 'kron', 'kronsum', 'bmat' ] -__all__ = [ 'spdiags','speye','spidentity', 'spkron', 'kron', 'kronsum', 'bmat', 'lil_eye', 'lil_diags' ] from itertools import izip from warnings import warn @@ -57,8 +57,8 @@ """ return dia_matrix((data, diags), shape=(m,n)).asformat(format) -def spidentity(n, dtype='d', format=None): - """spidentity(n) returns an (n x n) identity matrix""" +def identity(n, dtype='d', format=None): + """identity(n) returns a sparse (n x n) identity matrix""" if format in ['csr','csc']: indptr = arange(n+1, dtype=intc) indices = arange(n, dtype=intc) @@ 
-69,16 +69,15 @@ row = arange(n, dtype=intc) col = arange(n, dtype=intc) data = ones(n, dtype=dtype) - cls = eval('%s_matrix' % format) return coo_matrix((data,(row,col)),(n,n)) else: - return spidentity( n, dtype=dtype, format='csr').asformat(format) + return identity( n, dtype=dtype, format='csr').asformat(format) -def speye(m, n, k=0, dtype='d', format=None): - """speye(m, n) returns an (m x n) matrix where the k-th diagonal +def eye(m, n, k=0, dtype='d', format=None): + """eye(m, n) returns a sparse (m x n) matrix where the k-th diagonal is all ones and everything else is zeros. """ - diags = ones((1, m), dtype = dtype) + diags = ones((1, m), dtype=dtype) return spdiags(diags, k, m, n).asformat(format) def kron(A, B, format=None): @@ -194,8 +193,8 @@ dtype = upcast(A.dtype,B.dtype) - L = kron(spidentity(B.shape[0],dtype=dtype), A, format=format) - R = kron(B, spidentity(A.shape[0],dtype=dtype), format=format) + L = kron(identity(B.shape[0],dtype=dtype), A, format=format) + R = kron(B, identity(A.shape[0],dtype=dtype), format=format) return (L+R).asformat(format) #since L + R is not always same format @@ -307,10 +306,16 @@ ################################# # Deprecated functions ################################ + +__all__ += [ 'speye','spidentity', 'spkron', 'lil_eye', 'lil_diags' ] + from numpy import deprecate -spkron = deprecate(kron, oldname='spkron', newname='kron') +spkron = deprecate(kron, oldname='spkron', newname='kron') +speye = deprecate(eye, oldname='speye', newname='eye') +spidentity = deprecate(identity, oldname='spidenitiy', newname='identity') + def lil_eye((r,c), k=0, dtype='d'): """Generate a lil_matrix of dimensions (r,c) with the k-th diagonal set to 1. @@ -326,9 +331,10 @@ - data-type of the output array. """ - warn("lil_eye is deprecated. use speye(... , format='lil') instead", \ + warn("lil_eye is deprecated." 
\ + "use scipy.sparse.eye(r, c, k, format='lil') instead", \ DeprecationWarning) - return speye(r,c,k,dtype=dtype,format='lil') + return eye(r,c,k,dtype=dtype,format='lil') Modified: trunk/scipy/sparse/tests/test_base.py =================================================================== --- trunk/scipy/sparse/tests/test_base.py 2008-02-09 21:02:27 UTC (rev 3910) +++ trunk/scipy/sparse/tests/test_base.py 2008-02-09 21:28:39 UTC (rev 3911) @@ -25,7 +25,7 @@ import scipy.sparse as sparse from scipy.sparse import csc_matrix, csr_matrix, dok_matrix, \ coo_matrix, lil_matrix, dia_matrix, bsr_matrix, \ - speye, SparseEfficiencyWarning + eye, SparseEfficiencyWarning from scipy.sparse.sputils import supported_dtypes from scipy.splinalg import splu @@ -1147,7 +1147,7 @@ def test_lil_sequence_assignement(self): A = lil_matrix((4,3)) - B = speye(3,4,format='lil') + B = eye(3,4,format='lil') i0 = [0,1,2] i1 = (0,1,2) Modified: trunk/scipy/sparse/tests/test_construct.py =================================================================== --- trunk/scipy/sparse/tests/test_construct.py 2008-02-09 21:02:27 UTC (rev 3910) +++ trunk/scipy/sparse/tests/test_construct.py 2008-02-09 21:28:39 UTC (rev 3911) @@ -61,20 +61,24 @@ def test_identity(self): - a = spidentity(3) + a = identity(3) b = array([[1, 0, 0], [0, 1, 0], [0, 0, 1]], dtype='d') assert_array_equal(a.toarray(), b) + a = identity(1) + b = array([[1]], dtype='d') + assert_array_equal(a.toarray(), b) + def test_eye(self): - a = speye(2, 3 ) + a = eye(2, 3 ) b = array([[1, 0, 0], [0, 1, 0]], dtype='d') assert_array_equal(a.toarray(), b) - a = speye(3, 2) + a = eye(3, 2) b = array([[1, 0], [0, 1], [0, 0]], dtype='d') assert_array_equal( a.toarray(), b) - a = speye(3, 3) + a = eye(3, 3) b = array([[1, 0, 0], [0, 1, 0], [0, 0, 1]], dtype='d') assert_array_equal(a.toarray(), b) From scipy-svn at scipy.org Sun Feb 10 17:52:59 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Sun, 10 Feb 2008 16:52:59 -0600 (CST) Subject: [Scipy-svn] r3912 - in trunk/scipy/sparse: . 
tests Message-ID: <20080210225259.9CCE139C01C@new.scipy.org> Author: wnbell Date: 2008-02-10 16:52:55 -0600 (Sun, 10 Feb 2008) New Revision: 3912 Modified: trunk/scipy/sparse/base.py trunk/scipy/sparse/compressed.py trunk/scipy/sparse/coo.py trunk/scipy/sparse/csc.py trunk/scipy/sparse/csr.py trunk/scipy/sparse/dok.py trunk/scipy/sparse/tests/test_base.py Log: made str() work for all sparse mats deprecated rowcol() and getdata() Modified: trunk/scipy/sparse/base.py =================================================================== --- trunk/scipy/sparse/base.py 2008-02-09 21:28:39 UTC (rev 3911) +++ trunk/scipy/sparse/base.py 2008-02-10 22:52:55 UTC (rev 3912) @@ -6,7 +6,7 @@ from warnings import warn import numpy -from numpy import asarray, asmatrix, asanyarray, ones +from numpy import asarray, asmatrix, asanyarray, ones, deprecate from sputils import isdense, isscalarlike, isintlike @@ -136,9 +136,11 @@ format = 'und' return format + @deprecate def rowcol(self, num): return (None, None) + @deprecate def getdata(self, num): return None @@ -156,17 +158,26 @@ (self.shape + (self.dtype.type, nnz, _formats[format][1])) def __str__(self): + maxprint = self.getmaxprint() + + A = self.tocoo() nnz = self.getnnz() - maxprint = self.getmaxprint() - val = '' + + # helper function, outputs "(i,j) v" + def tostr(row,col,data): + triples = zip(zip(row,col),data) + return '\n'.join( [ (' %s\t%s' % t) for t in triples] ) + if nnz > maxprint: - val = val + self.listprint(0, maxprint/2) - val = val + " :\t:\n" - val = val + self.listprint(nnz-maxprint//2, nnz) + half = maxprint // 2 + out = tostr(A.row[:half], A.col[:half], A.data[:half]) + out += + " :\t:\n" + out += tostr(A.row[:-half], A.col[:-half], A.data[:-half]) else: - val = val + self.listprint(0, nnz) - return val[:-1] + out = tostr(A.row, A.col, A.data) + return out[:-1] + def __nonzero__(self): # Simple -- other ideas? 
return self.getnnz() > 0 @@ -487,7 +498,6 @@ for i,v in enumerate(values[:max_index]): self[i, i + k] = v - def save(self, file_name, format = '%d %d %f\n'): #deprecated on Dec 14 2007 #remove after 0.7 release Modified: trunk/scipy/sparse/compressed.py =================================================================== --- trunk/scipy/sparse/compressed.py 2008-02-09 21:28:39 UTC (rev 3911) +++ trunk/scipy/sparse/compressed.py 2008-02-10 22:52:55 UTC (rev 3912) @@ -8,7 +8,7 @@ import numpy from numpy import array, matrix, asarray, asmatrix, zeros, rank, intc, \ empty, hstack, isscalar, ndarray, shape, searchsorted, empty_like, \ - where, concatenate, transpose + where, concatenate, transpose, deprecate from base import spmatrix, isspmatrix, SparseEfficiencyWarning from data import _data_matrix @@ -379,6 +379,7 @@ else: return self.transpose().matvec( other ) + @deprecate def getdata(self, ind): return self.data[ind] Modified: trunk/scipy/sparse/coo.py =================================================================== --- trunk/scipy/sparse/coo.py 2008-02-09 21:28:39 UTC (rev 3911) +++ trunk/scipy/sparse/coo.py 2008-02-10 22:52:55 UTC (rev 3912) @@ -6,7 +6,8 @@ from warnings import warn from numpy import array, asarray, empty, intc, zeros, \ - unique, searchsorted, atleast_2d, empty_like, rank + unique, searchsorted, atleast_2d, empty_like, rank, \ + deprecate from sparsetools import coo_tocsr, coo_tocsc, coo_todense from base import isspmatrix @@ -216,9 +217,11 @@ # some functions pass floats self.shape = tuple([int(x) for x in self.shape]) + @deprecate def rowcol(self, num): return (self.row[num], self.col[num]) + @deprecate def getdata(self, num): return self.data[num] Modified: trunk/scipy/sparse/csc.py =================================================================== --- trunk/scipy/sparse/csc.py 2008-02-09 21:28:39 UTC (rev 3911) +++ trunk/scipy/sparse/csc.py 2008-02-10 22:52:55 UTC (rev 3912) @@ -7,7 +7,7 @@ import numpy from numpy import array, matrix, asarray, asmatrix, zeros, rank, intc, \ empty, hstack, isscalar, ndarray, shape, searchsorted, where, \ - concatenate + concatenate, deprecate from base import spmatrix, isspmatrix from sparsetools import csc_tocsr @@ -104,9 +104,9 @@ for r in xrange(self.shape[0]): yield csr[r,:] + @deprecate def rowcol(self, ind): #TODO remove after 0.7 - warn('rowcol() is deprecated',DeprecationWarning) row = self.indices[ind] col = searchsorted(self.indptr, ind+1)-1 return (row, col) Modified: trunk/scipy/sparse/csr.py =================================================================== --- trunk/scipy/sparse/csr.py 2008-02-09 21:28:39 UTC (rev 3911) +++ trunk/scipy/sparse/csr.py 2008-02-10 22:52:55 UTC (rev 3912) @@ -8,7 +8,7 @@ import numpy from numpy import array, matrix, asarray, asmatrix, zeros, rank, intc, \ empty, hstack, isscalar, ndarray, shape, searchsorted, where, \ - concatenate + concatenate, deprecate from base import spmatrix, isspmatrix from sparsetools import csr_tocsc, csr_tobsr, csr_count_blocks @@ -102,10 +102,9 @@ M,N = self.shape return csc_matrix((self.data,self.indices,self.indptr),(N,M),copy=copy) - + @deprecate def rowcol(self, ind): #TODO remove after 0.7 - warn('rowcol() is deprecated',DeprecationWarning) col = self.indices[ind] row = searchsorted(self.indptr, ind+1)-1 return (row, col) Modified: trunk/scipy/sparse/dok.py =================================================================== --- trunk/scipy/sparse/dok.py 2008-02-09 21:28:39 UTC (rev 3911) +++ trunk/scipy/sparse/dok.py 2008-02-10 22:52:55 UTC (rev 3912) 
@@ -23,7 +23,7 @@ #TODO deprecate argument A in favor of arg1 style dict.__init__(self) - spmatrix.__init__(self,shape) + spmatrix.__init__(self) self.dtype = getdtype(dtype, A, default=float) if A is not None: if isinstance(A, tuple): @@ -59,25 +59,6 @@ def __len__(self): return dict.__len__(self) - def __str__(self): - val = '' - keys = self.keys() - keys.sort() - #TODO why does dok_matrix wipe out .maxprint? - if self.nnz > self.maxprint: - for k in xrange(self.maxprint / 2): - key = keys[k] - val += " %s\t%s\n" % (str(key), str(self[key])) - val = val + " : \t :\n" - for k in xrange(self.nnz - self.maxprint/2, self.nnz): - key = keys[k] - val += " %s\t%s\n" % (str(key), str(self[key])) - else: - for k in xrange(self.nnz): - key = keys[k] - val += " %s\t%s\n" % (str(key), str(self[key])) - return val[:-1] - def get(self, key, default=0.): """This overrides the dict.get method, providing type checking but otherwise equivalent functionality. Modified: trunk/scipy/sparse/tests/test_base.py =================================================================== --- trunk/scipy/sparse/tests/test_base.py 2008-02-09 21:28:39 UTC (rev 3911) +++ trunk/scipy/sparse/tests/test_base.py 2008-02-10 22:52:55 UTC (rev 3912) @@ -33,10 +33,8 @@ warnings.simplefilter('ignore',SparseEfficiencyWarning) - #TODO check that invalid shape in constructor raises exception #TODO check that spmatrix( ... , copy=X ) is respected -#TODO test repr(spmatrix) #TODO test prune #TODO test has_sorted_indices class _TestCommon: @@ -47,9 +45,11 @@ self.datsp = self.spmatrix(self.dat) def test_repr(self): - """make sure __repr__ works""" - repr(self.spmatrix) + repr(self.datsp) + def test_str(self): + str(self.datsp) + def test_empty(self): """Test manipulating empty matrices. Fails in SciPy SVN <= r1768 """ From scipy-svn at scipy.org Mon Feb 11 08:46:31 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Mon, 11 Feb 2008 07:46:31 -0600 (CST) Subject: [Scipy-svn] r3913 - trunk/scipy/sparse Message-ID: <20080211134631.96D0239C378@new.scipy.org> Author: wnbell Date: 2008-02-11 07:46:25 -0600 (Mon, 11 Feb 2008) New Revision: 3913 Modified: trunk/scipy/sparse/base.py Log: fixed str(sparse) output Modified: trunk/scipy/sparse/base.py =================================================================== --- trunk/scipy/sparse/base.py 2008-02-10 22:52:55 UTC (rev 3912) +++ trunk/scipy/sparse/base.py 2008-02-11 13:46:25 UTC (rev 3913) @@ -144,6 +144,7 @@ def getdata(self, num): return None + @deprecate def listprint(self, start, stop): """Provides a way to print over a single index. """ @@ -171,12 +172,13 @@ if nnz > maxprint: half = maxprint // 2 out = tostr(A.row[:half], A.col[:half], A.data[:half]) - out += + " :\t:\n" - out += tostr(A.row[:-half], A.col[:-half], A.data[:-half]) + out += "\n :\t:\n" + half = maxprint - maxprint//2 + out += tostr(A.row[-half:], A.col[-half:], A.data[-half:]) else: out = tostr(A.row, A.col, A.data) - return out[:-1] + return out def __nonzero__(self): # Simple -- other ideas? 
return self.getnnz() > 0 From scipy-svn at scipy.org Mon Feb 11 18:14:13 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Mon, 11 Feb 2008 17:14:13 -0600 (CST) Subject: [Scipy-svn] r3914 - trunk/scipy/testing Message-ID: <20080211231413.6B1D639C183@new.scipy.org> Author: matthew.brett at gmail.com Date: 2008-02-11 17:13:58 -0600 (Mon, 11 Feb 2008) New Revision: 3914 Modified: trunk/scipy/testing/nulltester.py trunk/scipy/testing/pkgtester.py Log: Nose version check Modified: trunk/scipy/testing/nulltester.py =================================================================== --- trunk/scipy/testing/nulltester.py 2008-02-11 13:46:25 UTC (rev 3913) +++ trunk/scipy/testing/nulltester.py 2008-02-11 23:13:58 UTC (rev 3914) @@ -1,18 +1,19 @@ -''' Null tester (when nose not importable) +''' Null tester to signal nose tests disabled -Merely returns error reporting lack of nose package +Merely returns error reporting lack of nose package or version number +below requirements. See pkgtester, nosetester modules ''' -nose_url = 'http://somethingaboutorange.com/mrl/projects/nose' - class NullTester(object): + _msg = 'Need nose >=0.10 for tests - see %s' % \ + 'http://somethingaboutorange.com/mrl/projects/nose' def __init__(self, *args, **kwargs): pass def test(self, labels=None, *args, **kwargs): - raise ImportError, 'Need nose for tests - see %s' % nose_url + raise ImportError, self._msg def bench(self, labels=None, *args, **kwargs): - raise ImportError, 'Need nose for benchmarks - see %s' % nose_url + raise ImportError, self._msg Modified: trunk/scipy/testing/pkgtester.py =================================================================== --- trunk/scipy/testing/pkgtester.py 2008-02-11 13:46:25 UTC (rev 3913) +++ trunk/scipy/testing/pkgtester.py 2008-02-11 23:13:58 UTC (rev 3914) @@ -1,8 +1,9 @@ ''' Define test function for scipy package -Module tests for presence of nose. If present returns NoseTester, -otherwise returns a placeholder test routine reporting lack of nose -and inability to run tests. Typical use is in module __init__: +Module tests for presence of useful version of nose. If present +returns NoseTester, otherwise returns a placeholder test routine +reporting lack of nose and inability to run tests. 
Typical use is in +module __init__: from scipy.testing.pkgtester import Tester test = Tester().test @@ -10,10 +11,17 @@ See nosetester module for test implementation ''' +fine_nose = True try: import nose except ImportError: - from scipy.testing.nulltester import NullTester as Tester + fine_nose = False else: + nose_version = nose.__versioninfo__ + if nose_version[0] < 1 and nose_version[1] < 10: + fine_nose = False + +if fine_nose: from scipy.testing.nosetester import NoseTester as Tester - +else: + from scipy.testing.nulltester import NullTester as Tester From scipy-svn at scipy.org Mon Feb 11 18:27:11 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Mon, 11 Feb 2008 17:27:11 -0600 (CST) Subject: [Scipy-svn] r3915 - trunk/scipy/testing Message-ID: <20080211232711.DF62539C13A@new.scipy.org> Author: matthew.brett at gmail.com Date: 2008-02-11 17:27:05 -0600 (Mon, 11 Feb 2008) New Revision: 3915 Modified: trunk/scipy/testing/nulltester.py Log: Trimmed nulltester, no functional change Modified: trunk/scipy/testing/nulltester.py =================================================================== --- trunk/scipy/testing/nulltester.py 2008-02-11 23:13:58 UTC (rev 3914) +++ trunk/scipy/testing/nulltester.py 2008-02-11 23:27:05 UTC (rev 3915) @@ -8,12 +8,9 @@ ''' class NullTester(object): - _msg = 'Need nose >=0.10 for tests - see %s' % \ - 'http://somethingaboutorange.com/mrl/projects/nose' - def __init__(self, *args, **kwargs): - pass def test(self, labels=None, *args, **kwargs): - raise ImportError, self._msg - def bench(self, labels=None, *args, **kwargs): - raise ImportError, self._msg + raise ImportError, \ + 'Need nose >=0.10 for tests - see %s' % \ + 'http://somethingaboutorange.com/mrl/projects/nose' + bench = test From scipy-svn at scipy.org Mon Feb 11 18:32:21 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Mon, 11 Feb 2008 17:32:21 -0600 (CST) Subject: [Scipy-svn] r3916 - in trunk/scipy/ndimage: . src src/segment src/segment/tests Message-ID: <20080211233221.C9C9339C042@new.scipy.org> Author: chris.burns Date: 2008-02-11 17:32:11 -0600 (Mon, 11 Feb 2008) New Revision: 3916 Added: trunk/scipy/ndimage/src/segment/ trunk/scipy/ndimage/src/segment/Segmenter_EXT.c trunk/scipy/ndimage/src/segment/Segmenter_IMPL.c trunk/scipy/ndimage/src/segment/ndImage_Segmenter_structs.h trunk/scipy/ndimage/src/segment/objectdata.py trunk/scipy/ndimage/src/segment/tests/ Removed: trunk/scipy/ndimage/segment/ trunk/scipy/ndimage/src/segment/Segmenter_EXT.c trunk/scipy/ndimage/src/segment/Segmenter_IMPL.c trunk/scipy/ndimage/src/segment/__init__.py trunk/scipy/ndimage/src/segment/ndImage_Segmenter_structs.h trunk/scipy/ndimage/src/segment/objectdata.py trunk/scipy/ndimage/src/segment/setup.py trunk/scipy/ndimage/src/segment/tests/ Modified: trunk/scipy/ndimage/segmenter.py trunk/scipy/ndimage/setup.py trunk/scipy/ndimage/src/segment/tests/test_segment.py Log: Reorg package structure for ndimage.segment. 
Modified: trunk/scipy/ndimage/segmenter.py =================================================================== --- trunk/scipy/ndimage/segmenter.py 2008-02-11 23:27:05 UTC (rev 3915) +++ trunk/scipy/ndimage/segmenter.py 2008-02-11 23:32:11 UTC (rev 3916) @@ -1,6 +1,6 @@ import math import numpy as N -import scipy.ndimage.segment as S +import scipy.ndimage._segment as S # make sure this is local to use as default inputname = 'slice112.raw' Modified: trunk/scipy/ndimage/setup.py =================================================================== --- trunk/scipy/ndimage/setup.py 2008-02-11 23:27:05 UTC (rev 3915) +++ trunk/scipy/ndimage/setup.py 2008-02-11 23:32:11 UTC (rev 3916) @@ -14,7 +14,13 @@ include_dirs=['src']+[get_include()], ) - config.add_subpackage('segment') + config.add_extension('_segment', + sources=['src/segment/Segmenter_EXT.c', + 'src/segment/Segmenter_IMPL.c'], + depends = ['src/segment/ndImage_Segmenter_structs.h'] + ) + + #config.add_subpackage('segment') config.add_data_dir('tests') config.add_subpackage('register') Copied: trunk/scipy/ndimage/src/segment (from rev 3913, trunk/scipy/ndimage/segment) Deleted: trunk/scipy/ndimage/src/segment/Segmenter_EXT.c =================================================================== --- trunk/scipy/ndimage/segment/Segmenter_EXT.c 2008-02-11 13:46:25 UTC (rev 3913) +++ trunk/scipy/ndimage/src/segment/Segmenter_EXT.c 2008-02-11 23:32:11 UTC (rev 3916) @@ -1,460 +0,0 @@ -#include "ndImage_Segmenter_structs.h" -#include "Python.h" -#include "numpy/arrayobject.h" - -static PyObject *Segmenter_CannyEdges(PyObject *self, PyObject *args) -{ - - double sigma; - double cannyLow; - double cannyHigh; - double BPHigh; - int lowThreshold; - int highThreshold; - int apearture; - int num; - int nd; - int type; - int itype; - int mode; - int groups; - npy_intp *dims; - double *fP1; - unsigned short *fP2; - PyObject *iArray = NULL; - PyObject *eArray = NULL; - - /* pass in 2D LPF coefficients */ - if(!PyArg_ParseTuple(args, "dddiiidiO", &sigma, &cannyLow, &cannyHigh, - &mode, &lowThreshold, &highThreshold, - &BPHigh, &apearture, &iArray)) - goto exit; - - fP1 = (double *)PyArray_DATA(iArray); - nd = PyArray_NDIM(iArray); - dims = PyArray_DIMS(iArray); - type = PyArray_TYPE(iArray); - num = PyArray_SIZE(iArray); - - //itype = 4; - itype = NPY_USHORT; - eArray = (PyObject*)PyArray_SimpleNew(nd, dims, itype); - fP2 = (unsigned short *)PyArray_DATA(eArray); - - if(!PyArray_ISCONTIGUOUS(iArray) || !PyArray_ISCONTIGUOUS(eArray)) - goto exit; - - if(!NI_CannyEdges(num, (int)dims[0], (int)dims[1], sigma, cannyLow, - cannyHigh, mode, lowThreshold, - highThreshold, BPHigh, apearture, fP1, fP2, &groups)) - goto exit; - -exit: - - return PyErr_Occurred() ? NULL : (PyObject*)Py_BuildValue("Oi", eArray, - groups); - -} - -static PyObject *Segmenter_SobelEdges(PyObject *self, PyObject *args) -{ - - double sobelLow; - double BPHigh; - int lowThreshold; - int highThreshold; - int apearture; - int num; - int nd; - int type; - int itype; - int groups; - int mode; - npy_intp *dims; - double *fP1; - unsigned short *fP2; - PyObject *iArray = NULL; - PyObject *eArray = NULL; - - // - // pass in 2D LPF coefficients - if(!PyArg_ParseTuple(args, "diiidiO", &sobelLow, &mode, &lowThreshold, - &highThreshold, &BPHigh, &apearture, &iArray)) - goto exit; - - fP1 = (double *)PyArray_DATA(iArray); - nd = PyArray_NDIM(iArray); - dims = PyArray_DIMS(iArray); - type = PyArray_TYPE(iArray); - num = PyArray_SIZE(iArray); - - // this is int type and hard-wirred. 
pass this in from Python code - //itype = 4; // unsigned short - itype = NPY_USHORT; - eArray = (PyObject*)PyArray_SimpleNew(nd, dims, itype); - fP2 = (unsigned short *)PyArray_DATA(eArray); - - if(!PyArray_ISCONTIGUOUS(iArray) || !PyArray_ISCONTIGUOUS(eArray)) - goto exit; - - - if(!NI_SobelEdges(num, (int)dims[0], (int)dims[1], sobelLow, mode, - lowThreshold, highThreshold, BPHigh, apearture, - fP1, fP2, &groups)) - goto exit; - -exit: - - return PyErr_Occurred() ? NULL : (PyObject*)Py_BuildValue("Oi", eArray, - groups-1); - -} - - - -static PyObject *Segmenter_ShenCastanEdges(PyObject *self, PyObject *args) -{ - int window; - int lowThreshold; - int highThreshold; - double ShenCastanLow; - double b; - int num; - int nd; - int type; - int itype; - npy_intp *dims; - double *fP1; - unsigned short *fP2; - int groups; - PyObject *iArray = NULL; - PyObject *eArray = NULL; - - if(!PyArg_ParseTuple(args, "ddiiiO", &ShenCastanLow, &b, &window, - &lowThreshold, &highThreshold, &iArray)) - goto exit; - - fP1 = (double *)PyArray_DATA(iArray); - nd = PyArray_NDIM(iArray); - dims = PyArray_DIMS(iArray); - type = PyArray_TYPE(iArray); - num = PyArray_SIZE(iArray); - - // this is int type and hard-wirred. pass this in from Python code - //itype = 4; // unsigned short - itype = NPY_USHORT; - eArray = (PyObject*)PyArray_SimpleNew(nd, dims, itype); - fP2 = (unsigned short *)PyArray_DATA(eArray); - - if(!PyArray_ISCONTIGUOUS(iArray) || !PyArray_ISCONTIGUOUS(eArray)) - goto exit; - - if(!NI_ShenCastanEdges(num, (int)dims[0], (int)dims[1], b, ShenCastanLow, - window, lowThreshold, highThreshold, - fP1, fP2, &groups)) - goto exit; - -exit: - - return PyErr_Occurred() ? NULL : (PyObject*)Py_BuildValue("Oi", eArray, - groups-1); - -} - -static PyObject *Segmenter_GetObjectStats(PyObject *self, PyObject *args) -{ - - - int num; - int nd; - int type; - npy_intp *dims; - npy_intp *objNumber; - unsigned short *fP1; - PyObject *iArray = NULL; - PyObject *nArray = NULL; - objStruct *myData; - - if(!PyArg_ParseTuple(args, "OO", &iArray, &nArray)) - goto exit; - - if(!PyArray_ISCONTIGUOUS(iArray) || !PyArray_ISCONTIGUOUS(nArray)) - goto exit; - - // - // PyArray_ContiguousFromObject or PyArray_ContiguousFromAny to be explored - // for non-contiguous - // - - - // pointer to the edge-labeled image - nd = PyArray_NDIM(iArray); - dims = PyArray_DIMS(iArray); - type = PyArray_TYPE(iArray); - num = PyArray_SIZE(iArray); - fP1 = (unsigned short *)PyArray_DATA(iArray); - - // the object descriptor array that was allocated from numpy - objNumber = PyArray_DIMS(nArray); // this is the number of labels in the edge image - myData = (objStruct*)PyArray_DATA(nArray); - - if(!NI_GetObjectStats((int)dims[0], (int)dims[1], (int)objNumber[0], fP1, myData)) - goto exit; - -exit: - - return PyErr_Occurred() ? 
NULL : (PyObject*)Py_BuildValue(""); - -} - -static PyObject *Segmenter_MorphoThinFilt(PyObject *self, PyObject *args) -{ - - int num; - int nd; - int type; - npy_intp *dims; - npy_intp *objNumber; - unsigned short *fP1; - PyObject *iArray = NULL; - PyObject *nArray = NULL; - objStruct *ROIList; - - if(!PyArg_ParseTuple(args, "OO", &iArray, &nArray)) - goto exit; - - fP1 = (unsigned short *)PyArray_DATA(iArray); - nd = PyArray_NDIM(iArray); - dims = PyArray_DIMS(iArray); - type = PyArray_TYPE(iArray); - num = PyArray_SIZE(iArray); - - objNumber = PyArray_DIMS(nArray); // this is the number of labels in the edge image - ROIList = (objStruct*)PyArray_DATA(nArray); - - if(!PyArray_ISCONTIGUOUS(iArray)) - goto exit; - - if(!NI_ThinFilter(num, (int)dims[0], (int)dims[1], (int)objNumber[0], fP1, ROIList)) - goto exit; - -exit: - - return PyErr_Occurred() ? NULL : (PyObject*)Py_BuildValue(""); - -} - -static PyObject *Segmenter_BuildBoundary(PyObject *self, PyObject *args) -{ - - int num; - int nd; - int type; - npy_intp *dims; - npy_intp *objNumber; - unsigned short *fP1; - PyObject *iArray = NULL; - PyObject *nArray = NULL; - objStruct *ROIList; - - if(!PyArg_ParseTuple(args, "OO", &iArray, &nArray)) - goto exit; - - fP1 = (unsigned short *)PyArray_DATA(iArray); - nd = PyArray_NDIM(iArray); - dims = PyArray_DIMS(iArray); - type = PyArray_TYPE(iArray); - num = PyArray_SIZE(iArray); - // - // this is int type and hard-wirred. pass this in from Python code - - objNumber = PyArray_DIMS(nArray); // this is the number of labels in the edge image - ROIList = (objStruct*)PyArray_DATA(nArray); - - if(!PyArray_ISCONTIGUOUS(iArray)) - goto exit; - - // - // pass in ROI list and labeled edges - // return an augmented ROI list - // replace the edgeImage with maskImage - // - if(!NI_BuildBoundary(num, (int)dims[0], (int)dims[1], (int)objNumber[0], fP1, ROIList)) - goto exit; - -exit: - - return PyErr_Occurred() ? NULL : (PyObject*)Py_BuildValue(""); - -} - - -static PyObject *Segmenter_VoxelMeasures(PyObject *self, PyObject *args) -{ - - int num; - int nd; - int type; - npy_intp *dims; - npy_intp *objNumber; - double *fP1; - unsigned short *fP2; - PyObject *iArray = NULL; - PyObject *nArray = NULL; - PyObject *eArray = NULL; - objStruct *ROIList; - - if(!PyArg_ParseTuple(args, "OOO", &iArray, &eArray, &nArray)) - goto exit; - - fP1 = (double *)PyArray_DATA(iArray); - nd = PyArray_NDIM(iArray); - dims = PyArray_DIMS(iArray); - type = PyArray_TYPE(iArray); - num = PyArray_SIZE(iArray); - - // eArray and iArray are same dims - fP2 = (unsigned short *)PyArray_DATA(eArray); - - objNumber = PyArray_DIMS(nArray); // this is the number of labels in the edge image - ROIList = (objStruct*)PyArray_DATA(nArray); - - if(!PyArray_ISCONTIGUOUS(iArray)) - goto exit; - - // - // pass in ROI list and labeled edges - // return an augmented ROI list - // replace the edgeImage with maskImage - // - - if(!NI_VoxelMeasures(num, (int)dims[0], (int)dims[1], (int)objNumber[0], fP1, - fP2, ROIList)) - goto exit; - -exit: - - return PyErr_Occurred() ? 
NULL : (PyObject*)Py_BuildValue(""); - -} - -static PyObject *Segmenter_TextureMeasures(PyObject *self, PyObject *args) -{ - - int num; - int nd; - int type; - npy_intp *dims; - npy_intp *objNumber; - double *fP1; - unsigned short *fP2; - PyObject *iArray = NULL; - PyObject *nArray = NULL; - PyObject *eArray = NULL; - objStruct *ROIList; - - if(!PyArg_ParseTuple(args, "OOO", &iArray, &eArray, &nArray)) - goto exit; - - fP1 = (double *)PyArray_DATA(iArray); - nd = PyArray_NDIM(iArray); - dims = PyArray_DIMS(iArray); - type = PyArray_TYPE(iArray); - num = PyArray_SIZE(iArray); - - // eArray and iArray are same dims - fP2 = (unsigned short *)PyArray_DATA(eArray); - - objNumber = PyArray_DIMS(nArray); // this is the number of labels in the edge image - ROIList = (objStruct*)PyArray_DATA(nArray); - - if(!PyArray_ISCONTIGUOUS(iArray)) - goto exit; - - // - // pass in ROI list and labeled edges - // return an augmented ROI list - // replace the edgeImage with maskImage - // - - if(!NI_TextureMeasures(num, (int)dims[0], (int)dims[1], (int)objNumber[0], fP1, - fP2, ROIList)) - goto exit; - -exit: - - return PyErr_Occurred() ? NULL : (PyObject*)Py_BuildValue(""); - -} - -static PyObject *Segmenter_RegionGrow(PyObject *self, PyObject *args) -{ - - int lowThreshold; - int highThreshold; - int closeWindow; - int openWindow; - int num; - int nd; - int type; - int itype; - int groups; - npy_intp *dims; - double *fP1; - unsigned short *fP2; - PyObject *iArray = NULL; - PyObject *eArray = NULL; - - // - // pass in 2D LPF coefficients - if(!PyArg_ParseTuple(args, "iiiiO", &lowThreshold, &highThreshold, &closeWindow, - &openWindow, &iArray)) - goto exit; - - fP1 = (double *)PyArray_DATA(iArray); - nd = PyArray_NDIM(iArray); - dims = PyArray_DIMS(iArray); - type = PyArray_TYPE(iArray); - num = PyArray_SIZE(iArray); - - // this is int type and hard-wirred. pass this in from Python code - //itype = 4; // unsigned short - itype = NPY_USHORT; - eArray = (PyObject*)PyArray_SimpleNew(nd, dims, itype); - fP2 = (unsigned short *)PyArray_DATA(eArray); - - if(!PyArray_ISCONTIGUOUS(iArray) || !PyArray_ISCONTIGUOUS(eArray)) - goto exit; - - - if(!NI_RegionGrow(num, (int)dims[0], (int)dims[1], lowThreshold, highThreshold, - closeWindow, openWindow, fP1, fP2, &groups)) - - goto exit; - -exit: - - return PyErr_Occurred() ? 
NULL : (PyObject*)Py_BuildValue("Oi", eArray, groups-1); - -} - -static PyMethodDef SegmenterMethods[] = -{ - { "canny_edges", Segmenter_CannyEdges, METH_VARARGS, NULL }, - { "shen_castan_edges", Segmenter_ShenCastanEdges, METH_VARARGS, NULL }, - { "sobel_edges", Segmenter_SobelEdges, METH_VARARGS, NULL }, - { "get_object_stats", Segmenter_GetObjectStats, METH_VARARGS, NULL }, - { "morpho_thin_filt", Segmenter_MorphoThinFilt, METH_VARARGS, NULL }, - { "build_boundary", Segmenter_BuildBoundary, METH_VARARGS, NULL }, - { "voxel_measures", Segmenter_VoxelMeasures, METH_VARARGS, NULL }, - { "texture_measures", Segmenter_TextureMeasures, METH_VARARGS, NULL }, - { "region_grow", Segmenter_RegionGrow, METH_VARARGS, NULL }, - { NULL, NULL, 0, NULL}, -}; - -void init_segmenter(void) -{ - Py_InitModule("_segmenter", SegmenterMethods); - import_array(); -} - Copied: trunk/scipy/ndimage/src/segment/Segmenter_EXT.c (from rev 3915, trunk/scipy/ndimage/segment/Segmenter_EXT.c) =================================================================== --- trunk/scipy/ndimage/segment/Segmenter_EXT.c 2008-02-11 23:27:05 UTC (rev 3915) +++ trunk/scipy/ndimage/src/segment/Segmenter_EXT.c 2008-02-11 23:32:11 UTC (rev 3916) @@ -0,0 +1,460 @@ +#include "ndImage_Segmenter_structs.h" +#include "Python.h" +#include "numpy/arrayobject.h" + +static PyObject *Segmenter_CannyEdges(PyObject *self, PyObject *args) +{ + + double sigma; + double cannyLow; + double cannyHigh; + double BPHigh; + int lowThreshold; + int highThreshold; + int apearture; + int num; + int nd; + int type; + int itype; + int mode; + int groups; + npy_intp *dims; + double *fP1; + unsigned short *fP2; + PyObject *iArray = NULL; + PyObject *eArray = NULL; + + /* pass in 2D LPF coefficients */ + if(!PyArg_ParseTuple(args, "dddiiidiO", &sigma, &cannyLow, &cannyHigh, + &mode, &lowThreshold, &highThreshold, + &BPHigh, &apearture, &iArray)) + goto exit; + + fP1 = (double *)PyArray_DATA(iArray); + nd = PyArray_NDIM(iArray); + dims = PyArray_DIMS(iArray); + type = PyArray_TYPE(iArray); + num = PyArray_SIZE(iArray); + + //itype = 4; + itype = NPY_USHORT; + eArray = (PyObject*)PyArray_SimpleNew(nd, dims, itype); + fP2 = (unsigned short *)PyArray_DATA(eArray); + + if(!PyArray_ISCONTIGUOUS(iArray) || !PyArray_ISCONTIGUOUS(eArray)) + goto exit; + + if(!NI_CannyEdges(num, (int)dims[0], (int)dims[1], sigma, cannyLow, + cannyHigh, mode, lowThreshold, + highThreshold, BPHigh, apearture, fP1, fP2, &groups)) + goto exit; + +exit: + + return PyErr_Occurred() ? NULL : (PyObject*)Py_BuildValue("Oi", eArray, + groups); + +} + +static PyObject *Segmenter_SobelEdges(PyObject *self, PyObject *args) +{ + + double sobelLow; + double BPHigh; + int lowThreshold; + int highThreshold; + int apearture; + int num; + int nd; + int type; + int itype; + int groups; + int mode; + npy_intp *dims; + double *fP1; + unsigned short *fP2; + PyObject *iArray = NULL; + PyObject *eArray = NULL; + + // + // pass in 2D LPF coefficients + if(!PyArg_ParseTuple(args, "diiidiO", &sobelLow, &mode, &lowThreshold, + &highThreshold, &BPHigh, &apearture, &iArray)) + goto exit; + + fP1 = (double *)PyArray_DATA(iArray); + nd = PyArray_NDIM(iArray); + dims = PyArray_DIMS(iArray); + type = PyArray_TYPE(iArray); + num = PyArray_SIZE(iArray); + + // this is int type and hard-wirred. 
pass this in from Python code + //itype = 4; // unsigned short + itype = NPY_USHORT; + eArray = (PyObject*)PyArray_SimpleNew(nd, dims, itype); + fP2 = (unsigned short *)PyArray_DATA(eArray); + + if(!PyArray_ISCONTIGUOUS(iArray) || !PyArray_ISCONTIGUOUS(eArray)) + goto exit; + + + if(!NI_SobelEdges(num, (int)dims[0], (int)dims[1], sobelLow, mode, + lowThreshold, highThreshold, BPHigh, apearture, + fP1, fP2, &groups)) + goto exit; + +exit: + + return PyErr_Occurred() ? NULL : (PyObject*)Py_BuildValue("Oi", eArray, + groups-1); + +} + + + +static PyObject *Segmenter_ShenCastanEdges(PyObject *self, PyObject *args) +{ + int window; + int lowThreshold; + int highThreshold; + double ShenCastanLow; + double b; + int num; + int nd; + int type; + int itype; + npy_intp *dims; + double *fP1; + unsigned short *fP2; + int groups; + PyObject *iArray = NULL; + PyObject *eArray = NULL; + + if(!PyArg_ParseTuple(args, "ddiiiO", &ShenCastanLow, &b, &window, + &lowThreshold, &highThreshold, &iArray)) + goto exit; + + fP1 = (double *)PyArray_DATA(iArray); + nd = PyArray_NDIM(iArray); + dims = PyArray_DIMS(iArray); + type = PyArray_TYPE(iArray); + num = PyArray_SIZE(iArray); + + // this is int type and hard-wirred. pass this in from Python code + //itype = 4; // unsigned short + itype = NPY_USHORT; + eArray = (PyObject*)PyArray_SimpleNew(nd, dims, itype); + fP2 = (unsigned short *)PyArray_DATA(eArray); + + if(!PyArray_ISCONTIGUOUS(iArray) || !PyArray_ISCONTIGUOUS(eArray)) + goto exit; + + if(!NI_ShenCastanEdges(num, (int)dims[0], (int)dims[1], b, ShenCastanLow, + window, lowThreshold, highThreshold, + fP1, fP2, &groups)) + goto exit; + +exit: + + return PyErr_Occurred() ? NULL : (PyObject*)Py_BuildValue("Oi", eArray, + groups-1); + +} + +static PyObject *Segmenter_GetObjectStats(PyObject *self, PyObject *args) +{ + + + int num; + int nd; + int type; + npy_intp *dims; + npy_intp *objNumber; + unsigned short *fP1; + PyObject *iArray = NULL; + PyObject *nArray = NULL; + objStruct *myData; + + if(!PyArg_ParseTuple(args, "OO", &iArray, &nArray)) + goto exit; + + if(!PyArray_ISCONTIGUOUS(iArray) || !PyArray_ISCONTIGUOUS(nArray)) + goto exit; + + // + // PyArray_ContiguousFromObject or PyArray_ContiguousFromAny to be explored + // for non-contiguous + // + + + // pointer to the edge-labeled image + nd = PyArray_NDIM(iArray); + dims = PyArray_DIMS(iArray); + type = PyArray_TYPE(iArray); + num = PyArray_SIZE(iArray); + fP1 = (unsigned short *)PyArray_DATA(iArray); + + // the object descriptor array that was allocated from numpy + objNumber = PyArray_DIMS(nArray); // this is the number of labels in the edge image + myData = (objStruct*)PyArray_DATA(nArray); + + if(!NI_GetObjectStats((int)dims[0], (int)dims[1], (int)objNumber[0], fP1, myData)) + goto exit; + +exit: + + return PyErr_Occurred() ? 
NULL : (PyObject*)Py_BuildValue(""); + +} + +static PyObject *Segmenter_MorphoThinFilt(PyObject *self, PyObject *args) +{ + + int num; + int nd; + int type; + npy_intp *dims; + npy_intp *objNumber; + unsigned short *fP1; + PyObject *iArray = NULL; + PyObject *nArray = NULL; + objStruct *ROIList; + + if(!PyArg_ParseTuple(args, "OO", &iArray, &nArray)) + goto exit; + + fP1 = (unsigned short *)PyArray_DATA(iArray); + nd = PyArray_NDIM(iArray); + dims = PyArray_DIMS(iArray); + type = PyArray_TYPE(iArray); + num = PyArray_SIZE(iArray); + + objNumber = PyArray_DIMS(nArray); // this is the number of labels in the edge image + ROIList = (objStruct*)PyArray_DATA(nArray); + + if(!PyArray_ISCONTIGUOUS(iArray)) + goto exit; + + if(!NI_ThinFilter(num, (int)dims[0], (int)dims[1], (int)objNumber[0], fP1, ROIList)) + goto exit; + +exit: + + return PyErr_Occurred() ? NULL : (PyObject*)Py_BuildValue(""); + +} + +static PyObject *Segmenter_BuildBoundary(PyObject *self, PyObject *args) +{ + + int num; + int nd; + int type; + npy_intp *dims; + npy_intp *objNumber; + unsigned short *fP1; + PyObject *iArray = NULL; + PyObject *nArray = NULL; + objStruct *ROIList; + + if(!PyArg_ParseTuple(args, "OO", &iArray, &nArray)) + goto exit; + + fP1 = (unsigned short *)PyArray_DATA(iArray); + nd = PyArray_NDIM(iArray); + dims = PyArray_DIMS(iArray); + type = PyArray_TYPE(iArray); + num = PyArray_SIZE(iArray); + // + // this is int type and hard-wirred. pass this in from Python code + + objNumber = PyArray_DIMS(nArray); // this is the number of labels in the edge image + ROIList = (objStruct*)PyArray_DATA(nArray); + + if(!PyArray_ISCONTIGUOUS(iArray)) + goto exit; + + // + // pass in ROI list and labeled edges + // return an augmented ROI list + // replace the edgeImage with maskImage + // + if(!NI_BuildBoundary(num, (int)dims[0], (int)dims[1], (int)objNumber[0], fP1, ROIList)) + goto exit; + +exit: + + return PyErr_Occurred() ? NULL : (PyObject*)Py_BuildValue(""); + +} + + +static PyObject *Segmenter_VoxelMeasures(PyObject *self, PyObject *args) +{ + + int num; + int nd; + int type; + npy_intp *dims; + npy_intp *objNumber; + double *fP1; + unsigned short *fP2; + PyObject *iArray = NULL; + PyObject *nArray = NULL; + PyObject *eArray = NULL; + objStruct *ROIList; + + if(!PyArg_ParseTuple(args, "OOO", &iArray, &eArray, &nArray)) + goto exit; + + fP1 = (double *)PyArray_DATA(iArray); + nd = PyArray_NDIM(iArray); + dims = PyArray_DIMS(iArray); + type = PyArray_TYPE(iArray); + num = PyArray_SIZE(iArray); + + // eArray and iArray are same dims + fP2 = (unsigned short *)PyArray_DATA(eArray); + + objNumber = PyArray_DIMS(nArray); // this is the number of labels in the edge image + ROIList = (objStruct*)PyArray_DATA(nArray); + + if(!PyArray_ISCONTIGUOUS(iArray)) + goto exit; + + // + // pass in ROI list and labeled edges + // return an augmented ROI list + // replace the edgeImage with maskImage + // + + if(!NI_VoxelMeasures(num, (int)dims[0], (int)dims[1], (int)objNumber[0], fP1, + fP2, ROIList)) + goto exit; + +exit: + + return PyErr_Occurred() ? 
NULL : (PyObject*)Py_BuildValue(""); + +} + +static PyObject *Segmenter_TextureMeasures(PyObject *self, PyObject *args) +{ + + int num; + int nd; + int type; + npy_intp *dims; + npy_intp *objNumber; + double *fP1; + unsigned short *fP2; + PyObject *iArray = NULL; + PyObject *nArray = NULL; + PyObject *eArray = NULL; + objStruct *ROIList; + + if(!PyArg_ParseTuple(args, "OOO", &iArray, &eArray, &nArray)) + goto exit; + + fP1 = (double *)PyArray_DATA(iArray); + nd = PyArray_NDIM(iArray); + dims = PyArray_DIMS(iArray); + type = PyArray_TYPE(iArray); + num = PyArray_SIZE(iArray); + + // eArray and iArray are same dims + fP2 = (unsigned short *)PyArray_DATA(eArray); + + objNumber = PyArray_DIMS(nArray); // this is the number of labels in the edge image + ROIList = (objStruct*)PyArray_DATA(nArray); + + if(!PyArray_ISCONTIGUOUS(iArray)) + goto exit; + + // + // pass in ROI list and labeled edges + // return an augmented ROI list + // replace the edgeImage with maskImage + // + + if(!NI_TextureMeasures(num, (int)dims[0], (int)dims[1], (int)objNumber[0], fP1, + fP2, ROIList)) + goto exit; + +exit: + + return PyErr_Occurred() ? NULL : (PyObject*)Py_BuildValue(""); + +} + +static PyObject *Segmenter_RegionGrow(PyObject *self, PyObject *args) +{ + + int lowThreshold; + int highThreshold; + int closeWindow; + int openWindow; + int num; + int nd; + int type; + int itype; + int groups; + npy_intp *dims; + double *fP1; + unsigned short *fP2; + PyObject *iArray = NULL; + PyObject *eArray = NULL; + + // + // pass in 2D LPF coefficients + if(!PyArg_ParseTuple(args, "iiiiO", &lowThreshold, &highThreshold, &closeWindow, + &openWindow, &iArray)) + goto exit; + + fP1 = (double *)PyArray_DATA(iArray); + nd = PyArray_NDIM(iArray); + dims = PyArray_DIMS(iArray); + type = PyArray_TYPE(iArray); + num = PyArray_SIZE(iArray); + + // this is int type and hard-wirred. pass this in from Python code + //itype = 4; // unsigned short + itype = NPY_USHORT; + eArray = (PyObject*)PyArray_SimpleNew(nd, dims, itype); + fP2 = (unsigned short *)PyArray_DATA(eArray); + + if(!PyArray_ISCONTIGUOUS(iArray) || !PyArray_ISCONTIGUOUS(eArray)) + goto exit; + + + if(!NI_RegionGrow(num, (int)dims[0], (int)dims[1], lowThreshold, highThreshold, + closeWindow, openWindow, fP1, fP2, &groups)) + + goto exit; + +exit: + + return PyErr_Occurred() ? 
NULL : (PyObject*)Py_BuildValue("Oi", eArray, groups-1); + +} + +static PyMethodDef SegmenterMethods[] = +{ + { "canny_edges", Segmenter_CannyEdges, METH_VARARGS, NULL }, + { "shen_castan_edges", Segmenter_ShenCastanEdges, METH_VARARGS, NULL }, + { "sobel_edges", Segmenter_SobelEdges, METH_VARARGS, NULL }, + { "get_object_stats", Segmenter_GetObjectStats, METH_VARARGS, NULL }, + { "morpho_thin_filt", Segmenter_MorphoThinFilt, METH_VARARGS, NULL }, + { "build_boundary", Segmenter_BuildBoundary, METH_VARARGS, NULL }, + { "voxel_measures", Segmenter_VoxelMeasures, METH_VARARGS, NULL }, + { "texture_measures", Segmenter_TextureMeasures, METH_VARARGS, NULL }, + { "region_grow", Segmenter_RegionGrow, METH_VARARGS, NULL }, + { NULL, NULL, 0, NULL}, +}; + +void init_segment(void) +{ + Py_InitModule("_segment", SegmenterMethods); + import_array(); +} + Deleted: trunk/scipy/ndimage/src/segment/Segmenter_IMPL.c =================================================================== --- trunk/scipy/ndimage/segment/Segmenter_IMPL.c 2008-02-11 13:46:25 UTC (rev 3913) +++ trunk/scipy/ndimage/src/segment/Segmenter_IMPL.c 2008-02-11 23:32:11 UTC (rev 3916) @@ -1,3020 +0,0 @@ -#include -#include -#include -#include -#include "ndImage_Segmenter_structs.h" - -// these are for this standalone and come out with the full build -// -#define MAX(a, b) ((a) > (b) ? (a) : (b)) -#define FALSE 0 -#define TRUE 1 - -int NI_GetObjectStats(int rows, int cols, int numberObjects, unsigned short *labeledEdges, - objStruct objectMetrics[]){ - - int i, j, k, m; - int offset; - int count; - int LowX; - int LowY; - int HighX; - int HighY; - int status; - float centerX; - float centerY; - - for(k = 1; k < numberObjects; ++k){ - offset = cols; - LowX = 32767; - LowY = 32767; - HighX = 0; - HighY = 0; - count = 0; - centerX = (float)0.0; - centerY = (float)0.0; - for(i = 1; i < (rows-1); ++i){ - for(j = 1; j < (cols-1); ++j){ - m = labeledEdges[offset+j]; - if(k == m){ - if(i < LowY) LowY = i; - if(j < LowX) LowX = j; - if(i > HighY) HighY = i; - if(j > HighX) HighX = j; - centerX += (float)j; - centerY += (float)i; - ++count; - } - } - offset += cols; - } - /* the bounding box for the 2D blob */ - objectMetrics[k-1].L = LowX; - objectMetrics[k-1].R = HighX; - objectMetrics[k-1].B = LowY; - objectMetrics[k-1].T = HighY; - objectMetrics[k-1].Area = count; - objectMetrics[k-1].cX = centerX/(float)count; - objectMetrics[k-1].cY = centerY/(float)count; - objectMetrics[k-1].Label = k; - } - - status = numberObjects; - return status; - -} - - -void buildKernel(double BPHigh, int HalfFilterTaps, int apearture, float *kernel){ - - int i, j; - float r, t1, t2, t3, t4; - float LC, HC, tLOW, tHIGH; - float pi = (float)3.14159, rad = (float)0.01745; - - LC = (float)0.0; - HC = BPHigh * rad; - t2 = (float)2.0*pi; - t1 = (float)2.0*HalfFilterTaps + (float)1.0; - /* - // build the Filter Kernel - // the kernel starts at 1 only because it is linked to the internal filter2D routine - // the code is not a Fortran code - */ - j = 1; - for(i = -HalfFilterTaps; i <= HalfFilterTaps; ++i){ - r = (float)i; - if(r == (float)0.0){ - tLOW = LC; - tHIGH = HC; - } - else{ - tLOW = (float)(sin(r*LC))/r; - tHIGH = (float)(sin(r*HC))/r; - } - t3 = (float)0.54 + (float)0.46*((float)cos(r*t2/t1)); - t4 = t3*(tHIGH-tLOW); - kernel[j++] = t4; - } - - /* normalize the kernel so unity gain (as is LP filter this is easy) */ - t1 = (float)0.0; - for(j = 1; j <= apearture; ++j){ - t1 += kernel[j]; - } - for(j = 1; j <= apearture; ++j){ - kernel[j] /= t1; - } - - t1 = 
(float)0.0; - for(j = 1; j <= apearture; ++j){ - t1 += kernel[j]; - } - return; -} - -void filter2D(int HalfFilterTaps, int rows, int cols, int lowThreshold, int highThreshold, - float *kernel, double *Image){ - - int i, j, k, n, num1; - int offset; - float sum, value; - float buffer[1024]; - - num1 = HalfFilterTaps + 1; - offset = 0; - for(i = 0; i < rows; ++i){ - /* copy image row to local buffer */ - for(j = 0; j < cols; ++j){ - buffer[num1+j] = Image[offset+j]; - } - /* constant pad the ends of the buffer */ - for(j = 0; j < num1; ++j){ - buffer[j] = buffer[num1]; - } - for(j = cols+num1; j < cols+2*num1; ++j){ - buffer[j] = buffer[cols-1+num1]; - } - - /* Perform Symmetric Convolution in the X dimension. */ - for(n = 0, j = num1; j < (cols+num1); ++j, ++n){ - sum = buffer[j] * kernel[num1]; - for(k = 1; k < num1; ++k){ - sum += kernel[num1-k] * (buffer[j+k] + buffer[j-k]); - } - Image[offset+n] = sum; - } - offset += cols; - } - - offset = 0; - for(i = 0; i < cols; ++i){ - /* copy image column to local buffer */ - offset = 0; - for(j = 0; j < rows; ++j){ - buffer[num1+j] = Image[offset+i]; - offset += cols; - } - /* constant pad the ends of the buffer */ - for(j = 0; j < num1; ++j){ - buffer[j] = buffer[num1]; - } - for(j = rows+num1; j < rows+2*num1; ++j){ - buffer[j] = buffer[rows-1+num1]; - } - - /* Perform Symmetric Convolution in the Y dimension. */ - offset = 0; - for(j = num1; j < (rows+num1); ++j){ - sum = buffer[j] * kernel[num1]; - for(k = 1; k < num1; ++k){ - sum += kernel[num1-k] * (buffer[j+k] + buffer[j-k]); - } - Image[offset+i] = sum; - offset += cols; - } - } - - /* threshold the image */ - offset = 0; - for(i = 0; i < rows; ++i){ - for(j = 0; j < cols; ++j){ - value = Image[offset+j]; - if(value < (float)lowThreshold) value = (float)0.0; - if(value > (float)highThreshold) value = (float)0.0; - Image[offset+j] = value; - } - offset += cols; - } - - return; - -} - -void doPreProcess(int samples, int rows, int cols, double *rawImage, double BPHigh, - int apearture, int lowThreshold, int highThreshold){ - - /* - // 2D low pass filter using bisinc and threshold - // this specific example is on cardiac CT and focuses on segmenting the - // aorta and blood-filled chambers. for MRI the threshold will be different - */ - - float *kernel; - int HalfFilterTaps = (apearture-1)/2; - kernel = calloc(apearture+16, sizeof(float)); - - buildKernel(BPHigh, HalfFilterTaps, apearture, kernel); - filter2D(HalfFilterTaps, rows, cols, lowThreshold, highThreshold, kernel, rawImage); - - free(kernel); - - return; - -} - - -int ConnectedEdgePoints(int rows, int cols, unsigned short *connectedEdges){ - - int i, j, k, l, m; - int offset; - int Label; - int Classes[4096]; - bool NewLabel; - bool Change; - unsigned short T[12]; - - /* - // connected components labeling. pixels touch within 3x3 mask for edge connectedness. 
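// each edge pixel first receives a unique provisional label; alternating top-down and
// bottom-up passes then replace every label with the minimum nonzero label in its 3x3
// neighborhood until a full sweep makes no change, and the surviving labels are finally
// compacted into consecutive values starting at 1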
- */ - Label = 1; - offset = 0; - for(i = 0; i < rows; ++i){ - for(j = 0; j < cols; ++j){ - if(connectedEdges[offset+j] == 1){ - connectedEdges[offset+j] = Label++; - } - } - offset += cols; - } - - while(1){ - Change = FALSE; - /* - // TOP-DOWN Pass for labeling - */ - offset = cols; - for(i = 1; i < rows-1; ++i){ - for(j = 1; j < cols-1; ++j){ - if(connectedEdges[offset+j] != 0){ - T[0] = connectedEdges[offset+j]; - T[1] = connectedEdges[offset+j+1]; - T[2] = connectedEdges[offset-cols+j+1]; - T[3] = connectedEdges[offset-cols+j]; - T[4] = connectedEdges[offset-cols+j-1]; - T[5] = connectedEdges[offset+j-1]; - T[6] = connectedEdges[offset+cols+j-1]; - T[7] = connectedEdges[offset+cols+j]; - T[8] = connectedEdges[offset+cols+j+1]; - m = T[0]; - for(l = 1; l < 9; ++l){ - if(T[l] != 0){ - if(T[l] < m) m = T[l]; - } - } - if(m != connectedEdges[offset+j]){ - Change = TRUE; - connectedEdges[offset+j] = m; - } - } - } - offset += cols; - } - /* - // BOTTOM-UP Pass for labeling - */ - offset = (rows-1)*cols; - for(i = (rows-1); i > 1; --i){ - for(j = (cols-1); j > 1; --j){ - if(connectedEdges[offset+j] != 0){ - T[0] = connectedEdges[offset+j]; - T[1] = connectedEdges[offset+j+1]; - T[2] = connectedEdges[offset-cols+j+1]; - T[3] = connectedEdges[offset-cols+j]; - T[4] = connectedEdges[offset-cols+j-1]; - T[5] = connectedEdges[offset+j-1]; - T[6] = connectedEdges[offset+cols+j-1]; - T[7] = connectedEdges[offset+cols+j]; - T[8] = connectedEdges[offset+cols+j+1]; - m = T[0]; - for(l = 1; l < 9; ++l){ - if(T[l] != 0){ - if(T[l] < m) m = T[l]; - } - } - if(m != connectedEdges[offset+j]){ - Change = TRUE; - connectedEdges[offset+j] = m; - } - } - } - offset -= cols; - } - if(!Change) break; - } /* end while loop */ - - Classes[0] = 0; - Label = 1; - offset = cols; - for(i = 1; i < (rows-1); ++i){ - for(j = 1; j < (cols-1); ++j){ - m = connectedEdges[offset+j]; - if(m > 0){ - NewLabel = TRUE; - for(k = 1; k < Label; ++k){ - if(Classes[k] == m) NewLabel = FALSE; - } - if(NewLabel){ - Classes[Label++] = m; - if(Label > 4000){ - return 0; /* too many labeled regions. 
this is a pathology */ - } - } - } - } - offset += cols; - } - - /* - // re-label the connected blobs in continuous label order - */ - offset = cols; - for(i = 1; i < (rows-1); ++i){ - for(j = 1; j < (cols-1); ++j){ - m = connectedEdges[offset+j]; - if(m > 0){ - for(k = 1; k < Label; ++k){ - if(Classes[k] == m){ - connectedEdges[offset+j] = (unsigned short)k; - break; - } - } - } - } - offset += cols; - } - - return Label; -} - -float magnitude(float X, float Y){ - - return (float)sqrt(X*X + Y*Y); -} - -int traceEdge(int i, int j, int rows, int cols, double cannyLow, float *magImage, - float *HYSImage){ - - int n, m; - int ptr; - int flag; - - ptr = i * cols; - if(HYSImage[ptr+j] == (float)0.0){ - /* - // this point is above high threshold - */ - HYSImage[ptr+j] = (float)1.0; - flag = 0; - for(n = -1; n <= 1; ++n){ - for(m = -1; m <= 1; ++m){ - if(n == 0 && m == 0) continue; - if(((i+n) > 0) && ((j+m) > 0) && ((i+n) < rows) && ((j+m) < cols)){ - ptr = (i+n) * cols; - if(magImage[ptr+j+m] > cannyLow){ - /* - // this point is above low threshold - */ - if(traceEdge(i+n, j+m, rows, cols, cannyLow, magImage, HYSImage)){ - flag = 1; - break; - } - } - } - } - if(flag) break; - } - return(1); - } - - return(0); - -} - - -void edgeThreshold(int rows, int cols, double cannyLow, float *magImage, - float *HYSImage){ - - int i, j; - int ptr; - - for(i = 0; i < rows; ++i){ - ptr = i * cols; - for(j = 0; j < cols; ++j){ - if(magImage[ptr+j] > cannyLow){ - HYSImage[ptr+j] = (float)1.0; - } - } - } - - return; - -} - -void edgeHysteresis(int rows, int cols, double cannyLow, double cannyHigh, - float *magImage, float *HYSImage){ - - int i, j; - int ptr; - - for(i = 0; i < rows; ++i){ - ptr = i * cols; - for(j = 0; j < cols; ++j){ - if(magImage[ptr+j] > cannyHigh){ - traceEdge(i, j, rows, cols, cannyLow, magImage, HYSImage); - } - } - } - - return; - -} - -void nonMaxSupress(int rows, int cols, float aveXValue, float aveYValue, - double *cannyLow, double *cannyHigh, int mode, - float *hDGImage, float *vDGImage, float *magImage){ - - int i, j; - int ptr, ptr_m1, ptr_p1; - float xSlope, ySlope, G1, G2, G3, G4, G, xC, yC; - float scale; - float maxValue = (float)0.0; - float minValue = (float)-1.0; - int histogram[256]; - int value; - int mValue; - int mIndex; - int count; - double step; - double tAve; - - for(i = 1; i < rows-1; ++i){ - ptr = i * cols; - ptr_m1 = ptr - cols; - ptr_p1 = ptr + cols; - for(j = 1; j < cols; ++j){ - magImage[ptr+j] = (float)0.0; - xC = hDGImage[ptr+j]; - yC = vDGImage[ptr+j]; - if((fabs(xC) < aveXValue) && (fabs(yC) < aveYValue)) continue; - G = magnitude(xC, yC); - if(fabs(yC) > fabs(xC)){ - /* vertical gradient */ - xSlope = (float)(fabs(xC) / fabs(yC)); - ySlope = (float)1.0; - G2 = magnitude(hDGImage[ptr_m1+j], vDGImage[ptr_m1+j]); - G4 = magnitude(hDGImage[ptr_p1+j], vDGImage[ptr_p1+j]); - if((xC*yC) > (float)0.0){ - G1 = magnitude(hDGImage[ptr_m1+j-1], vDGImage[ptr_m1+j-1]); - G3 = magnitude(hDGImage[ptr_p1+j+1], vDGImage[ptr_p1+j+1]); - } - else{ - G1 = magnitude(hDGImage[ptr_m1+j+1], vDGImage[ptr_m1+j+1]); - G3 = magnitude(hDGImage[ptr_p1+j-1], vDGImage[ptr_p1+j-1]); - } - } - else{ - /* horizontal gradient */ - xSlope = (float)(fabs(yC) / fabs(xC)); - ySlope = (float)1.0; - G2 = magnitude(hDGImage[ptr+j+1], vDGImage[ptr+j+1]); - G4 = magnitude(hDGImage[ptr+j-1], vDGImage[ptr+j-1]); - if((xC*yC) > (float)0.0){ - G1 = magnitude(hDGImage[ptr_p1+j+1], vDGImage[ptr_p1+j+1]); - G3 = magnitude(hDGImage[ptr_m1+j-1], vDGImage[ptr_m1+j-1]); - } - else{ - G1 = 
magnitude(hDGImage[ptr_m1+j+1], vDGImage[ptr_m1+j+1]); - G3 = magnitude(hDGImage[ptr_p1+j-1], vDGImage[ptr_p1+j-1]); - } - } - if((G > (xSlope*G1+(ySlope-xSlope)*G2))&&(G > (xSlope*G3+(ySlope-xSlope)*G4))){ - magImage[ptr+j] = G; - } - if(magImage[ptr+j] > maxValue) maxValue = magImage[ptr+j]; - if(magImage[ptr+j] < minValue) minValue = magImage[ptr+j]; - } - } - - scale = (float)1.0 / (maxValue-minValue); - ptr = 0; - count = 0; - tAve = 0.0; - for(i = 0; i < rows; ++i){ - for(j = 0; j < cols; ++j){ - magImage[ptr] = scale * (magImage[ptr]-minValue); - if(magImage[ptr] > 0.0){ - tAve += magImage[ptr]; - ++count; - } - ++ptr; - } - } - tAve /= (float)count; - - step = 255.0; - for(i = 0; i < 256; ++i){ - histogram[i] = 0; - } - ptr = 0; - for(i = 0; i < rows; ++i){ - for(j = 0; j < cols; ++j){ - value = (int)(step*(magImage[ptr])); - ++histogram[value]; - ++ptr; - } - } - /* - // now get the max after skipping the low values - */ - mValue = -1; - mIndex = 0; - for(i = 10; i < 256; ++i){ - if(histogram[i] > mValue){ - mValue = histogram[i]; - mIndex = i; - } - } - - if(mode == 1){ - /* based on the mean value of edge energy */ - *cannyLow = ((*cannyLow) * tAve); - *cannyHigh = ((*cannyHigh) * tAve); - } - else{ - /* based on the mode value of edge energy */ - *cannyLow = ((*cannyLow) * ((float)mIndex/step)); - *cannyHigh = ((*cannyHigh) * ((float)mIndex/step)); - } - - return; - -} - -void DGFilters(int samples, int rows, int cols, double cannySigma, int gWidth, - float *aveXValue, float *aveYValue, double *rawImage, - double *dgKernel, float *hDGImage, float *vDGImage){ - - /* - // implements the derivative of Gaussian filter. kernel set by CannyEdges - */ - int i, j, k; - int ptr; - int mLength; - int count; - float *tBuffer = NULL; - double sum; - - *aveXValue = (float)0.0; - *aveYValue = (float)0.0; - - mLength = MAX(rows, cols) + 64; - tBuffer = calloc(mLength, sizeof(float)); - - /* - // filter X - */ - count = 0; - for(i = 0; i < rows; ++i){ - ptr = i * cols; - for(j = gWidth; j < cols-gWidth; ++j){ - sum = dgKernel[0] * rawImage[ptr+j]; - for(k = 1; k < gWidth; ++k){ - sum += dgKernel[k] * (-rawImage[ptr+j+k] + rawImage[ptr+j-k]); - } - hDGImage[ptr+j] = (float)sum; - if(sum != (float)0.0){ - ++count; - *aveXValue += (float)fabs(sum); - } - } - } - if(count){ - *aveXValue /= (float)count; - *aveXValue = (float)0.5 * (*aveXValue); - /* this is 50% of the max, hardwirred for now, and is part of the threshold */ - } - /* - // filter Y - */ - count = 0; - for(i = 0; i < cols; ++i){ - for(j = 0; j < rows; ++j){ - ptr = j * cols; - tBuffer[j] = rawImage[ptr+i]; - } - for(j = gWidth; j < rows-gWidth; ++j){ - ptr = j * cols; - sum = dgKernel[0] * tBuffer[j]; - for(k = 1; k < gWidth; ++k){ - sum += dgKernel[k] * (-tBuffer[j+k] + tBuffer[j-k]); - } - vDGImage[ptr+i] = sum; - if(sum != (float)0.0){ - ++count; - *aveYValue += (float)fabs(sum); - } - } - } - if(count){ - *aveYValue /= (float)count; - *aveYValue = (float)0.5 * (*aveYValue); - /* this is 50% of the max, hardwirred for now, and is part of the threshold */ - } - - free(tBuffer); - - return; - -} - - -int NI_CannyEdges(int samples, int rows, int cols, double cannySigma, - double cannyLow, double cannyHigh, int mode, - int lowThreshold, int highThreshold, double BPHigh, - int apearture, double *rawImage, - unsigned short *edgeImage, int *groups){ - - int i, j; - int offset; - int doHysteresis = 0; - int gWidth; - int mLength; - int status; - float aveXValue; - float aveYValue; - double t; - double dgKernel[20]; - float *HYSImage = 
NULL; - float *hDGImage = NULL; - float *vDGImage = NULL; - float *magImage = NULL; - float *tBuffer = NULL; - - /* filter */ - doPreProcess(samples, rows, cols, rawImage, BPHigh, apearture, lowThreshold, highThreshold); - - /* - // memory for magnitude, horizontal and vertical derivative of Gaussian filter - */ - mLength = MAX(rows, cols) + 64; - HYSImage = calloc(samples, sizeof(float)); - hDGImage = calloc(samples, sizeof(float)); - vDGImage = calloc(samples, sizeof(float)); - magImage = calloc(samples, sizeof(float)); - tBuffer = calloc(mLength, sizeof(float)); - - /* - // build derivative of Gaussian filter kernel - // kernel is anti-symmetric so convolution is k[j]*(v[i+j] - v[i-j]) - */ - gWidth = 20; - for(i = 0; i < gWidth; ++i){ - t = (float)i; - dgKernel[i] = (float)exp((double)((-t*t)/((float)2.0 * cannySigma * cannySigma))); - dgKernel[i] *= -(t / (cannySigma * cannySigma)); - } - for(i = 0; i < samples; ++i){ - HYSImage[i] = (float)0.0; - } - - DGFilters(samples, rows, cols, cannySigma, gWidth, &aveXValue, &aveYValue, - rawImage, dgKernel, hDGImage, vDGImage); - nonMaxSupress(rows, cols, aveXValue, aveYValue, &cannyLow, &cannyHigh, - mode, hDGImage, vDGImage, magImage); - if(doHysteresis){ - edgeHysteresis(rows, cols, cannyLow, cannyHigh, magImage, HYSImage); - } - else{ - edgeThreshold(rows, cols, cannyLow, magImage, HYSImage); - } - - /* - // edge image - */ - for(i = 0; i < samples; ++i){ - edgeImage[i] = (unsigned short)HYSImage[i]; - } - *groups = ConnectedEdgePoints(rows, cols, edgeImage); - - /* - // prune the isolated pixels - */ - offset = 0; - for(i = 0; i < rows; ++i){ - for(j = 0; j < cols; ++j){ - if(edgeImage[offset+j] > (*groups)){ - edgeImage[offset+j] = 0; - } - } - offset += cols; - } - - - free(tBuffer); - free(hDGImage); - free(vDGImage); - free(magImage); - free(HYSImage); - - status = *groups; - return status; - -} - -void doSobel(int samples, int rows, int cols, double sobelLow, int mode, - double *rawImage, unsigned short *edgeImage){ - - int i, j; - int p, m, n; - int offset; - int offsetM1; - int offsetP1; - int minValue, maxValue; - int pAve = 0; - int count = 0; - int histogram[256]; - int value; - int maxIndex; - float pThreshold; - double scale; - double step; - float *filteredImage = NULL; - - filteredImage = calloc(samples, sizeof(float)); - - minValue = 10000; - maxValue = -10000; - - offset = 0; - for(i = 0; i < rows; ++i){ - for(j = 0; j < cols; ++j){ - filteredImage[offset+j] = 0; - edgeImage[offset+j] = 0; - } - offset += cols; - } - - /* - // Sobel - */ - offset = cols; - for(i = 1; i < rows-1; ++i){ - offsetM1 = offset - cols; - offsetP1 = offset + cols; - for(j = 1; j < cols-1; ++j){ - n = 2*rawImage[offsetM1+j] + rawImage[offsetM1+j-1] + rawImage[offsetM1+j+1] - - 2*rawImage[offsetP1+j] - rawImage[offsetP1+j-1] - rawImage[offsetP1+j+1]; - m = 2*rawImage[offset+j-1] + rawImage[offsetM1+j-1] + rawImage[offsetP1+j-1] - - 2*rawImage[offset+j+1] - rawImage[offsetM1+j+1] - rawImage[offsetP1+j+1]; - p = (int)sqrt((float)(m*m) + (float)(n*n)); - if(p > 0){ - pAve += p; - ++count; - if(p > maxValue) maxValue = p; - if(p < minValue) minValue = p; - } - filteredImage[offset+j] = p; - } - offset += cols; - } - - /* threshold based on ave */ - pAve /= count; - scale = 1.0 / maxValue; - - step = 255.0/(maxValue-minValue); - for(i = 0; i < 256; ++i){ - histogram[i] = 0; - } - offset = 0; - for(i = 0; i < rows; ++i){ - for(j = 0; j < cols; ++j){ - value = (int)(step*(filteredImage[offset+j]-minValue)); - ++histogram[value]; - } - offset += cols; - } - 
/* - // now get the max after skipping the low values - */ - maxValue = -1; - maxIndex = 0; - for(i = 10; i < 256; ++i){ - if(histogram[i] > maxValue){ - maxValue = histogram[i]; - maxIndex = i; - } - } - - if(mode == 1){ - /* based on the mean value of edge energy */ - pThreshold = (int)(sobelLow * (float)pAve); - } - else{ - /* based on the mode value of edge energy */ - pThreshold = (sobelLow * (minValue + ((float)maxIndex/step))); - } - - offset = 0; - for(i = 0; i < rows; ++i){ - for(j = 0; j < cols; ++j){ - if(filteredImage[offset+j] > pThreshold){ - edgeImage[offset+j] = 1; - } - else{ - edgeImage[offset+j] = 0; - } - filteredImage[offset+j] *= scale; - } - offset += cols; - } - - free(filteredImage); - - return; - - -} - -void estimateThreshold(float *lowThreshold, float *highThreshold, float ShenCastanLow, - int rows, int cols, float *SourceImage){ - - int i, j; - int offset; - int value; - int mIndex; - int histogram[256]; - float low, high; - float scale; - - low = (float)1000.0; - high = (float)-1000.0; - - offset = 0; - for(i = 0; i < rows; ++i){ - for(j = 0; j < cols; ++j){ - if(fabs(SourceImage[offset+j]) > high) high = fabs(SourceImage[offset+j]); - if(fabs(SourceImage[offset+j]) < low) low = fabs(SourceImage[offset+j]); - } - offset += cols; - } - - scale = (float)255.0 / (high-low); - for(i = 0; i < 256; ++i){ - histogram[i] = 0; - } - offset = 0; - for(i = 0; i < rows; ++i){ - for(j = 0; j < cols; ++j){ - value = (int)(scale*(fabs(SourceImage[offset+j]) - low)); - ++histogram[value]; - } - offset += cols; - } - - /* - // now get the edge energy mode - */ - value = 0; - mIndex = 10; - for(i = 10; i < 256; ++i){ - if(histogram[i] > value){ - value = histogram[i]; - mIndex = i; - } - } - - *highThreshold = ((float)mIndex / scale) + low; - *lowThreshold = ((float)mIndex / scale) + low; - - *highThreshold *= ShenCastanLow; - *lowThreshold *= ShenCastanLow; - - return; - -} - -void thresholdEdges(float *SourceImage, unsigned short *EdgeImage, double ShenCastanLow, - int rows, int cols){ - - int i, j; - int offset; - float tLow, tHigh; - - /* - // SourceImage contains the adaptive gradient - // get threshold from the mode of the edge energy - */ - estimateThreshold(&tLow, &tHigh, ShenCastanLow, rows, cols, SourceImage); - - offset = 0; - for(i = 0; i < rows; ++i){ - for(j = 0; j < cols; ++j){ - if(SourceImage[offset+j] > tLow){ - EdgeImage[offset+j] = 1; - } - else{ - EdgeImage[offset+j] = 0; - } - } - offset += cols; - } - - return; - -} - -float adaptiveGradient(float *BLImage, float *FilterImage, int nrow, int ncol, - int cols, int window){ - - int i, j; - int offset; - int numOn, numOff; - int hWindow = window/2; - float sumOn, sumOff; - float aveOn, aveOff; - - numOn = 0; - numOff = 0; - - sumOn = (float)0.0; - sumOff = (float)0.0; - - aveOn = (float)0.0; - aveOff = (float)0.0; - - offset = nrow * cols; - for(i = -hWindow; i < hWindow; ++i){ - for(j = -hWindow; j < hWindow; ++j){ - if(BLImage[offset+(i*cols)+(j+ncol)] == 1){ - sumOn += FilterImage[offset+(i*cols)+(j+ncol)]; - ++numOn; - } - else{ - sumOff += FilterImage[offset+(i*cols)+(j+ncol)]; - ++numOff; - } - } - } - - if(numOn){ - aveOn = sumOn / numOn; - } - - if(numOff){ - aveOff = sumOff / numOff; - } - - return (aveOff-aveOn); - -} - -void getZeroCrossings(float *SourceImage, float *FilterImage, float *BLImage, - int rows, int cols, int window){ - - int i, j; - int offset; - bool validEdge; - - offset = 0; - for(i = 0; i < rows; ++i){ - for(j = 0; j < cols; ++j){ - SourceImage[offset+j] = 0.0; - } - offset += 
cols; - } - - offset = window*cols; - for(i = window; i < rows-window; ++i){ - for(j = window; j < cols-window; ++j){ - validEdge = FALSE; - if((BLImage[offset+j] == 1) && (BLImage[offset+cols+j] == 0)){ - if((FilterImage[offset+cols+j] - FilterImage[offset-cols+j]) > 0.0){ - validEdge = TRUE; - } - } - else if((BLImage[offset+j] == 1) && (BLImage[offset+j+1] == 0)){ - if((FilterImage[offset+j+1] - FilterImage[offset+j-1]) > 0.0){ - validEdge = TRUE; - } - } - else if((BLImage[offset+j] == 1) && (BLImage[offset-cols+j] == 0)){ - if((FilterImage[offset+cols+j] - FilterImage[offset-cols+j]) < 0.0){ - validEdge = TRUE; - } - } - else if((BLImage[offset+j] == 1) && (BLImage[offset+j-1] == 0)){ - if((FilterImage[offset+j+1] - FilterImage[offset+j-1]) < 0.0){ - validEdge = TRUE; - } - } - if(validEdge){ - /* adaptive gradeint is signed */ - SourceImage[offset+j] = (float)fabs(adaptiveGradient(BLImage, FilterImage, i, j, cols, window)); - } - } - offset += cols; - } - - return; - -} - - -void computeBandedLaplacian(float *image1, float *image2, float *BLImage, int rows, int cols){ - - int i, j; - int offset; - float t; - - /* - // like an unsharp mask - */ - offset = 0; - for(i = 0; i < rows; ++i){ - for(j = 0; j < cols; ++j){ - t = image1[offset+j] - image2[offset+j]; - if(t < (float)0.0){ - t = (float)0.0; - } - else{ - t = (float)1.0; - } - BLImage[offset+j] = t; - } - offset += cols; - } - - return; - -} - -void thresholdImage(float *Raw, float *Filtered, int rows, int cols, int tLow, int tHigh){ - - int i, j; - int ptr; - - ptr = 0; - for(i = 0; i < rows; ++i){ - for(j = 0; j < cols; ++j){ - if(Raw[ptr] > tHigh){ - Raw[ptr] = 0.0; - Filtered[ptr] = 0.0; - } - if(Raw[ptr] < tLow){ - Raw[ptr] = 0.0; - Filtered[ptr] = 0.0; - } - ++ptr; - } - } - - return; - -} - -void ISEF_Vertical(float *SourceImage, float *FilterImage, float *A, float *B, - int rows, int cols, double b){ - - - int i, j; - int offset; - float b1, b2; - - b1 = ((float)1.0 - b)/((float)1.0 + b); - b2 = b * b1; - - /* - // set the boundaries - */ - offset = (rows-1)*cols; - for(i = 0; i < cols; ++i){ - /* process row 0 */ - A[i] = b1 * SourceImage[i]; - /* process row N-1 */ - B[offset+i] = b2 * SourceImage[offset+i]; - } - - /* - // causal component of IIR filter - */ - offset = cols; - for(i = 1; i < rows; ++i){ - for(j = 0; j < cols; ++j){ - /* - // IIR ISEF filter applied across rows - */ - A[offset+j] = (b * A[offset-cols+j]) + (b1 * SourceImage[offset+j]); - } - offset += cols; - } - - /* - // anti-causal component of IIR filter - */ - offset = (rows-2)*cols; - for(i = rows-2; i >= 0; --i){ - for(j = 0; j < cols; ++j){ - /* - // IIR ISEF filter applied across rows - */ - B[offset+j] = (b * B[offset+cols+j]) + (b2 * SourceImage[offset+j]); - } - offset -= cols; - } - - offset = (rows-1)*cols; - for(j = 0; j < cols-1; ++j){ - FilterImage[offset+j] = A[offset+j]; - } - - /* - // add causal and anti-causal IIR parts - */ - offset = 0; - for(i = 1; i < rows-2; ++i){ - for(j = 0; j < cols-1; ++j){ - FilterImage[offset+j] = A[offset+j] + B[offset+cols+j]; - } - offset += cols; - } - - return; - -} - -void ISEF_Horizontal(float *SourceImage, float *FilterImage, float *A, float *B, - int rows, int cols, double b){ - - - /* - // source and smooth are the same in this pass of the 2D IIR - */ - - int i, j; - int offset; - float b1, b2; - - b1 = ((float)1.0 - b)/((float)1.0 + b); - b2 = b * b1; - - /* - // columns boundaries - */ - offset = 0; - for(i = 0; i < rows; ++i){ - // col 0 - A[offset] = b1 * SourceImage[offset]; - // col N-1 - 
B[offset+cols-1] = b2 * SourceImage[offset+cols-1]; - } - - /* - // causal IIR part - */ - offset = 0; - for(j = 1; j < cols; ++j){ - for(i = 0; i < rows; ++i){ - A[offset+j] = (b * A[offset+j-1]) + (b1 * SourceImage[offset+j]); - } - offset += cols; - } - - /* - // anti-causal IIR part - */ - offset = 0; - for(j = cols-2; j > 0; --j){ - for(i = 0; i < rows; ++i){ - B[offset+j] = (b * B[offset+j+1]) + (b2 * SourceImage[offset+j]); - } - offset += cols; - } - - /* - // filtered output. this is 2-pass IIR and pass 1 is vertical - */ - offset = 0; - for(i = 0; i < rows; ++i){ - FilterImage[offset+cols-1] = A[offset+cols-1]; - } - - /* - // add causal and anti-causal IIR parts - */ - for(i = 0; i < rows; ++i){ - for(j = 0; j < cols-1; ++j){ - FilterImage[offset+j] = A[offset+j] + B[offset+j+1]; - } - offset += cols; - } - - return; - -} - - -void computeISEF(float *SourceImage, float *FilterImage, int rows, int cols, double b){ - - int imageSize = rows*cols; - float *A; - float *B; - - A = calloc(imageSize, sizeof(float)); - B = calloc(imageSize, sizeof(float)); - - ISEF_Vertical(SourceImage, FilterImage, A, B, rows, cols, b); - ISEF_Horizontal(FilterImage, FilterImage, A, B, rows, cols, b); - - free(A); - free(B); - - return; - -} - -void Shen_Castan(double b, double ShenCastanLow, int rows, int cols, int window, - int lowThreshold, int highThreshold, - double *RawImage, unsigned short *EdgeImage){ - - int i; - int imageSize = rows*cols; - float *FilterImage; - float *BinaryLaplacianImage; - float *SourceImage; - - FilterImage = calloc(imageSize, sizeof(float)); - BinaryLaplacianImage = calloc(imageSize, sizeof(float)); - SourceImage = calloc(imageSize, sizeof(float)); - - for(i = 0; i < imageSize; ++i){ - SourceImage[i] = RawImage[i]; - } - computeISEF(SourceImage, FilterImage, rows, cols, b); - /* optional thresholding based on low, high */ - thresholdImage(SourceImage, FilterImage, rows, cols, lowThreshold, highThreshold); - computeBandedLaplacian(FilterImage, SourceImage, BinaryLaplacianImage, rows, cols); - /* the new source image is now the adaptive gradient */ - getZeroCrossings(SourceImage, FilterImage, BinaryLaplacianImage, rows, cols, window); - thresholdEdges(SourceImage, EdgeImage, ShenCastanLow, rows, cols); - - free(FilterImage); - free(BinaryLaplacianImage); - free(SourceImage); - - return; - -} - -int NI_ShenCastanEdges(int samples, int rows, int cols, double b, double ShenCastanLow, - int window, int lowThreshold, int highThreshold, - double *rawImage, unsigned short *edgeImage, int *groups){ - - - int i, j; - int offset; - int status = 0; - - Shen_Castan(b, ShenCastanLow, rows, cols, window, lowThreshold, highThreshold, rawImage, edgeImage); - *groups = ConnectedEdgePoints(rows, cols, edgeImage); - - - // - // prune the isolated pixels - // - offset = 0; - for(i = 0; i < rows; ++i){ - for(j = 0; j < cols; ++j){ - if(edgeImage[offset+j] > (*groups)){ - edgeImage[offset+j] = 0; - } - } - offset += cols; - } - - status = *groups; - - return status; - -} - -void buildBinaryImage(int rows, int cols, double *rawImage, unsigned short *edgeImage, - int lowThreshold, int highThreshold){ - - int i, j; - int offset; - double value; - int maskValue; - - offset = 0; - for(i = 0; i < rows; ++i){ - for(j = 0; j < cols; ++j){ - value = rawImage[offset+j]; - maskValue = 1; - if(value < (double)lowThreshold) maskValue = 0; - if(value > (double)highThreshold) maskValue = 0; - edgeImage[offset+j] = maskValue; - } - offset += cols; - } - - return; - -} - - - -void morphoFilterBinaryImage(int 
rows, int cols, unsigned short *edgeImage, - int CloseSize, int OpenSize){ - - - int i, j; - int offset, offset2; - unsigned short *cmask; - unsigned short *omask; - int olapValuesC[4]; - int olapValuesO[4]; - int CloseMaskSize = 1; - int OpenMaskSize = 1; - int LowValue1, HighValue1; - int LowValue2, HighValue2; - int spadSize; - int maskSize = 11; - unsigned char *ImageE; - unsigned char *ImageC; - - spadSize = MAX(rows, cols); - - ImageE = calloc(spadSize*spadSize, sizeof(unsigned char)); - ImageC = calloc(spadSize*spadSize, sizeof(unsigned char)); - - cmask = calloc(11*11, sizeof(unsigned short)); - omask = calloc(11*11, sizeof(unsigned short)); - - // - // Close filter - // - if(CloseSize){ - CloseMaskSize = (CloseSize-1)/2; - for(i = 0; i < 2*CloseMaskSize+1; ++i){ - for(j = 0; j < 2*CloseMaskSize+1; ++j){ - cmask[i*maskSize+j] = 1; - } - } - LowValue1 = 0; - HighValue1 = 1; - LowValue2 = 1; - HighValue2 = 0; - olapValuesC[0] = LowValue1; - olapValuesC[1] = HighValue1; - olapValuesC[2] = LowValue2; - olapValuesC[3] = HighValue2; - } - - /* - // Open filter - */ - if(OpenSize){ - OpenMaskSize = (OpenSize-1)/2; - for(i = 0; i < 2*OpenMaskSize+1; ++i){ - for(j = 0; j < 2*OpenMaskSize+1; ++j){ - omask[i*maskSize+j] = 1; - } - } - LowValue1 = 1; - HighValue1 = 0; - LowValue2 = 0; - HighValue2 = 1; - olapValuesO[0] = LowValue1; - olapValuesO[1] = HighValue1; - olapValuesO[2] = LowValue2; - olapValuesO[3] = HighValue2; - } - - offset = 0; - offset2 = 0; - for(i = 0; i < rows; ++i){ - for(j = 0; j < cols; ++j){ - ImageE[offset2+j] = (unsigned char)edgeImage[offset+j]; - } - offset2 += spadSize; - offset += cols; - } - - if(OpenSize){ - OpenCloseFilter(olapValuesO, OpenMaskSize, rows, cols, spadSize, ImageE, ImageC, omask); - } - - if(CloseSize){ - OpenCloseFilter(olapValuesC, CloseMaskSize, rows, cols, spadSize, ImageE, ImageC, cmask); - } - - offset = 0; - offset2 = 0; - for(i = 0; i < rows; ++i){ - for(j = 0; j < cols; ++j){ - if(ImageE[offset2+j] == 1){ - /* this will activate some original off-pixels */ - edgeImage[offset+j] = 1; - } - else{ - /* this will zero some original on-pixels */ - edgeImage[offset+j] = 0; - } - } - offset2 += spadSize; - offset += cols; - } - - free(ImageE); - free(ImageC); - - free(cmask); - free(omask); - - return; - -} - -void doRegionGrow(int samples, int rows, int cols, double *rawImage, - unsigned short *edgeImage, int lowThreshold, - int highThreshold, int closeWindow, int openWindow){ - - buildBinaryImage(rows, cols, rawImage, edgeImage, lowThreshold, highThreshold); - morphoFilterBinaryImage(rows, cols, edgeImage, closeWindow, openWindow); - - return; - -} - -int NI_RegionGrow(int samples, int rows, int cols, int lowThreshold, int highThreshold, - int closeWindow, int openWindow, double *rawImage, - unsigned short *edgeImage, int *groups){ - - int i, j; - int offset; - int status; - - doRegionGrow(samples, rows, cols, rawImage, edgeImage, lowThreshold, - highThreshold, closeWindow, openWindow); - *groups = ConnectedEdgePoints(rows, cols, edgeImage); - - // - // prune the isolated pixels - // - offset = 0; - for(i = 0; i < rows; ++i){ - for(j = 0; j < cols; ++j){ - if(edgeImage[offset+j] > (*groups)){ - edgeImage[offset+j] = 0; - } - } - offset += cols; - } - - status = *groups; - return status; - -} - -int NI_SobelEdges(int samples, int rows, int cols, double sobelLow, int mode, - int lowThreshold, int highThreshold, double BPHigh, - int apearture, double *rawImage, unsigned short *edgeImage, int *groups){ - - - int i, j; - int offset; - int status; - - 
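/*
// band-pass pre-filter the raw image, take the Sobel gradient magnitude and threshold it,
// then label the binary edge map with connected components and prune the isolated pixels
*/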
doPreProcess(samples, rows, cols, rawImage, BPHigh, apearture, lowThreshold, highThreshold); - doSobel(samples, rows, cols, sobelLow, mode, rawImage, edgeImage); - *groups = ConnectedEdgePoints(rows, cols, edgeImage); - - - /* - // prune the isolated pixels - */ - offset = 0; - for(i = 0; i < rows; ++i){ - for(j = 0; j < cols; ++j){ - if(edgeImage[offset+j] > (*groups)){ - edgeImage[offset+j] = 0; - } - } - offset += cols; - } - - status = *groups; - return status; - -} - -void initThinFilter(int *J_mask, int *K_mask){ - - int i, j; - int Column; - int maskCols = 3; - - for(i = 0; i < 3; ++i){ - for(j = 0; j < 30; ++j){ - J_mask[i+j*maskCols] = 0; - K_mask[i+j*maskCols] = 0; - } - } - - Column = 0; - J_mask[0+maskCols*(Column+0)] = 1; - J_mask[0+maskCols*(Column+1)] = 1; - J_mask[0+maskCols*(Column+2)] = 1; - J_mask[1+maskCols*(Column+1)] = 1; - - Column += 3; - J_mask[0+maskCols*(Column+1)] = 1; - J_mask[1+maskCols*(Column+1)] = 1; - J_mask[1+maskCols*(Column+2)] = 1; - - Column += 3; - J_mask[0+maskCols*(Column+0)] = 1; - J_mask[1+maskCols*(Column+0)] = 1; - J_mask[2+maskCols*(Column+0)] = 1; - J_mask[1+maskCols*(Column+1)] = 1; - - Column += 3; - J_mask[0+maskCols*(Column+1)] = 1; - J_mask[1+maskCols*(Column+0)] = 1; - J_mask[1+maskCols*(Column+1)] = 1; - - Column += 3; - J_mask[0+maskCols*(Column+2)] = 1; - J_mask[1+maskCols*(Column+1)] = 1; - J_mask[1+maskCols*(Column+2)] = 1; - J_mask[2+maskCols*(Column+2)] = 1; - - Column += 3; - J_mask[1+maskCols*(Column+0)] = 1; - J_mask[1+maskCols*(Column+1)] = 1; - J_mask[2+maskCols*(Column+1)] = 1; - - Column += 3; - J_mask[1+maskCols*(Column+1)] = 1; - J_mask[2+maskCols*(Column+0)] = 1; - J_mask[2+maskCols*(Column+1)] = 1; - J_mask[2+maskCols*(Column+2)] = 1; - - Column += 3; - J_mask[1+maskCols*(Column+1)] = 1; - J_mask[1+maskCols*(Column+2)] = 1; - J_mask[2+maskCols*(Column+1)] = 1; - - Column = 0; - K_mask[2+maskCols*(Column+0)] = 1; - K_mask[2+maskCols*(Column+1)] = 1; - K_mask[2+maskCols*(Column+2)] = 1; - - Column += 3; - K_mask[1+maskCols*(Column+0)] = 1; - K_mask[2+maskCols*(Column+0)] = 1; - K_mask[2+maskCols*(Column+1)] = 1; - - Column += 3; - K_mask[0+maskCols*(Column+2)] = 1; - K_mask[1+maskCols*(Column+2)] = 1; - K_mask[2+maskCols*(Column+2)] = 1; - - Column += 3; - K_mask[1+maskCols*(Column+2)] = 1; - K_mask[2+maskCols*(Column+1)] = 1; - K_mask[2+maskCols*(Column+2)] = 1; - - Column += 3; - K_mask[0+maskCols*(Column+0)] = 1; - K_mask[1+maskCols*(Column+0)] = 1; - K_mask[2+maskCols*(Column+0)] = 1; - - Column += 3; - K_mask[0+maskCols*(Column+1)] = 1; - K_mask[0+maskCols*(Column+2)] = 1; - K_mask[1+maskCols*(Column+2)] = 1; - - Column += 3; - K_mask[0+maskCols*(Column+0)] = 1; - K_mask[0+maskCols*(Column+1)] = 1; - K_mask[0+maskCols*(Column+2)] = 1; - - Column += 3; - K_mask[0+maskCols*(Column+0)] = 1; - K_mask[0+maskCols*(Column+1)] = 1; - K_mask[1+maskCols*(Column+0)] = 1; - - return; - -} - -void ThinningFilter(int regRows, int regColumns, int spadSize, int *J_mask, int *K_mask, - unsigned char *Input, unsigned char *CInput, unsigned char *ErosionStage, - unsigned char *DialationStage, unsigned char *HMT, unsigned char *Copy){ - - int i, j, k, l, m, n, overlap, hit; - int LowValue1, HighValue1; - int LowValue2, HighValue2; - int Column, T, nloop; - int Offset; - int N, M; - int maskCols = 3; - int j_mask[3][3]; - int k_mask[3][3]; - - N = regRows; - M = regColumns; - - LowValue1 = 1; - HighValue1 = 0; - - LowValue2 = 0; - HighValue2 = 1; - - Offset = 0; - for(i = 0; i < N; ++i){ - for(j = 0; j < M; ++j){ - Copy[Offset+j] = 
Input[Offset+j]; - } - Offset += spadSize; - } - - nloop = 0; - while(1){ - /* erode */ - Column = 0; - for(n = 0; n < 8; ++n){ - for(i = 0; i < 3; ++i){ - for(j = 0; j < 3; ++j){ - j_mask[i][j] = J_mask[i+maskCols*(Column+j)]; - } - } - for(i = 0; i < 3; ++i){ - for(j = 0; j < 3; ++j){ - k_mask[i][j] = K_mask[i+maskCols*(Column+j)]; - } - } - Column += 3; - - Offset = spadSize; - for(i = 1; i < N-1; ++i){ - for(j = 1; j < M-1; ++j){ - hit = LowValue1; - for(k = -1; k < 2; ++k){ - for(l = -1; l < 2; ++l){ - T = j_mask[k+1][l+1]; - if(T == 1){ - overlap = T*Input[Offset+(k*spadSize)+j+l]; - if(overlap == HighValue1) hit = HighValue1; - } - } - } - ErosionStage[Offset+j] = hit; - } - Offset += spadSize; - } - - /* dialate */ - Offset = 0; - for(i = 0; i < N; ++i){ - for(j = 0; j < M; ++j){ - CInput[Offset+j] = (~Input[Offset+j]) & 0x1; - } - Offset += spadSize; - } - - Offset = spadSize; - for(i = 1; i < N-1; ++i){ - for(j = 1; j < M-1; ++j){ - hit = LowValue1; - for(k = -1; k < 2; ++k){ - for(l = -1; l < 2; ++l){ - T = k_mask[k+1][l+1]; - if(T == 1){ - overlap = T*CInput[Offset+(k*spadSize)+j+l]; - if(overlap == HighValue1) hit = HighValue1; - } - } - } - DialationStage[Offset+j] = hit; - } - Offset += spadSize; - } - - /* form the HMT */ - Offset = 0; - for(i = 0; i < N; ++i){ - for(j = 0; j < M; ++j){ - m = (ErosionStage[Offset+j]*DialationStage[Offset+j]); - HMT[Offset+j] = m; - } - Offset += spadSize; - } - - /* Thin for stage n */ - - Offset = 0; - for(i = 0; i < N; ++i){ - for(j = 0; j < M; ++j){ - HMT[Offset+j] = (~HMT[Offset+j]) & 0x1; - } - Offset += spadSize; - } - - Offset = 0; - for (i = 0; i < N; ++i){ - for (j = 0; j < M; ++j){ - m = (Input[Offset+j]*HMT[Offset+j]); - Input[Offset+j] = m; - } - Offset += spadSize; - } - } - - /* check for no change */ - hit = 0; - Offset = 0; - for(i = 0; i < N; ++i){ - for(j = 0; j < M; ++j){ - hit += abs(Copy[Offset+j]-Input[Offset+j]); - } - Offset += spadSize; - } - if(!hit) break; - - hit = 0; - Offset = 0; - for(i = 0; i < N; ++i){ - for(j = 0; j < M; ++j){ - Copy[Offset+j] = Input[Offset+j]; - if(Input[Offset+j]) ++hit; - } - Offset += spadSize; - } - /* nloop is data dependent. 
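// the enclosing while loop repeats the eight-direction erode/dilate hit-or-miss passes
// and exits only when a complete pass leaves the image unchanged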
*/ - ++nloop; - } - - - return; - -} - - -int NI_ThinFilter(int samples, int rows, int cols, int numberObjects, - unsigned short *edgeImage, objStruct objectMetrics[]){ - - int i, j; - int loop; - int label; - int left, right, top, bottom; - int roiRows, roiCols; - int srcOffset; - int dstOffset; - int status; - int inflate = 1; - int *J_mask; - int *K_mask; - - unsigned char *Input; - unsigned char *CInput; - unsigned char *ErosionStage; - unsigned char *DialationStage; - unsigned char *HMT; - unsigned char *Copy; - unsigned short *thinEdgeImage; - - /* - // scratch pad (spad) memory - */ - Input = calloc(samples, sizeof(unsigned char)); - CInput = calloc(samples, sizeof(unsigned char)); - ErosionStage = calloc(samples, sizeof(unsigned char)); - DialationStage = calloc(samples, sizeof(unsigned char)); - HMT = calloc(samples, sizeof(unsigned char)); - Copy = calloc(samples, sizeof(unsigned char)); - thinEdgeImage = calloc(samples, sizeof(unsigned short)); - J_mask = calloc(3*30, sizeof(int)); - K_mask = calloc(3*30, sizeof(int)); - - initThinFilter(J_mask, K_mask); - for(loop = 0; loop < numberObjects; ++loop){ - label = objectMetrics[loop].Label; - left = objectMetrics[loop].L; - right = objectMetrics[loop].R; - top = objectMetrics[loop].T; - bottom = objectMetrics[loop].B; - roiRows = top-bottom+2*inflate; - roiCols = right-left+2*inflate; - - /* - // clear the scratch pad - */ - srcOffset = 0; - for(i = 0; i < roiRows; ++i){ - for(j = 0; j < roiCols; ++j){ - Input[srcOffset+j] = 0; - } - srcOffset += cols; - } - - /* - // copy the ROI for MAT (medial axis transformation) filter - */ - dstOffset = inflate*rows; - for(i = bottom; i < top; ++i){ - srcOffset = i*cols; - for(j = left; j < right; ++j){ - if(edgeImage[srcOffset+j] == label){ - Input[dstOffset+j-left+inflate] = 1; - } - } - dstOffset += cols; - } - ThinningFilter(roiRows, roiCols, cols, J_mask, K_mask, Input, CInput, - ErosionStage, DialationStage, HMT, Copy); - - /* - // copy the MAT roi to the new edgeImage (clip the inflate border) - */ - dstOffset = inflate*rows; - for(i = bottom; i < top; ++i){ - srcOffset = i*cols; - for(j = left; j < right; ++j){ - if(Input[dstOffset+j-left+inflate]){ - thinEdgeImage[srcOffset+j] = label; - } - } - dstOffset += cols; - } - } - - /* - // copy the MAT edges and return the thinned edges - // this will prune the isolated edge points from the edgeImage source - */ - for(i = 0; i < rows*cols; ++i){ - edgeImage[i] = thinEdgeImage[i]; - } - - free(Input); - free(CInput); - free(ErosionStage); - free(DialationStage); - free(HMT); - free(Copy); - free(thinEdgeImage); - free(J_mask); - free(K_mask); - - status = 1; - - return status; - -} - - -void generateMask(unsigned char *ImageH, bPOINT *boundary, int newSamples, int label, int cols){ - - /* - // get the boundary point pairs (left, right) for each line - // if there is no pair, then the boundary is open - // then fill the image in with the current label - */ - - int i, j, k, m; - int list[2048]; - int distance; - int neighbor = 4; - int index; - int offset; - int maxDistance = 1024; - int x, y; - int low, high; - - for(i = 0; i < newSamples; ++i){ - boundary[i].haveLink = FALSE; - boundary[i].linkIndex = -1; - } - - for(i = 0; i < newSamples; ++i){ - if(!boundary[i].haveLink){ - boundary[i].haveLink = TRUE; - x = boundary[i].x; - y = boundary[i].y; - for(k = 0, j = 0; j < newSamples; ++j){ - if((j != i)){ - if(boundary[j].y == y){ - list[k] = j; - ++k; - } - } - } - /* now get the closest boundary */ - if(k){ - distance = maxDistance; - index = 
-1; - for(j = 0; j < k; ++j){ - m = abs(x - boundary[list[j]].x); - if((m < distance) && (m > neighbor)){ - distance = m; - index = list[j]; - } - else if(m <= neighbor){ - boundary[list[j]].haveLink = TRUE; - } - } - if(index != -1){ - boundary[i].linkIndex = index; - boundary[index].linkIndex = i; - boundary[index].haveLink = TRUE; - if(boundary[i].x < boundary[index].x){ - low = boundary[i].x; - high = boundary[index].x; - } - else{ - low = boundary[index].x; - high = boundary[i].x; - } - /* - // do the fill - */ - offset = y * cols; - for(j = low; j <= high; ++j){ - ImageH[offset+j] = label; - } - } - } - else{ - /* boundary point is isolated */ - boundary[i].linkIndex = i; - } - } - } - - return; - -} - -void getBoundaryMetrics(bPOINT *boundary, float *length, float *minRadius, - float *maxRadius, float *aveRadius, - float Xcenter, float Ycenter, int newSamples){ - - int j; - float dX, dY; - float distance; - - if(newSamples < 2){ - *length = (float)0.0; - *minRadius = (float)0.0; - *maxRadius = (float)0.0; - *aveRadius = (float)0.0; - return; - } - - *length = (float)0.0; - for(j = 1; j < newSamples; ++j){ - dX = (float)(boundary[j].x - boundary[j-1].x); - dY = (float)(boundary[j].y - boundary[j-1].y); - distance = (float)sqrt(dX*dX + dY*dY); - *length += distance; - } - - *minRadius = (float)10000.0; - *maxRadius = (float)-10000.0; - *aveRadius = (float)0.0; - for(j = 0; j < newSamples; ++j){ - dX = (float)(boundary[j].x - Xcenter); - dY = (float)(boundary[j].y - Ycenter); - distance = (float)sqrt(dX*dX + dY*dY); - *aveRadius += distance; - if(distance < *minRadius) *minRadius = distance; - if(distance > *maxRadius) *maxRadius = distance; - } - - if(newSamples){ - *aveRadius /= (float)newSamples; - } - - return; - -} - -void trackBoundary(unsigned char *Input, blobBoundary lBoundary[], int mcount, int spadSize, - blobBoundary seedValue, int searchWindow){ - - - int i, j, k, m, p; - int offset; - int CurI; - int CurJ; - int StrI; - int StrJ; - int NewI; - int NewJ; - int MinD; - int inflate = searchWindow; - - CurI = seedValue.xy.x; - CurJ = seedValue.xy.y; - StrI = CurI; - StrJ = CurJ; - - p = 0; - lBoundary[p].xy.x = StrI; - lBoundary[p].xy.y = StrJ; - offset = StrI * spadSize; - - p = 1; - while(p < mcount){ - offset = (CurI-inflate)*spadSize; - MinD = 1024; - NewI = -1; - NewJ = -1; - for(i = CurI-inflate; i < CurI+inflate; ++i){ - for(j = CurJ-inflate; j < CurJ+inflate; ++j){ - m = Input[offset+j]; - if(m == 1){ - /* city block distance */ - k = abs(i-CurI) + abs(j-CurJ); - if(k < MinD){ - MinD = k; - NewI = i; - NewJ = j; - } - } - } - offset += spadSize; - } - if(NewI != -1) CurI = NewI; - if(NewJ != -1) CurJ = NewJ; - offset = CurI * spadSize; - Input[offset+CurJ] = 0; - lBoundary[p].xy.x = CurJ; - lBoundary[p].xy.y = CurI; - ++p; - } - - return; - -} - - -void OpenCloseFilter(int olapValues[], int maskSize, int rows, int columns, int spadSize, - unsigned char *input, unsigned char *output, unsigned short *mask){ - - - /* - // do morphological open/close image filtering. the olapValues array determines - // if the filter is Open or Close. 
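// a close runs dilate then erode and an open runs erode then dilate; the four
// olapValues entries supply the hit/background polarity used by the two passes below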
- */ - int i, j, k, l, m, overlap, hit; - int offset; - int LowValue1, HighValue1; - int LowValue2, HighValue2; - int morphoMaskSize = 11; - - LowValue1 = olapValues[0]; - HighValue1 = olapValues[1]; - LowValue2 = olapValues[2]; - HighValue2 = olapValues[3]; - - /* close - step 1 is dialate - open - step 1 is erode */ - offset = maskSize*spadSize; - for(i = maskSize; i < rows-maskSize; ++i){ - for(j = maskSize; j < columns-maskSize; ++j){ - hit = LowValue1; - for(k = -maskSize; k < maskSize; ++k){ - m = k*spadSize; - for(l = -maskSize; l < maskSize; ++l){ - overlap = mask[morphoMaskSize*(k+maskSize)+(l+maskSize)]*input[offset+m+j+l]; - if(overlap == HighValue1){ - hit = HighValue1; - } - } - } - output[offset+j] = hit; - } - offset += spadSize; - } - - /* close - step 2 is erode - open - step 2 is dialate */ - offset = maskSize*spadSize; - for(i = maskSize; i < rows-maskSize; ++i){ - for(j = maskSize; j < columns-maskSize; ++j){ - hit = LowValue2; - for(k = -maskSize; k < maskSize; ++k){ - m = k*spadSize; - for(l = -maskSize; l < maskSize; ++l){ - overlap = mask[morphoMaskSize*(k+maskSize)+(l+maskSize)]*output[offset+m+j+l]; - if(overlap == HighValue2){ - hit = HighValue2; - } - } - } - input[offset+j] = hit; - } - offset += spadSize; - } - - return; -} - -void getCompactness(unsigned char *Input, RECT roi, int label, int spadSize, - float *vCompactness, float length){ - - int i, j; - int maskOffset; - int area; - static float fpi = (float)(4.0 * 3.14159); - - area = 0; - for(i = roi.bottom; i < roi.top; ++i){ - maskOffset = i*spadSize; - for(j = roi.left; j < roi.right; ++j){ - if(Input[maskOffset+j] == label){ - ++area; - } - } - } - if(area && (length != (float)0.0)){ - *vCompactness = (fpi * (float)area) / (length*length); - } - else{ - *vCompactness = (float)0.0; - } - - return; -} - - -void doMorphology(unsigned char *Input, unsigned char *ImageE, unsigned char *ImageC, - unsigned char *ImageH, int olapValuesC[], int olapValuesO[], - unsigned short *cmask, unsigned short *omask, - RECT roi, int label, int CloseMaskSize, int OpenMaskSize, int spadSize){ - - int i, j; - int rows, cols; - int srcOffset; - int dstOffset; - int maskSize; - - cols = roi.right - roi.left; - rows = roi.top - roi.bottom; - - for(i = 0; i < spadSize*spadSize; ++i){ - ImageE[i] = 0; - ImageC[i] = 0; - } - - /* - // put the ROI in the ImageE array centered in ULC - */ - dstOffset = 0; - for(i = roi.bottom; i < roi.top; ++i){ - srcOffset = i*spadSize; - for(j = roi.left; j < roi.right; ++j){ - if(ImageH[srcOffset+j] == label){ - ImageE[dstOffset+j-roi.left] = 1; - } - } - dstOffset += spadSize; - } - - /* - // open - */ - maskSize = OpenMaskSize; - OpenCloseFilter(olapValuesO, maskSize, rows, cols, spadSize, ImageE, ImageC, omask); - /* - // close - */ - maskSize = CloseMaskSize; - OpenCloseFilter(olapValuesC, maskSize, rows, cols, spadSize, ImageE, ImageC, cmask); - - /* - // put the closed ROI (in ImageE) back in its roi space - */ - - srcOffset = 0; - for(i = roi.bottom; i < roi.top+2*maskSize+1; ++i){ - dstOffset = (i-(2*maskSize+1))*spadSize; - for(j = roi.left-maskSize-1; j < roi.right+maskSize+1; ++j){ - if(ImageE[srcOffset+j-roi.left] == 1){ - Input[dstOffset+j-maskSize+1] = label; - } - } - srcOffset += spadSize; - } - - return; - -} - - -void getBoundary(unsigned short *ThinEdgeImage, unsigned char *Input, - blobBoundary *pBoundary, blobBoundary *lBoundary, - boundaryIndex *pBoundaryIndex, RECT boundBox, int label, - int bBox, int nextSlot, int memOffset, - int spadSize, int searchWindow){ - - int i, j; 
- int dstOffset; - int srcOffset; - int mcount; - int rows; - int columns; - bool first; - blobBoundary value; - int inflate = searchWindow+1; - int count; - - pBoundaryIndex[bBox+1].rectangle.left = boundBox.left; - pBoundaryIndex[bBox+1].rectangle.right = boundBox.right; - pBoundaryIndex[bBox+1].rectangle.top = boundBox.top; - pBoundaryIndex[bBox+1].rectangle.bottom = boundBox.bottom; - - for(i = 0; i < spadSize*spadSize; ++i){ - Input[i] = 0; - } - - /* copy to spad */ - - count = 0; - rows = boundBox.top-boundBox.bottom+2*inflate; - columns = boundBox.right-boundBox.left+2*inflate; - dstOffset = inflate*spadSize; - for(i = boundBox.bottom; i < boundBox.top; ++i){ - srcOffset = i*spadSize; - for(j = boundBox.left; j < boundBox.right; ++j){ - if(ThinEdgeImage[srcOffset+j] == label){ - Input[dstOffset+j-boundBox.left+inflate] = 1; - ++count; - } - } - dstOffset += spadSize; - } - - mcount = 0; - first = TRUE; - srcOffset = 0; - for(i = 0; i < rows; ++i){ - for(j = 0; j < columns; ++j){ - if(Input[srcOffset+j]){ - if(first){ - first = FALSE; - /* index of the seed sample */ - value.xy.x = i; - value.xy.y = j; - } - ++mcount; - } - } - srcOffset += spadSize; - } - - trackBoundary(Input, lBoundary, mcount, spadSize, value, searchWindow); - - pBoundaryIndex[nextSlot].numberPoints = mcount; - for(i = 0; i < mcount; ++i){ - value.xy.x = lBoundary[i].xy.x + boundBox.left - inflate; - value.xy.y = lBoundary[i].xy.y + boundBox.bottom - inflate + 1; - pBoundary[memOffset].xy.x = value.xy.x; - pBoundary[memOffset].xy.y = value.xy.y; - ++memOffset; - } - - return; - -} - - -void buildBoundary(objStruct objectMetrics[], int searchWindow, unsigned short *ThinEdgeImage, - int numberObjects, int srcRows, int srcCols){ - - int i, j, k; - int count; - int numBoundaries; - int numSamples; - int offset; - int offset2; - int end; - int label; - int distance; - /* these will be user-setup parameters */ - int closureDistance = 12; - int CloseSize = 5; - int OpenSize = 5; - int threshold = 3; - int newSamples; - int spadSize; - POINT rectPoint[4]; - int in[4]; - float length; - float minRadius; - float maxRadius; - float aveRadius; - float vCompactness; - /* for morphological close of mask. 
max structuring element is 11x11 */ - unsigned short *cmask; - unsigned short *omask; - int maskSize = 11; - int olapValuesC[4]; - int olapValuesO[4]; - int CloseMaskSize; - int OpenMaskSize; - int LowValue1, HighValue1; - int LowValue2, HighValue2; - RECT bBox; - - boundaryIndex *pBoundaryIndex; - blobBoundary *pBoundary; - blobBoundary *lBoundary; - bPOINT *boundary; - unsigned char *Input; - unsigned char *ImageE; - unsigned char *ImageC; - unsigned char *ImageH; - - spadSize = srcCols; - pBoundaryIndex = calloc(srcRows+srcCols, sizeof(boundaryIndex)); - Input = calloc(spadSize*spadSize, sizeof(unsigned char)); - ImageE = calloc(spadSize*spadSize, sizeof(unsigned char)); - ImageC = calloc(spadSize*spadSize, sizeof(unsigned char)); - ImageH = calloc(spadSize*spadSize, sizeof(unsigned char)); - pBoundary = calloc(srcRows*srcCols, sizeof(blobBoundary)); - lBoundary = calloc(32767, sizeof(blobBoundary)); - boundary = calloc(32767, sizeof(POINT)); - cmask = calloc(11*11, sizeof(unsigned short)); - omask = calloc(11*11, sizeof(unsigned short)); - - /* - // Close filter - */ - CloseMaskSize = (CloseSize-1)/2; - for(i = 0; i < 2*CloseMaskSize+1; ++i){ - for(j = 0; j < 2*CloseMaskSize+1; ++j){ - cmask[i*maskSize+j] = 1; - } - } - LowValue1 = 0; - HighValue1 = 1; - LowValue2 = 1; - HighValue2 = 0; - olapValuesC[0] = LowValue1; - olapValuesC[1] = HighValue1; - olapValuesC[2] = LowValue2; - olapValuesC[3] = HighValue2; - - /* - // Open filter - */ - OpenMaskSize = (OpenSize-1)/2; - for(i = 0; i < 2*OpenMaskSize+1; ++i){ - for(j = 0; j < 2*OpenMaskSize+1; ++j){ - omask[i*maskSize+j] = 1; - } - } - LowValue1 = 1; - HighValue1 = 0; - LowValue2 = 0; - HighValue2 = 1; - olapValuesO[0] = LowValue1; - olapValuesO[1] = HighValue1; - olapValuesO[2] = LowValue2; - olapValuesO[3] = HighValue2; - - for(i = 0; i < (srcRows+srcCols); ++i){ - pBoundaryIndex[i].numberPoints = 0; - pBoundaryIndex[i].curveClose = 0; - pBoundaryIndex[i].isWithin = FALSE; - pBoundaryIndex[i].criticalSize = FALSE; - pBoundaryIndex[i].closedCurve = FALSE; - } - - - for(i = 0; i < numberObjects; ++i){ - ++pBoundaryIndex[0].numberPoints; - count = 0; - j = 1; - while(pBoundaryIndex[j].numberPoints){ - count += pBoundaryIndex[j++].numberPoints; - } - bBox.left = objectMetrics[i].L; - bBox.right = objectMetrics[i].R; - bBox.top = objectMetrics[i].T; - bBox.bottom = objectMetrics[i].B; - label = objectMetrics[i].Label; - pBoundaryIndex[i+1].Label = label; - getBoundary(ThinEdgeImage, Input, pBoundary, lBoundary, pBoundaryIndex, bBox, label, - i, pBoundaryIndex[0].numberPoints, count, spadSize, searchWindow); - } - - /* - // Input will now be used in the fill. 
Copy the labeled edge image - */ - - offset = 0; - numBoundaries = pBoundaryIndex[0].numberPoints; - for(i = 0; i < numBoundaries; ++i){ - numSamples = pBoundaryIndex[i+1].numberPoints; - end = numSamples-2; - newSamples = numSamples-1; - for(j = 0; j < numSamples; ++j){ - boundary[j].x = pBoundary[offset+j+1].xy.x; - boundary[j].y = pBoundary[offset+j+1].xy.y; - } - - /* - // clip off the ends where stray boundary pixels were left over - */ - while(1){ - distance = abs(boundary[end].x-boundary[end-1].x) + abs(boundary[end].y-boundary[end-1].y); - if(distance > threshold){ - --end; - --newSamples; - } - else{ - break; - } - } - - distance = abs(boundary[0].x-boundary[end-2].x) + abs(boundary[0].y-boundary[end-2].y); - pBoundaryIndex[i+1].curveClose = distance; - - if(pBoundaryIndex[i+1].curveClose < closureDistance){ - pBoundaryIndex[i+1].closedCurve = TRUE; - } - pBoundaryIndex[i+1].centroid.x = 0; - pBoundaryIndex[i+1].centroid.y = 0; - for(j = 0; j < newSamples; ++j){ - pBoundaryIndex[i+1].centroid.x += boundary[j].x; - pBoundaryIndex[i+1].centroid.y += boundary[j].y; - } - if(newSamples){ - pBoundaryIndex[i+1].centroid.x /= newSamples; - pBoundaryIndex[i+1].centroid.y /= newSamples; - } - getBoundaryMetrics(boundary, &length, &minRadius, &maxRadius, &aveRadius, - (float)pBoundaryIndex[i+1].centroid.x, - (float)pBoundaryIndex[i+1].centroid.y, newSamples); - pBoundaryIndex[i+1].boundaryLength = length; - pBoundaryIndex[i+1].minRadius = minRadius; - pBoundaryIndex[i+1].maxRadius = maxRadius; - pBoundaryIndex[i+1].aveRadius = aveRadius; - if(minRadius != 0.0){ - pBoundaryIndex[i+1].ratio = maxRadius / minRadius; - } - else{ - pBoundaryIndex[i+1].ratio = -1.0; - } - - /* - // augment the ROI boundary - */ - pBoundaryIndex[i+1].rectangle.left -= 2*CloseMaskSize; - pBoundaryIndex[i+1].rectangle.right += 2*CloseMaskSize; - pBoundaryIndex[i+1].rectangle.bottom -= 2*CloseMaskSize; - pBoundaryIndex[i+1].rectangle.top += 2*CloseMaskSize; - label = pBoundaryIndex[i+1].Label; - - /* - // mask goes in ImageH. 
morpho filter the mask first - */ - generateMask(ImageH, boundary, newSamples, label, spadSize); - - /* - // open-close the mask - */ - doMorphology(Input, ImageE, ImageC, ImageH, olapValuesC, olapValuesO, cmask, omask, - pBoundaryIndex[i+1].rectangle, label, CloseMaskSize, OpenMaskSize, spadSize); - - /* - // now get the compactness metrics - */ - getCompactness(Input, pBoundaryIndex[i+1].rectangle, label, spadSize, &vCompactness, length); - pBoundaryIndex[i+1].compactness = vCompactness; - - /* - // reset the ROI boundary - */ - pBoundaryIndex[i+1].rectangle.left += 2*CloseMaskSize; - pBoundaryIndex[i+1].rectangle.right -= 2*CloseMaskSize; - pBoundaryIndex[i+1].rectangle.bottom += 2*CloseMaskSize; - pBoundaryIndex[i+1].rectangle.top -= 2*CloseMaskSize; - offset += numSamples; - } - - - for(i = 0; i < numBoundaries; ++i){ - for(j = 0; j < numBoundaries; ++j){ - if(j != i){ - rectPoint[0].x = pBoundaryIndex[j+1].rectangle.left; - rectPoint[0].y = pBoundaryIndex[j+1].rectangle.bottom; - rectPoint[1].x = pBoundaryIndex[j+1].rectangle.left; - rectPoint[1].y = pBoundaryIndex[j+1].rectangle.top; - rectPoint[2].x = pBoundaryIndex[j+1].rectangle.right; - rectPoint[2].y = pBoundaryIndex[j+1].rectangle.bottom; - rectPoint[3].x = pBoundaryIndex[j+1].rectangle.right; - rectPoint[3].y = pBoundaryIndex[j+1].rectangle.top; - in[0] = 0; - in[1] = 0; - in[2] = 0; - in[3] = 0; - for(k = 0; k < 4; ++k){ - if((rectPoint[k].x > pBoundaryIndex[i+1].rectangle.left) && - (rectPoint[k].x < pBoundaryIndex[i+1].rectangle.right)){ - if((rectPoint[k].y > pBoundaryIndex[i+1].rectangle.bottom) && - (rectPoint[k].y < pBoundaryIndex[i+1].rectangle.top)){ - in[k] = 1; - } - } - } - if(in[0] && in[1] && in[2] && in[3]){ - pBoundaryIndex[j+1].isWithin = TRUE; - } - } - } - } - - /* - // fill in the Python features - */ - for(i = 0; i < numBoundaries; ++i){ - objectMetrics[i].curveClose = pBoundaryIndex[i+1].curveClose; - objectMetrics[i].cXBoundary = pBoundaryIndex[i+1].centroid.x; - objectMetrics[i].cYBoundary = pBoundaryIndex[i+1].centroid.y; - objectMetrics[i].boundaryLength = pBoundaryIndex[i+1].boundaryLength; - objectMetrics[i].minRadius = pBoundaryIndex[i+1].minRadius; - objectMetrics[i].maxRadius = pBoundaryIndex[i+1].maxRadius; - objectMetrics[i].aveRadius = pBoundaryIndex[i+1].aveRadius; - objectMetrics[i].ratio = pBoundaryIndex[i+1].ratio; - objectMetrics[i].compactness = pBoundaryIndex[i+1].compactness; - } - - // debug only - if(0){ - for(i = 0; i < numBoundaries; ++i){ - if(pBoundaryIndex[i+1].boundaryLength != (float)0.0){ - printf("boundary %d:\n", i); - printf("\t\tRect (%d, %d, %d, %d)\n", pBoundaryIndex[i+1].rectangle.left, - pBoundaryIndex[i+1].rectangle.right, - pBoundaryIndex[i+1].rectangle.top, - pBoundaryIndex[i+1].rectangle.bottom); - printf("\t\tCentroid (%d, %d)\n", pBoundaryIndex[i+1].centroid.x, pBoundaryIndex[i+1].centroid.y); - printf("\t\tLength (%f)\n", pBoundaryIndex[i+1].boundaryLength); - printf("\t\tRatio (%f)\n", pBoundaryIndex[i+1].ratio); - printf("\t\taveRadius (%f)\n", pBoundaryIndex[i+1].aveRadius); - printf("\t\tLabel (%d)\n", pBoundaryIndex[i+1].Label); - printf("\t\tCompactness (%f)\n", pBoundaryIndex[i+1].compactness); - printf("\t\tCurveClose (%d)\n", pBoundaryIndex[i+1].curveClose); - if(pBoundaryIndex[i+1].isWithin){ - printf("\t\tContained (T)\n"); - } - else{ - printf("\t\tContained (F)\n"); - } - if(pBoundaryIndex[i+1].closedCurve){ - printf("\t\tclosedCurve (T)\n"); - } - else{ - printf("\t\tclosedCurve (F)\n"); - } - } - } - } - - /* - // need to return input which is 
now mask image - */ - - offset = 0; - offset2 = 0; - for(i = 0; i < srcRows; ++i){ - for(j = 0; j < srcCols; ++j){ - ThinEdgeImage[offset+j] = (unsigned short)Input[offset2+j]; - } - offset += srcCols; - offset2 += spadSize; - } - - free(pBoundaryIndex); - free(Input); - free(ImageE); - free(ImageC); - free(ImageH); - free(pBoundary); - free(lBoundary); - free(boundary); - free(cmask); - free(omask); - - return; - -} - - -void initLaws(LawsFilter7 *lawsFilter){ - - int i; - float sum; - float L7[7] = { 1.0, 6.0, 15.0, 20.0, 15.0, 6.0, 1.0}; - float E7[7] = {-1.0, -4.0, -5.0, 0.0, 5.0, 4.0, 1.0}; - float S7[7] = {-1.0, -2.0, 1.0, 4.0, 1.0, -2.0, -1.0}; - float W7[7] = {-1.0, 0.0, 3.0, 0.0, -3.0, 0.0, 1.0}; - float R7[7] = { 1.0, -2.0, -1.0, 4.0, -1.0, -2.0, 1.0}; - float O7[7] = {-1.0, 6.0, -15.0, 20.0, -15.0, 6.0, -1.0}; - - lawsFilter->numberKernels = 6; - lawsFilter->kernelLength = 7; - lawsFilter->numberFilterLayers = 21; - lawsFilter->name[0] = 'L'; - lawsFilter->name[1] = 'E'; - lawsFilter->name[2] = 'S'; - lawsFilter->name[3] = 'W'; - lawsFilter->name[4] = 'R'; - lawsFilter->name[5] = 'O'; - for(i = 0; i < 7; ++i){ - lawsFilter->lawsKernel[0][i] = L7[i]; - lawsFilter->lawsKernel[1][i] = E7[i]; - lawsFilter->lawsKernel[2][i] = S7[i]; - lawsFilter->lawsKernel[3][i] = W7[i]; - lawsFilter->lawsKernel[4][i] = R7[i]; - lawsFilter->lawsKernel[5][i] = O7[i]; - } - - /* L filter is unity gain */ - sum = (float)0.0; - for(i = 0; i < 7; ++i){ - sum += lawsFilter->lawsKernel[0][i]; - } - for(i = 0; i < 7; ++i){ - lawsFilter->lawsKernel[0][i] /= sum; - } - - return; - -} - -float lawsConvolution(float *image, float *rowFilter, float *colFilter, int kernelSize){ - - int i, j; - int offset; - float result[7]; - float sum; - - /* filter rows */ - for(i = 0; i < kernelSize; ++i){ - sum = (float)0.0; - offset = i * kernelSize; - for(j = 0; j < kernelSize; ++j){ - sum += (rowFilter[j]*image[offset+j]); - } - result[i] = sum; - } - - /* filter columns */ - sum = (float)0.0; - for(j = 0; j < kernelSize; ++j){ - sum += (rowFilter[j]*result[j]); - } - - return(sum); - -} - - -void getLawsTexture(LawsFilter7 lawsFilter, tTEM LawsFeatures[], - objStruct objectMetrics[], double *sourceImage, - unsigned short *MaskImage, int numberObjects, - int srcRows, int srcCols){ - - int i, j; - int label; - RECT bBox; - int aperature = (lawsFilter.kernelLength-1)/2; - unsigned char *ImageH; - float *ImageT; - float *lawsImage; - - ImageH = calloc(srcRows*srcCols, sizeof(unsigned char)); - ImageT = calloc(srcRows*srcCols, sizeof(float)); - lawsImage = calloc(lawsFilter.numberFilterLayers*srcRows*srcCols, sizeof(float)); - - for(i = 0; i < numberObjects; ++i){ - bBox.left = objectMetrics[i].L; - bBox.right = objectMetrics[i].R; - bBox.top = objectMetrics[i].T; - bBox.bottom = objectMetrics[i].B; - label = objectMetrics[i].Label; - if(objectMetrics[i].voxelMean != (float)0.0){ - /* - // valid size region - */ - computeLaws(lawsFilter, LawsFeatures, bBox, label, aperature, srcRows, srcCols, ImageH, ImageT, - MaskImage, lawsImage, sourceImage); - for(j = 1; j < lawsFilter.numberFilterLayers; ++j){ - objectMetrics[i].TEM[j-1] = LawsFeatures[j].Variance; - } - /* -- later will need to return a view of the texture images - int index; - int offset; - int layerStep = srcRows*srcCols; - if(label == debugBlob){ - index = 0; - for(j = 1; j < lawsFilter.numberFilterLayers; ++j){ - if(LawsFeatures[j].Variance == (float)1.0) index = j; - } - // overwrite the raw image - offset = index * layerStep; - for(j = 0; j < layerStep; ++j){ - 
sourceImage[j] = lawsImage[offset+j]; - } - } - */ - } - } - - free(ImageH); - free(ImageT); - free(lawsImage); - - return; - -} - -void computeLaws(LawsFilter7 lawsFilter, tTEM LawsFeatures[], RECT roi, int label, - int aperature, int srcRows, int srcCols, - unsigned char *ImageH, float *ImageT, unsigned short *MaskImage, - float *lawsImage, double *sourceImage){ - - /* - // hard-wirred to Law's 7 kernels - */ - int i, j, k; - int lawsLayer; - int column, row; - int offset; - int maskOffset[7]; - int dataOffset[7]; - float myImage[49]; - int count; - int outerKernelNumber; - int innerKernelNumber; - int rowNumber; - int kernelSize = lawsFilter.kernelLength; - int fullMask = kernelSize*kernelSize; - int layerStep = srcRows*srcCols; - float *rowFilter; - float *colFilter; - float filterResult1; - float filterResult2; - float lawsLL=1.0; - float t; - float maxValue; - float scale; - char I, J; - char combo[24]; - char dual[24]; - - - /* zero the laws mask memory first */ - for(i = 0; i < srcRows*srcCols; ++i){ - ImageH[i] = 0; - } - for(j = 0; j < lawsFilter.numberFilterLayers; ++j){ - LawsFeatures[j].Mean = (float)0.0; - LawsFeatures[j].Variance = (float)0.0; - } - - for(i = roi.bottom+aperature; i < roi.top-aperature; ++i){ - // get the row array offset for mask and data source. - for(row = -aperature; row <= aperature; ++row){ - maskOffset[row+aperature] = (i+row)*srcCols; - dataOffset[row+aperature] = maskOffset[row+aperature]; - } - for(j = roi.left+aperature; j < roi.right-aperature; ++j){ - /* - // get 7x7 segment and make sure have 100% mask coverage - */ - count = 0; - for(row = -aperature; row <= aperature; ++row){ - rowNumber = (row+aperature)*kernelSize; - for(column = -aperature; column <= aperature; ++column){ - if(MaskImage[maskOffset[row+aperature]+j+column] == label){ - myImage[rowNumber+column+aperature] = sourceImage[dataOffset[row+aperature]+j+column]; - ++count; - } - } - } - if(count == fullMask){ - /* - // 100% coverage. now do the Law's texture filters - */ - ImageH[i*srcCols+j] = 1; - lawsLayer = 0; - for(outerKernelNumber = 0; outerKernelNumber < lawsFilter.numberKernels; ++outerKernelNumber){ - /* - // outer loop pulls the i'th kernel. kernel 0 is the LP kernel - // the outer loop is the iso-kernel - */ - I = lawsFilter.name[outerKernelNumber]; - sprintf(dual, "%c_%c", I, I); - rowFilter = &lawsFilter.lawsKernel[outerKernelNumber][0]; - colFilter = &lawsFilter.lawsKernel[outerKernelNumber][0]; - filterResult1 = lawsConvolution(myImage, rowFilter, colFilter, kernelSize); - /* lawsLayer 0 is the LP and needs to be used to scale. 
*/ - if(outerKernelNumber){ - lawsImage[lawsLayer*layerStep + i*srcCols + j] = (float)2.0 * filterResult1 / lawsLL; - } - else{ - lawsLL = (float)2.0 * filterResult1; - lawsImage[lawsLayer*layerStep + i*srcCols + j] = (float)2.0 * filterResult1; - } - strcpy(&LawsFeatures[lawsLayer].filterName[0], dual); - ++lawsLayer; - /* - // now do the inner loop and get the column filters for the other laws kernels - */ - for(innerKernelNumber = outerKernelNumber+1; - innerKernelNumber < lawsFilter.numberKernels; - ++innerKernelNumber){ - J = lawsFilter.name[innerKernelNumber]; - sprintf(combo, "%c_%c", I, J); - strcpy(&LawsFeatures[lawsLayer].filterName[0], combo); - colFilter = &lawsFilter.lawsKernel[innerKernelNumber][0]; - filterResult1 = lawsConvolution(myImage, rowFilter, colFilter, kernelSize); - filterResult2 = lawsConvolution(myImage, colFilter, rowFilter, kernelSize); - lawsImage[lawsLayer*layerStep + i*srcCols + j] = - (filterResult1 / lawsLL) + (filterResult2 / lawsLL); - ++lawsLayer; - } - } - } - } - } - - for(i = 0; i < lawsFilter.numberFilterLayers; ++i){ - LawsFeatures[i].Mean = (float)0.0; - LawsFeatures[i].Variance = (float)0.0; - } - - count = 0; - for(i = roi.bottom+aperature; i < roi.top-aperature; ++i){ - row = i * srcCols; - for(j = roi.left+aperature; j < roi.right-aperature; ++j){ - if(ImageH[row+j]){ - ++count; - for(k = 0; k < lawsFilter.numberFilterLayers; ++k){ - offset = k * layerStep + row; - LawsFeatures[k].Mean += lawsImage[offset+j]; - } - } - } - } - - if(count == 0){ - // debug statement - printf("no samples for texture\n"); - return; - } - - for(k = 0; k < lawsFilter.numberFilterLayers; ++k){ - LawsFeatures[k].Mean /= (float)count; - } - for(i = roi.bottom+aperature; i < roi.top-aperature; ++i){ - row = i * srcCols; - for(j = roi.left+aperature; j < roi.right-aperature; ++j){ - if(ImageH[row+j]){ - for(k = 0; k < lawsFilter.numberFilterLayers; ++k){ - offset = k * layerStep + row; - t = lawsImage[offset+j] - LawsFeatures[k].Mean; - LawsFeatures[k].Variance += (t * t); - } - } - } - } - for(k = 0; k < lawsFilter.numberFilterLayers; ++k){ - LawsFeatures[k].Variance /= (float)count; - LawsFeatures[k].Variance = (float)(sqrt(LawsFeatures[k].Variance)); - } - - /* - // now normalize the variance feature (TEM) - */ - maxValue = (float)0.0; - for(i = 1; i < lawsFilter.numberFilterLayers; ++i){ - if((LawsFeatures[i].Variance) > maxValue) maxValue = LawsFeatures[i].Variance; - } - scale = (float)1.0 / maxValue; - for(i = 1; i < lawsFilter.numberFilterLayers; ++i){ - LawsFeatures[i].Variance = scale * LawsFeatures[i].Variance; - } - - - return; - -} - -void getVoxelMeasures(objStruct objectMetrics[], double *sourceImage, - unsigned short *MaskImage, int numberObjects, - int srcRows, int srcCols){ - - int i, j, k; - int label; - int offset; - int count; - float mean, std, t; - RECT bBox; - - for(i = 0; i < numberObjects; ++i){ - bBox.left = objectMetrics[i].L; - bBox.right = objectMetrics[i].R; - bBox.top = objectMetrics[i].T; - bBox.bottom = objectMetrics[i].B; - label = objectMetrics[i].Label; - count = 0; - mean = (float)0.0; - for(j = bBox.bottom; j < bBox.top; ++j){ - offset = j * srcCols; - for(k = bBox.left; k < bBox.right; ++k){ - if(MaskImage[offset+k] == label){ - mean += sourceImage[offset+k]; - ++count; - } - } - } - if(count){ - mean /= (float)count; - std = (float)0.0; - for(j = bBox.bottom; j < bBox.top; ++j){ - offset = j * srcCols; - for(k = bBox.left; k < bBox.right; ++k){ - if(MaskImage[offset+k] == label){ - t = (sourceImage[offset+k]-mean); - std += (t * 
t); - } - } - } - } - if(count){ - std /= (float)count; - std = sqrt(std); - objectMetrics[i].voxelMean = mean; - objectMetrics[i].voxelVar = std; - } - else{ - objectMetrics[i].voxelMean = 0.0; - objectMetrics[i].voxelVar = 0.0; - } - } - - return; - -} - -int NI_BuildBoundary(int samples, int rows, int cols, int numberObjects, - unsigned short *edgeImage, objStruct objectMetrics[]){ - - int searchWindow = 5; // 5 is good value for Sobel - int status = 1; - - buildBoundary(objectMetrics, searchWindow, edgeImage, numberObjects, rows, cols); - - return status; - -} - -int NI_VoxelMeasures(int samples, int rows, int cols, int numberObjects, double *sourceImage, - unsigned short *maskImage, objStruct objectMetrics[]){ - - int status = 1; - getVoxelMeasures(objectMetrics, sourceImage, maskImage, numberObjects, rows, cols); - - return status; - -} - - -int NI_TextureMeasures(int samples, int rows, int cols, int numberObjects, double *sourceImage, - unsigned short *maskImage, objStruct objectMetrics[]){ - - int status = 1; - LawsFilter7 lawsFilter; - tTEM LawsFeatures[21]; - - initLaws(&lawsFilter); - getLawsTexture(lawsFilter, LawsFeatures, objectMetrics, sourceImage, - maskImage, numberObjects, rows, cols); - - return status; - -} - - - Copied: trunk/scipy/ndimage/src/segment/Segmenter_IMPL.c (from rev 3915, trunk/scipy/ndimage/segment/Segmenter_IMPL.c) Deleted: trunk/scipy/ndimage/src/segment/__init__.py =================================================================== --- trunk/scipy/ndimage/segment/__init__.py 2008-02-11 13:46:25 UTC (rev 3913) +++ trunk/scipy/ndimage/src/segment/__init__.py 2008-02-11 23:32:11 UTC (rev 3916) @@ -1,5 +0,0 @@ -# Segmentation package -# Author: Tom Waite, 2007 - -from _segmenter import * -from objectdata import * Deleted: trunk/scipy/ndimage/src/segment/ndImage_Segmenter_structs.h =================================================================== --- trunk/scipy/ndimage/segment/ndImage_Segmenter_structs.h 2008-02-11 13:46:25 UTC (rev 3913) +++ trunk/scipy/ndimage/src/segment/ndImage_Segmenter_structs.h 2008-02-11 23:32:11 UTC (rev 3916) @@ -1,155 +0,0 @@ -#ifndef V1_STRUCTSH -#define V1_STRUCTSH - -#define bool unsigned char - -typedef struct{ - int x; - int y; -}POINT; - -typedef struct{ - int x; - int y; - int linkIndex; - bool haveLink; -}bPOINT; - -typedef struct{ - int left; - int right; - int top; - int bottom; -}RECT; - -typedef struct{ - char filterName[20]; - float Mean; - float Variance; -}tTEM; - -typedef struct{ - int numberKernels; - int kernelLength; - int numberFilterLayers; - float lawsKernel[6][7]; - char name[7]; -}LawsFilter7; - -typedef struct{ - // filled in GetObjectStats - int L; - int R; - int T; - int B; - int Label; - int Area; - float cX; - float cY; - // filled in BuildBoundary - int curveClose; - float cXBoundary; - float cYBoundary; - float boundaryLength; - float minRadius; - float maxRadius; - float aveRadius; - float ratio; - float compactness; - // filled in VoxelMeasures - float voxelMean; - float voxelVar; - // filled in TextureMeasures - float TEM[20]; -}objStruct; - -typedef struct{ - int numberPoints; - int curveClose; - int classify; - float boundaryLength; - float minRadius; - float maxRadius; - float aveRadius; - float ratio; - float compactness; - float voxelMean; - float voxelVar; - RECT rectangle; - POINT centroid; - bool isWithin; - bool closedCurve; - bool criticalSize; - int Label; -}boundaryIndex; - - -typedef struct{ - POINT xy; -}blobBoundary; - - -// -// prototypes -// -int NI_RegionGrow(int, int, int, 
int, int, int, int, double *, unsigned short *, int *); -int NI_TextureMeasures(int, int, int, int, double *, unsigned short *, objStruct objectMetrics[]); -int NI_VoxelMeasures(int, int, int, int, double *, unsigned short *, objStruct objectMetrics[]); -int NI_BuildBoundary(int, int, int, int, unsigned short *, objStruct objectMetrics[]); -int NI_GetObjectStats(int, int, int, unsigned short *, objStruct objectMetrics[]); -int NI_ThinFilter(int, int, int, int, unsigned short *, objStruct objectMetrics[]); -int NI_SobelEdges(int, int, int, double, int, int, int, double, int, double *, unsigned short *, int *); -int NI_ShenCastanEdges(int, int, int, double, double, int, int, int, double *, unsigned short *, int *); -int NI_CannyEdges(int, int, int, double, double, double, int, int, int, double, int, - double *, unsigned short *, int *); - -void computeLaws(LawsFilter7, tTEM LawsFeatures[], RECT, int, int, int, int, unsigned char *, float *, - unsigned short *, float *, double *); -float lawsConvolution(float *, float *, float *, int); -void initLaws(LawsFilter7*); -void getVoxelMeasures(objStruct objectMetrics[], double *, unsigned short *, int, int, int); -void getLawsTexture(LawsFilter7, tTEM LawsFeatures[], objStruct objectMetrics[], double *, unsigned short *, int, int, int); - -void morphoFilterBinaryImage(int, int, unsigned short *, int, int); -void buildBinaryImage(int, int, double *, unsigned short *, int, int); -void doRegionGrow(int, int, int, double *, unsigned short *, int, int, int, int); -void buildBoundary(objStruct objectMetrics[], int, unsigned short *, int, int, int); -void getBoundary(unsigned short *, unsigned char *, blobBoundary *, blobBoundary *, - boundaryIndex *, RECT, int, int, int, int, int, int); -void doMorphology(unsigned char *, unsigned char *, unsigned char *, unsigned char *, int olapValuesC[], - int olapValuesO[], unsigned short *, unsigned short *, - RECT, int, int, int, int); -void getCompactness(unsigned char *, RECT, int, int, float *, float); -void OpenCloseFilter(int olapValues[], int, int, int, int, unsigned char *, - unsigned char *, unsigned short *); -void trackBoundary(unsigned char *, blobBoundary lBoundary[], int, int, blobBoundary, int); -void getBoundaryMetrics(bPOINT *, float *, float *, float *, float *, float, float, int); -void generateMask(unsigned char *, bPOINT *, int, int, int); -void ThinningFilter(int, int, int, int *, int *, unsigned char *, - unsigned char *, unsigned char *, unsigned char *, unsigned char *, unsigned char *); -void initThinFilter(int *, int *); -void Shen_Castan(double, double, int, int, int, int, int, double *, unsigned short *); -void computeISEF(float *, float *, int, int, double); -void ISEF_Horizontal(float *, float *, float *, float *, int, int, double); -void ISEF_Vertical(float *, float *, float *, float *, int, int, double); -void thresholdImage(float *, float *, int, int, int, int); -void computeBandedLaplacian(float *, float *, float *, int, int); -void getZeroCrossings(float *, float *, float *, int, int, int); -float adaptiveGradient(float *, float *, int, int, int, int); -void thresholdEdges(float *, unsigned short *, double, int, int); -void estimateThreshold(float *, float *, float, int, int, float *); -void doSobel(int, int, int, double, int, double *, unsigned short *); -void DGFilters(int, int, int, double, int, float *, float *, double *, double *, float *, float *); -void nonMaxSupress(int, int, float, float, double *, double *, int, float *, float *, float *); -void edgeHysteresis(int, int, 
double, double, float *, float *); -void edgeThreshold(int, int, double, float *, float *); -int traceEdge(int, int, int, int, double, float *, float *); -float magnitude(float, float); -int ConnectedEdgePoints(int, int, unsigned short *); -void doPreProcess(int, int, int, double *, double, int, int, int); -void filter2D(int, int, int, int, int, float *, double *); -void buildKernel(double, int, int, float *); - - - -#endif Copied: trunk/scipy/ndimage/src/segment/ndImage_Segmenter_structs.h (from rev 3915, trunk/scipy/ndimage/segment/ndImage_Segmenter_structs.h) Deleted: trunk/scipy/ndimage/src/segment/objectdata.py =================================================================== --- trunk/scipy/ndimage/segment/objectdata.py 2008-02-11 13:46:25 UTC (rev 3913) +++ trunk/scipy/ndimage/src/segment/objectdata.py 2008-02-11 23:32:11 UTC (rev 3916) @@ -1,25 +0,0 @@ - -import numpy as N - -objstruct =N.dtype([('L', 'i'), - ('R', 'i'), - ('T', 'i'), - ('B', 'i'), - ('Label', 'i'), - ('Area', 'i'), - ('cX', 'f'), - ('cY', 'f'), - ('curveClose', 'i'), - ('cXB', 'f'), - ('cYB', 'f'), - ('bLength', 'f'), - ('minRadius', 'f'), - ('maxRadius', 'f'), - ('aveRadius', 'f'), - ('ratio', 'f'), - ('compactness', 'f'), - ('voxelMean', 'f'), - ('voxelVar', 'f'), - ('TEM', 'f', 20)] - ) - Copied: trunk/scipy/ndimage/src/segment/objectdata.py (from rev 3915, trunk/scipy/ndimage/segment/objectdata.py) Deleted: trunk/scipy/ndimage/src/segment/setup.py =================================================================== --- trunk/scipy/ndimage/segment/setup.py 2008-02-11 13:46:25 UTC (rev 3913) +++ trunk/scipy/ndimage/src/segment/setup.py 2008-02-11 23:32:11 UTC (rev 3916) @@ -1,23 +0,0 @@ - -#!/usr/bin/env python - -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - - config = Configuration('segment', parent_package, top_path) - - config.add_extension('_segmenter', - sources=['Segmenter_EXT.c', - 'Segmenter_IMPL.c'], - depends = ['ndImage_Segmenter_structs.h'] - ) - - config.add_data_dir('tests') - - return config - -if __name__ == '__main__': - from numpy.distutils.core import setup - setup(**configuration(top_path='').todict()) - - Copied: trunk/scipy/ndimage/src/segment/tests (from rev 3915, trunk/scipy/ndimage/segment/tests) Modified: trunk/scipy/ndimage/src/segment/tests/test_segment.py =================================================================== --- trunk/scipy/ndimage/segment/tests/test_segment.py 2008-02-11 23:27:05 UTC (rev 3915) +++ trunk/scipy/ndimage/src/segment/tests/test_segment.py 2008-02-11 23:32:11 UTC (rev 3916) @@ -1,313 +1,313 @@ - -import numpy as N + +import numpy as N from scipy.testing import * -import scipy.ndimage.segment as S - -inputname = 'slice112.raw' - -import os -filename = os.path.join(os.path.split(__file__)[0],inputname) - - -def shen_castan(image, IIRFilter=0.8, scLow=0.3, window=7, lowThreshold=220+2048, - highThreshold=600+2048, dust=16): - """ - labeledEdges, ROIList = shen_castan(image, [default]) - - implements Shen-Castan edge finding - - Inputs - image, IIR filter, shen_castan_low, window, low_threshold, high_threshold, dust - - image is the numarray 2D image - - IIR filter is filter parameter for exponential filter - - shen_castan_low is edge threshold is range (0.0, 1.0] - - window is search window for edge detection - - low_ and high_ threshold are density values - - dust is blob filter. 
blob area (length x width of bounding box) under this - size threshold are filtered (referred to as dust and blown away) - - Outputs - labeledEdges, ROIList[>dust] - - labeledEdges is boundary (edges) of segmented 'blobs', - numerically labeled by blob number - - ROIList[>dust] is a blob feature list. Only values - with bounding box area greater than dust threshold are returned - - """ - labeledEdges, numberObjects = S.shen_castan_edges(scLow, IIRFilter, window, - lowThreshold, highThreshold, image) - # allocated struct array for edge object measures. for now just the rect bounding box - ROIList = N.zeros(numberObjects, dtype=S.objstruct) - # return the bounding box for each connected edge - S.get_object_stats(labeledEdges, ROIList) - return labeledEdges, ROIList[ROIList['Area']>dust] - -def sobel(image, sLow=0.3, tMode=1, lowThreshold=220+2048, highThreshold=600+2048, BPHigh=10.0, - apearture=21, dust=16): - """ - labeledEdges, ROIList = sobel(image, [default]) - - implements sobel magnitude edge finding - - Inputs - image, sobel_low, tMode, low_threshold, high_threshold, - high_filter_cutoff, filter_aperature, dust - - image is the numarray 2D image - - sobel_low is edge threshold is range (0.0, 1.0] - - tMode is threshold mode: 1 for ave, 2 for mode (histogram peak) - - low_ and high_ threshold are density values - - high_filter_cutoff is digital high frequency cutoff in range (0.0, 180.0] - - aperature is odd filter kernel length - - dust is blob filter. blob area (length x width of bounding box) under this - size threshold are filtered (referred to as dust and blown away) - - Outputs - labeledEdges, ROIList[>dust] - - labeledEdges is boundary (edges) of segmented 'blobs', - numerically labeled by blob number - - ROIList[>dust] is a blob feature list. Only values - with bounding box area greater than dust threshold are returned - - """ - # get sobel edge points. return edges that are labeled (1..numberObjects) - labeledEdges, numberObjects = S.sobel_edges(sLow, tMode, lowThreshold, - highThreshold, BPHigh, apearture, image) - # allocated struct array for edge object measures. for now just the rect bounding box - ROIList = N.zeros(numberObjects, dtype=S.objstruct) - # return the bounding box for each connected edge - S.get_object_stats(labeledEdges, ROIList) - # thin (medial axis transform) of the sobel edges as the sobel produces a 'band edge' - S.morpho_thin_filt(labeledEdges, ROIList) - return labeledEdges, ROIList[ROIList['Area']>dust] - -def canny(image, cSigma=1.0, cLow=0.5, cHigh=0.8, tMode=1, lowThreshold=220+2048, - highThreshold=600+2048, BPHigh=10.0, apearture=21, dust=16): - """ - labeledEdges, ROIList = canny(image, [default]) - - implements canny edge finding - - Inputs - image, DG_sigma, canny_low, canny_high, tMode, low_threshold, - high_threshold, high_filter_cutoff, filter_aperature, dust - - image is the numarray 2D image - - DG_sigma is Gaussain sigma for the derivative-of-gaussian filter - - clow is low edge threshold is range (0.0, 1.0] - - chigh is high edge threshold is range (0.0, 1.0] - - tMode is threshold mode: 1 for ave, 2 for mode (histogram peak) - - low_ and high_ threshold are density values - - high_filter_cutoff is digital high frequency cutoff in range (0.0, 180.0] - - high_filter_cutoff is digital high frequency cutoff in range (0.0, 180.0] - - aperature is odd filter kernel length - - dust is blob filter. 
blob area (length x width of bounding box) under this - size threshold are filtered (referred to as dust and blown away) - - Outputs - labeledEdges, ROIList[>dust] - - labeledEdges is boundary (edges) of segmented 'blobs', - numerically labeled by blob number - - ROIList[>dust] is a blob feature list. Only values - with bounding box area greater than dust threshold are returned - - """ - # get canny edge points. return edges that are labeled (1..numberObjects) - labeledEdges, numberObjects = S.canny_edges(cSigma, cLow, cHigh, tMode, lowThreshold, highThreshold, - BPHigh, apearture, image) - # allocated struct array for edge object measures. for now just the rect bounding box - ROIList = N.zeros(numberObjects, dtype=S.objstruct) - # return the bounding box for each connected edge - S.get_object_stats(labeledEdges, ROIList) - return labeledEdges, ROIList[ROIList['Area']>dust] - -def get_shape_mask(labeledEdges, ROIList): - """ - get_shape_mask(labeledEdges, ROIList) - - takes labeled edge image plus ROIList (blob descriptors) and generates - boundary shape features and builds labeled blob masks. 'labeledEdges' - is over-written by 'labeledMask'. Adds features to ROIList structure - - Inputs - labeledEdges, ROIList - - labeledEdges is boundary (edges) of segmented 'blobs', - numerically labeled by blob number - - ROIList is a blob feature list. - - Output - no return. edge image input is over-written with mask image. - ROIList added to. - - """ - - # pass in Sobel morph-thinned labeled edge image (LEI) and ROIList - # GetShapeMask will augment the ROI list - # labeledEdges is the original edge image and overwritten as mask image - # maskImage is the mask that is used for blob texture / pixel features - S.build_boundary(labeledEdges, ROIList) - return - -def get_voxel_measures(rawImage, labeledEdges, ROIList): - """ - get_voxel_measures(rawImage, labeledEdges, ROIList) - - takes raw 2D image, labeled blob mask and ROIList. computes voxel features - (moments, histogram) for each blob. Adds features to ROIList structure. - - Inputs - rawImage, labeledEdges, ROIList - - rawImage is the original source 2D image - - labeledEdges is boundary (edges) of segmented 'blobs', - numerically labeled by blob number - - ROIList is a blob feature list. - - Output - no return. ROIList added to. - - """ - # - # pass raw image, labeled mask and the partially filled ROIList - # VoxelMeasures will fill the voxel features in the list - # - S.voxel_measures(rawImage, labeledEdges, ROIList) - return - -def get_texture_measures(rawImage, labeledEdges, ROIList): - """ - get_texture_measures(rawImage, labeledEdges, ROIList) - - takes raw 2D image, labeled blob mask and ROIList. computes 2D - texture features using 7x7 Law's texture filters applied - to segmented blobs. TEM (texture energy metric) is computed - for each Law's filter image and stored in TEM part of ROIList. - - Inputs - rawImage, labeledEdges, ROIList - - rawImage is the original source 2D image - - labeledEdges is boundary (edges) of segmented 'blobs', - numerically labeled by blob number - - ROIList is a blob feature list. - - Output - no return. ROIList added to. 
- """ - # - # pass raw image, labeled mask and the partially filled ROIList - # VoxelMeasures will fill the texture (Law's, sub-edges, co-occurence, Gabor) features in the list - # - S.texture_measures(rawImage, labeledEdges, ROIList) - return - -def segment_regions(): - """ - sourceImage, labeledMask, ROIList = segment_regions() - - Inputs - No Input - - Outputs - sourceImage, labeledMask, ROIList - - sourceImage is raw 2D image (default cardiac CT slice for demo - - labeledMask is mask of segmented 'blobs', - numerically labeled by blob number - - ROIList is numerical Python structure of intensity, shape and - texture features for each blob - - High level script calls Python functions: - get_slice() - a cardiac CT slice demo file - sobel() - sobel magnitude edge finder, - returns connected edges - get_shape_mask() - gets segmented blob boundary and mask - and shape features - get_voxel_measures() - uses masks get object voxel moment - and histogram features - get_texture_measures() - uses masks get object 2D texture features - """ - # get slice from the CT volume - image = get_slice(filename) - # need a copy of original image as filtering will occur on the extracted slice - sourceImage = image.copy() - # Sobel is the first level segmenter. Sobel magnitude and MAT (medial axis transform) - # followed by connected component analysis. What is returned is labeled edges and the object list - labeledMask, ROIList = sobel(image) - # From the labeled edges and the object list get the labeled mask for each blob object - get_shape_mask(labeledMask, ROIList) - # Use the labeled mask and source image (raw) to get voxel features - get_voxel_measures(sourceImage, labeledMask, ROIList) - # Use the labeled mask and source image (raw) to get texture features - get_texture_measures(sourceImage, labeledMask, ROIList) - return sourceImage, labeledMask, ROIList - -def grow_regions(): - """ - regionMask, numberRegions = region_grow() - Inputs - No Input - Outputs - regionMask, numberRegions - - regionMask is the labeled segment masks from 2D image - - numberRegions is the number of segmented blobs - - High level script calls Python functions: - get_slice() - a cardiac CT slice demo file - region_grow() - "grows" connected blobs. default threshold - and morphological filter structuring element - """ - # get slice from the CT volume - image = get_slice(filename) - regionMask, numberRegions = region_grow(image) - return regionMask, numberRegions - - -def region_grow(image, lowThreshold=220+2048, highThreshold=600+2048, open=7, close=7): - """ - regionMask, numberRegions = region_grow(image, [defaults]) - - Inputs - image, low_threshold, high_threshold, open, close - - image is the numarray 2D image - - low_ and high_ threshold are density values - - open is open morphology structuring element - odd size. 0 to turn off. max is 11 - - close is close morphology structuring element - odd size. 0 to turn off. 
max is 11 - - Outputs - regionMask, numberRegions - - regionMask is the labeled segment masks from 2D image - - numberRegions is the number of segmented blobs - """ - # morphology filters need to be clipped to 11 max and be odd - regionMask, numberRegions = S.region_grow(lowThreshold, highThreshold, close, open, image) - return regionMask, numberRegions - - -def get_slice(imageName='slice112.raw', bytes=2, rows=512, columns=512): - # get a slice alrady extracted from the CT volume - #image = open(imageName, 'rb') - #slice = image.read(rows*columns*bytes) - #values = struct.unpack('h'*rows*columns, slice) - #ImageSlice = N.array(values, dtype=float).reshape(rows, columns) - - ImageSlice = N.fromfile(imageName, dtype=N.uint16).reshape(rows, columns); - - # clip the ends for this test CT image file as the spine runs off the end of the image - ImageSlice[505:512, :] = 0 - return (ImageSlice).astype(float) - -def get_slice2(image_name='slice112.raw', bytes=2, shape=(512,512)): - import mmap - file = open(image_name, 'rb') - mm = mmap.mmap(file.fileno(), 0, access=mmap.ACCESS_READ) - slice = N.frombuffer(mm, dtype='u%d' % bytes).reshape(shape) - slice = slice.astype(float) - slice[505:512,:] = 0 - return slice - -def save_slice(mySlice, filename='junk.raw', bytes=4): - # just save the slice to a fixed file - slice = mySlice.astype('u%d' % bytes) - slice.tofile(filename) - - +import scipy.ndimage._segment as S + +inputname = 'slice112.raw' + +import os +filename = os.path.join(os.path.split(__file__)[0],inputname) + + +def shen_castan(image, IIRFilter=0.8, scLow=0.3, window=7, lowThreshold=220+2048, + highThreshold=600+2048, dust=16): + """ + labeledEdges, ROIList = shen_castan(image, [default]) + + implements Shen-Castan edge finding + + Inputs - image, IIR filter, shen_castan_low, window, low_threshold, high_threshold, dust + - image is the numarray 2D image + - IIR filter is filter parameter for exponential filter + - shen_castan_low is edge threshold is range (0.0, 1.0] + - window is search window for edge detection + - low_ and high_ threshold are density values + - dust is blob filter. blob area (length x width of bounding box) under this + size threshold are filtered (referred to as dust and blown away) + + Outputs - labeledEdges, ROIList[>dust] + - labeledEdges is boundary (edges) of segmented 'blobs', + numerically labeled by blob number + - ROIList[>dust] is a blob feature list. Only values + with bounding box area greater than dust threshold are returned + + """ + labeledEdges, numberObjects = S.shen_castan_edges(scLow, IIRFilter, window, + lowThreshold, highThreshold, image) + # allocated struct array for edge object measures. 
for now just the rect bounding box + ROIList = N.zeros(numberObjects, dtype=S.objstruct) + # return the bounding box for each connected edge + S.get_object_stats(labeledEdges, ROIList) + return labeledEdges, ROIList[ROIList['Area']>dust] + +def sobel(image, sLow=0.3, tMode=1, lowThreshold=220+2048, highThreshold=600+2048, BPHigh=10.0, + apearture=21, dust=16): + """ + labeledEdges, ROIList = sobel(image, [default]) + + implements sobel magnitude edge finding + + Inputs - image, sobel_low, tMode, low_threshold, high_threshold, + high_filter_cutoff, filter_aperature, dust + - image is the numarray 2D image + - sobel_low is edge threshold is range (0.0, 1.0] + - tMode is threshold mode: 1 for ave, 2 for mode (histogram peak) + - low_ and high_ threshold are density values + - high_filter_cutoff is digital high frequency cutoff in range (0.0, 180.0] + - aperature is odd filter kernel length + - dust is blob filter. blob area (length x width of bounding box) under this + size threshold are filtered (referred to as dust and blown away) + + Outputs - labeledEdges, ROIList[>dust] + - labeledEdges is boundary (edges) of segmented 'blobs', + numerically labeled by blob number + - ROIList[>dust] is a blob feature list. Only values + with bounding box area greater than dust threshold are returned + + """ + # get sobel edge points. return edges that are labeled (1..numberObjects) + labeledEdges, numberObjects = S.sobel_edges(sLow, tMode, lowThreshold, + highThreshold, BPHigh, apearture, image) + # allocated struct array for edge object measures. for now just the rect bounding box + ROIList = N.zeros(numberObjects, dtype=S.objstruct) + # return the bounding box for each connected edge + S.get_object_stats(labeledEdges, ROIList) + # thin (medial axis transform) of the sobel edges as the sobel produces a 'band edge' + S.morpho_thin_filt(labeledEdges, ROIList) + return labeledEdges, ROIList[ROIList['Area']>dust] + +def canny(image, cSigma=1.0, cLow=0.5, cHigh=0.8, tMode=1, lowThreshold=220+2048, + highThreshold=600+2048, BPHigh=10.0, apearture=21, dust=16): + """ + labeledEdges, ROIList = canny(image, [default]) + + implements canny edge finding + + Inputs - image, DG_sigma, canny_low, canny_high, tMode, low_threshold, + high_threshold, high_filter_cutoff, filter_aperature, dust + - image is the numarray 2D image + - DG_sigma is Gaussain sigma for the derivative-of-gaussian filter + - clow is low edge threshold is range (0.0, 1.0] + - chigh is high edge threshold is range (0.0, 1.0] + - tMode is threshold mode: 1 for ave, 2 for mode (histogram peak) + - low_ and high_ threshold are density values + - high_filter_cutoff is digital high frequency cutoff in range (0.0, 180.0] + - high_filter_cutoff is digital high frequency cutoff in range (0.0, 180.0] + - aperature is odd filter kernel length + - dust is blob filter. blob area (length x width of bounding box) under this + size threshold are filtered (referred to as dust and blown away) + + Outputs - labeledEdges, ROIList[>dust] + - labeledEdges is boundary (edges) of segmented 'blobs', + numerically labeled by blob number + - ROIList[>dust] is a blob feature list. Only values + with bounding box area greater than dust threshold are returned + + """ + # get canny edge points. return edges that are labeled (1..numberObjects) + labeledEdges, numberObjects = S.canny_edges(cSigma, cLow, cHigh, tMode, lowThreshold, highThreshold, + BPHigh, apearture, image) + # allocated struct array for edge object measures. 
for now just the rect bounding box + ROIList = N.zeros(numberObjects, dtype=S.objstruct) + # return the bounding box for each connected edge + S.get_object_stats(labeledEdges, ROIList) + return labeledEdges, ROIList[ROIList['Area']>dust] + +def get_shape_mask(labeledEdges, ROIList): + """ + get_shape_mask(labeledEdges, ROIList) + + takes labeled edge image plus ROIList (blob descriptors) and generates + boundary shape features and builds labeled blob masks. 'labeledEdges' + is over-written by 'labeledMask'. Adds features to ROIList structure + + Inputs - labeledEdges, ROIList + - labeledEdges is boundary (edges) of segmented 'blobs', + numerically labeled by blob number + - ROIList is a blob feature list. + + Output - no return. edge image input is over-written with mask image. + ROIList added to. + + """ + + # pass in Sobel morph-thinned labeled edge image (LEI) and ROIList + # GetShapeMask will augment the ROI list + # labeledEdges is the original edge image and overwritten as mask image + # maskImage is the mask that is used for blob texture / pixel features + S.build_boundary(labeledEdges, ROIList) + return + +def get_voxel_measures(rawImage, labeledEdges, ROIList): + """ + get_voxel_measures(rawImage, labeledEdges, ROIList) + + takes raw 2D image, labeled blob mask and ROIList. computes voxel features + (moments, histogram) for each blob. Adds features to ROIList structure. + + Inputs - rawImage, labeledEdges, ROIList + - rawImage is the original source 2D image + - labeledEdges is boundary (edges) of segmented 'blobs', + numerically labeled by blob number + - ROIList is a blob feature list. + + Output - no return. ROIList added to. + + """ + # + # pass raw image, labeled mask and the partially filled ROIList + # VoxelMeasures will fill the voxel features in the list + # + S.voxel_measures(rawImage, labeledEdges, ROIList) + return + +def get_texture_measures(rawImage, labeledEdges, ROIList): + """ + get_texture_measures(rawImage, labeledEdges, ROIList) + + takes raw 2D image, labeled blob mask and ROIList. computes 2D + texture features using 7x7 Law's texture filters applied + to segmented blobs. TEM (texture energy metric) is computed + for each Law's filter image and stored in TEM part of ROIList. + + Inputs - rawImage, labeledEdges, ROIList + - rawImage is the original source 2D image + - labeledEdges is boundary (edges) of segmented 'blobs', + numerically labeled by blob number + - ROIList is a blob feature list. + + Output - no return. ROIList added to. 
+ """ + # + # pass raw image, labeled mask and the partially filled ROIList + # VoxelMeasures will fill the texture (Law's, sub-edges, co-occurence, Gabor) features in the list + # + S.texture_measures(rawImage, labeledEdges, ROIList) + return + +def segment_regions(): + """ + sourceImage, labeledMask, ROIList = segment_regions() + + Inputs - No Input + + Outputs - sourceImage, labeledMask, ROIList + - sourceImage is raw 2D image (default cardiac CT slice for demo + - labeledMask is mask of segmented 'blobs', + numerically labeled by blob number + - ROIList is numerical Python structure of intensity, shape and + texture features for each blob + + High level script calls Python functions: + get_slice() - a cardiac CT slice demo file + sobel() - sobel magnitude edge finder, + returns connected edges + get_shape_mask() - gets segmented blob boundary and mask + and shape features + get_voxel_measures() - uses masks get object voxel moment + and histogram features + get_texture_measures() - uses masks get object 2D texture features + """ + # get slice from the CT volume + image = get_slice(filename) + # need a copy of original image as filtering will occur on the extracted slice + sourceImage = image.copy() + # Sobel is the first level segmenter. Sobel magnitude and MAT (medial axis transform) + # followed by connected component analysis. What is returned is labeled edges and the object list + labeledMask, ROIList = sobel(image) + # From the labeled edges and the object list get the labeled mask for each blob object + get_shape_mask(labeledMask, ROIList) + # Use the labeled mask and source image (raw) to get voxel features + get_voxel_measures(sourceImage, labeledMask, ROIList) + # Use the labeled mask and source image (raw) to get texture features + get_texture_measures(sourceImage, labeledMask, ROIList) + return sourceImage, labeledMask, ROIList + +def grow_regions(): + """ + regionMask, numberRegions = region_grow() + Inputs - No Input + Outputs - regionMask, numberRegions + - regionMask is the labeled segment masks from 2D image + - numberRegions is the number of segmented blobs + + High level script calls Python functions: + get_slice() - a cardiac CT slice demo file + region_grow() - "grows" connected blobs. default threshold + and morphological filter structuring element + """ + # get slice from the CT volume + image = get_slice(filename) + regionMask, numberRegions = region_grow(image) + return regionMask, numberRegions + + +def region_grow(image, lowThreshold=220+2048, highThreshold=600+2048, open=7, close=7): + """ + regionMask, numberRegions = region_grow(image, [defaults]) + + Inputs - image, low_threshold, high_threshold, open, close + - image is the numarray 2D image + - low_ and high_ threshold are density values + - open is open morphology structuring element + odd size. 0 to turn off. max is 11 + - close is close morphology structuring element + odd size. 0 to turn off. 
max is 11 + + Outputs - regionMask, numberRegions + - regionMask is the labeled segment masks from 2D image + - numberRegions is the number of segmented blobs + """ + # morphology filters need to be clipped to 11 max and be odd + regionMask, numberRegions = S.region_grow(lowThreshold, highThreshold, close, open, image) + return regionMask, numberRegions + + +def get_slice(imageName='slice112.raw', bytes=2, rows=512, columns=512): + # get a slice alrady extracted from the CT volume + #image = open(imageName, 'rb') + #slice = image.read(rows*columns*bytes) + #values = struct.unpack('h'*rows*columns, slice) + #ImageSlice = N.array(values, dtype=float).reshape(rows, columns) + + ImageSlice = N.fromfile(imageName, dtype=N.uint16).reshape(rows, columns); + + # clip the ends for this test CT image file as the spine runs off the end of the image + ImageSlice[505:512, :] = 0 + return (ImageSlice).astype(float) + +def get_slice2(image_name='slice112.raw', bytes=2, shape=(512,512)): + import mmap + file = open(image_name, 'rb') + mm = mmap.mmap(file.fileno(), 0, access=mmap.ACCESS_READ) + slice = N.frombuffer(mm, dtype='u%d' % bytes).reshape(shape) + slice = slice.astype(float) + slice[505:512,:] = 0 + return slice + +def save_slice(mySlice, filename='junk.raw', bytes=4): + # just save the slice to a fixed file + slice = mySlice.astype('u%d' % bytes) + slice.tofile(filename) + + class TestSegment(TestCase): - def test1(self): - image = get_slice(filename) - sourceImage = image.copy() - edges, objects = sobel(image) - get_shape_mask(edges, objects) - get_voxel_measures(sourceImage, edges, objects) - get_texture_measures(sourceImage, edges, objects) - - def test2(self): - sourceImage, labeledMask, ROIList = segment_regions() - - def test3(self): - regionMask, numberRegions = grow_regions() - regionMask.max() - #save_slice(regionMask, 'regionMask.raw') - - -if __name__ == "__main__": + def test1(self): + image = get_slice(filename) + sourceImage = image.copy() + edges, objects = sobel(image) + get_shape_mask(edges, objects) + get_voxel_measures(sourceImage, edges, objects) + get_texture_measures(sourceImage, edges, objects) + + def test2(self): + sourceImage, labeledMask, ROIList = segment_regions() + + def test3(self): + regionMask, numberRegions = grow_regions() + regionMask.max() + #save_slice(regionMask, 'regionMask.raw') + + +if __name__ == "__main__": inittest.main() From scipy-svn at scipy.org Mon Feb 11 20:25:40 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Mon, 11 Feb 2008 19:25:40 -0600 (CST) Subject: [Scipy-svn] r3917 - in trunk/scipy/ndimage: . src/segment tests Message-ID: <20080212012540.3113739C1DC@new.scipy.org> Author: chris.burns Date: 2008-02-11 19:25:33 -0600 (Mon, 11 Feb 2008) New Revision: 3917 Added: trunk/scipy/ndimage/tests/slice112.raw trunk/scipy/ndimage/tests/test_segment.py Removed: trunk/scipy/ndimage/src/segment/objectdata.py trunk/scipy/ndimage/src/segment/tests/ Modified: trunk/scipy/ndimage/segmenter.py trunk/scipy/ndimage/setup.py Log: Reorg tests for segment module. 
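For orientation, a minimal, hypothetical usage sketch of the reorganized layout as of this revision (it is not part of the commit diff below): it assumes an installed scipy tree in which the demo slice slice112.raw ships with the relocated tests under scipy/ndimage/tests, and that segment_regions()/grow_regions() now take the data filename explicitly, as the diff shows; the module itself is renamed to _segmenter in r3918.

    import os
    import scipy.ndimage.segmenter as seg    # becomes scipy.ndimage._segmenter in r3918

    # the demo CT slice now lives next to the relocated tests (path resolution is an assumption)
    filename = os.path.join(os.path.dirname(seg.__file__), 'tests', 'slice112.raw')

    # after r3917 the high-level drivers take the filename as an argument
    sourceImage, labeledMask, ROIList = seg.segment_regions(filename)
    regionMask, numberRegions = seg.grow_regions(filename)
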
Modified: trunk/scipy/ndimage/segmenter.py =================================================================== --- trunk/scipy/ndimage/segmenter.py 2008-02-11 23:32:11 UTC (rev 3916) +++ trunk/scipy/ndimage/segmenter.py 2008-02-12 01:25:33 UTC (rev 3917) @@ -2,13 +2,32 @@ import numpy as N import scipy.ndimage._segment as S -# make sure this is local to use as default -inputname = 'slice112.raw' +# WARNING: _objstruct data structure mirrors a corresponding data structure +# in ndImage_Segmenter_structs.h that is built into the _segment.so library. +# These structs must match! +_objstruct = N.dtype([('L', 'i'), + ('R', 'i'), + ('T', 'i'), + ('B', 'i'), + ('Label', 'i'), + ('Area', 'i'), + ('cX', 'f'), + ('cY', 'f'), + ('curveClose', 'i'), + ('cXB', 'f'), + ('cYB', 'f'), + ('bLength', 'f'), + ('minRadius', 'f'), + ('maxRadius', 'f'), + ('aveRadius', 'f'), + ('ratio', 'f'), + ('compactness', 'f'), + ('voxelMean', 'f'), + ('voxelVar', 'f'), + ('TEM', 'f', 20)] + ) -import os -filename = os.path.join(os.path.split(__file__)[0],inputname) - def shen_castan(image, IIRFilter=0.8, scLow=0.3, window=7, lowThreshold=220+2048, highThreshold=600+2048, dust=16): """ @@ -35,7 +54,7 @@ labeledEdges, numberObjects = S.shen_castan_edges(scLow, IIRFilter, window, lowThreshold, highThreshold, image) # allocated struct array for edge object measures. for now just the rect bounding box - ROIList = N.zeros(numberObjects, dtype=S.objstruct) + ROIList = N.zeros(numberObjects, dtype=_objstruct) # return the bounding box for each connected edge S.get_object_stats(labeledEdges, ROIList) return labeledEdges, ROIList[ROIList['Area']>dust] @@ -69,7 +88,7 @@ labeledEdges, numberObjects = S.sobel_edges(sLow, tMode, lowThreshold, highThreshold, BPHigh, apearture, image) # allocated struct array for edge object measures. for now just the rect bounding box - ROIList = N.zeros(numberObjects, dtype=S.objstruct) + ROIList = N.zeros(numberObjects, dtype=_objstruct) # return the bounding box for each connected edge S.get_object_stats(labeledEdges, ROIList) # thin (medial axis transform) of the sobel edges as the sobel produces a 'band edge' @@ -108,7 +127,7 @@ labeledEdges, numberObjects = S.canny_edges(cSigma, cLow, cHigh, tMode, lowThreshold, highThreshold, BPHigh, apearture, image) # allocated struct array for edge object measures. 
for now just the rect bounding box - ROIList = N.zeros(numberObjects, dtype=S.objstruct) + ROIList = N.zeros(numberObjects, dtype=_objstruct) # return the bounding box for each connected edge S.get_object_stats(labeledEdges, ROIList) return labeledEdges, ROIList[ROIList['Area']>dust] @@ -185,7 +204,7 @@ S.texture_measures(rawImage, labeledEdges, ROIList) return -def segment_regions(): +def segment_regions(filename): """ sourceImage, labeledMask, ROIList = segment_regions() @@ -223,7 +242,7 @@ get_texture_measures(sourceImage, labeledMask, ROIList) return sourceImage, labeledMask, ROIList -def grow_regions(): +def grow_regions(filename): """ regionMask, numberRegions = region_grow() Inputs - No Input Modified: trunk/scipy/ndimage/setup.py =================================================================== --- trunk/scipy/ndimage/setup.py 2008-02-11 23:32:11 UTC (rev 3916) +++ trunk/scipy/ndimage/setup.py 2008-02-12 01:25:33 UTC (rev 3917) @@ -20,7 +20,6 @@ depends = ['src/segment/ndImage_Segmenter_structs.h'] ) - #config.add_subpackage('segment') config.add_data_dir('tests') config.add_subpackage('register') Deleted: trunk/scipy/ndimage/src/segment/objectdata.py =================================================================== --- trunk/scipy/ndimage/src/segment/objectdata.py 2008-02-11 23:32:11 UTC (rev 3916) +++ trunk/scipy/ndimage/src/segment/objectdata.py 2008-02-12 01:25:33 UTC (rev 3917) @@ -1,25 +0,0 @@ - -import numpy as N - -objstruct =N.dtype([('L', 'i'), - ('R', 'i'), - ('T', 'i'), - ('B', 'i'), - ('Label', 'i'), - ('Area', 'i'), - ('cX', 'f'), - ('cY', 'f'), - ('curveClose', 'i'), - ('cXB', 'f'), - ('cYB', 'f'), - ('bLength', 'f'), - ('minRadius', 'f'), - ('maxRadius', 'f'), - ('aveRadius', 'f'), - ('ratio', 'f'), - ('compactness', 'f'), - ('voxelMean', 'f'), - ('voxelVar', 'f'), - ('TEM', 'f', 20)] - ) - Copied: trunk/scipy/ndimage/tests/slice112.raw (from rev 3916, trunk/scipy/ndimage/src/segment/tests/slice112.raw) Copied: trunk/scipy/ndimage/tests/test_segment.py (from rev 3916, trunk/scipy/ndimage/src/segment/tests/test_segment.py) =================================================================== --- trunk/scipy/ndimage/src/segment/tests/test_segment.py 2008-02-11 23:32:11 UTC (rev 3916) +++ trunk/scipy/ndimage/tests/test_segment.py 2008-02-12 01:25:33 UTC (rev 3917) @@ -0,0 +1,29 @@ + +from scipy.testing import * +from scipy.ndimage.segmenter import * + +inputname = 'slice112.raw' + +from os.path import join, dirname +filename = join(dirname(__file__), inputname) + +class TestSegment(TestCase): + def test1(self): + image = get_slice(filename) + sourceImage = image.copy() + edges, objects = sobel(image) + get_shape_mask(edges, objects) + get_voxel_measures(sourceImage, edges, objects) + get_texture_measures(sourceImage, edges, objects) + + def test2(self): + sourceImage, labeledMask, ROIList = segment_regions(filename) + + def test3(self): + regionMask, numberRegions = grow_regions(filename) + regionMask.max() + #save_slice(regionMask, 'regionMask.raw') + + +if __name__ == "__main__": + inittest.main() From scipy-svn at scipy.org Mon Feb 11 20:51:23 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Mon, 11 Feb 2008 19:51:23 -0600 (CST) Subject: [Scipy-svn] r3918 - in trunk/scipy/ndimage: . 
tests Message-ID: <20080212015123.C642F39C0C9@new.scipy.org> Author: chris.burns Date: 2008-02-11 19:51:18 -0600 (Mon, 11 Feb 2008) New Revision: 3918 Added: trunk/scipy/ndimage/_segmenter.py Removed: trunk/scipy/ndimage/segmenter.py Modified: trunk/scipy/ndimage/tests/test_segment.py Log: Make segmenter.py private. Issue warning on import. Copied: trunk/scipy/ndimage/_segmenter.py (from rev 3917, trunk/scipy/ndimage/segmenter.py) =================================================================== --- trunk/scipy/ndimage/segmenter.py 2008-02-12 01:25:33 UTC (rev 3917) +++ trunk/scipy/ndimage/_segmenter.py 2008-02-12 01:51:18 UTC (rev 3918) @@ -0,0 +1,520 @@ +import math +import numpy as N +import scipy.ndimage._segment as S + +# Issue warning regarding heavy development status of this module +import warnings +_msg = "The segmenter code is under heavy development and therefore public \ +API will change in the future. The NIPY group is actively working on this \ +code, and has every intention of generalizing this for the Scipy community.\ +Use this module minimally, if at all, until it this warning is removed." +warnings.warn(_msg, UserWarning) + +# WARNING: _objstruct data structure mirrors a corresponding data structure +# in ndImage_Segmenter_structs.h that is built into the _segment.so library. +# These structs must match! +_objstruct = N.dtype([('L', 'i'), + ('R', 'i'), + ('T', 'i'), + ('B', 'i'), + ('Label', 'i'), + ('Area', 'i'), + ('cX', 'f'), + ('cY', 'f'), + ('curveClose', 'i'), + ('cXB', 'f'), + ('cYB', 'f'), + ('bLength', 'f'), + ('minRadius', 'f'), + ('maxRadius', 'f'), + ('aveRadius', 'f'), + ('ratio', 'f'), + ('compactness', 'f'), + ('voxelMean', 'f'), + ('voxelVar', 'f'), + ('TEM', 'f', 20)] + ) + + +def shen_castan(image, IIRFilter=0.8, scLow=0.3, window=7, lowThreshold=220+2048, + highThreshold=600+2048, dust=16): + """ + labeledEdges, ROIList = shen_castan(image, [default]) + + implements Shen-Castan edge finding + + Inputs - image, IIR filter, shen_castan_low, window, low_threshold, high_threshold, dust + - image is the numarray 2D image + - IIR filter is filter parameter for exponential filter + - shen_castan_low is edge threshold is range (0.0, 1.0] + - window is search window for edge detection + - low_ and high_ threshold are density values + - dust is blob filter. blob area (length x width of bounding box) under this + size threshold are filtered (referred to as dust and blown away) + + Outputs - labeledEdges, ROIList[>dust] + - labeledEdges is boundary (edges) of segmented 'blobs', + numerically labeled by blob number + - ROIList[>dust] is a blob feature list. Only values + with bounding box area greater than dust threshold are returned + + """ + labeledEdges, numberObjects = S.shen_castan_edges(scLow, IIRFilter, window, + lowThreshold, highThreshold, image) + # allocated struct array for edge object measures. 
for now just the rect bounding box + ROIList = N.zeros(numberObjects, dtype=_objstruct) + # return the bounding box for each connected edge + S.get_object_stats(labeledEdges, ROIList) + return labeledEdges, ROIList[ROIList['Area']>dust] + +def sobel(image, sLow=0.3, tMode=1, lowThreshold=220+2048, highThreshold=600+2048, BPHigh=10.0, + apearture=21, dust=16): + """ + labeledEdges, ROIList = sobel(image, [default]) + + implements sobel magnitude edge finding + + Inputs - image, sobel_low, tMode, low_threshold, high_threshold, + high_filter_cutoff, filter_aperature, dust + - image is the numarray 2D image + - sobel_low is edge threshold is range (0.0, 1.0] + - tMode is threshold mode: 1 for ave, 2 for mode (histogram peak) + - low_ and high_ threshold are density values + - high_filter_cutoff is digital high frequency cutoff in range (0.0, 180.0] + - aperature is odd filter kernel length + - dust is blob filter. blob area (length x width of bounding box) under this + size threshold are filtered (referred to as dust and blown away) + + Outputs - labeledEdges, ROIList[>dust] + - labeledEdges is boundary (edges) of segmented 'blobs', + numerically labeled by blob number + - ROIList[>dust] is a blob feature list. Only values + with bounding box area greater than dust threshold are returned + + """ + # get sobel edge points. return edges that are labeled (1..numberObjects) + labeledEdges, numberObjects = S.sobel_edges(sLow, tMode, lowThreshold, + highThreshold, BPHigh, apearture, image) + # allocated struct array for edge object measures. for now just the rect bounding box + ROIList = N.zeros(numberObjects, dtype=_objstruct) + # return the bounding box for each connected edge + S.get_object_stats(labeledEdges, ROIList) + # thin (medial axis transform) of the sobel edges as the sobel produces a 'band edge' + S.morpho_thin_filt(labeledEdges, ROIList) + return labeledEdges, ROIList[ROIList['Area']>dust] + +def canny(image, cSigma=1.0, cLow=0.5, cHigh=0.8, tMode=1, lowThreshold=220+2048, + highThreshold=600+2048, BPHigh=10.0, apearture=21, dust=16): + """ + labeledEdges, ROIList = canny(image, [default]) + + implements canny edge finding + + Inputs - image, DG_sigma, canny_low, canny_high, tMode, low_threshold, + high_threshold, high_filter_cutoff, filter_aperature, dust + - image is the numarray 2D image + - DG_sigma is Gaussain sigma for the derivative-of-gaussian filter + - clow is low edge threshold is range (0.0, 1.0] + - chigh is high edge threshold is range (0.0, 1.0] + - tMode is threshold mode: 1 for ave, 2 for mode (histogram peak) + - low_ and high_ threshold are density values + - high_filter_cutoff is digital high frequency cutoff in range (0.0, 180.0] + - high_filter_cutoff is digital high frequency cutoff in range (0.0, 180.0] + - aperature is odd filter kernel length + - dust is blob filter. blob area (length x width of bounding box) under this + size threshold are filtered (referred to as dust and blown away) + + Outputs - labeledEdges, ROIList[>dust] + - labeledEdges is boundary (edges) of segmented 'blobs', + numerically labeled by blob number + - ROIList[>dust] is a blob feature list. Only values + with bounding box area greater than dust threshold are returned + + """ + # get canny edge points. return edges that are labeled (1..numberObjects) + labeledEdges, numberObjects = S.canny_edges(cSigma, cLow, cHigh, tMode, lowThreshold, highThreshold, + BPHigh, apearture, image) + # allocated struct array for edge object measures. 
for now just the rect bounding box + ROIList = N.zeros(numberObjects, dtype=_objstruct) + # return the bounding box for each connected edge + S.get_object_stats(labeledEdges, ROIList) + return labeledEdges, ROIList[ROIList['Area']>dust] + +def get_shape_mask(labeledEdges, ROIList): + """ + get_shape_mask(labeledEdges, ROIList) + + takes labeled edge image plus ROIList (blob descriptors) and generates + boundary shape features and builds labeled blob masks. 'labeledEdges' + is over-written by 'labeledMask'. Adds features to ROIList structure + + Inputs - labeledEdges, ROIList + - labeledEdges is boundary (edges) of segmented 'blobs', + numerically labeled by blob number + - ROIList is a blob feature list. + + Output - no return. edge image input is over-written with mask image. + ROIList added to. + + """ + + # pass in Sobel morph-thinned labeled edge image (LEI) and ROIList + # GetShapeMask will augment the ROI list + # labeledEdges is the original edge image and overwritten as mask image + # maskImage is the mask that is used for blob texture / pixel features + S.build_boundary(labeledEdges, ROIList) + return + +def get_voxel_measures(rawImage, labeledEdges, ROIList): + """ + get_voxel_measures(rawImage, labeledEdges, ROIList) + + takes raw 2D image, labeled blob mask and ROIList. computes voxel features + (moments, histogram) for each blob. Adds features to ROIList structure. + + Inputs - rawImage, labeledEdges, ROIList + - rawImage is the original source 2D image + - labeledEdges is boundary (edges) of segmented 'blobs', + numerically labeled by blob number + - ROIList is a blob feature list. + + Output - no return. ROIList added to. + + """ + # + # pass raw image, labeled mask and the partially filled ROIList + # VoxelMeasures will fill the voxel features in the list + # + S.voxel_measures(rawImage, labeledEdges, ROIList) + return + +def get_texture_measures(rawImage, labeledEdges, ROIList): + """ + get_texture_measures(rawImage, labeledEdges, ROIList) + + takes raw 2D image, labeled blob mask and ROIList. computes 2D + texture features using 7x7 Law's texture filters applied + to segmented blobs. TEM (texture energy metric) is computed + for each Law's filter image and stored in TEM part of ROIList. + + Inputs - rawImage, labeledEdges, ROIList + - rawImage is the original source 2D image + - labeledEdges is boundary (edges) of segmented 'blobs', + numerically labeled by blob number + - ROIList is a blob feature list. + + Output - no return. ROIList added to. 
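A minimal usage sketch of the chain these helpers form (mirroring segment_regions() further down in this module and the r3917 test file; `image` is assumed to be a 2D CT slice already loaded with get_slice(), and nothing below is part of the commit itself):

    source = image.copy()                        # keep an unfiltered copy for the measures
    edges, objects = sobel(image)                # labeled edges + per-blob records (dtype _objstruct)
    get_shape_mask(edges, objects)               # edges is overwritten with the labeled blob mask
    get_voxel_measures(source, edges, objects)   # moment/histogram features added in place
    get_texture_measures(source, edges, objects) # Law's TEM features stored in objects['TEM']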
+ """ + # + # pass raw image, labeled mask and the partially filled ROIList + # VoxelMeasures will fill the texture (Law's, sub-edges, co-occurence, Gabor) features in the list + # + S.texture_measures(rawImage, labeledEdges, ROIList) + return + +def segment_regions(filename): + """ + sourceImage, labeledMask, ROIList = segment_regions() + + Inputs - No Input + + Outputs - sourceImage, labeledMask, ROIList + - sourceImage is raw 2D image (default cardiac CT slice for demo + - labeledMask is mask of segmented 'blobs', + numerically labeled by blob number + - ROIList is numerical Python structure of intensity, shape and + texture features for each blob + + High level script calls Python functions: + get_slice() - a cardiac CT slice demo file + sobel() - sobel magnitude edge finder, + returns connected edges + get_shape_mask() - gets segmented blob boundary and mask + and shape features + get_voxel_measures() - uses masks get object voxel moment + and histogram features + get_texture_measures() - uses masks get object 2D texture features + """ + # get slice from the CT volume + image = get_slice(filename) + # need a copy of original image as filtering will occur on the extracted slice + sourceImage = image.copy() + # Sobel is the first level segmenter. Sobel magnitude and MAT (medial axis transform) + # followed by connected component analysis. What is returned is labeled edges and the object list + labeledMask, ROIList = sobel(image) + # From the labeled edges and the object list get the labeled mask for each blob object + get_shape_mask(labeledMask, ROIList) + # Use the labeled mask and source image (raw) to get voxel features + get_voxel_measures(sourceImage, labeledMask, ROIList) + # Use the labeled mask and source image (raw) to get texture features + get_texture_measures(sourceImage, labeledMask, ROIList) + return sourceImage, labeledMask, ROIList + +def grow_regions(filename): + """ + regionMask, numberRegions = region_grow() + Inputs - No Input + Outputs - regionMask, numberRegions + - regionMask is the labeled segment masks from 2D image + - numberRegions is the number of segmented blobs + + High level script calls Python functions: + get_slice() - a cardiac CT slice demo file + region_grow() - "grows" connected blobs. default threshold + and morphological filter structuring element + """ + # get slice from the CT volume + image = get_slice(filename) + regionMask, numberRegions = region_grow(image) + return regionMask, numberRegions + + +def region_grow(image, lowThreshold=220+2048, highThreshold=600+2048, open=7, close=7): + """ + regionMask, numberRegions = region_grow(image, [defaults]) + + Inputs - image, low_threshold, high_threshold, open, close + - image is the numarray 2D image + - low_ and high_ threshold are density values + - open is open morphology structuring element + odd size. 0 to turn off. max is 11 + - close is close morphology structuring element + odd size. 0 to turn off. 
max is 11 + + Outputs - regionMask, numberRegions + - regionMask is the labeled segment masks from 2D image + - numberRegions is the number of segmented blobs + """ + # morphology filters need to be clipped to 11 max and be odd + regionMask, numberRegions = S.region_grow(lowThreshold, highThreshold, close, open, image) + return regionMask, numberRegions + + +def get_slice(imageName='slice112.raw', bytes=2, rows=512, columns=512): + # get a slice alrady extracted from the CT volume + #image = open(imageName, 'rb') + #slice = image.read(rows*columns*bytes) + #values = struct.unpack('h'*rows*columns, slice) + #ImageSlice = N.array(values, dtype=float).reshape(rows, columns) + + ImageSlice = N.fromfile(imageName, dtype=N.uint16).reshape(rows, columns); + + # clip the ends for this test CT image file as the spine runs off the end of the image + ImageSlice[505:512, :] = 0 + return (ImageSlice).astype(float) + +def get_slice2(image_name='slice112.raw', bytes=2, shape=(512,512)): + import mmap + file = open(image_name, 'rb') + mm = mmap.mmap(file.fileno(), 0, access=mmap.ACCESS_READ) + slice = N.frombuffer(mm, dtype='u%d' % bytes).reshape(shape) + slice = slice.astype(float) + # this is for the test CT as spine runs off back of image + slice[505:512,:] = 0 + return slice + +def save_slice(mySlice, filename='junk.raw', bytes=4): + # just save the slice to a fixed file + slice = mySlice.astype('u%d' % bytes) + slice.tofile(filename) + +def build_d_gauss_kernel(gWidth=21, sigma=1.0): + + """ + build the derivative of Gaussian kernel for Canny edge filter + DGFilter = build_d_gauss_kernel(gWidth, sigma) + Inputs: + gWdith is width of derivative of Gaussian kernel + sigma is sigma term of derivative of Gaussian kernel + Output: + DGFilter (a struct). Use in Canny filter call + + """ + kernel = N.zeros((1+2*(gWidth-1)), dtype=float) + indices = range(1, gWidth) + + i = 0 + kernel[gWidth-1] = math.exp(((-i*i)/(2.0 * sigma * sigma))) + kernel[gWidth-1] *= -(i / (sigma * sigma)) + for i in indices: + kernel[gWidth-1+i] = math.exp(((-i*i)/(2.0 * sigma * sigma))) + kernel[gWidth-1+i] *= -(i / (sigma * sigma)) + kernel[gWidth-1-i] = -kernel[gWidth-1+i] + + DGFilter= {'kernelSize' : gWidth, 'coefficients': kernel} + + return DGFilter + +def build_2d_kernel(aperature=21, hiFilterCutoff=10.0): + + """ + build flat FIR filter with sinc kernel + this is bandpass, but low cutoff is 0.0 + Use in Sobel and Canny filter edge find as image pre-process + + FIRFilter = build_2d_kernel(aperature, hiFilterCutoff) + Inputs: + aperature is number of FIR taps in sinc kernel + hiFilterCutoff is digital frequency cutoff in range (0.0, 180.0) + Output: + FIRFilter (a struct) + + """ + + rad = math.pi / 180.0 + HalfFilterTaps = (aperature-1) / 2 + kernel = N.zeros((aperature), dtype=N.float32) + LC = 0.0 + HC = hiFilterCutoff * rad + t2 = 2.0 * math.pi + t1 = 2.0 * HalfFilterTaps + 1.0 + indices = range(-HalfFilterTaps, HalfFilterTaps+1, 1) + j = 0 + for i in indices: + if i == 0: + tLOW = LC + tHIGH = HC + else: + tLOW = math.sin(i*LC)/i + tHIGH = math.sin(i*HC)/i + # Hamming window + t3 = 0.54 + 0.46*(math.cos(i*t2/t1)) + t4 = t3*(tHIGH-tLOW) + kernel[j] = t4 + j += 1 + + # normalize the kernel + sum = kernel.sum() + kernel /= sum + + FIRFilter= {'kernelSize' : aperature, 'coefficients': kernel} + + return FIRFilter + + +def build_laws_kernel(): + + """ + build 6 length-7 Law's texture filter masks + mask names are: 'L', 'S', 'E', 'W', 'R', 'O' + + LAWSFilter = build_laws_kernel() + + Inputs: + None + + Output: + LAWSFilter (a 
struct) + + """ + aperature = (6, 7) + coefficients = N.zeros((aperature), dtype=N.float32) + names = ('L', 'E', 'S', 'W', 'R', 'O' ) + + coefficients[0, :] = ( 1.0, 6.0, 15.0, 20.0, 15.0, 6.0, 1.0 ) + coefficients[1, :] = (-1.0, -4.0, -5.0, 0.0, 5.0, 4.0, 1.0 ) + coefficients[2, :] = (-1.0, -2.0, 1.0, 4.0, 1.0, -2.0, -1.0 ) + coefficients[3, :] = (-1.0, 0.0, 3.0, 0.0, -3.0, 0.0, 1.0 ) + coefficients[4, :] = ( 1.0, -2.0, -1.0, 4.0, -1.0, -2.0, 1.0 ) + coefficients[5, :] = (-1.0, 6.0, -15.0, 20.0, -15.0, 6.0, -1.0 ) + + LAWSFilter= {'numKernels' : 6, 'kernelSize' : 7, 'coefficients': coefficients, 'names': names} + + return LAWSFilter + +def build_morpho_thin_masks(): + + """ + build 2 sets (J and K) of 8 3x3 morphology masks (structuring elements) + to implement thinning (medial axis transformation - MAT) + + MATFilter = build_morpho_thin_masks() + + Inputs: + None + + Output: + MATFilter (a struct) + + """ + + # (layers, rows, cols) + shape = (8, 3, 3) + J_mask = N.zeros((shape), dtype=N.ushort) + K_mask = N.zeros((shape), dtype=N.ushort) + + # load the 8 J masks for medial axis transformation + J_mask[0][0][0] = 1; + J_mask[0][0][1] = 1; + J_mask[0][0][2] = 1; + J_mask[0][1][1] = 1; + + J_mask[1][0][1] = 1; + J_mask[1][1][1] = 1; + J_mask[1][1][2] = 1; + + J_mask[2][0][0] = 1; + J_mask[2][1][0] = 1; + J_mask[2][2][0] = 1; + J_mask[2][1][1] = 1; + + J_mask[3][0][1] = 1; + J_mask[3][1][0] = 1; + J_mask[3][1][1] = 1; + + J_mask[4][0][2] = 1; + J_mask[4][1][1] = 1; + J_mask[4][1][2] = 1; + J_mask[4][2][2] = 1; + + J_mask[5][1][0] = 1; + J_mask[5][1][1] = 1; + J_mask[5][2][1] = 1; + + J_mask[6][1][1] = 1; + J_mask[6][2][0] = 1; + J_mask[6][2][1] = 1; + J_mask[6][2][2] = 1; + + J_mask[7][1][1] = 1; + J_mask[7][1][2] = 1; + J_mask[7][2][1] = 1; + + + # load the 8 K masks for medial axis transformation + K_mask[0][2][0] = 1; + K_mask[0][2][1] = 1; + K_mask[0][2][2] = 1; + + K_mask[1][1][0] = 1; + K_mask[1][2][0] = 1; + K_mask[1][2][1] = 1; + + K_mask[2][0][2] = 1; + K_mask[2][1][2] = 1; + K_mask[2][2][2] = 1; + + K_mask[3][1][2] = 1; + K_mask[3][2][1] = 1; + K_mask[3][2][2] = 1; + + K_mask[4][0][0] = 1; + K_mask[4][1][0] = 1; + K_mask[4][2][0] = 1; + + K_mask[5][0][1] = 1; + K_mask[5][0][2] = 1; + K_mask[5][1][2] = 1; + + K_mask[6][0][0] = 1; + K_mask[6][0][1] = 1; + K_mask[6][0][2] = 1; + + K_mask[7][0][0] = 1; + K_mask[7][0][1] = 1; + K_mask[7][1][0] = 1; + + MATFilter = {'number3x3Masks' : 8, 'jmask' : J_mask, 'kmask' : K_mask} + + return MATFilter + Deleted: trunk/scipy/ndimage/segmenter.py =================================================================== --- trunk/scipy/ndimage/segmenter.py 2008-02-12 01:25:33 UTC (rev 3917) +++ trunk/scipy/ndimage/segmenter.py 2008-02-12 01:51:18 UTC (rev 3918) @@ -1,512 +0,0 @@ -import math -import numpy as N -import scipy.ndimage._segment as S - -# WARNING: _objstruct data structure mirrors a corresponding data structure -# in ndImage_Segmenter_structs.h that is built into the _segment.so library. -# These structs must match! 
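Because nothing checks the match at import time, a quick layout sanity check can be done from the Python side (illustrative only; the numbers are simply what a packed numpy dtype of these fields reports, and they must agree with the corresponding struct in ndImage_Segmenter_structs.h assuming the C side uses the same packing):

    import numpy as N
    # 19 leading 4-byte fields, then a 20-element float32 array
    print _objstruct.itemsize           # 156 bytes total
    print _objstruct.fields['TEM'][1]   # 76, the byte offset of the TEM block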
-_objstruct = N.dtype([('L', 'i'), - ('R', 'i'), - ('T', 'i'), - ('B', 'i'), - ('Label', 'i'), - ('Area', 'i'), - ('cX', 'f'), - ('cY', 'f'), - ('curveClose', 'i'), - ('cXB', 'f'), - ('cYB', 'f'), - ('bLength', 'f'), - ('minRadius', 'f'), - ('maxRadius', 'f'), - ('aveRadius', 'f'), - ('ratio', 'f'), - ('compactness', 'f'), - ('voxelMean', 'f'), - ('voxelVar', 'f'), - ('TEM', 'f', 20)] - ) - - -def shen_castan(image, IIRFilter=0.8, scLow=0.3, window=7, lowThreshold=220+2048, - highThreshold=600+2048, dust=16): - """ - labeledEdges, ROIList = shen_castan(image, [default]) - - implements Shen-Castan edge finding - - Inputs - image, IIR filter, shen_castan_low, window, low_threshold, high_threshold, dust - - image is the numarray 2D image - - IIR filter is filter parameter for exponential filter - - shen_castan_low is edge threshold is range (0.0, 1.0] - - window is search window for edge detection - - low_ and high_ threshold are density values - - dust is blob filter. blob area (length x width of bounding box) under this - size threshold are filtered (referred to as dust and blown away) - - Outputs - labeledEdges, ROIList[>dust] - - labeledEdges is boundary (edges) of segmented 'blobs', - numerically labeled by blob number - - ROIList[>dust] is a blob feature list. Only values - with bounding box area greater than dust threshold are returned - - """ - labeledEdges, numberObjects = S.shen_castan_edges(scLow, IIRFilter, window, - lowThreshold, highThreshold, image) - # allocated struct array for edge object measures. for now just the rect bounding box - ROIList = N.zeros(numberObjects, dtype=_objstruct) - # return the bounding box for each connected edge - S.get_object_stats(labeledEdges, ROIList) - return labeledEdges, ROIList[ROIList['Area']>dust] - -def sobel(image, sLow=0.3, tMode=1, lowThreshold=220+2048, highThreshold=600+2048, BPHigh=10.0, - apearture=21, dust=16): - """ - labeledEdges, ROIList = sobel(image, [default]) - - implements sobel magnitude edge finding - - Inputs - image, sobel_low, tMode, low_threshold, high_threshold, - high_filter_cutoff, filter_aperature, dust - - image is the numarray 2D image - - sobel_low is edge threshold is range (0.0, 1.0] - - tMode is threshold mode: 1 for ave, 2 for mode (histogram peak) - - low_ and high_ threshold are density values - - high_filter_cutoff is digital high frequency cutoff in range (0.0, 180.0] - - aperature is odd filter kernel length - - dust is blob filter. blob area (length x width of bounding box) under this - size threshold are filtered (referred to as dust and blown away) - - Outputs - labeledEdges, ROIList[>dust] - - labeledEdges is boundary (edges) of segmented 'blobs', - numerically labeled by blob number - - ROIList[>dust] is a blob feature list. Only values - with bounding box area greater than dust threshold are returned - - """ - # get sobel edge points. return edges that are labeled (1..numberObjects) - labeledEdges, numberObjects = S.sobel_edges(sLow, tMode, lowThreshold, - highThreshold, BPHigh, apearture, image) - # allocated struct array for edge object measures. 
for now just the rect bounding box - ROIList = N.zeros(numberObjects, dtype=_objstruct) - # return the bounding box for each connected edge - S.get_object_stats(labeledEdges, ROIList) - # thin (medial axis transform) of the sobel edges as the sobel produces a 'band edge' - S.morpho_thin_filt(labeledEdges, ROIList) - return labeledEdges, ROIList[ROIList['Area']>dust] - -def canny(image, cSigma=1.0, cLow=0.5, cHigh=0.8, tMode=1, lowThreshold=220+2048, - highThreshold=600+2048, BPHigh=10.0, apearture=21, dust=16): - """ - labeledEdges, ROIList = canny(image, [default]) - - implements canny edge finding - - Inputs - image, DG_sigma, canny_low, canny_high, tMode, low_threshold, - high_threshold, high_filter_cutoff, filter_aperature, dust - - image is the numarray 2D image - - DG_sigma is Gaussain sigma for the derivative-of-gaussian filter - - clow is low edge threshold is range (0.0, 1.0] - - chigh is high edge threshold is range (0.0, 1.0] - - tMode is threshold mode: 1 for ave, 2 for mode (histogram peak) - - low_ and high_ threshold are density values - - high_filter_cutoff is digital high frequency cutoff in range (0.0, 180.0] - - high_filter_cutoff is digital high frequency cutoff in range (0.0, 180.0] - - aperature is odd filter kernel length - - dust is blob filter. blob area (length x width of bounding box) under this - size threshold are filtered (referred to as dust and blown away) - - Outputs - labeledEdges, ROIList[>dust] - - labeledEdges is boundary (edges) of segmented 'blobs', - numerically labeled by blob number - - ROIList[>dust] is a blob feature list. Only values - with bounding box area greater than dust threshold are returned - - """ - # get canny edge points. return edges that are labeled (1..numberObjects) - labeledEdges, numberObjects = S.canny_edges(cSigma, cLow, cHigh, tMode, lowThreshold, highThreshold, - BPHigh, apearture, image) - # allocated struct array for edge object measures. for now just the rect bounding box - ROIList = N.zeros(numberObjects, dtype=_objstruct) - # return the bounding box for each connected edge - S.get_object_stats(labeledEdges, ROIList) - return labeledEdges, ROIList[ROIList['Area']>dust] - -def get_shape_mask(labeledEdges, ROIList): - """ - get_shape_mask(labeledEdges, ROIList) - - takes labeled edge image plus ROIList (blob descriptors) and generates - boundary shape features and builds labeled blob masks. 'labeledEdges' - is over-written by 'labeledMask'. Adds features to ROIList structure - - Inputs - labeledEdges, ROIList - - labeledEdges is boundary (edges) of segmented 'blobs', - numerically labeled by blob number - - ROIList is a blob feature list. - - Output - no return. edge image input is over-written with mask image. - ROIList added to. - - """ - - # pass in Sobel morph-thinned labeled edge image (LEI) and ROIList - # GetShapeMask will augment the ROI list - # labeledEdges is the original edge image and overwritten as mask image - # maskImage is the mask that is used for blob texture / pixel features - S.build_boundary(labeledEdges, ROIList) - return - -def get_voxel_measures(rawImage, labeledEdges, ROIList): - """ - get_voxel_measures(rawImage, labeledEdges, ROIList) - - takes raw 2D image, labeled blob mask and ROIList. computes voxel features - (moments, histogram) for each blob. Adds features to ROIList structure. 
- - Inputs - rawImage, labeledEdges, ROIList - - rawImage is the original source 2D image - - labeledEdges is boundary (edges) of segmented 'blobs', - numerically labeled by blob number - - ROIList is a blob feature list. - - Output - no return. ROIList added to. - - """ - # - # pass raw image, labeled mask and the partially filled ROIList - # VoxelMeasures will fill the voxel features in the list - # - S.voxel_measures(rawImage, labeledEdges, ROIList) - return - -def get_texture_measures(rawImage, labeledEdges, ROIList): - """ - get_texture_measures(rawImage, labeledEdges, ROIList) - - takes raw 2D image, labeled blob mask and ROIList. computes 2D - texture features using 7x7 Law's texture filters applied - to segmented blobs. TEM (texture energy metric) is computed - for each Law's filter image and stored in TEM part of ROIList. - - Inputs - rawImage, labeledEdges, ROIList - - rawImage is the original source 2D image - - labeledEdges is boundary (edges) of segmented 'blobs', - numerically labeled by blob number - - ROIList is a blob feature list. - - Output - no return. ROIList added to. - """ - # - # pass raw image, labeled mask and the partially filled ROIList - # VoxelMeasures will fill the texture (Law's, sub-edges, co-occurence, Gabor) features in the list - # - S.texture_measures(rawImage, labeledEdges, ROIList) - return - -def segment_regions(filename): - """ - sourceImage, labeledMask, ROIList = segment_regions() - - Inputs - No Input - - Outputs - sourceImage, labeledMask, ROIList - - sourceImage is raw 2D image (default cardiac CT slice for demo - - labeledMask is mask of segmented 'blobs', - numerically labeled by blob number - - ROIList is numerical Python structure of intensity, shape and - texture features for each blob - - High level script calls Python functions: - get_slice() - a cardiac CT slice demo file - sobel() - sobel magnitude edge finder, - returns connected edges - get_shape_mask() - gets segmented blob boundary and mask - and shape features - get_voxel_measures() - uses masks get object voxel moment - and histogram features - get_texture_measures() - uses masks get object 2D texture features - """ - # get slice from the CT volume - image = get_slice(filename) - # need a copy of original image as filtering will occur on the extracted slice - sourceImage = image.copy() - # Sobel is the first level segmenter. Sobel magnitude and MAT (medial axis transform) - # followed by connected component analysis. What is returned is labeled edges and the object list - labeledMask, ROIList = sobel(image) - # From the labeled edges and the object list get the labeled mask for each blob object - get_shape_mask(labeledMask, ROIList) - # Use the labeled mask and source image (raw) to get voxel features - get_voxel_measures(sourceImage, labeledMask, ROIList) - # Use the labeled mask and source image (raw) to get texture features - get_texture_measures(sourceImage, labeledMask, ROIList) - return sourceImage, labeledMask, ROIList - -def grow_regions(filename): - """ - regionMask, numberRegions = region_grow() - Inputs - No Input - Outputs - regionMask, numberRegions - - regionMask is the labeled segment masks from 2D image - - numberRegions is the number of segmented blobs - - High level script calls Python functions: - get_slice() - a cardiac CT slice demo file - region_grow() - "grows" connected blobs. 
default threshold - and morphological filter structuring element - """ - # get slice from the CT volume - image = get_slice(filename) - regionMask, numberRegions = region_grow(image) - return regionMask, numberRegions - - -def region_grow(image, lowThreshold=220+2048, highThreshold=600+2048, open=7, close=7): - """ - regionMask, numberRegions = region_grow(image, [defaults]) - - Inputs - image, low_threshold, high_threshold, open, close - - image is the numarray 2D image - - low_ and high_ threshold are density values - - open is open morphology structuring element - odd size. 0 to turn off. max is 11 - - close is close morphology structuring element - odd size. 0 to turn off. max is 11 - - Outputs - regionMask, numberRegions - - regionMask is the labeled segment masks from 2D image - - numberRegions is the number of segmented blobs - """ - # morphology filters need to be clipped to 11 max and be odd - regionMask, numberRegions = S.region_grow(lowThreshold, highThreshold, close, open, image) - return regionMask, numberRegions - - -def get_slice(imageName='slice112.raw', bytes=2, rows=512, columns=512): - # get a slice alrady extracted from the CT volume - #image = open(imageName, 'rb') - #slice = image.read(rows*columns*bytes) - #values = struct.unpack('h'*rows*columns, slice) - #ImageSlice = N.array(values, dtype=float).reshape(rows, columns) - - ImageSlice = N.fromfile(imageName, dtype=N.uint16).reshape(rows, columns); - - # clip the ends for this test CT image file as the spine runs off the end of the image - ImageSlice[505:512, :] = 0 - return (ImageSlice).astype(float) - -def get_slice2(image_name='slice112.raw', bytes=2, shape=(512,512)): - import mmap - file = open(image_name, 'rb') - mm = mmap.mmap(file.fileno(), 0, access=mmap.ACCESS_READ) - slice = N.frombuffer(mm, dtype='u%d' % bytes).reshape(shape) - slice = slice.astype(float) - # this is for the test CT as spine runs off back of image - slice[505:512,:] = 0 - return slice - -def save_slice(mySlice, filename='junk.raw', bytes=4): - # just save the slice to a fixed file - slice = mySlice.astype('u%d' % bytes) - slice.tofile(filename) - -def build_d_gauss_kernel(gWidth=21, sigma=1.0): - - """ - build the derivative of Gaussian kernel for Canny edge filter - DGFilter = build_d_gauss_kernel(gWidth, sigma) - Inputs: - gWdith is width of derivative of Gaussian kernel - sigma is sigma term of derivative of Gaussian kernel - Output: - DGFilter (a struct). 
Use in Canny filter call - - """ - kernel = N.zeros((1+2*(gWidth-1)), dtype=float) - indices = range(1, gWidth) - - i = 0 - kernel[gWidth-1] = math.exp(((-i*i)/(2.0 * sigma * sigma))) - kernel[gWidth-1] *= -(i / (sigma * sigma)) - for i in indices: - kernel[gWidth-1+i] = math.exp(((-i*i)/(2.0 * sigma * sigma))) - kernel[gWidth-1+i] *= -(i / (sigma * sigma)) - kernel[gWidth-1-i] = -kernel[gWidth-1+i] - - DGFilter= {'kernelSize' : gWidth, 'coefficients': kernel} - - return DGFilter - -def build_2d_kernel(aperature=21, hiFilterCutoff=10.0): - - """ - build flat FIR filter with sinc kernel - this is bandpass, but low cutoff is 0.0 - Use in Sobel and Canny filter edge find as image pre-process - - FIRFilter = build_2d_kernel(aperature, hiFilterCutoff) - Inputs: - aperature is number of FIR taps in sinc kernel - hiFilterCutoff is digital frequency cutoff in range (0.0, 180.0) - Output: - FIRFilter (a struct) - - """ - - rad = math.pi / 180.0 - HalfFilterTaps = (aperature-1) / 2 - kernel = N.zeros((aperature), dtype=N.float32) - LC = 0.0 - HC = hiFilterCutoff * rad - t2 = 2.0 * math.pi - t1 = 2.0 * HalfFilterTaps + 1.0 - indices = range(-HalfFilterTaps, HalfFilterTaps+1, 1) - j = 0 - for i in indices: - if i == 0: - tLOW = LC - tHIGH = HC - else: - tLOW = math.sin(i*LC)/i - tHIGH = math.sin(i*HC)/i - # Hamming window - t3 = 0.54 + 0.46*(math.cos(i*t2/t1)) - t4 = t3*(tHIGH-tLOW) - kernel[j] = t4 - j += 1 - - # normalize the kernel - sum = kernel.sum() - kernel /= sum - - FIRFilter= {'kernelSize' : aperature, 'coefficients': kernel} - - return FIRFilter - - -def build_laws_kernel(): - - """ - build 6 length-7 Law's texture filter masks - mask names are: 'L', 'S', 'E', 'W', 'R', 'O' - - LAWSFilter = build_laws_kernel() - - Inputs: - None - - Output: - LAWSFilter (a struct) - - """ - aperature = (6, 7) - coefficients = N.zeros((aperature), dtype=N.float32) - names = ('L', 'E', 'S', 'W', 'R', 'O' ) - - coefficients[0, :] = ( 1.0, 6.0, 15.0, 20.0, 15.0, 6.0, 1.0 ) - coefficients[1, :] = (-1.0, -4.0, -5.0, 0.0, 5.0, 4.0, 1.0 ) - coefficients[2, :] = (-1.0, -2.0, 1.0, 4.0, 1.0, -2.0, -1.0 ) - coefficients[3, :] = (-1.0, 0.0, 3.0, 0.0, -3.0, 0.0, 1.0 ) - coefficients[4, :] = ( 1.0, -2.0, -1.0, 4.0, -1.0, -2.0, 1.0 ) - coefficients[5, :] = (-1.0, 6.0, -15.0, 20.0, -15.0, 6.0, -1.0 ) - - LAWSFilter= {'numKernels' : 6, 'kernelSize' : 7, 'coefficients': coefficients, 'names': names} - - return LAWSFilter - -def build_morpho_thin_masks(): - - """ - build 2 sets (J and K) of 8 3x3 morphology masks (structuring elements) - to implement thinning (medial axis transformation - MAT) - - MATFilter = build_morpho_thin_masks() - - Inputs: - None - - Output: - MATFilter (a struct) - - """ - - # (layers, rows, cols) - shape = (8, 3, 3) - J_mask = N.zeros((shape), dtype=N.ushort) - K_mask = N.zeros((shape), dtype=N.ushort) - - # load the 8 J masks for medial axis transformation - J_mask[0][0][0] = 1; - J_mask[0][0][1] = 1; - J_mask[0][0][2] = 1; - J_mask[0][1][1] = 1; - - J_mask[1][0][1] = 1; - J_mask[1][1][1] = 1; - J_mask[1][1][2] = 1; - - J_mask[2][0][0] = 1; - J_mask[2][1][0] = 1; - J_mask[2][2][0] = 1; - J_mask[2][1][1] = 1; - - J_mask[3][0][1] = 1; - J_mask[3][1][0] = 1; - J_mask[3][1][1] = 1; - - J_mask[4][0][2] = 1; - J_mask[4][1][1] = 1; - J_mask[4][1][2] = 1; - J_mask[4][2][2] = 1; - - J_mask[5][1][0] = 1; - J_mask[5][1][1] = 1; - J_mask[5][2][1] = 1; - - J_mask[6][1][1] = 1; - J_mask[6][2][0] = 1; - J_mask[6][2][1] = 1; - J_mask[6][2][2] = 1; - - J_mask[7][1][1] = 1; - J_mask[7][1][2] = 1; - J_mask[7][2][1] = 
1; - - - # load the 8 K masks for medial axis transformation - K_mask[0][2][0] = 1; - K_mask[0][2][1] = 1; - K_mask[0][2][2] = 1; - - K_mask[1][1][0] = 1; - K_mask[1][2][0] = 1; - K_mask[1][2][1] = 1; - - K_mask[2][0][2] = 1; - K_mask[2][1][2] = 1; - K_mask[2][2][2] = 1; - - K_mask[3][1][2] = 1; - K_mask[3][2][1] = 1; - K_mask[3][2][2] = 1; - - K_mask[4][0][0] = 1; - K_mask[4][1][0] = 1; - K_mask[4][2][0] = 1; - - K_mask[5][0][1] = 1; - K_mask[5][0][2] = 1; - K_mask[5][1][2] = 1; - - K_mask[6][0][0] = 1; - K_mask[6][0][1] = 1; - K_mask[6][0][2] = 1; - - K_mask[7][0][0] = 1; - K_mask[7][0][1] = 1; - K_mask[7][1][0] = 1; - - MATFilter = {'number3x3Masks' : 8, 'jmask' : J_mask, 'kmask' : K_mask} - - return MATFilter - Modified: trunk/scipy/ndimage/tests/test_segment.py =================================================================== --- trunk/scipy/ndimage/tests/test_segment.py 2008-02-12 01:25:33 UTC (rev 3917) +++ trunk/scipy/ndimage/tests/test_segment.py 2008-02-12 01:51:18 UTC (rev 3918) @@ -1,6 +1,6 @@ from scipy.testing import * -from scipy.ndimage.segmenter import * +from scipy.ndimage._segmenter import * inputname = 'slice112.raw' From scipy-svn at scipy.org Tue Feb 12 05:02:27 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Tue, 12 Feb 2008 04:02:27 -0600 (CST) Subject: [Scipy-svn] r3919 - trunk Message-ID: <20080212100227.28DBD39C031@new.scipy.org> Author: hagberg Date: 2008-02-12 04:01:54 -0600 (Tue, 12 Feb 2008) New Revision: 3919 Modified: trunk/THANKS.txt Log: test Modified: trunk/THANKS.txt =================================================================== --- trunk/THANKS.txt 2008-02-12 01:51:18 UTC (rev 3918) +++ trunk/THANKS.txt 2008-02-12 10:01:54 UTC (rev 3919) @@ -56,3 +56,4 @@ Brigham Young University -- for providing resources for students to work on SciPy. Agilent -- which gave a genereous donation for support of SciPy. +Test From scipy-svn at scipy.org Tue Feb 12 05:02:51 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Tue, 12 Feb 2008 04:02:51 -0600 (CST) Subject: [Scipy-svn] r3920 - trunk Message-ID: <20080212100251.5B15CC7C011@new.scipy.org> Author: hagberg Date: 2008-02-12 04:02:48 -0600 (Tue, 12 Feb 2008) New Revision: 3920 Modified: trunk/THANKS.txt Log: test Modified: trunk/THANKS.txt =================================================================== --- trunk/THANKS.txt 2008-02-12 10:01:54 UTC (rev 3919) +++ trunk/THANKS.txt 2008-02-12 10:02:48 UTC (rev 3920) @@ -56,4 +56,3 @@ Brigham Young University -- for providing resources for students to work on SciPy. Agilent -- which gave a genereous donation for support of SciPy. -Test From scipy-svn at scipy.org Tue Feb 12 12:35:42 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Tue, 12 Feb 2008 11:35:42 -0600 (CST) Subject: [Scipy-svn] r3921 - trunk Message-ID: <20080212173542.2C38F39C106@new.scipy.org> Author: wnbell Date: 2008-02-12 11:35:40 -0600 (Tue, 12 Feb 2008) New Revision: 3921 Modified: trunk/THANKS.txt Log: added Aric to list of contributors Modified: trunk/THANKS.txt =================================================================== --- trunk/THANKS.txt 2008-02-12 10:02:48 UTC (rev 3920) +++ trunk/THANKS.txt 2008-02-12 17:35:40 UTC (rev 3921) @@ -15,9 +15,11 @@ Please add names as needed so that we can keep up with all the contributors. 
-Nathan Bell -- sparsetools reimplementation (CSR/CSC matrix operations) +Kumar Appaiah -- Dolph Chebyshev window +Nathan Bell -- sparsetools, help with scipy.sparse and scipy.splinalg Robert Cimrman -- UMFpack wrapper for sparse matrix module David M. Cooke -- improvements to system_info, and LBFGSB wrapper +Aric Hagberg -- ARPACK wrappers, help with splinalg.eigen Chuck Harris -- Zeros package in optimize (1d root-finding algorithms) Prabhu Ramachandran -- improvements to gui_thread Robert Kern -- improvements to stats and bug-fixes @@ -27,7 +29,6 @@ sparse matrix module Travis Vaught -- initial work on stats module clean up Jeff Whitaker -- Mac OS X support -Kumar Appaiah -- Dolph Chebyshev window Testing: From scipy-svn at scipy.org Tue Feb 12 14:47:46 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Tue, 12 Feb 2008 13:47:46 -0600 (CST) Subject: [Scipy-svn] r3922 - in trunk/scipy/stats/models: . tests Message-ID: <20080212194746.B087F39C0FF@new.scipy.org> Author: jonathan.taylor Date: 2008-02-12 13:47:39 -0600 (Tue, 12 Feb 2008) New Revision: 3922 Modified: trunk/scipy/stats/models/formula.py trunk/scipy/stats/models/tests/test_formula.py Log: added __getitem__ method to Factor fixed bug in ordinal factors, added two tests Modified: trunk/scipy/stats/models/formula.py =================================================================== --- trunk/scipy/stats/models/formula.py 2008-02-12 17:35:40 UTC (rev 3921) +++ trunk/scipy/stats/models/formula.py 2008-02-12 19:47:39 UTC (rev 3922) @@ -136,18 +136,23 @@ def __init__(self, termname, keys, ordinal=False): """ - factor is initialized with keys, representing all valid + Factor is initialized with keys, representing all valid levels of the factor. + + If ordinal is True, the order is taken from the keys. """ - self.keys = list(set(keys)) - self.keys.sort() + if not ordinal: + self.keys = list(set(keys)) + self.keys.sort() + else: + self.keys = keys self._name = termname self.termname = termname self.ordinal = ordinal if self.ordinal: - name = self.name + name = self.termname else: name = ['(%s==%s)' % (self.termname, str(key)) for key in self.keys] @@ -166,12 +171,13 @@ v = v(*args, **kw) else: break + n = len(v) + if self.ordinal: - col = [float(self.keys.index(v[i])) for i in range(len(self.keys))] + col = [float(self.keys.index(v[i])) for i in range(n)] return N.array(col) else: - n = len(v) value = [] for key in self.keys: col = [float((v[i] == key)) for i in range(n)] @@ -241,6 +247,24 @@ value.namespace = self.namespace return value + def __getitem__(self, key): + """ + Retrieve the column corresponding to key in a Formula. 
+ + :Parameters: + key : one of the Factor's keys + + :Returns: ndarray corresponding to key, when evaluated in + current namespace + """ + if not self.ordinal: + i = self.names().index('(%s==%s)' % (self.termname, str(key))) + return self()[i] + else: + v = self.namespace[self._name] + return N.array([(vv == key) for vv in v]).astype(N.float) + + class Quantitative(Term): """ A subclass of term that can be used to apply point transformations Modified: trunk/scipy/stats/models/tests/test_formula.py =================================================================== --- trunk/scipy/stats/models/tests/test_formula.py 2008-02-12 17:35:40 UTC (rev 3921) +++ trunk/scipy/stats/models/tests/test_formula.py 2008-02-12 19:47:39 UTC (rev 3922) @@ -215,6 +215,39 @@ _m = N.array([r[0]-r[2],r[1]-r[2]]) assert_almost_equal(_m, m()) + def test_factor5(self): + f = ['a','b','c']*3 + fac = formula.Factor('ff', f) + fac.namespace = {'ff':f} + + assert_equal(fac(), [[1,0,0]*3, + [0,1,0]*3, + [0,0,1]*3]) + assert_equal(fac['a'], [1,0,0]*3) + assert_equal(fac['b'], [0,1,0]*3) + assert_equal(fac['c'], [0,0,1]*3) + + + def test_ordinal_factor(self): + f = ['a','b','c']*3 + fac = formula.Factor('ff', f, ordinal=True) + fac.namespace = {'ff':f} + + assert_equal(fac(), [0,1,2]*3) + assert_equal(fac['a'], [1,0,0]*3) + assert_equal(fac['b'], [0,1,0]*3) + assert_equal(fac['c'], [0,0,1]*3) + + def test_ordinal_factor2(self): + f = ['b','c', 'a']*3 + fac = formula.Factor('ff', ['a','b','c'], ordinal=True) + fac.namespace = {'ff':f} + + assert_equal(fac(), [1,2,0]*3) + assert_equal(fac['a'], [0,0,1]*3) + assert_equal(fac['b'], [1,0,0]*3) + assert_equal(fac['c'], [0,1,0]*3) + def test_contrast4(self): f = self.formula + self.terms[5] + self.terms[5] From scipy-svn at scipy.org Tue Feb 12 15:26:41 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Tue, 12 Feb 2008 14:26:41 -0600 (CST) Subject: [Scipy-svn] r3923 - trunk/scipy/stats/models Message-ID: <20080212202641.2E5F039C048@new.scipy.org> Author: jonathan.taylor Date: 2008-02-12 14:26:33 -0600 (Tue, 12 Feb 2008) New Revision: 3923 Modified: trunk/scipy/stats/models/formula.py Log: clarifying docstring, enforcing unique keys for ordinal factors Modified: trunk/scipy/stats/models/formula.py =================================================================== --- trunk/scipy/stats/models/formula.py 2008-02-12 19:47:39 UTC (rev 3922) +++ trunk/scipy/stats/models/formula.py 2008-02-12 20:26:33 UTC (rev 3923) @@ -139,7 +139,11 @@ Factor is initialized with keys, representing all valid levels of the factor. - If ordinal is True, the order is taken from the keys. + If ordinal is False, keys can have repeats: set(keys) is what is + used. + + If ordinal is True, the order is taken from the keys, and + there should be no repeats. 
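For example (a sketch following test_ordinal_factor2 in test_formula.py; array reprs shown as typical numpy output):

>>> f = ['b', 'c', 'a']*3
>>> fac = Factor('ff', ['a', 'b', 'c'], ordinal=True)
>>> fac.namespace = {'ff': f}
>>> fac()
array([ 1.,  2.,  0.,  1.,  2.,  0.,  1.,  2.,  0.])
>>> fac['a']
array([ 0.,  0.,  1.,  0.,  0.,  1.,  0.,  0.,  1.])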
""" if not ordinal: @@ -147,6 +151,8 @@ self.keys.sort() else: self.keys = keys + if len(set(keys)) != len(list(keys)): + raise ValueError, 'keys for ordinal Factor should be unique, in increasing order' self._name = termname self.termname = termname self.ordinal = ordinal From scipy-svn at scipy.org Tue Feb 12 16:13:14 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Tue, 12 Feb 2008 15:13:14 -0600 (CST) Subject: [Scipy-svn] r3924 - trunk/scipy/stats/models Message-ID: <20080212211314.6969F39C02E@new.scipy.org> Author: jonathan.taylor Date: 2008-02-12 15:13:12 -0600 (Tue, 12 Feb 2008) New Revision: 3924 Modified: trunk/scipy/stats/models/formula.py Log: added a function to generate all interactions up to a certain order of a sequence of terms Modified: trunk/scipy/stats/models/formula.py =================================================================== --- trunk/scipy/stats/models/formula.py 2008-02-12 20:26:33 UTC (rev 3923) +++ trunk/scipy/stats/models/formula.py 2008-02-12 21:13:12 UTC (rev 3924) @@ -631,7 +631,7 @@ I = Term('intercept', func=_intercept_fn) I.__doc__ = """ Intercept term in a formula. If intercept is the -only term in the formula, then a keywords argument +only term in the formula, then a keyword argument \'nrow\' is needed. >>> from scipy.stats.models.formula import Formula, I @@ -644,3 +644,45 @@ array([1, 1, 1, 1, 1]) """ + +def interactions(terms, order=2): + """ + Output all pairwise interactions up to a given order of a + sequence of terms. + + If order is greater than len(terms), it is treated as len(terms). + + >>> print interactions([Term(l) for l in ['a', 'b', 'c']]) + + >>> + >>> print interactions([Term(l) for l in ['a', 'b', 'c']], order=5) + + >>> + + """ + l = len(terms) + + values = {} + + # First order + + for o in range(order): + I = N.indices((l,)*(o+1)) + I.shape = (I.shape[0], N.product(I.shape[1:])) + for m in range(I.shape[1]): + + # only keep combinations that have unique entries + + if (N.unique(I[:,m]).shape == I[:,m].shape and + N.alltrue(N.equal(N.sort(I[:,m]), I[:,m]))): + ll = [terms[j] for j in I[:,m]] + v = ll[0] + for ii in range(len(ll)-1): + v *= ll[ii+1] + values[tuple(I[:,m])] = v + + value = values[(0,)]; del(values[(0,)]) + + for v in values.values(): + value += v + return value From scipy-svn at scipy.org Tue Feb 12 16:32:41 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Tue, 12 Feb 2008 15:32:41 -0600 (CST) Subject: [Scipy-svn] r3925 - trunk/scipy/stats/models/tests Message-ID: <20080212213241.58E1639C2A0@new.scipy.org> Author: jonathan.taylor Date: 2008-02-12 15:32:39 -0600 (Tue, 12 Feb 2008) New Revision: 3925 Modified: trunk/scipy/stats/models/tests/test_formula.py Log: fixing one test for ordinal factor, added tests of __sub__ method Modified: trunk/scipy/stats/models/tests/test_formula.py =================================================================== --- trunk/scipy/stats/models/tests/test_formula.py 2008-02-12 21:13:12 UTC (rev 3924) +++ trunk/scipy/stats/models/tests/test_formula.py 2008-02-12 21:32:39 UTC (rev 3925) @@ -230,7 +230,7 @@ def test_ordinal_factor(self): f = ['a','b','c']*3 - fac = formula.Factor('ff', f, ordinal=True) + fac = formula.Factor('ff', ['a','b','c'], ordinal=True) fac.namespace = {'ff':f} assert_equal(fac(), [0,1,2]*3) @@ -259,5 +259,24 @@ self.assertEquals(estimable, False) + def test_interactions(self): + + f = formula.interactions([formula.Term(l) for l in ['a', 'b', 'c']]) + assert_equal(set(f.termnames()), set(['a', 'b', 'c', 'a*b', 'a*c', 'b*c'])) + + 
f = formula.interactions([formula.Term(l) for l in ['a', 'b', 'c', 'd']], order=3) + assert_equal(set(f.termnames()), set(['a', 'b', 'c', 'd', 'a*b', 'a*c', 'a*d', 'b*c', 'b*d', 'c*d', 'a*b*c', 'a*c*d', 'a*b*d', 'b*c*d'])) + + def test_subtract(self): + f = formula.interactions([formula.Term(l) for l in ['a', 'b', 'c']]) + ff = f - f['a*b'] + assert_equal(set(ff.termnames()), set(['a', 'b', 'c', 'a*c', 'b*c'])) + + ff = f - f['a*b'] - f['a*c'] + assert_equal(set(ff.termnames()), set(['a', 'b', 'c', 'b*c'])) + + ff = f - (f['a*b'] + f['a*c']) + assert_equal(set(ff.termnames()), set(['a', 'b', 'c', 'b*c'])) + if __name__ == "__main__": nose.run(argv=['', __file__]) From scipy-svn at scipy.org Tue Feb 12 18:31:56 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Tue, 12 Feb 2008 17:31:56 -0600 (CST) Subject: [Scipy-svn] r3926 - trunk/scipy/ndimage Message-ID: <20080212233156.C954D39C046@new.scipy.org> Author: chris.burns Date: 2008-02-12 17:31:54 -0600 (Tue, 12 Feb 2008) New Revision: 3926 Modified: trunk/scipy/ndimage/_segmenter.py Log: Add stub for documenting C extension functions. Modified: trunk/scipy/ndimage/_segmenter.py =================================================================== --- trunk/scipy/ndimage/_segmenter.py 2008-02-12 21:32:39 UTC (rev 3925) +++ trunk/scipy/ndimage/_segmenter.py 2008-02-12 23:31:54 UTC (rev 3926) @@ -10,6 +10,14 @@ Use this module minimally, if at all, until it this warning is removed." warnings.warn(_msg, UserWarning) +# TODO: Add docstrings for public functions in extension code. +# Add docstrings to extension code. +#from numpy.lib import add_newdoc +#add_newdoc('scipy.ndimage._segment', 'canny_edges', +# """Canney edge detector. +# """) + + # WARNING: _objstruct data structure mirrors a corresponding data structure # in ndImage_Segmenter_structs.h that is built into the _segment.so library. # These structs must match! From scipy-svn at scipy.org Tue Feb 12 18:37:45 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Tue, 12 Feb 2008 17:37:45 -0600 (CST) Subject: [Scipy-svn] r3927 - trunk/scipy/ndimage/src/segment Message-ID: <20080212233745.8C15739C046@new.scipy.org> Author: tom.waite Date: 2008-02-12 17:37:43 -0600 (Tue, 12 Feb 2008) New Revision: 3927 Modified: trunk/scipy/ndimage/src/segment/Segmenter_IMPL.c Log: Remove prints in texture filter Modified: trunk/scipy/ndimage/src/segment/Segmenter_IMPL.c =================================================================== --- trunk/scipy/ndimage/src/segment/Segmenter_IMPL.c 2008-02-12 23:31:54 UTC (rev 3926) +++ trunk/scipy/ndimage/src/segment/Segmenter_IMPL.c 2008-02-12 23:37:43 UTC (rev 3927) @@ -2880,7 +2880,7 @@ if(count == 0){ // debug statement - printf("no samples for texture\n"); + // printf("no samples for texture\n"); return; } From scipy-svn at scipy.org Tue Feb 12 19:00:30 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Tue, 12 Feb 2008 18:00:30 -0600 (CST) Subject: [Scipy-svn] r3928 - in trunk/scipy/ndimage: . src src/register Message-ID: <20080213000030.537DD39C039@new.scipy.org> Author: chris.burns Date: 2008-02-12 18:00:26 -0600 (Tue, 12 Feb 2008) New Revision: 3928 Added: trunk/scipy/ndimage/src/register/ Removed: trunk/scipy/ndimage/register/ trunk/scipy/ndimage/src/register/__init__.py trunk/scipy/ndimage/src/register/setup.py Modified: trunk/scipy/ndimage/registration.py trunk/scipy/ndimage/setup.py Log: Reorg package structure for ndimage.register. 
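For code that used the old layout, the visible effect of the reorganization is the import path: the register sub-package (whose __init__.py simply did `from _register import *`) goes away and the extension is built directly under scipy.ndimage, so the hunk in registration.py below amounts to:

    # before r3928: register was a thin sub-package around the C extension
    import scipy.ndimage.register as R
    # after r3928: the extension module is imported directly
    import scipy.ndimage._register as R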
Modified: trunk/scipy/ndimage/registration.py =================================================================== --- trunk/scipy/ndimage/registration.py 2008-02-12 23:37:43 UTC (rev 3927) +++ trunk/scipy/ndimage/registration.py 2008-02-13 00:00:26 UTC (rev 3928) @@ -1,7 +1,7 @@ import math import os import numpy as N -import scipy.ndimage.register as R +import scipy.ndimage._register as R import scipy.special as SP import scipy.ndimage as NDI import scipy.optimize as OPT Modified: trunk/scipy/ndimage/setup.py =================================================================== --- trunk/scipy/ndimage/setup.py 2008-02-12 23:37:43 UTC (rev 3927) +++ trunk/scipy/ndimage/setup.py 2008-02-13 00:00:26 UTC (rev 3928) @@ -20,8 +20,12 @@ depends = ['src/segment/ndImage_Segmenter_structs.h'] ) + config.add_extension('_register', + sources=['src/register/Register_EXT.c', + 'src/register/Register_IMPL.c'] + ) + config.add_data_dir('tests') - config.add_subpackage('register') return config Copied: trunk/scipy/ndimage/src/register (from rev 3927, trunk/scipy/ndimage/register) Deleted: trunk/scipy/ndimage/src/register/__init__.py =================================================================== --- trunk/scipy/ndimage/register/__init__.py 2008-02-12 23:37:43 UTC (rev 3927) +++ trunk/scipy/ndimage/src/register/__init__.py 2008-02-13 00:00:26 UTC (rev 3928) @@ -1,5 +0,0 @@ -# Register package -# Author: Tom Waite, 2008 - -from _register import * - Deleted: trunk/scipy/ndimage/src/register/setup.py =================================================================== --- trunk/scipy/ndimage/register/setup.py 2008-02-12 23:37:43 UTC (rev 3927) +++ trunk/scipy/ndimage/src/register/setup.py 2008-02-13 00:00:26 UTC (rev 3928) @@ -1,20 +0,0 @@ - -#!/usr/bin/env python - -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - - config = Configuration('register', parent_package, top_path) - - config.add_extension('_register', - sources=['Register_EXT.c', - 'Register_IMPL.c'] - ) - - return config - -if __name__ == '__main__': - from numpy.distutils.core import setup - setup(**configuration(top_path='').todict()) - - From scipy-svn at scipy.org Tue Feb 12 19:12:01 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Tue, 12 Feb 2008 18:12:01 -0600 (CST) Subject: [Scipy-svn] r3929 - trunk/scipy/ndimage Message-ID: <20080213001201.3631039C0C2@new.scipy.org> Author: chris.burns Date: 2008-02-12 18:11:57 -0600 (Tue, 12 Feb 2008) New Revision: 3929 Added: trunk/scipy/ndimage/_registration.py Removed: trunk/scipy/ndimage/registration.py Log: Make registration a private module. Issue development warning on import. Copied: trunk/scipy/ndimage/_registration.py (from rev 3928, trunk/scipy/ndimage/registration.py) =================================================================== --- trunk/scipy/ndimage/registration.py 2008-02-13 00:00:26 UTC (rev 3928) +++ trunk/scipy/ndimage/_registration.py 2008-02-13 00:11:57 UTC (rev 3929) @@ -0,0 +1,457 @@ +import math +import os +import numpy as N +import scipy.ndimage._register as R +import scipy.special as SP +import scipy.ndimage as NDI +import scipy.optimize as OPT +import time + +# Issue warning regarding heavy development status of this module +import warnings +_msg = "The registration code is under heavy development and therefore the \ +public API will change in the future. The NIPY group is actively working on \ +this code, and has every intention of generalizing this for the Scipy \ +community. 
Use this module minimally, if at all, until it this warning is \ +removed." +warnings.warn(_msg, UserWarning) + +# TODO: Add docstrings for public functions in extension code. +# Add docstrings to extension code. +#from numpy.lib import add_newdoc +#add_newdoc('scipy.ndimage._register', 'register_histogram', +# """A joint histogram used for registration module. +# """) + + +# anatomical MRI to test with +inputname = 'ANAT1_V0001.img' +filename = os.path.join(os.path.split(__file__)[0], inputname) + +def remap_image(image, parm_vector): + M_inverse = get_inverse_mappings(parm_vector) + # allocate the zero image + remaped_image = load_blank_image() + imdata = build_structs(step=1) + # trilinear interpolation mapping. to be replaced with splines + R.register_linear_resample(image['data'], remaped_image['data'], M_inverse, imdata['step']) + return remaped_image + +def get_inverse_mappings(parm_vector): + # get the inverse mapping to rotate the G matrix to F space following registration + imdata = build_structs(step=1) + # inverse angles and translations + imdata['parms'][0] = -parm_vector[0] + imdata['parms'][1] = -parm_vector[1] + imdata['parms'][2] = -parm_vector[2] + imdata['parms'][3] = -parm_vector[3] + imdata['parms'][4] = -parm_vector[4] + imdata['parms'][5] = -parm_vector[5] + M_inverse = build_rotate_matrix(imdata['parms']) + return M_inverse + +def python_coreg(image1, image2, imdata, ftype=1, smimage=0, lite=0, smhist=0, + method='nmi', opt_method='powell'): + # image1 is imageF and image2 is imageG in SPM lingo + # get these from get_test_images for the debug work + start = time.time() + # smooth of the images + if smimage: + image_F_xyz1 = filter_image_3D(image1['data'], image1['fwhm'], ftype) + image1['data'] = image_F_xyz1 + image_F_xyz2 = filter_image_3D(image2['data'], image2['fwhm'], ftype) + image2['data'] = image_F_xyz2 + parm_vector = multires_registration(image1, image2, imdata, lite, smhist, method, opt_method) + stop = time.time() + print 'Total Optimizer Time is ', (stop-start) + return parm_vector + +def get_test_images(alpha=0.0, beta=0.0, gamma=0.0): + image1 = load_image() + image2 = load_blank_image() + imdata = build_structs(step=1) + # allow the G image to be rotated for testing + imdata['parms'][0] = alpha + imdata['parms'][1] = beta + imdata['parms'][2] = gamma + image1['fwhm'] = build_fwhm(image1['mat'], imdata['step']) + image2['fwhm'] = build_fwhm(image2['mat'], imdata['step']) + M = build_rotate_matrix(imdata['parms']) + R.register_linear_resample(image1['data'], image2['data'], M, imdata['step']) + return image1, image2, imdata + +def multires_registration(image1, image2, imdata, lite, smhist, method, opt_method): + ret_histo=0 + # zero out the start parameter; but this may be set to large values + # if the head is out of range and well off the optimal alignment skirt + imdata['parms'][0:5] = 0.0 + # make the step a scalar to can put in a multi-res loop + loop = range(imdata['sample'].size) + x = imdata['parms'] + for i in loop: + step = imdata['sample'][i] + imdata['step'][:] = step + optfunc_args = (image1, image2, imdata['step'], imdata['fwhm'], lite, smhist, method, ret_histo) + p_args = (optfunc_args,) + if opt_method=='powell': + print 'POWELL multi-res registration step size ', step + print 'vector ', x + x = OPT.fmin_powell(optimize_function, x, args=p_args, callback=callback_powell) + elif opt_method=='cg': + print 'CG multi-res registration step size ', step + print 'vector ', x + x = OPT.fmin_cg(optimize_function, x, args=p_args, 
callback=callback_cg) + elif opt_method=='hybrid': + if i==0: + print 'Hybrid POWELL multi-res registration step size ', step + print 'vector ', x + lite = 0 + optfunc_args = (image1, image2, imdata['step'], imdata['fwhm'], lite, smhist, method, ret_histo) + p_args = (optfunc_args,) + x = OPT.fmin_powell(optimize_function, x, args=p_args, callback=callback_powell) + elif i==1: + print 'Hybrid CG multi-res registration step size ', step + print 'vector ', x + lite = 1 + optfunc_args = (image1, image2, imdata['step'], imdata['fwhm'], lite, smhist, method, ret_histo) + p_args = (optfunc_args,) + x = OPT.fmin_cg(optimize_function, x, args=p_args, callback=callback_cg) + + return x + + +def test_image_filter(image, imdata, ftype=2): + # test the 3D image filter on an image. ftype 1 is SPM, ftype 2 is simple Gaussian + image['fwhm'] = build_fwhm(image['mat'], imdata['step']) + filt_image = filter_image_3D(image['data'], image['fwhm'], ftype) + return filt_image + +def callback_powell(x): + print 'Parameter Vector from Powell: - ' + print x + return + +def callback_cg(x): + print 'Parameter Vector from Conjugate Gradient: - ' + print x + return + +def test_alignment(image1, image2, imdata, method='ncc', lite=0, smhist=0, + alpha=0.0, beta=0.0, gamma=0.0, ret_histo=0): + + # to test the cost function and view the joint histogram + # for 2 images. used for debug + imdata['parms'][0] = alpha + imdata['parms'][1] = beta + imdata['parms'][2] = gamma + M = build_rotate_matrix(imdata['parms']) + optfunc_args = (image1, image2, imdata['step'], imdata['fwhm'], lite, smhist, method, ret_histo) + + if ret_histo: + cost, joint_histogram = optimize_function(imdata['parms'], optfunc_args) + return cost, joint_histogram + else: + cost = optimize_function(imdata['parms'], optfunc_args) + return cost + + +def smooth_kernel(fwhm, x, ktype=1): + eps = 0.00001 + s = N.square((fwhm/math.sqrt(8.0*math.log(2.0)))) + eps + if ktype==1: + # from SPM: Gauss kernel convolved with 1st degree B spline + w1 = 0.5 * math.sqrt(2.0/s) + w2 = -0.5 / s + w3 = math.sqrt((s*math.pi) /2.0) + kernel = 0.5*(SP.erf(w1*(x+1))*(x+1) + SP.erf(w1*(x-1))*(x-1) - 2.0*SP.erf(w1*(x))*(x) + + w3*(N.exp(w2*N.square(x+1))) + N.exp(w2*(N.square(x-1))) - 2.0*N.exp(w2*N.square(x))) + kernel[kernel<0] = 0 + kernel = kernel / kernel.sum() + else: + # Gauss kernel + kernel = (1.0/math.sqrt(2.0*math.pi*s)) * N.exp(-N.square(x)/(2.0*s)) + kernel = kernel / kernel.sum() + + return kernel + +def filter_image_3D(imageRaw, fwhm, ftype=2): + p = N.ceil(2*fwhm[0]).astype(int) + x = N.array(range(-p, p+1)) + kernel_x = smooth_kernel(fwhm[0], x, ktype=ftype) + p = N.ceil(2*fwhm[1]).astype(int) + x = N.array(range(-p, p+1)) + kernel_y = smooth_kernel(fwhm[1], x, ktype=ftype) + p = N.ceil(2*fwhm[2]).astype(int) + x = N.array(range(-p, p+1)) + kernel_z = smooth_kernel(fwhm[2], x, ktype=ftype) + output=None + # 3D filter in 3 1D separable stages + axis = 0 + image_F_x = NDI.correlate1d(imageRaw, kernel_x, axis, output) + axis = 1 + image_F_xy = NDI.correlate1d(image_F_x, kernel_y, axis, output) + axis = 2 + image_F_xyz = NDI.correlate1d(image_F_xy, kernel_z, axis, output) + return image_F_xyz + + +def resample_image(smimage=0, ftype=2, alpha=0.0, beta=0.0, gamma=0.0, + Tx=0.0, Ty=0.0, Tz=0.0, stepsize=1): + + # takes an image and 3D rotate using trilinear interpolation + image1 = load_image() + image2 = load_blank_image() + imdata = build_structs(step=stepsize) + imdata['parms'][0] = alpha + imdata['parms'][1] = beta + imdata['parms'][2] = gamma + imdata['parms'][3] 
= Tx + imdata['parms'][4] = Ty + imdata['parms'][5] = Tz + image1['fwhm'] = build_fwhm(image1['mat'], imdata['step']) + image2['fwhm'] = build_fwhm(image2['mat'], imdata['step']) + M = build_rotate_matrix(imdata['parms']) + if smimage: + image_F_xyz1 = filter_image_3D(image1['data'], image1['fwhm'], ftype) + image1['data'] = image_F_xyz1 + image_F_xyz2 = filter_image_3D(image2['data'], image2['fwhm'], ftype) + image2['data'] = image_F_xyz2 + + R.register_linear_resample(image1['data'], image2['data'], M, imdata['step']) + + return image2 + + +def build_fwhm(M, S): + view_3x3 = N.square(M[0:3, 0:3]) + vxg = N.sqrt(view_3x3.sum(axis=0)) + # assumes that sampling is the same for xyz + size = N.array([1,1,1])*S[0] + x = N.square(size) - N.square(vxg) + # clip + x[x<0] = 0 + fwhm = N.sqrt(x) / vxg + # pathology when stepsize = 1 for MAT equal to the identity matrix + fwhm[fwhm==0] = 1 + # return the 3D Gaussian kernel width (xyz) + return fwhm + +def load_image(imagename=filename, rows=256, cols=256, layers=90): + ImageVolume = N.fromfile(imagename, dtype=N.uint16).reshape(layers, rows, cols); + # clip to 8 bits. this will be rescale to 8 bits for fMRI + ImageVolume[ImageVolume>255] = 255 + # voxel to pixel is identity for this simulation using anatomical MRI volume + # 4x4 matrix + M = N.eye(4, dtype=N.float64); + # dimensions + D = N.zeros(3, dtype=N.int32); + # Gaussian kernel - fill in with build_fwhm() + F = N.zeros(3, dtype=N.float64); + D[0] = rows + D[1] = cols + D[2] = layers + # make sure the data type is uchar + ImageVolume = ImageVolume.astype(N.uint8) + image = {'data' : ImageVolume, 'mat' : M, 'dim' : D, 'fwhm' : F} + return image + +def load_blank_image(rows=256, cols=256, layers=90): + ImageVolume = N.zeros(layers*rows*cols, dtype=N.uint16).reshape(layers, rows, cols); + # voxel to pixel is identity for this simulation using anatomical MRI volume + # 4x4 matrix + M = N.eye(4, dtype=N.float64); + # dimensions + D = N.zeros(3, dtype=N.int32); + # Gaussian kernel - fill in with build_fwhm() + F = N.zeros(3, dtype=N.float64); + D[0] = rows + D[1] = cols + D[2] = layers + # make sure the data type is uchar + ImageVolume = ImageVolume.astype(N.uint8) + image = {'data' : ImageVolume, 'mat' : M, 'dim' : D, 'fwhm' : F} + return image + +def optimize_function(x, optfunc_args): + image_F = optfunc_args[0] + image_G = optfunc_args[1] + sample_vector = optfunc_args[2] + fwhm = optfunc_args[3] + do_lite = optfunc_args[4] + smooth = optfunc_args[5] + method = optfunc_args[6] + ret_histo = optfunc_args[7] + + rot_matrix = build_rotate_matrix(x) + cost = 0.0 + epsilon = 2.2e-16 + # image_G is base image + # image_F is the to-be-rotated image + # rot_matrix is the 4x4 constructed (current angles and translates) transform matrix + # sample_vector is the subsample vector for x-y-z + + F_inv = N.linalg.inv(image_F['mat']) + composite = N.dot(F_inv, rot_matrix) + composite = N.dot(composite, image_G['mat']) + + # allocate memory from Python as memory leaks when created in C-ext + joint_histogram = N.zeros([256, 256], dtype=N.float64); + + if do_lite: + R.register_histogram_lite(image_F['data'], image_G['data'], composite, sample_vector, joint_histogram) + else: + R.register_histogram(image_F['data'], image_G['data'], composite, sample_vector, joint_histogram) + + # smooth the histogram + if smooth: + p = N.ceil(2*fwhm[0]).astype(int) + x = N.array(range(-p, p+1)) + kernel1 = smooth_kernel(fwhm[0], x) + p = N.ceil(2*fwhm[1]).astype(int) + x = N.array(range(-p, p+1)) + kernel2 = smooth_kernel(fwhm[1], 
x) + output=None + # 2D filter in 1D separable stages + axis = 0 + result = NDI.correlate1d(joint_histogram, kernel1, axis, output) + axis = 1 + joint_histogram = NDI.correlate1d(result, kernel1, axis, output) + + joint_histogram += epsilon # prevent log(0) + # normalize the joint histogram + joint_histogram /= joint_histogram.sum() + # get the marginals + marginal_col = joint_histogram.sum(axis=0) + marginal_row = joint_histogram.sum(axis=1) + + if method == 'mi': + # mutual information + marginal_outer = N.outer(marginal_col, marginal_row) + H = joint_histogram * N.log(joint_histogram / marginal_outer) + mutual_information = H.sum() + cost = -mutual_information + + elif method == 'ecc': + # entropy correlation coefficient + marginal_outer = N.outer(marginal_col, marginal_row) + H = joint_histogram * N.log(joint_histogram / marginal_outer) + mutual_information = H.sum() + row_entropy = marginal_row * N.log(marginal_row) + col_entropy = marginal_col * N.log(marginal_col) + ecc = -2.0*mutual_information/(row_entropy.sum() + col_entropy.sum()) + cost = -ecc + + elif method == 'nmi': + # normalized mutual information + row_entropy = marginal_row * N.log(marginal_row) + col_entropy = marginal_col * N.log(marginal_col) + H = joint_histogram * N.log(joint_histogram) + nmi = (row_entropy.sum() + col_entropy.sum()) / (H.sum()) + cost = -nmi + + elif method == 'ncc': + # cross correlation from the joint histogram + r, c = joint_histogram.shape + i = N.array(range(1,c+1)) + j = N.array(range(1,r+1)) + m1 = (marginal_row * i).sum() + m2 = (marginal_col * j).sum() + sig1 = N.sqrt((marginal_row*(N.square(i-m1))).sum()) + sig2 = N.sqrt((marginal_col*(N.square(j-m2))).sum()) + [a, b] = N.mgrid[1:c+1, 1:r+1] + a = a - m1 + b = b - m2 + # element multiplies in the joint histogram and grids + H = ((joint_histogram * a) * b).sum() + ncc = H / (N.dot(sig1, sig2)) + cost = -ncc + + if ret_histo: + return cost, joint_histogram + else: + return cost + + +def build_structs(step=2): + # build image data structures here + P = N.zeros(6, dtype=N.float64); + T = N.zeros(6, dtype=N.float64); + F = N.zeros(2, dtype=N.int32); + S = N.ones(3, dtype=N.int32); + sample = N.zeros(2, dtype=N.int32); + S[0] = step + S[1] = step + S[2] = step + # histogram smoothing + F[0] = 3 + F[1] = 3 + # subsample for multiresolution registration + sample[0] = 4 + sample[1] = 2 + # tolerances for angle (0-2) and translation (3-5) + T[0] = 0.02 + T[1] = 0.02 + T[2] = 0.02 + T[3] = 0.001 + T[4] = 0.001 + T[5] = 0.001 + # P[0] = alpha <=> pitch. + alpha is moving back in the sagittal plane + # P[1] = beta <=> roll. + beta is moving right in the coronal plane + # P[2] = gamma <=> yaw. 
+ gamma is right turn in the transverse plane + # P[3] = Tx + # P[4] = Ty + # P[5] = Tz + img_data = {'parms' : P, 'step' : S, 'fwhm' : F, 'tol' : T, 'sample' : sample} + return img_data + + +def build_rotate_matrix(img_data_parms): + R1 = N.zeros([4,4], dtype=N.float64); + R2 = N.zeros([4,4], dtype=N.float64); + R3 = N.zeros([4,4], dtype=N.float64); + T = N.eye(4, dtype=N.float64); + + alpha = math.radians(img_data_parms[0]) + beta = math.radians(img_data_parms[1]) + gamma = math.radians(img_data_parms[2]) + + R1[0][0] = 1.0 + R1[1][1] = math.cos(alpha) + R1[1][2] = math.sin(alpha) + R1[2][1] = -math.sin(alpha) + R1[2][2] = math.cos(alpha) + R1[3][3] = 1.0 + + R2[0][0] = math.cos(beta) + R2[0][2] = math.sin(beta) + R2[1][1] = 1.0 + R2[2][0] = -math.sin(beta) + R2[2][2] = math.cos(beta) + R2[3][3] = 1.0 + + R3[0][0] = math.cos(gamma) + R3[0][1] = math.sin(gamma) + R3[1][0] = -math.sin(gamma) + R3[1][1] = math.cos(gamma) + R3[2][2] = 1.0 + R3[3][3] = 1.0 + + T[0][0] = 1.0 + T[1][1] = 1.0 + T[2][2] = 1.0 + T[3][3] = 1.0 + T[0][3] = img_data_parms[3] + T[1][3] = img_data_parms[4] + T[2][3] = img_data_parms[5] + + rot_matrix = N.dot(T, R1); + rot_matrix = N.dot(rot_matrix, R2); + rot_matrix = N.dot(rot_matrix, R3); + + return rot_matrix + + + + + + Deleted: trunk/scipy/ndimage/registration.py =================================================================== --- trunk/scipy/ndimage/registration.py 2008-02-13 00:00:26 UTC (rev 3928) +++ trunk/scipy/ndimage/registration.py 2008-02-13 00:11:57 UTC (rev 3929) @@ -1,440 +0,0 @@ -import math -import os -import numpy as N -import scipy.ndimage._register as R -import scipy.special as SP -import scipy.ndimage as NDI -import scipy.optimize as OPT -import time - -# anatomical MRI to test with -inputname = 'ANAT1_V0001.img' -filename = os.path.join(os.path.split(__file__)[0], inputname) - -def remap_image(image, parm_vector): - M_inverse = get_inverse_mappings(parm_vector) - # allocate the zero image - remaped_image = load_blank_image() - imdata = build_structs(step=1) - # trilinear interpolation mapping. 
to be replaced with splines - R.register_linear_resample(image['data'], remaped_image['data'], M_inverse, imdata['step']) - return remaped_image - -def get_inverse_mappings(parm_vector): - # get the inverse mapping to rotate the G matrix to F space following registration - imdata = build_structs(step=1) - # inverse angles and translations - imdata['parms'][0] = -parm_vector[0] - imdata['parms'][1] = -parm_vector[1] - imdata['parms'][2] = -parm_vector[2] - imdata['parms'][3] = -parm_vector[3] - imdata['parms'][4] = -parm_vector[4] - imdata['parms'][5] = -parm_vector[5] - M_inverse = build_rotate_matrix(imdata['parms']) - return M_inverse - -def python_coreg(image1, image2, imdata, ftype=1, smimage=0, lite=0, smhist=0, - method='nmi', opt_method='powell'): - # image1 is imageF and image2 is imageG in SPM lingo - # get these from get_test_images for the debug work - start = time.time() - # smooth of the images - if smimage: - image_F_xyz1 = filter_image_3D(image1['data'], image1['fwhm'], ftype) - image1['data'] = image_F_xyz1 - image_F_xyz2 = filter_image_3D(image2['data'], image2['fwhm'], ftype) - image2['data'] = image_F_xyz2 - parm_vector = multires_registration(image1, image2, imdata, lite, smhist, method, opt_method) - stop = time.time() - print 'Total Optimizer Time is ', (stop-start) - return parm_vector - -def get_test_images(alpha=0.0, beta=0.0, gamma=0.0): - image1 = load_image() - image2 = load_blank_image() - imdata = build_structs(step=1) - # allow the G image to be rotated for testing - imdata['parms'][0] = alpha - imdata['parms'][1] = beta - imdata['parms'][2] = gamma - image1['fwhm'] = build_fwhm(image1['mat'], imdata['step']) - image2['fwhm'] = build_fwhm(image2['mat'], imdata['step']) - M = build_rotate_matrix(imdata['parms']) - R.register_linear_resample(image1['data'], image2['data'], M, imdata['step']) - return image1, image2, imdata - -def multires_registration(image1, image2, imdata, lite, smhist, method, opt_method): - ret_histo=0 - # zero out the start parameter; but this may be set to large values - # if the head is out of range and well off the optimal alignment skirt - imdata['parms'][0:5] = 0.0 - # make the step a scalar to can put in a multi-res loop - loop = range(imdata['sample'].size) - x = imdata['parms'] - for i in loop: - step = imdata['sample'][i] - imdata['step'][:] = step - optfunc_args = (image1, image2, imdata['step'], imdata['fwhm'], lite, smhist, method, ret_histo) - p_args = (optfunc_args,) - if opt_method=='powell': - print 'POWELL multi-res registration step size ', step - print 'vector ', x - x = OPT.fmin_powell(optimize_function, x, args=p_args, callback=callback_powell) - elif opt_method=='cg': - print 'CG multi-res registration step size ', step - print 'vector ', x - x = OPT.fmin_cg(optimize_function, x, args=p_args, callback=callback_cg) - elif opt_method=='hybrid': - if i==0: - print 'Hybrid POWELL multi-res registration step size ', step - print 'vector ', x - lite = 0 - optfunc_args = (image1, image2, imdata['step'], imdata['fwhm'], lite, smhist, method, ret_histo) - p_args = (optfunc_args,) - x = OPT.fmin_powell(optimize_function, x, args=p_args, callback=callback_powell) - elif i==1: - print 'Hybrid CG multi-res registration step size ', step - print 'vector ', x - lite = 1 - optfunc_args = (image1, image2, imdata['step'], imdata['fwhm'], lite, smhist, method, ret_histo) - p_args = (optfunc_args,) - x = OPT.fmin_cg(optimize_function, x, args=p_args, callback=callback_cg) - - return x - - -def test_image_filter(image, imdata, ftype=2): - 
# test the 3D image filter on an image. ftype 1 is SPM, ftype 2 is simple Gaussian - image['fwhm'] = build_fwhm(image['mat'], imdata['step']) - filt_image = filter_image_3D(image['data'], image['fwhm'], ftype) - return filt_image - -def callback_powell(x): - print 'Parameter Vector from Powell: - ' - print x - return - -def callback_cg(x): - print 'Parameter Vector from Conjugate Gradient: - ' - print x - return - -def test_alignment(image1, image2, imdata, method='ncc', lite=0, smhist=0, - alpha=0.0, beta=0.0, gamma=0.0, ret_histo=0): - - # to test the cost function and view the joint histogram - # for 2 images. used for debug - imdata['parms'][0] = alpha - imdata['parms'][1] = beta - imdata['parms'][2] = gamma - M = build_rotate_matrix(imdata['parms']) - optfunc_args = (image1, image2, imdata['step'], imdata['fwhm'], lite, smhist, method, ret_histo) - - if ret_histo: - cost, joint_histogram = optimize_function(imdata['parms'], optfunc_args) - return cost, joint_histogram - else: - cost = optimize_function(imdata['parms'], optfunc_args) - return cost - - -def smooth_kernel(fwhm, x, ktype=1): - eps = 0.00001 - s = N.square((fwhm/math.sqrt(8.0*math.log(2.0)))) + eps - if ktype==1: - # from SPM: Gauss kernel convolved with 1st degree B spline - w1 = 0.5 * math.sqrt(2.0/s) - w2 = -0.5 / s - w3 = math.sqrt((s*math.pi) /2.0) - kernel = 0.5*(SP.erf(w1*(x+1))*(x+1) + SP.erf(w1*(x-1))*(x-1) - 2.0*SP.erf(w1*(x))*(x) + - w3*(N.exp(w2*N.square(x+1))) + N.exp(w2*(N.square(x-1))) - 2.0*N.exp(w2*N.square(x))) - kernel[kernel<0] = 0 - kernel = kernel / kernel.sum() - else: - # Gauss kernel - kernel = (1.0/math.sqrt(2.0*math.pi*s)) * N.exp(-N.square(x)/(2.0*s)) - kernel = kernel / kernel.sum() - - return kernel - -def filter_image_3D(imageRaw, fwhm, ftype=2): - p = N.ceil(2*fwhm[0]).astype(int) - x = N.array(range(-p, p+1)) - kernel_x = smooth_kernel(fwhm[0], x, ktype=ftype) - p = N.ceil(2*fwhm[1]).astype(int) - x = N.array(range(-p, p+1)) - kernel_y = smooth_kernel(fwhm[1], x, ktype=ftype) - p = N.ceil(2*fwhm[2]).astype(int) - x = N.array(range(-p, p+1)) - kernel_z = smooth_kernel(fwhm[2], x, ktype=ftype) - output=None - # 3D filter in 3 1D separable stages - axis = 0 - image_F_x = NDI.correlate1d(imageRaw, kernel_x, axis, output) - axis = 1 - image_F_xy = NDI.correlate1d(image_F_x, kernel_y, axis, output) - axis = 2 - image_F_xyz = NDI.correlate1d(image_F_xy, kernel_z, axis, output) - return image_F_xyz - - -def resample_image(smimage=0, ftype=2, alpha=0.0, beta=0.0, gamma=0.0, - Tx=0.0, Ty=0.0, Tz=0.0, stepsize=1): - - # takes an image and 3D rotate using trilinear interpolation - image1 = load_image() - image2 = load_blank_image() - imdata = build_structs(step=stepsize) - imdata['parms'][0] = alpha - imdata['parms'][1] = beta - imdata['parms'][2] = gamma - imdata['parms'][3] = Tx - imdata['parms'][4] = Ty - imdata['parms'][5] = Tz - image1['fwhm'] = build_fwhm(image1['mat'], imdata['step']) - image2['fwhm'] = build_fwhm(image2['mat'], imdata['step']) - M = build_rotate_matrix(imdata['parms']) - if smimage: - image_F_xyz1 = filter_image_3D(image1['data'], image1['fwhm'], ftype) - image1['data'] = image_F_xyz1 - image_F_xyz2 = filter_image_3D(image2['data'], image2['fwhm'], ftype) - image2['data'] = image_F_xyz2 - - R.register_linear_resample(image1['data'], image2['data'], M, imdata['step']) - - return image2 - - -def build_fwhm(M, S): - view_3x3 = N.square(M[0:3, 0:3]) - vxg = N.sqrt(view_3x3.sum(axis=0)) - # assumes that sampling is the same for xyz - size = N.array([1,1,1])*S[0] - x = N.square(size) 
- N.square(vxg) - # clip - x[x<0] = 0 - fwhm = N.sqrt(x) / vxg - # pathology when stepsize = 1 for MAT equal to the identity matrix - fwhm[fwhm==0] = 1 - # return the 3D Gaussian kernel width (xyz) - return fwhm - -def load_image(imagename=filename, rows=256, cols=256, layers=90): - ImageVolume = N.fromfile(imagename, dtype=N.uint16).reshape(layers, rows, cols); - # clip to 8 bits. this will be rescale to 8 bits for fMRI - ImageVolume[ImageVolume>255] = 255 - # voxel to pixel is identity for this simulation using anatomical MRI volume - # 4x4 matrix - M = N.eye(4, dtype=N.float64); - # dimensions - D = N.zeros(3, dtype=N.int32); - # Gaussian kernel - fill in with build_fwhm() - F = N.zeros(3, dtype=N.float64); - D[0] = rows - D[1] = cols - D[2] = layers - # make sure the data type is uchar - ImageVolume = ImageVolume.astype(N.uint8) - image = {'data' : ImageVolume, 'mat' : M, 'dim' : D, 'fwhm' : F} - return image - -def load_blank_image(rows=256, cols=256, layers=90): - ImageVolume = N.zeros(layers*rows*cols, dtype=N.uint16).reshape(layers, rows, cols); - # voxel to pixel is identity for this simulation using anatomical MRI volume - # 4x4 matrix - M = N.eye(4, dtype=N.float64); - # dimensions - D = N.zeros(3, dtype=N.int32); - # Gaussian kernel - fill in with build_fwhm() - F = N.zeros(3, dtype=N.float64); - D[0] = rows - D[1] = cols - D[2] = layers - # make sure the data type is uchar - ImageVolume = ImageVolume.astype(N.uint8) - image = {'data' : ImageVolume, 'mat' : M, 'dim' : D, 'fwhm' : F} - return image - -def optimize_function(x, optfunc_args): - image_F = optfunc_args[0] - image_G = optfunc_args[1] - sample_vector = optfunc_args[2] - fwhm = optfunc_args[3] - do_lite = optfunc_args[4] - smooth = optfunc_args[5] - method = optfunc_args[6] - ret_histo = optfunc_args[7] - - rot_matrix = build_rotate_matrix(x) - cost = 0.0 - epsilon = 2.2e-16 - # image_G is base image - # image_F is the to-be-rotated image - # rot_matrix is the 4x4 constructed (current angles and translates) transform matrix - # sample_vector is the subsample vector for x-y-z - - F_inv = N.linalg.inv(image_F['mat']) - composite = N.dot(F_inv, rot_matrix) - composite = N.dot(composite, image_G['mat']) - - # allocate memory from Python as memory leaks when created in C-ext - joint_histogram = N.zeros([256, 256], dtype=N.float64); - - if do_lite: - R.register_histogram_lite(image_F['data'], image_G['data'], composite, sample_vector, joint_histogram) - else: - R.register_histogram(image_F['data'], image_G['data'], composite, sample_vector, joint_histogram) - - # smooth the histogram - if smooth: - p = N.ceil(2*fwhm[0]).astype(int) - x = N.array(range(-p, p+1)) - kernel1 = smooth_kernel(fwhm[0], x) - p = N.ceil(2*fwhm[1]).astype(int) - x = N.array(range(-p, p+1)) - kernel2 = smooth_kernel(fwhm[1], x) - output=None - # 2D filter in 1D separable stages - axis = 0 - result = NDI.correlate1d(joint_histogram, kernel1, axis, output) - axis = 1 - joint_histogram = NDI.correlate1d(result, kernel1, axis, output) - - joint_histogram += epsilon # prevent log(0) - # normalize the joint histogram - joint_histogram /= joint_histogram.sum() - # get the marginals - marginal_col = joint_histogram.sum(axis=0) - marginal_row = joint_histogram.sum(axis=1) - - if method == 'mi': - # mutual information - marginal_outer = N.outer(marginal_col, marginal_row) - H = joint_histogram * N.log(joint_histogram / marginal_outer) - mutual_information = H.sum() - cost = -mutual_information - - elif method == 'ecc': - # entropy correlation coefficient - 
marginal_outer = N.outer(marginal_col, marginal_row) - H = joint_histogram * N.log(joint_histogram / marginal_outer) - mutual_information = H.sum() - row_entropy = marginal_row * N.log(marginal_row) - col_entropy = marginal_col * N.log(marginal_col) - ecc = -2.0*mutual_information/(row_entropy.sum() + col_entropy.sum()) - cost = -ecc - - elif method == 'nmi': - # normalized mutual information - row_entropy = marginal_row * N.log(marginal_row) - col_entropy = marginal_col * N.log(marginal_col) - H = joint_histogram * N.log(joint_histogram) - nmi = (row_entropy.sum() + col_entropy.sum()) / (H.sum()) - cost = -nmi - - elif method == 'ncc': - # cross correlation from the joint histogram - r, c = joint_histogram.shape - i = N.array(range(1,c+1)) - j = N.array(range(1,r+1)) - m1 = (marginal_row * i).sum() - m2 = (marginal_col * j).sum() - sig1 = N.sqrt((marginal_row*(N.square(i-m1))).sum()) - sig2 = N.sqrt((marginal_col*(N.square(j-m2))).sum()) - [a, b] = N.mgrid[1:c+1, 1:r+1] - a = a - m1 - b = b - m2 - # element multiplies in the joint histogram and grids - H = ((joint_histogram * a) * b).sum() - ncc = H / (N.dot(sig1, sig2)) - cost = -ncc - - if ret_histo: - return cost, joint_histogram - else: - return cost - - -def build_structs(step=2): - # build image data structures here - P = N.zeros(6, dtype=N.float64); - T = N.zeros(6, dtype=N.float64); - F = N.zeros(2, dtype=N.int32); - S = N.ones(3, dtype=N.int32); - sample = N.zeros(2, dtype=N.int32); - S[0] = step - S[1] = step - S[2] = step - # histogram smoothing - F[0] = 3 - F[1] = 3 - # subsample for multiresolution registration - sample[0] = 4 - sample[1] = 2 - # tolerances for angle (0-2) and translation (3-5) - T[0] = 0.02 - T[1] = 0.02 - T[2] = 0.02 - T[3] = 0.001 - T[4] = 0.001 - T[5] = 0.001 - # P[0] = alpha <=> pitch. + alpha is moving back in the sagittal plane - # P[1] = beta <=> roll. + beta is moving right in the coronal plane - # P[2] = gamma <=> yaw. 
+ gamma is right turn in the transverse plane - # P[3] = Tx - # P[4] = Ty - # P[5] = Tz - img_data = {'parms' : P, 'step' : S, 'fwhm' : F, 'tol' : T, 'sample' : sample} - return img_data - - -def build_rotate_matrix(img_data_parms): - R1 = N.zeros([4,4], dtype=N.float64); - R2 = N.zeros([4,4], dtype=N.float64); - R3 = N.zeros([4,4], dtype=N.float64); - T = N.eye(4, dtype=N.float64); - - alpha = math.radians(img_data_parms[0]) - beta = math.radians(img_data_parms[1]) - gamma = math.radians(img_data_parms[2]) - - R1[0][0] = 1.0 - R1[1][1] = math.cos(alpha) - R1[1][2] = math.sin(alpha) - R1[2][1] = -math.sin(alpha) - R1[2][2] = math.cos(alpha) - R1[3][3] = 1.0 - - R2[0][0] = math.cos(beta) - R2[0][2] = math.sin(beta) - R2[1][1] = 1.0 - R2[2][0] = -math.sin(beta) - R2[2][2] = math.cos(beta) - R2[3][3] = 1.0 - - R3[0][0] = math.cos(gamma) - R3[0][1] = math.sin(gamma) - R3[1][0] = -math.sin(gamma) - R3[1][1] = math.cos(gamma) - R3[2][2] = 1.0 - R3[3][3] = 1.0 - - T[0][0] = 1.0 - T[1][1] = 1.0 - T[2][2] = 1.0 - T[3][3] = 1.0 - T[0][3] = img_data_parms[3] - T[1][3] = img_data_parms[4] - T[2][3] = img_data_parms[5] - - rot_matrix = N.dot(T, R1); - rot_matrix = N.dot(rot_matrix, R2); - rot_matrix = N.dot(rot_matrix, R3); - - return rot_matrix - - - - - - From scipy-svn at scipy.org Tue Feb 12 20:32:14 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Tue, 12 Feb 2008 19:32:14 -0600 (CST) Subject: [Scipy-svn] r3930 - trunk/scipy/ndimage/tests Message-ID: <20080213013214.9DE3D39C06E@new.scipy.org> Author: tom.waite Date: 2008-02-12 19:32:07 -0600 (Tue, 12 Feb 2008) New Revision: 3930 Modified: trunk/scipy/ndimage/tests/test_segment.py Log: Assert test for boundary length and compactness of the aorta and ventricle Modified: trunk/scipy/ndimage/tests/test_segment.py =================================================================== --- trunk/scipy/ndimage/tests/test_segment.py 2008-02-13 00:11:57 UTC (rev 3929) +++ trunk/scipy/ndimage/tests/test_segment.py 2008-02-13 01:32:07 UTC (rev 3930) @@ -15,14 +15,28 @@ get_shape_mask(edges, objects) get_voxel_measures(sourceImage, edges, objects) get_texture_measures(sourceImage, edges, objects) + # measure the compactness and object boundry length + # Ventricle measure + assert_almost_equal(objects[7]['compactness'], 0.25657323, 4) + assert_almost_equal(objects[7]['bLength'], 1215.70980000, 4) + # Aorta measure + assert_almost_equal(objects[13]['compactness'], 0.91137904, 4) + assert_almost_equal(objects[13]['bLength'], 198.338090000, 4) def test2(self): sourceImage, labeledMask, ROIList = segment_regions(filename) + # measure the compactness and object boundry length + # Ventricle measure + assert_almost_equal(ROIList[7]['compactness'], 0.25657323, 4) + assert_almost_equal(ROIList[7]['bLength'], 1215.70980000, 4) + # Aorta measure + assert_almost_equal(ROIList[13]['compactness'], 0.91137904, 4) + assert_almost_equal(ROIList[13]['bLength'], 198.338090000, 4) def test3(self): regionMask, numberRegions = grow_regions(filename) - regionMask.max() - #save_slice(regionMask, 'regionMask.raw') + number_of_regions = regionMask.max() + assert_equal(number_of_regions, 21) if __name__ == "__main__": From scipy-svn at scipy.org Tue Feb 12 21:06:01 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Tue, 12 Feb 2008 20:06:01 -0600 (CST) Subject: [Scipy-svn] r3931 - trunk/scipy/io/matlab Message-ID: <20080213020601.84F1E39C0C2@new.scipy.org> Author: wnbell Date: 2008-02-12 20:05:58 -0600 (Tue, 12 Feb 2008) New Revision: 3931 Modified: 
trunk/scipy/io/matlab/miobase.py Log: change default to squeeze_me=False Modified: trunk/scipy/io/matlab/miobase.py =================================================================== --- trunk/scipy/io/matlab/miobase.py 2008-02-13 01:32:07 UTC (rev 3930) +++ trunk/scipy/io/matlab/miobase.py 2008-02-13 02:05:58 UTC (rev 3931) @@ -110,7 +110,7 @@ def __init__(self, mat_stream, byte_order=None, mat_dtype=False, - squeeze_me=True, + squeeze_me=False, chars_as_strings=True, matlab_compatible=False, ): From scipy-svn at scipy.org Tue Feb 12 21:39:19 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Tue, 12 Feb 2008 20:39:19 -0600 (CST) Subject: [Scipy-svn] r3932 - trunk/scipy/io/matlab Message-ID: <20080213023919.6CB8D39C07A@new.scipy.org> Author: wnbell Date: 2008-02-12 20:39:15 -0600 (Tue, 12 Feb 2008) New Revision: 3932 Modified: trunk/scipy/io/matlab/mio5.py Log: set squeeze_me=False by default Modified: trunk/scipy/io/matlab/mio5.py =================================================================== --- trunk/scipy/io/matlab/mio5.py 2008-02-13 02:05:58 UTC (rev 3931) +++ trunk/scipy/io/matlab/mio5.py 2008-02-13 02:39:15 UTC (rev 3932) @@ -465,7 +465,7 @@ mat_stream, byte_order=None, mat_dtype=False, - squeeze_me=True, + squeeze_me=False, chars_as_strings=True, matlab_compatible=False, uint16_codec=None From scipy-svn at scipy.org Wed Feb 13 00:04:06 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Tue, 12 Feb 2008 23:04:06 -0600 (CST) Subject: [Scipy-svn] r3933 - in trunk/scipy/stats/models: . tests Message-ID: <20080213050406.B827139C06E@new.scipy.org> Author: jonathan.taylor Date: 2008-02-12 23:04:04 -0600 (Tue, 12 Feb 2008) New Revision: 3933 Modified: trunk/scipy/stats/models/contrast.py trunk/scipy/stats/models/formula.py trunk/scipy/stats/models/tests/test_formula.py Log: made namespace conventions less ambiguous Modified: trunk/scipy/stats/models/contrast.py =================================================================== --- trunk/scipy/stats/models/contrast.py 2008-02-13 02:39:15 UTC (rev 3932) +++ trunk/scipy/stats/models/contrast.py 2008-02-13 05:04:04 UTC (rev 3933) @@ -1,3 +1,5 @@ +import copy + import numpy as N from numpy.linalg import pinv from scipy.stats.models import utils @@ -78,8 +80,9 @@ then evaldesign can be set to False. """ - self.term.namespace = self.formula.namespace - T = N.transpose(N.array(self.term(*args, **kw))) + t = copy.copy(self.term) + t.namespace = self.formula.namespace + T = N.transpose(N.array(t(*args, **kw))) if T.ndim == 1: T.shape = (T.shape[0], 1) Modified: trunk/scipy/stats/models/formula.py =================================================================== --- trunk/scipy/stats/models/formula.py 2008-02-13 02:39:15 UTC (rev 3932) +++ trunk/scipy/stats/models/formula.py 2008-02-13 05:04:04 UTC (rev 3933) @@ -22,6 +22,31 @@ defaults to formula.default_namespace. When called in an instance of formula, the namespace used is that formula's namespace. + + Inheritance of the namespace under +,*,- operations: + ---------------------------------------------------- + + By default, the namespace is empty, which means it must be + specified before evaluating the design matrix. + + When it is unambiguous, the namespaces of objects are derived from the + context. 
+ + Rules: + ------ + + i) "X * I", "X + I", "X**i": these inherit X's namespace + ii) "F.main_effect()": this inherits the Factor F's namespace + iii) "A-B": this inherits A's namespace + iv) if A.namespace == B.namespace, then A+B inherits this namespace + v) if A.namespace == B.namespace, then A*B inherits this namespace + + Equality of namespaces: + ----------------------- + + This is done by comparing the namespaces directly, if + an exception is raised in the check of equality, they are + assumed not to be equal. """ def __pow__(self, power): @@ -80,9 +105,10 @@ """ Formula(self) + Formula(other) """ - other = Formula(other, namespace=self.namespace) + other = Formula(other) f = other + self - f.namespace = self.namespace + if _namespace_equal(other.namespace, self.namespace): + f.namespace = self.namespace return f def __mul__(self, other): @@ -95,9 +121,10 @@ elif self.name is 'intercept': f = Formula(other, namespace=other.namespace) else: - other = Formula(other, namespace=self.namespace) + other = Formula(other, namespace=other.namespace) f = other * self - f.namespace = self.namespace + if _namespace_equal(other.namespace, self.namespace): + f.namespace = self.namespace return f def names(self): @@ -124,7 +151,8 @@ else: val = self.func if callable(val): - if hasattr(val, "namespace"): + if isinstance(val, (Term, Formula)): + val = copy.copy(val) val.namespace = self.namespace val = val(*args, **kw) @@ -172,7 +200,8 @@ v = self.namespace[self._name] while True: if callable(v): - if hasattr(v, "namespace"): + if isinstance(v, (Term, Formula)): + v = copy.copy(v) v.namespace = self.namespace v = v(*args, **kw) else: break @@ -376,7 +405,7 @@ intercept = False iindex = 0 for t in self.terms: - + t = copy.copy(t) t.namespace = namespace val = t(*args, **kw) @@ -414,7 +443,7 @@ else: allvals = I(nrow=nrow) allvals.shape = (1,) + allvals.shape - return allvals + return N.squeeze(allvals) def hasterm(self, query_term): """ @@ -495,7 +524,7 @@ TO DO: check for nesting relationship. Should not be too difficult. """ - other = Formula(other, namespace=self.namespace) + other = Formula(other) selftermnames = self.termnames() othertermnames = other.termnames() @@ -520,7 +549,6 @@ if self.terms[i].name is 'intercept': _term = other.terms[j] _term.namespace = other.namespace - elif other.terms[j].name is 'intercept': _term = self.terms[i] _term.namespace = self.namespace @@ -548,16 +576,17 @@ sumterms = self + other sumterms.terms = [self, other] # enforce the order we want - sumterms.namespace = self.namespace - _term = Quantitative(names, func=sumterms, termname=termname, + _term = Quantitative(names, func=sumterms, + termname=termname, transform=product_func) - _term.namespace = self.namespace + if _namespace_equal(self.namespace, other.namespace): + _term.namespace = self.namespace terms.append(_term) - return Formula(terms, namespace=self.namespace) + return Formula(terms) def __add__(self, other): @@ -568,12 +597,15 @@ terms in the formula are sorted alphabetically. """ - other = Formula(other, namespace=self.namespace) + other = Formula(other) terms = self.terms + other.terms pieces = [(term.name, term) for term in terms] pieces.sort() terms = [piece[1] for piece in pieces] - return Formula(terms, namespace=self.namespace) + f = Formula(terms) + if _namespace_equal(self.namespace, other.namespace): + f.namespace = self.namespace + return f def __sub__(self, other): @@ -583,7 +615,7 @@ function does not raise an exception. 
""" - other = Formula(other, namespace=self.namespace) + other = Formula(other) terms = copy.copy(self.terms) for term in other.terms: @@ -591,9 +623,11 @@ if terms[i].termname == term.termname: terms.pop(i) break - return Formula(terms, namespace=self.namespace) + f = Formula(terms) + f.namespace = self.namespace + return f -def isnested(A, B, namespace=globals()): +def isnested(A, B, namespace=None): """ Is factor B nested within factor A or vice versa: a very crude test which depends on the namespace. @@ -603,9 +637,13 @@ """ - a = A(namespace, values=True)[0] - b = B(namespace, values=True)[0] + if namespace is not None: + A = copy.copy(A); A.namespace = namespace + B = copy.copy(B); B.namespace = namespace + a = A(values=True)[0] + b = B(values=True)[0] + if len(a) != len(b): raise ValueError, 'A() and B() should be sequences of the same length' @@ -645,17 +683,25 @@ """ -def interactions(terms, order=2): +def interactions(terms, order=[1,2]): """ - Output all pairwise interactions up to a given order of a + Output all pairwise interactions of given order of a sequence of terms. - If order is greater than len(terms), it is treated as len(terms). + The argument order is a sequence specifying which order + of interactions should be generated -- the default + creates main effects and two-way interactions. If order + is an integer, it is changed to range(1,order+1), so + order=3 is equivalent to order=[1,2,3], generating + all one, two and three-way interactions. + + If any entry of order is greater than len(terms), it is + effectively treated as len(terms). >>> print interactions([Term(l) for l in ['a', 'b', 'c']]) >>> - >>> print interactions([Term(l) for l in ['a', 'b', 'c']], order=5) + >>> print interactions([Term(l) for l in ['a', 'b', 'c']], order=range(5)) >>> @@ -664,10 +710,13 @@ values = {} + if N.asarray(order).shape == (): + order = range(1, int(order)+1) + # First order - for o in range(order): - I = N.indices((l,)*(o+1)) + for o in order: + I = N.indices((l,)*(o)) I.shape = (I.shape[0], N.product(I.shape[1:])) for m in range(I.shape[1]): @@ -681,8 +730,15 @@ v *= ll[ii+1] values[tuple(I[:,m])] = v - value = values[(0,)]; del(values[(0,)]) + key = values.keys()[0] + value = values[key]; del(values[key]) for v in values.values(): value += v return value + +def _namespace_equal(space1, space2): + try: + return space1 == space2 + except: + return False Modified: trunk/scipy/stats/models/tests/test_formula.py =================================================================== --- trunk/scipy/stats/models/tests/test_formula.py 2008-02-13 02:39:15 UTC (rev 3932) +++ trunk/scipy/stats/models/tests/test_formula.py 2008-02-13 05:04:04 UTC (rev 3933) @@ -66,6 +66,7 @@ def test_namespace(self): space1 = {'X':N.arange(50), 'Y':N.arange(50)*2} space2 = {'X':N.arange(20), 'Y':N.arange(20)*2} + space3 = {'X':N.arange(30), 'Y':N.arange(30)*2} X = formula.Term('X') Y = formula.Term('Y') @@ -79,15 +80,44 @@ f.namespace = space1 self.assertEqual(f().shape, (2,50)) - assert_almost_equal(Y(), N.arange(50)*2) + assert_almost_equal(Y(), N.arange(20)*2) assert_almost_equal(X(), N.arange(50)) f.namespace = space2 self.assertEqual(f().shape, (2,20)) assert_almost_equal(Y(), N.arange(20)*2) - assert_almost_equal(X(), N.arange(20)) + assert_almost_equal(X(), N.arange(50)) + f.namespace = space3 + self.assertEqual(f().shape, (2,30)) + assert_almost_equal(Y(), N.arange(20)*2) + assert_almost_equal(X(), N.arange(50)) + xx = X**2 + self.assertEqual(xx().shape, (50,)) + + xx.namespace = space3 + 
self.assertEqual(xx().shape, (30,)) + + xx = X * formula.I + self.assertEqual(xx().shape, (50,)) + xx.namespace = space3 + self.assertEqual(xx().shape, (30,)) + + xx = X * X + self.assertEqual(xx.namespace, X.namespace) + + xx = X + Y + self.assertEqual(xx.namespace, {}) + + Y.namespace = {'X':N.arange(50), 'Y':N.arange(50)*2} + xx = X + Y + self.assertEqual(xx.namespace, {}) + + Y.namespace = X.namespace + xx = X+Y + self.assertEqual(xx.namespace, Y.namespace) + def test_termcolumns(self): t1 = formula.Term("A") t2 = formula.Term("B") @@ -112,33 +142,30 @@ self.assertEquals(x.shape, (40, 10)) def test_product(self): - prod = self.terms[0] * self.terms[2] - self.formula += prod - x = self.formula.design() - p = self.formula['A*C'] - col = self.formula.termcolumns(prod, dict=False) + prod = self.formula['A'] * self.formula['C'] + f = self.formula + prod + f.namespace = self.namespace + x = f.design() + p = f['A*C'] + p.namespace = self.namespace + col = f.termcolumns(prod, dict=False) assert_almost_equal(N.squeeze(x[:,col]), self.X[:,0] * self.X[:,2]) assert_almost_equal(N.squeeze(p()), self.X[:,0] * self.X[:,2]) def test_intercept1(self): prod = self.terms[0] * self.terms[2] - self.formula += formula.I - icol = self.formula.names().index('intercept') - assert_almost_equal(self.formula()[icol], N.ones((40,))) + f = self.formula + formula.I + icol = f.names().index('intercept') + f.namespace = self.namespace + assert_almost_equal(f()[icol], N.ones((40,))) - def test_intercept2(self): - prod = self.terms[0] * self.terms[2] - self.formula += formula.I - icol = self.formula.names().index('intercept') - assert_almost_equal(self.formula()[icol], N.ones((40,))) - def test_intercept3(self): - prod = self.terms[0] * formula.I + t = self.formula['A'] + t.namespace = self.namespace + prod = t * formula.I prod.namespace = self.formula.namespace - assert_almost_equal(N.squeeze(prod()), self.terms[0]()) + assert_almost_equal(N.squeeze(prod()), t()) - - def test_contrast1(self): term = self.terms[0] + self.terms[2] c = contrast.Contrast(term, self.formula) @@ -202,6 +229,7 @@ fac = formula.Factor('ff', f) fac.namespace = {'ff':f} m = fac.main_effect(reference=1) + m.namespace = fac.namespace self.assertEquals(m().shape, (2,30)) def test_factor4(self): @@ -209,6 +237,7 @@ fac = formula.Factor('ff', f) fac.namespace = {'ff':f} m = fac.main_effect(reference=2) + m.namespace = fac.namespace r = N.array([N.identity(3)]*10) r.shape = (30,3) r = r.T @@ -251,7 +280,7 @@ def test_contrast4(self): f = self.formula + self.terms[5] + self.terms[5] - + f.namespace = self.namespace estimable = False c = contrast.Contrast(self.terms[5], f) @@ -267,6 +296,12 @@ f = formula.interactions([formula.Term(l) for l in ['a', 'b', 'c', 'd']], order=3) assert_equal(set(f.termnames()), set(['a', 'b', 'c', 'd', 'a*b', 'a*c', 'a*d', 'b*c', 'b*d', 'c*d', 'a*b*c', 'a*c*d', 'a*b*d', 'b*c*d'])) + f = formula.interactions([formula.Term(l) for l in ['a', 'b', 'c', 'd']], order=[1,2,3]) + assert_equal(set(f.termnames()), set(['a', 'b', 'c', 'd', 'a*b', 'a*c', 'a*d', 'b*c', 'b*d', 'c*d', 'a*b*c', 'a*c*d', 'a*b*d', 'b*c*d'])) + + f = formula.interactions([formula.Term(l) for l in ['a', 'b', 'c', 'd']], order=[3]) + assert_equal(set(f.termnames()), set(['a*b*c', 'a*c*d', 'a*b*d', 'b*c*d'])) + def test_subtract(self): f = formula.interactions([formula.Term(l) for l in ['a', 'b', 'c']]) ff = f - f['a*b'] From scipy-svn at scipy.org Wed Feb 13 03:52:25 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Wed, 13 Feb 2008 02:52:25 
-0600 (CST) Subject: [Scipy-svn] r3934 - trunk/scipy/splinalg/dsolve Message-ID: <20080213085225.7945339C2BA@new.scipy.org> Author: rc Date: 2008-02-13 02:52:06 -0600 (Wed, 13 Feb 2008) New Revision: 3934 Modified: trunk/scipy/splinalg/dsolve/linsolve.py Log: use scikit umfpack, issue warning otherwise Modified: trunk/scipy/splinalg/dsolve/linsolve.py =================================================================== --- trunk/scipy/splinalg/dsolve/linsolve.py 2008-02-13 05:04:04 UTC (rev 3933) +++ trunk/scipy/splinalg/dsolve/linsolve.py 2008-02-13 08:52:06 UTC (rev 3934) @@ -6,12 +6,19 @@ import _superlu -import umfpack -if hasattr( umfpack, 'UMFPACK_OK' ): - isUmfpack = True +noScikit = False +try: + import scikits.umfpack as umfpack +except ImportError: + import umfpack + noScikit = True else: - del umfpack - isUmfpack = False + isUmfpack = hasattr( umfpack, 'UMFPACK_OK' ) + +if isUmfpack and noScikit: + warn( 'scipy.splinalg.dsolve.umfpack will be removed,' + ' install scikits.umfpack instead', DeprecationWarning ) + useUmfpack = True From scipy-svn at scipy.org Wed Feb 13 12:29:55 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Wed, 13 Feb 2008 11:29:55 -0600 (CST) Subject: [Scipy-svn] r3935 - trunk/scipy/splinalg/dsolve Message-ID: <20080213172955.1728639C310@new.scipy.org> Author: wnbell Date: 2008-02-13 11:29:52 -0600 (Wed, 13 Feb 2008) New Revision: 3935 Modified: trunk/scipy/splinalg/dsolve/linsolve.py Log: fixed isUmfpack error Modified: trunk/scipy/splinalg/dsolve/linsolve.py =================================================================== --- trunk/scipy/splinalg/dsolve/linsolve.py 2008-02-13 08:52:06 UTC (rev 3934) +++ trunk/scipy/splinalg/dsolve/linsolve.py 2008-02-13 17:29:52 UTC (rev 3935) @@ -12,9 +12,9 @@ except ImportError: import umfpack noScikit = True -else: - isUmfpack = hasattr( umfpack, 'UMFPACK_OK' ) +isUmfpack = hasattr( umfpack, 'UMFPACK_OK' ) + if isUmfpack and noScikit: warn( 'scipy.splinalg.dsolve.umfpack will be removed,' ' install scikits.umfpack instead', DeprecationWarning ) From scipy-svn at scipy.org Wed Feb 13 16:12:56 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Wed, 13 Feb 2008 15:12:56 -0600 (CST) Subject: [Scipy-svn] r3936 - trunk/scipy/sparse Message-ID: <20080213211256.7B9F539C1A2@new.scipy.org> Author: wnbell Date: 2008-02-13 15:12:54 -0600 (Wed, 13 Feb 2008) New Revision: 3936 Modified: trunk/scipy/sparse/construct.py Log: fixed typo in deprecation string Modified: trunk/scipy/sparse/construct.py =================================================================== --- trunk/scipy/sparse/construct.py 2008-02-13 17:29:52 UTC (rev 3935) +++ trunk/scipy/sparse/construct.py 2008-02-13 21:12:54 UTC (rev 3936) @@ -313,7 +313,7 @@ spkron = deprecate(kron, oldname='spkron', newname='kron') speye = deprecate(eye, oldname='speye', newname='eye') -spidentity = deprecate(identity, oldname='spidenitiy', newname='identity') +spidentity = deprecate(identity, oldname='spidentity', newname='identity') def lil_eye((r,c), k=0, dtype='d'): From scipy-svn at scipy.org Thu Feb 14 01:00:16 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Thu, 14 Feb 2008 00:00:16 -0600 (CST) Subject: [Scipy-svn] r3937 - in trunk/scipy/sparse: . 
tests Message-ID: <20080214060016.58CEF39C010@new.scipy.org> Author: wnbell Date: 2008-02-14 00:00:10 -0600 (Thu, 14 Feb 2008) New Revision: 3937 Modified: trunk/scipy/sparse/compressed.py trunk/scipy/sparse/csc.py trunk/scipy/sparse/csr.py trunk/scipy/sparse/tests/test_base.py Log: begin cleanup of CSR/CSC __getitem__ Modified: trunk/scipy/sparse/compressed.py =================================================================== --- trunk/scipy/sparse/compressed.py 2008-02-13 21:12:54 UTC (rev 3936) +++ trunk/scipy/sparse/compressed.py 2008-02-14 06:00:10 UTC (rev 3937) @@ -404,30 +404,9 @@ return spmatrix.sum(self,axis) raise ValueError, "axis out of bounds" - def _get_single_element(self,row,col): - M, N = self.shape - if (row < 0): - row += M - if (col < 0): - col += N - if not (0<=row Author: wnbell Date: 2008-02-14 02:34:57 -0600 (Thu, 14 Feb 2008) New Revision: 3938 Modified: trunk/scipy/sparse/csr.py trunk/scipy/sparse/tests/test_base.py Log: added better CSR slicing Modified: trunk/scipy/sparse/csr.py =================================================================== --- trunk/scipy/sparse/csr.py 2008-02-14 06:00:10 UTC (rev 3937) +++ trunk/scipy/sparse/csr.py 2008-02-14 08:34:57 UTC (rev 3938) @@ -8,10 +8,11 @@ import numpy from numpy import array, matrix, asarray, asmatrix, zeros, rank, intc, \ empty, hstack, isscalar, ndarray, shape, searchsorted, where, \ - concatenate, deprecate + concatenate, deprecate, arange, ones from base import spmatrix, isspmatrix -from sparsetools import csr_tocsc, csr_tobsr, csr_count_blocks +from sparsetools import csr_tocsc, csr_tobsr, csr_count_blocks, \ + get_csr_submatrix from sputils import upcast, to_native, isdense, isshape, getdtype, \ isscalarlike, isintlike @@ -182,6 +183,189 @@ return (x[0],x[1]) + def __getitem__(self, key): + def asindices(x): + try: + x = asarray(x,dtype='intc') + except: + raise IndexError('invalid index') + else: + return x + + def extractor(indices,N): + """Return a sparse matrix P so that P*self implements + slicing of the form self[[1,2,3],:] + """ + indices = asindices(indices) + + max_indx = indices.max() + + if max_indx > N: + raise ValueError('index (%d) out of range' % max_indx) + + min_indx = indices.min() + if min_indx < -N: + raise ValueError('index (%d) out of range' % (N + min_indx)) + + if min_indx < 0: + indices = indices.copy() + indices[indices < 0] += N + + indptr = arange(len(indices) + 1, dtype='intc') + data = ones(len(indices), dtype=self.dtype) + shape = (len(indices),N) + + return csr_matrix( (data,indices,indptr), shape=shape) + + + if isinstance(key, tuple): + row = key[0] + col = key[1] + + #TODO implement CSR[ [1,2,3], X ] with sparse matmat + #TODO make use of sorted indices + + if isintlike(row): + #[1,??] + if isintlike(col): + return self._get_single_element(row, col) #[i,j] + elif isinstance(col, slice): + return self._get_row_slice(row, col) #[i,1:2] + else: + P = extractor(col,self.shape[1]).T #[i,[1,2]] + return self[row,:]*P + + elif isinstance(row, slice): + #[1:2,??] + if isintlike(col) or isinstance(col, slice): + return self._get_submatrix(row, col) #[1:2,j] + else: + P = extractor(col,self.shape[1]).T #[1:2,[1,2]] + return self[row,:]*P + else: + #[[1,2],??] 
+ if isintlike(col) or isinstance(col,slice): + P = extractor(row, self.shape[0]) + return (P*self)[:,col] #[[1,2],j] or [[1,2],1:2] + else: + row = asindices(row) #[[1,2],[1,2]] + col = asindices(col) + if len(row) != len(col): + raise ValueError('number of row and column indices differ') + val = [] + for i,j in zip(row,col): + val.append(self._get_single_element(i,j)) + return asmatrix(val) + + + elif isintlike(key) or isinstance(key,slice): + return self[key,:] #[i] or [1:2] + else: + return self[asindices(key),:] #[[1,2]] + + + def _get_single_element(self,row,col): + M, N = self.shape + if (row < 0): + row += M + if (col < 0): + col += N + if not (0<=row= 1" + + #TODO make [i,:] faster + #TODO implement [i,x:y:z] + + indices = [] + + for ind in xrange(self.indptr[i], self.indptr[i+1]): + if self.indices[ind] >= start and self.indices[ind] < stop: + indices.append(ind) + + index = self.indices[indices] - start + data = self.data[indices] + indptr = numpy.array([0, len(indices)]) + return csr_matrix( (data, index, indptr), shape=(1, stop-start) ) + + def _get_submatrix( self, row_slice, col_slice ): + """Return a submatrix of this matrix (new matrix is created).""" + + M,N = self.shape + + def process_slice( sl, num ): + if isinstance( sl, slice ): + i0, i1 = sl.start, sl.stop + if i0 is None: + i0 = 0 + elif i0 < 0: + i0 = num + i0 + + if i1 is None: + i1 = num + elif i1 < 0: + i1 = num + i1 + + return i0, i1 + + elif isscalar( sl ): + if sl < 0: + sl += num + + return sl, sl + 1 + + else: + raise TypeError('expected slice or scalar') + + def check_bounds( i0, i1, num ): + if not (0<=i0 Author: wnbell Date: 2008-02-14 10:42:25 -0600 (Thu, 14 Feb 2008) New Revision: 3939 Modified: trunk/scipy/sparse/csr.py Log: fixed bounds checking in new sparse slicing Modified: trunk/scipy/sparse/csr.py =================================================================== --- trunk/scipy/sparse/csr.py 2008-02-14 08:34:57 UTC (rev 3938) +++ trunk/scipy/sparse/csr.py 2008-02-14 16:42:25 UTC (rev 3939) @@ -200,7 +200,7 @@ max_indx = indices.max() - if max_indx > N: + if max_indx >= N: raise ValueError('index (%d) out of range' % max_indx) min_indx = indices.min() @@ -222,8 +222,6 @@ row = key[0] col = key[1] - #TODO implement CSR[ [1,2,3], X ] with sparse matmat - #TODO make use of sorted indices if isintlike(row): #[1,??] 
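The extractor() helper in the new __getitem__ above relies on a standard sparse trick: selecting rows [i0, i1, ...] of a CSR matrix is equivalent to left-multiplying by a selection matrix P whose k-th row holds a single 1 in column i_k. A minimal sketch of that idea (illustrative only, not part of this changeset; the small example matrix and row list are made up):

    import numpy as np
    from scipy.sparse import csr_matrix

    A = csr_matrix(np.arange(12).reshape(3, 4))    # small example matrix
    rows = np.array([2, 0], dtype=np.intc)         # rows to extract, in this order

    # P[k, rows[k]] = 1, one entry per requested row
    data = np.ones(len(rows), dtype=A.dtype)
    indptr = np.arange(len(rows) + 1, dtype=np.intc)
    P = csr_matrix((data, rows, indptr), shape=(len(rows), A.shape[0]))

    # P * A reproduces A[[2, 0], :] while staying in sparse format
    assert np.array_equal((P * A).toarray(), A.toarray()[[2, 0]])

Column selection in the code above follows from the same construction transposed, i.e. self[row,:] * extractor(col, N).T.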
@@ -265,6 +263,8 @@ def _get_single_element(self,row,col): + """Returns the single element self[row, col] + """ M, N = self.shape if (row < 0): row += M @@ -272,6 +272,8 @@ col += N if not (0<=row= self.shape[0]: raise ValueError('index (%d) out of range' % i ) start, stop, stride = cslice.indices(self.shape[1]) From scipy-svn at scipy.org Thu Feb 14 11:46:25 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Thu, 14 Feb 2008 10:46:25 -0600 (CST) Subject: [Scipy-svn] r3940 - trunk/scipy/sparse Message-ID: <20080214164625.9213939C2FE@new.scipy.org> Author: wnbell Date: 2008-02-14 10:46:23 -0600 (Thu, 14 Feb 2008) New Revision: 3940 Modified: trunk/scipy/sparse/csr.py Log: raise IndexError instead of ValueError Modified: trunk/scipy/sparse/csr.py =================================================================== --- trunk/scipy/sparse/csr.py 2008-02-14 16:42:25 UTC (rev 3939) +++ trunk/scipy/sparse/csr.py 2008-02-14 16:46:23 UTC (rev 3940) @@ -201,11 +201,11 @@ max_indx = indices.max() if max_indx >= N: - raise ValueError('index (%d) out of range' % max_indx) + raise IndexError('index (%d) out of range' % max_indx) min_indx = indices.min() if min_indx < -N: - raise ValueError('index (%d) out of range' % (N + min_indx)) + raise IndexError('index (%d) out of range' % (N + min_indx)) if min_indx < 0: indices = indices.copy() @@ -249,7 +249,7 @@ row = asindices(row) #[[1,2],[1,2]] col = asindices(col) if len(row) != len(col): - raise ValueError('number of row and column indices differ') + raise IndexError('number of row and column indices differ') val = [] for i,j in zip(row,col): val.append(self._get_single_element(i,j)) @@ -271,7 +271,7 @@ if (col < 0): col += N if not (0<=row= self.shape[0]: - raise ValueError('index (%d) out of range' % i ) + raise IndexError('index (%d) out of range' % i ) start, stop, stride = cslice.indices(self.shape[1]) @@ -350,9 +350,9 @@ def check_bounds( i0, i1, num ): if not (0<=i0 Author: tom.waite Date: 2008-02-14 20:13:15 -0600 (Thu, 14 Feb 2008) New Revision: 3941 Modified: trunk/scipy/ndimage/src/register/Register_EXT.c trunk/scipy/ndimage/src/register/Register_IMPL.c Log: Added tri-cubic resampler Modified: trunk/scipy/ndimage/src/register/Register_EXT.c =================================================================== --- trunk/scipy/ndimage/src/register/Register_EXT.c 2008-02-14 16:46:23 UTC (rev 3940) +++ trunk/scipy/ndimage/src/register/Register_EXT.c 2008-02-15 02:13:15 UTC (rev 3941) @@ -140,6 +140,62 @@ } + +static PyObject *Register_CubicResample(PyObject *self, PyObject *args) +{ + + int num; + int nd; + int type; + int itype; + int nd_rotmatrix; + int nd_S; + npy_intp *dimsF; + npy_intp *dimsG; + npy_intp *dims_rotmatrix; + npy_intp *dims_S; + unsigned char *imageG; + unsigned char *imageF; + double *M; + int *S; + PyObject *imgArray1 = NULL; + PyObject *imgArray2 = NULL; + PyObject *rotArray = NULL; + PyObject *SArray = NULL; + + if(!PyArg_ParseTuple(args, "OOOO", &imgArray1, &imgArray2, &rotArray, &SArray)) + goto exit; + + /* check in the Python code that F and G are the same dims, type */ + imageF = (unsigned char *)PyArray_DATA(imgArray1); + imageG = (unsigned char *)PyArray_DATA(imgArray2); + /* reads dims as 0 = layers, 1 = rows, 2 = cols */ + nd = PyArray_NDIM(imgArray1); + dimsF = PyArray_DIMS(imgArray1); + dimsG = PyArray_DIMS(imgArray2); + type = PyArray_TYPE(imgArray1); + num = PyArray_SIZE(imgArray1); + + M = (double *)PyArray_DATA(rotArray); + nd_rotmatrix = PyArray_NDIM(rotArray); + dims_rotmatrix = 
PyArray_DIMS(rotArray); + + S = (int *)PyArray_DATA(SArray); + nd_S = PyArray_NDIM(SArray); + dims_S = PyArray_DIMS(SArray); + + if(!NI_CubicResample((int)dimsF[0], (int)dimsF[1], (int)dimsF[2], + (int)dimsG[0], (int)dimsG[1], (int)dimsG[2], + S, M, imageG, imageF)) + goto exit; + +exit: + + return PyErr_Occurred() ? NULL : (PyObject*)Py_BuildValue(""); + +} + + static PyObject *Register_LinearResample(PyObject *self, PyObject *args) { @@ -199,6 +255,7 @@ { "register_histogram", Register_Histogram, METH_VARARGS, NULL }, { "register_histogram_lite", Register_HistogramLite, METH_VARARGS, NULL }, { "register_linear_resample", Register_LinearResample, METH_VARARGS, NULL }, + { "register_cubic_resample", Register_CubicResample, METH_VARARGS, NULL }, { NULL, NULL, 0, NULL}, }; Modified: trunk/scipy/ndimage/src/register/Register_IMPL.c =================================================================== --- trunk/scipy/ndimage/src/register/Register_IMPL.c 2008-02-14 16:46:23 UTC (rev 3940) +++ trunk/scipy/ndimage/src/register/Register_IMPL.c 2008-02-15 02:13:15 UTC (rev 3941) @@ -1,6 +1,100 @@ #include #include +float tri_cubic_convolve(unsigned char *pVolume, int x, int y, int z, float xp, float yp, + float zp, int colsG, int rowsG, int layersG, int sliceSizeG){ + + int i, j, k; + int layerOffsets[4]; + int rowOffsets[4]; + float ps1, ps2, ps3; + float Y[4], NewRow[4], NewLayer[4]; + float R, C, L, D, T; + float valueXYZ = 0.0; + float dataCube[4][4][4]; + /* [cols][rows][layers] */ + + rowOffsets[0] = (y-1)*colsG; + rowOffsets[1] = (y )*colsG; + rowOffsets[2] = (y+1)*colsG; + rowOffsets[3] = (y+2)*colsG; + + layerOffsets[0] = (z-1)*sliceSizeG; + layerOffsets[1] = (z )*sliceSizeG; + layerOffsets[2] = (z+1)*sliceSizeG; + layerOffsets[3] = (z+2)*sliceSizeG; + + /* get numerator for interpolation */ + C = xp - (float)x; + R = yp - (float)y; + L = zp - (float)z; + D = (float)0.002; + + /* get 4x4 window over all 4 layers */ + for(i = 0; i < 4; ++i){ + for(j = 0; j < 4; ++j){ + dataCube[0][j][i] = (float)pVolume[layerOffsets[i]+rowOffsets[j]+x-1]; + dataCube[1][j][i] = (float)pVolume[layerOffsets[i]+rowOffsets[j]+x]; + dataCube[2][j][i] = (float)pVolume[layerOffsets[i]+rowOffsets[j]+x+1]; + dataCube[3][j][i] = (float)pVolume[layerOffsets[i]+rowOffsets[j]+x+2]; + } + } + + for(i = 0; i < 4; ++i){ + /* interpolate 4 rows in all 4 layers */ + for(j = 0; j < 4; ++j){ + if(C > D){ + Y[0] = dataCube[0][j][i]; + Y[1] = dataCube[1][j][i]; + Y[2] = dataCube[2][j][i]; + Y[3] = dataCube[3][j][i]; + ps1 = Y[2] - Y[0]; + ps2 = (float)2.0*(Y[0] - Y[1]) + Y[2] - Y[3]; + ps3 = -Y[0] + Y[1] - Y[2] + Y[3]; + NewRow[j] = Y[1]+C*(ps1+C*(ps2+C*ps3)); + } + else{ + NewRow[j] = dataCube[1][j][i]; + } + } + /* interpolate across 4 columns */ + if(R > D){ + Y[0] = NewRow[0]; + Y[1] = NewRow[1]; + Y[2] = NewRow[2]; + Y[3] = NewRow[3]; + ps1 = Y[2] - Y[0]; + ps2 = (float)2.0*(Y[0] - Y[1]) + Y[2] - Y[3]; + ps3 = -Y[0] + Y[1] - Y[2] + Y[3]; + T = (Y[1]+R*(ps1+R*(ps2+R*ps3))); + NewLayer[i] = T; + } + else{ + T = NewRow[1]; + NewLayer[i] = T; + } + } + /* interpolate across 4 layers */ + if(R > D){ + Y[0] = NewLayer[0]; + Y[1] = NewLayer[1]; + Y[2] = NewLayer[2]; + Y[3] = NewLayer[3]; + ps1 = Y[2] - Y[0]; + ps2 = (float)2.0*(Y[0] - Y[1]) + Y[2] - Y[3]; + ps3 = -Y[0] + Y[1] - Y[2] + Y[3]; + T = (Y[1]+R*(ps1+R*(ps2+R*ps3))); + valueXYZ = T; + } + else{ + T = NewLayer[1]; + valueXYZ = T; + } + + return(valueXYZ); + +} + float trilinear_A(unsigned char *pVolume, int x, int y, int z, float dx, float dy, float dz, int dims[]){ // Vxyz 
for [0,1] values of x, y, z @@ -414,3 +508,49 @@ } + +int NI_CubicResample(int layersF, int rowsF, int colsF, int layersG, int rowsG, int colsG, + int *dimSteps, double *M, unsigned char *imageG, unsigned char *imageF) +{ + + int i; + int status; + int sliceG; + int rowG; + int sliceSizeG; + int ivf; + float vf; + float x, y, z; + float xp, yp, zp; + + sliceSizeG = rowsG * colsG; + for(z = 1.0; z < layersG-dimSteps[2]-2; z += dimSteps[2]){ + sliceG = (int)z * sliceSizeG; + for(y = 1.0; y < rowsG-dimSteps[1]-2; y += dimSteps[1]){ + rowG = (int)y * colsG; + for(x = 1.0; x < colsG-dimSteps[0]-2; x += dimSteps[0]){ + // get the 'from' coordinates + xp = M[0]*x + M[1]*y + M[2]*z + M[3]; + yp = M[4]*x + M[5]*y + M[6]*z + M[7]; + zp = M[8]*x + M[9]*y + M[10]*z + M[11]; + // clip the resample window + if((zp >= 1.0 && zp < layersF-dimSteps[2]-2) && + (yp >= 1.0 && yp < rowsF-dimSteps[1]-2) && + (xp >= 1.0 && xp < colsF-dimSteps[0]-2)){ + vf = tri_cubic_convolve(imageF, (int)xp, (int)yp, (int)zp, xp, yp, + zp, colsG, rowsG, layersG, sliceSizeG); + /* clip at hard edges */ + if(vf < 0.0) vf = 0.0; + imageG[sliceG+rowG+(int)x] = (int)vf; + } + } + } + } + + status = 1; + + return status; + +} + + From scipy-svn at scipy.org Thu Feb 14 21:13:51 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Thu, 14 Feb 2008 20:13:51 -0600 (CST) Subject: [Scipy-svn] r3942 - trunk/scipy/ndimage Message-ID: <20080215021351.BF17C39C12B@new.scipy.org> Author: tom.waite Date: 2008-02-14 20:13:49 -0600 (Thu, 14 Feb 2008) New Revision: 3942 Modified: trunk/scipy/ndimage/_registration.py Log: Added support for tri-cubic resampler in C extension code Modified: trunk/scipy/ndimage/_registration.py =================================================================== --- trunk/scipy/ndimage/_registration.py 2008-02-15 02:13:15 UTC (rev 3941) +++ trunk/scipy/ndimage/_registration.py 2008-02-15 02:13:49 UTC (rev 3942) @@ -28,13 +28,19 @@ inputname = 'ANAT1_V0001.img' filename = os.path.join(os.path.split(__file__)[0], inputname) -def remap_image(image, parm_vector): +def remap_image(image, parm_vector, resample='linear'): M_inverse = get_inverse_mappings(parm_vector) # allocate the zero image remaped_image = load_blank_image() imdata = build_structs(step=1) - # trilinear interpolation mapping. to be replaced with splines - R.register_linear_resample(image['data'], remaped_image['data'], M_inverse, imdata['step']) + + if resample == 'linear': + # trilinear interpolation mapping. + R.register_linear_resample(image['data'], remaped_image['data'], M_inverse, imdata['step']) + elif resample == 'cubic': + # tricubic convolve interpolation mapping. 
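A minimal sketch, not part of the commit: the register_cubic_resample call just below drives the tri_cubic_convolve routine shown earlier, which interpolates along x, then y, then z over a 4x4x4 voxel neighbourhood. The per-axis step it applies, transcribed into Python with names of my own choosing, looks roughly like this:

    def cubic_1d(y0, y1, y2, y3, t):
        # One axis of tri_cubic_convolve: y0..y3 are four consecutive samples
        # and t in [0, 1) is the fractional offset measured from y1.
        ps1 = y2 - y0
        ps2 = 2.0 * (y0 - y1) + y2 - y3
        ps3 = -y0 + y1 - y2 + y3
        return y1 + t * (ps1 + t * (ps2 + t * ps3))

    # At t = 0 this returns y1 and at t = 1 it returns y2, so the resampled
    # volume passes through the original voxel values.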
+ R.register_cubic_resample(image['data'], remaped_image['data'], M_inverse, imdata['step']) + return remaped_image def get_inverse_mappings(parm_vector): @@ -91,12 +97,14 @@ for i in loop: step = imdata['sample'][i] imdata['step'][:] = step - optfunc_args = (image1, image2, imdata['step'], imdata['fwhm'], lite, smhist, method, ret_histo) + optfunc_args = (image1, image2, imdata['step'], imdata['fwhm'], lite, smhist, + method, ret_histo) p_args = (optfunc_args,) if opt_method=='powell': print 'POWELL multi-res registration step size ', step print 'vector ', x - x = OPT.fmin_powell(optimize_function, x, args=p_args, callback=callback_powell) + x = OPT.fmin_powell(optimize_function, x, args=p_args, + callback=callback_powell) elif opt_method=='cg': print 'CG multi-res registration step size ', step print 'vector ', x @@ -106,14 +114,16 @@ print 'Hybrid POWELL multi-res registration step size ', step print 'vector ', x lite = 0 - optfunc_args = (image1, image2, imdata['step'], imdata['fwhm'], lite, smhist, method, ret_histo) + optfunc_args = (image1, image2, imdata['step'], imdata['fwhm'], lite, smhist, + method, ret_histo) p_args = (optfunc_args,) x = OPT.fmin_powell(optimize_function, x, args=p_args, callback=callback_powell) elif i==1: print 'Hybrid CG multi-res registration step size ', step print 'vector ', x lite = 1 - optfunc_args = (image1, image2, imdata['step'], imdata['fwhm'], lite, smhist, method, ret_histo) + optfunc_args = (image1, image2, imdata['step'], imdata['fwhm'], lite, + smhist, method, ret_histo) p_args = (optfunc_args,) x = OPT.fmin_cg(optimize_function, x, args=p_args, callback=callback_cg) From scipy-svn at scipy.org Sat Feb 16 08:46:06 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Sat, 16 Feb 2008 07:46:06 -0600 (CST) Subject: [Scipy-svn] r3943 - branches/build_with_scons/scipy/cluster Message-ID: <20080216134606.5030A39C1FD@new.scipy.org> Author: cdavid Date: 2008-02-16 07:45:59 -0600 (Sat, 16 Feb 2008) New Revision: 3943 Modified: branches/build_with_scons/scipy/cluster/SConstruct Log: Fix import for numscons in cluster Modified: branches/build_with_scons/scipy/cluster/SConstruct =================================================================== --- branches/build_with_scons/scipy/cluster/SConstruct 2008-02-15 02:13:49 UTC (rev 3942) +++ branches/build_with_scons/scipy/cluster/SConstruct 2008-02-16 13:45:59 UTC (rev 3943) @@ -3,7 +3,7 @@ from os.path import join from numpy.distutils.misc_util import get_numpy_include_dirs -from numpy.distutils.scons import GetNumpyEnvironment +from numscons import GetNumpyEnvironment env = GetNumpyEnvironment(ARGUMENTS) From scipy-svn at scipy.org Sat Feb 16 08:46:42 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Sat, 16 Feb 2008 07:46:42 -0600 (CST) Subject: [Scipy-svn] r3944 - branches/build_with_scons Message-ID: <20080216134642.9FDB239C1FD@new.scipy.org> Author: cdavid Date: 2008-02-16 07:46:34 -0600 (Sat, 16 Feb 2008) New Revision: 3944 Added: branches/build_with_scons/setupscons.py Log: Add root setupscons.py Copied: branches/build_with_scons/setupscons.py (from rev 3942, branches/build_with_scons/setup.py) =================================================================== --- branches/build_with_scons/setup.py 2008-02-15 02:13:49 UTC (rev 3942) +++ branches/build_with_scons/setupscons.py 2008-02-16 13:46:34 UTC (rev 3944) @@ -0,0 +1,92 @@ +#!/usr/bin/env python +"""SciPy: Scientific Library for Python + +SciPy (pronounced "Sigh Pie") is open-source software for mathematics, +science, 
and engineering. The SciPy library +depends on NumPy, which provides convenient and fast N-dimensional +array manipulation. The SciPy library is built to work with NumPy +arrays, and provides many user-friendly and efficient numerical +routines such as routines for numerical integration and optimization. +Together, they run on all popular operating systems, are quick to +install, and are free of charge. NumPy and SciPy are easy to use, +but powerful enough to be depended upon by some of the world's +leading scientists and engineers. If you need to manipulate +numbers on a computer and display or publish the results, +give SciPy a try! + +""" + +DOCLINES = __doc__.split("\n") + +import os +import sys + +CLASSIFIERS = """\ +Development Status :: 4 - Beta +Intended Audience :: Science/Research +Intended Audience :: Developers +License :: OSI Approved +Programming Language :: C +Programming Language :: Python +Topic :: Software Development +Topic :: Scientific/Engineering +Operating System :: Microsoft :: Windows +Operating System :: POSIX +Operating System :: Unix +Operating System :: MacOS + +""" + +# BEFORE importing distutils, remove MANIFEST. distutils doesn't properly +# update it when the contents of directories change. +if os.path.exists('MANIFEST'): os.remove('MANIFEST') + +os.environ['NO_SCIPY_IMPORT']='SciPy/setup.py' + +def configuration(parent_package='',top_path=None): + from numpy.distutils.misc_util import Configuration + config = Configuration(None, parent_package, top_path, setup_name = "setupscons.py") + config.set_options(ignore_setup_xxx_py=True, + assume_default_configuration=True, + delegate_options_to_subpackages=True, + quiet=True) + + config.add_subpackage('scipy') + config.add_data_files(('scipy','*.txt')) + + config.get_version('scipy/version.py') # sets config.version + + return config + +def setup_package(): + + from numpy.distutils.core import setup + from numpy.distutils.misc_util import Configuration + + old_path = os.getcwd() + local_path = os.path.dirname(os.path.abspath(sys.argv[0])) + os.chdir(local_path) + sys.path.insert(0,local_path) + sys.path.insert(0,os.path.join(local_path,'scipy')) # to retrive version + + try: + setup( + name = 'scipy', + maintainer = "SciPy Developers", + maintainer_email = "scipy-dev at scipy.org", + description = DOCLINES[0], + long_description = "\n".join(DOCLINES[2:]), + url = "http://www.scipy.org", + download_url = "http://sourceforge.net/project/showfiles.php?group_id=27747&package_id=19531", + license = 'BSD', + classifiers=filter(None, CLASSIFIERS.split('\n')), + platforms = ["Windows", "Linux", "Solaris", "Mac OS-X", "Unix"], + configuration=configuration ) + finally: + del sys.path[0] + os.chdir(old_path) + + return + +if __name__ == '__main__': + setup_package() From scipy-svn at scipy.org Sat Feb 16 14:53:26 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Sat, 16 Feb 2008 13:53:26 -0600 (CST) Subject: [Scipy-svn] r3945 - in trunk/scipy/sparse: . 
tests Message-ID: <20080216195326.4B27C39C211@new.scipy.org> Author: wnbell Date: 2008-02-16 13:53:19 -0600 (Sat, 16 Feb 2008) New Revision: 3945 Modified: trunk/scipy/sparse/data.py trunk/scipy/sparse/tests/test_base.py Log: fixed .real and .imag errors resolves ticket #606 Modified: trunk/scipy/sparse/data.py =================================================================== --- trunk/scipy/sparse/data.py 2008-02-16 13:46:34 UTC (rev 3944) +++ trunk/scipy/sparse/data.py 2008-02-16 19:53:19 UTC (rev 3945) @@ -27,10 +27,10 @@ return self._with_data(abs(self.data)) def _real(self): - return self._with_data(numpy.real(self.data),copy=False) + return self._with_data(self.data.real) def _imag(self): - return self._with_data(numpy.imag(self.data),copy=False) + return self._with_data(self.data.imag) def __neg__(self): return self._with_data(-self.data) Modified: trunk/scipy/sparse/tests/test_base.py =================================================================== --- trunk/scipy/sparse/tests/test_base.py 2008-02-16 13:46:34 UTC (rev 3944) +++ trunk/scipy/sparse/tests/test_base.py 2008-02-16 19:53:19 UTC (rev 3945) @@ -75,6 +75,15 @@ A = matrix([[-1, 0, 17],[0, -5, 0],[1, -4, 0],[0,0,0]],'d') assert_equal(-A,(-self.spmatrix(A)).todense()) + def test_real(self): + D = matrix([[1 + 3j, 2 - 4j]]) + A = self.spmatrix(D) + assert_equal(A.real.todense(),D.real) + + def test_imag(self): + D = matrix([[1 + 3j, 2 - 4j]]) + A = self.spmatrix(D) + assert_equal(A.imag.todense(),D.imag) def test_diagonal(self): """Does the matrix's .diagonal() method work? From scipy-svn at scipy.org Sat Feb 16 15:13:17 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Sat, 16 Feb 2008 14:13:17 -0600 (CST) Subject: [Scipy-svn] r3946 - trunk/scipy/sparse Message-ID: <20080216201317.CD57539C211@new.scipy.org> Author: wnbell Date: 2008-02-16 14:13:15 -0600 (Sat, 16 Feb 2008) New Revision: 3946 Modified: trunk/scipy/sparse/lil.py Log: check that shape= is not specified when first arg is (M,N) Modified: trunk/scipy/sparse/lil.py =================================================================== --- trunk/scipy/sparse/lil.py 2008-02-16 19:53:19 UTC (rev 3945) +++ trunk/scipy/sparse/lil.py 2008-02-16 20:13:15 UTC (rev 3946) @@ -56,7 +56,7 @@ # First get the shape if A is None: if not isshape(shape): - raise TypeError, "need a valid shape" + raise TypeError("need a valid shape") M, N = shape self.shape = (M,N) self.rows = numpy.empty((M,), dtype=object) @@ -75,6 +75,8 @@ self.data = A.data elif isinstance(A,tuple): if isshape(A): + if shape is not None: + raise ValueError('invalid use of shape parameter') M, N = A self.shape = (M,N) self.rows = numpy.empty((M,), dtype=object) From scipy-svn at scipy.org Tue Feb 19 16:06:12 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Tue, 19 Feb 2008 15:06:12 -0600 (CST) Subject: [Scipy-svn] r3947 - trunk/scipy/stats/models Message-ID: <20080219210612.F124439C227@new.scipy.org> Author: jonathan.taylor Date: 2008-02-19 15:06:08 -0600 (Tue, 19 Feb 2008) New Revision: 3947 Modified: trunk/scipy/stats/models/contrast.py trunk/scipy/stats/models/utils.py Log: fixed a problem in specifying contrasts using "contrast matrices" rank now correctly identifies rank 1 arrays Modified: trunk/scipy/stats/models/contrast.py =================================================================== --- trunk/scipy/stats/models/contrast.py 2008-02-16 20:13:15 UTC (rev 3946) +++ trunk/scipy/stats/models/contrast.py 2008-02-19 21:06:08 UTC (rev 3947) @@ -118,6 +118,9 @@ """ + T = 
N.asarray(T) + D = N.asarray(D) + n, p = D.shape if T.shape[0] != n and T.shape[1] != p: @@ -127,14 +130,18 @@ pseudo = pinv(D) if T.shape[0] == n: - C = N.transpose(N.dot(pseudo, T)) + C = N.dot(pseudo, T).T else: C = T + C = N.dot(pseudo, N.dot(D, C.T)).T + + Tp = N.dot(D, C.T) - Tp = N.dot(D, N.transpose(C)) - + if len(Tp.shape) == 1: + Tp.shape = (n, 1) + if utils.rank(Tp) != Tp.shape[1]: Tp = utils.fullrank(Tp) - C = N.transpose(N.dot(pseudo, Tp)) + C = N.dot(pseudo, Tp).T return N.squeeze(C) Modified: trunk/scipy/stats/models/utils.py =================================================================== --- trunk/scipy/stats/models/utils.py 2008-02-16 20:13:15 UTC (rev 3946) +++ trunk/scipy/stats/models/utils.py 2008-02-19 21:06:08 UTC (rev 3947) @@ -50,8 +50,12 @@ Return the rank of a matrix X based on its generalized inverse, not the SVD. """ - D = scipy.linalg.svdvals(X) - return int(N.add.reduce(N.greater(D / D.max(), cond).astype(N.int32))) + X = N.asarray(X) + if len(X.shape) == 2: + D = scipy.linalg.svdvals(X) + return int(N.add.reduce(N.greater(D / D.max(), cond).astype(N.int32))) + else: + return int(not N.alltrue(N.equal(X, 0.))) def fullrank(X, r=None): """ From scipy-svn at scipy.org Tue Feb 19 16:13:14 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Tue, 19 Feb 2008 15:13:14 -0600 (CST) Subject: [Scipy-svn] r3948 - trunk/scipy/stats/models Message-ID: <20080219211314.94D6C39C227@new.scipy.org> Author: jonathan.taylor Date: 2008-02-19 15:13:12 -0600 (Tue, 19 Feb 2008) New Revision: 3948 Modified: trunk/scipy/stats/models/contrast.py Log: clarifying doc string Modified: trunk/scipy/stats/models/contrast.py =================================================================== --- trunk/scipy/stats/models/contrast.py 2008-02-19 21:06:08 UTC (rev 3947) +++ trunk/scipy/stats/models/contrast.py 2008-02-19 21:13:12 UTC (rev 3948) @@ -98,9 +98,9 @@ self.rank = 1 -def contrastfromcols(T, D, pseudo=None): +def contrastfromcols(L, D, pseudo=None): """ - From an n x p design matrix D and a matrix T, tries + From an n x p design matrix D and a matrix L, tries to determine a p x q contrast matrix C which determines a contrast of full rank, i.e. the n x q matrix @@ -109,39 +109,46 @@ is full rank. - T must satisfy either T.shape[0] == n or T.shape[1] == p. + L must satisfy either L.shape[0] == n or L.shape[1] == p. + If L.shape[0] == n, then L is thought of as representing + columns in the column space of D. + + If L.shape[1] == p, then L is thought of as what is known + as a contrast matrix. In this case, this function returns an estimable + contrast corresponding to the dot(D, L.T) + Note that this always produces a meaningful contrast, not always with the intended properties because q is always non-zero unless - T is identically 0. That is, it produces a contrast that spans - the column space of T (after projection onto the column space of D). + L is identically 0. That is, it produces a contrast that spans + the column space of L (after projection onto the column space of D). 
""" - T = N.asarray(T) + L = N.asarray(L) D = N.asarray(D) n, p = D.shape - if T.shape[0] != n and T.shape[1] != p: - raise ValueError, 'shape of T and D mismatched' + if L.shape[0] != n and L.shape[1] != p: + raise ValueError, 'shape of L and D mismatched' if pseudo is None: pseudo = pinv(D) - if T.shape[0] == n: - C = N.dot(pseudo, T).T + if L.shape[0] == n: + C = N.dot(pseudo, L).T else: - C = T + C = L C = N.dot(pseudo, N.dot(D, C.T)).T - Tp = N.dot(D, C.T) + Lp = N.dot(D, C.T) - if len(Tp.shape) == 1: - Tp.shape = (n, 1) + if len(Lp.shape) == 1: + Lp.shape = (n, 1) - if utils.rank(Tp) != Tp.shape[1]: - Tp = utils.fullrank(Tp) - C = N.dot(pseudo, Tp).T + if utils.rank(Lp) != Lp.shape[1]: + Lp = utils.fullrank(Lp) + C = N.dot(pseudo, Lp).T return N.squeeze(C) From scipy-svn at scipy.org Tue Feb 19 20:35:08 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Tue, 19 Feb 2008 19:35:08 -0600 (CST) Subject: [Scipy-svn] r3949 - trunk/scipy/ndimage/src/register Message-ID: <20080220013508.359E4C7C026@new.scipy.org> Author: tom.waite Date: 2008-02-19 19:35:04 -0600 (Tue, 19 Feb 2008) New Revision: 3949 Modified: trunk/scipy/ndimage/src/register/Register_IMPL.c Log: Fixed resample bug in tri_cubic_convolve function. Modified: trunk/scipy/ndimage/src/register/Register_IMPL.c =================================================================== --- trunk/scipy/ndimage/src/register/Register_IMPL.c 2008-02-19 21:13:12 UTC (rev 3948) +++ trunk/scipy/ndimage/src/register/Register_IMPL.c 2008-02-20 01:35:04 UTC (rev 3949) @@ -75,7 +75,7 @@ } } /* interpolate across 4 layers */ - if(R > D){ + if(L > D){ Y[0] = NewLayer[0]; Y[1] = NewLayer[1]; Y[2] = NewLayer[2]; @@ -83,7 +83,7 @@ ps1 = Y[2] - Y[0]; ps2 = (float)2.0*(Y[0] - Y[1]) + Y[2] - Y[3]; ps3 = -Y[0] + Y[1] - Y[2] + Y[3]; - T = (Y[1]+R*(ps1+R*(ps2+R*ps3))); + T = (Y[1]+L*(ps1+L*(ps2+L*ps3))); valueXYZ = T; } else{ From scipy-svn at scipy.org Wed Feb 20 00:27:20 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Tue, 19 Feb 2008 23:27:20 -0600 (CST) Subject: [Scipy-svn] r3950 - in trunk/scipy/integrate: . tests Message-ID: <20080220052720.5354339C10E@new.scipy.org> Author: oliphant Date: 2008-02-19 23:27:16 -0600 (Tue, 19 Feb 2008) New Revision: 3950 Modified: trunk/scipy/integrate/info.py trunk/scipy/integrate/ode.py trunk/scipy/integrate/tests/test_integrate.py trunk/scipy/integrate/vode.pyf Log: Apply patch for ticket #334 which adds zvode to scipy's ode functionality. Modified: trunk/scipy/integrate/info.py =================================================================== --- trunk/scipy/integrate/info.py 2008-02-20 01:35:04 UTC (rev 3949) +++ trunk/scipy/integrate/info.py 2008-02-20 05:27:16 UTC (rev 3950) @@ -27,7 +27,7 @@ Interface to numerical integrators of ODE systems. odeint -- General integration of ordinary differential equations. - ode -- Integrate ODE using vode routine. + ode -- Integrate ODE using VODE and ZVODE routines. 
""" Modified: trunk/scipy/integrate/ode.py =================================================================== --- trunk/scipy/integrate/ode.py 2008-02-20 01:35:04 UTC (rev 3949) +++ trunk/scipy/integrate/ode.py 2008-02-20 05:27:16 UTC (rev 3950) @@ -1,140 +1,208 @@ -## Automatically adapted for scipy Oct 21, 2005 by +# Authors: Pearu Peterson, Pauli Virtanen +""" +First-order ODE integrators -#!/usr/bin/env python -#Author: Pearu Peterson -#Date: 3 Feb 2002 -#$Revision$ -""" User-friendly interface to various numerical integrators for solving a -system of first order ODEs with prescribed initial conditions: +system of first order ODEs with prescribed initial conditions:: - d y(t)[i] - --------- = f(t,y(t))[i], - d t + d y(t)[i] + --------- = f(t,y(t))[i], + d t + + y(t=0)[i] = y0[i], - y(t=0)[i] = y0[i], +where:: -where i = 0, ..., len(y0) - 1 + i = 0, ..., len(y0) - 1 -Provides: - ode - a generic interface class to numeric integrators. It has the - following methods: - integrator = ode(f,jac=None) - integrator = integrator.set_integrator(name,**params) - integrator = integrator.set_initial_value(y0,t0=0.0) - integrator = integrator.set_f_params(*args) - integrator = integrator.set_jac_params(*args) - y1 = integrator.integrate(t1,step=0,relax=0) - flag = integrator.successful() +class ode +--------- -Supported integrators: - vode - Variable-coefficient Ordinary Differential Equation solver, - with fixed-leading-coefficient implementation. - It provides implicit Adams method (for non-stiff problems) - and a method based on backward differentiation formulas (BDF) - (for stiff problems). - Source: http://www.netlib.org/ode/vode.f - This integrator accepts the following parameters in - set_integrator() method of the ode class: - atol=float|seq - rtol=float|seq - lband=None|int - rband=None|int - method='adams'|'bdf' - with_jacobian=0|1 - nsteps = int - (first|min|max)_step = float - order = int # <=12 for adams, <=5 for bdf +A generic interface class to numeric integrators. It has the following +methods:: + + integrator = ode(f,jac=None) + integrator = integrator.set_integrator(name,**params) + integrator = integrator.set_initial_value(y0,t0=0.0) + integrator = integrator.set_f_params(*args) + integrator = integrator.set_jac_params(*args) + y1 = integrator.integrate(t1,step=0,relax=0) + flag = integrator.successful() + """ + +integrator_info = \ """ -XXX: Integrators must have: -=========================== -cvode - C version of vode and vodpk with many improvements. - Get it from http://www.netlib.org/ode/cvode.tar.gz - To wrap cvode to Python, one must write extension module by - hand. Its interface is too much 'advanced C' that using f2py - would be too complicated (or impossible). +Available integrators +--------------------- -How to define a new integrator: -=============================== +vode +~~~~ -class myodeint(IntegratorBase): +Real-valued Variable-coefficient Ordinary Differential Equation +solver, with fixed-leading-coefficient implementation. It provides +implicit Adams method (for non-stiff problems) and a method based on +backward differentiation formulas (BDF) (for stiff problems). 
- runner = or None +Source: http://www.netlib.org/ode/vode.f - def __init__(self,...): # required - +This integrator accepts the following parameters in set_integrator() +method of the ode class: - def reset(self,n,has_jac): # optional - # n - the size of the problem (number of equations) - # has_jac - whether user has supplied its own routine for Jacobian - +- atol : float or sequence + absolute tolerance for solution +- rtol : float or sequence + relative tolerance for solution +- lband : None or int +- rband : None or int + Jacobian band width, jac[i,j] != 0 for i-lband <= j <= i+rband. + Setting these requires your jac routine to return the jacobian + in packed format, jac_packed[i-j+lband, j] = jac[i,j]. +- method: 'adams' or 'bdf' + Which solver to use, Adams (non-stiff) or BDF (stiff) +- with_jacobian : bool + Whether to use the jacobian +- nsteps : int + Maximum number of (internally defined) steps allowed during one + call to the solver. +- first_step : float +- min_step : float +- max_step : float + Limits for the step sizes used by the integrator. +- order : int + Maximum order used by the integrator, + order <= 12 for Adams, <= 5 for BDF. - def run(self,f,jac,y0,t0,t1,f_params,jac_params): # required - # this method is called to integrate from t=t0 to t=t1 - # with initial condition y0. f and jac are user-supplied functions - # that define the problem. f_params,jac_params are additional arguments - # to these functions. - - if : - self.success = 0 - return t1,y1 +zvode +~~~~~ - # In addition, one can define step() and run_relax() methods (they - # take the same arguments as run()) if the integrator can support - # these features (see IntegratorBase doc strings). +Complex-valued Variable-coefficient Ordinary Differential Equation +solver, with fixed-leading-coefficient implementation. It provides +implicit Adams method (for non-stiff problems) and a method based on +backward differentiation formulas (BDF) (for stiff problems). -if myodeint.runner: - IntegratorBase.integrator_classes.append(myodeint) +Source: http://www.netlib.org/ode/zvode.f + +This integrator accepts the same parameters in set_integrator() +as the "vode" solver. + +:Note: + When using ZVODE for a stiff system, it should only be used for + the case in which the function f is analytic, that is, when each f(i) + is an analytic function of each y(j). Analyticity means that the + partial derivative df(i)/dy(j) is a unique complex number, and this + fact is critical in the way ZVODE solves the dense or banded linear + systems that arise in the stiff case. For a complex stiff ODE system + in which f is not analytic, ZVODE is likely to have convergence + failures, and for this problem one should instead use DVODE on the + equivalent real system (in the real and imaginary parts of y). + """ +__doc__ += integrator_info + +# XXX: Integrators must have: +# =========================== +# cvode - C version of vode and vodpk with many improvements. +# Get it from http://www.netlib.org/ode/cvode.tar.gz +# To wrap cvode to Python, one must write extension module by +# hand. Its interface is too much 'advanced C' that using f2py +# would be too complicated (or impossible). 
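The note above recommends DVODE on the equivalent real system when the complex right-hand side is not analytic. A small sketch of that rewrite (not from the commit), using the illustrative non-analytic problem dz/dt = conj(z):

    from scipy.integrate import ode

    def f_real(t, y):
        # z = u + i*v, dz/dt = conj(z) = u - i*v, hence du/dt = u, dv/dt = -v
        u, v = y
        return [u, -v]

    r = ode(f_real).set_integrator('vode', method='adams')
    r.set_initial_value([1.0, 1.0], 0.0)
    r.integrate(1.0)
    print r.t, complex(r.y[0], r.y[1])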
+# +# How to define a new integrator: +# =============================== +# +# class myodeint(IntegratorBase): +# +# runner = or None +# +# def __init__(self,...): # required +# +# +# def reset(self,n,has_jac): # optional +# # n - the size of the problem (number of equations) +# # has_jac - whether user has supplied its own routine for Jacobian +# +# +# def run(self,f,jac,y0,t0,t1,f_params,jac_params): # required +# # this method is called to integrate from t=t0 to t=t1 +# # with initial condition y0. f and jac are user-supplied functions +# # that define the problem. f_params,jac_params are additional +# # arguments +# # to these functions. +# +# if : +# self.success = 0 +# return t1,y1 +# +# # In addition, one can define step() and run_relax() methods (they +# # take the same arguments as run()) if the integrator can support +# # these features (see IntegratorBase doc strings). +# +# if myodeint.runner: +# IntegratorBase.integrator_classes.append(myodeint) + __all__ = ['ode'] __version__ = "$Id$" +__docformat__ = "restructuredtext en" from numpy import asarray, array, zeros, sin, int32, isscalar import re, sys +#------------------------------------------------------------------------------ +# User interface +#------------------------------------------------------------------------------ + class ode(object): """\ -ode - a generic interface class to numeric integrators. It has the - following methods: - integrator = ode(f,jac=None) - integrator = integrator.set_integrator(name,**params) - integrator = integrator.set_initial_value(y0,t0=0.0) - integrator = integrator.set_f_params(*args) - integrator = integrator.set_jac_params(*args) - y1 = integrator.integrate(t1,step=0,relax=0) - flag = integrator.successful() +A generic interface class to numeric integrators. - Typical usage: - r = ode(f,jac).set_integrator('vode').set_initial_value(y0,t0) - t1 = - dt = - while r.successful() and r.t < t1: - r.integrate(r.t+dt) - print r.t, r.y - where f and jac have the following signatures: - def f(t,y[,arg1,..]): - return - def jac(t,y[,arg1,..]): - return +See also +-------- +odeint : an integrator with a simpler interface based on lsoda from ODEPACK +quad : for finding the area under a curve -See also: - odeint - an integrator with a simpler interface based on lsoda from ODEPACK - quad - for finding the area under a curve - """ +Examples +-------- +A problem to integrate and the corresponding jacobian: - def __init__(self,f,jac=None): - """Define equation y' = f(y,t) where (optional) jac = df/dy. - User-supplied functions must have the following signatures: - def f(t,y,...): - return - def jac(t,y,...): - return - where ... means extra parameters that can be set with - set_(f|jac)_params(*args) - methods. +>>> from scipy import eye +>>> from scipy.integrate import ode +>>> +>>> y0, t0 = [1.0j, 2.0], 0 +>>> +>>> def f(t, y, arg1): +>>> return [1j*arg1*y[0] + y[1], -arg1*y[1]**2] +>>> def jac(t, y, arg1): +>>> return [[1j*arg1, 1], [0, -arg1*2*y[1]]] + +The integration: + +>>> r = ode(f, jac).set_integrator('zvode', method='bdf', with_jacobian=True) +>>> r.set_initial_value(y0, t0).set_f_params(2.0).set_jac_params(2.0) +>>> t1 = 10 +>>> dt = 1 +>>> while r.successful() and r.t < t1: +>>> r.integrate(r.t+dt) +>>> print r.t, r.y + +""" + + __doc__ += integrator_info + + def __init__(self, f, jac=None): """ + Define equation y' = f(y,t) where (optional) jac = df/dy. + + Parameters + ---------- + f : f(t, y, *f_args) + Rhs of the equation. t is a scalar, y.shape == (n,). 
+ f_args is set by calling set_f_params(*args) + jac : jac(t, y, *jac_args) + Jacobian of the rhs, jac[i,j] = d f[i] / d y[j] + jac_args is set by calling set_f_params(*args) + """ self.stiff = 0 self.f = f self.jac = jac @@ -142,20 +210,29 @@ self.jac_params = () self.y = [] - def set_initial_value(self,y,t=0.0): + def set_initial_value(self, y, t=0.0): """Set initial conditions y(t) = y.""" if isscalar(y): y = [y] n_prev = len(self.y) - self.y = asarray(y, float) - self.t = t if not n_prev: self.set_integrator('') # find first available integrator + self.y = asarray(y, self._integrator.scalar) + self.t = t self._integrator.reset(len(self.y),self.jac is not None) return self - def set_integrator(self,name,**integrator_params): - """Set integrator by name.""" + def set_integrator(self, name, **integrator_params): + """ + Set integrator by name. + + Parameters + ---------- + name : str + Name of the integrator + integrator_params + Additional parameters for the integrator. + """ integrator = find_integrator(name) if integrator is None: print 'No integrator name match with %s or is not available.'\ @@ -164,11 +241,11 @@ self._integrator = integrator(**integrator_params) if not len(self.y): self.t = 0.0 - self.y = array([0.0], float) + self.y = array([0.0], self._integrator.scalar) self._integrator.reset(len(self.y),self.jac is not None) return self - def integrate(self,t,step=0,relax=0): + def integrate(self, t, step=0, relax=0): """Find y=y(t), set y as an initial condition, and return y.""" if step and self._integrator.supports_step: mth = self._integrator.step @@ -188,23 +265,22 @@ return self._integrator.success==1 def set_f_params(self,*args): - """Set extra-parameters for user-supplied function f.""" + """Set extra parameters for user-supplied function f.""" self.f_params = args return self def set_jac_params(self,*args): - """Set extra-parameters for user-supplied function jac.""" + """Set extra parameters for user-supplied function jac.""" self.jac_params = args return self -############################################################# -#### Nothing interesting for an end-user in what follows #### -############################################################# +#------------------------------------------------------------------------------ +# ODE integrators +#------------------------------------------------------------------------------ def find_integrator(name): for cl in IntegratorBase.integrator_classes: if re.match(name,cl.__name__,re.I): - print 'Found integrator',cl.__name__ return cl return @@ -215,6 +291,7 @@ supports_run_relax = None supports_step = None integrator_classes = [] + scalar = float def reset(self,n,has_jac): """Prepare integrator for call: allocate memory, set flags, etc. @@ -379,47 +456,107 @@ IntegratorBase.integrator_classes.append(vode) -def test1(): - def f(t,y): - a = sin(6*t) - return y*y-a+y +class zvode(vode): + try: + import vode as _vode + except ImportError: + print sys.exc_value + _vode = None + runner = getattr(_vode,'zvode',None) - ode_runner = ode(f) - ode_runner.set_integrator('vode') - ode_runner.set_initial_value([0.1,0.11,.1]*10) + supports_run_relax = 1 + supports_step = 1 + scalar = complex - while ode_runner.successful() and ode_runner.t < 50: - y1 = ode_runner.integrate(ode_runner.t+2) - print ode_runner.t,y1[:3] + def reset(self, n, has_jac): + # Calculate parameters for Fortran subroutine dvode. 
+ if has_jac: + if self.mu is None and self.ml is None: + miter = 1 + else: + if self.mu is None: self.mu = 0 + if self.ml is None: self.ml = 0 + miter = 4 + else: + if self.mu is None and self.ml is None: + if self.with_jacobian: + miter = 2 + else: + miter = 0 + else: + if self.mu is None: self.mu = 0 + if self.ml is None: self.ml = 0 + if self.ml==self.mu==0: + miter = 3 + else: + miter = 5 -def test2(): - # Stiff problem. Requires analytic Jacobian. - def f(t,y): - ydot0 = -0.04*y[0] + 1e4*y[1]*y[2] - ydot2 = 3e7*y[1]*y[1] - ydot1 = -ydot0-ydot2 - return [ydot0,ydot1,ydot2] - def jac(t,y): - jc = [[-0.04,1e4*y[2] ,1e4*y[1]], - [0.04 ,-1e4*y[2]-6e7*y[1],-1e4*y[1]], - [0.0 ,6e7*y[1] ,0.0]] - return jc - r = ode(f,jac).set_integrator('vode', - rtol=1e-4, - atol=[1e-8,1e-14,1e-6], - method='bdf', - ) - r.set_initial_value([1,0,0]) - print 'At t=%s y=%s'%(r.t,r.y) - tout = 0.4 - for i in range(12): - r.integrate(tout) - print 'At t=%s y=%s'%(r.t,r.y) - tout *= 10 + mf = 10*self.meth + miter -if __name__ == "__main__": - print 'Integrators available:',\ - ', '.join(map(lambda c:c.__name__, - IntegratorBase.integrator_classes)) - test1() - test2() + if mf in (10,): + lzw = 15*n + elif mf in (11, 12): + lzw = 15*n + 2*n**2 + elif mf in (-11, -12): + lzw = 15*n + n**2 + elif mf in (13,): + lzw = 16*n + elif mf in (14,15): + lzw = 17*n + (3*self.ml + 2*self.mu)*n + elif mf in (-14,-15): + lzw = 16*n + (2*self.ml + self.mu)*n + elif mf in (20,): + lzw = 8*n + elif mf in (21, 22): + lzw = 8*n + 2*n**2 + elif mf in (-21,-22): + lzw = 8*n + n**2 + elif mf in (23,): + lzw = 9*n + elif mf in (24, 25): + lzw = 10*n + (3*self.ml + 2*self.mu)*n + elif mf in (-24, -25): + lzw = 9*n + (2*self.ml + self.mu)*n + + lrw = 20 + n + + if miter in (0, 3): + liw = 30 + else: + liw = 30 + n + + zwork = zeros((lzw,), complex) + self.zwork = zwork + + rwork = zeros((lrw,), float) + rwork[4] = self.first_step + rwork[5] = self.max_step + rwork[6] = self.min_step + self.rwork = rwork + + iwork = zeros((liw,), int32) + if self.ml is not None: + iwork[0] = self.ml + if self.mu is not None: + iwork[1] = self.mu + iwork[4] = self.order + iwork[5] = self.nsteps + iwork[6] = 2 # mxhnil + self.iwork = iwork + + self.call_args = [self.rtol,self.atol,1,1, + self.zwork,self.rwork,self.iwork,mf] + self.success = 1 + + def run(self,*args): + y1,t,istate = self.runner(*(args[:5]+tuple(self.call_args)+args[5:])) + if istate < 0: + print 'zvode:', self.messages.get(istate, + 'Unexpected istate=%s'%istate) + self.success = 0 + else: + self.call_args[3] = 2 # upgrade istate from 1 to 2 + return y1, t + +if zvode.runner: + IntegratorBase.integrator_classes.append(zvode) Modified: trunk/scipy/integrate/tests/test_integrate.py =================================================================== --- trunk/scipy/integrate/tests/test_integrate.py 2008-02-20 01:35:04 UTC (rev 3949) +++ trunk/scipy/integrate/tests/test_integrate.py 2008-02-20 05:27:16 UTC (rev 3950) @@ -1,52 +1,146 @@ -#!/usr/bin/env python - -# Test provided by Nils Wagner. -# File created by Ed Schofield on Nov 16. - -""" Tests for numerical integration. +# Authors: Nils Wagner, Ed Schofield, Pauli Virtanen """ +Tests for numerical integration. 
+""" import numpy -from numpy import arange, zeros, array, dot, sqrt, cos, sin +from numpy import (arange, zeros, array, dot, sqrt, cos, sin, absolute, + eye, pi, exp, allclose) from scipy.linalg import norm + from scipy.testing import * -from scipy.integrate import odeint +from scipy.integrate import odeint, ode -class TestODEInt(TestCase): - """ Test odeint: free vibration of a simple oscillator +#------------------------------------------------------------------------------ +# Test ODE integrators +#------------------------------------------------------------------------------ + +class TestOdeint(TestCase): + """ + Check integrate.odeint + """ + def _do_problem(self, problem): + t = arange(0.0, problem.stop_t, 0.05) + z, infodict = odeint(problem.f, problem.z0, t, full_output=True) + assert problem.verify(z, t) + + def test_odeint(self): + for problem_cls in PROBLEMS: + problem = problem_cls() + if problem.cmplx: continue + self._do_problem(problem) + +class TestOde(TestCase): + """ + Check integrate.ode + """ + def _do_problem(self, problem, integrator, method='adams'): + + # ode has callback arguments in different order than odeint + f = lambda t, z: problem.f(z, t) + jac = None + if hasattr(problem, 'jac'): + jac = lambda t, z: problem.jac(z, t) + + ig = ode(f, jac) + ig.set_integrator(integrator, + atol=problem.atol/10, + rtol=problem.rtol/10, + method=method) + ig.set_initial_value(problem.z0, t=0.0) + z = ig.integrate(problem.stop_t) + + assert ig.successful(), (problem, method) + assert problem.verify(array([z]), problem.stop_t), (problem, method) + + def test_vode(self): + """Check the vode solver""" + for problem_cls in PROBLEMS: + problem = problem_cls() + if problem.cmplx: continue + if not problem.stiff: + self._do_problem(problem, 'vode', 'adams') + self._do_problem(problem, 'vode', 'bdf') + + def test_zvode(self): + """Check the zvode solver""" + for problem_cls in PROBLEMS: + problem = problem_cls() + if not problem.stiff: + self._do_problem(problem, 'zvode', 'adams') + self._do_problem(problem, 'zvode', 'bdf') + +#------------------------------------------------------------------------------ +# Test problems +#------------------------------------------------------------------------------ + +class ODE: + """ + ODE problem + """ + stiff = False + cmplx = False + stop_t = 1 + z0 = [] + + atol = 1e-6 + rtol = 1e-5 + +class SimpleOscillator(ODE): + r""" + Free vibration of a simple oscillator:: m \ddot{u} + k u = 0, u(0) = u_0 \dot{u}(0) \dot{u}_0 - - Solution: + Solution:: u(t) = u_0*cos(sqrt(k/m)*t)+\dot{u}_0*sin(sqrt(k/m)*t)/sqrt(k/m) """ + stop_t = 1 + 0.09 + z0 = array([1.0, 0.1], float) - def setUp(self): - self.k = 4.0 - self.m = 1.0 + k = 4.0 + m = 1.0 - def F(self, z, t): + def f(self, z, t): tmp = zeros((2,2), float) tmp[0,1] = 1.0 tmp[1,0] = -self.k / self.m - return dot(tmp,z) + return dot(tmp, z) - def test_odeint1(self): + def verify(self, zs, t): omega = sqrt(self.k / self.m) - z0 = zeros(2, float) - z0[0] = 1.0 # initial displacement - z0[1] = 0.1 # initial velocity - t = arange(0.0, 1+0.09, 0.1) + u = self.z0[0]*cos(omega*t)+self.z0[1]*sin(omega*t)/omega + return allclose(u, zs[:,0], atol=self.atol, rtol=self.rtol) - # Analytical solution - # - u = z0[0]*cos(omega*t)+z0[1]*sin(omega*t)/omega +class ComplexExp(ODE): + r"""The equation :lm:`\dot u = i u`""" + stop_t = 1.23*pi + z0 = exp([1j,2j,3j,4j,5j]) + cmplx = True - # Numerical solution - z, infodict = odeint(self.F, z0, t, full_output=True) + def f(self, z, t): + return 1j*z - res = norm(u - z[:,0]) - 
print 'Residual:', res - assert res < 1.0e-6 + def jac(self, z, t): + return 1j*eye(5) + def verify(self, zs, t): + u = self.z0 * exp(1j*t) + return allclose(u, zs, atol=self.atol, rtol=self.rtol) + +class Pi(ODE): + r"""Integrate 1/(t + 1j) from t=-10 to t=10""" + stop_t = 20 + z0 = [0] + cmplx = True + + def f(self, z, t): + return array([1./(t - 10 + 1j)]) + def verify(self, zs, t): + u = -2j*numpy.arctan(10) + return allclose(u, zs[-1,:], atol=self.atol, rtol=self.rtol) + +PROBLEMS = [SimpleOscillator, ComplexExp, Pi] + +#------------------------------------------------------------------------------ + if __name__ == "__main__": nose.run(argv=['', __file__]) Modified: trunk/scipy/integrate/vode.pyf =================================================================== --- trunk/scipy/integrate/vode.pyf 2008-02-20 01:35:04 UTC (rev 3949) +++ trunk/scipy/integrate/vode.pyf 2008-02-20 05:27:16 UTC (rev 3950) @@ -25,12 +25,35 @@ end subroutine jac end interface end python module dvode__user__routines + +python module zvode__user__routines + interface zvode_user_interface + subroutine f(n,t,y,ydot,rpar,ipar) + integer intent(hide) :: n + double precision intent(in) :: t + double complex dimension(n),intent(in,c) :: y + double complex dimension(n),intent(out,c) :: ydot + double precision intent(hide) :: rpar + integer intent(hide) :: ipar + end subroutine f + subroutine jac(n,t,y,ml,mu,jac,nrowpd,rpar,ipar) + integer intent(hide) :: n + double precision :: t + double complex dimension(n),intent(c,in) :: y + integer intent(hide) :: ml,mu + integer intent(hide):: nrowpd + double complex intent(out) :: jac(nrowpd, n) + double precision intent(hide) :: rpar + integer intent(hide) :: ipar + end subroutine jac + end interface +end python module zvode__user__routines python module vode interface subroutine dvode(f,jac,neq,y,t,tout,itol,rtol,atol,itask,istate,iopt,rwork,lrw,iwork,liw,mf,rpar,ipar) ! y1,t,istate = dvode(f,jac,y0,t0,t1,rtol,atol,itask,istate,rwork,iwork,mf) - callstatement (*f2py_func)(cb_f_in_dvode__user__routines,&neq,y,&t,&tout,&itol,&rtol,atol,&itask,&istate,&iopt,rwork,&lrw,iwork,&liw,cb_jac_in_dvode__user__routines,&mf,&rpar,&ipar) + callstatement (*f2py_func)(cb_f_in_dvode__user__routines,&neq,y,&t,&tout,&itol,rtol,atol,&itask,&istate,&iopt,rwork,&lrw,iwork,&liw,cb_jac_in_dvode__user__routines,&mf,&rpar,&ipar) use dvode__user__routines external f external jac @@ -56,4 +79,36 @@ integer intent(hide) :: ipar = 0 end subroutine dvode end interface + + interface + subroutine zvode(f,jac,neq,y,t,tout,itol,rtol,atol,itask,istate,iopt,zwork,lzw,rwork,lrw,iwork,liw,mf,rpar,ipar) + ! 
y1,t,istate = zvode(f,jac,y0,t0,t1,rtol,atol,itask,istate,rwork,iwork,mf) + callstatement (*f2py_func)(cb_f_in_zvode__user__routines,&neq,y,&t,&tout,&itol,rtol,atol,&itask,&istate,&iopt,zwork,&lzw,rwork,&lrw,iwork,&liw,cb_jac_in_zvode__user__routines,&mf,&rpar,&ipar) + use zvode__user__routines + external f + external jac + + integer intent(hide),depend(y) :: neq = len(y) + double complex dimension(neq),intent(in,out,copy) :: y + double precision intent(in,out):: t + double precision intent(in):: tout + integer intent(hide),depend(atol) :: itol = (len(atol)<=1 && len(rtol)<=1?1:(len(rtol)<=1?2:(len(atol)<=1?3:4))) + double precision dimension(*),intent(in),check(len(atol)<& + &=1||len(atol)>=neq),depend(neq) :: atol + double precision dimension(*),intent(in),check(len(rtol)<& + &=1||len(rtol)>=neq),depend(neq) :: rtol + integer intent(in),check(itask>0 && itask<6) :: itask + integer intent(in,out),check(istate>0 && istate<4) :: istate + integer intent(hide) :: iopt = 1 + double complex dimension(lzw),intent(in,cache) :: zwork + integer intent(hide),check(len(zwork)>=lzw),depend(zwork) :: lzw=len(zwork) + double precision dimension(lrw),intent(in,cache) :: rwork + integer intent(hide),check(len(rwork)>=lrw),depend(rwork) :: lrw=len(rwork) + integer dimension(liw),intent(in,cache) :: iwork + integer intent(hide),check(len(iwork)>=liw),depend(iwork) :: liw=len(iwork) + integer intent(in) :: mf + double precision intent(hide) :: rpar = 0.0 + integer intent(hide) :: ipar = 0 + end subroutine zvode + end interface end python module vode From scipy-svn at scipy.org Wed Feb 20 00:39:10 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Tue, 19 Feb 2008 23:39:10 -0600 (CST) Subject: [Scipy-svn] r3951 - in trunk/scipy/integrate: linpack_lite odepack Message-ID: <20080220053910.CFE0539C10E@new.scipy.org> Author: oliphant Date: 2008-02-19 23:39:03 -0600 (Tue, 19 Feb 2008) New Revision: 3951 Added: trunk/scipy/integrate/linpack_lite/zgbfa.f trunk/scipy/integrate/linpack_lite/zgbsl.f trunk/scipy/integrate/linpack_lite/zgefa.f trunk/scipy/integrate/linpack_lite/zgesl.f trunk/scipy/integrate/odepack/zvode.f Log: Add missing pieces from ticket #334 Added: trunk/scipy/integrate/linpack_lite/zgbfa.f =================================================================== --- trunk/scipy/integrate/linpack_lite/zgbfa.f 2008-02-20 05:27:16 UTC (rev 3950) +++ trunk/scipy/integrate/linpack_lite/zgbfa.f 2008-02-20 05:39:03 UTC (rev 3951) @@ -0,0 +1,181 @@ + subroutine zgbfa(abd,lda,n,ml,mu,ipvt,info) + integer lda,n,ml,mu,ipvt(1),info + complex*16 abd(lda,1) +c +c zgbfa factors a complex*16 band matrix by elimination. +c +c zgbfa is usually called by zgbco, but it can be called +c directly with a saving in time if rcond is not needed. +c +c on entry +c +c abd complex*16(lda, n) +c contains the matrix in band storage. the columns +c of the matrix are stored in the columns of abd and +c the diagonals of the matrix are stored in rows +c ml+1 through 2*ml+mu+1 of abd . +c see the comments below for details. +c +c lda integer +c the leading dimension of the array abd . +c lda must be .ge. 2*ml + mu + 1 . +c +c n integer +c the order of the original matrix. +c +c ml integer +c number of diagonals below the main diagonal. +c 0 .le. ml .lt. n . +c +c mu integer +c number of diagonals above the main diagonal. +c 0 .le. mu .lt. n . +c more efficient if ml .le. mu . +c on return +c +c abd an upper triangular matrix in band storage and +c the multipliers which were used to obtain it. 
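For orientation, the band-storage convention that zgbfa expects is spelled out in the comment block continuing below; a Python transcription of that layout (0-based indices, helper name of my own) would be roughly:

    import numpy as np

    def to_band_storage(a, ml, mu):
        # Pack a square matrix a into the 2*ml+mu+1 by n array zgbfa works on.
        # Only rows ml .. 2*ml+mu are filled here; the first ml rows are
        # workspace for fill-in generated during the factorization.
        n = a.shape[0]
        abd = np.zeros((2 * ml + mu + 1, n), dtype=a.dtype)
        m = ml + mu                      # 0-based row of the main diagonal
        for j in range(n):
            for i in range(max(0, j - mu), min(n, j + ml + 1)):
                abd[i - j + m, j] = a[i, j]
        return abd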
+c the factorization can be written a = l*u where +c l is a product of permutation and unit lower +c triangular matrices and u is upper triangular. +c +c ipvt integer(n) +c an integer vector of pivot indices. +c +c info integer +c = 0 normal value. +c = k if u(k,k) .eq. 0.0 . this is not an error +c condition for this subroutine, but it does +c indicate that zgbsl will divide by zero if +c called. use rcond in zgbco for a reliable +c indication of singularity. +c +c band storage +c +c if a is a band matrix, the following program segment +c will set up the input. +c +c ml = (band width below the diagonal) +c mu = (band width above the diagonal) +c m = ml + mu + 1 +c do 20 j = 1, n +c i1 = max0(1, j-mu) +c i2 = min0(n, j+ml) +c do 10 i = i1, i2 +c k = i - j + m +c abd(k,j) = a(i,j) +c 10 continue +c 20 continue +c +c this uses rows ml+1 through 2*ml+mu+1 of abd . +c in addition, the first ml rows in abd are used for +c elements generated during the triangularization. +c the total number of rows needed in abd is 2*ml+mu+1 . +c the ml+mu by ml+mu upper left triangle and the +c ml by ml lower right triangle are not referenced. +c +c linpack. this version dated 08/14/78 . +c cleve moler, university of new mexico, argonne national lab. +c +c subroutines and functions +c +c blas zaxpy,zscal,izamax +c fortran dabs,max0,min0 +c +c internal variables +c + complex*16 t + integer i,izamax,i0,j,ju,jz,j0,j1,k,kp1,l,lm,m,mm,nm1 +c + complex*16 zdum + double precision cabs1 + double precision dreal,dimag + complex*16 zdumr,zdumi + dreal(zdumr) = zdumr + dimag(zdumi) = (0.0d0,-1.0d0)*zdumi + cabs1(zdum) = dabs(dreal(zdum)) + dabs(dimag(zdum)) +c + m = ml + mu + 1 + info = 0 +c +c zero initial fill-in columns +c + j0 = mu + 2 + j1 = min0(n,m) - 1 + if (j1 .lt. j0) go to 30 + do 20 jz = j0, j1 + i0 = m + 1 - jz + do 10 i = i0, ml + abd(i,jz) = (0.0d0,0.0d0) + 10 continue + 20 continue + 30 continue + jz = j1 + ju = 0 +c +c gaussian elimination with partial pivoting +c + nm1 = n - 1 + if (nm1 .lt. 1) go to 130 + do 120 k = 1, nm1 + kp1 = k + 1 +c +c zero next fill-in column +c + jz = jz + 1 + if (jz .gt. n) go to 50 + if (ml .lt. 1) go to 50 + do 40 i = 1, ml + abd(i,jz) = (0.0d0,0.0d0) + 40 continue + 50 continue +c +c find l = pivot index +c + lm = min0(ml,n-k) + l = izamax(lm+1,abd(m,k),1) + m - 1 + ipvt(k) = l + k - m +c +c zero pivot implies this column already triangularized +c + if (cabs1(abd(l,k)) .eq. 0.0d0) go to 100 +c +c interchange if necessary +c + if (l .eq. m) go to 60 + t = abd(l,k) + abd(l,k) = abd(m,k) + abd(m,k) = t + 60 continue +c +c compute multipliers +c + t = -(1.0d0,0.0d0)/abd(m,k) + call zscal(lm,t,abd(m+1,k),1) +c +c row elimination with column indexing +c + ju = min0(max0(ju,mu+ipvt(k)),n) + mm = m + if (ju .lt. kp1) go to 90 + do 80 j = kp1, ju + l = l - 1 + mm = mm - 1 + t = abd(l,j) + if (l .eq. mm) go to 70 + abd(l,j) = abd(mm,j) + abd(mm,j) = t + 70 continue + call zaxpy(lm,t,abd(m+1,k),1,abd(mm+1,j),1) + 80 continue + 90 continue + go to 110 + 100 continue + info = k + 110 continue + 120 continue + 130 continue + ipvt(n) = n + if (cabs1(abd(m,n)) .eq. 
0.0d0) info = n + return + end Property changes on: trunk/scipy/integrate/linpack_lite/zgbfa.f ___________________________________________________________________ Name: svn:eol-style + native Added: trunk/scipy/integrate/linpack_lite/zgbsl.f =================================================================== --- trunk/scipy/integrate/linpack_lite/zgbsl.f 2008-02-20 05:27:16 UTC (rev 3950) +++ trunk/scipy/integrate/linpack_lite/zgbsl.f 2008-02-20 05:39:03 UTC (rev 3951) @@ -0,0 +1,139 @@ + subroutine zgbsl(abd,lda,n,ml,mu,ipvt,b,job) + integer lda,n,ml,mu,ipvt(1),job + complex*16 abd(lda,1),b(1) +c +c zgbsl solves the complex*16 band system +c a * x = b or ctrans(a) * x = b +c using the factors computed by zgbco or zgbfa. +c +c on entry +c +c abd complex*16(lda, n) +c the output from zgbco or zgbfa. +c +c lda integer +c the leading dimension of the array abd . +c +c n integer +c the order of the original matrix. +c +c ml integer +c number of diagonals below the main diagonal. +c +c mu integer +c number of diagonals above the main diagonal. +c +c ipvt integer(n) +c the pivot vector from zgbco or zgbfa. +c +c b complex*16(n) +c the right hand side vector. +c +c job integer +c = 0 to solve a*x = b , +c = nonzero to solve ctrans(a)*x = b , where +c ctrans(a) is the conjugate transpose. +c +c on return +c +c b the solution vector x . +c +c error condition +c +c a division by zero will occur if the input factor contains a +c zero on the diagonal. technically this indicates singularity +c but it is often caused by improper arguments or improper +c setting of lda . it will not occur if the subroutines are +c called correctly and if zgbco has set rcond .gt. 0.0 +c or zgbfa has set info .eq. 0 . +c +c to compute inverse(a) * c where c is a matrix +c with p columns +c call zgbco(abd,lda,n,ml,mu,ipvt,rcond,z) +c if (rcond is too small) go to ... +c do 10 j = 1, p +c call zgbsl(abd,lda,n,ml,mu,ipvt,c(1,j),0) +c 10 continue +c +c linpack. this version dated 08/14/78 . +c cleve moler, university of new mexico, argonne national lab. +c +c subroutines and functions +c +c blas zaxpy,zdotc +c fortran dconjg,min0 +c +c internal variables +c + complex*16 zdotc,t + integer k,kb,l,la,lb,lm,m,nm1 + double precision dreal,dimag + complex*16 zdumr,zdumi + dreal(zdumr) = zdumr + dimag(zdumi) = (0.0d0,-1.0d0)*zdumi +c + m = mu + ml + 1 + nm1 = n - 1 + if (job .ne. 0) go to 50 +c +c job = 0 , solve a * x = b +c first solve l*y = b +c + if (ml .eq. 0) go to 30 + if (nm1 .lt. 1) go to 30 + do 20 k = 1, nm1 + lm = min0(ml,n-k) + l = ipvt(k) + t = b(l) + if (l .eq. k) go to 10 + b(l) = b(k) + b(k) = t + 10 continue + call zaxpy(lm,t,abd(m+1,k),1,b(k+1),1) + 20 continue + 30 continue +c +c now solve u*x = y +c + do 40 kb = 1, n + k = n + 1 - kb + b(k) = b(k)/abd(m,k) + lm = min0(k,m) - 1 + la = m - lm + lb = k - lm + t = -b(k) + call zaxpy(lm,t,abd(la,k),1,b(lb),1) + 40 continue + go to 100 + 50 continue +c +c job = nonzero, solve ctrans(a) * x = b +c first solve ctrans(u)*y = b +c + do 60 k = 1, n + lm = min0(k,m) - 1 + la = m - lm + lb = k - lm + t = zdotc(lm,abd(la,k),1,b(lb),1) + b(k) = (b(k) - t)/dconjg(abd(m,k)) + 60 continue +c +c now solve ctrans(l)*x = y +c + if (ml .eq. 0) go to 90 + if (nm1 .lt. 1) go to 90 + do 80 kb = 1, nm1 + k = n - kb + lm = min0(ml,n-k) + b(k) = b(k) + zdotc(lm,abd(m+1,k),1,b(k+1),1) + l = ipvt(k) + if (l .eq. 
k) go to 70 + t = b(l) + b(l) = b(k) + b(k) = t + 70 continue + 80 continue + 90 continue + 100 continue + return + end Property changes on: trunk/scipy/integrate/linpack_lite/zgbsl.f ___________________________________________________________________ Name: svn:eol-style + native Added: trunk/scipy/integrate/linpack_lite/zgefa.f =================================================================== --- trunk/scipy/integrate/linpack_lite/zgefa.f 2008-02-20 05:27:16 UTC (rev 3950) +++ trunk/scipy/integrate/linpack_lite/zgefa.f 2008-02-20 05:39:03 UTC (rev 3951) @@ -0,0 +1,111 @@ + subroutine zgefa(a,lda,n,ipvt,info) + integer lda,n,ipvt(1),info + complex*16 a(lda,1) +c +c zgefa factors a complex*16 matrix by gaussian elimination. +c +c zgefa is usually called by zgeco, but it can be called +c directly with a saving in time if rcond is not needed. +c (time for zgeco) = (1 + 9/n)*(time for zgefa) . +c +c on entry +c +c a complex*16(lda, n) +c the matrix to be factored. +c +c lda integer +c the leading dimension of the array a . +c +c n integer +c the order of the matrix a . +c +c on return +c +c a an upper triangular matrix and the multipliers +c which were used to obtain it. +c the factorization can be written a = l*u where +c l is a product of permutation and unit lower +c triangular matrices and u is upper triangular. +c +c ipvt integer(n) +c an integer vector of pivot indices. +c +c info integer +c = 0 normal value. +c = k if u(k,k) .eq. 0.0 . this is not an error +c condition for this subroutine, but it does +c indicate that zgesl or zgedi will divide by zero +c if called. use rcond in zgeco for a reliable +c indication of singularity. +c +c linpack. this version dated 08/14/78 . +c cleve moler, university of new mexico, argonne national lab. +c +c subroutines and functions +c +c blas zaxpy,zscal,izamax +c fortran dabs +c +c internal variables +c + complex*16 t + integer izamax,j,k,kp1,l,nm1 +c + complex*16 zdum + double precision cabs1 + double precision dreal,dimag + complex*16 zdumr,zdumi + dreal(zdumr) = zdumr + dimag(zdumi) = (0.0d0,-1.0d0)*zdumi + cabs1(zdum) = dabs(dreal(zdum)) + dabs(dimag(zdum)) +c +c gaussian elimination with partial pivoting +c + info = 0 + nm1 = n - 1 + if (nm1 .lt. 1) go to 70 + do 60 k = 1, nm1 + kp1 = k + 1 +c +c find l = pivot index +c + l = izamax(n-k+1,a(k,k),1) + k - 1 + ipvt(k) = l +c +c zero pivot implies this column already triangularized +c + if (cabs1(a(l,k)) .eq. 0.0d0) go to 40 +c +c interchange if necessary +c + if (l .eq. k) go to 10 + t = a(l,k) + a(l,k) = a(k,k) + a(k,k) = t + 10 continue +c +c compute multipliers +c + t = -(1.0d0,0.0d0)/a(k,k) + call zscal(n-k,t,a(k+1,k),1) +c +c row elimination with column indexing +c + do 30 j = kp1, n + t = a(l,j) + if (l .eq. k) go to 20 + a(l,j) = a(k,j) + a(k,j) = t + 20 continue + call zaxpy(n-k,t,a(k+1,k),1,a(k+1,j),1) + 30 continue + go to 50 + 40 continue + info = k + 50 continue + 60 continue + 70 continue + ipvt(n) = n + if (cabs1(a(n,n)) .eq. 
0.0d0) info = n + return + end Property changes on: trunk/scipy/integrate/linpack_lite/zgefa.f ___________________________________________________________________ Name: svn:eol-style + native Added: trunk/scipy/integrate/linpack_lite/zgesl.f =================================================================== --- trunk/scipy/integrate/linpack_lite/zgesl.f 2008-02-20 05:27:16 UTC (rev 3950) +++ trunk/scipy/integrate/linpack_lite/zgesl.f 2008-02-20 05:39:03 UTC (rev 3951) @@ -0,0 +1,122 @@ + subroutine zgesl(a,lda,n,ipvt,b,job) + integer lda,n,ipvt(1),job + complex*16 a(lda,1),b(1) +c +c zgesl solves the complex*16 system +c a * x = b or ctrans(a) * x = b +c using the factors computed by zgeco or zgefa. +c +c on entry +c +c a complex*16(lda, n) +c the output from zgeco or zgefa. +c +c lda integer +c the leading dimension of the array a . +c +c n integer +c the order of the matrix a . +c +c ipvt integer(n) +c the pivot vector from zgeco or zgefa. +c +c b complex*16(n) +c the right hand side vector. +c +c job integer +c = 0 to solve a*x = b , +c = nonzero to solve ctrans(a)*x = b where +c ctrans(a) is the conjugate transpose. +c +c on return +c +c b the solution vector x . +c +c error condition +c +c a division by zero will occur if the input factor contains a +c zero on the diagonal. technically this indicates singularity +c but it is often caused by improper arguments or improper +c setting of lda . it will not occur if the subroutines are +c called correctly and if zgeco has set rcond .gt. 0.0 +c or zgefa has set info .eq. 0 . +c +c to compute inverse(a) * c where c is a matrix +c with p columns +c call zgeco(a,lda,n,ipvt,rcond,z) +c if (rcond is too small) go to ... +c do 10 j = 1, p +c call zgesl(a,lda,n,ipvt,c(1,j),0) +c 10 continue +c +c linpack. this version dated 08/14/78 . +c cleve moler, university of new mexico, argonne national lab. +c +c subroutines and functions +c +c blas zaxpy,zdotc +c fortran dconjg +c +c internal variables +c + complex*16 zdotc,t + integer k,kb,l,nm1 + double precision dreal,dimag + complex*16 zdumr,zdumi + dreal(zdumr) = zdumr + dimag(zdumi) = (0.0d0,-1.0d0)*zdumi +c + nm1 = n - 1 + if (job .ne. 0) go to 50 +c +c job = 0 , solve a * x = b +c first solve l*y = b +c + if (nm1 .lt. 1) go to 30 + do 20 k = 1, nm1 + l = ipvt(k) + t = b(l) + if (l .eq. k) go to 10 + b(l) = b(k) + b(k) = t + 10 continue + call zaxpy(n-k,t,a(k+1,k),1,b(k+1),1) + 20 continue + 30 continue +c +c now solve u*x = y +c + do 40 kb = 1, n + k = n + 1 - kb + b(k) = b(k)/a(k,k) + t = -b(k) + call zaxpy(k-1,t,a(1,k),1,b(1),1) + 40 continue + go to 100 + 50 continue +c +c job = nonzero, solve ctrans(a) * x = b +c first solve ctrans(u)*y = b +c + do 60 k = 1, n + t = zdotc(k-1,a(1,k),1,b(1),1) + b(k) = (b(k) - t)/dconjg(a(k,k)) + 60 continue +c +c now solve ctrans(l)*x = y +c + if (nm1 .lt. 1) go to 90 + do 80 kb = 1, nm1 + k = n - kb + b(k) = b(k) + zdotc(n-k,a(k+1,k),1,b(k+1),1) + l = ipvt(k) + if (l .eq. 
k) go to 70 + t = b(l) + b(l) = b(k) + b(k) = t + 70 continue + 80 continue + 90 continue + 100 continue + return + end Property changes on: trunk/scipy/integrate/linpack_lite/zgesl.f ___________________________________________________________________ Name: svn:eol-style + native Added: trunk/scipy/integrate/odepack/zvode.f =================================================================== --- trunk/scipy/integrate/odepack/zvode.f 2008-02-20 05:27:16 UTC (rev 3950) +++ trunk/scipy/integrate/odepack/zvode.f 2008-02-20 05:39:03 UTC (rev 3951) @@ -0,0 +1,3650 @@ +*DECK ZVODE + SUBROUTINE ZVODE (F, NEQ, Y, T, TOUT, ITOL, RTOL, ATOL, ITASK, + 1 ISTATE, IOPT, ZWORK, LZW, RWORK, LRW, IWORK, LIW, + 2 JAC, MF, RPAR, IPAR) + EXTERNAL F, JAC + DOUBLE COMPLEX Y, ZWORK + DOUBLE PRECISION T, TOUT, RTOL, ATOL, RWORK + INTEGER NEQ, ITOL, ITASK, ISTATE, IOPT, LZW, LRW, IWORK, LIW, + 1 MF, IPAR + DIMENSION Y(*), RTOL(*), ATOL(*), ZWORK(LZW), RWORK(LRW), + 1 IWORK(LIW), RPAR(*), IPAR(*) +C----------------------------------------------------------------------- +C ZVODE: Variable-coefficient Ordinary Differential Equation solver, +C with fixed-leading-coefficient implementation. +C This version is in complex double precision. +C +C ZVODE solves the initial value problem for stiff or nonstiff +C systems of first order ODEs, +C dy/dt = f(t,y) , or, in component form, +C dy(i)/dt = f(i) = f(i,t,y(1),y(2),...,y(NEQ)) (i = 1,...,NEQ). +C Here the y vector is treated as complex. +C ZVODE is a package based on the EPISODE and EPISODEB packages, and +C on the ODEPACK user interface standard, with minor modifications. +C +C NOTE: When using ZVODE for a stiff system, it should only be used for +C the case in which the function f is analytic, that is, when each f(i) +C is an analytic function of each y(j). Analyticity means that the +C partial derivative df(i)/dy(j) is a unique complex number, and this +C fact is critical in the way ZVODE solves the dense or banded linear +C systems that arise in the stiff case. For a complex stiff ODE system +C in which f is not analytic, ZVODE is likely to have convergence +C failures, and for this problem one should instead use DVODE on the +C equivalent real system (in the real and imaginary parts of y). +C----------------------------------------------------------------------- +C Authors: +C Peter N. Brown and Alan C. Hindmarsh +C Center for Applied Scientific Computing +C Lawrence Livermore National Laboratory +C Livermore, CA 94551 +C and +C George D. Byrne (Prof. Emeritus) +C Illinois Institute of Technology +C Chicago, IL 60616 +C----------------------------------------------------------------------- +C For references, see DVODE. +C----------------------------------------------------------------------- +C Summary of usage. +C +C Communication between the user and the ZVODE package, for normal +C situations, is summarized here. This summary describes only a subset +C of the full set of options available. See the full description for +C details, including optional communication, nonstandard options, +C and instructions for special situations. See also the example +C problem (with program and output) following this summary. +C +C A. First provide a subroutine of the form: +C SUBROUTINE F (NEQ, T, Y, YDOT, RPAR, IPAR) +C DOUBLE COMPLEX Y(NEQ), YDOT(NEQ) +C DOUBLE PRECISION T +C which supplies the vector function f by loading YDOT(i) with f(i). +C +C B. Next determine (or guess) whether or not the problem is stiff. 
+C Stiffness occurs when the Jacobian matrix df/dy has an eigenvalue +C whose real part is negative and large in magnitude, compared to the +C reciprocal of the t span of interest. If the problem is nonstiff, +C use a method flag MF = 10. If it is stiff, there are four standard +C choices for MF (21, 22, 24, 25), and ZVODE requires the Jacobian +C matrix in some form. In these cases (MF .gt. 0), ZVODE will use a +C saved copy of the Jacobian matrix. If this is undesirable because of +C storage limitations, set MF to the corresponding negative value +C (-21, -22, -24, -25). (See full description of MF below.) +C The Jacobian matrix is regarded either as full (MF = 21 or 22), +C or banded (MF = 24 or 25). In the banded case, ZVODE requires two +C half-bandwidth parameters ML and MU. These are, respectively, the +C widths of the lower and upper parts of the band, excluding the main +C diagonal. Thus the band consists of the locations (i,j) with +C i-ML .le. j .le. i+MU, and the full bandwidth is ML+MU+1. +C +C C. If the problem is stiff, you are encouraged to supply the Jacobian +C directly (MF = 21 or 24), but if this is not feasible, ZVODE will +C compute it internally by difference quotients (MF = 22 or 25). +C If you are supplying the Jacobian, provide a subroutine of the form: +C SUBROUTINE JAC (NEQ, T, Y, ML, MU, PD, NROWPD, RPAR, IPAR) +C DOUBLE COMPLEX Y(NEQ), PD(NROWPD,NEQ) +C DOUBLE PRECISION T +C which supplies df/dy by loading PD as follows: +C For a full Jacobian (MF = 21), load PD(i,j) with df(i)/dy(j), +C the partial derivative of f(i) with respect to y(j). (Ignore the +C ML and MU arguments in this case.) +C For a banded Jacobian (MF = 24), load PD(i-j+MU+1,j) with +C df(i)/dy(j), i.e. load the diagonal lines of df/dy into the rows of +C PD from the top down. +C In either case, only nonzero elements need be loaded. +C +C D. Write a main program which calls subroutine ZVODE once for +C each point at which answers are desired. This should also provide +C for possible use of logical unit 6 for output of error messages +C by ZVODE. On the first call to ZVODE, supply arguments as follows: +C F = Name of subroutine for right-hand side vector f. +C This name must be declared external in calling program. +C NEQ = Number of first order ODEs. +C Y = Double complex array of initial values, of length NEQ. +C T = The initial value of the independent variable. +C TOUT = First point where output is desired (.ne. T). +C ITOL = 1 or 2 according as ATOL (below) is a scalar or array. +C RTOL = Relative tolerance parameter (scalar). +C ATOL = Absolute tolerance parameter (scalar or array). +C The estimated local error in Y(i) will be controlled so as +C to be roughly less (in magnitude) than +C EWT(i) = RTOL*abs(Y(i)) + ATOL if ITOL = 1, or +C EWT(i) = RTOL*abs(Y(i)) + ATOL(i) if ITOL = 2. +C Thus the local error test passes if, in each component, +C either the absolute error is less than ATOL (or ATOL(i)), +C or the relative error is less than RTOL. +C Use RTOL = 0.0 for pure absolute error control, and +C use ATOL = 0.0 (or ATOL(i) = 0.0) for pure relative error +C control. Caution: Actual (global) errors may exceed these +C local tolerances, so choose them conservatively. +C ITASK = 1 for normal computation of output values of Y at t = TOUT. +C ISTATE = Integer flag (input and output). Set ISTATE = 1. +C IOPT = 0 to indicate no optional input used. 
+C ZWORK = Double precision complex work array of length at least: +C 15*NEQ for MF = 10, +C 8*NEQ + 2*NEQ**2 for MF = 21 or 22, +C 10*NEQ + (3*ML + 2*MU)*NEQ for MF = 24 or 25. +C LZW = Declared length of ZWORK (in user's DIMENSION statement). +C RWORK = Real work array of length at least 20 + NEQ. +C LRW = Declared length of RWORK (in user's DIMENSION statement). +C IWORK = Integer work array of length at least: +C 30 for MF = 10, +C 30 + NEQ for MF = 21, 22, 24, or 25. +C If MF = 24 or 25, input in IWORK(1),IWORK(2) the lower +C and upper half-bandwidths ML,MU. +C LIW = Declared length of IWORK (in user's DIMENSION statement). +C JAC = Name of subroutine for Jacobian matrix (MF = 21 or 24). +C If used, this name must be declared external in calling +C program. If not used, pass a dummy name. +C MF = Method flag. Standard values are: +C 10 for nonstiff (Adams) method, no Jacobian used. +C 21 for stiff (BDF) method, user-supplied full Jacobian. +C 22 for stiff method, internally generated full Jacobian. +C 24 for stiff method, user-supplied banded Jacobian. +C 25 for stiff method, internally generated banded Jacobian. +C RPAR = user-defined real or complex array passed to F and JAC. +C IPAR = user-defined integer array passed to F and JAC. +C Note that the main program must declare arrays Y, ZWORK, RWORK, IWORK, +C and possibly ATOL, RPAR, and IPAR. RPAR may be declared REAL, DOUBLE, +C COMPLEX, or DOUBLE COMPLEX, depending on the user's needs. +C +C E. The output from the first call (or any call) is: +C Y = Array of computed values of y(t) vector. +C T = Corresponding value of independent variable (normally TOUT). +C ISTATE = 2 if ZVODE was successful, negative otherwise. +C -1 means excess work done on this call. (Perhaps wrong MF.) +C -2 means excess accuracy requested. (Tolerances too small.) +C -3 means illegal input detected. (See printed message.) +C -4 means repeated error test failures. (Check all input.) +C -5 means repeated convergence failures. (Perhaps bad +C Jacobian supplied or wrong choice of MF or tolerances.) +C -6 means error weight became zero during problem. (Solution +C component i vanished, and ATOL or ATOL(i) = 0.) +C +C F. To continue the integration after a successful return, simply +C reset TOUT and call ZVODE again. No other parameters need be reset. +C +C----------------------------------------------------------------------- +C EXAMPLE PROBLEM +C +C The program below uses ZVODE to solve the following system of 2 ODEs: +C dw/dt = -i*w*w*z, dz/dt = i*z; w(0) = 1/2.1, z(0) = 1; t = 0 to 2*pi. +C Solution: w = 1/(z + 1.1), z = exp(it). As z traces the unit circle, +C w traces a circle of radius 10/2.1 with center at 11/2.1. +C For convenience, Main passes RPAR = (imaginary unit i) to FEX and JEX. 
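+C As a quick check of the work array lengths quoted above for this
+C example: with NEQ = 2 and MF = 21, ZWORK needs at least
+C 8*NEQ + 2*NEQ**2 = 16 + 8 = 24 words, RWORK needs at least
+C 20 + NEQ = 22 words, and IWORK needs at least 30 + NEQ = 32 words,
+C which are exactly the lengths LZW = 24, LRW = 22, and LIW = 32
+C declared in the program below.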
+C +C EXTERNAL FEX, JEX +C DOUBLE COMPLEX Y(2), ZWORK(24), RPAR, WTRU, ERR +C DOUBLE PRECISION ABERR, AEMAX, ATOL, RTOL, RWORK(22), T, TOUT +C DIMENSION IWORK(32) +C NEQ = 2 +C Y(1) = 1.0D0/2.1D0 +C Y(2) = 1.0D0 +C T = 0.0D0 +C DTOUT = 0.1570796326794896D0 +C TOUT = DTOUT +C ITOL = 1 +C RTOL = 1.D-9 +C ATOL = 1.D-8 +C ITASK = 1 +C ISTATE = 1 +C IOPT = 0 +C LZW = 24 +C LRW = 22 +C LIW = 32 +C MF = 21 +C RPAR = DCMPLX(0.0D0,1.0D0) +C AEMAX = 0.0D0 +C WRITE(6,10) +C 10 FORMAT(' t',11X,'w',26X,'z') +C DO 40 IOUT = 1,40 +C CALL ZVODE(FEX,NEQ,Y,T,TOUT,ITOL,RTOL,ATOL,ITASK,ISTATE,IOPT, +C 1 ZWORK,LZW,RWORK,LRW,IWORK,LIW,JEX,MF,RPAR,IPAR) +C WTRU = 1.0D0/DCMPLX(COS(T) + 1.1D0, SIN(T)) +C ERR = Y(1) - WTRU +C ABERR = ABS(DREAL(ERR)) + ABS(DIMAG(ERR)) +C AEMAX = MAX(AEMAX,ABERR) +C WRITE(6,20) T, DREAL(Y(1)),DIMAG(Y(1)), DREAL(Y(2)),DIMAG(Y(2)) +C 20 FORMAT(F9.5,2X,2F12.7,3X,2F12.7) +C IF (ISTATE .LT. 0) THEN +C WRITE(6,30) ISTATE +C 30 FORMAT(//'***** Error halt. ISTATE =',I3) +C STOP +C ENDIF +C 40 TOUT = TOUT + DTOUT +C WRITE(6,50) IWORK(11), IWORK(12), IWORK(13), IWORK(20), +C 1 IWORK(21), IWORK(22), IWORK(23), AEMAX +C 50 FORMAT(/' No. steps =',I4,' No. f-s =',I5, +C 1 ' No. J-s =',I4,' No. LU-s =',I4/ +C 2 ' No. nonlinear iterations =',I4/ +C 3 ' No. nonlinear convergence failures =',I4/ +C 4 ' No. error test failures =',I4/ +C 5 ' Max. abs. error in w =',D10.2) +C STOP +C END +C +C SUBROUTINE FEX (NEQ, T, Y, YDOT, RPAR, IPAR) +C DOUBLE COMPLEX Y(NEQ), YDOT(NEQ), RPAR +C DOUBLE PRECISION T +C YDOT(1) = -RPAR*Y(1)*Y(1)*Y(2) +C YDOT(2) = RPAR*Y(2) +C RETURN +C END +C +C SUBROUTINE JEX (NEQ, T, Y, ML, MU, PD, NRPD, RPAR, IPAR) +C DOUBLE COMPLEX Y(NEQ), PD(NRPD,NEQ), RPAR +C DOUBLE PRECISION T +C PD(1,1) = -2.0D0*RPAR*Y(1)*Y(2) +C PD(1,2) = -RPAR*Y(1)*Y(1) +C PD(2,2) = RPAR +C RETURN +C END +C +C The output of this example program is as follows: +C +C t w z +C 0.15708 0.4763242 -0.0356919 0.9876884 0.1564345 +C 0.31416 0.4767322 -0.0718256 0.9510565 0.3090170 +C 0.47124 0.4774351 -0.1088651 0.8910065 0.4539906 +C 0.62832 0.4784699 -0.1473206 0.8090170 0.5877853 +C 0.78540 0.4798943 -0.1877789 0.7071067 0.7071069 +C 0.94248 0.4817938 -0.2309414 0.5877852 0.8090171 +C 1.09956 0.4842934 -0.2776778 0.4539904 0.8910066 +C 1.25664 0.4875766 -0.3291039 0.3090169 0.9510566 +C 1.41372 0.4919177 -0.3866987 0.1564343 0.9876884 +C 1.57080 0.4977376 -0.4524889 -0.0000001 1.0000000 +C 1.72788 0.5057044 -0.5293524 -0.1564346 0.9876883 +C 1.88496 0.5169274 -0.6215400 -0.3090171 0.9510565 +C 2.04204 0.5333540 -0.7356275 -0.4539906 0.8910065 +C 2.19911 0.5586542 -0.8823669 -0.5877854 0.8090169 +C 2.35619 0.6004188 -1.0806013 -0.7071069 0.7071067 +C 2.51327 0.6764486 -1.3664281 -0.8090171 0.5877851 +C 2.67035 0.8366909 -1.8175245 -0.8910066 0.4539904 +C 2.82743 1.2657121 -2.6260146 -0.9510566 0.3090168 +C 2.98451 3.0284506 -4.2182180 -0.9876884 0.1564343 +C 3.14159 10.0000699 0.0000663 -1.0000000 -0.0000002 +C 3.29867 3.0284170 4.2182053 -0.9876883 -0.1564346 +C 3.45575 1.2657041 2.6260067 -0.9510565 -0.3090172 +C 3.61283 0.8366878 1.8175205 -0.8910064 -0.4539907 +C 3.76991 0.6764469 1.3664259 -0.8090169 -0.5877854 +C 3.92699 0.6004178 1.0806000 -0.7071066 -0.7071069 +C 4.08407 0.5586535 0.8823662 -0.5877851 -0.8090171 +C 4.24115 0.5333535 0.7356271 -0.4539903 -0.8910066 +C 4.39823 0.5169271 0.6215398 -0.3090168 -0.9510566 +C 4.55531 0.5057041 0.5293523 -0.1564343 -0.9876884 +C 4.71239 0.4977374 0.4524890 0.0000002 -1.0000000 +C 4.86947 0.4919176 0.3866988 0.1564347 -0.9876883 +C 5.02655 0.4875765 0.3291040 
0.3090172 -0.9510564 +C 5.18363 0.4842934 0.2776780 0.4539907 -0.8910064 +C 5.34071 0.4817939 0.2309415 0.5877854 -0.8090169 +C 5.49779 0.4798944 0.1877791 0.7071069 -0.7071066 +C 5.65487 0.4784700 0.1473208 0.8090171 -0.5877850 +C 5.81195 0.4774352 0.1088652 0.8910066 -0.4539903 +C 5.96903 0.4767324 0.0718257 0.9510566 -0.3090168 +C 6.12611 0.4763244 0.0356920 0.9876884 -0.1564342 +C 6.28319 0.4761907 0.0000000 1.0000000 0.0000003 +C +C No. steps = 542 No. f-s = 610 No. J-s = 10 No. LU-s = 47 +C No. nonlinear iterations = 607 +C No. nonlinear convergence failures = 0 +C No. error test failures = 13 +C Max. abs. error in w = 0.13E-03 +C +C----------------------------------------------------------------------- +C Full description of user interface to ZVODE. +C +C The user interface to ZVODE consists of the following parts. +C +C i. The call sequence to subroutine ZVODE, which is a driver +C routine for the solver. This includes descriptions of both +C the call sequence arguments and of user-supplied routines. +C Following these descriptions is +C * a description of optional input available through the +C call sequence, +C * a description of optional output (in the work arrays), and +C * instructions for interrupting and restarting a solution. +C +C ii. Descriptions of other routines in the ZVODE package that may be +C (optionally) called by the user. These provide the ability to +C alter error message handling, save and restore the internal +C COMMON, and obtain specified derivatives of the solution y(t). +C +C iii. Descriptions of COMMON blocks to be declared in overlay +C or similar environments. +C +C iv. Description of two routines in the ZVODE package, either of +C which the user may replace with his own version, if desired. +C these relate to the measurement of errors. +C +C----------------------------------------------------------------------- +C Part i. Call Sequence. +C +C The call sequence parameters used for input only are +C F, NEQ, TOUT, ITOL, RTOL, ATOL, ITASK, IOPT, LRW, LIW, JAC, MF, +C and those used for both input and output are +C Y, T, ISTATE. +C The work arrays ZWORK, RWORK, and IWORK are also used for conditional +C and optional input and optional output. (The term output here refers +C to the return from subroutine ZVODE to the user's calling program.) +C +C The legality of input parameters will be thoroughly checked on the +C initial call for the problem, but not checked thereafter unless a +C change in input parameters is flagged by ISTATE = 3 in the input. +C +C The descriptions of the call arguments are as follows. +C +C F = The name of the user-supplied subroutine defining the +C ODE system. The system must be put in the first-order +C form dy/dt = f(t,y), where f is a vector-valued function +C of the scalar t and the vector y. Subroutine F is to +C compute the function f. It is to have the form +C SUBROUTINE F (NEQ, T, Y, YDOT, RPAR, IPAR) +C DOUBLE COMPLEX Y(NEQ), YDOT(NEQ) +C DOUBLE PRECISION T +C where NEQ, T, and Y are input, and the array YDOT = f(t,y) +C is output. Y and YDOT are double complex arrays of length +C NEQ. Subroutine F should not alter Y(1),...,Y(NEQ). +C F must be declared EXTERNAL in the calling program. +C +C Subroutine F may access user-defined real/complex and +C integer work arrays RPAR and IPAR, which are to be +C dimensioned in the calling program. +C +C If quantities computed in the F routine are needed +C externally to ZVODE, an extra call to F should be made +C for this purpose, for consistent and accurate results. 
+C If only the derivative dy/dt is needed, use ZVINDY instead. +C +C NEQ = The size of the ODE system (number of first order +C ordinary differential equations). Used only for input. +C NEQ may not be increased during the problem, but +C can be decreased (with ISTATE = 3 in the input). +C +C Y = A double precision complex array for the vector of dependent +C variables, of length NEQ or more. Used for both input and +C output on the first call (ISTATE = 1), and only for output +C on other calls. On the first call, Y must contain the +C vector of initial values. In the output, Y contains the +C computed solution evaluated at T. If desired, the Y array +C may be used for other purposes between calls to the solver. +C +C This array is passed as the Y argument in all calls to +C F and JAC. +C +C T = The independent variable. In the input, T is used only on +C the first call, as the initial point of the integration. +C In the output, after each call, T is the value at which a +C computed solution Y is evaluated (usually the same as TOUT). +C On an error return, T is the farthest point reached. +C +C TOUT = The next value of t at which a computed solution is desired. +C Used only for input. +C +C When starting the problem (ISTATE = 1), TOUT may be equal +C to T for one call, then should .ne. T for the next call. +C For the initial T, an input value of TOUT .ne. T is used +C in order to determine the direction of the integration +C (i.e. the algebraic sign of the step sizes) and the rough +C scale of the problem. Integration in either direction +C (forward or backward in t) is permitted. +C +C If ITASK = 2 or 5 (one-step modes), TOUT is ignored after +C the first call (i.e. the first call with TOUT .ne. T). +C Otherwise, TOUT is required on every call. +C +C If ITASK = 1, 3, or 4, the values of TOUT need not be +C monotone, but a value of TOUT which backs up is limited +C to the current internal t interval, whose endpoints are +C TCUR - HU and TCUR. (See optional output, below, for +C TCUR and HU.) +C +C ITOL = An indicator for the type of error control. See +C description below under ATOL. Used only for input. +C +C RTOL = A relative error tolerance parameter, either a scalar or +C an array of length NEQ. See description below under ATOL. +C Input only. +C +C ATOL = An absolute error tolerance parameter, either a scalar or +C an array of length NEQ. Input only. +C +C The input parameters ITOL, RTOL, and ATOL determine +C the error control performed by the solver. The solver will +C control the vector e = (e(i)) of estimated local errors +C in Y, according to an inequality of the form +C rms-norm of ( e(i)/EWT(i) ) .le. 1, +C where EWT(i) = RTOL(i)*abs(Y(i)) + ATOL(i), +C and the rms-norm (root-mean-square norm) here is +C rms-norm(v) = sqrt(sum v(i)**2 / NEQ). Here EWT = (EWT(i)) +C is a vector of weights which must always be positive, and +C the values of RTOL and ATOL should all be non-negative. +C The following table gives the types (scalar/array) of +C RTOL and ATOL, and the corresponding form of EWT(i). +C +C ITOL RTOL ATOL EWT(i) +C 1 scalar scalar RTOL*ABS(Y(i)) + ATOL +C 2 scalar array RTOL*ABS(Y(i)) + ATOL(i) +C 3 array scalar RTOL(i)*ABS(Y(i)) + ATOL +C 4 array array RTOL(i)*ABS(Y(i)) + ATOL(i) +C +C When either of these parameters is a scalar, it need not +C be dimensioned in the user's calling program. 
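+C For instance, to combine a single relative tolerance with
+C per-component absolute tolerances (ITOL = 2), a calling program
+C might contain declarations of the following form (a minimal
+C sketch only; the values 1.0D-6 and 1.0D-10 are arbitrary):
+C        INTEGER NEQ, I
+C        PARAMETER (NEQ = 2)
+C        DOUBLE PRECISION RTOL, ATOL(NEQ)
+C        ITOL = 2
+C        RTOL = 1.0D-6
+C        DO 5 I = 1,NEQ
+C    5   ATOL(I) = 1.0D-10
+C The solver then controls the error with
+C EWT(i) = RTOL*ABS(Y(i)) + ATOL(i) in each component, as in the
+C table above.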
+C +C If none of the above choices (with ITOL, RTOL, and ATOL +C fixed throughout the problem) is suitable, more general +C error controls can be obtained by substituting +C user-supplied routines for the setting of EWT and/or for +C the norm calculation. See Part iv below. +C +C If global errors are to be estimated by making a repeated +C run on the same problem with smaller tolerances, then all +C components of RTOL and ATOL (i.e. of EWT) should be scaled +C down uniformly. +C +C ITASK = An index specifying the task to be performed. +C Input only. ITASK has the following values and meanings. +C 1 means normal computation of output values of y(t) at +C t = TOUT (by overshooting and interpolating). +C 2 means take one step only and return. +C 3 means stop at the first internal mesh point at or +C beyond t = TOUT and return. +C 4 means normal computation of output values of y(t) at +C t = TOUT but without overshooting t = TCRIT. +C TCRIT must be input as RWORK(1). TCRIT may be equal to +C or beyond TOUT, but not behind it in the direction of +C integration. This option is useful if the problem +C has a singularity at or beyond t = TCRIT. +C 5 means take one step, without passing TCRIT, and return. +C TCRIT must be input as RWORK(1). +C +C Note: If ITASK = 4 or 5 and the solver reaches TCRIT +C (within roundoff), it will return T = TCRIT (exactly) to +C indicate this (unless ITASK = 4 and TOUT comes before TCRIT, +C in which case answers at T = TOUT are returned first). +C +C ISTATE = an index used for input and output to specify the +C the state of the calculation. +C +C In the input, the values of ISTATE are as follows. +C 1 means this is the first call for the problem +C (initializations will be done). See note below. +C 2 means this is not the first call, and the calculation +C is to continue normally, with no change in any input +C parameters except possibly TOUT and ITASK. +C (If ITOL, RTOL, and/or ATOL are changed between calls +C with ISTATE = 2, the new values will be used but not +C tested for legality.) +C 3 means this is not the first call, and the +C calculation is to continue normally, but with +C a change in input parameters other than +C TOUT and ITASK. Changes are allowed in +C NEQ, ITOL, RTOL, ATOL, IOPT, LRW, LIW, MF, ML, MU, +C and any of the optional input except H0. +C (See IWORK description for ML and MU.) +C Note: A preliminary call with TOUT = T is not counted +C as a first call here, as no initialization or checking of +C input is done. (Such a call is sometimes useful to include +C the initial conditions in the output.) +C Thus the first call for which TOUT .ne. T requires +C ISTATE = 1 in the input. +C +C In the output, ISTATE has the following values and meanings. +C 1 means nothing was done, as TOUT was equal to T with +C ISTATE = 1 in the input. +C 2 means the integration was performed successfully. +C -1 means an excessive amount of work (more than MXSTEP +C steps) was done on this call, before completing the +C requested task, but the integration was otherwise +C successful as far as T. (MXSTEP is an optional input +C and is normally 500.) To continue, the user may +C simply reset ISTATE to a value .gt. 1 and call again. +C (The excess work step counter will be reset to 0.) +C In addition, the user may increase MXSTEP to avoid +C this error return. (See optional input below.) +C -2 means too much accuracy was requested for the precision +C of the machine being used. 
This was detected before +C completing the requested task, but the integration +C was successful as far as T. To continue, the tolerance +C parameters must be reset, and ISTATE must be set +C to 3. The optional output TOLSF may be used for this +C purpose. (Note: If this condition is detected before +C taking any steps, then an illegal input return +C (ISTATE = -3) occurs instead.) +C -3 means illegal input was detected, before taking any +C integration steps. See written message for details. +C Note: If the solver detects an infinite loop of calls +C to the solver with illegal input, it will cause +C the run to stop. +C -4 means there were repeated error test failures on +C one attempted step, before completing the requested +C task, but the integration was successful as far as T. +C The problem may have a singularity, or the input +C may be inappropriate. +C -5 means there were repeated convergence test failures on +C one attempted step, before completing the requested +C task, but the integration was successful as far as T. +C This may be caused by an inaccurate Jacobian matrix, +C if one is being used. +C -6 means EWT(i) became zero for some i during the +C integration. Pure relative error control (ATOL(i)=0.0) +C was requested on a variable which has now vanished. +C The integration was successful as far as T. +C +C Note: Since the normal output value of ISTATE is 2, +C it does not need to be reset for normal continuation. +C Also, since a negative input value of ISTATE will be +C regarded as illegal, a negative output value requires the +C user to change it, and possibly other input, before +C calling the solver again. +C +C IOPT = An integer flag to specify whether or not any optional +C input is being used on this call. Input only. +C The optional input is listed separately below. +C IOPT = 0 means no optional input is being used. +C Default values will be used in all cases. +C IOPT = 1 means optional input is being used. +C +C ZWORK = A double precision complex working array. +C The length of ZWORK must be at least +C NYH*(MAXORD + 1) + 2*NEQ + LWM where +C NYH = the initial value of NEQ, +C MAXORD = 12 (if METH = 1) or 5 (if METH = 2) (unless a +C smaller value is given as an optional input), +C LWM = length of work space for matrix-related data: +C LWM = 0 if MITER = 0, +C LWM = 2*NEQ**2 if MITER = 1 or 2, and MF.gt.0, +C LWM = NEQ**2 if MITER = 1 or 2, and MF.lt.0, +C LWM = NEQ if MITER = 3, +C LWM = (3*ML+2*MU+2)*NEQ if MITER = 4 or 5, and MF.gt.0, +C LWM = (2*ML+MU+1)*NEQ if MITER = 4 or 5, and MF.lt.0. +C (See the MF description for METH and MITER.) +C Thus if MAXORD has its default value and NEQ is constant, +C this length is: +C 15*NEQ for MF = 10, +C 15*NEQ + 2*NEQ**2 for MF = 11 or 12, +C 15*NEQ + NEQ**2 for MF = -11 or -12, +C 16*NEQ for MF = 13, +C 17*NEQ + (3*ML+2*MU)*NEQ for MF = 14 or 15, +C 16*NEQ + (2*ML+MU)*NEQ for MF = -14 or -15, +C 8*NEQ for MF = 20, +C 8*NEQ + 2*NEQ**2 for MF = 21 or 22, +C 8*NEQ + NEQ**2 for MF = -21 or -22, +C 9*NEQ for MF = 23, +C 10*NEQ + (3*ML+2*MU)*NEQ for MF = 24 or 25. +C 9*NEQ + (2*ML+MU)*NEQ for MF = -24 or -25. +C +C LZW = The length of the array ZWORK, as declared by the user. +C (This will be checked by the solver.) +C +C RWORK = A real working array (double precision). +C The length of RWORK must be at least 20 + NEQ. +C The first 20 words of RWORK are reserved for conditional +C and optional input and optional output. 
+C +C The following word in RWORK is a conditional input: +C RWORK(1) = TCRIT = critical value of t which the solver +C is not to overshoot. Required if ITASK is +C 4 or 5, and ignored otherwise. (See ITASK.) +C +C LRW = The length of the array RWORK, as declared by the user. +C (This will be checked by the solver.) +C +C IWORK = An integer work array. The length of IWORK must be at least +C 30 if MITER = 0 or 3 (MF = 10, 13, 20, 23), or +C 30 + NEQ otherwise (abs(MF) = 11,12,14,15,21,22,24,25). +C The first 30 words of IWORK are reserved for conditional and +C optional input and optional output. +C +C The following 2 words in IWORK are conditional input: +C IWORK(1) = ML These are the lower and upper +C IWORK(2) = MU half-bandwidths, respectively, of the +C banded Jacobian, excluding the main diagonal. +C The band is defined by the matrix locations +C (i,j) with i-ML .le. j .le. i+MU. ML and MU +C must satisfy 0 .le. ML,MU .le. NEQ-1. +C These are required if MITER is 4 or 5, and +C ignored otherwise. ML and MU may in fact be +C the band parameters for a matrix to which +C df/dy is only approximately equal. +C +C LIW = the length of the array IWORK, as declared by the user. +C (This will be checked by the solver.) +C +C Note: The work arrays must not be altered between calls to ZVODE +C for the same problem, except possibly for the conditional and +C optional input, and except for the last 2*NEQ words of ZWORK and +C the last NEQ words of RWORK. The latter space is used for internal +C scratch space, and so is available for use by the user outside ZVODE +C between calls, if desired (but not for use by F or JAC). +C +C JAC = The name of the user-supplied routine (MITER = 1 or 4) to +C compute the Jacobian matrix, df/dy, as a function of +C the scalar t and the vector y. It is to have the form +C SUBROUTINE JAC (NEQ, T, Y, ML, MU, PD, NROWPD, +C RPAR, IPAR) +C DOUBLE COMPLEX Y(NEQ), PD(NROWPD,NEQ) +C DOUBLE PRECISION T +C where NEQ, T, Y, ML, MU, and NROWPD are input and the array +C PD is to be loaded with partial derivatives (elements of the +C Jacobian matrix) in the output. PD must be given a first +C dimension of NROWPD. T and Y have the same meaning as in +C Subroutine F. +C In the full matrix case (MITER = 1), ML and MU are +C ignored, and the Jacobian is to be loaded into PD in +C columnwise manner, with df(i)/dy(j) loaded into PD(i,j). +C In the band matrix case (MITER = 4), the elements +C within the band are to be loaded into PD in columnwise +C manner, with diagonal lines of df/dy loaded into the rows +C of PD. Thus df(i)/dy(j) is to be loaded into PD(i-j+MU+1,j). +C ML and MU are the half-bandwidth parameters. (See IWORK). +C The locations in PD in the two triangular areas which +C correspond to nonexistent matrix elements can be ignored +C or loaded arbitrarily, as they are overwritten by ZVODE. +C JAC need not provide df/dy exactly. A crude +C approximation (possibly with a smaller bandwidth) will do. +C In either case, PD is preset to zero by the solver, +C so that only the nonzero elements need be loaded by JAC. +C Each call to JAC is preceded by a call to F with the same +C arguments NEQ, T, and Y. Thus to gain some efficiency, +C intermediate quantities shared by both calculations may be +C saved in a user COMMON block by F and not recomputed by JAC, +C if desired. Also, JAC may alter the Y array, if desired. +C JAC must be declared external in the calling program. 
+C Subroutine JAC may access user-defined real/complex and +C integer work arrays, RPAR and IPAR, whose dimensions are set +C by the user in the calling program. +C +C MF = The method flag. Used only for input. The legal values of +C MF are 10, 11, 12, 13, 14, 15, 20, 21, 22, 23, 24, 25, +C -11, -12, -14, -15, -21, -22, -24, -25. +C MF is a signed two-digit integer, MF = JSV*(10*METH + MITER). +C JSV = SIGN(MF) indicates the Jacobian-saving strategy: +C JSV = 1 means a copy of the Jacobian is saved for reuse +C in the corrector iteration algorithm. +C JSV = -1 means a copy of the Jacobian is not saved +C (valid only for MITER = 1, 2, 4, or 5). +C METH indicates the basic linear multistep method: +C METH = 1 means the implicit Adams method. +C METH = 2 means the method based on backward +C differentiation formulas (BDF-s). +C MITER indicates the corrector iteration method: +C MITER = 0 means functional iteration (no Jacobian matrix +C is involved). +C MITER = 1 means chord iteration with a user-supplied +C full (NEQ by NEQ) Jacobian. +C MITER = 2 means chord iteration with an internally +C generated (difference quotient) full Jacobian +C (using NEQ extra calls to F per df/dy value). +C MITER = 3 means chord iteration with an internally +C generated diagonal Jacobian approximation +C (using 1 extra call to F per df/dy evaluation). +C MITER = 4 means chord iteration with a user-supplied +C banded Jacobian. +C MITER = 5 means chord iteration with an internally +C generated banded Jacobian (using ML+MU+1 extra +C calls to F per df/dy evaluation). +C If MITER = 1 or 4, the user must supply a subroutine JAC +C (the name is arbitrary) as described above under JAC. +C For other values of MITER, a dummy argument can be used. +C +C RPAR User-specified array used to communicate real or complex +C parameters to user-supplied subroutines. If RPAR is an +C array, it must be dimensioned in the user's calling program; +C if it is unused or it is a scalar, then it need not be +C dimensioned. The type of RPAR may be REAL, DOUBLE, COMPLEX, +C or DOUBLE COMPLEX, depending on the user program's needs. +C RPAR is not type-declared within ZVODE, but simply passed +C (by address) to the user's F and JAC routines. +C +C IPAR User-specified array used to communicate integer parameter +C to user-supplied subroutines. If IPAR is an array, it must +C be dimensioned in the user's calling program. +C----------------------------------------------------------------------- +C Optional Input. +C +C The following is a list of the optional input provided for in the +C call sequence. (See also Part ii.) For each such input variable, +C this table lists its name as used in this documentation, its +C location in the call sequence, its meaning, and the default value. +C The use of any of this input requires IOPT = 1, and in that +C case all of this input is examined. A value of zero for any +C of these optional input variables will cause the default value to be +C used. Thus to use a subset of the optional input, simply preload +C locations 5 to 10 in RWORK and IWORK to 0.0 and 0 respectively, and +C then set those of interest to nonzero values. +C +C NAME LOCATION MEANING AND DEFAULT VALUE +C +C H0 RWORK(5) The step size to be attempted on the first step. +C The default value is determined by the solver. +C +C HMAX RWORK(6) The maximum absolute step size allowed. +C The default value is infinite. +C +C HMIN RWORK(7) The minimum absolute step size allowed. +C The default value is 0. 
(This lower bound is not +C enforced on the final step before reaching TCRIT +C when ITASK = 4 or 5.) +C +C MAXORD IWORK(5) The maximum order to be allowed. The default +C value is 12 if METH = 1, and 5 if METH = 2. +C If MAXORD exceeds the default value, it will +C be reduced to the default value. +C If MAXORD is changed during the problem, it may +C cause the current order to be reduced. +C +C MXSTEP IWORK(6) Maximum number of (internally defined) steps +C allowed during one call to the solver. +C The default value is 500. +C +C MXHNIL IWORK(7) Maximum number of messages printed (per problem) +C warning that T + H = T on a step (H = step size). +C This must be positive to result in a non-default +C value. The default value is 10. +C +C----------------------------------------------------------------------- +C Optional Output. +C +C As optional additional output from ZVODE, the variables listed +C below are quantities related to the performance of ZVODE +C which are available to the user. These are communicated by way of +C the work arrays, but also have internal mnemonic names as shown. +C Except where stated otherwise, all of this output is defined +C on any successful return from ZVODE, and on any return with +C ISTATE = -1, -2, -4, -5, or -6. On an illegal input return +C (ISTATE = -3), they will be unchanged from their existing values +C (if any), except possibly for TOLSF, LENZW, LENRW, and LENIW. +C On any error return, output relevant to the error will be defined, +C as noted below. +C +C NAME LOCATION MEANING +C +C HU RWORK(11) The step size in t last used (successfully). +C +C HCUR RWORK(12) The step size to be attempted on the next step. +C +C TCUR RWORK(13) The current value of the independent variable +C which the solver has actually reached, i.e. the +C current internal mesh point in t. In the output, +C TCUR will always be at least as far from the +C initial value of t as the current argument T, +C but may be farther (if interpolation was done). +C +C TOLSF RWORK(14) A tolerance scale factor, greater than 1.0, +C computed when a request for too much accuracy was +C detected (ISTATE = -3 if detected at the start of +C the problem, ISTATE = -2 otherwise). If ITOL is +C left unaltered but RTOL and ATOL are uniformly +C scaled up by a factor of TOLSF for the next call, +C then the solver is deemed likely to succeed. +C (The user may also ignore TOLSF and alter the +C tolerance parameters in any other way appropriate.) +C +C NST IWORK(11) The number of steps taken for the problem so far. +C +C NFE IWORK(12) The number of f evaluations for the problem so far. +C +C NJE IWORK(13) The number of Jacobian evaluations so far. +C +C NQU IWORK(14) The method order last used (successfully). +C +C NQCUR IWORK(15) The order to be attempted on the next step. +C +C IMXER IWORK(16) The index of the component of largest magnitude in +C the weighted local error vector ( e(i)/EWT(i) ), +C on an error return with ISTATE = -4 or -5. +C +C LENZW IWORK(17) The length of ZWORK actually required. +C This is defined on normal returns and on an illegal +C input return for insufficient storage. +C +C LENRW IWORK(18) The length of RWORK actually required. +C This is defined on normal returns and on an illegal +C input return for insufficient storage. +C +C LENIW IWORK(19) The length of IWORK actually required. +C This is defined on normal returns and on an illegal +C input return for insufficient storage. +C +C NLU IWORK(20) The number of matrix LU decompositions so far. 
+C +C NNI IWORK(21) The number of nonlinear (Newton) iterations so far. +C +C NCFN IWORK(22) The number of convergence failures of the nonlinear +C solver so far. +C +C NETF IWORK(23) The number of error test failures of the integrator +C so far. +C +C The following two arrays are segments of the ZWORK array which +C may also be of interest to the user as optional output. +C For each array, the table below gives its internal name, +C its base address in ZWORK, and its description. +C +C NAME BASE ADDRESS DESCRIPTION +C +C YH 1 The Nordsieck history array, of size NYH by +C (NQCUR + 1), where NYH is the initial value +C of NEQ. For j = 0,1,...,NQCUR, column j+1 +C of YH contains HCUR**j/factorial(j) times +C the j-th derivative of the interpolating +C polynomial currently representing the +C solution, evaluated at t = TCUR. +C +C ACOR LENZW-NEQ+1 Array of size NEQ used for the accumulated +C corrections on each step, scaled in the output +C to represent the estimated local error in Y +C on the last step. This is the vector e in +C the description of the error control. It is +C defined only on a successful return from ZVODE. +C +C----------------------------------------------------------------------- +C Interrupting and Restarting +C +C If the integration of a given problem by ZVODE is to be +C interrrupted and then later continued, such as when restarting +C an interrupted run or alternating between two or more ODE problems, +C the user should save, following the return from the last ZVODE call +C prior to the interruption, the contents of the call sequence +C variables and internal COMMON blocks, and later restore these +C values before the next ZVODE call for that problem. To save +C and restore the COMMON blocks, use subroutine ZVSRCO, as +C described below in part ii. +C +C In addition, if non-default values for either LUN or MFLAG are +C desired, an extra call to XSETUN and/or XSETF should be made just +C before continuing the integration. See Part ii below for details. +C +C----------------------------------------------------------------------- +C Part ii. Other Routines Callable. +C +C The following are optional calls which the user may make to +C gain additional capabilities in conjunction with ZVODE. +C (The routines XSETUN and XSETF are designed to conform to the +C SLATEC error handling package.) +C +C FORM OF CALL FUNCTION +C CALL XSETUN(LUN) Set the logical unit number, LUN, for +C output of messages from ZVODE, if +C the default is not desired. +C The default value of LUN is 6. +C +C CALL XSETF(MFLAG) Set a flag to control the printing of +C messages by ZVODE. +C MFLAG = 0 means do not print. (Danger: +C This risks losing valuable information.) +C MFLAG = 1 means print (the default). +C +C Either of the above calls may be made at +C any time and will take effect immediately. +C +C CALL ZVSRCO(RSAV,ISAV,JOB) Saves and restores the contents of +C the internal COMMON blocks used by +C ZVODE. (See Part iii below.) +C RSAV must be a real array of length 51 +C or more, and ISAV must be an integer +C array of length 40 or more. +C JOB=1 means save COMMON into RSAV/ISAV. +C JOB=2 means restore COMMON from RSAV/ISAV. +C ZVSRCO is useful if one is +C interrupting a run and restarting +C later, or alternating between two or +C more problems solved with ZVODE. +C +C CALL ZVINDY(,,,,,) Provide derivatives of y, of various +C (See below.) orders, at a specified point T, if +C desired. It may be called only after +C a successful return from ZVODE. 
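+C For instance (a minimal sketch; the names RSAV1 and ISAV1 are
+C arbitrary), a program that alternates between two problems solved
+C with ZVODE could save the internal state of the first problem and
+C restore it later as follows:
+C        DOUBLE PRECISION RSAV1(51)
+C        INTEGER ISAV1(40)
+C        CALL ZVSRCO (RSAV1, ISAV1, 1)
+C        ...  (calls to ZVODE for the second problem)  ...
+C        CALL ZVSRCO (RSAV1, ISAV1, 2)
+C after which, provided the first problem's own call sequence
+C variables and work arrays have also been preserved, its
+C integration may be continued.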
+C +C The detailed instructions for using ZVINDY are as follows. +C The form of the call is: +C +C CALL ZVINDY (T, K, ZWORK, NYH, DKY, IFLAG) +C +C The input parameters are: +C +C T = Value of independent variable where answers are desired +C (normally the same as the T last returned by ZVODE). +C For valid results, T must lie between TCUR - HU and TCUR. +C (See optional output for TCUR and HU.) +C K = Integer order of the derivative desired. K must satisfy +C 0 .le. K .le. NQCUR, where NQCUR is the current order +C (see optional output). The capability corresponding +C to K = 0, i.e. computing y(T), is already provided +C by ZVODE directly. Since NQCUR .ge. 1, the first +C derivative dy/dt is always available with ZVINDY. +C ZWORK = The history array YH. +C NYH = Column length of YH, equal to the initial value of NEQ. +C +C The output parameters are: +C +C DKY = A double complex array of length NEQ containing the +C computed value of the K-th derivative of y(t). +C IFLAG = Integer flag, returned as 0 if K and T were legal, +C -1 if K was illegal, and -2 if T was illegal. +C On an error return, a message is also written. +C----------------------------------------------------------------------- +C Part iii. COMMON Blocks. +C If ZVODE is to be used in an overlay situation, the user +C must declare, in the primary overlay, the variables in: +C (1) the call sequence to ZVODE, +C (2) the two internal COMMON blocks +C /ZVOD01/ of length 83 (50 double precision words +C followed by 33 integer words), +C /ZVOD02/ of length 9 (1 double precision word +C followed by 8 integer words), +C +C If ZVODE is used on a system in which the contents of internal +C COMMON blocks are not preserved between calls, the user should +C declare the above two COMMON blocks in his calling program to insure +C that their contents are preserved. +C +C----------------------------------------------------------------------- +C Part iv. Optionally Replaceable Solver Routines. +C +C Below are descriptions of two routines in the ZVODE package which +C relate to the measurement of errors. Either routine can be +C replaced by a user-supplied version, if desired. However, since such +C a replacement may have a major impact on performance, it should be +C done only when absolutely necessary, and only with great caution. +C (Note: The means by which the package version of a routine is +C superseded by the user's version may be system-dependent.) +C +C (a) ZEWSET. +C The following subroutine is called just before each internal +C integration step, and sets the array of error weights, EWT, as +C described under ITOL/RTOL/ATOL above: +C SUBROUTINE ZEWSET (NEQ, ITOL, RTOL, ATOL, YCUR, EWT) +C where NEQ, ITOL, RTOL, and ATOL are as in the ZVODE call sequence, +C YCUR contains the current (double complex) dependent variable vector, +C and EWT is the array of weights set by ZEWSET. +C +C If the user supplies this subroutine, it must return in EWT(i) +C (i = 1,...,NEQ) a positive quantity suitable for comparison with +C errors in Y(i). The EWT array returned by ZEWSET is passed to the +C ZVNORM routine (See below.), and also used by ZVODE in the computation +C of the optional output IMXER, the diagonal Jacobian approximation, +C and the increments for difference quotient Jacobians. +C +C In the user-supplied version of ZEWSET, it may be desirable to use +C the current values of derivatives of y. Derivatives up to order NQ +C are available from the history array YH, described above under +C Optional Output. 
In ZEWSET, YH is identical to the YCUR array, +C extended to NQ + 1 columns with a column length of NYH and scale +C factors of h**j/factorial(j). On the first call for the problem, +C given by NST = 0, NQ is 1 and H is temporarily set to 1.0. +C NYH is the initial value of NEQ. The quantities NQ, H, and NST +C can be obtained by including in ZEWSET the statements: +C DOUBLE PRECISION RVOD, H, HU +C COMMON /ZVOD01/ RVOD(50), IVOD(33) +C COMMON /ZVOD02/ HU, NCFN, NETF, NFE, NJE, NLU, NNI, NQU, NST +C NQ = IVOD(28) +C H = RVOD(21) +C Thus, for example, the current value of dy/dt can be obtained as +C YCUR(NYH+i)/H (i=1,...,NEQ) (and the division by H is +C unnecessary when NST = 0). +C +C (b) ZVNORM. +C The following is a real function routine which computes the weighted +C root-mean-square norm of a vector v: +C D = ZVNORM (N, V, W) +C where: +C N = the length of the vector, +C V = double complex array of length N containing the vector, +C W = real array of length N containing weights, +C D = sqrt( (1/N) * sum(abs(V(i))*W(i))**2 ). +C ZVNORM is called with N = NEQ and with W(i) = 1.0/EWT(i), where +C EWT is as set by subroutine ZEWSET. +C +C If the user supplies this function, it should return a non-negative +C value of ZVNORM suitable for use in the error control in ZVODE. +C None of the arguments should be altered by ZVNORM. +C For example, a user-supplied ZVNORM routine might: +C -substitute a max-norm of (V(i)*W(i)) for the rms-norm, or +C -ignore some components of V in the norm, with the effect of +C suppressing the error control on those components of Y. +C----------------------------------------------------------------------- +C REVISION HISTORY (YYYYMMDD) +C 20060517 DATE WRITTEN, modified from DVODE of 20020430. +C 20061227 Added note on use for analytic f. +C----------------------------------------------------------------------- +C Other Routines in the ZVODE Package. +C +C In addition to Subroutine ZVODE, the ZVODE package includes the +C following subroutines and function routines: +C ZVHIN computes an approximate step size for the initial step. +C ZVINDY computes an interpolated value of the y vector at t = TOUT. +C ZVSTEP is the core integrator, which does one step of the +C integration and the associated error control. +C ZVSET sets all method coefficients and test constants. +C ZVNLSD solves the underlying nonlinear system -- the corrector. +C ZVJAC computes and preprocesses the Jacobian matrix J = df/dy +C and the Newton iteration matrix P = I - (h/l1)*J. +C ZVSOL manages solution of linear system in chord iteration. +C ZVJUST adjusts the history array on a change of order. +C ZEWSET sets the error weight vector EWT before each step. +C ZVNORM computes the weighted r.m.s. norm of a vector. +C ZABSSQ computes the squared absolute value of a double complex z. +C ZVSRCO is a user-callable routine to save and restore +C the contents of the internal COMMON blocks. +C ZACOPY is a routine to copy one two-dimensional array to another. +C ZGEFA and ZGESL are routines from LINPACK for solving full +C systems of linear algebraic equations. +C ZGBFA and ZGBSL are routines from LINPACK for solving banded +C linear systems. +C DZSCAL scales a double complex array by a double prec. scalar. +C DZAXPY adds a D.P. scalar times one complex vector to another. +C ZCOPY is a basic linear algebra module from the BLAS. +C DUMACH sets the unit roundoff of the machine. +C XERRWD, XSETUN, XSETF, IXSAV, and IUMACH handle the printing of all +C error messages and warnings. XERRWD is machine-dependent. 
+C Note: ZVNORM, ZABSSQ, DUMACH, IXSAV, and IUMACH are function routines. +C All the others are subroutines. +C The intrinsic functions called with double precision complex arguments +C are: ABS, DREAL, and DIMAG. All of these are expected to return +C double precision real values. +C +C----------------------------------------------------------------------- +C +C Type declarations for labeled COMMON block ZVOD01 -------------------- +C + DOUBLE PRECISION ACNRM, CCMXJ, CONP, CRATE, DRC, EL, + 1 ETA, ETAMAX, H, HMIN, HMXI, HNEW, HRL1, HSCAL, PRL1, + 2 RC, RL1, SRUR, TAU, TQ, TN, UROUND + INTEGER ICF, INIT, IPUP, JCUR, JSTART, JSV, KFLAG, KUTH, + 1 L, LMAX, LYH, LEWT, LACOR, LSAVF, LWM, LIWM, + 2 LOCJS, MAXORD, METH, MITER, MSBJ, MXHNIL, MXSTEP, + 3 N, NEWH, NEWQ, NHNIL, NQ, NQNYH, NQWAIT, NSLJ, + 4 NSLP, NYH +C +C Type declarations for labeled COMMON block ZVOD02 -------------------- +C + DOUBLE PRECISION HU + INTEGER NCFN, NETF, NFE, NJE, NLU, NNI, NQU, NST +C +C Type declarations for local variables -------------------------------- +C + EXTERNAL ZVNLSD + LOGICAL IHIT + DOUBLE PRECISION ATOLI, BIG, EWTI, FOUR, H0, HMAX, HMX, HUN, ONE, + 1 PT2, RH, RTOLI, SIZE, TCRIT, TNEXT, TOLSF, TP, TWO, ZERO + INTEGER I, IER, IFLAG, IMXER, JCO, KGO, LENIW, LENJ, LENP, LENZW, + 1 LENRW, LENWM, LF0, MBAND, MFA, ML, MORD, MU, MXHNL0, MXSTP0, + 2 NITER, NSLAST + CHARACTER*80 MSG +C +C Type declaration for function subroutines called --------------------- +C + DOUBLE PRECISION DUMACH, ZVNORM +C + DIMENSION MORD(2) +C----------------------------------------------------------------------- +C The following Fortran-77 declaration is to cause the values of the +C listed (local) variables to be saved between calls to ZVODE. +C----------------------------------------------------------------------- + SAVE MORD, MXHNL0, MXSTP0 + SAVE ZERO, ONE, TWO, FOUR, PT2, HUN +C----------------------------------------------------------------------- +C The following internal COMMON blocks contain variables which are +C communicated between subroutines in the ZVODE package, or which are +C to be saved between calls to ZVODE. +C In each block, real variables precede integers. +C The block /ZVOD01/ appears in subroutines ZVODE, ZVINDY, ZVSTEP, +C ZVSET, ZVNLSD, ZVJAC, ZVSOL, ZVJUST and ZVSRCO. +C The block /ZVOD02/ appears in subroutines ZVODE, ZVINDY, ZVSTEP, +C ZVNLSD, ZVJAC, and ZVSRCO. +C +C The variables stored in the internal COMMON blocks are as follows: +C +C ACNRM = Weighted r.m.s. norm of accumulated correction vectors. +C CCMXJ = Threshhold on DRC for updating the Jacobian. (See DRC.) +C CONP = The saved value of TQ(5). +C CRATE = Estimated corrector convergence rate constant. +C DRC = Relative change in H*RL1 since last ZVJAC call. +C EL = Real array of integration coefficients. See ZVSET. +C ETA = Saved tentative ratio of new to old H. +C ETAMAX = Saved maximum value of ETA to be allowed. +C H = The step size. +C HMIN = The minimum absolute value of the step size H to be used. +C HMXI = Inverse of the maximum absolute value of H to be used. +C HMXI = 0.0 is allowed and corresponds to an infinite HMAX. +C HNEW = The step size to be attempted on the next step. +C HRL1 = Saved value of H*RL1. +C HSCAL = Stepsize in scaling of YH array. +C PRL1 = The saved value of RL1. +C RC = Ratio of current H*RL1 to value on last ZVJAC call. +C RL1 = The reciprocal of the coefficient EL(1). +C SRUR = Sqrt(UROUND), used in difference quotient algorithms. +C TAU = Real vector of past NQ step sizes, length 13. 
+C TQ = A real vector of length 5 in which ZVSET stores constants +C used for the convergence test, the error test, and the +C selection of H at a new order. +C TN = The independent variable, updated on each step taken. +C UROUND = The machine unit roundoff. The smallest positive real number +C such that 1.0 + UROUND .ne. 1.0 +C ICF = Integer flag for convergence failure in ZVNLSD: +C 0 means no failures. +C 1 means convergence failure with out of date Jacobian +C (recoverable error). +C 2 means convergence failure with current Jacobian or +C singular matrix (unrecoverable error). +C INIT = Saved integer flag indicating whether initialization of the +C problem has been done (INIT = 1) or not. +C IPUP = Saved flag to signal updating of Newton matrix. +C JCUR = Output flag from ZVJAC showing Jacobian status: +C JCUR = 0 means J is not current. +C JCUR = 1 means J is current. +C JSTART = Integer flag used as input to ZVSTEP: +C 0 means perform the first step. +C 1 means take a new step continuing from the last. +C -1 means take the next step with a new value of MAXORD, +C HMIN, HMXI, N, METH, MITER, and/or matrix parameters. +C On return, ZVSTEP sets JSTART = 1. +C JSV = Integer flag for Jacobian saving, = sign(MF). +C KFLAG = A completion code from ZVSTEP with the following meanings: +C 0 the step was succesful. +C -1 the requested error could not be achieved. +C -2 corrector convergence could not be achieved. +C -3, -4 fatal error in VNLS (can not occur here). +C KUTH = Input flag to ZVSTEP showing whether H was reduced by the +C driver. KUTH = 1 if H was reduced, = 0 otherwise. +C L = Integer variable, NQ + 1, current order plus one. +C LMAX = MAXORD + 1 (used for dimensioning). +C LOCJS = A pointer to the saved Jacobian, whose storage starts at +C WM(LOCJS), if JSV = 1. +C LYH, LEWT, LACOR, LSAVF, LWM, LIWM = Saved integer pointers +C to segments of ZWORK, RWORK, and IWORK. +C MAXORD = The maximum order of integration method to be allowed. +C METH/MITER = The method flags. See MF. +C MSBJ = The maximum number of steps between J evaluations, = 50. +C MXHNIL = Saved value of optional input MXHNIL. +C MXSTEP = Saved value of optional input MXSTEP. +C N = The number of first-order ODEs, = NEQ. +C NEWH = Saved integer to flag change of H. +C NEWQ = The method order to be used on the next step. +C NHNIL = Saved counter for occurrences of T + H = T. +C NQ = Integer variable, the current integration method order. +C NQNYH = Saved value of NQ*NYH. +C NQWAIT = A counter controlling the frequency of order changes. +C An order change is about to be considered if NQWAIT = 1. +C NSLJ = The number of steps taken as of the last Jacobian update. +C NSLP = Saved value of NST as of last Newton matrix update. +C NYH = Saved value of the initial value of NEQ. +C HU = The step size in t last used. +C NCFN = Number of nonlinear convergence failures so far. +C NETF = The number of error test failures of the integrator so far. +C NFE = The number of f evaluations for the problem so far. +C NJE = The number of Jacobian evaluations so far. +C NLU = The number of matrix LU decompositions so far. +C NNI = Number of nonlinear iterations so far. +C NQU = The method order last used. +C NST = The number of steps taken for the problem so far. 
+C----------------------------------------------------------------------- + COMMON /ZVOD01/ ACNRM, CCMXJ, CONP, CRATE, DRC, EL(13), ETA, + 1 ETAMAX, H, HMIN, HMXI, HNEW, HRL1, HSCAL, PRL1, + 2 RC, RL1, SRUR, TAU(13), TQ(5), TN, UROUND, + 3 ICF, INIT, IPUP, JCUR, JSTART, JSV, KFLAG, KUTH, + 4 L, LMAX, LYH, LEWT, LACOR, LSAVF, LWM, LIWM, + 5 LOCJS, MAXORD, METH, MITER, MSBJ, MXHNIL, MXSTEP, + 6 N, NEWH, NEWQ, NHNIL, NQ, NQNYH, NQWAIT, NSLJ, + 7 NSLP, NYH + COMMON /ZVOD02/ HU, NCFN, NETF, NFE, NJE, NLU, NNI, NQU, NST +C + DATA MORD(1) /12/, MORD(2) /5/, MXSTP0 /500/, MXHNL0 /10/ + DATA ZERO /0.0D0/, ONE /1.0D0/, TWO /2.0D0/, FOUR /4.0D0/, + 1 PT2 /0.2D0/, HUN /100.0D0/ +C----------------------------------------------------------------------- +C Block A. +C This code block is executed on every call. +C It tests ISTATE and ITASK for legality and branches appropriately. +C If ISTATE .gt. 1 but the flag INIT shows that initialization has +C not yet been done, an error return occurs. +C If ISTATE = 1 and TOUT = T, return immediately. +C----------------------------------------------------------------------- + IF (ISTATE .LT. 1 .OR. ISTATE .GT. 3) GO TO 601 + IF (ITASK .LT. 1 .OR. ITASK .GT. 5) GO TO 602 + IF (ISTATE .EQ. 1) GO TO 10 + IF (INIT .NE. 1) GO TO 603 + IF (ISTATE .EQ. 2) GO TO 200 + GO TO 20 + 10 INIT = 0 + IF (TOUT .EQ. T) RETURN +C----------------------------------------------------------------------- +C Block B. +C The next code block is executed for the initial call (ISTATE = 1), +C or for a continuation call with parameter changes (ISTATE = 3). +C It contains checking of all input and various initializations. +C +C First check legality of the non-optional input NEQ, ITOL, IOPT, +C MF, ML, and MU. +C----------------------------------------------------------------------- + 20 IF (NEQ .LE. 0) GO TO 604 + IF (ISTATE .EQ. 1) GO TO 25 + IF (NEQ .GT. N) GO TO 605 + 25 N = NEQ + IF (ITOL .LT. 1 .OR. ITOL .GT. 4) GO TO 606 + IF (IOPT .LT. 0 .OR. IOPT .GT. 1) GO TO 607 + JSV = SIGN(1,MF) + MFA = ABS(MF) + METH = MFA/10 + MITER = MFA - 10*METH + IF (METH .LT. 1 .OR. METH .GT. 2) GO TO 608 + IF (MITER .LT. 0 .OR. MITER .GT. 5) GO TO 608 + IF (MITER .LE. 3) GO TO 30 + ML = IWORK(1) + MU = IWORK(2) + IF (ML .LT. 0 .OR. ML .GE. N) GO TO 609 + IF (MU .LT. 0 .OR. MU .GE. N) GO TO 610 + 30 CONTINUE +C Next process and check the optional input. --------------------------- + IF (IOPT .EQ. 1) GO TO 40 + MAXORD = MORD(METH) + MXSTEP = MXSTP0 + MXHNIL = MXHNL0 + IF (ISTATE .EQ. 1) H0 = ZERO + HMXI = ZERO + HMIN = ZERO + GO TO 60 + 40 MAXORD = IWORK(5) + IF (MAXORD .LT. 0) GO TO 611 + IF (MAXORD .EQ. 0) MAXORD = 100 + MAXORD = MIN(MAXORD,MORD(METH)) + MXSTEP = IWORK(6) + IF (MXSTEP .LT. 0) GO TO 612 + IF (MXSTEP .EQ. 0) MXSTEP = MXSTP0 + MXHNIL = IWORK(7) + IF (MXHNIL .LT. 0) GO TO 613 + IF (MXHNIL .EQ. 0) MXHNIL = MXHNL0 + IF (ISTATE .NE. 1) GO TO 50 + H0 = RWORK(5) + IF ((TOUT - T)*H0 .LT. ZERO) GO TO 614 + 50 HMAX = RWORK(6) + IF (HMAX .LT. ZERO) GO TO 615 + HMXI = ZERO + IF (HMAX .GT. ZERO) HMXI = ONE/HMAX + HMIN = RWORK(7) + IF (HMIN .LT. ZERO) GO TO 616 +C----------------------------------------------------------------------- +C Set work array pointers and check lengths LZW, LRW, and LIW. +C Pointers to segments of ZWORK, RWORK, and IWORK are named by prefixing +C L to the name of the segment. E.g., segment YH starts at ZWORK(LYH). +C Segments of ZWORK (in order) are denoted YH, WM, SAVF, ACOR. +C Besides optional inputs/outputs, RWORK has only the segment EWT. 
+C Within WM, LOCJS is the location of the saved Jacobian (JSV .gt. 0). +C----------------------------------------------------------------------- + 60 LYH = 1 + IF (ISTATE .EQ. 1) NYH = N + LWM = LYH + (MAXORD + 1)*NYH + JCO = MAX(0,JSV) + IF (MITER .EQ. 0) LENWM = 0 + IF (MITER .EQ. 1 .OR. MITER .EQ. 2) THEN + LENWM = (1 + JCO)*N*N + LOCJS = N*N + 1 + ENDIF + IF (MITER .EQ. 3) LENWM = N + IF (MITER .EQ. 4 .OR. MITER .EQ. 5) THEN + MBAND = ML + MU + 1 + LENP = (MBAND + ML)*N + LENJ = MBAND*N + LENWM = LENP + JCO*LENJ + LOCJS = LENP + 1 + ENDIF + LSAVF = LWM + LENWM + LACOR = LSAVF + N + LENZW = LACOR + N - 1 + IWORK(17) = LENZW + LEWT = 21 + LENRW = 20 + N + IWORK(18) = LENRW + LIWM = 1 + LENIW = 30 + N + IF (MITER .EQ. 0 .OR. MITER .EQ. 3) LENIW = 30 + IWORK(19) = LENIW + IF (LENZW .GT. LZW) GO TO 628 + IF (LENRW .GT. LRW) GO TO 617 + IF (LENIW .GT. LIW) GO TO 618 +C Check RTOL and ATOL for legality. ------------------------------------ + RTOLI = RTOL(1) + ATOLI = ATOL(1) + DO 70 I = 1,N + IF (ITOL .GE. 3) RTOLI = RTOL(I) + IF (ITOL .EQ. 2 .OR. ITOL .EQ. 4) ATOLI = ATOL(I) + IF (RTOLI .LT. ZERO) GO TO 619 + IF (ATOLI .LT. ZERO) GO TO 620 + 70 CONTINUE + IF (ISTATE .EQ. 1) GO TO 100 +C If ISTATE = 3, set flag to signal parameter changes to ZVSTEP. ------- + JSTART = -1 + IF (NQ .LE. MAXORD) GO TO 200 +C MAXORD was reduced below NQ. Copy YH(*,MAXORD+2) into SAVF. --------- + CALL ZCOPY (N, ZWORK(LWM), 1, ZWORK(LSAVF), 1) + GO TO 200 +C----------------------------------------------------------------------- +C Block C. +C The next block is for the initial call only (ISTATE = 1). +C It contains all remaining initializations, the initial call to F, +C and the calculation of the initial step size. +C The error weights in EWT are inverted after being loaded. +C----------------------------------------------------------------------- + 100 UROUND = DUMACH() + TN = T + IF (ITASK .NE. 4 .AND. ITASK .NE. 5) GO TO 110 + TCRIT = RWORK(1) + IF ((TCRIT - TOUT)*(TOUT - T) .LT. ZERO) GO TO 625 + IF (H0 .NE. ZERO .AND. (T + H0 - TCRIT)*H0 .GT. ZERO) + 1 H0 = TCRIT - T + 110 JSTART = 0 + IF (MITER .GT. 0) SRUR = SQRT(UROUND) + CCMXJ = PT2 + MSBJ = 50 + NHNIL = 0 + NST = 0 + NJE = 0 + NNI = 0 + NCFN = 0 + NETF = 0 + NLU = 0 + NSLJ = 0 + NSLAST = 0 + HU = ZERO + NQU = 0 +C Initial call to F. (LF0 points to YH(*,2).) ------------------------- + LF0 = LYH + NYH + CALL F (N, T, Y, ZWORK(LF0), RPAR, IPAR) + NFE = 1 +C Load the initial value vector in YH. --------------------------------- + CALL ZCOPY (N, Y, 1, ZWORK(LYH), 1) +C Load and invert the EWT array. (H is temporarily set to 1.0.) ------- + NQ = 1 + H = ONE + CALL ZEWSET (N, ITOL, RTOL, ATOL, ZWORK(LYH), RWORK(LEWT)) + DO 120 I = 1,N + IF (RWORK(I+LEWT-1) .LE. ZERO) GO TO 621 + 120 RWORK(I+LEWT-1) = ONE/RWORK(I+LEWT-1) + IF (H0 .NE. ZERO) GO TO 180 +C Call ZVHIN to set initial step size H0 to be attempted. -------------- + CALL ZVHIN (N, T, ZWORK(LYH), ZWORK(LF0), F, RPAR, IPAR, TOUT, + 1 UROUND, RWORK(LEWT), ITOL, ATOL, Y, ZWORK(LACOR), H0, + 2 NITER, IER) + NFE = NFE + NITER + IF (IER .NE. 0) GO TO 622 +C Adjust H0 if necessary to meet HMAX bound. --------------------------- + 180 RH = ABS(H0)*HMXI + IF (RH .GT. ONE) H0 = H0/RH +C Load H with H0 and scale YH(*,2) by H0. ------------------------------ + H = H0 + CALL DZSCAL (N, H0, ZWORK(LF0), 1) + GO TO 270 +C----------------------------------------------------------------------- +C Block D. 
+C The next code block is for continuation calls only (ISTATE = 2 or 3) +C and is to check stop conditions before taking a step. +C----------------------------------------------------------------------- + 200 NSLAST = NST + KUTH = 0 + GO TO (210, 250, 220, 230, 240), ITASK + 210 IF ((TN - TOUT)*H .LT. ZERO) GO TO 250 + CALL ZVINDY (TOUT, 0, ZWORK(LYH), NYH, Y, IFLAG) + IF (IFLAG .NE. 0) GO TO 627 + T = TOUT + GO TO 420 + 220 TP = TN - HU*(ONE + HUN*UROUND) + IF ((TP - TOUT)*H .GT. ZERO) GO TO 623 + IF ((TN - TOUT)*H .LT. ZERO) GO TO 250 + GO TO 400 + 230 TCRIT = RWORK(1) + IF ((TN - TCRIT)*H .GT. ZERO) GO TO 624 + IF ((TCRIT - TOUT)*H .LT. ZERO) GO TO 625 + IF ((TN - TOUT)*H .LT. ZERO) GO TO 245 + CALL ZVINDY (TOUT, 0, ZWORK(LYH), NYH, Y, IFLAG) + IF (IFLAG .NE. 0) GO TO 627 + T = TOUT + GO TO 420 + 240 TCRIT = RWORK(1) + IF ((TN - TCRIT)*H .GT. ZERO) GO TO 624 + 245 HMX = ABS(TN) + ABS(H) + IHIT = ABS(TN - TCRIT) .LE. HUN*UROUND*HMX + IF (IHIT) GO TO 400 + TNEXT = TN + HNEW*(ONE + FOUR*UROUND) + IF ((TNEXT - TCRIT)*H .LE. ZERO) GO TO 250 + H = (TCRIT - TN)*(ONE - FOUR*UROUND) + KUTH = 1 +C----------------------------------------------------------------------- +C Block E. +C The next block is normally executed for all calls and contains +C the call to the one-step core integrator ZVSTEP. +C +C This is a looping point for the integration steps. +C +C First check for too many steps being taken, update EWT (if not at +C start of problem), check for too much accuracy being requested, and +C check for H below the roundoff level in T. +C----------------------------------------------------------------------- + 250 CONTINUE + IF ((NST-NSLAST) .GE. MXSTEP) GO TO 500 + CALL ZEWSET (N, ITOL, RTOL, ATOL, ZWORK(LYH), RWORK(LEWT)) + DO 260 I = 1,N + IF (RWORK(I+LEWT-1) .LE. ZERO) GO TO 510 + 260 RWORK(I+LEWT-1) = ONE/RWORK(I+LEWT-1) + 270 TOLSF = UROUND*ZVNORM (N, ZWORK(LYH), RWORK(LEWT)) + IF (TOLSF .LE. ONE) GO TO 280 + TOLSF = TOLSF*TWO + IF (NST .EQ. 0) GO TO 626 + GO TO 520 + 280 IF ((TN + H) .NE. TN) GO TO 290 + NHNIL = NHNIL + 1 + IF (NHNIL .GT. MXHNIL) GO TO 290 + MSG = 'ZVODE-- Warning: internal T (=R1) and H (=R2) are' + CALL XERRWD (MSG, 50, 101, 1, 0, 0, 0, 0, ZERO, ZERO) + MSG=' such that in the machine, T + H = T on the next step ' + CALL XERRWD (MSG, 60, 101, 1, 0, 0, 0, 0, ZERO, ZERO) + MSG = ' (H = step size). solver will continue anyway' + CALL XERRWD (MSG, 50, 101, 1, 0, 0, 0, 2, TN, H) + IF (NHNIL .LT. MXHNIL) GO TO 290 + MSG = 'ZVODE-- Above warning has been issued I1 times. ' + CALL XERRWD (MSG, 50, 102, 1, 0, 0, 0, 0, ZERO, ZERO) + MSG = ' it will not be issued again for this problem' + CALL XERRWD (MSG, 50, 102, 1, 1, MXHNIL, 0, 0, ZERO, ZERO) + 290 CONTINUE +C----------------------------------------------------------------------- +C CALL ZVSTEP (Y, YH, NYH, YH, EWT, SAVF, VSAV, ACOR, +C WM, IWM, F, JAC, F, ZVNLSD, RPAR, IPAR) +C----------------------------------------------------------------------- + CALL ZVSTEP (Y, ZWORK(LYH), NYH, ZWORK(LYH), RWORK(LEWT), + 1 ZWORK(LSAVF), Y, ZWORK(LACOR), ZWORK(LWM), IWORK(LIWM), + 2 F, JAC, F, ZVNLSD, RPAR, IPAR) + KGO = 1 - KFLAG +C Branch on KFLAG. Note: In this version, KFLAG can not be set to -3. +C KFLAG .eq. 0, -1, -2 + GO TO (300, 530, 540), KGO +C----------------------------------------------------------------------- +C Block F. +C The following block handles the case of a successful return from the +C core integrator (KFLAG = 0). Test for stop conditions. 
+C----------------------------------------------------------------------- + 300 INIT = 1 + KUTH = 0 + GO TO (310, 400, 330, 340, 350), ITASK +C ITASK = 1. If TOUT has been reached, interpolate. ------------------- + 310 IF ((TN - TOUT)*H .LT. ZERO) GO TO 250 + CALL ZVINDY (TOUT, 0, ZWORK(LYH), NYH, Y, IFLAG) + T = TOUT + GO TO 420 +C ITASK = 3. Jump to exit if TOUT was reached. ------------------------ + 330 IF ((TN - TOUT)*H .GE. ZERO) GO TO 400 + GO TO 250 +C ITASK = 4. See if TOUT or TCRIT was reached. Adjust H if necessary. + 340 IF ((TN - TOUT)*H .LT. ZERO) GO TO 345 + CALL ZVINDY (TOUT, 0, ZWORK(LYH), NYH, Y, IFLAG) + T = TOUT + GO TO 420 + 345 HMX = ABS(TN) + ABS(H) + IHIT = ABS(TN - TCRIT) .LE. HUN*UROUND*HMX + IF (IHIT) GO TO 400 + TNEXT = TN + HNEW*(ONE + FOUR*UROUND) + IF ((TNEXT - TCRIT)*H .LE. ZERO) GO TO 250 + H = (TCRIT - TN)*(ONE - FOUR*UROUND) + KUTH = 1 + GO TO 250 +C ITASK = 5. See if TCRIT was reached and jump to exit. --------------- + 350 HMX = ABS(TN) + ABS(H) + IHIT = ABS(TN - TCRIT) .LE. HUN*UROUND*HMX +C----------------------------------------------------------------------- +C Block G. +C The following block handles all successful returns from ZVODE. +C If ITASK .ne. 1, Y is loaded from YH and T is set accordingly. +C ISTATE is set to 2, and the optional output is loaded into the work +C arrays before returning. +C----------------------------------------------------------------------- + 400 CONTINUE + CALL ZCOPY (N, ZWORK(LYH), 1, Y, 1) + T = TN + IF (ITASK .NE. 4 .AND. ITASK .NE. 5) GO TO 420 + IF (IHIT) T = TCRIT + 420 ISTATE = 2 + RWORK(11) = HU + RWORK(12) = HNEW + RWORK(13) = TN + IWORK(11) = NST + IWORK(12) = NFE + IWORK(13) = NJE + IWORK(14) = NQU + IWORK(15) = NEWQ + IWORK(20) = NLU + IWORK(21) = NNI + IWORK(22) = NCFN + IWORK(23) = NETF + RETURN +C----------------------------------------------------------------------- +C Block H. +C The following block handles all unsuccessful returns other than +C those for illegal input. First the error message routine is called. +C if there was an error test or convergence test failure, IMXER is set. +C Then Y is loaded from YH, and T is set to TN. +C The optional output is loaded into the work arrays before returning. +C----------------------------------------------------------------------- +C The maximum number of steps was taken before reaching TOUT. ---------- + 500 MSG = 'ZVODE-- At current T (=R1), MXSTEP (=I1) steps ' + CALL XERRWD (MSG, 50, 201, 1, 0, 0, 0, 0, ZERO, ZERO) + MSG = ' taken on this call before reaching TOUT ' + CALL XERRWD (MSG, 50, 201, 1, 1, MXSTEP, 0, 1, TN, ZERO) + ISTATE = -1 + GO TO 580 +C EWT(i) .le. 0.0 for some i (not at start of problem). ---------------- + 510 EWTI = RWORK(LEWT+I-1) + MSG = 'ZVODE-- At T (=R1), EWT(I1) has become R2 .le. 0.' + CALL XERRWD (MSG, 50, 202, 1, 1, I, 0, 2, TN, EWTI) + ISTATE = -6 + GO TO 580 +C Too much accuracy requested for machine precision. ------------------- + 520 MSG = 'ZVODE-- At T (=R1), too much accuracy requested ' + CALL XERRWD (MSG, 50, 203, 1, 0, 0, 0, 0, ZERO, ZERO) + MSG = ' for precision of machine: see TOLSF (=R2) ' + CALL XERRWD (MSG, 50, 203, 1, 0, 0, 0, 2, TN, TOLSF) + RWORK(14) = TOLSF + ISTATE = -2 + GO TO 580 +C KFLAG = -1. Error test failed repeatedly or with ABS(H) = HMIN. 
----- + 530 MSG = 'ZVODE-- At T(=R1) and step size H(=R2), the error' + CALL XERRWD (MSG, 50, 204, 1, 0, 0, 0, 0, ZERO, ZERO) + MSG = ' test failed repeatedly or with abs(H) = HMIN' + CALL XERRWD (MSG, 50, 204, 1, 0, 0, 0, 2, TN, H) + ISTATE = -4 + GO TO 560 +C KFLAG = -2. Convergence failed repeatedly or with ABS(H) = HMIN. ---- + 540 MSG = 'ZVODE-- At T (=R1) and step size H (=R2), the ' + CALL XERRWD (MSG, 50, 205, 1, 0, 0, 0, 0, ZERO, ZERO) + MSG = ' corrector convergence failed repeatedly ' + CALL XERRWD (MSG, 50, 205, 1, 0, 0, 0, 0, ZERO, ZERO) + MSG = ' or with abs(H) = HMIN ' + CALL XERRWD (MSG, 30, 205, 1, 0, 0, 0, 2, TN, H) + ISTATE = -5 +C Compute IMXER if relevant. ------------------------------------------- + 560 BIG = ZERO + IMXER = 1 + DO 570 I = 1,N + SIZE = ABS(ZWORK(I+LACOR-1))*RWORK(I+LEWT-1) + IF (BIG .GE. SIZE) GO TO 570 + BIG = SIZE + IMXER = I + 570 CONTINUE + IWORK(16) = IMXER +C Set Y vector, T, and optional output. -------------------------------- + 580 CONTINUE + CALL ZCOPY (N, ZWORK(LYH), 1, Y, 1) + T = TN + RWORK(11) = HU + RWORK(12) = H + RWORK(13) = TN + IWORK(11) = NST + IWORK(12) = NFE + IWORK(13) = NJE + IWORK(14) = NQU + IWORK(15) = NQ + IWORK(20) = NLU + IWORK(21) = NNI + IWORK(22) = NCFN + IWORK(23) = NETF + RETURN +C----------------------------------------------------------------------- +C Block I. +C The following block handles all error returns due to illegal input +C (ISTATE = -3), as detected before calling the core integrator. +C First the error message routine is called. If the illegal input +C is a negative ISTATE, the run is aborted (apparent infinite loop). +C----------------------------------------------------------------------- + 601 MSG = 'ZVODE-- ISTATE (=I1) illegal ' + CALL XERRWD (MSG, 30, 1, 1, 1, ISTATE, 0, 0, ZERO, ZERO) + IF (ISTATE .LT. 0) GO TO 800 + GO TO 700 + 602 MSG = 'ZVODE-- ITASK (=I1) illegal ' + CALL XERRWD (MSG, 30, 2, 1, 1, ITASK, 0, 0, ZERO, ZERO) + GO TO 700 + 603 MSG='ZVODE-- ISTATE (=I1) .gt. 1 but ZVODE not initialized ' + CALL XERRWD (MSG, 60, 3, 1, 1, ISTATE, 0, 0, ZERO, ZERO) + GO TO 700 + 604 MSG = 'ZVODE-- NEQ (=I1) .lt. 1 ' + CALL XERRWD (MSG, 30, 4, 1, 1, NEQ, 0, 0, ZERO, ZERO) + GO TO 700 + 605 MSG = 'ZVODE-- ISTATE = 3 and NEQ increased (I1 to I2) ' + CALL XERRWD (MSG, 50, 5, 1, 2, N, NEQ, 0, ZERO, ZERO) + GO TO 700 + 606 MSG = 'ZVODE-- ITOL (=I1) illegal ' + CALL XERRWD (MSG, 30, 6, 1, 1, ITOL, 0, 0, ZERO, ZERO) + GO TO 700 + 607 MSG = 'ZVODE-- IOPT (=I1) illegal ' + CALL XERRWD (MSG, 30, 7, 1, 1, IOPT, 0, 0, ZERO, ZERO) + GO TO 700 + 608 MSG = 'ZVODE-- MF (=I1) illegal ' + CALL XERRWD (MSG, 30, 8, 1, 1, MF, 0, 0, ZERO, ZERO) + GO TO 700 + 609 MSG = 'ZVODE-- ML (=I1) illegal: .lt.0 or .ge.NEQ (=I2)' + CALL XERRWD (MSG, 50, 9, 1, 2, ML, NEQ, 0, ZERO, ZERO) + GO TO 700 + 610 MSG = 'ZVODE-- MU (=I1) illegal: .lt.0 or .ge.NEQ (=I2)' + CALL XERRWD (MSG, 50, 10, 1, 2, MU, NEQ, 0, ZERO, ZERO) + GO TO 700 + 611 MSG = 'ZVODE-- MAXORD (=I1) .lt. 0 ' + CALL XERRWD (MSG, 30, 11, 1, 1, MAXORD, 0, 0, ZERO, ZERO) + GO TO 700 + 612 MSG = 'ZVODE-- MXSTEP (=I1) .lt. 0 ' + CALL XERRWD (MSG, 30, 12, 1, 1, MXSTEP, 0, 0, ZERO, ZERO) + GO TO 700 + 613 MSG = 'ZVODE-- MXHNIL (=I1) .lt. 0 ' + CALL XERRWD (MSG, 30, 13, 1, 1, MXHNIL, 0, 0, ZERO, ZERO) + GO TO 700 + 614 MSG = 'ZVODE-- TOUT (=R1) behind T (=R2) ' + CALL XERRWD (MSG, 40, 14, 1, 0, 0, 0, 2, TOUT, T) + MSG = ' integration direction is given by H0 (=R1) ' + CALL XERRWD (MSG, 50, 14, 1, 0, 0, 0, 1, H0, ZERO) + GO TO 700 + 615 MSG = 'ZVODE-- HMAX (=R1) .lt. 
0.0 ' + CALL XERRWD (MSG, 30, 15, 1, 0, 0, 0, 1, HMAX, ZERO) + GO TO 700 + 616 MSG = 'ZVODE-- HMIN (=R1) .lt. 0.0 ' + CALL XERRWD (MSG, 30, 16, 1, 0, 0, 0, 1, HMIN, ZERO) + GO TO 700 + 617 CONTINUE + MSG='ZVODE-- RWORK length needed, LENRW (=I1), exceeds LRW (=I2)' + CALL XERRWD (MSG, 60, 17, 1, 2, LENRW, LRW, 0, ZERO, ZERO) + GO TO 700 + 618 CONTINUE + MSG='ZVODE-- IWORK length needed, LENIW (=I1), exceeds LIW (=I2)' + CALL XERRWD (MSG, 60, 18, 1, 2, LENIW, LIW, 0, ZERO, ZERO) + GO TO 700 + 619 MSG = 'ZVODE-- RTOL(I1) is R1 .lt. 0.0 ' + CALL XERRWD (MSG, 40, 19, 1, 1, I, 0, 1, RTOLI, ZERO) + GO TO 700 + 620 MSG = 'ZVODE-- ATOL(I1) is R1 .lt. 0.0 ' + CALL XERRWD (MSG, 40, 20, 1, 1, I, 0, 1, ATOLI, ZERO) + GO TO 700 + 621 EWTI = RWORK(LEWT+I-1) + MSG = 'ZVODE-- EWT(I1) is R1 .le. 0.0 ' + CALL XERRWD (MSG, 40, 21, 1, 1, I, 0, 1, EWTI, ZERO) + GO TO 700 + 622 CONTINUE + MSG='ZVODE-- TOUT (=R1) too close to T(=R2) to start integration' + CALL XERRWD (MSG, 60, 22, 1, 0, 0, 0, 2, TOUT, T) + GO TO 700 + 623 CONTINUE + MSG='ZVODE-- ITASK = I1 and TOUT (=R1) behind TCUR - HU (= R2) ' + CALL XERRWD (MSG, 60, 23, 1, 1, ITASK, 0, 2, TOUT, TP) + GO TO 700 + 624 CONTINUE + MSG='ZVODE-- ITASK = 4 or 5 and TCRIT (=R1) behind TCUR (=R2) ' + CALL XERRWD (MSG, 60, 24, 1, 0, 0, 0, 2, TCRIT, TN) + GO TO 700 + 625 CONTINUE + MSG='ZVODE-- ITASK = 4 or 5 and TCRIT (=R1) behind TOUT (=R2) ' + CALL XERRWD (MSG, 60, 25, 1, 0, 0, 0, 2, TCRIT, TOUT) + GO TO 700 + 626 MSG = 'ZVODE-- At start of problem, too much accuracy ' + CALL XERRWD (MSG, 50, 26, 1, 0, 0, 0, 0, ZERO, ZERO) + MSG=' requested for precision of machine: see TOLSF (=R1) ' + CALL XERRWD (MSG, 60, 26, 1, 0, 0, 0, 1, TOLSF, ZERO) + RWORK(14) = TOLSF + GO TO 700 + 627 MSG='ZVODE-- Trouble from ZVINDY. ITASK = I1, TOUT = R1. ' + CALL XERRWD (MSG, 60, 27, 1, 1, ITASK, 0, 1, TOUT, ZERO) + GO TO 700 + 628 CONTINUE + MSG='ZVODE-- ZWORK length needed, LENZW (=I1), exceeds LZW (=I2)' + CALL XERRWD (MSG, 60, 17, 1, 2, LENZW, LZW, 0, ZERO, ZERO) +C + 700 CONTINUE + ISTATE = -3 + RETURN +C + 800 MSG = 'ZVODE-- Run aborted: apparent infinite loop ' + CALL XERRWD (MSG, 50, 303, 2, 0, 0, 0, 0, ZERO, ZERO) + RETURN +C----------------------- End of Subroutine ZVODE ----------------------- + END +*DECK ZVHIN + SUBROUTINE ZVHIN (N, T0, Y0, YDOT, F, RPAR, IPAR, TOUT, UROUND, + 1 EWT, ITOL, ATOL, Y, TEMP, H0, NITER, IER) + EXTERNAL F + DOUBLE COMPLEX Y0, YDOT, Y, TEMP + DOUBLE PRECISION T0, TOUT, UROUND, EWT, ATOL, H0 + INTEGER N, IPAR, ITOL, NITER, IER + DIMENSION Y0(*), YDOT(*), EWT(*), ATOL(*), Y(*), + 1 TEMP(*), RPAR(*), IPAR(*) +C----------------------------------------------------------------------- +C Call sequence input -- N, T0, Y0, YDOT, F, RPAR, IPAR, TOUT, UROUND, +C EWT, ITOL, ATOL, Y, TEMP +C Call sequence output -- H0, NITER, IER +C COMMON block variables accessed -- None +C +C Subroutines called by ZVHIN: F +C Function routines called by ZVHIN: ZVNORM +C----------------------------------------------------------------------- +C This routine computes the step size, H0, to be attempted on the +C first step, when the user has not supplied a value for this. +C +C First we check that TOUT - T0 differs significantly from zero. Then +C an iteration is done to approximate the initial second derivative +C and this is used to define h from w.r.m.s.norm(h**2 * yddot / 2) = 1. +C A bias factor of 1/2 is applied to the resulting h. +C The sign of H0 is inferred from the initial values of TOUT and T0. 
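The few sentences above are the whole ZVHIN algorithm. A compact NumPy sketch of the same heuristic follows; the names initial_step and wrms are illustrative helpers rather than anything in ZVODE or SciPy, ewt is assumed to hold the reciprocal error weights exactly as the Fortran EWT does, and the bound handling follows the loops in the routine body.

    import numpy as np

    def wrms(v, ewt):
        # weighted root-mean-square norm, with ewt holding reciprocal weights
        return np.sqrt(np.mean(np.abs(v * ewt) ** 2))

    def initial_step(f, t0, y0, tout, ewt, atol, uround=np.finfo(float).eps):
        tdist = abs(tout - t0)
        tround = uround * max(abs(t0), abs(tout))
        if tdist < 2.0 * tround:
            raise ValueError("tout too close to t0 to start integration")
        ydot = np.asarray(f(t0, y0))
        hlb = 100.0 * tround                  # lower bound from roundoff in t0 and tout
        hub = 0.1 * tdist                     # upper bound from tout - t0 ...
        afi = np.abs(ydot)
        dely = 0.1 * np.abs(y0) + atol        # ... and from the initial y and ydot
        nz = afi > 0.0
        if np.any(nz):
            hub = min(hub, float(np.min(dely[nz] / afi[nz])))
        hg = np.sqrt(hlb * hub)               # geometric-mean starting guess
        if hub < hlb:                         # bounds crossed: exit with the mean value
            return float(np.copysign(hg, tout - t0))
        hnew = hg
        for it in range(4):                   # at most four refinement iterations
            h = float(np.copysign(hg, tout - t0))
            yddot = (np.asarray(f(t0 + h, y0 + h * ydot)) - ydot) / h
            ydd = wrms(yddot, ewt)            # w.r.m.s. norm of the second-derivative estimate
            hnew = np.sqrt(2.0 / ydd) if ydd * hub * hub > 2.0 else np.sqrt(hg * hub)
            if 0.5 < hnew / hg < 2.0:         # h has settled to within a factor of two
                break
            if it >= 1 and hnew > 2.0 * hg:   # likely cancellation error: keep previous h
                hnew = hg
                break
            hg = hnew
        h0 = min(max(0.5 * hnew, hlb), hub)   # bias factor of 1/2, then the bounds
        return float(np.copysign(h0, tout - t0))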
+C +C Communication with ZVHIN is done with the following variables: +C +C N = Size of ODE system, input. +C T0 = Initial value of independent variable, input. +C Y0 = Vector of initial conditions, input. +C YDOT = Vector of initial first derivatives, input. +C F = Name of subroutine for right-hand side f(t,y), input. +C RPAR, IPAR = User's real/complex and integer work arrays. +C TOUT = First output value of independent variable +C UROUND = Machine unit roundoff +C EWT, ITOL, ATOL = Error weights and tolerance parameters +C as described in the driver routine, input. +C Y, TEMP = Work arrays of length N. +C H0 = Step size to be attempted, output. +C NITER = Number of iterations (and of f evaluations) to compute H0, +C output. +C IER = The error flag, returned with the value +C IER = 0 if no trouble occurred, or +C IER = -1 if TOUT and T0 are considered too close to proceed. +C----------------------------------------------------------------------- +C +C Type declarations for local variables -------------------------------- +C + DOUBLE PRECISION AFI, ATOLI, DELYI, H, HALF, HG, HLB, HNEW, HRAT, + 1 HUB, HUN, PT1, T1, TDIST, TROUND, TWO, YDDNRM + INTEGER I, ITER +C +C Type declaration for function subroutines called --------------------- +C + DOUBLE PRECISION ZVNORM +C----------------------------------------------------------------------- +C The following Fortran-77 declaration is to cause the values of the +C listed (local) variables to be saved between calls to this integrator. +C----------------------------------------------------------------------- + SAVE HALF, HUN, PT1, TWO + DATA HALF /0.5D0/, HUN /100.0D0/, PT1 /0.1D0/, TWO /2.0D0/ +C + NITER = 0 + TDIST = ABS(TOUT - T0) + TROUND = UROUND*MAX(ABS(T0),ABS(TOUT)) + IF (TDIST .LT. TWO*TROUND) GO TO 100 +C +C Set a lower bound on h based on the roundoff level in T0 and TOUT. --- + HLB = HUN*TROUND +C Set an upper bound on h based on TOUT-T0 and the initial Y and YDOT. - + HUB = PT1*TDIST + ATOLI = ATOL(1) + DO 10 I = 1, N + IF (ITOL .EQ. 2 .OR. ITOL .EQ. 4) ATOLI = ATOL(I) + DELYI = PT1*ABS(Y0(I)) + ATOLI + AFI = ABS(YDOT(I)) + IF (AFI*HUB .GT. DELYI) HUB = DELYI/AFI + 10 CONTINUE +C +C Set initial guess for h as geometric mean of upper and lower bounds. - + ITER = 0 + HG = SQRT(HLB*HUB) +C If the bounds have crossed, exit with the mean value. ---------------- + IF (HUB .LT. HLB) THEN + H0 = HG + GO TO 90 + ENDIF +C +C Looping point for iteration. ----------------------------------------- + 50 CONTINUE +C Estimate the second derivative as a difference quotient in f. -------- + H = SIGN (HG, TOUT - T0) + T1 = T0 + H + DO 60 I = 1, N + 60 Y(I) = Y0(I) + H*YDOT(I) + CALL F (N, T1, Y, TEMP, RPAR, IPAR) + DO 70 I = 1, N + 70 TEMP(I) = (TEMP(I) - YDOT(I))/H + YDDNRM = ZVNORM (N, TEMP, EWT) +C Get the corresponding new value of h. -------------------------------- + IF (YDDNRM*HUB*HUB .GT. TWO) THEN + HNEW = SQRT(TWO/YDDNRM) + ELSE + HNEW = SQRT(HG*HUB) + ENDIF + ITER = ITER + 1 +C----------------------------------------------------------------------- +C Test the stopping conditions. +C Stop if the new and previous h values differ by a factor of .lt. 2. +C Stop if four iterations have been done. Also, stop with previous h +C if HNEW/HG .gt. 2 after first iteration, as this probably means that +C the second derivative value is bad because of cancellation error. +C----------------------------------------------------------------------- + IF (ITER .GE. 4) GO TO 80 + HRAT = HNEW/HG + IF ( (HRAT .GT. HALF) .AND. (HRAT .LT. 
TWO) ) GO TO 80 + IF ( (ITER .GE. 2) .AND. (HNEW .GT. TWO*HG) ) THEN + HNEW = HG + GO TO 80 + ENDIF + HG = HNEW + GO TO 50 +C +C Iteration done. Apply bounds, bias factor, and sign. Then exit. ---- + 80 H0 = HNEW*HALF + IF (H0 .LT. HLB) H0 = HLB + IF (H0 .GT. HUB) H0 = HUB + 90 H0 = SIGN(H0, TOUT - T0) + NITER = ITER + IER = 0 + RETURN +C Error return for TOUT - T0 too small. -------------------------------- + 100 IER = -1 + RETURN +C----------------------- End of Subroutine ZVHIN ----------------------- + END +*DECK ZVINDY + SUBROUTINE ZVINDY (T, K, YH, LDYH, DKY, IFLAG) + DOUBLE COMPLEX YH, DKY + DOUBLE PRECISION T + INTEGER K, LDYH, IFLAG + DIMENSION YH(LDYH,*), DKY(*) +C----------------------------------------------------------------------- +C Call sequence input -- T, K, YH, LDYH +C Call sequence output -- DKY, IFLAG +C COMMON block variables accessed: +C /ZVOD01/ -- H, TN, UROUND, L, N, NQ +C /ZVOD02/ -- HU +C +C Subroutines called by ZVINDY: DZSCAL, XERRWD +C Function routines called by ZVINDY: None +C----------------------------------------------------------------------- +C ZVINDY computes interpolated values of the K-th derivative of the +C dependent variable vector y, and stores it in DKY. This routine +C is called within the package with K = 0 and T = TOUT, but may +C also be called by the user for any K up to the current order. +C (See detailed instructions in the usage documentation.) +C----------------------------------------------------------------------- +C The computed values in DKY are gotten by interpolation using the +C Nordsieck history array YH. This array corresponds uniquely to a +C vector-valued polynomial of degree NQCUR or less, and DKY is set +C to the K-th derivative of this polynomial at T. +C The formula for DKY is: +C q +C DKY(i) = sum c(j,K) * (T - TN)**(j-K) * H**(-j) * YH(i,j+1) +C j=K +C where c(j,K) = j*(j-1)*...*(j-K+1), q = NQCUR, TN = TCUR, H = HCUR. +C The quantities NQ = NQCUR, L = NQ+1, N, TN, and H are +C communicated by COMMON. The above sum is done in reverse order. +C IFLAG is returned negative if either K or T is out of bounds. +C +C Discussion above and comments in driver explain all variables. +C----------------------------------------------------------------------- +C +C Type declarations for labeled COMMON block ZVOD01 -------------------- +C + DOUBLE PRECISION ACNRM, CCMXJ, CONP, CRATE, DRC, EL, + 1 ETA, ETAMAX, H, HMIN, HMXI, HNEW, HRL1, HSCAL, PRL1, + 2 RC, RL1, SRUR, TAU, TQ, TN, UROUND + INTEGER ICF, INIT, IPUP, JCUR, JSTART, JSV, KFLAG, KUTH, + 1 L, LMAX, LYH, LEWT, LACOR, LSAVF, LWM, LIWM, + 2 LOCJS, MAXORD, METH, MITER, MSBJ, MXHNIL, MXSTEP, + 3 N, NEWH, NEWQ, NHNIL, NQ, NQNYH, NQWAIT, NSLJ, + 4 NSLP, NYH +C +C Type declarations for labeled COMMON block ZVOD02 -------------------- +C + DOUBLE PRECISION HU + INTEGER NCFN, NETF, NFE, NJE, NLU, NNI, NQU, NST +C +C Type declarations for local variables -------------------------------- +C + DOUBLE PRECISION C, HUN, R, S, TFUZZ, TN1, TP, ZERO + INTEGER I, IC, J, JB, JB2, JJ, JJ1, JP1 + CHARACTER*80 MSG +C----------------------------------------------------------------------- +C The following Fortran-77 declaration is to cause the values of the +C listed (local) variables to be saved between calls to this integrator. 
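The interpolation formula quoted in the ZVINDY preamble is easy to evaluate independently. The sketch below is a hypothetical NumPy helper, using 0-based columns (Fortran YH(i,j+1) is yh[:, j] here) and summing the terms directly instead of in the reversed, Horner-like order the Fortran uses.

    import numpy as np
    from math import perm                     # perm(j, k) = j*(j-1)*...*(j-k+1)

    def interp_derivative(t, k, yh, tn, h, nq):
        # yh[:, j] is assumed to hold h**j * y^(j)(tn) / j! for j = 0..nq
        dky = np.zeros(yh.shape[0], dtype=yh.dtype)
        for j in range(k, nq + 1):
            c = perm(j, k)                    # the coefficient c(j,K); equals 1 when k == 0
            dky += c * (t - tn) ** (j - k) * yh[:, j] / h ** j
        return dky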
+C----------------------------------------------------------------------- + SAVE HUN, ZERO +C + COMMON /ZVOD01/ ACNRM, CCMXJ, CONP, CRATE, DRC, EL(13), ETA, + 1 ETAMAX, H, HMIN, HMXI, HNEW, HRL1, HSCAL, PRL1, + 2 RC, RL1, SRUR, TAU(13), TQ(5), TN, UROUND, + 3 ICF, INIT, IPUP, JCUR, JSTART, JSV, KFLAG, KUTH, + 4 L, LMAX, LYH, LEWT, LACOR, LSAVF, LWM, LIWM, + 5 LOCJS, MAXORD, METH, MITER, MSBJ, MXHNIL, MXSTEP, + 6 N, NEWH, NEWQ, NHNIL, NQ, NQNYH, NQWAIT, NSLJ, + 7 NSLP, NYH + COMMON /ZVOD02/ HU, NCFN, NETF, NFE, NJE, NLU, NNI, NQU, NST +C + DATA HUN /100.0D0/, ZERO /0.0D0/ +C + IFLAG = 0 + IF (K .LT. 0 .OR. K .GT. NQ) GO TO 80 + TFUZZ = HUN*UROUND*SIGN(ABS(TN) + ABS(HU), HU) + TP = TN - HU - TFUZZ + TN1 = TN + TFUZZ + IF ((T-TP)*(T-TN1) .GT. ZERO) GO TO 90 +C + S = (T - TN)/H + IC = 1 + IF (K .EQ. 0) GO TO 15 + JJ1 = L - K + DO 10 JJ = JJ1, NQ + 10 IC = IC*JJ + 15 C = REAL(IC) + DO 20 I = 1, N + 20 DKY(I) = C*YH(I,L) + IF (K .EQ. NQ) GO TO 55 + JB2 = NQ - K + DO 50 JB = 1, JB2 + J = NQ - JB + JP1 = J + 1 + IC = 1 + IF (K .EQ. 0) GO TO 35 + JJ1 = JP1 - K + DO 30 JJ = JJ1, J + 30 IC = IC*JJ + 35 C = REAL(IC) + DO 40 I = 1, N + 40 DKY(I) = C*YH(I,JP1) + S*DKY(I) + 50 CONTINUE + IF (K .EQ. 0) RETURN + 55 R = H**(-K) + CALL DZSCAL (N, R, DKY, 1) + RETURN +C + 80 MSG = 'ZVINDY-- K (=I1) illegal ' + CALL XERRWD (MSG, 30, 51, 1, 1, K, 0, 0, ZERO, ZERO) + IFLAG = -1 + RETURN + 90 MSG = 'ZVINDY-- T (=R1) illegal ' + CALL XERRWD (MSG, 30, 52, 1, 0, 0, 0, 1, T, ZERO) + MSG=' T not in interval TCUR - HU (= R1) to TCUR (=R2) ' + CALL XERRWD (MSG, 60, 52, 1, 0, 0, 0, 2, TP, TN) + IFLAG = -2 + RETURN +C----------------------- End of Subroutine ZVINDY ---------------------- + END +*DECK ZVSTEP + SUBROUTINE ZVSTEP (Y, YH, LDYH, YH1, EWT, SAVF, VSAV, ACOR, + 1 WM, IWM, F, JAC, PSOL, VNLS, RPAR, IPAR) + EXTERNAL F, JAC, PSOL, VNLS + DOUBLE COMPLEX Y, YH, YH1, SAVF, VSAV, ACOR, WM + DOUBLE PRECISION EWT + INTEGER LDYH, IWM, IPAR + DIMENSION Y(*), YH(LDYH,*), YH1(*), EWT(*), SAVF(*), VSAV(*), + 1 ACOR(*), WM(*), IWM(*), RPAR(*), IPAR(*) +C----------------------------------------------------------------------- +C Call sequence input -- Y, YH, LDYH, YH1, EWT, SAVF, VSAV, +C ACOR, WM, IWM, F, JAC, PSOL, VNLS, RPAR, IPAR +C Call sequence output -- YH, ACOR, WM, IWM +C COMMON block variables accessed: +C /ZVOD01/ ACNRM, EL(13), H, HMIN, HMXI, HNEW, HSCAL, RC, TAU(13), +C TQ(5), TN, JCUR, JSTART, KFLAG, KUTH, +C L, LMAX, MAXORD, N, NEWQ, NQ, NQWAIT +C /ZVOD02/ HU, NCFN, NETF, NFE, NQU, NST +C +C Subroutines called by ZVSTEP: F, DZAXPY, ZCOPY, DZSCAL, +C ZVJUST, VNLS, ZVSET +C Function routines called by ZVSTEP: ZVNORM +C----------------------------------------------------------------------- +C ZVSTEP performs one step of the integration of an initial value +C problem for a system of ordinary differential equations. +C ZVSTEP calls subroutine VNLS for the solution of the nonlinear system +C arising in the time step. Thus it is independent of the problem +C Jacobian structure and the type of nonlinear system solution method. +C ZVSTEP returns a completion flag KFLAG (in COMMON). +C A return with KFLAG = -1 or -2 means either ABS(H) = HMIN or 10 +C consecutive failures occurred. On a return with KFLAG negative, +C the values of TN and the YH array are as of the beginning of the last +C step, and H is the last step size attempted. +C +C Communication with ZVSTEP is done with the following variables: +C +C Y = An array of length N used for the dependent variable vector. 
+C YH = An LDYH by LMAX array containing the dependent variables +C and their approximate scaled derivatives, where +C LMAX = MAXORD + 1. YH(i,j+1) contains the approximate +C j-th derivative of y(i), scaled by H**j/factorial(j) +C (j = 0,1,...,NQ). On entry for the first step, the first +C two columns of YH must be set from the initial values. +C LDYH = A constant integer .ge. N, the first dimension of YH. +C N is the number of ODEs in the system. +C YH1 = A one-dimensional array occupying the same space as YH. +C EWT = An array of length N containing multiplicative weights +C for local error measurements. Local errors in y(i) are +C compared to 1.0/EWT(i) in various error tests. +C SAVF = An array of working storage, of length N. +C also used for input of YH(*,MAXORD+2) when JSTART = -1 +C and MAXORD .lt. the current order NQ. +C VSAV = A work array of length N passed to subroutine VNLS. +C ACOR = A work array of length N, used for the accumulated +C corrections. On a successful return, ACOR(i) contains +C the estimated one-step local error in y(i). +C WM,IWM = Complex and integer work arrays associated with matrix +C operations in VNLS. +C F = Dummy name for the user-supplied subroutine for f. +C JAC = Dummy name for the user-supplied Jacobian subroutine. +C PSOL = Dummy name for the subroutine passed to VNLS, for +C possible use there. +C VNLS = Dummy name for the nonlinear system solving subroutine, +C whose real name is dependent on the method used. +C RPAR, IPAR = User's real/complex and integer work arrays. +C----------------------------------------------------------------------- +C +C Type declarations for labeled COMMON block ZVOD01 -------------------- +C + DOUBLE PRECISION ACNRM, CCMXJ, CONP, CRATE, DRC, EL, + 1 ETA, ETAMAX, H, HMIN, HMXI, HNEW, HRL1, HSCAL, PRL1, + 2 RC, RL1, SRUR, TAU, TQ, TN, UROUND + INTEGER ICF, INIT, IPUP, JCUR, JSTART, JSV, KFLAG, KUTH, + 1 L, LMAX, LYH, LEWT, LACOR, LSAVF, LWM, LIWM, + 2 LOCJS, MAXORD, METH, MITER, MSBJ, MXHNIL, MXSTEP, + 3 N, NEWH, NEWQ, NHNIL, NQ, NQNYH, NQWAIT, NSLJ, + 4 NSLP, NYH +C +C Type declarations for labeled COMMON block ZVOD02 -------------------- +C + DOUBLE PRECISION HU + INTEGER NCFN, NETF, NFE, NJE, NLU, NNI, NQU, NST +C +C Type declarations for local variables -------------------------------- +C + DOUBLE PRECISION ADDON, BIAS1,BIAS2,BIAS3, CNQUOT, DDN, DSM, DUP, + 1 ETACF, ETAMIN, ETAMX1, ETAMX2, ETAMX3, ETAMXF, + 2 ETAQ, ETAQM1, ETAQP1, FLOTL, ONE, ONEPSM, + 3 R, THRESH, TOLD, ZERO + INTEGER I, I1, I2, IBACK, J, JB, KFC, KFH, MXNCF, NCF, NFLAG +C +C Type declaration for function subroutines called --------------------- +C + DOUBLE PRECISION ZVNORM +C----------------------------------------------------------------------- +C The following Fortran-77 declaration is to cause the values of the +C listed (local) variables to be saved between calls to this integrator. 
+C----------------------------------------------------------------------- + SAVE ADDON, BIAS1, BIAS2, BIAS3, + 1 ETACF, ETAMIN, ETAMX1, ETAMX2, ETAMX3, ETAMXF, ETAQ, ETAQM1, + 2 KFC, KFH, MXNCF, ONEPSM, THRESH, ONE, ZERO +C----------------------------------------------------------------------- + COMMON /ZVOD01/ ACNRM, CCMXJ, CONP, CRATE, DRC, EL(13), ETA, + 1 ETAMAX, H, HMIN, HMXI, HNEW, HRL1, HSCAL, PRL1, + 2 RC, RL1, SRUR, TAU(13), TQ(5), TN, UROUND, + 3 ICF, INIT, IPUP, JCUR, JSTART, JSV, KFLAG, KUTH, + 4 L, LMAX, LYH, LEWT, LACOR, LSAVF, LWM, LIWM, + 5 LOCJS, MAXORD, METH, MITER, MSBJ, MXHNIL, MXSTEP, + 6 N, NEWH, NEWQ, NHNIL, NQ, NQNYH, NQWAIT, NSLJ, + 7 NSLP, NYH + COMMON /ZVOD02/ HU, NCFN, NETF, NFE, NJE, NLU, NNI, NQU, NST +C + DATA KFC/-3/, KFH/-7/, MXNCF/10/ + DATA ADDON /1.0D-6/, BIAS1 /6.0D0/, BIAS2 /6.0D0/, + 1 BIAS3 /10.0D0/, ETACF /0.25D0/, ETAMIN /0.1D0/, + 2 ETAMXF /0.2D0/, ETAMX1 /1.0D4/, ETAMX2 /10.0D0/, + 3 ETAMX3 /10.0D0/, ONEPSM /1.00001D0/, THRESH /1.5D0/ + DATA ONE/1.0D0/, ZERO/0.0D0/ +C + KFLAG = 0 + TOLD = TN + NCF = 0 + JCUR = 0 + NFLAG = 0 + IF (JSTART .GT. 0) GO TO 20 + IF (JSTART .EQ. -1) GO TO 100 +C----------------------------------------------------------------------- +C On the first call, the order is set to 1, and other variables are +C initialized. ETAMAX is the maximum ratio by which H can be increased +C in a single step. It is normally 10, but is larger during the +C first step to compensate for the small initial H. If a failure +C occurs (in corrector convergence or error test), ETAMAX is set to 1 +C for the next increase. +C----------------------------------------------------------------------- + LMAX = MAXORD + 1 + NQ = 1 + L = 2 + NQNYH = NQ*LDYH + TAU(1) = H + PRL1 = ONE + RC = ZERO + ETAMAX = ETAMX1 + NQWAIT = 2 + HSCAL = H + GO TO 200 +C----------------------------------------------------------------------- +C Take preliminary actions on a normal continuation step (JSTART.GT.0). +C If the driver changed H, then ETA must be reset and NEWH set to 1. +C If a change of order was dictated on the previous step, then +C it is done here and appropriate adjustments in the history are made. +C On an order decrease, the history array is adjusted by ZVJUST. +C On an order increase, the history array is augmented by a column. +C On a change of step size H, the history array YH is rescaled. +C----------------------------------------------------------------------- + 20 CONTINUE + IF (KUTH .EQ. 1) THEN + ETA = MIN(ETA,H/HSCAL) + NEWH = 1 + ENDIF + 50 IF (NEWH .EQ. 0) GO TO 200 + IF (NEWQ .EQ. NQ) GO TO 150 + IF (NEWQ .LT. NQ) THEN + CALL ZVJUST (YH, LDYH, -1) + NQ = NEWQ + L = NQ + 1 + NQWAIT = L + GO TO 150 + ENDIF + IF (NEWQ .GT. NQ) THEN + CALL ZVJUST (YH, LDYH, 1) + NQ = NEWQ + L = NQ + 1 + NQWAIT = L + GO TO 150 + ENDIF +C----------------------------------------------------------------------- +C The following block handles preliminaries needed when JSTART = -1. +C If N was reduced, zero out part of YH to avoid undefined references. +C If MAXORD was reduced to a value less than the tentative order NEWQ, +C then NQ is set to MAXORD, and a new H ratio ETA is chosen. +C Otherwise, we take the same preliminary actions as for JSTART .gt. 0. +C In any case, NQWAIT is reset to L = NQ + 1 to prevent further +C changes in order for that many steps. +C The new H ratio ETA is limited by the input H if KUTH = 1, +C by HMIN if KUTH = 0, and by HMXI in any case. +C Finally, the history array YH is rescaled. 
+C----------------------------------------------------------------------- + 100 CONTINUE + LMAX = MAXORD + 1 + IF (N .EQ. LDYH) GO TO 120 + I1 = 1 + (NEWQ + 1)*LDYH + I2 = (MAXORD + 1)*LDYH + IF (I1 .GT. I2) GO TO 120 + DO 110 I = I1, I2 + 110 YH1(I) = ZERO + 120 IF (NEWQ .LE. MAXORD) GO TO 140 + FLOTL = REAL(LMAX) + IF (MAXORD .LT. NQ-1) THEN + DDN = ZVNORM (N, SAVF, EWT)/TQ(1) + ETA = ONE/((BIAS1*DDN)**(ONE/FLOTL) + ADDON) + ENDIF + IF (MAXORD .EQ. NQ .AND. NEWQ .EQ. NQ+1) ETA = ETAQ + IF (MAXORD .EQ. NQ-1 .AND. NEWQ .EQ. NQ+1) THEN + ETA = ETAQM1 + CALL ZVJUST (YH, LDYH, -1) + ENDIF + IF (MAXORD .EQ. NQ-1 .AND. NEWQ .EQ. NQ) THEN + DDN = ZVNORM (N, SAVF, EWT)/TQ(1) + ETA = ONE/((BIAS1*DDN)**(ONE/FLOTL) + ADDON) + CALL ZVJUST (YH, LDYH, -1) + ENDIF + ETA = MIN(ETA,ONE) + NQ = MAXORD + L = LMAX + 140 IF (KUTH .EQ. 1) ETA = MIN(ETA,ABS(H/HSCAL)) + IF (KUTH .EQ. 0) ETA = MAX(ETA,HMIN/ABS(HSCAL)) + ETA = ETA/MAX(ONE,ABS(HSCAL)*HMXI*ETA) + NEWH = 1 + NQWAIT = L + IF (NEWQ .LE. MAXORD) GO TO 50 +C Rescale the history array for a change in H by a factor of ETA. ------ + 150 R = ONE + DO 180 J = 2, L + R = R*ETA + CALL DZSCAL (N, R, YH(1,J), 1 ) + 180 CONTINUE + H = HSCAL*ETA + HSCAL = H + RC = RC*ETA + NQNYH = NQ*LDYH +C----------------------------------------------------------------------- +C This section computes the predicted values by effectively +C multiplying the YH array by the Pascal triangle matrix. +C ZVSET is called to calculate all integration coefficients. +C RC is the ratio of new to old values of the coefficient H/EL(2)=h/l1. +C----------------------------------------------------------------------- + 200 TN = TN + H + I1 = NQNYH + 1 + DO 220 JB = 1, NQ + I1 = I1 - LDYH + DO 210 I = I1, NQNYH + 210 YH1(I) = YH1(I) + YH1(I+LDYH) + 220 CONTINUE + CALL ZVSET + RL1 = ONE/EL(2) + RC = RC*(RL1/PRL1) + PRL1 = RL1 +C +C Call the nonlinear system solver. ------------------------------------ +C + CALL VNLS (Y, YH, LDYH, VSAV, SAVF, EWT, ACOR, IWM, WM, + 1 F, JAC, PSOL, NFLAG, RPAR, IPAR) +C + IF (NFLAG .EQ. 0) GO TO 450 +C----------------------------------------------------------------------- +C The VNLS routine failed to achieve convergence (NFLAG .NE. 0). +C The YH array is retracted to its values before prediction. +C The step size H is reduced and the step is retried, if possible. +C Otherwise, an error exit is taken. +C----------------------------------------------------------------------- + NCF = NCF + 1 + NCFN = NCFN + 1 + ETAMAX = ONE + TN = TOLD + I1 = NQNYH + 1 + DO 430 JB = 1, NQ + I1 = I1 - LDYH + DO 420 I = I1, NQNYH + 420 YH1(I) = YH1(I) - YH1(I+LDYH) + 430 CONTINUE + IF (NFLAG .LT. -1) GO TO 680 + IF (ABS(H) .LE. HMIN*ONEPSM) GO TO 670 + IF (NCF .EQ. MXNCF) GO TO 670 + ETA = ETACF + ETA = MAX(ETA,HMIN/ABS(H)) + NFLAG = -1 + GO TO 150 +C----------------------------------------------------------------------- +C The corrector has converged (NFLAG = 0). The local error test is +C made and control passes to statement 500 if it fails. +C----------------------------------------------------------------------- + 450 CONTINUE + DSM = ACNRM/TQ(2) + IF (DSM .GT. ONE) GO TO 500 +C----------------------------------------------------------------------- +C After a successful step, update the YH and TAU arrays and decrement +C NQWAIT. If NQWAIT is then 1 and NQ .lt. MAXORD, then ACOR is saved +C for use in a possible order increase on the next step. +C If ETAMAX = 1 (a failure occurred this step), keep NQWAIT .ge. 2. 
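Every step-size decision in this routine comes down to the same expression: for a scaled error estimate d at a tentative order q, the candidate ratio of new to old h is 1/((bias*d)**(1/(q+1)) + addon), with ADDON = 1.0D-6 and the BIAS1/BIAS2/BIAS3 values from the DATA statement above. A one-function Python sketch (the name step_ratio is illustrative only):

    def step_ratio(d, order, bias=6.0, addon=1.0e-6):
        # candidate eta = h_new / h_old for error estimate d at the given order;
        # bias is BIAS2 = 6 at the current order, BIAS1 = 6 at order-1, BIAS3 = 10 at order+1
        return 1.0 / ((bias * d) ** (1.0 / (order + 1)) + addon)

The largest of the candidates at orders q-1, q, and q+1 decides whether the order is lowered, kept, or raised, and the change is made only when the winning factor is at least THRESH = 1.5.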
+C----------------------------------------------------------------------- + KFLAG = 0 + NST = NST + 1 + HU = H + NQU = NQ + DO 470 IBACK = 1, NQ + I = L - IBACK + 470 TAU(I+1) = TAU(I) + TAU(1) = H + DO 480 J = 1, L + CALL DZAXPY (N, EL(J), ACOR, 1, YH(1,J), 1 ) + 480 CONTINUE + NQWAIT = NQWAIT - 1 + IF ((L .EQ. LMAX) .OR. (NQWAIT .NE. 1)) GO TO 490 + CALL ZCOPY (N, ACOR, 1, YH(1,LMAX), 1 ) + CONP = TQ(5) + 490 IF (ETAMAX .NE. ONE) GO TO 560 + IF (NQWAIT .LT. 2) NQWAIT = 2 + NEWQ = NQ + NEWH = 0 + ETA = ONE + HNEW = H + GO TO 690 +C----------------------------------------------------------------------- +C The error test failed. KFLAG keeps track of multiple failures. +C Restore TN and the YH array to their previous values, and prepare +C to try the step again. Compute the optimum step size for the +C same order. After repeated failures, H is forced to decrease +C more rapidly. +C----------------------------------------------------------------------- + 500 KFLAG = KFLAG - 1 + NETF = NETF + 1 + NFLAG = -2 + TN = TOLD + I1 = NQNYH + 1 + DO 520 JB = 1, NQ + I1 = I1 - LDYH + DO 510 I = I1, NQNYH + 510 YH1(I) = YH1(I) - YH1(I+LDYH) + 520 CONTINUE + IF (ABS(H) .LE. HMIN*ONEPSM) GO TO 660 + ETAMAX = ONE + IF (KFLAG .LE. KFC) GO TO 530 +C Compute ratio of new H to current H at the current order. ------------ + FLOTL = REAL(L) + ETA = ONE/((BIAS2*DSM)**(ONE/FLOTL) + ADDON) + ETA = MAX(ETA,HMIN/ABS(H),ETAMIN) + IF ((KFLAG .LE. -2) .AND. (ETA .GT. ETAMXF)) ETA = ETAMXF + GO TO 150 +C----------------------------------------------------------------------- +C Control reaches this section if 3 or more consecutive failures +C have occurred. It is assumed that the elements of the YH array +C have accumulated errors of the wrong order. The order is reduced +C by one, if possible. Then H is reduced by a factor of 0.1 and +C the step is retried. After a total of 7 consecutive failures, +C an exit is taken with KFLAG = -1. +C----------------------------------------------------------------------- + 530 IF (KFLAG .EQ. KFH) GO TO 660 + IF (NQ .EQ. 1) GO TO 540 + ETA = MAX(ETAMIN,HMIN/ABS(H)) + CALL ZVJUST (YH, LDYH, -1) + L = NQ + NQ = NQ - 1 + NQWAIT = L + GO TO 150 + 540 ETA = MAX(ETAMIN,HMIN/ABS(H)) + H = H*ETA + HSCAL = H + TAU(1) = H + CALL F (N, TN, Y, SAVF, RPAR, IPAR) + NFE = NFE + 1 + DO 550 I = 1, N + 550 YH(I,2) = H*SAVF(I) + NQWAIT = 10 + GO TO 200 +C----------------------------------------------------------------------- +C If NQWAIT = 0, an increase or decrease in order by one is considered. +C Factors ETAQ, ETAQM1, ETAQP1 are computed by which H could +C be multiplied at order q, q-1, or q+1, respectively. +C The largest of these is determined, and the new order and +C step size set accordingly. +C A change of H or NQ is made only if H increases by at least a +C factor of THRESH. If an order change is considered and rejected, +C then NQWAIT is set to 2 (reconsider it after 2 steps). +C----------------------------------------------------------------------- +C Compute ratio of new H to current H at the current order. ------------ + 560 FLOTL = REAL(L) + ETAQ = ONE/((BIAS2*DSM)**(ONE/FLOTL) + ADDON) + IF (NQWAIT .NE. 0) GO TO 600 + NQWAIT = 2 + ETAQM1 = ZERO + IF (NQ .EQ. 1) GO TO 570 +C Compute ratio of new H to current H at the current order less one. --- + DDN = ZVNORM (N, YH(1,L), EWT)/TQ(1) + ETAQM1 = ONE/((BIAS1*DDN)**(ONE/(FLOTL - ONE)) + ADDON) + 570 ETAQP1 = ZERO + IF (L .EQ. LMAX) GO TO 580 +C Compute ratio of new H to current H at current order plus one. 
------- + CNQUOT = (TQ(5)/CONP)*(H/TAU(2))**L + DO 575 I = 1, N + 575 SAVF(I) = ACOR(I) - CNQUOT*YH(I,LMAX) + DUP = ZVNORM (N, SAVF, EWT)/TQ(3) + ETAQP1 = ONE/((BIAS3*DUP)**(ONE/(FLOTL + ONE)) + ADDON) + 580 IF (ETAQ .GE. ETAQP1) GO TO 590 + IF (ETAQP1 .GT. ETAQM1) GO TO 620 + GO TO 610 + 590 IF (ETAQ .LT. ETAQM1) GO TO 610 + 600 ETA = ETAQ + NEWQ = NQ + GO TO 630 + 610 ETA = ETAQM1 + NEWQ = NQ - 1 + GO TO 630 + 620 ETA = ETAQP1 + NEWQ = NQ + 1 + CALL ZCOPY (N, ACOR, 1, YH(1,LMAX), 1) +C Test tentative new H against THRESH, ETAMAX, and HMXI, then exit. ---- + 630 IF (ETA .LT. THRESH .OR. ETAMAX .EQ. ONE) GO TO 640 + ETA = MIN(ETA,ETAMAX) + ETA = ETA/MAX(ONE,ABS(H)*HMXI*ETA) + NEWH = 1 + HNEW = H*ETA + GO TO 690 + 640 NEWQ = NQ + NEWH = 0 + ETA = ONE + HNEW = H + GO TO 690 +C----------------------------------------------------------------------- +C All returns are made through this section. +C On a successful return, ETAMAX is reset and ACOR is scaled. +C----------------------------------------------------------------------- + 660 KFLAG = -1 + GO TO 720 + 670 KFLAG = -2 + GO TO 720 + 680 IF (NFLAG .EQ. -2) KFLAG = -3 + IF (NFLAG .EQ. -3) KFLAG = -4 + GO TO 720 + 690 ETAMAX = ETAMX3 + IF (NST .LE. 10) ETAMAX = ETAMX2 + 700 R = ONE/TQ(2) + CALL DZSCAL (N, R, ACOR, 1) + 720 JSTART = 1 + RETURN +C----------------------- End of Subroutine ZVSTEP ---------------------- + END +*DECK ZVSET + SUBROUTINE ZVSET +C----------------------------------------------------------------------- +C Call sequence communication: None +C COMMON block variables accessed: +C /ZVOD01/ -- EL(13), H, TAU(13), TQ(5), L(= NQ + 1), +C METH, NQ, NQWAIT +C +C Subroutines called by ZVSET: None +C Function routines called by ZVSET: None +C----------------------------------------------------------------------- +C ZVSET is called by ZVSTEP and sets coefficients for use there. +C +C For each order NQ, the coefficients in EL are calculated by use of +C the generating polynomial lambda(x), with coefficients EL(i). +C lambda(x) = EL(1) + EL(2)*x + ... + EL(NQ+1)*(x**NQ). +C For the backward differentiation formulas, +C NQ-1 +C lambda(x) = (1 + x/xi*(NQ)) * product (1 + x/xi(i) ) . +C i = 1 +C For the Adams formulas, +C NQ-1 +C (d/dx) lambda(x) = c * product (1 + x/xi(i) ) , +C i = 1 +C lambda(-1) = 0, lambda(0) = 1, +C where c is a normalization constant. +C In both cases, xi(i) is defined by +C H*xi(i) = t sub n - t sub (n-i) +C = H + TAU(1) + TAU(2) + ... TAU(i-1). +C +C +C In addition to variables described previously, communication +C with ZVSET uses the following: +C TAU = A vector of length 13 containing the past NQ values +C of H. +C EL = A vector of length 13 in which vset stores the +C coefficients for the corrector formula. +C TQ = A vector of length 5 in which vset stores constants +C used for the convergence test, the error test, and the +C selection of H at a new order. +C METH = The basic method indicator. +C NQ = The current order. +C L = NQ + 1, the length of the vector stored in EL, and +C the number of columns of the YH array being used. +C NQWAIT = A counter controlling the frequency of order changes. +C An order change is about to be considered if NQWAIT = 1. 
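The generating-polynomial construction described above can be checked with a few lines of NumPy. The sketch below (hypothetical helper) simply expands a product of (1 + x/xi(i)) factors by convolution; ZVSET builds the same product in place and, in the BDF case, uses a modified xi in the last factor, so this illustrates the construction rather than the exact EL values.

    import numpy as np

    def expand_lambda(xi):
        # coefficients, lowest order first, of prod_i (1 + x / xi[i])
        el = np.array([1.0])
        for x in xi:
            el = np.convolve(el, [1.0, 1.0 / x])
        return el

    # With a constant step size, H*xi(i) = H + TAU(1) + ... + TAU(i-1) gives xi(i) = i,
    # so for q = 3 the factors are (1 + x)(1 + x/2)(1 + x/3) and
    # expand_lambda([1.0, 2.0, 3.0]) returns [1.0, 1.8333..., 1.0, 0.1666...]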
+C----------------------------------------------------------------------- +C +C Type declarations for labeled COMMON block ZVOD01 -------------------- +C + DOUBLE PRECISION ACNRM, CCMXJ, CONP, CRATE, DRC, EL, + 1 ETA, ETAMAX, H, HMIN, HMXI, HNEW, HRL1, HSCAL, PRL1, + 2 RC, RL1, SRUR, TAU, TQ, TN, UROUND + INTEGER ICF, INIT, IPUP, JCUR, JSTART, JSV, KFLAG, KUTH, + 1 L, LMAX, LYH, LEWT, LACOR, LSAVF, LWM, LIWM, + 2 LOCJS, MAXORD, METH, MITER, MSBJ, MXHNIL, MXSTEP, + 3 N, NEWH, NEWQ, NHNIL, NQ, NQNYH, NQWAIT, NSLJ, + 4 NSLP, NYH +C +C Type declarations for local variables -------------------------------- +C + DOUBLE PRECISION AHATN0, ALPH0, CNQM1, CORTES, CSUM, ELP, EM, + 1 EM0, FLOTI, FLOTL, FLOTNQ, HSUM, ONE, RXI, RXIS, S, SIX, + 2 T1, T2, T3, T4, T5, T6, TWO, XI, ZERO + INTEGER I, IBACK, J, JP1, NQM1, NQM2 +C + DIMENSION EM(13) +C----------------------------------------------------------------------- +C The following Fortran-77 declaration is to cause the values of the +C listed (local) variables to be saved between calls to this integrator. +C----------------------------------------------------------------------- + SAVE CORTES, ONE, SIX, TWO, ZERO +C + COMMON /ZVOD01/ ACNRM, CCMXJ, CONP, CRATE, DRC, EL(13), ETA, + 1 ETAMAX, H, HMIN, HMXI, HNEW, HRL1, HSCAL, PRL1, + 2 RC, RL1, SRUR, TAU(13), TQ(5), TN, UROUND, + 3 ICF, INIT, IPUP, JCUR, JSTART, JSV, KFLAG, KUTH, + 4 L, LMAX, LYH, LEWT, LACOR, LSAVF, LWM, LIWM, + 5 LOCJS, MAXORD, METH, MITER, MSBJ, MXHNIL, MXSTEP, + 6 N, NEWH, NEWQ, NHNIL, NQ, NQNYH, NQWAIT, NSLJ, + 7 NSLP, NYH +C + DATA CORTES /0.1D0/ + DATA ONE /1.0D0/, SIX /6.0D0/, TWO /2.0D0/, ZERO /0.0D0/ +C + FLOTL = REAL(L) + NQM1 = NQ - 1 + NQM2 = NQ - 2 + GO TO (100, 200), METH +C +C Set coefficients for Adams methods. ---------------------------------- + 100 IF (NQ .NE. 1) GO TO 110 + EL(1) = ONE + EL(2) = ONE + TQ(1) = ONE + TQ(2) = TWO + TQ(3) = SIX*TQ(2) + TQ(5) = ONE + GO TO 300 + 110 HSUM = H + EM(1) = ONE + FLOTNQ = FLOTL - ONE + DO 115 I = 2, L + 115 EM(I) = ZERO + DO 150 J = 1, NQM1 + IF ((J .NE. NQM1) .OR. (NQWAIT .NE. 1)) GO TO 130 + S = ONE + CSUM = ZERO + DO 120 I = 1, NQM1 + CSUM = CSUM + S*EM(I)/REAL(I+1) + 120 S = -S + TQ(1) = EM(NQM1)/(FLOTNQ*CSUM) + 130 RXI = H/HSUM + DO 140 IBACK = 1, J + I = (J + 2) - IBACK + 140 EM(I) = EM(I) + EM(I-1)*RXI + HSUM = HSUM + TAU(J) + 150 CONTINUE +C Compute integral from -1 to 0 of polynomial and of x times it. ------- + S = ONE + EM0 = ZERO + CSUM = ZERO + DO 160 I = 1, NQ + FLOTI = REAL(I) + EM0 = EM0 + S*EM(I)/FLOTI + CSUM = CSUM + S*EM(I)/(FLOTI+ONE) + 160 S = -S +C In EL, form coefficients of normalized integrated polynomial. -------- + S = ONE/EM0 + EL(1) = ONE + DO 170 I = 1, NQ + 170 EL(I+1) = S*EM(I)/REAL(I) + XI = HSUM/H + TQ(2) = XI*EM0/CSUM + TQ(5) = XI/EL(L) + IF (NQWAIT .NE. 1) GO TO 300 +C For higher order control constant, multiply polynomial by 1+x/xi(q). - + RXI = ONE/XI + DO 180 IBACK = 1, NQ + I = (L + 1) - IBACK + 180 EM(I) = EM(I) + EM(I-1)*RXI +C Compute integral of polynomial. -------------------------------------- + S = ONE + CSUM = ZERO + DO 190 I = 1, L + CSUM = CSUM + S*EM(I)/REAL(I+1) + 190 S = -S + TQ(3) = FLOTL*EM0/CSUM + GO TO 300 +C +C Set coefficients for BDF methods. ------------------------------------ + 200 DO 210 I = 3, L + 210 EL(I) = ZERO + EL(1) = ONE + EL(2) = ONE + ALPH0 = -ONE + AHATN0 = -ONE + HSUM = H + RXI = ONE + RXIS = ONE + IF (NQ .EQ. 1) GO TO 240 + DO 230 J = 1, NQM2 +C In EL, construct coefficients of (1+x/xi(1))*...*(1+x/xi(j+1)). 
------ + HSUM = HSUM + TAU(J) + RXI = H/HSUM + JP1 = J + 1 + ALPH0 = ALPH0 - ONE/REAL(JP1) + DO 220 IBACK = 1, JP1 + I = (J + 3) - IBACK + 220 EL(I) = EL(I) + EL(I-1)*RXI + 230 CONTINUE + ALPH0 = ALPH0 - ONE/REAL(NQ) + RXIS = -EL(2) - ALPH0 + HSUM = HSUM + TAU(NQM1) + RXI = H/HSUM + AHATN0 = -EL(2) - RXI + DO 235 IBACK = 1, NQ + I = (NQ + 2) - IBACK + 235 EL(I) = EL(I) + EL(I-1)*RXIS + 240 T1 = ONE - AHATN0 + ALPH0 + T2 = ONE + REAL(NQ)*T1 + TQ(2) = ABS(ALPH0*T2/T1) + TQ(5) = ABS(T2/(EL(L)*RXI/RXIS)) + IF (NQWAIT .NE. 1) GO TO 300 + CNQM1 = RXIS/EL(L) + T3 = ALPH0 + ONE/REAL(NQ) + T4 = AHATN0 + RXI + ELP = T3/(ONE - T4 + T3) + TQ(1) = ABS(ELP/CNQM1) + HSUM = HSUM + TAU(NQ) + RXI = H/HSUM + T5 = ALPH0 - ONE/REAL(NQ+1) + T6 = AHATN0 - RXI + ELP = T2/(ONE - T6 + T5) + TQ(3) = ABS(ELP*RXI*(FLOTL + ONE)*T5) + 300 TQ(4) = CORTES*TQ(2) + RETURN +C----------------------- End of Subroutine ZVSET ----------------------- + END +*DECK ZVJUST + SUBROUTINE ZVJUST (YH, LDYH, IORD) + DOUBLE COMPLEX YH + INTEGER LDYH, IORD + DIMENSION YH(LDYH,*) +C----------------------------------------------------------------------- +C Call sequence input -- YH, LDYH, IORD +C Call sequence output -- YH +C COMMON block input -- NQ, METH, LMAX, HSCAL, TAU(13), N +C COMMON block variables accessed: +C /ZVOD01/ -- HSCAL, TAU(13), LMAX, METH, N, NQ, +C +C Subroutines called by ZVJUST: DZAXPY +C Function routines called by ZVJUST: None +C----------------------------------------------------------------------- +C This subroutine adjusts the YH array on reduction of order, +C and also when the order is increased for the stiff option (METH = 2). +C Communication with ZVJUST uses the following: +C IORD = An integer flag used when METH = 2 to indicate an order +C increase (IORD = +1) or an order decrease (IORD = -1). +C HSCAL = Step size H used in scaling of Nordsieck array YH. +C (If IORD = +1, ZVJUST assumes that HSCAL = TAU(1).) +C See References 1 and 2 for details. +C----------------------------------------------------------------------- +C +C Type declarations for labeled COMMON block ZVOD01 -------------------- +C + DOUBLE PRECISION ACNRM, CCMXJ, CONP, CRATE, DRC, EL, + 1 ETA, ETAMAX, H, HMIN, HMXI, HNEW, HRL1, HSCAL, PRL1, + 2 RC, RL1, SRUR, TAU, TQ, TN, UROUND + INTEGER ICF, INIT, IPUP, JCUR, JSTART, JSV, KFLAG, KUTH, + 1 L, LMAX, LYH, LEWT, LACOR, LSAVF, LWM, LIWM, + 2 LOCJS, MAXORD, METH, MITER, MSBJ, MXHNIL, MXSTEP, + 3 N, NEWH, NEWQ, NHNIL, NQ, NQNYH, NQWAIT, NSLJ, + 4 NSLP, NYH +C +C Type declarations for local variables -------------------------------- +C + DOUBLE PRECISION ALPH0, ALPH1, HSUM, ONE, PROD, T1, XI,XIOLD, ZERO + INTEGER I, IBACK, J, JP1, LP1, NQM1, NQM2, NQP1 +C----------------------------------------------------------------------- +C The following Fortran-77 declaration is to cause the values of the +C listed (local) variables to be saved between calls to this integrator. +C----------------------------------------------------------------------- + SAVE ONE, ZERO +C + COMMON /ZVOD01/ ACNRM, CCMXJ, CONP, CRATE, DRC, EL(13), ETA, + 1 ETAMAX, H, HMIN, HMXI, HNEW, HRL1, HSCAL, PRL1, + 2 RC, RL1, SRUR, TAU(13), TQ(5), TN, UROUND, + 3 ICF, INIT, IPUP, JCUR, JSTART, JSV, KFLAG, KUTH, + 4 L, LMAX, LYH, LEWT, LACOR, LSAVF, LWM, LIWM, + 5 LOCJS, MAXORD, METH, MITER, MSBJ, MXHNIL, MXSTEP, + 6 N, NEWH, NEWQ, NHNIL, NQ, NQNYH, NQWAIT, NSLJ, + 7 NSLP, NYH +C + DATA ONE /1.0D0/, ZERO /0.0D0/ +C + IF ((NQ .EQ. 2) .AND. (IORD .NE. 
1)) RETURN + NQM1 = NQ - 1 + NQM2 = NQ - 2 + GO TO (100, 200), METH +C----------------------------------------------------------------------- +C Nonstiff option... +C Check to see if the order is being increased or decreased. +C----------------------------------------------------------------------- + 100 CONTINUE + IF (IORD .EQ. 1) GO TO 180 +C Order decrease. ------------------------------------------------------ + DO 110 J = 1, LMAX + 110 EL(J) = ZERO + EL(2) = ONE + HSUM = ZERO + DO 130 J = 1, NQM2 +C Construct coefficients of x*(x+xi(1))*...*(x+xi(j)). ----------------- + HSUM = HSUM + TAU(J) + XI = HSUM/HSCAL + JP1 = J + 1 + DO 120 IBACK = 1, JP1 + I = (J + 3) - IBACK + 120 EL(I) = EL(I)*XI + EL(I-1) + 130 CONTINUE +C Construct coefficients of integrated polynomial. --------------------- + DO 140 J = 2, NQM1 + 140 EL(J+1) = REAL(NQ)*EL(J)/REAL(J) +C Subtract correction terms from YH array. ----------------------------- + DO 170 J = 3, NQ + DO 160 I = 1, N + 160 YH(I,J) = YH(I,J) - YH(I,L)*EL(J) + 170 CONTINUE + RETURN +C Order increase. ------------------------------------------------------ +C Zero out next column in YH array. ------------------------------------ + 180 CONTINUE + LP1 = L + 1 + DO 190 I = 1, N + 190 YH(I,LP1) = ZERO + RETURN +C----------------------------------------------------------------------- +C Stiff option... +C Check to see if the order is being increased or decreased. +C----------------------------------------------------------------------- + 200 CONTINUE + IF (IORD .EQ. 1) GO TO 300 +C Order decrease. ------------------------------------------------------ + DO 210 J = 1, LMAX + 210 EL(J) = ZERO + EL(3) = ONE + HSUM = ZERO + DO 230 J = 1,NQM2 +C Construct coefficients of x*x*(x+xi(1))*...*(x+xi(j)). --------------- + HSUM = HSUM + TAU(J) + XI = HSUM/HSCAL + JP1 = J + 1 + DO 220 IBACK = 1, JP1 + I = (J + 4) - IBACK + 220 EL(I) = EL(I)*XI + EL(I-1) + 230 CONTINUE +C Subtract correction terms from YH array. ----------------------------- + DO 250 J = 3,NQ + DO 240 I = 1, N + 240 YH(I,J) = YH(I,J) - YH(I,L)*EL(J) + 250 CONTINUE + RETURN +C Order increase. ------------------------------------------------------ + 300 DO 310 J = 1, LMAX + 310 EL(J) = ZERO + EL(3) = ONE + ALPH0 = -ONE + ALPH1 = ONE + PROD = ONE + XIOLD = ONE + HSUM = HSCAL + IF (NQ .EQ. 1) GO TO 340 + DO 330 J = 1, NQM1 +C Construct coefficients of x*x*(x+xi(1))*...*(x+xi(j)). --------------- + JP1 = J + 1 + HSUM = HSUM + TAU(JP1) + XI = HSUM/HSCAL + PROD = PROD*XI + ALPH0 = ALPH0 - ONE/REAL(JP1) + ALPH1 = ALPH1 + ONE/XI + DO 320 IBACK = 1, JP1 + I = (J + 4) - IBACK + 320 EL(I) = EL(I)*XIOLD + EL(I-1) + XIOLD = XI + 330 CONTINUE + 340 CONTINUE + T1 = (-ALPH0 - ALPH1)/PROD +C Load column L + 1 in YH array. --------------------------------------- + LP1 = L + 1 + DO 350 I = 1, N + 350 YH(I,LP1) = T1*YH(I,LMAX) +C Add correction terms to YH array. 
------------------------------------ + NQP1 = NQ + 1 + DO 370 J = 3, NQP1 + CALL DZAXPY (N, EL(J), YH(1,LP1), 1, YH(1,J), 1 ) + 370 CONTINUE + RETURN +C----------------------- End of Subroutine ZVJUST ---------------------- + END +*DECK ZVNLSD + SUBROUTINE ZVNLSD (Y, YH, LDYH, VSAV, SAVF, EWT, ACOR, IWM, WM, + 1 F, JAC, PDUM, NFLAG, RPAR, IPAR) + EXTERNAL F, JAC, PDUM + DOUBLE COMPLEX Y, YH, VSAV, SAVF, ACOR, WM + DOUBLE PRECISION EWT + INTEGER LDYH, IWM, NFLAG, IPAR + DIMENSION Y(*), YH(LDYH,*), VSAV(*), SAVF(*), EWT(*), ACOR(*), + 1 IWM(*), WM(*), RPAR(*), IPAR(*) +C----------------------------------------------------------------------- +C Call sequence input -- Y, YH, LDYH, SAVF, EWT, ACOR, IWM, WM, +C F, JAC, NFLAG, RPAR, IPAR +C Call sequence output -- YH, ACOR, WM, IWM, NFLAG +C COMMON block variables accessed: +C /ZVOD01/ ACNRM, CRATE, DRC, H, RC, RL1, TQ(5), TN, ICF, +C JCUR, METH, MITER, N, NSLP +C /ZVOD02/ HU, NCFN, NETF, NFE, NJE, NLU, NNI, NQU, NST +C +C Subroutines called by ZVNLSD: F, DZAXPY, ZCOPY, DZSCAL, ZVJAC, ZVSOL +C Function routines called by ZVNLSD: ZVNORM +C----------------------------------------------------------------------- +C Subroutine ZVNLSD is a nonlinear system solver, which uses functional +C iteration or a chord (modified Newton) method. For the chord method +C direct linear algebraic system solvers are used. Subroutine ZVNLSD +C then handles the corrector phase of this integration package. +C +C Communication with ZVNLSD is done with the following variables. (For +C more details, please see the comments in the driver subroutine.) +C +C Y = The dependent variable, a vector of length N, input. +C YH = The Nordsieck (Taylor) array, LDYH by LMAX, input +C and output. On input, it contains predicted values. +C LDYH = A constant .ge. N, the first dimension of YH, input. +C VSAV = Unused work array. +C SAVF = A work array of length N. +C EWT = An error weight vector of length N, input. +C ACOR = A work array of length N, used for the accumulated +C corrections to the predicted y vector. +C WM,IWM = Complex and integer work arrays associated with matrix +C operations in chord iteration (MITER .ne. 0). +C F = Dummy name for user-supplied routine for f. +C JAC = Dummy name for user-supplied Jacobian routine. +C PDUM = Unused dummy subroutine name. Included for uniformity +C over collection of integrators. +C NFLAG = Input/output flag, with values and meanings as follows: +C INPUT +C 0 first call for this time step. +C -1 convergence failure in previous call to ZVNLSD. +C -2 error test failure in ZVSTEP. +C OUTPUT +C 0 successful completion of nonlinear solver. +C -1 convergence failure or singular matrix. +C -2 unrecoverable error in matrix preprocessing +C (cannot occur here). +C -3 unrecoverable error in solution (cannot occur +C here). +C RPAR, IPAR = User's real/complex and integer work arrays. +C +C IPUP = Own variable flag with values and meanings as follows: +C 0, do not update the Newton matrix. +C MITER .ne. 0, update Newton matrix, because it is the +C initial step, order was changed, the error +C test failed, or an update is indicated by +C the scalar RC or step counter NST. +C +C For more details, see comments in driver subroutine. 
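For the chord (modified Newton) branch described above, one corrector pass forms the residual (rl1*h)*f(tn,y) - (rl1*yh(:,2) + acor) and solves it against the prefactored matrix P = I - h*rl1*J; ZVNLSD also rescales the correction by 2/(1 + RC) when h*rl1 has drifted since the last ZVJAC call. Below is a dense-matrix sketch using SciPy; the function name and argument layout are illustrative, since the Fortran works through ZVJAC/ZVSOL on the WM work array.

    from scipy.linalg import lu_solve

    def chord_correction(f_val, yh2, acor, h, rl1, p_lu):
        # f_val : f(tn, y) at the current iterate                     (SAVF)
        # yh2   : second Nordsieck column, h * y'                     (YH(:,2))
        # acor  : corrections accumulated so far                      (ACOR)
        # p_lu  : scipy.linalg.lu_factor output for P = I - h*rl1*J   (kept in WM)
        residual = (rl1 * h) * f_val - (rl1 * yh2 + acor)
        dx = lu_solve(p_lu, residual)
        return acor + dx                      # the caller then sets y = yh[:, 0] + acor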
+C----------------------------------------------------------------------- +C Type declarations for labeled COMMON block ZVOD01 -------------------- +C + DOUBLE PRECISION ACNRM, CCMXJ, CONP, CRATE, DRC, EL, + 1 ETA, ETAMAX, H, HMIN, HMXI, HNEW, HRL1, HSCAL, PRL1, + 2 RC, RL1, SRUR, TAU, TQ, TN, UROUND + INTEGER ICF, INIT, IPUP, JCUR, JSTART, JSV, KFLAG, KUTH, + 1 L, LMAX, LYH, LEWT, LACOR, LSAVF, LWM, LIWM, + 2 LOCJS, MAXORD, METH, MITER, MSBJ, MXHNIL, MXSTEP, + 3 N, NEWH, NEWQ, NHNIL, NQ, NQNYH, NQWAIT, NSLJ, + 4 NSLP, NYH +C +C Type declarations for labeled COMMON block ZVOD02 -------------------- +C + DOUBLE PRECISION HU + INTEGER NCFN, NETF, NFE, NJE, NLU, NNI, NQU, NST +C +C Type declarations for local variables -------------------------------- +C + DOUBLE PRECISION CCMAX, CRDOWN, CSCALE, DCON, DEL, DELP, ONE, + 1 RDIV, TWO, ZERO + INTEGER I, IERPJ, IERSL, M, MAXCOR, MSBP +C +C Type declaration for function subroutines called --------------------- +C + DOUBLE PRECISION ZVNORM +C----------------------------------------------------------------------- +C The following Fortran-77 declaration is to cause the values of the +C listed (local) variables to be saved between calls to this integrator. +C----------------------------------------------------------------------- + SAVE CCMAX, CRDOWN, MAXCOR, MSBP, RDIV, ONE, TWO, ZERO +C + COMMON /ZVOD01/ ACNRM, CCMXJ, CONP, CRATE, DRC, EL(13), ETA, + 1 ETAMAX, H, HMIN, HMXI, HNEW, HRL1, HSCAL, PRL1, + 2 RC, RL1, SRUR, TAU(13), TQ(5), TN, UROUND, + 3 ICF, INIT, IPUP, JCUR, JSTART, JSV, KFLAG, KUTH, + 4 L, LMAX, LYH, LEWT, LACOR, LSAVF, LWM, LIWM, + 5 LOCJS, MAXORD, METH, MITER, MSBJ, MXHNIL, MXSTEP, + 6 N, NEWH, NEWQ, NHNIL, NQ, NQNYH, NQWAIT, NSLJ, + 7 NSLP, NYH + COMMON /ZVOD02/ HU, NCFN, NETF, NFE, NJE, NLU, NNI, NQU, NST +C + DATA CCMAX /0.3D0/, CRDOWN /0.3D0/, MAXCOR /3/, MSBP /20/, + 1 RDIV /2.0D0/ + DATA ONE /1.0D0/, TWO /2.0D0/, ZERO /0.0D0/ +C----------------------------------------------------------------------- +C On the first step, on a change of method order, or after a +C nonlinear convergence failure with NFLAG = -2, set IPUP = MITER +C to force a Jacobian update when MITER .ne. 0. +C----------------------------------------------------------------------- + IF (JSTART .EQ. 0) NSLP = 0 + IF (NFLAG .EQ. 0) ICF = 0 + IF (NFLAG .EQ. -2) IPUP = MITER + IF ( (JSTART .EQ. 0) .OR. (JSTART .EQ. -1) ) IPUP = MITER +C If this is functional iteration, set CRATE .eq. 1 and drop to 220 + IF (MITER .EQ. 0) THEN + CRATE = ONE + GO TO 220 + ENDIF +C----------------------------------------------------------------------- +C RC is the ratio of new to old values of the coefficient H/EL(2)=h/l1. +C When RC differs from 1 by more than CCMAX, IPUP is set to MITER +C to force ZVJAC to be called, if a Jacobian is involved. +C In any case, ZVJAC is called at least every MSBP steps. +C----------------------------------------------------------------------- + DRC = ABS(RC-ONE) + IF (DRC .GT. CCMAX .OR. NST .GE. NSLP+MSBP) IPUP = MITER +C----------------------------------------------------------------------- +C Up to MAXCOR corrector iterations are taken. A convergence test is +C made on the r.m.s. norm of each correction, weighted by the error +C weight vector EWT. The sum of the corrections is accumulated in the +C vector ACOR(i). The YH array is not altered in the corrector loop. 
+C----------------------------------------------------------------------- + 220 M = 0 + DELP = ZERO + CALL ZCOPY (N, YH(1,1), 1, Y, 1 ) + CALL F (N, TN, Y, SAVF, RPAR, IPAR) + NFE = NFE + 1 + IF (IPUP .LE. 0) GO TO 250 +C----------------------------------------------------------------------- +C If indicated, the matrix P = I - h*rl1*J is reevaluated and +C preprocessed before starting the corrector iteration. IPUP is set +C to 0 as an indicator that this has been done. +C----------------------------------------------------------------------- + CALL ZVJAC (Y, YH, LDYH, EWT, ACOR, SAVF, WM, IWM, F, JAC, IERPJ, + 1 RPAR, IPAR) + IPUP = 0 + RC = ONE + DRC = ZERO + CRATE = ONE + NSLP = NST +C If matrix is singular, take error return to force cut in step size. -- + IF (IERPJ .NE. 0) GO TO 430 + 250 DO 260 I = 1,N + 260 ACOR(I) = ZERO +C This is a looping point for the corrector iteration. ----------------- + 270 IF (MITER .NE. 0) GO TO 350 +C----------------------------------------------------------------------- +C In the case of functional iteration, update Y directly from +C the result of the last function evaluation. +C----------------------------------------------------------------------- + DO 280 I = 1,N + 280 SAVF(I) = RL1*(H*SAVF(I) - YH(I,2)) + DO 290 I = 1,N + 290 Y(I) = SAVF(I) - ACOR(I) + DEL = ZVNORM (N, Y, EWT) + DO 300 I = 1,N + 300 Y(I) = YH(I,1) + SAVF(I) + CALL ZCOPY (N, SAVF, 1, ACOR, 1) + GO TO 400 +C----------------------------------------------------------------------- +C In the case of the chord method, compute the corrector error, +C and solve the linear system with that as right-hand side and +C P as coefficient matrix. The correction is scaled by the factor +C 2/(1+RC) to account for changes in h*rl1 since the last ZVJAC call. +C----------------------------------------------------------------------- + 350 DO 360 I = 1,N + 360 Y(I) = (RL1*H)*SAVF(I) - (RL1*YH(I,2) + ACOR(I)) + CALL ZVSOL (WM, IWM, Y, IERSL) + NNI = NNI + 1 + IF (IERSL .GT. 0) GO TO 410 + IF (METH .EQ. 2 .AND. RC .NE. ONE) THEN + CSCALE = TWO/(ONE + RC) + CALL DZSCAL (N, CSCALE, Y, 1) + ENDIF + DEL = ZVNORM (N, Y, EWT) + CALL DZAXPY (N, ONE, Y, 1, ACOR, 1) + DO 380 I = 1,N + 380 Y(I) = YH(I,1) + ACOR(I) +C----------------------------------------------------------------------- +C Test for convergence. If M .gt. 0, an estimate of the convergence +C rate constant is stored in CRATE, and this is used in the test. +C----------------------------------------------------------------------- + 400 IF (M .NE. 0) CRATE = MAX(CRDOWN*CRATE,DEL/DELP) + DCON = DEL*MIN(ONE,CRATE)/TQ(4) + IF (DCON .LE. ONE) GO TO 450 + M = M + 1 + IF (M .EQ. MAXCOR) GO TO 410 + IF (M .GE. 2 .AND. DEL .GT. RDIV*DELP) GO TO 410 + DELP = DEL + CALL F (N, TN, Y, SAVF, RPAR, IPAR) + NFE = NFE + 1 + GO TO 270 +C + 410 IF (MITER .EQ. 0 .OR. JCUR .EQ. 1) GO TO 430 + ICF = 1 + IPUP = MITER + GO TO 220 +C + 430 CONTINUE + NFLAG = -1 + ICF = 2 + IPUP = MITER + RETURN +C +C Return for successful step. ------------------------------------------ + 450 NFLAG = 0 + JCUR = 0 + ICF = 0 + IF (M .EQ. 0) ACNRM = DEL + IF (M .GT. 
0) ACNRM = ZVNORM (N, ACOR, EWT) + RETURN +C----------------------- End of Subroutine ZVNLSD ---------------------- + END +*DECK ZVJAC + SUBROUTINE ZVJAC (Y, YH, LDYH, EWT, FTEM, SAVF, WM, IWM, F, JAC, + 1 IERPJ, RPAR, IPAR) + EXTERNAL F, JAC + DOUBLE COMPLEX Y, YH, FTEM, SAVF, WM + DOUBLE PRECISION EWT + INTEGER LDYH, IWM, IERPJ, IPAR + DIMENSION Y(*), YH(LDYH,*), EWT(*), FTEM(*), SAVF(*), + 1 WM(*), IWM(*), RPAR(*), IPAR(*) +C----------------------------------------------------------------------- +C Call sequence input -- Y, YH, LDYH, EWT, FTEM, SAVF, WM, IWM, +C F, JAC, RPAR, IPAR +C Call sequence output -- WM, IWM, IERPJ +C COMMON block variables accessed: +C /ZVOD01/ CCMXJ, DRC, H, HRL1, RL1, SRUR, TN, UROUND, ICF, JCUR, +C LOCJS, MITER, MSBJ, N, NSLJ +C /ZVOD02/ NFE, NST, NJE, NLU +C +C Subroutines called by ZVJAC: F, JAC, ZACOPY, ZCOPY, ZGBFA, ZGEFA, +C DZSCAL +C Function routines called by ZVJAC: ZVNORM +C----------------------------------------------------------------------- +C ZVJAC is called by ZVNLSD to compute and process the matrix +C P = I - h*rl1*J , where J is an approximation to the Jacobian. +C Here J is computed by the user-supplied routine JAC if +C MITER = 1 or 4, or by finite differencing if MITER = 2, 3, or 5. +C If MITER = 3, a diagonal approximation to J is used. +C If JSV = -1, J is computed from scratch in all cases. +C If JSV = 1 and MITER = 1, 2, 4, or 5, and if the saved value of J is +C considered acceptable, then P is constructed from the saved J. +C J is stored in wm and replaced by P. If MITER .ne. 3, P is then +C subjected to LU decomposition in preparation for later solution +C of linear systems with P as coefficient matrix. This is done +C by ZGEFA if MITER = 1 or 2, and by ZGBFA if MITER = 4 or 5. +C +C Communication with ZVJAC is done with the following variables. (For +C more details, please see the comments in the driver subroutine.) +C Y = Vector containing predicted values on entry. +C YH = The Nordsieck array, an LDYH by LMAX array, input. +C LDYH = A constant .ge. N, the first dimension of YH, input. +C EWT = An error weight vector of length N. +C SAVF = Array containing f evaluated at predicted y, input. +C WM = Complex work space for matrices. In the output, it +C contains the inverse diagonal matrix if MITER = 3 and +C the LU decomposition of P if MITER is 1, 2 , 4, or 5. +C Storage of the saved Jacobian starts at WM(LOCJS). +C IWM = Integer work space containing pivot information, +C starting at IWM(31), if MITER is 1, 2, 4, or 5. +C IWM also contains band parameters ML = IWM(1) and +C MU = IWM(2) if MITER is 4 or 5. +C F = Dummy name for the user-supplied subroutine for f. +C JAC = Dummy name for the user-supplied Jacobian subroutine. +C RPAR, IPAR = User's real/complex and integer work arrays. +C RL1 = 1/EL(2) (input). +C IERPJ = Output error flag, = 0 if no trouble, 1 if the P +C matrix is found to be singular. +C JCUR = Output flag to indicate whether the Jacobian matrix +C (or approximation) is now current. +C JCUR = 0 means J is not current. +C JCUR = 1 means J is current. 
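The MITER = 2 branch described above (finite-difference Jacobian, followed by LU factorization of P = I - h*rl1*J) can be pictured with a small NumPy/SciPy sketch. The increment rule mirrors the one in the comments, but the names and details here are illustrative, not the ZVJAC code itself:

    import numpy as np
    from scipy.linalg import lu_factor

    def build_newton_matrix(f, t, y, fy, h, rl1, ewt, uround):
        # Approximate J column by column with forward differences (one call to f
        # per column), then form and LU-factor P = I - h*rl1*J.
        n = y.size
        srur = np.sqrt(uround)
        fac = np.sqrt(np.mean((np.abs(fy) / ewt) ** 2))   # weighted norm of f(t, y)
        r0 = 1000.0 * abs(h) * uround * n * fac
        if r0 == 0.0:
            r0 = 1.0
        J = np.empty((n, n), dtype=fy.dtype)
        for j in range(n):
            r = max(srur * abs(y[j]), r0 * ewt[j])        # column increment
            yj = y[j]
            y[j] = yj + r
            J[:, j] = (f(t, y) - fy) / r
            y[j] = yj                                     # restore the perturbed component
        P = np.eye(n) - (h * rl1) * J
        return lu_factor(P)           # LU factors and pivots, reused by the corrector's linear solves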
+C----------------------------------------------------------------------- +C +C Type declarations for labeled COMMON block ZVOD01 -------------------- +C + DOUBLE PRECISION ACNRM, CCMXJ, CONP, CRATE, DRC, EL, + 1 ETA, ETAMAX, H, HMIN, HMXI, HNEW, HRL1, HSCAL, PRL1, + 2 RC, RL1, SRUR, TAU, TQ, TN, UROUND + INTEGER ICF, INIT, IPUP, JCUR, JSTART, JSV, KFLAG, KUTH, + 1 L, LMAX, LYH, LEWT, LACOR, LSAVF, LWM, LIWM, + 2 LOCJS, MAXORD, METH, MITER, MSBJ, MXHNIL, MXSTEP, + 3 N, NEWH, NEWQ, NHNIL, NQ, NQNYH, NQWAIT, NSLJ, + 4 NSLP, NYH +C +C Type declarations for labeled COMMON block ZVOD02 -------------------- +C + DOUBLE PRECISION HU + INTEGER NCFN, NETF, NFE, NJE, NLU, NNI, NQU, NST +C +C Type declarations for local variables -------------------------------- +C + DOUBLE COMPLEX DI, R1, YI, YJ, YJJ + DOUBLE PRECISION CON, FAC, ONE, PT1, R, R0, THOU, ZERO + INTEGER I, I1, I2, IER, II, J, J1, JJ, JOK, LENP, MBA, MBAND, + 1 MEB1, MEBAND, ML, ML1, MU, NP1 +C +C Type declaration for function subroutines called --------------------- +C + DOUBLE PRECISION ZVNORM +C----------------------------------------------------------------------- +C The following Fortran-77 declaration is to cause the values of the +C listed (local) variables to be saved between calls to this subroutine. +C----------------------------------------------------------------------- + SAVE ONE, PT1, THOU, ZERO +C----------------------------------------------------------------------- + COMMON /ZVOD01/ ACNRM, CCMXJ, CONP, CRATE, DRC, EL(13), ETA, + 1 ETAMAX, H, HMIN, HMXI, HNEW, HRL1, HSCAL, PRL1, + 2 RC, RL1, SRUR, TAU(13), TQ(5), TN, UROUND, + 3 ICF, INIT, IPUP, JCUR, JSTART, JSV, KFLAG, KUTH, + 4 L, LMAX, LYH, LEWT, LACOR, LSAVF, LWM, LIWM, + 5 LOCJS, MAXORD, METH, MITER, MSBJ, MXHNIL, MXSTEP, + 6 N, NEWH, NEWQ, NHNIL, NQ, NQNYH, NQWAIT, NSLJ, + 7 NSLP, NYH + COMMON /ZVOD02/ HU, NCFN, NETF, NFE, NJE, NLU, NNI, NQU, NST +C + DATA ONE /1.0D0/, THOU /1000.0D0/, ZERO /0.0D0/, PT1 /0.1D0/ +C + IERPJ = 0 + HRL1 = H*RL1 +C See whether J should be evaluated (JOK = -1) or not (JOK = 1). ------- + JOK = JSV + IF (JSV .EQ. 1) THEN + IF (NST .EQ. 0 .OR. NST .GT. NSLJ+MSBJ) JOK = -1 + IF (ICF .EQ. 1 .AND. DRC .LT. CCMXJ) JOK = -1 + IF (ICF .EQ. 2) JOK = -1 + ENDIF +C End of setting JOK. -------------------------------------------------- +C + IF (JOK .EQ. -1 .AND. MITER .EQ. 1) THEN +C If JOK = -1 and MITER = 1, call JAC to evaluate Jacobian. ------------ + NJE = NJE + 1 + NSLJ = NST + JCUR = 1 + LENP = N*N + DO 110 I = 1,LENP + 110 WM(I) = ZERO + CALL JAC (N, TN, Y, 0, 0, WM, N, RPAR, IPAR) + IF (JSV .EQ. 1) CALL ZCOPY (LENP, WM, 1, WM(LOCJS), 1) + ENDIF +C + IF (JOK .EQ. -1 .AND. MITER .EQ. 2) THEN +C If MITER = 2, make N calls to F to approximate the Jacobian. --------- + NJE = NJE + 1 + NSLJ = NST + JCUR = 1 + FAC = ZVNORM (N, SAVF, EWT) + R0 = THOU*ABS(H)*UROUND*REAL(N)*FAC + IF (R0 .EQ. ZERO) R0 = ONE + J1 = 0 + DO 230 J = 1,N + YJ = Y(J) + R = MAX(SRUR*ABS(YJ),R0/EWT(J)) + Y(J) = Y(J) + R + FAC = ONE/R + CALL F (N, TN, Y, FTEM, RPAR, IPAR) + DO 220 I = 1,N + 220 WM(I+J1) = (FTEM(I) - SAVF(I))*FAC + Y(J) = YJ + J1 = J1 + N + 230 CONTINUE + NFE = NFE + N + LENP = N*N + IF (JSV .EQ. 1) CALL ZCOPY (LENP, WM, 1, WM(LOCJS), 1) + ENDIF +C + IF (JOK .EQ. 1 .AND. (MITER .EQ. 1 .OR. MITER .EQ. 2)) THEN + JCUR = 0 + LENP = N*N + CALL ZCOPY (LENP, WM(LOCJS), 1, WM, 1) + ENDIF +C + IF (MITER .EQ. 1 .OR. MITER .EQ. 2) THEN +C Multiply Jacobian by scalar, add identity, and do LU decomposition. 
-- + CON = -HRL1 + CALL DZSCAL (LENP, CON, WM, 1) + J = 1 + NP1 = N + 1 + DO 250 I = 1,N + WM(J) = WM(J) + ONE + 250 J = J + NP1 + NLU = NLU + 1 + CALL ZGEFA (WM, N, N, IWM(31), IER) + IF (IER .NE. 0) IERPJ = 1 + RETURN + ENDIF +C End of code block for MITER = 1 or 2. -------------------------------- +C + IF (MITER .EQ. 3) THEN +C If MITER = 3, construct a diagonal approximation to J and P. --------- + NJE = NJE + 1 + JCUR = 1 + R = RL1*PT1 + DO 310 I = 1,N + 310 Y(I) = Y(I) + R*(H*SAVF(I) - YH(I,2)) + CALL F (N, TN, Y, WM, RPAR, IPAR) + NFE = NFE + 1 + DO 320 I = 1,N + R1 = H*SAVF(I) - YH(I,2) + DI = PT1*R1 - H*(WM(I) - SAVF(I)) + WM(I) = ONE + IF (ABS(R1) .LT. UROUND/EWT(I)) GO TO 320 + IF (ABS(DI) .EQ. ZERO) GO TO 330 + WM(I) = PT1*R1/DI + 320 CONTINUE + RETURN + 330 IERPJ = 1 + RETURN + ENDIF +C End of code block for MITER = 3. ------------------------------------- +C +C Set constants for MITER = 4 or 5. ------------------------------------ + ML = IWM(1) + MU = IWM(2) + ML1 = ML + 1 + MBAND = ML + MU + 1 + MEBAND = MBAND + ML + LENP = MEBAND*N +C + IF (JOK .EQ. -1 .AND. MITER .EQ. 4) THEN +C If JOK = -1 and MITER = 4, call JAC to evaluate Jacobian. ------------ + NJE = NJE + 1 + NSLJ = NST + JCUR = 1 + DO 410 I = 1,LENP + 410 WM(I) = ZERO + CALL JAC (N, TN, Y, ML, MU, WM(ML1), MEBAND, RPAR, IPAR) + IF (JSV .EQ. 1) + 1 CALL ZACOPY (MBAND, N, WM(ML1), MEBAND, WM(LOCJS), MBAND) + ENDIF +C + IF (JOK .EQ. -1 .AND. MITER .EQ. 5) THEN +C If MITER = 5, make ML+MU+1 calls to F to approximate the Jacobian. --- + NJE = NJE + 1 + NSLJ = NST + JCUR = 1 + MBA = MIN(MBAND,N) + MEB1 = MEBAND - 1 + FAC = ZVNORM (N, SAVF, EWT) + R0 = THOU*ABS(H)*UROUND*REAL(N)*FAC + IF (R0 .EQ. ZERO) R0 = ONE + DO 560 J = 1,MBA + DO 530 I = J,N,MBAND + YI = Y(I) + R = MAX(SRUR*ABS(YI),R0/EWT(I)) + 530 Y(I) = Y(I) + R + CALL F (N, TN, Y, FTEM, RPAR, IPAR) + DO 550 JJ = J,N,MBAND + Y(JJ) = YH(JJ,1) + YJJ = Y(JJ) + R = MAX(SRUR*ABS(YJJ),R0/EWT(JJ)) + FAC = ONE/R + I1 = MAX(JJ-MU,1) + I2 = MIN(JJ+ML,N) + II = JJ*MEB1 - ML + DO 540 I = I1,I2 + 540 WM(II+I) = (FTEM(I) - SAVF(I))*FAC + 550 CONTINUE + 560 CONTINUE + NFE = NFE + MBA + IF (JSV .EQ. 1) + 1 CALL ZACOPY (MBAND, N, WM(ML1), MEBAND, WM(LOCJS), MBAND) + ENDIF +C + IF (JOK .EQ. 1) THEN + JCUR = 0 + CALL ZACOPY (MBAND, N, WM(LOCJS), MBAND, WM(ML1), MEBAND) + ENDIF +C +C Multiply Jacobian by scalar, add identity, and do LU decomposition. + CON = -HRL1 + CALL DZSCAL (LENP, CON, WM, 1 ) + II = MBAND + DO 580 I = 1,N + WM(II) = WM(II) + ONE + 580 II = II + MEBAND + NLU = NLU + 1 + CALL ZGBFA (WM, MEBAND, N, ML, MU, IWM(31), IER) + IF (IER .NE. 0) IERPJ = 1 + RETURN +C End of code block for MITER = 4 or 5. -------------------------------- +C +C----------------------- End of Subroutine ZVJAC ----------------------- + END +*DECK ZACOPY + SUBROUTINE ZACOPY (NROW, NCOL, A, NROWA, B, NROWB) + DOUBLE COMPLEX A, B + INTEGER NROW, NCOL, NROWA, NROWB + DIMENSION A(NROWA,NCOL), B(NROWB,NCOL) +C----------------------------------------------------------------------- +C Call sequence input -- NROW, NCOL, A, NROWA, NROWB +C Call sequence output -- B +C COMMON block variables accessed -- None +C +C Subroutines called by ZACOPY: ZCOPY +C Function routines called by ZACOPY: None +C----------------------------------------------------------------------- +C This routine copies one rectangular array, A, to another, B, +C where A and B may have different row dimensions, NROWA and NROWB. +C The data copied consists of NROW rows and NCOL columns. 
+C----------------------------------------------------------------------- + INTEGER IC +C + DO 20 IC = 1,NCOL + CALL ZCOPY (NROW, A(1,IC), 1, B(1,IC), 1) + 20 CONTINUE +C + RETURN +C----------------------- End of Subroutine ZACOPY ---------------------- + END +*DECK ZVSOL + SUBROUTINE ZVSOL (WM, IWM, X, IERSL) + DOUBLE COMPLEX WM, X + INTEGER IWM, IERSL + DIMENSION WM(*), IWM(*), X(*) +C----------------------------------------------------------------------- +C Call sequence input -- WM, IWM, X +C Call sequence output -- X, IERSL +C COMMON block variables accessed: +C /ZVOD01/ -- H, HRL1, RL1, MITER, N +C +C Subroutines called by ZVSOL: ZGESL, ZGBSL +C Function routines called by ZVSOL: None +C----------------------------------------------------------------------- +C This routine manages the solution of the linear system arising from +C a chord iteration. It is called if MITER .ne. 0. +C If MITER is 1 or 2, it calls ZGESL to accomplish this. +C If MITER = 3 it updates the coefficient H*RL1 in the diagonal +C matrix, and then computes the solution. +C If MITER is 4 or 5, it calls ZGBSL. +C Communication with ZVSOL uses the following variables: +C WM = Real work space containing the inverse diagonal matrix if +C MITER = 3 and the LU decomposition of the matrix otherwise. +C IWM = Integer work space containing pivot information, starting at +C IWM(31), if MITER is 1, 2, 4, or 5. IWM also contains band +C parameters ML = IWM(1) and MU = IWM(2) if MITER is 4 or 5. +C X = The right-hand side vector on input, and the solution vector +C on output, of length N. +C IERSL = Output flag. IERSL = 0 if no trouble occurred. +C IERSL = 1 if a singular matrix arose with MITER = 3. +C----------------------------------------------------------------------- +C +C Type declarations for labeled COMMON block ZVOD01 -------------------- +C + DOUBLE PRECISION ACNRM, CCMXJ, CONP, CRATE, DRC, EL, + 1 ETA, ETAMAX, H, HMIN, HMXI, HNEW, HRL1, HSCAL, PRL1, + 2 RC, RL1, SRUR, TAU, TQ, TN, UROUND + INTEGER ICF, INIT, IPUP, JCUR, JSTART, JSV, KFLAG, KUTH, + 1 L, LMAX, LYH, LEWT, LACOR, LSAVF, LWM, LIWM, + 2 LOCJS, MAXORD, METH, MITER, MSBJ, MXHNIL, MXSTEP, + 3 N, NEWH, NEWQ, NHNIL, NQ, NQNYH, NQWAIT, NSLJ, + 4 NSLP, NYH +C +C Type declarations for local variables -------------------------------- +C + DOUBLE COMPLEX DI + DOUBLE PRECISION ONE, PHRL1, R, ZERO + INTEGER I, MEBAND, ML, MU +C----------------------------------------------------------------------- +C The following Fortran-77 declaration is to cause the values of the +C listed (local) variables to be saved between calls to this integrator. +C----------------------------------------------------------------------- + SAVE ONE, ZERO +C + COMMON /ZVOD01/ ACNRM, CCMXJ, CONP, CRATE, DRC, EL(13), ETA, + 1 ETAMAX, H, HMIN, HMXI, HNEW, HRL1, HSCAL, PRL1, + 2 RC, RL1, SRUR, TAU(13), TQ(5), TN, UROUND, + 3 ICF, INIT, IPUP, JCUR, JSTART, JSV, KFLAG, KUTH, + 4 L, LMAX, LYH, LEWT, LACOR, LSAVF, LWM, LIWM, + 5 LOCJS, MAXORD, METH, MITER, MSBJ, MXHNIL, MXSTEP, + 6 N, NEWH, NEWQ, NHNIL, NQ, NQNYH, NQWAIT, NSLJ, + 7 NSLP, NYH +C + DATA ONE /1.0D0/, ZERO /0.0D0/ +C + IERSL = 0 + GO TO (100, 100, 300, 400, 400), MITER + 100 CALL ZGESL (WM, N, N, IWM(31), X, 0) + RETURN +C + 300 PHRL1 = HRL1 + HRL1 = H*RL1 + IF (HRL1 .EQ. PHRL1) GO TO 330 + R = HRL1/PHRL1 + DO 320 I = 1,N + DI = ONE - R*(ONE - ONE/WM(I)) + IF (ABS(DI) .EQ. 
ZERO) GO TO 390 + 320 WM(I) = ONE/DI +C + 330 DO 340 I = 1,N + 340 X(I) = WM(I)*X(I) + RETURN + 390 IERSL = 1 + RETURN +C + 400 ML = IWM(1) + MU = IWM(2) + MEBAND = 2*ML + MU + 1 + CALL ZGBSL (WM, MEBAND, N, ML, MU, IWM(31), X, 0) + RETURN +C----------------------- End of Subroutine ZVSOL ----------------------- + END +*DECK ZVSRCO + SUBROUTINE ZVSRCO (RSAV, ISAV, JOB) + DOUBLE PRECISION RSAV + INTEGER ISAV, JOB + DIMENSION RSAV(*), ISAV(*) +C----------------------------------------------------------------------- +C Call sequence input -- RSAV, ISAV, JOB +C Call sequence output -- RSAV, ISAV +C COMMON block variables accessed -- All of /ZVOD01/ and /ZVOD02/ +C +C Subroutines/functions called by ZVSRCO: None +C----------------------------------------------------------------------- +C This routine saves or restores (depending on JOB) the contents of the +C COMMON blocks ZVOD01 and ZVOD02, which are used internally by ZVODE. +C +C RSAV = real array of length 51 or more. +C ISAV = integer array of length 41 or more. +C JOB = flag indicating to save or restore the COMMON blocks: +C JOB = 1 if COMMON is to be saved (written to RSAV/ISAV). +C JOB = 2 if COMMON is to be restored (read from RSAV/ISAV). +C A call with JOB = 2 presumes a prior call with JOB = 1. +C----------------------------------------------------------------------- + DOUBLE PRECISION RVOD1, RVOD2 + INTEGER IVOD1, IVOD2 + INTEGER I, LENIV1, LENIV2, LENRV1, LENRV2 +C----------------------------------------------------------------------- +C The following Fortran-77 declaration is to cause the values of the +C listed (local) variables to be saved between calls to this integrator. +C----------------------------------------------------------------------- + SAVE LENRV1, LENIV1, LENRV2, LENIV2 +C + COMMON /ZVOD01/ RVOD1(50), IVOD1(33) + COMMON /ZVOD02/ RVOD2(1), IVOD2(8) + DATA LENRV1/50/, LENIV1/33/, LENRV2/1/, LENIV2/8/ +C + IF (JOB .EQ. 2) GO TO 100 + DO 10 I = 1,LENRV1 + 10 RSAV(I) = RVOD1(I) + DO 15 I = 1,LENRV2 + 15 RSAV(LENRV1+I) = RVOD2(I) +C + DO 20 I = 1,LENIV1 + 20 ISAV(I) = IVOD1(I) + DO 25 I = 1,LENIV2 + 25 ISAV(LENIV1+I) = IVOD2(I) +C + RETURN +C + 100 CONTINUE + DO 110 I = 1,LENRV1 + 110 RVOD1(I) = RSAV(I) + DO 115 I = 1,LENRV2 + 115 RVOD2(I) = RSAV(LENRV1+I) +C + DO 120 I = 1,LENIV1 + 120 IVOD1(I) = ISAV(I) + DO 125 I = 1,LENIV2 + 125 IVOD2(I) = ISAV(LENIV1+I) +C + RETURN +C----------------------- End of Subroutine ZVSRCO ---------------------- + END +*DECK ZEWSET + SUBROUTINE ZEWSET (N, ITOL, RTOL, ATOL, YCUR, EWT) +C***BEGIN PROLOGUE ZEWSET +C***SUBSIDIARY +C***PURPOSE Set error weight vector. +C***TYPE DOUBLE PRECISION (SEWSET-S, DEWSET-D, ZEWSET-Z) +C***AUTHOR Hindmarsh, Alan C., (LLNL) +C***DESCRIPTION +C +C This subroutine sets the error weight vector EWT according to +C EWT(i) = RTOL(i)*ABS(YCUR(i)) + ATOL(i), i = 1,...,N, +C with the subscript on RTOL and/or ATOL possibly replaced by 1 above, +C depending on the value of ITOL. +C +C***SEE ALSO DLSODE +C***ROUTINES CALLED (NONE) +C***REVISION HISTORY (YYMMDD) +C 060502 DATE WRITTEN, modified from DEWSET of 930809. 
+C***END PROLOGUE ZEWSET + DOUBLE COMPLEX YCUR + DOUBLE PRECISION RTOL, ATOL, EWT + INTEGER N, ITOL + INTEGER I + DIMENSION RTOL(*), ATOL(*), YCUR(N), EWT(N) +C +C***FIRST EXECUTABLE STATEMENT ZEWSET + GO TO (10, 20, 30, 40), ITOL + 10 CONTINUE + DO 15 I = 1,N + 15 EWT(I) = RTOL(1)*ABS(YCUR(I)) + ATOL(1) + RETURN + 20 CONTINUE + DO 25 I = 1,N + 25 EWT(I) = RTOL(1)*ABS(YCUR(I)) + ATOL(I) + RETURN + 30 CONTINUE + DO 35 I = 1,N + 35 EWT(I) = RTOL(I)*ABS(YCUR(I)) + ATOL(1) + RETURN + 40 CONTINUE + DO 45 I = 1,N + 45 EWT(I) = RTOL(I)*ABS(YCUR(I)) + ATOL(I) + RETURN +C----------------------- END OF SUBROUTINE ZEWSET ---------------------- + END +*DECK ZVNORM + DOUBLE PRECISION FUNCTION ZVNORM (N, V, W) +C***BEGIN PROLOGUE ZVNORM +C***SUBSIDIARY +C***PURPOSE Weighted root-mean-square vector norm. +C***TYPE DOUBLE COMPLEX (SVNORM-S, DVNORM-D, ZVNORM-Z) +C***AUTHOR Hindmarsh, Alan C., (LLNL) +C***DESCRIPTION +C +C This function routine computes the weighted root-mean-square norm +C of the vector of length N contained in the double complex array V, +C with weights contained in the array W of length N: +C ZVNORM = SQRT( (1/N) * SUM( abs(V(i))**2 * W(i)**2 ) +C The squared absolute value abs(v)**2 is computed by ZABSSQ. +C +C***SEE ALSO DLSODE +C***ROUTINES CALLED ZABSSQ +C***REVISION HISTORY (YYMMDD) +C 060502 DATE WRITTEN, modified from DVNORM of 930809. +C***END PROLOGUE ZVNORM + DOUBLE COMPLEX V + DOUBLE PRECISION W, SUM, ZABSSQ + INTEGER N, I + DIMENSION V(N), W(N) +C +C***FIRST EXECUTABLE STATEMENT ZVNORM + SUM = 0.0D0 + DO 10 I = 1,N + 10 SUM = SUM + ZABSSQ(V(I)) * W(I)**2 + ZVNORM = SQRT(SUM/N) + RETURN +C----------------------- END OF FUNCTION ZVNORM ------------------------ + END +*DECK ZABSSQ + DOUBLE PRECISION FUNCTION ZABSSQ(Z) +C***BEGIN PROLOGUE ZABSSQ +C***SUBSIDIARY +C***PURPOSE Squared absolute value of a double complex number. +C***TYPE DOUBLE PRECISION (ZABSSQ-Z) +C***AUTHOR Hindmarsh, Alan C., (LLNL) +C***DESCRIPTION +C +C This function routine computes the square of the absolute value of +C a double precision complex number Z, +C ZABSSQ = DREAL(Z)**2 * DIMAG(Z)**2 +C***REVISION HISTORY (YYMMDD) +C 060502 DATE WRITTEN. +C***END PROLOGUE ZABSSQ + DOUBLE COMPLEX Z + ZABSSQ = DREAL(Z)**2 + DIMAG(Z)**2 + RETURN +C----------------------- END OF FUNCTION ZABSSQ ------------------------ + END +*DECK DZSCAL + SUBROUTINE DZSCAL(N, DA, ZX, INCX) +C***BEGIN PROLOGUE DZSCAL +C***SUBSIDIARY +C***PURPOSE Scale a double complex vector by a double prec. constant. +C***TYPE DOUBLE PRECISION (DZSCAL-Z) +C***AUTHOR Hindmarsh, Alan C., (LLNL) +C***DESCRIPTION +C Scales a double complex vector by a double precision constant. +C Minor modification of BLAS routine ZSCAL. +C***REVISION HISTORY (YYMMDD) +C 060530 DATE WRITTEN. +C***END PROLOGUE DZSCAL + DOUBLE COMPLEX ZX(*) + DOUBLE PRECISION DA + INTEGER I,INCX,IX,N +C + IF( N.LE.0 .OR. INCX.LE.0 )RETURN + IF(INCX.EQ.1)GO TO 20 +C Code for increment not equal to 1 + IX = 1 + DO 10 I = 1,N + ZX(IX) = DA*ZX(IX) + IX = IX + INCX + 10 CONTINUE + RETURN +C Code for increment equal to 1 + 20 DO 30 I = 1,N + ZX(I) = DA*ZX(I) + 30 CONTINUE + RETURN + END +*DECK DZAXPY + SUBROUTINE DZAXPY(N, DA, ZX, INCX, ZY, INCY) +C***BEGIN PROLOGUE DZAXPY +C***PURPOSE Real constant times a complex vector plus a complex vector. +C***TYPE DOUBLE PRECISION (DZAXPY-Z) +C***AUTHOR Hindmarsh, Alan C., (LLNL) +C***DESCRIPTION +C Add a D.P. real constant times a complex vector to a complex vector. +C Minor modification of BLAS routine ZAXPY. 
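In array notation, the error-weight vector and the weighted root-mean-square norm described above for ZEWSET and ZVNORM reduce to two one-liners (shown for the ITOL = 4 case, i.e. component-wise rtol and atol; names are illustrative):

    import numpy as np

    def error_weights(y, rtol, atol):
        # EWT(i) = RTOL(i)*|YCUR(i)| + ATOL(i)
        return rtol * np.abs(y) + atol

    def wrms_norm(v, w):
        # ZVNORM = sqrt( (1/N) * sum( |V(i)|**2 * W(i)**2 ) ), with |.|**2 as in ZABSSQ
        return np.sqrt(np.mean((np.abs(v) * w) ** 2))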
+C***REVISION HISTORY (YYMMDD) +C 060530 DATE WRITTEN. +C***END PROLOGUE DZAXPY + DOUBLE COMPLEX ZX(*),ZY(*) + DOUBLE PRECISION DA + INTEGER I,INCX,INCY,IX,IY,N + IF(N.LE.0)RETURN + IF (ABS(DA) .EQ. 0.0D0) RETURN + IF (INCX.EQ.1.AND.INCY.EQ.1)GO TO 20 +C Code for unequal increments or equal increments not equal to 1 + IX = 1 + IY = 1 + IF(INCX.LT.0)IX = (-N+1)*INCX + 1 + IF(INCY.LT.0)IY = (-N+1)*INCY + 1 + DO 10 I = 1,N + ZY(IY) = ZY(IY) + DA*ZX(IX) + IX = IX + INCX + IY = IY + INCY + 10 CONTINUE + RETURN +C Code for both increments equal to 1 + 20 DO 30 I = 1,N + ZY(I) = ZY(I) + DA*ZX(I) + 30 CONTINUE + RETURN + END +*DECK DUMACH + DOUBLE PRECISION FUNCTION DUMACH () +C***BEGIN PROLOGUE DUMACH +C***PURPOSE Compute the unit roundoff of the machine. +C***CATEGORY R1 +C***TYPE DOUBLE PRECISION (RUMACH-S, DUMACH-D) +C***KEYWORDS MACHINE CONSTANTS +C***AUTHOR Hindmarsh, Alan C., (LLNL) +C***DESCRIPTION +C *Usage: +C DOUBLE PRECISION A, DUMACH +C A = DUMACH() +C +C *Function Return Values: +C A : the unit roundoff of the machine. +C +C *Description: +C The unit roundoff is defined as the smallest positive machine +C number u such that 1.0 + u .ne. 1.0. This is computed by DUMACH +C in a machine-independent manner. +C +C***REFERENCES (NONE) +C***ROUTINES CALLED DUMSUM +C***REVISION HISTORY (YYYYMMDD) +C 19930216 DATE WRITTEN +C 19930818 Added SLATEC-format prologue. (FNF) +C 20030707 Added DUMSUM to force normal storage of COMP. (ACH) +C***END PROLOGUE DUMACH +C + DOUBLE PRECISION U, COMP +C***FIRST EXECUTABLE STATEMENT DUMACH + U = 1.0D0 + 10 U = U*0.5D0 + CALL DUMSUM(1.0D0, U, COMP) + IF (COMP .NE. 1.0D0) GO TO 10 + DUMACH = U*2.0D0 + RETURN +C----------------------- End of Function DUMACH ------------------------ + END + SUBROUTINE DUMSUM(A,B,C) +C Routine to force normal storing of A + B, for DUMACH. 
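The unit-roundoff search that DUMACH describes above, halving u until 1 + u no longer differs from 1, can be reproduced in a couple of lines (Python floats are IEEE doubles, so the result is about 2.2e-16):

    u = 1.0
    while 1.0 + 0.5 * u != 1.0:
        u *= 0.5
    # u is now the smallest power of two with 1.0 + u != 1.0, i.e. the unit roundoff
    print(u)    # roughly 2.22e-16 on IEEE double-precision hardware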
+ DOUBLE PRECISION A, B, C + C = A + B + RETURN + END Property changes on: trunk/scipy/integrate/odepack/zvode.f ___________________________________________________________________ Name: svn:eol-style + native From scipy-svn at scipy.org Wed Feb 20 20:57:49 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Wed, 20 Feb 2008 19:57:49 -0600 (CST) Subject: [Scipy-svn] r3952 - trunk/scipy/linalg Message-ID: <20080221015749.3A37839C0F4@new.scipy.org> Author: wnbell Date: 2008-02-20 19:57:37 -0600 (Wed, 20 Feb 2008) New Revision: 3952 Modified: trunk/scipy/linalg/basic.py trunk/scipy/linalg/blas.py trunk/scipy/linalg/decomp.py trunk/scipy/linalg/matfuncs.py Log: applied patch from user pv Reformat and ReSTify scipy.linalg docstrings resolves ticket #596 Modified: trunk/scipy/linalg/basic.py =================================================================== --- trunk/scipy/linalg/basic.py 2008-02-20 05:39:03 UTC (rev 3951) +++ trunk/scipy/linalg/basic.py 2008-02-21 01:57:37 UTC (rev 3952) @@ -25,22 +25,34 @@ def lu_solve((lu, piv), b, trans=0, overwrite_b=0): - """ lu_solve((lu, piv), b, trans=0, overwrite_b=0) -> x + """Solve an equation system, a x = b, given the LU factorization of a + + Parameters + ---------- + (lu, piv) + Factorization of the coefficient matrix a, as given by lu_factor + b : array + Right-hand side + trans : {0, 1, 2} + Type of system to solve: - Solve a system of equations given a previously factored matrix + ===== ========= + trans system + ===== ========= + 0 a x = b + 1 a^T x = b + 2 a^H x = b + ===== ========= - Inputs: + Returns + ------- + x : array + Solution to the system - (lu,piv) -- The factored matrix, a (the output of lu_factor) - b -- a set of right-hand sides - trans -- type of system to solve: - 0 : a * x = b (no transpose) - 1 : a^T * x = b (transpose) - 2 a^H * x = b (conjugate transpose) - - Outputs: - - x -- the solution to the system + See also + -------- + lu_factor : LU factorize a matrix + """ b1 = asarray_chkfinite(b) overwrite_b = overwrite_b or (b1 is not b and not hasattr(b,'__array__')) @@ -54,18 +66,24 @@ 'illegal value in %-th argument of internal gesv|posv'%(-info) def cho_solve((c, lower), b, overwrite_b=0): - """ cho_solve((c, lower), b, overwrite_b=0) -> x + """Solve an equation system, a x = b, given the Cholesky factorization of a - Solve a system of equations given a previously cholesky factored matrix + Parameters + ---------- + (c, lower) + Cholesky factorization of a, as given by cho_factor + b : array + Right-hand side - Inputs: + Returns + ------- + x : array + The solution to the system a x = b - (c,lower) -- The factored matrix, a (the output of cho_factor) - b -- a set of right-hand sides - - Outputs: - - x -- the solution to the system a*x = b + See also + -------- + cho_factor : Cholesky factorization of a matrix + """ b1 = asarray_chkfinite(b) overwrite_b = overwrite_b or (b1 is not b and not hasattr(b,'__array__')) @@ -81,22 +99,29 @@ # Linear equations def solve(a, b, sym_pos=0, lower=0, overwrite_a=0, overwrite_b=0, debug = 0): - """ solve(a, b, sym_pos=0, lower=0, overwrite_a=0, overwrite_b=0) -> x + """Solve the equation a x = b for x - Solve a linear system of equations a * x = b for x. + Parameters + ---------- + a : array, shape (M, M) + b : array, shape (M,) or (M, N) + sym_pos : boolean + Assume a is symmetric and positive definite + lower : boolean + Use only data contained in the lower triangle of a, if sym_pos is true. + Default is to use upper triangle. 
+ overwrite_a : boolean + Allow overwriting data in a (may enhance performance) + overwrite_b : boolean + Allow overwriting data in b (may enhance performance) + + Returns + ------- + x : array, shape (M,) or (M, N) depending on b + Solution to the system a x = b - Inputs: - - a -- An N x N matrix. - b -- An N x nrhs matrix or N vector. - sym_pos -- Assume a is symmetric and positive definite. - lower -- Assume a is lower triangular, otherwise upper one. - Only used if sym_pos is true. - overwrite_y - Discard data in y, where y is a or b. - - Outputs: - - x -- The solution to the system a * x = b + Raises LinAlgError if a is singular + """ a1, b1 = map(asarray_chkfinite,(a,b)) if len(a1.shape) != 2 or a1.shape[0] != a1.shape[1]: @@ -129,29 +154,37 @@ def solve_banded((l,u), ab, b, overwrite_ab=0, overwrite_b=0, debug = 0): - """ solve_banded((l,u), ab, b, overwrite_ab=0, overwrite_b=0) -> x + """Solve the equation a x = b for x, assuming a is banded matrix. - Solve a linear system of equations a * x = b for x where - a is a banded matrix stored in diagonal orded form + The matrix a is stored in ab using the matrix diagonal orded form:: - * * a1u + ab[u + i - j, j] == a[i,j] - * a12 a23 ... - a11 a22 a33 ... - a21 a32 a43 ... - . - al1 .. * + Example of ab (shape of a is (6,6), u=1, l=2):: - Inputs: + * a01 a12 a23 a34 a45 + a00 a11 a22 a33 a44 a55 + a10 a21 a32 a43 a54 * + a20 a31 a42 a53 * * - (l,u) -- number of non-zero lower and upper diagonals, respectively. - a -- An N x (l+u+1) matrix. - b -- An N x nrhs matrix or N vector. - overwrite_y - Discard data in y, where y is ab or b. + Parameters + ---------- + (l, u) : (integer, integer) + Number of non-zero lower and upper diagonals + ab : array, shape (l+u+1, M) + Banded matrix + b : array, shape (M,) or (M, K) + Right-hand side + overwrite_ab : boolean + Discard data in ab (may enhance performance) + overwrite_b : boolean + Discard data in b (may enhance performance) - Outputs: - - x -- The solution to the system a * x = b + Returns + ------- + x : array, shape (M,) or (M, K) + The solution to the system a x = b + """ a1, b1 = map(asarray_chkfinite,(ab,b)) overwrite_b = overwrite_b or (b1 is not b and not hasattr(b,'__array__')) @@ -171,34 +204,48 @@ def solveh_banded(ab, b, overwrite_ab=0, overwrite_b=0, lower=0): - """ solveh_banded(ab, b, overwrite_ab=0, overwrite_b=0) -> c, x + """Solve equation a x = b. a is Hermitian positive-definite banded matrix. - Solve a linear system of equations a * x = b for x where - a is a banded symmetric or Hermitian positive definite - matrix stored in lower diagonal ordered form (lower=1) + The matrix a is stored in ab either in lower diagonal or upper + diagonal ordered form: - a11 a22 a33 a44 a55 a66 - a21 a32 a43 a54 a65 * - a31 a42 a53 a64 * * + ab[u + i - j, j] == a[i,j] (if upper form; i <= j) + ab[ i - j, j] == a[i,j] (if lower form; i >= j) - or upper diagonal ordered form + Example of ab (shape of a is (6,6), u=2):: - * * a31 a42 a53 a64 - * a21 a32 a43 a54 a65 - a11 a22 a33 a44 a55 a66 + upper form: + * * a02 a13 a24 a35 + * a01 a12 a23 a34 a45 + a00 a11 a22 a33 a44 a55 + + lower form: + a00 a11 a22 a33 a44 a55 + a10 a21 a32 a43 a54 * + a20 a31 a42 a53 * * - Inputs: + Cells marked with * are not used. - ab -- An N x l - b -- An N x nrhs matrix or N vector. - overwrite_y - Discard data in y, where y is ab or b. - lower - is ab in lower or upper form? 
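As a concrete illustration of the diagonal-ordered storage documented above for solve_banded (ab[u + i - j, j] == a[i, j]), here is a hypothetical 4x4 tridiagonal system with l = u = 1; the entries marked unused may hold anything:

    import numpy as np
    from scipy.linalg import solve_banded

    a = np.array([[4., 1., 0., 0.],
                  [1., 4., 1., 0.],
                  [0., 1., 4., 1.],
                  [0., 0., 1., 4.]])
    b = np.ones(4)

    ab = np.array([[0., 1., 1., 1.],    # superdiagonal: ab[0, j] = a[j-1, j]; ab[0, 0] unused
                   [4., 4., 4., 4.],    # main diagonal: ab[1, j] = a[j, j]
                   [1., 1., 1., 0.]])   # subdiagonal:   ab[2, j] = a[j+1, j]; ab[2, 3] unused
    x = solve_banded((1, 1), ab, b)
    assert np.allclose(np.dot(a, x), b)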
+ Parameters + ---------- + ab : array, shape (M, u + 1) + Banded matrix + b : array, shape (M,) or (M, K) + Right-hand side + overwrite_ab : boolean + Discard data in ab (may enhance performance) + overwrite_b : boolean + Discard data in b (may enhance performance) + lower : boolean + Is the matrix in the lower form. (Default is upper form) - Outputs: - - c: the Cholesky factorization of ab - x: the solution to ab * x = b - + Returns + ------- + c : array, shape (M, u+1) + Cholesky factorization of a, in the same banded format as ab + x : array, shape (M,) or (M, K) + The solution to the system a x = b + """ ab, b = map(asarray_chkfinite,(ab,b)) @@ -215,32 +262,40 @@ 'illegal value in %d-th argument of internal pbsv'%(-info) def cholesky_banded(ab, overwrite_ab=0, lower=0): - """ cholesky_banded(ab, overwrite_ab=0, lower=0) -> c - - Compute the Cholesky decomposition of a - banded symmetric or Hermitian positive definite - matrix stored in lower diagonal ordered form (lower=1) - - a11 a22 a33 a44 a55 a66 - a21 a32 a43 a54 a65 * - a31 a42 a53 a64 * * - - or upper diagonal ordered form - - * * a31 a42 a53 a64 - * a21 a32 a43 a54 a65 - a11 a22 a33 a44 a55 a66 - - Inputs: - - ab -- An N x l - overwrite_ab - Discard data in ab - lower - is ab in lower or upper form? - - Outputs: - - c: the Cholesky factorization of ab - + """Cholesky decompose a banded Hermitian positive-definite matrix + + The matrix a is stored in ab either in lower diagonal or upper + diagonal ordered form: + + ab[u + i - j, j] == a[i,j] (if upper form; i <= j) + ab[ i - j, j] == a[i,j] (if lower form; i >= j) + + Example of ab (shape of a is (6,6), u=2):: + + upper form: + * * a02 a13 a24 a35 + * a01 a12 a23 a34 a45 + a00 a11 a22 a33 a44 a55 + + lower form: + a00 a11 a22 a33 a44 a55 + a10 a21 a32 a43 a54 * + a20 a31 a42 a53 * * + + Parameters + ---------- + ab : array, shape (M, u + 1) + Banded matrix + overwrite_ab : boolean + Discard data in ab (may enhance performance) + lower : boolean + Is the matrix in the lower form. (Default is upper form) + + Returns + ------- + c : array, shape (M, u+1) + Cholesky factorization of a, in the same banded format as ab + """ ab = asarray_chkfinite(ab) @@ -259,9 +314,30 @@ # matrix inversion def inv(a, overwrite_a=0): - """ inv(a, overwrite_a=0) -> a_inv + """Compute the inverse of a matrix. + + Parameters + ---------- + a : array-like, shape (M, M) + Matrix to be inverted + + Returns + ------- + ainv : array-like, shape (M, M) + Inverse of the matrix a - Return inverse of square matrix a. + Raises LinAlgError if a is singular + + Examples + -------- + >>> a = array([[1., 2.], [3., 4.]]) + >>> inv(a) + array([[-2. , 1. ], + [ 1.5, -0.5]]) + >>> dot(a, inv(a)) + array([[ 1., 0.], + [ 0., 1.]]) + """ a1 = asarray_chkfinite(a) if len(a1.shape) != 2 or a1.shape[0] != a1.shape[1]: @@ -312,35 +388,39 @@ ## matrix and Vector norm import decomp def norm(x, ord=None): - """ norm(x, ord=None) -> n + """Matrix or vector norm. - Matrix or vector norm. 
+ Parameters + ---------- + x : array, shape (M,) or (M, N) + ord : number, or {None, 1, -1, 2, -2, inf, -inf, 'fro'} + Order of the norm: - Inputs: + ===== ============================ ========================== + ord norm for matrices norm for vectors + ===== ============================ ========================== + None Frobenius norm 2-norm + 'fro' Frobenius norm - + inf max(sum(abs(x), axis=1)) max(abs(x)) + -inf min(sum(abs(x), axis=1)) min(abs(x)) + 1 max(sum(abs(x), axis=0)) as below + -1 min(sum(abs(x), axis=0)) as below + 2 2-norm (largest sing. value) as below + -2 smallest singular value as below + other - sum(abs(x)**ord)**(1./ord) + ===== ============================ ========================== + + Returns + ------- + n : float + Norm of the matrix or vector - x -- a rank-1 (vector) or rank-2 (matrix) array - ord -- the order of the norm. - - Comments: - For arrays of any rank, if ord is None: - calculate the square norm (Euclidean norm for vectors, Frobenius norm for matrices) - - For vectors ord can be any real number including Inf or -Inf. - ord = Inf, computes the maximum of the magnitudes - ord = -Inf, computes minimum of the magnitudes - ord is finite, computes sum(abs(x)**ord,axis=0)**(1.0/ord) - - For matrices ord can only be one of the following values: - ord = 2 computes the largest singular value - ord = -2 computes the smallest singular value - ord = 1 computes the largest column sum of absolute values - ord = -1 computes the smallest column sum of absolute values - ord = Inf computes the largest row sum of absolute values - ord = -Inf computes the smallest row sum of absolute values - ord = 'fro' computes the frobenius norm sqrt(sum(diag(X.H * X),axis=0)) - - For values ord < 0, the result is, strictly speaking, not a - mathematical 'norm', but it may still be useful for numerical purposes. + Notes + ----- + For values ord < 0, the result is, strictly speaking, not a + mathematical 'norm', but it may still be useful for numerical + purposes. + """ x = asarray_chkfinite(x) if ord is None: # check the default case first and handle it immediately @@ -382,9 +462,20 @@ ### Determinant def det(a, overwrite_a=0): - """ det(a, overwrite_a=0) -> d + """Compute the determinant of a matrix - Return determinant of a square matrix. + Parameters + ---------- + a : array, shape (M, M) + + Returns + ------- + det : float or complex + Determinant of a + + Notes + ----- + The determinant is computed via LU factorization, LAPACK routine z/dgetrf. """ a1 = asarray_chkfinite(a) if len(a1.shape) != 2 or a1.shape[0] != a1.shape[1]: @@ -399,25 +490,38 @@ ### Linear Least Squares def lstsq(a, b, cond=None, overwrite_a=0, overwrite_b=0): - """ lstsq(a, b, cond=None, overwrite_a=0, overwrite_b=0) -> x,resids,rank,s - - Return least-squares solution of a * x = b. - - Inputs: - - a -- An M x N matrix. - b -- An M x nrhs matrix or M vector. - cond -- Used to determine effective rank of a. - - Outputs: - - x -- The solution (N x nrhs matrix) to the minimization problem: - 2-norm(| b - a * x |) -> min - resids -- The residual sum-of-squares for the solution matrix x - (only if M>N and rank==N). - rank -- The effective rank of a. - s -- Singular values of a in decreasing order. The condition number - of a is abs(s[0]/s[-1]). + """Compute least-squares solution to equation :m:`a x = b` + + Compute a vector x such that the 2-norm :m:`|b - a x|` is minimised. 
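A short usage sketch for the lstsq interface documented here, fitting a straight line in the least-squares sense (the data values are made up):

    import numpy as np
    from scipy.linalg import lstsq

    t = np.array([0., 1., 2., 3.])
    y = np.array([1.0, 2.9, 5.1, 7.0])
    a = np.vstack([np.ones_like(t), t]).T       # columns: intercept, slope
    coef, residues, rank, s = lstsq(a, y)
    # coef minimizes ||y - a coef||_2; rank and the singular values s diagnose conditioning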
+ + Parameters + ---------- + a : array, shape (M, N) + b : array, shape (M,) or (M, K) + cond : float + Cutoff for 'small' singular values; used to determine effective + rank of a. Singular values smaller than rcond*largest_singular_value + are considered zero. + overwrite_a : boolean + Discard data in a (may enhance performance) + overwrite_b : boolean + Discard data in b (may enhance performance) + + Returns + ------- + x : array, shape (N,) or (N, K) depending on shape of b + Least-squares solution + residues : array, shape () or (1,) or (K,) + Sums of residues, squared 2-norm for each column in :m:`b - a x` + If rank of matrix a is < N or > M this is an empty array. + If b was 1-d, this is an (1,) shape array, otherwise the shape is (K,) + rank : integer + Effective rank of matrix a + s : array, shape (min(M,N),) + Singular values of a. The condition number of a is abs(s[0]/s[-1]). + + Raises LinAlgError if computation does not converge + """ a1, b1 = map(asarray_chkfinite,(a,b)) if len(a1.shape) != 2: @@ -457,9 +561,36 @@ def pinv(a, cond=None, rcond=None): - """ pinv(a, rcond=None) -> a_pinv + """Compute the (Moore-Penrose) pseudo-inverse of a matrix. + + Calculate a generalized inverse of a matrix using a least-squares + solver. + + Parameters + ---------- + a : array, shape (M, N) + Matrix to be pseudo-inverted + cond, rcond : float + Cutoff for 'small' singular values in the least-squares solver. + Singular values smaller than rcond*largest_singular_value are + considered zero. + + Returns + ------- + B : array, shape (N, M) + + Raises LinAlgError if computation does not converge - Compute generalized inverse of A using least-squares solver. + Examples + -------- + >>> from numpy import * + >>> a = random.randn(9, 6) + >>> B = linalg.pinv(a) + >>> allclose(a, dot(a, dot(B, a))) + True + >>> allclose(B, dot(B, dot(a, B))) + True + """ a = asarray_chkfinite(a) b = numpy.identity(a.shape[0], dtype=a.dtype) @@ -473,9 +604,39 @@ _array_precision = {'f': 0, 'd': 1, 'F': 0, 'D': 1} def pinv2(a, cond=None, rcond=None): - """ pinv2(a, rcond=None) -> a_pinv + """Compute the (Moore-Penrose) pseudo-inverse of a matrix. + + Calculate a generalized inverse of a matrix using its + singular-value decomposition and including all 'large' singular + values. + + Parameters + ---------- + a : array, shape (M, N) + Matrix to be pseudo-inverted + cond, rcond : float or None + Cutoff for 'small' singular values. + Singular values smaller than rcond*largest_singular_value are + considered zero. - Compute the generalized inverse of A using svd. + If None or -1, suitable machine precision is used. + + Returns + ------- + B : array, shape (N, M) + + Raises LinAlgError if SVD computation does not converge + + Examples + -------- + >>> from numpy import * + >>> a = random.randn(9, 6) + >>> B = linalg.pinv2(a) + >>> allclose(a, dot(a, dot(B, a))) + True + >>> allclose(B, dot(B, dot(a, B))) + True + """ a = asarray_chkfinite(a) u, s, vh = decomp.svd(a) @@ -498,8 +659,37 @@ #----------------------------------------------------------------------------- def tri(N, M=None, k=0, dtype=None): - """ returns a N-by-M matrix where all the diagonals starting from - lower left corner up to the k-th are all ones. + """Construct (N, M) matrix filled with ones at and below the k-th diagonal. + + The matrix has A[i,j] == 1 for i <= j + k + + Parameters + ---------- + N : integer + M : integer + Size of the matrix. If M is None, M == N is assumed. + k : integer + Number of subdiagonal below which matrix is filled with ones. 
+ k == 0 is the main diagonal, k < 0 subdiagonal and k > 0 superdiagonal. + dtype : dtype + Data type of the matrix. + + Returns + ------- + A : array, shape (N, M) + + Examples + -------- + >>> from scipy.linalg import tri + >>> tri(3, 5, 2, dtype=int) + array([[1, 1, 1, 0, 0], + [1, 1, 1, 1, 0], + [1, 1, 1, 1, 1]]) + >>> tri(3, 5, -1, dtype=int) + array([[0, 0, 0, 0, 0], + [1, 0, 0, 0, 0], + [1, 1, 0, 0, 0]]) + """ if M is None: M = N if type(M) == type('d'): @@ -514,8 +704,29 @@ return m.astype(dtype) def tril(m, k=0): - """ returns the elements on and below the k-th diagonal of m. k=0 is the - main diagonal, k > 0 is above and k < 0 is below the main diagonal. + """Construct a copy of a matrix with elements above the k-th diagonal zeroed. + + Parameters + ---------- + m : array + Matrix whose elements to return + k : integer + Diagonal above which to zero elements. + k == 0 is the main diagonal, k < 0 subdiagonal and k > 0 superdiagonal. + + Returns + ------- + A : array, shape m.shape, dtype m.dtype + + Examples + -------- + >>> from scipy.linalg import tril + >>> tril([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1) + array([[ 0, 0, 0], + [ 4, 0, 0], + [ 7, 8, 0], + [10, 11, 12]]) + """ svsp = getattr(m,'spacesaver',lambda:0)() m = asarray(m) @@ -524,8 +735,29 @@ return out def triu(m, k=0): - """ returns the elements on and above the k-th diagonal of m. k=0 is the - main diagonal, k > 0 is above and k < 0 is below the main diagonal. + """Construct a copy of a matrix with elements below the k-th diagonal zeroed. + + Parameters + ---------- + m : array + Matrix whose elements to return + k : integer + Diagonal below which to zero elements. + k == 0 is the main diagonal, k < 0 subdiagonal and k > 0 superdiagonal. + + Returns + ------- + A : array, shape m.shape, dtype m.dtype + + Examples + -------- + >>> from scipy.linalg import tril + >>> triu([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1) + array([[ 1, 2, 3], + [ 4, 5, 6], + [ 0, 8, 9], + [ 0, 0, 12]]) + """ svsp = getattr(m,'spacesaver',lambda:0)() m = asarray(m) @@ -534,16 +766,36 @@ return out def toeplitz(c,r=None): - """ Construct a toeplitz matrix (i.e. a matrix with constant diagonals). - - Description: - - toeplitz(c,r) is a non-symmetric Toeplitz matrix with c as its first - column and r as its first row. - - toeplitz(c) is a symmetric (Hermitian) Toeplitz matrix (r=c). - - See also: hankel + """Construct a Toeplitz matrix. + + The Toepliz matrix has constant diagonals, c as its first column, + and r as its first row (if not given, r == c is assumed). + + Parameters + ---------- + c : array + First column of the matrix + r : array + First row of the matrix. If None, r == c is assumed. + + Returns + ------- + A : array, shape (len(c), len(r)) + Constructed Toeplitz matrix. + dtype is the same as (c[0] + r[0]).dtype + + Examples + -------- + >>> from scipy.linalg import toeplitz + >>> toeplitz([1,2,3], [1,4,5,6]) + array([[1, 4, 5, 6], + [2, 1, 4, 5], + [3, 2, 1, 4]]) + + See also + -------- + hankel : Hankel matrix + """ isscalar = numpy.isscalar if isscalar(c) or isscalar(r): @@ -566,17 +818,37 @@ def hankel(c,r=None): - """ Construct a hankel matrix (i.e. matrix with constant anti-diagonals). - - Description: - - hankel(c,r) is a Hankel matrix whose first column is c and whose - last row is r. - - hankel(c) is a square Hankel matrix whose first column is C. - Elements below the first anti-diagonal are zero. - - See also: toeplitz + """Construct a Hankel matrix. 
+ + The Hankel matrix has constant anti-diagonals, c as its first column, + and r as its last row (if not given, r == 0 os assumed). + + Parameters + ---------- + c : array + First column of the matrix + r : array + Last row of the matrix. If None, r == 0 is assumed. + + Returns + ------- + A : array, shape (len(c), len(r)) + Constructed Hankel matrix. + dtype is the same as (c[0] + r[0]).dtype + + Examples + -------- + >>> from scipy.linalg import hankel + >>> hankel([1,2,3,4], [4,7,7,8,9]) + array([[1, 2, 3, 4, 7], + [2, 3, 4, 7, 7], + [3, 4, 7, 7, 8], + [4, 7, 7, 8, 9]]) + + See also + -------- + toeplitz : Toeplitz matrix + """ isscalar = numpy.isscalar if isscalar(c) or isscalar(r): @@ -599,12 +871,32 @@ return map(Matrix,args) def kron(a,b): - """kronecker product of a and b + """Kronecker product of a and b. - Kronecker product of two matrices is block matrix - [[ a[ 0 ,0]*b, a[ 0 ,1]*b, ... , a[ 0 ,n-1]*b ], - [ ... ... ], - [ a[m-1,0]*b, a[m-1,1]*b, ... , a[m-1,n-1]*b ]] + The result is the block matrix:: + + a[0,0]*b a[0,1]*b ... a[0,-1]*b + a[1,0]*b a[1,1]*b ... a[1,-1]*b + ... + a[-1,0]*b a[-1,1]*b ... a[-1,-1]*b + + Parameters + ---------- + a : array, shape (M, N) + b : array, shape (P, Q) + + Returns + ------- + A : array, shape (M*P, N*Q) + Kronecker product of a and b + + Examples + -------- + >>> from scipy import kron, array + >>> kron(array([[1,2],[3,4]]), array([[1,1,1]])) + array([[1, 1, 1, 2, 2, 2], + [3, 3, 3, 4, 4, 4]]) + """ if not a.flags['CONTIGUOUS']: a = reshape(a, a.shape) Modified: trunk/scipy/linalg/blas.py =================================================================== --- trunk/scipy/linalg/blas.py 2008-02-20 05:39:03 UTC (rev 3951) +++ trunk/scipy/linalg/blas.py 2008-02-21 01:57:37 UTC (rev 3952) @@ -22,12 +22,15 @@ _inv_type_conv = {'s':'f','d':'d','c':'F','z':'D'} def has_column_major_storage(arr): + """Is array stored in column-major format""" return arr.flags['FORTRAN'] def get_blas_funcs(names,arrays=(),debug=0): """Return available BLAS function objects with names. arrays are used to determine the optimal prefix of - BLAS routines.""" + BLAS routines. + + """ ordering = [] for i in range(len(arrays)): t = arrays[i].dtype.char Modified: trunk/scipy/linalg/decomp.py =================================================================== --- trunk/scipy/linalg/decomp.py 2008-02-20 05:39:03 UTC (rev 3951) +++ trunk/scipy/linalg/decomp.py 2008-02-21 01:57:37 UTC (rev 3952) @@ -98,32 +98,54 @@ return w, vr def eig(a,b=None, left=False, right=True, overwrite_a=False, overwrite_b=False): - """ Solve ordinary and generalized eigenvalue problem - of a square matrix. + """Solve an ordinary or generalized eigenvalue problem of a square matrix. - Inputs: + Find eigenvalues w and right or left eigenvectors of a general matrix:: + + a vr[:,i] = w[i] b vr[:,i] + a.H vl[:,i] = w[i].conj() b.H vl[:,i] + + where .H is the Hermitean conjugation. + + Parameters + ---------- + a : array, shape (M, M) + A complex or real matrix whose eigenvalues and eigenvectors + will be computed. + b : array, shape (M, M) + Right-hand side matrix in a generalized eigenvalue problem. + If omitted, identity matrix is assumed. 
+ left : boolean + Whether to calculate and return left eigenvectors + right : boolean + Whether to calculate and return right eigenvectors + + overwrite_a : boolean + Whether to overwrite data in a (may improve performance) + overwrite_b : boolean + Whether to overwrite data in b (may improve performance) + + Returns + ------- + w : double or complex array, shape (M,) + The eigenvalues, each repeated according to its multiplicity. - a -- An N x N matrix. - b -- An N x N matrix [default is identity(N)]. - left -- Return left eigenvectors [disabled]. - right -- Return right eigenvectors [enabled]. - overwrite_a, overwrite_b -- save space by overwriting the a and/or - b matrices (both False by default) + (if left == True) + vl : double or complex array, shape (M, M) + The normalized left eigenvector corresponding to the eigenvalue w[i] + is the column v[:,i]. + + (if right == True) + vr : double or complex array, shape (M, M) + The normalized right eigenvector corresponding to the eigenvalue w[i] + is the column vr[:,i]. + + Raises LinAlgError if eigenvalue computation does not converge - Outputs: - - w -- eigenvalues [left==right==False]. - w,vr -- w and right eigenvectors [left==False,right=True]. - w,vl -- w and left eigenvectors [left==True,right==False]. - w,vl,vr -- [left==right==True]. - - Definitions: - - a * vr[:,i] = w[i] * b * vr[:,i] - - a^H * vl[:,i] = conjugate(w[i]) * b^H * vl[:,i] - - where a^H denotes transpose(conjugate(a)). + See Also + -------- + eigh : eigenvalues and right eigenvectors for symmetric/Hermitian arrays + """ a1 = asarray_chkfinite(a) if len(a1.shape) != 2 or a1.shape[0] != a1.shape[1]: @@ -182,28 +204,44 @@ return w, vr def eigh(a, lower=True, eigvals_only=False, overwrite_a=False): - """ Solve real symmetric or complex hermitian eigenvalue problem. + """Solve the eigenvalue problem for a Hermitian or real symmetric matrix. - Inputs: + Find eigenvalues w and optionally right eigenvectors v of a:: + + a v[:,i] = w[i] v[:,i] + v.H v = identity + + Parameters + ---------- + a : array, shape (M, M) + A complex Hermitian or symmetric real matrix whose eigenvalues + and eigenvectors will be computed. + lower : boolean + Whether the pertinent array data is taken from the lower or upper + triangle of a. (Default: lower) + eigvals_only : boolean + Whether to calculate only eigenvalues and no eigenvectors. + (Default: both are calculated) + overwrite_a : boolean + Whether data in a is overwritten (may improve performance). + + Returns + ------- + w : double array, shape (M,) + The eigenvalues, in ascending order, each repeated according to its + multiplicity. + + (if eigvals_only == False) + v : double or complex double array, shape (M, M) + The normalized eigenvector corresponding to the eigenvalue w[i] is + the column v[:,i]. + + Raises LinAlgError if eigenvalue computation does not converge - a -- A hermitian N x N matrix. - lower -- values in a are read from lower triangle - [True: UPLO='L' (default) / False: UPLO='U'] - eigvals_only -- don't compute eigenvectors. 
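A minimal usage sketch of the eig and eigh drivers documented above (standard problem, b omitted; the matrix is illustrative):

    import numpy as np
    from scipy.linalg import eig, eigh

    a = np.array([[2., 1.],
                  [1., 3.]])

    w, vr = eig(a)                    # general driver: w may be complex, in no particular order
    wh, v = eigh(a, lower=True)       # symmetric/Hermitian driver: real w, ascending order
    assert np.allclose(np.dot(a, v), v * wh)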
- overwrite_a -- content of a may be destroyed - - Outputs: - - For eigvals_only == False (the default), - w,v -- w: eigenvalues, v: eigenvectors - For eigvals_only == True, - w -- eigenvalues - - Definitions: - - a * v[:,i] = w[i] * vr[:,i] - v.H * v = identity - + See Also + -------- + eig : eigenvalues and right eigenvectors for non-symmetric arrays + """ if eigvals_only or overwrite_a: a1 = asarray_chkfinite(a) @@ -258,43 +296,74 @@ def eig_banded(a_band, lower=0, eigvals_only=0, overwrite_a_band=0, select='a', select_range=None, max_ev = 0): - """ Solve real symmetric or complex hermetian band matrix problem. + """Solve real symmetric or complex hermetian band matrix eigenvalue problem. - Inputs: + Find eigenvalues w and optionally right eigenvectors v of a:: + + a v[:,i] = w[i] v[:,i] + v.H v = identity + + The matrix a is stored in ab either in lower diagonal or upper + diagonal ordered form: - a_band -- A hermitian N x M matrix in 'packed storage'. - Packed storage looks like this: ('upper form') - [ ... (more off-diagonals) ..., - [ * * a13 a24 a35 a46 ... a(n-2)(n)], - [ * a12 a23 a34 a45 a56 ... a(n-1)(n)], - [ a11 a22 a33 a44 a55 a66 ... a(n)(n) ]] - The cells denoted with * may contain anything. - lower -- a is in lower packed storage - (default: upper packed form) - eigvals_only -- if True, don't compute eigenvectors. - overwrite_a_band -- content of a may be destroyed - select -- 'a', 'all', 0 : return all eigenvalues/vectors - 'v', 'value', 1 : eigenvalues in the interval (min, max] - will be found - 'i', 'index', 2 : eigenvalues with index [min...max] - will be found - select_range -- select == 'v': eigenvalue limits as tuple (min, max) - select == 'i': index limits as tuple (min, max) - select == 'a': meaningless - max_ev -- select == 'v': set to max. number of eigenvalues that is - expected. In doubt, leave untouched. - select == 'i', 'a': meaningless + ab[u + i - j, j] == a[i,j] (if upper form; i <= j) + ab[ i - j, j] == a[i,j] (if lower form; i >= j) - Outputs: + Example of ab (shape of a is (6,6), u=2):: - w,v -- w: eigenvalues, v: eigenvectors [for eigvals_only == False] - w -- eigenvalues [for eigvals_only == True]. + upper form: + * * a02 a13 a24 a35 + * a01 a12 a23 a34 a45 + a00 a11 a22 a33 a44 a55 + + lower form: + a00 a11 a22 a33 a44 a55 + a10 a21 a32 a43 a54 * + a20 a31 a42 a53 * * - Definitions: + Cells marked with * are not used. - a_full * v[:,i] = w[i] * v[:,i] (with full matrix corresponding to a) - v.H * v = identity + Parameters + ---------- + a_band : array, shape (M, u+1) + Banded matrix whose eigenvalues to calculate + lower : boolean + Is the matrix in the lower form. (Default is upper form) + eigvals_only : boolean + Compute only the eigenvalues and no eigenvectors. + (Default: calculate also eigenvectors) + overwrite_a_band: + Discard data in a_band (may enhance performance) + select: {'a', 'v', 'i'} + Which eigenvalues to calculate + ====== ======================================== + select calculated + ====== ======================================== + 'a' All eigenvalues + 'v' Eigenvalues in the interval (min, max] + 'i' Eigenvalues with indices min <= i <= max + ====== ======================================== + select_range : (min, max) + Range of selected eigenvalues + max_ev : integer + For select=='v', maximum number of eigenvalues expected. + For other values of select, has no meaning. + + In doubt, leave this parameter untouched. 
+ + Returns + ------- + w : array, shape (M,) + The eigenvalues, in ascending order, each repeated according to its + multiplicity. + + v : double or complex double array, shape (M, M) + The normalized eigenvector corresponding to the eigenvalue w[i] is + the column v[:,i]. + + Raises LinAlgError if eigenvalue computation does not converge + """ if eigvals_only or overwrite_a_band: a1 = asarray_chkfinite(a_band) @@ -374,32 +443,180 @@ return w, v def eigvals(a,b=None,overwrite_a=0): - """Return eigenvalues of square matrix.""" + """Compute eigenvalues from an ordinary or generalized eigenvalue problem. + + Find eigenvalues of a general matrix:: + + a vr[:,i] = w[i] b vr[:,i] + + Parameters + ---------- + a : array, shape (M, M) + A complex or real matrix whose eigenvalues and eigenvectors + will be computed. + b : array, shape (M, M) + Right-hand side matrix in a generalized eigenvalue problem. + If omitted, identity matrix is assumed. + overwrite_a : boolean + Whether to overwrite data in a (may improve performance) + + Returns + ------- + w : double or complex array, shape (M,) + The eigenvalues, each repeated according to its multiplicity, + but not in any specific order. + + Raises LinAlgError if eigenvalue computation does not converge + + See Also + -------- + eigvalsh : eigenvalues of symmetric or Hemitiean arrays + eig : eigenvalues and right eigenvectors of general arrays + eigh : eigenvalues and eigenvectors of symmetric/Hermitean arrays. + + """ return eig(a,b=b,left=0,right=0,overwrite_a=overwrite_a) def eigvalsh(a,lower=1,overwrite_a=0): - """Return eigenvalues of hermitean or real symmetric matrix.""" + """Solve the eigenvalue problem for a Hermitian or real symmetric matrix. + + Find eigenvalues w of a:: + + a v[:,i] = w[i] v[:,i] + v.H v = identity + + Parameters + ---------- + a : array, shape (M, M) + A complex Hermitian or symmetric real matrix whose eigenvalues + and eigenvectors will be computed. + lower : boolean + Whether the pertinent array data is taken from the lower or upper + triangle of a. (Default: lower) + overwrite_a : boolean + Whether data in a is overwritten (may improve performance). + + Returns + ------- + w : double array, shape (M,) + The eigenvalues, in ascending order, each repeated according to its + multiplicity. + + Raises LinAlgError if eigenvalue computation does not converge + + See Also + -------- + eigvals : eigenvalues of general arrays + eigh : eigenvalues and right eigenvectors for symmetric/Hermitian arrays + eig : eigenvalues and right eigenvectors for non-symmetric arrays + + """ return eigh(a,lower=lower,eigvals_only=1,overwrite_a=overwrite_a) def eigvals_banded(a_band,lower=0,overwrite_a_band=0, select='a', select_range=None): - """Return eigenvalues of hermitean or real symmetric matrix.""" + """Solve real symmetric or complex hermetian band matrix eigenvalue problem. + + Find eigenvalues w of a:: + + a v[:,i] = w[i] v[:,i] + v.H v = identity + + The matrix a is stored in ab either in lower diagonal or upper + diagonal ordered form: + + ab[u + i - j, j] == a[i,j] (if upper form; i <= j) + ab[ i - j, j] == a[i,j] (if lower form; i >= j) + + Example of ab (shape of a is (6,6), u=2):: + + upper form: + * * a02 a13 a24 a35 + * a01 a12 a23 a34 a45 + a00 a11 a22 a33 a44 a55 + + lower form: + a00 a11 a22 a33 a44 a55 + a10 a21 a32 a43 a54 * + a20 a31 a42 a53 * * + + Cells marked with * are not used. 
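The upper-form mapping above can be cross-checked against a dense eigenvalue routine. The 3x3 tridiagonal matrix below is made up for illustration and is not part of the commit; with u = 1, row 0 of a_band carries the superdiagonal (its first cell is unused) and row 1 the diagonal:

>>> from numpy import array, allclose
>>> from scipy.linalg import eigvals_banded, eigvalsh
>>> a = array([[ 2., -1.,  0.],
...            [-1.,  2., -1.],
...            [ 0., -1.,  2.]])
>>> a_band = array([[ 0., -1., -1.],
...                 [ 2.,  2.,  2.]])     # upper form, u = 1
>>> allclose(eigvals_banded(a_band), eigvalsh(a))
True

Both routines return the eigenvalues in ascending order, so an elementwise comparison is enough.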
+ + Parameters + ---------- + a_band : array, shape (M, u+1) + Banded matrix whose eigenvalues to calculate + lower : boolean + Is the matrix in the lower form. (Default is upper form) + overwrite_a_band: + Discard data in a_band (may enhance performance) + select: {'a', 'v', 'i'} + Which eigenvalues to calculate + + ====== ======================================== + select calculated + ====== ======================================== + 'a' All eigenvalues + 'v' Eigenvalues in the interval (min, max] + 'i' Eigenvalues with indices min <= i <= max + ====== ======================================== + select_range : (min, max) + Range of selected eigenvalues + + Returns + ------- + w : array, shape (M,) + The eigenvalues, in ascending order, each repeated according to its + multiplicity. + + Raises LinAlgError if eigenvalue computation does not converge + + See Also + -------- + eig_banded : eigenvalues and right eigenvectors for symmetric/Hermitian band matrices + eigvals : eigenvalues of general arrays + eigh : eigenvalues and right eigenvectors for symmetric/Hermitian arrays + eig : eigenvalues and right eigenvectors for non-symmetric arrays + + """ return eig_banded(a_band,lower=lower,eigvals_only=1, overwrite_a_band=overwrite_a_band, select=select, select_range=select_range) def lu_factor(a, overwrite_a=0): - """Return raw LU decomposition of a matrix and pivots, for use in solving - a system of linear equations. + """Compute pivoted LU decomposition of a matrix. + + The decomposition is:: - Inputs: + A = P L U - a --- an NxN matrix + where P is a permutation matrix, L lower triangular with unit + diagonal elements, and U upper triangular. + + Parameters + ---------- + a : array, shape (M, M) + Matrix to decompose + overwrite_a : boolean + Whether to overwrite data in A (may increase performance) - Outputs: + Returns + ------- + lu : array, shape (N, N) + Matrix containing U in its upper triangle, and L in its lower triangle. + The unit diagonal elements of L are not stored. + piv : array, shape (N,) + Pivot indices representing the permutation matrix P: + row i of matrix was interchanged with row piv[i]. - lu --- the lu factorization matrix - piv --- an array of pivots + See also + -------- + lu_solve : solve an equation system using the LU factorization of a matrix + + Notes + ----- + This is a wrapper to the *GETRF routines from LAPACK. + """ a1 = asarray(a) if len(a1.shape) != 2 or (a1.shape[0] != a1.shape[1]): @@ -414,8 +631,24 @@ return lu, piv def lu_solve(a_lu_pivots,b): - """Solve a previously factored system. First input is a tuple (lu, pivots) - which is the output to lu_factor. Second input is the right hand side. + """Solve an equation system, a x = b, given the LU factorization of a + + Parameters + ---------- + (lu, piv) + Factorization of the coefficient matrix a, as given by lu_factor + b : array + Right-hand side + + Returns + ------- + x : array + Solution to the system + + See also + -------- + lu_factor : LU factorize a matrix + """ a_lu, pivots = a_lu_pivots a_lu = asarray_chkfinite(a_lu) @@ -435,28 +668,46 @@ def lu(a,permute_l=0,overwrite_a=0): - """Return LU decompostion of a matrix. + """Compute pivoted LU decompostion of a matrix. - Inputs: + The decomposition is:: - a -- An M x N matrix. - permute_l -- Perform matrix multiplication p * l [disabled]. + A = P L U - Outputs: + where P is a permutation matrix, L lower triangular with unit + diagonal elements, and U upper triangular. 
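The factor-then-solve workflow documented for lu_factor and lu_solve above, and the p, l, u form returned by lu, can be exercised with a small made-up system (the values below are illustrative only, not from the commit):

>>> from numpy import array, allclose, dot
>>> from scipy.linalg import lu_factor, lu_solve, lu
>>> a = array([[4., 3.],
...            [6., 3.]])
>>> b = array([10., 12.])
>>> lu_piv = lu_factor(a)             # (lu, piv), reusable for many right-hand sides
>>> x = lu_solve(lu_piv, b)
>>> allclose(dot(a, x), b)
True
>>> p, l, u = lu(a)
>>> allclose(a, dot(p, dot(l, u)))    # A = P L U
True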
+ + Parameters + ---------- + a : array, shape (M, N) + Array to decompose + permute_l : boolean + Perform the multiplication P*L (Default: do not permute) + overwrite_a : boolean + Whether to overwrite data in a (may improve performance) - p,l,u -- LU decomposition matrices of a [permute_l=0] - pl,u -- LU decomposition matrices of a [permute_l=1] - - Definitions: - - a = p * l * u [permute_l=0] - a = pl * u [permute_l=1] - - p - An M x M permutation matrix - l - An M x K lower triangular or trapezoidal matrix - with unit-diagonal - u - An K x N upper triangular or trapezoidal matrix - K = min(M,N) + Returns + ------- + (If permute_l == False) + p : array, shape (M, M) + Permutation matrix + l : array, shape (M, K) + Lower triangular or trapezoidal matrix with unit diagonal. + K = min(M, N) + u : array, shape (K, N) + Upper triangular or trapezoidal matrix + + (If permute_l == True) + pl : array, shape (M, K) + Permuted L matrix. + K = min(M, N) + u : array, shape (K, N) + Upper triangular or trapezoidal matrix + + Notes + ----- + This is a LU factorization routine written for Scipy. + """ a1 = asarray_chkfinite(a) if len(a1.shape) != 2: @@ -471,29 +722,60 @@ return p,l,u def svd(a,full_matrices=1,compute_uv=1,overwrite_a=0): - """Compute singular value decomposition (SVD) of matrix a. + """Singular Value Decomposition. - Description: + Factorizes the matrix a into two unitary matrices U and Vh and + an 1d-array s of singular values (real, non-negative) such that + a == U S Vh if S is an suitably shaped matrix of zeros whose + main diagonal is s. + + Parameters + ---------- + a : array, shape (M, N) + Matrix to decompose + full_matrices : boolean + If true, U, Vh are shaped (M,M), (N,N) + If false, the shapes are (M,K), (K,N) where K = min(M,N) + compute_uv : boolean + Whether to compute also U, Vh in addition to s (Default: true) + overwrite_a : boolean + Whether data in a is overwritten (may improve performance) + + Returns + ------- + U: array, shape (M,M) or (M,K) depending on full_matrices + s: array, shape (K,) + The singular values, sorted so that s[i] >= s[i+1]. K = min(M, N) + Vh: array, shape (N,N) or (K,N) depending on full_matrices - Singular value decomposition of a matrix a is - a = u * sigma * v^H, - where v^H denotes conjugate(transpose(v)), u,v are unitary - matrices, sigma is zero matrix with a main diagonal containing - real non-negative singular values of the matrix a. + For compute_uv = False, only s is returned. - Inputs: + Raises LinAlgError if SVD computation does not converge - a -- An M x N matrix. - compute_uv -- If zero, then only the vector of singular values - is returned. + Examples + -------- + >>> from scipy import random, linalg, allclose, dot + >>> a = random.randn(9, 6) + 1j*random.randn(9, 6) + >>> U, s, Vh = linalg.svd(a) + >>> U.shape, Vh.shape, s.shape + ((9, 9), (6, 6), (6,)) + + >>> U, s, Vh = linalg.svd(a, full_matrices=False) + >>> U.shape, Vh.shape, s.shape + ((9, 6), (6, 6), (6,)) + >>> S = linalg.diagsvd(s, 6, 6) + >>> allclose(a, dot(U, dot(S, Vh))) + True + + >>> s2 = linalg.svd(a, compute_uv=False) + >>> allclose(s, s2) + True - Outputs: - - u -- An M x M unitary matrix [compute_uv=1]. - s -- An min(M,N) vector of singular values in descending order, - sigma = diagsvd(s). - vh -- An N x N unitary matrix [compute_uv=1], vh = v^H. - + See also + -------- + svdvals : return singular values of a matrix + diagsvd : return the Sigma matrix, given the vector s + """ # A hack until full_matrices == 0 support is fixed here. 
if full_matrices == 0: @@ -520,11 +802,47 @@ return s def svdvals(a,overwrite_a=0): - """Return singular values of a matrix.""" + """Compute singular values of a matrix. + + Parameters + ---------- + a : array, shape (M, N) + Matrix to decompose + overwrite_a : boolean + Whether data in a is overwritten (may improve performance) + + Returns + ------- + s: array, shape (K,) + The singular values, sorted so that s[i] >= s[i+1]. K = min(M, N) + + Raises LinAlgError if SVD computation does not converge + + See also + -------- + svd : return the full singular value decomposition of a matrix + diagsvd : return the Sigma matrix, given the vector s + + """ return svd(a,compute_uv=0,overwrite_a=overwrite_a) def diagsvd(s,M,N): - """Return sigma from singular values and original size M,N.""" + """Construct the sigma matrix in SVD from singular values and size M,N. + + Parameters + ---------- + s : array, shape (M,) or (N,) + Singular values + M : integer + N : integer + Size of the matrix whose singular values are s + + Returns + ------- + S : array, shape (M, N) + The S-matrix in the singular value decomposition + + """ part = diag(s) typ = part.dtype.char MorN = len(s) @@ -536,14 +854,40 @@ raise ValueError, "Length of s must be M or N." def cholesky(a,lower=0,overwrite_a=0): - """Compute Cholesky decomposition of matrix. - - Description: - - For a hermitian positive-definite matrix a return the - upper-triangular (or lower-triangular if lower==1) matrix, - u such that u^H * u = a (or l * l^H = a). - + """Compute the Cholesky decomposition of a matrix. + + Returns the Cholesky decomposition, :lm:`A = L L^*` or :lm:`A = U^* U` + of a Hermitian positive-definite matrix :lm:`A`. + + Parameters + ---------- + a : array, shape (M, M) + Matrix to be decomposed + lower : boolean + Whether to compute the upper or lower triangular Cholesky factorization + (Default: upper-triangular) + overwrite_a : boolean + Whether to overwrite data in a (may improve performance) + + Returns + ------- + B : array, shape (M, M) + Upper- or lower-triangular Cholesky factor of A + + Raises LinAlgError if decomposition fails + + Examples + -------- + >>> from scipy import array, linalg, dot + >>> a = array([[1,-2j],[2j,5]]) + >>> L = linalg.cholesky(a, lower=True) + >>> L + array([[ 1.+0.j, 0.+0.j], + [ 0.+2.j, 1.+0.j]]) + >>> dot(L, L.T.conj()) + array([[ 1.+0.j, 0.-2.j], + [ 0.+2.j, 5.+0.j]]) + """ a1 = asarray_chkfinite(a) if len(a1.shape) != 2 or a1.shape[0] != a1.shape[1]: @@ -557,8 +901,32 @@ return c def cho_factor(a, lower=0, overwrite_a=0): - """ Compute Cholesky decomposition of matrix and return an object - to be used for solving a linear system using cho_solve. + """Compute the Cholesky decomposition of a matrix, to use in cho_solve + + Returns the Cholesky decomposition, :lm:`A = L L^*` or :lm:`A = U^* U` + of a Hermitian positive-definite matrix :lm:`A`. + + The return value can be directly used as the first parameter to cho_solve. 
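As the note above says, the (c, lower) pair returned by cho_factor feeds straight into cho_solve. A small illustrative system (the matrix below is an arbitrary symmetric positive-definite example, not taken from the commit):

>>> from numpy import array, allclose, dot
>>> from scipy.linalg import cho_factor, cho_solve
>>> a = array([[4., 2.],
...            [2., 3.]])
>>> b = array([6., 5.])
>>> c_and_lower = cho_factor(a)
>>> x = cho_solve(c_and_lower, b)
>>> allclose(dot(a, x), b)
True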
+ + Parameters + ---------- + a : array, shape (M, M) + Matrix to be decomposed + lower : boolean + Whether to compute the upper or lower triangular Cholesky factorization + (Default: upper-triangular) + overwrite_a : boolean + Whether to overwrite data in a (may improve performance) + + Returns + ------- + c : array, shape (M, M) + Upper- or lower-triangular Cholesky factor of A + lower : array, shape (M, M) + Flag indicating whether the factor is lower or upper triangular + + Raises LinAlgError if decomposition fails + """ a1 = asarray_chkfinite(a) if len(a1.shape) != 2 or a1.shape[0] != a1.shape[1]: @@ -573,8 +941,29 @@ def cho_solve(clow, b): """Solve a previously factored symmetric system of equations. + + The equation system is + + A x = b, A = U^H U = L L^H + + and A is real symmetric or complex Hermitian. + + Parameters + ---------- + clow : tuple (c, lower) + Cholesky factor and a flag indicating whether it is lower triangular. + The return value from cho_factor can be used. + b : array + Right-hand side of the equation system + First input is a tuple (LorU, lower) which is the output to cho_factor. Second input is the right-hand side. + + Returns + ------- + x : array + Solution to the equation system + """ c, lower = clow c = asarray_chkfinite(c) @@ -591,29 +980,60 @@ return b def qr(a,overwrite_a=0,lwork=None,econ=False,mode='qr'): - """QR decomposition of an M x N matrix a. + """Compute QR decomposition of a matrix. - Description: + Calculate the decomposition :lm:`A = Q R` where Q is unitary/orthogonal + and R upper triangular. - Find a unitary (orthogonal) matrix, q, and an upper-triangular - matrix r such that q * r = a + Parameters + ---------- + a : array, shape (M, N) + Matrix to be decomposed + overwrite_a : boolean + Whether data in a is overwritten (may improve performance) + lwork : integer + Work array size, lwork >= a.shape[1]. If None or -1, an optimal size + is computed. + econ : boolean + Whether to compute the economy-size QR decomposition, making shapes + of Q and R (M, K) and (K, N) instead of (M,M) and (M,N). K=min(M,N) + mode : {'qr', 'r'} + Determines what information is to be returned: either both Q and R + or only R. + + Returns + ------- + (if mode == 'qr') + Q : double or complex array, shape (M, M) or (M, K) for econ==True - Inputs: + (for any mode) + R : double or complex array, shape (M, N) or (K, N) for econ==True + Size K = min(M, N) - a -- the matrix - overwrite_a=0 -- if non-zero then discard the contents of a, - i.e. a is used as a work array if possible. + Raises LinAlgError if decomposition fails - lwork=None -- >= shape(a)[1]. If None (or -1) compute optimal - work array size. - econ=False -- computes the skinny or economy-size QR decomposition - only useful when M>N - mode='qr' -- if 'qr' then return both q and r; if 'r' then just return r + Notes + ----- + This is an interface to the LAPACK routines dgeqrf, zgeqrf, + dorgqr, and zungqr. - Outputs: - q,r - if mode=='qr' - r - if mode=='r' + Examples + -------- + >>> from scipy import random, linalg, dot + >>> a = random.randn(9, 6) + >>> q, r = linalg.qr(a) + >>> allclose(a, dot(q, r)) + True + >>> q.shape, r.shape + ((9, 9), (9, 6)) + >>> r2 = linalg.qr(a, mode='r') + >>> allclose(r, r2) + + >>> q3, r3 = linalg.qr(a, econ=True) + >>> q3.shape, r3.shape + ((9, 6), (6, 6)) + """ a1 = asarray_chkfinite(a) if len(a1.shape) != 2: @@ -674,26 +1094,29 @@ def qr_old(a,overwrite_a=0,lwork=None): - """QR decomposition of an M x N matrix a. + """Compute QR decomposition of a matrix. 
- Description: + Calculate the decomposition :lm:`A = Q R` where Q is unitary/orthogonal + and R upper triangular. - Find a unitary (orthogonal) matrix, q, and an upper-triangular - matrix r such that q * r = a + Parameters + ---------- + a : array, shape (M, N) + Matrix to be decomposed + overwrite_a : boolean + Whether data in a is overwritten (may improve performance) + lwork : integer + Work array size, lwork >= a.shape[1]. If None or -1, an optimal size + is computed. + + Returns + ------- + Q : double or complex array, shape (M, M) + R : double or complex array, shape (M, N) + Size K = min(M, N) - Inputs: - - a -- the matrix - overwrite_a=0 -- if non-zero then discard the contents of a, - i.e. a is used as a work array if possible. - - lwork=None -- >= shape(a)[1]. If None (or -1) compute optimal - work array size. - - Outputs: - - q, r -- matrices such that q * r = a - + Raises LinAlgError if decomposition fails + """ a1 = asarray_chkfinite(a) if len(a1.shape) != 2: @@ -725,26 +1148,30 @@ def rq(a,overwrite_a=0,lwork=None): - """RQ decomposition of an M x N matrix a. + """Compute RQ decomposition of a square real matrix. - Description: + Calculate the decomposition :lm:`A = R Q` where Q is unitary/orthogonal + and R upper triangular. - Find an upper-triangular matrix r and a unitary (orthogonal) - matrix q such that r * q = a - - Inputs: - - a -- the matrix - overwrite_a=0 -- if non-zero then discard the contents of a, - i.e. a is used as a work array if possible. - - lwork=None -- >= shape(a)[1]. If None (or -1) compute optimal - work array size. - - Outputs: - - r, q -- matrices such that r * q = a - + Parameters + ---------- + a : array, shape (M, M) + Square real matrix to be decomposed + overwrite_a : boolean + Whether data in a is overwritten (may improve performance) + lwork : integer + Work array size, lwork >= a.shape[1]. If None or -1, an optimal size + is computed. + econ : boolean + + Returns + ------- + R : double array, shape (M, N) or (K, N) for econ==True + Size K = min(M, N) + Q : double or complex array, shape (M, M) or (M, K) for econ==True + + Raises LinAlgError if decomposition fails + """ # TODO: implement support for non-square and complex arrays a1 = asarray_chkfinite(a) @@ -783,13 +1210,40 @@ _double_precision = ['i','l','d'] def schur(a,output='real',lwork=None,overwrite_a=0): - """Compute Schur decomposition of matrix a. + """Compute Schur decomposition of a matrix. + + The Schur decomposition is + + A = Z T Z^H + + where Z is unitary and T is either upper-triangular, or for real + Schur decomposition (output='real'), quasi-upper triangular. In + the quasi-triangular form, 2x2 blocks describing complex-valued + eigenvalue pairs may extrude from the diagonal. + + Parameters + ---------- + a : array, shape (M, M) + Matrix to decompose + output : {'real', 'complex'} + Construct the real or complex Schur decomposition (for real matrices). + lwork : integer + Work array size. If None or -1, it is automatically computed. + overwrite_a : boolean + Whether to overwrite data in a (may improve performance) - Description: + Returns + ------- + T : array, shape (M, M) + Schur form of A. It is real-valued for the real Schur decomposition. + Z : array, shape (M, M) + An unitary Schur transformation matrix for A. + It is real-valued for the real Schur decomposition. 
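The reconstruction A = Z T Z^H and the real/complex output distinction can be verified on a small rotation-like matrix (chosen here purely for illustration; its eigenvalues are the complex pair +/-2j, so the real form keeps a 2x2 block while the complex form is genuinely triangular):

>>> from numpy import array, allclose, dot
>>> from scipy.linalg import schur
>>> a = array([[ 0., 2.],
...            [-2., 0.]])
>>> T, Z = schur(a)                        # real Schur form
>>> allclose(a, dot(Z, dot(T, Z.T)))       # Z is orthogonal, so Z.T equals Z^H
True
>>> T2, Z2 = schur(a, output='complex')
>>> abs(T2[1, 0]) < 1e-12                  # nothing below the diagonal in the complex form
True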
- Return T, Z such that a = Z * T * (Z**H) where Z is a - unitary matrix and T is either upper-triangular or quasi-upper - triangular for output='real' + See also + -------- + rsf2csf : Convert real Schur form to complex Schur form + """ if not output in ['real','complex','r','c']: raise ValueError, "argument must be 'real', or 'complex'" @@ -850,16 +1304,29 @@ raise LinAlgError, 'Array must be square' def rsf2csf(T, Z): - """Convert real schur form to complex schur form. - - Description: - - If A is a real-valued matrix, then the real schur form is - quasi-upper triangular. 2x2 blocks extrude from the main-diagonal - corresponding to any complex-valued eigenvalues. - - This function converts this real schur form to a complex schur form - which is upper triangular. + """Convert real Schur form to complex Schur form. + + Convert a quasi-diagonal real-valued Schur form to the upper triangular + complex-valued Schur form. + + Parameters + ---------- + T : array, shape (M, M) + Real Schur form of the original matrix + Z : array, shape (M, M) + Schur transformation matrix + + Returns + ------- + T : array, shape (M, M) + Complex Schur form of the original matrix + Z : array, shape (M, M) + Schur transformation matrix corresponding to the complex form + + See also + -------- + schur : Schur decompose a matrix + """ Z,T = map(asarray_chkfinite, (Z,T)) if len(Z.shape) !=2 or Z.shape[0] != Z.shape[1]: @@ -898,7 +1365,23 @@ # Orthonormal decomposition def orth(A): - """Return an orthonormal basis for the range of A using svd""" + """Construct an orthonormal basis for the range of A using SVD + + Parameters + ---------- + A : array, shape (M, N) + + Returns + ------- + Q : array, shape (M, K) + Orthonormal basis for the range of A. + K = effective rank of A, as determined by automatic cutoff + + See also + -------- + svd : Singular value decomposition of a matrix + + """ u,s,vh = svd(A) M,N = A.shape tol = max(M,N)*numpy.amax(s)*eps @@ -907,21 +1390,33 @@ return Q def hessenberg(a,calc_q=0,overwrite_a=0): - """ Compute Hessenberg form of a matrix. + """Compute Hessenberg form of a matrix. + + The Hessenberg decomposition is + + A = Q H Q^H + + where Q is unitary/orthogonal and H has only zero elements below the first + subdiagonal. + + Parameters + ---------- + a : array, shape (M,M) + Matrix to bring into Hessenberg form + calc_q : boolean + Whether to compute the transformation matrix + overwrite_a : boolean + Whether to ovewrite data in a (may improve performance) - Inputs: + Returns + ------- + H : array, shape (M,M) + Hessenberg form of A - a -- the matrix - calc_q -- if non-zero then calculate unitary similarity - transformation matrix q. - overwrite_a=0 -- if non-zero then discard the contents of a, - i.e. a is used as a work array if possible. + (If calc_q == True) + Q : array, shape (M,M) + Unitary/orthogonal similarity transformation matrix s.t. A = Q H Q^H - Outputs: - - h -- Hessenberg form of a [calc_q=0] - h, q -- matrices such that a = q * h * q^T [calc_q=1] - """ a1 = asarray(a) if len(a1.shape) != 2 or (a1.shape[0] != a1.shape[1]): Modified: trunk/scipy/linalg/matfuncs.py =================================================================== --- trunk/scipy/linalg/matfuncs.py 2008-02-20 05:39:03 UTC (rev 3951) +++ trunk/scipy/linalg/matfuncs.py 2008-02-21 01:57:37 UTC (rev 3952) @@ -20,7 +20,20 @@ feps = sb.finfo(single).eps def expm(A,q=7): - """Compute the matrix exponential using Pade approximation of order q. + """Compute the matrix exponential using Pade approximation. 
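A quick sanity check of the Pade-based exponential: for a diagonal matrix the result must match the elementwise exponential of the diagonal (the 2x2 example is made up for illustration):

>>> from numpy import allclose, diag, exp
>>> from scipy.linalg import expm
>>> allclose(expm(diag([1.0, 2.0])), diag(exp([1.0, 2.0])))
True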
+ + Parameters + ---------- + A : array, shape(M,M) + Matrix to be exponentiated + q : integer + Order of the Pade approximation + + Returns + ------- + expA : array, shape(M,M) + Matrix exponential of A + """ A = asarray(A) ss = True @@ -61,6 +74,17 @@ def expm2(A): """Compute the matrix exponential using eigenvalue decomposition. + + Parameters + ---------- + A : array, shape(M,M) + Matrix to be exponentiated + + Returns + ------- + expA : array, shape(M,M) + Matrix exponential of A + """ A = asarray(A) t = A.dtype.char @@ -72,7 +96,20 @@ return dot(dot(vr,diag(exp(s))),vri).astype(t) def expm3(A,q=20): - """Compute the matrix exponential using a Taylor series.of order q. + """Compute the matrix exponential using Taylor series. + + Parameters + ---------- + A : array, shape(M,M) + Matrix to be exponentiated + q : integer + Order of the Taylor series + + Returns + ------- + expA : array, shape(M,M) + Matrix exponential of A + """ A = asarray(A) t = A.dtype.char @@ -91,6 +128,16 @@ _array_precision = {'i': 1, 'l': 1, 'f': 0, 'd': 1, 'F': 0, 'D': 1} def toreal(arr,tol=None): """Return as real array if imaginary part is small. + + Parameters + ---------- + arr : array + tol : float + Absolute tolerance + + Returns + ------- + arr : double or complex array """ if tol is None: tol = {0:feps*1e3, 1:eps*1e6}[_array_precision[arr.dtype.char]] @@ -100,7 +147,19 @@ return arr def cosm(A): - """matrix cosine. + """Compute the matrix cosine. + + This routine uses expm to compute the matrix exponentials. + + Parameters + ---------- + A : array, shape(M,M) + + Returns + ------- + cosA : array, shape(M,M) + Matrix cosine of A + """ A = asarray(A) if A.dtype.char not in ['F','D','G']: @@ -110,7 +169,19 @@ def sinm(A): - """matrix sine. + """Compute the matrix sine. + + This routine uses expm to compute the matrix exponentials. + + Parameters + ---------- + A : array, shape(M,M) + + Returns + ------- + sinA : array, shape(M,M) + Matrix cosine of A + """ A = asarray(A) if A.dtype.char not in ['F','D','G']: @@ -119,7 +190,19 @@ return -0.5j*(expm(1j*A) - expm(-1j*A)) def tanm(A): - """matrix tangent. + """Compute the matrix tangent. + + This routine uses expm to compute the matrix exponentials. + + Parameters + ---------- + A : array, shape(M,M) + + Returns + ------- + tanA : array, shape(M,M) + Matrix tangent of A + """ A = asarray(A) if A.dtype.char not in ['F','D','G']: @@ -128,7 +211,19 @@ return solve(cosm(A), sinm(A)) def coshm(A): - """matrix hyperbolic cosine. + """Compute the hyperbolic matrix cosine. + + This routine uses expm to compute the matrix exponentials. + + Parameters + ---------- + A : array, shape(M,M) + + Returns + ------- + coshA : array, shape(M,M) + Hyperbolic matrix cosine of A + """ A = asarray(A) if A.dtype.char not in ['F','D','G']: @@ -137,7 +232,19 @@ return 0.5*(expm(A) + expm(-A)) def sinhm(A): - """matrix hyperbolic sine. + """Compute the hyperbolic matrix sine. + + This routine uses expm to compute the matrix exponentials. + + Parameters + ---------- + A : array, shape(M,M) + + Returns + ------- + sinhA : array, shape(M,M) + Hyperbolic matrix sine of A + """ A = asarray(A) if A.dtype.char not in ['F','D']: @@ -146,7 +253,19 @@ return 0.5*(expm(A) - expm(-A)) def tanhm(A): - """matrix hyperbolic tangent. + """Compute the hyperbolic matrix tangent. + + This routine uses expm to compute the matrix exponentials. 
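Because coshm and sinhm are both built from expm, they satisfy the matrix identity cosh(A)^2 - sinh(A)^2 = I, which makes a convenient smoke test (the symmetric 2x2 matrix below is an arbitrary illustrative choice):

>>> from numpy import array, allclose, dot, eye
>>> from scipy.linalg import coshm, sinhm
>>> a = array([[1.0, 0.3],
...            [0.3, 2.0]])
>>> ch, sh = coshm(a), sinhm(a)
>>> allclose(dot(ch, ch) - dot(sh, sh), eye(2))
True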
+ + Parameters + ---------- + A : array, shape(M,M) + + Returns + ------- + tanhA : array, shape(M,M) + Hyperbolic matrix tangent of A + """ A = asarray(A) if A.dtype.char not in ['F','D']: @@ -155,11 +274,32 @@ return solve(coshm(A), sinhm(A)) def funm(A,func,disp=1): - """matrix function for arbitrary callable object func. + """Evaluate a matrix function specified by a callable. + + Returns the value of matrix-valued function f at A. The function f + is an extension of the scalar-valued function func to matrices. + + Parameters + ---------- + A : array, shape(M,M) + Matrix at which to evaluate the function + func : callable + Callable object that evaluates a scalar function f. + Must be vectorized (eg. using vectorize). + disp : boolean + Print warning if error in the result is estimated large + instead of returning estimated error. (Default: True) + + Returns + ------- + fA : array, shape(M,M) + Value of the matrix function specified by func evaluated at A + + (if disp == False) + errest : float + 1-norm of the estimated error, ||err||_1 / ||A||_1 + """ - # func should take a vector of arguments (see vectorize if - # it needs wrapping. - # Perform Shur decomposition (lapack ?gees) A = asarray(A) if len(A.shape)!=2: @@ -209,7 +349,28 @@ return F, err def logm(A,disp=1): - """Matrix logarithm, inverse of expm.""" + """Compute matrix logarithm. + + The matrix logarithm is the inverse of expm: expm(logm(A)) == A + + Parameters + ---------- + A : array, shape(M,M) + Matrix whose logarithm to evaluate + disp : boolean + Print warning if error in the result is estimated large + instead of returning estimated error. (Default: True) + + Returns + ------- + logA : array, shape(M,M) + Matrix logarithm of A + + (if disp == False) + errest : float + 1-norm of the estimated error, ||err||_1 / ||A||_1 + + """ # Compute using general funm but then use better error estimator and # make one step in improving estimate using a rotation matrix. A = mat(asarray(A)) @@ -239,7 +400,37 @@ return F, errest def signm(a,disp=1): - """matrix sign""" + """Matrix sign function. + + Extension of the scalar sign(x) to matrices. + + Parameters + ---------- + A : array, shape(M,M) + Matrix at which to evaluate the sign function + disp : boolean + Print warning if error in the result is estimated large + instead of returning estimated error. (Default: True) + + Returns + ------- + sgnA : array, shape(M,M) + Value of the sign function at A + + (if disp == False) + errest : float + 1-norm of the estimated error, ||err||_1 / ||A||_1 + + Examples + -------- + >>> from scipy.linalg import signm, eigvals + >>> a = [[1,2,3], [1,2,1], [1,1,1]] + >>> eigvals(a) + array([ 4.12488542+0.j, -0.76155718+0.j, 0.63667176+0.j]) + >>> eigvals(signm(a)) + array([-1.+0.j, 1.+0.j, 1.+0.j]) + + """ def rounded_sign(x): rx = real(x) if rx.dtype.char=='f': @@ -286,12 +477,29 @@ return S0, errest def sqrtm(A,disp=1): - """Matrix square root + """Matrix square root. + + Parameters + ---------- + A : array, shape(M,M) + Matrix whose square root to evaluate + disp : boolean + Print warning if error in the result is estimated large + instead of returning estimated error. (Default: True) + + Returns + ------- + sgnA : array, shape(M,M) + Value of the sign function at A - If disp is non-zero display warning if singular matrix. - If disp is zero then return residual ||A-X*X||_F / ||A||_F + (if disp == False) + errest : float + Frobenius norm of the estimated error, ||err||_F / ||A||_F + Notes + ----- Uses algorithm by Nicholas J. 
Higham + """ A = asarray(A) if len(A.shape)!=2: From scipy-svn at scipy.org Thu Feb 21 13:26:07 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Thu, 21 Feb 2008 12:26:07 -0600 (CST) Subject: [Scipy-svn] r3953 - trunk/scipy/stats Message-ID: <20080221182607.0901939C22C@new.scipy.org> Author: dhuard Date: 2008-02-21 12:26:04 -0600 (Thu, 21 Feb 2008) New Revision: 3953 Modified: trunk/scipy/stats/stats.py Log: Fix ticket:594 -- docstring for stats.kurtosis Modified: trunk/scipy/stats/stats.py =================================================================== --- trunk/scipy/stats/stats.py 2008-02-21 01:57:37 UTC (rev 3952) +++ trunk/scipy/stats/stats.py 2008-02-21 18:26:04 UTC (rev 3953) @@ -767,9 +767,10 @@ Returns ------- - The kurtosis of values along an axis, returning 0 where all values are - equal. + The kurtosis of values along an axis. If all values are equal, return -3 for Fisher's + definition and 0 for Pearson's definition. + References ---------- [CRCProbStat2000] section 2.2.25 From scipy-svn at scipy.org Fri Feb 22 14:48:44 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Fri, 22 Feb 2008 13:48:44 -0600 (CST) Subject: [Scipy-svn] r3954 - trunk/scipy/stats/models Message-ID: <20080222194844.8A61939C0CA@new.scipy.org> Author: jonathan.taylor Date: 2008-02-22 13:48:39 -0600 (Fri, 22 Feb 2008) New Revision: 3954 Modified: trunk/scipy/stats/models/formula.py Log: allowed reference key for main_effect to be a key or an int Modified: trunk/scipy/stats/models/formula.py =================================================================== --- trunk/scipy/stats/models/formula.py 2008-02-21 18:26:04 UTC (rev 3953) +++ trunk/scipy/stats/models/formula.py 2008-02-22 19:48:39 UTC (rev 3954) @@ -256,14 +256,24 @@ def main_effect(self, reference=None): """ Return the 'main effect' columns of a factor, choosing - a reference column number to remove. + an optional reference key. + + The reference key can be one of the keys of the Factor, + or an integer, representing which column to remove. + It defaults to 0. 
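The key-or-integer behaviour described above amounts to: try the value as a Factor key first, and fall back to treating it as a column index. A standalone sketch of that lookup (a hypothetical helper for illustration, not the committed code; note that list.index signals a missing key with ValueError):

>>> def resolve_reference(names, reference=None):
...     # mirrors main_effect's reference handling, for illustration only
...     if reference is None:
...         return 0
...     try:
...         return names.index(reference)   # reference given as a key
...     except ValueError:                  # not a key: use it as a column number
...         return int(reference)
...
>>> resolve_reference(['low', 'medium', 'high'], 'medium')
1
>>> resolve_reference(['low', 'medium', 'high'], 2)
2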
+ """ + names = self.names() + if reference is None: reference = 0 + else: + try: + reference = names.index(reference) + except IndexError: + reference = int(reference) - names = self.names() - def maineffect_func(value, reference=reference): rvalue = [] keep = range(value.shape[0]) From scipy-svn at scipy.org Fri Feb 22 20:41:41 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Fri, 22 Feb 2008 19:41:41 -0600 (CST) Subject: [Scipy-svn] r3955 - trunk/scipy/ndimage/src/register Message-ID: <20080223014141.1A0F039C218@new.scipy.org> Author: tom.waite Date: 2008-02-22 19:41:38 -0600 (Fri, 22 Feb 2008) New Revision: 3955 Modified: trunk/scipy/ndimage/src/register/Register_IMPL.c Log: added integrated histogram based volume thresholding Modified: trunk/scipy/ndimage/src/register/Register_IMPL.c =================================================================== --- trunk/scipy/ndimage/src/register/Register_IMPL.c 2008-02-22 19:48:39 UTC (rev 3954) +++ trunk/scipy/ndimage/src/register/Register_IMPL.c 2008-02-23 01:41:38 UTC (rev 3955) @@ -554,3 +554,59 @@ } + +int NI_ImageThreshold(int layers, int rows, int cols, unsigned short *image, double *H, + double *IH, int histogram_elements, double threshold, int *index) +{ + + int i, j, k; + int status; + int ptr; + int value; + float sum; + + for(i = 0; i < histogram_elements; ++i){ + H[i] = 0; + IH[i] = 0; + } + ptr = 0; + for(i = 0; i < layers; ++i){ + for(j = 0; j < rows; ++j){ + for(k = 0; k < cols; ++k){ + value = image[ptr++]; + ++H[value]; + } + } + } + + sum = 0.0; + for(i = 0; i < histogram_elements; ++i){ + sum += H[i]; + } + /* normalize the volume histogram */ + for(i = 0; i < histogram_elements; ++i){ + H[i] = H[i] / sum; + } + + /* build the integrated histogram */ + IH[0] = H[0]; + for(i = 1; i < histogram_elements; ++i){ + IH[i] = IH[i-1] + H[i]; + } + + /* get the threshold crossing. 
this deals with the high amplitude outliers in the volume */ + *index = histogram_elements-1; + for(i = 0; i < histogram_elements; ++i){ + if(IH[i] > threshold){ + *index = i; + break; + } + } + + status = 1; + + return status; + +} + + From scipy-svn at scipy.org Fri Feb 22 20:41:58 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Fri, 22 Feb 2008 19:41:58 -0600 (CST) Subject: [Scipy-svn] r3956 - trunk/scipy/ndimage/src/register Message-ID: <20080223014158.893B739C218@new.scipy.org> Author: tom.waite Date: 2008-02-22 19:41:56 -0600 (Fri, 22 Feb 2008) New Revision: 3956 Modified: trunk/scipy/ndimage/src/register/Register_EXT.c Log: added integrated histogram based volume thresholding Modified: trunk/scipy/ndimage/src/register/Register_EXT.c =================================================================== --- trunk/scipy/ndimage/src/register/Register_EXT.c 2008-02-23 01:41:38 UTC (rev 3955) +++ trunk/scipy/ndimage/src/register/Register_EXT.c 2008-02-23 01:41:56 UTC (rev 3956) @@ -250,12 +250,58 @@ } + +static PyObject *Register_ImageThreshold(PyObject *self, PyObject *args) +{ + + /* set threshold from the volume integrated histogram */ + int num; + int nd; + int type; + int itype; + int histogram_elements; + int tindex; + npy_intp *dimsImage; + npy_intp *dimsHistogram; + unsigned short *image; + double *H; + double *IH; + double threshold; + PyObject *imgArray = NULL; + PyObject *histogram = NULL; + PyObject *ihistogram = NULL; + + if(!PyArg_ParseTuple(args, "OOOd", &imgArray, &histogram, &ihistogram, &threshold)) + goto exit; + + image = (unsigned short *)PyArray_DATA(imgArray); + /* reads dims as 0 = layers, 1 = rows, 2 = cols */ + nd = PyArray_NDIM(imgArray); + dimsImage = PyArray_DIMS(imgArray); + type = PyArray_TYPE(imgArray); + num = PyArray_SIZE(imgArray); + + H = (double *)PyArray_DATA(histogram); + IH = (double *)PyArray_DATA(ihistogram); + histogram_elements = PyArray_SIZE(histogram); + + if(!NI_ImageThreshold((int)dimsImage[0], (int)dimsImage[1], (int)dimsImage[2], + image, H, IH, histogram_elements, threshold, &tindex)) + goto exit; + +exit: + + return PyErr_Occurred() ? NULL : (PyObject*)Py_BuildValue("i", tindex); + +} + static PyMethodDef RegisterMethods[] = { { "register_histogram", Register_Histogram, METH_VARARGS, NULL }, { "register_histogram_lite", Register_HistogramLite, METH_VARARGS, NULL }, { "register_linear_resample", Register_LinearResample, METH_VARARGS, NULL }, { "register_cubic_resample", Register_CubicResample, METH_VARARGS, NULL }, + { "register_image_threshold", Register_ImageThreshold, METH_VARARGS, NULL }, { NULL, NULL, 0, NULL}, }; From scipy-svn at scipy.org Fri Feb 22 20:43:08 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Fri, 22 Feb 2008 19:43:08 -0600 (CST) Subject: [Scipy-svn] r3957 - trunk/scipy/ndimage Message-ID: <20080223014308.3059D39C06A@new.scipy.org> Author: tom.waite Date: 2008-02-22 19:43:06 -0600 (Fri, 22 Feb 2008) New Revision: 3957 Modified: trunk/scipy/ndimage/_registration.py Log: More fMRI coregistration support added. 
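The NI_ImageThreshold routine and its register_image_threshold wrapper added in the two commits above, and used by the registration changes that follow, compute a normalized intensity histogram, integrate it, and return the first bin whose cumulative mass exceeds the requested threshold; that bin then serves as the effective maximum when rescaling, which clips high-amplitude outliers. A rough NumPy sketch of the same idea, with made-up data (illustrative only, not the extension code):

>>> import numpy as np
>>> image = np.array([0, 1, 1, 2, 2, 2, 3, 250])
>>> h = np.bincount(image)                    # per-intensity counts
>>> h = h / float(h.sum())                    # normalized histogram
>>> ih = np.cumsum(h)                         # integrated histogram
>>> index = int(np.argmax(ih > 0.85))         # first crossing of the threshold
>>> index                                     # the 250 outlier is excluded
3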
Modified: trunk/scipy/ndimage/_registration.py =================================================================== --- trunk/scipy/ndimage/_registration.py 2008-02-23 01:41:56 UTC (rev 3956) +++ trunk/scipy/ndimage/_registration.py 2008-02-23 01:43:06 UTC (rev 3957) @@ -6,6 +6,7 @@ import scipy.ndimage as NDI import scipy.optimize as OPT import time +import glob # Issue warning regarding heavy development status of this module import warnings @@ -72,20 +73,6 @@ print 'Total Optimizer Time is ', (stop-start) return parm_vector -def get_test_images(alpha=0.0, beta=0.0, gamma=0.0): - image1 = load_image() - image2 = load_blank_image() - imdata = build_structs(step=1) - # allow the G image to be rotated for testing - imdata['parms'][0] = alpha - imdata['parms'][1] = beta - imdata['parms'][2] = gamma - image1['fwhm'] = build_fwhm(image1['mat'], imdata['step']) - image2['fwhm'] = build_fwhm(image2['mat'], imdata['step']) - M = build_rotate_matrix(imdata['parms']) - R.register_linear_resample(image1['data'], image2['data'], M, imdata['step']) - return image1, image2, imdata - def multires_registration(image1, image2, imdata, lite, smhist, method, opt_method): ret_histo=0 # zero out the start parameter; but this may be set to large values @@ -209,7 +196,7 @@ Tx=0.0, Ty=0.0, Tz=0.0, stepsize=1): # takes an image and 3D rotate using trilinear interpolation - image1 = load_image() + image1 = load_anatMRI_image() image2 = load_blank_image() imdata = build_structs(step=stepsize) imdata['parms'][0] = alpha @@ -227,6 +214,7 @@ image_F_xyz2 = filter_image_3D(image2['data'], image2['fwhm'], ftype) image2['data'] = image_F_xyz2 + # this is now a rotated and low pass filtered version of the volume R.register_linear_resample(image1['data'], image2['data'], M, imdata['step']) return image2 @@ -246,42 +234,6 @@ # return the 3D Gaussian kernel width (xyz) return fwhm -def load_image(imagename=filename, rows=256, cols=256, layers=90): - ImageVolume = N.fromfile(imagename, dtype=N.uint16).reshape(layers, rows, cols); - # clip to 8 bits. 
this will be rescale to 8 bits for fMRI - ImageVolume[ImageVolume>255] = 255 - # voxel to pixel is identity for this simulation using anatomical MRI volume - # 4x4 matrix - M = N.eye(4, dtype=N.float64); - # dimensions - D = N.zeros(3, dtype=N.int32); - # Gaussian kernel - fill in with build_fwhm() - F = N.zeros(3, dtype=N.float64); - D[0] = rows - D[1] = cols - D[2] = layers - # make sure the data type is uchar - ImageVolume = ImageVolume.astype(N.uint8) - image = {'data' : ImageVolume, 'mat' : M, 'dim' : D, 'fwhm' : F} - return image - -def load_blank_image(rows=256, cols=256, layers=90): - ImageVolume = N.zeros(layers*rows*cols, dtype=N.uint16).reshape(layers, rows, cols); - # voxel to pixel is identity for this simulation using anatomical MRI volume - # 4x4 matrix - M = N.eye(4, dtype=N.float64); - # dimensions - D = N.zeros(3, dtype=N.int32); - # Gaussian kernel - fill in with build_fwhm() - F = N.zeros(3, dtype=N.float64); - D[0] = rows - D[1] = cols - D[2] = layers - # make sure the data type is uchar - ImageVolume = ImageVolume.astype(N.uint8) - image = {'data' : ImageVolume, 'mat' : M, 'dim' : D, 'fwhm' : F} - return image - def optimize_function(x, optfunc_args): image_F = optfunc_args[0] image_G = optfunc_args[1] @@ -461,7 +413,146 @@ return rot_matrix +def load_fMRI_image(imagename, rows=64, cols=64, layers=28, threshold=0.999, debug=0): + # un-scaled images + ImageVolume = N.fromfile(imagename, dtype=N.uint16).reshape(layers, rows, cols); + M = N.eye(4, dtype=N.float64); + # for the test data, set the xyz voxel sizes for fMRI volume + M[0][0] = 3.75 + M[1][1] = 3.75 + M[2][2] = 5.0 + # dimensions + D = N.zeros(3, dtype=N.int32); + # Gaussian kernel - fill in with build_fwhm() + F = N.zeros(3, dtype=N.float64); + D[0] = rows + D[1] = cols + D[2] = layers + max = ImageVolume.max() + min = ImageVolume.min() + ih = N.zeros(max-min+1, dtype=N.float64); + h = N.zeros(max-min+1, dtype=N.float64); + if threshold <= 0: + threshold = 0.999 + elif threshold > 1.0: + threshold = 1.0 + # get the integrated histogram of the volume and get max from + # the threshold crossing in the integrated histogram + index = R.register_image_threshold(ImageVolume, h, ih, threshold) + scale = 255.0 / (index-min) + # generate the scaled 8 bit image + images = (scale*(ImageVolume.astype(N.float)-min)) + images[images>255] = 255 + # the data type is now uchar + image = {'data' : images.astype(N.uint8), 'mat' : M, 'dim' : D, 'fwhm' : F} + if debug == 1: + return image, h, ih, index + else: + return image +def load_anatMRI_image(imagename=filename, rows=256, cols=256, layers=90, threshold=0.999, debug=0): + # un-scaled images + ImageVolume = N.fromfile(imagename, dtype=N.uint16).reshape(layers, rows, cols); + M = N.eye(4, dtype=N.float64); + # for the test data, set the xyz voxel sizes for anat-MRI volume + M[0][0] = 0.9375 + M[1][1] = 0.9375 + M[2][2] = 1.5 + # dimensions + D = N.zeros(3, dtype=N.int32); + # Gaussian kernel - fill in with build_fwhm() + F = N.zeros(3, dtype=N.float64); + D[0] = rows + D[1] = cols + D[2] = layers + max = ImageVolume.max() + min = ImageVolume.min() + ih = N.zeros(max-min+1, dtype=N.float64); + h = N.zeros(max-min+1, dtype=N.float64); + if threshold <= 0: + threshold = 0.999 + elif threshold > 1.0: + threshold = 1.0 + # get the integrated histogram of the volume and get max from + # the threshold crossing in the integrated histogram + index = R.register_image_threshold(ImageVolume, h, ih, threshold) + scale = 255.0 / (index-min) + # generate the scaled 8 bit image + images = 
(scale*(ImageVolume.astype(N.float)-min)) + images[images>255] = 255 + # the data type is now uchar + image = {'data' : images.astype(N.uint8), 'mat' : M, 'dim' : D, 'fwhm' : F} + if debug == 1: + return image, h, ih, index + else: + return image +def load_blank_image(rows=256, cols=256, layers=90): + ImageVolume = N.zeros(layers*rows*cols, dtype=N.uint16).reshape(layers, rows, cols); + # voxel to pixel is identity for this simulation using anatomical MRI volume + # 4x4 matrix + M = N.eye(4, dtype=N.float64); + # for the test data, set the xyz voxel sizes for anat-MRI volume + M[0][0] = 0.9375 + M[1][1] = 0.9375 + M[2][2] = 1.5 + # dimensions + D = N.zeros(3, dtype=N.int32); + # Gaussian kernel - fill in with build_fwhm() + F = N.zeros(3, dtype=N.float64); + D[0] = rows + D[1] = cols + D[2] = layers + # make sure the data type is uchar + ImageVolume = ImageVolume.astype(N.uint8) + image = {'data' : ImageVolume, 'mat' : M, 'dim' : D, 'fwhm' : F} + return image + +def read_fMRI_directory(path): + files_fMRI = glob.glob(path) + return files_fMRI + + +def get_test_rotated_images(alpha=0.0, beta=0.0, gamma=0.0): + image1 = load_anatMRI_image() + image2 = load_blank_image() + imdata = build_structs(step=1) + # allow the G image to be rotated for testing + imdata['parms'][0] = alpha + imdata['parms'][1] = beta + imdata['parms'][2] = gamma + image1['fwhm'] = build_fwhm(image1['mat'], imdata['step']) + image2['fwhm'] = build_fwhm(image2['mat'], imdata['step']) + M = build_rotate_matrix(imdata['parms']) + R.register_linear_resample(image1['data'], image2['data'], M, imdata['step']) + return image1, image2, imdata + +def get_test_scaled_images(scale=4.0): + # this is for coreg MRI / fMRI test + image1 = load_anatMRI_image() + image2 = build_scale_image(image1, scale) + imdata = build_structs(step=1) + # allow the G image to be rotated for testing + image1['fwhm'] = build_fwhm(image1['mat'], imdata['step']) + image2['fwhm'] = build_fwhm(image2['mat'], imdata['step']) + M = build_rotate_matrix(imdata['parms']) + R.register_linear_resample(image1['data'], image2['data'], M, imdata['step']) + return image1, image2, imdata + + +def build_scale_image(image, scale): + (layers, rows, cols) = image['data'].shape + image2 = image['data'][0:layers:scale, 0:rows:scale, 0:cols:scale] + M = image['mat'] * scale + # dimensions + D = N.zeros(3, dtype=N.int32); + # Gaussian kernel - fill in with build_fwhm() + F = N.zeros(3, dtype=N.float64); + D[0] = rows/scale + D[1] = cols/scale + D[2] = layers/scale + scaled_image = {'data' : image2, 'mat' : M, 'dim' : D, 'fwhm' : F} + return scaled_image + From scipy-svn at scipy.org Tue Feb 26 15:55:19 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Tue, 26 Feb 2008 14:55:19 -0600 (CST) Subject: [Scipy-svn] r3958 - trunk/scipy/io Message-ID: <20080226205519.4472B39C0E4@new.scipy.org> Author: wnbell Date: 2008-02-26 14:55:11 -0600 (Tue, 26 Feb 2008) New Revision: 3958 Modified: trunk/scipy/io/mmio.py Log: added support for writing pattern matrices write COOrdinate matrices with savetxt() Modified: trunk/scipy/io/mmio.py =================================================================== --- trunk/scipy/io/mmio.py 2008-02-23 01:43:06 UTC (rev 3957) +++ trunk/scipy/io/mmio.py 2008-02-26 20:55:11 UTC (rev 3958) @@ -11,8 +11,8 @@ import os from numpy import asarray, real, imag, conj, zeros, ndarray, \ - empty, concatenate, ones, ascontiguousarray -from itertools import izip + empty, concatenate, ones, ascontiguousarray, \ + vstack, savetxt, fromfile, fromstring 
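The names added to the import line just above (vstack, savetxt, fromfile, fromstring) support the new COOrdinate writing path further down, where the row, column and value arrays are stacked into one table, shifted to Matrix Market's 1-based indexing, and written with a single savetxt call. The idea in isolation, with made-up triplets (illustrative sketch only):

>>> import sys
>>> import numpy as np
>>> row, col = np.array([0, 2]), np.array([1, 3])
>>> data = np.array([10.0, 20.0])
>>> IJV = np.vstack((row, col, data)).T
>>> IJV[:, :2] += 1                        # Matrix Market indices are 1-based
>>> np.savetxt(sys.stdout, IJV, fmt='%.6g')
1 2 10
3 4 20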
__all__ = ['mminfo','mmread','mmwrite', 'MMFile'] @@ -108,7 +108,7 @@ # field values FIELD_INTEGER = 'integer' - FIELD_REAL = 'real' + FIELD_REAL = 'real' FIELD_COMPLEX = 'complex' FIELD_PATTERN = 'pattern' FIELD_VALUES = (FIELD_INTEGER, FIELD_REAL, FIELD_COMPLEX, FIELD_PATTERN) @@ -120,13 +120,12 @@ (`field`, `self.FIELD_VALUES`) # symmetry values - SYMMETRY_GENERAL = 'general' - SYMMETRY_SYMMETRIC = 'symmetric' + SYMMETRY_GENERAL = 'general' + SYMMETRY_SYMMETRIC = 'symmetric' SYMMETRY_SKEW_SYMMETRIC = 'skew-symmetric' - SYMMETRY_HERMITIAN = 'hermitian' - SYMMETRY_VALUES = ( - SYMMETRY_GENERAL, SYMMETRY_SYMMETRIC, SYMMETRY_SKEW_SYMMETRIC, - SYMMETRY_HERMITIAN) + SYMMETRY_HERMITIAN = 'hermitian' + SYMMETRY_VALUES = ( SYMMETRY_GENERAL, SYMMETRY_SYMMETRIC, + SYMMETRY_SKEW_SYMMETRIC, SYMMETRY_HERMITIAN) @classmethod def _validate_symmetry(self, symmetry): @@ -136,9 +135,9 @@ DTYPES_BY_FIELD = { FIELD_INTEGER: 'i', - FIELD_REAL: 'd', - FIELD_COMPLEX:'D', - FIELD_PATTERN:'d'} + FIELD_REAL: 'd', + FIELD_COMPLEX: 'D', + FIELD_PATTERN: 'd'} #--------------------------------------------------------------------------- @staticmethod @@ -383,13 +382,14 @@ assert k==entries,`k,entries` elif format == self.FORMAT_COORDINATE: - from numpy import fromfile,fromstring + # Read sparse COOrdinate format + try: # fromfile works for normal files - flat_data = fromfile(stream,sep=' ') + flat_data = fromfile(stream, sep=' ') except: # fallback - fromfile fails for some file-like objects - flat_data = fromstring(stream.read(),sep=' ') + flat_data = fromstring(stream.read(), sep=' ') # TODO use iterator (e.g. xreadlines) to avoid reading # the whole file into memory @@ -398,7 +398,7 @@ flat_data = flat_data.reshape(-1,2) I = ascontiguousarray(flat_data[:,0], dtype='intc') J = ascontiguousarray(flat_data[:,1], dtype='intc') - V = ones(len(I)) + V = ones(len(I), dtype='int8') # filler elif is_complex: flat_data = flat_data.reshape(-1,4) I = ascontiguousarray(flat_data[:,0], dtype='intc') @@ -544,30 +544,45 @@ else: if symm != self.SYMMETRY_GENERAL: - raise ValueError, 'symmetric matrices incompatible with sparse format' + raise NotImplementedError('symmetric matrices not yet supported') coo = a.tocoo() # convert to COOrdinate format # write shape spec stream.write('%i %i %i\n' % (rows,cols,coo.nnz)) - # line template - template = '%i %i ' + template + fmt = '%%.%dg' % precision - I,J,V = coo.row + 1, coo.col + 1, coo.data # change base 0 -> base 1 - - if field in (self.FIELD_REAL, self.FIELD_INTEGER): - for ijv_tuple in izip(I,J,V): - stream.writelines(template % ijv_tuple) + if field == self.FIELD_PATTERN: + IJV = vstack((a.row, a.col)).T + elif field in [ self.FIELD_INTEGER, self.FIELD_REAL ]: + IJV = vstack((a.row, a.col, a.data)).T elif field == self.FIELD_COMPLEX: - for ijv_tuple in izip(I,J,V.real,V.imag): - stream.writelines(template % ijv_tuple) - elif field == self.FIELD_PATTERN: - raise NotImplementedError,`field` + IJV = vstack((a.row, a.col, a.data.real, a.data.imag)).T else: - raise TypeError,'Unknown field type %s'% `field` + raise TypeError('Unknown field type %s' % `field`) + IJV[:,:2] += 1 # change base 0 -> base 1 + savetxt(stream, IJV, fmt=fmt) + + + ### Old method + ## line template + #template = '%i %i ' + template + #I,J,V = coo.row + 1, coo.col + 1, coo.data # change base 0 -> base 1 + #if field in (self.FIELD_REAL, self.FIELD_INTEGER): + # for ijv_tuple in izip(I,J,V): + # stream.writelines(template % ijv_tuple) + #elif field == self.FIELD_COMPLEX: + # for ijv_tuple in izip(I,J,V.real,V.imag): 
+ # stream.writelines(template % ijv_tuple) + #elif field == self.FIELD_PATTERN: + # raise NotImplementedError,`field` + #else: + # raise TypeError,'Unknown field type %s'% `field` + + #------------------------------------------------------------------------------- if __name__ == '__main__': import sys From scipy-svn at scipy.org Wed Feb 27 00:35:40 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Tue, 26 Feb 2008 23:35:40 -0600 (CST) Subject: [Scipy-svn] r3959 - in trunk/scipy/sparse: . tests Message-ID: <20080227053540.70F9539C079@new.scipy.org> Author: wnbell Date: 2008-02-26 23:35:35 -0600 (Tue, 26 Feb 2008) New Revision: 3959 Modified: trunk/scipy/sparse/csc.py trunk/scipy/sparse/csr.py trunk/scipy/sparse/tests/test_base.py Log: added fancy indexing to CSC matrix Modified: trunk/scipy/sparse/csc.py =================================================================== --- trunk/scipy/sparse/csc.py 2008-02-26 20:55:11 UTC (rev 3958) +++ trunk/scipy/sparse/csc.py 2008-02-27 05:35:35 UTC (rev 3959) @@ -7,12 +7,12 @@ import numpy from numpy import array, matrix, asarray, asmatrix, zeros, rank, intc, \ empty, hstack, isscalar, ndarray, shape, searchsorted, where, \ - concatenate, deprecate + concatenate, deprecate, transpose, ravel from base import spmatrix, isspmatrix from sparsetools import csc_tocsr from sputils import upcast, to_native, isdense, isshape, getdtype, \ - isscalarlike + isscalarlike, isintlike from compressed import _cs_matrix @@ -131,7 +131,38 @@ A.has_sorted_indices = True return A - + + def __getitem__(self, key): + # use CSR to implement fancy indexing + if isinstance(key, tuple): + row = key[0] + col = key[1] + + if isintlike(row) or isinstance(row, slice): + return self.T[col,row].T + else: + #[[1,2],??] or [[[1],[2]],??] + if isintlike(col) or isinstance(col,slice): + return self.T[col,row].T + else: + row = asarray(row, dtype='intc') + col = asarray(col, dtype='intc') + if len(row.shape) == 1: + return self.T[col,row] + elif len(row.shape) == 2: + row = row.reshape(-1) + col = col.reshape(-1,1) + return self.T[col,row].T + else: + raise NotImplementedError('unsupported indexing') + + return self.T[col,row].T + elif isintlike(key) or isinstance(key,slice): + return self.T[:,key].T #[i] or [1:2] + else: + return self.T[:,key].T #[[1,2]] + + # these functions are used by the parent class (_cs_matrix) # to remove redudancy between csc_matrix and csr_matrix def _swap(self,x): Modified: trunk/scipy/sparse/csr.py =================================================================== --- trunk/scipy/sparse/csr.py 2008-02-26 20:55:11 UTC (rev 3958) +++ trunk/scipy/sparse/csr.py 2008-02-27 05:35:35 UTC (rev 3959) @@ -8,7 +8,7 @@ import numpy from numpy import array, matrix, asarray, asmatrix, zeros, rank, intc, \ empty, hstack, isscalar, ndarray, shape, searchsorted, where, \ - concatenate, deprecate, arange, ones + concatenate, deprecate, arange, ones, ravel from base import spmatrix, isspmatrix from sparsetools import csr_tocsc, csr_tobsr, csr_count_blocks, \ @@ -240,22 +240,34 @@ else: P = extractor(col,self.shape[1]).T #[1:2,[1,2]] return self[row,:]*P + else: - #[[1,2],??] + #[[1,2],??] or [[[1],[2]],??] 
if isintlike(col) or isinstance(col,slice): - P = extractor(row, self.shape[0]) - return (P*self)[:,col] #[[1,2],j] or [[1,2],1:2] + P = extractor(row, self.shape[0]) #[[1,2],j] or [[1,2],1:2] + return (P*self)[:,col] + else: - row = asindices(row) #[[1,2],[1,2]] - col = asindices(col) - if len(row) != len(col): - raise IndexError('number of row and column indices differ') - val = [] - for i,j in zip(row,col): - val.append(self._get_single_element(i,j)) - return asmatrix(val) + row = asindices(row) + col = asindices(col) + if len(row.shape) == 1: + if len(row) != len(col): #[[1,2],[1,2]] + raise IndexError('number of row and column indices differ') + val = [] + for i,j in zip(row,col): + val.append(self._get_single_element(i,j)) + return asmatrix(val) + + elif len(row.shape) == 2: + row = ravel(row) #[[[1],[2]],[1,2]] + P = extractor(row, self.shape[0]) + return (P*self)[:,col] + else: + raise NotImplementedError('unsupported indexing') + + elif isintlike(key) or isinstance(key,slice): return self[key,:] #[i] or [1:2] else: Modified: trunk/scipy/sparse/tests/test_base.py =================================================================== --- trunk/scipy/sparse/tests/test_base.py 2008-02-26 20:55:11 UTC (rev 3958) +++ trunk/scipy/sparse/tests/test_base.py 2008-02-27 05:35:35 UTC (rev 3959) @@ -651,24 +651,69 @@ """Tests fancy indexing features. The tests for any matrix formats that implement these features should derive from this class. """ - # This isn't supported by any matrix objects yet: - def test_sequence_indexing(self): - B = asmatrix(arange(50.).reshape(5,10)) - A = self.spmatrix(B) - assert_array_equal(B[(1,2),(3,4)], A[(1,2),(3,4)].todense()) - assert_array_equal(B[(1,2,3),(3,4,5)], A[(1,2,3),(3,4,5)].todense()) - def test_fancy_indexing(self): - """Test for new indexing functionality""" - B = ones((5,10), float) - A = dok_matrix(B) - # Write me! 
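The fancy-indexing forms exercised by the rewritten test below now work for both CSR and CSC (the csc.py change above routes them through the transpose). A doctest-style illustration with a small made-up matrix:

>>> from numpy import arange, asmatrix, allclose
>>> from scipy.sparse import csc_matrix
>>> B = asmatrix(arange(12).reshape(3, 4))
>>> A = csc_matrix(B)
>>> allclose(A[[0, 2], [1, 3]], B[[0, 2], [1, 3]])        # paired row/column lists
True
>>> allclose(A[[0, 2], 1:3].todense(), B[[0, 2], 1:3])    # list combined with a slice
True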
+ B = asmatrix(arange(50).reshape(5,10)) + A = self.spmatrix( B ) - # Both slicing and fancy indexing: not yet supported - # assert_array_equal(B[(1,2),:], A[(1,2),:].todense()) - # assert_array_equal(B[(1,2,3),:], A[(1,2,3),:].todense()) + # [i,j] + assert_equal(A[2,3],B[2,3]) + assert_equal(A[-1,8],B[-1,8]) + assert_equal(A[-1,-2],B[-1,-2]) + # [i,1:2] + assert_equal(A[2,:].todense(),B[2,:]) + assert_equal(A[2,5:-2].todense(),B[2,5:-2]) + + # [i,[1,2]] + assert_equal(A[3,[1,3]].todense(),B[3,[1,3]]) + assert_equal(A[-1,[2,-5]].todense(),B[-1,[2,-5]]) + # [1:2,j] + assert_equal(A[:,2].todense(),B[:,2]) + assert_equal(A[3:4,9].todense(),B[3:4,9]) + assert_equal(A[1:4,-5].todense(),B[1:4,-5]) + + # [1:2,[1,2]] + assert_equal(A[:,[2,8,3,-1]].todense(),B[:,[2,8,3,-1]]) + assert_equal(A[3:4,[9]].todense(),B[3:4,[9]]) + assert_equal(A[1:4,[-1,-5]].todense(),B[1:4,[-1,-5]]) + + # [[1,2],j] + assert_equal(A[[1,3],3].todense(),B[[1,3],3]) + assert_equal(A[[2,-5],-4].todense(),B[[2,-5],-4]) + + # [[1,2],1:2] + assert_equal(A[[1,3],:].todense(),B[[1,3],:]) + assert_equal(A[[2,-5],8:-1].todense(),B[[2,-5],8:-1]) + + # [[1,2],[1,2]] + assert_equal(A[[1,3],[2,4]],B[[1,3],[2,4]]) + assert_equal(A[[-1,-3],[2,-4]],B[[-1,-3],[2,-4]]) + + # [[[1],[2]],[1,2]] + assert_equal(A[[[1],[3]],[2,4]].todense(),B[[[1],[3]],[2,4]]) + assert_equal(A[[[-1],[-3],[-2]],[2,-4]].todense(),B[[[-1],[-3],[-2]],[2,-4]]) + + # [i] + assert_equal(A[1].todense(),B[1]) + assert_equal(A[-2].todense(),B[-2]) + + # [1:2] + assert_equal(A[1:4].todense(),B[1:4]) + assert_equal(A[1:-2].todense(),B[1:-2]) + + # [[1,2]] + assert_equal(A[[1,3]].todense(),B[[1,3]]) + assert_equal(A[[-1,-3]].todense(),B[[-1,-3]]) + + # [[1,2],:][:,[1,2]] + assert_equal(A[[1,3],:][:,[2,4]].todense(), B[[1,3],:][:,[2,4]] ) + assert_equal(A[[-1,-3],:][:,[2,-4]].todense(), B[[-1,-3],:][:,[2,-4]] ) + + # [:,[1,2]][[1,2],:] + assert_equal(A[:,[1,3]][[2,4],:].todense(), B[:,[1,3]][[2,4],:] ) + assert_equal(A[:,[-1,-3]][[2,-4],:].todense(), B[:,[-1,-3]][[2,-4],:] ) + class _TestArithmetic: """ Test real/complex arithmetic @@ -768,7 +813,7 @@ class TestCSR(_TestCommon, _TestGetSet, _TestSolve, _TestInplaceArithmetic, _TestArithmetic, _TestMatvecOutput, _TestHorizSlicing, _TestVertSlicing, _TestBothSlicing, - TestCase): + _TestFancyIndexing, TestCase): spmatrix = csr_matrix def test_constructor1(self): @@ -861,70 +906,11 @@ assert_array_equal(asp.todense(),bsp.todense()) - def test_fancy_slicing(self): - #TODO add this to csc_matrix - B = asmatrix(arange(50).reshape(5,10)) - A = csr_matrix( B ) - # [i,j] - assert_equal(A[2,3],B[2,3]) - assert_equal(A[-1,8],B[-1,8]) - assert_equal(A[-1,-2],B[-1,-2]) - - # [i,1:2] - assert_equal(A[2,:].todense(),B[2,:]) - assert_equal(A[2,5:-2].todense(),B[2,5:-2]) - - # [i,[1,2]] - assert_equal(A[3,[1,3]].todense(),B[3,[1,3]]) - assert_equal(A[-1,[2,-5]].todense(),B[-1,[2,-5]]) - - # [1:2,j] - assert_equal(A[:,2].todense(),B[:,2]) - assert_equal(A[3:4,9].todense(),B[3:4,9]) - assert_equal(A[1:4,-5].todense(),B[1:4,-5]) - - # [1:2,[1,2]] - assert_equal(A[:,[2,8,3,-1]].todense(),B[:,[2,8,3,-1]]) - assert_equal(A[3:4,[9]].todense(),B[3:4,[9]]) - assert_equal(A[1:4,[-1,-5]].todense(),B[1:4,[-1,-5]]) - - # [[1,2],j] - assert_equal(A[[1,3],3].todense(),B[[1,3],3]) - assert_equal(A[[2,-5],-4].todense(),B[[2,-5],-4]) - - # [[1,2],1:2] - assert_equal(A[[1,3],:].todense(),B[[1,3],:]) - assert_equal(A[[2,-5],8:-1].todense(),B[[2,-5],8:-1]) - - # [[1,2],[1,2]] - assert_equal(A[[1,3],[2,4]],B[[1,3],[2,4]]) - 
assert_equal(A[[-1,-3],[2,-4]],B[[-1,-3],[2,-4]]) - - # [i] - assert_equal(A[1].todense(),B[1]) - assert_equal(A[-2].todense(),B[-2]) - - # [1:2] - assert_equal(A[1:4].todense(),B[1:4]) - assert_equal(A[1:-2].todense(),B[1:-2]) - - # [[1,2]] - assert_equal(A[[1,3]].todense(),B[[1,3]]) - assert_equal(A[[-1,-3]].todense(),B[[-1,-3]]) - - # [[1,2],:][:,[1,2]] - assert_equal(A[[1,3],:][:,[2,4]].todense(), B[[1,3],:][:,[2,4]] ) - assert_equal(A[[-1,-3],:][:,[2,-4]].todense(), B[[-1,-3],:][:,[2,-4]] ) - - # [:,[1,2]][[1,2],:] - assert_equal(A[:,[1,3]][[2,4],:].todense(), B[:,[1,3]][[2,4],:] ) - assert_equal(A[:,[-1,-3]][[2,-4],:].todense(), B[:,[-1,-3]][[2,-4],:] ) - class TestCSC(_TestCommon, _TestGetSet, _TestSolve, _TestInplaceArithmetic, _TestArithmetic, _TestMatvecOutput, _TestHorizSlicing, _TestVertSlicing, _TestBothSlicing, - TestCase): + _TestFancyIndexing, TestCase): spmatrix = csc_matrix def test_constructor1(self): From scipy-svn at scipy.org Wed Feb 27 01:15:04 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Wed, 27 Feb 2008 00:15:04 -0600 (CST) Subject: [Scipy-svn] r3960 - in trunk/scipy: . linalg linsolve sparse sparse/linalg sparse/linalg/dsolve sparse/linalg/dsolve/tests sparse/linalg/dsolve/umfpack/tests sparse/linalg/eigen sparse/linalg/eigen/arpack sparse/linalg/eigen/arpack/tests sparse/linalg/isolve sparse/linalg/isolve/tests sparse/linalg/tests sparse/tests splinalg Message-ID: <20080227061504.A8FA539C049@new.scipy.org> Author: wnbell Date: 2008-02-27 00:14:46 -0600 (Wed, 27 Feb 2008) New Revision: 3960 Added: trunk/scipy/sparse/linalg/ Removed: trunk/scipy/splinalg/dsolve/ trunk/scipy/splinalg/eigen/ trunk/scipy/splinalg/info.py trunk/scipy/splinalg/interface.py trunk/scipy/splinalg/isolve/ trunk/scipy/splinalg/setup.py trunk/scipy/splinalg/tests/ Modified: trunk/scipy/linalg/iterative.py trunk/scipy/linsolve/__init__.py trunk/scipy/setup.py trunk/scipy/sparse/csr.py trunk/scipy/sparse/linalg/dsolve/info.py trunk/scipy/sparse/linalg/dsolve/linsolve.py trunk/scipy/sparse/linalg/dsolve/tests/test_linsolve.py trunk/scipy/sparse/linalg/dsolve/umfpack/tests/test_umfpack.py trunk/scipy/sparse/linalg/eigen/arpack/arpack.py trunk/scipy/sparse/linalg/eigen/arpack/tests/test_arpack.py trunk/scipy/sparse/linalg/eigen/arpack/tests/test_speigs.py trunk/scipy/sparse/linalg/eigen/info.py trunk/scipy/sparse/linalg/info.py trunk/scipy/sparse/linalg/interface.py trunk/scipy/sparse/linalg/isolve/iterative.py trunk/scipy/sparse/linalg/isolve/minres.py trunk/scipy/sparse/linalg/isolve/tests/test_iterative.py trunk/scipy/sparse/linalg/isolve/utils.py trunk/scipy/sparse/linalg/setup.py trunk/scipy/sparse/linalg/tests/test_interface.py trunk/scipy/sparse/setup.py trunk/scipy/sparse/tests/test_base.py trunk/scipy/splinalg/__init__.py Log: moved scipy.splinalg to scipy.sparse.linalg Modified: trunk/scipy/linalg/iterative.py =================================================================== --- trunk/scipy/linalg/iterative.py 2008-02-27 05:35:35 UTC (rev 3959) +++ trunk/scipy/linalg/iterative.py 2008-02-27 06:14:46 UTC (rev 3960) @@ -2,12 +2,12 @@ # Deprecated on January 26, 2008 -from scipy.splinalg import isolve +from scipy.sparse.linalg import isolve from numpy import deprecate for name in __all__: oldfn = getattr(isolve, name) oldname='scipy.linalg.' + name - newname='scipy.splinalg.' + name + newname='scipy.sparse.linalg.' 
+ name newfn = deprecate(oldfn, oldname=oldname, newname=newname) exec(name + ' = newfn') Modified: trunk/scipy/linsolve/__init__.py =================================================================== --- trunk/scipy/linsolve/__init__.py 2008-02-27 05:35:35 UTC (rev 3959) +++ trunk/scipy/linsolve/__init__.py 2008-02-27 06:14:46 UTC (rev 3960) @@ -1,6 +1,6 @@ from warnings import warn -warn('scipy.linsolve has moved to scipy.splinalg.dsolve', DeprecationWarning) +warn('scipy.linsolve has moved to scipy.sparse.linalg.dsolve', DeprecationWarning) -from scipy.splinalg.dsolve import * +from scipy.sparse.linalg.dsolve import * Modified: trunk/scipy/setup.py =================================================================== --- trunk/scipy/setup.py 2008-02-27 05:35:35 UTC (rev 3959) +++ trunk/scipy/setup.py 2008-02-27 06:14:46 UTC (rev 3960) @@ -18,7 +18,6 @@ config.add_subpackage('signal') config.add_subpackage('sparse') config.add_subpackage('special') - config.add_subpackage('splinalg') config.add_subpackage('stats') config.add_subpackage('ndimage') config.add_subpackage('stsci') Modified: trunk/scipy/sparse/csr.py =================================================================== --- trunk/scipy/sparse/csr.py 2008-02-27 05:35:35 UTC (rev 3959) +++ trunk/scipy/sparse/csr.py 2008-02-27 06:14:46 UTC (rev 3960) @@ -221,7 +221,6 @@ if isinstance(key, tuple): row = key[0] col = key[1] - if isintlike(row): #[1,??] @@ -266,8 +265,6 @@ else: raise NotImplementedError('unsupported indexing') - - elif isintlike(key) or isinstance(key,slice): return self[key,:] #[i] or [1:2] else: Copied: trunk/scipy/sparse/linalg (from rev 3959, trunk/scipy/splinalg) Modified: trunk/scipy/sparse/linalg/dsolve/info.py =================================================================== --- trunk/scipy/splinalg/dsolve/info.py 2008-02-27 05:35:35 UTC (rev 3959) +++ trunk/scipy/sparse/linalg/dsolve/info.py 2008-02-27 06:14:46 UTC (rev 3960) @@ -13,7 +13,7 @@ >>> from scipy.sparse import csc_matrix, spdiags >>> from numpy import array ->>> from scipy.splinalg import spsolve, use_solver +>>> from scipy.sparse.linalg import spsolve, use_solver >>> >>> print "Inverting a sparse linear system:" >>> print "The sparse matrix (constructed from diagonals):" Modified: trunk/scipy/sparse/linalg/dsolve/linsolve.py =================================================================== --- trunk/scipy/splinalg/dsolve/linsolve.py 2008-02-27 05:35:35 UTC (rev 3959) +++ trunk/scipy/sparse/linalg/dsolve/linsolve.py 2008-02-27 06:14:46 UTC (rev 3960) @@ -16,7 +16,7 @@ isUmfpack = hasattr( umfpack, 'UMFPACK_OK' ) if isUmfpack and noScikit: - warn( 'scipy.splinalg.dsolve.umfpack will be removed,' + warn( 'scipy.sparse.linalg.dsolve.umfpack will be removed,' ' install scikits.umfpack instead', DeprecationWarning ) useUmfpack = True Modified: trunk/scipy/sparse/linalg/dsolve/tests/test_linsolve.py =================================================================== --- trunk/scipy/splinalg/dsolve/tests/test_linsolve.py 2008-02-27 05:35:35 UTC (rev 3959) +++ trunk/scipy/sparse/linalg/dsolve/tests/test_linsolve.py 2008-02-27 06:14:46 UTC (rev 3960) @@ -5,7 +5,7 @@ from scipy.linalg import norm, inv from scipy.sparse import spdiags, csc_matrix, SparseEfficiencyWarning -from scipy.splinalg.dsolve import spsolve, use_solver +from scipy.sparse.linalg.dsolve import spsolve, use_solver warnings.simplefilter('ignore',SparseEfficiencyWarning) Modified: trunk/scipy/sparse/linalg/dsolve/umfpack/tests/test_umfpack.py 
=================================================================== --- trunk/scipy/splinalg/dsolve/umfpack/tests/test_umfpack.py 2008-02-27 05:35:35 UTC (rev 3959) +++ trunk/scipy/sparse/linalg/dsolve/umfpack/tests/test_umfpack.py 2008-02-27 06:14:46 UTC (rev 3960) @@ -14,13 +14,13 @@ from scipy import rand, matrix, diag, eye from scipy.sparse import csc_matrix, dok_matrix, spdiags, SparseEfficiencyWarning -from scipy.splinalg import linsolve +from scipy.sparse.linalg import linsolve warnings.simplefilter('ignore',SparseEfficiencyWarning) import numpy as nm try: - import scipy.splinalg.dsolve.umfpack as um + import scipy.sparse.linalg.dsolve.umfpack as um except (ImportError, AttributeError): _have_umfpack = False else: Modified: trunk/scipy/sparse/linalg/eigen/arpack/arpack.py =================================================================== --- trunk/scipy/splinalg/eigen/arpack/arpack.py 2008-02-27 05:35:35 UTC (rev 3959) +++ trunk/scipy/sparse/linalg/eigen/arpack/arpack.py 2008-02-27 06:14:46 UTC (rev 3960) @@ -45,7 +45,7 @@ import _arpack import numpy as np -from scipy.splinalg.interface import aslinearoperator +from scipy.sparse.linalg.interface import aslinearoperator _type_conv = {'f':'s', 'd':'d', 'F':'c', 'D':'z'} _ndigits = {'f':5, 'd':12, 'F':5, 'D':12} Modified: trunk/scipy/sparse/linalg/eigen/arpack/tests/test_arpack.py =================================================================== --- trunk/scipy/splinalg/eigen/arpack/tests/test_arpack.py 2008-02-27 05:35:35 UTC (rev 3959) +++ trunk/scipy/sparse/linalg/eigen/arpack/tests/test_arpack.py 2008-02-27 06:14:46 UTC (rev 3960) @@ -9,7 +9,7 @@ from numpy import array,real,imag,finfo,concatenate,\ column_stack,argsort,dot,round,conj,sort,random -from scipy.splinalg.eigen.arpack import eigen_symmetric,eigen +from scipy.sparse.linalg.eigen.arpack import eigen_symmetric,eigen def assert_almost_equal_cc(actual,desired,decimal=7,err_msg='',verbose=True): Modified: trunk/scipy/sparse/linalg/eigen/arpack/tests/test_speigs.py =================================================================== --- trunk/scipy/splinalg/eigen/arpack/tests/test_speigs.py 2008-02-27 05:35:35 UTC (rev 3959) +++ trunk/scipy/sparse/linalg/eigen/arpack/tests/test_speigs.py 2008-02-27 06:14:46 UTC (rev 3960) @@ -2,8 +2,8 @@ from scipy.testing import * -from scipy.splinalg.interface import aslinearoperator -from scipy.splinalg.eigen.arpack.speigs import * +from scipy.sparse.linalg.interface import aslinearoperator +from scipy.sparse.linalg.eigen.arpack.speigs import * import numpy as N Modified: trunk/scipy/sparse/linalg/eigen/info.py =================================================================== --- trunk/scipy/splinalg/eigen/info.py 2008-02-27 05:35:35 UTC (rev 3959) +++ trunk/scipy/sparse/linalg/eigen/info.py 2008-02-27 06:14:46 UTC (rev 3960) @@ -2,7 +2,7 @@ Sparse Eigenvalue Solvers ========================= -There are submodules of splinalg: +The submodules of sparse.linalg: 1. arpack: spare eigenvalue solver using iterative methods Modified: trunk/scipy/sparse/linalg/info.py =================================================================== --- trunk/scipy/splinalg/info.py 2008-02-27 05:35:35 UTC (rev 3959) +++ trunk/scipy/sparse/linalg/info.py 2008-02-27 06:14:46 UTC (rev 3960) @@ -2,7 +2,7 @@ Sparse Linear Algebra ===================== -There are submodules of splinalg: +The submodules of sparse.linalg: 1. eigen: sparse eigenvalue problem solvers 2. isolve: iterative methods for solving linear systems 3. 
dsolve: direct factorization methods for solving linear systems Modified: trunk/scipy/sparse/linalg/interface.py =================================================================== --- trunk/scipy/splinalg/interface.py 2008-02-27 05:35:35 UTC (rev 3959) +++ trunk/scipy/sparse/linalg/interface.py 2008-02-27 06:14:46 UTC (rev 3960) @@ -31,7 +31,7 @@ Example: - >>> from scipy.splinalg import LinearOperator + >>> from scipy.sparse.linalg import LinearOperator >>> from scipy import * >>> def mv(x): ... return array([ 2*x[0], 3*x[1]]) Modified: trunk/scipy/sparse/linalg/isolve/iterative.py =================================================================== --- trunk/scipy/splinalg/isolve/iterative.py 2008-02-27 05:35:35 UTC (rev 3959) +++ trunk/scipy/sparse/linalg/isolve/iterative.py 2008-02-27 06:14:46 UTC (rev 3960) @@ -15,7 +15,7 @@ import numpy as sb import copy -from scipy.splinalg.interface import LinearOperator +from scipy.sparse.linalg.interface import LinearOperator from utils import make_system _type_conv = {'f':'s', 'd':'d', 'F':'c', 'D':'z'} Modified: trunk/scipy/sparse/linalg/isolve/minres.py =================================================================== --- trunk/scipy/splinalg/isolve/minres.py 2008-02-27 05:35:35 UTC (rev 3959) +++ trunk/scipy/sparse/linalg/isolve/minres.py 2008-02-27 06:14:46 UTC (rev 3960) @@ -280,7 +280,7 @@ from scipy import ones, arange from scipy.linalg import norm from scipy.sparse import spdiags - from scipy.splinalg import cg + from scipy.sparse.linalg import cg #from scipy.sandbox.multigrid import * n = 10 Modified: trunk/scipy/sparse/linalg/isolve/tests/test_iterative.py =================================================================== --- trunk/scipy/splinalg/isolve/tests/test_iterative.py 2008-02-27 05:35:35 UTC (rev 3959) +++ trunk/scipy/sparse/linalg/isolve/tests/test_iterative.py 2008-02-27 06:14:46 UTC (rev 3960) @@ -1,5 +1,5 @@ #!/usr/bin/env python -""" Test functions for the splinalg.isolve module +""" Test functions for the sparse.linalg.isolve module """ from scipy.testing import * @@ -9,7 +9,7 @@ from scipy.linalg import norm from scipy.sparse import spdiags -from scipy.splinalg.isolve import cg, cgs, bicg, bicgstab, gmres, qmr, minres +from scipy.sparse.linalg.isolve import cg, cgs, bicg, bicgstab, gmres, qmr, minres #def callback(x): # global A, b @@ -116,8 +116,8 @@ def test_leftright_precond(self): """Check that QMR works with left and right preconditioners""" - from scipy.splinalg.dsolve import splu - from scipy.splinalg.interface import LinearOperator + from scipy.sparse.linalg.dsolve import splu + from scipy.sparse.linalg.interface import LinearOperator n = 100 Modified: trunk/scipy/sparse/linalg/isolve/utils.py =================================================================== --- trunk/scipy/splinalg/isolve/utils.py 2008-02-27 05:35:35 UTC (rev 3959) +++ trunk/scipy/sparse/linalg/isolve/utils.py 2008-02-27 06:14:46 UTC (rev 3960) @@ -2,7 +2,7 @@ from numpy import asanyarray, asarray, asmatrix, array, matrix, zeros -from scipy.splinalg.interface import aslinearoperator, LinearOperator +from scipy.sparse.linalg.interface import aslinearoperator, LinearOperator _coerce_rules = {('f','f'):'f', ('f','d'):'d', ('f','F'):'F', ('f','D'):'D', ('d','f'):'d', ('d','d'):'d', Modified: trunk/scipy/sparse/linalg/setup.py =================================================================== --- trunk/scipy/splinalg/setup.py 2008-02-27 05:35:35 UTC (rev 3959) +++ trunk/scipy/sparse/linalg/setup.py 2008-02-27 06:14:46 UTC (rev 3960) @@ -3,7 
+3,7 @@ def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration - config = Configuration('splinalg',parent_package,top_path) + config = Configuration('linalg',parent_package,top_path) config.add_subpackage(('isolve')) config.add_subpackage(('dsolve')) Modified: trunk/scipy/sparse/linalg/tests/test_interface.py =================================================================== --- trunk/scipy/splinalg/tests/test_interface.py 2008-02-27 05:35:35 UTC (rev 3959) +++ trunk/scipy/sparse/linalg/tests/test_interface.py 2008-02-27 06:14:46 UTC (rev 3960) @@ -1,5 +1,5 @@ #!/usr/bin/env python -""" Test functions for the splinalg.interface module +""" Test functions for the sparse.linalg.interface module """ from scipy.testing import * @@ -8,7 +8,7 @@ from numpy import array, matrix, ones, ravel from scipy.sparse import csr_matrix -from scipy.splinalg.interface import * +from scipy.sparse.linalg.interface import * class TestInterface(TestCase): Modified: trunk/scipy/sparse/setup.py =================================================================== --- trunk/scipy/sparse/setup.py 2008-02-27 05:35:35 UTC (rev 3959) +++ trunk/scipy/sparse/setup.py 2008-02-27 06:14:46 UTC (rev 3960) @@ -10,6 +10,7 @@ config = Configuration('sparse',parent_package,top_path) config.add_data_dir('tests') + config.add_subpackage('linalg') # Adding a Python file as a "source" file for an extension is something of # a hack, but it works to put it in the right place. Modified: trunk/scipy/sparse/tests/test_base.py =================================================================== --- trunk/scipy/sparse/tests/test_base.py 2008-02-27 05:35:35 UTC (rev 3959) +++ trunk/scipy/sparse/tests/test_base.py 2008-02-27 06:14:46 UTC (rev 3960) @@ -27,7 +27,7 @@ coo_matrix, lil_matrix, dia_matrix, bsr_matrix, \ eye, SparseEfficiencyWarning from scipy.sparse.sputils import supported_dtypes -from scipy.splinalg import splu +from scipy.sparse.linalg import splu warnings.simplefilter('ignore',SparseEfficiencyWarning) Modified: trunk/scipy/splinalg/__init__.py =================================================================== --- trunk/scipy/splinalg/__init__.py 2008-02-27 05:35:35 UTC (rev 3959) +++ trunk/scipy/splinalg/__init__.py 2008-02-27 06:14:46 UTC (rev 3960) @@ -1,14 +1,6 @@ -"Sparse Linear Algebra routines" +from warnings import warn -from info import __doc__ +warn('scipy.splinalg has moved to scipy.sparse.linalg', DeprecationWarning) -from isolve import * -from dsolve import * -from interface import * -from eigen import * +from scipy.sparse.linalg import * - -__all__ = filter(lambda s:not s.startswith('_'),dir()) -from scipy.testing.pkgtester import Tester -test = Tester().test -bench = Tester().bench Deleted: trunk/scipy/splinalg/info.py =================================================================== --- trunk/scipy/splinalg/info.py 2008-02-27 05:35:35 UTC (rev 3959) +++ trunk/scipy/splinalg/info.py 2008-02-27 06:14:46 UTC (rev 3960) @@ -1,19 +0,0 @@ -""" -Sparse Linear Algebra -===================== - -There are submodules of splinalg: - 1. eigen: sparse eigenvalue problem solvers - 2. isolve: iterative methods for solving linear systems - 3. 
dsolve: direct factorization methods for solving linear systems - -Examples -======== - - - -""" - -#TODO show examples - -postpone_import = 1 Deleted: trunk/scipy/splinalg/interface.py =================================================================== --- trunk/scipy/splinalg/interface.py 2008-02-27 05:35:35 UTC (rev 3959) +++ trunk/scipy/splinalg/interface.py 2008-02-27 06:14:46 UTC (rev 3960) @@ -1,128 +0,0 @@ -import numpy -from numpy import matrix, ndarray, asarray, dot, atleast_2d -from scipy.sparse.sputils import isshape -from scipy.sparse import isspmatrix - -__all__ = ['LinearOperator', 'aslinearoperator'] - -class LinearOperator: - def __init__( self, shape, matvec, rmatvec=None, dtype=None ): - """Common interface for performing matrix vector products - - Many iterative methods (e.g. cg, gmres) do not need to know the - individual entries of a matrix to solve a linear system A*x=b. - Such solvers only require the computation of matrix vector - products, A*v where v is a dense vector. This class serves as - an abstract interface between iterative solvers and matrix-like - objects. - - Required Parameters: - shape : tuple of matrix dimensions (M,N) - matvec(x) : function that returns A * x - - Optional Parameters: - rmatvec(x) : function that returns A^H * x where A^H represents - the Hermitian (conjugate) transpose of A - dtype : data type of the matrix - - - See Also: - aslinearoperator() : Construct LinearOperators for SciPy classes - - Example: - - >>> from scipy.splinalg import LinearOperator - >>> from scipy import * - >>> def mv(x): - ... return array([ 2*x[0], 3*x[1]]) - ... - >>> A = LinearOperator( (2,2), matvec=mv ) - >>> A - <2x2 LinearOperator with unspecified dtype> - >>> A.matvec( ones(2) ) - array([ 2., 3.]) - - """ - - shape = tuple(shape) - - if not isshape(shape): - raise ValueError('invalid shape') - - self.shape = shape - self.matvec = matvec - - if rmatvec is None: - def rmatvec(x): - raise NotImplementedError('rmatvec is not defined') - self.rmatvec = rmatvec - else: - self.rmatvec = rmatvec - - if dtype is not None: - self.dtype = numpy.dtype(dtype) - - def __repr__(self): - M,N = self.shape - if hasattr(self,'dtype'): - dt = 'dtype=' + str(self.dtype) - else: - dt = 'unspecified dtype' - - return '<%dx%d LinearOperator with %s>' % (M,N,dt) - -def aslinearoperator(A): - """Return A as a LinearOperator - - 'A' may be any of the following types - - ndarray - - matrix - - sparse matrix (e.g. csr_matrix, lil_matrix, etc.) - - LinearOperator - - An object with .shape and .matvec attributes - - See the LinearOperator documentation for additonal information. 
- - Examples - ======== - - >>> from scipy import matrix - >>> M = matrix( [[1,2,3],[4,5,6]], dtype='int32' ) - >>> aslinearoperator( M ) - <2x3 LinearOperator with dtype=int32> - - """ - - if isinstance(A, LinearOperator): - return A - - elif isinstance(A, ndarray) or isinstance(A, matrix): - if len(A.shape) > 2: - raise ValueError('array must have rank <= 2') - - A = atleast_2d(asarray(A)) - - def matvec(x): - return dot(A,x) - def rmatvec(x): - return dot(A.conj().transpose(),x) - return LinearOperator( A.shape, matvec, rmatvec=rmatvec, dtype=A.dtype ) - - elif isspmatrix(A): - return LinearOperator( A.shape, A.matvec, rmatvec=A.rmatvec, dtype=A.dtype ) - - else: - if hasattr(A,'shape') and hasattr(A,'matvec'): - rmatvec = None - dtype = None - - if hasattr(A,'rmatvec'): - rmatvec = A.rmatvec - if hasattr(A,'dtype'): - dtype = A.dtype - return LinearOperator(A.shape, A.matvec, rmatvec=rmatvec, dtype=dtype) - - else: - raise TypeError('type not understood') - - Deleted: trunk/scipy/splinalg/setup.py =================================================================== --- trunk/scipy/splinalg/setup.py 2008-02-27 05:35:35 UTC (rev 3959) +++ trunk/scipy/splinalg/setup.py 2008-02-27 06:14:46 UTC (rev 3960) @@ -1,18 +0,0 @@ -#!/usr/bin/env python - -def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - - config = Configuration('splinalg',parent_package,top_path) - - config.add_subpackage(('isolve')) - config.add_subpackage(('dsolve')) - config.add_subpackage(('eigen')) - - config.add_data_dir('tests') - - return config - -if __name__ == '__main__': - from numpy.distutils.core import setup - setup(**configuration(top_path='').todict()) From scipy-svn at scipy.org Thu Feb 28 12:50:12 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Thu, 28 Feb 2008 11:50:12 -0600 (CST) Subject: [Scipy-svn] r3961 - trunk/scipy/sparse Message-ID: <20080228175012.3CC7239C422@new.scipy.org> Author: wnbell Date: 2008-02-28 11:50:08 -0600 (Thu, 28 Feb 2008) New Revision: 3961 Modified: trunk/scipy/sparse/construct.py Log: made deprecation more verbose Modified: trunk/scipy/sparse/construct.py =================================================================== --- trunk/scipy/sparse/construct.py 2008-02-27 06:14:46 UTC (rev 3960) +++ trunk/scipy/sparse/construct.py 2008-02-28 17:50:08 UTC (rev 3961) @@ -311,9 +311,9 @@ from numpy import deprecate -spkron = deprecate(kron, oldname='spkron', newname='kron') -speye = deprecate(eye, oldname='speye', newname='eye') -spidentity = deprecate(identity, oldname='spidentity', newname='identity') +spkron = deprecate(kron, oldname='spkron', newname='scipy.sparse.kron') +speye = deprecate(eye, oldname='speye', newname='scipy.sparse.eye') +spidentity = deprecate(identity, oldname='spidentity', newname='scipy.sparse.identity') def lil_eye((r,c), k=0, dtype='d'): From scipy-svn at scipy.org Thu Feb 28 14:49:11 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Thu, 28 Feb 2008 13:49:11 -0600 (CST) Subject: [Scipy-svn] r3962 - trunk/scipy Message-ID: <20080228194911.100AD39C05F@new.scipy.org> Author: wnbell Date: 2008-02-28 13:49:08 -0600 (Thu, 28 Feb 2008) New Revision: 3962 Modified: trunk/scipy/setup.py Log: re-added splinalg to setup.py Modified: trunk/scipy/setup.py =================================================================== --- trunk/scipy/setup.py 2008-02-28 17:50:08 UTC (rev 3961) +++ trunk/scipy/setup.py 2008-02-28 19:49:08 UTC (rev 3962) @@ -18,6 +18,7 @@ config.add_subpackage('signal') 
config.add_subpackage('sparse') config.add_subpackage('special') + config.add_subpackage('splinalg') config.add_subpackage('stats') config.add_subpackage('ndimage') config.add_subpackage('stsci') From scipy-svn at scipy.org Fri Feb 29 12:30:19 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Fri, 29 Feb 2008 11:30:19 -0600 (CST) Subject: [Scipy-svn] r3963 - trunk/scipy/io/matlab Message-ID: <20080229173019.B4C3C39C25F@new.scipy.org> Author: stefan Date: 2008-02-29 11:30:15 -0600 (Fri, 29 Feb 2008) New Revision: 3963 Modified: trunk/scipy/io/matlab/setup.py Log: Fix parent package of io.matlab. Modified: trunk/scipy/io/matlab/setup.py =================================================================== --- trunk/scipy/io/matlab/setup.py 2008-02-28 19:49:08 UTC (rev 3962) +++ trunk/scipy/io/matlab/setup.py 2008-02-29 17:30:15 UTC (rev 3963) @@ -1,6 +1,6 @@ #!/usr/bin/env python -def configuration(parent_package='',top_path=None): +def configuration(parent_package='io',top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration('matlab', parent_package, top_path) config.add_data_dir('tests') From scipy-svn at scipy.org Fri Feb 29 19:43:55 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Fri, 29 Feb 2008 18:43:55 -0600 (CST) Subject: [Scipy-svn] r3964 - trunk/scipy/ndimage/src/register Message-ID: <20080301004355.9101439C0DA@new.scipy.org> Author: tom.waite Date: 2008-02-29 18:43:51 -0600 (Fri, 29 Feb 2008) New Revision: 3964 Modified: trunk/scipy/ndimage/src/register/Register_EXT.c Log: Bug fix and enhancements Modified: trunk/scipy/ndimage/src/register/Register_EXT.c =================================================================== --- trunk/scipy/ndimage/src/register/Register_EXT.c 2008-02-29 17:30:15 UTC (rev 3963) +++ trunk/scipy/ndimage/src/register/Register_EXT.c 2008-03-01 00:43:51 UTC (rev 3964) @@ -80,6 +80,7 @@ */ int num; + int numG; int nd; int type; int itype; @@ -114,6 +115,7 @@ dimsG = PyArray_DIMS(imgArray2); type = PyArray_TYPE(imgArray1); num = PyArray_SIZE(imgArray1); + numG = PyArray_SIZE(imgArray2); M = (double *)PyArray_DATA(rotArray); nd_rotmatrix = PyArray_NDIM(rotArray); @@ -140,7 +142,51 @@ } +static PyObject *Register_VolumeResample(PyObject *self, PyObject *args) +{ + int num; + int nd; + int type; + int itype; + int mode; + int scale; + npy_intp *dimsF; + npy_intp *dimsG; + unsigned char *imageG; + unsigned char *imageF; + double *Z; + PyObject *imgArray1 = NULL; + PyObject *imgArray2 = NULL; + PyObject *coordZoom = NULL; + + if(!PyArg_ParseTuple(args, "OOOii", &imgArray1, &imgArray2, &coordZoom, &scale, &mode)) + goto exit; + + /* check in the Python code that F and G are the same dims, type */ + imageF = (unsigned char *)PyArray_DATA(imgArray1); + imageG = (unsigned char *)PyArray_DATA(imgArray2); + Z = (double *)PyArray_DATA(coordZoom); + /* reads dims as 0 = layers, 1 = rows, 2 = cols */ + nd = PyArray_NDIM(imgArray1); + dimsF = PyArray_DIMS(imgArray1); + dimsG = PyArray_DIMS(imgArray2); + type = PyArray_TYPE(imgArray1); + num = PyArray_SIZE(imgArray1); + + if(!NI_VolumeResample((int)dimsF[0], (int)dimsF[1], (int)dimsF[2], + (int)dimsG[0], (int)dimsG[1], (int)dimsG[2], + scale, mode, imageG, imageF, Z)) + goto exit; + +exit: + + return PyErr_Occurred() ? 
NULL : (PyObject*)Py_BuildValue(""); + +} + + + static PyObject *Register_CubicResample(PyObject *self, PyObject *args) { @@ -301,6 +347,7 @@ { "register_histogram_lite", Register_HistogramLite, METH_VARARGS, NULL }, { "register_linear_resample", Register_LinearResample, METH_VARARGS, NULL }, { "register_cubic_resample", Register_CubicResample, METH_VARARGS, NULL }, + { "register_volume_resample", Register_VolumeResample, METH_VARARGS, NULL }, { "register_image_threshold", Register_ImageThreshold, METH_VARARGS, NULL }, { NULL, NULL, 0, NULL}, }; From scipy-svn at scipy.org Fri Feb 29 19:44:18 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Fri, 29 Feb 2008 18:44:18 -0600 (CST) Subject: [Scipy-svn] r3965 - trunk/scipy/ndimage/src/register Message-ID: <20080301004418.EBC2E39C2C2@new.scipy.org> Author: tom.waite Date: 2008-02-29 18:44:16 -0600 (Fri, 29 Feb 2008) New Revision: 3965 Modified: trunk/scipy/ndimage/src/register/Register_IMPL.c Log: Bug fix and enhancements Modified: trunk/scipy/ndimage/src/register/Register_IMPL.c =================================================================== --- trunk/scipy/ndimage/src/register/Register_IMPL.c 2008-03-01 00:43:51 UTC (rev 3964) +++ trunk/scipy/ndimage/src/register/Register_IMPL.c 2008-03-01 00:44:16 UTC (rev 3965) @@ -322,6 +322,7 @@ int V101; int V110; int V111; + int g[64], f[64]; float valueXYZ; // @@ -509,6 +510,180 @@ +int NI_VolumeResample(int layersS, int rowsS, int colsS, int layersD, int rowsD, int colsD, + int scale, int mode, unsigned char *imageD, unsigned char *imageS, double *Z) +{ + + int i; + int x, y, z; + int sliceSizeSrc; + int sliceSizeDst; + int status; + int ivf; + int xf, xg, yg, zg; + int g_slice, f_slice; + int g_row, f_row; + int g_slicesize, f_slicesize; + int itemp, sOffset, dOffset; + int XInt, YInt, ZInt; + float ps1, ps2, ps3; + float Y[4], tpoint, reSampler; + float XPrime, YPrime, ZPrime; + float C, R, L; + float *RLUT; + float *samples; + + if(mode ==1){ + /* + * integer subsample + */ + g_slicesize = rowsD * colsD; + f_slicesize = rowsS * colsS; + for(zg = 0; zg < layersD; ++zg){ + g_slice = zg * g_slicesize; + f_slice = zg * scale * f_slicesize; + for(yg = 0; yg < rowsD; ++yg){ + g_row = yg * colsD; + f_row = yg * scale * colsS; + for(xg = 0; xg < colsD; ++xg){ + xf = xg * scale; + ivf = imageS[f_slice+f_row+xf]; + imageD[g_slice+g_row+xg] = ivf; + } + } + } + } + else if(mode ==2){ + /* + * fractional cubic convolution resample + */ + + /* first resample each column in all rows and all layers */ + + sliceSizeSrc = colsS * rowsS; + sliceSizeDst = colsD * rowsD; + + RLUT = calloc(colsD, sizeof(float)); + samples = calloc(colsS+4, sizeof(float)); + reSampler = (float)1.0/Z[0]; + tpoint = (float)0.0; + for(i = 0; i < colsD; ++i){ + RLUT[i] = tpoint; + tpoint += reSampler; + } + + for(z = 0; z < layersS; ++z){ + sOffset = z * sliceSizeSrc; + dOffset = z * sliceSizeDst; + for(y = 0; y < rowsS; ++y){ + for(x = 0; x < colsS; ++x){ + samples[x] = (float)imageS[sOffset+x]; + } + for(x = 1; x < colsD; ++x){ + XPrime = RLUT[x]; + XInt = (int)XPrime; + C = XPrime - (float)XInt; + Y[0] = samples[XInt-1]; + Y[1] = samples[XInt]; + Y[2] = samples[XInt+1]; + Y[3] = samples[XInt+2]; + ps1 = Y[2] - Y[0]; + ps2 = (float)2.0*(Y[0] - Y[1]) + Y[2] - Y[3]; + ps3 = -Y[0] + Y[1] - Y[2] + Y[3]; + itemp = (int)(Y[1]+C*(ps1+C*(ps2+C*ps3))); + if(itemp < 0) itemp = 0; + if(itemp > 255) itemp = 255; + imageD[dOffset+x] = itemp; + } + sOffset += colsS; + dOffset += colsD; + } + } + free(RLUT); + free(samples); + + /* 
second resample each row in all columns and all layers */ + RLUT = calloc(rowsD, sizeof(float)); + samples = calloc(rowsS+4, sizeof(float)); + reSampler = (float)1.0/Z[1]; + tpoint = (float)0.0; + for(i = 0; i < rowsD; ++i){ + RLUT[i] = tpoint; + tpoint += reSampler; + } + + for(z = 0; z < layersS; ++z){ + dOffset = z * sliceSizeDst; + for(x = 0; x < colsD; ++x){ + for(y = 0; y < rowsS; ++y){ + samples[y] = (float)imageD[dOffset+x+y*colsD]; + } + for(y = 1; y < rowsD; ++y){ + YPrime = RLUT[y]; + YInt = (int)YPrime; + R = YPrime - (float)YInt; + Y[0] = samples[YInt-1]; + Y[1] = samples[YInt]; + Y[2] = samples[YInt+1]; + Y[3] = samples[YInt+2]; + ps1 = Y[2] - Y[0]; + ps2 = (float)2.0*(Y[0] - Y[1]) + Y[2] - Y[3]; + ps3 = -Y[0] + Y[1] - Y[2] + Y[3]; + itemp = (int)(Y[1]+R*(ps1+R*(ps2+R*ps3))); + if(itemp < 0) itemp = 0; + if(itemp > 255) itemp = 255; + imageD[dOffset+x+y*colsD] = itemp; + } + } + } + free(RLUT); + free(samples); + + /* third resample each layers in all columns and all rows */ + RLUT = calloc(layersD, sizeof(float)); + samples = calloc(layersS+4, sizeof(float)); + reSampler = (float)1.0/Z[2]; + tpoint = (float)0.0; + for(i = 0; i < layersD; ++i){ + RLUT[i] = tpoint; + tpoint += reSampler; + } + + for(y = 0; y < rowsD; ++y){ + dOffset = y * colsD; + for(x = 0; x < colsD; ++x){ + for(z = 0; z < layersS; ++z){ + samples[z] = (float)imageD[dOffset+x+z*sliceSizeDst]; + } + for(z = 1; z < layersD; ++z){ + ZPrime = RLUT[z]; + ZInt = (int)ZPrime; + L = ZPrime - (float)ZInt; + Y[0] = samples[ZInt-1]; + Y[1] = samples[ZInt]; + Y[2] = samples[ZInt+1]; + Y[3] = samples[ZInt+2]; + ps1 = Y[2] - Y[0]; + ps2 = (float)2.0*(Y[0] - Y[1]) + Y[2] - Y[3]; + ps3 = -Y[0] + Y[1] - Y[2] + Y[3]; + itemp = (int)(Y[1]+R*(ps1+R*(ps2+R*ps3))); + if(itemp < 0) itemp = 0; + if(itemp > 255) itemp = 255; + imageD[dOffset+x+z*sliceSizeDst] = itemp; + } + } + } + free(RLUT); + free(samples); + } + + status = 1; + + return status; + +} + + int NI_CubicResample(int layersF, int rowsF, int colsF, int layersG, int rowsG, int colsG, int *dimSteps, double *M, unsigned char *imageG, unsigned char *imageF) { @@ -541,6 +716,7 @@ zp, colsG, rowsG, layersG, sliceSizeG); /* clip at hard edges */ if(vf < 0.0) vf = 0.0; + if(vf > 255.0) vf = 255.0; imageG[sliceG+rowG+(int)x] = (int)vf; } } @@ -553,8 +729,6 @@ } - - int NI_ImageThreshold(int layers, int rows, int cols, unsigned short *image, double *H, double *IH, int histogram_elements, double threshold, int *index) { From scipy-svn at scipy.org Fri Feb 29 19:44:37 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Fri, 29 Feb 2008 18:44:37 -0600 (CST) Subject: [Scipy-svn] r3966 - trunk/scipy/ndimage Message-ID: <20080301004437.5DB1639C2C2@new.scipy.org> Author: tom.waite Date: 2008-02-29 18:44:34 -0600 (Fri, 29 Feb 2008) New Revision: 3966 Modified: trunk/scipy/ndimage/_registration.py Log: Bug fix and enhancements Modified: trunk/scipy/ndimage/_registration.py =================================================================== --- trunk/scipy/ndimage/_registration.py 2008-03-01 00:44:16 UTC (rev 3965) +++ trunk/scipy/ndimage/_registration.py 2008-03-01 00:44:34 UTC (rev 3966) @@ -29,10 +29,52 @@ inputname = 'ANAT1_V0001.img' filename = os.path.join(os.path.split(__file__)[0], inputname) +# +# ---- co-registration and IO ---- +# + +def resize_image(imageG, imageF_mat): + # + # Fractional resample imageG to imageF size. 
+ # + Z = N.zeros(3, dtype=N.float64); + # get the zoom + Z[0] = imageG['mat'][0][0] / imageF_mat[0][0] + Z[1] = imageG['mat'][1][1] / imageF_mat[1][1] + Z[2] = imageG['mat'][2][2] / imageF_mat[2][2] + + # new volume dimensions (rounded) + D = N.zeros(3, dtype=N.int32); + D[0] = int(float(imageG['dim'][0])*Z[0]+0.5) + D[1] = int(float(imageG['dim'][1])*Z[1]+0.5) + D[2] = int(float(imageG['dim'][2])*Z[2]+0.5) + + M = N.eye(4, dtype=N.float64); + # for the test data, set the xyz voxel sizes for fMRI volume + M[0][0] = imageG['mat'][0][0]/Z[0] + M[1][1] = imageG['mat'][1][1]/Z[1] + M[2][2] = imageG['mat'][2][2]/Z[2] + + image = N.zeros(D[2]*D[1]*D[0], dtype=N.uint8).reshape(D[2], D[0], D[1]) + mode = 2 + scale = 0 + R.register_volume_resample(imageG['data'], image, Z, scale, mode) + F = N.zeros(3, dtype=N.float64); + zoom_image = {'data' : image, 'mat' : M, 'dim' : D, 'fwhm' : F} + + return zoom_image + def remap_image(image, parm_vector, resample='linear'): + # + # remap imageG to coordinates of imageF (creates imageG') + # use the 6 dim parm_vector (3 angles, 3 translations) to remap + # M_inverse = get_inverse_mappings(parm_vector) + (layers, rows, cols) = image['data'].shape # allocate the zero image - remaped_image = load_blank_image() + remaped_image = N.zeros(layers*rows*cols, dtype=N.uint8).reshape(layers, rows, cols) + remaped_image = {'data' : remaped_image, 'mat' : image['mat'], + 'dim' : image['dim'], 'fwhm' : image['fwhm']} imdata = build_structs(step=1) if resample == 'linear': @@ -117,12 +159,6 @@ return x -def test_image_filter(image, imdata, ftype=2): - # test the 3D image filter on an image. ftype 1 is SPM, ftype 2 is simple Gaussian - image['fwhm'] = build_fwhm(image['mat'], imdata['step']) - filt_image = filter_image_3D(image['data'], image['fwhm'], ftype) - return filt_image - def callback_powell(x): print 'Parameter Vector from Powell: - ' print x @@ -133,25 +169,6 @@ print x return -def test_alignment(image1, image2, imdata, method='ncc', lite=0, smhist=0, - alpha=0.0, beta=0.0, gamma=0.0, ret_histo=0): - - # to test the cost function and view the joint histogram - # for 2 images. 
used for debug - imdata['parms'][0] = alpha - imdata['parms'][1] = beta - imdata['parms'][2] = gamma - M = build_rotate_matrix(imdata['parms']) - optfunc_args = (image1, image2, imdata['step'], imdata['fwhm'], lite, smhist, method, ret_histo) - - if ret_histo: - cost, joint_histogram = optimize_function(imdata['parms'], optfunc_args) - return cost, joint_histogram - else: - cost = optimize_function(imdata['parms'], optfunc_args) - return cost - - def smooth_kernel(fwhm, x, ktype=1): eps = 0.00001 s = N.square((fwhm/math.sqrt(8.0*math.log(2.0)))) + eps @@ -196,8 +213,9 @@ Tx=0.0, Ty=0.0, Tz=0.0, stepsize=1): # takes an image and 3D rotate using trilinear interpolation - image1 = load_anatMRI_image() - image2 = load_blank_image() + anat_desc = load_anatMRI_desc() + image1 = load_volume(anat_desc, imagename='ANAT1_V0001.img') + image2 = load_volume(anat_desc, imagename=None) imdata = build_structs(step=stepsize) imdata['parms'][0] = alpha imdata['parms'][1] = beta @@ -252,9 +270,13 @@ # rot_matrix is the 4x4 constructed (current angles and translates) transform matrix # sample_vector is the subsample vector for x-y-z + # F_inv = N.linalg.inv(image_F['mat']) + # composite = N.dot(F_inv, rot_matrix) + # composite = N.dot(composite, image_G['mat']) F_inv = N.linalg.inv(image_F['mat']) - composite = N.dot(F_inv, rot_matrix) - composite = N.dot(composite, image_G['mat']) + composite = N.dot(F_inv, image_G['mat']) + composite = N.dot(composite, rot_matrix) + #print ' composite ', composite # allocate memory from Python as memory leaks when created in C-ext joint_histogram = N.zeros([256, 256], dtype=N.float64); @@ -334,7 +356,7 @@ return cost -def build_structs(step=2): +def build_structs(step=1): # build image data structures here P = N.zeros(6, dtype=N.float64); T = N.zeros(6, dtype=N.float64); @@ -413,59 +435,36 @@ return rot_matrix -def load_fMRI_image(imagename, rows=64, cols=64, layers=28, threshold=0.999, debug=0): - # un-scaled images - ImageVolume = N.fromfile(imagename, dtype=N.uint16).reshape(layers, rows, cols); +def load_volume(imagedesc, imagename=None, threshold=0.999, debug=0): + # imagename of none means to create a blank image + if imagename == None: + ImageVolume = N.zeros(imagedesc['layers']*imagedesc['rows']*imagedesc['cols'], + dtype=N.uint16).reshape(imagedesc['layers'], imagedesc['rows'], imagedesc['cols']) + else: + ImageVolume = N.fromfile(imagename, + dtype=N.uint16).reshape(imagedesc['layers'], imagedesc['rows'], imagedesc['cols']); + + # the mat (voxel to physical) matrix M = N.eye(4, dtype=N.float64); - # for the test data, set the xyz voxel sizes for fMRI volume - M[0][0] = 3.75 - M[1][1] = 3.75 - M[2][2] = 5.0 + # for now just the sample size (mm units) in x, y and z + M[0][0] = imagedesc['sample_x'] + M[1][1] = imagedesc['sample_y'] + M[2][2] = imagedesc['sample_z'] # dimensions D = N.zeros(3, dtype=N.int32); # Gaussian kernel - fill in with build_fwhm() F = N.zeros(3, dtype=N.float64); - D[0] = rows - D[1] = cols - D[2] = layers - max = ImageVolume.max() - min = ImageVolume.min() - ih = N.zeros(max-min+1, dtype=N.float64); - h = N.zeros(max-min+1, dtype=N.float64); - if threshold <= 0: - threshold = 0.999 - elif threshold > 1.0: - threshold = 1.0 - # get the integrated histogram of the volume and get max from - # the threshold crossing in the integrated histogram - index = R.register_image_threshold(ImageVolume, h, ih, threshold) - scale = 255.0 / (index-min) - # generate the scaled 8 bit image - images = (scale*(ImageVolume.astype(N.float)-min)) - 
images[images>255] = 255 - # the data type is now uchar - image = {'data' : images.astype(N.uint8), 'mat' : M, 'dim' : D, 'fwhm' : F} - if debug == 1: - return image, h, ih, index - else: + D[0] = imagedesc['rows'] + D[1] = imagedesc['cols'] + D[2] = imagedesc['layers'] + + if imagename == None: + # no voxels to scale to 8 bits + ImageVolume = ImageVolume.astype(N.uint8) + image = {'data' : ImageVolume, 'mat' : M, 'dim' : D, 'fwhm' : F} return image - -def load_anatMRI_image(imagename=filename, rows=256, cols=256, layers=90, threshold=0.999, debug=0): - # un-scaled images - ImageVolume = N.fromfile(imagename, dtype=N.uint16).reshape(layers, rows, cols); - M = N.eye(4, dtype=N.float64); - # for the test data, set the xyz voxel sizes for anat-MRI volume - M[0][0] = 0.9375 - M[1][1] = 0.9375 - M[2][2] = 1.5 - # dimensions - D = N.zeros(3, dtype=N.int32); - # Gaussian kernel - fill in with build_fwhm() - F = N.zeros(3, dtype=N.float64); - D[0] = rows - D[1] = cols - D[2] = layers + # 8 bit scale with threshold clip of the volume integrated histogram max = ImageVolume.max() min = ImageVolume.min() ih = N.zeros(max-min+1, dtype=N.float64); @@ -481,78 +480,150 @@ # generate the scaled 8 bit image images = (scale*(ImageVolume.astype(N.float)-min)) images[images>255] = 255 - # the data type is now uchar image = {'data' : images.astype(N.uint8), 'mat' : M, 'dim' : D, 'fwhm' : F} if debug == 1: return image, h, ih, index else: return image +def load_anatMRI_desc(): + # this is for demo on the test MRI and fMRI volumes + rows = 256 + cols = 256 + layers = 90 + xsamp = 0.9375 + ysamp = 0.9375 + zsamp = 1.5 + desc = {'rows' : rows, 'cols' : cols, 'layers' : layers, + 'sample_x' : xsamp, 'sample_y' : ysamp, 'sample_z' : zsamp} + return desc -def load_blank_image(rows=256, cols=256, layers=90): - ImageVolume = N.zeros(layers*rows*cols, dtype=N.uint16).reshape(layers, rows, cols); - # voxel to pixel is identity for this simulation using anatomical MRI volume - # 4x4 matrix - M = N.eye(4, dtype=N.float64); - # for the test data, set the xyz voxel sizes for anat-MRI volume - M[0][0] = 0.9375 - M[1][1] = 0.9375 - M[2][2] = 1.5 - # dimensions - D = N.zeros(3, dtype=N.int32); - # Gaussian kernel - fill in with build_fwhm() - F = N.zeros(3, dtype=N.float64); - D[0] = rows - D[1] = cols - D[2] = layers - # make sure the data type is uchar - ImageVolume = ImageVolume.astype(N.uint8) - image = {'data' : ImageVolume, 'mat' : M, 'dim' : D, 'fwhm' : F} - return image +def load_fMRI_desc(): + # this is for demo on the test MRI and fMRI volumes + rows = 64 + cols = 64 + layers = 28 + xsamp = 3.75 + ysamp = 3.75 + zsamp = 5.0 + desc = {'rows' : rows, 'cols' : cols, 'layers' : layers, + 'sample_x' : xsamp, 'sample_y' : ysamp, 'sample_z' : zsamp} + return desc def read_fMRI_directory(path): files_fMRI = glob.glob(path) return files_fMRI +def build_aligned_fMRI_mean_volume(path): + desc = load_fMRI_desc() + ave_fMRI_volume = N.zeros(desc['layers']*desc['rows']*desc['cols'], + dtype=N.float64).reshape(desc['layers'], desc['rows'], desc['cols']) + data = read_fMRI_directory(path) + count = 0 + for i in data: + print 'add volume ', i + # this uses integrated histogram normalization + image = load_volume(desc, i) + # co-reg successive pairs, then remap and ave + ave_fMRI_volume = ave_fMRI_volume + image['data'].astype(N.float64) + count = count + 1 -def get_test_rotated_images(alpha=0.0, beta=0.0, gamma=0.0): - image1 = load_anatMRI_image() - image2 = load_blank_image() - imdata = build_structs(step=1) - # allow the G image to 
be rotated for testing - imdata['parms'][0] = alpha - imdata['parms'][1] = beta - imdata['parms'][2] = gamma - image1['fwhm'] = build_fwhm(image1['mat'], imdata['step']) - image2['fwhm'] = build_fwhm(image2['mat'], imdata['step']) - M = build_rotate_matrix(imdata['parms']) - R.register_linear_resample(image1['data'], image2['data'], M, imdata['step']) - return image1, image2, imdata + ave_fMRI_volume = ave_fMRI_volume / float(count) -def get_test_scaled_images(scale=4.0): - # this is for coreg MRI / fMRI test - image1 = load_anatMRI_image() - image2 = build_scale_image(image1, scale) - imdata = build_structs(step=1) - # allow the G image to be rotated for testing - image1['fwhm'] = build_fwhm(image1['mat'], imdata['step']) - image2['fwhm'] = build_fwhm(image2['mat'], imdata['step']) - M = build_rotate_matrix(imdata['parms']) - R.register_linear_resample(image1['data'], image2['data'], M, imdata['step']) - return image1, image2, imdata + return ave_fMRI_volume +# +# ---- testing/debug routines ---- +# def build_scale_image(image, scale): (layers, rows, cols) = image['data'].shape - image2 = image['data'][0:layers:scale, 0:rows:scale, 0:cols:scale] + # + # rescale the 'mat' (voxel to physical mapping matrix) + # M = image['mat'] * scale # dimensions D = N.zeros(3, dtype=N.int32); # Gaussian kernel - fill in with build_fwhm() F = N.zeros(3, dtype=N.float64); + Z = N.zeros(3, dtype=N.float64); D[0] = rows/scale D[1] = cols/scale D[2] = layers/scale + image2 = N.zeros(D[2]*D[1]*D[0], dtype=N.uint8).reshape(D[2], D[0], D[1]); + mode = 1; + R.register_volume_resample(image['data'], image2, Z, scale, mode) scaled_image = {'data' : image2, 'mat' : M, 'dim' : D, 'fwhm' : F} return scaled_image + +def get_test_MRI_volumes(scale=2, alpha=3.0, beta=4.0, gamma=5.0): + # + # this is for coreg MRI / fMRI scale test. The volume is anatomical MRI. + # the image is rotated in 3D. after rotation the image is scaled. + # + + anat_desc = load_anatMRI_desc() + image1 = load_volume(anat_desc, imagename='ANAT1_V0001.img') + image2 = load_volume(anat_desc, imagename=None) + imdata = build_structs(step=1) + image1['fwhm'] = build_fwhm(image1['mat'], imdata['step']) + image2['fwhm'] = build_fwhm(image2['mat'], imdata['step']) + imdata['parms'][0] = alpha + imdata['parms'][1] = beta + imdata['parms'][2] = gamma + M = build_rotate_matrix(imdata['parms']) + R.register_linear_resample(image1['data'], image2['data'], M, imdata['step']) + image3 = build_scale_image(image2, scale) + return image1, image3, imdata + +def get_test_fMRI_rotated(fMRIVol, alpha=3.0, beta=4.0, gamma=5.0): + # + # return rotated fMRIVol + # + + desc = load_fMRI_desc() + image = load_volume(desc, imagename=None) + imdata = build_structs(step=1) + image['fwhm'] = build_fwhm(image['mat'], imdata['step']) + imdata['parms'][0] = alpha + imdata['parms'][1] = beta + imdata['parms'][2] = gamma + M = build_rotate_matrix(imdata['parms']) + R.register_cubic_resample(fMRIVol['data'], image['data'], M, imdata['step']) + return image + + +def test_image_filter(image, imdata, ftype=2): + # + # test the 3D image filter on an image. ftype 1 is SPM, ftype 2 is simple Gaussian + # + image['fwhm'] = build_fwhm(image['mat'], imdata['step']) + filt_image = filter_image_3D(image['data'], image['fwhm'], ftype) + return filt_image + + +def test_alignment(image1, image2, imdata, method='ncc', lite=0, smhist=0, + alpha=0.0, beta=0.0, gamma=0.0, Tx=0, Ty=0, Tz=0, ret_histo=0): + + # + # to test the cost function and view the joint histogram + # for 2 images. 
used for debug + # + imdata['parms'][0] = alpha + imdata['parms'][1] = beta + imdata['parms'][2] = gamma + imdata['parms'][3] = Tx + imdata['parms'][4] = Ty + imdata['parms'][5] = Tz + M = build_rotate_matrix(imdata['parms']) + optfunc_args = (image1, image2, imdata['step'], imdata['fwhm'], lite, smhist, method, ret_histo) + + if ret_histo: + cost, joint_histogram = optimize_function(imdata['parms'], optfunc_args) + return cost, joint_histogram + else: + cost = optimize_function(imdata['parms'], optfunc_args) + return cost + +
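
For readers following the r3965 diff above: NI_VolumeResample resamples the volume one axis at a time (columns, then rows, then layers), and each fractional pass evaluates a four-point cubic polynomial value = Y[1] + C*(ps1 + C*(ps2 + C*ps3)), with ps1 = Y[2] - Y[0], ps2 = 2*(Y[0] - Y[1]) + Y[2] - Y[3], ps3 = -Y[0] + Y[1] - Y[2] + Y[3]; the polynomial equals Y[1] at C = 0 and Y[2] at C = 1, so it interpolates between the two central samples. The NumPy sketch below mirrors one such 1-D pass. It is illustrative only and not part of the committed code: the helper name cubic_resample_1d is invented, the edge handling replicates border samples rather than the zero-filled work buffer the C code allocates, and destination index 0 is simply copied from the source.

import numpy as np

def cubic_resample_1d(samples, zoom):
    """Sketch of one 1-D cubic-convolution resampling pass (assumes edge replication)."""
    samples = np.asarray(samples, dtype=np.float64)
    # pad so the four-sample window i-1 .. i+2 is always in range
    padded = np.concatenate(([samples[0]], samples, [samples[-1]], [samples[-1]]))
    n_out = int(len(samples) * zoom)
    out = np.empty(n_out, dtype=np.float64)
    out[0] = samples[0]                     # the C loop starts at destination index 1
    step = 1.0 / zoom                       # reSampler = 1.0 / Z[axis] in the C code
    for k in range(1, n_out):
        xp = k * step                       # source-grid position (RLUT[k])
        i = int(xp)
        c = xp - i                          # fractional offset within the cell
        y0, y1, y2, y3 = padded[i], padded[i + 1], padded[i + 2], padded[i + 3]
        ps1 = y2 - y0
        ps2 = 2.0 * (y0 - y1) + y2 - y3
        ps3 = -y0 + y1 - y2 + y3
        val = y1 + c * (ps1 + c * (ps2 + c * ps3))
        out[k] = min(max(val, 0.0), 255.0)  # clip to the uint8 range, as the C code does
    return out

# e.g. upsampling a short ramp by 4, roughly the anat/fMRI voxel-size ratio used in the demos
print(cubic_resample_1d(np.arange(10, dtype=float), zoom=4.0))
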