From scipy-svn at scipy.org Wed Dec 1 01:07:30 2010 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Wed, 1 Dec 2010 00:07:30 -0600 (CST) Subject: [Scipy-svn] r6983 - in trunk/scipy/ndimage: . tests Message-ID: <20101201060730.7ECAD37D3CF@scipy.org> Author: warren.weckesser Date: 2010-12-01 00:07:29 -0600 (Wed, 01 Dec 2010) New Revision: 6983 Modified: trunk/scipy/ndimage/measurements.py trunk/scipy/ndimage/tests/test_measurements.py Log: BUG: ndimage: correctly deal with 'labels' data types that are compatible with bincount (tickets #1254 and #1242). Use numpy.unique instead of numpy.unique1d. Modified: trunk/scipy/ndimage/measurements.py =================================================================== --- trunk/scipy/ndimage/measurements.py 2010-11-29 23:41:08 UTC (rev 6982) +++ trunk/scipy/ndimage/measurements.py 2010-12-01 06:07:29 UTC (rev 6983) @@ -374,6 +374,13 @@ return output +def _safely_castable_to_int(dt): + """Test whether the numpy data type `dt` can be safely cast to an int.""" + int_size = np.dtype(int).itemsize + safe = ((np.issubdtype(dt, int) and dt.itemsize <= int_size) or + (np.issubdtype(dt, np.unsignedinteger) and dt.itemsize < int_size)) + return safe + def _stats(input, labels=None, index=None, centered=False): '''returns count, sum, and optionally (sum - centre)^2 by label''' @@ -396,36 +403,35 @@ if numpy.isscalar(index): return single_group(input[labels == index]) - counts = numpy.bincount(labels.ravel()) - sums = numpy.bincount(labels.ravel(), weights=input.ravel()) - def _sum_centered(labels): means = sums / counts centered_input = input - means[labels] - return numpy.bincount(labels, + bc = numpy.bincount(labels, weights=(centered_input * \ centered_input.conjugate()).ravel()) + return bc - # remap labels to unique integers if necessary, or if the largest + # Remap labels to unique integers if necessary, or if the largest # label is larger than the number of values. - if not numpy.issubdtype(labels.dtype, (numpy.int, np.unsignedinteger)) or \ - (labels.min() < 0) or (labels.max() > labels.size): - unique_labels, new_labels = numpy.unique1d(labels, return_inverse=True) - + if (not _safely_castable_to_int(labels.dtype) or + labels.min() < 0 or labels.max() > labels.size): + unique_labels, new_labels = numpy.unique(labels, return_inverse=True) + counts = numpy.bincount(new_labels) + sums = numpy.bincount(new_labels, weights=input.ravel()) if centered: - sums_c, sums, counts = _sum_centered(new_labels) - + sums_c = _sum_centered(new_labels) idxs = numpy.searchsorted(unique_labels, index) # make all of idxs valid idxs[idxs >= unique_labels.size] = 0 found = (unique_labels[idxs] == index) else: - # labels are an integer type, and there aren't too many, so - # call bincount directly. + # labels are an integer type allowed by bincount, and there aren't too + # many, so call bincount directly. 
+ counts = numpy.bincount(labels.ravel()) + sums = numpy.bincount(labels.ravel(), weights=input.ravel()) if centered: sums_c = _sum_centered(labels.ravel()) - # make sure all index values are valid idxs = numpy.asanyarray(index, numpy.int).copy() found = (idxs >= 0) & (idxs < counts.size) @@ -645,6 +651,7 @@ def _select(input, labels = None, index = None, find_min=False, find_max=False, find_min_positions=False, find_max_positions=False): '''returns min, max, or both, plus positions if requested''' + input = numpy.asanyarray(input) find_positions = find_min_positions or find_max_positions positions = None @@ -691,10 +698,10 @@ # remap labels to unique integers if necessary, or if the largest # label is larger than the number of values. - if ((not numpy.issubdtype(labels.dtype, numpy.int)) or - (labels.min() < 0) or (labels.max() > labels.size)): + if (_safely_castable_to_int(labels.dtype) or + labels.min() < 0 or labels.max() > labels.size): # remap labels, and indexes - unique_labels, labels = numpy.unique1d(labels, return_inverse=True) + unique_labels, labels = numpy.unique(labels, return_inverse=True) idxs = numpy.searchsorted(unique_labels, index) # make all of idxs valid Modified: trunk/scipy/ndimage/tests/test_measurements.py =================================================================== --- trunk/scipy/ndimage/tests/test_measurements.py 2010-11-29 23:41:08 UTC (rev 6982) +++ trunk/scipy/ndimage/tests/test_measurements.py 2010-12-01 06:07:29 UTC (rev 6983) @@ -1,6 +1,6 @@ from numpy.testing import assert_, assert_array_almost_equal, assert_equal, \ - assert_almost_equal, \ - run_module_suite + assert_almost_equal, assert_array_equal, \ + run_module_suite, TestCase import numpy as np import scipy.ndimage as ndimage @@ -10,6 +10,90 @@ np.int64, np.uint64, np.float32, np.float64] + +class Test_measurements_stats(TestCase): + """ndimage.measurements._stats() is a utility function used by other functions.""" + + def test_a(self): + x = [0,1,2,6] + labels = [0,0,1,1] + index = [0,1] + counts, sums = ndimage.measurements._stats(x, labels=labels, index=index) + assert_array_equal(counts, [2, 2]) + assert_array_equal(sums, [1.0, 8.0]) + + def test_b(self): + # Same data as test_a, but different labels. The label 9 exceeds the + # length of 'labels', so this test will follow a different code path. 
+ x = [0,1,2,6] + labels = [0,0,9,9] + index = [0,9] + counts, sums = ndimage.measurements._stats(x, labels=labels, index=index) + assert_array_equal(counts, [2, 2]) + assert_array_equal(sums, [1.0, 8.0]) + + def test_a_centered(self): + x = [0,1,2,6] + labels = [0,0,1,1] + index = [0,1] + counts, sums, centers = ndimage.measurements._stats(x, labels=labels, + index=index, centered=True) + assert_array_equal(counts, [2, 2]) + assert_array_equal(sums, [1.0, 8.0]) + assert_array_equal(centers, [0.5, 8.0]) + + def test_b_centered(self): + x = [0,1,2,6] + labels = [0,0,9,9] + index = [0,9] + counts, sums, centers = ndimage.measurements._stats(x, labels=labels, + index=index, centered=True) + assert_array_equal(counts, [2, 2]) + assert_array_equal(sums, [1.0, 8.0]) + assert_array_equal(centers, [0.5, 8.0]) + + def test_nonint_labels(self): + x = [0,1,2,6] + labels = [0.0, 0.0, 9.0, 9.0] + index = [0.0, 9.0] + counts, sums, centers = ndimage.measurements._stats(x, labels=labels, + index=index, centered=True) + assert_array_equal(counts, [2, 2]) + assert_array_equal(sums, [1.0, 8.0]) + assert_array_equal(centers, [0.5, 8.0]) + + +class Test_measurements_select(TestCase): + """ndimage.measurements._select() is a utility function used by other functions.""" + + def test_basic(self): + x = [0,1,6,2] + cases = [ + ([0,0,1,1], [0,1]), # "Small" integer labels + ([0,0,9,9], [0,9]), # A label larger than len(labels) + ([0.0,0.0,7.0,7.0], [0.0, 7.0]), # Non-integer labels + ] + for labels, index in cases: + result = ndimage.measurements._select(x, labels=labels, index=index) + assert_(len(result) == 0) + result = ndimage.measurements._select(x, labels=labels, index=index, find_max=True) + assert_(len(result) == 1) + assert_array_equal(result[0], [1, 6]) + result = ndimage.measurements._select(x, labels=labels, index=index, find_min=True) + assert_(len(result) == 1) + assert_array_equal(result[0], [0, 2]) + result = ndimage.measurements._select(x, labels=labels, index=index, + find_min=True, find_min_positions=True) + assert_(len(result) == 2) + assert_array_equal(result[0], [0, 2]) + assert_array_equal(result[1], [0, 3]) + result = ndimage.measurements._select(x, labels=labels, index=index, + find_max=True, find_max_positions=True) + assert_(len(result) == 2) + assert_array_equal(result[0], [1, 6]) + assert_array_equal(result[1], [1, 2]) + + def test_label01(): "label 1" data = np.ones([]) From scipy-svn at scipy.org Fri Dec 3 01:56:21 2010 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Fri, 3 Dec 2010 00:56:21 -0600 (CST) Subject: [Scipy-svn] r6984 - in trunk/scipy/stats: . tests Message-ID: <20101203065621.3501137A196@scipy.org> Author: warren.weckesser Date: 2010-12-03 00:56:20 -0600 (Fri, 03 Dec 2010) New Revision: 6984 Modified: trunk/scipy/stats/stats.py trunk/scipy/stats/tests/test_stats.py Log: BUG: stats: Eliminate the 'TINY' hack in pearsonr, so perfect correlations return probability=0. Also allow for numerical errors that could make the computed value of r be less than -1. Modified: trunk/scipy/stats/stats.py =================================================================== --- trunk/scipy/stats/stats.py 2010-12-01 06:07:29 UTC (rev 6983) +++ trunk/scipy/stats/stats.py 2010-12-03 06:56:20 UTC (rev 6984) @@ -2198,20 +2198,18 @@ r_den = n*np.sqrt(ss(xm)*ss(ym)) r = (r_num / r_den) - # Presumably, if r > 1, then it is only some small artifact of floating + # Presumably, if abs(r) > 1, then it is only some small artifact of floating # point arithmetic. 
- r = min(r, 1.0) + r = max(min(r, 1.0), -1.0) df = n-2 + if abs(r) == 1.0: + prob = 0.0 + else: + t = r * np.sqrt(df / ((1.0 - r) * (1.0 + r))) + prob = betai(0.5*df, 0.5, df / (df + t*t)) + return r, prob - # Use a small floating point value to prevent divide-by-zero nonsense - # fixme: TINY is probably not the right value and this is probably not - # the way to be robust. The scheme used in spearmanr is probably better. - TINY = 1.0e-20 - t = r*np.sqrt(df/((1.0-r+TINY)*(1.0+r+TINY))) - prob = betai(0.5*df,0.5,df/(df+t*t)) - return r,prob - def fisher_exact(table) : """Performs a Fisher exact test on a 2x2 contingency table. Modified: trunk/scipy/stats/tests/test_stats.py =================================================================== --- trunk/scipy/stats/tests/test_stats.py 2010-12-01 06:07:29 UTC (rev 6983) +++ trunk/scipy/stats/tests/test_stats.py 2010-12-03 06:56:20 UTC (rev 6984) @@ -319,7 +319,20 @@ r = y[0] assert_approx_equal(r,1.0) + def test_r_exactly_pos1(self): + a = arange(3.0) + b = a + r, prob = stats.pearsonr(a,b) + assert_equal(r, 1.0) + assert_equal(prob, 0.0) + def test_r_exactly_neg1(self): + a = arange(3.0) + b = -a + r, prob = stats.pearsonr(a,b) + assert_equal(r, -1.0) + assert_equal(prob, 0.0) + def test_fisher_exact(): """Some tests to show that fisher_exact() works correctly. From scipy-svn at scipy.org Fri Dec 3 02:21:30 2010 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Fri, 3 Dec 2010 01:21:30 -0600 (CST) Subject: [Scipy-svn] r6985 - trunk/scipy/stats Message-ID: <20101203072130.EEA3837A551@scipy.org> Author: warren.weckesser Date: 2010-12-03 01:21:30 -0600 (Fri, 03 Dec 2010) New Revision: 6985 Modified: trunk/scipy/stats/stats.py Log: ENH: stats: Slight optimization in pearsonr--avoid a call of sqrt() Modified: trunk/scipy/stats/stats.py =================================================================== --- trunk/scipy/stats/stats.py 2010-12-03 06:56:20 UTC (rev 6984) +++ trunk/scipy/stats/stats.py 2010-12-03 07:21:30 UTC (rev 6985) @@ -2205,8 +2205,8 @@ if abs(r) == 1.0: prob = 0.0 else: - t = r * np.sqrt(df / ((1.0 - r) * (1.0 + r))) - prob = betai(0.5*df, 0.5, df / (df + t*t)) + t_squared = r*r * (df / ((1.0 - r) * (1.0 + r))) + prob = betai(0.5*df, 0.5, df / (df + t_squared)) return r, prob From scipy-svn at scipy.org Sat Dec 4 15:24:50 2010 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Sat, 4 Dec 2010 14:24:50 -0600 (CST) Subject: [Scipy-svn] r6986 - trunk/scipy/sparse Message-ID: <20101204202450.7192732347@scipy.org> Author: ptvirtan Date: 2010-12-04 14:24:49 -0600 (Sat, 04 Dec 2010) New Revision: 6986 Modified: trunk/scipy/sparse/base.py Log: ENH: sparse: un-deprecate spmatrix.dot --- ndarrays also have the .dot() method meaning the matrix product, so sparse matrices should also have it Modified: trunk/scipy/sparse/base.py =================================================================== --- trunk/scipy/sparse/base.py 2010-12-03 07:21:30 UTC (rev 6985) +++ trunk/scipy/sparse/base.py 2010-12-04 20:24:49 UTC (rev 6986) @@ -247,7 +247,6 @@ def matmat(self,other): return self * other - @np.deprecate def dot(self, other): return self * other From scipy-svn at scipy.org Sat Dec 4 15:25:04 2010 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Sat, 4 Dec 2010 14:25:04 -0600 (CST) Subject: [Scipy-svn] r6987 - in trunk/scipy/sparse/linalg/eigen/arpack: . 
tests Message-ID: <20101204202504.B56AA323DD@scipy.org> Author: ptvirtan Date: 2010-12-04 14:25:03 -0600 (Sat, 04 Dec 2010) New Revision: 6987 Modified: trunk/scipy/sparse/linalg/eigen/arpack/arpack.py trunk/scipy/sparse/linalg/eigen/arpack/tests/test_arpack.py Log: BUG: sparse/arpack: fix ARPACK error handling Make exceptions instances of ArpackError. Raise ArpackNoConvergence always when no convergence is obtained. Modified: trunk/scipy/sparse/linalg/eigen/arpack/arpack.py =================================================================== --- trunk/scipy/sparse/linalg/eigen/arpack/arpack.py 2010-12-04 20:24:49 UTC (rev 6986) +++ trunk/scipy/sparse/linalg/eigen/arpack/arpack.py 2010-12-04 20:25:03 UTC (rev 6987) @@ -39,10 +39,8 @@ __docformat__ = "restructuredtext en" -__all___=['eigen','eigen_symmetric', 'svd'] +__all___=['eigen','eigen_symmetric', 'svd', 'ArpackNoConvergence'] -import warnings - import _arpack import numpy as np from scipy.sparse.linalg.interface import aslinearoperator @@ -51,13 +49,123 @@ _type_conv = {'f':'s', 'd':'d', 'F':'c', 'D':'z'} _ndigits = {'f':5, 'd':12, 'F':5, 'D':12} +_NAUPD_ERRORS = { + 0: "Normal exit.", + 1: "Maximum number of iterations taken. " + "All possible eigenvalues of OP has been found.", + 2: "No longer an informational error. Deprecated starting with " + "release 2 of ARPACK.", + 3: "No shifts could be applied during a cycle of the Implicitly " + "restarted Arnoldi iteration. One possibility is to increase " + "the size of NCV relative to NEV. ", + -1: "N must be positive.", + -2: "NEV must be positive.", + -3: "NCV must be greater than NEV and less than or equal to N.", + -4: "The maximum number of Arnoldi update iterations allowed " + "must be greater than zero.", + -5: "WHICH must be one of 'LM', 'SM', 'LA', 'SA' or 'BE'.", + -6: "BMAT must be one of 'I' or 'G'.", + -7: "Length of private work array WORKL is not sufficient.", + -8: "Error return from trid. eigenvalue calculation; " + "Informational error from LAPACK routine dsteqr .", + -9: "Starting vector is zero.", + -10: "IPARAM(7) must be 1,2,3,4,5.", + -11: "IPARAM(7) = 1 and BMAT = 'G' are incompatable.", + -12: "IPARAM(1) must be equal to 0 or 1.", + -13: "NEV and WHICH = 'BE' are incompatable. ", + -9999: "Could not build an Arnoldi factorization. " + "IPARAM(5) returns the size of the current Arnoldi " + "factorization. The user is advised to check that " + "enough workspace and array storage has been allocated.", +} + +_NEUPD_ERRORS = { + 0: "Normal exit.", + 1: "The Schur form computed by LAPACK routine dlahqr " + "could not be reordered by LAPACK routine dtrsen. " + "Re-enter subroutine dneupd with IPARAM(5)NCV and " + "increase the size of the arrays DR and DI to have " + "dimension at least dimension NCV and allocate at least NCV " + "columns for Z. NOTE: Not necessary if Z and V share " + "the same space. Please notify the authors if this error" + "occurs.", + -1: "N must be positive.", + -2: "NEV must be positive.", + -3: "NCV-NEV >= 2 and less than or equal to N.", + -5: "WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'", + -6: "BMAT must be one of 'I' or 'G'.", + -7: "Length of private work WORKL array is not sufficient.", + -8: "Error return from calculation of a real Schur form. " + "Informational error from LAPACK routine dlahqr .", + -9: "Error return from calculation of eigenvectors. 
" + "Informational error from LAPACK routine dtrevc.", + -10: "IPARAM(7) must be 1,2,3,4.", + -11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.", + -12: "HOWMNY = 'S' not yet implemented", + -13: "HOWMNY must be one of 'A' or 'P' if RVEC = .true.", + -14: "DNAUPD did not find any eigenvalues to sufficient " + "accuracy.", + -15: "DNEUPD got a different count of the number of converged " + "Ritz values than DNAUPD got. This indicates the user " + "probably made an error in passing data from DNAUPD to " + "DNEUPD or that the data was modified before entering " + "DNEUPD", +} + +_SEUPD_ERRORS = { + 0: "Normal exit.", + -1: "N must be positive.", + -2: "NEV must be positive.", + -3: "NCV must be greater than NEV and less than or equal to N.", + -5: "WHICH must be one of 'LM', 'SM', 'LA', 'SA' or 'BE'.", + -6: "BMAT must be one of 'I' or 'G'.", + -7: "Length of private work WORKL array is not sufficient.", + -8: ("Error return from trid. eigenvalue calculation; " + "Information error from LAPACK routine dsteqr."), + -9: "Starting vector is zero.", + -10: "IPARAM(7) must be 1,2,3,4,5.", + -11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.", + -12: "NEV and WHICH = 'BE' are incompatible.", + -14: "DSAUPD did not find any eigenvalues to sufficient accuracy.", + -15: "HOWMNY must be one of 'A' or 'S' if RVEC = .true.", + -16: "HOWMNY = 'S' not yet implemented", + -17: ("DSEUPD got a different count of the number of converged " + "Ritz values than DSAUPD got. This indicates the user " + "probably made an error in passing data from DSAUPD to " + "DSEUPD or that the data was modified before entering " + "DSEUPD.") +} + +class ArpackError(RuntimeError): + """ + ARPACK error + """ + def __init__(self, info, infodict=_NAUPD_ERRORS): + msg = infodict.get(info, "Unknown error") + RuntimeError.__init__(self, "ARPACK error %d: %s" % (info, msg)) + +class ArpackNoConvergence(ArpackError): + """ + ARPACK iteration did not converge + + Attributes + ---------- + eigenvalues : ndarray + Partial result. Converged eigenvalues. + eigenvectors : ndarray + Partial result. Converged eigenvectors. 
+ + """ + def __init__(self, msg, eigenvalues, eigenvectors): + ArpackError.__init__(self, -1, {-1: msg}) + self.eigenvalues = eigenvalues + self.eigenvectors = eigenvectors + class _ArpackParams(object): def __init__(self, n, k, tp, matvec, sigma=None, ncv=None, v0=None, maxiter=None, which="LM", tol=0): if k <= 0: raise ValueError("k must be positive, k=%d" % k) - if k == n: - raise ValueError("k must be less than rank(A), k=%d" % k) if maxiter is None: maxiter = n * 10 @@ -107,11 +215,26 @@ self.converged = False self.ido = 0 + def _raise_no_convergence(self): + msg = "No convergence (%d iterations, %d/%d eigenvectors converged)" + k_ok = self.iparam[4] + num_iter = self.iparam[2] + try: + ev, vec = self.extract(True) + except ArpackError, err: + msg = "%s [%s]" % (msg, err) + ev = np.zeros((0,)) + vec = np.zeros((self.n, 0)) + k_ok = 0 + raise ArpackNoConvergence(msg % (num_iter, k_ok, self.k), ev, vec) + class _SymmetricArpackParams(_ArpackParams): def __init__(self, n, k, tp, matvec, sigma=None, ncv=None, v0=None, maxiter=None, which="LM", tol=0): if not which in ['LM', 'SM', 'LA', 'SA', 'BE']: raise ValueError("which must be one of %s" % ' '.join(whiches)) + if k >= n: + raise ValueError("k must be less than rank(A), k=%d" % k) _ArpackParams.__init__(self, n, k, tp, matvec, sigma, ncv, v0, maxiter, which, tol) @@ -145,14 +268,13 @@ else: self.converged = True - if self.info < -1 : - raise RuntimeError("Error info=%d in arpack" % self.info) - elif self.info == -1: - warnings.warn("Maximum number of iterations taken: %s" % self.iparam[2]) + if self.info == 0: + pass + elif self.info == 1: + self._raise_no_convergence() + else: + raise ArpackError(self.info) - if self.iparam[4] < self.k: - warnings.warn("Only %d/%d eigenvectors converged" % (self.iparam[4], self.k)) - def extract(self, return_eigenvectors): rvec = return_eigenvectors ierr = 0 @@ -160,14 +282,18 @@ sselect = np.zeros(self.ncv, 'int') # unused sigma = 0.0 # no shifts, not implemented - d, z, info = self._arpack_extract(rvec, howmny, sselect, sigma, self.bmat, + d, z, ierr = self._arpack_extract(rvec, howmny, sselect, sigma, self.bmat, self.which, self.k, self.tol, self.resid, self.v, self.iparam[0:7], self.ipntr, self.workd[0:2*self.n], self.workl,ierr) if ierr != 0: - raise RuntimeError("Error info=%d in arpack" % params.info) + raise ArpackError(ierr, infodict=_SEUPD_ERRORS) + k_ok = self.iparam[4] + d = d[:k_ok] + z = z[:,:k_ok] + if return_eigenvectors: return d, z else: @@ -178,6 +304,8 @@ ncv=None, v0=None, maxiter=None, which="LM", tol=0): if not which in ["LM", "SM", "LR", "SR", "LI", "SI"]: raise ValueError("Parameter which must be one of %s" % ' '.join(whiches)) + if k >= n-1: + raise ValueError("k must be less than rank(A)-1, k=%d" % k) _ArpackParams.__init__(self, n, k, tp, matvec, sigma, ncv, v0, maxiter, which, tol) @@ -222,10 +350,12 @@ else: self.converged = True - if self.info < -1 : - raise RuntimeError("Error info=%d in arpack" % self.info) - elif self.info == -1: - warnings.warn("Maximum number of iterations taken: %s" % self.iparam[2]) + if self.info == 0: + pass + elif self.info == 1: + self._raise_no_convergence() + else: + raise ArpackError(self.info) def extract(self, return_eigenvectors): k, n = self.k, self.n @@ -241,13 +371,16 @@ dr = np.zeros(k+1, self.tp) di = np.zeros(k+1, self.tp) zr = np.zeros((n, k+1), self.tp) - dr, di, zr, self.info=\ + dr, di, zr, ierr=\ self._arpack_extract(return_eigenvectors, howmny, sselect, sigmar, sigmai, workev, self.bmat, self.which, k, self.tol, self.resid, 
self.v, self.iparam, self.ipntr, self.workd, self.workl, self.info) + if ierr != 0: + raise ArpackError(ierr, infodict=_NEUPD_ERRORS) + # The ARPACK nonsymmetric real and double interface (s,d)naupd return # eigenvalues and eigenvectors in real (float,double) arrays. @@ -294,16 +427,21 @@ else: # complex is so much simpler... - d, z, self.info =\ + d, z, ierr =\ self._arpack_extract(return_eigenvectors, howmny, sselect, sigmar, workev, self.bmat, self.which, k, self.tol, self.resid, self.v, self.iparam, self.ipntr, self.workd, self.workl, self.rwork, ierr) - if ierr != 0: - raise RuntimeError("Error info=%d in arpack" % info) + if ierr != 0: + raise ArpackError(ierr, infodict=_NEUPD_ERRORS) + k_ok = self.iparam[4] + d = d[:k_ok] + z = z[:,:k_ok] + + if return_eigenvectors: return d, z else: @@ -325,7 +463,9 @@ the matrix vector product A * x. The sparse matrix formats in scipy.sparse are appropriate for A. k : integer - The number of eigenvalues and eigenvectors desired + The number of eigenvalues and eigenvectors desired. + `k` must be smaller than N. It is not possible to compute all + eigenvectors of a matrix. Returns ------- @@ -363,9 +503,19 @@ Maximum number of Arnoldi update iterations allowed tol : float Relative accuracy for eigenvalues (stopping criterion) + The default value of 0 implies machine precision. return_eigenvectors : boolean Return eigenvectors (True) in addition to eigenvalues + Raises + ------ + ArpackNoConvergence + When the requested convergence is obtained. + + The currently converged eigenvalues and eigenvectors can be found + as ``eigenvalues`` and ``eigenvectors`` attributes of the exception + objed. + See Also -------- eigen_symmetric : eigenvalues and eigenvectors for symmetric matrix A @@ -445,7 +595,8 @@ ncv : integer The number of Lanczos vectors generated - ncv must be greater than k; it is recommended that ncv > 2*k + ncv must be greater than k and smaller than n; + it is recommended that ncv > 2*k which : string Which k eigenvectors and eigenvalues to find: @@ -460,11 +611,21 @@ Maximum number of Arnoldi update iterations allowed tol : float - Relative accuracy for eigenvalues (stopping criterion) + Relative accuracy for eigenvalues (stopping criterion). + The default value of 0 implies machine precision. return_eigenvectors : boolean Return eigenvectors (True) in addition to eigenvalues + Raises + ------ + ArpackNoConvergence + When the requested convergence is obtained. + + The currently converged eigenvalues and eigenvectors can be found + as ``eigenvalues`` and ``eigenvectors`` attributes of the exception + objed. 
+ See Also -------- eigen : eigenvalues and eigenvectors for a general (nonsymmetric) matrix A Modified: trunk/scipy/sparse/linalg/eigen/arpack/tests/test_arpack.py =================================================================== --- trunk/scipy/sparse/linalg/eigen/arpack/tests/test_arpack.py 2010-12-04 20:24:49 UTC (rev 6986) +++ trunk/scipy/sparse/linalg/eigen/arpack/tests/test_arpack.py 2010-12-04 20:25:03 UTC (rev 6987) @@ -11,8 +11,9 @@ assert_raises, verbose from numpy import array, finfo, argsort, dot, round, conj, random -from scipy.sparse import csc_matrix -from scipy.sparse.linalg.eigen.arpack import eigen_symmetric, eigen, svd +from scipy.sparse import csc_matrix, isspmatrix +from scipy.sparse.linalg.eigen.arpack import eigen_symmetric, eigen, svd, \ + ArpackNoConvergence from scipy.linalg import svd as dsvd @@ -132,6 +133,23 @@ self.eval_evec(self.symmetric[0],typ,k,which='LM',v0=v0) + def test_no_convergence(self): + np.random.seed(1234) + m = np.random.rand(30, 30) + m = m + m.T + try: + w, v = eigen_symmetric(m, 4, which='LM', v0=m[:,0], maxiter=5) + raise AssertionError("Spurious no-error exit") + except ArpackNoConvergence, err: + k = len(err.eigenvalues) + if k <= 0: + raise AssertionError("Spurious no-eigenvalues-found case") + w, v = err.eigenvalues, err.eigenvectors + for ww, vv in zip(w, v.T): + assert_array_almost_equal(dot(m, vv), ww*vv, + decimal=_ndigits['d']) + + class TestEigenComplexSymmetric(TestArpack): def sort_choose(self,eval,typ,k,which): @@ -173,6 +191,21 @@ self.eval_evec(self.symmetric[0],typ,k,which) + def test_no_convergence(self): + np.random.seed(1234) + m = np.random.rand(30, 30) + 1j*np.random.rand(30, 30) + try: + w, v = eigen(m, 3, which='LM', v0=m[:,0], maxiter=30) + raise AssertionError("Spurious no-error exit") + except ArpackNoConvergence, err: + k = len(err.eigenvalues) + if k <= 0: + raise AssertionError("Spurious no-eigenvalues-found case") + w, v = err.eigenvalues, err.eigenvectors + for ww, vv in zip(w, v.T): + assert_array_almost_equal(dot(m, vv), ww*vv, + decimal=_ndigits['D']) + class TestEigenNonSymmetric(TestArpack): @@ -231,9 +264,21 @@ v0 = random.rand(n).astype(typ) self.eval_evec(self.symmetric[0],typ,k,which='LM',v0=v0) + def test_no_convergence(self): + np.random.seed(1234) + m = np.random.rand(30, 30) + try: + w, v = eigen(m, 3, which='LM', v0=m[:,0], maxiter=30) + raise AssertionError("Spurious no-error exit") + except ArpackNoConvergence, err: + k = len(err.eigenvalues) + if k <= 0: + raise AssertionError("Spurious no-eigenvalues-found case") + w, v = err.eigenvalues, err.eigenvectors + for ww, vv in zip(w, v.T): + assert_array_almost_equal(dot(m, vv), ww*vv, + decimal=_ndigits['d']) - - class TestEigenComplexNonSymmetric(TestArpack): def sort_choose(self,eval,typ,k,which): @@ -287,6 +332,21 @@ self.eval_evec(m,typ,k,which) + def test_no_convergence(self): + np.random.seed(1234) + m = np.random.rand(30, 30) + 1j*np.random.rand(30, 30) + try: + w, v = eigen(m, 3, which='LM', v0=m[:,0], maxiter=30) + raise AssertionError("Spurious no-error exit") + except ArpackNoConvergence, err: + k = len(err.eigenvalues) + if k <= 0: + raise AssertionError("Spurious no-eigenvalues-found case") + w, v = err.eigenvalues, err.eigenvectors + for ww, vv in zip(w, v.T): + assert_array_almost_equal(dot(m, vv), ww*vv, + decimal=_ndigits['D']) + def test_eigen_bad_shapes(): # A is not square. 
A = csc_matrix(np.zeros((2,3))) From scipy-svn at scipy.org Sat Dec 4 15:25:14 2010 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Sat, 4 Dec 2010 14:25:14 -0600 (CST) Subject: [Scipy-svn] r6988 - trunk/scipy/sparse/linalg/eigen/arpack Message-ID: <20101204202514.9A10A37D3CE@scipy.org> Author: ptvirtan Date: 2010-12-04 14:25:14 -0600 (Sat, 04 Dec 2010) New Revision: 6988 Modified: trunk/scipy/sparse/linalg/eigen/arpack/arpack.py Log: BUG: sparse/arpack: fix bug in return value extraction from dneupd, when got less eigenvalues than desired Modified: trunk/scipy/sparse/linalg/eigen/arpack/arpack.py =================================================================== --- trunk/scipy/sparse/linalg/eigen/arpack/arpack.py 2010-12-04 20:25:03 UTC (rev 6987) +++ trunk/scipy/sparse/linalg/eigen/arpack/arpack.py 2010-12-04 20:25:14 UTC (rev 6988) @@ -390,12 +390,11 @@ # Arrange the eigenvectors: complex eigenvectors are stored as # real,imaginary in consecutive columns z = zr.astype(self.tp.upper()) - eps = np.finfo(self.tp).eps i = 0 while i<=k: # check if complex - if abs(d[i].imag) > eps: - # assume this is a complex conjugate pair with eigenvalues + if abs(d[i].imag) != 0: + # this is a complex conjugate pair with eigenvalues # in consecutive columns z[:,i] = zr[:,i] + 1.0j * zr[:,i+1] z[:,i+1] = z[:,i].conjugate() @@ -405,10 +404,12 @@ # Now we have k+1 possible eigenvalues and eigenvectors # Return the ones specified by the keyword "which" nreturned = self.iparam[4] # number of good eigenvalues returned - if nreturned == k: # we got exactly how many eigenvalues we wanted - d = d[:k] - z = z[:,:k] - else: # we got one extra eigenvalue (likely a cc pair, but which?) + if nreturned <= k: + # we got less or equal as many eigenvalues we wanted + d = d[:nreturned] + z = z[:,:nreturned] + else: + # we got one extra eigenvalue (likely a cc pair, but which?) # cut at approx precision for sorting rd = np.round(d, decimals = _ndigits[self.tp]) if self.which in ['LR','SR']: From scipy-svn at scipy.org Sat Dec 4 15:25:25 2010 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Sat, 4 Dec 2010 14:25:25 -0600 (CST) Subject: [Scipy-svn] r6989 - in trunk/scipy/sparse/linalg/eigen/arpack: . 
tests Message-ID: <20101204202525.7D0BA37D3CF@scipy.org> Author: ptvirtan Date: 2010-12-04 14:25:25 -0600 (Sat, 04 Dec 2010) New Revision: 6989 Removed: trunk/scipy/sparse/linalg/eigen/arpack/speigs.py trunk/scipy/sparse/linalg/eigen/arpack/tests/test_speigs.py Modified: trunk/scipy/sparse/linalg/eigen/arpack/__init__.py Log: DEP: sparse/arpack: remove a duplicate ARPACK interface Modified: trunk/scipy/sparse/linalg/eigen/arpack/__init__.py =================================================================== --- trunk/scipy/sparse/linalg/eigen/arpack/__init__.py 2010-12-04 20:25:14 UTC (rev 6988) +++ trunk/scipy/sparse/linalg/eigen/arpack/__init__.py 2010-12-04 20:25:25 UTC (rev 6989) @@ -1,3 +1,2 @@ from info import __doc__ from arpack import * -import speigs Deleted: trunk/scipy/sparse/linalg/eigen/arpack/speigs.py =================================================================== --- trunk/scipy/sparse/linalg/eigen/arpack/speigs.py 2010-12-04 20:25:14 UTC (rev 6988) +++ trunk/scipy/sparse/linalg/eigen/arpack/speigs.py 2010-12-04 20:25:25 UTC (rev 6989) @@ -1,224 +0,0 @@ -import numpy as np -import _arpack - -__all___=['ArpackException','ARPACK_eigs', 'ARPACK_gen_eigs'] - -class ArpackException(RuntimeError): - ARPACKErrors = { 0: """Normal exit.""", - 3: """No shifts could be applied during a cycle of the - Implicitly restarted Arnoldi iteration. One possibility - is to increase the size of NCV relative to NEV.""", - -1: """N must be positive.""", - -2: """NEV must be positive.""", - -3: """NCV-NEV >= 2 and less than or equal to N.""", - -4: """The maximum number of Arnoldi update iteration - must be greater than zero.""", - -5: """WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'""", - -6: """BMAT must be one of 'I' or 'G'.""", - -7: """Length of private work array is not sufficient.""", - -8: """Error return from LAPACK eigenvalue calculation;""", - -9: """Starting vector is zero.""", - -10: """IPARAM(7) must be 1,2,3,4.""", - -11: """IPARAM(7) = 1 and BMAT = 'G' are incompatable.""", - -12: """IPARAM(1) must be equal to 0 or 1.""", - -9999: """Could not build an Arnoldi factorization. - IPARAM(5) returns the size of the current Arnoldi - factorization.""", - } - def __init__(self, info): - self.info = info - def __str__(self): - try: return self.ARPACKErrors[self.info] - except KeyError: return "Unknown ARPACK error" - -def check_init(n, nev, ncv): - assert(nev <= n-4) # ARPACK seems to cause a segfault otherwise - if ncv is None: - ncv = min(2*nev+1, n-1) - maxitr = max(n, 1000) # Maximum number of iterations - return ncv, maxitr - -def init_workspaces(n,nev,ncv): - ipntr = np.zeros(14, np.int32) # Pointers into memory structure used by F77 calls - d = np.zeros((ncv, 3), np.float64, order='FORTRAN') # Temp workspace - # Temp workspace/error residuals upon iteration completion - resid = np.zeros(n, np.float64) - workd = np.zeros(3*n, np.float64) # workspace - workl = np.zeros(3*ncv*ncv+6*ncv, np.float64) # more workspace - # Storage for the Arnoldi basis vectors - v = np.zeros((n, ncv), dtype=np.float64, order='FORTRAN') - return (ipntr, d, resid, workd, workl, v) - -def init_debug(): - # Causes various debug info to be printed by ARPACK - _arpack.debug.ndigit = -3 - _arpack.debug.logfil = 6 - _arpack.debug.mnaitr = 0 - _arpack.debug.mnapps = 0 - _arpack.debug.mnaupd = 1 - _arpack.debug.mnaup2 = 0 - _arpack.debug.mneigh = 0 - _arpack.debug.mneupd = 1 - -def init_postproc_workspace(n, nev, ncv): - # Used as workspace and to return eigenvectors if requested. 
Not touched if - # eigenvectors are not requested - workev = np.zeros(3*ncv, np.float64) # More workspace - select = np.zeros(ncv, np.int32) # Used as internal workspace since dneupd - # parameter HOWMNY == 'A' - return (workev, select) - -def postproc(n, nev, ncv, sigmar, sigmai, bmat, which, - tol, resid, v, iparam, ipntr, workd, workl, info): - workev, select = init_postproc_workspace(n, nev, ncv) - ierr = 0 - # Postprocess the Arnouldi vectors to extract eigenvalues/vectors - # If dneupd's first paramter is 'True' the eigenvectors are also calculated, - # 'False' only the eigenvalues - dr,di,z,info = _arpack.dneupd( - True, 'A', select, sigmar, sigmai, workev, bmat, which, nev, tol, resid, v, - iparam, ipntr, workd, workl, info) - - if np.abs(di[:-1]).max() == 0: dr = dr[:-1] - else: dr = dr[:-1] + 1j*di[:-1] - return (dr, z[:,:-1]) - - -def ARPACK_eigs(matvec, n, nev, which='SM', ncv=None, tol=1e-14): - """ - Calculate eigenvalues for system with matrix-vector product matvec, dimension n - - Arguments - ========= - matvec -- Function that provides matrix-vector product, i.e. matvec(x) -> A*x - n -- Matrix dimension of the problem - nev -- Number of eigenvalues to calculate - which -- Spectrum selection. See details below. Defaults to 'SM' - ncv -- Number of Arnoldi basisvectors to use. If None, default to 2*nev+1 - tol -- Numerical tollerance for Arnouldi iteration convergence. Defaults to 1e-14 - - Spectrum Selection - ================== - which can take one of several values: - - 'LM' -> Request eigenvalues with largest magnitude. - 'SM' -> Request eigenvalues with smallest magnitude. - 'LR' -> Request eigenvalues with largest real part. - 'SR' -> Request eigenvalues with smallest real part. - 'LI' -> Request eigenvalues with largest imaginary part. - 'SI' -> Request eigenvalues with smallest imaginary part. - - Return Values - ============= - (eig_vals, eig_vecs) where eig_vals are the requested eigenvalues and - eig_vecs the corresponding eigenvectors. If all the eigenvalues are real, - eig_vals is a real array but if some eigenvalues are complex it is a - complex array. - - """ - bmat = 'I' # Standard eigenproblem - ncv, resid, iparam, ipntr, v, workd, workl, info = ARPACK_iteration( - matvec, lambda x: x, n, bmat, which, nev, tol, ncv, mode=1) - return postproc(n, nev, ncv, 0., 0., bmat, which, tol, - resid, v, iparam, ipntr, workd, workl, info) - -def ARPACK_gen_eigs(matvec, sigma_solve, n, sigma, nev, which='LR', ncv=None, tol=1e-14): - """ - Calculate eigenvalues close to sigma for generalised eigen system - - Given a system [A]x = k_i*[M]x where [A] and [M] are matrices and k_i are - eigenvalues, nev eigenvalues close to sigma are calculated. The user needs - to provide routines that calculate [M]*x and solve [A]-sigma*[M]*x = b for x. - - Arguments - ========= - matvec -- Function that provides matrix-vector product, i.e. matvec(x) -> [M]*x - sigma_solve -- sigma_solve(b) -> x, where [A]-sigma*[M]*x = b - n -- Matrix dimension of the problem - sigma -- Eigenvalue spectral shift real value - nev -- Number of eigenvalues to calculate - which -- Spectrum selection. See details below. Defaults to 'LR' - ncv -- Number of Arnoldi basisvectors to use. If None, default to 2*nev+1 - tol -- Numerical tollerance for Arnouldi iteration convergence. Defaults to 1e-14 - - Spectrum Shift - ============== - - The spectrum of the orignal system is shifted by sigma. This transforms the - original eigenvalues to be 1/(original_eig-sigma) in the shifted - system. 
ARPACK then operates on the shifted system, transforming it back to - the original system in a postprocessing step. - - The spectrum shift causes eigenvalues close to sigma to become very large - in the transformed system. This allows quick convergence for these - eigenvalues. This is particularly useful if a system has a number of - trivial zero-eigenvalues that are to be ignored. - - Spectrum Selection - ================== - which can take one of several values: - - 'LM' -> Request spectrum shifted eigenvalues with largest magnitude. - 'SM' -> Request spectrum shifted eigenvalues with smallest magnitude. - 'LR' -> Request spectrum shifted eigenvalues with largest real part. - 'SR' -> Request spectrum shifted eigenvalues with smallest real part. - 'LI' -> Request spectrum shifted eigenvalues with largest imaginary part. - 'SI' -> Request spectrum shifted eigenvalues with smallest imaginary part. - - The effect on the actual system is: - 'LM' -> Eigenvalues closest to sigma on the complex plane - 'LR' -> Eigenvalues with real part > sigma, provided they exist - - - Return Values - ============= - (eig_vals, eig_vecs) where eig_vals are the requested eigenvalues and - eig_vecs the corresponding eigenvectors. If all the eigenvalues are real, - eig_vals is a real array but if some eigenvalues are complex it is a - complex array. The eigenvalues and vectors correspond to the original - system, not the shifted system. The shifted system is only used interally. - - """ - bmat = 'G' # Generalised eigenproblem - ncv, resid, iparam, ipntr, v, workd, workl, info = ARPACK_iteration( - matvec, sigma_solve, n, bmat, which, nev, tol, ncv, mode=3) - sigmar = sigma - sigmai = 0. - return postproc(n, nev, ncv, sigmar, sigmai, bmat, which, tol, - resid, v, iparam, ipntr, workd, workl, info) - -def ARPACK_iteration(matvec, sigma_solve, n, bmat, which, nev, tol, ncv, mode): - ncv, maxitr = check_init(n, nev, ncv) - ipntr, d, resid, workd, workl, v = init_workspaces(n,nev,ncv) - #init_debug() - ishfts = 1 # Some random arpack parameter - # Some random arpack parameter (I think it tells ARPACK to solve the - # general eigenproblem using shift-invert - iparam = np.zeros(11, np.int32) # Array with assorted extra paramters for F77 call - iparam[[0,2,6]] = ishfts, maxitr, mode - ido = 0 # Communication variable used by ARPACK to tell the user what to do - info = 0 # Used for error reporting - # Arnouldi iteration. 
- while True: - ido,resid,v,iparam,ipntr,info = _arpack.dnaupd( - ido, bmat, which, nev, tol, resid, v, iparam, ipntr, workd, workl, info) - if ido == -1 or ido == 1 and mode not in (3,4): - # Perform y = inv[A - sigma*M]*M*x - x = workd[ipntr[0]-1:ipntr[0]+n-1] - Mx = matvec(x) # Mx = [M]*x - workd[ipntr[1]-1:ipntr[1]+n-1] = sigma_solve(Mx) - elif ido == 1: # Perform y = inv[A - sigma*M]*M*x using saved M*x - # Mx = [M]*x where it was saved by ARPACK - Mx = workd[ipntr[2]-1:ipntr[2]+n-1] - workd[ipntr[1]-1:ipntr[1]+n-1] = sigma_solve(Mx) - elif ido == 2: # Perform y = M*x - x = workd[ipntr[0]-1:ipntr[0]+n-1] - workd[ipntr[1]-1:ipntr[1]+n-1] = matvec(x) - else: # Finished, or error - break - if info == 1: - warn.warn("Maximum number of iterations taken: %s"%iparam[2]) - elif info != 0: - raise ArpackException(info) - - return (ncv, resid, iparam, ipntr, v, workd, workl, info) Deleted: trunk/scipy/sparse/linalg/eigen/arpack/tests/test_speigs.py =================================================================== --- trunk/scipy/sparse/linalg/eigen/arpack/tests/test_speigs.py 2010-12-04 20:25:14 UTC (rev 6988) +++ trunk/scipy/sparse/linalg/eigen/arpack/tests/test_speigs.py 2010-12-04 20:25:25 UTC (rev 6989) @@ -1,52 +0,0 @@ -#!/usr/bin/env python - -from numpy.testing import run_module_suite, TestCase, assert_array_almost_equal - -from scipy.sparse.linalg.interface import aslinearoperator -from scipy.sparse.linalg.eigen.arpack import speigs - -import numpy as np - -class TestEigs(TestCase): - def test(self): - maxn=15 # Dimension of square matrix to be solved - # Use a PDP^-1 factorisation to construct matrix with known - # eigenvalues/vectors. Used random eigenvectors initially. - P = np.mat(np.random.random((maxn,)*2)) - P /= map(np.linalg.norm, P.T) # Normalise the eigenvectors - D = np.mat(np.zeros((maxn,)*2)) - D[range(maxn), range(maxn)] = (np.arange(maxn, dtype=float)+1)/np.sqrt(maxn) - A = P*D*np.linalg.inv(P) - vals = np.array(D.diagonal())[0] - vecs = P - uv_sortind = vals.argsort() - vals = vals[uv_sortind] - vecs = vecs[:,uv_sortind] - - A=aslinearoperator(A) - matvec = A.matvec - #= lambda x: np.asarray(A*x)[0] - nev=4 - eigvs = speigs.ARPACK_eigs(matvec, A.shape[0], nev=nev) - calc_vals = eigvs[0] - # Ensure the calculated eigenvectors have the same sign as the reference values - calc_vecs = eigvs[1] / [np.sign(x[0]) for x in eigvs[1].T] - assert_array_almost_equal(calc_vals, vals[0:nev], decimal=7) - assert_array_almost_equal(calc_vecs, np.array(vecs)[:,0:nev], decimal=7) - - -# class TestGeneigs(TestCase): -# def test(self): -# import pickle -# from scipy.sparse.linalg import dsolve -# A,B = pickle.load(file('mats.pickle')) -# sigma = 27. 
-# sigma_solve = dsolve.splu(A - sigma*B).solve -# w = ARPACK_gen_eigs(B.matvec, sigma_solve, B.shape[0], sigma, 10)[0] -# assert_array_almost_equal(w, -# [27.346442255386375, 49.100299170945405, 56.508474856551544, 56.835800191692492, -# 65.944215785041365, 66.194792400328367, 78.003788872725238, 79.550811647295944, -# 94.646308846854879, 95.30841709116271], decimal=11) - -if __name__ == "__main__": - run_module_suite() From scipy-svn at scipy.org Sat Dec 4 15:25:35 2010 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Sat, 4 Dec 2010 14:25:35 -0600 (CST) Subject: [Scipy-svn] r6990 - trunk/scipy/sparse/linalg/eigen/arpack Message-ID: <20101204202535.04344323E0@scipy.org> Author: ptvirtan Date: 2010-12-04 14:25:34 -0600 (Sat, 04 Dec 2010) New Revision: 6990 Modified: trunk/scipy/sparse/linalg/eigen/arpack/arpack.py Log: BUG: sparse/arpack: accept only real matrices in eigen_symmetric Modified: trunk/scipy/sparse/linalg/eigen/arpack/arpack.py =================================================================== --- trunk/scipy/sparse/linalg/eigen/arpack/arpack.py 2010-12-04 20:25:25 UTC (rev 6989) +++ trunk/scipy/sparse/linalg/eigen/arpack/arpack.py 2010-12-04 20:25:34 UTC (rev 6990) @@ -246,6 +246,8 @@ self.workl = np.zeros(self.ncv * (self.ncv + 8), self.tp) ltr = _type_conv[self.tp] + if ltr not in ["s", "d"]: + raise ValueError("Input matrix is not real-valued.") self._arpack_solver = _arpack.__dict__[ltr + 'saupd'] self._arpack_extract = _arpack.__dict__[ltr + 'seupd'] From scipy-svn at scipy.org Sat Dec 4 15:25:44 2010 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Sat, 4 Dec 2010 14:25:44 -0600 (CST) Subject: [Scipy-svn] r6991 - in trunk/scipy/sparse/linalg/eigen/arpack: . tests Message-ID: <20101204202544.A35CA323DF@scipy.org> Author: ptvirtan Date: 2010-12-04 14:25:44 -0600 (Sat, 04 Dec 2010) New Revision: 6991 Modified: trunk/scipy/sparse/linalg/eigen/arpack/arpack.py trunk/scipy/sparse/linalg/eigen/arpack/tests/test_arpack.py Log: ENH: sparse/arpack: Rename routines eigen* -> eigs* to avoid conflict with module name Modified: trunk/scipy/sparse/linalg/eigen/arpack/arpack.py =================================================================== --- trunk/scipy/sparse/linalg/eigen/arpack/arpack.py 2010-12-04 20:25:34 UTC (rev 6990) +++ trunk/scipy/sparse/linalg/eigen/arpack/arpack.py 2010-12-04 20:25:44 UTC (rev 6991) @@ -12,11 +12,11 @@ # The entry points to ARPACK are # - (s,d)seupd : single and double precision symmetric matrix # - (s,d,c,z)neupd: single,double,complex,double complex general matrix -# This wrapper puts the *neupd (general matrix) interfaces in eigen() -# and the *seupd (symmetric matrix) in eigen_symmetric(). +# This wrapper puts the *neupd (general matrix) interfaces in eigs() +# and the *seupd (symmetric matrix) in eigsh(). # There is no Hermetian complex/double complex interface. # To find eigenvalues of a Hermetian matrix you -# must use eigen() and not eigen_symmetric() +# must use eigs() and not eigsh() # It might be desirable to handle the Hermetian case differently # and, for example, return real eigenvalues. 
@@ -39,7 +39,7 @@ __docformat__ = "restructuredtext en" -__all___=['eigen','eigen_symmetric', 'svd', 'ArpackNoConvergence'] +__all___=['eigs', 'eigsh', 'svds', 'ArpackNoConvergence'] import _arpack import numpy as np @@ -450,9 +450,9 @@ else: return d -def eigen(A, k=6, M=None, sigma=None, which='LM', v0=None, - ncv=None, maxiter=None, tol=0, - return_eigenvectors=True): +def eigs(A, k=6, M=None, sigma=None, which='LM', v0=None, + ncv=None, maxiter=None, tol=0, + return_eigenvectors=True): """ Find k eigenvalues and eigenvectors of the square matrix A. @@ -521,14 +521,14 @@ See Also -------- - eigen_symmetric : eigenvalues and eigenvectors for symmetric matrix A + eigsh : eigenvalues and eigenvectors for symmetric matrix A Examples -------- Find 6 eigenvectors of the identity matrix: >>> id = np.identity(13) - >>> vals, vecs = sp.sparse.linalg.eigen(id, k=6) + >>> vals, vecs = sp.sparse.linalg.eigs(id, k=6) >>> vals array([ 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j]) >>> vecs.shape @@ -552,11 +552,11 @@ return params.extract(return_eigenvectors) -def eigen_symmetric(A, k=6, M=None, sigma=None, which='LM', v0=None, - ncv=None, maxiter=None, tol=0, - return_eigenvectors=True): - """Find k eigenvalues and eigenvectors of the real symmetric - square matrix A. +def eigsh(A, k=6, M=None, sigma=None, which='LM', v0=None, + ncv=None, maxiter=None, tol=0, + return_eigenvectors=True): + """ + Find k eigenvalues and eigenvectors of the real symmetric square matrix A. Solves A * x[i] = w[i] * x[i], the standard eigenvalue problem for w[i] eigenvalues with corresponding eigenvectors x[i]. @@ -570,7 +570,7 @@ matrix formats in scipy.sparse are appropriate for A. k : integer - The number of eigenvalues and eigenvectors desired + The number of eigenvalues and eigenvectors desired. Returns ------- @@ -631,7 +631,7 @@ See Also -------- - eigen : eigenvalues and eigenvectors for a general (nonsymmetric) matrix A + eigs : eigenvalues and eigenvectors for a general (nonsymmetric) matrix A Notes ----- @@ -656,7 +656,7 @@ return params.extract(return_eigenvectors) -def svd(A, k=6): +def svds(A, k=6): """Compute a few singular values/vectors for a sparse matrix using ARPACK. 
Parameters Modified: trunk/scipy/sparse/linalg/eigen/arpack/tests/test_arpack.py =================================================================== --- trunk/scipy/sparse/linalg/eigen/arpack/tests/test_arpack.py 2010-12-04 20:25:34 UTC (rev 6990) +++ trunk/scipy/sparse/linalg/eigen/arpack/tests/test_arpack.py 2010-12-04 20:25:44 UTC (rev 6991) @@ -12,10 +12,10 @@ from numpy import array, finfo, argsort, dot, round, conj, random from scipy.sparse import csc_matrix, isspmatrix -from scipy.sparse.linalg.eigen.arpack import eigen_symmetric, eigen, svd, \ +from scipy.sparse.linalg.eigen.arpack import eigs, eigsh, svds, \ ArpackNoConvergence -from scipy.linalg import svd as dsvd +from scipy.linalg import svd def assert_almost_equal_cc(actual,desired,decimal=7,err_msg='',verbose=True): # almost equal or complex conjugates almost equal @@ -109,7 +109,7 @@ if v0 == None: v0 = d['v0'] exact_eval=self.get_exact_eval(d,typ,k,which) - eval,evec=eigen_symmetric(a,k,which=which,v0=v0) + eval,evec=eigsh(a,k,which=which,v0=v0) # check eigenvalues assert_array_almost_equal(eval,exact_eval,decimal=_ndigits[typ]) # check eigenvectors A*evec=eval*evec @@ -138,7 +138,7 @@ m = np.random.rand(30, 30) m = m + m.T try: - w, v = eigen_symmetric(m, 4, which='LM', v0=m[:,0], maxiter=5) + w, v = eigsh(m, 4, which='LM', v0=m[:,0], maxiter=5) raise AssertionError("Spurious no-error exit") except ArpackNoConvergence, err: k = len(err.eigenvalues) @@ -171,7 +171,7 @@ ind=self.sort_choose(exact_eval,typ,k,which) exact_eval=exact_eval[ind] # compute eigenvalues - eval,evec=eigen(a,k,which=which,v0=v0) + eval,evec=eigs(a,k,which=which,v0=v0) ind=self.sort_choose(eval,typ,k,which) eval=eval[ind] evec=evec[:,ind] @@ -195,7 +195,7 @@ np.random.seed(1234) m = np.random.rand(30, 30) + 1j*np.random.rand(30, 30) try: - w, v = eigen(m, 3, which='LM', v0=m[:,0], maxiter=30) + w, v = eigs(m, 3, which='LM', v0=m[:,0], maxiter=30) raise AssertionError("Spurious no-error exit") except ArpackNoConvergence, err: k = len(err.eigenvalues) @@ -234,7 +234,7 @@ ind=self.sort_choose(exact_eval,typ,k,which) exact_eval=exact_eval[ind] # compute eigenvalues - eval,evec=eigen(a,k,which=which,v0=v0) + eval,evec=eigs(a,k,which=which,v0=v0) ind=self.sort_choose(eval,typ,k,which) eval=eval[ind] evec=evec[:,ind] @@ -268,7 +268,7 @@ np.random.seed(1234) m = np.random.rand(30, 30) try: - w, v = eigen(m, 3, which='LM', v0=m[:,0], maxiter=30) + w, v = eigs(m, 3, which='LM', v0=m[:,0], maxiter=30) raise AssertionError("Spurious no-error exit") except ArpackNoConvergence, err: k = len(err.eigenvalues) @@ -310,7 +310,7 @@ # compute eigenvalues - eval,evec=eigen(a,k,which=which,v0=v0) + eval,evec=eigs(a,k,which=which,v0=v0) ind=self.sort_choose(eval,typ,k,which) eval=eval[ind] evec=evec[:,ind] @@ -336,7 +336,7 @@ np.random.seed(1234) m = np.random.rand(30, 30) + 1j*np.random.rand(30, 30) try: - w, v = eigen(m, 3, which='LM', v0=m[:,0], maxiter=30) + w, v = eigs(m, 3, which='LM', v0=m[:,0], maxiter=30) raise AssertionError("Spurious no-error exit") except ArpackNoConvergence, err: k = len(err.eigenvalues) @@ -350,13 +350,13 @@ def test_eigen_bad_shapes(): # A is not square. 
A = csc_matrix(np.zeros((2,3))) - assert_raises(ValueError, eigen, A) + assert_raises(ValueError, eigs, A) def sorted_svd(m, k): """Compute svd of a dense matrix m, and return singular vectors/values sorted.""" - u, s, vh = dsvd(m) + u, s, vh = svd(m) ii = np.argsort(s)[-k:] return u[:, ii], s[ii], vh[ii] @@ -374,7 +374,7 @@ for m in [x.T, x]: for k in range(1, 3): u, s, vh = sorted_svd(m, k) - su, ss, svh = svd(m, k) + su, ss, svh = svds(m, k) m_hat = svd_estimate(u, s, vh) sm_hat = svd_estimate(su, ss, svh) @@ -382,7 +382,7 @@ assert_array_almost_equal_nulp(m_hat, sm_hat, nulp=1000) @dec.knownfailureif(True, "Complex sparse SVD not implemented (depends on " - "Hermitian support in eigen_symmetric") + "Hermitian support in eigsh") def test_simple_complex(self): x = np.array([[1, 2, 3], [3, 4, 3], @@ -392,7 +392,7 @@ for m in [x, x.T.conjugate()]: for k in range(1, 3): u, s, vh = sorted_svd(m, k) - su, ss, svh = svd(m, k) + su, ss, svh = svds(m, k) m_hat = svd_estimate(u, s, vh) sm_hat = svd_estimate(su, ss, svh) From scipy-svn at scipy.org Sat Dec 4 15:25:55 2010 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Sat, 4 Dec 2010 14:25:55 -0600 (CST) Subject: [Scipy-svn] r6992 - in trunk/scipy/sparse/linalg/eigen/arpack: . tests Message-ID: <20101204202555.9036A323DD@scipy.org> Author: ptvirtan Date: 2010-12-04 14:25:55 -0600 (Sat, 04 Dec 2010) New Revision: 6992 Modified: trunk/scipy/sparse/linalg/eigen/arpack/arpack.py trunk/scipy/sparse/linalg/eigen/arpack/tests/test_arpack.py Log: ENH: sparse/arpack: rewrite the sparse svd routine to handle complex matrices The ARPACK user guide implies that the only way to compute eigenvalues of Hermitian matrices is to use zneupd, as there is no special support for hermitian matrices. Modified: trunk/scipy/sparse/linalg/eigen/arpack/arpack.py =================================================================== --- trunk/scipy/sparse/linalg/eigen/arpack/arpack.py 2010-12-04 20:25:44 UTC (rev 6991) +++ trunk/scipy/sparse/linalg/eigen/arpack/arpack.py 2010-12-04 20:25:55 UTC (rev 6992) @@ -43,8 +43,8 @@ import _arpack import numpy as np -from scipy.sparse.linalg.interface import aslinearoperator -from scipy.sparse import csc_matrix, csr_matrix +from scipy.sparse.linalg.interface import aslinearoperator, LinearOperator +from scipy.sparse import csc_matrix, csr_matrix, isspmatrix _type_conv = {'f':'s', 'd':'d', 'F':'c', 'D':'z'} _ndigits = {'f':5, 'd':12, 'F':5, 'D':12} @@ -656,70 +656,62 @@ return params.extract(return_eigenvectors) -def svds(A, k=6): +def svds(A, k=6, ncv=None, tol=0): """Compute a few singular values/vectors for a sparse matrix using ARPACK. Parameters ---------- - A: sparse matrix - Array to compute the SVD on. - k: int + A : sparse matrix + Array to compute the SVD on + k : int, optional Number of singular values and vectors to compute. + ncv : integer + The number of Lanczos vectors generated + ncv must be greater than k+1 and smaller than n; + it is recommended that ncv > 2*k + tol : float, optional + Tolerance for singular values. Zero (default) means machine precision. Note ---- - This is a naive implementation using the symmetric eigensolver on A.T * A - or A * A.T, depending on which one is more efficient. + This is a naive implementation using an eigensolver on A.H * A or + A * A.H, depending on which one is more efficient. 
- Complex support is not implemented yet """ - # TODO: implement complex support once ARPACK-based eigen_hermitian is - # available + if not (isinstance(A, np.ndarray) or isspmatrix(A)): + A = np.asarray(A) + n, m = A.shape - if np.iscomplexobj(A): - raise NotImplementedError("Complex support for sparse SVD not " - "implemented yet") - op = lambda x: x.T.conjugate() + if np.issubdtype(A.dtype, np.complexfloating): + herm = lambda x: x.T.conjugate() + eigensolver = eigs else: - op = lambda x: x.T + herm = lambda x: x.T + eigensolver = eigsh - tp = A.dtype.char - linear_at = aslinearoperator(op(A)) - linear_a = aslinearoperator(A) + if n > m: + X = A + XH = herm(A) + else: + XH = A + X = herm(A) - def _left(x, sz): - x = csc_matrix(x) + def matvec_XH_X(x): + return XH.dot(X.dot(x)) - matvec = lambda x: linear_at.matvec(linear_a.matvec(x)) - params = _SymmetricArpackParams(sz, k, tp, matvec) + XH_X = LinearOperator(matvec=matvec_XH_X, dtype=X.dtype, + shape=(X.shape[1], X.shape[1])) - while not params.converged: - params.iterate() - eigvals, eigvec = params.extract(True) - s = np.sqrt(eigvals) + eigvals, eigvec = eigensolver(XH_X, k=k, tol=tol**2) + s = np.sqrt(eigvals) + if n > m: v = eigvec - u = (x * v) / s - return u, s, op(v) - - def _right(x, sz): - x = csr_matrix(x) - - matvec = lambda x: linear_a.matvec(linear_at.matvec(x)) - params = _SymmetricArpackParams(sz, k, tp, matvec) - - while not params.converged: - params.iterate() - eigvals, eigvec = params.extract(True) - - s = np.sqrt(eigvals) - + u = X.dot(v) / s + vh = herm(v) + else: u = eigvec - vh = (op(u) * x) / s[:, None] - return u, s, vh + vh = herm(X.dot(u) / s) - if n > m: - return _left(A, m) - else: - return _right(A, n) + return u, s, vh Modified: trunk/scipy/sparse/linalg/eigen/arpack/tests/test_arpack.py =================================================================== --- trunk/scipy/sparse/linalg/eigen/arpack/tests/test_arpack.py 2010-12-04 20:25:44 UTC (rev 6991) +++ trunk/scipy/sparse/linalg/eigen/arpack/tests/test_arpack.py 2010-12-04 20:25:55 UTC (rev 6992) @@ -356,6 +356,8 @@ def sorted_svd(m, k): """Compute svd of a dense matrix m, and return singular vectors/values sorted.""" + if isspmatrix(m): + m = m.todense() u, s, vh = svd(m) ii = np.argsort(s)[-k:] @@ -370,9 +372,14 @@ [3, 4, 3], [1, 0, 2], [0, 0, 1]], np.float) + y = np.array([[1, 2, 3, 8], + [3, 4, 3, 5], + [1, 0, 2, 3], + [0, 0, 1, 0]], np.float) + z = csc_matrix(x) - for m in [x.T, x]: - for k in range(1, 3): + for m in [x.T, x, y, z, z.T]: + for k in range(1, min(m.shape)): u, s, vh = sorted_svd(m, k) su, ss, svh = svds(m, k) @@ -381,16 +388,19 @@ assert_array_almost_equal_nulp(m_hat, sm_hat, nulp=1000) - @dec.knownfailureif(True, "Complex sparse SVD not implemented (depends on " - "Hermitian support in eigsh") def test_simple_complex(self): x = np.array([[1, 2, 3], [3, 4, 3], [1+1j, 0, 2], [0, 0, 1]], np.complex) + y = np.array([[1, 2, 3, 8+5j], + [3-2j, 4, 3, 5], + [1, 0, 2, 3], + [0, 0, 1, 0]], np.complex) + z = csc_matrix(x) - for m in [x, x.T.conjugate()]: - for k in range(1, 3): + for m in [x, x.T.conjugate(), x.T, y, y.conjugate(), z, z.T]: + for k in range(1, min(m.shape)-1): u, s, vh = sorted_svd(m, k) su, ss, svh = svds(m, k) From scipy-svn at scipy.org Sat Dec 4 15:26:06 2010 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Sat, 4 Dec 2010 14:26:06 -0600 (CST) Subject: [Scipy-svn] r6993 - trunk/scipy/sparse/linalg/eigen/arpack Message-ID: <20101204202606.BF32F323E1@scipy.org> Author: ptvirtan Date: 2010-12-04 14:26:06 -0600 
(Sat, 04 Dec 2010) New Revision: 6993 Modified: trunk/scipy/sparse/linalg/eigen/arpack/README trunk/scipy/sparse/linalg/eigen/arpack/arpack.py trunk/scipy/sparse/linalg/eigen/arpack/info.py Log: ENH: sparse/arpack: Update the documentation including adding some references. Modified: trunk/scipy/sparse/linalg/eigen/arpack/README =================================================================== --- trunk/scipy/sparse/linalg/eigen/arpack/README 2010-12-04 20:25:55 UTC (rev 6992) +++ trunk/scipy/sparse/linalg/eigen/arpack/README 2010-12-04 20:26:06 UTC (rev 6993) @@ -49,8 +49,8 @@ --- -The ARPACK license is BSD-like. -http://www.caam.rice.edu/software/ARPACK/RiceBSD.doc +The ARPACK license is the BSD 3-clause license ("New BSD License") +http://www.caam.rice.edu/software/ARPACK/RiceBSD.txt --- Modified: trunk/scipy/sparse/linalg/eigen/arpack/arpack.py =================================================================== --- trunk/scipy/sparse/linalg/eigen/arpack/arpack.py 2010-12-04 20:25:55 UTC (rev 6992) +++ trunk/scipy/sparse/linalg/eigen/arpack/arpack.py 2010-12-04 20:26:06 UTC (rev 6993) @@ -383,8 +383,8 @@ if ierr != 0: raise ArpackError(ierr, infodict=_NEUPD_ERRORS) - # The ARPACK nonsymmetric real and double interface (s,d)naupd return - # eigenvalues and eigenvectors in real (float,double) arrays. + # The ARPACK nonsymmetric real and double interface (s,d)naupd + # return eigenvalues and eigenvectors in real (float,double) arrays. # Build complex eigenvalues from real and imaginary parts d = dr + 1.0j * di @@ -417,7 +417,8 @@ if self.which in ['LR','SR']: ind = np.argsort(rd.real) elif self.which in ['LI','SI']: - # for LI,SI ARPACK returns largest,smallest abs(imaginary) why? + # for LI,SI ARPACK returns largest,smallest + # abs(imaginary) why? ind = np.argsort(abs(rd.imag)) else: ind = np.argsort(abs(rd)) @@ -517,12 +518,18 @@ The currently converged eigenvalues and eigenvectors can be found as ``eigenvalues`` and ``eigenvectors`` attributes of the exception - objed. + object. See Also -------- eigsh : eigenvalues and eigenvectors for symmetric matrix A + Notes + ----- + This function is a wrapper to the ARPACK [1]_ SNEUPD, DNEUPD, CNEUPD, + ZNEUPD, functions which use the Implicitly Restarted Arnoldi Method to + find the eigenvalues and eigenvectors [2]_. + Examples -------- Find 6 eigenvectors of the identity matrix: @@ -534,6 +541,12 @@ >>> vecs.shape (13, 6) + References + ---------- + .. [1] ARPACK Software, http://www.caam.rice.edu/software/ARPACK/ + .. [2] R. B. Lehoucq, D. C. Sorensen, and C. Yang, ARPACK USERS GUIDE: + Solution of Large Scale Eigenvalue Problems by Implicitly Restarted + Arnoldi Methods. SIAM, Philadelphia, PA, 1998. """ A = aslinearoperator(A) if A.shape[0] != A.shape[1]: @@ -561,14 +574,12 @@ Solves A * x[i] = w[i] * x[i], the standard eigenvalue problem for w[i] eigenvalues with corresponding eigenvectors x[i]. - Parameters ---------- A : matrix or array with real entries or object with matvec(x) method An N x N real symmetric matrix or array or an object with matvec(x) method to perform the matrix vector product A * x. The sparse matrix formats in scipy.sparse are appropriate for A. - k : integer The number of eigenvalues and eigenvectors desired. 
@@ -576,7 +587,6 @@ ------- w : array Array of k eigenvalues - v : array An array of k eigenvectors The v[i] is the eigenvector corresponding to the eigenvector w[i] @@ -587,20 +597,15 @@ (Not implemented) A symmetric positive-definite matrix for the generalized eigenvalue problem A * x = w * M * x - - sigma : real (Not implemented) Find eigenvalues near sigma. Shift spectrum by sigma. - v0 : array Starting vector for iteration. - ncv : integer The number of Lanczos vectors generated ncv must be greater than k and smaller than n; it is recommended that ncv > 2*k - which : string Which k eigenvectors and eigenvalues to find: - 'LA' : Largest (algebraic) eigenvalues @@ -609,14 +614,11 @@ - 'SM' : Smallest (in magnitude) eigenvalues - 'BE' : Half (k/2) from each end of the spectrum When k is odd, return one more (k/2+1) from the high end - maxiter : integer Maximum number of Arnoldi update iterations allowed - tol : float Relative accuracy for eigenvalues (stopping criterion). The default value of 0 implies machine precision. - return_eigenvectors : boolean Return eigenvectors (True) in addition to eigenvalues @@ -627,7 +629,7 @@ The currently converged eigenvalues and eigenvectors can be found as ``eigenvalues`` and ``eigenvectors`` attributes of the exception - objed. + object. See Also -------- @@ -635,9 +637,25 @@ Notes ----- + This function is a wrapper to the ARPACK [1]_ SSEUPD and DSEUPD + functions which use the Implicitly Restarted Lanczos Method to + find the eigenvalues and eigenvectors [2]_. Examples -------- + >>> id = np.identity(13) + >>> vals, vecs = sp.sparse.linalg.eigsh(id, k=6) + >>> vals + array([ 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j]) + >>> vecs.shape + (13, 6) + + References + ---------- + .. [1] ARPACK Software, http://www.caam.rice.edu/software/ARPACK/ + .. [2] R. B. Lehoucq, D. C. Sorensen, and C. Yang, ARPACK USERS GUIDE: + Solution of Large Scale Eigenvalue Problems by Implicitly Restarted + Arnoldi Methods. SIAM, Philadelphia, PA, 1998. """ A = aslinearoperator(A) if A.shape[0] != A.shape[1]: @@ -657,7 +675,7 @@ return params.extract(return_eigenvectors) def svds(A, k=6, ncv=None, tol=0): - """Compute a few singular values/vectors for a sparse matrix using ARPACK. + """Compute k singular values/vectors for a sparse matrix using ARPACK. Parameters ---------- Modified: trunk/scipy/sparse/linalg/eigen/arpack/info.py =================================================================== --- trunk/scipy/sparse/linalg/eigen/arpack/info.py 2010-12-04 20:25:55 UTC (rev 6992) +++ trunk/scipy/sparse/linalg/eigen/arpack/info.py 2010-12-04 20:26:06 UTC (rev 6993) @@ -2,20 +2,19 @@ Eigenvalue solver using iterative methods. Find k eigenvectors and eigenvalues of a matrix A using the -Arnoldi/Lanczos iterative methods from ARPACK. +Arnoldi/Lanczos iterative methods from ARPACK [1]_,[2]_. These methods are most useful for large sparse matrices. - - eigen(A,k) - - eigen_symmetric(A,k) + - eigs(A,k) + - eigsh(A,k) -Reference ---------- - - http://www.caam.rice.edu/ -software/ARPACK/ - - http://www.caam.rice.edu/software/ARPACK/UG/ug.html - - http://books.google.com/books?hl=en&id=4E9PY7NT8a0C&dq=arpack+users+guide - +References +---------- +.. [1] ARPACK Software, http://www.caam.rice.edu/software/ARPACK/ +.. [2] R. B. Lehoucq, D. C. Sorensen, and C. Yang, ARPACK USERS GUIDE: + Solution of Large Scale Eigenvalue Problems by Implicitly Restarted + Arnoldi Methods. SIAM, Philadelphia, PA, 1998. 
""" global_symbols = [] postpone_import = 1 From scipy-svn at scipy.org Sat Dec 4 15:26:15 2010 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Sat, 4 Dec 2010 14:26:15 -0600 (CST) Subject: [Scipy-svn] r6994 - trunk/doc/release Message-ID: <20101204202615.477A8323E4@scipy.org> Author: ptvirtan Date: 2010-12-04 14:26:15 -0600 (Sat, 04 Dec 2010) New Revision: 6994 Modified: trunk/doc/release/0.9.0-notes.rst Log: DOC: mention ARPACK changes in release notes Modified: trunk/doc/release/0.9.0-notes.rst =================================================================== --- trunk/doc/release/0.9.0-notes.rst 2010-12-04 20:26:06 UTC (rev 6993) +++ trunk/doc/release/0.9.0-notes.rst 2010-12-04 20:26:15 UTC (rev 6994) @@ -39,23 +39,6 @@ Scipy 0.9.0 has full support for Python 3. -Deprecated features -=================== - -Obsolete nonlinear solvers (in ``scipy.optimize``) --------------------------------------------------- - -The following nonlinear solvers from ``scipy.optimize`` are -deprecated: - -- ``broyden_modified`` (bad performance) -- ``broyden1_modified`` (bad performance) -- ``broyden_generalized`` (equivalent to ``anderson``) -- ``anderson2`` (equivalent to ``anderson``) -- ``broyden3`` (obsoleted by new limited-memory broyden methods) -- ``vackar`` (renamed to ``diagbroyden``) - - New features ============ @@ -119,9 +102,26 @@ an arbitrary frequency response. The functions ``scipy.signal.kaiser_atten`` and ``scipy.signal.kaiser_beta`` -were added. +were added. +Deprecated features +=================== + +Obsolete nonlinear solvers (in ``scipy.optimize``) +-------------------------------------------------- + +The following nonlinear solvers from ``scipy.optimize`` are +deprecated: + +- ``broyden_modified`` (bad performance) +- ``broyden1_modified`` (bad performance) +- ``broyden_generalized`` (equivalent to ``anderson``) +- ``anderson2`` (equivalent to ``anderson``) +- ``broyden3`` (obsoleted by new limited-memory broyden methods) +- ``vackar`` (renamed to ``diagbroyden``) + + Improved statistical tests (``scipy.stats``) -------------------------------------------- @@ -163,8 +163,45 @@ The functions ``spkron``, ``speye``, ``spidentity``, ``lil_eye`` and ``lil_diags`` were removed from ``scipy.sparse``. The first three functions -are still available as ``scipy.sparse.kron``, ``scipy.sparse.eye`` and +are still available as ``scipy.sparse.kron``, ``scipy.sparse.eye`` and ``scipy.sparse.identity``. The `dims` and `nzmax` keywords were removed from the sparse matrix constructor. + +``scipy.sparse.linalg.arpack.speigs`` +------------------------------------- + +A duplicated interface to the ARPACK library was removed. + + +Other changes +============= + +ARPACK interface changes +------------------------ + +The interface to the ARPACK eigenvalue routines in +``scipy.sparse.linalg`` was changed for more robustness. + +The eigenvalue and SVD routines now raise ``ArpackNoConvergence`` if +the eigenvalue iteration fails to converge. If partially converged results +are desired, they can be accessed as follows:: + + import numpy as np + from scipy.sparse.linalg import eigs, ArpackNoConvergence + + m = np.random.randn(30, 30) + try: + w, v = eigs(m, 6) + except ArpackNoConvergence, err: + partially_converged_w = err.eigenvalues + partially_converged_v = err.eigenvectors + +Several bugs were also fixed. 
+ +The routines were moreover renamed as follows: + + - eigen --> eigs + - eigen_symmetric --> eigsh + - svd --> svds From scipy-svn at scipy.org Sat Dec 4 16:04:44 2010 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Sat, 4 Dec 2010 15:04:44 -0600 (CST) Subject: [Scipy-svn] r6995 - in trunk/scipy/sparse/linalg/eigen/arpack: . tests Message-ID: <20101204210444.94419323E5@scipy.org> Author: ptvirtan Date: 2010-12-04 15:04:44 -0600 (Sat, 04 Dec 2010) New Revision: 6995 Modified: trunk/scipy/sparse/linalg/eigen/arpack/arpack.py trunk/scipy/sparse/linalg/eigen/arpack/tests/test_arpack.py Log: ENH: sparse/arpack: infer LinearOperator dtype if it is missing in eigs/eigsh (#1119) Modified: trunk/scipy/sparse/linalg/eigen/arpack/arpack.py =================================================================== --- trunk/scipy/sparse/linalg/eigen/arpack/arpack.py 2010-12-04 20:26:15 UTC (rev 6994) +++ trunk/scipy/sparse/linalg/eigen/arpack/arpack.py 2010-12-04 21:04:44 UTC (rev 6995) @@ -451,6 +451,13 @@ else: return d +def _aslinearoperator_with_dtype(m): + m = aslinearoperator(m) + if not hasattr(m, 'dtype'): + x = np.zeros(m.shape[1]) + m.dtype = (m*x).dtype + return m + def eigs(A, k=6, M=None, sigma=None, which='LM', v0=None, ncv=None, maxiter=None, tol=0, return_eigenvectors=True): @@ -548,7 +555,7 @@ Solution of Large Scale Eigenvalue Problems by Implicitly Restarted Arnoldi Methods. SIAM, Philadelphia, PA, 1998. """ - A = aslinearoperator(A) + A = _aslinearoperator_with_dtype(A) if A.shape[0] != A.shape[1]: raise ValueError('expected square matrix (shape=%s)' % (A.shape,)) n = A.shape[0] @@ -657,7 +664,7 @@ Solution of Large Scale Eigenvalue Problems by Implicitly Restarted Arnoldi Methods. SIAM, Philadelphia, PA, 1998. """ - A = aslinearoperator(A) + A = _aslinearoperator_with_dtype(A) if A.shape[0] != A.shape[1]: raise ValueError('expected square matrix (shape=%s)' % (A.shape,)) n = A.shape[0] Modified: trunk/scipy/sparse/linalg/eigen/arpack/tests/test_arpack.py =================================================================== --- trunk/scipy/sparse/linalg/eigen/arpack/tests/test_arpack.py 2010-12-04 20:26:15 UTC (rev 6994) +++ trunk/scipy/sparse/linalg/eigen/arpack/tests/test_arpack.py 2010-12-04 21:04:44 UTC (rev 6995) @@ -8,10 +8,11 @@ from numpy.testing import assert_almost_equal, assert_array_almost_equal, \ assert_array_almost_equal_nulp, TestCase, run_module_suite, dec, \ - assert_raises, verbose + assert_raises, verbose, assert_equal from numpy import array, finfo, argsort, dot, round, conj, random from scipy.sparse import csc_matrix, isspmatrix +from scipy.sparse.linalg import LinearOperator from scipy.sparse.linalg.eigen.arpack import eigs, eigsh, svds, \ ArpackNoConvergence @@ -352,6 +353,11 @@ A = csc_matrix(np.zeros((2,3))) assert_raises(ValueError, eigs, A) +def test_eigs_operator(): + # Check inferring LinearOperator dtype + fft_op = LinearOperator((6, 6), np.fft.fft) + w, v = eigs(fft_op, k=3) + assert_equal(w.dtype, np.complex_) def sorted_svd(m, k): """Compute svd of a dense matrix m, and return singular vectors/values From scipy-svn at scipy.org Sun Dec 5 00:54:14 2010 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Sat, 4 Dec 2010 23:54:14 -0600 (CST) Subject: [Scipy-svn] r6996 - in trunk: doc/release scipy/sparse Message-ID: <20101205055414.9ADB53234B@scipy.org> Author: rgommers Date: 2010-12-04 23:54:13 -0600 (Sat, 04 Dec 2010) New Revision: 6996 Modified: trunk/doc/release/0.9.0-notes.rst trunk/scipy/sparse/base.py 
trunk/scipy/sparse/compressed.py trunk/scipy/sparse/coo.py trunk/scipy/sparse/csc.py trunk/scipy/sparse/csr.py Log: DEP: remove deprecated methods and keywords from sparse matrices. Modified: trunk/doc/release/0.9.0-notes.rst =================================================================== --- trunk/doc/release/0.9.0-notes.rst 2010-12-04 21:04:44 UTC (rev 6995) +++ trunk/doc/release/0.9.0-notes.rst 2010-12-05 05:54:13 UTC (rev 6996) @@ -158,8 +158,9 @@ ``scipy.sparse`` ---------------- -The ``save`` method of the ``spmatrix`` class in ``scipy.sparse``, which has -been deprecated since version 0.7, was removed. +Several methods of the sparse matrix classes in ``scipy.sparse`` which had +been deprecated since version 0.7 were removed: `save`, `rowcol`, `getdata`, +`listprint`, `ensure_sorted_indices`, `matvec`, `matmat` and `rmatvec`. The functions ``spkron``, ``speye``, ``spidentity``, ``lil_eye`` and ``lil_diags`` were removed from ``scipy.sparse``. The first three functions @@ -167,7 +168,8 @@ ``scipy.sparse.identity``. The `dims` and `nzmax` keywords were removed from the sparse matrix -constructor. +constructor. The `colind` and `rowind` attributes were removed from CSR and CSC +matrices respectively. ``scipy.sparse.linalg.arpack.speigs`` ------------------------------------- Modified: trunk/scipy/sparse/base.py =================================================================== --- trunk/scipy/sparse/base.py 2010-12-04 21:04:44 UTC (rev 6995) +++ trunk/scipy/sparse/base.py 2010-12-05 05:54:13 UTC (rev 6996) @@ -135,21 +135,6 @@ format = 'und' return format - @np.deprecate - def rowcol(self, num): - return (None, None) - - @np.deprecate - def getdata(self, num): - return None - - @np.deprecate - def listprint(self, start, stop): - """Provides a way to print over a single index. - """ - return '\n'.join([' %s\t%s' % (self.rowcol(ind), self.getdata(ind)) - for ind in xrange(start,stop)]) + '\n' - def __repr__(self): nnz = self.getnnz() format = self.getformat() @@ -222,6 +207,9 @@ """ return self.tocsr().multiply(other) + def dot(self, other): + return self * other + def __abs__(self): return abs(self.tocsr()) @@ -238,34 +226,6 @@ def __rsub__(self, other): # other - self return self.tocsr().__rsub__(other) - # old __mul__ interfaces - @np.deprecate - def matvec(self,other): - return self * other - - @np.deprecate - def matmat(self,other): - return self * other - - def dot(self, other): - return self * other - - @np.deprecate - def rmatvec(self, other, conjugate=True): - """Multiplies the vector 'other' by the sparse matrix, returning a - dense vector as a result. - - If 'conjugate' is True: - - returns A.transpose().conj() * other - Otherwise: - - returns A.transpose() * other. 
- - """ - if conjugate: - return self.conj().transpose() * other - else: - return self.transpose() * other - def __mul__(self, other): """interpret other and call one of the following @@ -403,10 +363,6 @@ return result elif isscalarlike(other): raise ValueError('exponent must be an integer') - elif isspmatrix(other): - warn('Using ** for elementwise multiplication is deprecated.'\ - 'Use .multiply() instead', DeprecationWarning) - return self.multiply(other) else: raise NotImplementedError Modified: trunk/scipy/sparse/compressed.py =================================================================== --- trunk/scipy/sparse/compressed.py 2010-12-04 21:04:44 UTC (rev 6995) +++ trunk/scipy/sparse/compressed.py 2010-12-05 05:54:13 UTC (rev 6996) @@ -298,10 +298,6 @@ return self.__class__((data,indices,indptr),shape=(M,N)) - @np.deprecate - def getdata(self, ind): - return self.data[ind] - def diagonal(self): """Returns the main diagonal of the matrix """ @@ -623,19 +619,6 @@ fn( len(self.indptr) - 1, self.indptr, self.indices, self.data) self.has_sorted_indices = True - #TODO remove after 0.7 - def ensure_sorted_indices(self, inplace=False): - """Return a copy of this matrix where the column indices are sorted - """ - warn('ensure_sorted_indices is deprecated, ' \ - 'use sorted_indices() or sort_indices() instead', \ - DeprecationWarning) - - if inplace: - self.sort_indices() - else: - return self.sorted_indices() - def prune(self): """Remove empty space after all non-zero elements. """ Modified: trunk/scipy/sparse/coo.py =================================================================== --- trunk/scipy/sparse/coo.py 2010-12-04 21:04:44 UTC (rev 6995) +++ trunk/scipy/sparse/coo.py 2010-12-05 05:54:13 UTC (rev 6996) @@ -93,13 +93,9 @@ """ - def __init__(self, arg1, shape=None, dtype=None, copy=False, dims=None): + def __init__(self, arg1, shape=None, dtype=None, copy=False): _data_matrix.__init__(self) - if dims is not None: - warn("dims is deprecated, use shape instead", DeprecationWarning) - shape=dims - if isinstance(arg1, tuple): if isshape(arg1): M, N = arg1 @@ -215,14 +211,6 @@ raise ValueError('negative column index found') - @np.deprecate - def rowcol(self, num): - return (self.row[num], self.col[num]) - - @np.deprecate - def getdata(self, num): - return self.data[num] - def transpose(self, copy=False): M,N = self.shape return coo_matrix((self.data, (self.col, self.row)), shape=(N,M), copy=copy) Modified: trunk/scipy/sparse/csc.py =================================================================== --- trunk/scipy/sparse/csc.py 2010-12-04 21:04:44 UTC (rev 6995) +++ trunk/scipy/sparse/csc.py 2010-12-05 05:54:13 UTC (rev 6996) @@ -81,14 +81,6 @@ """ - def __getattr__(self, attr): - if attr == 'rowind': - warn("rowind attribute no longer in use. 
Use .indices instead", - DeprecationWarning) - return self.indices - else: - return _cs_matrix.__getattr__(self, attr) - def transpose(self, copy=False): from csr import csr_matrix M,N = self.shape @@ -99,13 +91,6 @@ for r in xrange(self.shape[0]): yield csr[r,:] - @np.deprecate - def rowcol(self, ind): - #TODO remove after 0.7 - row = self.indices[ind] - col = np.searchsorted(self.indptr, ind+1) - 1 - return (row, col) - def tocsc(self, copy=False): if copy: return self.copy() Modified: trunk/scipy/sparse/csr.py =================================================================== --- trunk/scipy/sparse/csr.py 2010-12-04 21:04:44 UTC (rev 6995) +++ trunk/scipy/sparse/csr.py 2010-12-05 05:54:13 UTC (rev 6996) @@ -81,27 +81,11 @@ """ - def __getattr__(self, attr): - if attr == 'colind': - warn("colind attribute no longer in use. Use .indices instead", - DeprecationWarning) - return self.indices - else: - return _cs_matrix.__getattr__(self, attr) - def transpose(self, copy=False): from csc import csc_matrix M,N = self.shape return csc_matrix((self.data,self.indices,self.indptr), shape=(N,M), copy=copy) - @np.deprecate - def rowcol(self, ind): - #TODO remove after 0.7 - col = self.indices[ind] - row = np.searchsorted(self.indptr, ind+1)-1 - return (row, col) - - def tolil(self): from lil import lil_matrix lil = lil_matrix(self.shape,dtype=self.dtype) @@ -200,7 +184,7 @@ slicing of the form self[[1,2,3],:] """ indices = asindices(indices) - + (min_indx,max_indx) = check_bounds(indices,N) if min_indx < 0: From scipy-svn at scipy.org Sun Dec 5 01:39:11 2010 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Sun, 5 Dec 2010 00:39:11 -0600 (CST) Subject: [Scipy-svn] r6997 - trunk/scipy/stats/tests Message-ID: <20101205063911.C721F37D3B4@scipy.org> Author: rgommers Date: 2010-12-05 00:39:11 -0600 (Sun, 05 Dec 2010) New Revision: 6997 Modified: trunk/scipy/stats/tests/test_stats.py Log: TST: add correct significance to some stats tests. Closes #467. Modified: trunk/scipy/stats/tests/test_stats.py =================================================================== --- trunk/scipy/stats/tests/test_stats.py 2010-12-05 05:54:13 UTC (rev 6996) +++ trunk/scipy/stats/tests/test_stats.py 2010-12-05 06:39:11 UTC (rev 6997) @@ -114,26 +114,23 @@ II. C. 
Basic Statistics """ + dprec = np.finfo(np.float64).precision + + # Really need to write these tests to handle missing values properly def test_tmeanX(self): y = stats.tmean(X, (2, 8), (True, True)) - assert_almost_equal(y, 5.0) + assert_approx_equal(y, 5.0, significant=TestBasicStats.dprec) def test_tvarX(self): y = stats.tvar(X, (2, 8), (True, True)) - assert_almost_equal(y, 4.6666666666666661) + assert_approx_equal(y, 4.6666666666666661, + significant=TestBasicStats.dprec) def test_tstdX(self): y = stats.tstd(X, (2, 8), (True, True)) - assert_almost_equal(y, 2.1602468994692865) + assert_approx_equal(y, 2.1602468994692865, + significant=TestBasicStats.dprec) -## Really need to write these tests to handle missing values properly -## def test_meanMISS(self): -## y = np.mean(MISS) -## assert_almost_equal(y, 0.0) -## -## def test_stdMISS(self): -## y = stats.stdev(MISS) -## assert_almost_equal(y, 0.0) class TestNanFunc(TestCase): From scipy-svn at scipy.org Wed Dec 8 08:51:54 2010 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Wed, 8 Dec 2010 07:51:54 -0600 (CST) Subject: [Scipy-svn] r6998 - trunk/scipy/optimize/tests Message-ID: <20101208135154.0B2B337C3EB@scipy.org> Author: rgommers Date: 2010-12-08 07:51:53 -0600 (Wed, 08 Dec 2010) New Revision: 6998 Modified: trunk/scipy/optimize/tests/test_optimize.py Log: TST: change criterion for fmin_ncg test. Closes #1323. The number of gradient evaluations has a slight platform dependence due to floating-point comparison differences. Number of calls is 18 on most platforms, but has been reported as 16 in the ticket and on-list, with Python 2.7 and 2.5 respectively. Modified: trunk/scipy/optimize/tests/test_optimize.py =================================================================== --- trunk/scipy/optimize/tests/test_optimize.py 2010-12-05 06:39:11 UTC (rev 6997) +++ trunk/scipy/optimize/tests/test_optimize.py 2010-12-08 13:51:53 UTC (rev 6998) @@ -174,7 +174,8 @@ # Ensure that function call counts are 'known good'; these are from # Scipy 0.7.0. Don't allow them to increase. assert_(self.funccalls == 7, self.funccalls) - assert_(self.gradcalls == 18, self.gradcalls) # 0.8.0 + assert_(self.gradcalls <= 18, self.gradcalls) # 0.9.0 + #assert_(self.gradcalls == 18, self.gradcalls) # 0.8.0 #assert_(self.gradcalls == 22, self.gradcalls) # 0.7.0 # Ensure that the function behaves the same; this is from Scipy 0.7.0 @@ -347,7 +348,7 @@ class TestRosen(TestCase): - + def test_hess(self): """Compare rosen_hess(x) times p with rosen_hess_prod(x,p) (ticket #1248)""" x = array([3, 4, 5]) From scipy-svn at scipy.org Fri Dec 10 07:59:31 2010 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Fri, 10 Dec 2010 06:59:31 -0600 (CST) Subject: [Scipy-svn] r6999 - branches Message-ID: <20101210125931.A98ED37D3B0@scipy.org> Author: rgommers Date: 2010-12-10 06:59:30 -0600 (Fri, 10 Dec 2010) New Revision: 6999 Added: branches/0.9.x/ Log: Create branch for 0.9 series From scipy-svn at scipy.org Fri Dec 10 21:22:30 2010 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Fri, 10 Dec 2010 20:22:30 -0600 (CST) Subject: [Scipy-svn] r7000 - in trunk: . doc/release Message-ID: <20101211022230.B8E8137C3F2@scipy.org> Author: rgommers Date: 2010-12-10 20:22:29 -0600 (Fri, 10 Dec 2010) New Revision: 7000 Added: trunk/doc/release/0.10.0-notes.rst Modified: trunk/setup.py Log: Trunk is open for development of 0.10 (? is that the next version) series. 
Added: trunk/doc/release/0.10.0-notes.rst =================================================================== --- trunk/doc/release/0.10.0-notes.rst (rev 0) +++ trunk/doc/release/0.10.0-notes.rst 2010-12-11 02:22:29 UTC (rev 7000) @@ -0,0 +1,55 @@ +========================= +SciPy 0.10.0 Release Notes +========================= + +.. note:: Scipy 0.10.0 is not released yet! + +.. contents:: + +SciPy 0.10.0 is the culmination of XXX months of hard work. It contains +many new features, numerous bug-fixes, improved test coverage and +better documentation. There have been a number of deprecations and +API changes in this release, which are documented below. All users +are encouraged to upgrade to this release, as there are a large number +of bug-fixes and optimizations. Moreover, our development attention +will now shift to bug-fix releases on the 0.10.x branch, and on adding +new features on the development trunk. + +This release requires Python 2.4 - 2.7 or 3.1 - and NumPy 1.5 or greater. + +Please note that SciPy is still considered to have "Beta" status, as +we work toward a SciPy 1.0.0 release. The 1.0.0 release will mark a +major milestone in the development of SciPy, after which changing the +package structure or API will be much more difficult. Whilst these +pre-1.0 releases are considered to have "Beta" status, we are +committed to making them as bug-free as possible. For example, in +addition to fixing numerous bugs in this release, we have also doubled +the number of unit tests since the last release. + +However, until the 1.0 release, we are aggressively reviewing and +refining the functionality, organization, and interface. This is being +done in an effort to make the package as coherent, intuitive, and +useful as possible. To achieve this, we need help from the community +of users. Specifically, we need feedback regarding all aspects of the +project - everything - from which algorithms we implement, to details +about our function's call signatures. + + +New features +============ + + + +Deprecated features +=================== + + + +Removed features +================ + + + +Other changes +============= + Modified: trunk/setup.py =================================================================== --- trunk/setup.py 2010-12-10 12:59:30 UTC (rev 6999) +++ trunk/setup.py 2010-12-11 02:22:29 UTC (rev 7000) @@ -42,7 +42,7 @@ """ MAJOR = 0 -MINOR = 9 +MINOR = 10 MICRO = 0 ISRELEASED = False VERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO) From scipy-svn at scipy.org Sat Dec 11 13:27:02 2010 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Sat, 11 Dec 2010 12:27:02 -0600 (CST) Subject: [Scipy-svn] r7001 - trunk/scipy/linalg Message-ID: <20101211182702.F2B2437D3CA@scipy.org> Author: ptvirtan Date: 2010-12-11 12:27:02 -0600 (Sat, 11 Dec 2010) New Revision: 7001 Modified: trunk/scipy/linalg/decomp_qr.py Log: ENH: linalg/decomp_qr: Avoid calling find_best_lapack_type twice. 
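The pattern in this change is small but worth spelling out: probe the LAPACK type character once and test membership of the result, rather than calling the helper twice with identical arguments. A hypothetical stand-alone sketch of that refactor, where ``probe_type`` and ``a1`` are stand-ins for ``find_best_lapack_type`` and the input array rather than the real SciPy objects::

    def probe_type(arrays):
        # Stand-in helper: return a tuple whose first element is a
        # LAPACK type character for the given inputs.
        return ('d',)

    a1 = object()  # stand-in for the input array

    # Before: two identical calls to the helper.
    #   if probe_type((a1,))[0] == 's' or probe_type((a1,))[0] == 'd': ...
    # After: a single call, then a membership test on the result.
    if probe_type((a1,))[0] in ('s', 'd'):
        routine = 'orgqr'   # real-valued case
    else:
        routine = 'ungqr'   # complex-valued case
    print(routine)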
Modified: trunk/scipy/linalg/decomp_qr.py =================================================================== --- trunk/scipy/linalg/decomp_qr.py 2010-12-11 02:22:29 UTC (rev 7000) +++ trunk/scipy/linalg/decomp_qr.py 2010-12-11 18:27:02 UTC (rev 7001) @@ -100,8 +100,7 @@ if mode == 'r': return R - if find_best_lapack_type((a1,))[0] == 's' or \ - find_best_lapack_type((a1,))[0] == 'd': + if find_best_lapack_type((a1,))[0] in ('s', 'd'): gor_un_gqr, = get_lapack_funcs(('orgqr',), (qr,)) else: gor_un_gqr, = get_lapack_funcs(('ungqr',), (qr,)) From scipy-svn at scipy.org Sat Dec 11 16:09:41 2010 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Sat, 11 Dec 2010 15:09:41 -0600 (CST) Subject: [Scipy-svn] r7002 - in trunk/scipy/linalg: . tests Message-ID: <20101211210941.9286A37D488@scipy.org> Author: ptvirtan Date: 2010-12-11 15:09:40 -0600 (Sat, 11 Dec 2010) New Revision: 7002 Modified: trunk/scipy/linalg/decomp.py trunk/scipy/linalg/tests/test_decomp.py Log: BUG: linalg: work around a LAPACK bug in DGGEV (#709) Modified: trunk/scipy/linalg/decomp.py =================================================================== --- trunk/scipy/linalg/decomp.py 2010-12-11 18:27:02 UTC (rev 7001) +++ trunk/scipy/linalg/decomp.py 2010-12-11 21:09:40 UTC (rev 7002) @@ -11,13 +11,13 @@ # April 2010: Functions for LU, QR, SVD, Schur and Cholesky decompositions were # moved to their own files. Still in this file are functions for eigenstuff # and for the Hessenberg form. - + __all__ = ['eig','eigh','eig_banded','eigvals','eigvalsh', 'eigvals_banded', 'hessenberg'] import numpy from numpy import array, asarray_chkfinite, asarray, diag, zeros, ones, \ - isfinite, inexact, nonzero, iscomplexobj, cast + isfinite, inexact, nonzero, iscomplexobj, cast, flatnonzero, conj # Local imports from scipy.linalg import calc_lwork @@ -28,20 +28,17 @@ _I = cast['F'](1j) -def _make_complex_eigvecs(w, vin, cmplx_tcode): - v = numpy.array(vin, dtype=cmplx_tcode) - #ind = numpy.flatnonzero(numpy.not_equal(w.imag,0.0)) - ind = numpy.flatnonzero(numpy.logical_and(numpy.not_equal(w.imag, 0.0), - numpy.isfinite(w))) - vnew = numpy.zeros((v.shape[0], len(ind)>>1), cmplx_tcode) - vnew.real = numpy.take(vin, ind[::2],1) - vnew.imag = numpy.take(vin, ind[1::2],1) - count = 0 - conj = numpy.conjugate - for i in range(len(ind)//2): - v[:, ind[2*i]] = vnew[:, count] - v[:, ind[2*i+1]] = conj(vnew[:, count]) - count += 1 +def _make_complex_eigvecs(w, vin, dtype): + """ + Produce complex-valued eigenvectors from LAPACK DGGEV real-valued output + """ + # - see LAPACK man page DGGEV at ALPHAI + v = numpy.array(vin, dtype=dtype) + m = (w.imag > 0) + m[:-1] |= (w.imag[1:] < 0) # workaround for LAPACK bug, cf. 
ticket #709 + for i in flatnonzero(m): + v.imag[:,i] = vin[:,i+1] + conj(v[:,i], v[:,i+1]) return v def _geneig(a1, b, left, right, overwrite_a, overwrite_b): Modified: trunk/scipy/linalg/tests/test_decomp.py =================================================================== --- trunk/scipy/linalg/tests/test_decomp.py 2010-12-11 18:27:02 UTC (rev 7001) +++ trunk/scipy/linalg/tests/test_decomp.py 2010-12-11 21:09:40 UTC (rev 7002) @@ -132,7 +132,7 @@ assert_array_almost_equal(w,exact_w) -class TestEig(TestCase): +class TestEig(object): def test_simple(self): a = [[1,2,3],[1,2,3],[2,5,6]] @@ -154,6 +154,16 @@ for i in range(3): assert_array_almost_equal(dot(transpose(a),v[:,i]),w[i]*v[:,i]) + def test_simple_complex_eig(self): + a = [[1,2],[-2,1]] + w,vl,vr = eig(a,left=1,right=1) + assert_array_almost_equal(w, array([1+2j, 1-2j])) + for i in range(2): + assert_array_almost_equal(dot(a,vr[:,i]),w[i]*vr[:,i]) + for i in range(2): + assert_array_almost_equal(dot(conjugate(transpose(a)),vl[:,i]), + conjugate(w[i])*vl[:,i]) + def test_simple_complex(self): a = [[1,2,3],[1,2,3],[2,5,6+1j]] w,vl,vr = eig(a,left=1,right=1) @@ -163,29 +173,32 @@ assert_array_almost_equal(dot(conjugate(transpose(a)),vl[:,i]), conjugate(w[i])*vl[:,i]) + def _check_gen_eig(self, A, B): + A, B = asarray(A), asarray(B) + msg = "\n%r\n%r" % (A, B) + w, vr = eig(A,B) + wt = eigvals(A,B) + val1 = dot(A, vr) + val2 = dot(B, vr) * w + res = val1 - val2 + for i in range(res.shape[1]): + if all(isfinite(res[:, i])): + assert_array_almost_equal(res[:, i], 0, err_msg=msg) + + assert_array_almost_equal(sort(w[isfinite(w)]), sort(wt[isfinite(wt)]), + err_msg=msg) + def test_singular(self): """Test singular pair""" # Example taken from # http://www.cs.umu.se/research/nla/singular_pairs/guptri/matlab.html A = array(( [22,34,31,31,17], [45,45,42,19,29], [39,47,49,26,34], [27,31,26,21,15], [38,44,44,24,30])) - B = array(( [13,26,25,17,24], [31,46,40,26,37], [26,40,19,25,25], [16,25,27,14,23], [24,35,18,21,22])) - w, vr = eig(A,B) - wt = eigvals(A,B) - val1 = dot(A, vr) - val2 = dot(B, vr) * w - res = val1 - val2 - for i in range(res.shape[1]): - if all(isfinite(res[:, i])): - assert_array_almost_equal(res[:, i], 0) + self._check_gen_eig(A, B) - # Disable this test, which fails now, and is not really necessary if the above - # succeeds ? 
- #assert_array_almost_equal(w[isfinite(w)], wt[isfinite(w)]) - def test_falker(self): """Test matrices giving some Nan generalized eigen values.""" M = diag(array(([1,0,3]))) @@ -195,17 +208,31 @@ I = identity(3) A = bmat([[I,Z],[Z,-K]]) B = bmat([[Z,I],[M,D]]) - A = asarray(A) - B = asarray(B) - w, vr = eig(A,B) - val1 = dot(A, vr) - val2 = dot(B, vr) * w - res = val1 - val2 - for i in range(res.shape[1]): - if all(isfinite(res[:, i])): - assert_array_almost_equal(res[:, i], 0) + self._check_gen_eig(A, B) + def test_bad_geneig(self): + # Ticket #709 (strange return values from DGGEV) + + def matrices(omega): + c1 = -9 + omega**2 + c2 = 2*omega + A = [[1, 0, 0, 0], + [0, 1, 0, 0], + [0, 0, c1, 0], + [0, 0, 0, c1]] + B = [[0, 0, 1, 0], + [0, 0, 0, 1], + [1, 0, 0, -c2], + [0, 1, c2, 0]] + return A, B + + # With a buggy LAPACK, this can fail for different omega on different + # machines -- so we need to test several values + for k in xrange(100): + A, B = matrices(omega=k*5./100) + self._check_gen_eig(A, B) + def test_not_square_error(self): """Check that passing a non-square array raises a ValueError.""" A = np.arange(6).reshape(3,2) From scipy-svn at scipy.org Sat Dec 11 18:47:46 2010 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Sat, 11 Dec 2010 17:47:46 -0600 (CST) Subject: [Scipy-svn] r7003 - in trunk/scipy/odr: . tests Message-ID: <20101211234746.E3FFD37D3ED@scipy.org> Author: ptvirtan Date: 2010-12-11 17:47:46 -0600 (Sat, 11 Dec 2010) New Revision: 7003 Modified: trunk/scipy/odr/__odrpack.c trunk/scipy/odr/tests/test_odr.py Log: BUG: odr: use 'i' instead of 'l' in Py_BuildValue for ints (#1253) Modified: trunk/scipy/odr/__odrpack.c =================================================================== --- trunk/scipy/odr/__odrpack.c 2010-12-11 21:09:40 UTC (rev 7002) +++ trunk/scipy/odr/__odrpack.c 2010-12-11 23:47:46 UTC (rev 7003) @@ -436,7 +436,7 @@ work_ind = Py_BuildValue - ("{s:l,s:l,s:l,s:l,s:l,s:l,s:l,s:l,s:l,s:l,s:l,s:l,s:l,s:l,s:l,s:l,s:l,s:l,s:l,s:l,s:l,s:l,s:l,s:l,s:l,s:l,s:l,s:l,s:l,s:l,s:l,s:l,s:l,s:l,s:l,s:l,s:l,s:l,s:l,s:l,s:l,s:l,s:l,s:l,s:l,s:l,s:l,s:l,s:l}", + ("{s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i}", "delta", delta, "eps", eps, "xplus", xplus, "fn", fn, "sd", sd, "sd", vcv, "rvar", rvar, "wss", wss, "wssde", wssde, "wssep", wssep, "rcond", rcond, "eta", eta, "olmav", olmav, "tau", tau, "alpha", @@ -499,7 +499,7 @@ retobj = Py_BuildValue - ("OOO{s:O,s:O,s:O,s:O,s:d,s:d,s:d,s:d,s:d,s:d,s:O,s:O,s:O,s:l}", + ("OOO{s:O,s:O,s:O,s:O,s:d,s:d,s:d,s:d,s:d,s:d,s:O,s:O,s:O,s:i}", PyArray_Return(beta), PyArray_Return(sd_beta), PyArray_Return(cov_beta), "delta", PyArray_Return(deltaA), "eps", PyArray_Return(epsA), "xplus", PyArray_Return(xplusA), "y", @@ -551,7 +551,7 @@ if (kwds == NULL) { - if (!PyArg_ParseTuple(args, "OOOO|OOOOOOOllz#z#ldddlOOOOOOi:odr", + if (!PyArg_ParseTuple(args, "OOOO|OOOOOOOiiz#z#idddiOOOOOOi:odr", &fcn, &initbeta, &py, &px, &pwe, &pwd, &fjacb, &fjacd, &extra_args, &pifixb, &pifixx, &job, &iprint, &errfile, &lerrfile, &rptfile, @@ -565,7 +565,7 @@ else { if (!PyArg_ParseTupleAndKeywords(args, kwds, - "OOOO|OOOOOOOllz#z#ldddlOOOOOOi:odr", + "OOOO|OOOOOOOiiz#z#idddiOOOOOOi:odr", kw_list, &fcn, &initbeta, &py, &px, &pwe, &pwd, &fjacb, &fjacd, &extra_args, &pifixb, &pifixx, &job, @@ -1326,7 +1326,7 @@ printdict = Py_BuildValue - 
("{s:l,s:l,s:l,s:l,s:O,s:O,s:l,s:O,s:l,s:O,s:l,s:l,s:O,s:l,s:l,s:O,s:O,s:l,s:l,s:l,s:d,s:d,s:d,s:l,s:O,s:O,s:l,s:O,s:O,s:l,s:O,s:l,s:O,s:l,s:l}", + ("{s:i,s:i,s:i,s:i,s:O,s:O,s:i,s:O,s:i,s:O,s:i,s:i,s:O,s:i,s:i,s:O,s:O,s:i,s:i,s:i,s:d,s:d,s:d,s:i,s:O,s:O,s:i,s:O,s:O,s:i,s:O,s:i,s:O,s:i,s:i}", "n", n, "m", m, "np", np, "nq", nq, "beta", (PyObject *) beta, "y", (PyObject *) y, "ldy", ldy, "x", (PyObject *) x, "ldx", ldx, "we", (PyObject *) we, "ldwe", ldwe, "ld2we", ld2we, "wd", (PyObject *) wd, Modified: trunk/scipy/odr/tests/test_odr.py =================================================================== --- trunk/scipy/odr/tests/test_odr.py 2010-12-11 21:09:40 UTC (rev 7002) +++ trunk/scipy/odr/tests/test_odr.py 2010-12-11 23:47:46 UTC (rev 7003) @@ -1,7 +1,8 @@ # Scipy imports. import numpy as np from numpy import pi -from numpy.testing import assert_array_almost_equal, TestCase, run_module_suite +from numpy.testing import assert_array_almost_equal, TestCase, \ + run_module_suite, assert_equal from scipy.odr import Data, Model, ODR, RealData, odr_stop @@ -307,6 +308,21 @@ ) + def test_ticket_1253(self): + def linear(c, x): + return c[0]*x+c[1] + + c = [2.0, 3.0] + x = np.linspace(0, 10) + y = linear(c, x) + + model = Model(linear) + data = Data(x, y, wd=1.0, we=1.0) + job = ODR(data, model, beta0=[1.0, 1.0]) + result = job.run() + assert_equal(result.info, 2) + + if __name__ == "__main__": run_module_suite() #### EOF ####################################################################### From scipy-svn at scipy.org Sat Dec 11 20:19:38 2010 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Sat, 11 Dec 2010 19:19:38 -0600 (CST) Subject: [Scipy-svn] r7004 - trunk/scipy/sparse/linalg/dsolve Message-ID: <20101212011938.B60B537D4A5@scipy.org> Author: ptvirtan Date: 2010-12-11 19:19:38 -0600 (Sat, 11 Dec 2010) New Revision: 7004 Modified: trunk/scipy/sparse/linalg/dsolve/_superlu_utils.c Log: BUG: sparse/superlu: safer pointer to int conversion in _superlu_utils (#1236) Modified: trunk/scipy/sparse/linalg/dsolve/_superlu_utils.c =================================================================== --- trunk/scipy/sparse/linalg/dsolve/_superlu_utils.c 2010-12-11 23:47:46 UTC (rev 7003) +++ trunk/scipy/sparse/linalg/dsolve/_superlu_utils.c 2010-12-12 01:19:38 UTC (rev 7004) @@ -12,9 +12,9 @@ jmp_buf _superlu_py_jmpbuf; PyObject *_superlumodule_memory_dict=NULL; -/* Abort to be used inside the superlu module so that memory allocation +/* Abort to be used inside the superlu module so that memory allocation errors don't exit Python and memory allocated internal to SuperLU is freed. - Calling program should deallocate (using SUPERLU_FREE) all memory that could have + Calling program should deallocate (using SUPERLU_FREE) all memory that could have been allocated. (It's ok to FREE unallocated memory)---will be ignored. 
*/ @@ -27,16 +27,14 @@ void *superlu_python_module_malloc(size_t size) { PyObject *key=NULL; - long keyval; - void *mem_ptr; + void *mem_ptr; if (_superlumodule_memory_dict == NULL) { _superlumodule_memory_dict = PyDict_New(); } mem_ptr = malloc(size); if (mem_ptr == NULL) return NULL; - keyval = (long) mem_ptr; - key = PyInt_FromLong(keyval); + key = PyLong_FromVoidPtr(mem_ptr); if (key == NULL) goto fail; if (PyDict_SetItem(_superlumodule_memory_dict, key, Py_None)) goto fail; Py_DECREF(key); @@ -47,31 +45,29 @@ free(mem_ptr); superlu_python_module_abort("superlu_malloc: Cannot set dictionary key value in malloc."); return NULL; - + } void superlu_python_module_free(void *ptr) { PyObject *key; - long keyval; PyObject *ptype, *pvalue, *ptraceback; if (ptr == NULL) return; PyErr_Fetch(&ptype, &pvalue, &ptraceback); - keyval = (long )ptr; - key = PyInt_FromLong(keyval); + key = PyLong_FromVoidPtr(ptr); /* This will only free the pointer if it could find it in the dictionary of already allocated pointers --- thus after abort, the module can free all - the memory that "might" have been allocated to avoid memory leaks on abort + the memory that "might" have been allocated to avoid memory leaks on abort calls. - */ + */ if (_superlumodule_memory_dict && \ !(PyDict_DelItem(_superlumodule_memory_dict, key))) { free(ptr); } Py_DECREF(key); PyErr_Restore(ptype, pvalue, ptraceback); - return; + return; } /* From scipy-svn at scipy.org Sat Dec 11 21:21:19 2010 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Sat, 11 Dec 2010 20:21:19 -0600 (CST) Subject: [Scipy-svn] r7005 - in trunk/scipy/sparse/linalg: . isolve Message-ID: <20101212022119.44BD737D3FA@scipy.org> Author: ptvirtan Date: 2010-12-11 20:21:18 -0600 (Sat, 11 Dec 2010) New Revision: 7005 Modified: trunk/scipy/sparse/linalg/interface.py trunk/scipy/sparse/linalg/isolve/utils.py Log: ENH: sparse.linalg: cut unnecessary LinearOperator interface overhead for matrices and arrays (#1036) Modified: trunk/scipy/sparse/linalg/interface.py =================================================================== --- trunk/scipy/sparse/linalg/interface.py 2010-12-12 01:19:38 UTC (rev 7004) +++ trunk/scipy/sparse/linalg/interface.py 2010-12-12 02:21:18 UTC (rev 7005) @@ -200,6 +200,38 @@ return '<%dx%d LinearOperator with %s>' % (M,N,dt) +class MatrixLinearOperator(LinearOperator): + def __init__(self, A): + LinearOperator.__init__(self, shape=A.shape, dtype=A.dtype, + matvec=None, rmatvec=self.rmatvec) + self.matvec = A.dot + self.matmat = A.dot + self.__mul__ = A.dot + self.A = A + self.A_conj = None + + def rmatvec(self, x): + if self.A_conj is None: + self.A_conj = self.A.T.conj() + return self.A_conj.dot(x) + +class IdentityOperator(LinearOperator): + def __init__(self, shape, dtype): + LinearOperator.__init__(self, shape=shape, dtype=dtype, matvec=None, + rmatvec=self.rmatvec) + + def matvec(self, x): + return x + + def rmatvec(self, x): + return x + + def matmat(self, x): + return x + + def __mul__(self, x): + return x + def aslinearoperator(A): """Return A as a LinearOperator. 
@@ -226,27 +258,11 @@ elif isinstance(A, np.ndarray) or isinstance(A, np.matrix): if A.ndim > 2: raise ValueError('array must have rank <= 2') - A = np.atleast_2d(np.asarray(A)) + return MatrixLinearOperator(A) - def matvec(v): - return np.dot(A, v) - def rmatvec(v): - return np.dot(A.conj().transpose(), v) - def matmat(V): - return np.dot(A, V) - return LinearOperator(A.shape, matvec, rmatvec=rmatvec, - matmat=matmat, dtype=A.dtype) - elif isspmatrix(A): - def matvec(v): - return A * v - def rmatvec(v): - return A.conj().transpose() * v - def matmat(V): - return A * V - return LinearOperator(A.shape, matvec, rmatvec=rmatvec, - matmat=matmat, dtype=A.dtype) + return MatrixLinearOperator(A) else: if hasattr(A, 'shape') and hasattr(A, 'matvec'): Modified: trunk/scipy/sparse/linalg/isolve/utils.py =================================================================== --- trunk/scipy/sparse/linalg/isolve/utils.py 2010-12-12 01:19:38 UTC (rev 7004) +++ trunk/scipy/sparse/linalg/isolve/utils.py 2010-12-12 02:21:18 UTC (rev 7005) @@ -6,7 +6,8 @@ from numpy import asanyarray, asarray, asmatrix, array, matrix, zeros -from scipy.sparse.linalg.interface import aslinearoperator, LinearOperator +from scipy.sparse.linalg.interface import aslinearoperator, LinearOperator, \ + IdentityOperator _coerce_rules = {('f','f'):'f', ('f','d'):'d', ('f','F'):'F', ('f','D'):'D', ('d','f'):'d', ('d','d'):'d', @@ -116,7 +117,11 @@ rpsolve = A_.rpsolve else: rpsolve = id - M = LinearOperator(A.shape, matvec=psolve, rmatvec=rpsolve, dtype=A.dtype) + if psolve is id and rpsolve is id: + M = IdentityOperator(shape=A.shape, dtype=A.dtype) + else: + M = LinearOperator(A.shape, matvec=psolve, rmatvec=rpsolve, + dtype=A.dtype) else: M = aslinearoperator(M) if A.shape != M.shape: From scipy-svn at scipy.org Sun Dec 12 03:14:03 2010 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Sun, 12 Dec 2010 02:14:03 -0600 (CST) Subject: [Scipy-svn] r7006 - trunk/scipy/stats Message-ID: <20101212081403.3D84D32347@scipy.org> Author: rgommers Date: 2010-12-12 02:14:02 -0600 (Sun, 12 Dec 2010) New Revision: 7006 Modified: trunk/scipy/stats/stats.py Log: BUG: 3K: fix integer division and list.sort issues in kendalltau. 
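Two Python 3 changes drive this patch: ``list.sort`` no longer accepts a comparison function, and ``/`` between integers returns a float, so the tie and pair counts must use floor division. A minimal self-contained sketch of both fixes, using made-up data rather than anything from the SciPy test suite::

    x = [1, 2, 3, 4]
    y = [2, 1, 4, 3]
    n = len(x)

    # A key function ordering indices by (x, y) pairs replaces the
    # removed cmp-style comparator.
    perm = list(range(n))
    perm.sort(key=lambda a: (x[a], y[a]))

    # Counts must stay integers, so use floor division.
    tot = (n * (n - 1)) // 2
    print(perm, tot)   # -> [0, 1, 2, 3] 6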
Modified: trunk/scipy/stats/stats.py =================================================================== --- trunk/scipy/stats/stats.py 2010-12-12 02:21:18 UTC (rev 7005) +++ trunk/scipy/stats/stats.py 2010-12-12 08:14:02 UTC (rev 7006) @@ -2600,7 +2600,7 @@ perm[offs] = perm[offs+1] perm[offs+1] = t return 1 - length0 = length / 2 + length0 = length // 2 length1 = length - length0 middle = offs + length0 exchcnt += mergesort(offs, length0) @@ -2632,25 +2632,25 @@ else: # sort implemented as quicksort, 30% faster but with worst case: O(n^2) perm = range(n) - perm.sort(lambda a,b: cmp(x[a],x[b]) or cmp(y[a],y[b])) + perm.sort(key=lambda a: (x[a], y[a])) # compute joint ties first = 0 t = 0 for i in xrange(1, n): if x[perm[first]] != x[perm[i]] or y[perm[first]] != y[perm[i]]: - t += ((i - first) * (i - first - 1)) / 2 + t += ((i - first) * (i - first - 1)) // 2 first = i - t += ((n - first) * (n - first - 1)) / 2 + t += ((n - first) * (n - first - 1)) // 2 # compute ties in x first = 0 u = 0 for i in xrange(1,n): if x[perm[first]] != x[perm[i]]: - u += ((i - first) * (i - first - 1)) / 2 + u += ((i - first) * (i - first - 1)) // 2 first = i - u += ((n - first) * (n - first - 1)) / 2 + u += ((n - first) * (n - first - 1)) // 2 # count exchanges exchanges = mergesort(0, n) @@ -2659,11 +2659,11 @@ v = 0 for i in xrange(1,n): if y[perm[first]] != y[perm[i]]: - v += ((i - first) * (i - first - 1)) / 2 + v += ((i - first) * (i - first - 1)) // 2 first = i - v += ((n - first) * (n - first - 1)) / 2 + v += ((n - first) * (n - first - 1)) // 2 - tot = (n * (n - 1)) / 2 + tot = (n * (n - 1)) // 2 if tot == u and tot == v: return 1 # Special case for all ties in both ranks From scipy-svn at scipy.org Sun Dec 12 03:19:11 2010 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Sun, 12 Dec 2010 02:19:11 -0600 (CST) Subject: [Scipy-svn] r7007 - branches/0.9.x Message-ID: <20101212081911.2811E32375@scipy.org> Author: rgommers Date: 2010-12-12 02:19:10 -0600 (Sun, 12 Dec 2010) New Revision: 7007 Modified: branches/0.9.x/pavement.py Log: REL: update pavement.py for 0.9.x series. 
Modified: branches/0.9.x/pavement.py =================================================================== --- branches/0.9.x/pavement.py 2010-12-12 08:14:02 UTC (rev 7006) +++ branches/0.9.x/pavement.py 2010-12-12 08:19:10 UTC (rev 7007) @@ -78,25 +78,29 @@ WINE_PY25 = [r"C:\Python25\python.exe"] WINE_PY26 = [r"C:\Python26\python26.exe"] WINE_PY27 = [r"C:\Python27\python27.exe"] + WINE_PY31 = [r"C:\Python31\python.exe"] MAKENSIS = ["makensis"] elif sys.platform == "darwin": WINE_PY25 = ["wine", os.environ['HOME'] + "/.wine/drive_c/Python25/python.exe"] WINE_PY26 = ["wine", os.environ['HOME'] + "/.wine/drive_c/Python26/python.exe"] WINE_PY27 = ["wine", os.environ['HOME'] + "/.wine/drive_c/Python27/python.exe"] + WINE_PY31 = ["wine", os.environ['HOME'] + "/.wine/drive_c/Python31/python.exe"] MAKENSIS = ["wine", "makensis"] else: WINE_PY25 = [os.environ['HOME'] + "/.wine/drive_c/Python25/python.exe"] WINE_PY26 = [os.environ['HOME'] + "/.wine/drive_c/Python26/python.exe"] WINE_PY27 = [os.environ['HOME'] + "/.wine/drive_c/Python27/python.exe"] + WINE_PY31 = [os.environ['HOME'] + "/.wine/drive_c/Python31/python.exe"], MAKENSIS = ["wine", "makensis"] -WINE_PYS = {'2.7' : WINE_PY27, '2.6' : WINE_PY26, '2.5': WINE_PY25} +WINE_PYS = {'3.1':WINE_PY31, '2.7':WINE_PY27, '2.6':WINE_PY26, '2.5':WINE_PY25} SUPERPACK_BUILD = 'build-superpack' SUPERPACK_BINDIR = os.path.join(SUPERPACK_BUILD, 'binaries') # XXX: fix this in a sane way MPKG_PYTHON = {"2.5": "/Library/Frameworks/Python.framework/Versions/2.5/bin/python", "2.6": "/Library/Frameworks/Python.framework/Versions/2.6/bin/python", - "2.7": "/Library/Frameworks/Python.framework/Versions/2.7/bin/python"} + "2.7": "/Library/Frameworks/Python.framework/Versions/2.7/bin/python", + "3.1": "/Library/Frameworks/Python.framework/Versions/3.1/bin/python3"} # Full path to the *static* gfortran runtime LIBGFORTRAN_A_PATH = "/usr/local/lib/libgfortran.a" @@ -114,7 +118,7 @@ # Start/end of the log (from git) LOG_START = 'svn/tags/0.8.0' -LOG_END = 'master' +LOG_END = 'svn/0.9.x' # Virtualenv bootstrap stuff BOOTSTRAP_DIR = "bootstrap" @@ -447,10 +451,8 @@ def _build_mpkg(pyver): numver = parse_numpy_version(MPKG_PYTHON[pyver]) numverstr = ".".join(["%i" % i for i in numver]) - if pyver == "2.5" and not numver[:2] == (1, 2): - raise ValueError("Scipy 0.7.x should be built against numpy 1.2.x for python 2.5 (detected %s)" % numverstr) - elif pyver == "2.6" and not numver[:2] == (1, 3): - raise ValueError("Scipy 0.7.x should be built against numpy 1.3.x for python 2.6 (detected %s)" % numverstr) + if not numver == (1, 5, 1): + raise ValueError("Scipy 0.9.x should be built against numpy 1.5.1, (detected %s)" % numverstr) prepare_static_gfortran_runtime("build") ldflags = "-undefined dynamic_lookup -bundle -arch i386 -arch ppc -Wl,-search_paths_first" From scipy-svn at scipy.org Sun Dec 12 03:19:30 2010 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Sun, 12 Dec 2010 02:19:30 -0600 (CST) Subject: [Scipy-svn] r7008 - branches/0.9.x/scipy/stats Message-ID: <20101212081930.89CC737D3CB@scipy.org> Author: rgommers Date: 2010-12-12 02:19:30 -0600 (Sun, 12 Dec 2010) New Revision: 7008 Modified: branches/0.9.x/scipy/stats/stats.py Log: BUG: 3K: fix integer division and list.sort issues in kendalltau. Backported from r7006. 
Modified: branches/0.9.x/scipy/stats/stats.py =================================================================== --- branches/0.9.x/scipy/stats/stats.py 2010-12-12 08:19:10 UTC (rev 7007) +++ branches/0.9.x/scipy/stats/stats.py 2010-12-12 08:19:30 UTC (rev 7008) @@ -2600,7 +2600,7 @@ perm[offs] = perm[offs+1] perm[offs+1] = t return 1 - length0 = length / 2 + length0 = length // 2 length1 = length - length0 middle = offs + length0 exchcnt += mergesort(offs, length0) @@ -2632,25 +2632,25 @@ else: # sort implemented as quicksort, 30% faster but with worst case: O(n^2) perm = range(n) - perm.sort(lambda a,b: cmp(x[a],x[b]) or cmp(y[a],y[b])) + perm.sort(key=lambda a: (x[a], y[a])) # compute joint ties first = 0 t = 0 for i in xrange(1, n): if x[perm[first]] != x[perm[i]] or y[perm[first]] != y[perm[i]]: - t += ((i - first) * (i - first - 1)) / 2 + t += ((i - first) * (i - first - 1)) // 2 first = i - t += ((n - first) * (n - first - 1)) / 2 + t += ((n - first) * (n - first - 1)) // 2 # compute ties in x first = 0 u = 0 for i in xrange(1,n): if x[perm[first]] != x[perm[i]]: - u += ((i - first) * (i - first - 1)) / 2 + u += ((i - first) * (i - first - 1)) // 2 first = i - u += ((n - first) * (n - first - 1)) / 2 + u += ((n - first) * (n - first - 1)) // 2 # count exchanges exchanges = mergesort(0, n) @@ -2659,11 +2659,11 @@ v = 0 for i in xrange(1,n): if y[perm[first]] != y[perm[i]]: - v += ((i - first) * (i - first - 1)) / 2 + v += ((i - first) * (i - first - 1)) // 2 first = i - v += ((n - first) * (n - first - 1)) / 2 + v += ((n - first) * (n - first - 1)) // 2 - tot = (n * (n - 1)) / 2 + tot = (n * (n - 1)) // 2 if tot == u and tot == v: return 1 # Special case for all ties in both ranks From scipy-svn at scipy.org Sun Dec 12 04:29:31 2010 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Sun, 12 Dec 2010 03:29:31 -0600 (CST) Subject: [Scipy-svn] r7009 - branches/0.9.x Message-ID: <20101212092931.99FDC37D3D9@scipy.org> Author: rgommers Date: 2010-12-12 03:29:30 -0600 (Sun, 12 Dec 2010) New Revision: 7009 Modified: branches/0.9.x/setup.py Log: REL: set version to 0.9.0b1 Modified: branches/0.9.x/setup.py =================================================================== --- branches/0.9.x/setup.py 2010-12-12 08:19:30 UTC (rev 7008) +++ branches/0.9.x/setup.py 2010-12-12 09:29:30 UTC (rev 7009) @@ -44,8 +44,8 @@ MAJOR = 0 MINOR = 9 MICRO = 0 -ISRELEASED = False -VERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO) +ISRELEASED = True +VERSION = '%d.%d.%db1' % (MAJOR, MINOR, MICRO) # BEFORE importing distutils, remove MANIFEST. distutils doesn't properly # update it when the contents of directories change. From scipy-svn at scipy.org Sun Dec 12 04:30:12 2010 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Sun, 12 Dec 2010 03:30:12 -0600 (CST) Subject: [Scipy-svn] r7010 - tags Message-ID: <20101212093012.61E1737D3DB@scipy.org> Author: rgommers Date: 2010-12-12 03:30:12 -0600 (Sun, 12 Dec 2010) New Revision: 7010 Added: tags/0.9.0b1/ Log: Create tag 0.9.0b1 From scipy-svn at scipy.org Sun Dec 12 04:31:16 2010 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Sun, 12 Dec 2010 03:31:16 -0600 (CST) Subject: [Scipy-svn] r7011 - branches/0.9.x Message-ID: <20101212093116.34B5237D3E0@scipy.org> Author: rgommers Date: 2010-12-12 03:31:16 -0600 (Sun, 12 Dec 2010) New Revision: 7011 Modified: branches/0.9.x/setup.py Log: REL: set released=False again. 
Modified: branches/0.9.x/setup.py =================================================================== --- branches/0.9.x/setup.py 2010-12-12 09:30:12 UTC (rev 7010) +++ branches/0.9.x/setup.py 2010-12-12 09:31:16 UTC (rev 7011) @@ -44,7 +44,7 @@ MAJOR = 0 MINOR = 9 MICRO = 0 -ISRELEASED = True +ISRELEASED = False VERSION = '%d.%d.%db1' % (MAJOR, MINOR, MICRO) # BEFORE importing distutils, remove MANIFEST. distutils doesn't properly From scipy-svn at scipy.org Sun Dec 12 14:59:09 2010 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Sun, 12 Dec 2010 13:59:09 -0600 (CST) Subject: [Scipy-svn] r7012 - trunk/scipy/integrate Message-ID: <20101212195909.2668537D3E1@scipy.org> Author: ptvirtan Date: 2010-12-12 13:59:08 -0600 (Sun, 12 Dec 2010) New Revision: 7012 Modified: trunk/scipy/integrate/__quadpack.h Log: BUG: integrate: correct use of npy_intp vs int in __quadpack.h (#1103) Modified: trunk/scipy/integrate/__quadpack.h =================================================================== --- trunk/scipy/integrate/__quadpack.h 2010-12-12 09:31:16 UTC (rev 7011) +++ trunk/scipy/integrate/__quadpack.h 2010-12-12 19:59:08 UTC (rev 7012) @@ -1,11 +1,11 @@ /* This file should be included into the _multipackmodule file */ /* $Revision$ */ /* module_methods: - {"_qagse", quadpack_qagse, METH_VARARGS, doc_qagse}, + {"_qagse", quadpack_qagse, METH_VARARGS, doc_qagse}, {"_qagie", quadpack_qagie, METH_VARARGS, doc_qagie}, - {"_qagpe", quadpack_qagpe, METH_VARARGS, doc_qagpe}, - {"_qawoe", quadpack_qawoe, METH_VARARGS, doc_qawoe}, - {"_qawfe", quadpack_qawfe, METH_VARARGS, doc_qawfe}, + {"_qagpe", quadpack_qagpe, METH_VARARGS, doc_qagpe}, + {"_qawoe", quadpack_qawoe, METH_VARARGS, doc_qawoe}, + {"_qawfe", quadpack_qawfe, METH_VARARGS, doc_qawfe}, {"_qawse", quadpack_qawse, METH_VARARGS, doc_qawse}, {"_qawce", quadpack_qawce, METH_VARARGS, doc_qawce}, */ @@ -76,16 +76,16 @@ /* Build argument list */ if ((arg1 = PyTuple_New(1)) == NULL) goto fail; - PyTuple_SET_ITEM(arg1, 0, PyFloat_FromDouble(*x)); + PyTuple_SET_ITEM(arg1, 0, PyFloat_FromDouble(*x)); /* arg1 now owns reference to Float object*/ if ((arglist = PySequence_Concat( arg1, quadpack_extra_arguments)) == NULL) goto fail; - + /* Call function object --- stored as a global variable. Extra arguments are in another global variable. */ if ((result = PyEval_CallObject(quadpack_python_function, arglist))==NULL) goto fail; - /* Have to do own error checking because PyFloat_AsDouble returns -1 on + /* Have to do own error checking because PyFloat_AsDouble returns -1 on error -- making that return value from the function unusable. No; Solution is to test for Python Error Occurrence if -1 is return of PyFloat_AsDouble. 
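The root cause is a width mismatch: ``PyArray_SimpleNew`` takes its dimensions as ``npy_intp``, which is pointer-sized (8 bytes on most 64-bit builds), while the variables previously passed were plain C ``int`` (typically 4 bytes), so the shape could be read from the wrong number of bytes. The patch therefore parses the Python arguments into ``int`` and copies them into local ``npy_intp`` shape arrays before creating the work arrays. A quick way to see the size mismatch from Python, offered only as an illustration and not part of the patch::

    import ctypes
    import numpy as np

    # npy_intp is exposed in NumPy as np.intp and has pointer width.
    print(np.dtype(np.intp).itemsize)    # typically 8 on 64-bit platforms
    print(ctypes.sizeof(ctypes.c_int))   # typically 4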
@@ -119,7 +119,8 @@ PyObject *extra_args = NULL; PyObject *fcn; - npy_intp limit=50; + int limit=50; + npy_intp limit_shape[1]; int full_output = 0; double a, b, epsabs=1.49e-8, epsrel=1.49e-8; int neval=0, ier=6, last=0, *iord; @@ -129,19 +130,20 @@ STORE_VARS(); if (!PyArg_ParseTuple(args, "Odd|Oiddi", &fcn, &a, &b, &extra_args, &full_output, &epsabs, &epsrel, &limit)) return NULL; + limit_shape[0] = limit; /* Need to check that limit is bigger than 1 */ - if (limit < 1) + if (limit < 1) return Py_BuildValue("ddi",result,abserr,ier); QUAD_INIT_FUNC(fcn,extra_args) /* Setup iwork and work arrays */ - ap_iord = (PyArrayObject *)PyArray_SimpleNew(1,&limit,PyArray_INT); - ap_alist = (PyArrayObject *)PyArray_SimpleNew(1,&limit,PyArray_DOUBLE); - ap_blist = (PyArrayObject *)PyArray_SimpleNew(1,&limit,PyArray_DOUBLE); - ap_rlist = (PyArrayObject *)PyArray_SimpleNew(1,&limit,PyArray_DOUBLE); - ap_elist = (PyArrayObject *)PyArray_SimpleNew(1,&limit,PyArray_DOUBLE); + ap_iord = (PyArrayObject *)PyArray_SimpleNew(1,limit_shape,PyArray_INT); + ap_alist = (PyArrayObject *)PyArray_SimpleNew(1,limit_shape,PyArray_DOUBLE); + ap_blist = (PyArrayObject *)PyArray_SimpleNew(1,limit_shape,PyArray_DOUBLE); + ap_rlist = (PyArrayObject *)PyArray_SimpleNew(1,limit_shape,PyArray_DOUBLE); + ap_elist = (PyArrayObject *)PyArray_SimpleNew(1,limit_shape,PyArray_DOUBLE); if (ap_iord == NULL || ap_alist == NULL || ap_blist == NULL || ap_rlist == NULL || ap_elist == NULL) goto fail; iord = (int *)ap_iord->data; alist = (double *)ap_alist->data; @@ -167,7 +169,7 @@ if (full_output) { return Py_BuildValue("dd{s:i,s:i,s:N,s:N,s:N,s:N,s:N}i", result, abserr, "neval", neval, "last", last, "iord", PyArray_Return(ap_iord), "alist", PyArray_Return(ap_alist), "blist", PyArray_Return(ap_blist), "rlist", PyArray_Return(ap_rlist), "elist", PyArray_Return(ap_elist),ier); } - else { + else { Py_DECREF(ap_alist); Py_DECREF(ap_blist); Py_DECREF(ap_rlist); @@ -198,16 +200,18 @@ PyObject *extra_args = NULL; PyObject *fcn; - npy_intp limit=50; + int limit=50; + npy_intp limit_shape[1]; int full_output = 0; double bound, epsabs=1.49e-8, epsrel=1.49e-8; int inf, neval=0, ier=6, last=0, *iord; double result=0.0, abserr=0.0; double *alist, *blist, *rlist, *elist; - + STORE_VARS(); - + if (!PyArg_ParseTuple(args, "Odi|Oiddi", &fcn, &bound, &inf, &extra_args, &full_output, &epsabs, &epsrel, &limit)) return NULL; + limit_shape[0] = limit; /* Need to check that limit is bigger than 1 */ if (limit < 1) @@ -216,11 +220,11 @@ QUAD_INIT_FUNC(fcn,extra_args); /* Setup iwork and work arrays */ - ap_iord = (PyArrayObject *)PyArray_SimpleNew(1,&limit,PyArray_INT); - ap_alist = (PyArrayObject *)PyArray_SimpleNew(1,&limit,PyArray_DOUBLE); - ap_blist = (PyArrayObject *)PyArray_SimpleNew(1,&limit,PyArray_DOUBLE); - ap_rlist = (PyArrayObject *)PyArray_SimpleNew(1,&limit,PyArray_DOUBLE); - ap_elist = (PyArrayObject *)PyArray_SimpleNew(1,&limit,PyArray_DOUBLE); + ap_iord = (PyArrayObject *)PyArray_SimpleNew(1,limit_shape,PyArray_INT); + ap_alist = (PyArrayObject *)PyArray_SimpleNew(1,limit_shape,PyArray_DOUBLE); + ap_blist = (PyArrayObject *)PyArray_SimpleNew(1,limit_shape,PyArray_DOUBLE); + ap_rlist = (PyArrayObject *)PyArray_SimpleNew(1,limit_shape,PyArray_DOUBLE); + ap_elist = (PyArrayObject *)PyArray_SimpleNew(1,limit_shape,PyArray_DOUBLE); if (ap_iord == NULL || ap_alist == NULL || ap_blist == NULL || ap_rlist == NULL || ap_elist == NULL) goto fail; iord = (int *)ap_iord->data; alist = (double *)ap_alist->data; @@ -247,7 +251,7 @@ if (full_output) { return 
Py_BuildValue("dd{s:i,s:i,s:N,s:N,s:N,s:N,s:N}i", result, abserr, "neval", neval, "last", last, "iord", PyArray_Return(ap_iord), "alist", PyArray_Return(ap_alist), "blist", PyArray_Return(ap_blist), "rlist", PyArray_Return(ap_rlist), "elist", PyArray_Return(ap_elist),ier); } - else { + else { Py_DECREF(ap_alist); Py_DECREF(ap_blist); Py_DECREF(ap_rlist); @@ -281,7 +285,8 @@ PyObject *extra_args = NULL; PyObject *fcn, *o_points; - npy_intp limit=50, npts2; + int limit=50, npts2; + npy_intp limit_shape[1], npts2_shape[1]; int full_output = 0; double a, b, epsabs=1.49e-8, epsrel=1.49e-8; int neval=0, ier=6, last=0, *iord; @@ -293,6 +298,7 @@ STORE_VARS(); if (!PyArg_ParseTuple(args, "OddO|Oiddi", &fcn, &a, &b, &o_points, &extra_args, &full_output, &epsabs, &epsrel, &limit)) return NULL; + limit_shape[0] = limit; /* Need to check that limit is bigger than 1 */ if (limit < 1) @@ -303,17 +309,18 @@ ap_points = (PyArrayObject *)PyArray_ContiguousFromObject(o_points, PyArray_DOUBLE, 1, 1); if (ap_points == NULL) goto fail; npts2 = ap_points->dimensions[0]; + npts2_shape[0] = npts2; points = (double *)ap_points->data; /* Setup iwork and work arrays */ - ap_iord = (PyArrayObject *)PyArray_SimpleNew(1,&limit,PyArray_INT); - ap_alist = (PyArrayObject *)PyArray_SimpleNew(1,&limit,PyArray_DOUBLE); - ap_blist = (PyArrayObject *)PyArray_SimpleNew(1,&limit,PyArray_DOUBLE); - ap_rlist = (PyArrayObject *)PyArray_SimpleNew(1,&limit,PyArray_DOUBLE); - ap_elist = (PyArrayObject *)PyArray_SimpleNew(1,&limit,PyArray_DOUBLE); - ap_pts = (PyArrayObject *)PyArray_SimpleNew(1,&npts2,PyArray_DOUBLE); - ap_level = (PyArrayObject *)PyArray_SimpleNew(1,&limit,PyArray_DOUBLE); - ap_ndin = (PyArrayObject *)PyArray_SimpleNew(1,&npts2,PyArray_DOUBLE); + ap_iord = (PyArrayObject *)PyArray_SimpleNew(1,limit_shape,PyArray_INT); + ap_alist = (PyArrayObject *)PyArray_SimpleNew(1,limit_shape,PyArray_DOUBLE); + ap_blist = (PyArrayObject *)PyArray_SimpleNew(1,limit_shape,PyArray_DOUBLE); + ap_rlist = (PyArrayObject *)PyArray_SimpleNew(1,limit_shape,PyArray_DOUBLE); + ap_elist = (PyArrayObject *)PyArray_SimpleNew(1,limit_shape,PyArray_DOUBLE); + ap_pts = (PyArrayObject *)PyArray_SimpleNew(1,npts2_shape,PyArray_DOUBLE); + ap_level = (PyArrayObject *)PyArray_SimpleNew(1,limit_shape,PyArray_DOUBLE); + ap_ndin = (PyArrayObject *)PyArray_SimpleNew(1,npts2_shape,PyArray_DOUBLE); if (ap_iord == NULL || ap_alist == NULL || ap_blist == NULL || ap_rlist == NULL || ap_elist == NULL || ap_pts == NULL || ap_level == NULL || ap_ndin == NULL) goto fail; iord = (int *)ap_iord->data; alist = (double *)ap_alist->data; @@ -327,7 +334,7 @@ if (setjmp(quadpack_jmpbuf)) { goto fail; } - else { + else { DQAGPE(quad_function, &a, &b, &npts2, points, &epsabs, &epsrel, &limit, &result, &abserr, &neval, &ier, alist, blist, rlist, elist, pts, iord, level, ndin, &last); } @@ -343,7 +350,7 @@ if (full_output) { return Py_BuildValue("dd{s:i,s:i,s:N,s:N,s:N,s:N,s:N,s:N,s:N,s:N}i", result, abserr, "neval", neval, "last", last, "iord", PyArray_Return(ap_iord), "alist", PyArray_Return(ap_alist), "blist", PyArray_Return(ap_blist), "rlist", PyArray_Return(ap_rlist), "elist", PyArray_Return(ap_elist), "pts", PyArray_Return(ap_pts), "level", PyArray_Return(ap_level), "ndin", PyArray_Return(ap_ndin),ier); } - else { + else { Py_DECREF(ap_alist); Py_DECREF(ap_blist); Py_DECREF(ap_rlist); @@ -383,7 +390,8 @@ PyObject *extra_args = NULL, *o_chebmo = NULL; PyObject *fcn; - npy_intp limit=50, sz[2]; + int limit=50; + npy_intp limit_shape[1], sz[2]; int full_output = 0, 
maxp1=50, icall=1; double a, b, epsabs=1.49e-8, epsrel=1.49e-8; int neval=0, ier=6, integr=1, last=0, momcom=0, *iord; @@ -391,10 +399,11 @@ double result=0.0, abserr=0.0, omega=0.0; double *chebmo; double *alist, *blist, *rlist, *elist; - + STORE_VARS(); if (!PyArg_ParseTuple(args, "Odddi|OiddiiiiO", &fcn, &a, &b, &omega, &integr, &extra_args, &full_output, &epsabs, &epsrel, &limit, &maxp1, &icall, &momcom, &o_chebmo)) return NULL; + limit_shape[0] = limit; /* Need to check that limit is bigger than 1 */ if (limit < 1) @@ -417,12 +426,12 @@ chebmo = (double *) ap_chebmo->data; /* Setup iwork and work arrays */ - ap_iord = (PyArrayObject *)PyArray_SimpleNew(1,&limit,PyArray_INT); - ap_nnlog = (PyArrayObject *)PyArray_SimpleNew(1,&limit,PyArray_INT); - ap_alist = (PyArrayObject *)PyArray_SimpleNew(1,&limit,PyArray_DOUBLE); - ap_blist = (PyArrayObject *)PyArray_SimpleNew(1,&limit,PyArray_DOUBLE); - ap_rlist = (PyArrayObject *)PyArray_SimpleNew(1,&limit,PyArray_DOUBLE); - ap_elist = (PyArrayObject *)PyArray_SimpleNew(1,&limit,PyArray_DOUBLE); + ap_iord = (PyArrayObject *)PyArray_SimpleNew(1,limit_shape,PyArray_INT); + ap_nnlog = (PyArrayObject *)PyArray_SimpleNew(1,limit_shape,PyArray_INT); + ap_alist = (PyArrayObject *)PyArray_SimpleNew(1,limit_shape,PyArray_DOUBLE); + ap_blist = (PyArrayObject *)PyArray_SimpleNew(1,limit_shape,PyArray_DOUBLE); + ap_rlist = (PyArrayObject *)PyArray_SimpleNew(1,limit_shape,PyArray_DOUBLE); + ap_elist = (PyArrayObject *)PyArray_SimpleNew(1,limit_shape,PyArray_DOUBLE); if (ap_iord == NULL || ap_nnlog == NULL || ap_alist == NULL || ap_blist == NULL || ap_rlist == NULL || ap_elist == NULL) goto fail; iord = (int *)ap_iord->data; nnlog = (int *)ap_nnlog->data; @@ -449,7 +458,7 @@ if (full_output) { return Py_BuildValue("dd{s:i,s:i,s:N,s:N,s:N,s:N,s:N,s:N,s:i,s:N}i", result, abserr, "neval", neval, "last", last, "iord", PyArray_Return(ap_iord), "alist", PyArray_Return(ap_alist), "blist", PyArray_Return(ap_blist), "rlist", PyArray_Return(ap_rlist), "elist", PyArray_Return(ap_elist), "nnlog", PyArray_Return(ap_nnlog), "momcom", momcom, "chebmo", PyArray_Return(ap_chebmo),ier); } - else { + else { Py_DECREF(ap_alist); Py_DECREF(ap_blist); Py_DECREF(ap_rlist); @@ -487,7 +496,8 @@ PyObject *extra_args = NULL; PyObject *fcn; - npy_intp limlst = 50, limit=50, sz[2]; + int limlst = 50, limit=50; + npy_intp limlst_shape[1], limit_shape[1], sz[2]; int full_output = 0, maxp1=50; double a, epsabs=1.49e-8; int neval=0, ier=6, integr=1, *iord; @@ -499,6 +509,8 @@ STORE_VARS(); if (!PyArg_ParseTuple(args, "Oddi|Oidiii", &fcn, &a, &omega, &integr, &extra_args, &full_output, &epsabs, &limlst, &limit, &maxp1)) return NULL; + limit_shape[0] = limit; + limlst_shape[0] = limlst; /* Need to check that limit is bigger than 1 */ if (limit < 1) @@ -513,15 +525,15 @@ chebmo = (double *) ap_chebmo->data; /* Setup iwork and work arrays */ - ap_iord = (PyArrayObject *)PyArray_SimpleNew(1,&limit,PyArray_INT); - ap_nnlog = (PyArrayObject *)PyArray_SimpleNew(1,&limit,PyArray_INT); - ap_alist = (PyArrayObject *)PyArray_SimpleNew(1,&limit,PyArray_DOUBLE); - ap_blist = (PyArrayObject *)PyArray_SimpleNew(1,&limit,PyArray_DOUBLE); - ap_rlist = (PyArrayObject *)PyArray_SimpleNew(1,&limit,PyArray_DOUBLE); - ap_elist = (PyArrayObject *)PyArray_SimpleNew(1,&limit,PyArray_DOUBLE); - ap_rslst = (PyArrayObject *)PyArray_SimpleNew(1,&limlst,PyArray_DOUBLE); - ap_erlst = (PyArrayObject *)PyArray_SimpleNew(1,&limlst,PyArray_DOUBLE); - ap_ierlst = (PyArrayObject *)PyArray_SimpleNew(1,&limlst,PyArray_INT); + 
ap_iord = (PyArrayObject *)PyArray_SimpleNew(1,limit_shape,PyArray_INT); + ap_nnlog = (PyArrayObject *)PyArray_SimpleNew(1,limit_shape,PyArray_INT); + ap_alist = (PyArrayObject *)PyArray_SimpleNew(1,limit_shape,PyArray_DOUBLE); + ap_blist = (PyArrayObject *)PyArray_SimpleNew(1,limit_shape,PyArray_DOUBLE); + ap_rlist = (PyArrayObject *)PyArray_SimpleNew(1,limit_shape,PyArray_DOUBLE); + ap_elist = (PyArrayObject *)PyArray_SimpleNew(1,limit_shape,PyArray_DOUBLE); + ap_rslst = (PyArrayObject *)PyArray_SimpleNew(1,limlst_shape,PyArray_DOUBLE); + ap_erlst = (PyArrayObject *)PyArray_SimpleNew(1,limlst_shape,PyArray_DOUBLE); + ap_ierlst = (PyArrayObject *)PyArray_SimpleNew(1,limlst_shape,PyArray_INT); if (ap_iord == NULL || ap_nnlog == NULL || ap_alist == NULL || ap_blist == NULL || ap_rlist == NULL || ap_elist == NULL || ap_rslst == NULL || ap_erlst == NULL || ap_ierlst == NULL) goto fail; iord = (int *)ap_iord->data; nnlog = (int *)ap_nnlog->data; @@ -558,7 +570,7 @@ if (full_output) { return Py_BuildValue("dd{s:i,s:i,s:N,s:N,s:N}i", result, abserr, "neval", neval, "lst", lst, "rslst", PyArray_Return(ap_rslst), "erlst", PyArray_Return(ap_erlst), "ierlst", PyArray_Return(ap_ierlst), ier); } - else { + else { Py_DECREF(ap_rslst); Py_DECREF(ap_erlst); Py_DECREF(ap_ierlst); @@ -593,7 +605,8 @@ PyObject *extra_args = NULL; PyObject *fcn; - npy_intp limit=50; + int limit; + npy_intp limit_shape[1]; int full_output = 0; double a, b, c, epsabs=1.49e-8, epsrel=1.49e-8; int neval=0, ier=6, last=0, *iord; @@ -601,8 +614,9 @@ double *alist, *blist, *rlist, *elist; STORE_VARS(); - + if (!PyArg_ParseTuple(args, "Oddd|Oiddi", &fcn, &a, &b, &c, &extra_args, &full_output, &epsabs, &epsrel, &limit)) return NULL; + limit_shape[0] = limit; /* Need to check that limit is bigger than 1 */ if (limit < 1) @@ -611,11 +625,11 @@ QUAD_INIT_FUNC(fcn,extra_args) /* Setup iwork and work arrays */ - ap_iord = (PyArrayObject *)PyArray_SimpleNew(1,&limit,PyArray_INT); - ap_alist = (PyArrayObject *)PyArray_SimpleNew(1,&limit,PyArray_DOUBLE); - ap_blist = (PyArrayObject *)PyArray_SimpleNew(1,&limit,PyArray_DOUBLE); - ap_rlist = (PyArrayObject *)PyArray_SimpleNew(1,&limit,PyArray_DOUBLE); - ap_elist = (PyArrayObject *)PyArray_SimpleNew(1,&limit,PyArray_DOUBLE); + ap_iord = (PyArrayObject *)PyArray_SimpleNew(1,limit_shape,PyArray_INT); + ap_alist = (PyArrayObject *)PyArray_SimpleNew(1,limit_shape,PyArray_DOUBLE); + ap_blist = (PyArrayObject *)PyArray_SimpleNew(1,limit_shape,PyArray_DOUBLE); + ap_rlist = (PyArrayObject *)PyArray_SimpleNew(1,limit_shape,PyArray_DOUBLE); + ap_elist = (PyArrayObject *)PyArray_SimpleNew(1,limit_shape,PyArray_DOUBLE); if (ap_iord == NULL || ap_alist == NULL || ap_blist == NULL || ap_rlist == NULL || ap_elist == NULL) goto fail; iord = (int *)ap_iord->data; alist = (double *)ap_alist->data; @@ -641,7 +655,7 @@ if (full_output) { return Py_BuildValue("dd{s:i,s:i,s:N,s:N,s:N,s:N,s:N}i", result, abserr, "neval", neval, "last", last, "iord", PyArray_Return(ap_iord), "alist", PyArray_Return(ap_alist), "blist", PyArray_Return(ap_blist), "rlist", PyArray_Return(ap_rlist), "elist", PyArray_Return(ap_elist),ier); } - else { + else { Py_DECREF(ap_alist); Py_DECREF(ap_blist); Py_DECREF(ap_rlist); @@ -674,7 +688,8 @@ PyObject *fcn; int full_output = 0, integr; - npy_intp limit=50; + int limit=50; + npy_intp limit_shape[1]; double a, b, epsabs=1.49e-8, epsrel=1.49e-8; double alfa, beta; int neval=0, ier=6, last=0, *iord; @@ -682,8 +697,9 @@ double *alist, *blist, *rlist, *elist; STORE_VARS(); - + if 
(!PyArg_ParseTuple(args, "Odd(dd)i|Oiddi", &fcn, &a, &b, &alfa, &beta, &integr, &extra_args, &full_output, &epsabs, &epsrel, &limit)) return NULL; + limit_shape[0] = limit; /* Need to check that limit is bigger than 1 */ if (limit < 1) @@ -692,11 +708,11 @@ QUAD_INIT_FUNC(fcn,extra_args) /* Setup iwork and work arrays */ - ap_iord = (PyArrayObject *)PyArray_SimpleNew(1,&limit,PyArray_INT); - ap_alist = (PyArrayObject *)PyArray_SimpleNew(1,&limit,PyArray_DOUBLE); - ap_blist = (PyArrayObject *)PyArray_SimpleNew(1,&limit,PyArray_DOUBLE); - ap_rlist = (PyArrayObject *)PyArray_SimpleNew(1,&limit,PyArray_DOUBLE); - ap_elist = (PyArrayObject *)PyArray_SimpleNew(1,&limit,PyArray_DOUBLE); + ap_iord = (PyArrayObject *)PyArray_SimpleNew(1,limit_shape,PyArray_INT); + ap_alist = (PyArrayObject *)PyArray_SimpleNew(1,limit_shape,PyArray_DOUBLE); + ap_blist = (PyArrayObject *)PyArray_SimpleNew(1,limit_shape,PyArray_DOUBLE); + ap_rlist = (PyArrayObject *)PyArray_SimpleNew(1,limit_shape,PyArray_DOUBLE); + ap_elist = (PyArrayObject *)PyArray_SimpleNew(1,limit_shape,PyArray_DOUBLE); if (ap_iord == NULL || ap_alist == NULL || ap_blist == NULL || ap_rlist == NULL || ap_elist == NULL) goto fail; iord = (int *)ap_iord->data; alist = (double *)ap_alist->data; @@ -722,7 +738,7 @@ if (full_output) { return Py_BuildValue("dd{s:i,s:i,s:N,s:N,s:N,s:N,s:N}i", result, abserr, "neval", neval, "last", last, "iord", PyArray_Return(ap_iord), "alist", PyArray_Return(ap_alist), "blist", PyArray_Return(ap_blist), "rlist", PyArray_Return(ap_rlist), "elist", PyArray_Return(ap_elist),ier); } - else { + else { Py_DECREF(ap_alist); Py_DECREF(ap_blist); Py_DECREF(ap_rlist); From scipy-svn at scipy.org Mon Dec 13 16:29:12 2010 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Mon, 13 Dec 2010 15:29:12 -0600 (CST) Subject: [Scipy-svn] r7013 - in trunk/scipy/spatial: . 
qhull/src Message-ID: <20101213212912.AC87037D411@scipy.org> Author: ptvirtan Date: 2010-12-13 15:29:11 -0600 (Mon, 13 Dec 2010) New Revision: 7013 Modified: trunk/scipy/spatial/SConscript trunk/scipy/spatial/qhull/src/mem.h trunk/scipy/spatial/setup.py Log: BUG: spatial/qhull: define ptr_intT correctly in Qhull Modified: trunk/scipy/spatial/SConscript =================================================================== --- trunk/scipy/spatial/SConscript 2010-12-12 19:59:08 UTC (rev 7012) +++ trunk/scipy/spatial/SConscript 2010-12-13 21:29:11 UTC (rev 7013) @@ -1,10 +1,14 @@ # Last Change: Mon Nov 03 06:00 PM 2008 J # vim:syntax=python from os.path import join +from numpy.distutils.misc_util import get_numpy_include_dirs, get_pkg_info +from distutils.sysconfig import get_python_inc from numscons import GetNumpyEnvironment, CheckF77LAPACK, CheckF77Clib from numscons import write_info env = GetNumpyEnvironment(ARGUMENTS) +env.PrependUnique(CPPPATH=[get_numpy_include_dirs(), get_python_inc(), + env["PYEXTCPPPATH"]]) #======================= # Starting Configuration @@ -30,14 +34,14 @@ env.NumpyPythonExtension('ckdtree', source = ['ckdtree.c']) -env.NumpyPythonExtension('_distance_wrap', +env.NumpyPythonExtension('_distance_wrap', source = [join('src', 'distance_wrap.c'), join('src', 'distance.c')]) # Build qhull src = [join('qhull', 'src', s) for s in [ - 'geom2.c', 'geom.c', 'global.c', 'io.c', 'libqhull.c', - 'mem.c', 'merge.c', 'poly2.c', 'poly.c', 'qset.c', + 'geom2.c', 'geom.c', 'global.c', 'io.c', 'libqhull.c', + 'mem.c', 'merge.c', 'poly2.c', 'poly.c', 'qset.c', 'random.c', 'rboxlib.c', 'stat.c', 'user.c', 'usermem.c', 'userprintf.c']] Modified: trunk/scipy/spatial/qhull/src/mem.h =================================================================== --- trunk/scipy/spatial/qhull/src/mem.h 2010-12-12 19:59:08 UTC (rev 7012) +++ trunk/scipy/spatial/qhull/src/mem.h 2010-12-13 21:29:11 UTC (rev 7013) @@ -19,6 +19,9 @@ #ifndef qhDEFmem #define qhDEFmem 1 +#include +#include + #include /*---------------------------------- Modified: trunk/scipy/spatial/setup.py =================================================================== --- trunk/scipy/spatial/setup.py 2010-12-12 19:59:08 UTC (rev 7012) +++ trunk/scipy/spatial/setup.py 2010-12-13 21:29:11 UTC (rev 7013) @@ -5,18 +5,21 @@ def configuration(parent_package = '', top_path = None): from numpy.distutils.misc_util import Configuration, get_numpy_include_dirs from numpy.distutils.system_info import get_info + from distutils.sysconfig import get_python_inc config = Configuration('spatial', parent_package, top_path) config.add_data_dir('tests') - qhull_src = ['geom2.c', 'geom.c', 'global.c', 'io.c', 'libqhull.c', - 'mem.c', 'merge.c', 'poly2.c', 'poly.c', 'qset.c', + qhull_src = ['geom2.c', 'geom.c', 'global.c', 'io.c', 'libqhull.c', + 'mem.c', 'merge.c', 'poly2.c', 'poly.c', 'qset.c', 'random.c', 'rboxlib.c', 'stat.c', 'user.c', 'usermem.c', 'userprintf.c'] config.add_library('qhull', sources=[join('qhull', 'src', x) for x in qhull_src], + include_dirs=[get_python_inc(), + get_numpy_include_dirs()], # XXX: GCC dependency! 
#extra_compiler_args=['-fno-strict-aliasing'], ) From scipy-svn at scipy.org Tue Dec 14 09:18:48 2010 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Tue, 14 Dec 2010 08:18:48 -0600 (CST) Subject: [Scipy-svn] r7014 - branches/0.9.x/doc/release Message-ID: <20101214141848.ED0AB37D3AE@scipy.org> Author: rgommers Date: 2010-12-14 08:18:48 -0600 (Tue, 14 Dec 2010) New Revision: 7014 Modified: branches/0.9.x/doc/release/0.9.0-notes.rst Log: DOC: add note on Py3k status - all good except for weave. Modified: branches/0.9.x/doc/release/0.9.0-notes.rst =================================================================== --- branches/0.9.x/doc/release/0.9.0-notes.rst 2010-12-13 21:29:11 UTC (rev 7013) +++ branches/0.9.x/doc/release/0.9.0-notes.rst 2010-12-14 14:18:48 UTC (rev 7014) @@ -37,8 +37,10 @@ Python 3 ======== -Scipy 0.9.0 has full support for Python 3. +Scipy 0.9.0 is the first SciPy release to support Python 3. The only module +that is not yet ported is ``scipy.weave``. + New features ============ From scipy-svn at scipy.org Tue Dec 21 23:05:47 2010 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Tue, 21 Dec 2010 22:05:47 -0600 (CST) Subject: [Scipy-svn] r7019 - trunk/scipy/signal/tests Message-ID: <20101222040547.6928A3215A@scipy.org> Author: warren.weckesser Date: 2010-12-21 22:05:46 -0600 (Tue, 21 Dec 2010) New Revision: 7019 Modified: trunk/scipy/signal/tests/test_fir_filter_design.py Log: 3K: signal: use explicit integer division in FIR filter tests where appropriate Modified: trunk/scipy/signal/tests/test_fir_filter_design.py =================================================================== --- trunk/scipy/signal/tests/test_fir_filter_design.py 2010-12-19 14:19:41 UTC (rev 7018) +++ trunk/scipy/signal/tests/test_fir_filter_design.py 2010-12-22 04:05:46 UTC (rev 7019) @@ -104,7 +104,7 @@ taps = firwin(ntaps, cutoff=0.5, window=('kaiser', beta), scale=False) # Check the symmetry of taps. - assert_array_almost_equal(taps[:ntaps/2], taps[ntaps:ntaps-ntaps/2-1:-1]) + assert_array_almost_equal(taps[:ntaps//2], taps[ntaps:ntaps-ntaps//2-1:-1]) # Check the gain at a few samples where we know it should be approximately 0 or 1. freq_samples = np.array([0.0, 0.25, 0.5-width/2, 0.5+width/2, 0.75, 1.0]) @@ -123,7 +123,7 @@ pass_zero=False, scale=False) # Check the symmetry of taps. - assert_array_almost_equal(taps[:ntaps/2], taps[ntaps:ntaps-ntaps/2-1:-1]) + assert_array_almost_equal(taps[:ntaps//2], taps[ntaps:ntaps-ntaps//2-1:-1]) # Check the gain at a few samples where we know it should be approximately 0 or 1. freq_samples = np.array([0.0, 0.25, 0.5-width/2, 0.5+width/2, 0.75, 1.0]) @@ -138,7 +138,7 @@ pass_zero=False, scale=False) # Check the symmetry of taps. - assert_array_almost_equal(taps[:ntaps/2], taps[ntaps:ntaps-ntaps/2-1:-1]) + assert_array_almost_equal(taps[:ntaps//2], taps[ntaps:ntaps-ntaps//2-1:-1]) # Check the gain at a few samples where we know it should be approximately 0 or 1. freq_samples = np.array([0.0, 0.2, 0.3-width/2, 0.3+width/2, 0.5, @@ -154,7 +154,7 @@ pass_zero=True, scale=False) # Check the symmetry of taps. - assert_array_almost_equal(taps[:ntaps/2], taps[ntaps:ntaps-ntaps/2-1:-1]) + assert_array_almost_equal(taps[:ntaps//2], taps[ntaps:ntaps-ntaps//2-1:-1]) # Check the gain at a few samples where we know it should be approximately 0 or 1. freq_samples = np.array([0.0, 0.1, 0.2-width/2, 0.2+width/2, 0.35, @@ -175,7 +175,7 @@ pass_zero=False, scale=False, nyq=nyquist) # Check the symmetry of taps. 
- assert_array_almost_equal(taps[:ntaps/2], taps[ntaps:ntaps-ntaps/2-1:-1]) + assert_array_almost_equal(taps[:ntaps//2], taps[ntaps:ntaps-ntaps//2-1:-1]) # Check the gain at a few samples where we know it should be approximately 0 or 1. freq_samples = np.array([0.0, 200, 300-width/2, 300+width/2, 500, @@ -288,8 +288,8 @@ # make sure the filter has correct # of taps assert_(len(h) == N, "Number of Taps") - # make sure it is type III (anti-symmtric tap coefficients) - assert_array_almost_equal(h[:(N-1)/2], -h[:-(N-1)/2-1:-1]) + # make sure it is type III (anti-symmetric tap coefficients) + assert_array_almost_equal(h[:(N-1)//2], -h[:-(N-1)//2-1:-1]) # Since the requested response is symmetric, all even coeffcients # should be zero (or in this case really small)
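
The r7019 change above replaces `/` with `//` because, under Python 3, dividing two ints with `/` yields a float, which cannot be used as a slice index; `//` keeps the floor-division behaviour the Python 2 tests relied on. The following is a minimal sketch of the symmetry check as it reads after the change; the ntaps and Kaiser beta values here are illustrative, not the exact values from the test suite:

    import numpy as np
    from numpy.testing import assert_array_almost_equal
    from scipy.signal import firwin

    # Illustrative parameters (not taken verbatim from the scipy tests).
    ntaps = 95
    taps = firwin(ntaps, cutoff=0.5, window=('kaiser', 10.0), scale=False)

    # A linear-phase FIR design is symmetric: the first ntaps//2 taps mirror
    # the reversed second half.  ntaps // 2 is an int on both Python 2 and 3,
    # whereas ntaps / 2 is a float on Python 3 and fails as a slice index.
    assert_array_almost_equal(taps[:ntaps // 2],
                              taps[ntaps:ntaps - ntaps // 2 - 1:-1])
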
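The __quadpack changes above are needed because PyArg_ParseTuple's "i" format fills in a C int, while PyArray_SimpleNew expects its dimensions as npy_intp; on most 64-bit platforms those two types have different widths, so one variable cannot safely serve both roles. The fix parses `limit` into an int and copies it into a separate one-element npy_intp shape array (`limit_shape[0] = limit;`). A small Python sketch of the underlying size mismatch (the sizes shown are typical of 64-bit builds, not guaranteed on every platform):

    import ctypes
    import numpy as np

    # A C "int", which is what PyArg_ParseTuple's "i" format writes into...
    print(ctypes.sizeof(ctypes.c_int))   # typically 4 bytes

    # ...versus npy_intp, the type PyArray_SimpleNew expects for its dims array.
    print(np.dtype(np.intp).itemsize)    # typically 8 bytes on 64-bit builds
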
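The r7013 build changes above make the Python and NumPy header directories visible to the qhull C sources so that mem.h can pick up the integer definitions needed for ptr_intT. A minimal sketch of how those directories are obtained, using the same helpers that appear in the setup.py diff (printed here only to show what gets passed as include_dirs to the qhull library build):

    from distutils.sysconfig import get_python_inc
    from numpy.distutils.misc_util import get_numpy_include_dirs

    # get_python_inc() returns a single path; get_numpy_include_dirs() a list.
    include_dirs = [get_python_inc()] + get_numpy_include_dirs()
    print(include_dirs)
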