From scipy-svn at scipy.org Mon Jun 2 22:00:13 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Mon, 2 Jun 2008 21:00:13 -0500 (CDT) Subject: [Scipy-svn] r4403 - in trunk/scipy/sparse: . tests Message-ID: <20080603020013.A28D139C63B@scipy.org> Author: wnbell Date: 2008-06-02 21:00:08 -0500 (Mon, 02 Jun 2008) New Revision: 4403 Modified: trunk/scipy/sparse/compressed.py trunk/scipy/sparse/tests/test_base.py Log: fixed missing import of numpy.multiply resolves ticket #680 Modified: trunk/scipy/sparse/compressed.py =================================================================== --- trunk/scipy/sparse/compressed.py 2008-05-31 00:15:46 UTC (rev 4402) +++ trunk/scipy/sparse/compressed.py 2008-06-03 02:00:08 UTC (rev 4403) @@ -251,11 +251,13 @@ def __truediv__(self,other): if isscalarlike(other): return self * (1./other) + elif isspmatrix(other): - if (other.shape != self.shape): - raise ValueError, "inconsistent shapes" + if other.shape != self.shape: + raise ValueError('inconsistent shapes') return self._binopt(other,'_eldiv_') + else: raise NotImplementedError @@ -263,11 +265,11 @@ def multiply(self, other): """Point-wise multiplication by another matrix """ - if (other.shape != self.shape): - raise ValueError, "inconsistent shapes" + if other.shape != self.shape: + raise ValueError('inconsistent shapes') if isdense(other): - return multiply(self.todense(),other) + return numpy.multiply(self.todense(),other) else: other = self.__class__(other) return self._binopt(other,'_elmul_') Modified: trunk/scipy/sparse/tests/test_base.py =================================================================== --- trunk/scipy/sparse/tests/test_base.py 2008-05-31 00:15:46 UTC (rev 4402) +++ trunk/scipy/sparse/tests/test_base.py 2008-06-03 02:00:08 UTC (rev 4403) @@ -215,19 +215,27 @@ assert_array_equal(self.datsp - A.todense(),self.dat - A.todense()) def test_elmul(self): - temp = self.dat.copy() - temp[0,2] = 2.0 - temp = self.spmatrix(temp) - c = temp.multiply(self.datsp) - assert_array_equal(c.todense(),[[1,0,0,4],[9,0,1,0],[0,4,0,0]]) - - # complex - A = array([[1-2j,0+5j,-1+0j],[4-3j,-3+6j,5]]) - B = array([[5+2j,7-3j,-2+1j],[0-1j,-4+2j,9]]) + # real/real + A = array([[4,0,9],[2,-3,5]]) + B = array([[0,7,0],[0,-4,0]]) Asp = self.spmatrix(A) Bsp = self.spmatrix(B) - assert_almost_equal( Asp.multiply(Bsp).todense(), A*B) + assert_almost_equal( Asp.multiply(Bsp).todense(), A*B) #sparse/sparse + assert_almost_equal( Asp.multiply(B), A*B) #sparse/dense + # complex/complex + C = array([[1-2j,0+5j,-1+0j],[4-3j,-3+6j,5]]) + D = array([[5+2j,7-3j,-2+1j],[0-1j,-4+2j,9]]) + Csp = self.spmatrix(C) + Dsp = self.spmatrix(D) + assert_almost_equal( Csp.multiply(Dsp).todense(), C*D) #sparse/sparse + assert_almost_equal( Csp.multiply(D), C*D) #sparse/dense + + # real/complex + assert_almost_equal( Asp.multiply(Dsp).todense(), A*D) #sparse/sparse + assert_almost_equal( Asp.multiply(D), A*D) #sparse/dense + + def test_eldiv(self): expected = [[1,0,0,1],[1,0,1,0],[0,1,0,0]] assert_array_equal((self.datsp / self.datsp).todense(),expected) From scipy-svn at scipy.org Tue Jun 3 00:41:13 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Mon, 2 Jun 2008 23:41:13 -0500 (CDT) Subject: [Scipy-svn] r4404 - trunk/scipy/cluster/tests Message-ID: <20080603044113.0AAC239C288@scipy.org> Author: damian.eads Date: 2008-06-02 23:41:10 -0500 (Mon, 02 Jun 2008) New Revision: 4404 Added: trunk/scipy/cluster/tests/random-bool-data.txt Log: Added small boolean observation vector data set for testing boolean 
distance metrics. Added: trunk/scipy/cluster/tests/random-bool-data.txt =================================================================== --- trunk/scipy/cluster/tests/random-bool-data.txt 2008-06-03 02:00:08 UTC (rev 4403) +++ trunk/scipy/cluster/tests/random-bool-data.txt 2008-06-03 04:41:10 UTC (rev 4404) @@ -0,0 +1,100 @@ +0 1 1 0 1 0 0 1 0 0 0 1 0 0 0 0 0 1 0 0 1 0 0 0 1 1 0 0 1 1 +1 1 1 1 1 1 1 0 0 1 1 1 0 0 0 0 1 0 1 0 1 1 1 0 1 0 1 1 1 1 +0 1 0 1 1 0 0 1 1 1 1 0 1 0 0 1 1 0 1 1 1 0 1 1 0 1 1 1 0 1 +1 1 1 0 0 1 1 0 0 1 1 1 0 0 1 1 0 1 1 1 0 1 1 0 0 0 0 1 0 0 +1 0 0 0 0 1 1 0 1 1 0 1 0 0 0 0 1 0 0 1 0 1 0 0 1 1 1 1 0 0 +1 0 1 1 0 0 0 1 1 1 1 1 0 1 1 0 1 0 1 0 1 0 0 0 0 0 0 0 1 1 +0 1 0 0 1 0 0 0 1 0 0 1 1 0 0 0 0 1 1 0 0 1 0 1 1 1 1 0 1 0 +1 0 1 1 1 0 0 0 0 1 1 0 0 0 0 1 0 1 0 0 0 1 1 1 0 1 0 0 1 0 +1 1 1 0 0 1 1 0 0 1 0 0 1 0 0 1 0 1 1 0 1 1 0 1 1 1 0 0 1 1 +1 1 0 1 0 0 1 1 1 1 1 1 1 0 1 0 1 1 1 1 0 0 0 0 0 0 1 1 0 0 +1 0 1 0 1 1 0 1 1 0 1 1 0 1 1 1 1 0 0 1 1 0 0 0 0 1 0 0 0 0 +1 1 1 1 0 1 0 0 0 0 0 1 0 1 1 1 1 0 1 1 1 1 1 1 0 1 0 1 1 1 +1 1 1 1 1 1 1 1 1 0 1 1 0 0 1 0 1 0 1 0 1 0 0 0 1 0 0 1 0 1 +0 1 1 0 0 1 1 0 0 0 0 1 0 1 1 0 1 0 1 0 1 1 0 1 0 0 1 1 1 1 +1 0 0 1 0 0 1 0 1 0 0 1 0 0 0 1 1 0 0 0 1 0 1 0 0 1 1 0 1 1 +1 0 0 1 1 0 0 1 1 0 0 1 1 1 1 1 1 1 1 0 1 1 1 1 0 1 1 1 0 0 +1 1 0 0 1 0 0 0 1 0 0 1 0 1 0 0 1 0 1 1 0 1 0 0 0 1 1 1 1 1 +0 0 0 1 1 1 1 1 0 1 0 1 1 1 1 0 0 1 1 1 1 1 0 0 1 0 1 0 0 0 +1 0 1 1 0 1 0 0 1 0 0 1 0 0 1 0 0 0 1 0 0 1 1 1 0 1 1 0 1 1 +0 0 0 0 1 0 1 0 1 1 0 0 1 0 0 0 0 0 0 0 0 1 1 1 0 0 0 1 1 1 +0 1 0 0 1 1 0 0 1 1 1 0 0 0 1 0 0 0 0 1 1 0 0 1 0 1 1 0 1 0 +1 0 1 0 1 1 1 0 0 0 1 0 1 1 0 0 0 0 0 0 0 1 0 0 1 1 1 0 1 1 +0 0 1 0 0 0 0 0 1 1 0 0 1 1 1 1 1 1 1 1 0 1 0 0 0 0 0 0 1 0 +0 1 0 1 1 1 0 1 1 1 0 1 0 1 1 1 0 0 0 0 1 1 1 0 0 1 1 0 0 1 +0 0 0 0 1 1 1 0 0 0 0 0 0 0 0 1 1 0 0 0 0 1 0 1 1 0 1 1 0 0 +1 0 0 0 1 0 1 0 0 1 0 1 1 0 1 0 1 0 1 0 1 1 1 0 0 0 1 1 1 0 +1 0 0 0 1 1 1 0 0 1 0 1 1 1 0 0 0 1 1 1 0 0 0 0 1 0 0 0 1 1 +0 1 0 0 0 1 1 1 0 1 1 1 0 1 0 0 1 1 1 1 0 1 0 1 0 1 1 0 1 1 +0 0 1 0 0 0 0 0 0 0 0 1 1 0 1 0 0 1 0 0 0 1 0 1 0 1 0 1 0 1 +0 0 1 0 1 0 1 1 1 1 0 0 1 1 1 1 0 0 1 1 1 1 0 0 1 0 1 0 1 0 +1 1 0 1 1 1 1 1 0 1 0 0 0 1 1 1 0 1 0 0 0 1 1 0 1 0 0 0 0 1 +0 1 0 0 0 1 0 1 1 0 0 1 0 0 0 1 1 1 0 0 1 1 0 1 1 0 0 1 0 1 +1 1 0 0 0 0 0 1 1 0 1 1 0 0 1 0 1 1 0 0 0 1 0 1 0 1 0 1 0 1 +1 1 1 0 1 0 0 1 1 0 1 1 1 0 1 0 1 1 0 0 0 1 1 0 0 1 1 1 1 1 +0 1 0 0 1 1 0 0 1 1 1 1 0 1 0 1 0 1 1 1 0 1 1 0 1 1 0 0 1 0 +1 1 1 1 0 1 0 1 0 1 1 1 1 0 1 0 1 0 1 0 1 1 0 0 1 0 1 0 0 0 +0 0 0 0 1 1 1 0 1 1 0 0 1 1 1 1 0 1 0 1 1 1 1 1 1 0 0 0 0 0 +0 1 1 1 0 0 0 1 1 1 0 1 0 0 1 1 1 1 1 0 1 0 0 1 0 0 0 0 1 1 +0 1 0 0 1 1 1 1 0 0 1 0 1 0 1 1 0 0 1 0 0 1 1 0 0 0 0 1 0 0 +1 1 0 1 0 0 1 1 0 0 1 1 1 0 0 1 1 1 0 0 0 0 1 1 1 0 1 0 0 1 +0 1 1 0 1 0 1 1 0 0 0 1 1 0 0 0 0 0 0 1 0 0 1 1 0 1 0 0 1 1 +0 0 1 1 1 0 1 0 0 1 1 0 0 0 1 1 1 0 1 0 0 0 0 1 1 0 1 1 0 0 +1 0 1 1 1 1 1 1 1 1 0 1 0 0 0 1 0 1 0 0 0 1 1 0 0 1 0 0 0 0 +1 0 1 1 1 0 1 1 1 1 0 0 1 0 1 1 1 0 0 0 0 1 1 1 1 1 0 1 0 0 +1 0 0 0 1 1 1 0 1 1 0 0 1 1 1 0 1 0 0 1 0 1 0 1 1 1 0 0 0 1 +1 0 1 0 1 0 0 0 1 0 0 1 1 0 1 1 0 0 0 1 0 1 1 0 1 0 0 1 0 0 +0 1 1 0 1 0 1 1 1 1 1 0 0 0 0 1 0 1 0 0 1 1 1 1 0 1 0 1 1 1 +0 1 0 1 1 0 1 0 0 1 0 0 1 0 0 1 1 0 1 0 0 0 1 1 1 0 0 1 0 1 +1 0 1 1 1 0 1 0 1 0 1 1 0 1 0 0 0 1 0 0 0 0 0 0 0 0 1 1 0 1 +1 1 1 1 1 1 1 1 1 1 0 0 1 0 0 1 0 0 1 1 0 0 1 1 1 1 0 1 0 1 +1 1 1 1 0 0 0 1 0 1 1 0 0 0 1 1 0 0 1 1 1 1 0 0 0 1 0 1 0 0 +1 0 1 0 0 1 1 1 1 0 1 1 0 0 1 0 0 1 1 0 1 1 1 1 1 1 0 0 0 0 +0 1 1 0 0 1 0 0 0 0 0 1 0 1 0 0 1 1 0 1 0 1 0 0 0 1 
0 0 1 0 +0 0 0 1 0 0 0 1 1 1 1 1 0 0 0 1 1 0 0 0 1 1 1 0 1 0 1 1 1 0 +1 1 0 0 0 0 1 1 1 0 1 0 1 1 1 0 0 1 0 0 0 0 0 0 1 1 1 0 0 0 +1 0 1 1 1 0 1 0 1 0 0 1 1 1 1 1 0 0 1 1 0 1 1 1 1 0 0 0 0 1 +0 0 1 1 1 0 0 0 0 1 0 0 0 0 0 0 1 0 0 1 0 0 1 1 1 0 0 1 0 0 +0 0 1 1 1 1 1 0 1 0 1 0 0 1 1 1 1 0 0 0 1 0 1 1 0 1 1 1 0 0 +0 0 0 0 0 1 0 0 1 1 0 1 1 0 0 0 0 1 0 1 1 0 0 1 0 0 1 0 1 0 +1 0 0 1 0 1 1 1 0 1 0 1 1 0 0 1 1 0 1 1 1 0 1 0 0 0 1 1 1 1 +0 0 0 1 0 0 0 0 0 0 1 1 0 0 0 0 0 1 0 0 0 1 1 0 0 1 1 0 0 0 +1 0 0 1 1 0 1 1 1 1 1 1 1 1 0 0 0 0 0 0 1 0 1 1 0 0 1 0 1 0 +0 1 0 1 1 1 1 1 0 1 0 1 1 0 0 1 1 0 1 1 0 1 1 0 1 1 0 0 0 1 +1 0 1 1 1 0 0 0 1 0 0 1 0 0 0 1 0 1 1 1 0 0 1 1 1 1 0 0 0 1 +0 1 0 0 1 1 1 1 1 1 0 0 1 0 0 1 1 0 1 0 1 0 1 1 1 0 1 1 0 1 +0 0 1 0 1 1 1 0 0 0 1 0 1 0 1 1 0 0 1 1 0 1 0 1 1 0 0 1 0 1 +0 1 1 1 1 1 0 0 0 0 0 1 0 1 1 1 1 1 0 1 1 1 0 0 1 0 0 1 1 1 +1 1 1 1 0 1 1 1 1 1 1 0 0 1 1 0 1 1 0 1 0 1 0 1 0 1 1 0 0 0 +1 0 0 0 1 0 1 0 0 0 1 0 1 0 0 1 0 1 1 1 1 1 0 0 1 1 1 1 1 0 +0 0 0 0 1 1 1 0 1 0 0 1 1 0 0 1 1 1 1 0 0 1 0 1 0 0 0 1 0 0 +1 1 1 1 1 0 0 0 1 1 0 0 1 1 1 1 0 1 0 1 0 0 0 0 1 1 0 1 1 0 +1 0 1 1 0 1 0 1 0 1 1 0 1 1 1 0 0 1 0 0 1 1 0 0 1 1 0 1 0 1 +1 1 1 1 1 0 0 0 0 1 0 0 1 0 0 0 1 0 0 0 0 0 1 0 0 0 0 1 1 1 +0 1 1 0 0 1 0 0 0 0 0 0 0 0 1 1 1 0 0 1 0 1 1 1 0 1 1 1 1 1 +1 1 1 0 1 1 1 1 1 0 0 0 0 1 0 0 1 0 1 0 1 1 1 0 0 1 0 0 1 1 +1 1 0 1 0 1 0 1 0 0 1 0 0 0 1 0 1 1 0 1 1 0 1 0 0 1 0 0 1 0 +1 0 1 1 0 0 1 1 0 0 1 1 0 0 0 1 1 0 0 1 0 0 0 0 0 1 0 1 1 0 +1 1 1 1 1 0 0 1 0 0 1 1 1 0 1 0 0 1 1 1 0 1 1 1 1 1 1 1 1 1 +1 0 1 1 0 0 1 1 0 1 1 1 0 0 0 1 0 1 0 0 0 1 1 1 1 1 0 0 1 0 +0 0 0 0 0 1 1 1 0 0 0 0 0 1 1 1 1 1 0 0 1 1 0 0 1 0 0 1 0 0 +1 1 1 0 0 0 0 1 0 1 1 1 1 1 1 1 0 1 0 1 1 1 1 0 1 1 1 0 1 0 +1 0 0 1 0 1 0 0 0 0 0 0 1 0 1 0 1 1 0 1 0 1 1 0 0 1 0 1 0 1 +1 0 0 0 1 0 1 1 0 1 0 0 0 1 0 1 0 0 0 0 1 1 1 0 1 0 1 1 0 1 +0 1 0 0 0 0 1 0 1 1 1 0 1 1 0 1 0 1 0 1 1 0 0 0 0 0 0 1 1 1 +0 1 0 0 1 0 1 1 0 0 0 0 1 1 0 1 1 1 0 0 1 1 0 0 1 0 1 0 0 0 +0 1 0 1 1 1 1 1 1 1 0 0 1 0 1 0 0 0 0 0 0 1 0 0 1 0 0 1 1 0 +0 0 0 1 0 0 1 0 0 1 1 0 0 0 0 0 0 0 1 0 0 1 0 0 0 1 0 1 0 0 +1 0 0 0 1 0 1 1 1 1 1 1 1 0 1 0 1 1 1 0 0 1 0 1 0 1 0 1 0 0 +1 0 0 0 1 0 1 0 0 0 1 1 0 0 0 1 1 0 0 1 1 1 1 1 1 0 1 1 1 0 +0 0 0 1 0 0 1 0 0 0 0 1 1 0 0 0 0 0 0 1 1 1 0 1 0 0 0 1 1 0 +1 0 0 0 0 0 1 0 1 0 1 0 0 1 1 1 0 1 1 1 0 0 1 0 1 1 1 0 1 0 +0 1 0 0 1 1 1 0 0 1 0 0 0 1 0 1 0 0 0 1 0 0 1 0 0 1 1 1 0 1 +0 0 0 1 1 0 1 0 1 0 1 0 0 0 1 1 1 0 1 1 0 0 0 1 1 0 0 1 0 1 +1 1 1 1 1 1 1 1 0 0 1 1 0 0 0 1 0 1 0 1 0 0 0 1 1 0 1 0 1 0 +0 1 1 0 0 0 1 1 0 0 1 1 0 1 1 1 1 1 0 1 0 0 0 0 1 0 1 0 0 0 +1 1 1 0 1 1 1 0 0 0 0 0 0 0 0 1 1 1 0 0 0 0 1 1 0 1 1 0 0 1 +0 0 1 0 0 1 0 0 1 0 0 0 1 0 0 1 1 1 1 1 1 1 0 1 0 0 0 1 1 0 +1 1 1 0 1 1 0 1 1 0 1 1 0 1 0 0 1 0 0 0 1 1 1 1 0 1 1 0 1 1 +0 0 1 1 1 0 0 0 0 1 1 0 0 1 1 0 1 0 1 0 0 1 0 0 0 1 1 0 0 1 +0 0 0 1 0 0 1 1 1 1 1 1 0 0 1 0 0 1 0 0 0 0 1 1 1 1 1 1 0 0 From scipy-svn at scipy.org Tue Jun 3 02:50:53 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Tue, 3 Jun 2008 01:50:53 -0500 (CDT) Subject: [Scipy-svn] r4405 - in trunk/scipy/cluster: . tests Message-ID: <20080603065053.DED4339C7CA@scipy.org> Author: damian.eads Date: 2008-06-03 01:50:49 -0500 (Tue, 03 Jun 2008) New Revision: 4405 Modified: trunk/scipy/cluster/hierarchy.py trunk/scipy/cluster/tests/test_hierarchy.py Log: Removed unnecessary imports in hierarchy and refactored its code. Wrote more tests. 
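The main behavioral change in this revision is in _unbiased_variance: the removed scipy.stats.var call is replaced by np.var(X, axis=0, ddof=1), which computes the same (n - 1)-normalized estimator without the scipy.stats import. A minimal standalone sketch of that equivalence (the data values here are arbitrary, not from the test suite):

    import numpy as np

    X = np.array([[1.0, 2.0], [3.0, 5.0], [4.0, 8.0]])

    # Unbiased column variance as computed by _unbiased_variance after
    # this commit: sum of squared deviations divided by (n - 1).
    v = np.var(X, axis=0, ddof=1)

    n = X.shape[0]
    v_by_hand = ((X - X.mean(axis=0)) ** 2).sum(axis=0) / (n - 1.0)

    assert np.allclose(v, v_by_hand)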
Modified: trunk/scipy/cluster/hierarchy.py =================================================================== --- trunk/scipy/cluster/hierarchy.py 2008-06-03 04:41:10 UTC (rev 4404) +++ trunk/scipy/cluster/hierarchy.py 2008-06-03 06:50:49 UTC (rev 4405) @@ -175,7 +175,7 @@ """ import numpy as np -import _hierarchy_wrap, scipy, types, math, sys, scipy.stats +import _hierarchy_wrap, types, math, sys _cpy_non_euclid_methods = {'single': 0, 'complete': 1, 'average': 2, 'weighted': 6} @@ -199,7 +199,7 @@ observations. """ #n = np.double(X.shape[1]) - return scipy.stats.var(X, axis=0) # * n / (n - 1.0) + return np.var(X, axis=0, ddof=1) # * n / (n - 1.0) def _copy_array_if_base_present(a): """ @@ -638,6 +638,8 @@ functions in this library. """ + Z = numpy.asarray(Z) + is_valid_linkage(Z, throw=True, name='Z') # The number of original objects is equal to the number of rows minus @@ -795,6 +797,8 @@ ||u-v||_p = (\sum {|u_i - v_i|^p})^(1/p). """ + u = np.asarray(u) + v = np.asarray(v) if p < 1: raise ValueError("p must be at least 1") return math.pow((abs(u-v)**p).sum(), 1.0/p) @@ -805,6 +809,8 @@ Computes the Euclidean distance between two n-vectors u and v, ||u-v||_2 """ + u = np.asarray(u) + v = np.asarray(v) q=np.matrix(u-v) return np.sqrt((q*q.T).sum()) @@ -815,6 +821,8 @@ Computes the squared Euclidean distance between two n-vectors u and v, (||u-v||_2)^2. """ + u = np.asarray(u) + v = np.asarray(v) return ((u-v)*(u-v).T).sum() def cosine(u, v): @@ -824,8 +832,10 @@ Computes the Cosine distance between two n-vectors u and v, (1-uv^T)/(||u||_2 * ||v||_2). """ - return (1.0 - (scipy.dot(u, v.T) / \ - (np.sqrt(scipy.dot(u, u.T)) * np.sqrt(scipy.dot(v, v.T))))) + u = np.asarray(u) + v = np.asarray(v) + return (1.0 - (np.dot(u, v.T) / \ + (np.sqrt(np.dot(u, u.T)) * np.sqrt(np.dot(v, v.T))))) def correlation(u, v): """ @@ -844,9 +854,9 @@ vmu = v.mean() um = u - umu vm = v - vmu - return 1.0 - (scipy.dot(um, vm) / - (np.sqrt(scipy.dot(um, um)) \ - * np.sqrt(scipy.dot(vm, vm)))) + return 1.0 - (np.dot(um, vm) / + (np.sqrt(np.dot(um, um)) \ + * np.sqrt(np.dot(vm, vm)))) def hamming(u, v): """ @@ -864,6 +874,8 @@ for k < n. """ + u = np.asarray(u) + v = np.asarray(v) return (u != v).mean() def jaccard(u, v): @@ -883,6 +895,8 @@ for k < n. """ + u = np.asarray(u) + v = np.asarray(v) return (np.double(np.bitwise_and((u != v), np.bitwise_or(u != 0, v != 0)).sum()) / np.double(np.bitwise_or(u != 0, v != 0).sum())) @@ -904,6 +918,9 @@ for k < n. """ + u = np.asarray(u) + v = np.asarray(v) + n = len(u) (nff, nft, ntf, ntt) = _nbool_correspond_all(u, v) return (ntf + nft - ntt + n) / (ntf + nft + n) @@ -916,6 +933,8 @@ n-vectors u and v. V is a m-dimensional vector of component variances. It is usually computed among a larger collection vectors. """ + u = np.asarray(u) + v = np.asarray(v) V = np.asarray(V) if len(V.shape) != 1 or V.shape[0] != u.shape[0] or u.shape[0] != v.shape[0]: raise TypeError('V must be a 1-D array of the same dimension as u and v.') @@ -928,6 +947,8 @@ Computes the Manhattan distance between two n-vectors u and v, \sum {u_i-v_i}. """ + u = np.asarray(u) + v = np.asarray(v) return abs(u-v).sum() def mahalanobis(u, v, VI): @@ -938,7 +959,9 @@ (u-v)VI(u-v)^T where VI is the inverse covariance matrix. """ - V = np.asarray(V) + u = np.asarray(u) + v = np.asarray(v) + VI = np.asarray(VI) return np.sqrt(np.dot(np.dot((u-v),VI),(u-v).T).sum()) def chebyshev(u, v): @@ -948,6 +971,8 @@ Computes the Chebyshev distance between two n-vectors u and v, \max {|u_i-v_i|}. 
""" + u = np.asarray(u) + v = np.asarray(v) return max(abs(u-v)) def braycurtis(u, v): @@ -957,6 +982,8 @@ Computes the Bray-Curtis distance between two n-vectors u and v, \sum{|u_i-v_i|} / \sum{|u_i+v_i|}. """ + u = np.asarray(u) + v = np.asarray(v) return abs(u-v).sum() / abs(u+v).sum() def canberra(u, v): @@ -980,22 +1007,30 @@ ntf = (u * not_v).sum() ntt = (u * v).sum() elif u.dtype == np.bool: - not_u = scipy.bitwise_not(u) - not_v = scipy.bitwise_not(v) - nff = scipy.bitwise_and(not_u, not_v).sum() - nft = scipy.bitwise_and(not_u, v).sum() - ntf = scipy.bitwise_and(u, not_v).sum() - ntt = scipy.bitwise_and(u, v).sum() + not_u = ~u + not_v = ~v + nff = (not_u & not_v).sum() + nft = (not_u & v).sum() + ntf = (u & not_v).sum() + ntt = (u & v).sum() else: raise TypeError("Arrays being compared have unknown type.") return (nff, nft, ntf, ntt) def _nbool_correspond_ft_tf(u, v): - not_u = scipy.bitwise_not(u) - not_v = scipy.bitwise_not(v) - nft = scipy.bitwise_and(not_u, v).sum() - ntf = scipy.bitwise_and(u, not_v).sum() + if u.dtype == np.int or u.dtype == np.float_ or u.dtype == np.double: + not_u = 1.0 - u + not_v = 1.0 - v + nff = (not_u * not_v).sum() + nft = (not_u * v).sum() + ntf = (u * not_v).sum() + ntt = (u * v).sum() + else: + not_u = ~u + not_v = ~v + nft = (not_u & v).sum() + ntf = (u & not_v).sum() return (nft, ntf) def yule(u, v): @@ -1015,6 +1050,8 @@ R = 2.0 * (c_{TF} + c_{FT}). """ + u = np.asarray(u) + v = np.asarray(v) (nff, nft, ntf, ntt) = _nbool_correspond_all(u, v) print nff, nft, ntf, ntt return float(2.0 * ntf * nft) / float(ntt * nff + ntf * nft) @@ -1034,6 +1071,8 @@ for k < n. """ + u = np.asarray(u) + v = np.asarray(v) (nft, ntf) = _nbool_correspond_ft_tf(u, v) return float(nft + ntf) / float(len(u)) @@ -1054,9 +1093,14 @@ for k < n. """ - ntt = scipy.bitwise_and(u, v).sum() + u = np.asarray(u) + v = np.asarray(v) + if u.dtype == np.bool: + ntt = (u & v).sum() + else: + ntt = (u * v).sum() (nft, ntf) = _nbool_correspond_ft_tf(u, v) - return float(ntf + nft)/float(2.0 * ntt + ntf + nft) + return float(ntf + nft) / float(2.0 * ntt + ntf + nft) def rogerstanimoto(u, v): """ @@ -1078,6 +1122,8 @@ R = 2.0 * (c_{TF} + c_{FT}). """ + u = np.asarray(u) + v = np.asarray(v) (nff, nft, ntf, ntt) = _nbool_correspond_all(u, v) return float(2.0 * (ntf + nft)) / float(ntt + nff + (2.0 * (ntf + nft))) @@ -1089,7 +1135,12 @@ u and v, (n - c_{TT}) / n where c_{ij} is the number of occurrences of u[k] == i and v[k] == j for k < n. """ - ntt = scipy.bitwise_and(u, v).sum() + u = np.asarray(u) + v = np.asarray(v) + if u.dtype == np.bool: + ntt = (u & v).sum() + else: + ntt = (u * v).sum() return float(len(u) - ntt) / float(len(u)) def sokalmichener(u, v): @@ -1101,8 +1152,14 @@ u[k] == i and v[k] == j for k < n and R = 2 * (c_{TF} + c{FT}) and S = c_{FF} + c_{TT}. """ - ntt = scipy.bitwise_and(u, v).sum() - nff = scipy.bitwise_and(scipy.bitwise_not(u), scipy.bitwise_not(v)).sum() + u = np.asarray(u) + v = np.asarray(v) + if u.dtype == np.bool: + ntt = (u & v).sum() + nff = (~u & ~v).sum() + else: + ntt = (u * v).sum() + nff = ((1.0 - u) * (1.0 - v)).sum() (nft, ntf) = _nbool_correspond_ft_tf(u, v) return float(2.0 * (ntf + nft))/float(ntt + nff + 2.0 * (ntf + nft)) @@ -1114,33 +1171,29 @@ u and v, 2R / (c_{TT} + 2R) where c_{ij} is the number of occurrences of u[k] == i and v[k] == j for k < n and R = 2 * (c_{TF} + c{FT}). 
""" - ntt = scipy.bitwise_and(u, v).sum() + u = np.asarray(u) + v = np.asarray(v) + if u.dtype == np.bool: + ntt = (u & v).sum() + else: + ntt = (u * v).sum() (nft, ntf) = _nbool_correspond_ft_tf(u, v) return float(2.0 * (ntf + nft))/float(ntt + 2.0 * (ntf + nft)) -# V means pass covariance -_pdist_metric_info = {'euclidean': ['double'], - 'seuclidean': ['double'], - 'sqeuclidean': ['double'], - 'minkowski': ['double'], - 'cityblock': ['double'], - 'cosine': ['double'], - 'correlation': ['double'], - 'hamming': ['double','bool'], - 'jaccard': ['double', 'bool'], - 'chebyshev': ['double'], - 'canberra': ['double'], - 'braycurtis': ['double'], - 'mahalanobis': ['bool'], - 'yule': ['bool'], - 'matching': ['bool'], - 'dice': ['bool'], - 'kulsinski': ['bool'], - 'rogerstanimoto': ['bool'], - 'russellrao': ['bool'], - 'sokalmichener': ['bool'], - 'sokalsneath': ['bool']} +def _convert_to_bool(X): + if X.dtype != np.bool: + X = np.bool_(X) + if not X.flags.contiguous: + X = X.copy() + return X +def _convert_to_double(X): + if X.dtype != np.double: + X = np.double(X) + if not X.flags.contiguous: + X = X.copy() + return X + def pdist(X, metric='euclidean', p=2, V=None, VI=None): """ Y = pdist(X, method='euclidean', p=2) @@ -1322,12 +1375,12 @@ X = np.asarray(X) - if np.issubsctype(X, np.floating) and not np.issubsctype(X, np.double): - raise TypeError('Floating point arrays must be 64-bit (got %r).' % - (X.dtype.type,)) + #if np.issubsctype(X, np.floating) and not np.issubsctype(X, np.double): + # raise TypeError('Floating point arrays must be 64-bit (got %r).' % + # (X.dtype.type,)) # The C code doesn't do striding. - [X] = _copy_arrays_if_base_present([X]) + [X] = _copy_arrays_if_base_present([_convert_to_double(X)]) s = X.shape @@ -1365,38 +1418,33 @@ elif mtype is types.StringType: mstr = metric.lower() - if X.dtype != np.double and \ - (mstr != 'hamming' and mstr != 'jaccard'): - TypeError('A double array must be passed.') + #if X.dtype != np.double and \ + # (mstr != 'hamming' and mstr != 'jaccard'): + # TypeError('A double array must be passed.') if mstr in set(['euclidean', 'euclid', 'eu', 'e']): - _hierarchy_wrap.pdist_euclidean_wrap(X, dm) - elif mstr in set(['sqeuclidean']): - _hierarchy_wrap.pdist_euclidean_wrap(X, dm) + _hierarchy_wrap.pdist_euclidean_wrap(_convert_to_double(X), dm) + elif mstr in set(['sqeuclidean', 'sqe', 'sqeuclid']): + _hierarchy_wrap.pdist_euclidean_wrap(_convert_to_double(X), dm) dm = dm ** 2.0 elif mstr in set(['cityblock', 'cblock', 'cb', 'c']): _hierarchy_wrap.pdist_city_block_wrap(X, dm) elif mstr in set(['hamming', 'hamm', 'ha', 'h']): - if X.dtype == np.double: - _hierarchy_wrap.pdist_hamming_wrap(X, dm) - elif X.dtype == bool: - _hierarchy_wrap.pdist_hamming_bool_wrap(X, dm) + if X.dtype == np.bool: + _hierarchy_wrap.pdist_hamming_bool_wrap(_convert_to_bool(X), dm) else: - raise TypeError('Invalid input array value type %s ' - 'for hamming.' % str(X.dtype)) + _hierarchy_wrap.pdist_hamming_wrap(_convert_to_double(X), dm) elif mstr in set(['jaccard', 'jacc', 'ja', 'j']): - if X.dtype == np.double: - _hierarchy_wrap.pdist_jaccard_wrap(X, dm) - elif X.dtype == np.bool: - _hierarchy_wrap.pdist_jaccard_bool_wrap(X, dm) + if X.dtype == np.bool: + _hierarchy_wrap.pdist_jaccard_bool_wrap(_convert_to_bool(X), dm) else: - raise TypeError('Invalid input array value type %s for ' - 'jaccard.' 
% str(X.dtype)) + _hierarchy_wrap.pdist_jaccard_wrap(_convert_to_double(X), dm) elif mstr in set(['chebychev', 'chebyshev', 'cheby', 'cheb', 'ch']): - _hierarchy_wrap.pdist_chebyshev_wrap(X, dm) + _hierarchy_wrap.pdist_chebyshev_wrap(_convert_to_double(X), dm) elif mstr in set(['minkowski', 'mi', 'm']): - _hierarchy_wrap.pdist_minkowski_wrap(X, dm, p) + _hierarchy_wrap.pdist_minkowski_wrap(_convert_to_double(X), dm, p) elif mstr in set(['seuclidean', 'se', 's']): if V is not None: + V = np.asarray(V) if type(V) is not _array_type: raise TypeError('Variance vector V must be a numpy array') if V.dtype != np.double: @@ -1406,17 +1454,17 @@ if V.shape[0] != n: raise ValueError('Variance vector V must be of the same dimension as the vectors on which the distances are computed.') # The C code doesn't do striding. - [VV] = _copy_arrays_if_base_present([V]) + [VV] = _copy_arrays_if_base_present([_convert_to_double(V)]) else: VV = _unbiased_variance(X) - _hierarchy_wrap.pdist_seuclidean_wrap(X, VV, dm) + _hierarchy_wrap.pdist_seuclidean_wrap(_convert_to_double(X), VV, dm) # Need to test whether vectorized cosine works better. # Find out: Is there a dot subtraction operator so I can # subtract matrices in a similar way to multiplying them? # Need to get rid of as much unnecessary C code as possible. elif mstr in set(['cosine_old', 'cos_old']): norms = np.sqrt(np.sum(X * X, axis=1)) - _hierarchy_wrap.pdist_cosine_wrap(X, dm, norms) + _hierarchy_wrap.pdist_cosine_wrap(_convert_to_double(X), dm, norms) elif mstr in set(['cosine', 'cos']): norms = np.sqrt(np.sum(X * X, axis=1)) nV = norms.reshape(m, 1) @@ -1431,9 +1479,10 @@ X2 = X - X.mean(1)[:,np.newaxis] #X2 = X - np.matlib.repmat(np.mean(X, axis=1).reshape(m, 1), 1, n) norms = np.sqrt(np.sum(X2 * X2, axis=1)) - _hierarchy_wrap.pdist_cosine_wrap(X2, dm, norms) + _hierarchy_wrap.pdist_cosine_wrap(_convert_to_double(X2), _convert_to_double(dm), _convert_to_double(norms)) elif mstr in set(['mahalanobis', 'mahal', 'mah']): if VI is not None: + VI = _convert_to_double(np.asarray(VI)) if type(VI) != _array_type: raise TypeError('VI must be a numpy array.') if VI.dtype != np.double: @@ -1441,29 +1490,29 @@ [VI] = _copy_arrays_if_base_present([VI]) else: V = np.cov(X.T) - VI = np.linalg.inv(V).T.copy() + VI = _convert_to_double(np.linalg.inv(V).T.copy()) # (u-v)V^(-1)(u-v)^T - _hierarchy_wrap.pdist_mahalanobis_wrap(X, VI, dm) + _hierarchy_wrap.pdist_mahalanobis_wrap(_convert_to_double(X), VI, dm) elif mstr == 'canberra': - _hierarchy_wrap.pdist_canberra_wrap(X, dm) + _hierarchy_wrap.pdist_canberra_wrap(_convert_to_bool(X), dm) elif mstr == 'braycurtis': - _hierarchy_wrap.pdist_bray_curtis_wrap(X, dm) + _hierarchy_wrap.pdist_bray_curtis_wrap(_convert_to_bool(X), dm) elif mstr == 'yule': - _hierarchy_wrap.pdist_yule_bool_wrap(X, dm) + _hierarchy_wrap.pdist_yule_bool_wrap(_convert_to_bool(X), dm) elif mstr == 'matching': - _hierarchy_wrap.pdist_matching_bool_wrap(X, dm) + _hierarchy_wrap.pdist_matching_bool_wrap(_convert_to_bool(X), dm) elif mstr == 'kulsinski': - _hierarchy_wrap.pdist_kulsinski_bool_wrap(X, dm) + _hierarchy_wrap.pdist_kulsinski_bool_wrap(_convert_to_bool(X), dm) elif mstr == 'dice': - _hierarchy_wrap.pdist_dice_bool_wrap(X, dm) + _hierarchy_wrap.pdist_dice_bool_wrap(_convert_to_bool(X), dm) elif mstr == 'rogerstanimoto': - _hierarchy_wrap.pdist_rogerstanimoto_bool_wrap(X, dm) + _hierarchy_wrap.pdist_rogerstanimoto_bool_wrap(_convert_to_bool(X), dm) elif mstr == 'russellrao': - _hierarchy_wrap.pdist_russellrao_bool_wrap(X, dm) + 
_hierarchy_wrap.pdist_russellrao_bool_wrap(_convert_to_bool(X), dm) elif mstr == 'sokalmichener': - _hierarchy_wrap.pdist_sokalmichener_bool_wrap(X, dm) + _hierarchy_wrap.pdist_sokalmichener_bool_wrap(_convert_to_bool(X), dm) elif mstr == 'sokalsneath': - _hierarchy_wrap.pdist_sokalsneath_bool_wrap(X, dm) + _hierarchy_wrap.pdist_sokalsneath_bool_wrap(_convert_to_bool(X), dm) elif metric == 'test_euclidean': dm = pdist(X, euclidean) elif metric == 'test_sqeuclidean': @@ -1499,12 +1548,16 @@ dm = pdist(X, matching) elif metric == 'test_dice': dm = pdist(X, dice) + elif metric == 'test_kulsinski': + dm = pdist(X, kulsinski) elif metric == 'test_rogerstanimoto': dm = pdist(X, rogerstanimoto) elif metric == 'test_russellrao': dm = pdist(X, russellrao) elif metric == 'test_sokalsneath': dm = pdist(X, sokalsneath) + elif metric == 'test_sokalmichener': + dm = pdist(X, sokalmichener) else: raise ValueError('Unknown Distance Metric: %s' % mstr) else: @@ -1919,7 +1972,7 @@ Returns the number of original observations that correspond to a square, non-condensed distance matrix D. """ - is_valid_dm(D, tol=scipy.inf, throw=True, name='D') + is_valid_dm(D, tol=np.inf, throw=True, name='D') return D.shape[0] def numobs_y(Y): @@ -2123,10 +2176,10 @@ # p <= 20, size="12" # 20 < p <= 30, size="10" # 30 < p <= 50, size="8" - # 50 < p <= scipy.inf, size="6" + # 50 < p <= np.inf, size="6" - _dtextsizes = {20: 12, 30: 10, 50: 8, 85: 6, scipy.inf: 5} - _drotation = {20: 0, 40: 45, scipy.inf: 90} + _dtextsizes = {20: 12, 30: 10, 50: 8, 85: 6, np.inf: 5} + _drotation = {20: 0, 40: 45, np.inf: 90} _dtextsortedkeys = list(_dtextsizes.keys()) _dtextsortedkeys.sort() _drotationsortedkeys = list(_drotation.keys()) @@ -2162,7 +2215,7 @@ ivw = len(ivl) * 10 # Depenendent variable plot height dvw = mh + mh * 0.05 - ivticks = scipy.arange(5, len(ivl)*10+5, 10) + ivticks = np.arange(5, len(ivl)*10+5, 10) if orientation == 'top': axis.set_ylim([0, dvw]) axis.set_xlim([0, ivw]) @@ -2558,7 +2611,7 @@ if truncate_mode == 'mtica' or truncate_mode == 'level': if p <= 0: - p = scipy.inf + p = np.inf if get_leaves: lvs = [] else: @@ -2658,7 +2711,7 @@ def _dendrogram_calculate_info(Z, p, truncate_mode, \ - colorthreshold=scipy.inf, get_leaves=True, \ + colorthreshold=np.inf, get_leaves=True, \ orientation='top', labels=None, \ count_sort=False, distance_sort=False, \ show_leaf_counts=False, i=-1, iv=0.0, \ @@ -2940,6 +2993,7 @@ Note that when Z[:,2] is monotonic, Z[:,2] and MD should not differ. See linkage for more information on this issue. """ + Z = np.asarray(Z) is_valid_linkage(Z, throw=True, name='Z') n = Z.shape[0] + 1 @@ -2957,6 +3011,8 @@ inconsistency matrix. MI is a monotonic (n-1)-sized numpy array of doubles. """ + Z = np.asarray(Z) + R = np.asarray(R) is_valid_linkage(Z, throw=True, name='Z') is_valid_im(R, throw=True, name='R') @@ -2975,6 +3031,8 @@ is the maximum over R[Q(j)-n, i] where Q(j) the set of all node ids corresponding to nodes below and including j. 
""" + Z = np.asarray(Z) + R = np.asarray(R) is_valid_linkage(Z, throw=True, name='Z') is_valid_im(R, throw=True, name='R') if type(i) is not types.IntType: Modified: trunk/scipy/cluster/tests/test_hierarchy.py =================================================================== --- trunk/scipy/cluster/tests/test_hierarchy.py 2008-06-03 04:41:10 UTC (rev 4404) +++ trunk/scipy/cluster/tests/test_hierarchy.py 2008-06-03 06:50:49 UTC (rev 4405) @@ -68,7 +68,8 @@ "linkage-single-tdist.txt", "linkage-complete-tdist.txt", "linkage-average-tdist.txt", - "linkage-weighted-tdist.txt"] + "linkage-weighted-tdist.txt", + "random-bool-data.txt"] _tdist = numpy.array([[0, 662, 877, 255, 412, 996], [662, 0, 295, 468, 268, 400], @@ -101,73 +102,10 @@ #print numpy.abs(Y_test1 - Y_right).max() class TestPdist(TestCase): + """ + Test suite for the pdist function. + """ - def test_pdist_raises_type_error_float32(self): - "Testing whether passing a float32 observation array generates an exception." - X = numpy.zeros((10, 10), dtype=numpy.float32) - try: - pdist(X, 'euclidean') - except TypeError: - pass - except: - self.fail("float32 observation matrices should generate an error in pdist.") - - def test_pdist_raises_type_error_longdouble(self): - "Testing whether passing a longdouble observation array generates an exception." - X = numpy.zeros((10, 10), dtype=numpy.longdouble) - try: - pdist(X, 'euclidean') - except TypeError: - pass - except: - self.fail("longdouble observation matrices should generate an error in pdist.") - - def test_pdist_var_raises_type_error_float32(self): - "Testing whether passing a float32 variance matrix generates an exception." - X = numpy.zeros((10, 10)) - V = numpy.zeros((10, 10), dtype=numpy.float32) - try: - pdist(X, 'seuclidean', V=V) - except TypeError: - pass - except: - self.fail("float32 V matrices should generate an error in pdist('seuclidean').") - - def test_pdist_var_raises_type_error_longdouble(self): - "Testing whether passing a longdouble variance matrix generates an exception." - X = numpy.zeros((10, 10)) - V = numpy.zeros((10, 10), dtype=numpy.longdouble) - - try: - pdist(X, 'seuclidean', V=V) - except TypeError: - pass - except: - self.fail("longdouble matrices should generate an error in pdist('seuclidean').") - - def test_pdist_ivar_raises_type_error_float32(self): - "Testing whether passing a float32 variance matrix generates an exception." - X = numpy.zeros((10, 10)) - VI = numpy.zeros((10, 10), dtype=numpy.float32) - try: - pdist(X, 'mahalanobis', VI=VI) - except TypeError: - pass - except: - self.fail("float32 matrices should generate an error in pdist('mahalanobis').") - - def test_pdist_ivar_raises_type_error_longdouble(self): - "Testing whether passing a longdouble variance matrix generates an exception." - X = numpy.zeros((10, 10)) - VI = numpy.zeros((10, 10), dtype=numpy.longdouble) - - try: - pdist(X, 'mahalanobis', VI=VI) - except TypeError: - pass - except: - self.fail("longdouble matrices should generate an error in pdist('mahalanobis').") - ################### pdist: euclidean def test_pdist_euclidean_random(self): "Tests pdist(X, 'euclidean') on random data." @@ -179,6 +117,16 @@ Y_test1 = pdist(X, 'euclidean') self.failUnless(within_tol(Y_test1, Y_right, eps)) + def test_pdist_euclidean_random_float32(self): + "Tests pdist(X, 'euclidean') on random data (float32)." + eps = 1e-07 + # Get the data: the input matrix and the right output. 
+ X = numpy.float32(eo['pdist-double-inp']) + Y_right = eo['pdist-euclidean'] + + Y_test1 = pdist(X, 'euclidean') + self.failUnless(within_tol(Y_test1, Y_right, eps)) + def test_pdist_euclidean_random_nonC(self): "Tests pdist(X, 'test_euclidean') [the non-C implementation] on random data." eps = 1e-07 @@ -188,7 +136,7 @@ Y_test2 = pdist(X, 'test_euclidean') self.failUnless(within_tol(Y_test2, Y_right, eps)) - def test_pdist_euclidean_iris(self): + def test_pdist_euclidean_iris_double(self): "Tests pdist(X, 'euclidean') on the Iris data set." eps = 1e-07 # Get the data: the input matrix and the right output. @@ -198,6 +146,17 @@ Y_test1 = pdist(X, 'euclidean') self.failUnless(within_tol(Y_test1, Y_right, eps)) + def test_pdist_euclidean_iris_float32(self): + "Tests pdist(X, 'euclidean') on the Iris data set. (float32)" + eps = 1e-06 + # Get the data: the input matrix and the right output. + X = numpy.float32(eo['iris']) + Y_right = eo['pdist-euclidean-iris'] + + Y_test1 = pdist(X, 'euclidean') + print numpy.abs(Y_right - Y_test1).max() + self.failUnless(within_tol(Y_test1, Y_right, eps)) + def test_pdist_euclidean_iris_nonC(self): "Tests pdist(X, 'test_euclidean') [the non-C implementation] on the Iris data set." eps = 1e-07 @@ -218,6 +177,16 @@ Y_test1 = pdist(X, 'seuclidean') self.failUnless(within_tol(Y_test1, Y_right, eps)) + def test_pdist_seuclidean_random_float32(self): + "Tests pdist(X, 'seuclidean') on random data (float32)." + eps = 1e-05 + # Get the data: the input matrix and the right output. + X = numpy.float32(eo['pdist-double-inp']) + Y_right = eo['pdist-seuclidean'] + + Y_test1 = pdist(X, 'seuclidean') + self.failUnless(within_tol(Y_test1, Y_right, eps)) + def test_pdist_seuclidean_random_nonC(self): "Tests pdist(X, 'test_sqeuclidean') [the non-C implementation] on random data." eps = 1e-05 @@ -237,6 +206,16 @@ Y_test1 = pdist(X, 'seuclidean') self.failUnless(within_tol(Y_test1, Y_right, eps)) + def test_pdist_seuclidean_iris_float32(self): + "Tests pdist(X, 'seuclidean') on the Iris data set (float32)." + eps = 1e-05 + # Get the data: the input matrix and the right output. + X = numpy.float32(eo['iris']) + Y_right = eo['pdist-seuclidean-iris'] + + Y_test1 = pdist(X, 'seuclidean') + self.failUnless(within_tol(Y_test1, Y_right, eps)) + def test_pdist_seuclidean_iris_nonC(self): "Tests pdist(X, 'test_seuclidean') [the non-C implementation] on the Iris data set." eps = 1e-05 @@ -253,7 +232,16 @@ # Get the data: the input matrix and the right output. X = eo['pdist-double-inp'] Y_right = eo['pdist-cosine'] + Y_test1 = pdist(X, 'cosine') + self.failUnless(within_tol(Y_test1, Y_right, eps)) + def test_pdist_cosine_random_float32(self): + "Tests pdist(X, 'cosine') on random data. (float32)" + eps = 1e-08 + # Get the data: the input matrix and the right output. + X = numpy.float32(eo['pdist-double-inp']) + Y_right = eo['pdist-cosine'] + Y_test1 = pdist(X, 'cosine') self.failUnless(within_tol(Y_test1, Y_right, eps)) @@ -277,6 +265,18 @@ self.failUnless(within_tol(Y_test1, Y_right, eps)) #print "cosine-iris", numpy.abs(Y_test1 - Y_right).max() + def test_pdist_cosine_iris_float32(self): + "Tests pdist(X, 'cosine') on the Iris data set." + eps = 1e-07 + # Get the data: the input matrix and the right output. 
+ X = numpy.float32(eo['iris']) + Y_right = eo['pdist-cosine-iris'] + + Y_test1 = pdist(X, 'cosine') + print numpy.abs(Y_test1 - Y_right).max() + self.failUnless(within_tol(Y_test1, Y_right, eps)) + #print "cosine-iris", numpy.abs(Y_test1 - Y_right).max() + def test_pdist_cosine_iris_nonC(self): "Tests pdist(X, 'test_cosine') [the non-C implementation] on the Iris data set." eps = 1e-08 @@ -293,7 +293,16 @@ # Get the data: the input matrix and the right output. X = eo['pdist-double-inp'] Y_right = eo['pdist-cityblock'] + Y_test1 = pdist(X, 'cityblock') + #print "cityblock", numpy.abs(Y_test1 - Y_right).max() + self.failUnless(within_tol(Y_test1, Y_right, eps)) + def test_pdist_cityblock_random_float32(self): + "Tests pdist(X, 'cityblock') on random data. (float32)" + eps = 1e-06 + # Get the data: the input matrix and the right output. + X = numpy.float32(eo['pdist-double-inp']) + Y_right = eo['pdist-cityblock'] Y_test1 = pdist(X, 'cityblock') #print "cityblock", numpy.abs(Y_test1 - Y_right).max() self.failUnless(within_tol(Y_test1, Y_right, eps)) @@ -318,6 +327,17 @@ self.failUnless(within_tol(Y_test1, Y_right, eps)) #print "cityblock-iris", numpy.abs(Y_test1 - Y_right).max() + def test_pdist_cityblock_iris_float32(self): + "Tests pdist(X, 'cityblock') on the Iris data set. (float32)" + eps = 1e-06 + # Get the data: the input matrix and the right output. + X = numpy.float32(eo['iris']) + Y_right = eo['pdist-cityblock-iris'] + + Y_test1 = pdist(X, 'cityblock') + print "cityblock-iris-float32", numpy.abs(Y_test1 - Y_right).max() + self.failUnless(within_tol(Y_test1, Y_right, eps)) + def test_pdist_cityblock_iris_nonC(self): "Tests pdist(X, 'test_cityblock') [the non-C implementation] on the Iris data set." eps = 1e-14 @@ -339,6 +359,17 @@ #print "correlation", numpy.abs(Y_test1 - Y_right).max() self.failUnless(within_tol(Y_test1, Y_right, eps)) + def test_pdist_correlation_random_float32(self): + "Tests pdist(X, 'correlation') on random data. (float32)" + eps = 1e-07 + # Get the data: the input matrix and the right output. + X = numpy.float32(eo['pdist-double-inp']) + Y_right = eo['pdist-correlation'] + + Y_test1 = pdist(X, 'correlation') + #print "correlation", numpy.abs(Y_test1 - Y_right).max() + self.failUnless(within_tol(Y_test1, Y_right, eps)) + def test_pdist_correlation_random_nonC(self): "Tests pdist(X, 'test_correlation') [the non-C implementation] on random data." eps = 1e-07 @@ -359,6 +390,17 @@ #print "correlation-iris", numpy.abs(Y_test1 - Y_right).max() self.failUnless(within_tol(Y_test1, Y_right, eps)) + def test_pdist_correlation_iris_float32(self): + "Tests pdist(X, 'correlation') on the Iris data set. (float32)" + eps = 1e-07 + # Get the data: the input matrix and the right output. + X = eo['iris'] + Y_right = numpy.float32(eo['pdist-correlation-iris']) + + Y_test1 = pdist(X, 'correlation') + print "correlation-iris", numpy.abs(Y_test1 - Y_right).max() + self.failUnless(within_tol(Y_test1, Y_right, eps)) + def test_pdist_correlation_iris_nonC(self): "Tests pdist(X, 'test_correlation') [the non-C implementation] on the Iris data set." eps = 1e-08 @@ -382,6 +424,17 @@ #print "minkowski", numpy.abs(Y_test1 - Y_right).max() self.failUnless(within_tol(Y_test1, Y_right, eps)) + def test_pdist_minkowski_random_float32(self): + "Tests pdist(X, 'minkowski') on random data. (float32)" + eps = 1e-05 + # Get the data: the input matrix and the right output. 
+ X = numpy.float32(eo['pdist-double-inp']) + Y_right = eo['pdist-minkowski-3.2'] + + Y_test1 = pdist(X, 'minkowski', 3.2) + #print "minkowski", numpy.abs(Y_test1 - Y_right).max() + self.failUnless(within_tol(Y_test1, Y_right, eps)) + def test_pdist_minkowski_random_nonC(self): "Tests pdist(X, 'test_minkowski') [the non-C implementation] on random data." eps = 1e-05 @@ -397,7 +450,16 @@ # Get the data: the input matrix and the right output. X = eo['iris'] Y_right = eo['pdist-minkowski-3.2-iris'] + Y_test1 = pdist(X, 'minkowski', 3.2) + #print "minkowski-iris-3.2", numpy.abs(Y_test1 - Y_right).max() + self.failUnless(within_tol(Y_test1, Y_right, eps)) + def test_pdist_minkowski_iris_float32(self): + "Tests pdist(X, 'minkowski') on iris data. (float32)" + eps = 1e-07 + # Get the data: the input matrix and the right output. + X = numpy.float32(eo['iris']) + Y_right = eo['pdist-minkowski-3.2-iris'] Y_test1 = pdist(X, 'minkowski', 3.2) #print "minkowski-iris-3.2", numpy.abs(Y_test1 - Y_right).max() self.failUnless(within_tol(Y_test1, Y_right, eps)) @@ -417,11 +479,21 @@ # Get the data: the input matrix and the right output. X = eo['iris'] Y_right = eo['pdist-minkowski-5.8-iris'] - Y_test1 = pdist(X, 'minkowski', 5.8) #print "minkowski-iris-5.8", numpy.abs(Y_test1 - Y_right).max() self.failUnless(within_tol(Y_test1, Y_right, eps)) + def test_pdist_minkowski_iris_float32(self): + "Tests pdist(X, 'minkowski') on iris data. (float32)" + eps = 1e-06 + # Get the data: the input matrix and the right output. + X = numpy.float32(eo['iris']) + Y_right = eo['pdist-minkowski-5.8-iris'] + + Y_test1 = pdist(X, 'minkowski', 5.8) + print "minkowski-iris-5.8", numpy.abs(Y_test1 - Y_right).max() + self.failUnless(within_tol(Y_test1, Y_right, eps)) + def test_pdist_minkowski_iris_nonC(self): "Tests pdist(X, 'test_minkowski') [the non-C implementation] on iris data." eps = 1e-07 @@ -443,6 +515,17 @@ #print "hamming", numpy.abs(Y_test1 - Y_right).max() self.failUnless(within_tol(Y_test1, Y_right, eps)) + def test_pdist_hamming_random_float32(self): + "Tests pdist(X, 'hamming') on random data." + eps = 1e-07 + # Get the data: the input matrix and the right output. + X = numpy.float32(eo['pdist-boolean-inp']) + Y_right = eo['pdist-hamming'] + + Y_test1 = pdist(X, 'hamming') + #print "hamming", numpy.abs(Y_test1 - Y_right).max() + self.failUnless(within_tol(Y_test1, Y_right, eps)) + def test_pdist_hamming_random_nonC(self): "Tests pdist(X, 'test_hamming') [the non-C implementation] on random data." eps = 1e-07 @@ -460,7 +543,16 @@ # Get the data: the input matrix and the right output. X = numpy.float64(eo['pdist-boolean-inp']) Y_right = eo['pdist-hamming'] + Y_test1 = pdist(X, 'hamming') + #print "hamming", numpy.abs(Y_test1 - Y_right).max() + self.failUnless(within_tol(Y_test1, Y_right, eps)) + def test_pdist_dhamming_random_float32(self): + "Tests pdist(X, 'hamming') on random data. (float32)" + eps = 1e-07 + # Get the data: the input matrix and the right output. + X = numpy.float32(eo['pdist-boolean-inp']) + Y_right = eo['pdist-hamming'] Y_test1 = pdist(X, 'hamming') #print "hamming", numpy.abs(Y_test1 - Y_right).max() self.failUnless(within_tol(Y_test1, Y_right, eps)) @@ -487,6 +579,17 @@ #print "jaccard", numpy.abs(Y_test1 - Y_right).max() self.failUnless(within_tol(Y_test1, Y_right, eps)) + def test_pdist_jaccard_random_float32(self): + "Tests pdist(X, 'jaccard') on random data. (float32)" + eps = 1e-08 + # Get the data: the input matrix and the right output. 
+ X = numpy.float32(eo['pdist-boolean-inp']) + Y_right = eo['pdist-jaccard'] + + Y_test1 = pdist(X, 'jaccard') + #print "jaccard", numpy.abs(Y_test1 - Y_right).max() + self.failUnless(within_tol(Y_test1, Y_right, eps)) + def test_pdist_jaccard_random_nonC(self): "Tests pdist(X, 'test_jaccard') [the non-C implementation] on random data." eps = 1e-08 @@ -509,6 +612,17 @@ #print "jaccard", numpy.abs(Y_test1 - Y_right).max() self.failUnless(within_tol(Y_test1, Y_right, eps)) + def test_pdist_djaccard_random_float32(self): + "Tests pdist(X, 'jaccard') on random data. (float32)" + eps = 1e-08 + # Get the data: the input matrix and the right output. + X = numpy.float32(eo['pdist-boolean-inp']) + Y_right = eo['pdist-jaccard'] + + Y_test1 = pdist(X, 'jaccard') + #print "jaccard", numpy.abs(Y_test1 - Y_right).max() + self.failUnless(within_tol(Y_test1, Y_right, eps)) + def test_pdist_djaccard_random_nonC(self): "Tests pdist(X, 'test_jaccard') [the non-C implementation] on random data." eps = 1e-08 @@ -531,6 +645,17 @@ #print "chebychev", numpy.abs(Y_test1 - Y_right).max() self.failUnless(within_tol(Y_test1, Y_right, eps)) + def test_pdist_chebychev_random_float32(self): + "Tests pdist(X, 'chebychev') on random data. (float32)" + eps = 1e-07 + # Get the data: the input matrix and the right output. + X = numpy.float32(eo['pdist-double-inp']) + Y_right = eo['pdist-chebychev'] + + Y_test1 = pdist(X, 'chebychev') + print "chebychev", numpy.abs(Y_test1 - Y_right).max() + self.failUnless(within_tol(Y_test1, Y_right, eps)) + def test_pdist_chebychev_random_nonC(self): "Tests pdist(X, 'test_chebychev') [the non-C implementation] on random data." eps = 1e-08 @@ -547,20 +672,19 @@ # Get the data: the input matrix and the right output. X = eo['iris'] Y_right = eo['pdist-chebychev-iris'] - Y_test1 = pdist(X, 'chebychev') #print "chebychev-iris", numpy.abs(Y_test1 - Y_right).max() self.failUnless(within_tol(Y_test1, Y_right, eps)) - def test_pdist_chebychev_iris_nonC(self): - "Tests pdist(X, 'test_chebychev') [the non-C implementation] on the Iris data set." - eps = 1e-15 + def test_pdist_chebychev_iris_float32(self): + "Tests pdist(X, 'chebychev') on the Iris data set. (float32)" + eps = 1e-06 # Get the data: the input matrix and the right output. - X = eo['iris'] + X = numpy.float32(eo['iris']) Y_right = eo['pdist-chebychev-iris'] - Y_test2 = pdist(X, 'test_chebychev') - #print "test-chebychev-iris", numpy.abs(Y_test2 - Y_right).max() - self.failUnless(within_tol(Y_test2, Y_right, eps)) + Y_test1 = pdist(X, 'chebychev') + print "chebychev-iris", numpy.abs(Y_test1 - Y_right).max() + self.failUnless(within_tol(Y_test1, Y_right, eps)) def test_pdist_chebychev_iris_nonC(self): "Tests pdist(X, 'test_chebychev') [the non-C implementation] on the Iris data set." @@ -590,6 +714,20 @@ self.failUnless(numpy.abs(m - (2.0/3.0)) <= 1e-10) self.failUnless(numpy.abs(m2 - (2.0/3.0)) <= 1e-10) + def test_pdist_matching_match(self): + "Tests pdist('matching') to see if the two implementations match on random boolean input data." + D = eo['random-bool-data'] + B = numpy.bool_(D) + print B.shape, B.dtype + eps = 1e-10 + y1 = pdist(B, "matching") + y2 = pdist(B, "test_matching") + y3 = pdist(D, "test_matching") + print numpy.abs(y1-y2).max() + print numpy.abs(y1-y3).max() + self.failUnless(within_tol(y1, y2, eps)) + self.failUnless(within_tol(y2, y3, eps)) + def test_pdist_jaccard_mtica1(self): "Tests jaccard(*,*) with mtica example #1." 
m = jaccard(numpy.array([1, 0, 1, 1, 0]), @@ -608,6 +746,19 @@ self.failUnless(numpy.abs(m - (2.0/3.0)) <= 1e-10) self.failUnless(numpy.abs(m2 - (2.0/3.0)) <= 1e-10) + def test_pdist_jaccard_match(self): + "Tests pdist('jaccard') to see if the two implementations match on random double input data." + D = eo['random-bool-data'] + print D.shape, D.dtype + eps = 1e-10 + y1 = pdist(D, "jaccard") + y2 = pdist(D, "test_jaccard") + y3 = pdist(numpy.bool_(D), "test_jaccard") + print numpy.abs(y1-y2).max() + print numpy.abs(y2-y3).max() + self.failUnless(within_tol(y1, y2, eps)) + self.failUnless(within_tol(y2, y3, eps)) + def test_pdist_yule_mtica1(self): "Tests yule(*,*) with mtica example #1." m = yule(numpy.array([1, 0, 1, 1, 0]), @@ -628,6 +779,19 @@ self.failUnless(numpy.abs(m - 2.0) <= 1e-10) self.failUnless(numpy.abs(m2 - 2.0) <= 1e-10) + def test_pdist_yule_match(self): + "Tests pdist('yule') to see if the two implementations match on random double input data." + D = eo['random-bool-data'] + print D.shape, D.dtype + eps = 1e-10 + y1 = pdist(D, "yule") + y2 = pdist(D, "test_yule") + y3 = pdist(numpy.bool_(D), "test_yule") + print numpy.abs(y1-y2).max() + print numpy.abs(y2-y3).max() + self.failUnless(within_tol(y1, y2, eps)) + self.failUnless(within_tol(y2, y3, eps)) + def test_pdist_dice_mtica1(self): "Tests dice(*,*) with mtica example #1." m = dice(numpy.array([1, 0, 1, 1, 0]), @@ -648,6 +812,19 @@ self.failUnless(numpy.abs(m - 0.5) <= 1e-10) self.failUnless(numpy.abs(m2 - 0.5) <= 1e-10) + def test_pdist_dice_match(self): + "Tests pdist('dice') to see if the two implementations match on random double input data." + D = eo['random-bool-data'] + print D.shape, D.dtype + eps = 1e-10 + y1 = pdist(D, "dice") + y2 = pdist(D, "test_dice") + y3 = pdist(D, "test_dice") + print numpy.abs(y1-y2).max() + print numpy.abs(y2-y3).max() + self.failUnless(within_tol(y1, y2, eps)) + self.failUnless(within_tol(y2, y3, eps)) + def test_pdist_sokalsneath_mtica1(self): "Tests sokalsneath(*,*) with mtica example #1." m = sokalsneath(numpy.array([1, 0, 1, 1, 0]), @@ -668,6 +845,19 @@ self.failUnless(numpy.abs(m - (4.0/5.0)) <= 1e-10) self.failUnless(numpy.abs(m2 - (4.0/5.0)) <= 1e-10) + def test_pdist_sokalsneath_match(self): + "Tests pdist('sokalsneath') to see if the two implementations match on random double input data." + D = eo['random-bool-data'] + print D.shape, D.dtype + eps = 1e-10 + y1 = pdist(D, "sokalsneath") + y2 = pdist(D, "test_sokalsneath") + y3 = pdist(numpy.bool_(D), "test_sokalsneath") + print numpy.abs(y1-y2).max() + print numpy.abs(y2-y3).max() + self.failUnless(within_tol(y1, y2, eps)) + self.failUnless(within_tol(y2, y3, eps)) + def test_pdist_rogerstanimoto_mtica1(self): "Tests rogerstanimoto(*,*) with mtica example #1." m = rogerstanimoto(numpy.array([1, 0, 1, 1, 0]), @@ -688,6 +878,18 @@ self.failUnless(numpy.abs(m - (4.0/5.0)) <= 1e-10) self.failUnless(numpy.abs(m2 - (4.0/5.0)) <= 1e-10) + def test_pdist_rogerstanimoto_match(self): + "Tests pdist('rogerstanimoto') to see if the two implementations match on random double input data." + D = eo['random-bool-data'] + print D.shape, D.dtype + eps = 1e-10 + y1 = pdist(D, "rogerstanimoto") + y2 = pdist(D, "test_rogerstanimoto") + y3 = pdist(numpy.bool_(D), "test_rogerstanimoto") + print numpy.abs(y1-y2).max() + print numpy.abs(y2-y3).max() + self.failUnless(within_tol(y1, y2, eps)) + self.failUnless(within_tol(y2, y3, eps)) def test_pdist_russellrao_mtica1(self): "Tests russellrao(*,*) with mtica example #1." 
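These mtica-style tests check each boolean metric against a value that can be computed by hand. As a minimal sketch of the arithmetic being verified (with arbitrary example vectors, not the actual mtica data), the Russell-Rao dissimilarity defined earlier in hierarchy.py is simply (n - c_TT) / n:

    import numpy as np

    u = np.array([1, 0, 1, 1, 0], dtype=bool)
    v = np.array([1, 1, 0, 1, 1], dtype=bool)

    ntt = (u & v).sum()                 # c_TT: positions where both are True
    n = len(u)
    print float(n - ntt) / float(n)     # (5 - 2) / 5 = 0.6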
@@ -709,6 +911,43 @@ self.failUnless(numpy.abs(m - (2.0/3.0)) <= 1e-10) self.failUnless(numpy.abs(m2 - (2.0/3.0)) <= 1e-10) + def test_pdist_russellrao_match(self): + "Tests pdist('russellrao') to see if the two implementations match on random double input data." + D = eo['random-bool-data'] + print D.shape, D.dtype + eps = 1e-10 + y1 = pdist(D, "russellrao") + y2 = pdist(D, "test_russellrao") + y3 = pdist(numpy.bool_(D), "test_russellrao") + print numpy.abs(y1-y2).max() + print numpy.abs(y2-y3).max() + self.failUnless(within_tol(y1, y2, eps)) + self.failUnless(within_tol(y2, y3, eps)) + + def test_pdist_sokalmichener_match(self): + "Tests pdist('sokalmichener') to see if the two implementations match on random double input data." + D = eo['random-bool-data'] + print D.shape, D.dtype + eps = 1e-10 + y1 = pdist(D, "sokalmichener") + y2 = pdist(D, "test_sokalmichener") + y3 = pdist(numpy.bool_(D), "test_sokalmichener") + print numpy.abs(y1-y2).max() + print numpy.abs(y2-y3).max() + self.failUnless(within_tol(y1, y2, eps)) + self.failUnless(within_tol(y2, y3, eps)) + + def test_pdist_kulsinski_match(self): + "Tests pdist('kulsinski') to see if the two implementations match on random double input data." + D = eo['random-bool-data'] + print D.shape, D.dtype + eps = 1e-10 + y1 = pdist(D, "kulsinski") + y2 = pdist(D, "test_kulsinski") + y3 = pdist(numpy.bool_(D), "test_kulsinski") + print numpy.abs(y1-y2).max() + self.failUnless(within_tol(y1, y2, eps)) + class TestSquareForm(TestCase): ################### squareform From scipy-svn at scipy.org Tue Jun 3 03:35:11 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Tue, 3 Jun 2008 02:35:11 -0500 (CDT) Subject: [Scipy-svn] r4406 - trunk/scipy/cluster Message-ID: <20080603073511.E672239C766@scipy.org> Author: damian.eads Date: 2008-06-03 02:35:10 -0500 (Tue, 03 Jun 2008) New Revision: 4406 Modified: trunk/scipy/cluster/hierarchy.py Log: Removed unnecessary imports in hierarchy and refactored its code. Wrote more tests. Modified: trunk/scipy/cluster/hierarchy.py =================================================================== --- trunk/scipy/cluster/hierarchy.py 2008-06-03 06:50:49 UTC (rev 4405) +++ trunk/scipy/cluster/hierarchy.py 2008-06-03 07:35:10 UTC (rev 4406) @@ -638,7 +638,7 @@ functions in this library. """ - Z = numpy.asarray(Z) + Z = np.asarray(Z) is_valid_linkage(Z, throw=True, name='Z') @@ -993,6 +993,8 @@ Computes the Canberra distance between two n-vectors u and v, \sum{|u_i-v_i|} / \sum{|u_i|+|v_i}. """ + u = np.asarray(u) + v = np.asarray(v) return abs(u-v).sum() / (abs(u).sum() + abs(v).sum()) def _nbool_correspond_all(u, v): @@ -1518,6 +1520,8 @@ elif metric == 'test_sqeuclidean': if V is None: V = _unbiased_variance(X) + else: + V = np.asarray(V) dm = pdist(X, lambda u, v: seuclidean(u, v, V)) elif metric == 'test_braycurtis': dm = pdist(X, braycurtis) @@ -1525,6 +1529,8 @@ if VI is None: V = np.cov(X.T) VI = np.linalg.inv(V) + else: + VI = np.asarray(VI) [VI] = _copy_arrays_if_base_present([VI]) # (u-v)V^(-1)(u-v)^T dm = pdist(X, (lambda u, v: mahalanobis(u, v, VI))) @@ -1591,6 +1597,8 @@ Also returns the cophenetic distance matrix in condensed form. """ + Z = np.asarray(Z) + nargs = len(args) if nargs < 1: @@ -1648,6 +1656,7 @@ This function behaves similarly to the MATLAB(TM) inconsistent function. """ + Z = np.asarray(Z) Zs = Z.shape is_valid_linkage(Z, throw=True, name='Z') @@ -1680,6 +1689,7 @@ the number of original observations (leaves) in the non-singleton cluster i. 
""" + Z = np.asarray(Z) Zs = Z.shape Zpart = Z[:,0:2] Zd = Z[:,2].reshape(Zs[0], 1) @@ -1701,6 +1711,7 @@ last column removed and the cluster indices converted to use 1..N indexing. """ + Z = np.asarray(Z) is_valid_linkage(Z, throw=True, name='Z') return np.hstack([Z[:,0:2] + 1, Z[:,2]]) @@ -1713,6 +1724,7 @@ if for every cluster s and t joined, the distance between them is no less than the distance between any previously joined clusters. """ + Z = np.asarray(Z) is_valid_linkage(Z, throw=True, name='Z') # We expect the i'th value to be greater than its successor. @@ -1727,6 +1739,7 @@ must be nonnegative. The link counts R[:,2] must be positive and no greater than n-1. """ + R = np.asarray(R) valid = True try: if type(R) is not _array_type: @@ -1786,6 +1799,7 @@ variable. """ + Z = np.asarray(Z) valid = True try: if type(Z) is not _array_type: @@ -1847,6 +1861,7 @@ referencing the offending variable. """ + y = np.asarray(y) valid = True try: if type(y) is not _array_type: @@ -1908,7 +1923,7 @@ the offending variable. """ - + D = np.asarray(D) valid = True try: if type(D) is not _array_type: @@ -1962,6 +1977,7 @@ Returns the number of original observations that correspond to a linkage matrix Z. """ + Z = np.asarray(Z) is_valid_linkage(Z, throw=True, name='Z') return (Z.shape[0] + 1) @@ -1972,6 +1988,7 @@ Returns the number of original observations that correspond to a square, non-condensed distance matrix D. """ + D = np.asarray(D) is_valid_dm(D, tol=np.inf, throw=True, name='D') return D.shape[0] @@ -1982,6 +1999,7 @@ Returns the number of original observations that correspond to a condensed distance matrix Y. """ + Y = np.asarray(Y) is_valid_y(Y, throw=True, name='Y') d = int(np.ceil(np.sqrt(Y.shape[0] * 2))) return d @@ -1996,6 +2014,8 @@ check in algorithms that make extensive use of linkage and distance matrices that must correspond to the same set of original observations. """ + Z = np.asarray(Z) + Y = np.asarray(Y) return numobs_y(Y) == numobs_Z(Z) def fcluster(Z, t, criterion='inconsistent', depth=2, R=None, monocrit=None): @@ -2055,6 +2075,7 @@ cluster(Z, t=3, criterion='maxclust_monocrit', monocrit=MI) """ + Z = np.asarray(Z) is_valid_linkage(Z, throw=True, name='Z') n = Z.shape[0] + 1 @@ -2068,6 +2089,7 @@ if R is None: R = inconsistent(Z, depth) else: + R = np.asarray(R) is_valid_im(R, throw=True, name='R') # Since the C code does not support striding using strides. # The dimensions are used instead. @@ -2137,14 +2159,17 @@ This function is similar to MATLAB(TM) clusterdata function. """ + X = np.asarray(X) if type(X) is not _array_type or len(X.shape) != 2: - raise TypeError('X must be an n by m numpy array.') + raise TypeError('The observation matrix X must be an n by m numpy array.') Y = pdist(X, metric=distance) Z = linkage(Y, method=method) if R is None: R = inconsistent(Z, d=depth) + else: + R = np.asarray(R) T = fcluster(Z, criterion=criterion, depth=depth, R=R, t=t) return T @@ -2155,6 +2180,7 @@ Returns a list of leaf node ids as they appear in the tree from left to right. Z is a linkage matrix. """ + Z = np.asarray(Z) is_valid_linkage(Z, throw=True, name='Z') n = Z.shape[0] + 1 ML = np.zeros((n,), dtype=np.int32) @@ -2593,6 +2619,7 @@ # or results in a crossing, an exception will be thrown. Passing # None orders leaf nodes based on the order they appear in the # pre-order traversal. + Z = np.asarray(Z) is_valid_linkage(Z, throw=True, name='Z') Zs = Z.shape @@ -2956,6 +2983,9 @@ Returns True iff two different cluster assignments T1 and T2 are equivalent. 
T1 and T2 must be arrays of the same size. """ + T1 = np.asarray(T1) + T2 = np.asarray(T2) + if type(T1) is not _array_type: raise TypeError('T1 must be a numpy array.') if type(T2) is not _array_type: @@ -3068,6 +3098,8 @@ i < n, i corresponds to an original observation, otherwise it corresponds to a non-singleton cluster. """ + Z = np.asarray(Z) + T = np.asarray(T) if type(T) != _array_type or T.dtype != np.int: raise TypeError('T must be a one-dimensional numpy array of integers.') is_valid_linkage(Z, throw=True, name='Z') From scipy-svn at scipy.org Tue Jun 3 03:48:56 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Tue, 3 Jun 2008 02:48:56 -0500 (CDT) Subject: [Scipy-svn] r4407 - trunk/scipy/cluster Message-ID: <20080603074856.D8AD639C647@scipy.org> Author: damian.eads Date: 2008-06-03 02:48:54 -0500 (Tue, 03 Jun 2008) New Revision: 4407 Modified: trunk/scipy/cluster/hierarchy.py Log: Convert non-double arrays to double so that hierarchy functions can be called with more input types. Modified: trunk/scipy/cluster/hierarchy.py =================================================================== --- trunk/scipy/cluster/hierarchy.py 2008-06-03 07:35:10 UTC (rev 4406) +++ trunk/scipy/cluster/hierarchy.py 2008-06-03 07:48:54 UTC (rev 4407) @@ -452,7 +452,7 @@ if not isinstance(method, str): raise TypeError("Argument 'method' must be a string.") - y = np.asarray(y) + y = np.asarray(_convert_to_double(y)) s = y.shape if len(s) == 1: @@ -723,7 +723,7 @@ transformation. """ - X = np.asarray(X) + X = _convert_to_double(np.asarray(X)) if not np.issubsctype(X, np.double): raise TypeError('A double array must be passed.') @@ -1612,7 +1612,7 @@ zz = np.zeros((n*(n-1)/2,), dtype=np.double) # Since the C code does not support striding using strides. # The dimensions are used instead. - [Z] = _copy_arrays_if_base_present([Z]) + Z = _convert_to_double(Z) _hierarchy_wrap.cophenetic_distances_wrap(Z, zz, int(n)) if nargs == 1: From scipy-svn at scipy.org Tue Jun 3 04:51:33 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Tue, 3 Jun 2008 03:51:33 -0500 (CDT) Subject: [Scipy-svn] r4408 - trunk/scipy/cluster Message-ID: <20080603085133.5640B39C9E9@scipy.org> Author: damian.eads Date: 2008-06-03 03:51:31 -0500 (Tue, 03 Jun 2008) New Revision: 4408 Modified: trunk/scipy/cluster/hierarchy.py Log: Fixed minor dtype conversion bug in hierarchy.linkage. 
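The one-line fix below swaps the nesting so that np.asarray runs before _convert_to_double. The order matters because the helper (added in r4405) inspects its argument's dtype attribute, which an ndarray has but a plain Python sequence does not. A sketch of the failure mode, assuming that helper:

    import numpy as np

    def _convert_to_double(X):
        # Helper from r4405: assumes X is already an ndarray.
        if X.dtype != np.double:
            X = np.double(X)
        if not X.flags.contiguous:
            X = X.copy()
        return X

    y = [1.0, 2.0, 3.0]   # e.g. a condensed distance matrix as a plain list

    Z = _convert_to_double(np.asarray(y))   # fixed order: works
    # _convert_to_double(y)                 # old order: AttributeError,
    #                                       # a list has no .dtype attribute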
Modified: trunk/scipy/cluster/hierarchy.py =================================================================== --- trunk/scipy/cluster/hierarchy.py 2008-06-03 07:48:54 UTC (rev 4407) +++ trunk/scipy/cluster/hierarchy.py 2008-06-03 08:51:31 UTC (rev 4408) @@ -452,7 +452,7 @@ if not isinstance(method, str): raise TypeError("Argument 'method' must be a string.") - y = np.asarray(_convert_to_double(y)) + y = _convert_to_double(np.asarray(y)) s = y.shape if len(s) == 1: From scipy-svn at scipy.org Tue Jun 3 11:55:08 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Tue, 3 Jun 2008 10:55:08 -0500 (CDT) Subject: [Scipy-svn] r4409 - trunk Message-ID: <20080603155508.E701039C707@scipy.org> Author: jarrod.millman Date: 2008-06-03 10:55:06 -0500 (Tue, 03 Jun 2008) New Revision: 4409 Modified: trunk/INSTALL.txt Log: updated 1.0.5 to 1.1.0 Modified: trunk/INSTALL.txt =================================================================== --- trunk/INSTALL.txt 2008-06-03 08:51:31 UTC (rev 4408) +++ trunk/INSTALL.txt 2008-06-03 15:55:06 UTC (rev 4409) @@ -32,7 +32,7 @@ __ http://www.python.org -2) NumPy__ 1.0.5 or newer +2) NumPy__ 1.1.0 or newer Debian package: python-numpy From scipy-svn at scipy.org Tue Jun 3 13:50:54 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Tue, 3 Jun 2008 12:50:54 -0500 (CDT) Subject: [Scipy-svn] r4410 - trunk/scipy/cluster Message-ID: <20080603175054.D041739C1DE@scipy.org> Author: damian.eads Date: 2008-06-03 12:50:47 -0500 (Tue, 03 Jun 2008) New Revision: 4410 Modified: trunk/scipy/cluster/hierarchy.py Log: Removed more unnecessary imports in hierarchy, namely sys and math. Now uses np.ndarray instead of _array_type for type checking. Modified: trunk/scipy/cluster/hierarchy.py =================================================================== --- trunk/scipy/cluster/hierarchy.py 2008-06-03 15:55:06 UTC (rev 4409) +++ trunk/scipy/cluster/hierarchy.py 2008-06-03 17:50:47 UTC (rev 4410) @@ -175,22 +175,21 @@ """ import numpy as np -import _hierarchy_wrap, types, math, sys +import _hierarchy_wrap, types _cpy_non_euclid_methods = {'single': 0, 'complete': 1, 'average': 2, 'weighted': 6} _cpy_euclid_methods = {'centroid': 3, 'median': 4, 'ward': 5} _cpy_linkage_methods = set(_cpy_non_euclid_methods.keys()).union( set(_cpy_euclid_methods.keys())) -_array_type = np.ndarray try: import warnings def _warning(s): - warnings.warn('scipy-cluster: %s' % s, stacklevel=3) + warnings.warn('scipy.cluster: %s' % s, stacklevel=3) except: def _warning(s): - print ('[WARNING] scipy-cluster: %s' % s) + print ('[WARNING] scipy.cluster: %s' % s) def _unbiased_variance(X): """ @@ -801,7 +800,7 @@ v = np.asarray(v) if p < 1: raise ValueError("p must be at least 1") - return math.pow((abs(u-v)**p).sum(), 1.0/p) + return (abs(u-v)**p).sum() ** (1.0 / p) def euclidean(u, v): """ @@ -1447,7 +1446,7 @@ elif mstr in set(['seuclidean', 'se', 's']): if V is not None: V = np.asarray(V) - if type(V) is not _array_type: + if type(V) != np.ndarray: raise TypeError('Variance vector V must be a numpy array') if V.dtype != np.double: raise TypeError('Variance vector V must contain doubles.') @@ -1485,7 +1484,7 @@ elif mstr in set(['mahalanobis', 'mahal', 'mah']): if VI is not None: VI = _convert_to_double(np.asarray(VI)) - if type(VI) != _array_type: + if type(VI) != np.ndarray: raise TypeError('VI must be a numpy array.') if VI.dtype != np.double: raise TypeError('The array must contain 64-bit floats.') @@ -1742,7 +1741,7 @@ R = np.asarray(R) valid = True try: - if type(R) is not 
_array_type: + if type(R) != np.ndarray: if name: raise TypeError('Variable \'%s\' passed as inconsistency matrix is not a numpy array.' % name) else: @@ -1802,7 +1801,7 @@ Z = np.asarray(Z) valid = True try: - if type(Z) is not _array_type: + if type(Z) != np.ndarray: if name: raise TypeError('\'%s\' passed as a linkage is not a valid array.' % name) else: @@ -1864,7 +1863,7 @@ y = np.asarray(y) valid = True try: - if type(y) is not _array_type: + if type(y) != np.ndarray: if name: raise TypeError('\'%s\' passed as a condensed distance matrix is not a numpy array.' % name) else: @@ -1926,7 +1925,7 @@ D = np.asarray(D) valid = True try: - if type(D) is not _array_type: + if type(D) != np.ndarray: if name: raise TypeError('\'%s\' passed as a distance matrix is not a numpy array.' % name) else: @@ -2161,7 +2160,7 @@ """ X = np.asarray(X) - if type(X) is not _array_type or len(X.shape) != 2: + if type(X) != np.ndarray or len(X.shape) != 2: raise TypeError('The observation matrix X must be an n by m numpy array.') Y = pdist(X, metric=distance) @@ -2986,9 +2985,9 @@ T1 = np.asarray(T1) T2 = np.asarray(T2) - if type(T1) is not _array_type: + if type(T1) != np.ndarray: raise TypeError('T1 must be a numpy array.') - if type(T2) is not _array_type: + if type(T2) != np.ndarray: raise TypeError('T2 must be a numpy array.') T1S = T1.shape @@ -3100,7 +3099,7 @@ """ Z = np.asarray(Z) T = np.asarray(T) - if type(T) != _array_type or T.dtype != np.int: + if type(T) != np.ndarray or T.dtype != np.int: raise TypeError('T must be a one-dimensional numpy array of integers.') is_valid_linkage(Z, throw=True, name='Z') if len(T) != Z.shape[0] + 1: From scipy-svn at scipy.org Tue Jun 3 13:53:26 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Tue, 3 Jun 2008 12:53:26 -0500 (CDT) Subject: [Scipy-svn] r4411 - trunk/scipy/cluster Message-ID: <20080603175326.E425239C1DE@scipy.org> Author: damian.eads Date: 2008-06-03 12:53:23 -0500 (Tue, 03 Jun 2008) New Revision: 4411 Modified: trunk/scipy/cluster/hierarchy.py Log: Changed allocation of result arrays in hierarchy so np.int is used instead of np.int32. Will assume np.int corresponds to the int data type in C on the host machine. Modified: trunk/scipy/cluster/hierarchy.py =================================================================== --- trunk/scipy/cluster/hierarchy.py 2008-06-03 17:50:47 UTC (rev 4410) +++ trunk/scipy/cluster/hierarchy.py 2008-06-03 17:53:23 UTC (rev 4411) @@ -2078,7 +2078,7 @@ is_valid_linkage(Z, throw=True, name='Z') n = Z.shape[0] + 1 - T = np.zeros((n,), dtype=np.int32) + T = np.zeros((n,), dtype=np.int) # Since the C code does not support striding using strides. # The dimensions are used instead. 
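One caveat on the assumption stated in the log: numpy's np.int resolves to the platform default integer (C long), whereas np.intc is the alias guaranteed to match C int; the two agree on 32-bit platforms but differ on LP64 systems, where long is 64 bits wide. A small illustrative check, not part of the patch:

    import numpy as np

    # np.intc maps to C int by definition; np.dtype(np.int) maps to C long,
    # so the sizes only match where sizeof(long) == sizeof(int).
    print np.dtype(np.intc).itemsize   # C int: 4 on common platforms
    print np.dtype(np.int).itemsize    # C long: 8 on LP64, 4 on 32-bit systems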
@@ -2182,7 +2182,7 @@ Z = np.asarray(Z) is_valid_linkage(Z, throw=True, name='Z') n = Z.shape[0] + 1 - ML = np.zeros((n,), dtype=np.int32) + ML = np.zeros((n,), dtype=np.int) [Z] = _copy_arrays_if_base_present([Z]) _hierarchy_wrap.prelist_wrap(Z, ML, int(n)) return ML @@ -3107,8 +3107,8 @@ Cl = np.unique(T) kk = len(Cl) - L = np.zeros((kk,), dtype=np.int32) - M = np.zeros((kk,), dtype=np.int32) + L = np.zeros((kk,), dtype=np.int) + M = np.zeros((kk,), dtype=np.int) n = Z.shape[0] + 1 [Z, T] = _copy_arrays_if_base_present([Z, T]) s = _hierarchy_wrap.leaders_wrap(Z, T, L, M, int(kk), int(n)) From scipy-svn at scipy.org Wed Jun 4 04:08:32 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Wed, 4 Jun 2008 03:08:32 -0500 (CDT) Subject: [Scipy-svn] r4412 - trunk/scipy/cluster Message-ID: <20080604080832.8EF3B39C18C@scipy.org> Author: damian.eads Date: 2008-06-04 03:08:30 -0500 (Wed, 04 Jun 2008) New Revision: 4412 Modified: trunk/scipy/cluster/hierarchy.py Log: Fixed issue with cosine. Modified: trunk/scipy/cluster/hierarchy.py =================================================================== --- trunk/scipy/cluster/hierarchy.py 2008-06-03 17:53:23 UTC (rev 4411) +++ trunk/scipy/cluster/hierarchy.py 2008-06-04 08:08:30 UTC (rev 4412) @@ -1463,18 +1463,18 @@ # Find out: Is there a dot subtraction operator so I can # subtract matrices in a similar way to multiplying them? # Need to get rid of as much unnecessary C code as possible. - elif mstr in set(['cosine_old', 'cos_old']): + elif mstr in set(['cosine', 'cos']): norms = np.sqrt(np.sum(X * X, axis=1)) _hierarchy_wrap.pdist_cosine_wrap(_convert_to_double(X), dm, norms) - elif mstr in set(['cosine', 'cos']): + elif mstr in set(['old_cosine', 'old_cos']): norms = np.sqrt(np.sum(X * X, axis=1)) nV = norms.reshape(m, 1) # The numerator u * v nm = np.dot(X, X.T) # The denom. ||u||*||v|| de = np.dot(nV, nV.T); - dm = 1 - (nm / de) - dm[xrange(0,m),xrange(0,m)] = 0 + dm = 1.0 - (nm / de) + dm[xrange(0,m),xrange(0,m)] = 0.0 dm = squareform(dm) elif mstr in set(['correlation', 'co']): X2 = X - X.mean(1)[:,np.newaxis] From scipy-svn at scipy.org Wed Jun 4 12:21:26 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Wed, 4 Jun 2008 11:21:26 -0500 (CDT) Subject: [Scipy-svn] r4413 - trunk/scipy/stats Message-ID: <20080604162126.732BCC7C01D@scipy.org> Author: pierregm Date: 2008-06-04 11:21:10 -0500 (Wed, 04 Jun 2008) New Revision: 4413 Modified: trunk/scipy/stats/mstats.py Log: * Fixed some documentation * Fixed theilslopes Modified: trunk/scipy/stats/mstats.py =================================================================== --- trunk/scipy/stats/mstats.py 2008-06-04 08:08:30 UTC (rev 4412) +++ trunk/scipy/stats/mstats.py 2008-06-04 16:21:10 UTC (rev 4413) @@ -518,8 +518,8 @@ def kendalltau(x, y, use_ties=True, use_missing=False): """Computes Kendall's rank correlation tau on two variables *x* and *y*. -Parameters ----------- + Parameters + ---------- xdata: sequence First data list (for example, time). ydata: sequence @@ -529,6 +529,13 @@ use_missing: {False, True} optional Whether missing data should be allocated a rank of 0 (False) or the average rank (True) + + Returns + ------- + tau : float + Kendall tau + prob : float + Approximate 2-side p-value. """ (x, y, n) = _chk_size(x, y) (x, y) = (x.flatten(), y.flatten()) @@ -724,6 +731,17 @@ Independent variable. If None, use arange(len(y)) instead. alpha : float Confidence degree. 
+ + Returns + ------- + medslope : float + Theil slope + medintercept : float + Intercept of the Theil line, as median(y)-medslope*median(x) + lo_slope : float + Lower bound of the confidence interval on medslope + up_slope : float + Upper bound of the confidence interval on medslope """ y = ma.asarray(y).flatten() @@ -755,8 +773,8 @@ sigsq -= np.sum(v*k*(k-1)*(2*k+5) for (k,v) in yties.iteritems()) sigma = np.sqrt(sigsq) - Ru = np.round((nt - z*sigma)/2. + 1) - Rl = np.round((nt + z*sigma)/2.) + Ru = min(np.round((nt - z*sigma)/2. + 1), len(slopes)-1) + Rl = max(np.round((nt + z*sigma)/2.), 0) delta = slopes[[Rl,Ru]] return medslope, medinter, delta[0], delta[1] From scipy-svn at scipy.org Fri Jun 6 02:29:52 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Fri, 6 Jun 2008 01:29:52 -0500 (CDT) Subject: [Scipy-svn] r4414 - trunk/scipy/sparse/linalg/eigen/lobpcg Message-ID: <20080606062952.A759839C1AE@scipy.org> Author: wnbell Date: 2008-06-06 01:29:48 -0500 (Fri, 06 Jun 2008) New Revision: 4414 Modified: trunk/scipy/sparse/linalg/eigen/lobpcg/lobpcg.py Log: minor cleanup of lobpcg Modified: trunk/scipy/sparse/linalg/eigen/lobpcg/lobpcg.py =================================================================== --- trunk/scipy/sparse/linalg/eigen/lobpcg/lobpcg.py 2008-06-04 16:21:10 UTC (rev 4413) +++ trunk/scipy/sparse/linalg/eigen/lobpcg/lobpcg.py 2008-06-06 06:29:48 UTC (rev 4414) @@ -10,13 +10,11 @@ Examples in tests directory contributed by Nils Wagner. """ -import types from warnings import warn -import numpy as nm -import scipy as sc -import scipy.sparse as sp -import scipy.io as io +import numpy as np +import scipy as sp + from scipy.sparse.linalg import aslinearoperator, LinearOperator ## try: @@ -28,7 +26,7 @@ import scipy.linalg as sla import scipy.lib.lapack as ll if select is None: - if nm.iscomplexobj( mtxA ): + if np.iscomplexobj( mtxA ): if mtxB is None: fun = ll.get_lapack_funcs( ['heev'], arrays = (mtxA,) )[0] else: @@ -51,7 +49,7 @@ else: out = sla.eig( mtxA, mtxB, right = eigenvectors ) w = out[0] - ii = nm.argsort( w ) + ii = np.argsort( w ) w = w[slice( *select )] if eigenvectors: v = out[1][:,ii] @@ -66,7 +64,8 @@ raw_input() def save( ar, fileName ): - io.write_array( fileName, ar, precision = 8 ) + from scipy.io import write_array + write_array( fileName, ar, precision = 8 ) ## # 21.05.2007, c @@ -78,7 +77,7 @@ if ar.ndim == 2: return ar else: # Assume 1! - aux = nm.array( ar, copy = False ) + aux = np.array( ar, copy = False ) aux.shape = (ar.shape[0], 1) return aux @@ -111,10 +110,10 @@ def applyConstraints( blockVectorV, factYBY, blockVectorBY, blockVectorY ): """Internal. Changes blockVectorV in place.""" - gramYBV = sc.dot( blockVectorBY.T, blockVectorV ) + gramYBV = sp.dot( blockVectorBY.T, blockVectorV ) import scipy.linalg as sla tmp = sla.cho_solve( factYBY, gramYBV ) - blockVectorV -= sc.dot( blockVectorY, tmp ) + blockVectorV -= sp.dot( blockVectorY, tmp ) def b_orthonormalize( B, blockVectorV, @@ -126,13 +125,13 @@ blockVectorBV = B( blockVectorV ) else: blockVectorBV = blockVectorV # Shared data!!! - gramVBV = sc.dot( blockVectorV.T, blockVectorBV ) + gramVBV = sp.dot( blockVectorV.T, blockVectorBV ) gramVBV = sla.cholesky( gramVBV ) sla.inv( gramVBV, overwrite_a = True ) # gramVBV is now R^{-1}. 
- blockVectorV = sc.dot( blockVectorV, gramVBV ) + blockVectorV = sp.dot( blockVectorV, gramVBV ) if B is not None: - blockVectorBV = sc.dot( blockVectorBV, gramVBV ) + blockVectorBV = sp.dot( blockVectorBV, gramVBV ) if retInvR: return blockVectorV, blockVectorBV, gramVBV @@ -236,10 +235,10 @@ else: lohi = (1, sizeX) - A_dense = A(nm.eye(n)) + A_dense = A(np.eye(n)) if B is not None: - B_dense = B(nm.eye(n)) + B_dense = B(np.eye(n)) _lambda, eigBlockVector = symeig(A_dense, B_dense, select=lohi ) else: _lambda, eigBlockVector = symeig(A_dense, select=lohi ) @@ -248,7 +247,7 @@ if residualTolerance is None: - residualTolerance = nm.sqrt( 1e-15 ) * n + residualTolerance = np.sqrt( 1e-15 ) * n maxIterations = min( n, maxIterations ) @@ -283,7 +282,7 @@ blockVectorBY = blockVectorY # gramYBY is a dense array. - gramYBY = sc.dot( blockVectorY.T, blockVectorBY ) + gramYBY = sp.dot( blockVectorY.T, blockVectorBY ) try: # gramYBY is a Cholesky factor from now on... gramYBY = sla.cho_factor( gramYBY ) @@ -299,32 +298,32 @@ ## # Compute the initial Ritz vectors: solve the eigenproblem. blockVectorAX = A( blockVectorX ) - gramXAX = sc.dot( blockVectorX.T, blockVectorAX ) + gramXAX = sp.dot( blockVectorX.T, blockVectorAX ) # gramXBX is X^T * X. - gramXBX = sc.dot( blockVectorX.T, blockVectorX ) + gramXBX = sp.dot( blockVectorX.T, blockVectorX ) _lambda, eigBlockVector = symeig( gramXAX ) - ii = nm.argsort( _lambda )[:sizeX] + ii = np.argsort( _lambda )[:sizeX] if largest: ii = ii[::-1] _lambda = _lambda[ii] - eigBlockVector = nm.asarray( eigBlockVector[:,ii] ) - blockVectorX = sc.dot( blockVectorX, eigBlockVector ) - blockVectorAX = sc.dot( blockVectorAX, eigBlockVector ) + eigBlockVector = np.asarray( eigBlockVector[:,ii] ) + blockVectorX = sp.dot( blockVectorX, eigBlockVector ) + blockVectorAX = sp.dot( blockVectorAX, eigBlockVector ) if B is not None: - blockVectorBX = sc.dot( blockVectorBX, eigBlockVector ) + blockVectorBX = sp.dot( blockVectorBX, eigBlockVector ) ## # Active index set. - activeMask = nm.ones( (sizeX,), dtype = nm.bool ) + activeMask = np.ones( (sizeX,), dtype = np.bool ) lambdaHistory = [_lambda] residualNormsHistory = [] previousBlockSize = sizeX - ident = nm.eye( sizeX, dtype = A.dtype ) - ident0 = nm.eye( sizeX, dtype = A.dtype ) + ident = np.eye( sizeX, dtype = A.dtype ) + ident0 = np.eye( sizeX, dtype = A.dtype ) ## # Main iteration loop. @@ -332,15 +331,15 @@ if verbosityLevel > 0: print 'iteration %d' % iterationNumber - aux = blockVectorBX * _lambda[nm.newaxis,:] + aux = blockVectorBX * _lambda[np.newaxis,:] blockVectorR = blockVectorAX - aux - aux = nm.sum( blockVectorR.conjugate() * blockVectorR, 0 ) - residualNorms = nm.sqrt( aux ) + aux = np.sum( blockVectorR.conjugate() * blockVectorR, 0 ) + residualNorms = np.sqrt( aux ) residualNormsHistory.append( residualNorms ) - ii = nm.where( residualNorms > residualTolerance, True, False ) + ii = np.where( residualNorms > residualTolerance, True, False ) activeMask = activeMask & ii if verbosityLevel > 2: print activeMask @@ -348,7 +347,7 @@ currentBlockSize = activeMask.sum() if currentBlockSize != previousBlockSize: previousBlockSize = currentBlockSize - ident = nm.eye( currentBlockSize, dtype = A.dtype ) + ident = np.eye( currentBlockSize, dtype = A.dtype ) if currentBlockSize == 0: failureFlag = False # All eigenpairs converged. 
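The residual bookkeeping renamed above is the solver's stopping test: for approximate eigenpairs (lambda_i, x_i) of A x = lambda B x, the block residual is R = A*X - B*X*Lambda, and a pair counts as converged once its column norm drops below residualTolerance. The same pattern in a standalone sketch (toy diagonal A with B = I; the names are illustrative, not from the patch):

    import numpy as np

    n = 50
    A = np.diag(np.arange(1.0, n + 1))          # toy symmetric operator
    lam, X = np.linalg.eigh(A)
    lam, X = lam[:3], X[:, :3]                  # keep three exact eigenpairs
    R = np.dot(A, X) - X * lam[np.newaxis, :]   # blockVectorR, computed as in the loop
    residualNorms = np.sqrt(np.sum(R.conjugate() * R, axis=0))
    # exact pairs give norms near machine epsilon, so all three pass the test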
@@ -390,44 +389,44 @@ aux = b_orthonormalize( B, activeBlockVectorP, activeBlockVectorBP, retInvR = True ) activeBlockVectorP, activeBlockVectorBP, invR = aux - activeBlockVectorAP = sc.dot( activeBlockVectorAP, invR ) + activeBlockVectorAP = sp.dot( activeBlockVectorAP, invR ) ## # Perform the Rayleigh Ritz Procedure: # Compute symmetric Gram matrices: - xaw = sc.dot( blockVectorX.T, activeBlockVectorAR ) - waw = sc.dot( activeBlockVectorR.T, activeBlockVectorAR ) - xbw = sc.dot( blockVectorX.T, activeBlockVectorBR ) + xaw = sp.dot( blockVectorX.T, activeBlockVectorAR ) + waw = sp.dot( activeBlockVectorR.T, activeBlockVectorAR ) + xbw = sp.dot( blockVectorX.T, activeBlockVectorBR ) if iterationNumber > 0: - xap = sc.dot( blockVectorX.T, activeBlockVectorAP ) - wap = sc.dot( activeBlockVectorR.T, activeBlockVectorAP ) - pap = sc.dot( activeBlockVectorP.T, activeBlockVectorAP ) - xbp = sc.dot( blockVectorX.T, activeBlockVectorBP ) - wbp = sc.dot( activeBlockVectorR.T, activeBlockVectorBP ) + xap = sp.dot( blockVectorX.T, activeBlockVectorAP ) + wap = sp.dot( activeBlockVectorR.T, activeBlockVectorAP ) + pap = sp.dot( activeBlockVectorP.T, activeBlockVectorAP ) + xbp = sp.dot( blockVectorX.T, activeBlockVectorBP ) + wbp = sp.dot( activeBlockVectorR.T, activeBlockVectorBP ) - gramA = nm.bmat( [[nm.diag( _lambda ), xaw, xap], + gramA = np.bmat( [[np.diag( _lambda ), xaw, xap], [ xaw.T, waw, wap], [ xap.T, wap.T, pap]] ) - gramB = nm.bmat( [[ident0, xbw, xbp], + gramB = np.bmat( [[ident0, xbw, xbp], [ xbw.T, ident, wbp], [ xbp.T, wbp.T, ident]] ) else: - gramA = nm.bmat( [[nm.diag( _lambda ), xaw], + gramA = np.bmat( [[np.diag( _lambda ), xaw], [ xaw.T, waw]] ) - gramB = nm.bmat( [[ident0, xbw], + gramB = np.bmat( [[ident0, xbw], [ xbw.T, ident]] ) try: - assert nm.allclose( gramA.T, gramA ) + assert np.allclose( gramA.T, gramA ) except: print gramA.T - gramA raise try: - assert nm.allclose( gramB.T, gramB ) + assert np.allclose( gramB.T, gramB ) except: print gramB.T - gramB raise @@ -440,23 +439,23 @@ # Solve the generalized eigenvalue problem. # _lambda, eigBlockVector = la.eig( gramA, gramB ) _lambda, eigBlockVector = symeig( gramA, gramB ) - ii = nm.argsort( _lambda )[:sizeX] + ii = np.argsort( _lambda )[:sizeX] if largest: ii = ii[::-1] if verbosityLevel > 10: print ii - _lambda = _lambda[ii].astype( nm.float64 ) - eigBlockVector = nm.asarray( eigBlockVector[:,ii].astype( nm.float64 ) ) + _lambda = _lambda[ii].astype( np.float64 ) + eigBlockVector = np.asarray( eigBlockVector[:,ii].astype( np.float64 ) ) lambdaHistory.append( _lambda ) if verbosityLevel > 10: print 'lambda:', _lambda ## # Normalize eigenvectors! 
-## aux = nm.sum( eigBlockVector.conjugate() * eigBlockVector, 0 ) -## eigVecNorms = nm.sqrt( aux ) -## eigBlockVector = eigBlockVector / eigVecNorms[nm.newaxis,:] +## aux = np.sum( eigBlockVector.conjugate() * eigBlockVector, 0 ) +## eigVecNorms = np.sqrt( aux ) +## eigBlockVector = eigBlockVector / eigVecNorms[np.newaxis,:] # eigBlockVector, aux = b_orthonormalize( B, eigBlockVector ) if verbosityLevel > 10: @@ -470,21 +469,21 @@ eigBlockVectorR = eigBlockVector[sizeX:sizeX+currentBlockSize] eigBlockVectorP = eigBlockVector[sizeX+currentBlockSize:] - pp = sc.dot( activeBlockVectorR, eigBlockVectorR ) - pp += sc.dot( activeBlockVectorP, eigBlockVectorP ) + pp = sp.dot( activeBlockVectorR, eigBlockVectorR ) + pp += sp.dot( activeBlockVectorP, eigBlockVectorP ) - app = sc.dot( activeBlockVectorAR, eigBlockVectorR ) - app += sc.dot( activeBlockVectorAP, eigBlockVectorP ) + app = sp.dot( activeBlockVectorAR, eigBlockVectorR ) + app += sp.dot( activeBlockVectorAP, eigBlockVectorP ) - bpp = sc.dot( activeBlockVectorBR, eigBlockVectorR ) - bpp += sc.dot( activeBlockVectorBP, eigBlockVectorP ) + bpp = sp.dot( activeBlockVectorBR, eigBlockVectorR ) + bpp += sp.dot( activeBlockVectorBP, eigBlockVectorP ) else: eigBlockVectorX = eigBlockVector[:sizeX] eigBlockVectorR = eigBlockVector[sizeX:] - pp = sc.dot( activeBlockVectorR, eigBlockVectorR ) - app = sc.dot( activeBlockVectorAR, eigBlockVectorR ) - bpp = sc.dot( activeBlockVectorBR, eigBlockVectorR ) + pp = sp.dot( activeBlockVectorR, eigBlockVectorR ) + app = sp.dot( activeBlockVectorAR, eigBlockVectorR ) + bpp = sp.dot( activeBlockVectorBR, eigBlockVectorR ) if verbosityLevel > 10: print pp @@ -492,17 +491,17 @@ print bpp pause() - blockVectorX = sc.dot( blockVectorX, eigBlockVectorX ) + pp - blockVectorAX = sc.dot( blockVectorAX, eigBlockVectorX ) + app - blockVectorBX = sc.dot( blockVectorBX, eigBlockVectorX ) + bpp + blockVectorX = sp.dot( blockVectorX, eigBlockVectorX ) + pp + blockVectorAX = sp.dot( blockVectorAX, eigBlockVectorX ) + app + blockVectorBX = sp.dot( blockVectorBX, eigBlockVectorX ) + bpp blockVectorP, blockVectorAP, blockVectorBP = pp, app, bpp - aux = blockVectorBX * _lambda[nm.newaxis,:] + aux = blockVectorBX * _lambda[np.newaxis,:] blockVectorR = blockVectorAX - aux - aux = nm.sum( blockVectorR.conjugate() * blockVectorR, 0 ) - residualNorms = nm.sqrt( aux ) + aux = np.sum( blockVectorR.conjugate() * blockVectorR, 0 ) + residualNorms = np.sqrt( aux ) if verbosityLevel > 0: @@ -522,31 +521,31 @@ ########################################################################### if __name__ == '__main__': - from scipy.sparse import spdiags, speye + from scipy.sparse import spdiags, speye, issparse import time ## def B( vec ): ## return vec n = 100 - vals = [nm.arange( n, dtype = nm.float64 ) + 1] + vals = [np.arange( n, dtype = np.float64 ) + 1] A = spdiags( vals, 0, n, n ) B = speye( n, n ) # B[0,0] = 0 - B = nm.eye( n, n ) - Y = nm.eye( n, 3 ) + B = np.eye( n, n ) + Y = np.eye( n, 3 ) -# X = sc.rand( n, 3 ) +# X = sp.rand( n, 3 ) xfile = {100 : 'X.txt', 1000 : 'X2.txt', 10000 : 'X3.txt'} - X = nm.fromfile( xfile[n], dtype = nm.float64, sep = ' ' ) + X = np.fromfile( xfile[n], dtype = np.float64, sep = ' ' ) X.shape = (n, 3) ivals = [1./vals[0]] def precond( x ): invA = spdiags( ivals, 0, n, n ) y = invA * x - if sp.issparse( y ): + if issparse( y ): y = y.toarray() return as2d( y ) From scipy-svn at scipy.org Fri Jun 6 02:54:48 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Fri, 6 Jun 2008 01:54:48 -0500 (CDT) 
Subject: [Scipy-svn] r4415 - in trunk/scipy/sparse/linalg/eigen: arpack lobpcg lobpcg/tests Message-ID: <20080606065448.1E4BA39C1AE@scipy.org> Author: wnbell Date: 2008-06-06 01:54:44 -0500 (Fri, 06 Jun 2008) New Revision: 4415 Modified: trunk/scipy/sparse/linalg/eigen/arpack/arpack.py trunk/scipy/sparse/linalg/eigen/lobpcg/lobpcg.py trunk/scipy/sparse/linalg/eigen/lobpcg/tests/test_lobpcg.py Log: renamed lobpcg parameters to better conform to other iterative methods Modified: trunk/scipy/sparse/linalg/eigen/arpack/arpack.py =================================================================== --- trunk/scipy/sparse/linalg/eigen/arpack/arpack.py 2008-06-06 06:29:48 UTC (rev 4414) +++ trunk/scipy/sparse/linalg/eigen/arpack/arpack.py 2008-06-06 06:54:44 UTC (rev 4415) @@ -62,7 +62,7 @@ Parameters ---------- - A : A : matrix, array, or object with matvec(x) method + A : matrix, array, or object with matvec(x) method An N x N matrix, array, or an object with matvec(x) method to perform the matrix vector product A * x. The sparse matrix formats in scipy.sparse are appropriate for A. @@ -76,8 +76,8 @@ Array of k eigenvalues v : array - An array of k eigenvectors - The v[i] is the eigenvector corresponding to the eigenvector w[i] + An array of k eigenvectors + The v[i] is the eigenvector corresponding to the eigenvector w[i] Other Parameters ---------------- Modified: trunk/scipy/sparse/linalg/eigen/lobpcg/lobpcg.py =================================================================== --- trunk/scipy/sparse/linalg/eigen/lobpcg/lobpcg.py 2008-06-06 06:29:48 UTC (rev 4414) +++ trunk/scipy/sparse/linalg/eigen/lobpcg/lobpcg.py 2008-06-06 06:54:44 UTC (rev 4415) @@ -17,6 +17,8 @@ from scipy.sparse.linalg import aslinearoperator, LinearOperator +__all__ = ['lobpcg'] + ## try: ## from symeig import symeig ## except: @@ -138,9 +140,9 @@ else: return blockVectorV, blockVectorBV -def lobpcg( blockVectorX, A, - B = None, M = None, blockVectorY = None, - residualTolerance = None, maxIterations = 20, +def lobpcg( A, X, + B=None, M=None, Y=None, + tol= None, maxiter=20, largest = True, verbosityLevel = 0, retLambdaHistory = False, retResidualNormsHistory = False ): """Solve symmetric partial eigenproblems with optional preconditioning @@ -148,23 +150,24 @@ This function implements the Locally Optimal Block Preconditioned Conjugate Gradient Method (LOBPCG). - TODO write in terms of Ax=lambda B x - + Parameters ---------- - blockVectorX : array_like - initial approximation to eigenvectors shape=(n,blockSize) - A : {dense matrix, sparse matrix, LinearOperator} - the linear operator of the problem, usually a sparse matrix - often called the "stiffness matrix" + A : {sparse matrix, dense matrix, LinearOperator} + The symmetric linear operator of the problem, usually a + sparse matrix. Often called the "stiffness matrix". + X : array_like + Initial approximation to the k eigenvectors. If A has + shape=(n,n) then X should have shape shape=(n,k). Returns ------- - (lambda,blockVectorV) : tuple of arrays - blockVectorX and lambda are computed blockSize eigenpairs, where - blockSize=size(blockVectorX,2) for the initial guess blockVectorX - if it is full rank. + w : array + Array of k eigenvalues + v : array + An array of k eigenvectors. V has the same shape as X. 
+ Optional Parameters ------------------- B : {dense matrix, sparse matrix, LinearOperator} @@ -174,18 +177,19 @@ M : {dense matrix, sparse matrix, LinearOperator} preconditioner to A; by default M = Identity M should approximate the inverse of A - blockVectorY : array_like + Y : array_like n-by-sizeY matrix of constraints, sizeY < n The iterations will be performed in the B-orthogonal complement - of the column-space of blockVectorY. blockVectorY must be full rank. + of the column-space of Y. Y must be full rank. Other Parameters ---------------- - residualTolerance : scalar - solver tolerance. default: residualTolerance=n*sqrt(eps) - maxIterations: integer + tol : scalar + Solver tolerance (stopping criterion) + by default: tol=n*sqrt(eps) + maxiter: integer maximum number of iterations - by default: maxIterations=min(n,20) + by default: maxiter=min(n,20) largest : boolean when True, solve for the largest eigenvalues, otherwise the smallest verbosityLevel : integer @@ -200,12 +204,17 @@ ----- If both retLambdaHistory and retResidualNormsHistory are True, the return tuple has the following format: - (lambda, blockVectorV, lambda history, residual norms history) + (lambda, V, lambda history, residual norms history) """ failureFlag = True import scipy.linalg as sla + blockVectorX = X + blockVectorY = Y + residualTolerance = tol + maxIterations = maxiter + if blockVectorY is not None: sizeY = blockVectorY.shape[1] else: @@ -213,11 +222,11 @@ # Block size. if len(blockVectorX.shape) != 2: - raise ValueError('expected rank-2 array for argument blockVectorX') + raise ValueError('expected rank-2 array for argument X') n, sizeX = blockVectorX.shape if sizeX > n: - raise ValueError('blockVectorX column dimension exceeds the row dimension') + raise ValueError('X column dimension exceeds the row dimension') A = makeOperator(A, (n,n)) B = makeOperator(B, (n,n)) Modified: trunk/scipy/sparse/linalg/eigen/lobpcg/tests/test_lobpcg.py =================================================================== --- trunk/scipy/sparse/linalg/eigen/lobpcg/tests/test_lobpcg.py 2008-06-06 06:29:48 UTC (rev 4414) +++ trunk/scipy/sparse/linalg/eigen/lobpcg/tests/test_lobpcg.py 2008-06-06 06:54:44 UTC (rev 4415) @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env pytho n """ Test functions for the sparse.linalg.eigen.lobpcg module """ @@ -8,7 +8,7 @@ from scipy import array, arange, ones, sort, cos, pi, rand, \ set_printoptions, r_, diag, linalg from scipy.linalg import eig -from scipy.sparse.linalg.eigen import lobpcg +from scipy.sparse.linalg.eigen.lobpcg import lobpcg set_printoptions(precision=3,linewidth=90) @@ -47,7 +47,7 @@ V = rand(n,m) X = linalg.orth(V) - eigs,vecs = lobpcg.lobpcg(X,A,B,residualTolerance=1e-5, maxIterations=30) + eigs,vecs = lobpcg(A, X, B=B, tol=1e-5, maxiter=30) eigs.sort() #w,v = symeig(A,B) From scipy-svn at scipy.org Sat Jun 7 04:13:06 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Sat, 7 Jun 2008 03:13:06 -0500 (CDT) Subject: [Scipy-svn] r4416 - trunk/scipy/sparse/sparsetools Message-ID: <20080607081306.9875E39C232@scipy.org> Author: wnbell Date: 2008-06-07 03:13:02 -0500 (Sat, 07 Jun 2008) New Revision: 4416 Modified: trunk/scipy/sparse/sparsetools/coo.h Log: added coo_matvec Modified: trunk/scipy/sparse/sparsetools/coo.h =================================================================== --- trunk/scipy/sparse/sparsetools/coo.h 2008-06-06 06:54:44 UTC (rev 4415) +++ trunk/scipy/sparse/sparsetools/coo.h 2008-06-07 08:13:02 UTC (rev 4416) @@ -116,5 +116,37 @@ } +/* + * 
Compute Y += A*X for COO matrix A and dense vectors X,Y + * + * + * Input Arguments: + * I nnz - number of nonzeros in A + * I Ai[nnz] - row indices + * I Aj[nnz] - column indices + * T Ax[nnz] - nonzero values + * T Xx[n_col] - input vector + * + * Output Arguments: + * T Yx[n_row] - output vector + * + * Notes: + * Output array Yx must be preallocated + * + * Complexity: Linear. Specifically O(nnz(A)) + * + */ +template +void coo_matvec(const I nnz, + const I Ai[], + const I Aj[], + const T Ax[], + const T Xx[], + T Yx[]) +{ + for(I n = 0; n < nnz; n++){ + Yx[Ai[n]] += Ax[n] * Xx[Aj[n]]; + } +} #endif From scipy-svn at scipy.org Mon Jun 9 01:55:52 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Mon, 9 Jun 2008 00:55:52 -0500 (CDT) Subject: [Scipy-svn] r4417 - in trunk/scipy/cluster: . src tests Message-ID: <20080609055552.97E9339C55F@scipy.org> Author: damian.eads Date: 2008-06-09 00:55:44 -0500 (Mon, 09 Jun 2008) New Revision: 4417 Added: trunk/scipy/cluster/distance.py trunk/scipy/cluster/src/common.h trunk/scipy/cluster/src/distance.c trunk/scipy/cluster/src/distance.h trunk/scipy/cluster/src/distance_wrap.c Modified: trunk/scipy/cluster/__init__.py trunk/scipy/cluster/hierarchy.py trunk/scipy/cluster/setup.py trunk/scipy/cluster/src/hierarchy.c trunk/scipy/cluster/src/hierarchy.h trunk/scipy/cluster/src/hierarchy_wrap.c trunk/scipy/cluster/tests/test_hierarchy.py Log: Moved distance functions to new module. Modified: trunk/scipy/cluster/__init__.py =================================================================== --- trunk/scipy/cluster/__init__.py 2008-06-07 08:13:02 UTC (rev 4416) +++ trunk/scipy/cluster/__init__.py 2008-06-09 05:55:44 UTC (rev 4417) @@ -4,7 +4,7 @@ from info import __doc__ -__all__ = ['vq', 'hierarchy'] +__all__ = ['vq', 'hierarchy', 'distance'] import vq, hierarchy from scipy.testing.pkgtester import Tester Added: trunk/scipy/cluster/distance.py =================================================================== --- trunk/scipy/cluster/distance.py 2008-06-07 08:13:02 UTC (rev 4416) +++ trunk/scipy/cluster/distance.py 2008-06-09 05:55:44 UTC (rev 4417) @@ -0,0 +1,839 @@ +""" +Distance matrix computation from a collection of raw observation vectors + + pdist computes distances between each observation pair. + +Distance functions between two vectors u and v + + braycurtis the Bray-Curtis distance. + canberra the Canberra distance. + chebyshev the Chebyshev distance. + cityblock the Manhattan distance. + correlation the Correlation distance. + cosine the Cosine distance. + dice the Dice dissimilarity (boolean). + euclidean the Euclidean distance. + hamming the Hamming distance (boolean). + jaccard the Jaccard distance (boolean). + kulsinski the Kulsinski distance (boolean). + mahalanobis the Mahalanobis distance. + matching the matching dissimilarity (boolean). + minkowski the Minkowski distance. + rogerstanimoto the Rogers-Tanimoto dissimilarity (boolean). + russellrao the Russell-Rao dissimilarity (boolean). + seuclidean the normalized Euclidean distance. + sokalmichener the Sokal-Michener dissimilarity (boolean). + sokalsneath the Sokal-Sneath dissimilarity (boolean). + sqeuclidean the squared Euclidean distance. + yule the Yule dissimilarity (boolean). + +Copyright (C) Damian Eads, 2007-2008. New BSD License. + +""" + +import numpy as np +import _distance_wrap +import types + +def _copy_array_if_base_present(a): + """ + Copies the array if its base points to a parent array. 
+ """ + if a.base is not None: + return a.copy() + elif np.issubsctype(a, np.float32): + return array(a, dtype=np.double) + else: + return a + +def _copy_arrays_if_base_present(T): + """ + Accepts a tuple of arrays T. Copies the array T[i] if its base array + points to an actual array. Otherwise, the reference is just copied. + This is useful if the arrays are being passed to a C function that + does not do proper striding. + """ + l = [_copy_array_if_base_present(a) for a in T] + return l + +def _convert_to_bool(X): + if X.dtype != np.bool: + X = np.bool_(X) + if not X.flags.contiguous: + X = X.copy() + return X + +def _convert_to_double(X): + if X.dtype != np.double: + X = np.double(X) + if not X.flags.contiguous: + X = X.copy() + return X + +def minkowski(u, v, p): + """ + d = minkowski(u, v, p) + + Returns the Minkowski distance between two vectors u and v, + + ||u-v||_p = (\sum {|u_i - v_i|^p})^(1/p). + """ + u = np.asarray(u) + v = np.asarray(v) + if p < 1: + raise ValueError("p must be at least 1") + return (abs(u-v)**p).sum() ** (1.0 / p) + +def euclidean(u, v): + """ + d = euclidean(u, v) + + Computes the Euclidean distance between two n-vectors u and v, ||u-v||_2 + """ + u = np.asarray(u) + v = np.asarray(v) + q=np.matrix(u-v) + return np.sqrt((q*q.T).sum()) + +def sqeuclidean(u, v): + """ + d = sqeuclidean(u, v) + + Computes the squared Euclidean distance between two n-vectors u and v, + (||u-v||_2)^2. + """ + u = np.asarray(u) + v = np.asarray(v) + return ((u-v)*(u-v).T).sum() + +def cosine(u, v): + """ + d = cosine(u, v) + + Computes the Cosine distance between two n-vectors u and v, + (1-uv^T)/(||u||_2 * ||v||_2). + """ + u = np.asarray(u) + v = np.asarray(v) + return (1.0 - (np.dot(u, v.T) / \ + (np.sqrt(np.dot(u, u.T)) * np.sqrt(np.dot(v, v.T))))) + +def correlation(u, v): + """ + d = correlation(u, v) + + Computes the correlation distance between two n-vectors u and v, + + 1 - (u - n|u|_1)(v - n|v|_1)^T + --------------------------------- , + |(u - n|u|_1)|_2 |(v - n|v|_1)|^T + + where |*|_1 is the Manhattan norm and n is the common dimensionality + of the vectors. + """ + umu = u.mean() + vmu = v.mean() + um = u - umu + vm = v - vmu + return 1.0 - (np.dot(um, vm) / + (np.sqrt(np.dot(um, um)) \ + * np.sqrt(np.dot(vm, vm)))) + +def hamming(u, v): + """ + d = hamming(u, v) + + Computes the Hamming distance between two n-vectors u and v, + which is simply the proportion of disagreeing components in u + and v. If u and v are boolean vectors, the hamming distance is + + (c_{01} + c_{10}) / n + + where c_{ij} is the number of occurrences of + + u[k] == i and v[k] == j + + for k < n. + """ + u = np.asarray(u) + v = np.asarray(v) + return (u != v).mean() + +def jaccard(u, v): + """ + d = jaccard(u, v) + + Computes the Jaccard-Needham dissimilarity between two boolean + n-vectors u and v, which is + + c_{TF} + c_{FT} + ------------------------ + c_{TT} + c_{FT} + c_{TF} + + where c_{ij} is the number of occurrences of + + u[k] == i and v[k] == j + + for k < n. + """ + u = np.asarray(u) + v = np.asarray(v) + return (np.double(np.bitwise_and((u != v), + np.bitwise_or(u != 0, v != 0)).sum()) + / np.double(np.bitwise_or(u != 0, v != 0).sum())) + +def kulsinski(u, v): + """ + d = kulsinski(u, v) + + Computes the Kulsinski dissimilarity between two boolean n-vectors + u and v, which is + + c_{TF} + c_{FT} - c_{TT} + n + ---------------------------- + c_{FT} + c_{TF} + n + + where c_{ij} is the number of occurrences of + + u[k] == i and v[k] == j + + for k < n. 
+ """ + u = np.asarray(u) + v = np.asarray(v) + n = len(u) + (nff, nft, ntf, ntt) = _nbool_correspond_all(u, v) + + return (ntf + nft - ntt + n) / (ntf + nft + n) + +def seuclidean(u, v, V): + """ + d = seuclidean(u, v, V) + + Returns the standardized Euclidean distance between two + n-vectors u and v. V is a m-dimensional vector of component + variances. It is usually computed among a larger collection vectors. + """ + u = np.asarray(u) + v = np.asarray(v) + V = np.asarray(V) + if len(V.shape) != 1 or V.shape[0] != u.shape[0] or u.shape[0] != v.shape[0]: + raise TypeError('V must be a 1-D array of the same dimension as u and v.') + return np.sqrt(((u-v)**2 / V).sum()) + +def cityblock(u, v): + """ + d = cityblock(u, v) + + Computes the Manhattan distance between two n-vectors u and v, + \sum {u_i-v_i}. + """ + u = np.asarray(u) + v = np.asarray(v) + return abs(u-v).sum() + +def mahalanobis(u, v, VI): + """ + d = mahalanobis(u, v, VI) + + Computes the Mahalanobis distance between two n-vectors u and v, + (u-v)VI(u-v)^T + where VI is the inverse covariance matrix. + """ + u = np.asarray(u) + v = np.asarray(v) + VI = np.asarray(VI) + return np.sqrt(np.dot(np.dot((u-v),VI),(u-v).T).sum()) + +def chebyshev(u, v): + """ + d = chebyshev(u, v) + + Computes the Chebyshev distance between two n-vectors u and v, + \max {|u_i-v_i|}. + """ + u = np.asarray(u) + v = np.asarray(v) + return max(abs(u-v)) + +def braycurtis(u, v): + """ + d = braycurtis(u, v) + + Computes the Bray-Curtis distance between two n-vectors u and v, + \sum{|u_i-v_i|} / \sum{|u_i+v_i|}. + """ + u = np.asarray(u) + v = np.asarray(v) + return abs(u-v).sum() / abs(u+v).sum() + +def canberra(u, v): + """ + d = canberra(u, v) + + Computes the Canberra distance between two n-vectors u and v, + \sum{|u_i-v_i|} / \sum{|u_i|+|v_i}. + """ + u = np.asarray(u) + v = np.asarray(v) + return abs(u-v).sum() / (abs(u).sum() + abs(v).sum()) + +def _nbool_correspond_all(u, v): + if u.dtype != v.dtype: + raise TypeError("Arrays being compared must be of the same data type.") + + if u.dtype == np.int or u.dtype == np.float_ or u.dtype == np.double: + not_u = 1.0 - u + not_v = 1.0 - v + nff = (not_u * not_v).sum() + nft = (not_u * v).sum() + ntf = (u * not_v).sum() + ntt = (u * v).sum() + elif u.dtype == np.bool: + not_u = ~u + not_v = ~v + nff = (not_u & not_v).sum() + nft = (not_u & v).sum() + ntf = (u & not_v).sum() + ntt = (u & v).sum() + else: + raise TypeError("Arrays being compared have unknown type.") + + return (nff, nft, ntf, ntt) + +def _nbool_correspond_ft_tf(u, v): + if u.dtype == np.int or u.dtype == np.float_ or u.dtype == np.double: + not_u = 1.0 - u + not_v = 1.0 - v + nff = (not_u * not_v).sum() + nft = (not_u * v).sum() + ntf = (u * not_v).sum() + ntt = (u * v).sum() + else: + not_u = ~u + not_v = ~v + nft = (not_u & v).sum() + ntf = (u & not_v).sum() + return (nft, ntf) + +def yule(u, v): + """ + d = yule(u, v) + Computes the Yule dissimilarity between two boolean n-vectors u and v, + + R + --------------------- + c_{TT} + c_{FF} + R/2 + + where c_{ij} is the number of occurrences of + + u[k] == i and v[k] == j + + for k < n, and + + R = 2.0 * (c_{TF} + c_{FT}). 
+ """ + u = np.asarray(u) + v = np.asarray(v) + (nff, nft, ntf, ntt) = _nbool_correspond_all(u, v) + print nff, nft, ntf, ntt + return float(2.0 * ntf * nft) / float(ntt * nff + ntf * nft) + +def matching(u, v): + """ + d = matching(u, v) + + Computes the Matching dissimilarity between two boolean n-vectors + u and v, which is + + (c_{TF} + c_{FT}) / n + + where c_{ij} is the number of occurrences of + + u[k] == i and v[k] == j + + for k < n. + """ + u = np.asarray(u) + v = np.asarray(v) + (nft, ntf) = _nbool_correspond_ft_tf(u, v) + return float(nft + ntf) / float(len(u)) + +def dice(u, v): + """ + d = dice(u, v) + + Computes the Dice dissimilarity between two boolean n-vectors + u and v, which is + + c_{TF} + c_{FT} + ---------------------------- + 2 * c_{TT} + c_{FT} + c_{TF} + + where c_{ij} is the number of occurrences of + + u[k] == i and v[k] == j + + for k < n. + """ + u = np.asarray(u) + v = np.asarray(v) + if u.dtype == np.bool: + ntt = (u & v).sum() + else: + ntt = (u * v).sum() + (nft, ntf) = _nbool_correspond_ft_tf(u, v) + return float(ntf + nft) / float(2.0 * ntt + ntf + nft) + +def rogerstanimoto(u, v): + """ + d = rogerstanimoto(u, v) + + Computes the Rogers-Tanimoto dissimilarity between two boolean + n-vectors u and v, + + R + ------------------- + c_{TT} + c_{FF} + R + + where c_{ij} is the number of occurrences of + + u[k] == i and v[k] == j + + for k < n, and + + R = 2.0 * (c_{TF} + c_{FT}). + + """ + u = np.asarray(u) + v = np.asarray(v) + (nff, nft, ntf, ntt) = _nbool_correspond_all(u, v) + return float(2.0 * (ntf + nft)) / float(ntt + nff + (2.0 * (ntf + nft))) + +def russellrao(u, v): + """ + d = russellrao(u, v) + + Computes the Russell-Rao dissimilarity between two boolean n-vectors + u and v, (n - c_{TT}) / n where c_{ij} is the number of occurrences + of u[k] == i and v[k] == j for k < n. + """ + u = np.asarray(u) + v = np.asarray(v) + if u.dtype == np.bool: + ntt = (u & v).sum() + else: + ntt = (u * v).sum() + return float(len(u) - ntt) / float(len(u)) + +def sokalmichener(u, v): + """ + d = sokalmichener(u, v) + + Computes the Sokal-Michener dissimilarity between two boolean vectors + u and v, 2R / (S + 2R) where c_{ij} is the number of occurrences of + u[k] == i and v[k] == j for k < n and R = 2 * (c_{TF} + c{FT}) and + S = c_{FF} + c_{TT}. + """ + u = np.asarray(u) + v = np.asarray(v) + if u.dtype == np.bool: + ntt = (u & v).sum() + nff = (~u & ~v).sum() + else: + ntt = (u * v).sum() + nff = ((1.0 - u) * (1.0 - v)).sum() + (nft, ntf) = _nbool_correspond_ft_tf(u, v) + return float(2.0 * (ntf + nft))/float(ntt + nff + 2.0 * (ntf + nft)) + +def sokalsneath(u, v): + """ + d = sokalsneath(u, v) + + Computes the Sokal-Sneath dissimilarity between two boolean vectors + u and v, 2R / (c_{TT} + 2R) where c_{ij} is the number of occurrences + of u[k] == i and v[k] == j for k < n and R = 2 * (c_{TF} + c{FT}). + """ + u = np.asarray(u) + v = np.asarray(v) + if u.dtype == np.bool: + ntt = (u & v).sum() + else: + ntt = (u * v).sum() + (nft, ntf) = _nbool_correspond_ft_tf(u, v) + return float(2.0 * (ntf + nft))/float(ntt + 2.0 * (ntf + nft)) + + +def pdist(X, metric='euclidean', p=2, V=None, VI=None): + """ Y = pdist(X, method='euclidean', p=2) + + Computes the distance between m original observations in + n-dimensional space. Returns a condensed distance matrix Y. + For each i and j (i=1. + + 3. Y = pdist(X, 'cityblock') + + Computes the city block or Manhattan distance between the + points. + + 4. 
Y = pdist(X, 'seuclidean', V=None) + + Computes the standardized Euclidean distance. The standardized + Euclidean distance between two n-vectors u and v is + + sqrt(\sum {(u_i-v_i)^2 / V[x_i]}). + + V is the variance vector; V[i] is the variance computed over all + the i'th components of the points. If not passed, it is + automatically computed. + + 5. Y = pdist(X, 'sqeuclidean') + + Computes the squared Euclidean distance ||u-v||_2^2 between + the vectors. + + 6. Y = pdist(X, 'cosine') + + Computes the cosine distance between vectors u and v, + + 1 - uv^T + ----------- + |u|_2 |v|_2 + + where |*|_2 is the 2 norm of its argument *. + + 7. Y = pdist(X, 'correlation') + + Computes the correlation distance between vectors u and v. This is + + 1 - (u - n|u|_1)(v - n|v|_1)^T + --------------------------------- , + |(u - n|u|_1)|_2 |(v - n|v|_1)|^T + + where |*|_1 is the Manhattan (or 1-norm) of its argument *, + and n is the common dimensionality of the vectors. + + 8. Y = pdist(X, 'hamming') + + Computes the normalized Hamming distance, or the proportion + of those vector elements between two n-vectors u and v which + disagree. To save memory, the matrix X can be of type boolean. + + 9. Y = pdist(X, 'jaccard') + + Computes the Jaccard distance between the points. Given two + vectors, u and v, the Jaccard distance is the proportion of + those elements u_i and v_i that disagree where at least one + of them is non-zero. + + 10. Y = pdist(X, 'chebyshev') + + Computes the Chebyshev distance between the points. The + Chebyshev distance between two n-vectors u and v is the maximum + norm-1 distance between their respective elements. More + precisely, the distance is given by + + d(u,v) = max {|u_i-v_i|}. + + 11. Y = pdist(X, 'canberra') + + Computes the Canberra distance between the points. The + Canberra distance between two points u and v is + + |u_1-v_1| |u_2-v_2| |u_n-v_n| + d(u,v) = ----------- + ----------- + ... + ----------- + |u_1|+|v_1| |u_2|+|v_2| |u_n|+|v_n| + + 12. Y = pdist(X, 'braycurtis') + + Computes the Bray-Curtis distance between the points. The + Bray-Curtis distance between two points u and v is + + |u_1-v_1| + |u_2-v_2| + ... + |u_n-v_n| + d(u,v) = --------------------------------------- + |u_1+v_1| + |u_2+v_2| + ... + |u_n+v_n| + + 13. Y = pdist(X, 'mahalanobis', VI=None) + + Computes the Mahalanobis distance between the points. The + Mahalanobis distance between two points u and v is + (u-v)(1/V)(u-v)^T + where (1/V) is the inverse covariance. If VI is not None, + VI will be used as the inverse covariance matrix. + + 14. Y = pdist(X, 'yule') + + Computes the Yule distance between each pair of boolean + vectors. (see yule function documentation) + + 15. Y = pdist(X, 'matching') + + Computes the matching distance between each pair of boolean + vectors. (see matching function documentation) + + 16. Y = pdist(X, 'dice') + + Computes the Dice distance between each pair of boolean + vectors. (see dice function documentation) + + 17. Y = pdist(X, 'kulsinski') + + Computes the Kulsinski distance between each pair of + boolean vectors. (see kulsinski function documentation) + + 17. Y = pdist(X, 'rogerstanimoto') + + Computes the Rogers-Tanimoto distance between each pair of + boolean vectors. (see rogerstanimoto function documentation) + + 18. Y = pdist(X, 'russellrao') + + Computes the Russell-Rao distance between each pair of + boolean vectors. (see russellrao function documentation) + + 19. 
Y = pdist(X, 'sokalmichener') + + Computes the Sokal-Michener distance between each pair of + boolean vectors. (see sokalmichener function documentation) + + 20. Y = pdist(X, 'sokalsneath') + + Computes the Sokal-Sneath distance between each pair of + boolean vectors. (see sokalsneath function documentation) + + 21. Y = pdist(X, f) + + Computes the distance between all pairs of vectors in X + using the user supplied 2-arity function f. For example, + Euclidean distance between the vectors could be computed + as follows, + + dm = pdist(X, (lambda u, v: np.sqrt(((u-v)*(u-v).T).sum()))) + + Note that you should avoid passing a reference to one of + the distance functions defined in this library. For example, + + dm = pdist(X, sokalsneath) + + would calculate the pair-wise distances between the vectors + in X using the Python function sokalsneath. This would result + in sokalsneath being called {n \choose 2} times, which is + inefficient. Instead, the optimized C version is more + efficient, and we call it using the following syntax. + + dm = pdist(X, 'sokalsneath') + """ +# 21. Y = pdist(X, 'test_Y') +# +# Computes the distance between all pairs of vectors in X +# using the distance metric Y but with a more succint, +# verifiable, but less efficient implementation. + + + X = np.asarray(X) + + #if np.issubsctype(X, np.floating) and not np.issubsctype(X, np.double): + # raise TypeError('Floating point arrays must be 64-bit (got %r).' % + # (X.dtype.type,)) + + # The C code doesn't do striding. + [X] = _copy_arrays_if_base_present([_convert_to_double(X)]) + + s = X.shape + + if len(s) != 2: + raise ValueError('A 2-dimensional array must be passed.'); + + m = s[0] + n = s[1] + dm = np.zeros((m * (m - 1) / 2,), dtype=np.double) + + mtype = type(metric) + if mtype is types.FunctionType: + k = 0 + if metric == minkowski: + for i in xrange(0, m - 1): + for j in xrange(i+1, m): + dm[k] = minkowski(X[i, :], X[j, :], p) + k = k + 1 + elif metric == seuclidean: + for i in xrange(0, m - 1): + for j in xrange(i+1, m): + dm[k] = seuclidean(X[i, :], X[j, :], V) + k = k + 1 + elif metric == mahalanobis: + for i in xrange(0, m - 1): + for j in xrange(i+1, m): + dm[k] = mahalanobis(X[i, :], X[j, :], V) + k = k + 1 + else: + for i in xrange(0, m - 1): + for j in xrange(i+1, m): + dm[k] = metric(X[i, :], X[j, :]) + k = k + 1 + + elif mtype is types.StringType: + mstr = metric.lower() + + #if X.dtype != np.double and \ + # (mstr != 'hamming' and mstr != 'jaccard'): + # TypeError('A double array must be passed.') + if mstr in set(['euclidean', 'euclid', 'eu', 'e']): + _distance_wrap.pdist_euclidean_wrap(_convert_to_double(X), dm) + elif mstr in set(['sqeuclidean', 'sqe', 'sqeuclid']): + _distance_wrap.pdist_euclidean_wrap(_convert_to_double(X), dm) + dm = dm ** 2.0 + elif mstr in set(['cityblock', 'cblock', 'cb', 'c']): + _distance_wrap.pdist_city_block_wrap(X, dm) + elif mstr in set(['hamming', 'hamm', 'ha', 'h']): + if X.dtype == np.bool: + _distance_wrap.pdist_hamming_bool_wrap(_convert_to_bool(X), dm) + else: + _distance_wrap.pdist_hamming_wrap(_convert_to_double(X), dm) + elif mstr in set(['jaccard', 'jacc', 'ja', 'j']): + if X.dtype == np.bool: + _distance_wrap.pdist_jaccard_bool_wrap(_convert_to_bool(X), dm) + else: + _distance_wrap.pdist_jaccard_wrap(_convert_to_double(X), dm) + elif mstr in set(['chebychev', 'chebyshev', 'cheby', 'cheb', 'ch']): + _distance_wrap.pdist_chebyshev_wrap(_convert_to_double(X), dm) + elif mstr in set(['minkowski', 'mi', 'm']): + 
_distance_wrap.pdist_minkowski_wrap(_convert_to_double(X), dm, p) + elif mstr in set(['seuclidean', 'se', 's']): + if V is not None: + V = np.asarray(V) + if type(V) != np.ndarray: + raise TypeError('Variance vector V must be a numpy array') + if V.dtype != np.double: + raise TypeError('Variance vector V must contain doubles.') + if len(V.shape) != 1: + raise ValueError('Variance vector V must be one-dimensional.') + if V.shape[0] != n: + raise ValueError('Variance vector V must be of the same dimension as the vectors on which the distances are computed.') + # The C code doesn't do striding. + [VV] = _copy_arrays_if_base_present([_convert_to_double(V)]) + else: + VV = np.var(X, axis=0, ddof=1) + _distance_wrap.pdist_seuclidean_wrap(_convert_to_double(X), VV, dm) + # Need to test whether vectorized cosine works better. + # Find out: Is there a dot subtraction operator so I can + # subtract matrices in a similar way to multiplying them? + # Need to get rid of as much unnecessary C code as possible. + elif mstr in set(['cosine', 'cos']): + norms = np.sqrt(np.sum(X * X, axis=1)) + _distance_wrap.pdist_cosine_wrap(_convert_to_double(X), dm, norms) + elif mstr in set(['old_cosine', 'old_cos']): + norms = np.sqrt(np.sum(X * X, axis=1)) + nV = norms.reshape(m, 1) + # The numerator u * v + nm = np.dot(X, X.T) + # The denom. ||u||*||v|| + de = np.dot(nV, nV.T); + dm = 1.0 - (nm / de) + dm[xrange(0,m),xrange(0,m)] = 0.0 + dm = squareform(dm) + elif mstr in set(['correlation', 'co']): + X2 = X - X.mean(1)[:,np.newaxis] + #X2 = X - np.matlib.repmat(np.mean(X, axis=1).reshape(m, 1), 1, n) + norms = np.sqrt(np.sum(X2 * X2, axis=1)) + _distance_wrap.pdist_cosine_wrap(_convert_to_double(X2), _convert_to_double(dm), _convert_to_double(norms)) + elif mstr in set(['mahalanobis', 'mahal', 'mah']): + if VI is not None: + VI = _convert_to_double(np.asarray(VI)) + if type(VI) != np.ndarray: + raise TypeError('VI must be a numpy array.') + if VI.dtype != np.double: + raise TypeError('The array must contain 64-bit floats.') + [VI] = _copy_arrays_if_base_present([VI]) + else: + V = np.cov(X.T) + VI = _convert_to_double(np.linalg.inv(V).T.copy()) + # (u-v)V^(-1)(u-v)^T + _distance_wrap.pdist_mahalanobis_wrap(_convert_to_double(X), VI, dm) + elif mstr == 'canberra': + _distance_wrap.pdist_canberra_wrap(_convert_to_bool(X), dm) + elif mstr == 'braycurtis': + _distance_wrap.pdist_bray_curtis_wrap(_convert_to_bool(X), dm) + elif mstr == 'yule': + _distance_wrap.pdist_yule_bool_wrap(_convert_to_bool(X), dm) + elif mstr == 'matching': + _distance_wrap.pdist_matching_bool_wrap(_convert_to_bool(X), dm) + elif mstr == 'kulsinski': + _distance_wrap.pdist_kulsinski_bool_wrap(_convert_to_bool(X), dm) + elif mstr == 'dice': + _distance_wrap.pdist_dice_bool_wrap(_convert_to_bool(X), dm) + elif mstr == 'rogerstanimoto': + _distance_wrap.pdist_rogerstanimoto_bool_wrap(_convert_to_bool(X), dm) + elif mstr == 'russellrao': + _distance_wrap.pdist_russellrao_bool_wrap(_convert_to_bool(X), dm) + elif mstr == 'sokalmichener': + _distance_wrap.pdist_sokalmichener_bool_wrap(_convert_to_bool(X), dm) + elif mstr == 'sokalsneath': + _distance_wrap.pdist_sokalsneath_bool_wrap(_convert_to_bool(X), dm) + elif metric == 'test_euclidean': + dm = pdist(X, euclidean) + elif metric == 'test_sqeuclidean': + if V is None: + V = np.var(X, axis=0, ddof=1) + else: + V = np.asarray(V) + dm = pdist(X, lambda u, v: seuclidean(u, v, V)) + elif metric == 'test_braycurtis': + dm = pdist(X, braycurtis) + elif metric == 'test_mahalanobis': + if VI is None: + V = 
np.cov(X.T) + VI = np.linalg.inv(V) + else: + VI = np.asarray(VI) + [VI] = _copy_arrays_if_base_present([VI]) + # (u-v)V^(-1)(u-v)^T + dm = pdist(X, (lambda u, v: mahalanobis(u, v, VI))) + elif metric == 'test_cityblock': + dm = pdist(X, cityblock) + elif metric == 'test_minkowski': + dm = pdist(X, minkowski, p) + elif metric == 'test_cosine': + dm = pdist(X, cosine) + elif metric == 'test_correlation': + dm = pdist(X, correlation) + elif metric == 'test_hamming': + dm = pdist(X, hamming) + elif metric == 'test_jaccard': + dm = pdist(X, jaccard) + elif metric == 'test_chebyshev' or metric == 'test_chebychev': + dm = pdist(X, chebyshev) + elif metric == 'test_yule': + dm = pdist(X, yule) + elif metric == 'test_matching': + dm = pdist(X, matching) + elif metric == 'test_dice': + dm = pdist(X, dice) + elif metric == 'test_kulsinski': + dm = pdist(X, kulsinski) + elif metric == 'test_rogerstanimoto': + dm = pdist(X, rogerstanimoto) + elif metric == 'test_russellrao': + dm = pdist(X, russellrao) + elif metric == 'test_sokalsneath': + dm = pdist(X, sokalsneath) + elif metric == 'test_sokalmichener': + dm = pdist(X, sokalmichener) + else: + raise ValueError('Unknown Distance Metric: %s' % mstr) + else: + raise TypeError('2nd argument metric must be a string identifier or a function.') + return dm Modified: trunk/scipy/cluster/hierarchy.py =================================================================== --- trunk/scipy/cluster/hierarchy.py 2008-06-07 08:13:02 UTC (rev 4416) +++ trunk/scipy/cluster/hierarchy.py 2008-06-09 05:55:44 UTC (rev 4417) @@ -22,9 +22,6 @@ median the median/WPGMC algorithm. (alias) ward the Ward/incremental algorithm. (alias) -Distance matrix computation from a collection of raw observation vectors - - pdist computes distances between each observation pair. squareform converts a sq. D.M. to a condensed one and vice versa. Statistic computations on hierarchies @@ -47,30 +44,6 @@ lvlist a left-to-right traversal of the leaves. totree represents a linkage matrix as a tree object. -Distance functions between two vectors u and v - - braycurtis the Bray-Curtis distance. - canberra the Canberra distance. - chebyshev the Chebyshev distance. - cityblock the Manhattan distance. - correlation the Correlation distance. - cosine the Cosine distance. - dice the Dice dissimilarity (boolean). - euclidean the Euclidean distance. - hamming the Hamming distance (boolean). - jaccard the Jaccard distance (boolean). - kulsinski the Kulsinski distance (boolean). - mahalanobis the Mahalanobis distance. - matching the matching dissimilarity (boolean). - minkowski the Minkowski distance. - rogerstanimoto the Rogers-Tanimoto dissimilarity (boolean). - russellrao the Russell-Rao dissimilarity (boolean). - seuclidean the normalized Euclidean distance. - sokalmichener the Sokal-Michener dissimilarity (boolean). - sokalsneath the Sokal-Sneath dissimilarity (boolean). - sqeuclidean the squared Euclidean distance. - yule the Yule dissimilarity (boolean). - Predicates is_valid_dm checks for a valid distance matrix. @@ -176,6 +149,7 @@ import numpy as np import _hierarchy_wrap, types +from distance import pdist _cpy_non_euclid_methods = {'single': 0, 'complete': 1, 'average': 2, 'weighted': 6} @@ -191,15 +165,6 @@ def _warning(s): print ('[WARNING] scipy.cluster: %s' % s) -def _unbiased_variance(X): - """ - Computes the unbiased variance of each dimension of a collection of - observation vectors, represented by a matrix where the rows are the - observations. 
- """ - #n = np.double(X.shape[1]) - return np.var(X, axis=0, ddof=1) # * n / (n - 1.0) - def _copy_array_if_base_present(a): """ Copies the array if its base points to a parent array. @@ -788,399 +753,6 @@ else: raise ValueError('The first argument must be one or two dimensional array. A %d-dimensional array is not permitted' % len(s)) -def minkowski(u, v, p): - """ - d = minkowski(u, v, p) - - Returns the Minkowski distance between two vectors u and v, - - ||u-v||_p = (\sum {|u_i - v_i|^p})^(1/p). - """ - u = np.asarray(u) - v = np.asarray(v) - if p < 1: - raise ValueError("p must be at least 1") - return (abs(u-v)**p).sum() ** (1.0 / p) - -def euclidean(u, v): - """ - d = euclidean(u, v) - - Computes the Euclidean distance between two n-vectors u and v, ||u-v||_2 - """ - u = np.asarray(u) - v = np.asarray(v) - q=np.matrix(u-v) - return np.sqrt((q*q.T).sum()) - -def sqeuclidean(u, v): - """ - d = sqeuclidean(u, v) - - Computes the squared Euclidean distance between two n-vectors u and v, - (||u-v||_2)^2. - """ - u = np.asarray(u) - v = np.asarray(v) - return ((u-v)*(u-v).T).sum() - -def cosine(u, v): - """ - d = cosine(u, v) - - Computes the Cosine distance between two n-vectors u and v, - (1-uv^T)/(||u||_2 * ||v||_2). - """ - u = np.asarray(u) - v = np.asarray(v) - return (1.0 - (np.dot(u, v.T) / \ - (np.sqrt(np.dot(u, u.T)) * np.sqrt(np.dot(v, v.T))))) - -def correlation(u, v): - """ - d = correlation(u, v) - - Computes the correlation distance between two n-vectors u and v, - - 1 - (u - n|u|_1)(v - n|v|_1)^T - --------------------------------- , - |(u - n|u|_1)|_2 |(v - n|v|_1)|^T - - where |*|_1 is the Manhattan norm and n is the common dimensionality - of the vectors. - """ - umu = u.mean() - vmu = v.mean() - um = u - umu - vm = v - vmu - return 1.0 - (np.dot(um, vm) / - (np.sqrt(np.dot(um, um)) \ - * np.sqrt(np.dot(vm, vm)))) - -def hamming(u, v): - """ - d = hamming(u, v) - - Computes the Hamming distance between two n-vectors u and v, - which is simply the proportion of disagreeing components in u - and v. If u and v are boolean vectors, the hamming distance is - - (c_{01} + c_{10}) / n - - where c_{ij} is the number of occurrences of - - u[k] == i and v[k] == j - - for k < n. - """ - u = np.asarray(u) - v = np.asarray(v) - return (u != v).mean() - -def jaccard(u, v): - """ - d = jaccard(u, v) - - Computes the Jaccard-Needham dissimilarity between two boolean - n-vectors u and v, which is - - c_{TF} + c_{FT} - ------------------------ - c_{TT} + c_{FT} + c_{TF} - - where c_{ij} is the number of occurrences of - - u[k] == i and v[k] == j - - for k < n. - """ - u = np.asarray(u) - v = np.asarray(v) - return (np.double(np.bitwise_and((u != v), - np.bitwise_or(u != 0, v != 0)).sum()) - / np.double(np.bitwise_or(u != 0, v != 0).sum())) - -def kulsinski(u, v): - """ - d = kulsinski(u, v) - - Computes the Kulsinski dissimilarity between two boolean n-vectors - u and v, which is - - c_{TF} + c_{FT} - c_{TT} + n - ---------------------------- - c_{FT} + c_{TF} + n - - where c_{ij} is the number of occurrences of - - u[k] == i and v[k] == j - - for k < n. - """ - u = np.asarray(u) - v = np.asarray(v) - n = len(u) - (nff, nft, ntf, ntt) = _nbool_correspond_all(u, v) - - return (ntf + nft - ntt + n) / (ntf + nft + n) - -def seuclidean(u, v, V): - """ - d = seuclidean(u, v, V) - - Returns the standardized Euclidean distance between two - n-vectors u and v. V is a m-dimensional vector of component - variances. It is usually computed among a larger collection vectors. 
- """ - u = np.asarray(u) - v = np.asarray(v) - V = np.asarray(V) - if len(V.shape) != 1 or V.shape[0] != u.shape[0] or u.shape[0] != v.shape[0]: - raise TypeError('V must be a 1-D array of the same dimension as u and v.') - return np.sqrt(((u-v)**2 / V).sum()) - -def cityblock(u, v): - """ - d = cityblock(u, v) - - Computes the Manhattan distance between two n-vectors u and v, - \sum {u_i-v_i}. - """ - u = np.asarray(u) - v = np.asarray(v) - return abs(u-v).sum() - -def mahalanobis(u, v, VI): - """ - d = mahalanobis(u, v, VI) - - Computes the Mahalanobis distance between two n-vectors u and v, - (u-v)VI(u-v)^T - where VI is the inverse covariance matrix. - """ - u = np.asarray(u) - v = np.asarray(v) - VI = np.asarray(VI) - return np.sqrt(np.dot(np.dot((u-v),VI),(u-v).T).sum()) - -def chebyshev(u, v): - """ - d = chebyshev(u, v) - - Computes the Chebyshev distance between two n-vectors u and v, - \max {|u_i-v_i|}. - """ - u = np.asarray(u) - v = np.asarray(v) - return max(abs(u-v)) - -def braycurtis(u, v): - """ - d = braycurtis(u, v) - - Computes the Bray-Curtis distance between two n-vectors u and v, - \sum{|u_i-v_i|} / \sum{|u_i+v_i|}. - """ - u = np.asarray(u) - v = np.asarray(v) - return abs(u-v).sum() / abs(u+v).sum() - -def canberra(u, v): - """ - d = canberra(u, v) - - Computes the Canberra distance between two n-vectors u and v, - \sum{|u_i-v_i|} / \sum{|u_i|+|v_i}. - """ - u = np.asarray(u) - v = np.asarray(v) - return abs(u-v).sum() / (abs(u).sum() + abs(v).sum()) - -def _nbool_correspond_all(u, v): - if u.dtype != v.dtype: - raise TypeError("Arrays being compared must be of the same data type.") - - if u.dtype == np.int or u.dtype == np.float_ or u.dtype == np.double: - not_u = 1.0 - u - not_v = 1.0 - v - nff = (not_u * not_v).sum() - nft = (not_u * v).sum() - ntf = (u * not_v).sum() - ntt = (u * v).sum() - elif u.dtype == np.bool: - not_u = ~u - not_v = ~v - nff = (not_u & not_v).sum() - nft = (not_u & v).sum() - ntf = (u & not_v).sum() - ntt = (u & v).sum() - else: - raise TypeError("Arrays being compared have unknown type.") - - return (nff, nft, ntf, ntt) - -def _nbool_correspond_ft_tf(u, v): - if u.dtype == np.int or u.dtype == np.float_ or u.dtype == np.double: - not_u = 1.0 - u - not_v = 1.0 - v - nff = (not_u * not_v).sum() - nft = (not_u * v).sum() - ntf = (u * not_v).sum() - ntt = (u * v).sum() - else: - not_u = ~u - not_v = ~v - nft = (not_u & v).sum() - ntf = (u & not_v).sum() - return (nft, ntf) - -def yule(u, v): - """ - d = yule(u, v) - Computes the Yule dissimilarity between two boolean n-vectors u and v, - - R - --------------------- - c_{TT} + c_{FF} + R/2 - - where c_{ij} is the number of occurrences of - - u[k] == i and v[k] == j - - for k < n, and - - R = 2.0 * (c_{TF} + c_{FT}). - """ - u = np.asarray(u) - v = np.asarray(v) - (nff, nft, ntf, ntt) = _nbool_correspond_all(u, v) - print nff, nft, ntf, ntt - return float(2.0 * ntf * nft) / float(ntt * nff + ntf * nft) - -def matching(u, v): - """ - d = matching(u, v) - - Computes the Matching dissimilarity between two boolean n-vectors - u and v, which is - - (c_{TF} + c_{FT}) / n - - where c_{ij} is the number of occurrences of - - u[k] == i and v[k] == j - - for k < n. 
- """ - u = np.asarray(u) - v = np.asarray(v) - (nft, ntf) = _nbool_correspond_ft_tf(u, v) - return float(nft + ntf) / float(len(u)) - -def dice(u, v): - """ - d = dice(u, v) - - Computes the Dice dissimilarity between two boolean n-vectors - u and v, which is - - c_{TF} + c_{FT} - ---------------------------- - 2 * c_{TT} + c_{FT} + c_{TF} - - where c_{ij} is the number of occurrences of - - u[k] == i and v[k] == j - - for k < n. - """ - u = np.asarray(u) - v = np.asarray(v) - if u.dtype == np.bool: - ntt = (u & v).sum() - else: - ntt = (u * v).sum() - (nft, ntf) = _nbool_correspond_ft_tf(u, v) - return float(ntf + nft) / float(2.0 * ntt + ntf + nft) - -def rogerstanimoto(u, v): - """ - d = rogerstanimoto(u, v) - - Computes the Rogers-Tanimoto dissimilarity between two boolean - n-vectors u and v, - - R - ------------------- - c_{TT} + c_{FF} + R - - where c_{ij} is the number of occurrences of - - u[k] == i and v[k] == j - - for k < n, and - - R = 2.0 * (c_{TF} + c_{FT}). - - """ - u = np.asarray(u) - v = np.asarray(v) - (nff, nft, ntf, ntt) = _nbool_correspond_all(u, v) - return float(2.0 * (ntf + nft)) / float(ntt + nff + (2.0 * (ntf + nft))) - -def russellrao(u, v): - """ - d = russellrao(u, v) - - Computes the Russell-Rao dissimilarity between two boolean n-vectors - u and v, (n - c_{TT}) / n where c_{ij} is the number of occurrences - of u[k] == i and v[k] == j for k < n. - """ - u = np.asarray(u) - v = np.asarray(v) - if u.dtype == np.bool: - ntt = (u & v).sum() - else: - ntt = (u * v).sum() - return float(len(u) - ntt) / float(len(u)) - -def sokalmichener(u, v): - """ - d = sokalmichener(u, v) - - Computes the Sokal-Michener dissimilarity between two boolean vectors - u and v, 2R / (S + 2R) where c_{ij} is the number of occurrences of - u[k] == i and v[k] == j for k < n and R = 2 * (c_{TF} + c{FT}) and - S = c_{FF} + c_{TT}. - """ - u = np.asarray(u) - v = np.asarray(v) - if u.dtype == np.bool: - ntt = (u & v).sum() - nff = (~u & ~v).sum() - else: - ntt = (u * v).sum() - nff = ((1.0 - u) * (1.0 - v)).sum() - (nft, ntf) = _nbool_correspond_ft_tf(u, v) - return float(2.0 * (ntf + nft))/float(ntt + nff + 2.0 * (ntf + nft)) - -def sokalsneath(u, v): - """ - d = sokalsneath(u, v) - - Computes the Sokal-Sneath dissimilarity between two boolean vectors - u and v, 2R / (c_{TT} + 2R) where c_{ij} is the number of occurrences - of u[k] == i and v[k] == j for k < n and R = 2 * (c_{TF} + c{FT}). - """ - u = np.asarray(u) - v = np.asarray(v) - if u.dtype == np.bool: - ntt = (u & v).sum() - else: - ntt = (u * v).sum() - (nft, ntf) = _nbool_correspond_ft_tf(u, v) - return float(2.0 * (ntf + nft))/float(ntt + 2.0 * (ntf + nft)) - def _convert_to_bool(X): if X.dtype != np.bool: X = np.bool_(X) @@ -1195,380 +767,6 @@ X = X.copy() return X -def pdist(X, metric='euclidean', p=2, V=None, VI=None): - """ Y = pdist(X, method='euclidean', p=2) - - Computes the distance between m original observations in - n-dimensional space. Returns a condensed distance matrix Y. - For each i and j (i=1. - - 3. Y = pdist(X, 'cityblock') - - Computes the city block or Manhattan distance between the - points. - - 4. Y = pdist(X, 'seuclidean', V=None) - - Computes the standardized Euclidean distance. The standardized - Euclidean distance between two n-vectors u and v is - - sqrt(\sum {(u_i-v_i)^2 / V[x_i]}). - - V is the variance vector; V[i] is the variance computed over all - the i'th components of the points. If not passed, it is - automatically computed. - - 5. 
Y = pdist(X, 'sqeuclidean') - - Computes the squared Euclidean distance ||u-v||_2^2 between - the vectors. - - 6. Y = pdist(X, 'cosine') - - Computes the cosine distance between vectors u and v, - - 1 - uv^T - ----------- - |u|_2 |v|_2 - - where |*|_2 is the 2 norm of its argument *. - - 7. Y = pdist(X, 'correlation') - - Computes the correlation distance between vectors u and v. This is - - 1 - (u - n|u|_1)(v - n|v|_1)^T - --------------------------------- , - |(u - n|u|_1)|_2 |(v - n|v|_1)|^T - - where |*|_1 is the Manhattan (or 1-norm) of its argument *, - and n is the common dimensionality of the vectors. - - 8. Y = pdist(X, 'hamming') - - Computes the normalized Hamming distance, or the proportion - of those vector elements between two n-vectors u and v which - disagree. To save memory, the matrix X can be of type boolean. - - 9. Y = pdist(X, 'jaccard') - - Computes the Jaccard distance between the points. Given two - vectors, u and v, the Jaccard distance is the proportion of - those elements u_i and v_i that disagree where at least one - of them is non-zero. - - 10. Y = pdist(X, 'chebyshev') - - Computes the Chebyshev distance between the points. The - Chebyshev distance between two n-vectors u and v is the maximum - norm-1 distance between their respective elements. More - precisely, the distance is given by - - d(u,v) = max {|u_i-v_i|}. - - 11. Y = pdist(X, 'canberra') - - Computes the Canberra distance between the points. The - Canberra distance between two points u and v is - - |u_1-v_1| |u_2-v_2| |u_n-v_n| - d(u,v) = ----------- + ----------- + ... + ----------- - |u_1|+|v_1| |u_2|+|v_2| |u_n|+|v_n| - - 12. Y = pdist(X, 'braycurtis') - - Computes the Bray-Curtis distance between the points. The - Bray-Curtis distance between two points u and v is - - |u_1-v_1| + |u_2-v_2| + ... + |u_n-v_n| - d(u,v) = --------------------------------------- - |u_1+v_1| + |u_2+v_2| + ... + |u_n+v_n| - - 13. Y = pdist(X, 'mahalanobis', VI=None) - - Computes the Mahalanobis distance between the points. The - Mahalanobis distance between two points u and v is - (u-v)(1/V)(u-v)^T - where (1/V) is the inverse covariance. If VI is not None, - VI will be used as the inverse covariance matrix. - - 14. Y = pdist(X, 'yule') - - Computes the Yule distance between each pair of boolean - vectors. (see yule function documentation) - - 15. Y = pdist(X, 'matching') - - Computes the matching distance between each pair of boolean - vectors. (see matching function documentation) - - 16. Y = pdist(X, 'dice') - - Computes the Dice distance between each pair of boolean - vectors. (see dice function documentation) - - 17. Y = pdist(X, 'kulsinski') - - Computes the Kulsinski distance between each pair of - boolean vectors. (see kulsinski function documentation) - - 17. Y = pdist(X, 'rogerstanimoto') - - Computes the Rogers-Tanimoto distance between each pair of - boolean vectors. (see rogerstanimoto function documentation) - - 18. Y = pdist(X, 'russellrao') - - Computes the Russell-Rao distance between each pair of - boolean vectors. (see russellrao function documentation) - - 19. Y = pdist(X, 'sokalmichener') - - Computes the Sokal-Michener distance between each pair of - boolean vectors. (see sokalmichener function documentation) - - 20. Y = pdist(X, 'sokalsneath') - - Computes the Sokal-Sneath distance between each pair of - boolean vectors. (see sokalsneath function documentation) - - 21. Y = pdist(X, f) - - Computes the distance between all pairs of vectors in X - using the user supplied 2-arity function f. 
For example, - Euclidean distance between the vectors could be computed - as follows, - - dm = pdist(X, (lambda u, v: np.sqrt(((u-v)*(u-v).T).sum()))) - - Note that you should avoid passing a reference to one of - the distance functions defined in this library. For example, - - dm = pdist(X, sokalsneath) - - would calculate the pair-wise distances between the vectors - in X using the Python function sokalsneath. This would result - in sokalsneath being called {n \choose 2} times, which is - inefficient. Instead, the optimized C version is more - efficient, and we call it using the following syntax. - - dm = pdist(X, 'sokalsneath') - """ -# 21. Y = pdist(X, 'test_Y') -# -# Computes the distance between all pairs of vectors in X -# using the distance metric Y but with a more succint, -# verifiable, but less efficient implementation. - - - X = np.asarray(X) - - #if np.issubsctype(X, np.floating) and not np.issubsctype(X, np.double): - # raise TypeError('Floating point arrays must be 64-bit (got %r).' % - # (X.dtype.type,)) - - # The C code doesn't do striding. - [X] = _copy_arrays_if_base_present([_convert_to_double(X)]) - - s = X.shape - - if len(s) != 2: - raise ValueError('A 2-dimensional array must be passed.'); - - m = s[0] - n = s[1] - dm = np.zeros((m * (m - 1) / 2,), dtype=np.double) - - mtype = type(metric) - if mtype is types.FunctionType: - k = 0 - if metric == minkowski: - for i in xrange(0, m - 1): - for j in xrange(i+1, m): - dm[k] = minkowski(X[i, :], X[j, :], p) - k = k + 1 - elif metric == seuclidean: - for i in xrange(0, m - 1): - for j in xrange(i+1, m): - dm[k] = seuclidean(X[i, :], X[j, :], V) - k = k + 1 - elif metric == mahalanobis: - for i in xrange(0, m - 1): - for j in xrange(i+1, m): - dm[k] = mahalanobis(X[i, :], X[j, :], V) - k = k + 1 - else: - for i in xrange(0, m - 1): - for j in xrange(i+1, m): - dm[k] = metric(X[i, :], X[j, :]) - k = k + 1 - - elif mtype is types.StringType: - mstr = metric.lower() - - #if X.dtype != np.double and \ - # (mstr != 'hamming' and mstr != 'jaccard'): - # TypeError('A double array must be passed.') - if mstr in set(['euclidean', 'euclid', 'eu', 'e']): - _hierarchy_wrap.pdist_euclidean_wrap(_convert_to_double(X), dm) - elif mstr in set(['sqeuclidean', 'sqe', 'sqeuclid']): - _hierarchy_wrap.pdist_euclidean_wrap(_convert_to_double(X), dm) - dm = dm ** 2.0 - elif mstr in set(['cityblock', 'cblock', 'cb', 'c']): - _hierarchy_wrap.pdist_city_block_wrap(X, dm) - elif mstr in set(['hamming', 'hamm', 'ha', 'h']): - if X.dtype == np.bool: - _hierarchy_wrap.pdist_hamming_bool_wrap(_convert_to_bool(X), dm) - else: - _hierarchy_wrap.pdist_hamming_wrap(_convert_to_double(X), dm) - elif mstr in set(['jaccard', 'jacc', 'ja', 'j']): - if X.dtype == np.bool: - _hierarchy_wrap.pdist_jaccard_bool_wrap(_convert_to_bool(X), dm) - else: - _hierarchy_wrap.pdist_jaccard_wrap(_convert_to_double(X), dm) - elif mstr in set(['chebychev', 'chebyshev', 'cheby', 'cheb', 'ch']): - _hierarchy_wrap.pdist_chebyshev_wrap(_convert_to_double(X), dm) - elif mstr in set(['minkowski', 'mi', 'm']): - _hierarchy_wrap.pdist_minkowski_wrap(_convert_to_double(X), dm, p) - elif mstr in set(['seuclidean', 'se', 's']): - if V is not None: - V = np.asarray(V) - if type(V) != np.ndarray: - raise TypeError('Variance vector V must be a numpy array') - if V.dtype != np.double: - raise TypeError('Variance vector V must contain doubles.') - if len(V.shape) != 1: - raise ValueError('Variance vector V must be one-dimensional.') - if V.shape[0] != n: - raise ValueError('Variance vector V 
must be of the same dimension as the vectors on which the distances are computed.') - # The C code doesn't do striding. - [VV] = _copy_arrays_if_base_present([_convert_to_double(V)]) - else: - VV = _unbiased_variance(X) - _hierarchy_wrap.pdist_seuclidean_wrap(_convert_to_double(X), VV, dm) - # Need to test whether vectorized cosine works better. - # Find out: Is there a dot subtraction operator so I can - # subtract matrices in a similar way to multiplying them? - # Need to get rid of as much unnecessary C code as possible. - elif mstr in set(['cosine', 'cos']): - norms = np.sqrt(np.sum(X * X, axis=1)) - _hierarchy_wrap.pdist_cosine_wrap(_convert_to_double(X), dm, norms) - elif mstr in set(['old_cosine', 'old_cos']): - norms = np.sqrt(np.sum(X * X, axis=1)) - nV = norms.reshape(m, 1) - # The numerator u * v - nm = np.dot(X, X.T) - # The denom. ||u||*||v|| - de = np.dot(nV, nV.T); - dm = 1.0 - (nm / de) - dm[xrange(0,m),xrange(0,m)] = 0.0 - dm = squareform(dm) - elif mstr in set(['correlation', 'co']): - X2 = X - X.mean(1)[:,np.newaxis] - #X2 = X - np.matlib.repmat(np.mean(X, axis=1).reshape(m, 1), 1, n) - norms = np.sqrt(np.sum(X2 * X2, axis=1)) - _hierarchy_wrap.pdist_cosine_wrap(_convert_to_double(X2), _convert_to_double(dm), _convert_to_double(norms)) - elif mstr in set(['mahalanobis', 'mahal', 'mah']): - if VI is not None: - VI = _convert_to_double(np.asarray(VI)) - if type(VI) != np.ndarray: - raise TypeError('VI must be a numpy array.') - if VI.dtype != np.double: - raise TypeError('The array must contain 64-bit floats.') - [VI] = _copy_arrays_if_base_present([VI]) - else: - V = np.cov(X.T) - VI = _convert_to_double(np.linalg.inv(V).T.copy()) - # (u-v)V^(-1)(u-v)^T - _hierarchy_wrap.pdist_mahalanobis_wrap(_convert_to_double(X), VI, dm) - elif mstr == 'canberra': - _hierarchy_wrap.pdist_canberra_wrap(_convert_to_bool(X), dm) - elif mstr == 'braycurtis': - _hierarchy_wrap.pdist_bray_curtis_wrap(_convert_to_bool(X), dm) - elif mstr == 'yule': - _hierarchy_wrap.pdist_yule_bool_wrap(_convert_to_bool(X), dm) - elif mstr == 'matching': - _hierarchy_wrap.pdist_matching_bool_wrap(_convert_to_bool(X), dm) - elif mstr == 'kulsinski': - _hierarchy_wrap.pdist_kulsinski_bool_wrap(_convert_to_bool(X), dm) - elif mstr == 'dice': - _hierarchy_wrap.pdist_dice_bool_wrap(_convert_to_bool(X), dm) - elif mstr == 'rogerstanimoto': - _hierarchy_wrap.pdist_rogerstanimoto_bool_wrap(_convert_to_bool(X), dm) - elif mstr == 'russellrao': - _hierarchy_wrap.pdist_russellrao_bool_wrap(_convert_to_bool(X), dm) - elif mstr == 'sokalmichener': - _hierarchy_wrap.pdist_sokalmichener_bool_wrap(_convert_to_bool(X), dm) - elif mstr == 'sokalsneath': - _hierarchy_wrap.pdist_sokalsneath_bool_wrap(_convert_to_bool(X), dm) - elif metric == 'test_euclidean': - dm = pdist(X, euclidean) - elif metric == 'test_sqeuclidean': - if V is None: - V = _unbiased_variance(X) - else: - V = np.asarray(V) - dm = pdist(X, lambda u, v: seuclidean(u, v, V)) - elif metric == 'test_braycurtis': - dm = pdist(X, braycurtis) - elif metric == 'test_mahalanobis': - if VI is None: - V = np.cov(X.T) - VI = np.linalg.inv(V) - else: - VI = np.asarray(VI) - [VI] = _copy_arrays_if_base_present([VI]) - # (u-v)V^(-1)(u-v)^T - dm = pdist(X, (lambda u, v: mahalanobis(u, v, VI))) - elif metric == 'test_cityblock': - dm = pdist(X, cityblock) - elif metric == 'test_minkowski': - dm = pdist(X, minkowski, p) - elif metric == 'test_cosine': - dm = pdist(X, cosine) - elif metric == 'test_correlation': - dm = pdist(X, correlation) - elif metric == 'test_hamming': - dm 
= pdist(X, hamming) - elif metric == 'test_jaccard': - dm = pdist(X, jaccard) - elif metric == 'test_chebyshev' or metric == 'test_chebychev': - dm = pdist(X, chebyshev) - elif metric == 'test_yule': - dm = pdist(X, yule) - elif metric == 'test_matching': - dm = pdist(X, matching) - elif metric == 'test_dice': - dm = pdist(X, dice) - elif metric == 'test_kulsinski': - dm = pdist(X, kulsinski) - elif metric == 'test_rogerstanimoto': - dm = pdist(X, rogerstanimoto) - elif metric == 'test_russellrao': - dm = pdist(X, russellrao) - elif metric == 'test_sokalsneath': - dm = pdist(X, sokalsneath) - elif metric == 'test_sokalmichener': - dm = pdist(X, sokalmichener) - else: - raise ValueError('Unknown Distance Metric: %s' % mstr) - else: - raise TypeError('2nd argument metric must be a string identifier or a function.') - return dm - def cophenet(*args, **kwargs): """ d = cophenet(Z) Modified: trunk/scipy/cluster/setup.py =================================================================== --- trunk/scipy/cluster/setup.py 2008-06-07 08:13:02 UTC (rev 4416) +++ trunk/scipy/cluster/setup.py 2008-06-09 05:55:44 UTC (rev 4417) @@ -12,6 +12,10 @@ sources=[join('src', 'vq_module.c'), join('src', 'vq.c')], include_dirs = [get_numpy_include_dirs()]) + config.add_extension('_distance_wrap', + sources=[join('src', 'distance_wrap.c'), join('src', 'distance.c')], + include_dirs = [get_numpy_include_dirs()]) + config.add_extension('_hierarchy_wrap', sources=[join('src', 'hierarchy_wrap.c'), join('src', 'hierarchy.c')], include_dirs = [get_numpy_include_dirs()]) Added: trunk/scipy/cluster/src/common.h =================================================================== --- trunk/scipy/cluster/src/common.h 2008-06-07 08:13:02 UTC (rev 4416) +++ trunk/scipy/cluster/src/common.h 2008-06-09 05:55:44 UTC (rev 4417) @@ -0,0 +1,70 @@ +/** + * common.h + * + * Author: Damian Eads + * Date: September 22, 2007 (moved into new file on June 8, 2008) + * + * Copyright (c) 2007, 2008, Damian Eads. All rights reserved. + * Adapted for incorporation into Scipy, April 9, 2008. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the + * following disclaimer. + * - Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * - Neither the name of the author nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _CLUSTER_COMMON_H +#define _CLUSTER_COMMON_H + +#define CPY_MAX(_x, _y) ((_x > _y) ? (_x) : (_y)) +#define CPY_MIN(_x, _y) ((_x < _y) ? (_x) : (_y)) + +#define NCHOOSE2(_n) ((_n)*(_n-1)/2) + +#define CPY_BITS_PER_CHAR (sizeof(unsigned char) * 8) +#define CPY_FLAG_ARRAY_SIZE_BYTES(num_bits) (CPY_CEIL_DIV((num_bits), \ + CPY_BITS_PER_CHAR)) +#define CPY_GET_BIT(_xx, i) (((_xx)[(i) / CPY_BITS_PER_CHAR] >> \ + ((CPY_BITS_PER_CHAR-1) - \ + ((i) % CPY_BITS_PER_CHAR))) & 0x1) +#define CPY_SET_BIT(_xx, i) ((_xx)[(i) / CPY_BITS_PER_CHAR] |= \ + ((0x1) << ((CPY_BITS_PER_CHAR-1) \ + -((i) % CPY_BITS_PER_CHAR)))) +#define CPY_CLEAR_BIT(_xx, i) ((_xx)[(i) / CPY_BITS_PER_CHAR] &= \ + ~((0x1) << ((CPY_BITS_PER_CHAR-1) \ + -((i) % CPY_BITS_PER_CHAR)))) + +#ifndef CPY_CEIL_DIV +#define CPY_CEIL_DIV(x, y) ((((double)x)/(double)y) == \ + ((double)((x)/(y))) ? ((x)/(y)) : ((x)/(y) + 1)) +#endif + + +#ifdef CPY_DEBUG +#define CPY_DEBUG_MSG(...) fprintf(stderr, __VA_ARGS__) +#else +#define CPY_DEBUG_MSG(...) +#endif + +#endif Added: trunk/scipy/cluster/src/distance.c =================================================================== --- trunk/scipy/cluster/src/distance.c 2008-06-07 08:13:02 UTC (rev 4416) +++ trunk/scipy/cluster/src/distance.c 2008-06-09 05:55:44 UTC (rev 4417) @@ -0,0 +1,592 @@ +/** + * distance.c + * + * Author: Damian Eads + * Date: September 22, 2007 (moved to new file on June 8, 2008) + * + * Copyright (c) 2007, 2008, Damian Eads. All rights reserved. + * Adapted for incorporation into Scipy, April 9, 2008. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the + * following disclaimer. + * - Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * - Neither the name of the author nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include "common.h" +#include "distance.h" + +static inline double euclidean_distance(const double *u, const double *v, int n) { + int i = 0; + double s = 0.0, d; + for (i = 0; i < n; i++) { + d = u[i] - v[i]; + s = s + d * d; + } + return sqrt(s); +} + +static inline double ess_distance(const double *u, const double *v, int n) { + int i = 0; + double s = 0.0, d; + for (i = 0; i < n; i++) { + d = fabs(u[i] - v[i]); + s = s + d * d; + } + return s; +} + +static inline double chebyshev_distance(const double *u, const double *v, int n) { + int i = 0; + double d, maxv = 0.0; + for (i = 0; i < n; i++) { + d = fabs(u[i] - v[i]); + if (d > maxv) { + maxv = d; + } + } + return maxv; +} + +static inline double canberra_distance(const double *u, const double *v, int n) { + int i; + double s = 0.0; + for (i = 0; i < n; i++) { + s += (fabs(u[i] - v[i]) / (fabs(u[i]) + fabs(v[i]))); + } + return s; +} + +static inline double bray_curtis_distance(const double *u, const double *v, int n) { + int i; + double s1 = 0.0, s2 = 0.0; + for (i = 0; i < n; i++) { + s1 += fabs(u[i] - v[i]); + s2 += fabs(u[i] + v[i]); + } + return s1 / s2; +} + +static inline double mahalanobis_distance(const double *u, const double *v, + const double *covinv, double *dimbuf1, + double *dimbuf2, int n) { + int i, j; + double s; + const double *covrow = covinv; + for (i = 0; i < n; i++) { + dimbuf1[i] = u[i] - v[i]; + } + for (i = 0; i < n; i++) { + covrow = covinv + (i * n); + s = 0.0; + for (j = 0; j < n; j++) { + s += dimbuf1[j] * covrow[j]; + } + dimbuf2[i] = s; + } + s = 0.0; + for (i = 0; i < n; i++) { + s += dimbuf1[i] * dimbuf2[i]; + } + return sqrt(s); +} + +double hamming_distance(const double *u, const double *v, int n) { + int i = 0; + double s = 0.0; + for (i = 0; i < n; i++) { + s = s + (u[i] != v[i]); + } + return s / (double)n; +} + +static inline double hamming_distance_bool(const char *u, const char *v, int n) { + int i = 0; + double s = 0.0; + for (i = 0; i < n; i++) { + s = s + (u[i] != v[i]); + } + return s / (double)n; +} + +static inline double yule_distance_bool(const char *u, const char *v, int n) { + int i = 0; + int ntt = 0, nff = 0, nft = 0, ntf = 0; + for (i = 0; i < n; i++) { + ntt += (u[i] && v[i]); + ntf += (u[i] && !v[i]); + nft += (!u[i] && v[i]); + nff += (!u[i] && !v[i]); + } + return (2.0 * ntf * nft) / (double)(ntt * nff + ntf * nft); +} + +static inline double matching_distance_bool(const char *u, const char *v, int n) { + int i = 0; + int nft = 0, ntf = 0; + for (i = 0; i < n; i++) { + ntf += (u[i] && !v[i]); + nft += (!u[i] && v[i]); + } + return (double)(ntf + nft) / (double)(n); +} + +static inline double dice_distance_bool(const char *u, const char *v, int n) { + int i = 0; + int ntt = 0, nft = 0, ntf = 0; + for (i = 0; i < n; i++) { + ntt += (u[i] && v[i]); + ntf += (u[i] && !v[i]); + nft += (!u[i] && v[i]); + } + return (double)(nft + ntf) / (double)(2.0 * ntt + ntf + nft); +} + + +static inline double 
rogerstanimoto_distance_bool(const char *u, const char *v, int n) { + int i = 0; + int ntt = 0, nff = 0, nft = 0, ntf = 0; + for (i = 0; i < n; i++) { + ntt += (u[i] && v[i]); + ntf += (u[i] && !v[i]); + nft += (!u[i] && v[i]); + nff += (!u[i] && !v[i]); + } + return (2.0 * (ntf + nft)) / ((double)ntt + nff + (2.0 * (ntf + nft))); +} + +static inline double russellrao_distance_bool(const char *u, const char *v, int n) { + int i = 0; + /** int nff = 0, nft = 0, ntf = 0;**/ + int ntt = 0; + for (i = 0; i < n; i++) { + /** nff += (!u[i] && !v[i]); + ntf += (u[i] && !v[i]); + nft += (!u[i] && v[i]);**/ + ntt += (u[i] && v[i]); + } + /** return (double)(ntf + nft + nff) / (double)n;**/ + return (double) (n - ntt) / (double) n; +} + +static inline double kulsinski_distance_bool(const char *u, const char *v, int n) { + int _i = 0; + int ntt = 0, nft = 0, ntf = 0, nff = 0; + for (_i = 0; _i < n; _i++) { + ntt += (u[_i] && v[_i]); + ntf += (u[_i] && !v[_i]); + nft += (!u[_i] && v[_i]); + nff += (!u[_i] && !v[_i]); + } + return ((double)(ntf + nft - ntt + n)) / ((double)(ntf + nft + n)); +} + +static inline double sokalsneath_distance_bool(const char *u, const char *v, int n) { + int _i = 0; + int ntt = 0, nft = 0, ntf = 0; + for (_i = 0; _i < n; _i++) { + ntt += (u[_i] && v[_i]); + ntf += (u[_i] && !v[_i]); + nft += (!u[_i] && v[_i]); + } + return (2.0 * (ntf + nft))/(2.0 * (ntf + nft) + ntt); +} + +static inline double sokalmichener_distance_bool(const char *u, const char *v, int n) { + int _i = 0; + int ntt = 0, nft = 0, ntf = 0, nff = 0; + for (_i = 0; _i < n; _i++) { + ntt += (u[_i] && v[_i]); + nff += (!u[_i] && !v[_i]); + ntf += (u[_i] && !v[_i]); + nft += (!u[_i] && v[_i]); + } + return (2.0 * (ntf + nft))/(2.0 * (ntf + nft) + ntt + nff); +} + +static inline double jaccard_distance(const double *u, const double *v, int n) { + int i = 0; + double denom = 0.0, num = 0.0; + for (i = 0; i < n; i++) { + num += (u[i] != v[i]) && ((u[i] != 0.0) || (v[i] != 0.0)); + denom += (u[i] != 0.0) || (v[i] != 0.0); + } + return num / denom; +} + +static inline double jaccard_distance_bool(const char *u, const char *v, int n) { + int i = 0; + double num = 0.0, denom = 0.0; + for (i = 0; i < n; i++) { + num += (u[i] != v[i]) && ((u[i] != 0) || (v[i] != 0)); + denom += (u[i] != 0) || (v[i] != 0); + } + return num / denom; +} + +static inline double dot_product(const double *u, const double *v, int n) { + int i; + double s = 0.0; + for (i = 0; i < n; i++) { + s += u[i] * v[i]; + } + return s; +} + +static inline double cosine_distance(const double *u, const double *v, int n, + const double nu, const double nv) { + return 1.0 - (dot_product(u, v, n) / (nu * nv)); +} + +static inline double seuclidean_distance(const double *var, + const double *u, const double *v, int n) { + int i = 0; + double s = 0.0, d; + for (i = 0; i < n; i++) { + d = u[i] - v[i]; + s = s + (d * d) / var[i]; + } + return sqrt(s); +} + +static inline double city_block_distance(const double *u, const double *v, int n) { + int i = 0; + double s = 0.0, d; + for (i = 0; i < n; i++) { + d = fabs(u[i] - v[i]); + s = s + d; + } + return s; +} + +double minkowski_distance(const double *u, const double *v, int n, double p) { + int i = 0; + double s = 0.0, d; + for (i = 0; i < n; i++) { + d = fabs(u[i] - v[i]); + s = s + pow(d, p); + } + return pow(s, 1.0 / p); +} + +void compute_mean_vector(double *res, const double *X, int m, int n) { + int i, j; + const double *v; + for (i = 0; i < n; i++) { + res[i] = 0.0; + } + for (j = 0; j < m; j++) { + + v = X + 
(j * n); + for (i = 0; i < n; i++) { + res[i] += v[i]; + } + } + for (i = 0; i < n; i++) { + res[i] /= (double)m; + } +} + +void pdist_euclidean(const double *X, double *dm, int m, int n) { + int i, j; + const double *u, *v; + double *it = dm; + for (i = 0; i < m; i++) { + for (j = i + 1; j < m; j++, it++) { + u = X + (n * i); + v = X + (n * j); + *it = euclidean_distance(u, v, n); + } + } +} + +void pdist_mahalanobis(const double *X, const double *covinv, + double *dm, int m, int n) { + int i, j; + const double *u, *v; + double *it = dm; + double *dimbuf1, *dimbuf2; + dimbuf1 = (double*)malloc(sizeof(double) * 2 * n); + dimbuf2 = dimbuf1 + n; + for (i = 0; i < m; i++) { + for (j = i + 1; j < m; j++, it++) { + u = X + (n * i); + v = X + (n * j); + *it = mahalanobis_distance(u, v, covinv, dimbuf1, dimbuf2, n); + } + } + dimbuf2 = 0; + free(dimbuf1); +} + +void pdist_bray_curtis(const double *X, double *dm, int m, int n) { + int i, j; + const double *u, *v; + double *it = dm; + for (i = 0; i < m; i++) { + for (j = i + 1; j < m; j++, it++) { + u = X + (n * i); + v = X + (n * j); + *it = bray_curtis_distance(u, v, n); + } + } +} + +void pdist_canberra(const double *X, double *dm, int m, int n) { + int i, j; + const double *u, *v; + double *it = dm; + for (i = 0; i < m; i++) { + for (j = i + 1; j < m; j++, it++) { + u = X + (n * i); + v = X + (n * j); + *it = canberra_distance(u, v, n); + } + } +} + +void pdist_hamming(const double *X, double *dm, int m, int n) { + int i, j; + const double *u, *v; + double *it = dm; + for (i = 0; i < m; i++) { + for (j = i + 1; j < m; j++, it++) { + u = X + (n * i); + v = X + (n * j); + *it = hamming_distance(u, v, n); + } + } +} + +void pdist_hamming_bool(const char *X, double *dm, int m, int n) { + int i, j; + const char *u, *v; + double *it = dm; + for (i = 0; i < m; i++) { + for (j = i + 1; j < m; j++, it++) { + u = X + (n * i); + v = X + (n * j); + *it = hamming_distance_bool(u, v, n); + } + } +} + +void pdist_jaccard(const double *X, double *dm, int m, int n) { + int i, j; + const double *u, *v; + double *it = dm; + for (i = 0; i < m; i++) { + for (j = i + 1; j < m; j++, it++) { + u = X + (n * i); + v = X + (n * j); + *it = jaccard_distance(u, v, n); + } + } +} + +void pdist_jaccard_bool(const char *X, double *dm, int m, int n) { + int i, j; + const char *u, *v; + double *it = dm; + for (i = 0; i < m; i++) { + for (j = i + 1; j < m; j++, it++) { + u = X + (n * i); + v = X + (n * j); + *it = jaccard_distance_bool(u, v, n); + } + } +} + + +void pdist_chebyshev(const double *X, double *dm, int m, int n) { + int i, j; + const double *u, *v; + double *it = dm; + for (i = 0; i < m; i++) { + for (j = i + 1; j < m; j++, it++) { + u = X + (n * i); + v = X + (n * j); + *it = chebyshev_distance(u, v, n); + } + } +} + +void pdist_cosine(const double *X, double *dm, int m, int n, const double *norms) { + int i, j; + const double *u, *v; + double *it = dm; + for (i = 0; i < m; i++) { + for (j = i + 1; j < m; j++, it++) { + u = X + (n * i); + v = X + (n * j); + *it = cosine_distance(u, v, n, norms[i], norms[j]); + } + } +} + +void pdist_seuclidean(const double *X, const double *var, + double *dm, int m, int n) { + int i, j; + const double *u, *v; + double *it = dm; + for (i = 0; i < m; i++) { + for (j = i + 1; j < m; j++, it++) { + u = X + (n * i); + v = X + (n * j); + *it = seuclidean_distance(var, u, v, n); + } + } +} + +void pdist_city_block(const double *X, double *dm, int m, int n) { + int i, j; + const double *u, *v; + double *it = dm; + for (i = 0; i < m; i++) { 
+ for (j = i + 1; j < m; j++, it++) { + u = X + (n * i); + v = X + (n * j); + *it = city_block_distance(u, v, n); + } + } +} + +void pdist_minkowski(const double *X, double *dm, int m, int n, double p) { + int i, j; + const double *u, *v; + double *it = dm; + for (i = 0; i < m; i++) { + for (j = i + 1; j < m; j++, it++) { + u = X + (n * i); + v = X + (n * j); + *it = minkowski_distance(u, v, n, p); + } + } +} + +void pdist_yule_bool(const char *X, double *dm, int m, int n) { + int i, j; + const char *u, *v; + double *it = dm; + for (i = 0; i < m; i++) { + for (j = i + 1; j < m; j++, it++) { + u = X + (n * i); + v = X + (n * j); + *it = yule_distance_bool(u, v, n); + } + } +} + +void pdist_matching_bool(const char *X, double *dm, int m, int n) { + int i, j; + const char *u, *v; + double *it = dm; + for (i = 0; i < m; i++) { + for (j = i + 1; j < m; j++, it++) { + u = X + (n * i); + v = X + (n * j); + *it = matching_distance_bool(u, v, n); + } + } +} + +void pdist_dice_bool(const char *X, double *dm, int m, int n) { + int i, j; + const char *u, *v; + double *it = dm; + for (i = 0; i < m; i++) { + for (j = i + 1; j < m; j++, it++) { + u = X + (n * i); + v = X + (n * j); + *it = dice_distance_bool(u, v, n); + } + } +} + +void pdist_rogerstanimoto_bool(const char *X, double *dm, int m, int n) { + int i, j; + const char *u, *v; + double *it = dm; + for (i = 0; i < m; i++) { + for (j = i + 1; j < m; j++, it++) { + u = X + (n * i); + v = X + (n * j); + *it = rogerstanimoto_distance_bool(u, v, n); + } + } +} + +void pdist_russellrao_bool(const char *X, double *dm, int m, int n) { + int i, j; + const char *u, *v; + double *it = dm; + for (i = 0; i < m; i++) { + for (j = i + 1; j < m; j++, it++) { + u = X + (n * i); + v = X + (n * j); + *it = russellrao_distance_bool(u, v, n); + } + } +} + +void pdist_kulsinski_bool(const char *X, double *dm, int m, int n) { + int i, j; + const char *u, *v; + double *it = dm; + for (i = 0; i < m; i++) { + for (j = i + 1; j < m; j++, it++) { + u = X + (n * i); + v = X + (n * j); + *it = kulsinski_distance_bool(u, v, n); + } + } +} + +void pdist_sokalsneath_bool(const char *X, double *dm, int m, int n) { + int i, j; + const char *u, *v; + double *it = dm; + for (i = 0; i < m; i++) { + for (j = i + 1; j < m; j++, it++) { + u = X + (n * i); + v = X + (n * j); + *it = sokalsneath_distance_bool(u, v, n); + } + } +} + +void pdist_sokalmichener_bool(const char *X, double *dm, int m, int n) { + int i, j; + const char *u, *v; + double *it = dm; + for (i = 0; i < m; i++) { + for (j = i + 1; j < m; j++, it++) { + u = X + (n * i); + v = X + (n * j); + *it = sokalmichener_distance_bool(u, v, n); + } + } +} Added: trunk/scipy/cluster/src/distance.h =================================================================== --- trunk/scipy/cluster/src/distance.h 2008-06-07 08:13:02 UTC (rev 4416) +++ trunk/scipy/cluster/src/distance.h 2008-06-09 05:55:44 UTC (rev 4417) @@ -0,0 +1,64 @@ +/** + * distance.h + * + * Author: Damian Eads + * Date: September 22, 2007 (moved to new file on June 8, 2008) + * Adapted for incorporation into Scipy, April 9, 2008. + * + * Copyright (c) 2007, 2008, Damian Eads. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the + * following disclaimer. 
+ * - Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * - Neither the name of the author nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _CPY_DISTANCE_H +#define _CPY_DISTANCE_H + +void pdist_euclidean(const double *X, double *dm, int m, int n); +void pdist_seuclidean(const double *X, + const double *var, double *dm, int m, int n); +void pdist_mahalanobis(const double *X, const double *covinv, + double *dm, int m, int n); +void pdist_bray_curtis(const double *X, double *dm, int m, int n); +void pdist_canberra(const double *X, double *dm, int m, int n); +void pdist_hamming(const double *X, double *dm, int m, int n); +void pdist_hamming_bool(const char *X, double *dm, int m, int n); +void pdist_city_block(const double *X, double *dm, int m, int n); +void pdist_cosine(const double *X, double *dm, int m, int n, const double *norms); +void pdist_chebyshev(const double *X, double *dm, int m, int n); +void pdist_jaccard(const double *X, double *dm, int m, int n); +void pdist_jaccard_bool(const char *X, double *dm, int m, int n); +void pdist_kulsinski_bool(const char *X, double *dm, int m, int n); +void pdist_minkowski(const double *X, double *dm, int m, int n, double p); +void pdist_yule_bool(const char *X, double *dm, int m, int n); +void pdist_matching_bool(const char *X, double *dm, int m, int n); +void pdist_dice_bool(const char *X, double *dm, int m, int n); +void pdist_rogerstanimoto_bool(const char *X, double *dm, int m, int n); +void pdist_russellrao_bool(const char *X, double *dm, int m, int n); +void pdist_sokalmichener_bool(const char *X, double *dm, int m, int n); +void pdist_sokalsneath_bool(const char *X, double *dm, int m, int n); + +#endif Added: trunk/scipy/cluster/src/distance_wrap.c =================================================================== --- trunk/scipy/cluster/src/distance_wrap.c 2008-06-07 08:13:02 UTC (rev 4416) +++ trunk/scipy/cluster/src/distance_wrap.c 2008-06-09 05:55:44 UTC (rev 4417) @@ -0,0 +1,525 @@ +/** + * distance_wrap.c + * + * Author: Damian Eads + * Date: September 22, 2007 (moved to new file on June 8, 2008) + * Adapted for incorporation into Scipy, April 9, 2008. + * + * Copyright (c) 2007, Damian Eads. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the + * following disclaimer. + * - Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * - Neither the name of the author nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include "distance.h" +#include "Python.h" +#include +#include + +extern PyObject *pdist_euclidean_wrap(PyObject *self, PyObject *args) { + PyArrayObject *X_, *dm_; + int m, n; + double *dm; + const double *X; + if (!PyArg_ParseTuple(args, "O!O!", + &PyArray_Type, &X_, + &PyArray_Type, &dm_)) { + return 0; + } + else { + X = (const double*)X_->data; + dm = (double*)dm_->data; + m = X_->dimensions[0]; + n = X_->dimensions[1]; + + pdist_euclidean(X, dm, m, n); + } + return Py_BuildValue("d", 0.0); +} + +extern PyObject *pdist_canberra_wrap(PyObject *self, PyObject *args) { + PyArrayObject *X_, *dm_; + int m, n; + double *dm; + const double *X; + if (!PyArg_ParseTuple(args, "O!O!", + &PyArray_Type, &X_, + &PyArray_Type, &dm_)) { + return 0; + } + else { + X = (const double*)X_->data; + dm = (double*)dm_->data; + m = X_->dimensions[0]; + n = X_->dimensions[1]; + + pdist_canberra(X, dm, m, n); + } + return Py_BuildValue("d", 0.0); +} + +extern PyObject *pdist_bray_curtis_wrap(PyObject *self, PyObject *args) { + PyArrayObject *X_, *dm_; + int m, n; + double *dm; + const double *X; + if (!PyArg_ParseTuple(args, "O!O!", + &PyArray_Type, &X_, + &PyArray_Type, &dm_)) { + return 0; + } + else { + X = (const double*)X_->data; + dm = (double*)dm_->data; + m = X_->dimensions[0]; + n = X_->dimensions[1]; + + pdist_bray_curtis(X, dm, m, n); + } + return Py_BuildValue("d", 0.0); +} + + +extern PyObject *pdist_mahalanobis_wrap(PyObject *self, PyObject *args) { + PyArrayObject *X_, *covinv_, *dm_; + int m, n; + double *dm; + const double *X; + const double *covinv; + if (!PyArg_ParseTuple(args, "O!O!O!", + &PyArray_Type, &X_, + &PyArray_Type, &covinv_, + &PyArray_Type, &dm_)) { + return 0; + } + else { + X = (const double*)X_->data; + covinv = (const double*)covinv_->data; + dm = (double*)dm_->data; + m = X_->dimensions[0]; + n = X_->dimensions[1]; + + pdist_mahalanobis(X, covinv, dm, m, n); + } + return Py_BuildValue("d", 0.0); +} + + +extern PyObject *pdist_chebyshev_wrap(PyObject *self, PyObject *args) 
{ + PyArrayObject *X_, *dm_; + int m, n; + double *dm; + const double *X; + if (!PyArg_ParseTuple(args, "O!O!", + &PyArray_Type, &X_, + &PyArray_Type, &dm_)) { + return 0; + } + else { + X = (const double*)X_->data; + dm = (double*)dm_->data; + m = X_->dimensions[0]; + n = X_->dimensions[1]; + + pdist_chebyshev(X, dm, m, n); + } + return Py_BuildValue("d", 0.0); +} + + +extern PyObject *pdist_cosine_wrap(PyObject *self, PyObject *args) { + PyArrayObject *X_, *dm_, *norms_; + int m, n; + double *dm; + const double *X, *norms; + if (!PyArg_ParseTuple(args, "O!O!O!", + &PyArray_Type, &X_, + &PyArray_Type, &dm_, + &PyArray_Type, &norms_)) { + return 0; + } + else { + X = (const double*)X_->data; + dm = (double*)dm_->data; + norms = (const double*)norms_->data; + m = X_->dimensions[0]; + n = X_->dimensions[1]; + + pdist_cosine(X, dm, m, n, norms); + } + return Py_BuildValue("d", 0.0); +} + +extern PyObject *pdist_seuclidean_wrap(PyObject *self, PyObject *args) { + PyArrayObject *X_, *dm_, *var_; + int m, n; + double *dm; + const double *X, *var; + if (!PyArg_ParseTuple(args, "O!O!O!", + &PyArray_Type, &X_, + &PyArray_Type, &var_, + &PyArray_Type, &dm_)) { + return 0; + } + else { + X = (double*)X_->data; + dm = (double*)dm_->data; + var = (double*)var_->data; + m = X_->dimensions[0]; + n = X_->dimensions[1]; + + pdist_seuclidean(X, var, dm, m, n); + } + return Py_BuildValue("d", 0.0); +} + +extern PyObject *pdist_city_block_wrap(PyObject *self, PyObject *args) { + PyArrayObject *X_, *dm_; + int m, n; + double *dm; + const double *X; + if (!PyArg_ParseTuple(args, "O!O!", + &PyArray_Type, &X_, + &PyArray_Type, &dm_)) { + return 0; + } + else { + X = (const double*)X_->data; + dm = (double*)dm_->data; + m = X_->dimensions[0]; + n = X_->dimensions[1]; + + pdist_city_block(X, dm, m, n); + } + return Py_BuildValue("d", 0.0); +} + +extern PyObject *pdist_hamming_wrap(PyObject *self, PyObject *args) { + PyArrayObject *X_, *dm_; + int m, n; + double *dm; + const double *X; + if (!PyArg_ParseTuple(args, "O!O!", + &PyArray_Type, &X_, + &PyArray_Type, &dm_)) { + return 0; + } + else { + X = (const double*)X_->data; + dm = (double*)dm_->data; + m = X_->dimensions[0]; + n = X_->dimensions[1]; + + pdist_hamming(X, dm, m, n); + } + return Py_BuildValue("d", 0.0); +} + +extern PyObject *pdist_hamming_bool_wrap(PyObject *self, PyObject *args) { + PyArrayObject *X_, *dm_; + int m, n; + double *dm; + const char *X; + if (!PyArg_ParseTuple(args, "O!O!", + &PyArray_Type, &X_, + &PyArray_Type, &dm_)) { + return 0; + } + else { + X = (const char*)X_->data; + dm = (double*)dm_->data; + m = X_->dimensions[0]; + n = X_->dimensions[1]; + + pdist_hamming_bool(X, dm, m, n); + } + return Py_BuildValue("d", 0.0); +} + +extern PyObject *pdist_jaccard_wrap(PyObject *self, PyObject *args) { + PyArrayObject *X_, *dm_; + int m, n; + double *dm; + const double *X; + if (!PyArg_ParseTuple(args, "O!O!", + &PyArray_Type, &X_, + &PyArray_Type, &dm_)) { + return 0; + } + else { + X = (const double*)X_->data; + dm = (double*)dm_->data; + m = X_->dimensions[0]; + n = X_->dimensions[1]; + + pdist_jaccard(X, dm, m, n); + } + return Py_BuildValue("d", 0.0); +} + +extern PyObject *pdist_jaccard_bool_wrap(PyObject *self, PyObject *args) { + PyArrayObject *X_, *dm_; + int m, n; + double *dm; + const char *X; + if (!PyArg_ParseTuple(args, "O!O!", + &PyArray_Type, &X_, + &PyArray_Type, &dm_)) { + return 0; + } + else { + X = (const char*)X_->data; + dm = (double*)dm_->data; + m = X_->dimensions[0]; + n = X_->dimensions[1]; + + 
pdist_jaccard_bool(X, dm, m, n); + } + return Py_BuildValue("d", 0.0); +} + +extern PyObject *pdist_minkowski_wrap(PyObject *self, PyObject *args) { + PyArrayObject *X_, *dm_; + int m, n; + double *dm, *X; + double p; + if (!PyArg_ParseTuple(args, "O!O!d", + &PyArray_Type, &X_, + &PyArray_Type, &dm_, + &p)) { + return 0; + } + else { + X = (double*)X_->data; + dm = (double*)dm_->data; + m = X_->dimensions[0]; + n = X_->dimensions[1]; + + pdist_minkowski(X, dm, m, n, p); + } + return Py_BuildValue("d", 0.0); +} + + +extern PyObject *pdist_yule_bool_wrap(PyObject *self, PyObject *args) { + PyArrayObject *X_, *dm_; + int m, n; + double *dm; + const char *X; + if (!PyArg_ParseTuple(args, "O!O!", + &PyArray_Type, &X_, + &PyArray_Type, &dm_)) { + return 0; + } + else { + X = (const char*)X_->data; + dm = (double*)dm_->data; + m = X_->dimensions[0]; + n = X_->dimensions[1]; + + pdist_yule_bool(X, dm, m, n); + } + return Py_BuildValue(""); +} + +extern PyObject *pdist_matching_bool_wrap(PyObject *self, PyObject *args) { + PyArrayObject *X_, *dm_; + int m, n; + double *dm; + const char *X; + if (!PyArg_ParseTuple(args, "O!O!", + &PyArray_Type, &X_, + &PyArray_Type, &dm_)) { + return 0; + } + else { + X = (const char*)X_->data; + dm = (double*)dm_->data; + m = X_->dimensions[0]; + n = X_->dimensions[1]; + + pdist_matching_bool(X, dm, m, n); + } + return Py_BuildValue(""); +} + +extern PyObject *pdist_dice_bool_wrap(PyObject *self, PyObject *args) { + PyArrayObject *X_, *dm_; + int m, n; + double *dm; + const char *X; + if (!PyArg_ParseTuple(args, "O!O!", + &PyArray_Type, &X_, + &PyArray_Type, &dm_)) { + return 0; + } + else { + X = (const char*)X_->data; + dm = (double*)dm_->data; + m = X_->dimensions[0]; + n = X_->dimensions[1]; + + pdist_dice_bool(X, dm, m, n); + } + return Py_BuildValue(""); +} + +extern PyObject *pdist_rogerstanimoto_bool_wrap(PyObject *self, PyObject *args) { + PyArrayObject *X_, *dm_; + int m, n; + double *dm; + const char *X; + if (!PyArg_ParseTuple(args, "O!O!", + &PyArray_Type, &X_, + &PyArray_Type, &dm_)) { + return 0; + } + else { + X = (const char*)X_->data; + dm = (double*)dm_->data; + m = X_->dimensions[0]; + n = X_->dimensions[1]; + + pdist_rogerstanimoto_bool(X, dm, m, n); + } + return Py_BuildValue(""); +} + +extern PyObject *pdist_russellrao_bool_wrap(PyObject *self, PyObject *args) { + PyArrayObject *X_, *dm_; + int m, n; + double *dm; + const char *X; + if (!PyArg_ParseTuple(args, "O!O!", + &PyArray_Type, &X_, + &PyArray_Type, &dm_)) { + return 0; + } + else { + X = (const char*)X_->data; + dm = (double*)dm_->data; + m = X_->dimensions[0]; + n = X_->dimensions[1]; + + pdist_russellrao_bool(X, dm, m, n); + } + return Py_BuildValue(""); +} + +extern PyObject *pdist_kulsinski_bool_wrap(PyObject *self, PyObject *args) { + PyArrayObject *X_, *dm_; + int m, n; + double *dm; + const char *X; + if (!PyArg_ParseTuple(args, "O!O!", + &PyArray_Type, &X_, + &PyArray_Type, &dm_)) { + return 0; + } + else { + X = (const char*)X_->data; + dm = (double*)dm_->data; + m = X_->dimensions[0]; + n = X_->dimensions[1]; + + pdist_kulsinski_bool(X, dm, m, n); + } + return Py_BuildValue(""); +} + +extern PyObject *pdist_sokalmichener_bool_wrap(PyObject *self, PyObject *args) { + PyArrayObject *X_, *dm_; + int m, n; + double *dm; + const char *X; + if (!PyArg_ParseTuple(args, "O!O!", + &PyArray_Type, &X_, + &PyArray_Type, &dm_)) { + return 0; + } + else { + X = (const char*)X_->data; + dm = (double*)dm_->data; + m = X_->dimensions[0]; + n = X_->dimensions[1]; + + 
pdist_sokalmichener_bool(X, dm, m, n); + } + return Py_BuildValue(""); +} + +extern PyObject *pdist_sokalsneath_bool_wrap(PyObject *self, PyObject *args) { + PyArrayObject *X_, *dm_; + int m, n; + double *dm; + const char *X; + if (!PyArg_ParseTuple(args, "O!O!", + &PyArray_Type, &X_, + &PyArray_Type, &dm_)) { + return 0; + } + else { + X = (const char*)X_->data; + dm = (double*)dm_->data; + m = X_->dimensions[0]; + n = X_->dimensions[1]; + + pdist_sokalsneath_bool(X, dm, m, n); + } + return Py_BuildValue(""); +} + + +static PyMethodDef _distanceWrapMethods[] = { + {"pdist_bray_curtis_wrap", pdist_bray_curtis_wrap, METH_VARARGS}, + {"pdist_canberra_wrap", pdist_canberra_wrap, METH_VARARGS}, + {"pdist_chebyshev_wrap", pdist_chebyshev_wrap, METH_VARARGS}, + {"pdist_city_block_wrap", pdist_city_block_wrap, METH_VARARGS}, + {"pdist_cosine_wrap", pdist_cosine_wrap, METH_VARARGS}, + {"pdist_dice_bool_wrap", pdist_dice_bool_wrap, METH_VARARGS}, + {"pdist_euclidean_wrap", pdist_euclidean_wrap, METH_VARARGS}, + {"pdist_hamming_wrap", pdist_hamming_wrap, METH_VARARGS}, + {"pdist_hamming_bool_wrap", pdist_hamming_bool_wrap, METH_VARARGS}, + {"pdist_jaccard_wrap", pdist_jaccard_wrap, METH_VARARGS}, + {"pdist_jaccard_bool_wrap", pdist_jaccard_bool_wrap, METH_VARARGS}, + {"pdist_kulsinski_bool_wrap", pdist_kulsinski_bool_wrap, METH_VARARGS}, + {"pdist_mahalanobis_wrap", pdist_mahalanobis_wrap, METH_VARARGS}, + {"pdist_matching_bool_wrap", pdist_matching_bool_wrap, METH_VARARGS}, + {"pdist_minkowski_wrap", pdist_minkowski_wrap, METH_VARARGS}, + {"pdist_rogerstanimoto_bool_wrap", pdist_rogerstanimoto_bool_wrap, METH_VARARGS}, + {"pdist_russellrao_bool_wrap", pdist_russellrao_bool_wrap, METH_VARARGS}, + {"pdist_seuclidean_wrap", pdist_seuclidean_wrap, METH_VARARGS}, + {"pdist_sokalmichener_bool_wrap", pdist_sokalmichener_bool_wrap, METH_VARARGS}, + {"pdist_sokalsneath_bool_wrap", pdist_sokalsneath_bool_wrap, METH_VARARGS}, + {"pdist_yule_bool_wrap", pdist_yule_bool_wrap, METH_VARARGS}, + {NULL, NULL} /* Sentinel - marks the end of this structure */ +}; + +void init_distance_wrap(void) { + (void) Py_InitModule("_distance_wrap", _distanceWrapMethods); + import_array(); // Must be present for NumPy. Called first after above line. +} Modified: trunk/scipy/cluster/src/hierarchy.c =================================================================== --- trunk/scipy/cluster/src/hierarchy.c 2008-06-07 08:13:02 UTC (rev 4416) +++ trunk/scipy/cluster/src/hierarchy.c 2008-06-09 05:55:44 UTC (rev 4417) @@ -34,12 +34,11 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -#define NCHOOSE2(_n) ((_n)*(_n-1)/2) +#include "common.h" + #define ISCLUSTER(_nd) ((_nd)->id >= n) #define GETCLUSTER(_id) ((lists + _id - n)) -#define CPY_MAX(_x, _y) ((_x > _y) ? (_x) : (_y)) -#define CPY_MIN(_x, _y) ((_x < _y) ? (_x) : (_y)) /** The number of link stats (for the inconsistency computation) for each cluster. 
*/ @@ -61,39 +60,15 @@ #define CPY_LIN_DIST 2 #define CPY_LIN_CNT 3 -#define CPY_BITS_PER_CHAR (sizeof(unsigned char) * 8) -#define CPY_FLAG_ARRAY_SIZE_BYTES(num_bits) (CPY_CEIL_DIV((num_bits), \ - CPY_BITS_PER_CHAR)) -#define CPY_GET_BIT(_xx, i) (((_xx)[(i) / CPY_BITS_PER_CHAR] >> \ - ((CPY_BITS_PER_CHAR-1) - \ - ((i) % CPY_BITS_PER_CHAR))) & 0x1) -#define CPY_SET_BIT(_xx, i) ((_xx)[(i) / CPY_BITS_PER_CHAR] |= \ - ((0x1) << ((CPY_BITS_PER_CHAR-1) \ - -((i) % CPY_BITS_PER_CHAR)))) -#define CPY_CLEAR_BIT(_xx, i) ((_xx)[(i) / CPY_BITS_PER_CHAR] &= \ - ~((0x1) << ((CPY_BITS_PER_CHAR-1) \ - -((i) % CPY_BITS_PER_CHAR)))) - -#ifndef CPY_CEIL_DIV -#define CPY_CEIL_DIV(x, y) ((((double)x)/(double)y) == \ - ((double)((x)/(y))) ? ((x)/(y)) : ((x)/(y) + 1)) -#endif - - -#ifdef CPY_DEBUG -#define CPY_DEBUG_MSG(...) fprintf(stderr, __VA_ARGS__) -#else -#define CPY_DEBUG_MSG(...) -#endif - #include #include #include #include #include "hierarchy.h" +#include "distance.h" -double euclidean_distance(const double *u, const double *v, int n) { +static inline double euclidean_distance(const double *u, const double *v, int n) { int i = 0; double s = 0.0, d; for (i = 0; i < n; i++) { @@ -103,548 +78,6 @@ return sqrt(s); } -double ess_distance(const double *u, const double *v, int n) { - int i = 0; - double s = 0.0, d; - for (i = 0; i < n; i++) { - d = fabs(u[i] - v[i]); - s = s + d * d; - } - return s; -} - -double chebyshev_distance(const double *u, const double *v, int n) { - int i = 0; - double d, maxv = 0.0; - for (i = 0; i < n; i++) { - d = fabs(u[i] - v[i]); - if (d > maxv) { - maxv = d; - } - } - return maxv; -} - -double canberra_distance(const double *u, const double *v, int n) { - int i; - double s = 0.0; - for (i = 0; i < n; i++) { - s += (fabs(u[i] - v[i]) / (fabs(u[i]) + fabs(v[i]))); - } - return s; -} - -double bray_curtis_distance(const double *u, const double *v, int n) { - int i; - double s1 = 0.0, s2 = 0.0; - for (i = 0; i < n; i++) { - s1 += fabs(u[i] - v[i]); - s2 += fabs(u[i] + v[i]); - } - return s1 / s2; -} - -double mahalanobis_distance(const double *u, const double *v, - const double *covinv, double *dimbuf1, - double *dimbuf2, int n) { - int i, j; - double s; - const double *covrow = covinv; - for (i = 0; i < n; i++) { - dimbuf1[i] = u[i] - v[i]; - } - for (i = 0; i < n; i++) { - covrow = covinv + (i * n); - s = 0.0; - for (j = 0; j < n; j++) { - s += dimbuf1[j] * covrow[j]; - } - dimbuf2[i] = s; - } - s = 0.0; - for (i = 0; i < n; i++) { - s += dimbuf1[i] * dimbuf2[i]; - } - return sqrt(s); -} - -double hamming_distance(const double *u, const double *v, int n) { - int i = 0; - double s = 0.0; - for (i = 0; i < n; i++) { - s = s + (u[i] != v[i]); - } - return s / (double)n; -} - -double hamming_distance_bool(const char *u, const char *v, int n) { - int i = 0; - double s = 0.0; - for (i = 0; i < n; i++) { - s = s + (u[i] != v[i]); - } - return s / (double)n; -} - -double yule_distance_bool(const char *u, const char *v, int n) { - int i = 0; - int ntt = 0, nff = 0, nft = 0, ntf = 0; - for (i = 0; i < n; i++) { - ntt += (u[i] && v[i]); - ntf += (u[i] && !v[i]); - nft += (!u[i] && v[i]); - nff += (!u[i] && !v[i]); - } - return (2.0 * ntf * nft) / (double)(ntt * nff + ntf * nft); -} - -double matching_distance_bool(const char *u, const char *v, int n) { - int i = 0; - int nft = 0, ntf = 0; - for (i = 0; i < n; i++) { - ntf += (u[i] && !v[i]); - nft += (!u[i] && v[i]); - } - return (double)(ntf + nft) / (double)(n); -} - -double dice_distance_bool(const char *u, const char *v, int n) { - 
int i = 0; - int ntt = 0, nft = 0, ntf = 0; - for (i = 0; i < n; i++) { - ntt += (u[i] && v[i]); - ntf += (u[i] && !v[i]); - nft += (!u[i] && v[i]); - } - return (double)(nft + ntf) / (double)(2.0 * ntt + ntf + nft); -} - - -double rogerstanimoto_distance_bool(const char *u, const char *v, int n) { - int i = 0; - int ntt = 0, nff = 0, nft = 0, ntf = 0; - for (i = 0; i < n; i++) { - ntt += (u[i] && v[i]); - ntf += (u[i] && !v[i]); - nft += (!u[i] && v[i]); - nff += (!u[i] && !v[i]); - } - return (2.0 * (ntf + nft)) / ((double)ntt + nff + (2.0 * (ntf + nft))); -} - -double russellrao_distance_bool(const char *u, const char *v, int n) { - int i = 0; - /** int nff = 0, nft = 0, ntf = 0;**/ - int ntt = 0; - for (i = 0; i < n; i++) { - /** nff += (!u[i] && !v[i]); - ntf += (u[i] && !v[i]); - nft += (!u[i] && v[i]);**/ - ntt += (u[i] && v[i]); - } - /** return (double)(ntf + nft + nff) / (double)n;**/ - return (double) (n - ntt) / (double) n; -} - -static inline double kulsinski_distance_bool(const char *u, const char *v, int n) { - int _i = 0; - int ntt = 0, nft = 0, ntf = 0, nff = 0; - for (_i = 0; _i < n; _i++) { - ntt += (u[_i] && v[_i]); - ntf += (u[_i] && !v[_i]); - nft += (!u[_i] && v[_i]); - nff += (!u[_i] && !v[_i]); - } - return ((double)(ntf + nft - ntt + n)) / ((double)(ntf + nft + n)); -} - -static inline double sokalsneath_distance_bool(const char *u, const char *v, int n) { - int _i = 0; - int ntt = 0, nft = 0, ntf = 0; - for (_i = 0; _i < n; _i++) { - ntt += (u[_i] && v[_i]); - ntf += (u[_i] && !v[_i]); - nft += (!u[_i] && v[_i]); - } - return (2.0 * (ntf + nft))/(2.0 * (ntf + nft) + ntt); -} - -static inline double sokalmichener_distance_bool(const char *u, const char *v, int n) { - int _i = 0; - int ntt = 0, nft = 0, ntf = 0, nff = 0; - for (_i = 0; _i < n; _i++) { - ntt += (u[_i] && v[_i]); - nff += (!u[_i] && !v[_i]); - ntf += (u[_i] && !v[_i]); - nft += (!u[_i] && v[_i]); - } - return (2.0 * (ntf + nft))/(2.0 * (ntf + nft) + ntt + nff); -} - -double jaccard_distance(const double *u, const double *v, int n) { - int i = 0; - double denom = 0.0, num = 0.0; - for (i = 0; i < n; i++) { - num += (u[i] != v[i]) && ((u[i] != 0.0) || (v[i] != 0.0)); - denom += (u[i] != 0.0) || (v[i] != 0.0); - } - return num / denom; -} - -double jaccard_distance_bool(const char *u, const char *v, int n) { - int i = 0; - double num = 0.0, denom = 0.0; - for (i = 0; i < n; i++) { - num += (u[i] != v[i]) && ((u[i] != 0) || (v[i] != 0)); - denom += (u[i] != 0) || (v[i] != 0); - } - return num / denom; -} - -double dot_product(const double *u, const double *v, int n) { - int i; - double s = 0.0; - for (i = 0; i < n; i++) { - s += u[i] * v[i]; - } - return s; -} - -double cosine_distance(const double *u, const double *v, int n, - const double nu, const double nv) { - return 1.0 - (dot_product(u, v, n) / (nu * nv)); -} - -double seuclidean_distance(const double *var, - const double *u, const double *v, int n) { - int i = 0; - double s = 0.0, d; - for (i = 0; i < n; i++) { - d = u[i] - v[i]; - s = s + (d * d) / var[i]; - } - return sqrt(s); -} - -double city_block_distance(const double *u, const double *v, int n) { - int i = 0; - double s = 0.0, d; - for (i = 0; i < n; i++) { - d = fabs(u[i] - v[i]); - s = s + d; - } - return s; -} - -double minkowski_distance(const double *u, const double *v, int n, double p) { - int i = 0; - double s = 0.0, d; - for (i = 0; i < n; i++) { - d = fabs(u[i] - v[i]); - s = s + pow(d, p); - } - return pow(s, 1.0 / p); -} - -void compute_mean_vector(double *res, const double *X, 
int m, int n) { - int i, j; - const double *v; - for (i = 0; i < n; i++) { - res[i] = 0.0; - } - for (j = 0; j < m; j++) { - - v = X + (j * n); - for (i = 0; i < n; i++) { - res[i] += v[i]; - } - } - for (i = 0; i < n; i++) { - res[i] /= (double)m; - } -} - -void pdist_euclidean(const double *X, double *dm, int m, int n) { - int i, j; - const double *u, *v; - double *it = dm; - for (i = 0; i < m; i++) { - for (j = i + 1; j < m; j++, it++) { - u = X + (n * i); - v = X + (n * j); - *it = euclidean_distance(u, v, n); - } - } -} - -void pdist_mahalanobis(const double *X, const double *covinv, - double *dm, int m, int n) { - int i, j; - const double *u, *v; - double *it = dm; - double *dimbuf1, *dimbuf2; - dimbuf1 = (double*)malloc(sizeof(double) * 2 * n); - dimbuf2 = dimbuf1 + n; - for (i = 0; i < m; i++) { - for (j = i + 1; j < m; j++, it++) { - u = X + (n * i); - v = X + (n * j); - *it = mahalanobis_distance(u, v, covinv, dimbuf1, dimbuf2, n); - } - } - dimbuf2 = 0; - free(dimbuf1); -} - -void pdist_bray_curtis(const double *X, double *dm, int m, int n) { - int i, j; - const double *u, *v; - double *it = dm; - for (i = 0; i < m; i++) { - for (j = i + 1; j < m; j++, it++) { - u = X + (n * i); - v = X + (n * j); - *it = bray_curtis_distance(u, v, n); - } - } -} - -void pdist_canberra(const double *X, double *dm, int m, int n) { - int i, j; - const double *u, *v; - double *it = dm; - for (i = 0; i < m; i++) { - for (j = i + 1; j < m; j++, it++) { - u = X + (n * i); - v = X + (n * j); - *it = canberra_distance(u, v, n); - } - } -} - -void pdist_hamming(const double *X, double *dm, int m, int n) { - int i, j; - const double *u, *v; - double *it = dm; - for (i = 0; i < m; i++) { - for (j = i + 1; j < m; j++, it++) { - u = X + (n * i); - v = X + (n * j); - *it = hamming_distance(u, v, n); - } - } -} - -void pdist_hamming_bool(const char *X, double *dm, int m, int n) { - int i, j; - const char *u, *v; - double *it = dm; - for (i = 0; i < m; i++) { - for (j = i + 1; j < m; j++, it++) { - u = X + (n * i); - v = X + (n * j); - *it = hamming_distance_bool(u, v, n); - } - } -} - -void pdist_jaccard(const double *X, double *dm, int m, int n) { - int i, j; - const double *u, *v; - double *it = dm; - for (i = 0; i < m; i++) { - for (j = i + 1; j < m; j++, it++) { - u = X + (n * i); - v = X + (n * j); - *it = jaccard_distance(u, v, n); - } - } -} - -void pdist_jaccard_bool(const char *X, double *dm, int m, int n) { - int i, j; - const char *u, *v; - double *it = dm; - for (i = 0; i < m; i++) { - for (j = i + 1; j < m; j++, it++) { - u = X + (n * i); - v = X + (n * j); - *it = jaccard_distance_bool(u, v, n); - } - } -} - - -void pdist_chebyshev(const double *X, double *dm, int m, int n) { - int i, j; - const double *u, *v; - double *it = dm; - for (i = 0; i < m; i++) { - for (j = i + 1; j < m; j++, it++) { - u = X + (n * i); - v = X + (n * j); - *it = chebyshev_distance(u, v, n); - } - } -} - -void pdist_cosine(const double *X, double *dm, int m, int n, const double *norms) { - int i, j; - const double *u, *v; - double *it = dm; - for (i = 0; i < m; i++) { - for (j = i + 1; j < m; j++, it++) { - u = X + (n * i); - v = X + (n * j); - *it = cosine_distance(u, v, n, norms[i], norms[j]); - } - } -} - -void pdist_seuclidean(const double *X, const double *var, - double *dm, int m, int n) { - int i, j; - const double *u, *v; - double *it = dm; - for (i = 0; i < m; i++) { - for (j = i + 1; j < m; j++, it++) { - u = X + (n * i); - v = X + (n * j); - *it = seuclidean_distance(var, u, v, n); - } - } -} - -void 
pdist_city_block(const double *X, double *dm, int m, int n) { - int i, j; - const double *u, *v; - double *it = dm; - for (i = 0; i < m; i++) { - for (j = i + 1; j < m; j++, it++) { - u = X + (n * i); - v = X + (n * j); - *it = city_block_distance(u, v, n); - } - } -} - -void pdist_minkowski(const double *X, double *dm, int m, int n, double p) { - int i, j; - const double *u, *v; - double *it = dm; - for (i = 0; i < m; i++) { - for (j = i + 1; j < m; j++, it++) { - u = X + (n * i); - v = X + (n * j); - *it = minkowski_distance(u, v, n, p); - } - } -} - -void pdist_yule_bool(const char *X, double *dm, int m, int n) { - int i, j; - const char *u, *v; - double *it = dm; - for (i = 0; i < m; i++) { - for (j = i + 1; j < m; j++, it++) { - u = X + (n * i); - v = X + (n * j); - *it = yule_distance_bool(u, v, n); - } - } -} - -void pdist_matching_bool(const char *X, double *dm, int m, int n) { - int i, j; - const char *u, *v; - double *it = dm; - for (i = 0; i < m; i++) { - for (j = i + 1; j < m; j++, it++) { - u = X + (n * i); - v = X + (n * j); - *it = matching_distance_bool(u, v, n); - } - } -} - -void pdist_dice_bool(const char *X, double *dm, int m, int n) { - int i, j; - const char *u, *v; - double *it = dm; - for (i = 0; i < m; i++) { - for (j = i + 1; j < m; j++, it++) { - u = X + (n * i); - v = X + (n * j); - *it = dice_distance_bool(u, v, n); - } - } -} - -void pdist_rogerstanimoto_bool(const char *X, double *dm, int m, int n) { - int i, j; - const char *u, *v; - double *it = dm; - for (i = 0; i < m; i++) { - for (j = i + 1; j < m; j++, it++) { - u = X + (n * i); - v = X + (n * j); - *it = rogerstanimoto_distance_bool(u, v, n); - } - } -} - -void pdist_russellrao_bool(const char *X, double *dm, int m, int n) { - int i, j; - const char *u, *v; - double *it = dm; - for (i = 0; i < m; i++) { - for (j = i + 1; j < m; j++, it++) { - u = X + (n * i); - v = X + (n * j); - *it = russellrao_distance_bool(u, v, n); - } - } -} - -void pdist_kulsinski_bool(const char *X, double *dm, int m, int n) { - int i, j; - const char *u, *v; - double *it = dm; - for (i = 0; i < m; i++) { - for (j = i + 1; j < m; j++, it++) { - u = X + (n * i); - v = X + (n * j); - *it = kulsinski_distance_bool(u, v, n); - } - } -} - -void pdist_sokalsneath_bool(const char *X, double *dm, int m, int n) { - int i, j; - const char *u, *v; - double *it = dm; - for (i = 0; i < m; i++) { - for (j = i + 1; j < m; j++, it++) { - u = X + (n * i); - v = X + (n * j); - *it = sokalsneath_distance_bool(u, v, n); - } - } -} - -void pdist_sokalmichener_bool(const char *X, double *dm, int m, int n) { - int i, j; - const char *u, *v; - double *it = dm; - for (i = 0; i < m; i++) { - for (j = i + 1; j < m; j++, it++) { - u = X + (n * i); - v = X + (n * j); - *it = sokalmichener_distance_bool(u, v, n); - } - } -} - void chopmins(int *ind, int mini, int minj, int np) { int i; for (i = mini; i < minj - 1; i++) { Modified: trunk/scipy/cluster/src/hierarchy.h =================================================================== --- trunk/scipy/cluster/src/hierarchy.h 2008-06-07 08:13:02 UTC (rev 4416) +++ trunk/scipy/cluster/src/hierarchy.h 2008-06-09 05:55:44 UTC (rev 4417) @@ -34,8 +34,8 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ -#ifndef _CPY_CLUSTER_H -#define _CPY_CLUSTER_H +#ifndef _CPY_HIERARCHY_H +#define _CPY_HIERARCHY_H #define CPY_LINKAGE_SINGLE 0 #define CPY_LINKAGE_COMPLETE 1 @@ -89,35 +89,9 @@ void dist_to_squareform_from_vector(double *M, const double *v, int n); void dist_to_vector_from_squareform(const double *M, double *v, int n); -void pdist_euclidean(const double *X, double *dm, int m, int n); -void pdist_seuclidean(const double *X, - const double *var, double *dm, int m, int n); -void pdist_mahalanobis(const double *X, const double *covinv, - double *dm, int m, int n); -void pdist_bray_curtis(const double *X, double *dm, int m, int n); -void pdist_canberra(const double *X, double *dm, int m, int n); -void pdist_hamming(const double *X, double *dm, int m, int n); -void pdist_hamming_bool(const char *X, double *dm, int m, int n); -void pdist_city_block(const double *X, double *dm, int m, int n); -void pdist_cosine(const double *X, double *dm, int m, int n, const double *norms); -void pdist_chebyshev(const double *X, double *dm, int m, int n); -void pdist_jaccard(const double *X, double *dm, int m, int n); -void pdist_jaccard_bool(const char *X, double *dm, int m, int n); -void pdist_kulsinski_bool(const char *X, double *dm, int m, int n); -void pdist_minkowski(const double *X, double *dm, int m, int n, double p); -void pdist_yule_bool(const char *X, double *dm, int m, int n); -void pdist_matching_bool(const char *X, double *dm, int m, int n); -void pdist_dice_bool(const char *X, double *dm, int m, int n); -void pdist_rogerstanimoto_bool(const char *X, double *dm, int m, int n); -void pdist_russellrao_bool(const char *X, double *dm, int m, int n); -void pdist_sokalmichener_bool(const char *X, double *dm, int m, int n); -void pdist_sokalsneath_bool(const char *X, double *dm, int m, int n); - void inconsistency_calculation(const double *Z, double *R, int n, int d); void inconsistency_calculation_alt(const double *Z, double *R, int n, int d); -double dot_product(const double *u, const double *v, int n); - void chopmins(int *ind, int mini, int minj, int np); void chopmins_ns_i(double *ind, int mini, int np); void chopmins_ns_ij(double *ind, int mini, int minj, int np); Modified: trunk/scipy/cluster/src/hierarchy_wrap.c =================================================================== --- trunk/scipy/cluster/src/hierarchy_wrap.c 2008-06-07 08:13:02 UTC (rev 4416) +++ trunk/scipy/cluster/src/hierarchy_wrap.c 2008-06-09 05:55:44 UTC (rev 4417) @@ -332,18 +332,6 @@ return Py_BuildValue("d", 0.0); } -extern PyObject *dot_product_wrap(PyObject *self, PyObject *args) { - PyArrayObject *d1_, *d2_; - if (!PyArg_ParseTuple(args, "O!O!", - &PyArray_Type, &d1_, - &PyArray_Type, &d2_)) { - return 0; - } - return Py_BuildValue("d", dot_product((const double*)d1_->data, - (const double*)d2_->data, - d1_->dimensions[0])); -} - extern PyObject *to_squareform_from_vector_wrap(PyObject *self, PyObject *args) { PyArrayObject *M_, *v_; int n; @@ -382,459 +370,6 @@ return Py_BuildValue("d", 0.0); } -extern PyObject *pdist_euclidean_wrap(PyObject *self, PyObject *args) { - PyArrayObject *X_, *dm_; - int m, n; - double *dm; - const double *X; - if (!PyArg_ParseTuple(args, "O!O!", - &PyArray_Type, &X_, - &PyArray_Type, &dm_)) { - return 0; - } - else { - X = (const double*)X_->data; - dm = (double*)dm_->data; - m = X_->dimensions[0]; - n = X_->dimensions[1]; - - pdist_euclidean(X, dm, m, n); - } - return Py_BuildValue("d", 0.0); -} - -extern PyObject *pdist_canberra_wrap(PyObject *self, PyObject *args) { - PyArrayObject 
*X_, *dm_; - int m, n; - double *dm; - const double *X; - if (!PyArg_ParseTuple(args, "O!O!", - &PyArray_Type, &X_, - &PyArray_Type, &dm_)) { - return 0; - } - else { - X = (const double*)X_->data; - dm = (double*)dm_->data; - m = X_->dimensions[0]; - n = X_->dimensions[1]; - - pdist_canberra(X, dm, m, n); - } - return Py_BuildValue("d", 0.0); -} - -extern PyObject *pdist_bray_curtis_wrap(PyObject *self, PyObject *args) { - PyArrayObject *X_, *dm_; - int m, n; - double *dm; - const double *X; - if (!PyArg_ParseTuple(args, "O!O!", - &PyArray_Type, &X_, - &PyArray_Type, &dm_)) { - return 0; - } - else { - X = (const double*)X_->data; - dm = (double*)dm_->data; - m = X_->dimensions[0]; - n = X_->dimensions[1]; - - pdist_bray_curtis(X, dm, m, n); - } - return Py_BuildValue("d", 0.0); -} - - -extern PyObject *pdist_mahalanobis_wrap(PyObject *self, PyObject *args) { - PyArrayObject *X_, *covinv_, *dm_; - int m, n; - double *dm; - const double *X; - const double *covinv; - if (!PyArg_ParseTuple(args, "O!O!O!", - &PyArray_Type, &X_, - &PyArray_Type, &covinv_, - &PyArray_Type, &dm_)) { - return 0; - } - else { - X = (const double*)X_->data; - covinv = (const double*)covinv_->data; - dm = (double*)dm_->data; - m = X_->dimensions[0]; - n = X_->dimensions[1]; - - pdist_mahalanobis(X, covinv, dm, m, n); - } - return Py_BuildValue("d", 0.0); -} - - -extern PyObject *pdist_chebyshev_wrap(PyObject *self, PyObject *args) { - PyArrayObject *X_, *dm_; - int m, n; - double *dm; - const double *X; - if (!PyArg_ParseTuple(args, "O!O!", - &PyArray_Type, &X_, - &PyArray_Type, &dm_)) { - return 0; - } - else { - X = (const double*)X_->data; - dm = (double*)dm_->data; - m = X_->dimensions[0]; - n = X_->dimensions[1]; - - pdist_chebyshev(X, dm, m, n); - } - return Py_BuildValue("d", 0.0); -} - - -extern PyObject *pdist_cosine_wrap(PyObject *self, PyObject *args) { - PyArrayObject *X_, *dm_, *norms_; - int m, n; - double *dm; - const double *X, *norms; - if (!PyArg_ParseTuple(args, "O!O!O!", - &PyArray_Type, &X_, - &PyArray_Type, &dm_, - &PyArray_Type, &norms_)) { - return 0; - } - else { - X = (const double*)X_->data; - dm = (double*)dm_->data; - norms = (const double*)norms_->data; - m = X_->dimensions[0]; - n = X_->dimensions[1]; - - pdist_cosine(X, dm, m, n, norms); - } - return Py_BuildValue("d", 0.0); -} - -extern PyObject *pdist_seuclidean_wrap(PyObject *self, PyObject *args) { - PyArrayObject *X_, *dm_, *var_; - int m, n; - double *dm; - const double *X, *var; - if (!PyArg_ParseTuple(args, "O!O!O!", - &PyArray_Type, &X_, - &PyArray_Type, &var_, - &PyArray_Type, &dm_)) { - return 0; - } - else { - X = (double*)X_->data; - dm = (double*)dm_->data; - var = (double*)var_->data; - m = X_->dimensions[0]; - n = X_->dimensions[1]; - - pdist_seuclidean(X, var, dm, m, n); - } - return Py_BuildValue("d", 0.0); -} - -extern PyObject *pdist_city_block_wrap(PyObject *self, PyObject *args) { - PyArrayObject *X_, *dm_; - int m, n; - double *dm; - const double *X; - if (!PyArg_ParseTuple(args, "O!O!", - &PyArray_Type, &X_, - &PyArray_Type, &dm_)) { - return 0; - } - else { - X = (const double*)X_->data; - dm = (double*)dm_->data; - m = X_->dimensions[0]; - n = X_->dimensions[1]; - - pdist_city_block(X, dm, m, n); - } - return Py_BuildValue("d", 0.0); -} - -extern PyObject *pdist_hamming_wrap(PyObject *self, PyObject *args) { - PyArrayObject *X_, *dm_; - int m, n; - double *dm; - const double *X; - if (!PyArg_ParseTuple(args, "O!O!", - &PyArray_Type, &X_, - &PyArray_Type, &dm_)) { - return 0; - } - else { - X = (const 
double*)X_->data; - dm = (double*)dm_->data; - m = X_->dimensions[0]; - n = X_->dimensions[1]; - - pdist_hamming(X, dm, m, n); - } - return Py_BuildValue("d", 0.0); -} - -extern PyObject *pdist_hamming_bool_wrap(PyObject *self, PyObject *args) { - PyArrayObject *X_, *dm_; - int m, n; - double *dm; - const char *X; - if (!PyArg_ParseTuple(args, "O!O!", - &PyArray_Type, &X_, - &PyArray_Type, &dm_)) { - return 0; - } - else { - X = (const char*)X_->data; - dm = (double*)dm_->data; - m = X_->dimensions[0]; - n = X_->dimensions[1]; - - pdist_hamming_bool(X, dm, m, n); - } - return Py_BuildValue("d", 0.0); -} - -extern PyObject *pdist_jaccard_wrap(PyObject *self, PyObject *args) { - PyArrayObject *X_, *dm_; - int m, n; - double *dm; - const double *X; - if (!PyArg_ParseTuple(args, "O!O!", - &PyArray_Type, &X_, - &PyArray_Type, &dm_)) { - return 0; - } - else { - X = (const double*)X_->data; - dm = (double*)dm_->data; - m = X_->dimensions[0]; - n = X_->dimensions[1]; - - pdist_jaccard(X, dm, m, n); - } - return Py_BuildValue("d", 0.0); -} - -extern PyObject *pdist_jaccard_bool_wrap(PyObject *self, PyObject *args) { - PyArrayObject *X_, *dm_; - int m, n; - double *dm; - const char *X; - if (!PyArg_ParseTuple(args, "O!O!", - &PyArray_Type, &X_, - &PyArray_Type, &dm_)) { - return 0; - } - else { - X = (const char*)X_->data; - dm = (double*)dm_->data; - m = X_->dimensions[0]; - n = X_->dimensions[1]; - - pdist_jaccard_bool(X, dm, m, n); - } - return Py_BuildValue("d", 0.0); -} - -extern PyObject *pdist_minkowski_wrap(PyObject *self, PyObject *args) { - PyArrayObject *X_, *dm_; - int m, n; - double *dm, *X; - double p; - if (!PyArg_ParseTuple(args, "O!O!d", - &PyArray_Type, &X_, - &PyArray_Type, &dm_, - &p)) { - return 0; - } - else { - X = (double*)X_->data; - dm = (double*)dm_->data; - m = X_->dimensions[0]; - n = X_->dimensions[1]; - - pdist_minkowski(X, dm, m, n, p); - } - return Py_BuildValue("d", 0.0); -} - - -extern PyObject *pdist_yule_bool_wrap(PyObject *self, PyObject *args) { - PyArrayObject *X_, *dm_; - int m, n; - double *dm; - const char *X; - if (!PyArg_ParseTuple(args, "O!O!", - &PyArray_Type, &X_, - &PyArray_Type, &dm_)) { - return 0; - } - else { - X = (const char*)X_->data; - dm = (double*)dm_->data; - m = X_->dimensions[0]; - n = X_->dimensions[1]; - - pdist_yule_bool(X, dm, m, n); - } - return Py_BuildValue(""); -} - -extern PyObject *pdist_matching_bool_wrap(PyObject *self, PyObject *args) { - PyArrayObject *X_, *dm_; - int m, n; - double *dm; - const char *X; - if (!PyArg_ParseTuple(args, "O!O!", - &PyArray_Type, &X_, - &PyArray_Type, &dm_)) { - return 0; - } - else { - X = (const char*)X_->data; - dm = (double*)dm_->data; - m = X_->dimensions[0]; - n = X_->dimensions[1]; - - pdist_matching_bool(X, dm, m, n); - } - return Py_BuildValue(""); -} - -extern PyObject *pdist_dice_bool_wrap(PyObject *self, PyObject *args) { - PyArrayObject *X_, *dm_; - int m, n; - double *dm; - const char *X; - if (!PyArg_ParseTuple(args, "O!O!", - &PyArray_Type, &X_, - &PyArray_Type, &dm_)) { - return 0; - } - else { - X = (const char*)X_->data; - dm = (double*)dm_->data; - m = X_->dimensions[0]; - n = X_->dimensions[1]; - - pdist_dice_bool(X, dm, m, n); - } - return Py_BuildValue(""); -} - -extern PyObject *pdist_rogerstanimoto_bool_wrap(PyObject *self, PyObject *args) { - PyArrayObject *X_, *dm_; - int m, n; - double *dm; - const char *X; - if (!PyArg_ParseTuple(args, "O!O!", - &PyArray_Type, &X_, - &PyArray_Type, &dm_)) { - return 0; - } - else { - X = (const char*)X_->data; - dm = 
(double*)dm_->data; - m = X_->dimensions[0]; - n = X_->dimensions[1]; - - pdist_rogerstanimoto_bool(X, dm, m, n); - } - return Py_BuildValue(""); -} - -extern PyObject *pdist_russellrao_bool_wrap(PyObject *self, PyObject *args) { - PyArrayObject *X_, *dm_; - int m, n; - double *dm; - const char *X; - if (!PyArg_ParseTuple(args, "O!O!", - &PyArray_Type, &X_, - &PyArray_Type, &dm_)) { - return 0; - } - else { - X = (const char*)X_->data; - dm = (double*)dm_->data; - m = X_->dimensions[0]; - n = X_->dimensions[1]; - - pdist_russellrao_bool(X, dm, m, n); - } - return Py_BuildValue(""); -} - -extern PyObject *pdist_kulsinski_bool_wrap(PyObject *self, PyObject *args) { - PyArrayObject *X_, *dm_; - int m, n; - double *dm; - const char *X; - if (!PyArg_ParseTuple(args, "O!O!", - &PyArray_Type, &X_, - &PyArray_Type, &dm_)) { - return 0; - } - else { - X = (const char*)X_->data; - dm = (double*)dm_->data; - m = X_->dimensions[0]; - n = X_->dimensions[1]; - - pdist_kulsinski_bool(X, dm, m, n); - } - return Py_BuildValue(""); -} - -extern PyObject *pdist_sokalmichener_bool_wrap(PyObject *self, PyObject *args) { - PyArrayObject *X_, *dm_; - int m, n; - double *dm; - const char *X; - if (!PyArg_ParseTuple(args, "O!O!", - &PyArray_Type, &X_, - &PyArray_Type, &dm_)) { - return 0; - } - else { - X = (const char*)X_->data; - dm = (double*)dm_->data; - m = X_->dimensions[0]; - n = X_->dimensions[1]; - - pdist_sokalmichener_bool(X, dm, m, n); - } - return Py_BuildValue(""); -} - -extern PyObject *pdist_sokalsneath_bool_wrap(PyObject *self, PyObject *args) { - PyArrayObject *X_, *dm_; - int m, n; - double *dm; - const char *X; - if (!PyArg_ParseTuple(args, "O!O!", - &PyArray_Type, &X_, - &PyArray_Type, &dm_)) { - return 0; - } - else { - X = (const char*)X_->data; - dm = (double*)dm_->data; - m = X_->dimensions[0]; - n = X_->dimensions[1]; - - pdist_sokalsneath_bool(X, dm, m, n); - } - return Py_BuildValue(""); -} - extern PyObject *leaders_wrap(PyObject *self, PyObject *args) { PyArrayObject *Z_, *T_, *L_, *M_; int kk, n, res; @@ -864,7 +399,6 @@ {"cluster_maxclust_monocrit_wrap", cluster_maxclust_monocrit_wrap, METH_VARARGS}, {"cluster_monocrit_wrap", cluster_monocrit_wrap, METH_VARARGS}, {"cophenetic_distances_wrap", cophenetic_distances_wrap, METH_VARARGS}, - {"dot_product_wrap", dot_product_wrap, METH_VARARGS}, {"get_max_dist_for_each_cluster_wrap", get_max_dist_for_each_cluster_wrap, METH_VARARGS}, {"get_max_Rfield_for_each_cluster_wrap", @@ -873,27 +407,6 @@ {"leaders_wrap", leaders_wrap, METH_VARARGS}, {"linkage_euclid_wrap", linkage_euclid_wrap, METH_VARARGS}, {"linkage_wrap", linkage_wrap, METH_VARARGS}, - {"pdist_bray_curtis_wrap", pdist_bray_curtis_wrap, METH_VARARGS}, - {"pdist_canberra_wrap", pdist_canberra_wrap, METH_VARARGS}, - {"pdist_chebyshev_wrap", pdist_chebyshev_wrap, METH_VARARGS}, - {"pdist_city_block_wrap", pdist_city_block_wrap, METH_VARARGS}, - {"pdist_cosine_wrap", pdist_cosine_wrap, METH_VARARGS}, - {"pdist_dice_bool_wrap", pdist_dice_bool_wrap, METH_VARARGS}, - {"pdist_euclidean_wrap", pdist_euclidean_wrap, METH_VARARGS}, - {"pdist_hamming_wrap", pdist_hamming_wrap, METH_VARARGS}, - {"pdist_hamming_bool_wrap", pdist_hamming_bool_wrap, METH_VARARGS}, - {"pdist_jaccard_wrap", pdist_jaccard_wrap, METH_VARARGS}, - {"pdist_jaccard_bool_wrap", pdist_jaccard_bool_wrap, METH_VARARGS}, - {"pdist_kulsinski_bool_wrap", pdist_kulsinski_bool_wrap, METH_VARARGS}, - {"pdist_mahalanobis_wrap", pdist_mahalanobis_wrap, METH_VARARGS}, - {"pdist_matching_bool_wrap", pdist_matching_bool_wrap, 
METH_VARARGS}, - {"pdist_minkowski_wrap", pdist_minkowski_wrap, METH_VARARGS}, - {"pdist_rogerstanimoto_bool_wrap", pdist_rogerstanimoto_bool_wrap, METH_VARARGS}, - {"pdist_russellrao_bool_wrap", pdist_russellrao_bool_wrap, METH_VARARGS}, - {"pdist_seuclidean_wrap", pdist_seuclidean_wrap, METH_VARARGS}, - {"pdist_sokalmichener_bool_wrap", pdist_sokalmichener_bool_wrap, METH_VARARGS}, - {"pdist_sokalsneath_bool_wrap", pdist_sokalsneath_bool_wrap, METH_VARARGS}, - {"pdist_yule_bool_wrap", pdist_yule_bool_wrap, METH_VARARGS}, {"prelist_wrap", prelist_wrap, METH_VARARGS}, {"to_squareform_from_vector_wrap", to_squareform_from_vector_wrap, METH_VARARGS}, Modified: trunk/scipy/cluster/tests/test_hierarchy.py =================================================================== --- trunk/scipy/cluster/tests/test_hierarchy.py 2008-06-07 08:13:02 UTC (rev 4416) +++ trunk/scipy/cluster/tests/test_hierarchy.py 2008-06-09 05:55:44 UTC (rev 4417) @@ -33,44 +33,14 @@ # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - import sys import os.path -from scipy.testing import * -from scipy.cluster.hierarchy import pdist, squareform, linkage, from_mlab_linkage, numobs_dm, numobs_y, numobs_linkage, matching, jaccard, dice, sokalsneath, rogerstanimoto, russellrao, yule - import numpy -#import math -#from scipy.cluster.hierarchy import pdist, euclidean +from scipy.testing import * +from scipy.cluster.hierarchy import squareform, linkage, from_mlab_linkage, numobs_dm, numobs_y, numobs_linkage +from scipy.cluster.distance import pdist, matching, jaccard, dice, sokalsneath, rogerstanimoto, russellrao, yule -_filenames = ["iris.txt", - "pdist-hamming-ml.txt", - "pdist-boolean-inp.txt", - "pdist-jaccard-ml.txt", - "pdist-cityblock-ml-iris.txt", - "pdist-minkowski-3.2-ml-iris.txt", - "pdist-cityblock-ml.txt", - "pdist-correlation-ml-iris.txt", - "pdist-minkowski-5.8-ml-iris.txt", - "pdist-correlation-ml.txt", - "pdist-minkowski-3.2-ml.txt", - "pdist-cosine-ml-iris.txt", - "pdist-seuclidean-ml-iris.txt", - "pdist-cosine-ml.txt", - "pdist-seuclidean-ml.txt", - "pdist-double-inp.txt", - "pdist-spearman-ml.txt", - "pdist-euclidean-ml.txt", - "pdist-euclidean-ml-iris.txt", - "pdist-chebychev-ml.txt", - "pdist-chebychev-ml-iris.txt", - "linkage-single-tdist.txt", - "linkage-complete-tdist.txt", - "linkage-average-tdist.txt", - "linkage-weighted-tdist.txt", - "random-bool-data.txt"] - _tdist = numpy.array([[0, 662, 877, 255, 412, 996], [662, 0, 295, 468, 268, 400], [877, 295, 0, 754, 564, 138], @@ -80,874 +50,27 @@ _ytdist = squareform(_tdist) -# A hashmap of expected output arrays for the tests. These arrays -# come from a list of text files, which are read prior to testing. eo = {} +_filenames = ["iris.txt", + "linkage-single-tdist.txt", + "linkage-complete-tdist.txt", + "linkage-average-tdist.txt", + "linkage-weighted-tdist.txt", + "random-bool-data.txt"] + + def load_testing_files(): for fn in _filenames: name = fn.replace(".txt", "").replace("-ml", "") fqfn = os.path.join(os.path.dirname(__file__), fn) eo[name] = numpy.loadtxt(open(fqfn)) #print "%s: %s %s" % (name, str(eo[name].shape), str(eo[name].dtype)) - eo['pdist-boolean-inp'] = numpy.bool_(eo['pdist-boolean-inp']) + #eo['pdist-boolean-inp'] = numpy.bool_(eo['pdist-boolean-inp']) load_testing_files() -#print eo.keys() - - -#print numpy.abs(Y_test2 - Y_right).max() -#print numpy.abs(Y_test1 - Y_right).max() - -class TestPdist(TestCase): - """ - Test suite for the pdist function. 
- """ - - ################### pdist: euclidean - def test_pdist_euclidean_random(self): - "Tests pdist(X, 'euclidean') on random data." - eps = 1e-07 - # Get the data: the input matrix and the right output. - X = eo['pdist-double-inp'] - Y_right = eo['pdist-euclidean'] - - Y_test1 = pdist(X, 'euclidean') - self.failUnless(within_tol(Y_test1, Y_right, eps)) - - def test_pdist_euclidean_random_float32(self): - "Tests pdist(X, 'euclidean') on random data (float32)." - eps = 1e-07 - # Get the data: the input matrix and the right output. - X = numpy.float32(eo['pdist-double-inp']) - Y_right = eo['pdist-euclidean'] - - Y_test1 = pdist(X, 'euclidean') - self.failUnless(within_tol(Y_test1, Y_right, eps)) - - def test_pdist_euclidean_random_nonC(self): - "Tests pdist(X, 'test_euclidean') [the non-C implementation] on random data." - eps = 1e-07 - # Get the data: the input matrix and the right output. - X = eo['pdist-double-inp'] - Y_right = eo['pdist-euclidean'] - Y_test2 = pdist(X, 'test_euclidean') - self.failUnless(within_tol(Y_test2, Y_right, eps)) - - def test_pdist_euclidean_iris_double(self): - "Tests pdist(X, 'euclidean') on the Iris data set." - eps = 1e-07 - # Get the data: the input matrix and the right output. - X = eo['iris'] - Y_right = eo['pdist-euclidean-iris'] - - Y_test1 = pdist(X, 'euclidean') - self.failUnless(within_tol(Y_test1, Y_right, eps)) - - def test_pdist_euclidean_iris_float32(self): - "Tests pdist(X, 'euclidean') on the Iris data set. (float32)" - eps = 1e-06 - # Get the data: the input matrix and the right output. - X = numpy.float32(eo['iris']) - Y_right = eo['pdist-euclidean-iris'] - - Y_test1 = pdist(X, 'euclidean') - print numpy.abs(Y_right - Y_test1).max() - self.failUnless(within_tol(Y_test1, Y_right, eps)) - - def test_pdist_euclidean_iris_nonC(self): - "Tests pdist(X, 'test_euclidean') [the non-C implementation] on the Iris data set." - eps = 1e-07 - # Get the data: the input matrix and the right output. - X = eo['iris'] - Y_right = eo['pdist-euclidean-iris'] - Y_test2 = pdist(X, 'test_euclidean') - self.failUnless(within_tol(Y_test2, Y_right, eps)) - - ################### pdist: seuclidean - def test_pdist_seuclidean_random(self): - "Tests pdist(X, 'seuclidean') on random data." - eps = 1e-05 - # Get the data: the input matrix and the right output. - X = eo['pdist-double-inp'] - Y_right = eo['pdist-seuclidean'] - - Y_test1 = pdist(X, 'seuclidean') - self.failUnless(within_tol(Y_test1, Y_right, eps)) - - def test_pdist_seuclidean_random_float32(self): - "Tests pdist(X, 'seuclidean') on random data (float32)." - eps = 1e-05 - # Get the data: the input matrix and the right output. - X = numpy.float32(eo['pdist-double-inp']) - Y_right = eo['pdist-seuclidean'] - - Y_test1 = pdist(X, 'seuclidean') - self.failUnless(within_tol(Y_test1, Y_right, eps)) - - def test_pdist_seuclidean_random_nonC(self): - "Tests pdist(X, 'test_sqeuclidean') [the non-C implementation] on random data." - eps = 1e-05 - # Get the data: the input matrix and the right output. - X = eo['pdist-double-inp'] - Y_right = eo['pdist-seuclidean'] - Y_test2 = pdist(X, 'test_sqeuclidean') - self.failUnless(within_tol(Y_test2, Y_right, eps)) - - def test_pdist_seuclidean_iris(self): - "Tests pdist(X, 'seuclidean') on the Iris data set." - eps = 1e-05 - # Get the data: the input matrix and the right output. 
- X = eo['iris'] - Y_right = eo['pdist-seuclidean-iris'] - - Y_test1 = pdist(X, 'seuclidean') - self.failUnless(within_tol(Y_test1, Y_right, eps)) - - def test_pdist_seuclidean_iris_float32(self): - "Tests pdist(X, 'seuclidean') on the Iris data set (float32)." - eps = 1e-05 - # Get the data: the input matrix and the right output. - X = numpy.float32(eo['iris']) - Y_right = eo['pdist-seuclidean-iris'] - - Y_test1 = pdist(X, 'seuclidean') - self.failUnless(within_tol(Y_test1, Y_right, eps)) - - def test_pdist_seuclidean_iris_nonC(self): - "Tests pdist(X, 'test_seuclidean') [the non-C implementation] on the Iris data set." - eps = 1e-05 - # Get the data: the input matrix and the right output. - X = eo['iris'] - Y_right = eo['pdist-seuclidean-iris'] - Y_test2 = pdist(X, 'test_sqeuclidean') - self.failUnless(within_tol(Y_test2, Y_right, eps)) - - ################### pdist: cosine - def test_pdist_cosine_random(self): - "Tests pdist(X, 'cosine') on random data." - eps = 1e-08 - # Get the data: the input matrix and the right output. - X = eo['pdist-double-inp'] - Y_right = eo['pdist-cosine'] - Y_test1 = pdist(X, 'cosine') - self.failUnless(within_tol(Y_test1, Y_right, eps)) - - def test_pdist_cosine_random_float32(self): - "Tests pdist(X, 'cosine') on random data. (float32)" - eps = 1e-08 - # Get the data: the input matrix and the right output. - X = numpy.float32(eo['pdist-double-inp']) - Y_right = eo['pdist-cosine'] - - Y_test1 = pdist(X, 'cosine') - self.failUnless(within_tol(Y_test1, Y_right, eps)) - - def test_pdist_cosine_random_nonC(self): - "Tests pdist(X, 'test_cosine') [the non-C implementation] on random data." - eps = 1e-08 - # Get the data: the input matrix and the right output. - X = eo['pdist-double-inp'] - Y_right = eo['pdist-cosine'] - Y_test2 = pdist(X, 'test_cosine') - self.failUnless(within_tol(Y_test2, Y_right, eps)) - - def test_pdist_cosine_iris(self): - "Tests pdist(X, 'cosine') on the Iris data set." - eps = 1e-08 - # Get the data: the input matrix and the right output. - X = eo['iris'] - Y_right = eo['pdist-cosine-iris'] - - Y_test1 = pdist(X, 'cosine') - self.failUnless(within_tol(Y_test1, Y_right, eps)) - #print "cosine-iris", numpy.abs(Y_test1 - Y_right).max() - - def test_pdist_cosine_iris_float32(self): - "Tests pdist(X, 'cosine') on the Iris data set." - eps = 1e-07 - # Get the data: the input matrix and the right output. - X = numpy.float32(eo['iris']) - Y_right = eo['pdist-cosine-iris'] - - Y_test1 = pdist(X, 'cosine') - print numpy.abs(Y_test1 - Y_right).max() - self.failUnless(within_tol(Y_test1, Y_right, eps)) - #print "cosine-iris", numpy.abs(Y_test1 - Y_right).max() - - def test_pdist_cosine_iris_nonC(self): - "Tests pdist(X, 'test_cosine') [the non-C implementation] on the Iris data set." - eps = 1e-08 - # Get the data: the input matrix and the right output. - X = eo['iris'] - Y_right = eo['pdist-cosine-iris'] - Y_test2 = pdist(X, 'test_cosine') - self.failUnless(within_tol(Y_test2, Y_right, eps)) - - ################### pdist: cityblock - def test_pdist_cityblock_random(self): - "Tests pdist(X, 'cityblock') on random data." - eps = 1e-06 - # Get the data: the input matrix and the right output. - X = eo['pdist-double-inp'] - Y_right = eo['pdist-cityblock'] - Y_test1 = pdist(X, 'cityblock') - #print "cityblock", numpy.abs(Y_test1 - Y_right).max() - self.failUnless(within_tol(Y_test1, Y_right, eps)) - - def test_pdist_cityblock_random_float32(self): - "Tests pdist(X, 'cityblock') on random data. 
(float32)" - eps = 1e-06 - # Get the data: the input matrix and the right output. - X = numpy.float32(eo['pdist-double-inp']) - Y_right = eo['pdist-cityblock'] - Y_test1 = pdist(X, 'cityblock') - #print "cityblock", numpy.abs(Y_test1 - Y_right).max() - self.failUnless(within_tol(Y_test1, Y_right, eps)) - - def test_pdist_cityblock_random_nonC(self): - "Tests pdist(X, 'test_cityblock') [the non-C implementation] on random data." - eps = 1e-06 - # Get the data: the input matrix and the right output. - X = eo['pdist-double-inp'] - Y_right = eo['pdist-cityblock'] - Y_test2 = pdist(X, 'test_cityblock') - self.failUnless(within_tol(Y_test2, Y_right, eps)) - - def test_pdist_cityblock_iris(self): - "Tests pdist(X, 'cityblock') on the Iris data set." - eps = 1e-14 - # Get the data: the input matrix and the right output. - X = eo['iris'] - Y_right = eo['pdist-cityblock-iris'] - - Y_test1 = pdist(X, 'cityblock') - self.failUnless(within_tol(Y_test1, Y_right, eps)) - #print "cityblock-iris", numpy.abs(Y_test1 - Y_right).max() - - def test_pdist_cityblock_iris_float32(self): - "Tests pdist(X, 'cityblock') on the Iris data set. (float32)" - eps = 1e-06 - # Get the data: the input matrix and the right output. - X = numpy.float32(eo['iris']) - Y_right = eo['pdist-cityblock-iris'] - - Y_test1 = pdist(X, 'cityblock') - print "cityblock-iris-float32", numpy.abs(Y_test1 - Y_right).max() - self.failUnless(within_tol(Y_test1, Y_right, eps)) - - def test_pdist_cityblock_iris_nonC(self): - "Tests pdist(X, 'test_cityblock') [the non-C implementation] on the Iris data set." - eps = 1e-14 - # Get the data: the input matrix and the right output. - X = eo['iris'] - Y_right = eo['pdist-cityblock-iris'] - Y_test2 = pdist(X, 'test_cityblock') - self.failUnless(within_tol(Y_test2, Y_right, eps)) - - ################### pdist: correlation - def test_pdist_correlation_random(self): - "Tests pdist(X, 'correlation') on random data." - eps = 1e-07 - # Get the data: the input matrix and the right output. - X = eo['pdist-double-inp'] - Y_right = eo['pdist-correlation'] - - Y_test1 = pdist(X, 'correlation') - #print "correlation", numpy.abs(Y_test1 - Y_right).max() - self.failUnless(within_tol(Y_test1, Y_right, eps)) - - def test_pdist_correlation_random_float32(self): - "Tests pdist(X, 'correlation') on random data. (float32)" - eps = 1e-07 - # Get the data: the input matrix and the right output. - X = numpy.float32(eo['pdist-double-inp']) - Y_right = eo['pdist-correlation'] - - Y_test1 = pdist(X, 'correlation') - #print "correlation", numpy.abs(Y_test1 - Y_right).max() - self.failUnless(within_tol(Y_test1, Y_right, eps)) - - def test_pdist_correlation_random_nonC(self): - "Tests pdist(X, 'test_correlation') [the non-C implementation] on random data." - eps = 1e-07 - # Get the data: the input matrix and the right output. - X = eo['pdist-double-inp'] - Y_right = eo['pdist-correlation'] - Y_test2 = pdist(X, 'test_correlation') - self.failUnless(within_tol(Y_test2, Y_right, eps)) - - def test_pdist_correlation_iris(self): - "Tests pdist(X, 'correlation') on the Iris data set." - eps = 1e-08 - # Get the data: the input matrix and the right output. - X = eo['iris'] - Y_right = eo['pdist-correlation-iris'] - - Y_test1 = pdist(X, 'correlation') - #print "correlation-iris", numpy.abs(Y_test1 - Y_right).max() - self.failUnless(within_tol(Y_test1, Y_right, eps)) - - def test_pdist_correlation_iris_float32(self): - "Tests pdist(X, 'correlation') on the Iris data set. 
(float32)" - eps = 1e-07 - # Get the data: the input matrix and the right output. - X = eo['iris'] - Y_right = numpy.float32(eo['pdist-correlation-iris']) - - Y_test1 = pdist(X, 'correlation') - print "correlation-iris", numpy.abs(Y_test1 - Y_right).max() - self.failUnless(within_tol(Y_test1, Y_right, eps)) - - def test_pdist_correlation_iris_nonC(self): - "Tests pdist(X, 'test_correlation') [the non-C implementation] on the Iris data set." - eps = 1e-08 - # Get the data: the input matrix and the right output. - X = eo['iris'] - Y_right = eo['pdist-correlation-iris'] - Y_test2 = pdist(X, 'test_correlation') - #print "test-correlation-iris", numpy.abs(Y_test2 - Y_right).max() - self.failUnless(within_tol(Y_test2, Y_right, eps)) - - ################# minkowski - - def test_pdist_minkowski_random(self): - "Tests pdist(X, 'minkowski') on random data." - eps = 1e-05 - # Get the data: the input matrix and the right output. - X = eo['pdist-double-inp'] - Y_right = eo['pdist-minkowski-3.2'] - - Y_test1 = pdist(X, 'minkowski', 3.2) - #print "minkowski", numpy.abs(Y_test1 - Y_right).max() - self.failUnless(within_tol(Y_test1, Y_right, eps)) - - def test_pdist_minkowski_random_float32(self): - "Tests pdist(X, 'minkowski') on random data. (float32)" - eps = 1e-05 - # Get the data: the input matrix and the right output. - X = numpy.float32(eo['pdist-double-inp']) - Y_right = eo['pdist-minkowski-3.2'] - - Y_test1 = pdist(X, 'minkowski', 3.2) - #print "minkowski", numpy.abs(Y_test1 - Y_right).max() - self.failUnless(within_tol(Y_test1, Y_right, eps)) - - def test_pdist_minkowski_random_nonC(self): - "Tests pdist(X, 'test_minkowski') [the non-C implementation] on random data." - eps = 1e-05 - # Get the data: the input matrix and the right output. - X = eo['pdist-double-inp'] - Y_right = eo['pdist-minkowski-3.2'] - Y_test2 = pdist(X, 'test_minkowski', 3.2) - self.failUnless(within_tol(Y_test2, Y_right, eps)) - - def test_pdist_minkowski_iris(self): - "Tests pdist(X, 'minkowski') on iris data." - eps = 1e-07 - # Get the data: the input matrix and the right output. - X = eo['iris'] - Y_right = eo['pdist-minkowski-3.2-iris'] - Y_test1 = pdist(X, 'minkowski', 3.2) - #print "minkowski-iris-3.2", numpy.abs(Y_test1 - Y_right).max() - self.failUnless(within_tol(Y_test1, Y_right, eps)) - - def test_pdist_minkowski_iris_float32(self): - "Tests pdist(X, 'minkowski') on iris data. (float32)" - eps = 1e-07 - # Get the data: the input matrix and the right output. - X = numpy.float32(eo['iris']) - Y_right = eo['pdist-minkowski-3.2-iris'] - Y_test1 = pdist(X, 'minkowski', 3.2) - #print "minkowski-iris-3.2", numpy.abs(Y_test1 - Y_right).max() - self.failUnless(within_tol(Y_test1, Y_right, eps)) - - def test_pdist_minkowski_iris_nonC(self): - "Tests pdist(X, 'test_minkowski') [the non-C implementation] on iris data." - eps = 1e-07 - # Get the data: the input matrix and the right output. - X = eo['iris'] - Y_right = eo['pdist-minkowski-3.2-iris'] - Y_test2 = pdist(X, 'test_minkowski', 3.2) - self.failUnless(within_tol(Y_test2, Y_right, eps)) - - def test_pdist_minkowski_iris(self): - "Tests pdist(X, 'minkowski') on iris data." - eps = 1e-07 - # Get the data: the input matrix and the right output. - X = eo['iris'] - Y_right = eo['pdist-minkowski-5.8-iris'] - Y_test1 = pdist(X, 'minkowski', 5.8) - #print "minkowski-iris-5.8", numpy.abs(Y_test1 - Y_right).max() - self.failUnless(within_tol(Y_test1, Y_right, eps)) - - def test_pdist_minkowski_iris_float32(self): - "Tests pdist(X, 'minkowski') on iris data. 
(float32)" - eps = 1e-06 - # Get the data: the input matrix and the right output. - X = numpy.float32(eo['iris']) - Y_right = eo['pdist-minkowski-5.8-iris'] - - Y_test1 = pdist(X, 'minkowski', 5.8) - print "minkowski-iris-5.8", numpy.abs(Y_test1 - Y_right).max() - self.failUnless(within_tol(Y_test1, Y_right, eps)) - - def test_pdist_minkowski_iris_nonC(self): - "Tests pdist(X, 'test_minkowski') [the non-C implementation] on iris data." - eps = 1e-07 - # Get the data: the input matrix and the right output. - X = eo['iris'] - Y_right = eo['pdist-minkowski-5.8-iris'] - Y_test2 = pdist(X, 'test_minkowski', 5.8) - self.failUnless(within_tol(Y_test2, Y_right, eps)) - - ################### pdist: hamming - def test_pdist_hamming_random(self): - "Tests pdist(X, 'hamming') on random data." - eps = 1e-07 - # Get the data: the input matrix and the right output. - X = eo['pdist-boolean-inp'] - Y_right = eo['pdist-hamming'] - - Y_test1 = pdist(X, 'hamming') - #print "hamming", numpy.abs(Y_test1 - Y_right).max() - self.failUnless(within_tol(Y_test1, Y_right, eps)) - - def test_pdist_hamming_random_float32(self): - "Tests pdist(X, 'hamming') on random data." - eps = 1e-07 - # Get the data: the input matrix and the right output. - X = numpy.float32(eo['pdist-boolean-inp']) - Y_right = eo['pdist-hamming'] - - Y_test1 = pdist(X, 'hamming') - #print "hamming", numpy.abs(Y_test1 - Y_right).max() - self.failUnless(within_tol(Y_test1, Y_right, eps)) - - def test_pdist_hamming_random_nonC(self): - "Tests pdist(X, 'test_hamming') [the non-C implementation] on random data." - eps = 1e-07 - # Get the data: the input matrix and the right output. - X = eo['pdist-boolean-inp'] - Y_right = eo['pdist-hamming'] - Y_test2 = pdist(X, 'test_hamming') - #print "test-hamming", numpy.abs(Y_test2 - Y_right).max() - self.failUnless(within_tol(Y_test2, Y_right, eps)) - - ################### pdist: hamming (double) - def test_pdist_dhamming_random(self): - "Tests pdist(X, 'hamming') on random data." - eps = 1e-07 - # Get the data: the input matrix and the right output. - X = numpy.float64(eo['pdist-boolean-inp']) - Y_right = eo['pdist-hamming'] - Y_test1 = pdist(X, 'hamming') - #print "hamming", numpy.abs(Y_test1 - Y_right).max() - self.failUnless(within_tol(Y_test1, Y_right, eps)) - - def test_pdist_dhamming_random_float32(self): - "Tests pdist(X, 'hamming') on random data. (float32)" - eps = 1e-07 - # Get the data: the input matrix and the right output. - X = numpy.float32(eo['pdist-boolean-inp']) - Y_right = eo['pdist-hamming'] - Y_test1 = pdist(X, 'hamming') - #print "hamming", numpy.abs(Y_test1 - Y_right).max() - self.failUnless(within_tol(Y_test1, Y_right, eps)) - - def test_pdist_dhamming_random_nonC(self): - "Tests pdist(X, 'test_hamming') [the non-C implementation] on random data." - eps = 1e-07 - # Get the data: the input matrix and the right output. - X = numpy.float64(eo['pdist-boolean-inp']) - Y_right = eo['pdist-hamming'] - Y_test2 = pdist(X, 'test_hamming') - #print "test-hamming", numpy.abs(Y_test2 - Y_right).max() - self.failUnless(within_tol(Y_test2, Y_right, eps)) - - ################### pdist: jaccard - def test_pdist_jaccard_random(self): - "Tests pdist(X, 'jaccard') on random data." - eps = 1e-08 - # Get the data: the input matrix and the right output. 
- X = eo['pdist-boolean-inp'] - Y_right = eo['pdist-jaccard'] - - Y_test1 = pdist(X, 'jaccard') - #print "jaccard", numpy.abs(Y_test1 - Y_right).max() - self.failUnless(within_tol(Y_test1, Y_right, eps)) - - def test_pdist_jaccard_random_float32(self): - "Tests pdist(X, 'jaccard') on random data. (float32)" - eps = 1e-08 - # Get the data: the input matrix and the right output. - X = numpy.float32(eo['pdist-boolean-inp']) - Y_right = eo['pdist-jaccard'] - - Y_test1 = pdist(X, 'jaccard') - #print "jaccard", numpy.abs(Y_test1 - Y_right).max() - self.failUnless(within_tol(Y_test1, Y_right, eps)) - - def test_pdist_jaccard_random_nonC(self): - "Tests pdist(X, 'test_jaccard') [the non-C implementation] on random data." - eps = 1e-08 - # Get the data: the input matrix and the right output. - X = eo['pdist-boolean-inp'] - Y_right = eo['pdist-jaccard'] - Y_test2 = pdist(X, 'test_jaccard') - #print "test-jaccard", numpy.abs(Y_test2 - Y_right).max() - self.failUnless(within_tol(Y_test2, Y_right, eps)) - - ################### pdist: jaccard (double) - def test_pdist_djaccard_random(self): - "Tests pdist(X, 'jaccard') on random data." - eps = 1e-08 - # Get the data: the input matrix and the right output. - X = numpy.float64(eo['pdist-boolean-inp']) - Y_right = eo['pdist-jaccard'] - - Y_test1 = pdist(X, 'jaccard') - #print "jaccard", numpy.abs(Y_test1 - Y_right).max() - self.failUnless(within_tol(Y_test1, Y_right, eps)) - - def test_pdist_djaccard_random_float32(self): - "Tests pdist(X, 'jaccard') on random data. (float32)" - eps = 1e-08 - # Get the data: the input matrix and the right output. - X = numpy.float32(eo['pdist-boolean-inp']) - Y_right = eo['pdist-jaccard'] - - Y_test1 = pdist(X, 'jaccard') - #print "jaccard", numpy.abs(Y_test1 - Y_right).max() - self.failUnless(within_tol(Y_test1, Y_right, eps)) - - def test_pdist_djaccard_random_nonC(self): - "Tests pdist(X, 'test_jaccard') [the non-C implementation] on random data." - eps = 1e-08 - # Get the data: the input matrix and the right output. - X = numpy.float64(eo['pdist-boolean-inp']) - Y_right = eo['pdist-jaccard'] - Y_test2 = pdist(X, 'test_jaccard') - #print "test-jaccard", numpy.abs(Y_test2 - Y_right).max() - self.failUnless(within_tol(Y_test2, Y_right, eps)) - - ################### pdist: chebychev - def test_pdist_chebychev_random(self): - "Tests pdist(X, 'chebychev') on random data." - eps = 1e-08 - # Get the data: the input matrix and the right output. - X = eo['pdist-double-inp'] - Y_right = eo['pdist-chebychev'] - - Y_test1 = pdist(X, 'chebychev') - #print "chebychev", numpy.abs(Y_test1 - Y_right).max() - self.failUnless(within_tol(Y_test1, Y_right, eps)) - - def test_pdist_chebychev_random_float32(self): - "Tests pdist(X, 'chebychev') on random data. (float32)" - eps = 1e-07 - # Get the data: the input matrix and the right output. - X = numpy.float32(eo['pdist-double-inp']) - Y_right = eo['pdist-chebychev'] - - Y_test1 = pdist(X, 'chebychev') - print "chebychev", numpy.abs(Y_test1 - Y_right).max() - self.failUnless(within_tol(Y_test1, Y_right, eps)) - - def test_pdist_chebychev_random_nonC(self): - "Tests pdist(X, 'test_chebychev') [the non-C implementation] on random data." - eps = 1e-08 - # Get the data: the input matrix and the right output. 
- X = eo['pdist-double-inp'] - Y_right = eo['pdist-chebychev'] - Y_test2 = pdist(X, 'test_chebychev') - #print "test-chebychev", numpy.abs(Y_test2 - Y_right).max() - self.failUnless(within_tol(Y_test2, Y_right, eps)) - - def test_pdist_chebychev_iris(self): - "Tests pdist(X, 'chebychev') on the Iris data set." - eps = 1e-15 - # Get the data: the input matrix and the right output. - X = eo['iris'] - Y_right = eo['pdist-chebychev-iris'] - Y_test1 = pdist(X, 'chebychev') - #print "chebychev-iris", numpy.abs(Y_test1 - Y_right).max() - self.failUnless(within_tol(Y_test1, Y_right, eps)) - - def test_pdist_chebychev_iris_float32(self): - "Tests pdist(X, 'chebychev') on the Iris data set. (float32)" - eps = 1e-06 - # Get the data: the input matrix and the right output. - X = numpy.float32(eo['iris']) - Y_right = eo['pdist-chebychev-iris'] - Y_test1 = pdist(X, 'chebychev') - print "chebychev-iris", numpy.abs(Y_test1 - Y_right).max() - self.failUnless(within_tol(Y_test1, Y_right, eps)) - - def test_pdist_chebychev_iris_nonC(self): - "Tests pdist(X, 'test_chebychev') [the non-C implementation] on the Iris data set." - eps = 1e-15 - # Get the data: the input matrix and the right output. - X = eo['iris'] - Y_right = eo['pdist-chebychev-iris'] - Y_test2 = pdist(X, 'test_chebychev') - #print "test-chebychev-iris", numpy.abs(Y_test2 - Y_right).max() - self.failUnless(within_tol(Y_test2, Y_right, eps)) - - def test_pdist_matching_mtica1(self): - "Tests matching(*,*) with mtica example #1 (nums)." - m = matching(numpy.array([1, 0, 1, 1, 0]), - numpy.array([1, 1, 0, 1, 1])) - m2 = matching(numpy.array([1, 0, 1, 1, 0], dtype=numpy.bool), - numpy.array([1, 1, 0, 1, 1], dtype=numpy.bool)) - self.failUnless(numpy.abs(m - 0.6) <= 1e-10) - self.failUnless(numpy.abs(m2 - 0.6) <= 1e-10) - - def test_pdist_matching_mtica2(self): - "Tests matching(*,*) with mtica example #2." - m = matching(numpy.array([1, 0, 1]), - numpy.array([1, 1, 0])) - m2 = matching(numpy.array([1, 0, 1], dtype=numpy.bool), - numpy.array([1, 1, 0], dtype=numpy.bool)) - self.failUnless(numpy.abs(m - (2.0/3.0)) <= 1e-10) - self.failUnless(numpy.abs(m2 - (2.0/3.0)) <= 1e-10) - - def test_pdist_matching_match(self): - "Tests pdist('matching') to see if the two implementations match on random boolean input data." - D = eo['random-bool-data'] - B = numpy.bool_(D) - print B.shape, B.dtype - eps = 1e-10 - y1 = pdist(B, "matching") - y2 = pdist(B, "test_matching") - y3 = pdist(D, "test_matching") - print numpy.abs(y1-y2).max() - print numpy.abs(y1-y3).max() - self.failUnless(within_tol(y1, y2, eps)) - self.failUnless(within_tol(y2, y3, eps)) - - def test_pdist_jaccard_mtica1(self): - "Tests jaccard(*,*) with mtica example #1." - m = jaccard(numpy.array([1, 0, 1, 1, 0]), - numpy.array([1, 1, 0, 1, 1])) - m2 = jaccard(numpy.array([1, 0, 1, 1, 0], dtype=numpy.bool), - numpy.array([1, 1, 0, 1, 1], dtype=numpy.bool)) - self.failUnless(numpy.abs(m - 0.6) <= 1e-10) - self.failUnless(numpy.abs(m2 - 0.6) <= 1e-10) - - def test_pdist_jaccard_mtica2(self): - "Tests jaccard(*,*) with mtica example #2." - m = jaccard(numpy.array([1, 0, 1]), - numpy.array([1, 1, 0])) - m2 = jaccard(numpy.array([1, 0, 1], dtype=numpy.bool), - numpy.array([1, 1, 0], dtype=numpy.bool)) - self.failUnless(numpy.abs(m - (2.0/3.0)) <= 1e-10) - self.failUnless(numpy.abs(m2 - (2.0/3.0)) <= 1e-10) - - def test_pdist_jaccard_match(self): - "Tests pdist('jaccard') to see if the two implementations match on random double input data." 
- D = eo['random-bool-data'] - print D.shape, D.dtype - eps = 1e-10 - y1 = pdist(D, "jaccard") - y2 = pdist(D, "test_jaccard") - y3 = pdist(numpy.bool_(D), "test_jaccard") - print numpy.abs(y1-y2).max() - print numpy.abs(y2-y3).max() - self.failUnless(within_tol(y1, y2, eps)) - self.failUnless(within_tol(y2, y3, eps)) - - def test_pdist_yule_mtica1(self): - "Tests yule(*,*) with mtica example #1." - m = yule(numpy.array([1, 0, 1, 1, 0]), - numpy.array([1, 1, 0, 1, 1])) - m2 = yule(numpy.array([1, 0, 1, 1, 0], dtype=numpy.bool), - numpy.array([1, 1, 0, 1, 1], dtype=numpy.bool)) - print m - self.failUnless(numpy.abs(m - 2.0) <= 1e-10) - self.failUnless(numpy.abs(m2 - 2.0) <= 1e-10) - - def test_pdist_yule_mtica2(self): - "Tests yule(*,*) with mtica example #2." - m = yule(numpy.array([1, 0, 1]), - numpy.array([1, 1, 0])) - m2 = yule(numpy.array([1, 0, 1], dtype=numpy.bool), - numpy.array([1, 1, 0], dtype=numpy.bool)) - print m - self.failUnless(numpy.abs(m - 2.0) <= 1e-10) - self.failUnless(numpy.abs(m2 - 2.0) <= 1e-10) - - def test_pdist_yule_match(self): - "Tests pdist('yule') to see if the two implementations match on random double input data." - D = eo['random-bool-data'] - print D.shape, D.dtype - eps = 1e-10 - y1 = pdist(D, "yule") - y2 = pdist(D, "test_yule") - y3 = pdist(numpy.bool_(D), "test_yule") - print numpy.abs(y1-y2).max() - print numpy.abs(y2-y3).max() - self.failUnless(within_tol(y1, y2, eps)) - self.failUnless(within_tol(y2, y3, eps)) - - def test_pdist_dice_mtica1(self): - "Tests dice(*,*) with mtica example #1." - m = dice(numpy.array([1, 0, 1, 1, 0]), - numpy.array([1, 1, 0, 1, 1])) - m2 = dice(numpy.array([1, 0, 1, 1, 0], dtype=numpy.bool), - numpy.array([1, 1, 0, 1, 1], dtype=numpy.bool)) - print m - self.failUnless(numpy.abs(m - (3.0/7.0)) <= 1e-10) - self.failUnless(numpy.abs(m2 - (3.0/7.0)) <= 1e-10) - - def test_pdist_dice_mtica2(self): - "Tests dice(*,*) with mtica example #2." - m = dice(numpy.array([1, 0, 1]), - numpy.array([1, 1, 0])) - m2 = dice(numpy.array([1, 0, 1], dtype=numpy.bool), - numpy.array([1, 1, 0], dtype=numpy.bool)) - print m - self.failUnless(numpy.abs(m - 0.5) <= 1e-10) - self.failUnless(numpy.abs(m2 - 0.5) <= 1e-10) - - def test_pdist_dice_match(self): - "Tests pdist('dice') to see if the two implementations match on random double input data." - D = eo['random-bool-data'] - print D.shape, D.dtype - eps = 1e-10 - y1 = pdist(D, "dice") - y2 = pdist(D, "test_dice") - y3 = pdist(D, "test_dice") - print numpy.abs(y1-y2).max() - print numpy.abs(y2-y3).max() - self.failUnless(within_tol(y1, y2, eps)) - self.failUnless(within_tol(y2, y3, eps)) - - def test_pdist_sokalsneath_mtica1(self): - "Tests sokalsneath(*,*) with mtica example #1." - m = sokalsneath(numpy.array([1, 0, 1, 1, 0]), - numpy.array([1, 1, 0, 1, 1])) - m2 = sokalsneath(numpy.array([1, 0, 1, 1, 0], dtype=numpy.bool), - numpy.array([1, 1, 0, 1, 1], dtype=numpy.bool)) - print m - self.failUnless(numpy.abs(m - (3.0/4.0)) <= 1e-10) - self.failUnless(numpy.abs(m2 - (3.0/4.0)) <= 1e-10) - - def test_pdist_sokalsneath_mtica2(self): - "Tests sokalsneath(*,*) with mtica example #2." 
- m = sokalsneath(numpy.array([1, 0, 1]), - numpy.array([1, 1, 0])) - m2 = sokalsneath(numpy.array([1, 0, 1], dtype=numpy.bool), - numpy.array([1, 1, 0], dtype=numpy.bool)) - print m - self.failUnless(numpy.abs(m - (4.0/5.0)) <= 1e-10) - self.failUnless(numpy.abs(m2 - (4.0/5.0)) <= 1e-10) - - def test_pdist_sokalsneath_match(self): - "Tests pdist('sokalsneath') to see if the two implementations match on random double input data." - D = eo['random-bool-data'] - print D.shape, D.dtype - eps = 1e-10 - y1 = pdist(D, "sokalsneath") - y2 = pdist(D, "test_sokalsneath") - y3 = pdist(numpy.bool_(D), "test_sokalsneath") - print numpy.abs(y1-y2).max() - print numpy.abs(y2-y3).max() - self.failUnless(within_tol(y1, y2, eps)) - self.failUnless(within_tol(y2, y3, eps)) - - def test_pdist_rogerstanimoto_mtica1(self): - "Tests rogerstanimoto(*,*) with mtica example #1." - m = rogerstanimoto(numpy.array([1, 0, 1, 1, 0]), - numpy.array([1, 1, 0, 1, 1])) - m2 = rogerstanimoto(numpy.array([1, 0, 1, 1, 0], dtype=numpy.bool), - numpy.array([1, 1, 0, 1, 1], dtype=numpy.bool)) - print m - self.failUnless(numpy.abs(m - (3.0/4.0)) <= 1e-10) - self.failUnless(numpy.abs(m2 - (3.0/4.0)) <= 1e-10) - - def test_pdist_rogerstanimoto_mtica2(self): - "Tests rogerstanimoto(*,*) with mtica example #2." - m = rogerstanimoto(numpy.array([1, 0, 1]), - numpy.array([1, 1, 0])) - m2 = rogerstanimoto(numpy.array([1, 0, 1], dtype=numpy.bool), - numpy.array([1, 1, 0], dtype=numpy.bool)) - print m - self.failUnless(numpy.abs(m - (4.0/5.0)) <= 1e-10) - self.failUnless(numpy.abs(m2 - (4.0/5.0)) <= 1e-10) - - def test_pdist_rogerstanimoto_match(self): - "Tests pdist('rogerstanimoto') to see if the two implementations match on random double input data." - D = eo['random-bool-data'] - print D.shape, D.dtype - eps = 1e-10 - y1 = pdist(D, "rogerstanimoto") - y2 = pdist(D, "test_rogerstanimoto") - y3 = pdist(numpy.bool_(D), "test_rogerstanimoto") - print numpy.abs(y1-y2).max() - print numpy.abs(y2-y3).max() - self.failUnless(within_tol(y1, y2, eps)) - self.failUnless(within_tol(y2, y3, eps)) - - def test_pdist_russellrao_mtica1(self): - "Tests russellrao(*,*) with mtica example #1." - m = russellrao(numpy.array([1, 0, 1, 1, 0]), - numpy.array([1, 1, 0, 1, 1])) - m2 = russellrao(numpy.array([1, 0, 1, 1, 0], dtype=numpy.bool), - numpy.array([1, 1, 0, 1, 1], dtype=numpy.bool)) - print m - self.failUnless(numpy.abs(m - (3.0/5.0)) <= 1e-10) - self.failUnless(numpy.abs(m2 - (3.0/5.0)) <= 1e-10) - - def test_pdist_russellrao_mtica2(self): - "Tests russellrao(*,*) with mtica example #2." - m = russellrao(numpy.array([1, 0, 1]), - numpy.array([1, 1, 0])) - m2 = russellrao(numpy.array([1, 0, 1], dtype=numpy.bool), - numpy.array([1, 1, 0], dtype=numpy.bool)) - print m - self.failUnless(numpy.abs(m - (2.0/3.0)) <= 1e-10) - self.failUnless(numpy.abs(m2 - (2.0/3.0)) <= 1e-10) - - def test_pdist_russellrao_match(self): - "Tests pdist('russellrao') to see if the two implementations match on random double input data." - D = eo['random-bool-data'] - print D.shape, D.dtype - eps = 1e-10 - y1 = pdist(D, "russellrao") - y2 = pdist(D, "test_russellrao") - y3 = pdist(numpy.bool_(D), "test_russellrao") - print numpy.abs(y1-y2).max() - print numpy.abs(y2-y3).max() - self.failUnless(within_tol(y1, y2, eps)) - self.failUnless(within_tol(y2, y3, eps)) - - def test_pdist_sokalmichener_match(self): - "Tests pdist('sokalmichener') to see if the two implementations match on random double input data." 
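Completing the picture for the remaining metrics exercised here, using the same hypothetical _counts helper from the sketch above:

def rogerstanimoto_dissim(u, v):
    ntt, ntf, nft, nff = _counts(u, v)
    r = 2.0 * (ntf + nft)
    return r / (ntt + nff + r)

def russellrao_dissim(u, v):
    ntt, ntf, nft, nff = _counts(u, v)
    n = float(ntt + ntf + nft + nff)
    return (n - ntt) / n

Rogers-Tanimoto gives 3/4 and 4/5 on mtica examples #1 and #2, and Russell-Rao gives 3/5 and 2/3 -- the constants checked by the tests above.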
- D = eo['random-bool-data'] - print D.shape, D.dtype - eps = 1e-10 - y1 = pdist(D, "sokalmichener") - y2 = pdist(D, "test_sokalmichener") - y3 = pdist(numpy.bool_(D), "test_sokalmichener") - print numpy.abs(y1-y2).max() - print numpy.abs(y2-y3).max() - self.failUnless(within_tol(y1, y2, eps)) - self.failUnless(within_tol(y2, y3, eps)) - - def test_pdist_kulsinski_match(self): - "Tests pdist('kulsinski') to see if the two implementations match on random double input data." - D = eo['random-bool-data'] - print D.shape, D.dtype - eps = 1e-10 - y1 = pdist(D, "kulsinski") - y2 = pdist(D, "test_kulsinski") - y3 = pdist(numpy.bool_(D), "test_kulsinski") - print numpy.abs(y1-y2).max() - self.failUnless(within_tol(y1, y2, eps)) - class TestSquareForm(TestCase): ################### squareform From scipy-svn at scipy.org Mon Jun 9 01:59:40 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Mon, 9 Jun 2008 00:59:40 -0500 (CDT) Subject: [Scipy-svn] r4418 - trunk/scipy/cluster Message-ID: <20080609055940.EE51439C55F@scipy.org> Author: damian.eads Date: 2008-06-09 00:59:39 -0500 (Mon, 09 Jun 2008) New Revision: 4418 Modified: trunk/scipy/cluster/info.py Log: Added information on distance module in cluster/info.py Modified: trunk/scipy/cluster/info.py =================================================================== --- trunk/scipy/cluster/info.py 2008-06-09 05:55:44 UTC (rev 4417) +++ trunk/scipy/cluster/info.py 2008-06-09 05:59:39 UTC (rev 4418) @@ -14,6 +14,12 @@ clustering. Its features include generating hierarchical clusters from distance matrices, computing distance matrices from observation vectors, calculating statistics on clusters, cutting linkages to generate flat - clusters, and visualizing clusters with dendrograms. + clusters, and visualizing clusters with dendrograms. +Distance Computation +==================== + + The distance module provides functions for computing distances between + pairs of vectors from a set of observation vectors. + """ From scipy-svn at scipy.org Mon Jun 9 02:01:53 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Mon, 9 Jun 2008 01:01:53 -0500 (CDT) Subject: [Scipy-svn] r4419 - trunk/scipy/cluster Message-ID: <20080609060153.9BB1F39C55F@scipy.org> Author: damian.eads Date: 2008-06-09 01:01:51 -0500 (Mon, 09 Jun 2008) New Revision: 4419 Modified: trunk/scipy/cluster/SConstruct Log: Added extension building code for distance_wrap to cluster/SConstruct Modified: trunk/scipy/cluster/SConstruct =================================================================== --- trunk/scipy/cluster/SConstruct 2008-06-09 05:59:39 UTC (rev 4418) +++ trunk/scipy/cluster/SConstruct 2008-06-09 06:01:51 UTC (rev 4419) @@ -13,3 +13,7 @@ env.NumpyPythonExtension('_hierarchy_wrap', source = [join('src', 'hierarchy_wrap.c'), join('src', 'hierarchy.c')]) + + +env.NumpyPythonExtension('_distance_wrap', source = [join('src', 'distance_wrap.c'), + join('src', 'distance.c')]) From scipy-svn at scipy.org Mon Jun 9 02:05:13 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Mon, 9 Jun 2008 01:05:13 -0500 (CDT) Subject: [Scipy-svn] r4420 - trunk/scipy/cluster Message-ID: <20080609060513.9B7AF39C05F@scipy.org> Author: damian.eads Date: 2008-06-09 01:05:10 -0500 (Mon, 09 Jun 2008) New Revision: 4420 Modified: trunk/scipy/cluster/__init__.py trunk/scipy/cluster/hierarchy.py Log: Added import to cluster/__init__.py. Removed pdist import from hierarchy. 
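In sketch form, the import style this commit moves to keeps the distance module a visible, qualified dependency instead of pulling single names into hierarchy's namespace (names as in the diff that follows; illustrative only):

# before: from distance import pdist;  dm = pdist(X, metric)
# after:  import distance;             dm = distance.pdist(X, metric)

import distance

def _pairwise(X, metric='euclidean'):
    # calling through the module makes the dependency obvious at the call site
    return distance.pdist(X, metric)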
Modified: trunk/scipy/cluster/__init__.py =================================================================== --- trunk/scipy/cluster/__init__.py 2008-06-09 06:01:51 UTC (rev 4419) +++ trunk/scipy/cluster/__init__.py 2008-06-09 06:05:10 UTC (rev 4420) @@ -6,6 +6,6 @@ __all__ = ['vq', 'hierarchy', 'distance'] -import vq, hierarchy +import vq, hierarchy, distance from scipy.testing.pkgtester import Tester test = Tester().test Modified: trunk/scipy/cluster/hierarchy.py =================================================================== --- trunk/scipy/cluster/hierarchy.py 2008-06-09 06:01:51 UTC (rev 4419) +++ trunk/scipy/cluster/hierarchy.py 2008-06-09 06:05:10 UTC (rev 4420) @@ -149,7 +149,7 @@ import numpy as np import _hierarchy_wrap, types -from distance import pdist +import distance _cpy_non_euclid_methods = {'single': 0, 'complete': 1, 'average': 2, 'weighted': 6} @@ -437,14 +437,14 @@ if method not in _cpy_linkage_methods: raise ValueError('Invalid method: %s' % method) if method in _cpy_non_euclid_methods.keys(): - dm = pdist(X, metric) + dm = distance.pdist(X, metric) Z = np.zeros((n - 1, 4)) _hierarchy_wrap.linkage_wrap(dm, Z, n, \ int(_cpy_non_euclid_methods[method])) elif method in _cpy_euclid_methods.keys(): if metric != 'euclidean': raise ValueError('Method %s requires the distance metric to be euclidean' % s) - dm = pdist(X, metric) + dm = distance.pdist(X, metric) Z = np.zeros((n - 1, 4)) _hierarchy_wrap.linkage_euclid_wrap(dm, Z, X, m, n, int(_cpy_euclid_methods[method])) @@ -1341,7 +1341,7 @@ descriptions. distance: the distance metric for calculating pairwise - distances. See pdist for descriptions and + distances. See distance.pdist for descriptions and linkage to verify compatibility with the linkage method. @@ -1361,7 +1361,7 @@ if type(X) != np.ndarray or len(X.shape) != 2: raise TypeError('The observation matrix X must be an n by m numpy array.') - Y = pdist(X, metric=distance) + Y = distance.pdist(X, metric=distance) Z = linkage(Y, method=method) if R is None: R = inconsistent(Z, d=depth) From scipy-svn at scipy.org Mon Jun 9 18:42:05 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Mon, 9 Jun 2008 17:42:05 -0500 (CDT) Subject: [Scipy-svn] r4421 - trunk/scipy/ndimage Message-ID: <20080609224205.9012839C192@scipy.org> Author: tom.waite Date: 2008-06-09 17:42:02 -0500 (Mon, 09 Jun 2008) New Revision: 4421 Modified: trunk/scipy/ndimage/_registration.py Log: bug fixes Modified: trunk/scipy/ndimage/_registration.py =================================================================== --- trunk/scipy/ndimage/_registration.py 2008-06-09 06:05:10 UTC (rev 4420) +++ trunk/scipy/ndimage/_registration.py 2008-06-09 22:42:02 UTC (rev 4421) @@ -31,69 +31,57 @@ # ---- co-registration and IO ---- # -def resize_image(imageG, imageF_mat): +def resize_image(imageS, imageS_mat, imageR_mat): """ - zoom_image = resize_image(source_image, reference_image[mat]) + zoom_image = resize_image(imageS, imageS_mat, imageR_mat) - Fractional resample source_image to reference_imagesize. The - resample is implemented with 3D cubic spline. The reference - image [mat] is the 4x4 voxel-to-physical conversion matrix. + Fractional resample source_image to reference_image size. The + resample is implemented with 3D cubic spline. The source + imageS_mat is the 4x4 voxel-to-physical conversion matrix. Parameters ---------- - imageG : {dictionary} - imageG is the source image to be resized. it is a dictionary with - the data as an ndarray in the ['data'] component. 
+ imageS: {ndarray} + imageS is the source image to be resized. - reference_image[mat] : {ndarray} - refernce_image is the image whose sampling dimensions the source - image is to be remapped to. [mat] refers to the component - of the image dictionary, reference_image['mat'] that is the - sampling dimensions. + imageS_mat : {ndarray} + the 4x4 transform of the source image that maps voxel to physical. + imageR_mat : {ndarray} + the 4x4 transform of the destination image that maps voxel to physical. + Returns ------- - zoom_image : {dictionary} + zoom_image : {ndarray} Examples -------- >>> import _registration as reg - >>> measures, imageF_anat, fmri_series = reg.demo_MRI_coregistration() + >>> measures, image_anat, image_anat_mat, image_fmri_mat, fmri_series = reg.demo_MRI_coregistration() - >>> resampled_fmri = reg.resize_image(fmri_series[10], imageF_anat['mat']) + >>> resampled_fmri = reg.resize_image(fmri_series[10], image_fmri_mat, image_anat_mat) - image 10 in the fmri_series is resampled to imageF_anat coordinates + image 10 in the fmri_series is resampled from image_fmri_mat to image_anat coordinates """ - Z = np.zeros(3, dtype=np.float64); # get the zoom - Z[0] = imageG['mat'][0][0] / imageF_mat[0][0] - Z[1] = imageG['mat'][1][1] / imageF_mat[1][1] - Z[2] = imageG['mat'][2][2] / imageF_mat[2][2] + Z = imageS.diagonal() / imageR.diagonal() - # new volume dimensions (rounded) - D = np.zeros(3, dtype=np.int32); - D[0] = int(float(imageG['dim'][0])*Z[0]+0.5) - D[1] = int(float(imageG['dim'][1])*Z[1]+0.5) - D[2] = int(float(imageG['dim'][2])*Z[2]+0.5) + # new volume dimensions (rounded). D, imageS and Z are 3D and this is a vector element product + D = (imageS.shape * Z + 0.5).astype(np.int16) - M = np.eye(4, dtype=np.float64); - # for the test data, set the xyz voxel sizes for fMRI volume - M[0][0] = imageG['mat'][0][0]/Z[0] - M[1][1] = imageG['mat'][1][1]/Z[1] - M[2][2] = imageG['mat'][2][2]/Z[2] - - image = np.zeros(D[2]*D[1]*D[0], dtype=np.uint8).reshape(D[2], D[0], D[1]) + # for the test data, set the xyz voxel sizes for fMRI volume. M is a 4x4 matrix. + M = np.diag(imageS.diagonal() / Z) + image = np.empty((D[2],D[1],D[0]),np.uint8) + mode = 2 scale = 0 - reg.register_volume_resample(imageG['data'], image, Z, scale, mode) - F = np.zeros(3, dtype=np.float64); - zoom_image = {'data' : image, 'mat' : M, 'dim' : D, 'fwhm' : F} + reg.register_volume_resample(imageS, image, Z, scale, mode) - return zoom_image + return image, M def remap_image(image, parm_vector, resample='linear'): """ @@ -105,20 +93,19 @@ Parameters ---------- - image : {dictionary} - image is the source image to be remapped. it is a dictionary with - the data as an ndarray in the ['data'] component. + image : {ndarray} + image is the source image to be remapped. parm_vector : {ndarray} parm_vector is the 6-dimensional vector (3 angles, 3 translations) - generated from the registration. + generated from the rigid body registration. 
resample : {'linear', 'cubic'}, optional Returns ------- - remaped_image : {dictionary} + remaped_image : {ndarray} Examples -------- @@ -134,19 +121,20 @@ # use the 6 dim parm_vector (3 angles, 3 translations) to remap # M_inverse = get_inverse_mappings(parm_vector) - (layers, rows, cols) = image['data'].shape + (layers, rows, cols) = image.shape + # allocate the zero image - remaped_image = np.zeros(layers*rows*cols, dtype=np.uint8).reshape(layers, rows, cols) - remaped_image = {'data' : remaped_image, 'mat' : image['mat'], - 'dim' : image['dim'], 'fwhm' : image['fwhm']} - imdata = build_structs() + #remaped_image = np.zeros(image.size, dtype=np.uint8).reshape(layers, rows, cols) + remaped_image = np.empty(image.shape, dtype=np.uint8) + step = np.array([1, 1, 1], dtype=np.int32) + if resample == 'linear': # trilinear interpolation mapping. - reg.register_linear_resample(image['data'], remaped_image['data'], M_inverse, imdata['step']) + reg.register_linear_resample(image, remaped_image, M_inverse, step) elif resample == 'cubic': # tricubic convolve interpolation mapping. - reg.register_cubic_resample(image['data'], remaped_image['data'], M_inverse, imdata['step']) + reg.register_cubic_resample(image, remaped_image, M_inverse, step) return remaped_image @@ -182,24 +170,18 @@ """ # get the inverse mapping to rotate the G matrix to F space following registration - imdata = build_structs() - # inverse angles and translations - imdata['parms'][0] = -parm_vector[0] - imdata['parms'][1] = -parm_vector[1] - imdata['parms'][2] = -parm_vector[2] - imdata['parms'][3] = -parm_vector[3] - imdata['parms'][4] = -parm_vector[4] - imdata['parms'][5] = -parm_vector[5] - M_inverse = build_rotate_matrix(imdata['parms']) + # -parm_vector is the inverse angles and translations + M_inverse = build_rotate_matrix(-parm_vector) return M_inverse -def python_coreg(image1, image2, imdata, ftype=1, smimage=0, lite=0, smhist=0, - method='nmi', opt_method='powell'): +def coregister(image1, image1_mat, image2, image2_mat, multires=[4, 2], histo_fwhm=3, + ftype=1, lite=0, smhist=0, method='nmi', opt_method='powell'): + """ - parm_vector = python_coreg(image1, image2, imdata, ftype=1, smimage=0, lite=0, - smhist=0, method='nmi', opt_method='powell'): + parm_vector = coregister(image1, image1_mat, image2, image2_mat, multires=[4, 2], histo_fwhm=3, + ftype=1, lite=0, smhist=0, method='nmi', opt_method='powell'): - takes two images and the image data descriptor (imdata) and determines the optimal + takes two images and the image process descriptor (improc) and determines the optimal alignment of the two images (measured by mutual information or cross correlation) using optimization search of 3 angle and 3 translation parameters. The optimization uses either the Powell or Conjugate Gradient methods in the scipy optimization @@ -207,29 +189,33 @@ Parameters ---------- - image1 : {dictionary} + image1 : {nd_array} image1 is the source image to be remapped during the registration. - it is a dictionary with the data as an ndarray in the ['data'] component. - image2 : {dictionary} + image1_mat : {nd_array} + image1_mat is the source image MAT + image2 : {nd_array} image2 is the reference image that image1 gets mapped to. - imdata : {dictionary} - image sampling and optimization information. + image2_mat : {nd_array} + image2_mat is the source image MAT + multires: {list}, optional + the volume subsample values for each pass of the registration. 
+ the default is 2 passes with subsample 4 in pass 1 and subsample 2 in pass 2 + histo_fwhm : {int}, optional + used for the filter kernel in the low pass filter of the joint histogram ftype : {0, 1}, optional flag for type of low pass filter. 0 is Gauss-Spline 1 is pure Gauss. Sigma determined from volume sampling info. - smimage : {0, 1}, optional - flag for volume 3D low pass filtering of image 2. - 0 for no filter, 1 for do filter. lite : {0, 1}, optional lite of 1 is to jitter both images during resampling. 0 is to not jitter. jittering is for non-aliased volumes. smhist: {0, 1}, optional flag for joint histogram low pass filtering. 0 for no filter, 1 for do filter. - method: {'nmi', 'mi', 'ncc', 'ecc'}, optional + method: {'nmi', 'mi', 'ncc', 'ecc', 'mse'}, optional flag for type of registration metric. nmi is normalized mutual information; mi is mutual information; ecc is entropy cross - correlation; ncc is normalized cross correlation. + correlation; ncc is normalized cross correlation. mse is mean + squared error. opt_method: {'powell', 'hybrid'}, optional registration is two pass. Pass 1 is low res to get close to alignment and pass 2 starts at the pass 1 optimal alignment. In powell pass 1 and @@ -248,34 +234,33 @@ >>> import numpy as NP >>> import _registration as reg - >>> image1, image2, imdata = reg.demo_MRI_volume_align() - >>> parm_vector = python_coreg(image1, image2, imdata) + >>> image1, image2, fwhm, improc = reg.demo_build_dual_volumes() + >>> parm_vector = coregister(image1, image2, fwhm, improc) """ + start = time.time() - # smooth of the images - if smimage: - image_F_xyz2 = filter_image_3D(image2['data'], image2['fwhm'], ftype) - image2['data'] = image_F_xyz2 - parm_vector = multires_registration(image1, image2, imdata, lite, smhist, method, opt_method) + parm_vector = multires_registration(image1, image1_mat, image2, image2_mat, multires, + histo_fwhm, lite, smhist, method, opt_method) stop = time.time() print 'Total Optimizer Time is ', (stop-start) return parm_vector -def multires_registration(image1, image2, imdata, lite, smhist, method, opt_method): +def multires_registration(image1, image1_mat, image2, image2_mat, multires, histo_fwhm, + lite, smhist, method, opt_method): + """ x = multires_registration(image1, image2, imdata, lite, smhist, method, opt_method) - to be called by python_coreg() which optionally does 3D image filtering and + to be called by coregister() which optionally does 3D image filtering and provies timing for registration. Parameters ---------- - image1 : {dictionary} + image1 : {nd_array} image1 is the source image to be remapped during the registration. - it is a dictionary with the data as an ndarray in the ['data'] component. - image2 : {dictionary} + image2 : {nd_array} image2 is the reference image that image1 gets mapped to. imdata : {dictionary} image sampling and optimization information. @@ -285,10 +270,11 @@ smhist: {integer} flag for joint histogram low pass filtering. 0 for no filter, 1 for do filter. - method: {'nmi', 'mi', 'ncc', 'ecc'} + method: {'nmi', 'mi', 'ncc', 'ecc', 'mse'} flag for type of registration metric. nmi is normalized mutual information; mi is mutual information; ecc is entropy cross - correlation; ncc is normalized cross correlation. + correlation; ncc is normalized cross correlation. mse is mean + square error. opt_method: {'powell', 'hybrid'} registration is two pass. Pass 1 is low res to get close to alignment and pass 2 starts at the pass 1 optimal alignment. 
In powell pass 1 and @@ -303,31 +289,32 @@ Examples -------- - (calling this from python_coreg which optionally filters image2) + (calling this from coregister which optionally filters image2) >>> import numpy as NP >>> import _registration as reg - >>> image1, image2, imdata = reg.demo_MRI_volume_align() - >>> parm_vector = python_coreg(image1, image2, imdata) + >>> image1, mat1, image2, mat2 = reg.demo_build_dual_volumes() + >>> parm_vector = coregister(image1, image2, imdata) """ ret_histo=0 - # zero out the start parameter; but this may be set to large values - # if the head is out of range and well off the optimal alignment skirt - imdata['parms'][0:5] = 0.0 + step = np.array([1, 1, 1], dtype=np.int32) + fwhm = np.zeros(2, dtype=np.int32) # make the step a scalar to can put in a multi-res loop - loop = range(imdata['sample'].size) - x = imdata['parms'] + loop = range(size(multires)) + # 6-D zero vector + x = np.zeros(6, dtype=np.float64); + # the kernel fwhm value for the x and y joint histogram filter + fwhm[:] = histo_fwhm for i in loop: - step = imdata['sample'][i] - imdata['step'][:] = step - optfunc_args = (image1, image2, imdata['step'], imdata['fwhm'], lite, smhist, - method, ret_histo) + # this is the volume subsample + step[:] = multires[i] + optfunc_args = (image1, image1_mat, image2, image2_mat, step, fwhm, lite, + smhist, method, ret_histo) p_args = (optfunc_args,) if opt_method=='powell': print 'POWELL multi-res registration step size ', step print 'vector ', x - x = fmin_powell(optimize_function, x, args=p_args, - callback=callback_powell) + x = fmin_powell(optimize_function, x, args=p_args, callback=callback_powell) elif opt_method=='cg': print 'CG multi-res registration step size ', step print 'vector ', x @@ -337,16 +324,16 @@ print 'Hybrid POWELL multi-res registration step size ', step print 'vector ', x lite = 0 - optfunc_args = (image1, image2, imdata['step'], imdata['fwhm'], lite, smhist, - method, ret_histo) + optfunc_args = (image1, image1_mat, image2, image2_mat, step, fwhm, lite, + smhist, method, ret_histo) p_args = (optfunc_args,) x = fmin_powell(optimize_function, x, args=p_args, callback=callback_powell) elif i==1: print 'Hybrid CG multi-res registration step size ', step print 'vector ', x lite = 1 - optfunc_args = (image1, image2, imdata['step'], imdata['fwhm'], lite, - smhist, method, ret_histo) + optfunc_args = (image1, image1_mat, image2, image2_mat, step, histo_fwhm, + lite, smhist, method, ret_histo) p_args = (optfunc_args,) x = fmin_cg(optimize_function, x, args=p_args, callback=callback_cg) @@ -369,7 +356,7 @@ print x return -def smooth_kernel(fwhm, x, ktype=1): +def smooth_kernel(fwhm, x, pixel_scale=8.0, ktype=1): """ kernel = smooth_kernel(fwhm, x, ktype=1) @@ -411,7 +398,7 @@ """ eps = 0.00001 - s = np.square((fwhm/math.sqrt(8.0*math.log(2.0)))) + eps + s = np.square((fwhm/math.sqrt(pixel_scale*math.log(2.0)))) + eps if ktype==1: # from SPM: Gauss kernel convolved with 1st degree B spline w1 = 0.5 * math.sqrt(2.0/s) @@ -453,7 +440,7 @@ -------- >>> import _registration as reg - >>> image1, image2, imdata = reg.demo_MRI_volume_align() + >>> image1, image2, imdata = reg.demo_build_dual_volumes() >>> ftype = 1 >>> image_Filter_xyz = filter_image_3D(image1['data'], image1['fwhm'], ftype) >>> image1['data'] = image_Filter_xyz @@ -462,12 +449,15 @@ p = np.ceil(2*fwhm[0]).astype(int) x = np.array(range(-p, p+1)) kernel_x = smooth_kernel(fwhm[0], x, ktype=ftype) + p = np.ceil(2*fwhm[1]).astype(int) x = np.array(range(-p, p+1)) kernel_y = 
smooth_kernel(fwhm[1], x, ktype=ftype) + p = np.ceil(2*fwhm[2]).astype(int) x = np.array(range(-p, p+1)) kernel_z = smooth_kernel(fwhm[2], x, ktype=ftype) + output=None # 3D filter in 3 1D separable stages axis = 0 @@ -504,14 +494,14 @@ >>> import _registration as reg >>> anat_desc = reg.load_anatMRI_desc() >>> image1 = reg.load_volume(anat_desc, imagename='ANAT1_V0001.img') - >>> imdata = reg.build_structs() >>> image1['fwhm'] = reg.build_fwhm(image1['mat'], imdata['step']) """ + # M contains the voxel to physical mapping view_3x3 = np.square(M[0:3, 0:3]) - # sum the elements inn the first row + # sum the elements in the first row vxg = np.sqrt(view_3x3.sum(axis=0)) - # assumes that sampling is the same for xyz + # assumes that voxel sampling is the same for xyz as S is the step size = np.array([1,1,1])*S[0] x = np.square(size) - np.square(vxg) # clip @@ -592,7 +582,6 @@ >>> anat_desc = reg.load_anatMRI_desc() >>> image1 = reg.load_volume(anat_desc, imagename='ANAT1_V0001.img') >>> image2 = reg.load_volume(anat_desc, imagename='ANAT1_V0001.img') - >>> imdata = reg.build_structs() >>> image1['fwhm'] = reg.build_fwhm(image1['mat'], imdata['step']) >>> image2['fwhm'] = reg.build_fwhm(image2['mat'], imdata['step']) >>> method = 'ncc' @@ -607,24 +596,26 @@ """ image_F = optfunc_args[0] - image_G = optfunc_args[1] - sample_vector = optfunc_args[2] - fwhm = optfunc_args[3] - do_lite = optfunc_args[4] - smooth = optfunc_args[5] - method = optfunc_args[6] - ret_histo = optfunc_args[7] + image_F_mat = optfunc_args[1] + image_G = optfunc_args[2] + image_G_mat = optfunc_args[3] + sample_vector = optfunc_args[4] + fwhm = optfunc_args[5] + do_lite = optfunc_args[6] + smooth = optfunc_args[7] + method = optfunc_args[8] + ret_histo = optfunc_args[9] rot_matrix = build_rotate_matrix(x) cost = 0.0 epsilon = 2.2e-16 # image_G is base image # image_F is the to-be-rotated image - # rot_matrix is the 4x4 constructed (current angles and translates) transform matrix + # rot_matrix is the 4x4 constructed (rigid body) transform matrix # sample_vector is the subsample vector for x-y-z - F_inv = np.linalg.inv(image_F['mat']) - composite = np.dot(F_inv, image_G['mat']) + F_inv = np.linalg.inv(image_F_mat) + composite = np.dot(F_inv, image_G_mat) composite = np.dot(composite, rot_matrix) if method == 'mse': @@ -632,14 +623,14 @@ # mean squard error method # - (layers, rows, cols) = image_F['data'].shape # allocate the zero image - remap_image_F = np.zeros(layers*rows*cols, dtype=np.uint8).reshape(layers, rows, cols) - imdata = build_structs() + #(layers, rows, cols) = image_F.shape + #remap_image_F = np.zeros(image_F.size, dtype=np.uint8).reshape(layers, rows, cols) + remap_image_F = np.empty(image_F.shape, dtype=np.uint8) # trilinear interpolation mapping. 
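# (sketch of the branch below) the 'mse' metric resamples F onto G's grid
# with the current composite transform and scores mean((G - F_remap)**2);
# that cost is already positive and smallest at alignment, so it is returned
# as-is, unlike the mutual-information metrics later in this function, which
# negate their scores (cost = -nmi, cost = -ncc) so the minimizer can be used.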
- reg.register_linear_resample(image_F['data'], remap_image_F, composite, - imdata['step']) - cost = (np.square(image_G['data']-remap_image_F)).mean() + reg.register_linear_resample(image_F, remap_image_F, composite, sample_vector) + cost = (np.square(image_G-remap_image_F)).mean() + # cost is min when G and F are aligned so keep cost positive return cost @@ -649,29 +640,25 @@ # # allocate memory for 2D histogram - joint_histogram = np.zeros([256, 256], dtype=np.float64); + joint_histogram = np.zeros([256, 256], dtype=np.float64) if do_lite: - reg.register_histogram_lite(image_F['data'], image_G['data'], composite, - sample_vector, joint_histogram) + reg.register_histogram_lite(image_F, image_G, composite, sample_vector, joint_histogram) else: - reg.register_histogram(image_F['data'], image_G['data'], composite, - sample_vector, joint_histogram) + reg.register_histogram(image_F, image_G, composite, sample_vector, joint_histogram) # smooth the histogram if smooth: - p = np.ceil(2*fwhm[0]).astype(int) + p = np.ceil(2*fwhm).astype(int) x = np.array(range(-p, p+1)) - kernel1 = smooth_kernel(fwhm[0], x) - p = np.ceil(2*fwhm[1]).astype(int) - x = np.array(range(-p, p+1)) - kernel2 = smooth_kernel(fwhm[1], x) + hkernel = smooth_kernel(fwhm, x) output=None - # 2D filter in 1D separable stages + # 2D filter in 1D separable stages using the same kernel. SPM + # has options for a 2D fwhm kernel yet only uses 1 element axis = 0 - result = correlate1d(joint_histogram, kernel1, axis, output) + joint_histogram = correlate1d(joint_histogram, hkernel, axis, output) axis = 1 - joint_histogram = correlate1d(result, kernel1, axis, output) + joint_histogram = correlate1d(joint_histogram, hkernel, axis, output) joint_histogram += epsilon # prevent log(0) # normalize the joint histogram @@ -702,7 +689,7 @@ row_entropy = marginal_row * np.log(marginal_row) col_entropy = marginal_col * np.log(marginal_col) H = joint_histogram * np.log(joint_histogram) - nmi = (row_entropy.sum() + col_entropy.sum()) / (H.sum()) + nmi = (row_entropy.sum() + col_entropy.sum()) / (H.sum()) cost = -nmi elif method == 'ncc': @@ -719,7 +706,7 @@ b = b - m2 # element multiplies in the joint histogram and grids H = ((joint_histogram * a) * b).sum() - ncc = H / (np.dot(sig1, sig2)) + ncc = H / (np.dot(sig1, sig2)) cost = -ncc if ret_histo: @@ -728,64 +715,6 @@ return cost -def build_structs(step=1): - """ - img_data = build_structs(step=1) - - builds the image data (imdata) dictionary for later use as parameter - storage in the co-registration. - - Parameters - ---------- - step : {int} : optional - default is 1 and is the sample increment in voxels. This sets the sample - for x,y,z and is the same value in all 3 axes. only change the default for debug. - - Returns - ------- - img_data : {dictionary} - - Examples - -------- - - >>> import numpy as NP - >>> import _registration as reg - >>> imdata = reg.build_structs() - - """ - - # build image data structures here - P = np.zeros(6, dtype=np.float64); - T = np.zeros(6, dtype=np.float64); - F = np.zeros(2, dtype=np.int32); - S = np.ones(3, dtype=np.int32); - sample = np.zeros(2, dtype=np.int32); - S[0] = step - S[1] = step - S[2] = step - # image/histogram smoothing - F[0] = 3 - F[1] = 3 - # subsample for multiresolution registration - sample[0] = 4 - sample[1] = 2 - # tolerances for angle (0-2) and translation (3-5) - T[0] = 0.02 - T[1] = 0.02 - T[2] = 0.02 - T[3] = 0.001 - T[4] = 0.001 - T[5] = 0.001 - # P[0] = alpha <=> pitch. 
+ alpha is moving back in the sagittal plane - # P[1] = beta <=> roll. + beta is moving right in the coronal plane - # P[2] = gamma <=> yaw. + gamma is right turn in the transverse plane - # P[3] = Tx - # P[4] = Ty - # P[5] = Tz - img_data = {'parms' : P, 'step' : S, 'fwhm' : F, 'tol' : T, 'sample' : sample} - return img_data - - def build_rotate_matrix(img_data_parms): """ rot_matrix = reg.build_rotate_matrix(img_data_parms) @@ -807,7 +736,6 @@ >>> import numpy as NP >>> import _registration as reg - >>> imdata = reg.build_structs() >>> x = np.zeros(6, dtype=np.float64) >>> M = reg.build_rotate_matrix(x) >>> M @@ -864,6 +792,51 @@ return rot_matrix +def build_test_volume(imagedesc, S=[15.0, 25.0, 10.0]): + + """ + build a 3D Gaussian volume. user passes in image dims in imagedesc + the sigma for each axis is S[3] where 0=z, 1=y, 2=x + + volume3D = build_test_volume(imagedesc, S) + + Parameters + ---------- + imagedesc : {dictionary} + volume dimensions and sampling + + S : {tuple} + the Gaussian sigma for Z, Y and X + + Returns + ------- + + volume3D : {nd_array} + the 3D volume for testing + + """ + layers = imagedesc['layers'] + rows = imagedesc['rows'] + cols = imagedesc['cols'] + + L = layers/2 + R = rows/2 + C = cols/2 + + # build coordinates for 3D Gaussian volume + # coordinates are centered at (0, 0, 0) + [a, b, c] = np.mgrid[-L:L, -R:R, -C:C] + + sigma = np.array([S[0], S[1], S[2]]) + aa = (np.square(a))/sigma[0] + bb = (np.square(b))/sigma[1] + cc = (np.square(c))/sigma[2] + volume3D = np.exp(-(aa + bb + cc)) + + return volume3D + + + def load_volume(imagedesc, imagename=None, threshold=0.999, debug=0): """ @@ -895,13 +868,13 @@ Returns ------- - image : {dictionary} + image : {nd_array} the volume data assoicated with the filename or a blank volume of the same dimensions as specified in imagedesc. --- OR --- (if debug = 1) - image : {dictionary} + image : {nd_array} the volume data assoicated with the filename or a blank volume of the same dimensions as specified in imagedesc. 
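The 8-bit conversion described in these returns compresses the volume through its integrated histogram; a condensed NumPy sketch of that step (assuming an unsigned-integer input volume; the threshold semantics follow the hunk below, and the names here are illustrative):

import numpy as np

def scale_to_uint8(vol, threshold=0.999):
    lo, hi = int(vol.min()), int(vol.max())
    h, _ = np.histogram(vol, bins=hi - lo + 1, range=(lo, hi + 1))
    ih = np.cumsum(h) / float(vol.size)          # integrated histogram
    index = lo + int(np.searchsorted(ih, threshold))  # threshold crossing
    scaled = 255.0 * (vol.astype(np.float64) - lo) / max(index - lo, 1)
    return np.clip(scaled, 0, 255).astype(np.uint8)

Voxels above the crossing point saturate at 255, which is the outlier-clipping behaviour the docstring describes.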
@@ -932,11 +905,12 @@ # autoscale is using integrated histogram to deal with outlier high amplitude voxels if imagename == None: # imagename of none means to create a blank image - ImageVolume = np.zeros(imagedesc['layers']*imagedesc['rows']*imagedesc['cols'], - dtype=np.uint16).reshape(imagedesc['layers'], imagedesc['rows'], imagedesc['cols']) + image = np.zeros([imagedesc['layers'],imagedesc['rows'],imagedesc['cols']],dtype=np.uint16) + #image = np.zeros(imagedesc['layers']*imagedesc['rows']*imagedesc['cols'], + # dtype=np.uint16).reshape(imagedesc['layers'], imagedesc['rows'], imagedesc['cols']) else: - ImageVolume = np.fromfile(imagename, - dtype=np.uint16).reshape(imagedesc['layers'], imagedesc['rows'], imagedesc['cols']); + image = np.fromfile(imagename, + dtype=np.uint16).reshape(imagedesc['layers'], imagedesc['rows'], imagedesc['cols']); # the mat (voxel to physical) matrix M = np.eye(4, dtype=np.float64); @@ -944,23 +918,15 @@ M[0][0] = imagedesc['sample_x'] M[1][1] = imagedesc['sample_y'] M[2][2] = imagedesc['sample_z'] - # dimensions - D = np.zeros(3, dtype=np.int32); - # Gaussian kernel - fill in with build_fwhm() - F = np.zeros(3, dtype=np.float64); - D[0] = imagedesc['rows'] - D[1] = imagedesc['cols'] - D[2] = imagedesc['layers'] if imagename == None: # no voxels to scale to 8 bits - ImageVolume = ImageVolume.astype(np.uint8) - image = {'data' : ImageVolume, 'mat' : M, 'dim' : D, 'fwhm' : F} - return image + image = image.astype(np.uint8) + return image, M # 8 bit scale with threshold clip of the volume integrated histogram - max = ImageVolume.max() - min = ImageVolume.min() + max = image.max() + min = image.min() ih = np.zeros(max-min+1, dtype=np.float64); h = np.zeros(max-min+1, dtype=np.float64); if threshold <= 0: @@ -969,16 +935,16 @@ threshold = 1.0 # get the integrated histogram of the volume and get max from # the threshold crossing in the integrated histogram - index = reg.register_image_threshold(ImageVolume, h, ih, threshold) + index = reg.register_image_threshold(image, h, ih, threshold) scale = 255.0 / (index-min) # generate the scaled 8 bit image - images = (scale*(ImageVolume.astype(np.float)-min)) - images[images>255] = 255 - image = {'data' : images.astype(np.uint8), 'mat' : M, 'dim' : D, 'fwhm' : F} + image = (scale*(image.astype(np.float)-min)) + image[image>255] = 255 + image = image.astype(np.uint8) if debug == 1: - return image, h, ih, index + return image, M, h, ih, index else: - return image + return image, M @@ -1015,120 +981,100 @@ return files_fMRI -def check_alignment(image1, image2, imdata, method='ncc', lite=0, smhist=0, - alpha=0.0, beta=0.0, gamma=0.0, Tx=0, Ty=0, Tz=0, ret_histo=0): +def check_alignment(image1, image1_mat, image2, image2_mat, histo_fwhm=3, method='ncc', lite=0, + smhist=0, alpha=0.0, beta=0.0, gamma=0.0, Tx=0, Ty=0, Tz=0, ret_histo=0): # - # to test the cost function and view the joint histogram - # for 2 images. 
used for debug + # to test the cost function and (optional) view the joint histogram + # default of use of ncc for testing the cross-correlation as a metric + # of alignment # - imdata['parms'][0] = alpha - imdata['parms'][1] = beta - imdata['parms'][2] = gamma - imdata['parms'][3] = Tx - imdata['parms'][4] = Ty - imdata['parms'][5] = Tz - M = build_rotate_matrix(imdata['parms']) - optfunc_args = (image1, image2, imdata['step'], imdata['fwhm'], lite, smhist, method, ret_histo) + P = np.zeros(6, dtype=np.float64); + P[0] = alpha + P[1] = beta + P[2] = gamma + P[3] = Tx + P[4] = Ty + P[5] = Tz + + step = np.array([1, 1, 1], dtype=np.int32) + optfunc_args = (image1, image1_mat, image2, image2_mat, step, histo_fwhm, lite, + smhist, method, ret_histo) + + #optfunc_args = (image1, image2, imdata['step'], imdata['fwhm'], lite, smhist, method, ret_histo) + if ret_histo: - cost, joint_histogram = optimize_function(imdata['parms'], optfunc_args) + cost, joint_histogram = optimize_function(P, optfunc_args) return cost, joint_histogram else: - cost = optimize_function(imdata['parms'], optfunc_args) + cost = optimize_function(P, optfunc_args) return cost -def build_scale_image(image, scale): +def build_scale_volume(image, mat, scale): # # rescale the 'mat' (voxel to physical mapping matrix) # - (layers, rows, cols) = image['data'].shape - M = image['mat'] * scale + (layers, rows, cols) = image.shape + M = mat * scale # dimensions D = np.zeros(3, dtype=np.int32); - # Gaussian kernel - fill in with build_fwhm() - F = np.zeros(3, dtype=np.float64); Z = np.zeros(3, dtype=np.float64); D[0] = rows/scale D[1] = cols/scale D[2] = layers/scale - image2 = np.zeros(D[2]*D[1]*D[0], dtype=np.uint8).reshape(D[2], D[0], D[1]); + #image2 = np.zeros(D.prod(), dtype=np.uint8).reshape(D[2], D[0], D[1]); + image2 = np.empty([D[2], D[0], D[1]], dtype=np.uint8) mode = 1; - reg.register_volume_resample(image['data'], image2, Z, scale, mode) - scaled_image = {'data' : image2, 'mat' : M, 'dim' : D, 'fwhm' : F} - return scaled_image + reg.register_volume_resample(image, image2, Z, scale, mode) + return image2, M -def demo_MRI_volume_align(scale=2, alpha=3.0, beta=4.0, gamma=5.0, Tx = 0.0, Ty = 0.0, Tz = 0.0): +def demo_build_dual_volumes(scale=2, alpha=3.0, beta=4.0, gamma=5.0, Tx = 0.0, Ty = 0.0, Tz = 0.0): """ demo with (must have file ANAT1_V0001.img) + builds a volume and a scaled-rotated version for coreg testing - image1, image2, imdata = reg.demo_MRI_volume_align() - x = reg.python_coreg(image1, image2, imdata, method='ncc', lite=1) + image1, mat1, image2, mat2 = reg.demo_build_dual_volumes() + x = reg.coregister(image1, mat1, image2, mat2, method='ncc', lite=1) image2r = reg.remap_image(image2, x, resample='cubic') - image2rz = reg.resize_image(image2r, image1['mat']) + image2rz = reg.resize_image(image2r, mat1) - - slice1 = image1['data'][45, :, :] - slice2 = image2['data'][45/2, :, :] - slice2r = image2r['data'][45/2, :, :] - slice2rz = image2rz['data'][45, :, :] - - pylab.figure(1) - pylab.bone() - pylab.imshow(slice1) - pylab.imshow(slice1) - pylab.figure(2) - pylab.imshow(slice2) - pylab.figure(3) - pylab.imshow(slice2r) - pylab.figure(4) - pylab.imshow(slice2rz) - pylab.show() - """ # # this is for coreg MRI / fMRI scale test. The volume is anatomical MRI. # the image is rotated in 3D. after rotation the image is scaled. 
# + step = np.array([1, 1, 1], dtype=np.int32) anat_desc = load_anatMRI_desc() - image1 = load_volume(anat_desc, imagename='ANAT1_V0001.img') - image2 = load_volume(anat_desc, imagename=None) - imdata = build_structs() - image1['fwhm'] = build_fwhm(image1['mat'], imdata['step']) - image2['fwhm'] = build_fwhm(image2['mat'], imdata['step']) - imdata['parms'][0] = alpha - imdata['parms'][1] = beta - imdata['parms'][2] = gamma - imdata['parms'][3] = Tx - imdata['parms'][4] = Ty - imdata['parms'][5] = Tz - M = build_rotate_matrix(imdata['parms']) + image1, mat1 = load_volume(anat_desc, imagename='ANAT1_V0001.img') + image2, mat2 = load_volume(anat_desc, imagename=None) + P = np.zeros(6, dtype=np.float64); + P[0] = alpha + P[1] = beta + P[2] = gamma + P[3] = Tx + P[4] = Ty + P[5] = Tz + M = build_rotate_matrix(P) # rotate volume. linear interpolation means the volume is low pass filtered - reg.register_linear_resample(image1['data'], image2['data'], M, imdata['step']) + reg.register_linear_resample(image1, image2, M, step) # subsample volume - image3 = build_scale_image(image2, scale) - return image1, image3, imdata + image2, mat2 = build_scale_volume(image2, mat2, scale) + return image1, mat1, image2, mat2 -def demo_rotate_fMRI_volume(fMRIVol, x): +def demo_rotate_fMRI_volume(fMRI_volume, desc, x): # - # return rotated fMRIVol. the fMRIVol is already loaded, and gets rotated + # return rotated fMRIVol. # - desc = load_fMRI_desc() image = load_volume(desc, imagename=None) - imdata = build_structs() - image['fwhm'] = build_fwhm(image['mat'], imdata['step']) - imdata['parms'][0] = x[0] # alpha - imdata['parms'][1] = x[1] # beta - imdata['parms'][2] = x[2] # gamma - imdata['parms'][3] = x[3] # Tx - imdata['parms'][4] = x[4] # Ty - imdata['parms'][5] = x[5] # Tz - M = build_rotate_matrix(imdata['parms']) + step = np.array([1, 1, 1], dtype=np.int32) + M = build_rotate_matrix(x) # rotate volume. 
cubic spline interpolation means the volume is NOT low pass filtered - reg.register_cubic_resample(fMRIVol['data'], image['data'], M, imdata['step']) + reg.register_cubic_resample(fMRI_volume, image, M, step) + return image def demo_MRI_coregistration(anatfile, funclist, optimizer_method='powell', @@ -1169,25 +1115,35 @@ # read the anatomical MRI volume anat_desc = load_anatMRI_desc() - imageF_anat = load_volume(anat_desc, imagename=anatfile) + imageF_anat, anat_mat = load_volume(anat_desc, imagename=anatfile) + imageF = imageF_anat.copy() # the sampling structure - imdata = build_structs() + step = np.array([1, 1, 1], dtype=np.int32) # the volume filter - imageF_anat['fwhm'] = build_fwhm(imageF_anat['mat'], imdata['step']) + imageF_anat_fwhm = build_fwhm(mat_anat, step) - # read in the file list of the fMRI data + + # allocate the structure for the processed fMRI array metric_test = np.dtype([('cost', 'f'), - ('align_cost', 'f'), - ('rotate', 'f', 6), - ('align_rotate', 'f', 6)]) + ('align_cost', 'f'), + ('rotate', 'f', 6), + ('align_rotate', 'f', 6)]) + # allocate the empty dictionary that will contain metrics and aligned volumes + fmri_series = {} - #fMRIdata = read_fMRI_directory('fMRIData\*.img') - #fMRIdata = read_fMRI_directory(funcdir + '/*.img') - fMRIdata = funclist + # read in the file list of the fMRI data + fMRIdata = read_fMRI_directory('fMRIData\*.img') fmri_desc = load_fMRI_desc() - fmri_series = {} - ave_fMRI_volume = np.zeros(fmri_desc['layers']*fmri_desc['rows']*fmri_desc['cols'], - dtype=np.float64).reshape(fmri_desc['layers'], fmri_desc['rows'], fmri_desc['cols']) + image_fmri, fmri_mat = load_volume(fmri_desc, fMRIdata[0]) + + # one time build of the fwhm that is used to build the filter kernels + anat_fwhm = build_fwhm(anat_mat, step) + fmri_fwhm = build_fwhm(fmri_mat, step) + + # blank volume that will be used for ensemble average for fMRI volumes + # prior to functional-anatomical coregistration + ave_fMRI_volume = np.zeros([fmri_desc['layers']*fmri_desc['rows']*fmri_desc['cols']].dtype=np.float64) + count = 0 number_volumes = len(fMRIdata) measures = np.zeros(number_volumes, dtype=metric_test) @@ -1196,13 +1152,12 @@ image = load_volume(fmri_desc, i) # random perturbation of angle, translation for each volume beyond the first if count == 0: - image['fwhm'] = build_fwhm(image['mat'], imdata['step']) fmri_series[count] = image count = count + 1 else: x = np.random.random(6) - 0.5 x = 10.0 * x - fmri_series[count] = demo_rotate_fMRI_volume(image, x) + fmri_series[count] = demo_rotate_fMRI_volume(image, fmri_desc, x) measures[count]['rotate'][0:6] = x[0:6] count = count + 1 @@ -1210,19 +1165,22 @@ # load and register the fMRI volumes with volume_0 using normalized cross correlation metric imageF = fmri_series[0] if smooth_image: - image_F_xyz = filter_image_3D(imageF['data'], imageF['fwhm'], ftype) - imageF['data'] = image_F_xyz + imageF = filter_image_3D(imageF, fmri_fwhm, ftype) for i in range(1, number_volumes): imageG = fmri_series[i] + if smooth_image: + imageG = filter_image_3D(imageG, fmri_fwhm, ftype) # the measure prior to alignment - measures[i]['cost'] = check_alignment(imageF, imageG, imdata, method='ncc', + measures[i]['cost'] = check_alignment(imageF, fmri_mat, imageG, fmri_mat, method='ncc', lite=histo_method, smhist=smooth_histo) - x = python_coreg(imageF, imageG, imdata, lite=histo_method, method='ncc', - opt_method=optimizer_method, smhist=smooth_histo, smimage=smooth_image) + x = coregister(imageF, fmri_mat, imageG, fmri_mat, lite=histo_method, 
method='ncc', + opt_method=optimizer_method, smhist=smooth_histo) measures[i]['align_rotate'][0:6] = x[0:6] - measures[i]['align_cost'] = check_alignment(imageF, imageG, imdata, method='ncc', - lite=histo_method, smhist=smooth_histo, - alpha=x[0], beta=x[1], gamma=x[2], Tx=x[3], Ty=x[4], Tz=x[5]) + measures[i]['align_cost'] = check_alignment(imageF, fmri_mat, imageG, fmri_mat, + method='ncc', lite=histo_method, + smhist=smooth_histo, alpha=x[0], + beta=x[1], gamma=x[2], Tx=x[3], + Ty=x[4], Tz=x[5]) # align the volumes and average them for co-registration with the anatomical MRI @@ -1239,10 +1197,11 @@ 'dim' : imageF['dim'], 'fwhm' : imageF['fwhm']} # register (using normalized mutual information) with the anatomical MRI if smooth_image: - image_F_anat_xyz = filter_image_3D(imageF_anat['data'], imageF_anat['fwhm'], ftype) - imageF_anat['data'] = image_F_anat_xyz - x = python_coreg(imageF_anat, ave_fMRI_volume, imdata, lite=histo_method, - method='nmi', opt_method=optimizer_method, smhist=smooth_histo, smimage=smooth_image) + imageF_anat = filter_image_3D(imageF_anat, anat_fwhm, ftype) + + x = coregister(imageF_anat, anat_mat, ave_fMRI_volume, fmri_mat, lite=histo_method, + method='nmi', opt_method=optimizer_method, smhist=smooth_histo) + print 'functional-anatomical align parameters ' print x for i in range(number_volumes): @@ -1250,14 +1209,14 @@ # overwrite the fMRI volume with the anatomical-aligned volume fmri_series[i] = remap_image(image, x, resample='cubic') - return measures, imageF_anat, fmri_series + return measures, imageF, fmri_series -def demo_fMRI_resample(imageF_anat, fmri_series): +def demo_fMRI_resample(imageF_anat, imageF_anat_mat, fmri_series): resampled_fmri_series = {} number_volumes = len(fmri_series) for i in range(number_volumes): - resampled_fmri_series[i] = resize_image(fmri_series[i], imageF_anat['mat']) + resampled_fmri_series[i] = resize_image(fmri_series[i], imageF_anat_mat) return resampled_fmri_series From scipy-svn at scipy.org Mon Jun 9 23:40:59 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Mon, 9 Jun 2008 22:40:59 -0500 (CDT) Subject: [Scipy-svn] r4422 - in trunk/scipy/io/matlab: . 
tests Message-ID: <20080610034059.8D5BC39C424@scipy.org> Author: wnbell Date: 2008-06-09 22:40:56 -0500 (Mon, 09 Jun 2008) New Revision: 4422 Modified: trunk/scipy/io/matlab/miobase.py trunk/scipy/io/matlab/tests/test_mio.py Log: Added patch by Andrew Straw to fix MATLAB support on gzip files resolves ticket #682 Modified: trunk/scipy/io/matlab/miobase.py =================================================================== --- trunk/scipy/io/matlab/miobase.py 2008-06-09 22:42:02 UTC (rev 4421) +++ trunk/scipy/io/matlab/miobase.py 2008-06-10 03:40:56 UTC (rev 4422) @@ -284,7 +284,8 @@ def end_of_stream(self): b = self.mat_stream.read(1) - self.mat_stream.seek(-1,1) + curpos = self.mat_stream.tell() + self.mat_stream.seek(curpos-1) return len(b) == 0 Modified: trunk/scipy/io/matlab/tests/test_mio.py =================================================================== --- trunk/scipy/io/matlab/tests/test_mio.py 2008-06-09 22:42:02 UTC (rev 4421) +++ trunk/scipy/io/matlab/tests/test_mio.py 2008-06-10 03:40:56 UTC (rev 4422) @@ -3,7 +3,7 @@ import os from glob import glob from cStringIO import StringIO -from tempfile import mkstemp +from tempfile import mkstemp, mkdtemp from scipy.testing import * from numpy import arange, array, eye, pi, cos, exp, sin, sqrt, ndarray, \ zeros, reshape, transpose, empty @@ -12,6 +12,9 @@ from scipy.io.matlab.mio import loadmat, savemat from scipy.io.matlab.mio5 import mat_obj, mat_struct +import shutil +import gzip + try: # Python 2.3 support from sets import Set as set except: @@ -238,3 +241,29 @@ expected = case['expected'] format = case in case_table4 and '4' or '5' yield _make_rt_check_case, name, expected, format + +def test_gzip_simple(): + xdense = zeros((20,20)) + xdense[2,3]=2.3 + xdense[4,5]=4.5 + x = SP.csc_matrix(xdense) + + name = 'gzip_test' + expected = {'x':x} + format='4' + + tmpdir = mkdtemp() + try: + fname = os.path.join(tmpdir,name) + mat_stream = gzip.open( fname,mode='wb') + savemat(mat_stream, expected, format=format) + mat_stream.close() + + mat_stream = gzip.open( fname,mode='rb') + actual = loadmat(mat_stream) + mat_stream.close() + finally: + shutil.rmtree(tmpdir) + + assert_array_almost_equal(actual['x'].todense(), + expected['x'].todense()) From scipy-svn at scipy.org Tue Jun 10 02:35:33 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Tue, 10 Jun 2008 01:35:33 -0500 (CDT) Subject: [Scipy-svn] r4423 - trunk/scipy/ndimage Message-ID: <20080610063533.8668D39C628@scipy.org> Author: rkern Date: 2008-06-10 01:35:31 -0500 (Tue, 10 Jun 2008) New Revision: 4423 Modified: trunk/scipy/ndimage/_registration.py Log: Correct syntax typo. Fix a few undefined references while I'm at it, too. pyflakes rules. Modified: trunk/scipy/ndimage/_registration.py =================================================================== --- trunk/scipy/ndimage/_registration.py 2008-06-10 03:40:56 UTC (rev 4422) +++ trunk/scipy/ndimage/_registration.py 2008-06-10 06:35:31 UTC (rev 4423) @@ -68,7 +68,7 @@ """ # get the zoom - Z = imageS.diagonal() / imageR.diagonal() + Z = imageS_mat.diagonal() / imageR_mat.diagonal() # new volume dimensions (rounded). 
D, imageS and Z are 3D and this is a vector element product D = (imageS.shape * Z + 0.5).astype(np.int16) @@ -300,7 +300,7 @@ step = np.array([1, 1, 1], dtype=np.int32) fwhm = np.zeros(2, dtype=np.int32) # make the step a scalar to can put in a multi-res loop - loop = range(size(multires)) + loop = range(np.size(multires)) # 6-D zero vector x = np.zeros(6, dtype=np.float64); # the kernel fwhm value for the x and y joint histogram filter @@ -1120,7 +1120,7 @@ # the sampling structure step = np.array([1, 1, 1], dtype=np.int32) # the volume filter - imageF_anat_fwhm = build_fwhm(mat_anat, step) + imageF_anat_fwhm = build_fwhm(anat_mat, step) # allocate the structure for the processed fMRI array @@ -1142,7 +1142,8 @@ # blank volume that will be used for ensemble average for fMRI volumes # prior to functional-anatomical coregistration - ave_fMRI_volume = np.zeros([fmri_desc['layers']*fmri_desc['rows']*fmri_desc['cols']].dtype=np.float64) + ave_fMRI_volume = np.zeros([fmri_desc['layers']*fmri_desc['rows']*fmri_desc['cols']], + dtype=np.float64) count = 0 number_volumes = len(fMRIdata) From scipy-svn at scipy.org Wed Jun 11 06:47:06 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Wed, 11 Jun 2008 05:47:06 -0500 (CDT) Subject: [Scipy-svn] r4424 - trunk/scipy/testing Message-ID: <20080611104706.2F8F739C09F@scipy.org> Author: rkern Date: 2008-06-11 05:47:02 -0500 (Wed, 11 Jun 2008) New Revision: 4424 Removed: trunk/scipy/testing/nulltester.py Modified: trunk/scipy/testing/__init__.py trunk/scipy/testing/decorators.py trunk/scipy/testing/nosetester.py trunk/scipy/testing/pkgtester.py Log: Use local imports to avoid importing nose until tests are actually requested. This speeds up the load-time of scipy. Modified: trunk/scipy/testing/__init__.py =================================================================== --- trunk/scipy/testing/__init__.py 2008-06-10 06:35:31 UTC (rev 4423) +++ trunk/scipy/testing/__init__.py 2008-06-11 10:47:02 UTC (rev 4424) @@ -8,11 +8,6 @@ import unittest from unittest import TestCase -try: - import nose -except ImportError: - pass - import decorators as dec from numpy.testing.utils import * from utils import * Modified: trunk/scipy/testing/decorators.py =================================================================== --- trunk/scipy/testing/decorators.py 2008-06-10 06:35:31 UTC (rev 4423) +++ trunk/scipy/testing/decorators.py 2008-06-11 10:47:02 UTC (rev 4424) @@ -10,11 +10,6 @@ """ -try: - import nose -except ImportError: - pass - def slow(t): """Labels a test as 'slow'. @@ -76,6 +71,9 @@ if msg is None: msg = 'Test skipped due to test condition' def skip_decorator(f): + # Local import to avoid a hard nose dependency and only incur the import + # time overhead at actual test-time. + import nose def skipper(*args, **kwargs): if skip_condition: raise nose.SkipTest, msg @@ -87,6 +85,9 @@ def skipknownfailure(f): ''' Decorator to raise SkipTest for test known to fail ''' + # Local import to avoid a hard nose dependency and only incur the import + # time overhead at actual test-time. 
+ import nose def skipper(*args, **kwargs): raise nose.SkipTest, 'This test is known to fail' return nose.tools.make_decorator(f)(skipper) Modified: trunk/scipy/testing/nosetester.py =================================================================== --- trunk/scipy/testing/nosetester.py 2008-06-10 06:35:31 UTC (rev 4423) +++ trunk/scipy/testing/nosetester.py 2008-06-11 10:47:02 UTC (rev 4424) @@ -7,8 +7,26 @@ import sys import re -import nose +def import_nose(): + """ Import nose only when needed. + """ + fine_nose = True + try: + import nose + except ImportError: + fine_nose = False + else: + nose_version = nose.__versioninfo__ + if nose_version[0] < 1 and nose_version[1] < 10: + fine_nose = False + + if not fine_nose: + raise ImportError('Need nose >=0.10 for tests - see ' + 'http://somethingaboutorange.com/mrl/projects/nose') + + return nose + class NoseTester(object): """ Nose test runner. @@ -112,6 +130,7 @@ doctests : boolean If True, run doctests in module, default False ''' + nose = import_nose() argv = self._test_argv(label, verbose, extra_argv) if doctests: argv+=['--with-doctest'] @@ -122,6 +141,7 @@ ''' Run benchmarks for module using nose %(test_header)s''' + nose = import_nose() argv = self._test_argv(label, verbose, extra_argv) argv += ['--match', r'(?:^|[\\b_\\.%s-])[Bb]ench' % os.sep] nose.run(argv=argv) Deleted: trunk/scipy/testing/nulltester.py =================================================================== --- trunk/scipy/testing/nulltester.py 2008-06-10 06:35:31 UTC (rev 4423) +++ trunk/scipy/testing/nulltester.py 2008-06-11 10:47:02 UTC (rev 4424) @@ -1,15 +0,0 @@ -''' Null tester to signal nose tests disabled - -Merely returns error reporting lack of nose package or version number -below requirements. - -See pkgtester, nosetester modules - -''' - -class NullTester(object): - def test(self, labels=None, *args, **kwargs): - raise ImportError, \ - 'Need nose >=0.10 for tests - see %s' % \ - 'http://somethingaboutorange.com/mrl/projects/nose' - bench = test Modified: trunk/scipy/testing/pkgtester.py =================================================================== --- trunk/scipy/testing/pkgtester.py 2008-06-10 06:35:31 UTC (rev 4423) +++ trunk/scipy/testing/pkgtester.py 2008-06-11 10:47:02 UTC (rev 4424) @@ -11,17 +11,4 @@ See nosetester module for test implementation ''' -fine_nose = True -try: - import nose -except ImportError: - fine_nose = False -else: - nose_version = nose.__versioninfo__ - if nose_version[0] < 1 and nose_version[1] < 10: - fine_nose = False - -if fine_nose: - from scipy.testing.nosetester import NoseTester as Tester -else: - from scipy.testing.nulltester import NullTester as Tester +from scipy.testing.nosetester import NoseTester as Tester From scipy-svn at scipy.org Wed Jun 11 11:36:37 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Wed, 11 Jun 2008 10:36:37 -0500 (CDT) Subject: [Scipy-svn] r4425 - trunk/scipy/stats/tests Message-ID: <20080611153637.EACC339C819@scipy.org> Author: cdavid Date: 2008-06-11 10:36:30 -0500 (Wed, 11 Jun 2008) New Revision: 4425 Modified: trunk/scipy/stats/tests/test_stats.py Log: Remove executable bit for test_stats.py Property changes on: trunk/scipy/stats/tests/test_stats.py ___________________________________________________________________ Name: svn:executable - * From scipy-svn at scipy.org Wed Jun 11 11:38:27 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Wed, 11 Jun 2008 10:38:27 -0500 (CDT) Subject: [Scipy-svn] r4426 - in trunk/scipy/stats: . 
tests Message-ID: <20080611153827.ACB0A39C80A@scipy.org> Author: cdavid Date: 2008-06-11 10:38:20 -0500 (Wed, 11 Jun 2008) New Revision: 4426 Modified: trunk/scipy/stats/stats.py trunk/scipy/stats/tests/test_stats.py Log: Put nanmean, nanstd and nanmedian into scipy.stats namespace + adapt tests. Modified: trunk/scipy/stats/stats.py =================================================================== --- trunk/scipy/stats/stats.py 2008-06-11 15:36:30 UTC (rev 4425) +++ trunk/scipy/stats/stats.py 2008-06-11 15:38:20 UTC (rev 4426) @@ -50,6 +50,10 @@ kurtosis normaltest (for arrays only) +MOMENTS HANDLING NAN: nanmean + nanmedian + nanstd + ALTERED VERSIONS: tmean tvar tstd @@ -214,6 +218,7 @@ 'f_value', 'f_value_multivariate', 'ss', 'square_of_sums', 'fastsort', 'rankdata', + 'nanmean', 'nanstd', 'nanmedian', ] Modified: trunk/scipy/stats/tests/test_stats.py =================================================================== --- trunk/scipy/stats/tests/test_stats.py 2008-06-11 15:36:30 UTC (rev 4425) +++ trunk/scipy/stats/tests/test_stats.py 2008-06-11 15:38:20 UTC (rev 4426) @@ -197,47 +197,47 @@ def test_nanmean_none(self): """Check nanmean when no values are nan.""" - m = stats.stats.nanmean(X) + m = stats.nanmean(X) assert_approx_equal(m, X[4]) def test_nanmean_some(self): """Check nanmean when some values only are nan.""" - m = stats.stats.nanmean(self.Xsome) + m = stats.nanmean(self.Xsome) assert_approx_equal(m, 5.5) def test_nanmean_all(self): """Check nanmean when all values are nan.""" - m = stats.stats.nanmean(self.Xall) + m = stats.nanmean(self.Xall) assert numpy.isnan(m) def test_nanstd_none(self): """Check nanstd when no values are nan.""" - s = stats.stats.nanstd(self.X) - assert_approx_equal(s, stats.stats.std(self.X)) + s = stats.nanstd(self.X) + assert_approx_equal(s, stats.std(self.X)) def test_nanstd_some(self): """Check nanstd when some values only are nan.""" - s = stats.stats.nanstd(self.Xsome) - assert_approx_equal(s, stats.stats.std(self.Xsomet)) + s = stats.nanstd(self.Xsome) + assert_approx_equal(s, stats.std(self.Xsomet)) def test_nanstd_all(self): """Check nanstd when all values are nan.""" - s = stats.stats.nanstd(self.Xall) + s = stats.nanstd(self.Xall) assert numpy.isnan(s) def test_nanmedian_none(self): """Check nanmedian when no values are nan.""" - m = stats.stats.nanmedian(self.X) - assert_approx_equal(m, stats.stats.median(self.X)) + m = stats.nanmedian(self.X) + assert_approx_equal(m, stats.median(self.X)) def test_nanmedian_some(self): """Check nanmedian when some values only are nan.""" - m = stats.stats.nanmedian(self.Xsome) - assert_approx_equal(m, stats.stats.median(self.Xsomet)) + m = stats.nanmedian(self.Xsome) + assert_approx_equal(m, stats.median(self.Xsomet)) def test_nanmedian_all(self): """Check nanmedian when all values are nan.""" - m = stats.stats.nanmedian(self.Xall) + m = stats.nanmedian(self.Xall) assert numpy.isnan(m) class TestCorr(TestCase): From scipy-svn at scipy.org Thu Jun 12 06:09:10 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Thu, 12 Jun 2008 05:09:10 -0500 (CDT) Subject: [Scipy-svn] r4427 - in trunk/scipy: cluster fftpack integrate interpolate io lib/blas lib/lapack linalg ndimage odr optimize signal sparse/linalg/dsolve sparse/linalg/dsolve/umfpack sparse/linalg/eigen/arpack sparse/linalg/isolve sparse/sparsetools special stats stsci/convolve stsci/image Message-ID: <20080612100910.9730D39C3AB@scipy.org> Author: cdavid Date: 2008-06-12 05:07:55 -0500 (Thu, 12 Jun 2008) New Revision: 4427 Added: 
From scipy-svn at scipy.org  Thu Jun 12 06:09:10 2008
From: scipy-svn at scipy.org (scipy-svn at scipy.org)
Date: Thu, 12 Jun 2008 05:09:10 -0500 (CDT)
Subject: [Scipy-svn] r4427 - in trunk/scipy: cluster fftpack integrate interpolate io lib/blas lib/lapack linalg ndimage odr optimize signal sparse/linalg/dsolve sparse/linalg/dsolve/umfpack sparse/linalg/eigen/arpack sparse/linalg/isolve sparse/sparsetools special stats stsci/convolve stsci/image
Message-ID: <20080612100910.9730D39C3AB@scipy.org>

Author: cdavid
Date: 2008-06-12 05:07:55 -0500 (Thu, 12 Jun 2008)
New Revision: 4427

Added:
   trunk/scipy/cluster/SConscript
   trunk/scipy/fftpack/SConscript
   trunk/scipy/integrate/SConscript
   trunk/scipy/interpolate/SConscript
   trunk/scipy/io/SConscript
   trunk/scipy/lib/blas/SConscript
   trunk/scipy/lib/lapack/SConscript
   trunk/scipy/linalg/SConscript
   trunk/scipy/ndimage/SConscript
   trunk/scipy/odr/SConscript
   trunk/scipy/optimize/SConscript
   trunk/scipy/signal/SConscript
   trunk/scipy/sparse/linalg/dsolve/SConscript
   trunk/scipy/sparse/linalg/dsolve/umfpack/SConscript
   trunk/scipy/sparse/linalg/eigen/arpack/SConscript
   trunk/scipy/sparse/linalg/isolve/SConscript
   trunk/scipy/sparse/sparsetools/SConscript
   trunk/scipy/special/SConscript
   trunk/scipy/stats/SConscript
   trunk/scipy/stsci/convolve/SConscript
   trunk/scipy/stsci/image/SConscript
Removed:
   trunk/scipy/cluster/SConstruct
   trunk/scipy/fftpack/SConstruct
   trunk/scipy/integrate/SConstruct
   trunk/scipy/interpolate/SConstruct
   trunk/scipy/io/SConstruct
   trunk/scipy/lib/blas/SConstruct
   trunk/scipy/lib/lapack/SConstruct
   trunk/scipy/linalg/SConstruct
   trunk/scipy/ndimage/SConstruct
   trunk/scipy/odr/SConstruct
   trunk/scipy/optimize/SConstruct
   trunk/scipy/signal/SConstruct
   trunk/scipy/sparse/linalg/dsolve/SConstruct
   trunk/scipy/sparse/linalg/dsolve/umfpack/SConstruct
   trunk/scipy/sparse/linalg/eigen/arpack/SConstruct
   trunk/scipy/sparse/linalg/isolve/SConstruct
   trunk/scipy/sparse/sparsetools/SConstruct
   trunk/scipy/special/SConstruct
   trunk/scipy/stats/SConstruct
   trunk/scipy/stsci/convolve/SConstruct
   trunk/scipy/stsci/image/SConstruct
Log:
Move all scons SConstruct to SConscript, for upcoming adaptation to new numscons build_dir architecture.

Copied: trunk/scipy/cluster/SConscript (from rev 4426, trunk/scipy/cluster/SConstruct)

Deleted: trunk/scipy/cluster/SConstruct
===================================================================
--- trunk/scipy/cluster/SConstruct	2008-06-11 15:38:20 UTC (rev 4426)
+++ trunk/scipy/cluster/SConstruct	2008-06-12 10:07:55 UTC (rev 4427)
@@ -1,19 +0,0 @@
-# Last Change: Thu Oct 18 09:00 PM 2007 J
-# vim:syntax=python
-from os.path import join
-
-from numpy.distutils.misc_util import get_numpy_include_dirs
-from numscons import GetNumpyEnvironment
-
-env = GetNumpyEnvironment(ARGUMENTS)
-
-env.AppendUnique(CPPPATH = get_numpy_include_dirs())
-env.NumpyPythonExtension('_vq', source = [join('src', 'vq_module.c'),
-                                          join('src', 'vq.c')])
-
-env.NumpyPythonExtension('_hierarchy_wrap', source = [join('src', 'hierarchy_wrap.c'),
-                                                      join('src', 'hierarchy.c')])
-
-
-env.NumpyPythonExtension('_distance_wrap', source = [join('src', 'distance_wrap.c'),
-                                                     join('src', 'distance.c')])

Copied: trunk/scipy/fftpack/SConscript (from rev 4426, trunk/scipy/fftpack/SConstruct)

Deleted: trunk/scipy/fftpack/SConstruct
===================================================================
--- trunk/scipy/fftpack/SConstruct	2008-06-11 15:38:20 UTC (rev 4426)
+++ trunk/scipy/fftpack/SConstruct	2008-06-12 10:07:55 UTC (rev 4427)
@@ -1,44 +0,0 @@
-# Last Change: Sat May 03 02:00 PM 2008 J
-# vim:syntax=python
-from os.path import join as pjoin
-
-from numpy.distutils.misc_util import get_numpy_include_dirs
-from numscons import GetNumpyEnvironment, write_info
-from numscons import CheckFFT, IsMKL, IsFFTW2, IsFFTW3
-
-env = GetNumpyEnvironment(ARGUMENTS)
-env.Tool('numpyf2py')
-
-env.AppendUnique(CPPPATH = get_numpy_include_dirs())
-env.AppendUnique(CPPPATH = env['F2PYINCLUDEDIR'])
-
-# Check fft implementation
-config = env.NumpyConfigure(custom_tests = {'CheckFFT': CheckFFT})
-has_fft = config.CheckFFT()
-config.Finish()
-write_info(env)
-
-# Tweak defines
depending on the fft used -if has_fft: - if IsMKL(env, 'fft'): - env.Append(CPPDEFINES = "SCIPY_MKL_H") - elif IsFFTW3(env, 'fft'): - env.Append(CPPDEFINES = "SCIPY_FFTW3_H") - elif IsFFTW2(env, 'fft'): - env.Append(CPPDEFINES = "SCIPY_FFTW2_H") - else: - pass - -# Build dfftpack -src = env.NumpyGlob(pjoin('dfftpack', '*.f')) -dfftpack = env.NumpyStaticExtLibrary('dfftpack', source = [str(s) for s in src]) -env.PrependUnique(LIBS = ['dfftpack']) -env.PrependUnique(LIBPATH = env['build_dir']) - -# Build _fftpack -src = ['src/zfft.c','src/drfft.c','src/zrfft.c', 'src/zfftnd.c', 'fftpack.pyf'] -env.NumpyPythonExtension('_fftpack', src) - -# Build convolve -src = ['src/convolve.c', 'convolve.pyf'] -env.NumpyPythonExtension('convolve', src) Copied: trunk/scipy/integrate/SConscript (from rev 4426, trunk/scipy/integrate/SConstruct) Deleted: trunk/scipy/integrate/SConstruct =================================================================== --- trunk/scipy/integrate/SConstruct 2008-06-11 15:38:20 UTC (rev 4426) +++ trunk/scipy/integrate/SConstruct 2008-06-12 10:07:55 UTC (rev 4427) @@ -1,61 +0,0 @@ -# Last Change: Sat May 03 02:00 PM 2008 J -# vim:syntax=python -from os.path import join as pjoin -import warnings - -from numpy.distutils.misc_util import get_numpy_include_dirs -from numscons import GetNumpyEnvironment, CheckF77Clib, CheckF77BLAS - -env = GetNumpyEnvironment(ARGUMENTS) -env.Tool('numpyf2py') - -# Configuration -config = env.NumpyConfigure(custom_tests = {'CheckF77Clib' : CheckF77Clib, - 'CheckF77BLAS' : CheckF77BLAS}) - -if not config.CheckF77Clib(): - raise Exception("Could not check F77 runtime, needed for interpolate") -if not config.CheckF77BLAS(): - raise Exception("Could not find F77 BLAS, needed for integrate package") - -config.Finish() - -env.AppendUnique(CPPPATH = get_numpy_include_dirs()) -env.AppendUnique(CPPPATH = env['F2PYINCLUDEDIR']) - -# XXX: lapack integration - -# Build linpack_lite -src = [str(s) for s in env.NumpyGlob(pjoin('linpack_lite', '*.f'))] -linpack_lite = env.NumpyStaticExtLibrary('linpack_lite', source = src) - -# Build mach -# XXX: do not use optimization flags for mach -src = [str(s) for s in env.NumpyGlob(pjoin('mach', '*.f'))] -mach = env.NumpyStaticExtLibrary('mach', source = src) - -# Build quadpack -src = [str(s) for s in env.NumpyGlob(pjoin('quadpack', '*.f'))] -quadpack = env.NumpyStaticExtLibrary('quadpack', source = src) - -# Build odepack -src = [str(s) for s in env.NumpyGlob(pjoin('odepack', '*.f'))] -odepack = env.NumpyStaticExtLibrary('odepack', source = src) - -env.AppendUnique(LIBPATH = env['build_dir']) -env.AppendUnique(LINKFLAGSEND = env['F77_LDFLAGS']) - -quadenv = env.Clone() -quadenv.Prepend(LIBS = ['quadpack', 'linpack_lite', 'mach']) - -odenv = env.Clone() -odenv.Prepend(LIBS = ['odepack', 'linpack_lite', 'mach']) - -# Build _quadpack -quadenv.NumpyPythonExtension('_quadpack', source = '_quadpackmodule.c') - -# Build _odepack -odenv.NumpyPythonExtension('_odepack', source = '_odepackmodule.c') - -# Build vode -odenv.NumpyPythonExtension('vode', source = 'vode.pyf') Copied: trunk/scipy/interpolate/SConscript (from rev 4426, trunk/scipy/interpolate/SConstruct) Deleted: trunk/scipy/interpolate/SConstruct =================================================================== --- trunk/scipy/interpolate/SConstruct 2008-06-11 15:38:20 UTC (rev 4426) +++ trunk/scipy/interpolate/SConstruct 2008-06-12 10:07:55 UTC (rev 4427) @@ -1,31 +0,0 @@ -# Last Change: Sat May 03 02:00 PM 2008 J -# vim:syntax=python -from os.path import join as pjoin - 
-from numpy.distutils.misc_util import get_numpy_include_dirs -from numscons import GetNumpyEnvironment, CheckF77Clib - -env = GetNumpyEnvironment(ARGUMENTS) -env.Tool('numpyf2py') - -config = env.NumpyConfigure(custom_tests = {'CheckF77Clib' : CheckF77Clib}) -if not config.CheckF77Clib(): - raise Exception("Could not check F77 runtime, needed for interpolate") -config.Finish() - -env.PrependUnique(CPPPATH = get_numpy_include_dirs()) -env.PrependUnique(CPPPATH = env['F2PYINCLUDEDIR']) -env.AppendUnique(LINKFLAGSEND = env['F77_LDFLAGS']) - -# Build fitpack -src = [str(s) for s in env.NumpyGlob(pjoin('fitpack', '*.f'))] -fitpack = env.NumpyStaticExtLibrary('fitpack', source = src) - -env.PrependUnique(LIBS = ['fitpack']) -env.PrependUnique(LIBPATH = [env['build_dir']]) - -# Build _fitpack -env.NumpyPythonExtension('_fitpack', source = '_fitpackmodule.c') - -# Build dfitpack -env.NumpyPythonExtension('dfitpack', source = 'fitpack.pyf') Copied: trunk/scipy/io/SConscript (from rev 4426, trunk/scipy/io/SConstruct) Deleted: trunk/scipy/io/SConstruct =================================================================== --- trunk/scipy/io/SConstruct 2008-06-11 15:38:20 UTC (rev 4426) +++ trunk/scipy/io/SConstruct 2008-06-12 10:07:55 UTC (rev 4427) @@ -1,11 +0,0 @@ -# Last Change: Wed Mar 05 03:00 PM 2008 J -# vim:syntax=python -from os.path import join - -from numpy.distutils.misc_util import get_numpy_include_dirs -from numscons import GetNumpyEnvironment - -env = GetNumpyEnvironment(ARGUMENTS) - -env.AppendUnique(CPPPATH = get_numpy_include_dirs()) -env.NumpyPythonExtension('numpyio', source = 'numpyiomodule.c') Copied: trunk/scipy/lib/blas/SConscript (from rev 4426, trunk/scipy/lib/blas/SConstruct) Deleted: trunk/scipy/lib/blas/SConstruct =================================================================== --- trunk/scipy/lib/blas/SConstruct 2008-06-11 15:38:20 UTC (rev 4426) +++ trunk/scipy/lib/blas/SConstruct 2008-06-12 10:07:55 UTC (rev 4427) @@ -1,85 +0,0 @@ -# Last Change: Sat May 03 02:00 PM 2008 J -# vim:syntax=python - -import os -from os.path import join as pjoin, splitext - -from numpy.distutils.misc_util import get_numpy_include_dirs -from numscons import get_python_inc#, get_pythonlib_dir -from numscons import GetNumpyEnvironment -from numscons import CheckCBLAS, CheckF77BLAS,\ - IsVeclib, IsAccelerate, \ - IsATLAS, GetATLASVersion -from numscons import write_info - -from scons_support import do_generate_fake_interface, generate_interface_emitter - -env = GetNumpyEnvironment(ARGUMENTS) -env.Tool('numpyf2py') - -env.Append(CPPPATH = [get_python_inc(), get_numpy_include_dirs()]) -#if os.name == 'nt': -# # NT needs the pythonlib to run any code importing Python.h, including -# # simple code using only typedef and so on, so we need it for configuration -# # checks -# env.AppendUnique(LIBPATH = [get_pythonlib_dir()]) - -env['BUILDERS']['GenerateFakePyf'] = Builder(action = do_generate_fake_interface, - emitter = generate_interface_emitter) - -#======================= -# Starting Configuration -#======================= -config = env.NumpyConfigure(custom_tests = {'CheckCBLAS' : CheckCBLAS, - 'CheckBLAS' : CheckF77BLAS}) - -#-------------- -# Checking Blas -#-------------- -st = config.CheckBLAS(check_version = 1) -if not st: - raise RuntimeError("no blas found, necessary for linalg module") -if IsATLAS(env, 'blas'): - version = GetATLASVersion(env) - env.Append(CPPDEFINES = [('ATLAS_INFO', '"\\"%s"\\"' % version)]) -else: - env.Append(CPPDEFINES = [('NO_ATLAS_INFO', 1)]) - -if 
config.CheckCBLAS(): - has_cblas = 1 -else: - has_cblas = 0 - -config.Finish() -write_info(env) - -#========== -# Build -#========== - -# XXX: handle cblas wrapper for complex (check in numpy.scons or here ?) -env.AppendUnique(CPPPATH = env['F2PYINCLUDEDIR']) -env.AppendUnique(F2PYOPTIONS = '--quiet') - -#------------ -# fblas -#------------ -env.NumpyFromFTemplate('fblas.pyf', 'fblas.pyf.src') -source = ['fblas.pyf'] -if IsVeclib(env, 'blas') or IsAccelerate(env, 'blas'): - env.NumpyFromCTemplate('fblaswrap_veclib_c.c', 'fblaswrap_veclib_c.c.src') - source.append('fblaswrap_veclib_c.c') -else: - env.NumpyFromFTemplate('fblaswrap.f', 'fblaswrap.f.src') - source.append('fblaswrap.f') -env.NumpyPythonExtension('fblas', source) - -#------------ -# cblas -#------------ -source = ['cblas.pyf'] -if has_cblas: - env.NumpyFromFTemplate('cblas.pyf', 'cblas.pyf.src') -else: - print env.GenerateFakePyf('cblas', 'cblas.pyf.src') -env.NumpyPythonExtension('cblas', source) Copied: trunk/scipy/lib/lapack/SConscript (from rev 4426, trunk/scipy/lib/lapack/SConstruct) Deleted: trunk/scipy/lib/lapack/SConstruct =================================================================== --- trunk/scipy/lib/lapack/SConstruct 2008-06-11 15:38:20 UTC (rev 4426) +++ trunk/scipy/lib/lapack/SConstruct 2008-06-12 10:07:55 UTC (rev 4427) @@ -1,95 +0,0 @@ -# Last Change: Sat May 03 02:00 PM 2008 J -# vim:syntax=python - -import os -from os.path import join as pjoin, splitext - -from numpy.distutils.misc_util import get_numpy_include_dirs -from numscons import get_python_inc -from numscons import GetNumpyEnvironment -from numscons import CheckF77LAPACK,\ - CheckCLAPACK, \ - IsATLAS, GetATLASVersion, \ - CheckF77Clib -from numscons import write_info - -from scons_support import do_generate_fake_interface, \ - generate_interface_emitter - -env = GetNumpyEnvironment(ARGUMENTS) -env.Tool('numpyf2py') -env.Append(CPPPATH = [get_python_inc(), get_numpy_include_dirs()]) -#if os.name == 'nt': -# # NT needs the pythonlib to run any code importing Python.h, including -# # simple code using only typedef and so on, so we need it for configuration -# # checks -# env.AppendUnique(LIBPATH = [get_pythonlib_dir()]) - -#======================= -# Starting Configuration -#======================= -config = env.NumpyConfigure(custom_tests = {'CheckCLAPACK' : CheckCLAPACK, - 'CheckLAPACK' : CheckF77LAPACK, - 'CheckF77Clib' : CheckF77Clib}) - -#-------------- -# Checking Blas -#-------------- -if not config.CheckF77Clib(): - raise RuntimeError("Could not check F/C runtime library for %s/%s, " \ - "contact the maintainer" % (env['CC'], env['F77'])) - -st = config.CheckLAPACK(check_version = 1) -if not st: - raise RuntimeError("no lapack found, necessary for lapack module") - -if IsATLAS(env, 'lapack'): - version = GetATLASVersion(env) - env.Append(CPPDEFINES = [('ATLAS_INFO', '"\\"%s"\\"' % version)]) -else: - env.Append(CPPDEFINES = [('NO_ATLAS_INFO', 1)]) - -if config.CheckCLAPACK(): - has_clapack = 1 -else: - has_clapack = 0 - -config.Finish() -write_info(env) - -#========== -# Build -#========== -env.AppendUnique(CPPPATH = env['F2PYINCLUDEDIR']) -env.AppendUnique(F2PYOPTIONS = '--quiet') - -env['BUILDERS']['GenerateFakePyf'] = Builder(action = do_generate_fake_interface, - emitter = generate_interface_emitter) - -#------------ -# flapack -#------------ -yop = env.NumpyFromFTemplate('flapack.pyf', 'flapack.pyf.src') -env.NumpyPythonExtension('flapack', source = ['flapack.pyf']) - -#------------ -# clapack -#------------ -if has_clapack: - 
env.NumpyFromFTemplate('clapack.pyf', 'clapack.pyf.src') -else: - env.GenerateFakePyf('clapack', 'clapack.pyf.src') -env.NumpyPythonExtension('clapack', source = 'clapack.pyf') - -#---------------- -# calc_lwork: -#---------------- -calc_src = env.NumpyF2py(pjoin('calc_lworkmodule.c'), - source = pjoin('calc_lwork.f')) -env.NumpyPythonExtension('calc_lwork', source = calc_src + ['calc_lwork.f'], - LINKFLAGSEND = env['F77_LDFLAGS']) - -#-------------- -# Atlas version -#-------------- -env.NumpyPythonExtension('atlas_version', 'atlas_version.c') Copied: trunk/scipy/linalg/SConscript (from rev 4426, trunk/scipy/linalg/SConstruct) Deleted: trunk/scipy/linalg/SConstruct =================================================================== --- trunk/scipy/linalg/SConstruct 2008-06-11 15:38:20 UTC (rev 4426) +++ trunk/scipy/linalg/SConstruct 2008-06-12 10:07:55 UTC (rev 4427) @@ -1,159 +0,0 @@ -# Last Change: Sat May 03 02:00 PM 2008 J -# vim:syntax=python - -import os -from os.path import join as pjoin, splitext - -from numpy.distutils.misc_util import get_numpy_include_dirs -from numscons import get_python_inc#, get_pythonlib_dir -from numscons import GetNumpyEnvironment -from numscons import CheckCBLAS, CheckF77BLAS, CheckF77LAPACK,\ - CheckCLAPACK, IsVeclib, IsAccelerate, \ - IsATLAS, GetATLASVersion, CheckF77Clib -from numscons import write_info - -from scons_support import do_generate_interface, do_generate_fake_interface, \ - generate_interface_emitter - -#from scons_support import CheckBrokenMathlib, define_no_smp, \ -# generate_config_header, generate_config_header_emitter - -env = GetNumpyEnvironment(ARGUMENTS) -env.Tool('numpyf2py') -env.Append(CPPPATH = [get_python_inc(), get_numpy_include_dirs()]) - -# XXX: handle cblas wrapper for complex (check in numpy.scons or here ?) 
-env.AppendUnique(CPPPATH = env['F2PYINCLUDEDIR']) -env.AppendUnique(F2PYOPTIONS = '--quiet') - -env['BUILDERS']['haha'] = Builder(action = do_generate_interface, - emitter = generate_interface_emitter) - -env['BUILDERS']['hihi'] = Builder(action = do_generate_fake_interface, - emitter = generate_interface_emitter) - -#if os.name == 'nt': -# # NT needs the pythonlib to run any code importing Python.h, including -# # simple code using only typedef and so on, so we need it for configuration -# # checks -# env.AppendUnique(LIBPATH = [get_pythonlib_dir()]) - -fenv = env.Clone() - -#======================= -# Starting Configuration -#======================= -config = env.Configure(custom_tests = {'CheckCBLAS' : CheckCBLAS, - 'CheckCLAPACK' : CheckCLAPACK}) - -#------------------------- -# Checking cblas/clapack -#------------------------- -if config.CheckCBLAS(): - has_cblas = 1 -else: - has_cblas = 0 -if has_cblas: - if IsATLAS(env, 'cblas'): - version = GetATLASVersion(env) - env.Append(CPPDEFINES = [('ATLAS_INFO', '"\\"%s"\\"' % version)]) - else: - env.Append(CPPDEFINES = [('NO_ATLAS_INFO', 1)]) - -if config.CheckCLAPACK(): - has_clapack = 1 -else: - has_clapack = 0 - -config.Finish() -write_info(env) - -#--------------------------- -# Checking F77 blas/lapack -#--------------------------- -fconfig = fenv.Configure(custom_tests = {'CheckBLAS' : CheckF77BLAS, - 'CheckLAPACK' : CheckF77LAPACK, - 'CheckF77Clib' : CheckF77Clib}) - -if not fconfig.CheckF77Clib(): - raise RuntimeError("Could not check F/C runtime library for %s/%s, " \ - "contact the maintainer" % (fenv['CC'], fenv['F77'])) - -st = fconfig.CheckBLAS(check_version = 1) -if not st: - raise RuntimeError("no blas found, necessary for linalg module") -if IsATLAS(fenv, 'blas'): - version = GetATLASVersion(fenv) - env.Append(CPPDEFINES = [('ATLAS_INFO', '"\\"%s"\\"' % version)]) -else: - env.Append(CPPDEFINES = [('NO_ATLAS_INFO', 1)]) - -st = fconfig.CheckLAPACK() -if not st: - raise RuntimeError("no lapack found, necessary for linalg module") -fconfig.Finish() -write_info(fenv) - - -#========== -# Build -#========== -#------------ -# fblas -#------------ -fenv.haha('fblas', 'generic_fblas.pyf') -source = ['fblas.pyf'] -if IsVeclib(fenv, 'blas') or IsAccelerate(fenv, 'blas'): - source.append(pjoin('src', 'fblaswrap_veclib_c.c')) -else: - source.append(pjoin('src', 'fblaswrap.f')) -fenv.NumpyPythonExtension('fblas', source) - -#------------ -# cblas -#------------ -if has_cblas: - env.haha('cblas', 'generic_cblas.pyf') -else: - env.hihi('cblas', 'generic_cblas.pyf') -env.NumpyPythonExtension('cblas', source = 'cblas.pyf') - -#------------ -# flapack -#------------ -yop = fenv.haha('flapack', 'generic_flapack.pyf') -# XXX: automatically scan dependency on flapack_user_routines.pyf ? 
-fenv.Depends(yop, pjoin(env['build_dir'], 'flapack_user_routines.pyf')) -fenv.NumpyPythonExtension('flapack', 'flapack.pyf') - -#------------ -# clapack -#------------ -if has_clapack: - env.haha('clapack', 'generic_clapack.pyf') -else: - env.hihi('clapack', 'generic_clapack.pyf') -env.NumpyPythonExtension('clapack', source = 'clapack.pyf') - -#---------------- -# _flinalg -#---------------- -flinalg_fsrc = [pjoin('src', i) for i in ['det.f', 'lu.f']] -flinalg_src = fenv.NumpyF2py(pjoin('src', '_flinalgmodule.c'), flinalg_fsrc) - -fenv.NumpyPythonExtension('_flinalg', source = flinalg_src + flinalg_fsrc) - -#---------------- -# calc_lwork: -#---------------- -calc_fsrc = [pjoin('src', 'calc_lwork.f')] -calc_src = env.NumpyF2py(pjoin('src', 'calc_lworkmodule.c'), calc_fsrc) -fenv.NumpyPythonExtension('calc_lwork', calc_src + calc_fsrc) - -#-------------- -# Atlas version -#-------------- -atlas_env = env.Clone() -if not IsATLAS(env, 'cblas'): - atlas_env.AppendUnique(CPPDEFINES = "NO_ATLAS_INFO") -atlas_env.NumpyPythonExtension('atlas_version', 'atlas_version.c') Copied: trunk/scipy/ndimage/SConscript (from rev 4426, trunk/scipy/ndimage/SConstruct) Deleted: trunk/scipy/ndimage/SConstruct =================================================================== --- trunk/scipy/ndimage/SConstruct 2008-06-11 15:38:20 UTC (rev 4426) +++ trunk/scipy/ndimage/SConstruct 2008-06-12 10:07:55 UTC (rev 4427) @@ -1,22 +0,0 @@ -# Last Change: Wed Mar 05 09:00 PM 2008 J -from os.path import join - -from numpy.distutils.misc_util import get_numpy_include_dirs -from numscons import GetNumpyEnvironment - -env = GetNumpyEnvironment(ARGUMENTS) - -env.AppendUnique(CPPPATH = get_numpy_include_dirs()) -env.AppendUnique(CPPPATH = 'src') - -ndimage_src = ["nd_image.c", "ni_filters.c", "ni_fourier.c", "ni_interpolation.c", - "ni_measure.c", "ni_morphology.c", "ni_support.c"] -env.NumpyPythonExtension('_nd_image', source = [join('src', i) for i in ndimage_src]) - -segment_src = ['Segmenter_EXT.c', 'Segmenter_IMPL.c'] -env.NumpyPythonExtension('_segment', source = [join('src', 'segment', i) - for i in segment_src]) - -register_src = ['Register_EXT.c', 'Register_IMPL.c'] -env.NumpyPythonExtension('_register', source = [join('src', 'register', i) - for i in register_src]) Copied: trunk/scipy/odr/SConscript (from rev 4426, trunk/scipy/odr/SConstruct) Deleted: trunk/scipy/odr/SConstruct =================================================================== --- trunk/scipy/odr/SConstruct 2008-06-11 15:38:20 UTC (rev 4426) +++ trunk/scipy/odr/SConstruct 2008-06-12 10:07:55 UTC (rev 4427) @@ -1,62 +0,0 @@ -# Last Change: Wed Mar 05 04:00 PM 2008 J -# vim:syntax=python - -import os -from os.path import join as pjoin, splitext - -from numpy.distutils.misc_util import get_numpy_include_dirs -from numscons import get_python_inc#, get_pythonlib_dir -from numscons import GetNumpyEnvironment -from numscons import CheckF77BLAS, CheckF77Clib - -from numscons import write_info - -env = GetNumpyEnvironment(ARGUMENTS) -env.Append(CPPPATH = [get_python_inc(), get_numpy_include_dirs()]) -#if os.name == 'nt': -# # NT needs the pythonlib to run any code importing Python.h, including -# # simple code using only typedef and so on, so we need it for configuration -# # checks -# env.AppendUnique(LIBPATH = [get_pythonlib_dir()]) - -#======================= -# Starting Configuration -#======================= -config = env.NumpyConfigure(custom_tests = {'CheckBLAS' : CheckF77BLAS, - 'CheckF77Clib' : CheckF77Clib}) - -if not config.CheckF77Clib(): - raise 
RuntimeError("Could not check F/C runtime library for %s/%s, " \ - "contact the maintainer" % (env['CC'], env['F77'])) - -#-------------- -# Checking Blas -#-------------- -st = config.CheckBLAS() -if not st: - has_blas = 0 -else: - has_blas = 1 - -config.Finish() -write_info(env) - -#========== -# Build -#========== - -# odr lib -libodr_src = [pjoin('odrpack', i) for i in ['d_odr.f', 'd_mprec.f', 'dlunoc.f']] -if has_blas: - libodr_src.append(pjoin('odrpack', 'd_lpk.f')) -else: - libodr_src.append(pjoin('odrpack', 'd_lpkbls.f')) - -env.NumpyStaticExtLibrary('odrpack', source = libodr_src) - -env.PrependUnique(LIBS = 'odrpack') -env.PrependUnique(LIBPATH = env['build_dir']) - -# odr pyextension -env.NumpyPythonExtension('__odrpack', '__odrpack.c', - LINKFLAGSEND = env['F77_LDFLAGS']) Copied: trunk/scipy/optimize/SConscript (from rev 4426, trunk/scipy/optimize/SConstruct) Deleted: trunk/scipy/optimize/SConstruct =================================================================== --- trunk/scipy/optimize/SConstruct 2008-06-11 15:38:20 UTC (rev 4426) +++ trunk/scipy/optimize/SConstruct 2008-06-12 10:07:55 UTC (rev 4427) @@ -1,91 +0,0 @@ -# Last Change: Sat May 03 02:00 PM 2008 J -# vim:syntax=python - -import os -from os.path import join as pjoin, splitext - -from numpy.distutils.misc_util import get_numpy_include_dirs -from numscons import get_python_inc#, get_pythonlib_dir -from numscons import GetNumpyEnvironment -from numscons import CheckF77LAPACK, CheckF77Clib - -from numscons import write_info - -env = GetNumpyEnvironment(ARGUMENTS) -env.Tool('numpyf2py') -env.Append(CPPPATH = get_numpy_include_dirs()) -env.Append(CPPPATH = env['F2PYINCLUDEDIR']) -#if os.name == 'nt': -# # NT needs the pythonlib to run any code importing Python.h, including -# # simple code using only typedef and so on, so we need it for configuration -# # checks -# env.AppendUnique(LIBPATH = [get_pythonlib_dir()]) - -#======================= -# Starting Configuration -#======================= -config = env.NumpyConfigure(custom_tests = {'CheckLAPACK' : CheckF77LAPACK, - 'CheckF77Clib' : CheckF77Clib}) - -#----------------- -# Checking Lapack -#----------------- -if not config.CheckF77Clib(): - raise RuntimeLibrary("Could not check C/F runtime library for %s/%s"\ - " , contact the maintainer" % (env['CC'], env['F77'])) - -st = config.CheckLAPACK() -if not st: - has_lapack = 0 -else: - has_lapack = 1 - -config.Finish() -write_info(env) - -#========== -# Build -#========== - -# minpack lib -minpack_src = [str(s) for s in env.NumpyGlob(pjoin('minpack', '*.f'))] -env.NumpyStaticExtLibrary('minpack', source = minpack_src) - -# rootfind lib -rootfind_src = [str(s) for s in env.NumpyGlob(pjoin('Zeros', '*.c'))] -env.NumpyStaticExtLibrary('rootfind', source = rootfind_src) - -env.AppendUnique(LIBS = ['minpack', 'rootfind']) -env.AppendUnique(LIBPATH = env['build_dir']) - -# _minpack pyextension -env.NumpyPythonExtension('_minpack', '_minpackmodule.c', - LINKFLAGSEND = env['F77_LDFLAGS']) - -# _zeros pyextension -env.NumpyPythonExtension('_zeros', 'zeros.c') - -# _lbfgsb pyextension -src = [pjoin('lbfgsb', i) for i in ['lbfgsb.pyf', 'routines.f']] -env.NumpyPythonExtension('_lbfgsb', source = src, - LINKFLAGSEND = env['F77_LDFLAGS']) - -# _cobyla pyextension -src = [pjoin('cobyla', i) for i in ['cobyla2.f', 'trstlp.f', 'cobyla.pyf']] -env.NumpyPythonExtension('_cobyla', source = src, - LINKFLAGSEND = env['F77_LDFLAGS']) - -# _minpack2 pyextension -src = [pjoin('minpack2', i) for i in ['dcsrch.f', 'dcstep.f', 'minpack2.pyf']] 
-env.NumpyPythonExtension('minpack2', source = src, - LINKFLAGSEND = env['F77_LDFLAGS']) - -# moduleTNC pyextension -env.NumpyPythonExtension('moduleTNC', - source = [pjoin('tnc', i) for i in \ - ['moduleTNC.c', 'tnc.c']]) - -# _slsqp pyextension -src = [pjoin('slsqp', i) for i in ['slsqp_optmz.f', 'slsqp.pyf']] -env.NumpyPythonExtension('_slsqp', source = src, - LINKFLAGSEND = env['F77_LDFLAGS']) Copied: trunk/scipy/signal/SConscript (from rev 4426, trunk/scipy/signal/SConstruct) Deleted: trunk/scipy/signal/SConstruct =================================================================== --- trunk/scipy/signal/SConstruct 2008-06-11 15:38:20 UTC (rev 4426) +++ trunk/scipy/signal/SConstruct 2008-06-12 10:07:55 UTC (rev 4427) @@ -1,19 +0,0 @@ -# Last Change: Wed Mar 05 05:00 PM 2008 J -# vim:syntax=python -from os.path import join - -from numpy.distutils.misc_util import get_numpy_include_dirs -from numscons import GetNumpyEnvironment - -env = GetNumpyEnvironment(ARGUMENTS) - -env.AppendUnique(CPPPATH = get_numpy_include_dirs()) -env.NumpyPythonExtension('sigtools', - source = ['sigtoolsmodule.c',\ - 'firfilter.c', \ - 'medianfilter.c']) - -env.NumpyPythonExtension('spline', - source = ['splinemodule.c', 'S_bspline_util.c', - 'D_bspline_util.c', 'C_bspline_util.c', - 'Z_bspline_util.c','bspline_util.c']) Copied: trunk/scipy/sparse/linalg/dsolve/SConscript (from rev 4426, trunk/scipy/sparse/linalg/dsolve/SConstruct) Deleted: trunk/scipy/sparse/linalg/dsolve/SConstruct =================================================================== --- trunk/scipy/sparse/linalg/dsolve/SConstruct 2008-06-11 15:38:20 UTC (rev 4426) +++ trunk/scipy/sparse/linalg/dsolve/SConstruct 2008-06-12 10:07:55 UTC (rev 4427) @@ -1,47 +0,0 @@ -from os.path import join as pjoin -import sys - -from numpy.distutils.misc_util import get_numpy_include_dirs - -from numscons import GetNumpyEnvironment -from numscons import CheckF77LAPACK -from numscons import write_info - -env = GetNumpyEnvironment(ARGUMENTS) - -#======================= -# Starting Configuration -#======================= -config = env.NumpyConfigure(custom_tests = {'CheckLapack' : CheckF77LAPACK}) - -#----------------- -# Checking Lapack -#----------------- -st = config.CheckLapack() -if not st: - raise RuntimeError("no lapack found, necessary for dsolve module") - -config.Finish() -write_info(env) - -# Build superlu lib -superlu_env = env.Clone() -superlu_def = {} -if sys.platform == 'win32': - superlu_def['NO_TIMER'] = 1 -superlu_def['USE_VENDOR_BLAS'] = 2 -superlu_env.Append(CPPDEFINES = superlu_def) - -superlu_src = superlu_env.NumpyGlob(pjoin('SuperLU', 'SRC', '*.c')) -superlu = superlu_env.NumpyStaticExtLibrary('superlu_src', source = superlu_src) - -# Build python extensions -pyenv = env.Clone() -pyenv.Append(CPPPATH = [get_numpy_include_dirs(), env['src_dir']]) -pyenv.Prepend(LIBS = superlu) -common_src = ['_superlu_utils.c', '_superluobject.c'] - -for prec in ['z', 'd', 'c', 's']: - pyenv.NumpyPythonExtension('_%ssuperlu' % prec, - source = common_src + \ - ['_%ssuperlumodule.c' % prec]) Copied: trunk/scipy/sparse/linalg/dsolve/umfpack/SConscript (from rev 4426, trunk/scipy/sparse/linalg/dsolve/umfpack/SConstruct) Deleted: trunk/scipy/sparse/linalg/dsolve/umfpack/SConstruct =================================================================== --- trunk/scipy/sparse/linalg/dsolve/umfpack/SConstruct 2008-06-11 15:38:20 UTC (rev 4426) +++ trunk/scipy/sparse/linalg/dsolve/umfpack/SConstruct 2008-06-12 10:07:55 UTC (rev 4427) @@ -1,35 +0,0 @@ -from os.path 
import join as pjoin - -from numpy.distutils.misc_util import get_numpy_include_dirs - -from numscons import GetNumpyEnvironment -from numscons import CheckF77BLAS, CheckF77Clib, NumpyCheckLibAndHeader -from numscons import write_info - -env = GetNumpyEnvironment(ARGUMENTS) - -#======================= -# Starting Configuration -#======================= -config = env.NumpyConfigure(custom_tests = - {'CheckBLAS' : CheckF77BLAS, - 'CheckF77Clib' : CheckF77Clib, - 'NumpyCheckLibAndHeader' : NumpyCheckLibAndHeader}) - -#----------------- -# Checking Lapack -#----------------- -st = config.CheckBLAS() -if not st: - raise RuntimeError("no blas found, necessary for umfpack module") - -has_umfpack = config.NumpyCheckLibAndHeader( - 'umfpack', None, 'umfpack.h', section = 'umfpack', autoadd = 1) -config.Finish() -write_info(env) - -if has_umfpack: - env.Append(SWIGFLAGS = '-python') - env.Append(SWIGFLAGS = '$_CPPINCFLAGS') - env.Append(CPPPATH = get_numpy_include_dirs()) - env.NumpyPythonExtension('__umfpack', source = 'umfpack.i') Copied: trunk/scipy/sparse/linalg/eigen/arpack/SConscript (from rev 4426, trunk/scipy/sparse/linalg/eigen/arpack/SConstruct) Deleted: trunk/scipy/sparse/linalg/eigen/arpack/SConstruct =================================================================== --- trunk/scipy/sparse/linalg/eigen/arpack/SConstruct 2008-06-11 15:38:20 UTC (rev 4426) +++ trunk/scipy/sparse/linalg/eigen/arpack/SConstruct 2008-06-12 10:07:55 UTC (rev 4427) @@ -1,47 +0,0 @@ -from os.path import join as pjoin - -from numpy.distutils.misc_util import get_numpy_include_dirs - -from numscons import GetNumpyEnvironment -from numscons import CheckF77LAPACK, CheckF77Clib -from numscons import write_info - -env = GetNumpyEnvironment(ARGUMENTS) - -#======================= -# Starting Configuration -#======================= -config = env.NumpyConfigure(custom_tests = {'CheckLAPACK' : CheckF77LAPACK, - 'CheckF77Clib' : CheckF77Clib}) - -env.Tool('numpyf2py') -#----------------- -# Checking Lapack -#----------------- -st = config.CheckF77Clib() -st = config.CheckLAPACK(autoadd = 1) -if not st: - raise RuntimeError("no lapack found, necessary for arpack module") - -config.Finish() -write_info(env) - -# Build arpack -arpack_src = env.NumpyGlob(pjoin('ARPACK', 'SRC', '*.f')) -arpack_src += env.NumpyGlob(pjoin('ARPACK', 'UTIL', '*.f')) -arpack_src += env.NumpyGlob(pjoin('ARPACK', 'LAPACK', '*.f')) - -src = [str(s) for s in arpack_src] - -env.AppendUnique(CPPPATH = pjoin('ARPACK', 'SRC')) -env.AppendUnique(F77PATH = pjoin(env['src_dir'], 'ARPACK', 'SRC')) -env.AppendUnique(LIBPATH = env['build_dir']) -arpack_lib = env.NumpyStaticExtLibrary('arpack', source = src) - -# Build _arpack extension -env.AppendUnique(CPPPATH = env['F2PYINCLUDEDIR']) -env.AppendUnique(CPPPATH = get_numpy_include_dirs()) - -env.NumpyFromFTemplate('arpack.pyf', 'arpack.pyf.src') -env.Prepend(LIBS = 'arpack') -env.NumpyPythonExtension('_arpack', 'arpack.pyf') Copied: trunk/scipy/sparse/linalg/isolve/SConscript (from rev 4426, trunk/scipy/sparse/linalg/isolve/SConstruct) Deleted: trunk/scipy/sparse/linalg/isolve/SConstruct =================================================================== --- trunk/scipy/sparse/linalg/isolve/SConstruct 2008-06-11 15:38:20 UTC (rev 4426) +++ trunk/scipy/sparse/linalg/isolve/SConstruct 2008-06-12 10:07:55 UTC (rev 4427) @@ -1,58 +0,0 @@ -# Last Change: Sat May 03 02:00 PM 2008 J -# vim:syntax=python - -from os.path import join as pjoin, splitext - -from numpy.distutils.misc_util import get_numpy_include_dirs -from 
numscons import GetNumpyEnvironment -from numscons import CheckF77LAPACK - -from numscons import write_info - -env = GetNumpyEnvironment(ARGUMENTS) -env.Tool('numpyf2py') -env.Append(CPPPATH = [get_numpy_include_dirs(), env['F2PYINCLUDEDIR']]) -#if os.name == 'nt': -# # NT needs the pythonlib to run any code importing Python.h, including -# # simple code using only typedef and so on, so we need it for configuration -# # checks -# env.AppendUnique(LIBPATH = [get_pythonlib_dir()]) - -#======================= -# Starting Configuration -#======================= -config = env.NumpyConfigure(custom_tests = {'CheckLAPACK' : CheckF77LAPACK}) - -#----------------- -# Checking Lapack -#----------------- -st = config.CheckLAPACK() -if not st: - raise RuntimeError("no lapack found, necessary for isolve module") - -config.Finish() -write_info(env) - -#-------------------- -# iterative methods -#-------------------- -methods = ['BiCGREVCOM.f.src', - 'BiCGSTABREVCOM.f.src', - 'CGREVCOM.f.src', - 'CGSREVCOM.f.src', -# 'ChebyREVCOM.f.src', - 'GMRESREVCOM.f.src', -# 'JacobiREVCOM.f.src', - 'QMRREVCOM.f.src', -# 'SORREVCOM.f.src' - ] -Util = ['STOPTEST2.f.src','getbreak.f.src'] -raw_sources = methods + Util + ['_iterative.pyf.src'] - -sources = [] -for method in raw_sources: - target = splitext(method)[0] - res = env.NumpyFromFTemplate(target, pjoin('iterative', method)) - sources.append(res[0]) - -env.NumpyPythonExtension('_iterative', source = sources) Copied: trunk/scipy/sparse/sparsetools/SConscript (from rev 4426, trunk/scipy/sparse/sparsetools/SConstruct) Deleted: trunk/scipy/sparse/sparsetools/SConstruct =================================================================== --- trunk/scipy/sparse/sparsetools/SConstruct 2008-06-11 15:38:20 UTC (rev 4426) +++ trunk/scipy/sparse/sparsetools/SConstruct 2008-06-12 10:07:55 UTC (rev 4427) @@ -1,12 +0,0 @@ -# Last Change: Wed Mar 05 09:00 PM 2008 J -# vim:syntax=python -from numpy.distutils.misc_util import get_numpy_include_dirs -from numscons import GetNumpyEnvironment - -env = GetNumpyEnvironment(ARGUMENTS) - -env.AppendUnique(CPPPATH = get_numpy_include_dirs()) - -for fmt in ['csr','csc','coo','bsr','dia']: - sources = [ fmt + '_wrap.cxx' ] - env.NumpyPythonExtension('_%s' % fmt, source = sources) Copied: trunk/scipy/special/SConscript (from rev 4426, trunk/scipy/special/SConstruct) Deleted: trunk/scipy/special/SConstruct =================================================================== --- trunk/scipy/special/SConstruct 2008-06-11 15:38:20 UTC (rev 4426) +++ trunk/scipy/special/SConstruct 2008-06-12 10:07:55 UTC (rev 4427) @@ -1,65 +0,0 @@ -# Last Change: Sat May 03 02:00 PM 2008 J -# vim:syntax=python -from os.path import join as pjoin, basename as pbasename -import sys - -from distutils.sysconfig import get_python_inc - -from numpy.distutils.misc_util import get_numpy_include_dirs -from numscons import GetNumpyEnvironment -from numscons import CheckF77Clib - -env = GetNumpyEnvironment(ARGUMENTS) - -env.Tool('numpyf2py') - -env.AppendUnique(CPPPATH = [get_python_inc(), get_numpy_include_dirs()]) -env.AppendUnique(CPPPATH = env['F2PYINCLUDEDIR']) - -if sys.platform=='win32': -# define_macros.append(('NOINFINITIES',None)) -# define_macros.append(('NONANS',None)) - env.AppendUnique(CPPDEFINES = '_USE_MATH_DEFINES') - -config = env.NumpyConfigure(custom_tests = {'CheckF77Clib' : CheckF77Clib}) -if not config.CheckF77Clib(): - raise RuntimeError("Could not get C/F77 runtime information") -config.Finish() - -def build_lib(name, ext, libname = None): - """ext 
should be .f or .c""" - if not libname: - libname = name - src = env.NumpyGlob(pjoin(name, '*%s' % ext)) - assert len(src) > 0 - env.NumpyStaticExtLibrary(libname, source = src) - -# C libraries -build_lib('c_misc', '.c') -build_lib('cephes', '.c') - -# F libraries -# XXX: handle no opt flags for mach -build_lib('mach', '.f') -build_lib('toms', '.f') -build_lib('amos', '.f') -build_lib('cdflib', '.f', 'cdf') -build_lib('specfun', '.f', 'specfunlib') - -env.AppendUnique(LIBPATH = [env['build_dir']]) - -# Cephes extension -src = ['_cephesmodule.c', 'amos_wrappers.c', 'specfun_wrappers.c', \ - 'toms_wrappers.c','cdf_wrappers.c','ufunc_extras.c'] - -env.NumpyPythonExtension('_cephes', - source = src, - LIBS = ['amos', 'toms', 'c_misc', 'cephes', 'mach',\ - 'cdf', 'specfunlib'], - LINKFLAGSEND = env['F77_LDFLAGS']) - -# Specfun extension -env.Prepend(LIBS = ['specfunlib']) -env.NumpyPythonExtension('specfun', source = 'specfun.pyf', - F2PYOPTIONS = ["--no-wrap-functions"], - LINKFLAGSEND = env['F77_LDFLAGS']) Copied: trunk/scipy/stats/SConscript (from rev 4426, trunk/scipy/stats/SConstruct) Deleted: trunk/scipy/stats/SConstruct =================================================================== --- trunk/scipy/stats/SConstruct 2008-06-11 15:38:20 UTC (rev 4426) +++ trunk/scipy/stats/SConstruct 2008-06-12 10:07:55 UTC (rev 4427) @@ -1,39 +0,0 @@ -# Last Change: Sat May 03 02:00 PM 2008 J -# vim:syntax=python -from os.path import join as pjoin - -from numpy.distutils.misc_util import get_numpy_include_dirs -from numscons import GetNumpyEnvironment, CheckF77Clib - -env = GetNumpyEnvironment(ARGUMENTS) - -t = env.Tool('numpyf2py') - -env.AppendUnique(CPPPATH = [get_numpy_include_dirs()]) -env.AppendUnique(CPPPATH = env['F2PYINCLUDEDIR']) - -config = env.NumpyConfigure(custom_tests = {'CheckF77Clib' : CheckF77Clib}) -if not config.CheckF77Clib(): - raise RuntimeError("Could not get C/F77 runtime information") -config.Finish() - -# Statlib library -src = env.NumpyGlob(pjoin('statlib', '*.f' )) -env.NumpyStaticExtLibrary('statlibimp', source = src) - -env.AppendUnique(LIBPATH = [env['build_dir']]) - -# Statlib extension -env.NumpyPythonExtension('statlib', source = 'statlib.pyf', - F2PYOPTIONS = ["--no-wrap-functions"], - LIBS = 'statlibimp', - LINKFLAGSEND = env['F77_LDFLAGS']) - -# futil extension -futil_src = env.NumpyF2py(pjoin('futilmodule.c'), pjoin('futil.f')) -env.NumpyPythonExtension('futil', source = futil_src + ['futil.f'], - LINKFLAGSEND = env['F77_LDFLAGS']) - -# mvn extension -env.NumpyPythonExtension('mvn', source = ['mvn.pyf', 'mvndst.f'], - LINKFLAGSEND = env['F77_LDFLAGS']) Copied: trunk/scipy/stsci/convolve/SConscript (from rev 4426, trunk/scipy/stsci/convolve/SConstruct) Deleted: trunk/scipy/stsci/convolve/SConstruct =================================================================== --- trunk/scipy/stsci/convolve/SConstruct 2008-06-11 15:38:20 UTC (rev 4426) +++ trunk/scipy/stsci/convolve/SConstruct 2008-06-12 10:07:55 UTC (rev 4427) @@ -1,13 +0,0 @@ -# Last Change: Wed Mar 05 09:00 PM 2008 J -from numpy.distutils.misc_util import get_numpy_include_dirs -from numpy import get_numarray_include -from numscons import GetNumpyEnvironment - -env = GetNumpyEnvironment(ARGUMENTS) - -env.AppendUnique(CPPPATH = [get_numpy_include_dirs(), get_numarray_include()]) -env.AppendUnique(CPPDEFINES = {'NUMPY': '1'}) - -# _correlate extension -env.NumpyPythonExtension('_correlate', source = 'src/_correlatemodule.c') -env.NumpyPythonExtension('_lineshape', source = 'src/_lineshapemodule.c') Copied: 
   trunk/scipy/stsci/image/SConscript (from rev 4426, trunk/scipy/stsci/image/SConstruct)

Deleted: trunk/scipy/stsci/image/SConstruct
===================================================================
--- trunk/scipy/stsci/image/SConstruct	2008-06-11 15:38:20 UTC (rev 4426)
+++ trunk/scipy/stsci/image/SConstruct	2008-06-12 10:07:55 UTC (rev 4427)
@@ -1,11 +0,0 @@
-# Last Change: Wed Mar 05 09:00 PM 2008 J
-from numpy.distutils.misc_util import get_numpy_include_dirs
-from numpy import get_numarray_include
-from numscons import GetNumpyEnvironment
-
-env = GetNumpyEnvironment(ARGUMENTS)
-
-env.AppendUnique(CPPPATH = [get_numpy_include_dirs(), get_numarray_include()])
-env.AppendUnique(CPPDEFINES = {'NUMPY': '1'})
-
-env.NumpyPythonExtension('_combine', source = 'src/_combinemodule.c')

From scipy-svn at scipy.org  Thu Jun 12 06:14:20 2008
From: scipy-svn at scipy.org (scipy-svn at scipy.org)
Date: Thu, 12 Jun 2008 05:14:20 -0500 (CDT)
Subject: [Scipy-svn] r4428 - trunk/scipy/cluster
Message-ID: <20080612101420.828B039C3AB@scipy.org>

Author: cdavid
Date: 2008-06-12 05:14:16 -0500 (Thu, 12 Jun 2008)
New Revision: 4428

Added:
   trunk/scipy/cluster/SConstruct
Log:
Add SConstruct file for scipy.cluster.

Added: trunk/scipy/cluster/SConstruct
===================================================================
--- trunk/scipy/cluster/SConstruct	2008-06-12 10:07:55 UTC (rev 4427)
+++ trunk/scipy/cluster/SConstruct	2008-06-12 10:14:16 UTC (rev 4428)
@@ -0,0 +1,2 @@
+from numscons import GetInitEnvironment
+GetInitEnvironment(ARGUMENTS).DistutilsSConscript('SConscript')

From scipy-svn at scipy.org  Thu Jun 12 06:17:22 2008
From: scipy-svn at scipy.org (scipy-svn at scipy.org)
Date: Thu, 12 Jun 2008 05:17:22 -0500 (CDT)
Subject: [Scipy-svn] r4429 - in trunk/scipy: fftpack integrate interpolate io lib/blas lib/lapack linalg ndimage odr optimize signal sparse/linalg/dsolve sparse/linalg/dsolve/umfpack sparse/linalg/eigen/arpack sparse/linalg/isolve sparse/sparsetools special stats stsci/convolve stsci/image
Message-ID: <20080612101722.69C7F39C3AB@scipy.org>

Author: cdavid
Date: 2008-06-12 05:16:21 -0500 (Thu, 12 Jun 2008)
New Revision: 4429

Added:
   trunk/scipy/fftpack/SConstruct
   trunk/scipy/integrate/SConstruct
   trunk/scipy/interpolate/SConstruct
   trunk/scipy/io/SConstruct
   trunk/scipy/lib/blas/SConstruct
   trunk/scipy/lib/lapack/SConstruct
   trunk/scipy/linalg/SConstruct
   trunk/scipy/ndimage/SConstruct
   trunk/scipy/odr/SConstruct
   trunk/scipy/optimize/SConstruct
   trunk/scipy/signal/SConstruct
   trunk/scipy/sparse/linalg/dsolve/SConstruct
   trunk/scipy/sparse/linalg/dsolve/umfpack/SConstruct
   trunk/scipy/sparse/linalg/eigen/arpack/SConstruct
   trunk/scipy/sparse/linalg/isolve/SConstruct
   trunk/scipy/sparse/sparsetools/SConstruct
   trunk/scipy/special/SConstruct
   trunk/scipy/stats/SConstruct
   trunk/scipy/stsci/convolve/SConstruct
   trunk/scipy/stsci/image/SConstruct
Log:
Add SConstruct files for all pkgs built with scons script.

Copied: trunk/scipy/fftpack/SConstruct (from rev 4428, trunk/scipy/cluster/SConstruct)
Copied: trunk/scipy/integrate/SConstruct (from rev 4428, trunk/scipy/cluster/SConstruct)
Copied: trunk/scipy/interpolate/SConstruct (from rev 4428, trunk/scipy/cluster/SConstruct)
Copied: trunk/scipy/io/SConstruct (from rev 4428, trunk/scipy/cluster/SConstruct)
Copied: trunk/scipy/lib/blas/SConstruct (from rev 4428, trunk/scipy/cluster/SConstruct)
Copied: trunk/scipy/lib/lapack/SConstruct (from rev 4428, trunk/scipy/cluster/SConstruct)
Copied: trunk/scipy/linalg/SConstruct (from rev 4428, trunk/scipy/cluster/SConstruct)
Copied: trunk/scipy/ndimage/SConstruct (from rev 4428, trunk/scipy/cluster/SConstruct)
Copied: trunk/scipy/odr/SConstruct (from rev 4428, trunk/scipy/cluster/SConstruct)
Copied: trunk/scipy/optimize/SConstruct (from rev 4428, trunk/scipy/cluster/SConstruct)
Copied: trunk/scipy/signal/SConstruct (from rev 4428, trunk/scipy/cluster/SConstruct)
Copied: trunk/scipy/sparse/linalg/dsolve/SConstruct (from rev 4428, trunk/scipy/cluster/SConstruct)
Copied: trunk/scipy/sparse/linalg/dsolve/umfpack/SConstruct (from rev 4428, trunk/scipy/cluster/SConstruct)
Copied: trunk/scipy/sparse/linalg/eigen/arpack/SConstruct (from rev 4428, trunk/scipy/cluster/SConstruct)
Copied: trunk/scipy/sparse/linalg/isolve/SConstruct (from rev 4428, trunk/scipy/cluster/SConstruct)
Copied: trunk/scipy/sparse/sparsetools/SConstruct (from rev 4428, trunk/scipy/cluster/SConstruct)
Copied: trunk/scipy/special/SConstruct (from rev 4428, trunk/scipy/cluster/SConstruct)
Copied: trunk/scipy/stats/SConstruct (from rev 4428, trunk/scipy/cluster/SConstruct)
Copied: trunk/scipy/stsci/convolve/SConstruct (from rev 4428, trunk/scipy/cluster/SConstruct)
Copied: trunk/scipy/stsci/image/SConstruct (from rev 4428, trunk/scipy/cluster/SConstruct)
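r4428 and r4429 put a two-line trampoline SConstruct next to each per-package SConscript: the SConstruct only bootstraps the numscons environment and immediately hands control to the SConscript, so the real build logic can later run from a distutils-controlled build directory. In plain SCons the same split looks roughly like the sketch below (the build directory path is hypothetical; the numscons DistutilsSConscript call computes the real one):

    # SConstruct -- top-level trampoline (plain-SCons sketch, not the scipy code)
    SConscript('SConscript',             # per-package build logic lives here
               build_dir='build/scons',  # illustrative out-of-tree build dir
               duplicate=0)              # do not copy sources into build_dir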
From scipy-svn at scipy.org  Thu Jun 12 06:19:21 2008
From: scipy-svn at scipy.org (scipy-svn at scipy.org)
Date: Thu, 12 Jun 2008 05:19:21 -0500 (CDT)
Subject: [Scipy-svn] r4430 - in trunk/scipy: cluster fftpack integrate interpolate io lib/blas lib/lapack linalg ndimage odr optimize signal sparse/linalg/dsolve sparse/linalg/dsolve/umfpack sparse/linalg/eigen/arpack sparse/linalg/isolve sparse/sparsetools special stats stsci/convolve stsci/image
Message-ID: <20080612101921.F2B6C39C3AB@scipy.org>

Author: cdavid
Date: 2008-06-12 05:18:28 -0500 (Thu, 12 Jun 2008)
New Revision: 4430

Modified:
   trunk/scipy/cluster/SConscript
   trunk/scipy/fftpack/SConscript
   trunk/scipy/integrate/SConscript
   trunk/scipy/interpolate/SConscript
   trunk/scipy/io/SConscript
   trunk/scipy/lib/blas/SConscript
   trunk/scipy/lib/lapack/SConscript
   trunk/scipy/linalg/SConscript
   trunk/scipy/ndimage/SConscript
   trunk/scipy/odr/SConscript
   trunk/scipy/optimize/SConscript
   trunk/scipy/signal/SConscript
   trunk/scipy/sparse/linalg/dsolve/SConscript
   trunk/scipy/sparse/linalg/dsolve/umfpack/SConscript
   trunk/scipy/sparse/linalg/eigen/arpack/SConscript
   trunk/scipy/sparse/linalg/isolve/SConscript
   trunk/scipy/sparse/sparsetools/SConscript
   trunk/scipy/special/SConscript
   trunk/scipy/stats/SConscript
   trunk/scipy/stsci/convolve/SConscript
   trunk/scipy/stsci/image/SConscript
Log:
Rename NumpyPythonExtension to DistutilsPythonExtension.
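For orientation before the long run of per-file diffs that follows: after this rename, a minimal numscons SConscript building one extension reads as in the sketch below. The extension name and source file are made up for illustration; the calls themselves mirror the diffs.

    # SConscript (sketch; '_example' and its source path are hypothetical)
    from numpy.distutils.misc_util import get_numpy_include_dirs
    from numscons import GetNumpyEnvironment

    env = GetNumpyEnvironment(ARGUMENTS)
    env.AppendUnique(CPPPATH = get_numpy_include_dirs())
    # formerly env.NumpyPythonExtension(...)
    env.DistutilsPythonExtension('_example', source = ['src/_examplemodule.c'])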
Modified: trunk/scipy/cluster/SConscript =================================================================== --- trunk/scipy/cluster/SConscript 2008-06-12 10:16:21 UTC (rev 4429) +++ trunk/scipy/cluster/SConscript 2008-06-12 10:18:28 UTC (rev 4430) @@ -8,12 +8,12 @@ env = GetNumpyEnvironment(ARGUMENTS) env.AppendUnique(CPPPATH = get_numpy_include_dirs()) -env.NumpyPythonExtension('_vq', source = [join('src', 'vq_module.c'), +env.DistutilsPythonExtension('_vq', source = [join('src', 'vq_module.c'), join('src', 'vq.c')]) -env.NumpyPythonExtension('_hierarchy_wrap', source = [join('src', 'hierarchy_wrap.c'), +env.DistutilsPythonExtension('_hierarchy_wrap', source = [join('src', 'hierarchy_wrap.c'), join('src', 'hierarchy.c')]) -env.NumpyPythonExtension('_distance_wrap', source = [join('src', 'distance_wrap.c'), +env.DistutilsPythonExtension('_distance_wrap', source = [join('src', 'distance_wrap.c'), join('src', 'distance.c')]) Modified: trunk/scipy/fftpack/SConscript =================================================================== --- trunk/scipy/fftpack/SConscript 2008-06-12 10:16:21 UTC (rev 4429) +++ trunk/scipy/fftpack/SConscript 2008-06-12 10:18:28 UTC (rev 4430) @@ -37,8 +37,8 @@ # Build _fftpack src = ['src/zfft.c','src/drfft.c','src/zrfft.c', 'src/zfftnd.c', 'fftpack.pyf'] -env.NumpyPythonExtension('_fftpack', src) +env.DistutilsPythonExtension('_fftpack', src) # Build convolve src = ['src/convolve.c', 'convolve.pyf'] -env.NumpyPythonExtension('convolve', src) +env.DistutilsPythonExtension('convolve', src) Modified: trunk/scipy/integrate/SConscript =================================================================== --- trunk/scipy/integrate/SConscript 2008-06-12 10:16:21 UTC (rev 4429) +++ trunk/scipy/integrate/SConscript 2008-06-12 10:18:28 UTC (rev 4430) @@ -52,10 +52,10 @@ odenv.Prepend(LIBS = ['odepack', 'linpack_lite', 'mach']) # Build _quadpack -quadenv.NumpyPythonExtension('_quadpack', source = '_quadpackmodule.c') +quadenv.DistutilsPythonExtension('_quadpack', source = '_quadpackmodule.c') # Build _odepack -odenv.NumpyPythonExtension('_odepack', source = '_odepackmodule.c') +odenv.DistutilsPythonExtension('_odepack', source = '_odepackmodule.c') # Build vode -odenv.NumpyPythonExtension('vode', source = 'vode.pyf') +odenv.DistutilsPythonExtension('vode', source = 'vode.pyf') Modified: trunk/scipy/interpolate/SConscript =================================================================== --- trunk/scipy/interpolate/SConscript 2008-06-12 10:16:21 UTC (rev 4429) +++ trunk/scipy/interpolate/SConscript 2008-06-12 10:18:28 UTC (rev 4430) @@ -25,7 +25,7 @@ env.PrependUnique(LIBPATH = [env['build_dir']]) # Build _fitpack -env.NumpyPythonExtension('_fitpack', source = '_fitpackmodule.c') +env.DistutilsPythonExtension('_fitpack', source = '_fitpackmodule.c') # Build dfitpack -env.NumpyPythonExtension('dfitpack', source = 'fitpack.pyf') +env.DistutilsPythonExtension('dfitpack', source = 'fitpack.pyf') Modified: trunk/scipy/io/SConscript =================================================================== --- trunk/scipy/io/SConscript 2008-06-12 10:16:21 UTC (rev 4429) +++ trunk/scipy/io/SConscript 2008-06-12 10:18:28 UTC (rev 4430) @@ -8,4 +8,4 @@ env = GetNumpyEnvironment(ARGUMENTS) env.AppendUnique(CPPPATH = get_numpy_include_dirs()) -env.NumpyPythonExtension('numpyio', source = 'numpyiomodule.c') +env.DistutilsPythonExtension('numpyio', source = 'numpyiomodule.c') Modified: trunk/scipy/lib/blas/SConscript =================================================================== --- 
trunk/scipy/lib/blas/SConscript 2008-06-12 10:16:21 UTC (rev 4429) +++ trunk/scipy/lib/blas/SConscript 2008-06-12 10:18:28 UTC (rev 4430) @@ -72,7 +72,7 @@ else: env.NumpyFromFTemplate('fblaswrap.f', 'fblaswrap.f.src') source.append('fblaswrap.f') -env.NumpyPythonExtension('fblas', source) +env.DistutilsPythonExtension('fblas', source) #------------ # cblas @@ -82,4 +82,4 @@ env.NumpyFromFTemplate('cblas.pyf', 'cblas.pyf.src') else: print env.GenerateFakePyf('cblas', 'cblas.pyf.src') -env.NumpyPythonExtension('cblas', source) +env.DistutilsPythonExtension('cblas', source) Modified: trunk/scipy/lib/lapack/SConscript =================================================================== --- trunk/scipy/lib/lapack/SConscript 2008-06-12 10:16:21 UTC (rev 4429) +++ trunk/scipy/lib/lapack/SConscript 2008-06-12 10:18:28 UTC (rev 4430) @@ -70,7 +70,7 @@ # flapack #------------ yop = env.NumpyFromFTemplate('flapack.pyf', 'flapack.pyf.src') -env.NumpyPythonExtension('flapack', source = ['flapack.pyf']) +env.DistutilsPythonExtension('flapack', source = ['flapack.pyf']) #------------ # clapack @@ -79,17 +79,17 @@ env.NumpyFromFTemplate('clapack.pyf', 'clapack.pyf.src') else: env.GenerateFakePyf('clapack', 'clapack.pyf.src') -env.NumpyPythonExtension('clapack', source = 'clapack.pyf') +env.DistutilsPythonExtension('clapack', source = 'clapack.pyf') #---------------- # calc_lwork: #---------------- calc_src = env.NumpyF2py(pjoin('calc_lworkmodule.c'), source = pjoin('calc_lwork.f')) -env.NumpyPythonExtension('calc_lwork', source = calc_src + ['calc_lwork.f'], +env.DistutilsPythonExtension('calc_lwork', source = calc_src + ['calc_lwork.f'], LINKFLAGSEND = env['F77_LDFLAGS']) #-------------- # Atlas version #-------------- -env.NumpyPythonExtension('atlas_version', 'atlas_version.c') +env.DistutilsPythonExtension('atlas_version', 'atlas_version.c') Modified: trunk/scipy/linalg/SConscript =================================================================== --- trunk/scipy/linalg/SConscript 2008-06-12 10:16:21 UTC (rev 4429) +++ trunk/scipy/linalg/SConscript 2008-06-12 10:18:28 UTC (rev 4430) @@ -107,7 +107,7 @@ source.append(pjoin('src', 'fblaswrap_veclib_c.c')) else: source.append(pjoin('src', 'fblaswrap.f')) -fenv.NumpyPythonExtension('fblas', source) +fenv.DistutilsPythonExtension('fblas', source) #------------ # cblas @@ -116,7 +116,7 @@ env.haha('cblas', 'generic_cblas.pyf') else: env.hihi('cblas', 'generic_cblas.pyf') -env.NumpyPythonExtension('cblas', source = 'cblas.pyf') +env.DistutilsPythonExtension('cblas', source = 'cblas.pyf') #------------ # flapack @@ -124,7 +124,7 @@ yop = fenv.haha('flapack', 'generic_flapack.pyf') # XXX: automatically scan dependency on flapack_user_routines.pyf ? 
fenv.Depends(yop, pjoin(env['build_dir'], 'flapack_user_routines.pyf')) -fenv.NumpyPythonExtension('flapack', 'flapack.pyf') +fenv.DistutilsPythonExtension('flapack', 'flapack.pyf') #------------ # clapack @@ -133,7 +133,7 @@ env.haha('clapack', 'generic_clapack.pyf') else: env.hihi('clapack', 'generic_clapack.pyf') -env.NumpyPythonExtension('clapack', source = 'clapack.pyf') +env.DistutilsPythonExtension('clapack', source = 'clapack.pyf') #---------------- # _flinalg @@ -141,14 +141,14 @@ flinalg_fsrc = [pjoin('src', i) for i in ['det.f', 'lu.f']] flinalg_src = fenv.NumpyF2py(pjoin('src', '_flinalgmodule.c'), flinalg_fsrc) -fenv.NumpyPythonExtension('_flinalg', source = flinalg_src + flinalg_fsrc) +fenv.DistutilsPythonExtension('_flinalg', source = flinalg_src + flinalg_fsrc) #---------------- # calc_lwork: #---------------- calc_fsrc = [pjoin('src', 'calc_lwork.f')] calc_src = env.NumpyF2py(pjoin('src', 'calc_lworkmodule.c'), calc_fsrc) -fenv.NumpyPythonExtension('calc_lwork', calc_src + calc_fsrc) +fenv.DistutilsPythonExtension('calc_lwork', calc_src + calc_fsrc) #-------------- # Atlas version @@ -156,4 +156,4 @@ atlas_env = env.Clone() if not IsATLAS(env, 'cblas'): atlas_env.AppendUnique(CPPDEFINES = "NO_ATLAS_INFO") -atlas_env.NumpyPythonExtension('atlas_version', 'atlas_version.c') +atlas_env.DistutilsPythonExtension('atlas_version', 'atlas_version.c') Modified: trunk/scipy/ndimage/SConscript =================================================================== --- trunk/scipy/ndimage/SConscript 2008-06-12 10:16:21 UTC (rev 4429) +++ trunk/scipy/ndimage/SConscript 2008-06-12 10:18:28 UTC (rev 4430) @@ -11,12 +11,12 @@ ndimage_src = ["nd_image.c", "ni_filters.c", "ni_fourier.c", "ni_interpolation.c", "ni_measure.c", "ni_morphology.c", "ni_support.c"] -env.NumpyPythonExtension('_nd_image', source = [join('src', i) for i in ndimage_src]) +env.DistutilsPythonExtension('_nd_image', source = [join('src', i) for i in ndimage_src]) segment_src = ['Segmenter_EXT.c', 'Segmenter_IMPL.c'] -env.NumpyPythonExtension('_segment', source = [join('src', 'segment', i) +env.DistutilsPythonExtension('_segment', source = [join('src', 'segment', i) for i in segment_src]) register_src = ['Register_EXT.c', 'Register_IMPL.c'] -env.NumpyPythonExtension('_register', source = [join('src', 'register', i) +env.DistutilsPythonExtension('_register', source = [join('src', 'register', i) for i in register_src]) Modified: trunk/scipy/odr/SConscript =================================================================== --- trunk/scipy/odr/SConscript 2008-06-12 10:16:21 UTC (rev 4429) +++ trunk/scipy/odr/SConscript 2008-06-12 10:18:28 UTC (rev 4430) @@ -58,5 +58,5 @@ env.PrependUnique(LIBPATH = env['build_dir']) # odr pyextension -env.NumpyPythonExtension('__odrpack', '__odrpack.c', +env.DistutilsPythonExtension('__odrpack', '__odrpack.c', LINKFLAGSEND = env['F77_LDFLAGS']) Modified: trunk/scipy/optimize/SConscript =================================================================== --- trunk/scipy/optimize/SConscript 2008-06-12 10:16:21 UTC (rev 4429) +++ trunk/scipy/optimize/SConscript 2008-06-12 10:18:28 UTC (rev 4430) @@ -59,33 +59,33 @@ env.AppendUnique(LIBPATH = env['build_dir']) # _minpack pyextension -env.NumpyPythonExtension('_minpack', '_minpackmodule.c', +env.DistutilsPythonExtension('_minpack', '_minpackmodule.c', LINKFLAGSEND = env['F77_LDFLAGS']) # _zeros pyextension -env.NumpyPythonExtension('_zeros', 'zeros.c') +env.DistutilsPythonExtension('_zeros', 'zeros.c') # _lbfgsb pyextension src = [pjoin('lbfgsb', i) 
for i in ['lbfgsb.pyf', 'routines.f']] -env.NumpyPythonExtension('_lbfgsb', source = src, +env.DistutilsPythonExtension('_lbfgsb', source = src, LINKFLAGSEND = env['F77_LDFLAGS']) # _cobyla pyextension src = [pjoin('cobyla', i) for i in ['cobyla2.f', 'trstlp.f', 'cobyla.pyf']] -env.NumpyPythonExtension('_cobyla', source = src, +env.DistutilsPythonExtension('_cobyla', source = src, LINKFLAGSEND = env['F77_LDFLAGS']) # _minpack2 pyextension src = [pjoin('minpack2', i) for i in ['dcsrch.f', 'dcstep.f', 'minpack2.pyf']] -env.NumpyPythonExtension('minpack2', source = src, +env.DistutilsPythonExtension('minpack2', source = src, LINKFLAGSEND = env['F77_LDFLAGS']) # moduleTNC pyextension -env.NumpyPythonExtension('moduleTNC', +env.DistutilsPythonExtension('moduleTNC', source = [pjoin('tnc', i) for i in \ ['moduleTNC.c', 'tnc.c']]) # _slsqp pyextension src = [pjoin('slsqp', i) for i in ['slsqp_optmz.f', 'slsqp.pyf']] -env.NumpyPythonExtension('_slsqp', source = src, +env.DistutilsPythonExtension('_slsqp', source = src, LINKFLAGSEND = env['F77_LDFLAGS']) Modified: trunk/scipy/signal/SConscript =================================================================== --- trunk/scipy/signal/SConscript 2008-06-12 10:16:21 UTC (rev 4429) +++ trunk/scipy/signal/SConscript 2008-06-12 10:18:28 UTC (rev 4430) @@ -8,12 +8,12 @@ env = GetNumpyEnvironment(ARGUMENTS) env.AppendUnique(CPPPATH = get_numpy_include_dirs()) -env.NumpyPythonExtension('sigtools', +env.DistutilsPythonExtension('sigtools', source = ['sigtoolsmodule.c',\ 'firfilter.c', \ 'medianfilter.c']) -env.NumpyPythonExtension('spline', +env.DistutilsPythonExtension('spline', source = ['splinemodule.c', 'S_bspline_util.c', 'D_bspline_util.c', 'C_bspline_util.c', 'Z_bspline_util.c','bspline_util.c']) Modified: trunk/scipy/sparse/linalg/dsolve/SConscript =================================================================== --- trunk/scipy/sparse/linalg/dsolve/SConscript 2008-06-12 10:16:21 UTC (rev 4429) +++ trunk/scipy/sparse/linalg/dsolve/SConscript 2008-06-12 10:18:28 UTC (rev 4430) @@ -42,6 +42,6 @@ common_src = ['_superlu_utils.c', '_superluobject.c'] for prec in ['z', 'd', 'c', 's']: - pyenv.NumpyPythonExtension('_%ssuperlu' % prec, + pyenv.DistutilsPythonExtension('_%ssuperlu' % prec, source = common_src + \ ['_%ssuperlumodule.c' % prec]) Modified: trunk/scipy/sparse/linalg/dsolve/umfpack/SConscript =================================================================== --- trunk/scipy/sparse/linalg/dsolve/umfpack/SConscript 2008-06-12 10:16:21 UTC (rev 4429) +++ trunk/scipy/sparse/linalg/dsolve/umfpack/SConscript 2008-06-12 10:18:28 UTC (rev 4430) @@ -32,4 +32,4 @@ env.Append(SWIGFLAGS = '-python') env.Append(SWIGFLAGS = '$_CPPINCFLAGS') env.Append(CPPPATH = get_numpy_include_dirs()) - env.NumpyPythonExtension('__umfpack', source = 'umfpack.i') + env.DistutilsPythonExtension('__umfpack', source = 'umfpack.i') Modified: trunk/scipy/sparse/linalg/eigen/arpack/SConscript =================================================================== --- trunk/scipy/sparse/linalg/eigen/arpack/SConscript 2008-06-12 10:16:21 UTC (rev 4429) +++ trunk/scipy/sparse/linalg/eigen/arpack/SConscript 2008-06-12 10:18:28 UTC (rev 4430) @@ -44,4 +44,4 @@ env.NumpyFromFTemplate('arpack.pyf', 'arpack.pyf.src') env.Prepend(LIBS = 'arpack') -env.NumpyPythonExtension('_arpack', 'arpack.pyf') +env.DistutilsPythonExtension('_arpack', 'arpack.pyf') Modified: trunk/scipy/sparse/linalg/isolve/SConscript =================================================================== --- 
trunk/scipy/sparse/linalg/isolve/SConscript 2008-06-12 10:16:21 UTC (rev 4429) +++ trunk/scipy/sparse/linalg/isolve/SConscript 2008-06-12 10:18:28 UTC (rev 4430) @@ -55,4 +55,4 @@ res = env.NumpyFromFTemplate(target, pjoin('iterative', method)) sources.append(res[0]) -env.NumpyPythonExtension('_iterative', source = sources) +env.DistutilsPythonExtension('_iterative', source = sources) Modified: trunk/scipy/sparse/sparsetools/SConscript =================================================================== --- trunk/scipy/sparse/sparsetools/SConscript 2008-06-12 10:16:21 UTC (rev 4429) +++ trunk/scipy/sparse/sparsetools/SConscript 2008-06-12 10:18:28 UTC (rev 4430) @@ -9,4 +9,4 @@ for fmt in ['csr','csc','coo','bsr','dia']: sources = [ fmt + '_wrap.cxx' ] - env.NumpyPythonExtension('_%s' % fmt, source = sources) + env.DistutilsPythonExtension('_%s' % fmt, source = sources) Modified: trunk/scipy/special/SConscript =================================================================== --- trunk/scipy/special/SConscript 2008-06-12 10:16:21 UTC (rev 4429) +++ trunk/scipy/special/SConscript 2008-06-12 10:18:28 UTC (rev 4430) @@ -52,7 +52,7 @@ src = ['_cephesmodule.c', 'amos_wrappers.c', 'specfun_wrappers.c', \ 'toms_wrappers.c','cdf_wrappers.c','ufunc_extras.c'] -env.NumpyPythonExtension('_cephes', +env.DistutilsPythonExtension('_cephes', source = src, LIBS = ['amos', 'toms', 'c_misc', 'cephes', 'mach',\ 'cdf', 'specfunlib'], @@ -60,6 +60,6 @@ # Specfun extension env.Prepend(LIBS = ['specfunlib']) -env.NumpyPythonExtension('specfun', source = 'specfun.pyf', +env.DistutilsPythonExtension('specfun', source = 'specfun.pyf', F2PYOPTIONS = ["--no-wrap-functions"], LINKFLAGSEND = env['F77_LDFLAGS']) Modified: trunk/scipy/stats/SConscript =================================================================== --- trunk/scipy/stats/SConscript 2008-06-12 10:16:21 UTC (rev 4429) +++ trunk/scipy/stats/SConscript 2008-06-12 10:18:28 UTC (rev 4430) @@ -24,16 +24,16 @@ env.AppendUnique(LIBPATH = [env['build_dir']]) # Statlib extension -env.NumpyPythonExtension('statlib', source = 'statlib.pyf', +env.DistutilsPythonExtension('statlib', source = 'statlib.pyf', F2PYOPTIONS = ["--no-wrap-functions"], LIBS = 'statlibimp', LINKFLAGSEND = env['F77_LDFLAGS']) # futil extension futil_src = env.NumpyF2py(pjoin('futilmodule.c'), pjoin('futil.f')) -env.NumpyPythonExtension('futil', source = futil_src + ['futil.f'], +env.DistutilsPythonExtension('futil', source = futil_src + ['futil.f'], LINKFLAGSEND = env['F77_LDFLAGS']) # mvn extension -env.NumpyPythonExtension('mvn', source = ['mvn.pyf', 'mvndst.f'], +env.DistutilsPythonExtension('mvn', source = ['mvn.pyf', 'mvndst.f'], LINKFLAGSEND = env['F77_LDFLAGS']) Modified: trunk/scipy/stsci/convolve/SConscript =================================================================== --- trunk/scipy/stsci/convolve/SConscript 2008-06-12 10:16:21 UTC (rev 4429) +++ trunk/scipy/stsci/convolve/SConscript 2008-06-12 10:18:28 UTC (rev 4430) @@ -9,5 +9,5 @@ env.AppendUnique(CPPDEFINES = {'NUMPY': '1'}) # _correlate extension -env.NumpyPythonExtension('_correlate', source = 'src/_correlatemodule.c') -env.NumpyPythonExtension('_lineshape', source = 'src/_lineshapemodule.c') +env.DistutilsPythonExtension('_correlate', source = 'src/_correlatemodule.c') +env.DistutilsPythonExtension('_lineshape', source = 'src/_lineshapemodule.c') Modified: trunk/scipy/stsci/image/SConscript =================================================================== --- trunk/scipy/stsci/image/SConscript 2008-06-12 10:16:21 UTC 
(rev 4429) +++ trunk/scipy/stsci/image/SConscript 2008-06-12 10:18:28 UTC (rev 4430) @@ -8,4 +8,4 @@ env.AppendUnique(CPPPATH = [get_numpy_include_dirs(), get_numarray_include()]) env.AppendUnique(CPPDEFINES = {'NUMPY': '1'}) -env.NumpyPythonExtension('_combine', source = 'src/_combinemodule.c') +env.DistutilsPythonExtension('_combine', source = 'src/_combinemodule.c') From scipy-svn at scipy.org Thu Jun 12 06:21:18 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Thu, 12 Jun 2008 05:21:18 -0500 (CDT) Subject: [Scipy-svn] r4431 - in trunk/scipy: fftpack integrate interpolate lib/blas lib/lapack linalg optimize sparse/linalg/eigen/arpack sparse/linalg/isolve special stats Message-ID: <20080612102118.1144739C3AB@scipy.org> Author: cdavid Date: 2008-06-12 05:20:52 -0500 (Thu, 12 Jun 2008) New Revision: 4431 Modified: trunk/scipy/fftpack/SConscript trunk/scipy/integrate/SConscript trunk/scipy/interpolate/SConscript trunk/scipy/lib/blas/SConscript trunk/scipy/lib/lapack/SConscript trunk/scipy/linalg/SConscript trunk/scipy/optimize/SConscript trunk/scipy/sparse/linalg/eigen/arpack/SConscript trunk/scipy/sparse/linalg/isolve/SConscript trunk/scipy/special/SConscript trunk/scipy/stats/SConscript Log: rename numpyf2py to f2py scons tool; numpyf2py is deprecated. Modified: trunk/scipy/fftpack/SConscript =================================================================== --- trunk/scipy/fftpack/SConscript 2008-06-12 10:18:28 UTC (rev 4430) +++ trunk/scipy/fftpack/SConscript 2008-06-12 10:20:52 UTC (rev 4431) @@ -7,7 +7,7 @@ from numscons import CheckFFT, IsMKL, IsFFTW2, IsFFTW3 env = GetNumpyEnvironment(ARGUMENTS) -env.Tool('numpyf2py') +env.Tool('f2py') env.AppendUnique(CPPPATH = get_numpy_include_dirs()) env.AppendUnique(CPPPATH = env['F2PYINCLUDEDIR']) Modified: trunk/scipy/integrate/SConscript =================================================================== --- trunk/scipy/integrate/SConscript 2008-06-12 10:18:28 UTC (rev 4430) +++ trunk/scipy/integrate/SConscript 2008-06-12 10:20:52 UTC (rev 4431) @@ -7,7 +7,7 @@ from numscons import GetNumpyEnvironment, CheckF77Clib, CheckF77BLAS env = GetNumpyEnvironment(ARGUMENTS) -env.Tool('numpyf2py') +env.Tool('f2py') # Configuration config = env.NumpyConfigure(custom_tests = {'CheckF77Clib' : CheckF77Clib, Modified: trunk/scipy/interpolate/SConscript =================================================================== --- trunk/scipy/interpolate/SConscript 2008-06-12 10:18:28 UTC (rev 4430) +++ trunk/scipy/interpolate/SConscript 2008-06-12 10:20:52 UTC (rev 4431) @@ -6,7 +6,7 @@ from numscons import GetNumpyEnvironment, CheckF77Clib env = GetNumpyEnvironment(ARGUMENTS) -env.Tool('numpyf2py') +env.Tool('f2py') config = env.NumpyConfigure(custom_tests = {'CheckF77Clib' : CheckF77Clib}) if not config.CheckF77Clib(): Modified: trunk/scipy/lib/blas/SConscript =================================================================== --- trunk/scipy/lib/blas/SConscript 2008-06-12 10:18:28 UTC (rev 4430) +++ trunk/scipy/lib/blas/SConscript 2008-06-12 10:20:52 UTC (rev 4431) @@ -15,7 +15,7 @@ from scons_support import do_generate_fake_interface, generate_interface_emitter env = GetNumpyEnvironment(ARGUMENTS) -env.Tool('numpyf2py') +env.Tool('f2py') env.Append(CPPPATH = [get_python_inc(), get_numpy_include_dirs()]) #if os.name == 'nt': Modified: trunk/scipy/lib/lapack/SConscript =================================================================== --- trunk/scipy/lib/lapack/SConscript 2008-06-12 10:18:28 UTC (rev 4430) +++ 
trunk/scipy/lib/lapack/SConscript 2008-06-12 10:20:52 UTC (rev 4431) @@ -17,7 +17,7 @@ generate_interface_emitter env = GetNumpyEnvironment(ARGUMENTS) -env.Tool('numpyf2py') +env.Tool('f2py') env.Append(CPPPATH = [get_python_inc(), get_numpy_include_dirs()]) #if os.name == 'nt': # # NT needs the pythonlib to run any code importing Python.h, including Modified: trunk/scipy/linalg/SConscript =================================================================== --- trunk/scipy/linalg/SConscript 2008-06-12 10:18:28 UTC (rev 4430) +++ trunk/scipy/linalg/SConscript 2008-06-12 10:20:52 UTC (rev 4431) @@ -19,7 +19,7 @@ # generate_config_header, generate_config_header_emitter env = GetNumpyEnvironment(ARGUMENTS) -env.Tool('numpyf2py') +env.Tool('f2py') env.Append(CPPPATH = [get_python_inc(), get_numpy_include_dirs()]) # XXX: handle cblas wrapper for complex (check in numpy.scons or here ?) Modified: trunk/scipy/optimize/SConscript =================================================================== --- trunk/scipy/optimize/SConscript 2008-06-12 10:18:28 UTC (rev 4430) +++ trunk/scipy/optimize/SConscript 2008-06-12 10:20:52 UTC (rev 4431) @@ -12,7 +12,7 @@ from numscons import write_info env = GetNumpyEnvironment(ARGUMENTS) -env.Tool('numpyf2py') +env.Tool('f2py') env.Append(CPPPATH = get_numpy_include_dirs()) env.Append(CPPPATH = env['F2PYINCLUDEDIR']) #if os.name == 'nt': Modified: trunk/scipy/sparse/linalg/eigen/arpack/SConscript =================================================================== --- trunk/scipy/sparse/linalg/eigen/arpack/SConscript 2008-06-12 10:18:28 UTC (rev 4430) +++ trunk/scipy/sparse/linalg/eigen/arpack/SConscript 2008-06-12 10:20:52 UTC (rev 4431) @@ -14,7 +14,7 @@ config = env.NumpyConfigure(custom_tests = {'CheckLAPACK' : CheckF77LAPACK, 'CheckF77Clib' : CheckF77Clib}) -env.Tool('numpyf2py') +env.Tool('f2py') #----------------- # Checking Lapack #----------------- Modified: trunk/scipy/sparse/linalg/isolve/SConscript =================================================================== --- trunk/scipy/sparse/linalg/isolve/SConscript 2008-06-12 10:18:28 UTC (rev 4430) +++ trunk/scipy/sparse/linalg/isolve/SConscript 2008-06-12 10:20:52 UTC (rev 4431) @@ -10,7 +10,7 @@ from numscons import write_info env = GetNumpyEnvironment(ARGUMENTS) -env.Tool('numpyf2py') +env.Tool('f2py') env.Append(CPPPATH = [get_numpy_include_dirs(), env['F2PYINCLUDEDIR']]) #if os.name == 'nt': # # NT needs the pythonlib to run any code importing Python.h, including Modified: trunk/scipy/special/SConscript =================================================================== --- trunk/scipy/special/SConscript 2008-06-12 10:18:28 UTC (rev 4430) +++ trunk/scipy/special/SConscript 2008-06-12 10:20:52 UTC (rev 4431) @@ -11,7 +11,7 @@ env = GetNumpyEnvironment(ARGUMENTS) -env.Tool('numpyf2py') +env.Tool('f2py') env.AppendUnique(CPPPATH = [get_python_inc(), get_numpy_include_dirs()]) env.AppendUnique(CPPPATH = env['F2PYINCLUDEDIR']) Modified: trunk/scipy/stats/SConscript =================================================================== --- trunk/scipy/stats/SConscript 2008-06-12 10:18:28 UTC (rev 4430) +++ trunk/scipy/stats/SConscript 2008-06-12 10:20:52 UTC (rev 4431) @@ -7,7 +7,7 @@ env = GetNumpyEnvironment(ARGUMENTS) -t = env.Tool('numpyf2py') +t = env.Tool('f2py') env.AppendUnique(CPPPATH = [get_numpy_include_dirs()]) env.AppendUnique(CPPPATH = env['F2PYINCLUDEDIR']) From scipy-svn at scipy.org Thu Jun 12 06:23:54 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Thu, 12 Jun 2008 05:23:54 
-0500 (CDT) Subject: [Scipy-svn] r4432 - in trunk/scipy: fftpack integrate interpolate odr optimize sparse/linalg/dsolve sparse/linalg/eigen/arpack special stats Message-ID: <20080612102354.8C8DB39C59C@scipy.org> Author: cdavid Date: 2008-06-12 05:23:33 -0500 (Thu, 12 Jun 2008) New Revision: 4432 Modified: trunk/scipy/fftpack/SConscript trunk/scipy/integrate/SConscript trunk/scipy/interpolate/SConscript trunk/scipy/odr/SConscript trunk/scipy/optimize/SConscript trunk/scipy/sparse/linalg/dsolve/SConscript trunk/scipy/sparse/linalg/eigen/arpack/SConscript trunk/scipy/special/SConscript trunk/scipy/stats/SConscript Log: Rename NumpyStaticExtLibrary to DistutilsStaticExtLibrary. Modified: trunk/scipy/fftpack/SConscript =================================================================== --- trunk/scipy/fftpack/SConscript 2008-06-12 10:20:52 UTC (rev 4431) +++ trunk/scipy/fftpack/SConscript 2008-06-12 10:23:33 UTC (rev 4432) @@ -31,7 +31,7 @@ # Build dfftpack src = env.NumpyGlob(pjoin('dfftpack', '*.f')) -dfftpack = env.NumpyStaticExtLibrary('dfftpack', source = [str(s) for s in src]) +dfftpack = env.DistutilsStaticExtLibrary('dfftpack', source = [str(s) for s in src]) env.PrependUnique(LIBS = ['dfftpack']) env.PrependUnique(LIBPATH = env['build_dir']) Modified: trunk/scipy/integrate/SConscript =================================================================== --- trunk/scipy/integrate/SConscript 2008-06-12 10:20:52 UTC (rev 4431) +++ trunk/scipy/integrate/SConscript 2008-06-12 10:23:33 UTC (rev 4432) @@ -27,20 +27,20 @@ # Build linpack_lite src = [str(s) for s in env.NumpyGlob(pjoin('linpack_lite', '*.f'))] -linpack_lite = env.NumpyStaticExtLibrary('linpack_lite', source = src) +linpack_lite = env.DistutilsStaticExtLibrary('linpack_lite', source = src) # Build mach # XXX: do not use optimization flags for mach src = [str(s) for s in env.NumpyGlob(pjoin('mach', '*.f'))] -mach = env.NumpyStaticExtLibrary('mach', source = src) +mach = env.DistutilsStaticExtLibrary('mach', source = src) # Build quadpack src = [str(s) for s in env.NumpyGlob(pjoin('quadpack', '*.f'))] -quadpack = env.NumpyStaticExtLibrary('quadpack', source = src) +quadpack = env.DistutilsStaticExtLibrary('quadpack', source = src) # Build odepack src = [str(s) for s in env.NumpyGlob(pjoin('odepack', '*.f'))] -odepack = env.NumpyStaticExtLibrary('odepack', source = src) +odepack = env.DistutilsStaticExtLibrary('odepack', source = src) env.AppendUnique(LIBPATH = env['build_dir']) env.AppendUnique(LINKFLAGSEND = env['F77_LDFLAGS']) Modified: trunk/scipy/interpolate/SConscript =================================================================== --- trunk/scipy/interpolate/SConscript 2008-06-12 10:20:52 UTC (rev 4431) +++ trunk/scipy/interpolate/SConscript 2008-06-12 10:23:33 UTC (rev 4432) @@ -19,7 +19,7 @@ # Build fitpack src = [str(s) for s in env.NumpyGlob(pjoin('fitpack', '*.f'))] -fitpack = env.NumpyStaticExtLibrary('fitpack', source = src) +fitpack = env.DistutilsStaticExtLibrary('fitpack', source = src) env.PrependUnique(LIBS = ['fitpack']) env.PrependUnique(LIBPATH = [env['build_dir']]) Modified: trunk/scipy/odr/SConscript =================================================================== --- trunk/scipy/odr/SConscript 2008-06-12 10:20:52 UTC (rev 4431) +++ trunk/scipy/odr/SConscript 2008-06-12 10:23:33 UTC (rev 4432) @@ -52,7 +52,7 @@ else: libodr_src.append(pjoin('odrpack', 'd_lpkbls.f')) -env.NumpyStaticExtLibrary('odrpack', source = libodr_src) +env.DistutilsStaticExtLibrary('odrpack', source = libodr_src) 
env.PrependUnique(LIBS = 'odrpack') env.PrependUnique(LIBPATH = env['build_dir']) Modified: trunk/scipy/optimize/SConscript =================================================================== --- trunk/scipy/optimize/SConscript 2008-06-12 10:20:52 UTC (rev 4431) +++ trunk/scipy/optimize/SConscript 2008-06-12 10:23:33 UTC (rev 4432) @@ -49,11 +49,11 @@ # minpack lib minpack_src = [str(s) for s in env.NumpyGlob(pjoin('minpack', '*.f'))] -env.NumpyStaticExtLibrary('minpack', source = minpack_src) +env.DistutilsStaticExtLibrary('minpack', source = minpack_src) # rootfind lib rootfind_src = [str(s) for s in env.NumpyGlob(pjoin('Zeros', '*.c'))] -env.NumpyStaticExtLibrary('rootfind', source = rootfind_src) +env.DistutilsStaticExtLibrary('rootfind', source = rootfind_src) env.AppendUnique(LIBS = ['minpack', 'rootfind']) env.AppendUnique(LIBPATH = env['build_dir']) Modified: trunk/scipy/sparse/linalg/dsolve/SConscript =================================================================== --- trunk/scipy/sparse/linalg/dsolve/SConscript 2008-06-12 10:20:52 UTC (rev 4431) +++ trunk/scipy/sparse/linalg/dsolve/SConscript 2008-06-12 10:23:33 UTC (rev 4432) @@ -33,7 +33,7 @@ superlu_env.Append(CPPDEFINES = superlu_def) superlu_src = superlu_env.NumpyGlob(pjoin('SuperLU', 'SRC', '*.c')) -superlu = superlu_env.NumpyStaticExtLibrary('superlu_src', source = superlu_src) +superlu = superlu_env.DistutilsStaticExtLibrary('superlu_src', source = superlu_src) # Build python extensions pyenv = env.Clone() Modified: trunk/scipy/sparse/linalg/eigen/arpack/SConscript =================================================================== --- trunk/scipy/sparse/linalg/eigen/arpack/SConscript 2008-06-12 10:20:52 UTC (rev 4431) +++ trunk/scipy/sparse/linalg/eigen/arpack/SConscript 2008-06-12 10:23:33 UTC (rev 4432) @@ -36,7 +36,7 @@ env.AppendUnique(CPPPATH = pjoin('ARPACK', 'SRC')) env.AppendUnique(F77PATH = pjoin(env['src_dir'], 'ARPACK', 'SRC')) env.AppendUnique(LIBPATH = env['build_dir']) -arpack_lib = env.NumpyStaticExtLibrary('arpack', source = src) +arpack_lib = env.DistutilsStaticExtLibrary('arpack', source = src) # Build _arpack extension env.AppendUnique(CPPPATH = env['F2PYINCLUDEDIR']) Modified: trunk/scipy/special/SConscript =================================================================== --- trunk/scipy/special/SConscript 2008-06-12 10:20:52 UTC (rev 4431) +++ trunk/scipy/special/SConscript 2008-06-12 10:23:33 UTC (rev 4432) @@ -32,7 +32,7 @@ libname = name src = env.NumpyGlob(pjoin(name, '*%s' % ext)) assert len(src) > 0 - env.NumpyStaticExtLibrary(libname, source = src) + env.DistutilsStaticExtLibrary(libname, source = src) # C libraries build_lib('c_misc', '.c') Modified: trunk/scipy/stats/SConscript =================================================================== --- trunk/scipy/stats/SConscript 2008-06-12 10:20:52 UTC (rev 4431) +++ trunk/scipy/stats/SConscript 2008-06-12 10:23:33 UTC (rev 4432) @@ -19,7 +19,7 @@ # Statlib library src = env.NumpyGlob(pjoin('statlib', '*.f' )) -env.NumpyStaticExtLibrary('statlibimp', source = src) +env.DistutilsStaticExtLibrary('statlibimp', source = src) env.AppendUnique(LIBPATH = [env['build_dir']]) From scipy-svn at scipy.org Thu Jun 12 06:25:29 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Thu, 12 Jun 2008 05:25:29 -0500 (CDT) Subject: [Scipy-svn] r4433 - trunk/scipy/fftpack Message-ID: <20080612102529.DF41F39C59C@scipy.org> Author: cdavid Date: 2008-06-12 05:25:25 -0500 (Thu, 12 Jun 2008) New Revision: 4433 Modified: trunk/scipy/fftpack/SConscript 
Log: Fix libpath issue with new scons build_dir. Modified: trunk/scipy/fftpack/SConscript =================================================================== --- trunk/scipy/fftpack/SConscript 2008-06-12 10:23:33 UTC (rev 4432) +++ trunk/scipy/fftpack/SConscript 2008-06-12 10:25:25 UTC (rev 4433) @@ -1,4 +1,4 @@ -# Last Change: Sat May 03 02:00 PM 2008 J +# Last Change: Thu Jun 12 07:00 PM 2008 J # vim:syntax=python from os.path import join as pjoin @@ -33,7 +33,7 @@ src = env.NumpyGlob(pjoin('dfftpack', '*.f')) dfftpack = env.DistutilsStaticExtLibrary('dfftpack', source = [str(s) for s in src]) env.PrependUnique(LIBS = ['dfftpack']) -env.PrependUnique(LIBPATH = env['build_dir']) +env.PrependUnique(LIBPATH = ['.']) # Build _fftpack src = ['src/zfft.c','src/drfft.c','src/zrfft.c', 'src/zfftnd.c', 'fftpack.pyf'] From scipy-svn at scipy.org Thu Jun 12 06:38:43 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Thu, 12 Jun 2008 05:38:43 -0500 (CDT) Subject: [Scipy-svn] r4434 - in trunk/scipy: integrate interpolate odr optimize sparse/linalg/eigen/arpack special stats Message-ID: <20080612103843.5E7EE39C3AB@scipy.org> Author: cdavid Date: 2008-06-12 05:38:29 -0500 (Thu, 12 Jun 2008) New Revision: 4434 Modified: trunk/scipy/integrate/SConscript trunk/scipy/interpolate/SConscript trunk/scipy/odr/SConscript trunk/scipy/optimize/SConscript trunk/scipy/sparse/linalg/eigen/arpack/SConscript trunk/scipy/special/SConscript trunk/scipy/stats/SConscript Log: Replace build_dir by current directory when used as a LIBPATH. Modified: trunk/scipy/integrate/SConscript =================================================================== --- trunk/scipy/integrate/SConscript 2008-06-12 10:25:25 UTC (rev 4433) +++ trunk/scipy/integrate/SConscript 2008-06-12 10:38:29 UTC (rev 4434) @@ -1,4 +1,4 @@ -# Last Change: Sat May 03 02:00 PM 2008 J +# Last Change: Thu Jun 12 07:00 PM 2008 J # vim:syntax=python from os.path import join as pjoin import warnings @@ -42,7 +42,7 @@ src = [str(s) for s in env.NumpyGlob(pjoin('odepack', '*.f'))] odepack = env.DistutilsStaticExtLibrary('odepack', source = src) -env.AppendUnique(LIBPATH = env['build_dir']) +env.AppendUnique(LIBPATH = '.') env.AppendUnique(LINKFLAGSEND = env['F77_LDFLAGS']) quadenv = env.Clone() Modified: trunk/scipy/interpolate/SConscript =================================================================== --- trunk/scipy/interpolate/SConscript 2008-06-12 10:25:25 UTC (rev 4433) +++ trunk/scipy/interpolate/SConscript 2008-06-12 10:38:29 UTC (rev 4434) @@ -1,4 +1,4 @@ -# Last Change: Sat May 03 02:00 PM 2008 J +# Last Change: Thu Jun 12 07:00 PM 2008 J # vim:syntax=python from os.path import join as pjoin @@ -22,7 +22,7 @@ fitpack = env.DistutilsStaticExtLibrary('fitpack', source = src) env.PrependUnique(LIBS = ['fitpack']) -env.PrependUnique(LIBPATH = [env['build_dir']]) +env.PrependUnique(LIBPATH = ['.']) # Build _fitpack env.DistutilsPythonExtension('_fitpack', source = '_fitpackmodule.c') Modified: trunk/scipy/odr/SConscript =================================================================== --- trunk/scipy/odr/SConscript 2008-06-12 10:25:25 UTC (rev 4433) +++ trunk/scipy/odr/SConscript 2008-06-12 10:38:29 UTC (rev 4434) @@ -1,4 +1,4 @@ -# Last Change: Wed Mar 05 04:00 PM 2008 J +# Last Change: Thu Jun 12 07:00 PM 2008 J # vim:syntax=python import os @@ -55,7 +55,7 @@ env.DistutilsStaticExtLibrary('odrpack', source = libodr_src) env.PrependUnique(LIBS = 'odrpack') -env.PrependUnique(LIBPATH = env['build_dir']) +env.PrependUnique(LIBPATH = '.') # odr 
pyextension env.DistutilsPythonExtension('__odrpack', '__odrpack.c', Modified: trunk/scipy/optimize/SConscript =================================================================== --- trunk/scipy/optimize/SConscript 2008-06-12 10:25:25 UTC (rev 4433) +++ trunk/scipy/optimize/SConscript 2008-06-12 10:38:29 UTC (rev 4434) @@ -1,4 +1,4 @@ -# Last Change: Sat May 03 02:00 PM 2008 J +# Last Change: Thu Jun 12 07:00 PM 2008 J # vim:syntax=python import os @@ -56,7 +56,7 @@ env.DistutilsStaticExtLibrary('rootfind', source = rootfind_src) env.AppendUnique(LIBS = ['minpack', 'rootfind']) -env.AppendUnique(LIBPATH = env['build_dir']) +env.AppendUnique(LIBPATH = '.') # _minpack pyextension env.DistutilsPythonExtension('_minpack', '_minpackmodule.c', Modified: trunk/scipy/sparse/linalg/eigen/arpack/SConscript =================================================================== --- trunk/scipy/sparse/linalg/eigen/arpack/SConscript 2008-06-12 10:25:25 UTC (rev 4433) +++ trunk/scipy/sparse/linalg/eigen/arpack/SConscript 2008-06-12 10:38:29 UTC (rev 4434) @@ -35,7 +35,7 @@ env.AppendUnique(CPPPATH = pjoin('ARPACK', 'SRC')) env.AppendUnique(F77PATH = pjoin(env['src_dir'], 'ARPACK', 'SRC')) -env.AppendUnique(LIBPATH = env['build_dir']) +env.AppendUnique(LIBPATH = '.') arpack_lib = env.DistutilsStaticExtLibrary('arpack', source = src) # Build _arpack extension Modified: trunk/scipy/special/SConscript =================================================================== --- trunk/scipy/special/SConscript 2008-06-12 10:25:25 UTC (rev 4433) +++ trunk/scipy/special/SConscript 2008-06-12 10:38:29 UTC (rev 4434) @@ -1,4 +1,4 @@ -# Last Change: Sat May 03 02:00 PM 2008 J +# Last Change: Thu Jun 12 07:00 PM 2008 J # vim:syntax=python from os.path import join as pjoin, basename as pbasename import sys @@ -46,7 +46,7 @@ build_lib('cdflib', '.f', 'cdf') build_lib('specfun', '.f', 'specfunlib') -env.AppendUnique(LIBPATH = [env['build_dir']]) +env.AppendUnique(LIBPATH = ['.']) # Cephes extension src = ['_cephesmodule.c', 'amos_wrappers.c', 'specfun_wrappers.c', \ Modified: trunk/scipy/stats/SConscript =================================================================== --- trunk/scipy/stats/SConscript 2008-06-12 10:25:25 UTC (rev 4433) +++ trunk/scipy/stats/SConscript 2008-06-12 10:38:29 UTC (rev 4434) @@ -1,4 +1,4 @@ -# Last Change: Sat May 03 02:00 PM 2008 J +# Last Change: Thu Jun 12 07:00 PM 2008 J # vim:syntax=python from os.path import join as pjoin @@ -21,7 +21,7 @@ src = env.NumpyGlob(pjoin('statlib', '*.f' )) env.DistutilsStaticExtLibrary('statlibimp', source = src) -env.AppendUnique(LIBPATH = [env['build_dir']]) +env.AppendUnique(LIBPATH = '.') # Statlib extension env.DistutilsPythonExtension('statlib', source = 'statlib.pyf', From scipy-svn at scipy.org Thu Jun 12 06:46:02 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Thu, 12 Jun 2008 05:46:02 -0500 (CDT) Subject: [Scipy-svn] r4435 - trunk/scipy/linalg Message-ID: <20080612104602.AC69AC7C01F@scipy.org> Author: cdavid Date: 2008-06-12 05:45:57 -0500 (Thu, 12 Jun 2008) New Revision: 4435 Modified: trunk/scipy/linalg/scons_support.py Log: Do not play with build_dir anymore in emitters. 
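For context on the change below: an SCons emitter rewrites a builder's target and source lists before the build action runs. Until now the scipy emitters re-rooted every path under env['build_dir'] by hand; after this commit the emitter only derives the .pyf target name. A standalone sketch of the resulting pattern (the builder that consumes the emitter is assumed to be wired up elsewhere, as in the SConscripts):

    def generate_interface_emitter(target, source, env):
        # no manual build_dir re-rooting any more; placing nodes in the
        # variant dir is left to SCons itself
        base = str(target[0])
        return (['%s.pyf' % base], source)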
Modified: trunk/scipy/linalg/scons_support.py =================================================================== --- trunk/scipy/linalg/scons_support.py 2008-06-12 10:38:29 UTC (rev 4434) +++ trunk/scipy/linalg/scons_support.py 2008-06-12 10:45:57 UTC (rev 4435) @@ -14,8 +14,6 @@ return 0 def generate_interface_emitter(target, source, env): - source = [pjoin(env['build_dir'], str(i)) for i in source] - target = [pjoin(env['build_dir'], str(i)) for i in target] base = str(target[0]) return (['%s.pyf' % base], source) From scipy-svn at scipy.org Thu Jun 12 06:47:56 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Thu, 12 Jun 2008 05:47:56 -0500 (CDT) Subject: [Scipy-svn] r4436 - in trunk/scipy: lib/blas lib/lapack linalg Message-ID: <20080612104756.3186FC7C01A@scipy.org> Author: cdavid Date: 2008-06-12 05:47:48 -0500 (Thu, 12 Jun 2008) New Revision: 4436 Modified: trunk/scipy/lib/blas/scons_support.py trunk/scipy/lib/lapack/scons_support.py trunk/scipy/linalg/SConscript Log: Remove last build_dir uses. Modified: trunk/scipy/lib/blas/scons_support.py =================================================================== --- trunk/scipy/lib/blas/scons_support.py 2008-06-12 10:45:57 UTC (rev 4435) +++ trunk/scipy/lib/blas/scons_support.py 2008-06-12 10:47:48 UTC (rev 4436) @@ -1,8 +1,6 @@ from os.path import join as pjoin, splitext, basename as pbasename def generate_interface_emitter(target, source, env): - source = [pjoin(env['build_dir'], str(i)) for i in source] - target = [pjoin(env['build_dir'], str(i)) for i in target] base = str(target[0]) return (['%s.pyf' % base], source) Modified: trunk/scipy/lib/lapack/scons_support.py =================================================================== --- trunk/scipy/lib/lapack/scons_support.py 2008-06-12 10:45:57 UTC (rev 4435) +++ trunk/scipy/lib/lapack/scons_support.py 2008-06-12 10:47:48 UTC (rev 4436) @@ -1,8 +1,6 @@ from os.path import join as pjoin, splitext, basename as pbasename def generate_interface_emitter(target, source, env): - source = [pjoin(env['build_dir'], str(i)) for i in source] - target = [pjoin(env['build_dir'], str(i)) for i in target] base = str(target[0]) return (['%s.pyf' % base], source) Modified: trunk/scipy/linalg/SConscript =================================================================== --- trunk/scipy/linalg/SConscript 2008-06-12 10:45:57 UTC (rev 4435) +++ trunk/scipy/linalg/SConscript 2008-06-12 10:47:48 UTC (rev 4436) @@ -1,4 +1,4 @@ -# Last Change: Sat May 03 02:00 PM 2008 J +# Last Change: Thu Jun 12 07:00 PM 2008 J # vim:syntax=python import os @@ -123,7 +123,7 @@ #------------ yop = fenv.haha('flapack', 'generic_flapack.pyf') # XXX: automatically scan dependency on flapack_user_routines.pyf ? 
-fenv.Depends(yop, pjoin(env['build_dir'], 'flapack_user_routines.pyf')) +fenv.Depends(yop, 'flapack_user_routines.pyf') fenv.DistutilsPythonExtension('flapack', 'flapack.pyf') #------------ From scipy-svn at scipy.org Thu Jun 12 06:58:21 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Thu, 12 Jun 2008 05:58:21 -0500 (CDT) Subject: [Scipy-svn] r4437 - in trunk/scipy: lib/blas lib/lapack sparse/linalg/eigen/arpack sparse/linalg/isolve Message-ID: <20080612105821.0296839C3AB@scipy.org> Author: cdavid Date: 2008-06-12 05:58:13 -0500 (Thu, 12 Jun 2008) New Revision: 4437 Modified: trunk/scipy/lib/blas/SConscript trunk/scipy/lib/lapack/SConscript trunk/scipy/sparse/linalg/eigen/arpack/SConscript trunk/scipy/sparse/linalg/isolve/SConscript Log: Replace deprecated NumpyFrom*Template builders by their new names. Modified: trunk/scipy/lib/blas/SConscript =================================================================== --- trunk/scipy/lib/blas/SConscript 2008-06-12 10:47:48 UTC (rev 4436) +++ trunk/scipy/lib/blas/SConscript 2008-06-12 10:58:13 UTC (rev 4437) @@ -64,13 +64,13 @@ #------------ # fblas #------------ -env.NumpyFromFTemplate('fblas.pyf', 'fblas.pyf.src') +env.FromFTemplate('fblas.pyf', 'fblas.pyf.src') source = ['fblas.pyf'] if IsVeclib(env, 'blas') or IsAccelerate(env, 'blas'): - env.NumpyFromCTemplate('fblaswrap_veclib_c.c', 'fblaswrap_veclib_c.c.src') + env.FromCTemplate('fblaswrap_veclib_c.c', 'fblaswrap_veclib_c.c.src') source.append('fblaswrap_veclib_c.c') else: - env.NumpyFromFTemplate('fblaswrap.f', 'fblaswrap.f.src') + env.FromFTemplate('fblaswrap.f', 'fblaswrap.f.src') source.append('fblaswrap.f') env.DistutilsPythonExtension('fblas', source) @@ -79,7 +79,7 @@ #------------ # cblas #------------ source = ['cblas.pyf'] if has_cblas: - env.NumpyFromFTemplate('cblas.pyf', 'cblas.pyf.src') + env.FromFTemplate('cblas.pyf', 'cblas.pyf.src') else: print env.GenerateFakePyf('cblas', 'cblas.pyf.src') env.DistutilsPythonExtension('cblas', source) Modified: trunk/scipy/lib/lapack/SConscript =================================================================== --- trunk/scipy/lib/lapack/SConscript 2008-06-12 10:47:48 UTC (rev 4436) +++ trunk/scipy/lib/lapack/SConscript 2008-06-12 10:58:13 UTC (rev 4437) @@ -69,14 +69,14 @@ #------------ # flapack #------------ -yop = env.NumpyFromFTemplate('flapack.pyf', 'flapack.pyf.src') +yop = env.FromFTemplate('flapack.pyf', 'flapack.pyf.src') env.DistutilsPythonExtension('flapack', source = ['flapack.pyf']) #------------ # clapack #------------ if has_clapack: - env.NumpyFromFTemplate('clapack.pyf', 'clapack.pyf.src') + env.FromFTemplate('clapack.pyf', 'clapack.pyf.src') else: env.GenerateFakePyf('clapack', 'clapack.pyf.src') env.DistutilsPythonExtension('clapack', source = 'clapack.pyf') Modified: trunk/scipy/sparse/linalg/eigen/arpack/SConscript =================================================================== --- trunk/scipy/sparse/linalg/eigen/arpack/SConscript 2008-06-12 10:47:48 UTC (rev 4436) +++ trunk/scipy/sparse/linalg/eigen/arpack/SConscript 2008-06-12 10:58:13 UTC (rev 4437) @@ -42,6 +42,6 @@ env.AppendUnique(CPPPATH = env['F2PYINCLUDEDIR']) env.AppendUnique(CPPPATH = get_numpy_include_dirs()) -env.NumpyFromFTemplate('arpack.pyf', 'arpack.pyf.src') +env.FromFTemplate('arpack.pyf', 'arpack.pyf.src') env.Prepend(LIBS = 'arpack') env.DistutilsPythonExtension('_arpack', 'arpack.pyf') Modified: trunk/scipy/sparse/linalg/isolve/SConscript =================================================================== ---
trunk/scipy/sparse/linalg/isolve/SConscript 2008-06-12 10:47:48 UTC (rev 4436) +++ trunk/scipy/sparse/linalg/isolve/SConscript 2008-06-12 10:58:13 UTC (rev 4437) @@ -52,7 +52,7 @@ sources = [] for method in raw_sources: target = splitext(method)[0] - res = env.NumpyFromFTemplate(target, pjoin('iterative', method)) + res = env.FromFTemplate(target, pjoin('iterative', method)) sources.append(res[0]) env.DistutilsPythonExtension('_iterative', source = sources) From scipy-svn at scipy.org Thu Jun 12 07:00:20 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Thu, 12 Jun 2008 06:00:20 -0500 (CDT) Subject: [Scipy-svn] r4438 - in trunk/scipy: lib/lapack linalg stats Message-ID: <20080612110020.34FAB39C3AB@scipy.org> Author: cdavid Date: 2008-06-12 05:59:41 -0500 (Thu, 12 Jun 2008) New Revision: 4438 Modified: trunk/scipy/lib/lapack/SConscript trunk/scipy/linalg/SConscript trunk/scipy/stats/SConscript Log: Replace deprecated NumpyF2py builder by its new name. Modified: trunk/scipy/lib/lapack/SConscript =================================================================== --- trunk/scipy/lib/lapack/SConscript 2008-06-12 10:58:13 UTC (rev 4437) +++ trunk/scipy/lib/lapack/SConscript 2008-06-12 10:59:41 UTC (rev 4438) @@ -84,7 +84,7 @@ #---------------- # calc_lwork: #---------------- -calc_src = env.NumpyF2py(pjoin('calc_lworkmodule.c'), +calc_src = env.F2py(pjoin('calc_lworkmodule.c'), source = pjoin('calc_lwork.f')) env.DistutilsPythonExtension('calc_lwork', source = calc_src + ['calc_lwork.f'], LINKFLAGSEND = env['F77_LDFLAGS']) Modified: trunk/scipy/linalg/SConscript =================================================================== --- trunk/scipy/linalg/SConscript 2008-06-12 10:58:13 UTC (rev 4437) +++ trunk/scipy/linalg/SConscript 2008-06-12 10:59:41 UTC (rev 4438) @@ -139,7 +139,7 @@ # _flinalg #---------------- flinalg_fsrc = [pjoin('src', i) for i in ['det.f', 'lu.f']] -flinalg_src = fenv.NumpyF2py(pjoin('src', '_flinalgmodule.c'), flinalg_fsrc) +flinalg_src = fenv.F2py(pjoin('src', '_flinalgmodule.c'), flinalg_fsrc) fenv.DistutilsPythonExtension('_flinalg', source = flinalg_src + flinalg_fsrc) @@ -147,7 +147,7 @@ # calc_lwork: #---------------- calc_fsrc = [pjoin('src', 'calc_lwork.f')] -calc_src = env.NumpyF2py(pjoin('src', 'calc_lworkmodule.c'), calc_fsrc) +calc_src = env.F2py(pjoin('src', 'calc_lworkmodule.c'), calc_fsrc) fenv.DistutilsPythonExtension('calc_lwork', calc_src + calc_fsrc) #-------------- Modified: trunk/scipy/stats/SConscript =================================================================== --- trunk/scipy/stats/SConscript 2008-06-12 10:58:13 UTC (rev 4437) +++ trunk/scipy/stats/SConscript 2008-06-12 10:59:41 UTC (rev 4438) @@ -30,7 +30,7 @@ LINKFLAGSEND = env['F77_LDFLAGS']) # futil extension -futil_src = env.NumpyF2py(pjoin('futilmodule.c'), pjoin('futil.f')) +futil_src = env.F2py(pjoin('futilmodule.c'), pjoin('futil.f')) env.DistutilsPythonExtension('futil', source = futil_src + ['futil.f'], LINKFLAGSEND = env['F77_LDFLAGS']) From scipy-svn at scipy.org Thu Jun 12 07:23:37 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Thu, 12 Jun 2008 06:23:37 -0500 (CDT) Subject: [Scipy-svn] r4439 - in trunk/scipy: optimize sparse/linalg/dsolve Message-ID: <20080612112337.0A7DA39C443@scipy.org> Author: cdavid Date: 2008-06-12 06:23:17 -0500 (Thu, 12 Jun 2008) New Revision: 4439 Modified: trunk/scipy/optimize/SConscript trunk/scipy/sparse/linalg/dsolve/SConscript Log: Fix some include paths related to scons build_dir arch changes.
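A recap before r4439's path fixes below: taken together, r4430 through r4438 are a mechanical rename pass over the numscons build, in which the Numpy-prefixed tools and builders get new spellings while the call signatures stay the same. A minimal SConscript written against the renamed API might look like this sketch (the 'example' file names are invented for illustration; the builder names and argument shapes follow the diffs above):

    from numscons import GetNumpyEnvironment

    env = GetNumpyEnvironment(ARGUMENTS)
    env.Tool('f2py')                                  # was env.Tool('numpyf2py'), r4431
    # route 1: .pyf generated from a template, then built (cf. the fblas/flapack hunks)
    env.FromFTemplate('example.pyf', 'example.pyf.src')                # was NumpyFromFTemplate, r4437
    env.DistutilsPythonExtension('example', source = ['example.pyf'])  # was NumpyPythonExtension, r4430
    # route 2: f2py run directly on Fortran sources (cf. the futil/_flinalg hunks)
    wrap_src = env.F2py('ex2module.c', 'ex2.f')                        # was NumpyF2py, r4438
    env.DistutilsPythonExtension('ex2', source = wrap_src + ['ex2.f'])
    # static helper library (cf. r4432)
    env.DistutilsStaticExtLibrary('exhelper', source = ['helper.f'])   # was NumpyStaticExtLibrary, r4432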
Modified: trunk/scipy/optimize/SConscript =================================================================== --- trunk/scipy/optimize/SConscript 2008-06-12 10:59:41 UTC (rev 4438) +++ trunk/scipy/optimize/SConscript 2008-06-12 11:23:17 UTC (rev 4439) @@ -14,7 +14,7 @@ env = GetNumpyEnvironment(ARGUMENTS) env.Tool('f2py') env.Append(CPPPATH = get_numpy_include_dirs()) -env.Append(CPPPATH = env['F2PYINCLUDEDIR']) +env.Append(CPPPATH = [env['F2PYINCLUDEDIR'], 'Zeros']) #if os.name == 'nt': # # NT needs the pythonlib to run any code importing Python.h, including # # simple code using only typedef and so on, so we need it for configuration Modified: trunk/scipy/sparse/linalg/dsolve/SConscript =================================================================== --- trunk/scipy/sparse/linalg/dsolve/SConscript 2008-06-12 10:59:41 UTC (rev 4438) +++ trunk/scipy/sparse/linalg/dsolve/SConscript 2008-06-12 11:23:17 UTC (rev 4439) @@ -37,7 +37,7 @@ # Build python extensions pyenv = env.Clone() -pyenv.Append(CPPPATH = [get_numpy_include_dirs(), env['src_dir']]) +pyenv.Append(CPPPATH = [get_numpy_include_dirs(), pjoin('SuperLU', 'SRC')]) pyenv.Prepend(LIBS = superlu) common_src = ['_superlu_utils.c', '_superluobject.c'] From scipy-svn at scipy.org Thu Jun 12 07:27:59 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Thu, 12 Jun 2008 06:27:59 -0500 (CDT) Subject: [Scipy-svn] r4440 - in trunk/scipy: fftpack integrate interpolate optimize sparse/linalg/dsolve sparse/linalg/eigen/arpack special stats Message-ID: <20080612112759.3644FC7C03A@scipy.org> Author: cdavid Date: 2008-06-12 06:26:55 -0500 (Thu, 12 Jun 2008) New Revision: 4440 Modified: trunk/scipy/fftpack/SConscript trunk/scipy/integrate/SConscript trunk/scipy/interpolate/SConscript trunk/scipy/optimize/SConscript trunk/scipy/sparse/linalg/dsolve/SConscript trunk/scipy/sparse/linalg/eigen/arpack/SConscript trunk/scipy/special/SConscript trunk/scipy/stats/SConscript Log: Use Glob instead of NumpyGlob, which is not needed anymore. 
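Glob is the stock SCons environment method, so NumpyGlob presumably only existed to cope with the build_dir juggling removed in the previous commits. Worth noting when reading the hunks below: Glob returns Node objects rather than strings, which is why the callers keep their str() conversions, along the lines of

    src = [str(s) for s in env.Glob(pjoin('quadpack', '*.f'))]
    quadpack = env.DistutilsStaticExtLibrary('quadpack', source = src)

with pjoin being the usual 'from os.path import join as pjoin' these SConscripts open with.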
Modified: trunk/scipy/fftpack/SConscript =================================================================== --- trunk/scipy/fftpack/SConscript 2008-06-12 11:23:17 UTC (rev 4439) +++ trunk/scipy/fftpack/SConscript 2008-06-12 11:26:55 UTC (rev 4440) @@ -30,7 +30,7 @@ pass # Build dfftpack -src = env.NumpyGlob(pjoin('dfftpack', '*.f')) +src = env.Glob(pjoin('dfftpack', '*.f')) dfftpack = env.DistutilsStaticExtLibrary('dfftpack', source = [str(s) for s in src]) env.PrependUnique(LIBS = ['dfftpack']) env.PrependUnique(LIBPATH = ['.']) Modified: trunk/scipy/integrate/SConscript =================================================================== --- trunk/scipy/integrate/SConscript 2008-06-12 11:23:17 UTC (rev 4439) +++ trunk/scipy/integrate/SConscript 2008-06-12 11:26:55 UTC (rev 4440) @@ -26,20 +26,20 @@ # XXX: lapack integration # Build linpack_lite -src = [str(s) for s in env.NumpyGlob(pjoin('linpack_lite', '*.f'))] +src = [str(s) for s in env.Glob(pjoin('linpack_lite', '*.f'))] linpack_lite = env.DistutilsStaticExtLibrary('linpack_lite', source = src) # Build mach # XXX: do not use optimization flags for mach -src = [str(s) for s in env.NumpyGlob(pjoin('mach', '*.f'))] +src = [str(s) for s in env.Glob(pjoin('mach', '*.f'))] mach = env.DistutilsStaticExtLibrary('mach', source = src) # Build quadpack -src = [str(s) for s in env.NumpyGlob(pjoin('quadpack', '*.f'))] +src = [str(s) for s in env.Glob(pjoin('quadpack', '*.f'))] quadpack = env.DistutilsStaticExtLibrary('quadpack', source = src) # Build odepack -src = [str(s) for s in env.NumpyGlob(pjoin('odepack', '*.f'))] +src = [str(s) for s in env.Glob(pjoin('odepack', '*.f'))] odepack = env.DistutilsStaticExtLibrary('odepack', source = src) env.AppendUnique(LIBPATH = '.') Modified: trunk/scipy/interpolate/SConscript =================================================================== --- trunk/scipy/interpolate/SConscript 2008-06-12 11:23:17 UTC (rev 4439) +++ trunk/scipy/interpolate/SConscript 2008-06-12 11:26:55 UTC (rev 4440) @@ -18,7 +18,7 @@ env.AppendUnique(LINKFLAGSEND = env['F77_LDFLAGS']) # Build fitpack -src = [str(s) for s in env.NumpyGlob(pjoin('fitpack', '*.f'))] +src = [str(s) for s in env.Glob(pjoin('fitpack', '*.f'))] fitpack = env.DistutilsStaticExtLibrary('fitpack', source = src) env.PrependUnique(LIBS = ['fitpack']) Modified: trunk/scipy/optimize/SConscript =================================================================== --- trunk/scipy/optimize/SConscript 2008-06-12 11:23:17 UTC (rev 4439) +++ trunk/scipy/optimize/SConscript 2008-06-12 11:26:55 UTC (rev 4440) @@ -48,11 +48,11 @@ #========== # minpack lib -minpack_src = [str(s) for s in env.NumpyGlob(pjoin('minpack', '*.f'))] +minpack_src = [str(s) for s in env.Glob(pjoin('minpack', '*.f'))] env.DistutilsStaticExtLibrary('minpack', source = minpack_src) # rootfind lib -rootfind_src = [str(s) for s in env.NumpyGlob(pjoin('Zeros', '*.c'))] +rootfind_src = [str(s) for s in env.Glob(pjoin('Zeros', '*.c'))] env.DistutilsStaticExtLibrary('rootfind', source = rootfind_src) env.AppendUnique(LIBS = ['minpack', 'rootfind']) Modified: trunk/scipy/sparse/linalg/dsolve/SConscript =================================================================== --- trunk/scipy/sparse/linalg/dsolve/SConscript 2008-06-12 11:23:17 UTC (rev 4439) +++ trunk/scipy/sparse/linalg/dsolve/SConscript 2008-06-12 11:26:55 UTC (rev 4440) @@ -32,7 +32,7 @@ superlu_def['USE_VENDOR_BLAS'] = 2 superlu_env.Append(CPPDEFINES = superlu_def) -superlu_src = superlu_env.NumpyGlob(pjoin('SuperLU', 'SRC', '*.c')) +superlu_src 
= superlu_env.Glob(pjoin('SuperLU', 'SRC', '*.c')) superlu = superlu_env.DistutilsStaticExtLibrary('superlu_src', source = superlu_src) # Build python extensions Modified: trunk/scipy/sparse/linalg/eigen/arpack/SConscript =================================================================== --- trunk/scipy/sparse/linalg/eigen/arpack/SConscript 2008-06-12 11:23:17 UTC (rev 4439) +++ trunk/scipy/sparse/linalg/eigen/arpack/SConscript 2008-06-12 11:26:55 UTC (rev 4440) @@ -27,9 +27,9 @@ write_info(env) # Build arpack -arpack_src = env.NumpyGlob(pjoin('ARPACK', 'SRC', '*.f')) -arpack_src += env.NumpyGlob(pjoin('ARPACK', 'UTIL', '*.f')) -arpack_src += env.NumpyGlob(pjoin('ARPACK', 'LAPACK', '*.f')) +arpack_src = env.Glob(pjoin('ARPACK', 'SRC', '*.f')) +arpack_src += env.Glob(pjoin('ARPACK', 'UTIL', '*.f')) +arpack_src += env.Glob(pjoin('ARPACK', 'LAPACK', '*.f')) src = [str(s) for s in arpack_src] Modified: trunk/scipy/special/SConscript =================================================================== --- trunk/scipy/special/SConscript 2008-06-12 11:23:17 UTC (rev 4439) +++ trunk/scipy/special/SConscript 2008-06-12 11:26:55 UTC (rev 4440) @@ -30,7 +30,7 @@ """ext should be .f or .c""" if not libname: libname = name - src = env.NumpyGlob(pjoin(name, '*%s' % ext)) + src = env.Glob(pjoin(name, '*%s' % ext)) assert len(src) > 0 env.DistutilsStaticExtLibrary(libname, source = src) Modified: trunk/scipy/stats/SConscript =================================================================== --- trunk/scipy/stats/SConscript 2008-06-12 11:23:17 UTC (rev 4439) +++ trunk/scipy/stats/SConscript 2008-06-12 11:26:55 UTC (rev 4440) @@ -18,7 +18,7 @@ config.Finish() # Statlib library -src = env.NumpyGlob(pjoin('statlib', '*.f' )) +src = env.Glob(pjoin('statlib', '*.f' )) env.DistutilsStaticExtLibrary('statlibimp', source = src) env.AppendUnique(LIBPATH = '.') From scipy-svn at scipy.org Thu Jun 12 17:39:56 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Thu, 12 Jun 2008 16:39:56 -0500 (CDT) Subject: [Scipy-svn] r4441 - trunk/scipy/sparse/linalg/eigen/arpack Message-ID: <20080612213956.2876C39C6C8@scipy.org> Author: wnbell Date: 2008-06-12 16:39:52 -0500 (Thu, 12 Jun 2008) New Revision: 4441 Modified: trunk/scipy/sparse/linalg/eigen/arpack/arpack.py Log: fixed bug reported by James Philbin http://thread.gmane.org/gmane.comp.python.scientific.user/16720 Modified: trunk/scipy/sparse/linalg/eigen/arpack/arpack.py =================================================================== --- trunk/scipy/sparse/linalg/eigen/arpack/arpack.py 2008-06-12 11:26:55 UTC (rev 4440) +++ trunk/scipy/sparse/linalg/eigen/arpack/arpack.py 2008-06-12 21:39:52 UTC (rev 4441) @@ -460,12 +460,16 @@ else: break - if info < -1 : - raise RuntimeError("Error info=%d in arpack"%info) + if info < -1 : + raise RuntimeError("Error info=%d in arpack" % info) return None - if info == -1: - warnings.warn("Maximum number of iterations taken: %s"%iparam[2]) + if info == 1: + warnings.warn("Maximum number of iterations taken: %s" % iparam[2]) + + if iparam[4] < k: + warnings.warn("Only %d/%d eigenvectors converged" % (iparam[4], k)) + # now extract eigenvalues and (optionally) eigenvectors rvec = return_eigenvectors ierr = 0 From scipy-svn at scipy.org Thu Jun 12 18:09:21 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Thu, 12 Jun 2008 17:09:21 -0500 (CDT) Subject: [Scipy-svn] r4442 - trunk/scipy Message-ID: <20080612220921.F0B9D39C6F4@scipy.org> Author: wnbell Date: 2008-06-12 17:09:16 -0500 (Thu, 12 Jun 2008) New 
Revision: 4442 Removed: trunk/scipy/splinalg/ Modified: trunk/scipy/setup.py Log: removed splinalg Modified: trunk/scipy/setup.py =================================================================== --- trunk/scipy/setup.py 2008-06-12 21:39:52 UTC (rev 4441) +++ trunk/scipy/setup.py 2008-06-12 22:09:16 UTC (rev 4442) @@ -19,7 +19,6 @@ config.add_subpackage('signal') config.add_subpackage('sparse') config.add_subpackage('special') - config.add_subpackage('splinalg') config.add_subpackage('stats') config.add_subpackage('ndimage') config.add_subpackage('stsci') From scipy-svn at scipy.org Thu Jun 12 18:14:53 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Thu, 12 Jun 2008 17:14:53 -0500 (CDT) Subject: [Scipy-svn] r4443 - trunk/scipy/sparse/linalg/eigen/arpack Message-ID: <20080612221453.5118939C708@scipy.org> Author: wnbell Date: 2008-06-12 17:14:49 -0500 (Thu, 12 Jun 2008) New Revision: 4443 Modified: trunk/scipy/sparse/linalg/eigen/arpack/speigs.py Log: silenced ARPACK debugging output Modified: trunk/scipy/sparse/linalg/eigen/arpack/speigs.py =================================================================== --- trunk/scipy/sparse/linalg/eigen/arpack/speigs.py 2008-06-12 22:09:16 UTC (rev 4442) +++ trunk/scipy/sparse/linalg/eigen/arpack/speigs.py 2008-06-12 22:14:49 UTC (rev 4443) @@ -191,7 +191,7 @@ def ARPACK_iteration(matvec, sigma_solve, n, bmat, which, nev, tol, ncv, mode): ncv, maxitr = check_init(n, nev, ncv) ipntr, d, resid, workd, workl, v = init_workspaces(n,nev,ncv) - init_debug() + #init_debug() ishfts = 1 # Some random arpack parameter # Some random arpack parameter (I think it tells ARPACK to solve the # general eigenproblem using shift-invert From scipy-svn at scipy.org Thu Jun 12 23:08:52 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Thu, 12 Jun 2008 22:08:52 -0500 (CDT) Subject: [Scipy-svn] r4444 - in trunk/scipy/io: . 
tests Message-ID: <20080613030852.7CE55C7C048@scipy.org> Author: wnbell Date: 2008-06-12 22:08:49 -0500 (Thu, 12 Jun 2008) New Revision: 4444 Modified: trunk/scipy/io/mmio.py trunk/scipy/io/tests/test_mmio.py Log: fix problem with writing CSR/CSC matrices in MatrixMarket format Modified: trunk/scipy/io/mmio.py =================================================================== --- trunk/scipy/io/mmio.py 2008-06-12 22:14:49 UTC (rev 4443) +++ trunk/scipy/io/mmio.py 2008-06-13 03:08:49 UTC (rev 4444) @@ -554,16 +554,16 @@ coo = a.tocoo() # convert to COOrdinate format # write shape spec - stream.write('%i %i %i\n' % (rows,cols,coo.nnz)) + stream.write('%i %i %i\n' % (rows, cols, coo.nnz)) fmt = '%%.%dg' % precision if field == self.FIELD_PATTERN: - IJV = vstack((a.row, a.col)).T + IJV = vstack((coo.row, coo.col)).T elif field in [ self.FIELD_INTEGER, self.FIELD_REAL ]: - IJV = vstack((a.row, a.col, a.data)).T + IJV = vstack((coo.row, coo.col, coo.data)).T elif field == self.FIELD_COMPLEX: - IJV = vstack((a.row, a.col, a.data.real, a.data.imag)).T + IJV = vstack((coo.row, coo.col, coo.data.real, coo.data.imag)).T else: raise TypeError('Unknown field type %s' % `field`) Modified: trunk/scipy/io/tests/test_mmio.py =================================================================== --- trunk/scipy/io/tests/test_mmio.py 2008-06-12 22:14:49 UTC (rev 4443) +++ trunk/scipy/io/tests/test_mmio.py 2008-06-13 03:08:49 UTC (rev 4444) @@ -291,5 +291,28 @@ b = mmread(fn).todense() assert_array_almost_equal(a,b) + def test_sparse_formats(self): + mats = [] + + I = array([0, 0, 1, 2, 3, 3, 3, 4]) + J = array([0, 3, 1, 2, 1, 3, 4, 4]) + + V = array([ 1.0, 6.0, 10.5, 0.015, 250.5, -280.0, 33.32, 12.0 ]) + mats.append( scipy.sparse.coo_matrix((V,(I,J)),shape=(5,5)) ) + + V = array([ 1.0 + 3j, 6.0 + 2j, 10.50 + 0.9j, 0.015 + -4.4j, + 250.5 + 0j, -280.0 + 5j, 33.32 + 6.4j, 12.00 + 0.8j]) + mats.append( scipy.sparse.coo_matrix((V,(I,J)),shape=(5,5)) ) + + for mat in mats: + expected = mat.todense() + for fmt in ['csr','csc','coo']: + fn = mktemp() + mmwrite(fn, mat.asformat(fmt)) + + result = mmread(fn).todense() + assert_array_almost_equal(result, expected) + + if __name__ == "__main__": nose.run(argv=['', __file__]) From scipy-svn at scipy.org Thu Jun 12 23:16:06 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Thu, 12 Jun 2008 22:16:06 -0500 (CDT) Subject: [Scipy-svn] r4445 - trunk/scipy/sparse Message-ID: <20080613031606.B000839C137@scipy.org> Author: wnbell Date: 2008-06-12 22:16:04 -0500 (Thu, 12 Jun 2008) New Revision: 4445 Added: trunk/scipy/sparse/.svnignore Log: add sparse ignore filter Added: trunk/scipy/sparse/.svnignore =================================================================== --- trunk/scipy/sparse/.svnignore 2008-06-13 03:08:49 UTC (rev 4444) +++ trunk/scipy/sparse/.svnignore 2008-06-13 03:16:04 UTC (rev 4445) @@ -0,0 +1,7 @@ +# Recursively set the SVN ignore property. Run with: +# $ svn -R propset svn:ignore -F .svnignore . +*.pyc +*.bak +*.so +*.swp + From scipy-svn at scipy.org Mon Jun 16 21:19:09 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Mon, 16 Jun 2008 20:19:09 -0500 (CDT) Subject: [Scipy-svn] r4446 - trunk/scipy Message-ID: <20080617011909.BEF9239C90E@scipy.org> Author: rkern Date: 2008-06-16 20:19:04 -0500 (Mon, 16 Jun 2008) New Revision: 4446 Modified: trunk/scipy/__init__.py Log: BUG: Ensure that no subpackage name is added to scipy.__all__. 
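The mechanism behind the fix below: with numpy < 1.2, any subpackage name left in scipy.__all__ gets imported as a side effect of 'from scipy import *'. The patch removes the names defensively, inside try/except ValueError, since a given name may never have been added. The expected behaviour afterwards, roughly (interactive session sketched here, not captured output):

    >>> import scipy
    >>> 'linalg' in scipy.__all__
    False
    >>> from scipy import *    # no longer drags in scipy.linalg and friends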
Modified: trunk/scipy/__init__.py =================================================================== --- trunk/scipy/__init__.py 2008-06-13 03:16:04 UTC (rev 4445) +++ trunk/scipy/__init__.py 2008-06-17 01:19:04 UTC (rev 4446) @@ -44,6 +44,7 @@ # Remove the linalg imported from numpy so that the scipy.linalg package can be # imported. del linalg +__all__.remove('linalg') try: from __config__ import show as show_config @@ -61,6 +62,20 @@ del _os pkgload = PackageLoader() pkgload(verbose=SCIPY_IMPORT_VERBOSE,postpone=True) + +# Remove subpackage names from __all__ such that they are not imported via +# "from scipy import *". This works around a numpy bug present in < 1.2. +subpackages = """cluster constants fftpack integrate interpolate io lib linalg +linsolve maxentropy misc ndimage odr optimize sandbox signal sparse special +splinalg stats stsci testing weave""".split() +for name in subpackages: + try: + __all__.remove(name) + except ValueError: + pass + +del name, subpackages + __doc__ += """ Available subpackages From scipy-svn at scipy.org Wed Jun 18 15:04:18 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Wed, 18 Jun 2008 14:04:18 -0500 (CDT) Subject: [Scipy-svn] r4447 - trunk/scipy/ndimage Message-ID: <20080618190418.E1E6639C6E6@scipy.org> Author: tom.waite Date: 2008-06-18 14:04:15 -0500 (Wed, 18 Jun 2008) New Revision: 4447 Modified: trunk/scipy/ndimage/_registration.py Log: Parameter checking. Replace c-extension integrated histogram thresholding with pure numpy version. Modified: trunk/scipy/ndimage/_registration.py =================================================================== --- trunk/scipy/ndimage/_registration.py 2008-06-17 01:19:04 UTC (rev 4446) +++ trunk/scipy/ndimage/_registration.py 2008-06-18 19:04:15 UTC (rev 4447) @@ -74,8 +74,9 @@ D = (imageS.shape * Z + 0.5).astype(np.int16) # for the test data, set the xyz voxel sizes for fMRI volume. M is a 4x4 matrix. - M = np.diag(imageS.diagonal() / Z) - image = np.empty((D[2],D[1],D[0]),np.uint8) + M = np.diag(imageS_mat.diagonal() / Z) + + image = np.zeros((D[2],D[1],D[0]),np.uint8) mode = 2 scale = 0 @@ -121,11 +122,9 @@ # use the 6 dim parm_vector (3 angles, 3 translations) to remap # M_inverse = get_inverse_mappings(parm_vector) - (layers, rows, cols) = image.shape # allocate the zero image - #remaped_image = np.zeros(image.size, dtype=np.uint8).reshape(layers, rows, cols) - remaped_image = np.empty(image.shape, dtype=np.uint8) + remaped_image = np.zeros(image.shape, dtype=np.uint8) step = np.array([1, 1, 1], dtype=np.int32) @@ -174,18 +173,18 @@ M_inverse = build_rotate_matrix(-parm_vector) return M_inverse -def coregister(image1, image1_mat, image2, image2_mat, multires=[4, 2], histo_fwhm=3, - ftype=1, lite=0, smhist=0, method='nmi', opt_method='powell'): +def register(image1, image1_mat, image2, image2_mat, multires=[4, 2], histo_fwhm=3, + ftype=1, lite=0, smhist=0, method='nmi', opt_method='hybrid', + optimize_function=None): """ - parm_vector = coregister(image1, image1_mat, image2, image2_mat, multires=[4, 2], histo_fwhm=3, + parm_vector = register(image1, image1_mat, image2, image2_mat, multires=[4, 2], histo_fwhm=3, ftype=1, lite=0, smhist=0, method='nmi', opt_method='powell'): - takes two images and the image process descriptor (improc) and determines the optimal alignment of the two images (measured by mutual information or cross correlation) using optimization search of 3 angle and 3 translation parameters. 
The optimization uses either the Powell or Conjugate Gradient methods in the scipy optimization - package. The optimal parameter is returned. + package. The optimal rigid body parameter is returned. Parameters ---------- @@ -216,7 +215,7 @@ information; mi is mutual information; ecc is entropy cross correlation; ncc is normalized cross correlation. mse is mean squared error. - opt_method: {'powell', 'hybrid'}, optional + opt_method: {'powell', 'cg', 'hybrid'}, optional registration is two pass. Pass 1 is low res to get close to alignment and pass 2 starts at the pass 1 optimal alignment. In powell pass 1 and 2 are powell, in hybrid pass 2 is conjugate gradient. @@ -235,65 +234,124 @@ >>> import _registration as reg >>> image1, image2, fwhm, improc = reg.demo_build_dual_volumes() - >>> parm_vector = coregister(image1, image2, fwhm, improc) + >>> parm_vector = register(image1, image2, fwhm, improc) """ - start = time.time() - parm_vector = multires_registration(image1, image1_mat, image2, image2_mat, multires, - histo_fwhm, lite, smhist, method, opt_method) - stop = time.time() - print 'Total Optimizer Time is ', (stop-start) + # do the parameter validity checking. this is specific to this 3D registration. + # make sure the image is 3D and the mats are 4x4 with nonzero diagonal + + if image1.ndim != 3: + raise ValueError, "Image 1 is not 3 dimensional" + + if image2.ndim != 3: + raise ValueError, "Image 2 is not 3 dimensional" + + if image1.dtype != np.uint8: + raise ValueError, "Image 1 is not 8 bit (required for joint histogram)" + + if image2.dtype != np.uint8: + raise ValueError, "Image 2 is not 8 bit (required for joint histogram)" + + if image1_mat.shape != (4,4): + raise ValueError, "Image1 MAT is not 4x4" + + if image2_mat.shape != (4,4): + raise ValueError, "Image2 MAT is not 4x4" + + if (np.diag(image1_mat)).prod() == 0: + raise ValueError, "Image1 MAT has a 0 on the diagonal" + + if (np.diag(image2_mat)).prod() == 0: + raise ValueError, "Image2 MAT has a 0 on the diagonal" + + if opt_method=='hybrid' and np.size(multires) != 2: + raise ValueError, "hybrid method must be 2 pass registration" + + if ftype != 0 and ftype != 1: + raise ValueError, "choose filter type 0 or 1 only" + + if lite != 0 and lite != 1: + raise ValueError, "choose histogram generation type 0 or 1 only" + + if smhist != 0 and smhist != 1: + raise ValueError, "choose histogram smoothing type 0 or 1 only" + + if method != 'nmi' and method != 'mi' and method != 'ncc'\ + and method != 'ecc' and method != 'mse': + raise ValueError, "choose cost method nmi, mi, ecc, mse, ncc" + + if opt_method != 'powell' and opt_method != 'cg' and opt_method != 'hybrid': + raise ValueError, "only optimize methods powell, cg or hybrid are supported" + + # default is to use the cost_function I provided. 
+ # this shows you can override this but the parameters will have to + # be changed for the new cost function if it is different + + if optimize_function == None: + optimize_function = cost_function + + parm_vector = multires_registration(optimize_function, image1, image1_mat, image2, image2_mat, + multires, histo_fwhm, lite, smhist, method, opt_method) + return parm_vector -def multires_registration(image1, image1_mat, image2, image2_mat, multires, histo_fwhm, - lite, smhist, method, opt_method): +def multires_registration(optimize_function, image1, image1_mat, image2, image2_mat, + multires, histo_fwhm, lite, smhist, method, opt_method): """ - x = multires_registration(image1, image2, imdata, lite, smhist, method, opt_method) - to be called by coregister() which optionally does 3D image filtering and - provies timing for registration. + to be called by register() which does parameter validation Parameters ---------- - image1 : {nd_array} image1 is the source image to be remapped during the registration. + image1_mat : {nd_array} + image1_mat is the source image MAT image2 : {nd_array} image2 is the reference image that image1 gets mapped to. - imdata : {dictionary} - image sampling and optimization information. - lite : {integer} + image2_mat : {nd_array} + image2_mat is the source image MAT + multires: {list}, optional + the volume subsample values for each pass of the registration. + the default is 2 passes with subsample 4 in pass 1 and subsample 2 in pass 2 + histo_fwhm : {int}, optional + used for the filter kernel in the low pass filter of the joint histogram + ftype : {0, 1}, optional + flag for type of low pass filter. 0 is Gauss-Spline + 1 is pure Gauss. Sigma determined from volume sampling info. + lite : {0, 1}, optional lite of 1 is to jitter both images during resampling. 0 is to not jitter. jittering is for non-aliased volumes. - smhist: {integer} + smhist: {0, 1}, optional flag for joint histogram low pass filtering. 0 for no filter, 1 for do filter. - method: {'nmi', 'mi', 'ncc', 'ecc', 'mse'} + method: {'nmi', 'mi', 'ncc', 'ecc', 'mse'}, optional flag for type of registration metric. nmi is normalized mutual information; mi is mutual information; ecc is entropy cross correlation; ncc is normalized cross correlation. mse is mean - square error. - opt_method: {'powell', 'hybrid'} + squared error. + opt_method: {'powell', 'cg', 'hybrid'}, optional registration is two pass. Pass 1 is low res to get close to alignment and pass 2 starts at the pass 1 optimal alignment. In powell pass 1 and 2 are powell, in hybrid pass 2 is conjugate gradient. + Returns ------- - x : {nd_array} + parm_vector : {nd_array} this is the optimal alignment (6-dim) array with 3 angles and 3 translations. Examples -------- - (calling this from coregister which optionally filters image2) + (calling this from register which optionally filters image2) >>> import numpy as NP >>> import _registration as reg >>> image1, mat1, image2, mat2 = reg.demo_build_dual_volumes() - >>> parm_vector = coregister(image1, image2, imdata) + >>> parm_vector = register(image1, image2, imdata) """ ret_histo=0 @@ -308,8 +366,10 @@ for i in loop: # this is the volume subsample step[:] = multires[i] - optfunc_args = (image1, image1_mat, image2, image2_mat, step, fwhm, lite, - smhist, method, ret_histo) + # optfunc_args is specific to the cost_function in this file + # this will need to change if you use another optimize_function. 
+ optfunc_args = (image1, image1_mat, image2, image2_mat, step, histo_fwhm, + lite, smhist, method, ret_histo) p_args = (optfunc_args,) if opt_method=='powell': print 'POWELL multi-res registration step size ', step @@ -324,8 +384,8 @@ print 'Hybrid POWELL multi-res registration step size ', step print 'vector ', x lite = 0 - optfunc_args = (image1, image1_mat, image2, image2_mat, step, fwhm, lite, - smhist, method, ret_histo) + optfunc_args = (image1, image1_mat, image2, image2_mat, step, histo_fwhm, + lite, smhist, method, ret_histo) p_args = (optfunc_args,) x = fmin_powell(optimize_function, x, args=p_args, callback=callback_powell) elif i==1: @@ -417,7 +477,7 @@ return kernel -def filter_image_3D(imageRaw, fwhm, ftype=2): +def filter_image_3D(imageRaw, fwhm, ftype=2, give_2D=0): """ image_F_xyz = filter_image_3D(imageRaw, fwhm, ftype=2): does 3D separable digital filtering using scipy.ndimage.correlate1d @@ -427,9 +487,9 @@ imageRaw : {nd_array} the unfiltered 3D volume image fwhm : {int} - used for kernel width + used for kernel width. this is 3 elements (one for each dimension) ktype: {1, 2}, optional - kernel type. 1 is Gauss convoled with spline, 2 is Gauss + kernel type. 1 is Gauss convoled with spline (SPM), 2 is Gauss Returns ------- @@ -442,32 +502,37 @@ >>> import _registration as reg >>> image1, image2, imdata = reg.demo_build_dual_volumes() >>> ftype = 1 - >>> image_Filter_xyz = filter_image_3D(image1['data'], image1['fwhm'], ftype) + >>> image_Filter_xyz = filter_image_3D(image, fwhm, ftype) >>> image1['data'] = image_Filter_xyz """ - p = np.ceil(2*fwhm[0]).astype(int) - x = np.array(range(-p, p+1)) + p = np.ceil(2*fwhm).astype(int) + x = np.array(range(-p[0], p[0]+1)) kernel_x = smooth_kernel(fwhm[0], x, ktype=ftype) - p = np.ceil(2*fwhm[1]).astype(int) - x = np.array(range(-p, p+1)) + x = np.array(range(-p[1], p[1]+1)) kernel_y = smooth_kernel(fwhm[1], x, ktype=ftype) - p = np.ceil(2*fwhm[2]).astype(int) - x = np.array(range(-p, p+1)) + x = np.array(range(-p[2], p[2]+1)) kernel_z = smooth_kernel(fwhm[2], x, ktype=ftype) output=None - # 3D filter in 3 1D separable stages + # 3D filter in 3 1D separable stages. keep the image + # names at each stage separate in case you need them + # for example may need an image that is 2D slice filtered only axis = 0 image_F_x = correlate1d(imageRaw, kernel_x, axis, output) axis = 1 image_F_xy = correlate1d(image_F_x, kernel_y, axis, output) axis = 2 image_F_xyz = correlate1d(image_F_xy, kernel_z, axis, output) - return image_F_xyz + if give_2D==0: + return image_F_xyz + else: + return image_F_xyz, image_F_xy + + def build_fwhm(M, S): """ fwhm = build_fwhm(M, S) @@ -512,10 +577,10 @@ # return the 3D Gaussian kernel width (xyz) return fwhm -def optimize_function(x, optfunc_args): +def cost_function(x, optfunc_args): """ - cost = optimize_function(x, optfunc_args) --- OR --- - cost, joint_histogram = optimize_function(x, optfunc_args) + cost = cost_function(x, optfunc_args) --- OR --- + cost, joint_histogram = cost_function(x, optfunc_args) computes the alignment between 2 volumes using cross correlation or mutual information metrics. 
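(Aside on the filter_image_3D hunk above: separable filtering runs one 1D correlation per axis, which matches a full 3D correlation with the outer-product kernel at a fraction of the cost. A self-contained sketch with an arbitrary smoothing kernel:

    import numpy as np
    from scipy.ndimage import correlate1d

    vol = np.random.rand(8, 16, 16)
    k = np.array([0.25, 0.5, 0.25])      # any 1D smoothing kernel
    out = correlate1d(vol, k, axis=0)    # filter along z
    out = correlate1d(out, k, axis=1)    # then y
    out = correlate1d(out, k, axis=2)    # then x

)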
In both the 8 bit joint histogram of the 2 images is @@ -590,7 +655,7 @@ >>> ret_histo = 1 >>> optfunc_args = (image1, image2, imdata['step'], imdata['fwhm'], lite, smhist, method, ret_histo) >>> x = np.zeros(6, dtype=np.float64) - >>> return cost, joint_histogram = reg.optimize_function(x, optfunc_args) + >>> return cost, joint_histogram = reg.cost_function(x, optfunc_args) """ @@ -625,8 +690,7 @@ # allocate the zero image #(layers, rows, cols) = image_F.shape - #remap_image_F = np.zeros(image_F.size, dtype=np.uint8).reshape(layers, rows, cols) - remap_image_F = np.empty(image_F.shape, dtype=np.uint8) + remap_image_F = np.zeros(image_F.shape, dtype=np.uint8) # trilinear interpolation mapping. reg.register_linear_resample(image_F, remap_image_F, composite, sample_vector) cost = (np.square(image_G-remap_image_F)).mean() @@ -792,7 +856,7 @@ return rot_matrix -def build_test_volume(imagedesc, S=[15.0, 25.0, 10.0]): +def build_test_volume(imagedesc, S=[1500.0, 2500.0, 1000.0]): """ build a 3D Gaussian volume. user passes in image dims in imagedesc @@ -831,21 +895,17 @@ aa = (np.square(a))/sigma[0] bb = (np.square(b))/sigma[1] cc = (np.square(c))/sigma[2] - volume3D = np.exp(-(aa + bb + cc)) + volume3D = (255.0*np.exp(-(aa + bb + cc))).astype(np.uint8) return volume3D -def load_volume(imagedesc, imagename=None, threshold=0.999, debug=0): +def load_volume(imagedesc, imagename=None): """ - image = load_volume(imagedesc, imagename=None, threshold=0.999, debug=0) --- OR --- - image, h, ih, index = load_volume(imagedesc, imagename=None, threshold=0.999, debug=0) - gets an image descriptor and optional filename and returns a scaled 8 bit volume. The - scaling is designed to make full use of the 8 bits (ignoring high amplitude outliers). - The current method uses numpy fromfile and will be replaced by neuroimage nifti load. + returns an nd_array from the filename or blank image. used for testing. Parameters ---------- @@ -856,47 +916,21 @@ name of image file. No name creates a blank image that is used for creating a rotated test image or image rescaling. - threshold : {float} : optional - this is the threshold for upper cutoff in the 8 bit scaling. The volume histogram - and integrated histogram is computed and the upper amplitude cutoff is where the - integrated histogram crosses the value set in the threshold. setting threshold to - 1.0 means the scaling is done over the min to max amplitude range. - - debug : {0, 1} : optional - when debug=1 the method returns the volume histogram, integrated histogram and the - amplitude index where the provided threshold occured. - Returns ------- image : {nd_array} the volume data assoicated with the filename or a blank volume of the same dimensions as specified in imagedesc. - --- OR --- (if debug = 1) + M : {nd_array} + the voxel-to-physical affine matrix (mat) - image : {nd_array} - the volume data assoicated with the filename or a blank volume of the same - dimensions as specified in imagedesc. - - h : {nd_array} - the volume 1D amplitude histogram - - ih : {nd_array} - the volume 1D amplitude integrated histogram - - index : {int} - the amplitude (histogram index) where the integrated histogram - crosses the 'threshold' provided. 
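(Aside: the integrated-histogram scaling described here, which scale_image takes over further down, can be sketched in plain numpy; the threshold 0.999 and the synthetic volume are illustrative only:

    import numpy as np

    image = np.random.gamma(2.0, 200.0, (4, 64, 64))   # volume with a long amplitude tail
    h, edges = np.histogram(image, bins=256)
    ih = np.cumsum(h).astype(np.float64)
    ih = ih / ih[-1]                            # normalized integrated histogram
    index = np.where(ih >= 0.999)[0][0]         # first bin past the threshold
    scale = 255.0 / (edges[index] - image.min())
    image8 = (scale * (image - image.min())).clip(0, 255).astype(np.uint8)

Outliers above the cutoff saturate at 255 instead of compressing the useful amplitude range.)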
- Examples -------- - >>> import numpy as NP >>> import _registration as reg >>> anat_desc = reg.load_anatMRI_desc() - >>> image_anat, h, ih, index = reg.load_volume(anat_desc, imagename='ANAT1_V0001.img', debug=1) - >>> index - 210 + >>> image, M = reg.load_volume(anat_desc, imagename='ANAT1_V0001.img') """ @@ -906,8 +940,6 @@ if imagename == None: # imagename of none means to create a blank image image = np.zeros([imagedesc['layers'],imagedesc['rows'],imagedesc['cols']],dtype=np.uint16) - #image = np.zeros(imagedesc['layers']*imagedesc['rows']*imagedesc['cols'], - # dtype=np.uint16).reshape(imagedesc['layers'], imagedesc['rows'], imagedesc['cols']) else: image = np.fromfile(imagename, dtype=np.uint16).reshape(imagedesc['layers'], imagedesc['rows'], imagedesc['cols']); @@ -922,32 +954,133 @@ if imagename == None: # no voxels to scale to 8 bits image = image.astype(np.uint8) - return image, M - # 8 bit scale with threshold clip of the volume integrated histogram + return image, M + + + +def scale_image(image, max_amp=255, image_type=np.uint8, threshold=0.999, fetch_ih=0): + + """ + scale and threshold clip the volume using the integrated histogram + to set the high threshold + + Parameters + ---------- + image : {nd_array} + raw unscaled volume + + max_amp : int (default 255) + the maximum value of the scaled image + + image_type : nd_array dtype (default uint8) + the type of the volume to return. + + threshold : float (default 0.999) + the value of the normalized integrated histogram + that when reached sets the high threshold index + + Returns + ------- + image : {nd_array} + the scaled volume + ih : {nd_array} + the integrated histogram. can be used for image display + purpose (histogram equalization) + + """ + max = image.max() min = image.min() - ih = np.zeros(max-min+1, dtype=np.float64); - h = np.zeros(max-min+1, dtype=np.float64); - if threshold <= 0: - threshold = 0.999 - elif threshold > 1.0: - threshold = 1.0 - # get the integrated histogram of the volume and get max from - # the threshold crossing in the integrated histogram - index = reg.register_image_threshold(image, h, ih, threshold) - scale = 255.0 / (index-min) - # generate the scaled 8 bit image - image = (scale*(image.astype(np.float)-min)) - image[image>255] = 255 - image = image.astype(np.uint8) - if debug == 1: - return image, M, h, ih, index + if max == 0 and min == 0: + raise ValueError, "Zero image. cannot be scaled" + + # need range of pixels for the number of bins + h, edges = np.histogram(image, bins=(max-min)) + ih = (np.cumsum(h)).astype(np.float64) + # normalize the integrated histogram + ih = ih / ih.max() + indices = np.where(ih >= threshold) + # wind up getting all the indices where the ih >= threshold + # and only need the first index. tuple has one nd_array and + # get the 0 element from it ([0][0]) + index = indices[0][0] + scale = float(max_amp) / (index-min) + image = (scale*(image.astype(np.float)-min)) + image[image>max_amp] = max_amp + # down type. usually will go from float to 8 bit (needed for the 8 bit joint histogram) + image = image.astype(image_type) + + if fetch_ih == 1: + return image, ih else: - return image, M + return image +def check_alignment(image1, image1_mat, image2, image2_mat, histo_fwhm=3, method='ncc', lite=0, + smhist=0, alpha=0.0, beta=0.0, gamma=0.0, Tx=0, Ty=0, Tz=0, ret_histo=0): + + """ + test the cost function and (optional) view the joint histogram. can be used + during intra-modal registration to measure the current alignment (return + the cross correlation). 
would measure before and after registration + + + """ + + # do the parameter validity checking. this is specific to this 3D registration. + # make sure the image is 3D and the mats are 4x4 with nonzero diagonal + + if image1.ndim != 3: + raise ValueError, "Image 1 is not 3 dimensional" + + if image2.ndim != 3: + raise ValueError, "Image 2 is not 3 dimensional" + + if image1.dtype != np.uint8: + raise ValueError, "Image 1 is not 8 bit (required for joint histogram)" + + if image2.dtype != np.uint8: + raise ValueError, "Image 2 is not 8 bit (required for joint histogram)" + + if image1_mat.shape != (4,4): + raise ValueError, "Image1 MAT is not 4x4" + + if image2_mat.shape != (4,4): + raise ValueError, "Image2 MAT is not 4x4" + + if (np.diag(image1_mat)).prod() == 0: + raise ValueError, "Image1 MAT has a 0 on the diagonal" + + if (np.diag(image2_mat)).prod() == 0: + raise ValueError, "Image2 MAT has a 0 on the diagonal" + + if method != 'nmi' and method != 'mi' and method != 'ncc'\ + and method != 'ecc' and method != 'mse': + raise ValueError, "choose cost method nmi, mi, ecc, mse, ncc" + + P = np.zeros(6, dtype=np.float64); + P[0] = alpha + P[1] = beta + P[2] = gamma + P[3] = Tx + P[4] = Ty + P[5] = Tz + + step = np.array([1, 1, 1], dtype=np.int32) + optfunc_args = (image1, image1_mat, image2, image2_mat, step, histo_fwhm, lite, + smhist, method, ret_histo) + + if ret_histo: + cost, joint_histogram = cost_function(P, optfunc_args) + return cost, joint_histogram + else: + cost = cost_function(P, optfunc_args) + return cost + + + # # ---- demo/debug routines ---- # @@ -981,50 +1114,20 @@ return files_fMRI -def check_alignment(image1, image1_mat, image2, image2_mat, histo_fwhm=3, method='ncc', lite=0, - smhist=0, alpha=0.0, beta=0.0, gamma=0.0, Tx=0, Ty=0, Tz=0, ret_histo=0): - - # - # to test the cost function and (optional) view the joint histogram - # default of use of ncc for testing the cross-correlation as a metric - # of alignment - # - P = np.zeros(6, dtype=np.float64); - P[0] = alpha - P[1] = beta - P[2] = gamma - P[3] = Tx - P[4] = Ty - P[5] = Tz - - step = np.array([1, 1, 1], dtype=np.int32) - optfunc_args = (image1, image1_mat, image2, image2_mat, step, histo_fwhm, lite, - smhist, method, ret_histo) - - #optfunc_args = (image1, image2, imdata['step'], imdata['fwhm'], lite, smhist, method, ret_histo) - - if ret_histo: - cost, joint_histogram = optimize_function(P, optfunc_args) - return cost, joint_histogram - else: - cost = optimize_function(P, optfunc_args) - return cost - def build_scale_volume(image, mat, scale): # # rescale the 'mat' (voxel to physical mapping matrix) # + M = mat * scale (layers, rows, cols) = image.shape - M = mat * scale # dimensions D = np.zeros(3, dtype=np.int32); Z = np.zeros(3, dtype=np.float64); D[0] = rows/scale D[1] = cols/scale D[2] = layers/scale - #image2 = np.zeros(D.prod(), dtype=np.uint8).reshape(D[2], D[0], D[1]); - image2 = np.empty([D[2], D[0], D[1]], dtype=np.uint8) + image2 = np.zeros([D[2], D[0], D[1]], dtype=np.uint8) mode = 1; reg.register_volume_resample(image, image2, Z, scale, mode) return image2, M @@ -1036,7 +1139,7 @@ builds a volume and a scaled-rotated version for coreg testing image1, mat1, image2, mat2 = reg.demo_build_dual_volumes() - x = reg.coregister(image1, mat1, image2, mat2, method='ncc', lite=1) + x = reg.register(image1, mat1, image2, mat2, method='ncc', lite=1) image2r = reg.remap_image(image2, x, resample='cubic') image2rz = reg.resize_image(image2r, mat1) @@ -1050,6 +1153,7 @@ anat_desc = load_anatMRI_desc() image1, 
mat1 = load_volume(anat_desc, imagename='ANAT1_V0001.img') image2, mat2 = load_volume(anat_desc, imagename=None) + image1 = scale_image(image1) P = np.zeros(6, dtype=np.float64); P[0] = alpha P[1] = beta @@ -1064,12 +1168,75 @@ image2, mat2 = build_scale_volume(image2, mat2, scale) return image1, mat1, image2, mat2 +def tests(image1, mat1, image2, mat2): + + # for same image, using the lite method the off-diagonal is zero + cost, joint = reg.check_alignment(image1, mat1, image2, mat2, ret_histo=1, lite=1) + my_diag = joint.diagonal() + Z = np.diag(my_diag) + W = joint - Z + W[abs(W) < 1e-10] = 0.0 + + if W.max() != 0.0 and W.min() != 0.0: + print 'joint histogram is not diagonal ' + if abs(cost) < 0.99: + print 'cross correlation is too small' + + # for same image, not using the lite method the off-diagonal is non-zero + cost, joint = reg.check_alignment(image1, mat1, image2, mat2, ret_histo=1, lite=0) + my_diag = joint.diagonal() + Z = np.diag(my_diag) + W = joint - Z + W[abs(W) < 1e-10] = 0.0 + + if W.max() == 0.0 and W.min() == 0.0: + print 'joint histogram is diagonal and needs off-diagonals' + if abs(cost) < 0.99: + print 'cross correlation is too small' + + # call w/o returning the joint histogram + cost = reg.check_alignment(image1, mat1, image2, mat2, ret_histo=0, lite=1) + if abs(cost) < 0.99: + print 'cross correlation is too small' + + cost = reg.check_alignment(image1, mat1, image2, mat2, ret_histo=0, lite=0) + if abs(cost) < 0.99: + print 'cross correlation is too small' + + + image1 = np.zeros([64,64,64],np.uint8) + image2 = np.zeros([64,64,64],np.uint8) + image3 = np.zeros([64,64],np.uint8) + mat1 = np.eye(4) + mat2 = np.eye(3) + mat3 = np.zeros([4,4]) + # test with wrong dim image, wrong dim mat and mat with zeros on diagonal + + # wrong image dim + assertRaises(ValueError, check_alignment, image1, mat1, image3, mat1) + # wrong mat dim + assertRaises(ValueError, check_alignment, image1, mat1, image2, mat2) + # mat with zeros on diagonal + assertRaises(ValueError, check_alignment, image1, mat1, image2, mat3) + + + + + + + + + + + + def demo_rotate_fMRI_volume(fMRI_volume, desc, x): # # return rotated fMRIVol. # image = load_volume(desc, imagename=None) + image = scale_image(image) step = np.array([1, 1, 1], dtype=np.int32) M = build_rotate_matrix(x) # rotate volume. 
cubic spline interpolation means the volume is NOT low pass filtered @@ -1142,8 +1309,12 @@ # blank volume that will be used for ensemble average for fMRI volumes # prior to functional-anatomical coregistration +<<<<<<< .mine + ave_fMRI_volume = np.zeros(fmri_desc['layers']*fmri_desc['rows']*fmri_desc['cols'], dtype=np.float64) +======= ave_fMRI_volume = np.zeros([fmri_desc['layers']*fmri_desc['rows']*fmri_desc['cols']], dtype=np.float64) +>>>>>>> .r4446 count = 0 number_volumes = len(fMRIdata) @@ -1174,7 +1345,7 @@ # the measure prior to alignment measures[i]['cost'] = check_alignment(imageF, fmri_mat, imageG, fmri_mat, method='ncc', lite=histo_method, smhist=smooth_histo) - x = coregister(imageF, fmri_mat, imageG, fmri_mat, lite=histo_method, method='ncc', + x = register(imageF, fmri_mat, imageG, fmri_mat, lite=histo_method, method='ncc', opt_method=optimizer_method, smhist=smooth_histo) measures[i]['align_rotate'][0:6] = x[0:6] measures[i]['align_cost'] = check_alignment(imageF, fmri_mat, imageG, fmri_mat, @@ -1200,7 +1371,7 @@ if smooth_image: imageF_anat = filter_image_3D(imageF_anat, anat_fwhm, ftype) - x = coregister(imageF_anat, anat_mat, ave_fMRI_volume, fmri_mat, lite=histo_method, + x = register(imageF_anat, anat_mat, ave_fMRI_volume, fmri_mat, lite=histo_method, method='nmi', opt_method=optimizer_method, smhist=smooth_histo) print 'functional-anatomical align parameters ' From scipy-svn at scipy.org Wed Jun 18 18:15:06 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Wed, 18 Jun 2008 17:15:06 -0500 (CDT) Subject: [Scipy-svn] r4448 - trunk/scipy/ndimage/tests Message-ID: <20080618221506.804B739CA5C@scipy.org> Author: tom.waite Date: 2008-06-18 17:15:03 -0500 (Wed, 18 Jun 2008) New Revision: 4448 Added: trunk/scipy/ndimage/tests/test_registration.py Log: added for testing registration Added: trunk/scipy/ndimage/tests/test_registration.py =================================================================== --- trunk/scipy/ndimage/tests/test_registration.py 2008-06-18 19:04:15 UTC (rev 4447) +++ trunk/scipy/ndimage/tests/test_registration.py 2008-06-18 22:15:03 UTC (rev 4448) @@ -0,0 +1,186 @@ +import math +import numpy as np +import scipy.ndimage._registration as reg +from scipy.testing import * + +def load_desc(): + # this is for a 256x256x90 volume with 0.9375 x 0.9375 * 1.5 mm voxel sizes + rows = 256 + cols = 256 + layers = 90 + xsamp = 0.9375 + ysamp = 0.9375 + zsamp = 1.5 + desc = {'rows' : rows, 'cols' : cols, 'layers' : layers, + 'sample_x' : xsamp, 'sample_y' : ysamp, 'sample_z' : zsamp} + return desc + +def build_volume(imagedesc, S=[1500.0, 2500.0, 1000.0]): + + """ + build a 3D Gaussian volume. 
user passes in image dims in imagedesc + the sigma for each axis is S[3] where 0=z, 1=y, 2=x + + volume3D = build_test_volume(imagedesc, S) + + Parameters + ---------- + imagedesc : {dictionary} + volume dimensions and sampling + + S : {tuple} + the Gaussian sigma for Z, Y and X + + Returns + ------- + + volume3D : {nd_array} + the 3D volume for testing + + """ + layers = imagedesc['layers'] + rows = imagedesc['rows'] + cols = imagedesc['cols'] + + L = layers/2 + R = rows/2 + C = cols/2 + + # build coordinates for 3D Gaussian volume + # coordinates are centered at (0, 0, 0) + [a, b, c] = np.mgrid[-L:L, -R:R, -C:C] + + sigma = np.array([S[0], S[1], S[2]]) + aa = (np.square(a))/sigma[0] + bb = (np.square(b))/sigma[1] + cc = (np.square(c))/sigma[2] + volume3D = (255.0*np.exp(-(aa + bb + cc))).astype(np.uint8) + + return volume3D + + # self.failUnless(diff(output, tcov) < eps) +class TestRegistration(TestCase): + + def test_affine_matrix_build_1(self): + "test_affine_matrix_build_1" + P = np.zeros(6) + M = reg.build_rotate_matrix(P) + E = np.eye(4) + match = (E==M).all() + assert_equal(match, True) + return + + def test_affine_matrix_build_2(self): + "test_affine_matrix_build_2" + P = np.zeros(6) + P[0] = 1.0 + M = reg.build_rotate_matrix(P) + E = np.array([ + [ 1. , 0. , 0. , 0. ], + [ 0. , 0.9998477 , 0.01745241, 0. ], + [ 0. , -0.01745241, 0.9998477 , 0. ], + [ 0. , 0. , 0. , 1. ] + ]) + assert_array_almost_equal(E, M, decimal=6) + return + + def test_affine_matrix_build_3(self): + "test_affine_matrix_build_3" + P = np.zeros(6) + P[0] = 1.0 + P[1] = 1.0 + P[2] = 1.0 + M = reg.build_rotate_matrix(P) + E = np.array([ + [ 0.99969541, 0.01744975, 0.01745241, 0. ], + [-0.01775429, 0.9996901 , 0.01744975, 0. ], + [-0.0171425 , -0.01775429, 0.99969541, 0. ], + [ 0. , 0. , 0. , 1. 
] + ]) + assert_array_almost_equal(E, M, decimal=6) + return + + def test_autoalign_histogram_1(self): + "test_autoalign_histogram_1" + desc = load_desc() + gvol = build_volume(desc) + mat = np.eye(4) + cost, joint = reg.check_alignment(gvol, mat, gvol, mat, ret_histo=1, lite=1) + # confirm that with lite=1 only have non-zero on the main diagonal + j_diag = joint.diagonal() + Z = np.diag(j_diag) + W = joint - Z + # clip the near-zero fuzz + W[abs(W) < 1e-10] = 0.0 + assert_equal(W.max(), 0.0) + return + + def test_autoalign_histogram_2(self): + "test_autoalign_histogram_2" + desc = load_desc() + gvol = build_volume(desc) + mat = np.eye(4) + cost, joint = reg.check_alignment(gvol, mat, gvol, mat, ret_histo=1, lite=0) + # confirm that with lite=0 DO have non-zero on the main diagonal + j_diag = joint.diagonal() + Z = np.diag(j_diag) + W = joint - Z + # clip the near-zero fuzz + W[abs(W) < 1e-10] = 0.0 + s = (W.max() > 0.0) + # make sure there are off-diagonal values + assert_equal(s, True) + return + + def test_autoalign_ncc_value_1(self): + "test_autoalign_ncc_value_1" + desc = load_desc() + gvol = build_volume(desc) + mat = np.eye(4) + cost = reg.check_alignment(gvol, mat, gvol, mat, method='ncc', lite=1) + # confirm the cross correlation is near 1.0 + t = abs(cost) + 0.0001 + s = (t >= 1.0) + assert_equal(s, True) + return + + def test_autoalign_ncc_value_2(self): + "test_autoalign_ncc_value_2" + desc = load_desc() + gvol = build_volume(desc) + mat = np.eye(4) + cost = reg.check_alignment(gvol, mat, gvol, mat, method='ncc', lite=0) + # confirm the cross correlation is near 1.0 + t = abs(cost) + 0.0001 + s = (t >= 1.0) + assert_equal(s, True) + return + + def test_autoalign_nmi_value_1(self): + "test_autoalign_nmi_value_1" + desc = load_desc() + gvol = build_volume(desc) + mat = np.eye(4) + cost = reg.check_alignment(gvol, mat, gvol, mat, method='nmi', lite=1) + # confirm the normalized mutual information is near -2.0 + assert_almost_equal(cost, -2.0, decimal=6) + return + + def test_autoalign_nmi_value_2(self): + "test_autoalign_nmi_value_2" + desc = load_desc() + gvol = build_volume(desc) + mat = np.eye(4) + cost = reg.check_alignment(gvol, mat, gvol, mat, method='nmi', lite=0) + # confirm the normalized mutual information is near -2.0 + assert_almost_equal(cost, -1.7973048186515352, decimal=6) + return + + + +if __name__ == "__main__": + #nose.runmodule() + nose.run(argv=['', __file__]) + + + From scipy-svn at scipy.org Wed Jun 18 18:36:34 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Wed, 18 Jun 2008 17:36:34 -0500 (CDT) Subject: [Scipy-svn] r4449 - trunk/scipy/ndimage Message-ID: <20080618223634.9B6D439CAA4@scipy.org> Author: tom.waite Date: 2008-06-18 17:36:32 -0500 (Wed, 18 Jun 2008) New Revision: 4449 Modified: trunk/scipy/ndimage/_registration.py Log: remove demo methods which go to nipy registration Modified: trunk/scipy/ndimage/_registration.py =================================================================== --- trunk/scipy/ndimage/_registration.py 2008-06-18 22:15:03 UTC (rev 4448) +++ trunk/scipy/ndimage/_registration.py 2008-06-18 22:36:32 UTC (rev 4449) @@ -856,7 +856,7 @@ return rot_matrix -def build_test_volume(imagedesc, S=[1500.0, 2500.0, 1000.0]): +def build_gauss_volume(imagedesc, S=[1500.0, 2500.0, 1000.0]): """ build a 3D Gaussian volume. user passes in image dims in imagedesc @@ -900,65 +900,6 @@ return volume3D - -def load_volume(imagedesc, imagename=None): - - """ - - returns an nd_array from the filename or blank image. used for testing. 
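(Aside on the r4448 expectations above: build_rotate_matrix takes its angles in degrees, so the matrix E in test_affine_matrix_build_2 is a one-degree rotation about x. A quick numpy check, with the sign convention read off the expected matrix:

    import numpy as np

    th = 1.0 * np.pi / 180.0
    Rx = np.array([[1.0,  0.0,         0.0,        0.0],
                   [0.0,  np.cos(th),  np.sin(th), 0.0],
                   [0.0, -np.sin(th),  np.cos(th), 0.0],
                   [0.0,  0.0,         0.0,        1.0]])
    # cos(1 deg) = 0.9998477..., sin(1 deg) = 0.01745241..., matching E

)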
- - Parameters - ---------- - imagedesc : {dictionary} - imagedesc is the descriptor of the image to be read. - - imagename : {string} : optional - name of image file. No name creates a blank image that is used for creating - a rotated test image or image rescaling. - - Returns - ------- - image : {nd_array} - the volume data assoicated with the filename or a blank volume of the same - dimensions as specified in imagedesc. - - M : {nd_array} - the voxel-to-physical affine matrix (mat) - - Examples - -------- - - >>> import _registration as reg - >>> anat_desc = reg.load_anatMRI_desc() - >>> image, M = reg.load_volume(anat_desc, imagename='ANAT1_V0001.img') - - - """ - - # load MRI or fMRI volume and return an autoscaled 8 bit image. - # autoscale is using integrated histogram to deal with outlier high amplitude voxels - if imagename == None: - # imagename of none means to create a blank image - image = np.zeros([imagedesc['layers'],imagedesc['rows'],imagedesc['cols']],dtype=np.uint16) - else: - image = np.fromfile(imagename, - dtype=np.uint16).reshape(imagedesc['layers'], imagedesc['rows'], imagedesc['cols']); - - # the mat (voxel to physical) matrix - M = np.eye(4, dtype=np.float64); - # for now just the sample size (mm units) in x, y and z - M[0][0] = imagedesc['sample_x'] - M[1][1] = imagedesc['sample_y'] - M[2][2] = imagedesc['sample_z'] - - if imagename == None: - # no voxels to scale to 8 bits - image = image.astype(np.uint8) - - return image, M - - - def scale_image(image, max_amp=255, image_type=np.uint8, threshold=0.999, fetch_ih=0): """ @@ -1081,40 +1022,6 @@ -# -# ---- demo/debug routines ---- -# - -def load_anatMRI_desc(): - # this is for demo on the test MRI and fMRI volumes - rows = 256 - cols = 256 - layers = 90 - xsamp = 0.9375 - ysamp = 0.9375 - zsamp = 1.5 - desc = {'rows' : rows, 'cols' : cols, 'layers' : layers, - 'sample_x' : xsamp, 'sample_y' : ysamp, 'sample_z' : zsamp} - return desc - -def load_fMRI_desc(): - # this is for demo on the test MRI and fMRI volumes - rows = 64 - cols = 64 - layers = 28 - xsamp = 3.75 - ysamp = 3.75 - zsamp = 5.0 - desc = {'rows' : rows, 'cols' : cols, 'layers' : layers, - 'sample_x' : xsamp, 'sample_y' : ysamp, 'sample_z' : zsamp} - return desc - -def read_fMRI_directory(path): - files_fMRI = glob.glob(path) - return files_fMRI - - - def build_scale_volume(image, mat, scale): # # rescale the 'mat' (voxel to physical mapping matrix) @@ -1133,263 +1040,5 @@ return image2, M -def demo_build_dual_volumes(scale=2, alpha=3.0, beta=4.0, gamma=5.0, Tx = 0.0, Ty = 0.0, Tz = 0.0): - """ - demo with (must have file ANAT1_V0001.img) - builds a volume and a scaled-rotated version for coreg testing - image1, mat1, image2, mat2 = reg.demo_build_dual_volumes() - x = reg.register(image1, mat1, image2, mat2, method='ncc', lite=1) - image2r = reg.remap_image(image2, x, resample='cubic') - image2rz = reg.resize_image(image2r, mat1) - """ - # - # this is for coreg MRI / fMRI scale test. The volume is anatomical MRI. - # the image is rotated in 3D. after rotation the image is scaled. - # - - step = np.array([1, 1, 1], dtype=np.int32) - anat_desc = load_anatMRI_desc() - image1, mat1 = load_volume(anat_desc, imagename='ANAT1_V0001.img') - image2, mat2 = load_volume(anat_desc, imagename=None) - image1 = scale_image(image1) - P = np.zeros(6, dtype=np.float64); - P[0] = alpha - P[1] = beta - P[2] = gamma - P[3] = Tx - P[4] = Ty - P[5] = Tz - M = build_rotate_matrix(P) - # rotate volume. 
linear interpolation means the volume is low pass filtered - reg.register_linear_resample(image1, image2, M, step) - # subsample volume - image2, mat2 = build_scale_volume(image2, mat2, scale) - return image1, mat1, image2, mat2 - -def tests(image1, mat1, image2, mat2): - - # for same image, using the lite method the off-diagonal is zero - cost, joint = reg.check_alignment(image1, mat1, image2, mat2, ret_histo=1, lite=1) - my_diag = joint.diagonal() - Z = np.diag(my_diag) - W = joint - Z - W[abs(W) < 1e-10] = 0.0 - - if W.max() != 0.0 and W.min() != 0.0: - print 'joint histogram is not diagonal ' - if abs(cost) < 0.99: - print 'cross correlation is too small' - - # for same image, not using the lite method the off-diagonal is non-zero - cost, joint = reg.check_alignment(image1, mat1, image2, mat2, ret_histo=1, lite=0) - my_diag = joint.diagonal() - Z = np.diag(my_diag) - W = joint - Z - W[abs(W) < 1e-10] = 0.0 - - if W.max() == 0.0 and W.min() == 0.0: - print 'joint histogram is diagonal and needs off-diagonals' - if abs(cost) < 0.99: - print 'cross correlation is too small' - - # call w/o returning the joint histogram - cost = reg.check_alignment(image1, mat1, image2, mat2, ret_histo=0, lite=1) - if abs(cost) < 0.99: - print 'cross correlation is too small' - - cost = reg.check_alignment(image1, mat1, image2, mat2, ret_histo=0, lite=0) - if abs(cost) < 0.99: - print 'cross correlation is too small' - - - image1 = np.zeros([64,64,64],np.uint8) - image2 = np.zeros([64,64,64],np.uint8) - image3 = np.zeros([64,64],np.uint8) - mat1 = np.eye(4) - mat2 = np.eye(3) - mat3 = np.zeros([4,4]) - # test with wrong dim image, wrong dim mat and mat with zeros on diagonal - - # wrong image dim - assertRaises(ValueError, check_alignment, image1, mat1, image3, mat1) - # wrong mat dim - assertRaises(ValueError, check_alignment, image1, mat1, image2, mat2) - # mat with zeros on diagonal - assertRaises(ValueError, check_alignment, image1, mat1, image2, mat3) - - - - - - - - - - - - -def demo_rotate_fMRI_volume(fMRI_volume, desc, x): - # - # return rotated fMRIVol. - # - - image = load_volume(desc, imagename=None) - image = scale_image(image) - step = np.array([1, 1, 1], dtype=np.int32) - M = build_rotate_matrix(x) - # rotate volume. cubic spline interpolation means the volume is NOT low pass filtered - reg.register_cubic_resample(fMRI_volume, image, M, step) - - return image - -def demo_MRI_coregistration(anatfile, funclist, optimizer_method='powell', - histo_method=1, smooth_histo=0, smooth_image=0, - ftype=1): - """ - demo with (must have file ANAT1_V0001.img and fMRI directory fMRIData) - - measures, imageF_anat, fmri_series = reg.demo_MRI_coregistration() - - show results with - - In [59]: measures[25]['cost'] - Out[59]: -0.48607185 - - In [60]: measures[25]['align_cost'] - Out[60]: -0.99514639 - - In [61]: measures[25]['align_rotate'] - Out[61]: - array([ 1.94480181, 5.64703989, 5.35002136, -5.00544405, -2.2712214, -1.42249691], dtype=float32) - - In [62]: measures[25]['rotate'] - Out[62]: - array([ 1.36566341, 4.70644331, 4.68198586, -4.32256889, -2.47607017, -2.39173937], dtype=float32) - - - """ - - # demo of alignment of fMRI series with anatomical MRI - # in this demo, each fMRI volume is first perturbed (rotated, translated) - # by a random value. The initial registration is measured, then the optimal - # alignment is computed and the registration measure made following the volume remap. - # The fMRI registration is done with the first fMRI volume using normalized cross-correlation. 
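(Aside: the 'ncc' metric this demo refers to is a normalized cross correlation; a hypothetical pure-numpy equivalent of what the C extension computes, up to sign, is:

    import numpy as np

    def ncc(a, b):
        # Pearson-style correlation of two equal-shape volumes;
        # 1.0 for identical volumes, hence the |cost| near 1 checks
        a = (a - a.mean()) / a.std()
        b = (b - b.mean()) / b.std()
        return np.mean(a * b)

The module returns the value negated so the optimizers can minimize it.)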
- # Each fMRI volume is rotated to the fMRI-0 volume and the series is ensemble averaged. - # The ensemble averaged is then registered with the anatomical MRI volume using normalized mutual information. - # The fMRI series is then rotated with this parameter. The alignments are done with 3D cubic splines. - - # read the anatomical MRI volume - anat_desc = load_anatMRI_desc() - imageF_anat, anat_mat = load_volume(anat_desc, imagename=anatfile) - imageF = imageF_anat.copy() - # the sampling structure - step = np.array([1, 1, 1], dtype=np.int32) - # the volume filter - imageF_anat_fwhm = build_fwhm(anat_mat, step) - - - # allocate the structure for the processed fMRI array - metric_test = np.dtype([('cost', 'f'), - ('align_cost', 'f'), - ('rotate', 'f', 6), - ('align_rotate', 'f', 6)]) - # allocate the empty dictionary that will contain metrics and aligned volumes - fmri_series = {} - - # read in the file list of the fMRI data - fMRIdata = read_fMRI_directory('fMRIData\*.img') - fmri_desc = load_fMRI_desc() - image_fmri, fmri_mat = load_volume(fmri_desc, fMRIdata[0]) - - # one time build of the fwhm that is used to build the filter kernels - anat_fwhm = build_fwhm(anat_mat, step) - fmri_fwhm = build_fwhm(fmri_mat, step) - - # blank volume that will be used for ensemble average for fMRI volumes - # prior to functional-anatomical coregistration -<<<<<<< .mine - ave_fMRI_volume = np.zeros(fmri_desc['layers']*fmri_desc['rows']*fmri_desc['cols'], dtype=np.float64) -======= - ave_fMRI_volume = np.zeros([fmri_desc['layers']*fmri_desc['rows']*fmri_desc['cols']], - dtype=np.float64) ->>>>>>> .r4446 - - count = 0 - number_volumes = len(fMRIdata) - measures = np.zeros(number_volumes, dtype=metric_test) - # load and perturb (rotation, translation) the fMRI volumes - for i in fMRIdata: - image = load_volume(fmri_desc, i) - # random perturbation of angle, translation for each volume beyond the first - if count == 0: - fmri_series[count] = image - count = count + 1 - else: - x = np.random.random(6) - 0.5 - x = 10.0 * x - fmri_series[count] = demo_rotate_fMRI_volume(image, fmri_desc, x) - measures[count]['rotate'][0:6] = x[0:6] - count = count + 1 - - - # load and register the fMRI volumes with volume_0 using normalized cross correlation metric - imageF = fmri_series[0] - if smooth_image: - imageF = filter_image_3D(imageF, fmri_fwhm, ftype) - for i in range(1, number_volumes): - imageG = fmri_series[i] - if smooth_image: - imageG = filter_image_3D(imageG, fmri_fwhm, ftype) - # the measure prior to alignment - measures[i]['cost'] = check_alignment(imageF, fmri_mat, imageG, fmri_mat, method='ncc', - lite=histo_method, smhist=smooth_histo) - x = register(imageF, fmri_mat, imageG, fmri_mat, lite=histo_method, method='ncc', - opt_method=optimizer_method, smhist=smooth_histo) - measures[i]['align_rotate'][0:6] = x[0:6] - measures[i]['align_cost'] = check_alignment(imageF, fmri_mat, imageG, fmri_mat, - method='ncc', lite=histo_method, - smhist=smooth_histo, alpha=x[0], - beta=x[1], gamma=x[2], Tx=x[3], - Ty=x[4], Tz=x[5]) - - - # align the volumes and average them for co-registration with the anatomical MRI - ave_fMRI_volume = fmri_series[0]['data'].astype(np.float64) - for i in range(1, number_volumes): - image = fmri_series[i] - x[0:6] = measures[i]['align_rotate'][0:6] - # overwrite the fMRI volume with the aligned volume - fmri_series[i] = remap_image(image, x, resample='cubic') - ave_fMRI_volume = ave_fMRI_volume + fmri_series[i]['data'].astype(np.float64) - - ave_fMRI_volume = (ave_fMRI_volume / 
float(number_volumes)).astype(np.uint8) - ave_fMRI_volume = {'data' : ave_fMRI_volume, 'mat' : imageF['mat'], - 'dim' : imageF['dim'], 'fwhm' : imageF['fwhm']} - # register (using normalized mutual information) with the anatomical MRI - if smooth_image: - imageF_anat = filter_image_3D(imageF_anat, anat_fwhm, ftype) - - x = register(imageF_anat, anat_mat, ave_fMRI_volume, fmri_mat, lite=histo_method, - method='nmi', opt_method=optimizer_method, smhist=smooth_histo) - - print 'functional-anatomical align parameters ' - print x - for i in range(number_volumes): - image = fmri_series[i] - # overwrite the fMRI volume with the anatomical-aligned volume - fmri_series[i] = remap_image(image, x, resample='cubic') - - return measures, imageF, fmri_series - - -def demo_fMRI_resample(imageF_anat, imageF_anat_mat, fmri_series): - resampled_fmri_series = {} - number_volumes = len(fmri_series) - for i in range(number_volumes): - resampled_fmri_series[i] = resize_image(fmri_series[i], imageF_anat_mat) - - return resampled_fmri_series - - From scipy-svn at scipy.org Wed Jun 18 19:13:07 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Wed, 18 Jun 2008 18:13:07 -0500 (CDT) Subject: [Scipy-svn] r4450 - trunk/scipy/ndimage Message-ID: <20080618231307.2FC8939C52D@scipy.org> Author: tom.waite Date: 2008-06-18 18:13:03 -0500 (Wed, 18 Jun 2008) New Revision: 4450 Modified: trunk/scipy/ndimage/_registration.py Log: fixed axis bug in build_fwhm method Modified: trunk/scipy/ndimage/_registration.py =================================================================== --- trunk/scipy/ndimage/_registration.py 2008-06-18 22:36:32 UTC (rev 4449) +++ trunk/scipy/ndimage/_registration.py 2008-06-18 23:13:03 UTC (rev 4450) @@ -565,7 +565,7 @@ # M contains the voxel to physical mapping view_3x3 = np.square(M[0:3, 0:3]) # sum the elements in the first row - vxg = np.sqrt(view_3x3.sum(axis=0)) + vxg = np.sqrt(view_3x3.sum(axis=1)) # assumes that voxel sampling is the same for xyz as S is the step size = np.array([1,1,1])*S[0] x = np.square(size) - np.square(vxg) From scipy-svn at scipy.org Fri Jun 20 11:53:19 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Fri, 20 Jun 2008 10:53:19 -0500 (CDT) Subject: [Scipy-svn] r4451 - branches Message-ID: <20080620155319.8A27039CB4C@scipy.org> Author: matthew.brett at gmail.com Date: 2008-06-20 10:53:10 -0500 (Fri, 20 Jun 2008) New Revision: 4451 Added: branches/mb_mio_refactor/ Log: Created mio branch for refactoring Copied: branches/mb_mio_refactor (from rev 4450, trunk/scipy/io/matlab) From scipy-svn at scipy.org Fri Jun 20 12:30:22 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Fri, 20 Jun 2008 11:30:22 -0500 (CDT) Subject: [Scipy-svn] r4452 - branches Message-ID: <20080620163022.4AEB139C75F@scipy.org> Author: matthew.brett at gmail.com Date: 2008-06-20 11:30:19 -0500 (Fri, 20 Jun 2008) New Revision: 4452 Removed: branches/mb_mio_refactor/ Log: Deleted mio branch, need whole io tree From scipy-svn at scipy.org Fri Jun 20 12:31:09 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Fri, 20 Jun 2008 11:31:09 -0500 (CDT) Subject: [Scipy-svn] r4453 - branches Message-ID: <20080620163109.919DD39C54F@scipy.org> Author: matthew.brett at gmail.com Date: 2008-06-20 11:31:05 -0500 (Fri, 20 Jun 2008) New Revision: 4453 Added: branches/mb_mio_refactor/ Log: Created io branch for matfile refactoring Copied: branches/mb_mio_refactor (from rev 4452, trunk/scipy/io) From scipy-svn at scipy.org Fri Jun 20 12:38:05 2008 From: scipy-svn at 
scipy.org (scipy-svn at scipy.org) Date: Fri, 20 Jun 2008 11:38:05 -0500 (CDT) Subject: [Scipy-svn] r4454 - in branches/mb_mio_refactor: matlab/tests tests Message-ID: <20080620163805.98A5039C54F@scipy.org> Author: matthew.brett at gmail.com Date: 2008-06-20 11:37:47 -0500 (Fri, 20 Jun 2008) New Revision: 4454 Added: branches/mb_mio_refactor/matlab/tests/gen_unittests.m branches/mb_mio_refactor/matlab/tests/gen_unittests4.m branches/mb_mio_refactor/matlab/tests/save_test.m Removed: branches/mb_mio_refactor/tests/gen_unittests.m branches/mb_mio_refactor/tests/gen_unittests4.m branches/mb_mio_refactor/tests/save_test.m Modified: branches/mb_mio_refactor/tests/test_recaster.py Log: Moved m files to matlab tests, added recaster test draft Copied: branches/mb_mio_refactor/matlab/tests/gen_unittests.m (from rev 4453, branches/mb_mio_refactor/tests/gen_unittests.m) Copied: branches/mb_mio_refactor/matlab/tests/gen_unittests4.m (from rev 4453, branches/mb_mio_refactor/tests/gen_unittests4.m) Copied: branches/mb_mio_refactor/matlab/tests/save_test.m (from rev 4453, branches/mb_mio_refactor/tests/save_test.m) Deleted: branches/mb_mio_refactor/tests/gen_unittests.m =================================================================== --- branches/mb_mio_refactor/tests/gen_unittests.m 2008-06-20 16:31:05 UTC (rev 4453) +++ branches/mb_mio_refactor/tests/gen_unittests.m 2008-06-20 16:37:47 UTC (rev 4454) @@ -1,92 +0,0 @@ -% Generates mat files for loadmat unit tests -% This is the version for matlab 5 and higher -% Uses save_test.m function - -% work out matlab version and file suffix for test files -global FILEPREFIX FILESUFFIX -FILEPREFIX = [fullfile(pwd, 'data') filesep]; -temp = ver('MATLAB'); -mlv = temp.Version; -FILESUFFIX = ['_' mlv '_' computer '.mat']; - -% basic double array -save_test('testdouble', 0:pi/4:2*pi); - -% string -save_test('teststring', '"Do nine men interpret?" "Nine men," I nod.') - -% complex -theta = 0:pi/4:2*pi; -save_test('testcomplex', cos(theta) + 1j*sin(theta)); - -% asymmetric array to check indexing -a = zeros(3, 5); -a(:,1) = [1:3]'; -a(1,:) = 1:5; - -% 2D matrix -save_test('testmatrix', a); - -% minus number - tests signed int -save_test('testminus', -1); - -% single character -save_test('testonechar', 'r'); - -% string array -save_test('teststringarray', ['one '; 'two '; 'three']); - -% sparse array -save_test('testsparse', sparse(a)); - -% sparse complex array -b = sparse(a); -b(1,1) = b(1,1) + j; -save_test('testsparsecomplex', b); - -% Two variables in same file -save([FILEPREFIX 'testmulti' FILESUFFIX], 'a', 'theta') - - -% struct -save_test('teststruct', ... - struct('stringfield','Rats live on no evil star.',... - 'doublefield',[sqrt(2) exp(1) pi],... - 'complexfield',(1+1j)*[sqrt(2) exp(1) pi])); - -% cell -save_test('testcell', ... - {['This cell contains this string and 3 arrays of increasing' ... - ' length'], 1., 1.:2., 1.:3.}); - -% Empty cells in two cell matrices -save_test('testemptycell', {1, 2, [], [], 3}); - -% 3D matrix -save_test('test3dmatrix', reshape(1:24,[2 3 4])) - -% nested cell array -save_test('testcellnest', {1, {2, 3, {4, 5}}}); - -% nested struct -save_test('teststructnest', struct('one', 1, 'two', ... - struct('three', 'number 3'))); - -% array of struct -save_test('teststructarr', [struct('one', 1, 'two', 2) ... 
- struct('one', 'number 1', 'two', 'number 2')]); - -% matlab object -save_test('testobject', inline('x')) - -% array of matlab objects -%save_test('testobjarr', [inline('x') inline('y')]) - -% unicode test -if str2num(mlv) > 7 % function added 7.0.1 - fid = fopen([FILEPREFIX 'japanese_utf8.txt']); - from_japan = fread(fid, 'uint8')'; - fclose(fid); - save_test('testunicode', native2unicode(from_japan, 'utf-8')); -end - \ No newline at end of file Deleted: branches/mb_mio_refactor/tests/gen_unittests4.m =================================================================== --- branches/mb_mio_refactor/tests/gen_unittests4.m 2008-06-20 16:31:05 UTC (rev 4453) +++ branches/mb_mio_refactor/tests/gen_unittests4.m 2008-06-20 16:37:47 UTC (rev 4454) @@ -1,50 +0,0 @@ -% Generates mat files for loadmat unit tests -% Uses save_test.m function -% This is the version for matlab 4 - -% work out matlab version and file suffix for test files -global FILEPREFIX FILESUFFIX -sepchar = '/'; -if strcmp(computer, 'PCWIN'), sepchar = '\'; end -FILEPREFIX = [pwd sepchar 'data' sepchar]; -mlv = version; -FILESUFFIX = ['_' mlv '_' computer '.mat']; - -% basic double array -save_test('testdouble', 0:pi/4:2*pi); - -% string -save_test('teststring', '"Do nine men interpret?" "Nine men," I nod.') - -% complex -theta = 0:pi/4:2*pi; -save_test('testcomplex', cos(theta) + 1j*sin(theta)); - -% asymmetric array to check indexing -a = zeros(3, 5); -a(:,1) = [1:3]'; -a(1,:) = 1:5; - -% 2D matrix -save_test('testmatrix', a); - -% minus number - tests signed int -save_test('testminus', -1); - -% single character -save_test('testonechar', 'r'); - -% string array -save_test('teststringarray', ['one '; 'two '; 'three']); - -% sparse array -save_test('testsparse', sparse(a)); - -% sparse complex array -b = sparse(a); -b(1,1) = b(1,1) + j; -save_test('testsparsecomplex', b); - -% Two variables in same file -save([FILEPREFIX 'testmulti' FILESUFFIX], 'a', 'theta') - Deleted: branches/mb_mio_refactor/tests/save_test.m =================================================================== --- branches/mb_mio_refactor/tests/save_test.m 2008-06-20 16:31:05 UTC (rev 4453) +++ branches/mb_mio_refactor/tests/save_test.m 2008-06-20 16:37:47 UTC (rev 4454) @@ -1,6 +0,0 @@ -function save_test(test_name, v) -% saves variable passed in m with filename from prefix - -global FILEPREFIX FILESUFFIX -eval([test_name ' = v;']); -save([FILEPREFIX test_name FILESUFFIX], test_name) \ No newline at end of file Modified: branches/mb_mio_refactor/tests/test_recaster.py =================================================================== --- branches/mb_mio_refactor/tests/test_recaster.py 2008-06-20 16:31:05 UTC (rev 4453) +++ branches/mb_mio_refactor/tests/test_recaster.py 2008-06-20 16:37:47 UTC (rev 4454) @@ -1,176 +1,246 @@ +# Test recasting module + +from numpy.testing import * import numpy as N -from scipy.testing import * -from scipy.io.recaster import sctype_attributes, Recaster, RecastError +set_package_path() +from io.recaster import basecaster, intcaster, floatcaster, complexcaster, \ + rejectcastercollection, precisioncastercollection,\ + energeticcastercollection, fastcastercollection, \ + numerictypeinfo, RecastError +restore_path() try: # Python 2.3 support from sets import Set as set except: pass -class TestRecaster(TestCase): +class test_numerictypeinfo(NumpyTestCase): def test_init(self): - # Setting sctype_list - R = Recaster() - assert set(R.sctype_list) == set(sctype_attributes().keys()), \ - 'Default recaster should include all system types' 
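(Aside: the ranges and tolerances this recaster test draft revolves around come straight from numpy's type introspection, e.g.:

    >>> import numpy as N
    >>> N.iinfo(N.int8).min, N.iinfo(N.int8).max
    (-128, 127)
    >>> F = N.finfo(N.float64)
    >>> (F.eps, F.tiny)   # the default rtol/atol used in the tolerance dicts
    (2.2204460492503131e-16, 2.2250738585072014e-308)

)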
- T = N.float32 - R = Recaster([T]) - assert R.sctype_list == [T], 'Scalar type list not correctly set' - # Setting tolerances - R = Recaster() - tols = R.default_sctype_tols() - assert tols == R.sctype_tols, 'Unexpected tols dictionary' - F = N.finfo(T) - R = Recaster(sctype_tols={T: { - 'rtol': F.eps*2, - 'atol': F.tiny*2, - 'silly': 'silly text'}}) - assert R.sctype_tols[T]['rtol'] == F.eps*2, \ - 'Rtol not correctly set' - assert R.sctype_tols[T]['atol'] == F.tiny*2, \ - 'Atol not correctly set' - T = N.complex128 - F = N.finfo(T) - assert R.sctype_tols[T]['rtol'] == F.eps, \ - 'Rtol defaults not correctly set' - assert R.sctype_tols[T]['atol'] == F.tiny, \ - 'Atol defaults not correctly set' - # Options - # Sctype size lists - # Integer sizes - # Cabable types + types = [] + for k in ('int', 'float', 'uint', 'complex'): + types += N.sctypes[k] + # All system numeric types by default + nt = numerictypeinfo() + assert nt.types == set(types) + # Can include bools as uint type + nt = numerictypeinfo(bool_as_uint=True) + types.append(N.bool) + assert nt.types == set(types) + # Bools rejected unless flag is set + self.assertRaises(TypeError, numerictypes,[N.bool]) + # Can restrict by kind + nt = numerictypeinfo(kinds=['int', 'uint']) + types = [] + for k in ('int', 'uint'): + types += N.sctypes[k] + assert nt.types == set(types) + # By list of types + type_list = [N.uint16, N.int16, N.float64] + nt = numertypeinfo(types=type_list) + assert nt.types == set(type_list) + # And by intersection of two + nt = numerictypeinfo(kinds=['int', 'uint'], types=type_list) + assert nt.types == set([N.uint16, N.int16]) + # Reject non-numeric + self.assertRaises(TypeError, numerictypeinfo, [N.void]) - def test_cast_to_fp(self): - R = Recaster() - # Define expected type output from fp recast of value - sta = sctype_attributes() - inp_outp = ( - (1, N.complex128, 'c', sta[N.complex128]['size'], 0, N.complex128), - (1, N.complex128, 'c', sta[N.complex128]['size'], 1, N.complex64), - (1, N.complex128, 'c', sta[N.complex64]['size'], 0, N.complex64), - (1, N.complex128, 'f', sta[N.float64]['size'], 0, N.float64), - (1.0+1j, N.complex128, 'f', sta[N.complex128]['size'], 0, None), - (1, N.float64, 'f', sta[N.float64]['size'], 0, N.float64), - (1, N.float64, 'f', sta[N.float64]['size'], 1, N.float32), - (1, N.float64, 'f', sta[N.float32]['size'], 0, N.float32), - (1, N.float64, 'c', sta[N.complex128]['size'], 0, N.complex128), - (1, N.float64, 'c', sta[N.complex128]['size'], 1, N.complex64), - (1, N.int32, 'f', sta[N.float64]['size'], 0, N.float64), - (1, N.int32, 'f', sta[N.float64]['size'], 1, N.float32), - (1, N.float64, 'f', 0, 0, None), - ) - for value, inp, kind, max_size, continue_down, outp in inp_outp: - arr = N.array(value, dtype=inp) - arr = R.cast_to_fp(arr, kind, max_size, continue_down) - if outp is None: - assert arr is None, \ - 'Expected None from type %s, got %s' \ - % (inp, arr.dtype.type) - continue - assert arr is not None, \ - 'Expected %s from %s, got None' % (outp, inp) - dtt = arr.dtype.type - assert dtt is outp, \ - 'Expected %s from %s, got %s' % (outp, inp, dtt) + def test_info(self): + nt = numerictypeinfo() + it_expected = {'kind': 'i', + 'size': 1, + 'min': -128, + 'max': 127} + assert nt.info(N.int8) == it_expected + F = N.finfo(N.dtype(N.float64)) + ft_expected = {'kind': 'f', + 'size', 8, + 'min': F.min, + 'max': F.max} + assert nt.info(N.float64) == ft_expected - def test_smallest_int_sctype(self): - # Smallest int sctype with full recaster - params = sctype_attributes() - RF = 
Recaster() - test_triples = [(N.uint8, 0, 255), - (N.int8, -128, 0), - (N.uint16, 0, params[N.uint16]['max']), - (N.int16, params[N.int16]['min'], 0), - (N.uint32, 0, params[N.uint32]['max']), - (N.int32, params[N.int32]['min'], 0), - (N.uint64, 0, params[N.uint64]['max']), - (N.int64, params[N.int64]['min'], 0)] - for T, mn, mx in test_triples: - rt = RF.smallest_int_sctype(mx, mn) - assert N.dtype(rt) == N.dtype(T), \ - 'Expected %s, got %s type' % (T, rt) - # Smallest int sctype with restricted recaster - mmax = params[N.int32]['max'] - mmin = params[N.int32]['min'] - RR = Recaster([N.int32]) - for kind in ('int', 'uint'): - for T in N.sctypes[kind]: - mx = params[T]['max'] - mn = params[T]['min'] - rt = RR.smallest_int_sctype(mx, mn) - if mx <= mmax and mn >= mmin: - assert rt == N.int32, \ - 'Expected int32 type, got %s' % rt - else: - assert rt is None, \ - 'Expected None, got %s for %s' % (T, rt) - # Test preferred int flag - mx = 1000 - mn = 0 - rt = RF.smallest_int_sctype(mx, mn) - assert rt == N.int16, 'Expected int16, got %s' % rt - rt = RF.smallest_int_sctype(mx, mn, 'i') - assert rt == N.int16, 'Expected int16, got %s' % rt - rt = RF.smallest_int_sctype(mx, mn, prefer='u') - assert rt == N.uint16, 'Expected uint16, got %s' % rt + def test_of_kind(self): + # Can select sublist of specified kind + nt = numerictypes() + allints = set(N.sctypes['uint'] + N.sctypes['uint'] + [N.bool]) + assert set(nt.of_kind(['int', 'uint')) == uints - def test_recasts(self): - valid_types = [N.int32, N.complex128, N.float64] - # Test smallest - R = Recaster(valid_types, recast_options='smallest') - inp_outp = ( - (1, N.complex128, N.int32), - (1, N.complex64, N.int32), - (1.0+1j, N.complex128, N.complex128), - (1.0+1j, N.complex64, N.complex128), - (1, N.float64, N.int32), - (1, N.float32, N.int32), - (1.1, N.float64, N.float64), - (-1e12, N.int64, N.float64), - ) - self.run_io_recasts(R, inp_outp) - # Test only_if_none - R = Recaster(valid_types, recast_options='only_if_none') - inp_outp = ( - (1, N.complex128, N.complex128), - (1, N.complex64, N.int32), - (1.0+1j, N.complex128, N.complex128), - (1.0+1j, N.complex64, N.complex128), - (1, N.float64, N.float64), - (1, N.float32, N.int32), - (1.1, N.float64, N.float64), - (-1e12, N.int64, N.float64), - ) - self.run_io_recasts(R, inp_outp) - # Test preserve_precision - R = Recaster(valid_types, recast_options='preserve_precision') - inp_outp = ( - (1, N.complex128, N.complex128), - (1, N.complex64, N.complex128), - (1.0+1j, N.complex128, N.complex128), - (1.0+1j, N.complex64, N.complex128), - (1, N.float64, N.float64), - (1, N.float32, N.float64), - (1.1, N.float64, N.float64), - (-1e12, N.int64, None), - ) - self.run_io_recasts(R, inp_outp) + def test_by_size(self): + nt = numerictypes() + t_by_size = nt.by_size() + csz = N.inf + for t in t_by_size: + sz = N.dtype(t).itemsize + assert sz <= csz + csz = sz + nt = numerictypes([N.int8, N.int16]) + assert nt.by_size() == [N.int16, N.int8] + # uints are lower (appear smaller) in search order + # because larger range for same size means smaller + nt = numerictypes([N.uint16, N.int16]) + assert nt.by_size() == [N.int16, N.uint16] + # bools therefore tend to be higher in order + nt = numerictypes([N.uint8, N.int8, N.bool], bool_as_uint=True) + assert nt.by_size() == [N.bool, N.int8, N.uint8] - def run_io_recasts(self, R, inp_outp): - ''' Runs sets of value, input, output tests ''' - for value, inp, outp in inp_outp: - arr = N.array(value, inp) - if outp is None: - self.assertRaises(RecastError, R.recast, 
arr) + def test_smallest_precise(self): + nt = numerictypes(kinds=['complex']) + largest_complex = nt.by_size()[0] + nt = numerictypes(kinds=['float']) + largest_float = nt.by_size()[0] + nt = numerictypes(types=[N.complex128, N.float64, N.int16]) + tests = [[N.int8, N.int16], + [N.bool, N.int16], + [N.uint8, N.int16], + [N.int16, N.int16], + [N.uint16, None], + [N.int32, None], + [N.float32, N.float64], + [N.float64, N.float64], + [N.float128, None], + [N.complex64, N.complex128], + [N.complex128, N.complex128], + [largest_complex, None]] + for inp, outp in tests: + assert nt.smallest_precise(inp) == outp + # No floats, then float goes to complex + nt = numerictypes(types=[N.complex128]) + assert nt.smallest_precise(N.float64) == N.complex128 + assert nt.smallest_precise(N.float32) == N.complex128 + assert nt.smallest_precise(largest_float) == None + + +class CastTestCase(NumpyTestCase): + ''' Define helper function for running caster tests ''' + def run_rig(self, caster, test_list): + for inpval, inptype, outval, outtype in test_list: + inparr = N.array(inpval, dtype=inptype) + if outval is None: + self.assertRaises(RecastError, caster.do, inparr) continue - arr = R.recast(N.array(value, inp)) - assert arr is not None, \ - 'Expected %s from %s, got None' % (outp, inp) - dtt = arr.dtype.type - assert dtt is outp, \ - 'Expected %s from %s, got %s' % (outp, inp, dtt) + res = caster.do(inparr) + assert res.dtype == N.dtype(outtype) + assert res == outval + -if __name__ == "__main__": - nose.run(argv=['', __file__]) +class test_basecaster(CastTestCase): + def test_init(self): + bc = basecaster() + assert bc.ordered_types is None + bc = basecaster([N.float64]) + assert bc.ordered_types == [N.float64] + bc = basecaster([N.float64], tols={}) + bc = basecaster([N.int8, N.float64]) + assert bc.ordered_types = [N.float64, N.int8] + # Reject non-numeric + self.assertRaises(TypeError, intcaster, [N.void]) + + def test_do(self): + bc = basecaster() + self.assertRaises(NotImplementedError, bc.do, 1) + + +class test_intcaster(CastTestCase): + def test_init(self): + # Default leads to None in typeinfo + ic = intcaster() + assert ic.typeinfo is None + # Ordering + ic = intcaster([N.int8, N.int16]) + assert bc.typeinfo.types == set([N.int16, N.int8]) + assert ic.tols is None + # Test passed named args + F = N.finfo(N.dtype(N.float64)) + tols = {N.int8: {'rtol': F.eps, 'atol': F.tiny}} + ic = intcaster([N.int8], tols=tols) + assert ic.tols == tols + # Reject non-integer + self.assertRaises(TypeError, intcaster, [N.float64]) + self.assertRaises(TypeError, intcaster, [N.complex128]) + # Accept bool + ic = intcaster([N.bool]) + + def test_int_int(self): + ic = intcaster() + self.assertRaises(RecastError, ic.do, 1) + # Default caster tries to find smallest type with larger type range + # If not, falls back to largest type containing data range + ic = intcaster([N.int8]) + # Tests: input value, input dtype, output value, output dtype + # Where None in output value indicates should raise error + tests = [ # Inevitable shrink + [1, N.int32, 1, N.int8], + [128, N.int32, None, None], + [-129, N.int32, None, None], + ] + self.run_rig(ic, tests) + ic = intcaster([N.int32, N.int8]) + tests = [ # Up by default, down when + + def test_float_int(self): + + +class test_floatcaster(CastTestCase): + def test_init(self): + pass + + def test_float_float(self): + pass + + def test_int_float(self): + pass + + def test_complex_float(self): + pass + + +class test_complexcaster(CastTestCase): + def test_init(self): + pass + + def 
test_complex_complex(self): + pass + + def test_complex_float(self): + pass + + def test_complex_int(self): + ''' Necessary? Maybe only do complex->float->int ''' + pass + +class test_rejectecastercollection(NumpyTestCase): + def test_init(self): + pass + + def test_do(self): + pass + +class test_precisioncastercollection(NumpyTestCase): + def test_init(self): + pass + + def test_do(self): + pass + +class test_energeticcastercollection(NumpyTestCase): + def test_init(self): + pass + + def test_do(self): + pass + +class test_fastcastercollection(NumpyTestCase): + def test_init(self): + pass + + def test_do(self): + pass + + +class test_smallcastercollection(NumpyTestCase): + def test_init(self): + pass + + def test_do(self): + pass From scipy-svn at scipy.org Sat Jun 21 05:04:58 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Sat, 21 Jun 2008 04:04:58 -0500 (CDT) Subject: [Scipy-svn] r4455 - trunk/scipy/stats/models Message-ID: <20080621090458.45F4E39C2EA@scipy.org> Author: wnbell Date: 2008-06-21 04:04:55 -0500 (Sat, 21 Jun 2008) New Revision: 4455 Modified: trunk/scipy/stats/models/formula.py Log: should fix ticket #611 the code seemed to be capturing the wrong Exception type Modified: trunk/scipy/stats/models/formula.py =================================================================== --- trunk/scipy/stats/models/formula.py 2008-06-20 16:37:47 UTC (rev 4454) +++ trunk/scipy/stats/models/formula.py 2008-06-21 09:04:55 UTC (rev 4455) @@ -269,9 +269,9 @@ if reference is None: reference = 0 else: - try: + if reference in names: reference = names.index(reference) - except IndexError: + else: reference = int(reference) def maineffect_func(value, reference=reference): From scipy-svn at scipy.org Sat Jun 21 07:28:07 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Sat, 21 Jun 2008 06:28:07 -0500 (CDT) Subject: [Scipy-svn] r4456 - trunk/scipy Message-ID: <20080621112807.86CCF39C3F0@scipy.org> Author: cdavid Date: 2008-06-21 06:28:01 -0500 (Sat, 21 Jun 2008) New Revision: 4456 Modified: trunk/scipy/setupscons.py Log: No splinalg anymore, remove it from scons build. Modified: trunk/scipy/setupscons.py =================================================================== --- trunk/scipy/setupscons.py 2008-06-21 09:04:55 UTC (rev 4455) +++ trunk/scipy/setupscons.py 2008-06-21 11:28:01 UTC (rev 4456) @@ -18,7 +18,6 @@ config.add_subpackage('signal') config.add_subpackage('sparse') config.add_subpackage('special') - config.add_subpackage('splinalg') config.add_subpackage('stats') config.add_subpackage('ndimage') config.add_subpackage('stsci') From scipy-svn at scipy.org Sat Jun 21 12:48:52 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Sat, 21 Jun 2008 11:48:52 -0500 (CDT) Subject: [Scipy-svn] r4457 - trunk/scipy/ndimage/tests Message-ID: <20080621164852.9D3A139C737@scipy.org> Author: cdavid Date: 2008-06-21 11:48:48 -0500 (Sat, 21 Jun 2008) New Revision: 4457 Added: trunk/scipy/ndimage/tests/test_regression.py Log: Add regression test for #413. 
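(Aside on the r4455 fix above: list.index raises ValueError, not IndexError, when the item is absent, so the old except clause never matched and the exception propagated; hence the switch to an explicit membership test:

    >>> ['x', 'y'].index('z')
    Traceback (most recent call last):
        ...
    ValueError: list.index(x): x not in list

)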
Added: trunk/scipy/ndimage/tests/test_regression.py =================================================================== --- trunk/scipy/ndimage/tests/test_regression.py 2008-06-21 11:28:01 UTC (rev 4456) +++ trunk/scipy/ndimage/tests/test_regression.py 2008-06-21 16:48:48 UTC (rev 4457) @@ -0,0 +1,15 @@ +import numpy as np +from numpy.testing import * + +import scipy.ndimage as ndimage + +def test_byte_order_median(): + """Regression test for #413: median_filter does not handle bytes orders.""" + a = np.arange(9, dtype=' Author: cdavid Date: 2008-06-22 10:14:43 -0500 (Sun, 22 Jun 2008) New Revision: 4458 Modified: trunk/scipy/stats/models/tests/test_bspline.py Log: Do not run bspline tests if _bspline is not available. Modified: trunk/scipy/stats/models/tests/test_bspline.py =================================================================== --- trunk/scipy/stats/models/tests/test_bspline.py 2008-06-21 16:48:48 UTC (rev 4457) +++ trunk/scipy/stats/models/tests/test_bspline.py 2008-06-22 15:14:43 UTC (rev 4458) @@ -6,17 +6,21 @@ from scipy.testing import * import scipy.stats.models as S -import scipy.stats.models.bspline as B +try: + import scipy.stats.models.bspline as B +except ImportError: + B = None class TestBSpline(TestCase): def test1(self): - b = B.BSpline(N.linspace(0,10,11), x=N.linspace(0,10,101)) - old = b._basisx.shape - b.x = N.linspace(0,10,51) - new = b._basisx.shape - self.assertEqual((old[0], 51), new) + if B: + b = B.BSpline(N.linspace(0,10,11), x=N.linspace(0,10,101)) + old = b._basisx.shape + b.x = N.linspace(0,10,51) + new = b._basisx.shape + self.assertEqual((old[0], 51), new) if __name__ == "__main__": From scipy-svn at scipy.org Sun Jun 22 11:45:11 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Sun, 22 Jun 2008 10:45:11 -0500 (CDT) Subject: [Scipy-svn] r4459 - trunk/scipy/weave/tests Message-ID: <20080622154511.2F7AD39C84D@scipy.org> Author: cdavid Date: 2008-06-22 10:45:05 -0500 (Sun, 22 Jun 2008) New Revision: 4459 Modified: trunk/scipy/weave/tests/test_wx_spec.py Log: Fix weave test: conditionally import wx to avoid ImportError for platforms wo wx. Modified: trunk/scipy/weave/tests/test_wx_spec.py =================================================================== --- trunk/scipy/weave/tests/test_wx_spec.py 2008-06-22 15:14:43 UTC (rev 4458) +++ trunk/scipy/weave/tests/test_wx_spec.py 2008-06-22 15:45:05 UTC (rev 4459) @@ -13,12 +13,18 @@ from scipy.weave import ext_tools, wx_spec -import wx +try: + import wx +except ImportError: + wx = None +skip = dec.skipif(True, "Cannot import wx, skipping ") + class TestWxConverter(TestCase): def setUp(self): - self.app = wx.App() - self.s = wx_spec.wx_converter() + if wx: + self.app = wx.App() + self.s = wx_spec.wx_converter() @dec.slow def test_type_match_string(self): @@ -107,5 +113,7 @@ c = wx_return.test(b) assert(c == 'hello') +decorate_methods(TestWxConverter, skip) + if __name__ == "__main__": nose.run(argv=['', __file__]) From scipy-svn at scipy.org Sun Jun 22 12:25:42 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Sun, 22 Jun 2008 11:25:42 -0500 (CDT) Subject: [Scipy-svn] r4460 - in trunk: . scipy/stats/models/robust Message-ID: <20080622162542.435D539C65B@scipy.org> Author: cdavid Date: 2008-06-22 11:25:32 -0500 (Sun, 22 Jun 2008) New Revision: 4460 Modified: trunk/INSTALL.txt trunk/scipy/stats/models/robust/scale.py Log: Add a note on CFLAGS and co. 
Modified: trunk/INSTALL.txt =================================================================== --- trunk/INSTALL.txt 2008-06-22 15:45:05 UTC (rev 4459) +++ trunk/INSTALL.txt 2008-06-22 16:25:32 UTC (rev 4460) @@ -178,6 +178,11 @@ It is not necessary to install blas or lapack libraries in addition. + 4) Compiler flags customization (FFLAGS, CFLAGS, etc...). If you customize + CFLAGS and other related flags from the command line or the shell environment, + beware that is does not have the standard behavior of appending options. + Instead, it overrides the options. As such, you have to give all options in the + flag for the build to be successful. GETTING SCIPY ============= Modified: trunk/scipy/stats/models/robust/scale.py =================================================================== --- trunk/scipy/stats/models/robust/scale.py 2008-06-22 15:45:05 UTC (rev 4459) +++ trunk/scipy/stats/models/robust/scale.py 2008-06-22 16:25:32 UTC (rev 4460) @@ -99,6 +99,7 @@ mu = self.mu self.axis = unsqueeze(mu, self.axis, self.a.shape) + print subset * (a - mu)**2 scale = N.sum(subset * (a - mu)**2, axis=self.axis) / (self.n * Huber.gamma - N.sum(1. - subset, axis=self.axis) * Huber.c**2) self.iter += 1 From scipy-svn at scipy.org Sun Jun 22 12:27:36 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Sun, 22 Jun 2008 11:27:36 -0500 (CDT) Subject: [Scipy-svn] r4461 - trunk/scipy/stats/models/robust Message-ID: <20080622162736.2737E39C6AF@scipy.org> Author: cdavid Date: 2008-06-22 11:27:28 -0500 (Sun, 22 Jun 2008) New Revision: 4461 Modified: trunk/scipy/stats/models/robust/scale.py Log: Revert accidentaly commited print debug statement. Modified: trunk/scipy/stats/models/robust/scale.py =================================================================== --- trunk/scipy/stats/models/robust/scale.py 2008-06-22 16:25:32 UTC (rev 4460) +++ trunk/scipy/stats/models/robust/scale.py 2008-06-22 16:27:28 UTC (rev 4461) @@ -99,7 +99,6 @@ mu = self.mu self.axis = unsqueeze(mu, self.axis, self.a.shape) - print subset * (a - mu)**2 scale = N.sum(subset * (a - mu)**2, axis=self.axis) / (self.n * Huber.gamma - N.sum(1. - subset, axis=self.axis) * Huber.c**2) self.iter += 1 From scipy-svn at scipy.org Mon Jun 23 09:57:45 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Mon, 23 Jun 2008 08:57:45 -0500 (CDT) Subject: [Scipy-svn] r4462 - trunk/scipy/ndimage/tests Message-ID: <20080623135745.514D739C9C1@scipy.org> Author: cdavid Date: 2008-06-23 08:57:33 -0500 (Mon, 23 Jun 2008) New Revision: 4462 Modified: trunk/scipy/ndimage/tests/test_registration.py Log: Flag some memory hungry tests as slow. 
Modified: trunk/scipy/ndimage/tests/test_registration.py =================================================================== --- trunk/scipy/ndimage/tests/test_registration.py 2008-06-22 16:27:28 UTC (rev 4461) +++ trunk/scipy/ndimage/tests/test_registration.py 2008-06-23 13:57:33 UTC (rev 4462) @@ -100,6 +100,7 @@ assert_array_almost_equal(E, M, decimal=6) return + @dec.slow def test_autoalign_histogram_1(self): "test_autoalign_histogram_1" desc = load_desc() @@ -115,6 +116,7 @@ assert_equal(W.max(), 0.0) return + @dec.slow def test_autoalign_histogram_2(self): "test_autoalign_histogram_2" desc = load_desc() @@ -132,6 +134,7 @@ assert_equal(s, True) return + @dec.slow def test_autoalign_ncc_value_1(self): "test_autoalign_ncc_value_1" desc = load_desc() @@ -144,6 +147,7 @@ assert_equal(s, True) return + @dec.slow def test_autoalign_ncc_value_2(self): "test_autoalign_ncc_value_2" desc = load_desc() @@ -156,6 +160,7 @@ assert_equal(s, True) return + @dec.slow def test_autoalign_nmi_value_1(self): "test_autoalign_nmi_value_1" desc = load_desc() @@ -166,6 +171,7 @@ assert_almost_equal(cost, -2.0, decimal=6) return + @dec.slow def test_autoalign_nmi_value_2(self): "test_autoalign_nmi_value_2" desc = load_desc() From scipy-svn at scipy.org Mon Jun 23 09:58:28 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Mon, 23 Jun 2008 08:58:28 -0500 (CDT) Subject: [Scipy-svn] r4463 - trunk/scipy/weave/tests Message-ID: <20080623135828.5605439C9C1@scipy.org> Author: cdavid Date: 2008-06-23 08:58:24 -0500 (Mon, 23 Jun 2008) New Revision: 4463 Modified: trunk/scipy/weave/tests/test_wx_spec.py Log: Do not run wx tests if wx not found. Modified: trunk/scipy/weave/tests/test_wx_spec.py =================================================================== --- trunk/scipy/weave/tests/test_wx_spec.py 2008-06-23 13:57:33 UTC (rev 4462) +++ trunk/scipy/weave/tests/test_wx_spec.py 2008-06-23 13:58:24 UTC (rev 4463) @@ -10,19 +10,22 @@ from scipy.testing import * -from scipy.weave import ext_tools, wx_spec - - +e = None try: + from scipy.weave import ext_tools, wx_spec import wx -except ImportError: +except ImportError, e: wx = None + DONOTRUN = True +except RuntimeError, e: + wx = None + DONOTRUN = True -skip = dec.skipif(True, "Cannot import wx, skipping ") +skip = dec.skipif(DONOTRUN, "(error was %s)" % str(e)) class TestWxConverter(TestCase): def setUp(self): - if wx: + if not DONOTRUN: self.app = wx.App() self.s = wx_spec.wx_converter() From scipy-svn at scipy.org Mon Jun 23 10:31:04 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Mon, 23 Jun 2008 09:31:04 -0500 (CDT) Subject: [Scipy-svn] r4464 - trunk/scipy/weave/tests Message-ID: <20080623143104.ED4EB39C9BC@scipy.org> Author: cdavid Date: 2008-06-23 09:30:58 -0500 (Mon, 23 Jun 2008) New Revision: 4464 Modified: trunk/scipy/weave/tests/test_wx_spec.py Log: Maybe I will get this right at some point: set DONOTRUN to False by default to decide wether we should run some weave test or not. 
Modified: trunk/scipy/weave/tests/test_wx_spec.py =================================================================== --- trunk/scipy/weave/tests/test_wx_spec.py 2008-06-23 13:58:24 UTC (rev 4463) +++ trunk/scipy/weave/tests/test_wx_spec.py 2008-06-23 14:30:58 UTC (rev 4464) @@ -11,6 +11,7 @@ from scipy.testing import * e = None +DONOTRUN = False try: from scipy.weave import ext_tools, wx_spec import wx From scipy-svn at scipy.org Mon Jun 23 10:37:09 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Mon, 23 Jun 2008 09:37:09 -0500 (CDT) Subject: [Scipy-svn] r4465 - in trunk/scipy/cluster: . tests Message-ID: <20080623143709.E00A539C24F@scipy.org> Author: cdavid Date: 2008-06-23 09:37:03 -0500 (Mon, 23 Jun 2008) New Revision: 4465 Modified: trunk/scipy/cluster/tests/test_vq.py trunk/scipy/cluster/vq.py Log: Fix #505 in scipy.cluster: unhelpful message when size 0 arrays are input. Modified: trunk/scipy/cluster/tests/test_vq.py =================================================================== --- trunk/scipy/cluster/tests/test_vq.py 2008-06-23 14:30:58 UTC (rev 4464) +++ trunk/scipy/cluster/tests/test_vq.py 2008-06-23 14:37:03 UTC (rev 4465) @@ -1,7 +1,7 @@ #! /usr/bin/env python # David Cournapeau -# Last Change: Tue Jul 03 08:00 PM 2007 J +# Last Change: Mon Jun 23 11:00 PM 2008 J # For now, just copy the tests from sandbox.pyem, so we can check that # kmeans works OK for trivial examples. @@ -152,5 +152,14 @@ kmeans2(data, 3, minit = 'random') kmeans2(data, 3, minit = 'points') + def test_kmeans2_empty(self): + """Ticket #505.""" + try: + kmeans2([], 2) + raise AssertionError("This should not succeed.") + except ValueError, e: + # OK, that's what we expect + pass + if __name__ == "__main__": nose.run(argv=['', __file__]) Modified: trunk/scipy/cluster/vq.py =================================================================== --- trunk/scipy/cluster/vq.py 2008-06-23 14:30:58 UTC (rev 4464) +++ trunk/scipy/cluster/vq.py 2008-06-23 14:37:03 UTC (rev 4465) @@ -633,6 +633,9 @@ else: raise ValueError("Input of rank > 2 not supported") + if N.size(data) < 1: + raise ValueError("Input has 0 items.") + # If k is not a single value, then it should be compatible with data's # shape if N.size(k) > 1 or minit == 'matrix': From scipy-svn at scipy.org Mon Jun 23 16:53:30 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Mon, 23 Jun 2008 15:53:30 -0500 (CDT) Subject: [Scipy-svn] r4466 - trunk/scipy/integrate Message-ID: <20080623205330.6751E39C9CD@scipy.org> Author: ptvirtan Date: 2008-06-23 15:53:22 -0500 (Mon, 23 Jun 2008) New Revision: 4466 Modified: trunk/scipy/integrate/odepack.py Log: Reformat integrate.odeint docstring Modified: trunk/scipy/integrate/odepack.py =================================================================== --- trunk/scipy/integrate/odepack.py 2008-06-23 14:37:03 UTC (rev 4465) +++ trunk/scipy/integrate/odepack.py 2008-06-23 20:53:22 UTC (rev 4466) @@ -21,96 +21,115 @@ ml=None, mu=None, rtol=None, atol=None, tcrit=None, h0=0.0, hmax=0.0, hmin=0.0, ixpr=0, mxstep=0, mxhnil=0, mxordn=12, mxords=5, printmessg=0): - """Integrate a system of ordinary differential equations. - Description: + Solve a system of ordinary differential equations using lsoda from the + FORTRAN library odepack. - Solve a system of ordinary differential equations Using lsoda from the - FORTRAN library odepack. + Solves the initial value problem for stiff or non-stiff systems + of first order ode-s:: + + dy/dt = func(y,t0,...) 
- Solves the initial value problem for stiff or non-stiff systems - of first order ode-s: - dy/dt = func(y,t0,...) where y can be a vector. + where y can be a vector. - Inputs: + Parameters + ---------- + func : callable(y, t0, ...) + Computes the derivative of y at t0. + y0 : array + Initial condition on y (can be a vector). + t : array + A sequence of time points for which to solve for y. The initial + value point should be the first element of this sequence. + args : tuple + Extra arguments to pass to function. + Dfun : callable(y, t0, ...) + Gradient (Jacobian) of func. + col_deriv : boolean + True if Dfun defines derivatives down columns (faster), + otherwise Dfun should define derivatives across rows. + full_output : boolean + True if to return a dictionary of optional outputs as the second output + printmessg : boolean + Whether to print the convergence message - func -- func(y,t0,...) computes the derivative of y at t0. - y0 -- initial condition on y (can be a vector). - t -- a sequence of time points for which to solve for y. The intial - value point should be the first element of this sequence. - args -- extra arguments to pass to function. - Dfun -- the gradient (Jacobian) of func (same input signature as func). - col_deriv -- non-zero implies that Dfun defines derivatives down - columns (faster), otherwise Dfun should define derivatives - across rows. - full_output -- non-zero to return a dictionary of optional outputs as - the second output. - printmessg -- print the convergence message. + Returns + ------- + y : array, shape (len(y0), len(t)) + Array containing the value of y for each desired time in t, + with the initial value y0 in the first row. + + infodict : dict, only returned if full_output == True + Dictionary containing additional output information + + ======= ============================================================ + key meaning + ======= ============================================================ + 'hu' vector of step sizes successfully used for each time step. + 'tcur' vector with the value of t reached for each time step. + (will always be at least as large as the input times). + 'tolsf' vector of tolerance scale factors, greater than 1.0, + computed when a request for too much accuracy was detected. + 'tsw' value of t at the time of the last method switch + (given for each time step) + 'nst' cumulative number of time steps + 'nfe' cumulative number of function evaluations for each time step + 'nje' cumulative number of jacobian evaluations for each time step + 'nqu' a vector of method orders for each successful step. + 'imxer' index of the component of largest magnitude in the + weighted local error vector (e / ewt) on an error return. + 'lenrw' the length of the double work array required. + 'leniw' the length of integer work array required. + 'mused' a vector of method indicators for each successful time step: + 1: adams (nonstiff), 2: bdf (stiff) + ======= ============================================================ + + Other Parameters + ---------------- + ml, mu : integer + If either of these are not-None or non-negative, then the + Jacobian is assumed to be banded. These give the number of + lower and upper non-zero diagonals in this banded matrix. + For the banded case, Dfun should return a matrix whose + columns contain the non-zero bands (starting with the + lowest diagonal). 
Thus, the return matrix from Dfun should + have shape len(y0) * (ml + mu + 1) when ml >=0 or mu >=0 + rtol, atol : float + The input parameters rtol and atol determine the error + control performed by the solver. The solver will control the + vector, e, of estimated local errors in y, according to an + inequality of the form:: + max-norm of (e / ewt) <= 1 + where ewt is a vector of positive error weights computed as:: + ewt = rtol * abs(y) + atol + rtol and atol can be either vectors the same length as y or scalars. + tcrit : array + Vector of critical points (e.g. singularities) where integration + care should be taken. + h0 : float, (0: solver-determined) + The step size to be attempted on the first step. + hmax : float, (0: solver-determined) + The maximum absolute step size allowed. + hmin : float, (0: solver-determined) + The minimum absolute step size allowed. + ixpr : boolean + Whether to generate extra printing at method switches. + mxstep : integer, (0: solver-determined) + Maximum number of (internally defined) steps allowed for each + integration point in t. + mxhnil : integer, (0: solver-determined) + Maximum number of messages printed. + mxordn : integer, (0: solver-determined) + Maximum order to be allowed for the nonstiff (Adams) method. + mxords : integer, (0: solver-determined) + Maximum order to be allowed for the stiff (BDF) method. - Outputs: (y, {infodict,}) - - y -- a rank-2 array containing the value of y in each row for each - desired time in t (with the initial value y0 in the first row). - - infodict -- a dictionary of optional outputs: - 'hu' : a vector of step sizes successfully used for each time step. - 'tcur' : a vector with the value of t reached for each time step. - (will always be at least as large as the input times). - 'tolsf' : a vector of tolerance scale factors, greater than 1.0, - computed when a request for too much accuracy was detected. - 'tsw' : the value of t at the time of the last method switch - (given for each time step). - 'nst' : the cumulative number of time steps. - 'nfe' : the cumulative number of function evaluations for eadh - time step. - 'nje' : the cumulative number of jacobian evaluations for each - time step. - 'nqu' : a vector of method orders for each successful step. - 'imxer' : index of the component of largest magnitude in the - weighted local error vector (e / ewt) on an error return. - 'lenrw' : the length of the double work array required. - 'leniw' : the length of integer work array required. - 'mused' : a vector of method indicators for each successful time step: - 1 -- adams (nonstiff) - 2 -- bdf (stiff) - - Additional Inputs: - - ml, mu -- If either of these are not-None or non-negative, then the - Jacobian is assumed to be banded. These give the number of - lower and upper non-zero diagonals in this banded matrix. - For the banded case, Dfun should return a matrix whose - columns contain the non-zero bands (starting with the - lowest diagonal). Thus, the return matrix from Dfun should - have shape len(y0) x (ml + mu + 1) when ml >=0 or mu >=0 - rtol -- The input parameters rtol and atol determine the error - atol control performed by the solver. The solver will control the - vector, e, of estimated local errors in y, according to an - inequality of the form - max-norm of (e / ewt) <= 1 - where ewt is a vector of positive error weights computed as - ewt = rtol * abs(y) + atol - rtol and atol can be either vectors the same length as y or - scalars. - tcrit -- a vector of critical points (e.g. 
singularities) where - integration care should be taken. - - (For the next inputs a zero default means the solver determines it). - - h0 -- the step size to be attempted on the first step. - hmax -- the maximum absolute step size allowed. - hmin -- the minimum absolute step size allowed. - ixpr -- non-zero to generate extra printing at method switches. - mxstep -- maximum number of (internally defined) steps allowed - for each integration point in t. - mxhnil -- maximum number of messages printed. - mxordn -- maximum order to be allowed for the nonstiff (Adams) method. - mxords -- maximum order to be allowed for the stiff (BDF) method. - - See also: - ode - a more object-oriented integrator based on VODE - quad - for finding the area under a curve + See Also + -------- + ode : a more object-oriented integrator based on VODE + quad : for finding the area under a curve + """ if ml is None: From scipy-svn at scipy.org Mon Jun 23 17:14:21 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Mon, 23 Jun 2008 16:14:21 -0500 (CDT) Subject: [Scipy-svn] r4467 - in trunk/scipy/interpolate: . tests Message-ID: <20080623211421.B0EBA39C5ED@scipy.org> Author: ptvirtan Date: 2008-06-23 16:14:11 -0500 (Mon, 23 Jun 2008) New Revision: 4467 Modified: trunk/scipy/interpolate/fitpack.py trunk/scipy/interpolate/fitpack.pyf trunk/scipy/interpolate/fitpack2.py trunk/scipy/interpolate/tests/test_fitpack.py Log: Wrap and expose dblint from dfitpack. (Implements #206). Add corresponding tests. Modified: trunk/scipy/interpolate/fitpack.py =================================================================== --- trunk/scipy/interpolate/fitpack.py 2008-06-23 20:53:22 UTC (rev 4466) +++ trunk/scipy/interpolate/fitpack.py 2008-06-23 21:14:11 UTC (rev 4467) @@ -842,6 +842,28 @@ if len(z[0])>1: return z[0] return z[0][0] +def dblint(xa,xb,ya,yb,tck): + """Evaluate the integral of a spline over area [xa,xb] x [ya,yb]. + + Parameters + ---------- + xa, xb : float + The end-points of the x integration interval. + ya, yb : float + The end-points of the y integration interval. + tck : list [tx, ty, c, kx, ky] + A sequence of length 5 returned by bisplrep containing the knot + locations tx, ty, the coefficients c, and the degrees kx, ky + of the spline. + + Returns + ------- + integ : float + The value of the resulting integral. + """ + tx,ty,c,kx,ky=tck + return dfitpack.dblint(tx,ty,c,kx,ky,xb,xe,yb,ye) + def insert(x,tck,m=1,per=0): """Insert knots into a B-spline. Modified: trunk/scipy/interpolate/fitpack.pyf =================================================================== --- trunk/scipy/interpolate/fitpack.pyf 2008-06-23 20:53:22 UTC (rev 4466) +++ trunk/scipy/interpolate/fitpack.pyf 2008-06-23 21:14:11 UTC (rev 4467) @@ -456,7 +456,24 @@ :: kwrk=3+mx+my+nxest+nyest integer intent(out) :: ier end subroutine regrid_smth - + + function dblint(tx,nx,ty,ny,c,kx,ky,xb,xe,yb,ye,wrk) + ! 
iy = dblint(tx,ty,c,kx,ky,xb,xe,yb,ye) + real*8 dimension(nx),intent(in) :: tx + integer intent(hide),depend(tx) :: nx=len(tx) + real*8 dimension(ny),intent(in) :: ty + integer intent(hide),depend(ty) :: ny=len(ty) + real*8 intent(in),dimension((nx-kx-1)*(ny-ky-1)),depend(nx,ny,kx,ky),& + check(len(c)==(nx-kx-1)*(ny-ky-1)):: c + integer :: kx + integer :: ky + real*8 intent(in) :: xb + real*8 intent(in) :: xe + real*8 intent(in) :: yb + real*8 intent(in) :: ye + real*8 dimension(nx+ny-kx-ky-2),depend(nx,ny,kx,ky),intent(cache,hide) :: wrk + real*8 :: dblint + end function dblint end interface end python module dfitpack Modified: trunk/scipy/interpolate/fitpack2.py =================================================================== --- trunk/scipy/interpolate/fitpack2.py 2008-06-23 20:53:22 UTC (rev 4466) +++ trunk/scipy/interpolate/fitpack2.py 2008-06-23 21:14:11 UTC (rev 4467) @@ -352,6 +352,27 @@ assert ier==0,'Invalid input: ier='+`ier` return z raise NotImplementedError + + def integral(self, xa, xb, ya, yb): + """ + Evaluate the integral of the spline over area [xa,xb] x [ya,yb]. + + Parameters + ---------- + xa, xb : float + The end-points of the x integration interval. + ya, yb : float + The end-points of the y integration interval. + + Returns + ------- + integ : float + The value of the resulting integral. + + """ + tx,ty,c = self.tck[:3] + kx,ky = self.degrees + return dfitpack.dblint(tx,ty,c,kx,ky,xa,xb,ya,yb) class SmoothBivariateSpline(BivariateSpline): """ Smooth bivariate spline approximation. Modified: trunk/scipy/interpolate/tests/test_fitpack.py =================================================================== --- trunk/scipy/interpolate/tests/test_fitpack.py 2008-06-23 20:53:22 UTC (rev 4466) +++ trunk/scipy/interpolate/tests/test_fitpack.py 2008-06-23 21:14:11 UTC (rev 4467) @@ -14,7 +14,7 @@ import sys from scipy.testing import * -from numpy import array +from numpy import array, diff from scipy.interpolate.fitpack2 import UnivariateSpline,LSQUnivariateSpline,\ InterpolatedUnivariateSpline from scipy.interpolate.fitpack2 import LSQBivariateSpline, \ @@ -48,10 +48,49 @@ tx = [1+s,3-s] ty = [1+s,3-s] lut = LSQBivariateSpline(x,y,z,tx,ty,kx=1,ky=1) - #print lut.get_knots() - #print lut.get_coeffs() - #print lut.get_residual() + assert_almost_equal(lut(2,2), 3.) 
+ + def test_bilinearity(self): + x = [1,1,1,2,2,2,3,3,3] + y = [1,2,3,1,2,3,1,2,3] + z = [0,7,8,3,4,7,1,3,4] + s = 0.1 + tx = [1+s,3-s] + ty = [1+s,3-s] + lut = LSQBivariateSpline(x,y,z,tx,ty,kx=1,ky=1) + + tx, ty = lut.get_knots() + + for xa, xb in zip(tx[:-1], tx[1:]): + for ya, yb in zip(ty[:-1], ty[1:]): + for t in [0.1, 0.5, 0.9]: + for s in [0.3, 0.4, 0.7]: + xp = xa*(1-t) + xb*t + yp = ya*(1-s) + yb*s + zp = (+ lut(xa, ya)*(1-t)*(1-s) + + lut(xb, ya)*t*(1-s) + + lut(xa, yb)*(1-t)*s + + lut(xb, yb)*t*s) + assert_almost_equal(lut(xp,yp), zp) + + def test_integral(self): + x = [1,1,1,2,2,2,8,8,8] + y = [1,2,3,1,2,3,1,2,3] + z = array([0,7,8,3,4,7,1,3,4]) + + s = 0.1 + tx = [1+s,3-s] + ty = [1+s,3-s] + lut = LSQBivariateSpline(x,y,z,tx,ty,kx=1,ky=1) + tx, ty = lut.get_knots() + + tz = lut(tx, ty) + trpz = .25*(diff(tx)[:,None]*diff(ty)[None,:] + *(tz[:-1,:-1]+tz[1:,:-1]+tz[:-1,1:]+tz[1:,1:])).sum() + + assert_almost_equal(lut.integral(tx[0], tx[-1], ty[0], ty[-1]), trpz) + class TestSmoothBivariateSpline(TestCase): def test_linear_constant(self): x = [1,1,1,2,2,2,3,3,3] @@ -73,6 +112,29 @@ assert_almost_equal(lut.get_residual(),0.0) assert_array_almost_equal(lut([1,1.5,2],[1,1.5]),[[0,0],[1,1],[2,2]]) + def test_integral(self): + x = [1,1,1,2,2,2,4,4,4] + y = [1,2,3,1,2,3,1,2,3] + z = array([0,7,8,3,4,7,1,3,4]) + + lut = SmoothBivariateSpline(x,y,z,kx=1,ky=1,s=0) + tx = [1,2,4] + ty = [1,2,3] + + tz = lut(tx, ty) + trpz = .25*(diff(tx)[:,None]*diff(ty)[None,:] + *(tz[:-1,:-1]+tz[1:,:-1]+tz[:-1,1:]+tz[1:,1:])).sum() + assert_almost_equal(lut.integral(tx[0], tx[-1], ty[0], ty[-1]), trpz) + + lut2 = SmoothBivariateSpline(x,y,z,kx=2,ky=2,s=0) + assert_almost_equal(lut2.integral(tx[0], tx[-1], ty[0], ty[-1]), trpz, + decimal=0) # the quadratures give 23.75 and 23.85 + + tz = lut(tx[:-1], ty[:-1]) + trpz = .25*(diff(tx[:-1])[:,None]*diff(ty[:-1])[None,:] + *(tz[:-1,:-1]+tz[1:,:-1]+tz[:-1,1:]+tz[1:,1:])).sum() + assert_almost_equal(lut.integral(tx[0], tx[-2], ty[0], ty[-2]), trpz) + class TestRectBivariateSpline(TestCase): def test_defaults(self): x = array([1,2,3,4,5]) From scipy-svn at scipy.org Mon Jun 23 20:54:24 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Mon, 23 Jun 2008 19:54:24 -0500 (CDT) Subject: [Scipy-svn] r4468 - in trunk/scipy/special: . 
tests Message-ID: <20080624005424.D421A39C15C@scipy.org> Author: wnbell Date: 2008-06-23 19:54:19 -0500 (Mon, 23 Jun 2008) New Revision: 4468 Modified: trunk/scipy/special/_cephesmodule.c trunk/scipy/special/specfun.pyf trunk/scipy/special/specfun_wrappers.c trunk/scipy/special/specfun_wrappers.h trunk/scipy/special/tests/test_basic.py Log: addresses ticket #659 Modified: trunk/scipy/special/_cephesmodule.c =================================================================== --- trunk/scipy/special/_cephesmodule.c 2008-06-23 21:14:11 UTC (rev 4467) +++ trunk/scipy/special/_cephesmodule.c 2008-06-24 00:54:19 UTC (rev 4468) @@ -108,7 +108,7 @@ static void * gdtri_data[] = { (void *)gdtri, (void *)gdtri, }; */ static void * hyp2f1_data[] = { (void *)hyp2f1, (void *)hyp2f1, (void *)chyp2f1_wrap, (void *)chyp2f1_wrap}; -static void * hyperg_data[] = { (void *)hyperg, (void *)hyperg, (void *)chyp1f1_wrap, (void *)chyp1f1_wrap}; +static void * hyp1f1_data[] = { (void *)hyp1f1_wrap, (void *)hyp1f1_wrap, (void *)chyp1f1_wrap, (void *)chyp1f1_wrap}; static void * hypU_data[] = { (void *)hypU_wrap, (void *)hypU_wrap, }; static void * hyp2f0_data[] = { (void *)hyp2f0, (void *)hyp2f0, }; static void * threef0_data[] = { (void *)threef0, (void *)threef0, }; @@ -441,7 +441,7 @@ f = PyUFunc_FromFuncAndData(cephes4_functions, hyp2f1_data, cephes_5c2_types, 4, 4, 1, PyUFunc_None, "hyp2f1", hyp2f1_doc, 0); PyDict_SetItemString(dictionary, "hyp2f1", f); Py_DECREF(f); - f = PyUFunc_FromFuncAndData(cephes3_functions, hyperg_data, cephes_4c_types, 4, 3, 1, PyUFunc_None, "hyp1f1", hyp1f1_doc, 0); + f = PyUFunc_FromFuncAndData(cephes3_functions, hyp1f1_data, cephes_4c_types, 4, 3, 1, PyUFunc_None, "hyp1f1", hyp1f1_doc, 0); PyDict_SetItemString(dictionary, "hyp1f1", f); Py_DECREF(f); Modified: trunk/scipy/special/specfun.pyf =================================================================== --- trunk/scipy/special/specfun.pyf 2008-06-23 21:14:11 UTC (rev 4467) +++ trunk/scipy/special/specfun.pyf 2008-06-24 00:54:19 UTC (rev 4468) @@ -242,7 +242,12 @@ ! eix ! e1xb - ! chgm + subroutine chgm(a,b,x,hg) ! in :specfun:specfun.f + double precision intent(in) :: a + double precision intent(in) :: b + double precision intent(in) :: x + double precision intent(out) :: hg + end subroutine chgm ! 
stvh0 Modified: trunk/scipy/special/specfun_wrappers.c =================================================================== --- trunk/scipy/special/specfun_wrappers.c 2008-06-23 21:14:11 UTC (rev 4467) +++ trunk/scipy/special/specfun_wrappers.c 2008-06-24 00:54:19 UTC (rev 4468) @@ -29,6 +29,7 @@ extern void F_FUNC(cpsi,CPSI)(double*,double*,double*,double*); extern void F_FUNC(hygfz,HYGFZ)(double*,double*,double*,Py_complex*,Py_complex*); extern void F_FUNC(cchg,CCHG)(double*,double*,Py_complex*,Py_complex*); +extern void F_FUNC(chgm,CHGM)(double*,double*,double*,double*); extern void F_FUNC(chgu,CHGU)(double*,double*,double*,double*,int*); extern void F_FUNC(itairy,ITAIRY)(double*,double*,double*,double*,double*); extern void F_FUNC(e1xb,E1XB)(double*,double*); @@ -147,6 +148,15 @@ } +double hyp1f1_wrap(double a, double b, double x) { + double outy; + + F_FUNC(chgm,CHGM)(&a, &b, &x, &outy); + if (outy == 1e300) { + outy = INFINITY; + } + return outy; +} int itairy_wrap(double x, double *apt, double *bpt, double *ant, double *bnt) { double tmp; Modified: trunk/scipy/special/specfun_wrappers.h =================================================================== --- trunk/scipy/special/specfun_wrappers.h 2008-06-23 21:14:11 UTC (rev 4467) +++ trunk/scipy/special/specfun_wrappers.h 2008-06-24 00:54:19 UTC (rev 4468) @@ -31,6 +31,7 @@ Py_complex crgamma_wrap( Py_complex z); Py_complex chyp2f1_wrap( double a, double b, double c, Py_complex z); Py_complex chyp1f1_wrap( double a, double b, Py_complex z); +double hyp1f1_wrap( double a, double b, double x); double hypU_wrap(double a, double b, double x); double exp1_wrap(double x); double expi_wrap(double x); Modified: trunk/scipy/special/tests/test_basic.py =================================================================== --- trunk/scipy/special/tests/test_basic.py 2008-06-23 21:14:11 UTC (rev 4467) +++ trunk/scipy/special/tests/test_basic.py 2008-06-24 00:54:19 UTC (rev 4468) @@ -32,7 +32,7 @@ #8 test_sh_jacobi #8 test_sh_legendre -from numpy import dot +from numpy import dot, array from scipy.testing import * @@ -1177,6 +1177,116 @@ hyp1 = hyp1f1(.1,.1,.3) assert_almost_equal(hyp1, 1.3498588075760032,7) + # test contributed by Moritz Deger (2008-05-29) + # http://projects.scipy.org/scipy/scipy/ticket/659 + + # reference data obtained from mathematica [ a, b, x, m(a,b,x)]: + # produced with test_hyp1f1.nb + ref_data = array([[ -8.38132975e+00, -1.28436461e+01, -2.91081397e+01, 1.04178330e+04], + [ 2.91076882e+00, -6.35234333e+00, -1.27083993e+01, 6.68132725e+00], + [ -1.42938258e+01, 1.80869131e-01, 1.90038728e+01, 1.01385897e+05], + [ 5.84069088e+00, 1.33187908e+01, 2.91290106e+01, 1.59469411e+08], + [ -2.70433202e+01, -1.16274873e+01, -2.89582384e+01, 1.39900152e+24], + [ 4.26344966e+00, -2.32701773e+01, 1.91635759e+01, 6.13816915e+21], + [ 1.20514340e+01, -3.40260240e+00, 7.26832235e+00, 1.17696112e+13], + [ 2.77372955e+01, -1.99424687e+00, 3.61332246e+00, 3.07419615e+13], + [ 1.50310939e+01, -2.91198675e+01, -1.53581080e+01, -3.79166033e+02], + [ 1.43995827e+01, 9.84311196e+00, 1.93204553e+01, 2.55836264e+10], + [ -4.08759686e+00, 1.34437025e+01, -1.42072843e+01, 1.70778449e+01], + [ 8.05595738e+00, -1.31019838e+01, 1.52180721e+01, 3.06233294e+21], + [ 1.81815804e+01, -1.42908793e+01, 9.57868793e+00, -2.84771348e+20], + [ -2.49671396e+01, 1.25082843e+01, -1.71562286e+01, 2.36290426e+07], + [ 2.67277673e+01, 1.70315414e+01, 6.12701450e+00, 7.77917232e+03], + [ 2.49565476e+01, 2.91694684e+01, 6.29622660e+00, 2.35300027e+02], + [ 
6.11924542e+00, -1.59943768e+00, 9.57009289e+00, 1.32906326e+11], + [ -1.47863653e+01, 2.41691301e+01, -1.89981821e+01, 2.73064953e+03], + [ 2.24070483e+01, -2.93647433e+00, 8.19281432e+00, -6.42000372e+17], + [ 8.04042600e-01, 1.82710085e+01, -1.97814534e+01, 5.48372441e-01], + [ 1.39590390e+01, 1.97318686e+01, 2.37606635e+00, 5.51923681e+00], + [ -4.66640483e+00, -2.00237930e+01, 7.40365095e+00, 4.50310752e+00], + [ 2.76821999e+01, -6.36563968e+00, 1.11533984e+01, -9.28725179e+23], + [ -2.56764457e+01, 1.24544906e+00, 1.06407572e+01, 1.25922076e+01], + [ 3.20447808e+00, 1.30874383e+01, 2.26098014e+01, 2.03202059e+04], + [ -1.24809647e+01, 4.15137113e+00, -2.92265700e+01, 2.39621411e+08], + [ 2.14778108e+01, -2.35162960e+00, -1.13758664e+01, 4.46882152e-01], + [ -9.85469168e+00, -3.28157680e+00, 1.67447548e+01, -1.07342390e+07], + [ 1.08122310e+01, -2.47353236e+01, -1.15622349e+01, -2.91733796e+03], + [ -2.67933347e+01, -3.39100709e+00, 2.56006986e+01, -5.29275382e+09], + [ -8.60066776e+00, -8.02200924e+00, 1.07231926e+01, 1.33548320e+06], + [ -1.01724238e-01, -1.18479709e+01, -2.55407104e+01, 1.55436570e+00], + [ -3.93356771e+00, 2.11106818e+01, -2.57598485e+01, 2.13467840e+01], + [ 3.74750503e+00, 1.55687633e+01, -2.92841720e+01, 1.43873509e-02], + [ 6.99726781e+00, 2.69855571e+01, -1.63707771e+01, 3.08098673e-02], + [ -2.31996011e+01, 3.47631054e+00, 9.75119815e-01, 1.79971073e-02], + [ 2.38951044e+01, -2.91460190e+01, -2.50774708e+00, 9.56934814e+00], + [ 1.52730825e+01, 5.77062507e+00, 1.21922003e+01, 1.32345307e+09], + [ 1.74673917e+01, 1.89723426e+01, 4.94903250e+00, 9.90859484e+01], + [ 1.88971241e+01, 2.86255413e+01, 5.52360109e-01, 1.44165360e+00], + [ 1.02002319e+01, -1.66855152e+01, -2.55426235e+01, 6.56481554e+02], + [ -1.79474153e+01, 1.22210200e+01, -1.84058212e+01, 8.24041812e+05], + [ -1.36147103e+01, 1.32365492e+00, -7.22375200e+00, 9.92446491e+05], + [ 7.57407832e+00, 2.59738234e+01, -1.34139168e+01, 3.64037761e-02], + [ 2.21110169e+00, 1.28012666e+01, 1.62529102e+01, 1.33433085e+02], + [ -2.64297569e+01, -1.63176658e+01, -1.11642006e+01, -2.44797251e+13], + [ -2.46622944e+01, -3.02147372e+00, 8.29159315e+00, -3.21799070e+05], + [ -1.37215095e+01, -1.96680183e+01, 2.91940118e+01, 3.21457520e+12], + [ -5.45566105e+00, 2.81292086e+01, 1.72548215e-01, 9.66973000e-01], + [ -1.55751298e+00, -8.65703373e+00, 2.68622026e+01, -3.17190834e+16], + [ 2.45393609e+01, -2.70571903e+01, 1.96815505e+01, 1.80708004e+37], + [ 5.77482829e+00, 1.53203143e+01, 2.50534322e+01, 1.14304242e+06], + [ -1.02626819e+01, 2.36887658e+01, -2.32152102e+01, 7.28965646e+02], + [ -1.30833446e+00, -1.28310210e+01, 1.87275544e+01, -9.33487904e+12], + [ 5.83024676e+00, -1.49279672e+01, 2.44957538e+01, -7.61083070e+27], + [ -2.03130747e+01, 2.59641715e+01, -2.06174328e+01, 4.54744859e+04], + [ 1.97684551e+01, -2.21410519e+01, -2.26728740e+01, 3.53113026e+06], + [ 2.73673444e+01, 2.64491725e+01, 1.57599882e+01, 1.07385118e+07], + [ 5.73287971e+00, 1.21111904e+01, 1.33080171e+01, 2.63220467e+03], + [ -2.82751072e+01, 2.08605881e+01, 9.09838900e+00, -6.60957033e-07], + [ 1.87270691e+01, -1.74437016e+01, 1.52413599e+01, 6.59572851e+27], + [ 6.60681457e+00, -2.69449855e+00, 9.78972047e+00, -2.38587870e+12], + [ 1.20895561e+01, -2.51355765e+01, 2.30096101e+01, 7.58739886e+32], + [ -2.44682278e+01, 2.10673441e+01, -1.36705538e+01, 4.54213550e+04], + [ -4.50665152e+00, 3.72292059e+00, -4.83403707e+00, 2.68938214e+01], + [ -7.46540049e+00, -1.08422222e+01, -1.72203805e+01, -2.09402162e+02], + [ -2.00307551e+01, 
-7.50604431e+00, -2.78640020e+01, 4.15985444e+19], + [ 1.99890876e+01, 2.20677419e+01, -2.51301778e+01, 1.23840297e-09], + [ 2.03183823e+01, -7.66942559e+00, 2.10340070e+01, 1.46285095e+31], + [ -2.90315825e+00, -2.55785967e+01, -9.58779316e+00, 2.65714264e-01], + [ 2.73960829e+01, -1.80097203e+01, -2.03070131e+00, 2.52908999e+02], + [ -2.11708058e+01, -2.70304032e+01, 2.48257944e+01, 3.09027527e+08], + [ 2.21959758e+01, 4.00258675e+00, -1.62853977e+01, -9.16280090e-09], + [ 1.61661840e+01, -2.26845150e+01, 2.17226940e+01, -8.24774394e+33], + [ -3.35030306e+00, 1.32670581e+00, 9.39711214e+00, -1.47303163e+01], + [ 7.23720726e+00, -2.29763909e+01, 2.34709682e+01, -9.20711735e+29], + [ 2.71013568e+01, 1.61951087e+01, -7.11388906e-01, 2.98750911e-01], + [ 8.40057933e+00, -7.49665220e+00, 2.95587388e+01, 6.59465635e+29], + [ -1.51603423e+01, 1.94032322e+01, -7.60044357e+00, 1.05186941e+02], + [ -8.83788031e+00, -2.72018313e+01, 1.88269907e+00, 1.81687019e+00], + [ -1.87283712e+01, 5.87479570e+00, -1.91210203e+01, 2.52235612e+08], + [ -5.61338513e-01, 2.69490237e+01, 1.16660111e-01, 9.97567783e-01], + [ -5.44354025e+00, -1.26721408e+01, -4.66831036e+00, 1.06660735e-01], + [ -2.18846497e+00, 2.33299566e+01, 9.62564397e+00, 3.03842061e-01], + [ 6.65661299e+00, -2.39048713e+01, 1.04191807e+01, 4.73700451e+13], + [ -2.57298921e+01, -2.60811296e+01, 2.74398110e+01, -5.32566307e+11], + [ -1.11431826e+01, -1.59420160e+01, -1.84880553e+01, -1.01514747e+02], + [ 6.50301931e+00, 2.59859051e+01, -2.33270137e+01, 1.22760500e-02], + [ -1.94987891e+01, -2.62123262e+01, 3.90323225e+00, 1.71658894e+01], + [ 7.26164601e+00, -1.41469402e+01, 2.81499763e+01, -2.50068329e+31], + [ -1.52424040e+01, 2.99719005e+01, -2.85753678e+01, 1.31906693e+04], + [ 5.24149291e+00, -1.72807223e+01, 2.22129493e+01, 2.50748475e+25], + [ 3.63207230e-01, -9.54120862e-02, -2.83874044e+01, 9.43854939e-01], + [ -2.11326457e+00, -1.25707023e+01, 1.17172130e+00, 1.20812698e+00], + [ 2.48513582e+00, 1.03652647e+01, -1.84625148e+01, 6.47910997e-02], + [ 2.65395942e+01, 2.74794672e+01, 1.29413428e+01, 2.89306132e+05], + [ -9.49445460e+00, 1.59930921e+01, -1.49596331e+01, 3.27574841e+02], + [ -5.89173945e+00, 9.96742426e+00, 2.60318889e+01, -3.15842908e-01], + [ -1.15387239e+01, -2.21433107e+01, -2.17686413e+01, 1.56724718e-01], + [ -5.30592244e+00, -2.42752190e+01, 1.29734035e+00, 1.31985534e+00]]) + + for a,b,c,expected in ref_data: + result = hyp1f1(a,b,c) + assert(abs(expected - result)/expected < 1e-4) + def test_hyp1f2(self): pass From scipy-svn at scipy.org Tue Jun 24 03:46:55 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Tue, 24 Jun 2008 02:46:55 -0500 (CDT) Subject: [Scipy-svn] r4469 - in trunk/scipy/cluster: . tests Message-ID: <20080624074655.990E339C6F3@scipy.org> Author: cdavid Date: 2008-06-24 02:46:46 -0500 (Tue, 24 Jun 2008) New Revision: 4469 Modified: trunk/scipy/cluster/tests/test_vq.py trunk/scipy/cluster/vq.py Log: Fix #535 with tests. Modified: trunk/scipy/cluster/tests/test_vq.py =================================================================== --- trunk/scipy/cluster/tests/test_vq.py 2008-06-24 00:54:19 UTC (rev 4468) +++ trunk/scipy/cluster/tests/test_vq.py 2008-06-24 07:46:46 UTC (rev 4469) @@ -1,7 +1,7 @@ #! /usr/bin/env python # David Cournapeau -# Last Change: Mon Jun 23 11:00 PM 2008 J +# Last Change: Tue Jun 24 04:00 PM 2008 J # For now, just copy the tests from sandbox.pyem, so we can check that # kmeans works OK for trivial examples. 
@@ -161,5 +161,19 @@ # OK, that's what we expect pass + def test_kmeans_0k(self): + """Regression test for #535: fail when k arg is 0.""" + try: + kmeans(X, 0) + raise AssertionError("kmeans with 0 clusters should fail.") + except ValueError: + pass + + try: + kmeans2(X, 0) + raise AssertionError("kmeans2 with 0 clusters should fail.") + except ValueError: + pass + if __name__ == "__main__": nose.run(argv=['', __file__]) Modified: trunk/scipy/cluster/vq.py =================================================================== --- trunk/scipy/cluster/vq.py 2008-06-24 00:54:19 UTC (rev 4468) +++ trunk/scipy/cluster/vq.py 2008-06-24 07:46:46 UTC (rev 4469) @@ -479,13 +479,17 @@ raise ValueError, 'iter must be >= to 1.' if type(k_or_guess) == type(array([])): guess = k_or_guess + if guess.size < 1: + raise ValueError("Asked for 0 cluster ? initial book was %s" % \ + guess) result = _kmeans(obs, guess, thresh = thresh) else: #initialize best distance value to a large value best_dist = 100000 No = obs.shape[0] k = k_or_guess - #print 'kmeans iter: ', + if k < 1: + raise ValueError("Asked for 0 cluster ? ") for i in range(iter): #the intial code book is randomly selected from observations guess = take(obs, randint(0, No, k), 0) From scipy-svn at scipy.org Tue Jun 24 03:59:52 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Tue, 24 Jun 2008 02:59:52 -0500 (CDT) Subject: [Scipy-svn] r4470 - in trunk/scipy/cluster: . tests Message-ID: <20080624075952.DBC6339C661@scipy.org> Author: cdavid Date: 2008-06-24 02:59:43 -0500 (Tue, 24 Jun 2008) New Revision: 4470 Modified: trunk/scipy/cluster/tests/test_vq.py trunk/scipy/cluster/vq.py Log: Handle bogus number of clusters better + test. Modified: trunk/scipy/cluster/tests/test_vq.py =================================================================== --- trunk/scipy/cluster/tests/test_vq.py 2008-06-24 07:46:46 UTC (rev 4469) +++ trunk/scipy/cluster/tests/test_vq.py 2008-06-24 07:59:43 UTC (rev 4470) @@ -175,5 +175,11 @@ except ValueError: pass + try: + kmeans2(X, N.array([])) + raise AssertionError("kmeans2 with 0 clusters should fail.") + except ValueError: + pass + if __name__ == "__main__": nose.run(argv=['', __file__]) Modified: trunk/scipy/cluster/vq.py =================================================================== --- trunk/scipy/cluster/vq.py 2008-06-24 07:46:46 UTC (rev 4469) +++ trunk/scipy/cluster/vq.py 2008-06-24 07:59:43 UTC (rev 4470) @@ -654,7 +654,14 @@ data") clusters = k.copy() else: - nc = int(k) + try: + nc = int(k) + except TypeError: + raise ValueError("k (%s) could not be converted to an integer " % str(k)) + + if nc < 1: + raise ValueError("kmeans2 for 0 clusters ? (k was %s)" % str(k)) + if not nc == k: warnings.warn("k was not an integer, was converted.") try: From scipy-svn at scipy.org Tue Jun 24 04:01:31 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Tue, 24 Jun 2008 03:01:31 -0500 (CDT) Subject: [Scipy-svn] r4471 - trunk/scipy/cluster Message-ID: <20080624080131.C628E39C661@scipy.org> Author: cdavid Date: 2008-06-24 03:01:24 -0500 (Tue, 24 Jun 2008) New Revision: 4471 Modified: trunk/scipy/cluster/vq.py Log: Fix 535 first comment. Modified: trunk/scipy/cluster/vq.py =================================================================== --- trunk/scipy/cluster/vq.py 2008-06-24 07:59:43 UTC (rev 4470) +++ trunk/scipy/cluster/vq.py 2008-06-24 08:01:24 UTC (rev 4471) @@ -370,10 +370,10 @@ """ code_book = array(guess, copy = True) - nc = code_book.shape[0] avg_dist = [] diff = thresh+1. 
while diff > thresh: + nc = code_book.shape[0] #compute membership and distances between obs and code_book obs_code, distort = vq(obs, code_book) avg_dist.append(mean(distort, axis=-1)) From scipy-svn at scipy.org Tue Jun 24 04:02:48 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Tue, 24 Jun 2008 03:02:48 -0500 (CDT) Subject: [Scipy-svn] r4472 - trunk/scipy/cluster/tests Message-ID: <20080624080248.5E60439C661@scipy.org> Author: cdavid Date: 2008-06-24 03:02:32 -0500 (Tue, 24 Jun 2008) New Revision: 4472 Modified: trunk/scipy/cluster/tests/test_vq.py Log: Set regression test to correct number. Modified: trunk/scipy/cluster/tests/test_vq.py =================================================================== --- trunk/scipy/cluster/tests/test_vq.py 2008-06-24 08:01:24 UTC (rev 4471) +++ trunk/scipy/cluster/tests/test_vq.py 2008-06-24 08:02:32 UTC (rev 4472) @@ -162,7 +162,7 @@ pass def test_kmeans_0k(self): - """Regression test for #535: fail when k arg is 0.""" + """Regression test for #546: fail when k arg is 0.""" try: kmeans(X, 0) raise AssertionError("kmeans with 0 clusters should fail.") From scipy-svn at scipy.org Tue Jun 24 04:55:24 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Tue, 24 Jun 2008 03:55:24 -0500 (CDT) Subject: [Scipy-svn] r4473 - branches/mb_mio_refactor/matlab Message-ID: <20080624085524.E0C8A39C6F3@scipy.org> Author: matthew.brett at gmail.com Date: 2008-06-24 03:55:06 -0500 (Tue, 24 Jun 2008) New Revision: 4473 Added: branches/mb_mio_refactor/matlab/c_python.pxd branches/mb_mio_refactor/matlab/cython_setup.py branches/mb_mio_refactor/matlab/tagreader.pyx Modified: branches/mb_mio_refactor/matlab/mio.py branches/mb_mio_refactor/matlab/mio4.py branches/mb_mio_refactor/matlab/mio5.py branches/mb_mio_refactor/matlab/miobase.py Log: Scribbling at cython, checking for HDF5 format Added: branches/mb_mio_refactor/matlab/c_python.pxd =================================================================== --- branches/mb_mio_refactor/matlab/c_python.pxd 2008-06-24 08:02:32 UTC (rev 4472) +++ branches/mb_mio_refactor/matlab/c_python.pxd 2008-06-24 08:55:06 UTC (rev 4473) @@ -0,0 +1,75 @@ +# -*- python -*- +# :Author: Robert Kern +# :Copyright: 2004, Enthought, Inc. +# :License: BSD Style + + +cdef extern from "Python.h": + # Not part of the Python API, but we might as well define it here. + # Note that the exact type doesn't actually matter for Pyrex. 
+ ctypedef int size_t + + # Some type declarations we need + ctypedef int Py_intptr_t + + + # String API + char* PyString_AsString(object string) + char* PyString_AS_STRING(object string) + object PyString_FromString(char* c_string) + object PyString_FromStringAndSize(char* c_string, int length) + object PyString_InternFromString(char *v) + + # Float API + object PyFloat_FromDouble(double v) + double PyFloat_AsDouble(object ob) + long PyInt_AsLong(object ob) + + + # Memory API + void* PyMem_Malloc(size_t n) + void* PyMem_Realloc(void* buf, size_t n) + void PyMem_Free(void* buf) + + void Py_DECREF(object obj) + void Py_XDECREF(object obj) + void Py_INCREF(object obj) + void Py_XINCREF(object obj) + + # CObject API + ctypedef void (*destructor1)(void* cobj) + ctypedef void (*destructor2)(void* cobj, void* desc) + int PyCObject_Check(object p) + object PyCObject_FromVoidPtr(void* cobj, destructor1 destr) + object PyCObject_FromVoidPtrAndDesc(void* cobj, void* desc, + destructor2 destr) + void* PyCObject_AsVoidPtr(object self) + void* PyCObject_GetDesc(object self) + int PyCObject_SetVoidPtr(object self, void* cobj) + + # TypeCheck API + int PyFloat_Check(object obj) + int PyInt_Check(object obj) + + # Error API + int PyErr_Occurred() + void PyErr_Clear() + int PyErr_CheckSignals() + + # File API + ctypedef struct FILE + FILE* PyFile_AsFile(object) + +cdef extern from "stdio.h": + size_t fread(void *ptr, size_t size, size_t n, FILE *file) + +cdef extern from "string.h": + void *memcpy(void *s1, void *s2, int n) + +cdef extern from "math.h": + double fabs(double x) + +cdef extern from "fileobject.h": + ctypedef class __builtin__.file [object PyFileObject]: + pass + Added: branches/mb_mio_refactor/matlab/cython_setup.py =================================================================== --- branches/mb_mio_refactor/matlab/cython_setup.py 2008-06-24 08:02:32 UTC (rev 4472) +++ branches/mb_mio_refactor/matlab/cython_setup.py 2008-06-24 08:55:06 UTC (rev 4473) @@ -0,0 +1,49 @@ +#!/usr/bin/env python +"""Install file for example on how to use Cython with Numpy. + +Note: Cython is the successor project to Pyrex. For more information, see +http://cython.org. +""" + +from distutils.core import setup +from distutils.extension import Extension + +import numpy + +# We detect whether Cython is available, so that below, we can eventually ship +# pre-generated C for users to compile the extension without having Cython +# installed on their systems. +try: + from Cython.Distutils import build_ext + has_cython = True +except ImportError: + has_cython = False + +# Define a cython-based extension module, using the generated sources if cython +# is not available. +if has_cython: + pyx_sources = ['tagreader.pyx'] + cmdclass = {'build_ext': build_ext} +else: + # In production work, you can ship the auto-generated C source yourself to + # your users. In this case, we do NOT ship the .c file as part of numpy, + # so you'll need to actually have cython installed at least the first + # time. Since this is really just an example to show you how to use + # *Cython*, it makes more sense NOT to ship the C sources so you can edit + # the pyx at will with less chances for source update conflicts when you + # update numpy. 
+ pyx_sources = ['tagreader.c'] + cmdclass = {} + + +# Declare the extension object +pyx_ext = Extension('tagreader', + pyx_sources, + include_dirs = [numpy.get_include()]) + +# Call the routine which does the real work +setup(name = 'tagreader', + description = 'tagreader extension', + ext_modules = [pyx_ext], + cmdclass = cmdclass, + ) Modified: branches/mb_mio_refactor/matlab/mio.py =================================================================== --- branches/mb_mio_refactor/matlab/mio.py 2008-06-24 08:02:32 UTC (rev 4472) +++ branches/mb_mio_refactor/matlab/mio.py 2008-06-24 08:55:06 UTC (rev 4473) @@ -7,6 +7,7 @@ import os import sys +from miobase import get_matfile_version from mio4 import MatFile4Reader, MatFile4Writer from mio5 import MatFile5Reader, MatFile5Writer @@ -57,11 +58,16 @@ raise IOError, 'Reader needs file name or open file-like object' byte_stream = file_name - MR = MatFile4Reader(byte_stream, **kwargs) - if MR.format_looks_right(): - return MR - return MatFile5Reader(byte_stream, **kwargs) - + mv = get_matfile_version(byte_stream) + if mv == '4': + return MatFile4Reader(byte_stream, **kwargs) + elif mv == '5': + return MatFile5Reader(byte_stream, **kwargs) + elif mv == '7': + raise NotImplementedError('Please use PyTables for matlab HDF files') + else: + raise TypeError('Did not recognize version %s' % mv) + def loadmat(file_name, mdict=None, appendmat=True, basename='raw', **kwargs): ''' Load Matlab(tm) file Modified: branches/mb_mio_refactor/matlab/mio4.py =================================================================== --- branches/mb_mio_refactor/matlab/mio4.py 2008-06-24 08:02:32 UTC (rev 4472) +++ branches/mb_mio_refactor/matlab/mio4.py 2008-06-24 08:55:06 UTC (rev 4473) @@ -198,15 +198,6 @@ def matrix_getter_factory(self): return self._array_reader.matrix_getter_factory() - def format_looks_right(self): - # Mat4 files have a zero somewhere in first 4 bytes - self.mat_stream.seek(0) - mopt_bytes = N.ndarray(shape=(4,), - dtype=N.uint8, - buffer = self.mat_stream.read(4)) - self.mat_stream.seek(0) - return 0 in mopt_bytes - def guess_byte_order(self): self.mat_stream.seek(0) mopt = self.read_dtype(N.dtype('i4')) Modified: branches/mb_mio_refactor/matlab/mio5.py =================================================================== --- branches/mb_mio_refactor/matlab/mio5.py 2008-06-24 08:02:32 UTC (rev 4472) +++ branches/mb_mio_refactor/matlab/mio5.py 2008-06-24 08:55:06 UTC (rev 4473) @@ -188,46 +188,27 @@ self.class_dtypes = class_dtypes def read_element(self, copy=True): - raw_tag = self.mat_stream.read(8) - tag = N.ndarray(shape=(), - dtype=self.dtypes['tag_full'], - buffer=raw_tag) - mdtype = tag['mdtype'].item() - - byte_count = mdtype >> 16 - if byte_count: # small data element format - if byte_count > 4: - raise ValueError, 'Too many bytes for sde format' - mdtype = mdtype & 0xFFFF - dt = self.dtypes[mdtype] - el_count = byte_count // dt.itemsize - return N.ndarray(shape=(el_count,), - dtype=dt, - buffer=raw_tag[4:]) - - byte_count = tag['byte_count'].item() + mdtype, byte_count, buf = tagparse(self.mat_stream, swapf) if mdtype == miMATRIX: + # Can this use buf or not? 
return self.current_getter(byte_count).get_array() elif mdtype in self.codecs: # encoded char data - raw_str = self.mat_stream.read(byte_count) codec = self.codecs[mdtype] if not codec: raise TypeError, 'Do not support encoding %d' % mdtype - el = raw_str.decode(codec) + el = buf.decode(codec) else: # numeric data dt = self.dtypes[mdtype] el_count = byte_count // dt.itemsize el = N.ndarray(shape=(el_count,), dtype=dt, - buffer=self.mat_stream.read(byte_count)) + buffer=buf) if copy: el = el.copy() - # Seek to next 64-bit boundary mod8 = byte_count % 8 if mod8: self.mat_stream.seek(8 - mod8, 1) - return el def matrix_getter_factory(self): @@ -460,7 +441,6 @@ uint16_codec - char codec to use for uint16 char arrays (defaults to system default codec) ''' - def __init__(self, mat_stream, byte_order=None, @@ -533,6 +513,8 @@ return self._array_reader.matrix_getter_factory() def guess_byte_order(self): + ''' Guess byte order. + Sets stream pointer to 0 ''' self.mat_stream.seek(126) mi = self.mat_stream.read(2) self.mat_stream.seek(0) @@ -548,16 +530,7 @@ hdict['__version__'] = '%d.%d' % (v_major, v_minor) return hdict - def format_looks_right(self): - # Mat4 files have a zero somewhere in first 4 bytes - self.mat_stream.seek(0) - mopt_bytes = N.ndarray(shape=(4,), - dtype=N.uint8, - buffer = self.mat_stream.read(4)) - self.mat_stream.seek(0) - return 0 not in mopt_bytes - class Mat5MatrixWriter(MatStreamWriter): mat_tag = N.zeros((), mdtypes_template['tag_full']) Modified: branches/mb_mio_refactor/matlab/miobase.py =================================================================== --- branches/mb_mio_refactor/matlab/miobase.py 2008-06-24 08:02:32 UTC (rev 4472) +++ branches/mb_mio_refactor/matlab/miobase.py 2008-06-24 08:55:06 UTC (rev 4473) @@ -22,6 +22,38 @@ res *= e return res +def get_matfile_version(fileobj): + ''' Return '4', '5', or '7' depending on apparent mat file type + Inputs + fileobj - file object implementing seek() and read() + Outputs + version_str - one of (strings) 4, 5, or 7 + + Has the side effect of setting the file read pointer to 0 + ''' + # Mat4 files have a zero somewhere in first 4 bytes + fileobj.seek(0) + mopt_bytes = N.ndarray(shape=(4,), + dtype=N.uint8, + buffer = fileobj.read(4)) + if 0 in mopt_bytes: + fileobj.seek(0) + return '4' + # For 5 or 7 we need to read an integer in the header + # bytes 124 through 128 contain a version integer + # and an endian test string + fileobj.seek(124) + tst_str = fileobj.read(4) + fileobj.seek(0) + maj_ind = int(tst_str[2] == 'I') + verb = ord(tst_str[maj_ind]) + if verb == 1: + return '5' + elif verb == 2: + return '7' + raise ValueError('Unknown mat file type, version %d' % verb) + + class ByteOrder(object): ''' Namespace for byte ordering ''' little_endian = sys.byteorder == 'little' @@ -50,7 +82,7 @@ Attaches to initialized stream Base class for "getters" - which do store state of what they are - reading on itialization, and therefore need to be initialized + reading on initialization, and therefore need to be initialized before each read, and "readers" which do not store state, and only need to be initialized once on object creation @@ -102,11 +134,8 @@ set_dtypes - sets data types defs from byte order matrix_getter_factory - gives object to fetch next matrix from stream - format_looks_right - returns True if format looks correct for - this file type (Mat4, Mat5) guess_byte_order - guesses file byte order from file """ - def __init__(self, mat_stream, byte_order=None, mat_dtype=False, @@ -177,7 +206,8 @@ 'get/set order 
code') def set_dtypes(self): - assert False, 'Not implemented' + ''' Set dtype endianness. In this case we have no dtypes ''' + pass def convert_dtypes(self, dtype_template): dtypes = dtype_template.copy() @@ -188,16 +218,13 @@ def matrix_getter_factory(self): assert False, 'Not implemented' - - def format_looks_right(self): - "Return True if the format looks right for this object" - assert False, 'Not implemented' - + def file_header(self): return {} def guess_byte_order(self): - assert 0, 'Not implemented' + ''' As we do not know what file type we have, assume native ''' + return ByteOrder.native_code def get_processor_func(self): ''' Processing to apply to read matrices Added: branches/mb_mio_refactor/matlab/tagreader.pyx =================================================================== --- branches/mb_mio_refactor/matlab/tagreader.pyx 2008-06-24 08:02:32 UTC (rev 4472) +++ branches/mb_mio_refactor/matlab/tagreader.pyx 2008-06-24 08:55:06 UTC (rev 4473) @@ -0,0 +1,19 @@ +# -*- python -*- +''' Extension to parse matlab 5 tags ''' + +# Import the pieces of the Python C API we need to use (from c_python.pxd): +cimport c_python as py + +def parse(fileobj, int swapf): + ''' Read in the tag + The tag can be normal format (mdtype=u4, byte_count=u4) + or small element format (mdtype=u2, byte_count=u2, data in last 4 bytes) + Small element format is where mdtype (u4) has non-zero high bytes + ''' + cdef py.size_t n_out + cdef char raw_tag[8] + cdef py.FILE* infile + infile = py.PyFile_AsFile(fileobj) + n_out = py.fread(raw_tag, 8, 1, infile) + # Raise Exception if n_out < 1 + return mdtype, byte_count, buf From scipy-svn at scipy.org Tue Jun 24 06:52:40 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Tue, 24 Jun 2008 05:52:40 -0500 (CDT) Subject: [Scipy-svn] r4474 - trunk/scipy/stats/tests Message-ID: <20080624105240.D17B739C088@scipy.org> Author: cdavid Date: 2008-06-24 05:52:35 -0500 (Tue, 24 Jun 2008) New Revision: 4474 Modified: trunk/scipy/stats/tests/test_stats.py Log: Add test for #8. Modified: trunk/scipy/stats/tests/test_stats.py =================================================================== --- trunk/scipy/stats/tests/test_stats.py 2008-06-24 08:55:06 UTC (rev 4473) +++ trunk/scipy/stats/tests/test_stats.py 2008-06-24 10:52:35 UTC (rev 4474) @@ -482,6 +482,15 @@ assert_almost_equal(intercept,0.0) assert_almost_equal(r,0.0) + def test_regress_simple(self): + """Regress a line with sinusoidal noise.""" + x = numpy.linspace(0, 100, 100) + y = 0.2 * numpy.linspace(0, 100, 100) + 10 + y += numpy.sin(numpy.linspace(0, 20, 100)) + + res = stats.linregress(x, y) + assert_almost_equal(res[4], 4.3609875083149268e-3) + # Utility def compare_results(res,desired): From scipy-svn at scipy.org Tue Jun 24 07:41:25 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Tue, 24 Jun 2008 06:41:25 -0500 (CDT) Subject: [Scipy-svn] r4475 - trunk/scipy/integrate Message-ID: <20080624114125.450BF39C8FD@scipy.org> Author: cdavid Date: 2008-06-24 06:41:19 -0500 (Tue, 24 Jun 2008) New Revision: 4475 Modified: trunk/scipy/integrate/__odepack.h trunk/scipy/integrate/odepack.py Log: Do not set imxer to unitiliazed value if not set by fortran. 
Modified: trunk/scipy/integrate/__odepack.h =================================================================== --- trunk/scipy/integrate/__odepack.h 2008-06-24 10:52:35 UTC (rev 4474) +++ trunk/scipy/integrate/__odepack.h 2008-06-24 11:41:19 UTC (rev 4475) @@ -326,7 +326,11 @@ *((int *)ap_nfe->data + (k-1)) = iwork[11]; *((int *)ap_nje->data + (k-1)) = iwork[12]; *((int *)ap_nqu->data + (k-1)) = iwork[13]; - imxer = iwork[15]; + if (istate == -5 || istate == -4) { + imxer = iwork[15]; + } else { + imxer = -1; + } lenrw = iwork[16]; leniw = iwork[17]; *((int *)ap_mused->data + (k-1)) = iwork[18]; @@ -348,7 +352,20 @@ /* Do Full output */ if (full_output) { - return Py_BuildValue("N{s:N,s:N,s:N,s:N,s:N,s:N,s:N,s:N,s:i,s:i,s:i,s:N}i",PyArray_Return(ap_yout),"hu",PyArray_Return(ap_hu),"tcur",PyArray_Return(ap_tcur),"tolsf",PyArray_Return(ap_tolsf),"tsw",PyArray_Return(ap_tsw),"nst",PyArray_Return(ap_nst),"nfe",PyArray_Return(ap_nfe),"nje",PyArray_Return(ap_nje),"nqu",PyArray_Return(ap_nqu),"imxer",imxer,"lenrw",lenrw,"leniw",leniw,"mused",PyArray_Return(ap_mused),istate); + return Py_BuildValue("N{s:N,s:N,s:N,s:N,s:N,s:N,s:N,s:N,s:i,s:i,s:i,s:N}i",PyArray_Return(ap_yout), + "hu",PyArray_Return(ap_hu), + "tcur",PyArray_Return(ap_tcur), + "tolsf",PyArray_Return(ap_tolsf), + "tsw",PyArray_Return(ap_tsw), + "nst",PyArray_Return(ap_nst), + "nfe",PyArray_Return(ap_nfe), + "nje",PyArray_Return(ap_nje), + "nqu",PyArray_Return(ap_nqu), + "imxer",imxer, + "lenrw",lenrw, + "leniw",leniw, + "mused",PyArray_Return(ap_mused), + istate); } else { return Py_BuildValue("Ni",PyArray_Return(ap_yout),istate); Modified: trunk/scipy/integrate/odepack.py =================================================================== --- trunk/scipy/integrate/odepack.py 2008-06-24 10:52:35 UTC (rev 4474) +++ trunk/scipy/integrate/odepack.py 2008-06-24 11:41:19 UTC (rev 4475) @@ -78,7 +78,8 @@ 'nje' cumulative number of jacobian evaluations for each time step 'nqu' a vector of method orders for each successful step. 'imxer' index of the component of largest magnitude in the - weighted local error vector (e / ewt) on an error return. + weighted local error vector (e / ewt) on an error return, -1 + otherwise. 'lenrw' the length of the double work array required. 'leniw' the length of integer work array required. 'mused' a vector of method indicators for each successful time step: From scipy-svn at scipy.org Tue Jun 24 09:53:51 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Tue, 24 Jun 2008 08:53:51 -0500 (CDT) Subject: [Scipy-svn] r4476 - in trunk/scipy/sparse: . 
linalg/eigen/lobpcg linalg/isolve Message-ID: <20080624135351.21E5B39C93E@scipy.org> Author: wnbell Date: 2008-06-24 08:53:46 -0500 (Tue, 24 Jun 2008) New Revision: 4476 Modified: trunk/scipy/sparse/base.py trunk/scipy/sparse/construct.py trunk/scipy/sparse/linalg/eigen/lobpcg/lobpcg.py trunk/scipy/sparse/linalg/isolve/utils.py Log: edited a few docstrings Modified: trunk/scipy/sparse/base.py =================================================================== --- trunk/scipy/sparse/base.py 2008-06-24 11:41:19 UTC (rev 4475) +++ trunk/scipy/sparse/base.py 2008-06-24 13:53:46 UTC (rev 4476) @@ -195,13 +195,14 @@ """Return this matrix in a given sparse format Parameters - ========== - - format : desired sparse matrix format - - If format is None then no conversion is performed - - Other possible values include: - - "csr" for csr_matrix format - - "csc" for csc_matrix format - - "dok" for dok_matrix format and so on + ---------- + format : {string, None} + desired sparse matrix format + - None for no format conversion + - "csr" for csr_matrix format + - "csc" for csc_matrix format + - "lil" for lil_matrix format + - "dok" for dok_matrix format and so on """ Modified: trunk/scipy/sparse/construct.py =================================================================== --- trunk/scipy/sparse/construct.py 2008-06-24 11:41:19 UTC (rev 4475) +++ trunk/scipy/sparse/construct.py 2008-06-24 13:53:46 UTC (rev 4476) @@ -27,30 +27,32 @@ def spdiags(data, diags, m, n, format=None): - """Return a sparse matrix given its diagonals. + """Return a sparse matrix from diagonals. Parameters ---------- - - data : matrix whose rows contain the diagonal values - - diags : diagonals to set - - k = 0 - the main diagonal - - k > 0 - the k-th upper diagonal - - k < 0 - the k-th lower diagonal - - m, n : dimensions of the result - - format : format of the result (e.g. "csr") - - By default (format=None) an appropriate sparse matrix - format is returned. This choice is subject to change. + data : array_like + matrix diagonals stored row-wise + diags : diagonals to set + - k = 0 the main diagonal + - k > 0 the k-th upper diagonal + - k < 0 the k-th lower diagonal + m, n : int + shape of the result + format : format of the result (e.g. "csr") + By default (format=None) an appropriate sparse matrix + format is returned. This choice is subject to change. See Also -------- - The dia_matrix class which implements the DIAgonal format. + The dia_matrix class which implements the DIAgonal format. Example ------- - >>> data = array([[1,2,3,4]]).repeat(3,axis=0) + >>> data = array([[1,2,3,4],[1,2,3,4],[1,2,3,4]]) >>> diags = array([0,-1,2]) - >>> spdiags(data,diags,4,4).todense() + >>> spdiags(data, diags, 4, 4).todense() matrix([[1, 0, 3, 0], [1, 2, 0, 4], [0, 2, 3, 0], @@ -87,8 +89,12 @@ Parameters ---------- - A,B : dense or sparse matrices - format : format of the result (e.g. "csr") + A + matrix + B + matrix + format : string + format of the result (e.g. "csr") Returns ------- @@ -169,15 +175,19 @@ Parameters ---------- - A,B : square dense or sparse matrices - format : format of the result (e.g. "csr") + A + square matrix + B + square matrix + format : string + format of the result (e.g. "csr") Returns - ======= - kronecker sum in a sparse matrix format + ------- + kronecker sum in a sparse matrix format Examples - ======== + -------- """ @@ -206,7 +216,8 @@ blocks sequence of sparse matrices with compatible shapes - format : sparse format of the result (e.g. "csr") + format : string + sparse format of the result (e.g. 
"csr") by default an appropriate sparse matrix format is returned. This choice is subject to change. @@ -232,7 +243,8 @@ blocks sequence of sparse matrices with compatible shapes - format : sparse format of the result (e.g. "csr") + format : string + sparse format of the result (e.g. "csr") by default an appropriate sparse matrix format is returned. This choice is subject to change. Modified: trunk/scipy/sparse/linalg/eigen/lobpcg/lobpcg.py =================================================================== --- trunk/scipy/sparse/linalg/eigen/lobpcg/lobpcg.py 2008-06-24 11:41:19 UTC (rev 4475) +++ trunk/scipy/sparse/linalg/eigen/lobpcg/lobpcg.py 2008-06-24 13:53:46 UTC (rev 4476) @@ -5,7 +5,7 @@ License: BSD -(c) Robert Cimrman, Andrew Knyazev +Authors: Robert Cimrman, Andrew Knyazev Examples in tests directory contributed by Nils Wagner. """ @@ -91,8 +91,9 @@ Example ------- - A = makeOperator( arrayA, (n, n) ) - vectorB = A( vectorX ) + >>> A = makeOperator( arrayA, (n, n) ) + >>> vectorB = A( vectorX ) + """ if operatorInput is None: def ident(x): @@ -203,8 +204,8 @@ Notes ----- If both retLambdaHistory and retResidualNormsHistory are True, the - return tuple has the following format: - (lambda, V, lambda history, residual norms history) + return tuple has the following format + (lambda, V, lambda history, residual norms history) """ failureFlag = True Modified: trunk/scipy/sparse/linalg/isolve/utils.py =================================================================== --- trunk/scipy/sparse/linalg/isolve/utils.py 2008-06-24 11:41:19 UTC (rev 4475) +++ trunk/scipy/sparse/linalg/isolve/utils.py 2008-06-24 13:53:46 UTC (rev 4476) @@ -1,3 +1,7 @@ +__docformat__ = "restructuredtext en" + +__all__ = [] + from warnings import warn from numpy import asanyarray, asarray, asmatrix, array, matrix, zeros @@ -24,27 +28,34 @@ def make_system(A, M, x0, b, xtype=None): """Make a linear system Ax=b - Parameters: - A - LinearOperator - - sparse or dense matrix (or any valid input to aslinearoperator) - M - LinearOperator or None - - preconditioner - - sparse or dense matrix (or any valid input to aslinearoperator) - x0 - array_like or None - - initial guess to iterative method - b - array_like - - right hand side - xtype - None or one of 'fdFD' - - dtype of the x vector + Parameters + ---------- + A : LinearOperator + sparse or dense matrix (or any valid input to aslinearoperator) + M : {LinearOperator, Nones} + preconditioner + sparse or dense matrix (or any valid input to aslinearoperator) + x0 : {array_like, None} + initial guess to iterative method + b : array_like + right hand side + xtype : {'f', 'd', 'F', 'D', None} + dtype of the x vector - Returns: - (A, M, x, b, postprocess) where: - - A is a LinearOperator - - M is a LinearOperator - - x is the initial guess (rank 1 array) - - b is the rhs (rank 1 array) - - postprocess is a function that converts the solution vector - to the appropriate type and dimensions (e.g. (N,1) matrix) + Returns + ------- + (A, M, x, b, postprocess) + A : LinearOperator + matrix of the linear system + M : LinearOperator + preconditioner + x : rank 1 ndarray + initial guess + b : rank 1 ndarray + right hand side + postprocess : function + converts the solution vector to the appropriate + type and dimensions (e.g. 
(N,1) matrix) """ A_ = A From scipy-svn at scipy.org Tue Jun 24 10:23:12 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Tue, 24 Jun 2008 09:23:12 -0500 (CDT) Subject: [Scipy-svn] r4477 - trunk Message-ID: <20080624142312.AAB8B39C90E@scipy.org> Author: cdavid Date: 2008-06-24 09:23:09 -0500 (Tue, 24 Jun 2008) New Revision: 4477 Modified: trunk/THANKS.txt Log: Add Damian Eads and me to THANKS.txt. Modified: trunk/THANKS.txt =================================================================== --- trunk/THANKS.txt 2008-06-24 13:53:46 UTC (rev 4476) +++ trunk/THANKS.txt 2008-06-24 14:23:09 UTC (rev 4477) @@ -29,6 +29,8 @@ sparse matrix module Travis Vaught -- initial work on stats module clean up Jeff Whitaker -- Mac OS X support +David Cournapeau -- bug-fixes, refactor of fftpack and cluster, numscons build. +Damian Eads -- hierarchical clustering Testing: From scipy-svn at scipy.org Tue Jun 24 13:48:36 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Tue, 24 Jun 2008 12:48:36 -0500 (CDT) Subject: [Scipy-svn] r4478 - trunk/scipy/io Message-ID: <20080624174836.E12C339CA24@scipy.org> Author: rkern Date: 2008-06-24 12:48:31 -0500 (Tue, 24 Jun 2008) New Revision: 4478 Modified: trunk/scipy/io/array_import.py Log: BUG: Allow __del__ to work even when self.file never got constructed. Thanks to Yosef Meller for finding this bug and suggesting the fix. #681 Modified: trunk/scipy/io/array_import.py =================================================================== --- trunk/scipy/io/array_import.py 2008-06-24 14:23:09 UTC (rev 4477) +++ trunk/scipy/io/array_import.py 2008-06-24 17:48:31 UTC (rev 4478) @@ -169,7 +169,7 @@ return lines[:-1] def __del__(self): - if hasattr(self.file,'close') and self.should_close_file: + if hasattr(getattr(self, 'file', None),'close') and self.should_close_file: self.file.close() def __getitem__(self, item): From scipy-svn at scipy.org Wed Jun 25 11:43:59 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Wed, 25 Jun 2008 10:43:59 -0500 (CDT) Subject: [Scipy-svn] r4479 - in trunk/scipy/sandbox: .
mkufunc Message-ID: <20080625154359.0AA1639CA6E@scipy.org> Author: ilan Date: 2008-06-25 10:43:39 -0500 (Wed, 25 Jun 2008) New Revision: 4479 Added: trunk/scipy/sandbox/mkufunc/ trunk/scipy/sandbox/mkufunc/bar_code.cc trunk/scipy/sandbox/mkufunc/bar_support_code.cc trunk/scipy/sandbox/mkufunc/driver.py trunk/scipy/sandbox/mkufunc/head.c trunk/scipy/sandbox/mkufunc/interactive.py trunk/scipy/sandbox/mkufunc/mkufunc.py trunk/scipy/sandbox/mkufunc/pypy.c trunk/scipy/sandbox/mkufunc/test_1.py Log: Adding project mkufunc (make U function decorator) to sandbox Added: trunk/scipy/sandbox/mkufunc/bar_code.cc =================================================================== --- trunk/scipy/sandbox/mkufunc/bar_code.cc 2008-06-24 17:48:31 UTC (rev 4478) +++ trunk/scipy/sandbox/mkufunc/bar_code.cc 2008-06-25 15:43:39 UTC (rev 4479) @@ -0,0 +1,14 @@ + +import_ufunc(); + +return_val = PyUFunc_FromFuncAndData( + foo_functions, + foo_data, + foo_signatures, + 2, /* ntypes */ + 1, /* nin */ + 1, /* nout */ + PyUFunc_None, /* identity */ + "foo", /* name */ + "", /* doc */ + 0); Added: trunk/scipy/sandbox/mkufunc/bar_support_code.cc =================================================================== --- trunk/scipy/sandbox/mkufunc/bar_support_code.cc 2008-06-24 17:48:31 UTC (rev 4478) +++ trunk/scipy/sandbox/mkufunc/bar_support_code.cc 2008-06-25 15:43:39 UTC (rev 4479) @@ -0,0 +1,78 @@ + +extern "C" { + double f1_pypy_g_bar(long l_n_1); + double f2_pypy_g_bar(double l_n_5); +} + + +static long foo_1(long x) +{ + return f1_pypy_g_bar(x); +} + +typedef long Func_1(long); + +static void +PyUFunc_1(char **args, npy_intp *dimensions, npy_intp *steps, void *func) +{ + /* printf("PyUFunc_1\n"); */ + + npy_intp n = dimensions[0]; + npy_intp is0 = steps[0]; + npy_intp os = steps[1]; + char *ip0 = args[0]; + char *op = args[1]; + Func_1 *f = (Func_1 *) func; + npy_intp i; + + for(i = 0; i < n; i++, ip0 += is0, op += os) { + long *in1 = (long *)ip0; + long *out = (long *)op; + + *out = f(*in1); + } +} + +static double foo_2(double x) +{ + return f2_pypy_g_bar(x); +} + +typedef double Func_2(double); + +static void +PyUFunc_2(char **args, npy_intp *dimensions, npy_intp *steps, void *func) +{ + /* printf("PyUFunc_2\n"); */ + + npy_intp n = dimensions[0]; + npy_intp is0 = steps[0]; + npy_intp os = steps[1]; + char *ip0 = args[0]; + char *op = args[1]; + Func_2 *f = (Func_2 *) func; + npy_intp i; + + for(i = 0; i < n; i++, ip0 += is0, op += os) { + double *in1 = (double *)ip0; + double *out = (double *)op; + + *out = f(*in1); + } +} + + +static PyUFuncGenericFunction foo_functions[] = { + PyUFunc_1, + PyUFunc_2, +}; + +static void *foo_data[] = { + (void *) foo_1, + (void *) foo_2, +}; + +static char foo_signatures[] = { + NPY_LONG, NPY_LONG, /* 1 */ + NPY_DOUBLE, NPY_DOUBLE, /* 2 */ +}; Added: trunk/scipy/sandbox/mkufunc/driver.py =================================================================== --- trunk/scipy/sandbox/mkufunc/driver.py 2008-06-24 17:48:31 UTC (rev 4478) +++ trunk/scipy/sandbox/mkufunc/driver.py 2008-06-25 15:43:39 UTC (rev 4479) @@ -0,0 +1,728 @@ +import sys, os + +from pypy.translator.translator import TranslationContext, graphof +from pypy.translator.tool.taskengine import SimpleTaskEngine +from pypy.translator.goal import query +from pypy.translator.goal.timing import Timer +from pypy.annotation import model as annmodel +from pypy.annotation.listdef import s_list_of_strings +from pypy.annotation import policy as annpolicy +from py.compat import optparse +from pypy.tool.udir import udir + +import 
py +from pypy.tool.ansi_print import ansi_log +log = py.log.Producer("translation") +py.log.setconsumer("translation", ansi_log) + +DEFAULTS = { + 'translation.gc': 'ref', + 'translation.cc': None, + 'translation.profopt': None, + + 'translation.thread': False, # influences GC policy + + 'translation.stackless': False, + 'translation.debug': True, + 'translation.insist': False, + 'translation.backend': 'c', + 'translation.fork_before': None, + 'translation.backendopt.raisingop2direct_call' : False, + 'translation.backendopt.merge_if_blocks': True, +} + + +def taskdef(taskfunc, deps, title, new_state=None, expected_states=[], + idemp=False, earlycheck=None): + taskfunc.task_deps = deps + taskfunc.task_title = title + taskfunc.task_newstate = None + taskfunc.task_expected_states = expected_states + taskfunc.task_idempotent = idemp + taskfunc.task_earlycheck = earlycheck + return taskfunc + +# TODO: +# sanity-checks using states + +_BACKEND_TO_TYPESYSTEM = { + 'c': 'lltype', + 'llvm': 'lltype' +} + +def backend_to_typesystem(backend): + return _BACKEND_TO_TYPESYSTEM.get(backend, 'ootype') + +# set of translation steps to profile +PROFILE = set([]) + +class Instrument(Exception): + pass + + +class ProfInstrument(object): + name = "profinstrument" + def __init__(self, datafile, compiler): + self.datafile = datafile + self.compiler = compiler + + def first(self): + self.compiler._build() + + def probe(self, exe, args): + from py.compat import subprocess + env = os.environ.copy() + env['_INSTRUMENT_COUNTERS'] = str(self.datafile) + subprocess.call("'%s' %s" % (exe, args), env=env, shell=True) + + def after(self): + # xxx + os._exit(0) + + +class TranslationDriver(SimpleTaskEngine): + + def __init__(self, setopts=None, default_goal=None, + disable=[], + exe_name=None, extmod_name=None, + config=None, overrides=None): + self.timer = Timer() + SimpleTaskEngine.__init__(self) + + self.log = log + + if config is None: + from pypy.config.pypyoption import get_pypy_config + config = get_pypy_config(DEFAULTS, translating=True) + self.config = config + if overrides is not None: + self.config.override(overrides) + + if setopts is not None: + self.config.set(**setopts) + + self.exe_name = exe_name + self.extmod_name = extmod_name + + self.done = {} + + self.disable(disable) + + if default_goal: + default_goal, = self.backend_select_goals([default_goal]) + if default_goal in self._maybe_skip(): + default_goal = None + + self.default_goal = default_goal + self.extra_goals = [] + self.exposed = [] + + # expose tasks + def expose_task(task, backend_goal=None): + if backend_goal is None: + backend_goal = task + def proc(): + return self.proceed(backend_goal) + self.exposed.append(task) + setattr(self, task, proc) + + backend, ts = self.get_backend_and_type_system() + for task in self.tasks: + explicit_task = task + parts = task.split('_') + if len(parts) == 1: + if task in ('annotate'): + expose_task(task) + else: + task, postfix = parts + if task in ('rtype', 'backendopt', 'llinterpret', + 'prehannotatebackendopt', 'hintannotate', + 'timeshift'): + if ts: + if ts == postfix: + expose_task(task, explicit_task) + else: + expose_task(explicit_task) + elif task in ('source', 'compile', 'run'): + if backend: + if backend == postfix: + expose_task(task, explicit_task) + elif ts: + if ts == backend_to_typesystem(postfix): + expose_task(explicit_task) + else: + expose_task(explicit_task) + + def set_extra_goals(self, goals): + self.extra_goals = goals + + def get_info(self): # XXX more? 
+ d = {'backend': self.config.translation.backend} + return d + + def get_backend_and_type_system(self): + type_system = self.config.translation.type_system + backend = self.config.translation.backend + return backend, type_system + + def backend_select_goals(self, goals): + backend, ts = self.get_backend_and_type_system() + postfixes = [''] + ['_'+p for p in (backend, ts) if p] + l = [] + for goal in goals: + for postfix in postfixes: + cand = "%s%s" % (goal, postfix) + if cand in self.tasks: + new_goal = cand + break + else: + raise Exception, "cannot infer complete goal from: %r" % goal + l.append(new_goal) + return l + + def disable(self, to_disable): + self._disabled = to_disable + + def _maybe_skip(self): + maybe_skip = [] + if self._disabled: + for goal in self.backend_select_goals(self._disabled): + maybe_skip.extend(self._depending_on_closure(goal)) + return dict.fromkeys(maybe_skip).keys() + + + def setup(self, entry_point, inputtypes, policy=None, extra={}, empty_translator=None): + standalone = inputtypes is None + self.standalone = standalone + + if standalone: + inputtypes = [s_list_of_strings] + self.inputtypes = inputtypes + + if policy is None: + policy = annpolicy.AnnotatorPolicy() + if standalone: + policy.allow_someobjects = False + self.policy = policy + + self.extra = extra + + if empty_translator: + translator = empty_translator + else: + translator = TranslationContext(config=self.config) + + self.entry_point = entry_point + self.translator = translator + self.libdef = None + + self.translator.driver_instrument_result = self.instrument_result + + def setup_library(self, libdef, policy=None, extra={}, empty_translator=None): + self.setup(None, None, policy, extra, empty_translator) + self.libdef = libdef + + def instrument_result(self, args): + backend, ts = self.get_backend_and_type_system() + if backend != 'c' or sys.platform == 'win32': + raise Exception("instrumentation requires the c backend" + " and unix for now") + from pypy.tool.udir import udir + + datafile = udir.join('_instrument_counters') + makeProfInstrument = lambda compiler: ProfInstrument(datafile, compiler) + + pid = os.fork() + if pid == 0: + # child compiling and running with instrumentation + self.config.translation.instrument = True + self.config.translation.instrumentctl = (makeProfInstrument, + args) + raise Instrument + else: + pid, status = os.waitpid(pid, 0) + if os.WIFEXITED(status): + status = os.WEXITSTATUS(status) + if status != 0: + raise Exception, "instrumentation child failed: %d" % status + else: + raise Exception, "instrumentation child aborted" + import array, struct + n = datafile.size()//struct.calcsize('L') + datafile = datafile.open('rb') + counters = array.array('L') + counters.fromfile(datafile, n) + datafile.close() + return counters + + def info(self, msg): + log.info(msg) + + def _profile(self, goal, func): + from cProfile import Profile + from pypy.tool.lsprofcalltree import KCacheGrind + d = {'func':func} + prof = Profile() + prof.runctx("res = func()", globals(), d) + KCacheGrind(prof).output(open(goal + ".out", "w")) + return d['res'] + + def _do(self, goal, func, *args, **kwds): + title = func.task_title + if goal in self.done: + self.log.info("already done: %s" % title) + return + else: + self.log.info("%s..." 
% title) + self.timer.start_event(goal) + try: + instrument = False + try: + if goal in PROFILE: + res = self._profile(goal, func) + else: + res = func() + except Instrument: + instrument = True + if not func.task_idempotent: + self.done[goal] = True + if instrument: + self.proceed('compile') + assert False, 'we should not get here' + finally: + self.timer.end_event(goal) + return res + + def task_annotate(self): + # includes annotation and annotatation simplifications + translator = self.translator + policy = self.policy + self.log.info('with policy: %s.%s' % + (policy.__class__.__module__, policy.__class__.__name__)) + + annmodel.DEBUG = self.config.translation.debug + annotator = translator.buildannotator(policy=policy) + + if self.entry_point: + s = annotator.build_types(self.entry_point, self.inputtypes) + + self.sanity_check_annotation() + if self.standalone and s.knowntype != int: + raise Exception("stand-alone program entry point must return an " + "int (and not, e.g., None or always raise an " + "exception).") + annotator.simplify() + return s + else: + assert self.libdef is not None + for func, inputtypes in self.libdef.functions: + annotator.build_types(func, inputtypes) + self.sanity_check_annotation() + annotator.simplify() + # + task_annotate = taskdef(task_annotate, [], "Annotating&simplifying") + + + def sanity_check_annotation(self): + translator = self.translator + irreg = query.qoutput(query.check_exceptblocks_qgen(translator)) + if irreg: + self.log.info("Some exceptblocks seem insane") + + lost = query.qoutput(query.check_methods_qgen(translator)) + assert not lost, "lost methods, something gone wrong with the annotation of method defs" + + so = query.qoutput(query.polluted_qgen(translator)) + tot = len(translator.graphs) + percent = int(tot and (100.0*so / tot) or 0) + # if there are a few SomeObjects even if the policy doesn't allow + # them, it means that they were put there in a controlled way + # and then it's not a warning. 
+ if not translator.annotator.policy.allow_someobjects: + pr = self.log.info + elif percent == 0: + pr = self.log.info + else: + pr = log.WARNING + pr("-- someobjectness %2d%% (%d of %d functions polluted by SomeObjects)" % (percent, so, tot)) + + + + def task_rtype_lltype(self): + rtyper = self.translator.buildrtyper(type_system='lltype') + insist = not self.config.translation.insist + rtyper.specialize(dont_simplify_again=True, + crash_on_first_typeerror=insist) + # + task_rtype_lltype = taskdef(task_rtype_lltype, ['annotate'], "RTyping") + RTYPE = 'rtype_lltype' + + def task_rtype_ootype(self): + # Maybe type_system should simply be an option used in task_rtype + insist = not self.config.translation.insist + rtyper = self.translator.buildrtyper(type_system="ootype") + rtyper.specialize(dont_simplify_again=True, + crash_on_first_typeerror=insist) + # + task_rtype_ootype = taskdef(task_rtype_ootype, ['annotate'], "ootyping") + OOTYPE = 'rtype_ootype' + + def task_prehannotatebackendopt_lltype(self): + from pypy.translator.backendopt.all import backend_optimizations + backend_optimizations(self.translator, + inline_threshold=0, + merge_if_blocks=True, + constfold=True, + raisingop2direct_call=False, + remove_asserts=True) + # + task_prehannotatebackendopt_lltype = taskdef( + task_prehannotatebackendopt_lltype, + [RTYPE], + "Backendopt before Hint-annotate") + + def task_hintannotate_lltype(self): + from pypy.jit.hintannotator.annotator import HintAnnotator + from pypy.jit.hintannotator.model import OriginFlags + from pypy.jit.hintannotator.model import SomeLLAbstractConstant + + get_portal = self.extra['portal'] + PORTAL, POLICY = get_portal(self) + t = self.translator + self.portal_graph = graphof(t, PORTAL) + + hannotator = HintAnnotator(base_translator=t, policy=POLICY) + self.hint_translator = hannotator.translator + hs = hannotator.build_types(self.portal_graph, + [SomeLLAbstractConstant(v.concretetype, + {OriginFlags(): True}) + for v in self.portal_graph.getargs()]) + count = hannotator.bookkeeper.nonstuboriggraphcount + stubcount = hannotator.bookkeeper.stuboriggraphcount + self.log.info("The hint-annotator saw %d graphs" + " (and made stubs for %d graphs)." 
% (count, stubcount)) + n = len(list(hannotator.translator.graphs[0].iterblocks())) + self.log.info("portal has %d blocks" % n) + self.hannotator = hannotator + # + task_hintannotate_lltype = taskdef(task_hintannotate_lltype, + ['prehannotatebackendopt_lltype'], + "Hint-annotate") + + def task_timeshift_lltype(self): + from pypy.jit.timeshifter.hrtyper import HintRTyper + from pypy.jit.codegen import detect_cpu + cpu = detect_cpu.autodetect() + if cpu == 'i386': + from pypy.jit.codegen.i386.rgenop import RI386GenOp as RGenOp + RGenOp.MC_SIZE = 32 * 1024 * 1024 + elif cpu == 'ppc': + from pypy.jit.codegen.ppc.rgenop import RPPCGenOp as RGenOp + RGenOp.MC_SIZE = 32 * 1024 * 1024 + else: + raise Exception('Unsuported cpu %r'%cpu) + + del self.hint_translator + ha = self.hannotator + t = self.translator + # make the timeshifted graphs + hrtyper = HintRTyper(ha, t.rtyper, RGenOp) + hrtyper.specialize(origportalgraph=self.portal_graph, view=False) + # + task_timeshift_lltype = taskdef(task_timeshift_lltype, + ["hintannotate_lltype"], + "Timeshift") + + def task_backendopt_lltype(self): + from pypy.translator.backendopt.all import backend_optimizations + backend_optimizations(self.translator) + # + task_backendopt_lltype = taskdef(task_backendopt_lltype, + [RTYPE, + '??timeshift_lltype'], + "lltype back-end optimisations") + BACKENDOPT = 'backendopt_lltype' + + def task_backendopt_ootype(self): + from pypy.translator.backendopt.all import backend_optimizations + backend_optimizations(self.translator) + # + task_backendopt_ootype = taskdef(task_backendopt_ootype, + [OOTYPE], "ootype back-end optimisations") + OOBACKENDOPT = 'backendopt_ootype' + + + def task_stackcheckinsertion_lltype(self): + from pypy.translator.transform import insert_ll_stackcheck + count = insert_ll_stackcheck(self.translator) + self.log.info("inserted %d stack checks." % (count,)) + + task_stackcheckinsertion_lltype = taskdef( + task_stackcheckinsertion_lltype, + ['?'+BACKENDOPT, RTYPE, 'annotate'], + "inserting stack checks") + STACKCHECKINSERTION = 'stackcheckinsertion_lltype' + + def possibly_check_for_boehm(self): + if self.config.translation.gc == "boehm": + from pypy.translator.tool.cbuild import check_boehm_presence + from pypy.translator.tool.cbuild import CompilationError + try: + check_boehm_presence(noerr=False) + except CompilationError, e: + i = 'Boehm GC not installed. Try e.g. 
"translate.py --gc=hybrid"' + raise CompilationError('%s\n--------------------\n%s' % (e, i)) + + def task_database_c(self): + translator = self.translator + if translator.annotator is not None: + translator.frozen = True + + standalone = self.standalone + + if standalone: + from pypy.translator.c.genc import CStandaloneBuilder as CBuilder + else: + from pypy.translator.c.genc import CExtModuleBuilder as CBuilder + cbuilder = CBuilder(self.translator, self.entry_point, + config=self.config) + cbuilder.stackless = self.config.translation.stackless + if not standalone: # xxx more messy + cbuilder.modulename = self.extmod_name + database = cbuilder.build_database() + self.log.info("database for generating C source was created") + self.cbuilder = cbuilder + self.database = database + # + task_database_c = taskdef(task_database_c, + [STACKCHECKINSERTION, '?'+BACKENDOPT, RTYPE, '?annotate'], + "Creating database for generating c source", + earlycheck = possibly_check_for_boehm) + + def task_source_c(self): # xxx messy + translator = self.translator + cbuilder = self.cbuilder + database = self.database + c_source_filename = cbuilder.generate_source(database) + self.log.info("written: %s" % (c_source_filename,)) + self.c_source_filename = str(c_source_filename) + # + task_source_c = taskdef(task_source_c, ['database_c'], "Generating c source") + + def task_compile_c(self): # xxx messy + cbuilder = self.cbuilder + cbuilder.compile() + + if self.standalone: + self.c_entryp = cbuilder.executable_name + self.create_exe() + else: + self.c_entryp = cbuilder.get_entry_point() + # + task_compile_c = taskdef(task_compile_c, ['source_c'], "Compiling c source") + + + def task_run_c(self): + self.backend_run('c') + # + task_run_c = taskdef(task_run_c, ['compile_c'], + "Running compiled c source", + idemp=True) + + def task_llinterpret_lltype(self): + from pypy.rpython.llinterp import LLInterpreter + py.log.setconsumer("llinterp operation", None) + + translator = self.translator + interp = LLInterpreter(translator.rtyper) + bk = translator.annotator.bookkeeper + graph = bk.getdesc(self.entry_point).getuniquegraph() + v = interp.eval_graph(graph, + self.extra.get('get_llinterp_args', + lambda: [])()) + + log.llinterpret.event("result -> %s" % v) + # + task_llinterpret_lltype = taskdef(task_llinterpret_lltype, + [STACKCHECKINSERTION, '?'+BACKENDOPT, RTYPE], + "LLInterpreting") + + def task_source_llvm(self): + translator = self.translator + if translator.annotator is None: + raise ValueError, "llvm requires annotation." 
+ + from pypy.translator.llvm import genllvm + + self.llvmgen = genllvm.GenLLVM(translator, self.standalone) + + llvm_filename = self.llvmgen.gen_source(self.entry_point) + self.log.info("written: %s" % (llvm_filename,)) + # + task_source_llvm = taskdef(task_source_llvm, + [STACKCHECKINSERTION, BACKENDOPT, RTYPE], + "Generating llvm source") + + def task_compile_llvm(self): + gen = self.llvmgen + if self.standalone: + exe_name = (self.exe_name or 'testing') % self.get_info() + self.c_entryp = gen.compile_standalone(exe_name) + self.create_exe() + else: + self.c_module, self.c_entryp = gen.compile_module() + # + task_compile_llvm = taskdef(task_compile_llvm, + ['source_llvm'], + "Compiling llvm source") + + def task_run_llvm(self): + self.backend_run('llvm') + # + task_run_llvm = taskdef(task_run_llvm, ['compile_llvm'], + "Running compiled llvm source", + idemp=True) + + def task_source_js(self): + from pypy.translator.js.js import JS + self.gen = JS(self.translator, functions=[self.entry_point], + stackless=self.config.translation.stackless) + filename = self.gen.write_source() + self.log.info("Wrote %s" % (filename,)) + task_source_js = taskdef(task_source_js, + [OOTYPE], + 'Generating Javascript source') + + def task_compile_js(self): + pass + task_compile_js = taskdef(task_compile_js, ['source_js'], + 'Skipping Javascript compilation') + + def task_run_js(self): + pass + task_run_js = taskdef(task_run_js, ['compile_js'], + 'Please manually run the generated code') + + def task_source_cli(self): + from pypy.translator.cli.gencli import GenCli + from pypy.translator.cli.entrypoint import get_entrypoint + + if self.entry_point is not None: # executable mode + entry_point_graph = self.translator.graphs[0] + entry_point = get_entrypoint(entry_point_graph) + else: + # library mode + assert self.libdef is not None + bk = self.translator.annotator.bookkeeper + entry_point = self.libdef.get_entrypoint(bk) + + self.gen = GenCli(udir, self.translator, entry_point, config=self.config) + filename = self.gen.generate_source() + self.log.info("Wrote %s" % (filename,)) + task_source_cli = taskdef(task_source_cli, ["?" + OOBACKENDOPT, OOTYPE], + 'Generating CLI source') + + def task_compile_cli(self): + from pypy.translator.oosupport.support import unpatch_os + from pypy.translator.cli.test.runtest import CliFunctionWrapper + filename = self.gen.build_exe() + self.c_entryp = CliFunctionWrapper(filename) + # restore original os values + if hasattr(self, 'old_cli_defs'): + unpatch_os(self.old_cli_defs) + + self.log.info("Compiled %s" % filename) + if self.standalone and self.exe_name: + self.copy_cli_exe() + task_compile_cli = taskdef(task_compile_cli, ['source_cli'], + 'Compiling CLI source') + + def task_run_cli(self): + pass + task_run_cli = taskdef(task_run_cli, ['compile_cli'], + 'XXX') + + def task_source_jvm(self): + from pypy.translator.jvm.genjvm import GenJvm + from pypy.translator.jvm.node import EntryPoint + + entry_point_graph = self.translator.graphs[0] + is_func = not self.standalone + entry_point = EntryPoint(entry_point_graph, is_func, is_func) + self.gen = GenJvm(udir, self.translator, entry_point) + self.jvmsource = self.gen.generate_source() + self.log.info("Wrote JVM code") + task_source_jvm = taskdef(task_source_jvm, ["?" 
+ OOBACKENDOPT, OOTYPE], + 'Generating JVM source') + + def task_compile_jvm(self): + from pypy.translator.oosupport.support import unpatch_os + from pypy.translator.jvm.test.runtest import JvmGeneratedSourceWrapper + self.jvmsource.compile() + self.c_entryp = JvmGeneratedSourceWrapper(self.jvmsource) + # restore original os values + if hasattr(self, 'old_cli_defs'): + unpatch_os(self.old_cli_defs) + self.log.info("Compiled JVM source") + if self.standalone and self.exe_name: + self.copy_jvm_jar() + task_compile_jvm = taskdef(task_compile_jvm, ['source_jvm'], + 'Compiling JVM source') + + def task_run_jvm(self): + pass + task_run_jvm = taskdef(task_run_jvm, ['compile_jvm'], + 'XXX') + + def proceed(self, goals): + if not goals: + if self.default_goal: + goals = [self.default_goal] + else: + self.log.info("nothing to do") + return + elif isinstance(goals, str): + goals = [goals] + goals.extend(self.extra_goals) + goals = self.backend_select_goals(goals) + return self._execute(goals, task_skip = self._maybe_skip()) + + def from_targetspec(targetspec_dic, config=None, args=None, + empty_translator=None, + disable=[], + default_goal=None): + if args is None: + args = [] + + driver = TranslationDriver(config=config, default_goal=default_goal, + disable=disable) + # patch some attributes of the os module to make sure they + # have the same value on every platform. + backend, ts = driver.get_backend_and_type_system() + if backend in ('cli', 'jvm'): + from pypy.translator.oosupport.support import patch_os + driver.old_cli_defs = patch_os() + + target = targetspec_dic['target'] + spec = target(driver, args) + + try: + entry_point, inputtypes, policy = spec + except ValueError: + entry_point, inputtypes = spec + policy = None + + driver.setup(entry_point, inputtypes, + policy=policy, + extra=targetspec_dic, + empty_translator=empty_translator) + + return driver + + from_targetspec = staticmethod(from_targetspec) + + def prereq_checkpt_rtype(self): + assert 'pypy.rpython.rmodel' not in sys.modules, ( + "cannot fork because the rtyper has already been imported") + prereq_checkpt_rtype_lltype = prereq_checkpt_rtype + prereq_checkpt_rtype_ootype = prereq_checkpt_rtype Added: trunk/scipy/sandbox/mkufunc/head.c =================================================================== --- trunk/scipy/sandbox/mkufunc/head.c 2008-06-24 17:48:31 UTC (rev 4478) +++ trunk/scipy/sandbox/mkufunc/head.c 2008-06-25 15:43:39 UTC (rev 4479) @@ -0,0 +1,380 @@ + +#include <math.h> + +/* ================================================== g_prerequisite.h === */ + +typedef unsigned char bool_t; + +/* ================================================== exception.h ======== */ + +#define RPY_DEBUG_RETURN() /* nothing */ + + +/* ================================================== int.h ============== */ + +/*** unary operations ***/ + +#define OP_INT_IS_TRUE(x,r) OP_INT_NE(x,0,r) + +#define OP_INT_INVERT(x,r) r = ~((x)) + +#define OP_INT_NEG(x,r) r = -(x) + +#define OP_INT_NEG_OVF(x,r) \ + if ((x) == LONG_MIN) FAIL_OVF("integer negate"); \ + OP_INT_NEG(x,r) +#define OP_LLONG_NEG_OVF(x,r) \ + if ((x) == LLONG_MIN) FAIL_OVF("integer negate"); \ + OP_LLONG_NEG(x,r) + +#define OP_INT_ABS(x,r) r = (x) >= 0 ?
x : -(x) + +#define OP_INT_ABS_OVF(x,r) \ + if ((x) == LONG_MIN) FAIL_OVF("integer absolute"); \ + OP_INT_ABS(x,r) +#define OP_LLONG_ABS_OVF(x,r) \ + if ((x) == LLONG_MIN) FAIL_OVF("integer absolute"); \ + OP_LLONG_ABS(x,r) + +/*** binary operations ***/ + +#define OP_INT_EQ(x,y,r) r = ((x) == (y)) +#define OP_INT_NE(x,y,r) r = ((x) != (y)) +#define OP_INT_LE(x,y,r) r = ((x) <= (y)) +#define OP_INT_GT(x,y,r) r = ((x) > (y)) +#define OP_INT_LT(x,y,r) r = ((x) < (y)) +#define OP_INT_GE(x,y,r) r = ((x) >= (y)) + +/* addition, subtraction */ + +#define OP_INT_ADD(x,y,r) r = (x) + (y) + +#define OP_INT_ADD_OVF(x,y,r) \ + OP_INT_ADD(x,y,r); \ + if ((r^(x)) >= 0 || (r^(y)) >= 0); \ + else FAIL_OVF("integer addition") + +#define OP_INT_ADD_NONNEG_OVF(x,y,r) /* y can be assumed >= 0 */ \ + OP_INT_ADD(x,y,r); \ + if (r >= (x)); \ + else FAIL_OVF("integer addition") +/* XXX can a C compiler be too clever and think it can "prove" that + * r >= x always hold above? */ + +#define OP_INT_SUB(x,y,r) r = (x) - (y) + +#define OP_INT_SUB_OVF(x,y,r) \ + OP_INT_SUB(x,y,r); \ + if ((r^(x)) >= 0 || (r^~(y)) >= 0); \ + else FAIL_OVF("integer subtraction") + +#define OP_INT_MUL(x,y,r) r = (x) * (y) + +#if defined(HAVE_LONG_LONG) && SIZE_OF_LONG_LONG < SIZE_OF_LONG +# define OP_INT_MUL_OVF_LL 1 +#else +# define OP_INT_MUL_OVF_LL 0 +#endif + +#if !OP_INT_MUL_OVF_LL + +#define OP_INT_MUL_OVF(x,y,r) \ + if (op_int_mul_ovf(x,y,&r)); \ + else FAIL_OVF("integer multiplication") + +#else + +#define OP_INT_MUL_OVF(x,y,r) \ + { \ + PY_LONG_LONG lr = (PY_LONG_LONG)(x) * (PY_LONG_LONG)(y); \ + r = (long)lr; \ + if ((PY_LONG_LONG)r == lr); \ + else FAIL_OVF("integer multiplication"); \ + } +#endif + +/* shifting */ + +/* NB. shifting has same limitations as C: the shift count must be + >= 0 and < LONG_BITS.
*/ +#define OP_INT_RSHIFT(x,y,r) r = Py_ARITHMETIC_RIGHT_SHIFT(long, x, y) +#define OP_UINT_RSHIFT(x,y,r) r = (x) >> (y) +#define OP_LLONG_RSHIFT(x,y,r) r = Py_ARITHMETIC_RIGHT_SHIFT(PY_LONG_LONG,x,y) +#define OP_ULLONG_RSHIFT(x,y,r) r = (x) >> (y) + +#define OP_INT_LSHIFT(x,y,r) r = (x) << (y) +#define OP_UINT_LSHIFT(x,y,r) r = (x) << (y) +#define OP_LLONG_LSHIFT(x,y,r) r = (x) << (y) +#define OP_ULLONG_LSHIFT(x,y,r) r = (x) << (y) + +#define OP_INT_LSHIFT_OVF(x,y,r) \ + OP_INT_LSHIFT(x,y,r); \ + if ((x) != Py_ARITHMETIC_RIGHT_SHIFT(long, r, (y))) \ + FAIL_OVF("x<<y losing bits or changing sign") + +#define OP_INT_RSHIFT_VAL(x,y,r) \ + if ((y) >= 0) { OP_INT_RSHIFT(x,y,r); } \ + else FAIL_VAL("negative shift count") +#define OP_LLONG_RSHIFT_VAL(x,y,r) \ + if ((y) >= 0) { OP_LLONG_RSHIFT(x,y,r); } \ + else FAIL_VAL("negative shift count") + +#define OP_INT_LSHIFT_VAL(x,y,r) \ + if ((y) >= 0) { OP_INT_LSHIFT(x,y,r); } \ + else FAIL_VAL("negative shift count") +#define OP_LLONG_LSHIFT_VAL(x,y,r) \ + if ((y) >= 0) { OP_LLONG_LSHIFT(x,y,r); } \ + else FAIL_VAL("negative shift count") + +#define OP_INT_LSHIFT_OVF_VAL(x,y,r) \ + if ((y) >= 0) { OP_INT_LSHIFT_OVF(x,y,r); } \ + else FAIL_VAL("negative shift count") + +/* pff */ +#define OP_UINT_LSHIFT_VAL(x,y,r) \ + if ((y) >= 0) { OP_UINT_LSHIFT(x,y,r); } \ + else FAIL_VAL("negative shift count") +#define OP_ULLONG_LSHIFT_VAL(x,y,r) \ + if ((y) >= 0) { OP_ULLONG_LSHIFT(x,y,r); } \ + else FAIL_VAL("negative shift count") + +#define OP_UINT_RSHIFT_VAL(x,y,r) \ + if ((y) >= 0) { OP_UINT_RSHIFT(x,y,r); } \ + else FAIL_VAL("negative shift count") +#define OP_ULLONG_RSHIFT_VAL(x,y,r) \ + if ((y) >= 0) { OP_ULLONG_RSHIFT(x,y,r); } \ + else FAIL_VAL("negative shift count") + + +/* floor division */ + +#define OP_INT_FLOORDIV(x,y,r) r = (x) / (y) +#define OP_UINT_FLOORDIV(x,y,r) r = (x) / (y) +#define OP_LLONG_FLOORDIV(x,y,r) r = (x) / (y) +#define OP_ULLONG_FLOORDIV(x,y,r) r = (x) / (y) + +#define OP_INT_FLOORDIV_OVF(x,y,r) \ + if ((y) == -1 && (x) == LONG_MIN) \ + { FAIL_OVF("integer division"); } \ + else OP_INT_FLOORDIV(x,y,r) + +#define OP_INT_FLOORDIV_ZER(x,y,r) \ + if ((y)) { OP_INT_FLOORDIV(x,y,r); } \ + else FAIL_ZER("integer division") +#define OP_UINT_FLOORDIV_ZER(x,y,r) \ + if ((y)) { OP_UINT_FLOORDIV(x,y,r); } \ + else FAIL_ZER("unsigned integer division") +#define OP_LLONG_FLOORDIV_ZER(x,y,r) \ + if ((y)) { OP_LLONG_FLOORDIV(x,y,r); } \ + else FAIL_ZER("integer division") +#define OP_ULLONG_FLOORDIV_ZER(x,y,r) \ + if ((y)) { OP_ULLONG_FLOORDIV(x,y,r); } \ + else FAIL_ZER("unsigned integer division") + +#define OP_INT_FLOORDIV_OVF_ZER(x,y,r) \ + if ((y)) { OP_INT_FLOORDIV_OVF(x,y,r); } \ + else FAIL_ZER("integer division") + +/* modulus */ + +#define OP_INT_MOD(x,y,r) r = (x) % (y) +#define OP_UINT_MOD(x,y,r) r = (x) % (y) +#define OP_LLONG_MOD(x,y,r) r = (x) % (y) +#define OP_ULLONG_MOD(x,y,r) r = (x) % (y) + +#define OP_INT_MOD_OVF(x,y,r) \ + if ((y) == -1 && (x) == LONG_MIN) \ + { FAIL_OVF("integer modulo"); }\ + else OP_INT_MOD(x,y,r) + +#define OP_INT_MOD_ZER(x,y,r) \ + if ((y)) { OP_INT_MOD(x,y,r); } \ + else FAIL_ZER("integer modulo") +#define OP_UINT_MOD_ZER(x,y,r) \ + if ((y)) { OP_UINT_MOD(x,y,r); } \ + else FAIL_ZER("unsigned integer modulo") +#define OP_LLONG_MOD_ZER(x,y,r) \ + if ((y)) { OP_LLONG_MOD(x,y,r); } \ + else FAIL_ZER("integer modulo") +#define OP_ULLONG_MOD_ZER(x,y,r) \ + if ((y)) { OP_ULLONG_MOD(x,y,r); } \ + else FAIL_ZER("integer modulo") + +#define OP_INT_MOD_OVF_ZER(x,y,r) \ + if ((y)) { OP_INT_MOD_OVF(x,y,r); } \ + else FAIL_ZER("integer modulo") + +/* bit operations */ + +#define
OP_INT_AND(x,y,r) r = (x) & (y) +#define OP_INT_OR( x,y,r) r = (x) | (y) +#define OP_INT_XOR(x,y,r) r = (x) ^ (y) + +/*** conversions ***/ + +#define OP_CAST_BOOL_TO_INT(x,r) r = (long)(x) +#define OP_CAST_BOOL_TO_UINT(x,r) r = (unsigned long)(x) +#define OP_CAST_UINT_TO_INT(x,r) r = (long)(x) +#define OP_CAST_INT_TO_UINT(x,r) r = (unsigned long)(x) +#define OP_CAST_INT_TO_LONGLONG(x,r) r = (long long)(x) +#define OP_CAST_CHAR_TO_INT(x,r) r = (long)((unsigned char)(x)) +#define OP_CAST_INT_TO_CHAR(x,r) r = (char)(x) +#define OP_CAST_PTR_TO_INT(x,r) r = (long)(x) /* XXX */ + +#define OP_TRUNCATE_LONGLONG_TO_INT(x,r) r = (long)(x) + +#define OP_CAST_UNICHAR_TO_INT(x,r) r = (long)((unsigned long)(x)) /*?*/ +#define OP_CAST_INT_TO_UNICHAR(x,r) r = (unsigned int)(x) + +/* bool operations */ + +#define OP_BOOL_NOT(x, r) r = !(x) + +/* _________________ certain implementations __________________ */ + +#if !OP_INT_MUL_OVF_LL +/* adjusted from intobject.c, Python 2.3.3 */ + +/* prototypes */ + +int op_int_mul_ovf(long a, long b, long *longprod); + +/* implementations */ + +#ifndef PYPY_NOT_MAIN_FILE + +int +op_int_mul_ovf(long a, long b, long *longprod) +{ + double doubled_longprod; /* (double)longprod */ + double doubleprod; /* (double)a * (double)b */ + + *longprod = a * b; + doubleprod = (double)a * (double)b; + doubled_longprod = (double)*longprod; + + /* Fast path for normal case: small multiplicands, and no info + is lost in either method. */ + if (doubled_longprod == doubleprod) + return 1; + + /* Somebody somewhere lost info. Close enough, or way off? Note + that a != 0 and b != 0 (else doubled_longprod == doubleprod == 0). + The difference either is or isn't significant compared to the + true value (of which doubleprod is a good approximation). + */ + { + const double diff = doubled_longprod - doubleprod; + const double absdiff = diff >= 0.0 ? diff : -diff; + const double absprod = doubleprod >= 0.0 ? 
doubleprod : + -doubleprod; + /* absdiff/absprod <= 1/32 iff + 32 * absdiff <= absprod -- 5 good bits is "close enough" */ + if (32.0 * absdiff <= absprod) + return 1; + return 0; + } +} + +#endif /* PYPY_NOT_MAIN_FILE */ + +#endif /* !OP_INT_MUL_OVF_LL */ + +/* implementations */ + +#define OP_UINT_IS_TRUE OP_INT_IS_TRUE +#define OP_UINT_INVERT OP_INT_INVERT +#define OP_UINT_ADD OP_INT_ADD +#define OP_UINT_SUB OP_INT_SUB +#define OP_UINT_MUL OP_INT_MUL +#define OP_UINT_LT OP_INT_LT +#define OP_UINT_LE OP_INT_LE +#define OP_UINT_EQ OP_INT_EQ +#define OP_UINT_NE OP_INT_NE +#define OP_UINT_GT OP_INT_GT +#define OP_UINT_GE OP_INT_GE +#define OP_UINT_AND OP_INT_AND +#define OP_UINT_OR OP_INT_OR +#define OP_UINT_XOR OP_INT_XOR + +#define OP_LLONG_IS_TRUE OP_INT_IS_TRUE +#define OP_LLONG_NEG OP_INT_NEG +#define OP_LLONG_ABS OP_INT_ABS +#define OP_LLONG_INVERT OP_INT_INVERT + +#define OP_LLONG_ADD OP_INT_ADD +#define OP_LLONG_SUB OP_INT_SUB +#define OP_LLONG_MUL OP_INT_MUL +#define OP_LLONG_LT OP_INT_LT +#define OP_LLONG_LE OP_INT_LE +#define OP_LLONG_EQ OP_INT_EQ +#define OP_LLONG_NE OP_INT_NE +#define OP_LLONG_GT OP_INT_GT +#define OP_LLONG_GE OP_INT_GE +#define OP_LLONG_AND OP_INT_AND +#define OP_LLONG_OR OP_INT_OR +#define OP_LLONG_XOR OP_INT_XOR + +#define OP_ULLONG_IS_TRUE OP_LLONG_IS_TRUE +#define OP_ULLONG_INVERT OP_LLONG_INVERT +#define OP_ULLONG_ADD OP_LLONG_ADD +#define OP_ULLONG_SUB OP_LLONG_SUB +#define OP_ULLONG_MUL OP_LLONG_MUL +#define OP_ULLONG_LT OP_LLONG_LT +#define OP_ULLONG_LE OP_LLONG_LE +#define OP_ULLONG_EQ OP_LLONG_EQ +#define OP_ULLONG_NE OP_LLONG_NE +#define OP_ULLONG_GT OP_LLONG_GT +#define OP_ULLONG_GE OP_LLONG_GE +#define OP_ULLONG_AND OP_LLONG_AND +#define OP_ULLONG_OR OP_LLONG_OR +#define OP_ULLONG_XOR OP_LLONG_XOR + +/* ================================================== float.h ============ */ + +/*** unary operations ***/ + +#define OP_FLOAT_IS_TRUE(x,r) OP_FLOAT_NE(x,0.0,r) +#define OP_FLOAT_NEG(x,r) r = -x +#define OP_FLOAT_ABS(x,r) r = fabs(x) + +/*** binary operations ***/ + +#define OP_FLOAT_EQ(x,y,r) r = (x == y) +#define OP_FLOAT_NE(x,y,r) r = (x != y) +#define OP_FLOAT_LE(x,y,r) r = (x <= y) +#define OP_FLOAT_GT(x,y,r) r = (x > y) +#define OP_FLOAT_LT(x,y,r) r = (x < y) +#define OP_FLOAT_GE(x,y,r) r = (x >= y) + +#define OP_FLOAT_CMP(x,y,r) \ + r = ((x > y) - (x < y)) + +/* addition, subtraction */ + +#define OP_FLOAT_ADD(x,y,r) r = x + y +#define OP_FLOAT_SUB(x,y,r) r = x - y +#define OP_FLOAT_MUL(x,y,r) r = x * y +#define OP_FLOAT_TRUEDIV(x,y,r) r = x / y +#define OP_FLOAT_POW(x,y,r) r = pow(x, y) + +/*** conversions ***/ + +#define OP_CAST_FLOAT_TO_INT(x,r) r = (long)(x) +#define OP_CAST_FLOAT_TO_UINT(x,r) r = (unsigned long)(x) +#define OP_CAST_INT_TO_FLOAT(x,r) r = (double)(x) +#define OP_CAST_UINT_TO_FLOAT(x,r) r = (double)(x) +#define OP_CAST_LONGLONG_TO_FLOAT(x,r) r = (double)(x) +#define OP_CAST_BOOL_TO_FLOAT(x,r) r = (double)(x) + +#ifdef HAVE_LONG_LONG +#define OP_CAST_FLOAT_TO_LONGLONG(x,r) r = (long long)(x) +#endif + + + + +/* ================================================== EOF ================ */ +/* ================================================== EOF ================ */ Added: trunk/scipy/sandbox/mkufunc/interactive.py =================================================================== --- trunk/scipy/sandbox/mkufunc/interactive.py 2008-06-24 17:48:31 UTC (rev 4478) +++ trunk/scipy/sandbox/mkufunc/interactive.py 2008-06-25 15:43:39 UTC (rev 4479) @@ -0,0 +1,115 @@ +import driver + +from pypy.translator.translator import 
TranslationContext + + +DEFAULTS = { + 'translation.backend': None, + 'translation.type_system': None, + 'translation.verbose': False, +} + +class Translation(object): + + def __init__(self, entry_point, argtypes=None, **kwds): + self.driver = driver.TranslationDriver(overrides=DEFAULTS) + self.config = self.driver.config + + self.entry_point = entry_point + self.context = TranslationContext(config=self.config) + + # hook into driver events + driver_own_event = self.driver._event + def _event(kind, goal, func): + self.driver_event(kind, goal, func) + driver_own_event(kind, goal, func) + self.driver._event = _event + self.driver_setup = False + + self.update_options(argtypes, kwds) + # for t.view() to work just after construction + graph = self.context.buildflowgraph(entry_point) + self.context._prebuilt_graphs[entry_point] = graph + + def driver_event(self, kind, goal, func): + if kind == 'pre': + self.ensure_setup() + + def ensure_setup(self, argtypes=None, policy=None, standalone=False): + if not self.driver_setup: + if standalone: + assert argtypes is None + else: + if argtypes is None: + argtypes = [] + self.driver.setup(self.entry_point, argtypes, policy, + empty_translator=self.context) + self.ann_argtypes = argtypes + self.ann_policy = policy + self.driver_setup = True + else: + # check consistency + if standalone: + assert argtypes is None + assert self.ann_argtypes is None + elif argtypes is not None and argtypes != self.ann_argtypes: + raise Exception("inconsistent argtype supplied") + if policy is not None and policy != self.ann_policy: + raise Exception("inconsistent annotation polish supplied") + + def update_options(self, argtypes, kwds): + if argtypes or kwds.get('policy') or kwds.get('standalone'): + self.ensure_setup(argtypes, kwds.get('policy'), + kwds.get('standalone')) + kwds.pop('policy', None) + kwds.pop('standalone', None) + self.config.translation.set(**kwds) + + def ensure_opt(self, name, value=None, fallback=None): + if value is not None: + self.update_options(None, {name: value}) + return value + val = getattr(self.config.translation, name, None) + if fallback is not None and val is None: + self.update_options(None, {name: fallback}) + return fallback + if val is not None: + return val + raise Exception( + "the %r option should have been specified at this point" %name) + + def ensure_type_system(self, type_system=None): + if self.config.translation.backend is not None: + return self.ensure_opt('type_system') + return self.ensure_opt('type_system', type_system, 'lltype') + + def ensure_backend(self, backend=None): + backend = self.ensure_opt('backend', backend) + self.ensure_type_system() + return backend + + # backend independent + + def annotate(self, argtypes=None, **kwds): + self.update_options(argtypes, kwds) + return self.driver.annotate() + + # type system dependent + + def rtype(self, argtypes=None, **kwds): + self.update_options(argtypes, kwds) + ts = self.ensure_type_system() + return getattr(self.driver, 'rtype_'+ts)() + + # backend depedent + + def source(self, argtypes=None, **kwds): + self.update_options(argtypes, kwds) + backend = self.ensure_backend() + self.driver.source_c() + + def compile(self, argtypes=None, **kwds): + self.update_options(argtypes, kwds) + backend = self.ensure_backend() + self.driver.compile_c() + return self.driver.c_entryp Added: trunk/scipy/sandbox/mkufunc/mkufunc.py =================================================================== --- trunk/scipy/sandbox/mkufunc/mkufunc.py 2008-06-24 17:48:31 UTC (rev 4478) +++ 
trunk/scipy/sandbox/mkufunc/mkufunc.py 2008-06-25 15:43:39 UTC (rev 4479) @@ -0,0 +1,240 @@ +import sys +import re +import cStringIO + +import numpy +import scipy.weave as weave + +from interactive import Translation + +verbose = False +_cnt = 0 + +typedict = { + int: ['NPY_LONG', 'long' ], + long: ['NPY_LONG', 'long' ], + float: ['NPY_DOUBLE', 'double'], +} + +class Cfunc(object): + + def __init__(self, f, signature): + global _cnt + _cnt += 1 + self.n = _cnt + self.sig = signature + self.nin = f.func_code.co_argcount # input args + self.nout = len(self.sig) - self.nin + assert self.nout == 1 # for now + + if not verbose: + rem = sys.stderr + sys.stderr = cStringIO.StringIO() + + t = Translation(f, backend='c') + t.annotate(signature[:self.nin]) + t.source() + + if not verbose: + sys.stderr = rem + + c_source_filename = t.driver.c_source_filename + assert c_source_filename.endswith('.c') + src = open(c_source_filename, 'r').read() + + self.prefix = 'f%i_' % self.n + self.allCsrc = src.replace('pypy_', self.prefix + 'pypy_') + self.cname = self.prefix + 'pypy_g_' + f.__name__ + + def cfunc(self): + p = re.compile(r'^\w+[*\s\w]+' + self.cname + + r'\s*\([^)]*\)\s*\{.*?[\n\r]\}[\n\r]', + re.DOTALL | re.MULTILINE | re.VERBOSE) + + found = p.findall(self.allCsrc) + assert len(found) == 1 + res = found[0] + res = res.replace(self.prefix + 'pypy_g_ll_math_ll_math_', '') + return res + '\n' + + def decl(self): + p = re.compile(r'^\w+[*\s\w]+' + self.cname + + r'\s*\([^)]*\);', + re.DOTALL | re.MULTILINE | re.VERBOSE) + + found = p.findall(self.allCsrc) + assert len(found) == 1 + return found[0] + + + def support_code(self): + arg0type = typedict[self.sig[0]][1] + rettype = typedict[self.sig[-1]][1] + n = self.n + cname = self.cname + return ''' +static %(rettype)s foo_%(n)i(%(arg0type)s x) +{ + return %(cname)s(x); +} + +typedef %(rettype)s Func_%(n)i(%(arg0type)s); + +static void +PyUFunc_%(n)i(char **args, npy_intp *dimensions, npy_intp *steps, void *func) +{ + /* printf("PyUFunc_%(n)i\\n"); */ + + npy_intp n = dimensions[0]; + npy_intp is0 = steps[0]; + npy_intp os = steps[1]; + char *ip0 = args[0]; + char *op = args[1]; + Func_%(n)i *f = (Func_%(n)i *) func; + npy_intp i; + + for(i = 0; i < n; i++, ip0 += is0, op += os) { + %(arg0type)s *in1 = (%(arg0type)s *)ip0; + %(rettype)s *out = (%(rettype)s *)op; + + *out = f(*in1); + } +} +''' % locals() + + +def test1(): + def sqr(x): + return x * x + #verbose = True + for argtypes in ([int, int], [float, float]): + x = Cfunc(sqr, argtypes) + print x.cname, x.nin, x.nout, x.sig + print x.cfunc() + print '{{{%s}}}' % x.decl() + print x.support_code() + + +def write_pypyc(cfuncs): + fo = open('pypy.c', 'w'); + fo.write('#include "head.c"\n\n') + for cf in cfuncs: + fo.write(cf.cfunc()) + fo.close() + + +def genufunc(f, signatures): + + signatures.sort(key=lambda sig: [numpy.dtype(typ).num for typ in sig]) + + cfuncs = [Cfunc(f, sig) for sig in signatures] + + write_pypyc(cfuncs) + + declarations = ''.join('\t%s\n' % cf.decl() for cf in cfuncs) + + func_support = ''.join(cf.support_code() for cf in cfuncs) + + pyufuncs = ''.join('\tPyUFunc_%i,\n' % cf.n for cf in cfuncs) + + data = ''.join('\t(void *) foo_%i,\n' % cf.n for cf in cfuncs) + + foo_signatures = ''.join('\t%s /* %i */\n' % + (''.join(typedict[t][0] + ', ' for t in cf.sig), cf.n) + for cf in cfuncs) + + support_code = ''' +extern "C" { +%(declarations)s} + +%(func_support)s + +static PyUFuncGenericFunction foo_functions[] = { +%(pyufuncs)s}; + +static void *foo_data[] = { +%(data)s}; + 
+static char foo_signatures[] = { +%(foo_signatures)s}; +''' % locals() + + ntypes = len(signatures) + nin = cfuncs[0].nin + + code = ''' +import_ufunc(); + +return_val = PyUFunc_FromFuncAndData( + foo_functions, + foo_data, + foo_signatures, + %(ntypes)i, /* ntypes */ + %(nin)i, /* nin */ + 1, /* nout */ + PyUFunc_None, /* identity */ + "foo", /* name */ + "", /* doc */ + 0); +''' % locals() + + if 1: + fo = open(f.__name__ + '_code.cc', 'w'); + fo.write(code); + fo.close() + + fo = open(f.__name__ + '_support_code.cc', 'w'); + fo.write(support_code); + fo.close() + + ufunc_info = weave.base_info.custom_info() + ufunc_info.add_header('"numpy/ufuncobject.h"') + ufunc_info.add_include_dir('"."') + + return weave.inline(code, + verbose=0, force=1, # XXX + support_code=support_code, + customize=ufunc_info, + sources=['pypy.c']) + + +def test2(): + + def sqr(x): + return x * x + + ufunc = genufunc(sqr, [ + (float, float), + (int, int), + ]) + + x = array([0.0, 1.0, 2.5, 12.0]) + print "x =", x, x.dtype + y = ufunc(x) + print "y =", y, y.dtype + + x = array([0, 1, 2, 15]) + print "x =", x, x.dtype + y = ufunc(x) + print "y =", y, y.dtype + + +def mkufunc(signatures): + print 'signatures', signatures + + class Compile(object): + + def __init__(self, f): + self.ufunc = genufunc(f, signatures) + + def __call__(self, *args): + return self.ufunc(*args) + + return Compile + + +if __name__ == '__main__': + # test1(); exit() + + mkufunc([int, (float, int, float)]) + Added: trunk/scipy/sandbox/mkufunc/pypy.c =================================================================== --- trunk/scipy/sandbox/mkufunc/pypy.c 2008-06-24 17:48:31 UTC (rev 4478) +++ trunk/scipy/sandbox/mkufunc/pypy.c 2008-06-25 15:43:39 UTC (rev 4479) @@ -0,0 +1,168 @@ +#include "head.c" + +double f1_pypy_g_bar(long l_n_2) { + long l_n_3; bool_t l_v190; bool_t l_v192; bool_t l_v196; + double l_v191; double l_v194; double l_v195; double l_v199; + double l_v200; long l_v187; long l_v188; long l_v189; long l_v193; + long l_v197; long l_v198; long l_v201; + + block0: + OP_INT_LT(l_n_2, 10L, l_v190); + if (l_v190) { + l_v201 = 0L; + l_n_3 = l_n_2; + l_v188 = 1L; + l_v189 = 10L; + goto block6; + } + goto block1; + + block1: + switch (l_n_2) { + case 10L: + l_v200 = 42.0; + goto block2; + case 11L: + l_v200 = 3.1400000000000001; + goto block2; + case 12L: + goto block3; + default: + goto block4; + } + + block2: + RPY_DEBUG_RETURN(); + return l_v200; + + block3: + l_v191 = cos(3.1415926535897931); + l_v200 = l_v191; + goto block2; + + block4: + OP_INT_GT(l_n_2, 12L, l_v192); + if (l_v192) { + goto block5; + } + l_v200 = 5.0; + goto block2; + + block5: + OP_INT_MUL(l_n_2, l_n_2, l_v193); + OP_CAST_INT_TO_FLOAT(l_v193, l_v194); + OP_FLOAT_TRUEDIV(l_v194, 1.2345600000000001, l_v195); + l_v200 = l_v195; + goto block2; + + block6: + OP_INT_ADD(l_n_3, l_v201, l_v187); + OP_INT_GE(l_v188, l_v189, l_v196); + while (!l_v196) { + goto block7; + block6_back: + OP_INT_ADD(l_n_3, l_v201, l_v187); + OP_INT_GE(l_v188, l_v189, l_v196); + } + goto block8; + + block7: + OP_INT_ADD(l_v188, 1L, l_v197); + OP_INT_MUL(l_v188, l_v188, l_v198); + l_v201 = l_v198; + l_n_3 = l_v187; + l_v188 = l_v197; + goto block6_back; + + block8: + OP_CAST_INT_TO_FLOAT(l_v187, l_v199); + l_v200 = l_v199; + goto block2; +} + +double f2_pypy_g_bar(double l_n_6) { + double l_n_7; bool_t l_v546; bool_t l_v547; bool_t l_v548; + bool_t l_v549; bool_t l_v550; bool_t l_v554; double l_v545; + double l_v551; double l_v552; double l_v553; double l_v557; + double l_v558; double l_v559; long 
l_v543; long l_v544; long l_v555; + long l_v556; + + block0: + OP_FLOAT_LT(l_n_6, 10.0, l_v546); + if (l_v546) { + l_v559 = 0.0; + l_v543 = 10L; + l_n_7 = l_n_6; + l_v544 = 1L; + goto block8; + } + goto block1; + + block1: + OP_FLOAT_EQ(l_n_6, 10.0, l_v547); + if (l_v547) { + l_v558 = 42.0; + goto block5; + } + goto block2; + + block2: + OP_FLOAT_EQ(l_n_6, 11.0, l_v548); + if (l_v548) { + l_v558 = 3.1400000000000001; + goto block5; + } + goto block3; + + block3: + OP_FLOAT_EQ(l_n_6, 12.0, l_v549); + if (l_v549) { + goto block7; + } + goto block4; + + block4: + OP_FLOAT_GT(l_n_6, 12.0, l_v550); + if (l_v550) { + goto block6; + } + l_v558 = 5.0; + goto block5; + + block5: + RPY_DEBUG_RETURN(); + return l_v558; + + block6: + OP_FLOAT_MUL(l_n_6, l_n_6, l_v551); + OP_FLOAT_TRUEDIV(l_v551, 1.2345600000000001, l_v552); + l_v558 = l_v552; + goto block5; + + block7: + l_v553 = cos(3.1415926535897931); + l_v558 = l_v553; + goto block5; + + block8: + OP_FLOAT_ADD(l_n_7, l_v559, l_v545); + OP_INT_GE(l_v544, l_v543, l_v554); + while (!l_v554) { + goto block9; + block8_back: + OP_FLOAT_ADD(l_n_7, l_v559, l_v545); + OP_INT_GE(l_v544, l_v543, l_v554); + } + l_v558 = l_v545; + goto block5; + + block9: + OP_INT_ADD(l_v544, 1L, l_v555); + OP_INT_MUL(l_v544, l_v544, l_v556); + OP_CAST_INT_TO_FLOAT(l_v556, l_v557); + l_v559 = l_v557; + l_n_7 = l_v545; + l_v544 = l_v555; + goto block8_back; +} + Added: trunk/scipy/sandbox/mkufunc/test_1.py =================================================================== --- trunk/scipy/sandbox/mkufunc/test_1.py 2008-06-24 17:48:31 UTC (rev 4478) +++ trunk/scipy/sandbox/mkufunc/test_1.py 2008-06-25 15:43:39 UTC (rev 4479) @@ -0,0 +1,53 @@ +#!/usr/bin/env python +from math import sin, cos, pi + +from numpy import array + +from mkufunc import mkufunc + +int_const = 42 + +float_const = 3.14 + +def my_sqr(x): + return x * x / 1.23456 + + at mkufunc([(float, float), (int, int)]) +def bar(n): + "Bar docstring" + if n < 10: + for i in xrange(10): + n += i*i + return n + elif n == 10: + return int_const + elif n == 11: + return float_const + elif n == 12: + return cos(pi) + #return 1 + elif n > 12: + return my_sqr(n) + else: + return 5 + + +#@mkufunc(float) +#def baz(n): +# "Baz docstring" +# return n * n + 1000 + + +#print bar + +x = array([0.0, 1.0, 2.5, 12.0]) +print "x =", x, x.dtype +y = bar(x) +print "y =", y, y.dtype + +print bar(5) +print bar(15) +print bar(10) +print bar(11) +print bar(12) +print bar(12.5) Property changes on: trunk/scipy/sandbox/mkufunc/test_1.py ___________________________________________________________________ Name: svn:executable + * From scipy-svn at scipy.org Wed Jun 25 11:59:51 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Wed, 25 Jun 2008 10:59:51 -0500 (CDT) Subject: [Scipy-svn] r4480 - trunk/scipy/sandbox/mkufunc Message-ID: <20080625155951.8B65539C718@scipy.org> Author: ilan Date: 2008-06-25 10:59:50 -0500 (Wed, 25 Jun 2008) New Revision: 4480 Removed: trunk/scipy/sandbox/mkufunc/bar_code.cc trunk/scipy/sandbox/mkufunc/bar_support_code.cc trunk/scipy/sandbox/mkufunc/pypy.c Log: Testing output files Deleted: trunk/scipy/sandbox/mkufunc/bar_code.cc =================================================================== --- trunk/scipy/sandbox/mkufunc/bar_code.cc 2008-06-25 15:43:39 UTC (rev 4479) +++ trunk/scipy/sandbox/mkufunc/bar_code.cc 2008-06-25 15:59:50 UTC (rev 4480) @@ -1,14 +0,0 @@ - -import_ufunc(); - -return_val = PyUFunc_FromFuncAndData( - foo_functions, - foo_data, - foo_signatures, - 2, /* ntypes */ - 1, /* nin */ - 1, 
/* nout */ - PyUFunc_None, /* identity */ - "foo", /* name */ - "", /* doc */ - 0); Deleted: trunk/scipy/sandbox/mkufunc/bar_support_code.cc =================================================================== --- trunk/scipy/sandbox/mkufunc/bar_support_code.cc 2008-06-25 15:43:39 UTC (rev 4479) +++ trunk/scipy/sandbox/mkufunc/bar_support_code.cc 2008-06-25 15:59:50 UTC (rev 4480) @@ -1,78 +0,0 @@ - -extern "C" { - double f1_pypy_g_bar(long l_n_1); - double f2_pypy_g_bar(double l_n_5); -} - - -static long foo_1(long x) -{ - return f1_pypy_g_bar(x); -} - -typedef long Func_1(long); - -static void -PyUFunc_1(char **args, npy_intp *dimensions, npy_intp *steps, void *func) -{ - /* printf("PyUFunc_1\n"); */ - - npy_intp n = dimensions[0]; - npy_intp is0 = steps[0]; - npy_intp os = steps[1]; - char *ip0 = args[0]; - char *op = args[1]; - Func_1 *f = (Func_1 *) func; - npy_intp i; - - for(i = 0; i < n; i++, ip0 += is0, op += os) { - long *in1 = (long *)ip0; - long *out = (long *)op; - - *out = f(*in1); - } -} - -static double foo_2(double x) -{ - return f2_pypy_g_bar(x); -} - -typedef double Func_2(double); - -static void -PyUFunc_2(char **args, npy_intp *dimensions, npy_intp *steps, void *func) -{ - /* printf("PyUFunc_2\n"); */ - - npy_intp n = dimensions[0]; - npy_intp is0 = steps[0]; - npy_intp os = steps[1]; - char *ip0 = args[0]; - char *op = args[1]; - Func_2 *f = (Func_2 *) func; - npy_intp i; - - for(i = 0; i < n; i++, ip0 += is0, op += os) { - double *in1 = (double *)ip0; - double *out = (double *)op; - - *out = f(*in1); - } -} - - -static PyUFuncGenericFunction foo_functions[] = { - PyUFunc_1, - PyUFunc_2, -}; - -static void *foo_data[] = { - (void *) foo_1, - (void *) foo_2, -}; - -static char foo_signatures[] = { - NPY_LONG, NPY_LONG, /* 1 */ - NPY_DOUBLE, NPY_DOUBLE, /* 2 */ -}; Deleted: trunk/scipy/sandbox/mkufunc/pypy.c =================================================================== --- trunk/scipy/sandbox/mkufunc/pypy.c 2008-06-25 15:43:39 UTC (rev 4479) +++ trunk/scipy/sandbox/mkufunc/pypy.c 2008-06-25 15:59:50 UTC (rev 4480) @@ -1,168 +0,0 @@ -#include "head.c" - -double f1_pypy_g_bar(long l_n_2) { - long l_n_3; bool_t l_v190; bool_t l_v192; bool_t l_v196; - double l_v191; double l_v194; double l_v195; double l_v199; - double l_v200; long l_v187; long l_v188; long l_v189; long l_v193; - long l_v197; long l_v198; long l_v201; - - block0: - OP_INT_LT(l_n_2, 10L, l_v190); - if (l_v190) { - l_v201 = 0L; - l_n_3 = l_n_2; - l_v188 = 1L; - l_v189 = 10L; - goto block6; - } - goto block1; - - block1: - switch (l_n_2) { - case 10L: - l_v200 = 42.0; - goto block2; - case 11L: - l_v200 = 3.1400000000000001; - goto block2; - case 12L: - goto block3; - default: - goto block4; - } - - block2: - RPY_DEBUG_RETURN(); - return l_v200; - - block3: - l_v191 = cos(3.1415926535897931); - l_v200 = l_v191; - goto block2; - - block4: - OP_INT_GT(l_n_2, 12L, l_v192); - if (l_v192) { - goto block5; - } - l_v200 = 5.0; - goto block2; - - block5: - OP_INT_MUL(l_n_2, l_n_2, l_v193); - OP_CAST_INT_TO_FLOAT(l_v193, l_v194); - OP_FLOAT_TRUEDIV(l_v194, 1.2345600000000001, l_v195); - l_v200 = l_v195; - goto block2; - - block6: - OP_INT_ADD(l_n_3, l_v201, l_v187); - OP_INT_GE(l_v188, l_v189, l_v196); - while (!l_v196) { - goto block7; - block6_back: - OP_INT_ADD(l_n_3, l_v201, l_v187); - OP_INT_GE(l_v188, l_v189, l_v196); - } - goto block8; - - block7: - OP_INT_ADD(l_v188, 1L, l_v197); - OP_INT_MUL(l_v188, l_v188, l_v198); - l_v201 = l_v198; - l_n_3 = l_v187; - l_v188 = l_v197; - goto block6_back; - - block8: - 
OP_CAST_INT_TO_FLOAT(l_v187, l_v199); - l_v200 = l_v199; - goto block2; -} - -double f2_pypy_g_bar(double l_n_6) { - double l_n_7; bool_t l_v546; bool_t l_v547; bool_t l_v548; - bool_t l_v549; bool_t l_v550; bool_t l_v554; double l_v545; - double l_v551; double l_v552; double l_v553; double l_v557; - double l_v558; double l_v559; long l_v543; long l_v544; long l_v555; - long l_v556; - - block0: - OP_FLOAT_LT(l_n_6, 10.0, l_v546); - if (l_v546) { - l_v559 = 0.0; - l_v543 = 10L; - l_n_7 = l_n_6; - l_v544 = 1L; - goto block8; - } - goto block1; - - block1: - OP_FLOAT_EQ(l_n_6, 10.0, l_v547); - if (l_v547) { - l_v558 = 42.0; - goto block5; - } - goto block2; - - block2: - OP_FLOAT_EQ(l_n_6, 11.0, l_v548); - if (l_v548) { - l_v558 = 3.1400000000000001; - goto block5; - } - goto block3; - - block3: - OP_FLOAT_EQ(l_n_6, 12.0, l_v549); - if (l_v549) { - goto block7; - } - goto block4; - - block4: - OP_FLOAT_GT(l_n_6, 12.0, l_v550); - if (l_v550) { - goto block6; - } - l_v558 = 5.0; - goto block5; - - block5: - RPY_DEBUG_RETURN(); - return l_v558; - - block6: - OP_FLOAT_MUL(l_n_6, l_n_6, l_v551); - OP_FLOAT_TRUEDIV(l_v551, 1.2345600000000001, l_v552); - l_v558 = l_v552; - goto block5; - - block7: - l_v553 = cos(3.1415926535897931); - l_v558 = l_v553; - goto block5; - - block8: - OP_FLOAT_ADD(l_n_7, l_v559, l_v545); - OP_INT_GE(l_v544, l_v543, l_v554); - while (!l_v554) { - goto block9; - block8_back: - OP_FLOAT_ADD(l_n_7, l_v559, l_v545); - OP_INT_GE(l_v544, l_v543, l_v554); - } - l_v558 = l_v545; - goto block5; - - block9: - OP_INT_ADD(l_v544, 1L, l_v555); - OP_INT_MUL(l_v544, l_v544, l_v556); - OP_CAST_INT_TO_FLOAT(l_v556, l_v557); - l_v559 = l_v557; - l_n_7 = l_v545; - l_v544 = l_v555; - goto block8_back; -} - From scipy-svn at scipy.org Wed Jun 25 12:12:16 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Wed, 25 Jun 2008 11:12:16 -0500 (CDT) Subject: [Scipy-svn] r4481 - trunk/scipy/sandbox/mkufunc Message-ID: <20080625161216.843B639C4FD@scipy.org> Author: ilan Date: 2008-06-25 11:12:15 -0500 (Wed, 25 Jun 2008) New Revision: 4481 Added: trunk/scipy/sandbox/mkufunc/README.txt Log: Added readme file Added: trunk/scipy/sandbox/mkufunc/README.txt =================================================================== --- trunk/scipy/sandbox/mkufunc/README.txt 2008-06-25 15:59:50 UTC (rev 4480) +++ trunk/scipy/sandbox/mkufunc/README.txt 2008-06-25 16:12:15 UTC (rev 4481) @@ -0,0 +1,19 @@ + +mkufunc (make universal function) is a tool which lets you create +a C compiled version of a universal function (UFunc). + +It works by translating the python function into C and then uses +scipy.weave to create a UFunc which calls the appropriate C function +in the inner 1-d loop. This means that there are no Python calls +when the calculation is performed, making the calculation +fast (in particular when the arrays involved in the calculation +are very large). 
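+
+A minimal usage sketch (hypothetical function name; the signature-list
+form shown here is the one exercised by the tests in this directory):
+
+    from mkufunc import mkufunc
+
+    @mkufunc([(float, float)])
+    def f(x):
+        return 4.2 * x * x + 3.7 * x + 1.5
+
+    # f now accepts NumPy arrays and evaluates in a compiled C inner loop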
+ +Requirements: + + pypy + +You need the pypy path in your PYTHONPATH environment: + +$ export PYTHONPATH=/giant/src/pypy-dist + From scipy-svn at scipy.org Thu Jun 26 14:49:43 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Thu, 26 Jun 2008 13:49:43 -0500 (CDT) Subject: [Scipy-svn] r4482 - trunk/scipy/ndimage Message-ID: <20080626184943.D3A2439C11D@scipy.org> Author: tom.waite Date: 2008-06-26 13:49:38 -0500 (Thu, 26 Jun 2008) New Revision: 4482 Modified: trunk/scipy/ndimage/_registration.py Log: replace parameter vector with inverse affine matrix as input to remap_image method Modified: trunk/scipy/ndimage/_registration.py =================================================================== --- trunk/scipy/ndimage/_registration.py 2008-06-25 16:12:15 UTC (rev 4481) +++ trunk/scipy/ndimage/_registration.py 2008-06-26 18:49:38 UTC (rev 4482) @@ -1,5 +1,10 @@ +# +# written by Tom Waite +# rigid body 3D registration +# + + import math - import numpy as np from scipy.special import erf from scipy.ndimage import correlate1d @@ -84,7 +89,7 @@ return image, M -def remap_image(image, parm_vector, resample='linear'): +def remap_image(image, M_inverse, resample='linear'): """ remaped_image = remap_image(image, parm_vector, resample='linear') @@ -97,9 +102,8 @@ image : {ndarray} image is the source image to be remapped. - parm_vector : {ndarray} - parm_vector is the 6-dimensional vector (3 angles, 3 translations) - generated from the rigid body registration. + M_inverse : {ndarray} + M_inverse is the 4x4 inverse affine matrix resample : {'linear', 'cubic'}, optional @@ -112,17 +116,12 @@ -------- image = fmri_series[i] x[0:6] = measures[i]['align_rotate'][0:6] + M = get_inverse_mappings(x) # overwrite the fMRI volume with the aligned volume - fmri_series[i] = remap_image(image, x, resample='cubic') + fmri_series[i] = remap_image(image, M, resample='cubic') """ - # - # remap imageG to coordinates of imageF (creates imageG') - # use the 6 dim parm_vector (3 angles, 3 translations) to remap - # - M_inverse = get_inverse_mappings(parm_vector) - # allocate the zero image remaped_image = np.zeros(image.shape, dtype=np.uint8) @@ -137,6 +136,7 @@ return remaped_image + def get_inverse_mappings(parm_vector): """ M_inverse = get_inverse_mappings(parm_vector) From scipy-svn at scipy.org Thu Jun 26 22:14:17 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Thu, 26 Jun 2008 21:14:17 -0500 (CDT) Subject: [Scipy-svn] r4483 - trunk/scipy/sandbox/mkufunc Message-ID: <20080627021417.7317A39C02A@scipy.org> Author: ilan Date: 2008-06-26 21:14:16 -0500 (Thu, 26 Jun 2008) New Revision: 4483 Added: trunk/scipy/sandbox/mkufunc/test_speed.py Modified: trunk/scipy/sandbox/mkufunc/head.c Log: Added test for speed comparison Modified: trunk/scipy/sandbox/mkufunc/head.c =================================================================== --- trunk/scipy/sandbox/mkufunc/head.c 2008-06-26 18:49:38 UTC (rev 4482) +++ trunk/scipy/sandbox/mkufunc/head.c 2008-06-27 02:14:16 UTC (rev 4483) @@ -1,4 +1,6 @@ +#include +#include #include /* ================================================== g_prerequisite.h === */ @@ -362,19 +364,19 @@ /*** conversions ***/ -#define OP_CAST_FLOAT_TO_INT(x,r) r = (long)(x) -#define OP_CAST_FLOAT_TO_UINT(x,r) r = (unsigned long)(x) -#define OP_CAST_INT_TO_FLOAT(x,r) r = (double)(x) -#define OP_CAST_UINT_TO_FLOAT(x,r) r = (double)(x) -#define OP_CAST_LONGLONG_TO_FLOAT(x,r) r = (double)(x) -#define OP_CAST_BOOL_TO_FLOAT(x,r) r = (double)(x) +#define OP_CAST_FLOAT_TO_INT(x,r) r = 
(long)(x) +#define OP_CAST_FLOAT_TO_UINT(x,r) r = (unsigned long)(x) +#define OP_CAST_INT_TO_FLOAT(x,r) r = (double)(x) +#define OP_CAST_UINT_TO_FLOAT(x,r) r = (double)(x) +#define OP_CAST_LONGLONG_TO_FLOAT(x,r) r = (double)(x) +#define OP_CAST_BOOL_TO_FLOAT(x,r) r = (double)(x) #ifdef HAVE_LONG_LONG -#define OP_CAST_FLOAT_TO_LONGLONG(x,r) r = (long long)(x) +#define OP_CAST_FLOAT_TO_LONGLONG(x,r) r = (long long)(x) #endif +/* ================================================== support.h ========== */ +#define RPyField(ptr, name) NULL - /* ================================================== EOF ================ */ -/* ================================================== EOF ================ */ Added: trunk/scipy/sandbox/mkufunc/test_speed.py =================================================================== --- trunk/scipy/sandbox/mkufunc/test_speed.py 2008-06-26 18:49:38 UTC (rev 4482) +++ trunk/scipy/sandbox/mkufunc/test_speed.py 2008-06-27 02:14:16 UTC (rev 4483) @@ -0,0 +1,37 @@ +#!/usr/bin/env python +from math import sin, cos +import time + +from numpy import arange, vectorize + +from mkufunc import mkufunc + + +def f(x): + return 4.2 * x * x + 3.7 * x + 1.5 + + +ufunc = mkufunc([(float, float)])(f) + +vfunc = vectorize(f) + + +x = arange(0, 1000, 0.001) #print "x =", x, x.dtype + +start_time = time.time() +y = 4.2 * x * x + 3.7 * x + 1.5 +n_time = time.time() - start_time +print 'numpy: %.6f sec' % n_time + +start_time = time.time() +y = vfunc(x) +v_time = time.time() - start_time +print 'vectorize: %.6f sec' % v_time + +start_time = time.time() +y = ufunc(x) +u_time = time.time() - start_time +print 'mkufunc: %.6f sec' % u_time + +print "speedup over numpy:", n_time/u_time +print "speedup over vectorize:", v_time/u_time Property changes on: trunk/scipy/sandbox/mkufunc/test_speed.py ___________________________________________________________________ Name: svn:executable + * From scipy-svn at scipy.org Thu Jun 26 23:04:23 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Thu, 26 Jun 2008 22:04:23 -0500 (CDT) Subject: [Scipy-svn] r4484 - trunk/scipy/sandbox/mkufunc Message-ID: <20080627030423.4383739C704@scipy.org> Author: ilan Date: 2008-06-26 22:04:21 -0500 (Thu, 26 Jun 2008) New Revision: 4484 Modified: trunk/scipy/sandbox/mkufunc/mkufunc.py Log: Adding documentation Modified: trunk/scipy/sandbox/mkufunc/mkufunc.py =================================================================== --- trunk/scipy/sandbox/mkufunc/mkufunc.py 2008-06-27 02:14:16 UTC (rev 4483) +++ trunk/scipy/sandbox/mkufunc/mkufunc.py 2008-06-27 03:04:21 UTC (rev 4484) @@ -1,3 +1,8 @@ +""" mkufunc (make U function) + + +Author: Ilan Schnell (with help from Travis Oliphant and Eric Jones) +""" import sys import re import cStringIO @@ -7,6 +12,7 @@ from interactive import Translation + verbose = False _cnt = 0 @@ -17,7 +23,39 @@ } class Cfunc(object): + """ C compiled python functions + + >>> def sqr(x): + ... 
return x * x + + >>> signature = [int, int] # only the input arguments are used here + compilation is done upon initialization + >>> x = Cfunc(sqr, signature) + >>> x.nin # number of input arguments + 1 + >>> x.nout # number of output arguments (must be 1 for now) + 1 + >>> x.sig + [, ] + + Attributes: + + n -- id number + sig -- signature + nin -- number of input arguments + nout -- number of output arguments + cname -- name of the C function + + Methods: + + decl() -- returns the C declaration for the function + cfunc() -- returns the C function (as string) + support_code() -- generate the C support code to make this + function part work with PyUFuncGenericFunction + + + """ def __init__(self, f, signature): global _cnt _cnt += 1 @@ -42,19 +80,19 @@ assert c_source_filename.endswith('.c') src = open(c_source_filename, 'r').read() - self.prefix = 'f%i_' % self.n - self.allCsrc = src.replace('pypy_', self.prefix + 'pypy_') - self.cname = self.prefix + 'pypy_g_' + f.__name__ + self._prefix = 'f%i_' % self.n + self._allCsrc = src.replace('pypy_', self._prefix + 'pypy_') + self.cname = self._prefix + 'pypy_g_' + f.__name__ def cfunc(self): p = re.compile(r'^\w+[*\s\w]+' + self.cname + r'\s*\([^)]*\)\s*\{.*?[\n\r]\}[\n\r]', re.DOTALL | re.MULTILINE | re.VERBOSE) - found = p.findall(self.allCsrc) + found = p.findall(self._allCsrc) assert len(found) == 1 res = found[0] - res = res.replace(self.prefix + 'pypy_g_ll_math_ll_math_', '') + res = res.replace(self._prefix + 'pypy_g_ll_math_ll_math_', '') return res + '\n' def decl(self): @@ -62,7 +100,7 @@ r'\s*\([^)]*\);', re.DOTALL | re.MULTILINE | re.VERBOSE) - found = p.findall(self.allCsrc) + found = p.findall(self._allCsrc) assert len(found) == 1 return found[0] @@ -116,6 +154,9 @@ def write_pypyc(cfuncs): + """ Given a list of Cfunc instances, write the C code containing the + functions into a file. + """ fo = open('pypy.c', 'w'); fo.write('#include "head.c"\n\n') for cf in cfuncs: @@ -124,7 +165,19 @@ def genufunc(f, signatures): - + """ Given a Python function and its signatures, do the following: + + - Compile the function to C for each signature + + - Write the C code for all these functions to a file + + - Generate the support code for weave + + - Generate the code for weave. This contains the actual call to + PyUFuncGenericFunction + + - Return the Ufunc Python object + """ signatures.sort(key=lambda sig: [numpy.dtype(typ).num for typ in sig]) cfuncs = [Cfunc(f, sig) for sig in signatures] @@ -220,6 +273,9 @@ def mkufunc(signatures): + """ The actual API function, to be used as decorator function. 
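+
+    Typical use (signature-list form, as exercised in test_speed.py):
+
+        @mkufunc([(float, float)])
+        def f(x):
+            return 4.2 * x * x + 3.7 * x + 1.5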
+ + """ print 'signatures', signatures class Compile(object): @@ -234,7 +290,5 @@ if __name__ == '__main__': - # test1(); exit() - - mkufunc([int, (float, int, float)]) - + import doctest + doctest.testmod() From scipy-svn at scipy.org Fri Jun 27 09:24:25 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Fri, 27 Jun 2008 08:24:25 -0500 (CDT) Subject: [Scipy-svn] r4485 - trunk/scipy/sandbox/mkufunc Message-ID: <20080627132425.5CA4CC7C0B7@scipy.org> Author: ilan Date: 2008-06-27 08:24:23 -0500 (Fri, 27 Jun 2008) New Revision: 4485 Added: trunk/scipy/sandbox/mkufunc/test_mkufunc.py Modified: trunk/scipy/sandbox/mkufunc/mkufunc.py Log: Started unittest suite and other small changes Modified: trunk/scipy/sandbox/mkufunc/mkufunc.py =================================================================== --- trunk/scipy/sandbox/mkufunc/mkufunc.py 2008-06-27 03:04:21 UTC (rev 4484) +++ trunk/scipy/sandbox/mkufunc/mkufunc.py 2008-06-27 13:24:23 UTC (rev 4485) @@ -49,12 +49,11 @@ Methods: - decl() -- returns the C declaration for the function - cfunc() -- returns the C function (as string) - support_code() -- generate the C support code to make this - function part work with PyUFuncGenericFunction - - + decl() -- returns the C declaration for the function + cfunc() -- returns the C function (as string) + ufunc_support_code() + -- generate the C support code to make this + function part work with PyUFuncGenericFunction """ def __init__(self, f, signature): global _cnt @@ -66,7 +65,7 @@ assert self.nout == 1 # for now if not verbose: - rem = sys.stderr + tmp = sys.stderr sys.stderr = cStringIO.StringIO() t = Translation(f, backend='c') @@ -74,8 +73,8 @@ t.source() if not verbose: - sys.stderr = rem - + sys.stderr = tmp + c_source_filename = t.driver.c_source_filename assert c_source_filename.endswith('.c') src = open(c_source_filename, 'r').read() @@ -105,7 +104,7 @@ return found[0] - def support_code(self): + def ufunc_support_code(self): arg0type = typedict[self.sig[0]][1] rettype = typedict[self.sig[-1]][1] n = self.n @@ -186,7 +185,7 @@ declarations = ''.join('\t%s\n' % cf.decl() for cf in cfuncs) - func_support = ''.join(cf.support_code() for cf in cfuncs) + func_support = ''.join(cf.ufunc_support_code() for cf in cfuncs) pyufuncs = ''.join('\tPyUFunc_%i,\n' % cf.n for cf in cfuncs) @@ -276,7 +275,7 @@ """ The actual API function, to be used as decorator function. 
""" - print 'signatures', signatures + #print 'signatures', signatures class Compile(object): Added: trunk/scipy/sandbox/mkufunc/test_mkufunc.py =================================================================== --- trunk/scipy/sandbox/mkufunc/test_mkufunc.py 2008-06-27 03:04:21 UTC (rev 4484) +++ trunk/scipy/sandbox/mkufunc/test_mkufunc.py 2008-06-27 13:24:23 UTC (rev 4485) @@ -0,0 +1,22 @@ +import math +from math import sin, cos, pi +import unittest + +from numpy import array, arange, allclose + +from mkufunc import Cfunc, genufunc, mkufunc + + +class Math_Tests(unittest.TestCase): + + def test_sin(self): + @mkufunc([(float, float)]) + def u_sin(x): + return sin(x) + + x = 1.23 + self.assert_(u_sin(x), sin(x)) + + +if __name__ == '__main__': + unittest.main() From scipy-svn at scipy.org Fri Jun 27 15:37:50 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Fri, 27 Jun 2008 14:37:50 -0500 (CDT) Subject: [Scipy-svn] r4486 - trunk/scipy/interpolate Message-ID: <20080627193750.44D3BC7C08B@scipy.org> Author: rkern Date: 2008-06-27 14:37:49 -0500 (Fri, 27 Jun 2008) New Revision: 4486 Modified: trunk/scipy/interpolate/rbf.py Log: BUG: fix missing import and 'gausian' typo. Thanks to Lorenzo Bolla for the patch. Modified: trunk/scipy/interpolate/rbf.py =================================================================== --- trunk/scipy/interpolate/rbf.py 2008-06-27 13:24:23 UTC (rev 4485) +++ trunk/scipy/interpolate/rbf.py 2008-06-27 19:37:49 UTC (rev 4486) @@ -42,7 +42,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """ -from numpy import sqrt, log, asarray, newaxis, all, dot, float64, eye +from numpy import sqrt, log, asarray, newaxis, all, dot, float64, exp, eye from scipy import linalg class Rbf(object): @@ -58,7 +58,7 @@ return sqrt((1.0/self.epsilon*r)**2 + 1) elif self.function.lower() == 'inverse multiquadric': return 1.0/sqrt((1.0/self.epsilon*r)**2 + 1) - elif self.function.lower() == 'gausian': + elif self.function.lower() == 'gaussian': return exp(-(self.epsilon*r)**2) elif self.function.lower() == 'cubic': return r**3 @@ -84,7 +84,7 @@ :: 'multiquadric': sqrt((self.epsilon*r)**2 + 1) 'inverse multiquadric': 1.0/sqrt((self.epsilon*r)**2 + 1) - 'gausian': exp(-(self.epsilon*r)**2) + 'gaussian': exp(-(self.epsilon*r)**2) 'cubic': r**3 'quintic': r**5 'thin-plate': r**2 * log(r) From scipy-svn at scipy.org Fri Jun 27 18:29:23 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Fri, 27 Jun 2008 17:29:23 -0500 (CDT) Subject: [Scipy-svn] r4487 - trunk/scipy/sandbox/mkufunc Message-ID: <20080627222923.283EC39C19A@scipy.org> Author: ilan Date: 2008-06-27 17:29:20 -0500 (Fri, 27 Jun 2008) New Revision: 4487 Modified: trunk/scipy/sandbox/mkufunc/test_speed.py Log: Added blitz Modified: trunk/scipy/sandbox/mkufunc/test_speed.py =================================================================== --- trunk/scipy/sandbox/mkufunc/test_speed.py 2008-06-27 19:37:49 UTC (rev 4486) +++ trunk/scipy/sandbox/mkufunc/test_speed.py 2008-06-27 22:29:20 UTC (rev 4487) @@ -2,7 +2,8 @@ from math import sin, cos import time -from numpy import arange, vectorize +from numpy import arange, vectorize, allclose +from scipy import weave from mkufunc import mkufunc @@ -11,27 +12,38 @@ return 4.2 * x * x + 3.7 * x + 1.5 +vfunc = vectorize(f) + ufunc = mkufunc([(float, float)])(f) -vfunc = vectorize(f) - x = arange(0, 1000, 0.001) #print "x =", x, x.dtype start_time = time.time() -y = 4.2 * x * x + 3.7 * x + 1.5 +b_y = x.copy() +weave.blitz("b_y[:] = 4.2 * x[:] * x[:] 
+ 3.7 * x[:] + 1.5") +b_time = time.time() - start_time +print 'blitz: %.6f sec' % b_time + +start_time = time.time() +n_y = f(x) n_time = time.time() - start_time print 'numpy: %.6f sec' % n_time start_time = time.time() -y = vfunc(x) +v_y = vfunc(x) v_time = time.time() - start_time print 'vectorize: %.6f sec' % v_time start_time = time.time() -y = ufunc(x) +u_y = ufunc(x) u_time = time.time() - start_time print 'mkufunc: %.6f sec' % u_time -print "speedup over numpy:", n_time/u_time +print "speedup over blitz:", b_time/u_time +print "speedup over numpy:", n_time/u_time print "speedup over vectorize:", v_time/u_time + +assert allclose(b_y, n_y) +assert allclose(v_y, n_y) +assert allclose(u_y, n_y) From scipy-svn at scipy.org Fri Jun 27 21:34:30 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Fri, 27 Jun 2008 20:34:30 -0500 (CDT) Subject: [Scipy-svn] r4488 - trunk/scipy/sandbox/mkufunc Message-ID: <20080628013430.7A81039C1DF@scipy.org> Author: ilan Date: 2008-06-27 20:34:29 -0500 (Fri, 27 Jun 2008) New Revision: 4488 Modified: trunk/scipy/sandbox/mkufunc/mkufunc.py Log: started work on typecasting Modified: trunk/scipy/sandbox/mkufunc/mkufunc.py =================================================================== --- trunk/scipy/sandbox/mkufunc/mkufunc.py 2008-06-27 22:29:20 UTC (rev 4487) +++ trunk/scipy/sandbox/mkufunc/mkufunc.py 2008-06-28 01:34:29 UTC (rev 4488) @@ -6,6 +6,7 @@ import sys import re import cStringIO +from types import * import numpy import scipy.weave as weave @@ -103,7 +104,6 @@ assert len(found) == 1 return found[0] - def ufunc_support_code(self): arg0type = typedict[self.sig[0]][1] rettype = typedict[self.sig[-1]][1] @@ -251,6 +251,7 @@ def test2(): + from numpy import array def sqr(x): return x * x @@ -271,23 +272,48 @@ print "y =", y, y.dtype -def mkufunc(signatures): +def mkufunc(arg0): """ The actual API function, to be used as decorator function. """ - #print 'signatures', signatures - class Compile(object): def __init__(self, f): + print 'sigs:', signatures self.ufunc = genufunc(f, signatures) + #self.ufunc = f def __call__(self, *args): return self.ufunc(*args) - return Compile + if isinstance(arg0, FunctionType): + f = arg0 + signatures = [float] + return Compile(f) + + elif isinstance(arg0, ListType): + signatures = arg0 + return Compile + elif arg0 in typedict: + signatures = [arg0] + return Compile + else: + raise TypeError("first argument has to be a function, a type, or " + "a list of signatures") + + if __name__ == '__main__': import doctest - doctest.testmod() + #doctest.testmod() + + def sqr(x): + return x * x + + #sqr = mkufunc({})(sqr) + sqr = mkufunc([(float, float)])(sqr) + #sqr = mkufunc(int)(sqr) + #sqr = mkufunc(sqr) + + print sqr(8) From scipy-svn at scipy.org Sat Jun 28 09:55:25 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Sat, 28 Jun 2008 08:55:25 -0500 (CDT) Subject: [Scipy-svn] r4489 - in trunk/scipy/interpolate: . tests Message-ID: <20080628135525.734B339C65A@scipy.org> Author: ptvirtan Date: 2008-06-28 08:54:11 -0500 (Sat, 28 Jun 2008) New Revision: 4489 Modified: trunk/scipy/interpolate/interpolate.py trunk/scipy/interpolate/tests/test_interpolate.py Log: interpolate: Fix #289. Make interp1d order axes in the result correctly when y.ndim > 2. Fix a bug in splmake that was triggered when y.ndim > 2. Add corresponding tests. 
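
A sketch of the fixed behaviour (shapes taken from the new test below;
interp1d along axis n now replaces that axis of y's shape with the
shape of the query array):

    import numpy as np
    from scipy.interpolate import interp1d
    y = np.arange(4*5*6*7.).reshape(4, 5, 6, 7)
    z = interp1d(np.arange(6.), y, axis=2)
    print z(np.zeros((2, 3, 1))).shape   # -> (4, 5, 2, 3, 1, 7)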
Modified: trunk/scipy/interpolate/interpolate.py =================================================================== --- trunk/scipy/interpolate/interpolate.py 2008-06-28 01:34:29 UTC (rev 4488) +++ trunk/scipy/interpolate/interpolate.py 2008-06-28 13:54:11 UTC (rev 4489) @@ -152,9 +152,6 @@ UnivariateSpline - a more recent wrapper of the FITPACK routines """ - _interp_axis = -1 # used to set which is default interpolation - # axis. DO NOT CHANGE OR CODE WILL BREAK. - def __init__(self, x, y, kind='linear', axis=-1, copy=True, bounds_error=True, fill_value=np.nan): """ Initialize a 1D linear interpolation class. @@ -226,12 +223,18 @@ if kind == 'linear': # Make a "view" of the y array that is rotated to the interpolation # axis. - oriented_y = y.swapaxes(self._interp_axis, axis) + axes = range(y.ndim) + del axes[self.axis] + axes.append(self.axis) + oriented_y = y.transpose(axes) minval = 2 - len_y = oriented_y.shape[self._interp_axis] + len_y = oriented_y.shape[-1] self._call = self._call_linear else: - oriented_y = y.swapaxes(0, axis) + axes = range(y.ndim) + del axes[self.axis] + axes.insert(0, self.axis) + oriented_y = y.transpose(axes) minval = order + 1 len_y = oriented_y.shape[0] self._call = self._call_spline @@ -322,10 +325,10 @@ return y_new.transpose(axes) else: y_new[out_of_bounds] = self.fill_value - axes = range(ny - nx, ny) - axes[self.axis:self.axis] = range(ny - nx) + axes = range(nx, ny) + axes[self.axis:self.axis] = range(nx) return y_new.transpose(axes) - + def _check_bounds(self, x_new): """ Check the inputs for being in the bounds of the interpolated data. @@ -407,6 +410,16 @@ fromspline = classmethod(fromspline) +def _dot0(a, b): + """Similar to numpy.dot, but sum over last axis of a and 1st axis of b""" + if b.ndim <= 2: + return dot(a, b) + else: + axes = range(b.ndim) + axes.insert(-1, 0) + axes.pop(0) + return dot(a, b.transpose(axes)) + def _find_smoothest(xk, yk, order, conds=None, B=None): # construct Bmatrix, and Jmatrix # e = J*c @@ -431,9 +444,8 @@ tmp = dot(tmp,V1) tmp = dot(tmp,np.diag(1.0/s)) tmp = dot(tmp,u.T) - return dot(tmp, yk) + return _dot0(tmp, yk) - def _setdiag(a, k, v): assert (a.ndim==2) M,N = a.shape @@ -471,7 +483,7 @@ V2[1::2] = -1 V2 /= math.sqrt(Np1) dk = np.diff(xk) - b = 2*np.diff(yk)/dk + b = 2*np.diff(yk, axis=0)/dk J = np.zeros((N-1,N+1)) idk = 1.0/dk _setdiag(J,0,idk[:-1]) @@ -480,7 +492,7 @@ A = dot(J.T,J) val = dot(V2,dot(A,V2)) res1 = dot(np.outer(V2,V2)/val,A) - mk = dot(np.eye(Np1)-res1,dot(Bd,b)) + mk = dot(np.eye(Np1)-res1, _dot0(Bd,b)) return mk def _get_spline2_Bb(xk, yk, kind, conds): Modified: trunk/scipy/interpolate/tests/test_interpolate.py =================================================================== --- trunk/scipy/interpolate/tests/test_interpolate.py 2008-06-28 01:34:29 UTC (rev 4488) +++ trunk/scipy/interpolate/tests/test_interpolate.py 2008-06-28 13:54:11 UTC (rev 4489) @@ -29,7 +29,7 @@ self.y210 = np.arange(20.).reshape((2, 10)) self.y102 = np.arange(20.).reshape((10, 2)) - + self.fill_value = -100.0 def test_validation(self): @@ -125,13 +125,30 @@ np.array([2.4, 5.6, 6.0]), ) + def test_cubic(self): + """ Check the actual implementation of spline interpolation. 
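+        (the sample data are linear in x, so the spline must reproduce
+        them exactly, e.g. interp10(1.2) == 1.2)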
+ """ - def test_bounds(self): + interp10 = interp1d(self.x10, self.y10, kind='cubic') + assert_array_almost_equal( + interp10(self.x10), + self.y10, + ) + assert_array_almost_equal( + interp10(1.2), + np.array([1.2]), + ) + assert_array_almost_equal( + interp10([2.4, 5.6, 6.0]), + np.array([2.4, 5.6, 6.0]), + ) + + def _bounds_check(self, kind='linear'): """ Test that our handling of out-of-bounds input is correct. """ extrap10 = interp1d(self.x10, self.y10, fill_value=self.fill_value, - bounds_error=False) + bounds_error=False, kind=kind) assert_array_equal( extrap10(11.2), np.array([self.fill_value]), @@ -145,25 +162,28 @@ np.array([True, False, False, False, True]), ) - raises_bounds_error = interp1d(self.x10, self.y10, bounds_error=True) + raises_bounds_error = interp1d(self.x10, self.y10, bounds_error=True, + kind=kind) self.assertRaises(ValueError, raises_bounds_error, -1.0) self.assertRaises(ValueError, raises_bounds_error, 11.0) raises_bounds_error([0.0, 5.0, 9.0]) + def test_bounds(self): + for kind in ('linear', 'cubic'): + self._bounds_check(kind=kind) - def test_nd(self): + def _nd_check(self, kind='linear'): """ Check the behavior when the inputs and outputs are multidimensional. """ - # Multidimensional input. - interp10 = interp1d(self.x10, self.y10) + interp10 = interp1d(self.x10, self.y10, kind=kind) assert_array_almost_equal( interp10(np.array([[3.4, 5.6], [2.4, 7.8]])), np.array([[3.4, 5.6], [2.4, 7.8]]), ) - + # Multidimensional outputs. - interp210 = interp1d(self.x10, self.y210) + interp210 = interp1d(self.x10, self.y210, kind=kind) assert_array_almost_equal( interp210(1.5), np.array([[1.5], [11.5]]), @@ -174,7 +194,7 @@ [11.5, 12.4]]), ) - interp102 = interp1d(self.x10, self.y102, axis=0) + interp102 = interp1d(self.x10, self.y102, axis=0, kind=kind) assert_array_almost_equal( interp102(1.5), np.array([[3.0, 4.0]]), @@ -197,7 +217,24 @@ np.array([[[6.8, 7.8], [11.2, 12.2]], [[4.8, 5.8], [15.6, 16.6]]]), ) + + # Check large ndim output + a = [4, 5, 6, 7] + y = np.arange(np.prod(a)).reshape(*a) + for n, s in enumerate(a): + x = np.arange(s) + z = interp1d(x, y, axis=n, kind=kind) + assert_array_almost_equal(z(x), y) + + x2 = np.arange(2*3*1).reshape((2,3,1)) / 12. 
+ b = list(a) + b[n:n+1] = [2,3,1] + assert_array_almost_equal(z(x2).shape, b) + def test_nd(self): + for kind in ('linear', 'cubic'): + self._nd_check(kind=kind) + class TestLagrange(TestCase): def test_lagrange(self): From scipy-svn at scipy.org Sat Jun 28 10:36:38 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Sat, 28 Jun 2008 09:36:38 -0500 (CDT) Subject: [Scipy-svn] r4490 - trunk/scipy/sandbox/mkufunc Message-ID: <20080628143638.B0877C7C0AC@scipy.org> Author: ilan Date: 2008-06-28 09:36:29 -0500 (Sat, 28 Jun 2008) New Revision: 4490 Modified: trunk/scipy/sandbox/mkufunc/mkufunc.py trunk/scipy/sandbox/mkufunc/test_mkufunc.py Log: Improved dispatch on type in mkufunc and added more tests Modified: trunk/scipy/sandbox/mkufunc/mkufunc.py =================================================================== --- trunk/scipy/sandbox/mkufunc/mkufunc.py 2008-06-28 13:54:11 UTC (rev 4489) +++ trunk/scipy/sandbox/mkufunc/mkufunc.py 2008-06-28 14:36:29 UTC (rev 4490) @@ -15,7 +15,6 @@ verbose = False -_cnt = 0 typedict = { int: ['NPY_LONG', 'long' ], @@ -41,7 +40,7 @@ [, ] Attributes: - + f -- the Python function object n -- id number sig -- signature nin -- number of input arguments @@ -56,10 +55,9 @@ -- generate the C support code to make this function part work with PyUFuncGenericFunction """ - def __init__(self, f, signature): - global _cnt - _cnt += 1 - self.n = _cnt + def __init__(self, f, signature, n=0): + self.f = f + self.n = n self.sig = signature self.nin = f.func_code.co_argcount # input args self.nout = len(self.sig) - self.nin @@ -83,7 +81,7 @@ self._prefix = 'f%i_' % self.n self._allCsrc = src.replace('pypy_', self._prefix + 'pypy_') self.cname = self._prefix + 'pypy_g_' + f.__name__ - + def cfunc(self): p = re.compile(r'^\w+[*\s\w]+' + self.cname + r'\s*\([^)]*\)\s*\{.*?[\n\r]\}[\n\r]', @@ -110,7 +108,7 @@ n = self.n cname = self.cname return ''' -static %(rettype)s foo_%(n)i(%(arg0type)s x) +static %(rettype)s wrap_%(cname)s(%(arg0type)s x) { return %(cname)s(x); } @@ -120,8 +118,6 @@ static void PyUFunc_%(n)i(char **args, npy_intp *dimensions, npy_intp *steps, void *func) { - /* printf("PyUFunc_%(n)i\\n"); */ - npy_intp n = dimensions[0]; npy_intp is0 = steps[0]; npy_intp os = steps[1]; @@ -179,7 +175,7 @@ """ signatures.sort(key=lambda sig: [numpy.dtype(typ).num for typ in sig]) - cfuncs = [Cfunc(f, sig) for sig in signatures] + cfuncs = [Cfunc(f, sig, n) for n, sig in enumerate(signatures)] write_pypyc(cfuncs) @@ -189,11 +185,13 @@ pyufuncs = ''.join('\tPyUFunc_%i,\n' % cf.n for cf in cfuncs) - data = ''.join('\t(void *) foo_%i,\n' % cf.n for cf in cfuncs) + data = ''.join('\t(void *) wrap_%s,\n' % cf.cname for cf in cfuncs) + + types = ''.join('\t%s /* %i */\n' % + (''.join(typedict[t][0] + ', ' for t in cf.sig), cf.n) + for cf in cfuncs) - foo_signatures = ''.join('\t%s /* %i */\n' % - (''.join(typedict[t][0] + ', ' for t in cf.sig), cf.n) - for cf in cfuncs) + fname = f.__name__ support_code = ''' extern "C" { @@ -201,14 +199,14 @@ %(func_support)s -static PyUFuncGenericFunction foo_functions[] = { +static PyUFuncGenericFunction %(fname)s_functions[] = { %(pyufuncs)s}; -static void *foo_data[] = { +static void *%(fname)s_data[] = { %(data)s}; -static char foo_signatures[] = { -%(foo_signatures)s}; +static char %(fname)s_types[] = { +%(types)s}; ''' % locals() ntypes = len(signatures) @@ -218,14 +216,14 @@ import_ufunc(); return_val = PyUFunc_FromFuncAndData( - foo_functions, - foo_data, - foo_signatures, + %(fname)s_functions, + %(fname)s_data, + 
%(fname)s_types, %(ntypes)i, /* ntypes */ %(nin)i, /* nin */ 1, /* nout */ PyUFunc_None, /* identity */ - "foo", /* name */ + "%(fname)s", /* name */ "", /* doc */ 0); ''' % locals() @@ -272,20 +270,40 @@ print "y =", y, y.dtype -def mkufunc(arg0): +def mkufunc(arg0=[float]): """ The actual API function, to be used as decorator function. """ class Compile(object): def __init__(self, f): + nin = f.func_code.co_argcount + nout = 1 + for i, sig in enumerate(signatures): + if sig in typedict.keys(): + signatures[i] = (nin + nout) * (sig,) + elif isinstance(sig, tuple): + pass + else: + raise TypeError + + for sig in signatures: + assert isinstance(sig, tuple) + if len(sig) != nin + nout: + raise TypeError("signature %r does not match the " + "number of args of function %s" % + (sig, f.__name__)) + for t in sig: + if t not in typedict.keys(): + raise TypeError + print 'sigs:', signatures self.ufunc = genufunc(f, signatures) #self.ufunc = f - + def __call__(self, *args): return self.ufunc(*args) - + if isinstance(arg0, FunctionType): f = arg0 signatures = [float] @@ -294,11 +312,11 @@ elif isinstance(arg0, ListType): signatures = arg0 return Compile - - elif arg0 in typedict: + + elif arg0 in typedict.keys(): signatures = [arg0] return Compile - + else: raise TypeError("first argument has to be a function, a type, or " "a list of signatures") @@ -307,13 +325,18 @@ if __name__ == '__main__': import doctest #doctest.testmod() + + test2() + exit() + + def sqr(x): return x * x - + #sqr = mkufunc({})(sqr) sqr = mkufunc([(float, float)])(sqr) #sqr = mkufunc(int)(sqr) #sqr = mkufunc(sqr) - + print sqr(8) Modified: trunk/scipy/sandbox/mkufunc/test_mkufunc.py =================================================================== --- trunk/scipy/sandbox/mkufunc/test_mkufunc.py 2008-06-28 13:54:11 UTC (rev 4489) +++ trunk/scipy/sandbox/mkufunc/test_mkufunc.py 2008-06-28 14:36:29 UTC (rev 4490) @@ -1,22 +1,85 @@ import math -from math import sin, cos, pi import unittest -from numpy import array, arange, allclose +from numpy import array, arange, allclose, vectorize from mkufunc import Cfunc, genufunc, mkufunc +def f(x): + return 3.2 * x * x - 18.3 * x + sin(x) +class Arg_Tests(unittest.TestCase): + + def check_ufunc(self, f): + #self.assert_(f.__type__ + for arg in (array([0.0, 1.0, 2.5]), + [0.0, 1.0, 2.5], + (0.0, 1.0, 2.5)): + self.assert_(allclose(f(arg), [0.0, 1.0, 6.25])) + + self.assertEqual(f(3), 9) + self.assert_(f(-2.5) - 6.25 < 1E-10) + + def test_direct(self): + @mkufunc + def f(x): + return x * x + self.check_ufunc(f) + + def test_noargs(self): + @mkufunc() + def f(x): + return x * x + self.check_ufunc(f) + + def test_varargs(self): + for arg in (float, + [float], + [(float, float)]): + @mkufunc(arg) + def f(x): + return x * x + self.check_ufunc(f) + + class Math_Tests(unittest.TestCase): - def test_sin(self): - @mkufunc([(float, float)]) - def u_sin(x): - return sin(x) + def test_func1arg(self): + for f in (math.exp, math.log, math.sqrt, + math.acos, math.asin, math.atan, + math.cos, math.sin, math.tan): + @mkufunc + def uf(x): + return f(x) + x = 0.4376 + a = uf(x) + b = f(x) + self.assert_(abs(a - b) < 1E-10, '%r %s != %s' % (f, a, b)) + xx = arange(0.1, 0.9, 0.01) + a = uf(xx) + b = [f(x) for x in xx] + self.assert_(allclose(a, b)) - x = 1.23 - self.assert_(u_sin(x), sin(x)) + def test_arithmetic(self): + def f(x): + return (4 * x + 2) / (x * x - 7 * x + 1) + uf = mkufunc(f) + x = arange(0, 2, 0.1) + self.assert_(allclose(uf(x), f(x))) + +class Loop_Tests(unittest.TestCase): + pass +class 
Switch_Tests(unittest.TestCase): + pass + +class FreeVariable_Tests(unittest.TestCase): + pass + +class Misc_Tests(unittest.TestCase): + pass + + if __name__ == '__main__': unittest.main() From scipy-svn at scipy.org Sat Jun 28 11:40:19 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Sat, 28 Jun 2008 10:40:19 -0500 (CDT) Subject: [Scipy-svn] r4491 - trunk/scipy/sandbox/mkufunc Message-ID: <20080628154019.E8FA239C65C@scipy.org> Author: ilan Date: 2008-06-28 10:40:16 -0500 (Sat, 28 Jun 2008) New Revision: 4491 Modified: trunk/scipy/sandbox/mkufunc/mkufunc.py trunk/scipy/sandbox/mkufunc/test_mkufunc.py Log: Moved some tests Modified: trunk/scipy/sandbox/mkufunc/mkufunc.py =================================================================== --- trunk/scipy/sandbox/mkufunc/mkufunc.py 2008-06-28 14:36:29 UTC (rev 4490) +++ trunk/scipy/sandbox/mkufunc/mkufunc.py 2008-06-28 15:40:16 UTC (rev 4491) @@ -32,6 +32,7 @@ compilation is done upon initialization >>> x = Cfunc(sqr, signature) + >>> x.nin # number of input arguments 1 >>> x.nout # number of output arguments (must be 1 for now) @@ -136,18 +137,6 @@ ''' % locals() -def test1(): - def sqr(x): - return x * x - #verbose = True - for argtypes in ([int, int], [float, float]): - x = Cfunc(sqr, argtypes) - print x.cname, x.nin, x.nout, x.sig - print x.cfunc() - print '{{{%s}}}' % x.decl() - print x.support_code() - - def write_pypyc(cfuncs): """ Given a list of Cfunc instances, write the C code containing the functions into a file. @@ -248,28 +237,6 @@ sources=['pypy.c']) -def test2(): - from numpy import array - - def sqr(x): - return x * x - - ufunc = genufunc(sqr, [ - (float, float), - (int, int), - ]) - - x = array([0.0, 1.0, 2.5, 12.0]) - print "x =", x, x.dtype - y = ufunc(x) - print "y =", y, y.dtype - - x = array([0, 1, 2, 15]) - print "x =", x, x.dtype - y = ufunc(x) - print "y =", y, y.dtype - - def mkufunc(arg0=[float]): """ The actual API function, to be used as decorator function. 
@@ -324,19 +291,4 @@ if __name__ == '__main__': import doctest - #doctest.testmod() - - test2() - - exit() - - - def sqr(x): - return x * x - - #sqr = mkufunc({})(sqr) - sqr = mkufunc([(float, float)])(sqr) - #sqr = mkufunc(int)(sqr) - #sqr = mkufunc(sqr) - - print sqr(8) + doctest.testmod() Modified: trunk/scipy/sandbox/mkufunc/test_mkufunc.py =================================================================== --- trunk/scipy/sandbox/mkufunc/test_mkufunc.py 2008-06-28 14:36:29 UTC (rev 4490) +++ trunk/scipy/sandbox/mkufunc/test_mkufunc.py 2008-06-28 15:40:16 UTC (rev 4491) @@ -5,13 +5,38 @@ from mkufunc import Cfunc, genufunc, mkufunc -def f(x): - return 3.2 * x * x - 18.3 * x + sin(x) +class Internal_Tests(unittest.TestCase): + + def test_Cfunc(self): + def sqr(x): + return x * x + cf = Cfunc(sqr, [int, int], 42) + self.assertEqual(cf.nin, 1) + self.assertEqual(cf.nout, 1) + self.assertEqual(cf.cname, 'f42_pypy_g_sqr') + def test_genufunc(self): + def foo(x): + return x + 17 + uf = genufunc(foo, [ + (float, float), + (int, int), + ]) + self.assertEqual(uf(4), 21) + x = array([1.1, 2.3]) + y = uf(x) + self.assert_(allclose(y, [18.1, 19.3])) + self.assert_(str(y.dtype).startswith('float')) + + x = array([1, 4]) + y = uf(x) + self.assertEqual(list(y), [18, 21]) + self.assert_(str(y.dtype).startswith('int')) + + class Arg_Tests(unittest.TestCase): def check_ufunc(self, f): - #self.assert_(f.__type__ for arg in (array([0.0, 1.0, 2.5]), [0.0, 1.0, 2.5], (0.0, 1.0, 2.5)): From scipy-svn at scipy.org Sat Jun 28 15:05:30 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Sat, 28 Jun 2008 14:05:30 -0500 (CDT) Subject: [Scipy-svn] r4492 - trunk/scipy/sandbox/mkufunc Message-ID: <20080628190530.71B6C39C442@scipy.org> Author: ilan Date: 2008-06-28 14:05:29 -0500 (Sat, 28 Jun 2008) New Revision: 4492 Modified: trunk/scipy/sandbox/mkufunc/mkufunc.py trunk/scipy/sandbox/mkufunc/test_mkufunc.py Log: Implemented multiple input arguments Modified: trunk/scipy/sandbox/mkufunc/mkufunc.py =================================================================== --- trunk/scipy/sandbox/mkufunc/mkufunc.py 2008-06-28 15:40:16 UTC (rev 4491) +++ trunk/scipy/sandbox/mkufunc/mkufunc.py 2008-06-28 19:05:29 UTC (rev 4492) @@ -102,36 +102,69 @@ found = p.findall(self._allCsrc) assert len(found) == 1 return found[0] - + def ufunc_support_code(self): - arg0type = typedict[self.sig[0]][1] - rettype = typedict[self.sig[-1]][1] + # Unfortunately the code in here is very hard to read. + # In order to make the code clearer, one would need a real template + # engine link Cheetah (http://cheetahtemplate.org/). + # However, somehting like that would be too much overhead for scipy. 
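+        # The code below emits one stride (isN) and one data pointer
+        # (ipN) per input argument of the wrapped C function.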
+ n = self.n + nin = self.nin cname = self.cname + + def varname(i): + return chr(i + ord('a')) + + declargs = ', '.join('%s %s' % (typedict[self.sig[i]][1], varname(i)) + for i in xrange(self.nin)) + + args = ', '.join(varname(i) for i in xrange(self.nin)) + + isn_steps = '\n\t'.join('npy_intp is%i = steps[%i];' % (i, i) + for i in xrange(self.nin)) + + ipn_args = '\n\t'.join('char *ip%i = args[%i];' % (i, i) + for i in xrange(self.nin)) + + body1d_in = '\n\t\t'.join('%s *in%i = (%s *)ip%i;' % + (2*(typedict[self.sig[i]][1], i)) + for i in xrange(self.nin)) + + body1d_add = '\n\t\t'.join('ip%i += is%i;' % (i, i) + for i in xrange(self.nin)) + + ptrargs = ', '.join('*in%i' % i for i in xrange(self.nin)) + + rettype = typedict[self.sig[-1]][1] + return ''' -static %(rettype)s wrap_%(cname)s(%(arg0type)s x) +static %(rettype)s wrap_%(cname)s(%(declargs)s) { - return %(cname)s(x); + return %(cname)s(%(args)s); } -typedef %(rettype)s Func_%(n)i(%(arg0type)s); +typedef %(rettype)s Func_%(n)i(%(declargs)s); static void PyUFunc_%(n)i(char **args, npy_intp *dimensions, npy_intp *steps, void *func) { - npy_intp n = dimensions[0]; - npy_intp is0 = steps[0]; - npy_intp os = steps[1]; - char *ip0 = args[0]; - char *op = args[1]; + npy_intp i, n; + %(isn_steps)s + npy_intp os = steps[%(nin)s]; + %(ipn_args)s + char *op = args[%(nin)s]; Func_%(n)i *f = (Func_%(n)i *) func; - npy_intp i; - - for(i = 0; i < n; i++, ip0 += is0, op += os) { - %(arg0type)s *in1 = (%(arg0type)s *)ip0; + n = dimensions[0]; + + for(i = 0; i < n; i++) { + %(body1d_in)s %(rettype)s *out = (%(rettype)s *)op; - *out = f(*in1); + *out = f(%(ptrargs)s); + + %(body1d_add)s + op += os; } } ''' % locals() @@ -208,12 +241,12 @@ %(fname)s_functions, %(fname)s_data, %(fname)s_types, - %(ntypes)i, /* ntypes */ - %(nin)i, /* nin */ - 1, /* nout */ - PyUFunc_None, /* identity */ - "%(fname)s", /* name */ - "", /* doc */ + %(ntypes)i, /* ntypes */ + %(nin)i, /* nin */ + 1, /* nout */ + PyUFunc_None, /* identity */ + "%(fname)s", /* name */ + "UFunc made by mkufunc", /* doc */ 0); ''' % locals() @@ -238,8 +271,8 @@ def mkufunc(arg0=[float]): - """ The actual API function, to be used as decorator function. - + """ The actual API function, for use in decorator function. 
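+
+    The argument may be a plain function, a type, or a list of
+    signatures, e.g. mkufunc([(int, float, int), float]).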
+ """ class Compile(object): @@ -264,9 +297,7 @@ if t not in typedict.keys(): raise TypeError - print 'sigs:', signatures self.ufunc = genufunc(f, signatures) - #self.ufunc = f def __call__(self, *args): return self.ufunc(*args) Modified: trunk/scipy/sandbox/mkufunc/test_mkufunc.py =================================================================== --- trunk/scipy/sandbox/mkufunc/test_mkufunc.py 2008-06-28 15:40:16 UTC (rev 4491) +++ trunk/scipy/sandbox/mkufunc/test_mkufunc.py 2008-06-28 19:05:29 UTC (rev 4492) @@ -84,7 +84,18 @@ a = uf(xx) b = [f(x) for x in xx] self.assert_(allclose(a, b)) - + + def test_func2arg(self): + @mkufunc + def f(x, y): + return math.atan2(x, y) + + xx = array([1.0, 3.0, -2.4, 3.1, -2.3]) + yy = array([1.0, 2.0, 7.5, -8.7, 0.0]) + a = f(xx, yy) + b = [math.atan2(x, y) for x, y in zip(xx, yy)] + self.assert_(allclose(a, b)) + def test_arithmetic(self): def f(x): return (4 * x + 2) / (x * x - 7 * x + 1) @@ -92,7 +103,15 @@ x = arange(0, 2, 0.1) self.assert_(allclose(uf(x), f(x))) + def f(x, y, z): + return x * y * z + uf = mkufunc(f) + x = arange(0, 1, 0.1) + y = 2 * x + z = 3 * x + self.assert_(allclose(uf(x, y, z), f(x, y, z))) + class Loop_Tests(unittest.TestCase): pass From scipy-svn at scipy.org Sat Jun 28 16:37:05 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Sat, 28 Jun 2008 15:37:05 -0500 (CDT) Subject: [Scipy-svn] r4493 - trunk/scipy/sandbox/mkufunc Message-ID: <20080628203705.A326F39C627@scipy.org> Author: ilan Date: 2008-06-28 15:37:04 -0500 (Sat, 28 Jun 2008) New Revision: 4493 Modified: trunk/scipy/sandbox/mkufunc/test_mkufunc.py Log: Added more test cases Modified: trunk/scipy/sandbox/mkufunc/test_mkufunc.py =================================================================== --- trunk/scipy/sandbox/mkufunc/test_mkufunc.py 2008-06-28 19:05:29 UTC (rev 4492) +++ trunk/scipy/sandbox/mkufunc/test_mkufunc.py 2008-06-28 20:37:04 UTC (rev 4493) @@ -1,7 +1,7 @@ import math import unittest -from numpy import array, arange, allclose, vectorize +from numpy import array, arange, allclose from mkufunc import Cfunc, genufunc, mkufunc @@ -66,7 +66,27 @@ return x * x self.check_ufunc(f) + def test_int(self): + @mkufunc(int) + def f(x): + return x * x + self.assertEqual(f(3), 9) + self.assert_(isinstance(f(42), int)) + def test_mixed(self): + @mkufunc([(int, float, int), float]) + def f(n, x): + return n + x * x + + y = f(2, 3.9) # Note that int(2 + 3.9 * 3.9) = 17 + self.assertEqual(y, 17) + self.assert_(isinstance(y, int)) + + y = f(2.0, 3.9) + self.assert_(abs(y - 17.21) < 1E-10) + self.assert_(isinstance(y, float)) + + class Math_Tests(unittest.TestCase): def test_func1arg(self): @@ -112,15 +132,69 @@ self.assert_(allclose(uf(x, y, z), f(x, y, z))) -class Loop_Tests(unittest.TestCase): - pass +class Control_Flow_Tests(unittest.TestCase): -class Switch_Tests(unittest.TestCase): - pass + def test_if(self): + @mkufunc(int) + def f(n): + if n < 4: + return n + else: + return n * n + self.assertEqual(f(3), 3) + self.assertEqual(f(4), 16) + + def test_switch(self): + @mkufunc(int) + def f(n): + if n < 4: + return n + elif n == 4: + return 42 + elif n == 5: + return 73 + else: + return n * n + + self.assertEqual(f(3), 3) + self.assertEqual(f(4), 42) + self.assertEqual(f(5), 73) + self.assertEqual(f(6), 36) + + def test_loop(self): + @mkufunc(int) + def f(n): + res = 0 + for i in xrange(n): + res += i*i + return res + + self.assertEqual(f(3), 5) + self.assertEqual(f(95), 281295) + + class FreeVariable_Tests(unittest.TestCase): - pass + def 
test_const(self): + a = 13.6 + @mkufunc + def f(x): + return a * x + + x = arange(0, 1, 0.1) + self.assert_(allclose(f(x), a * x)) + + def test_const2(self): + from math import sin, pi, sqrt + @mkufunc + def sin_deg(angle): + return sin(angle / 180.0 * pi) + + self.assert_(allclose(sin_deg([0, 30, 45, 60, 90, 180, 270, 360]), + [0, 0.5, 1/sqrt(2), sqrt(3)/2, 1, 0, -1, 0])) + + class Misc_Tests(unittest.TestCase): pass From scipy-svn at scipy.org Sat Jun 28 17:17:59 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Sat, 28 Jun 2008 16:17:59 -0500 (CDT) Subject: [Scipy-svn] r4494 - trunk/scipy/sandbox/mkufunc Message-ID: <20080628211759.83A2B39C633@scipy.org> Author: ilan Date: 2008-06-28 16:17:58 -0500 (Sat, 28 Jun 2008) New Revision: 4494 Modified: trunk/scipy/sandbox/mkufunc/mkufunc.py trunk/scipy/sandbox/mkufunc/test_mkufunc.py Log: Improved type checking and added tests to see if TypeError was raised Modified: trunk/scipy/sandbox/mkufunc/mkufunc.py =================================================================== --- trunk/scipy/sandbox/mkufunc/mkufunc.py 2008-06-28 20:37:04 UTC (rev 4493) +++ trunk/scipy/sandbox/mkufunc/mkufunc.py 2008-06-28 21:17:58 UTC (rev 4494) @@ -6,7 +6,7 @@ import sys import re import cStringIO -from types import * +from types import FunctionType import numpy import scipy.weave as weave @@ -280,13 +280,13 @@ nin = f.func_code.co_argcount nout = 1 for i, sig in enumerate(signatures): - if sig in typedict.keys(): + if isinstance(sig, tuple): + pass + elif sig in typedict.keys(): signatures[i] = (nin + nout) * (sig,) - elif isinstance(sig, tuple): - pass else: - raise TypeError - + raise TypeError("no match for %r" % sig) + for sig in signatures: assert isinstance(sig, tuple) if len(sig) != nin + nout: @@ -295,7 +295,7 @@ (sig, f.__name__)) for t in sig: if t not in typedict.keys(): - raise TypeError + raise TypeError("no match for %r" % t) self.ufunc = genufunc(f, signatures) @@ -307,7 +307,7 @@ signatures = [float] return Compile(f) - elif isinstance(arg0, ListType): + elif isinstance(arg0, list): signatures = arg0 return Compile Modified: trunk/scipy/sandbox/mkufunc/test_mkufunc.py =================================================================== --- trunk/scipy/sandbox/mkufunc/test_mkufunc.py 2008-06-28 20:37:04 UTC (rev 4493) +++ trunk/scipy/sandbox/mkufunc/test_mkufunc.py 2008-06-28 21:17:58 UTC (rev 4494) @@ -85,7 +85,17 @@ y = f(2.0, 3.9) self.assert_(abs(y - 17.21) < 1E-10) self.assert_(isinstance(y, float)) + + def test_exceptions(self): + def f(x): + return x + self.assertRaises(TypeError, mkufunc, {}) + self.assertRaises(TypeError, mkufunc([(float,)]), f) + self.assertRaises(TypeError, mkufunc([3*(float,)]), f) + self.assertRaises(TypeError, mkufunc([{}]), f) + self.assertRaises(TypeError, mkufunc([(int, {})]), f) + class Math_Tests(unittest.TestCase): From scipy-svn at scipy.org Sat Jun 28 20:45:42 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Sat, 28 Jun 2008 19:45:42 -0500 (CDT) Subject: [Scipy-svn] r4495 - trunk/scipy/sandbox/mkufunc Message-ID: <20080629004542.B104B39C089@scipy.org> Author: ilan Date: 2008-06-28 19:45:36 -0500 (Sat, 28 Jun 2008) New Revision: 4495 Modified: trunk/scipy/sandbox/mkufunc/mkufunc.py trunk/scipy/sandbox/mkufunc/test_mkufunc.py Log: Refactoring Modified: trunk/scipy/sandbox/mkufunc/mkufunc.py =================================================================== --- trunk/scipy/sandbox/mkufunc/mkufunc.py 2008-06-28 21:17:58 UTC (rev 4494) +++ trunk/scipy/sandbox/mkufunc/mkufunc.py 
2008-06-29 00:45:36 UTC (rev 4495) @@ -16,12 +16,36 @@ verbose = False +def translate(f, argtypes): + + if not verbose: + tmp = sys.stderr + sys.stderr = cStringIO.StringIO() + + t = Translation(f, backend='c') + t.annotate(argtypes) + t.source() + + if not verbose: + sys.stderr = tmp + + c_source_filename = t.driver.c_source_filename + assert c_source_filename.endswith('.c') + + return open(c_source_filename, 'r').read() + + +class Ctype: + def __init__(self, npy, c): + self.npy = npy + self.c = c + typedict = { - int: ['NPY_LONG', 'long' ], - long: ['NPY_LONG', 'long' ], - float: ['NPY_DOUBLE', 'double'], + int: Ctype('NPY_LONG', 'long' ), + float: Ctype('NPY_DOUBLE', 'double'), } + class Cfunc(object): """ C compiled python functions @@ -31,7 +55,7 @@ >>> signature = [int, int] # only the input arguments are used here compilation is done upon initialization - >>> x = Cfunc(sqr, signature) + >>> x = Cfunc(sqr, signature, 123) >>> x.nin # number of input arguments 1 @@ -56,7 +80,7 @@ -- generate the C support code to make this function part work with PyUFuncGenericFunction """ - def __init__(self, f, signature, n=0): + def __init__(self, f, signature, n): self.f = f self.n = n self.sig = signature @@ -64,21 +88,8 @@ self.nout = len(self.sig) - self.nin assert self.nout == 1 # for now - if not verbose: - tmp = sys.stderr - sys.stderr = cStringIO.StringIO() - - t = Translation(f, backend='c') - t.annotate(signature[:self.nin]) - t.source() - - if not verbose: - sys.stderr = tmp + src = translate(f, signature[:self.nin]) - c_source_filename = t.driver.c_source_filename - assert c_source_filename.endswith('.c') - src = open(c_source_filename, 'r').read() - self._prefix = 'f%i_' % self.n self._allCsrc = src.replace('pypy_', self._prefix + 'pypy_') self.cname = self._prefix + 'pypy_g_' + f.__name__ @@ -108,7 +119,6 @@ # In order to make the code clearer, one would need a real template # engine link Cheetah (http://cheetahtemplate.org/). # However, somehting like that would be too much overhead for scipy. 
- n = self.n nin = self.nin cname = self.cname @@ -116,7 +126,7 @@ def varname(i): return chr(i + ord('a')) - declargs = ', '.join('%s %s' % (typedict[self.sig[i]][1], varname(i)) + declargs = ', '.join('%s %s' % (typedict[self.sig[i]].c, varname(i)) for i in xrange(self.nin)) args = ', '.join(varname(i) for i in xrange(self.nin)) @@ -128,7 +138,7 @@ for i in xrange(self.nin)) body1d_in = '\n\t\t'.join('%s *in%i = (%s *)ip%i;' % - (2*(typedict[self.sig[i]][1], i)) + (2*(typedict[self.sig[i]].c, i)) for i in xrange(self.nin)) body1d_add = '\n\t\t'.join('ip%i += is%i;' % (i, i) @@ -136,7 +146,7 @@ ptrargs = ', '.join('*in%i' % i for i in xrange(self.nin)) - rettype = typedict[self.sig[-1]][1] + rettype = typedict[self.sig[-1]].c return ''' static %(rettype)s wrap_%(cname)s(%(declargs)s) @@ -210,7 +220,7 @@ data = ''.join('\t(void *) wrap_%s,\n' % cf.cname for cf in cfuncs) types = ''.join('\t%s /* %i */\n' % - (''.join(typedict[t][0] + ', ' for t in cf.sig), cf.n) + (''.join(typedict[t].npy + ', ' for t in cf.sig), cf.n) for cf in cfuncs) fname = f.__name__ Modified: trunk/scipy/sandbox/mkufunc/test_mkufunc.py =================================================================== --- trunk/scipy/sandbox/mkufunc/test_mkufunc.py 2008-06-28 21:17:58 UTC (rev 4494) +++ trunk/scipy/sandbox/mkufunc/test_mkufunc.py 2008-06-29 00:45:36 UTC (rev 4495) @@ -72,7 +72,7 @@ return x * x self.assertEqual(f(3), 9) self.assert_(isinstance(f(42), int)) - + def test_mixed(self): @mkufunc([(int, float, int), float]) def f(n, x): From scipy-svn at scipy.org Sat Jun 28 20:47:15 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Sat, 28 Jun 2008 19:47:15 -0500 (CDT) Subject: [Scipy-svn] r4496 - trunk/scipy/sandbox/mkufunc Message-ID: <20080629004715.B62BA39C089@scipy.org> Author: ilan Date: 2008-06-28 19:47:14 -0500 (Sat, 28 Jun 2008) New Revision: 4496 Removed: trunk/scipy/sandbox/mkufunc/test_1.py Log: Now all in test_mkufunc.py Deleted: trunk/scipy/sandbox/mkufunc/test_1.py =================================================================== --- trunk/scipy/sandbox/mkufunc/test_1.py 2008-06-29 00:45:36 UTC (rev 4495) +++ trunk/scipy/sandbox/mkufunc/test_1.py 2008-06-29 00:47:14 UTC (rev 4496) @@ -1,53 +0,0 @@ -#!/usr/bin/env python -from math import sin, cos, pi - -from numpy import array - -from mkufunc import mkufunc - -int_const = 42 - -float_const = 3.14 - -def my_sqr(x): - return x * x / 1.23456 - -@mkufunc([(float, float), (int, int)]) -def bar(n): - "Bar docstring" - if n < 10: - for i in xrange(10): - n += i*i - return n - elif n == 10: - return int_const - elif n == 11: - return float_const - elif n == 12: - return cos(pi) - #return 1 - elif n > 12: - return my_sqr(n) - else: - return 5 - - -#@mkufunc(float) -#def baz(n): -# "Baz docstring" -# return n * n + 1000 - - -#print bar - -x = array([0.0, 1.0, 2.5, 12.0]) -print "x =", x, x.dtype -y = bar(x) -print "y =", y, y.dtype - -print bar(5) -print bar(15) -print bar(10) -print bar(11) -print bar(12) -print bar(12.5) From scipy-svn at scipy.org Sun Jun 29 04:47:39 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Sun, 29 Jun 2008 03:47:39 -0500 (CDT) Subject: [Scipy-svn] r4497 - trunk/scipy/sandbox/mkufunc Message-ID: <20080629084739.6B7BD39C603@scipy.org> Author: ilan Date: 2008-06-29 03:47:38 -0500 (Sun, 29 Jun 2008) New Revision: 4497 Added: trunk/scipy/sandbox/mkufunc/funcutil.py Log: Implements a function for getting an MD5 from the bytecode of a function Added: trunk/scipy/sandbox/mkufunc/funcutil.py
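The idea behind the hash is to key caches on the function's compiled body rather than its name: two functions whose bytecode and referenced globals agree should map to the same digest. A stripped-down sketch of the core trick, using a hypothetical helper name (the real funcutil.py below also folds the disassembly text and the values of referenced globals into the hash):

import hashlib

def bytecode_hash(f):
    # Hash only the raw bytecode string of the code object; the
    # actual func_hash is more careful, but the principle is the same.
    return hashlib.md5(f.func_code.co_code).hexdigest()

def f(x): return x * x
def g(y): return y * y

# Identical bodies hash identically, whatever the functions are named.
assert bytecode_hash(f) == bytecode_hash(g)
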
=================================================================== --- trunk/scipy/sandbox/mkufunc/funcutil.py 2008-06-29 00:47:14 UTC (rev 4496) +++ trunk/scipy/sandbox/mkufunc/funcutil.py 2008-06-29 08:47:38 UTC (rev 4497) @@ -0,0 +1,76 @@ +import sys, re, dis, hashlib, cStringIO + + +def disassemble(co): + """ Given a code object, return output from dis.disassemble as a string. + + (dis.disassemble writes its output to stdout.) + """ + tmp = sys.stdout + sys.stdout = cStringIO.StringIO() + dis.disassemble(co) + res = sys.stdout.getvalue() + sys.stdout = tmp + return res + +pat_norep = re.compile(r'<[^<>]*>') +pat_white = re.compile(r'\s+') + +def disassemble2(co): + acc = cStringIO.StringIO() + for line in disassemble(co).splitlines(): + line = line[16:].strip() + if line: + acc.write(line+'\n') + + acc.write('co_names:\n') + for name in co.co_names: + if name in '''math exp log sqrt + cos sin tan acos asin atan atan2'''.split(): + continue + acc.write('%8s: %s\n' % (name, eval(name))) + + res = acc.getvalue() + + while True: + tmp = pat_norep.sub('NO_REPRESENTATION', res) + if tmp == res: + break + res = tmp + + return res + +def func_hash(f): + txt = disassemble2(f.func_code) + #print txt + txt = pat_white.sub(' ', txt) + return hashlib.md5(txt).hexdigest() + + +if __name__ == '__main__': + import math + from math import * + + md5sums = [] + + b = 3.14159 + g = lambda x: x + def h(n): + return n + 3 + + for a in xrange(2): + def f(x): + inner1 = lambda t: t/3.0 + def inner2(): return + t = b + g(42) + h(4) + return sin(pi * x) + a + t + md5sums.append(func_hash(f)) + + + def f(x): + return math.sin(x) + math.cos(x) + md5sums.append(func_hash(f)) + + assert md5sums == ['b821514915e98426c49d93f58e400025', '2bf13d8983c80c8fd773db4534a2c1b6', '8d2ce5ab9152dabc3d49d0732fb84666'] From scipy-svn at scipy.org Sun Jun 29 04:53:49 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Sun, 29 Jun 2008 03:53:49 -0500 (CDT) Subject: [Scipy-svn] r4498 - trunk/scipy/sandbox/mkufunc Message-ID: <20080629085349.A57E939C603@scipy.org> Author: ilan Date: 2008-06-29 03:53:48 -0500 (Sun, 29 Jun 2008) New Revision: 4498 Modified: trunk/scipy/sandbox/mkufunc/mkufunc.py trunk/scipy/sandbox/mkufunc/test_mkufunc.py Log: Numerous changes, mostly in preparation for caching Modified: trunk/scipy/sandbox/mkufunc/mkufunc.py =================================================================== --- trunk/scipy/sandbox/mkufunc/mkufunc.py 2008-06-29 08:47:38 UTC (rev 4497) +++ trunk/scipy/sandbox/mkufunc/mkufunc.py 2008-06-29 08:53:48 UTC (rev 4498) @@ -12,6 +12,7 @@ import scipy.weave as weave from interactive import Translation +from funcutil import func_hash verbose = False @@ -243,10 +244,17 @@ ntypes = len(signatures) nin = cfuncs[0].nin + fhash = func_hash(f) code = ''' import_ufunc(); +/**************************************************************************** +** function name: %(fname)s +** signatures: %(signatures)r +** bytecode hash: %(fhash)s +*****************************************************************************/ + return_val = PyUFunc_FromFuncAndData( %(fname)s_functions, %(fname)s_data, @@ -274,7 +282,7 @@ ufunc_info.add_include_dir('"."') return weave.inline(code, - verbose=0, force=1, # XXX + verbose=0, #force=1, support_code=support_code, customize=ufunc_info, sources=['pypy.c']) Modified: trunk/scipy/sandbox/mkufunc/test_mkufunc.py =================================================================== --- trunk/scipy/sandbox/mkufunc/test_mkufunc.py 2008-06-29 08:47:38 UTC
(rev 4497) +++ trunk/scipy/sandbox/mkufunc/test_mkufunc.py 2008-06-29 08:53:48 UTC (rev 4498) @@ -5,7 +5,14 @@ from mkufunc import Cfunc, genufunc, mkufunc -class Internal_Tests(unittest.TestCase): + +class Util: + + def assertClose(self, x, y): + self.assert_(allclose(x, y), '%s != %s' % (x, y)) + + +class Internal_Tests(unittest.TestCase, Util): def test_Cfunc(self): def sqr(x): @@ -25,7 +32,7 @@ self.assertEqual(uf(4), 21) x = array([1.1, 2.3]) y = uf(x) - self.assert_(allclose(y, [18.1, 19.3])) + self.assertClose(y, [18.1, 19.3]) self.assert_(str(y.dtype).startswith('float')) x = array([1, 4]) @@ -34,13 +41,13 @@ self.assert_(str(y.dtype).startswith('int')) -class Arg_Tests(unittest.TestCase): +class Arg_Tests(unittest.TestCase, Util): def check_ufunc(self, f): for arg in (array([0.0, 1.0, 2.5]), [0.0, 1.0, 2.5], (0.0, 1.0, 2.5)): - self.assert_(allclose(f(arg), [0.0, 1.0, 6.25])) + self.assertClose(f(arg), [0.0, 1.0, 6.25]) self.assertEqual(f(3), 9) self.assert_(f(-2.5) - 6.25 < 1E-10) @@ -83,7 +90,7 @@ self.assert_(isinstance(y, int)) y = f(2.0, 3.9) - self.assert_(abs(y - 17.21) < 1E-10) + self.assertClose(y, 17.21) self.assert_(isinstance(y, float)) def test_exceptions(self): @@ -97,41 +104,82 @@ self.assertRaises(TypeError, mkufunc([(int, {})]), f) -class Math_Tests(unittest.TestCase): +class Math_Tests(unittest.TestCase, Util): - def test_func1arg(self): - for f in (math.exp, math.log, math.sqrt, - math.acos, math.asin, math.atan, - math.cos, math.sin, math.tan): - @mkufunc - def uf(x): - return f(x) - x = 0.4376 - a = uf(x) - b = f(x) - self.assert_(abs(a - b) < 1E-10, '%r %s != %s' % (f, a, b)) - xx = arange(0.1, 0.9, 0.01) - a = uf(xx) - b = [f(x) for x in xx] - self.assert_(allclose(a, b)) - - def test_func2arg(self): + def assertFuncsEqual(self, uf, f): + x = 0.4376 + a = uf(x) + b = f(x) + self.assertClose(a, b) + xx = arange(0.1, 0.9, 0.01) + a = uf(xx) + b = [f(x) for x in xx] + self.assertClose(a, b) + + def test_exp(self): @mkufunc + def f(x): return math.exp(x) + self.assertFuncsEqual(f, math.exp) + + def test_log(self): + @mkufunc + def f(x): return math.log(x) + self.assertFuncsEqual(f, math.log) + + def test_sqrt(self): + @mkufunc + def f(x): return math.sqrt(x) + self.assertFuncsEqual(f, math.sqrt) + + def test_cos(self): + @mkufunc + def f(x): return math.cos(x) + self.assertFuncsEqual(f, math.cos) + + def test_sin(self): + @mkufunc + def f(x): return math.sin(x) + self.assertFuncsEqual(f, math.sin) + + def test_tan(self): + @mkufunc + def f(x): return math.tan(x) + self.assertFuncsEqual(f, math.tan) + + def test_acos(self): + @mkufunc + def f(x): return math.acos(x) + self.assertFuncsEqual(f, math.acos) + + def test_asin(self): + @mkufunc + def f(x): return math.asin(x) + self.assertFuncsEqual(f, math.asin) + + def test_atan(self): + @mkufunc + def f(x): return math.atan(x) + self.assertFuncsEqual(f, math.atan) + + def test_atan2(self): + @mkufunc def f(x, y): return math.atan2(x, y) + + self.assertClose(f(4, 5), math.atan2(4, 5)) xx = array([1.0, 3.0, -2.4, 3.1, -2.3]) yy = array([1.0, 2.0, 7.5, -8.7, 0.0]) a = f(xx, yy) b = [math.atan2(x, y) for x, y in zip(xx, yy)] - self.assert_(allclose(a, b)) + self.assertClose(a, b) def test_arithmetic(self): def f(x): return (4 * x + 2) / (x * x - 7 * x + 1) uf = mkufunc(f) x = arange(0, 2, 0.1) - self.assert_(allclose(uf(x), f(x))) + self.assertClose(uf(x), f(x)) def f(x, y, z): return x * y * z @@ -139,7 +187,7 @@ x = arange(0, 1, 0.1) y = 2 * x z = 3 * x - self.assert_(allclose(uf(x, y, z), f(x, y, z))) + 
self.assertClose(uf(x, y, z), f(x, y, z)) class Control_Flow_Tests(unittest.TestCase): @@ -184,7 +232,7 @@ self.assertEqual(f(95), 281295) -class FreeVariable_Tests(unittest.TestCase): +class FreeVariable_Tests(unittest.TestCase, Util): def test_const(self): a = 13.6 @@ -193,7 +241,7 @@ return a * x x = arange(0, 1, 0.1) - self.assert_(allclose(f(x), a * x)) + self.assertClose(f(x), a * x) def test_const2(self): from math import sin, pi, sqrt @@ -201,11 +249,12 @@ def sin_deg(angle): return sin(angle / 180.0 * pi) - self.assert_(allclose(sin_deg([0, 30, 45, 60, 90, 180, 270, 360]), - [0, 0.5, 1/sqrt(2), sqrt(3)/2, 1, 0, -1, 0])) + self.assertClose(sin_deg([0, 30, 45, 60, 90, 180, 270, 360]), + [0, 0.5, 1/sqrt(2), sqrt(3)/2, 1, 0, -1, 0]) -class Misc_Tests(unittest.TestCase): +class Misc_Tests(unittest.TestCase, Util): + pass From scipy-svn at scipy.org Sun Jun 29 06:30:24 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Sun, 29 Jun 2008 05:30:24 -0500 (CDT) Subject: [Scipy-svn] r4499 - trunk/scipy/sandbox/mkufunc Message-ID: <20080629103024.6EC5639C661@scipy.org> Author: ilan Date: 2008-06-29 05:30:22 -0500 (Sun, 29 Jun 2008) New Revision: 4499 Modified: trunk/scipy/sandbox/mkufunc/funcutil.py Log: Free variables still need work Modified: trunk/scipy/sandbox/mkufunc/funcutil.py =================================================================== --- trunk/scipy/sandbox/mkufunc/funcutil.py 2008-06-29 08:53:48 UTC (rev 4498) +++ trunk/scipy/sandbox/mkufunc/funcutil.py 2008-06-29 10:30:22 UTC (rev 4499) @@ -25,10 +25,11 @@ acc.write('co_names:\n') for name in co.co_names: - if name in '''math exp log sqrt - cos sin tan acos asin atan atan2'''.split(): - continue - acc.write('%8s: %s\n' % (name, eval(name))) + try: + tmp = str(eval(name)) + except NameError: + tmp = 'EVAL_FAILED' + acc.write('%8s: %s\n' % (name, tmp)) res = acc.getvalue() @@ -70,7 +71,7 @@ def f(x): return math.sin(x) + math.cos(x) md5sums.append(func_hash(f)) - - assert md5sums == ['b821514915e98426c49d93f58e400025', - '2bf13d8983c80c8fd773db4534a2c1b6', - '8d2ce5ab9152dabc3d49d0732fb84666'] + #print md5sums + assert md5sums == ['91d13599d610a554dccd6b44cb5ef1f0', + 'be0c54b477180f897cbf7604fc565d18', + '732d1ef6c1ce8cc92a7f28917496d292'] From scipy-svn at scipy.org Sun Jun 29 18:21:39 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Sun, 29 Jun 2008 17:21:39 -0500 (CDT) Subject: [Scipy-svn] r4500 - trunk/scipy/sandbox/mkufunc Message-ID: <20080629222139.31A2239C1B5@scipy.org> Author: ilan Date: 2008-06-29 17:21:36 -0500 (Sun, 29 Jun 2008) New Revision: 4500 Added: trunk/scipy/sandbox/mkufunc/pypy_head.h Removed: trunk/scipy/sandbox/mkufunc/head.c Modified: trunk/scipy/sandbox/mkufunc/funcutil.py trunk/scipy/sandbox/mkufunc/mkufunc.py Log: Implemented caching for pypy translated functions, also temp files are now stored in weave's temp directory. 
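The cache is keyed on the function hash: the C source produced by the PyPy translator is written into weave's catalog directory under a name derived from func_hash, so translating the same function with the same argument types a second time reduces to a file read. A minimal sketch of the lookup pattern (a sketch only, assuming the func_hash helper from funcutil.py; the diff below shows the actual implementation):

import os.path
from scipy import weave
from funcutil import func_hash

def cached_c_source(f, argtypes, translate):
    # 'translate' runs the (slow) PyPy translation; it is only
    # invoked when no cached file exists for this function/types.
    fname = os.path.join(weave.catalog.default_dir(),
                         'pypy_%s.c' % func_hash(f, argtypes))
    if not os.path.exists(fname):
        fo = open(fname, 'w')
        fo.write(translate(f, argtypes))
        fo.close()
    return open(fname).read()
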
Modified: trunk/scipy/sandbox/mkufunc/funcutil.py =================================================================== --- trunk/scipy/sandbox/mkufunc/funcutil.py 2008-06-29 10:30:22 UTC (rev 4499) +++ trunk/scipy/sandbox/mkufunc/funcutil.py 2008-06-29 22:21:36 UTC (rev 4500) @@ -41,16 +41,16 @@ return res -def func_hash(f): - txt = disassemble2(f.func_code) + +def func_hash(f, extra=None): + txt = disassemble2(f.func_code) + repr(extra) #print txt txt = pat_white.sub(' ', txt) return hashlib.md5(txt).hexdigest() - if __name__ == '__main__': - import math - from math import * +# import math +# from math import * md5sums = [] @@ -69,9 +69,14 @@ def f(x): - return math.sin(x) + math.cos(x) + return math.sin(x) md5sums.append(func_hash(f)) - #print md5sums - assert md5sums == ['91d13599d610a554dccd6b44cb5ef1f0', - 'be0c54b477180f897cbf7604fc565d18', - '732d1ef6c1ce8cc92a7f28917496d292'] + + def f(x): + return sin(x) + md5sums.append(func_hash(f, float)) + + print md5sums + #assert md5sums == ['91d13599d610a554dccd6b44cb5ef1f0', + # 'be0c54b477180f897cbf7604fc565d18', + # '732d1ef6c1ce8cc92a7f28917496d292'] Deleted: trunk/scipy/sandbox/mkufunc/head.c =================================================================== --- trunk/scipy/sandbox/mkufunc/head.c 2008-06-29 10:30:22 UTC (rev 4499) +++ trunk/scipy/sandbox/mkufunc/head.c 2008-06-29 22:21:36 UTC (rev 4500) @@ -1,382 +0,0 @@ - -#include -#include -#include - -/* ================================================== g_prerequisite.h === */ - -typedef unsigned char bool_t; - -/* ================================================== exception.h ======== */ - -#define RPY_DEBUG_RETURN() /* nothing */ - - -/* ================================================== int.h ============== */ - -/*** unary operations ***/ - -#define OP_INT_IS_TRUE(x,r) OP_INT_NE(x,0,r) - -#define OP_INT_INVERT(x,r) r = ~((x)) - -#define OP_INT_NEG(x,r) r = -(x) - -#define OP_INT_NEG_OVF(x,r) \ - if ((x) == LONG_MIN) FAIL_OVF("integer negate"); \ - OP_INT_NEG(x,r) -#define OP_LLONG_NEG_OVF(x,r) \ - if ((x) == LLONG_MIN) FAIL_OVF("integer negate"); \ - OP_LLONG_NEG(x,r) - -#define OP_INT_ABS(x,r) r = (x) >= 0 ? x : -(x) - -#define OP_INT_ABS_OVF(x,r) \ - if ((x) == LONG_MIN) FAIL_OVF("integer absolute"); \ - OP_INT_ABS(x,r) -#define OP_LLONG_ABS_OVF(x,r) \ - if ((x) == LLONG_MIN) FAIL_OVF("integer absolute"); \ - OP_LLONG_ABS(x,r) - -/*** binary operations ***/ - -#define OP_INT_EQ(x,y,r) r = ((x) == (y)) -#define OP_INT_NE(x,y,r) r = ((x) != (y)) -#define OP_INT_LE(x,y,r) r = ((x) <= (y)) -#define OP_INT_GT(x,y,r) r = ((x) > (y)) -#define OP_INT_LT(x,y,r) r = ((x) < (y)) -#define OP_INT_GE(x,y,r) r = ((x) >= (y)) - -/* addition, subtraction */ - -#define OP_INT_ADD(x,y,r) r = (x) + (y) - -#define OP_INT_ADD_OVF(x,y,r) \ - OP_INT_ADD(x,y,r); \ - if ((r^(x)) >= 0 || (r^(y)) >= 0); \ - else FAIL_OVF("integer addition") - -#define OP_INT_ADD_NONNEG_OVF(x,y,r) /* y can be assumed >= 0 */ \ - OP_INT_ADD(x,y,r); \ - if (r >= (x)); \ - else FAIL_OVF("integer addition") -/* XXX can a C compiler be too clever and think it can "prove" that - * r >= x always hold above? 
*/ - -#define OP_INT_SUB(x,y,r) r = (x) - (y) - -#define OP_INT_SUB_OVF(x,y,r) \ - OP_INT_SUB(x,y,r); \ - if ((r^(x)) >= 0 || (r^~(y)) >= 0); \ - else FAIL_OVF("integer subtraction") - -#define OP_INT_MUL(x,y,r) r = (x) * (y) - -#if defined(HAVE_LONG_LONG) && SIZE_OF_LONG_LONG < SIZE_OF_LONG -# define OP_INT_MUL_OVF_LL 1 -#lse -# define OP_INT_MUL_OVF_LL 0 -#endif - -#if !OP_INT_MUL_OVF_LL - -#define OP_INT_MUL_OVF(x,y,r) \ - if (op_int_mul_ovf(x,y,&r)); \ - else FAIL_OVF("integer multiplication") - -#else - -#define OP_INT_MUL_OVF(x,y,r) \ - { \ - PY_LONG_LONG lr = (PY_LONG_LONG)(x) * (PY_LONG_LONG)(y); \ - r = (long)lr; \ - if ((PY_LONG_LONG)r == lr); \ - else FAIL_OVF("integer multiplication"); \ - } -#endif - -/* shifting */ - -/* NB. shifting has same limitations as C: the shift count must be - >= 0 and < LONG_BITS. */ -#define OP_INT_RSHIFT(x,y,r) r = Py_ARITHMETIC_RIGHT_SHIFT(long, x, y) -#define OP_UINT_RSHIFT(x,y,r) r = (x) >> (y) -#define OP_LLONG_RSHIFT(x,y,r) r = Py_ARITHMETIC_RIGHT_SHIFT(PY_LONG_LONG,x,y) -#define OP_ULLONG_RSHIFT(x,y,r) r = (x) >> (y) - -#define OP_INT_LSHIFT(x,y,r) r = (x) << (y) -#define OP_UINT_LSHIFT(x,y,r) r = (x) << (y) -#define OP_LLONG_LSHIFT(x,y,r) r = (x) << (y) -#define OP_ULLONG_LSHIFT(x,y,r) r = (x) << (y) - -#define OP_INT_LSHIFT_OVF(x,y,r) \ - OP_INT_LSHIFT(x,y,r); \ - if ((x) != Py_ARITHMETIC_RIGHT_SHIFT(long, r, (y))) \ - FAIL_OVF("x<= 0) { OP_INT_RSHIFT(x,y,r); } \ - else FAIL_VAL("negative shift count") -#define OP_LLONG_RSHIFT_VAL(x,y,r) \ - if ((y) >= 0) { OP_LLONG_RSHIFT(x,y,r); } \ - else FAIL_VAL("negative shift count") - -#define OP_INT_LSHIFT_VAL(x,y,r) \ - if ((y) >= 0) { OP_INT_LSHIFT(x,y,r); } \ - else FAIL_VAL("negative shift count") -#define OP_LLONG_LSHIFT_VAL(x,y,r) \ - if ((y) >= 0) { OP_LLONG_LSHIFT(x,y,r); } \ - else FAIL_VAL("negative shift count") - -#define OP_INT_LSHIFT_OVF_VAL(x,y,r) \ - if ((y) >= 0) { OP_INT_LSHIFT_OVF(x,y,r); } \ - else FAIL_VAL("negative shift count") - -/* pff */ -#define OP_UINT_LSHIFT_VAL(x,y,r) \ - if ((y) >= 0) { OP_UINT_LSHIFT(x,y,r); } \ - else FAIL_VAL("negative shift count") -#define OP_ULLONG_LSHIFT_VAL(x,y,r) \ - if ((y) >= 0) { OP_ULLONG_LSHIFT(x,y,r); } \ - else FAIL_VAL("negative shift count") - -#define OP_UINT_RSHIFT_VAL(x,y,r) \ - if ((y) >= 0) { OP_UINT_RSHIFT(x,y,r); } \ - else FAIL_VAL("negative shift count") -#define OP_ULLONG_RSHIFT_VAL(x,y,r) \ - if ((y) >= 0) { OP_ULLONG_RSHIFT(x,y,r); } \ - else FAIL_VAL("negative shift count") - - -/* floor division */ - -#define OP_INT_FLOORDIV(x,y,r) r = (x) / (y) -#define OP_UINT_FLOORDIV(x,y,r) r = (x) / (y) -#define OP_LLONG_FLOORDIV(x,y,r) r = (x) / (y) -#define OP_ULLONG_FLOORDIV(x,y,r) r = (x) / (y) - -#define OP_INT_FLOORDIV_OVF(x,y,r) \ - if ((y) == -1 && (x) == LONG_MIN) \ - { FAIL_OVF("integer division"); } \ - else OP_INT_FLOORDIV(x,y,r) - -#define OP_INT_FLOORDIV_ZER(x,y,r) \ - if ((y)) { OP_INT_FLOORDIV(x,y,r); } \ - else FAIL_ZER("integer division") -#define OP_UINT_FLOORDIV_ZER(x,y,r) \ - if ((y)) { OP_UINT_FLOORDIV(x,y,r); } \ - else FAIL_ZER("unsigned integer division") -#define OP_LLONG_FLOORDIV_ZER(x,y,r) \ - if ((y)) { OP_LLONG_FLOORDIV(x,y,r); } \ - else FAIL_ZER("integer division") -#define OP_ULLONG_FLOORDIV_ZER(x,y,r) \ - if ((y)) { OP_ULLONG_FLOORDIV(x,y,r); } \ - else FAIL_ZER("unsigned integer division") - -#define OP_INT_FLOORDIV_OVF_ZER(x,y,r) \ - if ((y)) { OP_INT_FLOORDIV_OVF(x,y,r); } \ - else FAIL_ZER("integer division") - -/* modulus */ - -#define OP_INT_MOD(x,y,r) r = (x) % (y) -#define 
OP_UINT_MOD(x,y,r) r = (x) % (y) -#define OP_LLONG_MOD(x,y,r) r = (x) % (y) -#define OP_ULLONG_MOD(x,y,r) r = (x) % (y) - -#define OP_INT_MOD_OVF(x,y,r) \ - if ((y) == -1 && (x) == LONG_MIN) \ - { FAIL_OVF("integer modulo"); }\ - else OP_INT_MOD(x,y,r) - -#define OP_INT_MOD_ZER(x,y,r) \ - if ((y)) { OP_INT_MOD(x,y,r); } \ - else FAIL_ZER("integer modulo") -#define OP_UINT_MOD_ZER(x,y,r) \ - if ((y)) { OP_UINT_MOD(x,y,r); } \ - else FAIL_ZER("unsigned integer modulo") -#define OP_LLONG_MOD_ZER(x,y,r) \ - if ((y)) { OP_LLONG_MOD(x,y,r); } \ - else FAIL_ZER("integer modulo") -#define OP_ULLONG_MOD_ZER(x,y,r) \ - if ((y)) { OP_ULLONG_MOD(x,y,r); } \ - else FAIL_ZER("integer modulo") - -#define OP_INT_MOD_OVF_ZER(x,y,r) \ - if ((y)) { OP_INT_MOD_OVF(x,y,r); } \ - else FAIL_ZER("integer modulo") - -/* bit operations */ - -#define OP_INT_AND(x,y,r) r = (x) & (y) -#define OP_INT_OR( x,y,r) r = (x) | (y) -#define OP_INT_XOR(x,y,r) r = (x) ^ (y) - -/*** conversions ***/ - -#define OP_CAST_BOOL_TO_INT(x,r) r = (long)(x) -#define OP_CAST_BOOL_TO_UINT(x,r) r = (unsigned long)(x) -#define OP_CAST_UINT_TO_INT(x,r) r = (long)(x) -#define OP_CAST_INT_TO_UINT(x,r) r = (unsigned long)(x) -#define OP_CAST_INT_TO_LONGLONG(x,r) r = (long long)(x) -#define OP_CAST_CHAR_TO_INT(x,r) r = (long)((unsigned char)(x)) -#define OP_CAST_INT_TO_CHAR(x,r) r = (char)(x) -#define OP_CAST_PTR_TO_INT(x,r) r = (long)(x) /* XXX */ - -#define OP_TRUNCATE_LONGLONG_TO_INT(x,r) r = (long)(x) - -#define OP_CAST_UNICHAR_TO_INT(x,r) r = (long)((unsigned long)(x)) /*?*/ -#define OP_CAST_INT_TO_UNICHAR(x,r) r = (unsigned int)(x) - -/* bool operations */ - -#define OP_BOOL_NOT(x, r) r = !(x) - -/* _________________ certain implementations __________________ */ - -#if !OP_INT_MUL_OVF_LL -/* adjusted from intobject.c, Python 2.3.3 */ - -/* prototypes */ - -int op_int_mul_ovf(long a, long b, long *longprod); - -/* implementations */ - -#ifndef PYPY_NOT_MAIN_FILE - -int -op_int_mul_ovf(long a, long b, long *longprod) -{ - double doubled_longprod; /* (double)longprod */ - double doubleprod; /* (double)a * (double)b */ - - *longprod = a * b; - doubleprod = (double)a * (double)b; - doubled_longprod = (double)*longprod; - - /* Fast path for normal case: small multiplicands, and no info - is lost in either method. */ - if (doubled_longprod == doubleprod) - return 1; - - /* Somebody somewhere lost info. Close enough, or way off? Note - that a != 0 and b != 0 (else doubled_longprod == doubleprod == 0). - The difference either is or isn't significant compared to the - true value (of which doubleprod is a good approximation). - */ - { - const double diff = doubled_longprod - doubleprod; - const double absdiff = diff >= 0.0 ? diff : -diff; - const double absprod = doubleprod >= 0.0 ? 
doubleprod : - -doubleprod; - /* absdiff/absprod <= 1/32 iff - 32 * absdiff <= absprod -- 5 good bits is "close enough" */ - if (32.0 * absdiff <= absprod) - return 1; - return 0; - } -} - -#endif /* PYPY_NOT_MAIN_FILE */ - -#endif /* !OP_INT_MUL_OVF_LL */ - -/* implementations */ - -#define OP_UINT_IS_TRUE OP_INT_IS_TRUE -#define OP_UINT_INVERT OP_INT_INVERT -#define OP_UINT_ADD OP_INT_ADD -#define OP_UINT_SUB OP_INT_SUB -#define OP_UINT_MUL OP_INT_MUL -#define OP_UINT_LT OP_INT_LT -#define OP_UINT_LE OP_INT_LE -#define OP_UINT_EQ OP_INT_EQ -#define OP_UINT_NE OP_INT_NE -#define OP_UINT_GT OP_INT_GT -#define OP_UINT_GE OP_INT_GE -#define OP_UINT_AND OP_INT_AND -#define OP_UINT_OR OP_INT_OR -#define OP_UINT_XOR OP_INT_XOR - -#define OP_LLONG_IS_TRUE OP_INT_IS_TRUE -#define OP_LLONG_NEG OP_INT_NEG -#define OP_LLONG_ABS OP_INT_ABS -#define OP_LLONG_INVERT OP_INT_INVERT - -#define OP_LLONG_ADD OP_INT_ADD -#define OP_LLONG_SUB OP_INT_SUB -#define OP_LLONG_MUL OP_INT_MUL -#define OP_LLONG_LT OP_INT_LT -#define OP_LLONG_LE OP_INT_LE -#define OP_LLONG_EQ OP_INT_EQ -#define OP_LLONG_NE OP_INT_NE -#define OP_LLONG_GT OP_INT_GT -#define OP_LLONG_GE OP_INT_GE -#define OP_LLONG_AND OP_INT_AND -#define OP_LLONG_OR OP_INT_OR -#define OP_LLONG_XOR OP_INT_XOR - -#define OP_ULLONG_IS_TRUE OP_LLONG_IS_TRUE -#define OP_ULLONG_INVERT OP_LLONG_INVERT -#define OP_ULLONG_ADD OP_LLONG_ADD -#define OP_ULLONG_SUB OP_LLONG_SUB -#define OP_ULLONG_MUL OP_LLONG_MUL -#define OP_ULLONG_LT OP_LLONG_LT -#define OP_ULLONG_LE OP_LLONG_LE -#define OP_ULLONG_EQ OP_LLONG_EQ -#define OP_ULLONG_NE OP_LLONG_NE -#define OP_ULLONG_GT OP_LLONG_GT -#define OP_ULLONG_GE OP_LLONG_GE -#define OP_ULLONG_AND OP_LLONG_AND -#define OP_ULLONG_OR OP_LLONG_OR -#define OP_ULLONG_XOR OP_LLONG_XOR - -/* ================================================== float.h ============ */ - -/*** unary operations ***/ - -#define OP_FLOAT_IS_TRUE(x,r) OP_FLOAT_NE(x,0.0,r) -#define OP_FLOAT_NEG(x,r) r = -x -#define OP_FLOAT_ABS(x,r) r = fabs(x) - -/*** binary operations ***/ - -#define OP_FLOAT_EQ(x,y,r) r = (x == y) -#define OP_FLOAT_NE(x,y,r) r = (x != y) -#define OP_FLOAT_LE(x,y,r) r = (x <= y) -#define OP_FLOAT_GT(x,y,r) r = (x > y) -#define OP_FLOAT_LT(x,y,r) r = (x < y) -#define OP_FLOAT_GE(x,y,r) r = (x >= y) - -#define OP_FLOAT_CMP(x,y,r) \ - r = ((x > y) - (x < y)) - -/* addition, subtraction */ - -#define OP_FLOAT_ADD(x,y,r) r = x + y -#define OP_FLOAT_SUB(x,y,r) r = x - y -#define OP_FLOAT_MUL(x,y,r) r = x * y -#define OP_FLOAT_TRUEDIV(x,y,r) r = x / y -#define OP_FLOAT_POW(x,y,r) r = pow(x, y) - -/*** conversions ***/ - -#define OP_CAST_FLOAT_TO_INT(x,r) r = (long)(x) -#define OP_CAST_FLOAT_TO_UINT(x,r) r = (unsigned long)(x) -#define OP_CAST_INT_TO_FLOAT(x,r) r = (double)(x) -#define OP_CAST_UINT_TO_FLOAT(x,r) r = (double)(x) -#define OP_CAST_LONGLONG_TO_FLOAT(x,r) r = (double)(x) -#define OP_CAST_BOOL_TO_FLOAT(x,r) r = (double)(x) - -#ifdef HAVE_LONG_LONG -#define OP_CAST_FLOAT_TO_LONGLONG(x,r) r = (long long)(x) -#endif - -/* ================================================== support.h ========== */ - -#define RPyField(ptr, name) NULL - -/* ================================================== EOF ================ */ Modified: trunk/scipy/sandbox/mkufunc/mkufunc.py =================================================================== --- trunk/scipy/sandbox/mkufunc/mkufunc.py 2008-06-29 10:30:22 UTC (rev 4499) +++ trunk/scipy/sandbox/mkufunc/mkufunc.py 2008-06-29 22:21:36 UTC (rev 4500) @@ -5,24 +5,31 @@ """ import sys import re +import os.path import 
cStringIO from types import FunctionType import numpy -import scipy.weave as weave +from scipy import weave -from interactive import Translation from funcutil import func_hash verbose = False def translate(f, argtypes): - + cache_fname = os.path.join(weave.catalog.default_dir(), + 'pypy_%s.c' % func_hash(f, argtypes)) + try: + return open(cache_fname).read() + except IOError: + pass + + from interactive import Translation if not verbose: tmp = sys.stderr sys.stderr = cStringIO.StringIO() - + t = Translation(f, backend='c') t.annotate(argtypes) t.source() @@ -33,9 +40,15 @@ c_source_filename = t.driver.c_source_filename assert c_source_filename.endswith('.c') - return open(c_source_filename, 'r').read() + src = open(c_source_filename, 'r').read() + fo = open(cache_fname, 'w') + fo.write(src) + fo.close() + return src + + class Ctype: def __init__(self, npy, c): self.npy = npy @@ -180,13 +193,17 @@ } ''' % locals() +pypyc = os.path.join(weave.catalog.default_temp_dir(), 'pypy.c') def write_pypyc(cfuncs): """ Given a list of Cfunc instances, write the C code containing the functions into a file. """ - fo = open('pypy.c', 'w'); - fo.write('#include "head.c"\n\n') + header = open(os.path.join(os.path.dirname(__file__), + 'pypy_head.h')).read() + fo = open(pypyc, 'w') + fo.write(header) + fo.write('/********************* end header ********************/\n\n') for cf in cfuncs: fo.write(cf.cfunc()) fo.close() @@ -279,13 +296,12 @@ ufunc_info = weave.base_info.custom_info() ufunc_info.add_header('"numpy/ufuncobject.h"') - ufunc_info.add_include_dir('"."') return weave.inline(code, - verbose=0, #force=1, + verbose=0, support_code=support_code, customize=ufunc_info, - sources=['pypy.c']) + sources=[pypyc]) def mkufunc(arg0=[float]): Added: trunk/scipy/sandbox/mkufunc/pypy_head.h =================================================================== --- trunk/scipy/sandbox/mkufunc/pypy_head.h 2008-06-29 10:30:22 UTC (rev 4499) +++ trunk/scipy/sandbox/mkufunc/pypy_head.h 2008-06-29 22:21:36 UTC (rev 4500) @@ -0,0 +1,381 @@ + +#include +#include +#include + +/* ================================================== g_prerequisite.h === */ + +typedef unsigned char bool_t; + +/* ================================================== exception.h ======== */ + +#define RPY_DEBUG_RETURN() /* nothing */ + + +/* ================================================== int.h ============== */ + +/*** unary operations ***/ + +#define OP_INT_IS_TRUE(x,r) OP_INT_NE(x,0,r) + +#define OP_INT_INVERT(x,r) r = ~((x)) + +#define OP_INT_NEG(x,r) r = -(x) + +#define OP_INT_NEG_OVF(x,r) \ + if ((x) == LONG_MIN) FAIL_OVF("integer negate"); \ + OP_INT_NEG(x,r) +#define OP_LLONG_NEG_OVF(x,r) \ + if ((x) == LLONG_MIN) FAIL_OVF("integer negate"); \ + OP_LLONG_NEG(x,r) + +#define OP_INT_ABS(x,r) r = (x) >= 0 ? 
x : -(x) + +#define OP_INT_ABS_OVF(x,r) \ + if ((x) == LONG_MIN) FAIL_OVF("integer absolute"); \ + OP_INT_ABS(x,r) +#define OP_LLONG_ABS_OVF(x,r) \ + if ((x) == LLONG_MIN) FAIL_OVF("integer absolute"); \ + OP_LLONG_ABS(x,r) + +/*** binary operations ***/ + +#define OP_INT_EQ(x,y,r) r = ((x) == (y)) +#define OP_INT_NE(x,y,r) r = ((x) != (y)) +#define OP_INT_LE(x,y,r) r = ((x) <= (y)) +#define OP_INT_GT(x,y,r) r = ((x) > (y)) +#define OP_INT_LT(x,y,r) r = ((x) < (y)) +#define OP_INT_GE(x,y,r) r = ((x) >= (y)) + +/* addition, subtraction */ + +#define OP_INT_ADD(x,y,r) r = (x) + (y) + +#define OP_INT_ADD_OVF(x,y,r) \ + OP_INT_ADD(x,y,r); \ + if ((r^(x)) >= 0 || (r^(y)) >= 0); \ + else FAIL_OVF("integer addition") + +#define OP_INT_ADD_NONNEG_OVF(x,y,r) /* y can be assumed >= 0 */ \ + OP_INT_ADD(x,y,r); \ + if (r >= (x)); \ + else FAIL_OVF("integer addition") +/* XXX can a C compiler be too clever and think it can "prove" that + * r >= x always hold above? */ + +#define OP_INT_SUB(x,y,r) r = (x) - (y) + +#define OP_INT_SUB_OVF(x,y,r) \ + OP_INT_SUB(x,y,r); \ + if ((r^(x)) >= 0 || (r^~(y)) >= 0); \ + else FAIL_OVF("integer subtraction") + +#define OP_INT_MUL(x,y,r) r = (x) * (y) + +#if defined(HAVE_LONG_LONG) && SIZE_OF_LONG_LONG < SIZE_OF_LONG +# define OP_INT_MUL_OVF_LL 1 +#lse +# define OP_INT_MUL_OVF_LL 0 +#endif + +#if !OP_INT_MUL_OVF_LL + +#define OP_INT_MUL_OVF(x,y,r) \ + if (op_int_mul_ovf(x,y,&r)); \ + else FAIL_OVF("integer multiplication") + +#else + +#define OP_INT_MUL_OVF(x,y,r) \ + { \ + PY_LONG_LONG lr = (PY_LONG_LONG)(x) * (PY_LONG_LONG)(y); \ + r = (long)lr; \ + if ((PY_LONG_LONG)r == lr); \ + else FAIL_OVF("integer multiplication"); \ + } +#endif + +/* shifting */ + +/* NB. shifting has same limitations as C: the shift count must be + >= 0 and < LONG_BITS. 
*/ +#define OP_INT_RSHIFT(x,y,r) r = Py_ARITHMETIC_RIGHT_SHIFT(long, x, y) +#define OP_UINT_RSHIFT(x,y,r) r = (x) >> (y) +#define OP_LLONG_RSHIFT(x,y,r) r = Py_ARITHMETIC_RIGHT_SHIFT(PY_LONG_LONG,x,y) +#define OP_ULLONG_RSHIFT(x,y,r) r = (x) >> (y) + +#define OP_INT_LSHIFT(x,y,r) r = (x) << (y) +#define OP_UINT_LSHIFT(x,y,r) r = (x) << (y) +#define OP_LLONG_LSHIFT(x,y,r) r = (x) << (y) +#define OP_ULLONG_LSHIFT(x,y,r) r = (x) << (y) + +#define OP_INT_LSHIFT_OVF(x,y,r) \ + OP_INT_LSHIFT(x,y,r); \ + if ((x) != Py_ARITHMETIC_RIGHT_SHIFT(long, r, (y))) \ + FAIL_OVF("x<= 0) { OP_INT_RSHIFT(x,y,r); } \ + else FAIL_VAL("negative shift count") +#define OP_LLONG_RSHIFT_VAL(x,y,r) \ + if ((y) >= 0) { OP_LLONG_RSHIFT(x,y,r); } \ + else FAIL_VAL("negative shift count") + +#define OP_INT_LSHIFT_VAL(x,y,r) \ + if ((y) >= 0) { OP_INT_LSHIFT(x,y,r); } \ + else FAIL_VAL("negative shift count") +#define OP_LLONG_LSHIFT_VAL(x,y,r) \ + if ((y) >= 0) { OP_LLONG_LSHIFT(x,y,r); } \ + else FAIL_VAL("negative shift count") + +#define OP_INT_LSHIFT_OVF_VAL(x,y,r) \ + if ((y) >= 0) { OP_INT_LSHIFT_OVF(x,y,r); } \ + else FAIL_VAL("negative shift count") + +/* pff */ +#define OP_UINT_LSHIFT_VAL(x,y,r) \ + if ((y) >= 0) { OP_UINT_LSHIFT(x,y,r); } \ + else FAIL_VAL("negative shift count") +#define OP_ULLONG_LSHIFT_VAL(x,y,r) \ + if ((y) >= 0) { OP_ULLONG_LSHIFT(x,y,r); } \ + else FAIL_VAL("negative shift count") + +#define OP_UINT_RSHIFT_VAL(x,y,r) \ + if ((y) >= 0) { OP_UINT_RSHIFT(x,y,r); } \ + else FAIL_VAL("negative shift count") +#define OP_ULLONG_RSHIFT_VAL(x,y,r) \ + if ((y) >= 0) { OP_ULLONG_RSHIFT(x,y,r); } \ + else FAIL_VAL("negative shift count") + + +/* floor division */ + +#define OP_INT_FLOORDIV(x,y,r) r = (x) / (y) +#define OP_UINT_FLOORDIV(x,y,r) r = (x) / (y) +#define OP_LLONG_FLOORDIV(x,y,r) r = (x) / (y) +#define OP_ULLONG_FLOORDIV(x,y,r) r = (x) / (y) + +#define OP_INT_FLOORDIV_OVF(x,y,r) \ + if ((y) == -1 && (x) == LONG_MIN) \ + { FAIL_OVF("integer division"); } \ + else OP_INT_FLOORDIV(x,y,r) + +#define OP_INT_FLOORDIV_ZER(x,y,r) \ + if ((y)) { OP_INT_FLOORDIV(x,y,r); } \ + else FAIL_ZER("integer division") +#define OP_UINT_FLOORDIV_ZER(x,y,r) \ + if ((y)) { OP_UINT_FLOORDIV(x,y,r); } \ + else FAIL_ZER("unsigned integer division") +#define OP_LLONG_FLOORDIV_ZER(x,y,r) \ + if ((y)) { OP_LLONG_FLOORDIV(x,y,r); } \ + else FAIL_ZER("integer division") +#define OP_ULLONG_FLOORDIV_ZER(x,y,r) \ + if ((y)) { OP_ULLONG_FLOORDIV(x,y,r); } \ + else FAIL_ZER("unsigned integer division") + +#define OP_INT_FLOORDIV_OVF_ZER(x,y,r) \ + if ((y)) { OP_INT_FLOORDIV_OVF(x,y,r); } \ + else FAIL_ZER("integer division") + +/* modulus */ + +#define OP_INT_MOD(x,y,r) r = (x) % (y) +#define OP_UINT_MOD(x,y,r) r = (x) % (y) +#define OP_LLONG_MOD(x,y,r) r = (x) % (y) +#define OP_ULLONG_MOD(x,y,r) r = (x) % (y) + +#define OP_INT_MOD_OVF(x,y,r) \ + if ((y) == -1 && (x) == LONG_MIN) \ + { FAIL_OVF("integer modulo"); }\ + else OP_INT_MOD(x,y,r) + +#define OP_INT_MOD_ZER(x,y,r) \ + if ((y)) { OP_INT_MOD(x,y,r); } \ + else FAIL_ZER("integer modulo") +#define OP_UINT_MOD_ZER(x,y,r) \ + if ((y)) { OP_UINT_MOD(x,y,r); } \ + else FAIL_ZER("unsigned integer modulo") +#define OP_LLONG_MOD_ZER(x,y,r) \ + if ((y)) { OP_LLONG_MOD(x,y,r); } \ + else FAIL_ZER("integer modulo") +#define OP_ULLONG_MOD_ZER(x,y,r) \ + if ((y)) { OP_ULLONG_MOD(x,y,r); } \ + else FAIL_ZER("integer modulo") + +#define OP_INT_MOD_OVF_ZER(x,y,r) \ + if ((y)) { OP_INT_MOD_OVF(x,y,r); } \ + else FAIL_ZER("integer modulo") + +/* bit operations */ + +#define 
OP_INT_AND(x,y,r) r = (x) & (y) +#define OP_INT_OR( x,y,r) r = (x) | (y) +#define OP_INT_XOR(x,y,r) r = (x) ^ (y) + +/*** conversions ***/ + +#define OP_CAST_BOOL_TO_INT(x,r) r = (long)(x) +#define OP_CAST_BOOL_TO_UINT(x,r) r = (unsigned long)(x) +#define OP_CAST_UINT_TO_INT(x,r) r = (long)(x) +#define OP_CAST_INT_TO_UINT(x,r) r = (unsigned long)(x) +#define OP_CAST_INT_TO_LONGLONG(x,r) r = (long long)(x) +#define OP_CAST_CHAR_TO_INT(x,r) r = (long)((unsigned char)(x)) +#define OP_CAST_INT_TO_CHAR(x,r) r = (char)(x) +#define OP_CAST_PTR_TO_INT(x,r) r = (long)(x) /* XXX */ + +#define OP_TRUNCATE_LONGLONG_TO_INT(x,r) r = (long)(x) + +#define OP_CAST_UNICHAR_TO_INT(x,r) r = (long)((unsigned long)(x)) /*?*/ +#define OP_CAST_INT_TO_UNICHAR(x,r) r = (unsigned int)(x) + +/* bool operations */ + +#define OP_BOOL_NOT(x, r) r = !(x) + +/* _________________ certain implementations __________________ */ + +#if !OP_INT_MUL_OVF_LL +/* adjusted from intobject.c, Python 2.3.3 */ + +/* prototypes */ + +int op_int_mul_ovf(long a, long b, long *longprod); + +/* implementations */ + +#ifndef PYPY_NOT_MAIN_FILE + +int +op_int_mul_ovf(long a, long b, long *longprod) +{ + double doubled_longprod; /* (double)longprod */ + double doubleprod; /* (double)a * (double)b */ + + *longprod = a * b; + doubleprod = (double)a * (double)b; + doubled_longprod = (double)*longprod; + + /* Fast path for normal case: small multiplicands, and no info + is lost in either method. */ + if (doubled_longprod == doubleprod) + return 1; + + /* Somebody somewhere lost info. Close enough, or way off? Note + that a != 0 and b != 0 (else doubled_longprod == doubleprod == 0). + The difference either is or isn't significant compared to the + true value (of which doubleprod is a good approximation). + */ + { + const double diff = doubled_longprod - doubleprod; + const double absdiff = diff >= 0.0 ? diff : -diff; + const double absprod = doubleprod >= 0.0 ? 
doubleprod : + -doubleprod; + /* absdiff/absprod <= 1/32 iff + 32 * absdiff <= absprod -- 5 good bits is "close enough" */ + if (32.0 * absdiff <= absprod) + return 1; + return 0; + } +} + +#endif /* PYPY_NOT_MAIN_FILE */ + +#endif /* !OP_INT_MUL_OVF_LL */ + +/* implementations */ + +#define OP_UINT_IS_TRUE OP_INT_IS_TRUE +#define OP_UINT_INVERT OP_INT_INVERT +#define OP_UINT_ADD OP_INT_ADD +#define OP_UINT_SUB OP_INT_SUB +#define OP_UINT_MUL OP_INT_MUL +#define OP_UINT_LT OP_INT_LT +#define OP_UINT_LE OP_INT_LE +#define OP_UINT_EQ OP_INT_EQ +#define OP_UINT_NE OP_INT_NE +#define OP_UINT_GT OP_INT_GT +#define OP_UINT_GE OP_INT_GE +#define OP_UINT_AND OP_INT_AND +#define OP_UINT_OR OP_INT_OR +#define OP_UINT_XOR OP_INT_XOR + +#define OP_LLONG_IS_TRUE OP_INT_IS_TRUE +#define OP_LLONG_NEG OP_INT_NEG +#define OP_LLONG_ABS OP_INT_ABS +#define OP_LLONG_INVERT OP_INT_INVERT + +#define OP_LLONG_ADD OP_INT_ADD +#define OP_LLONG_SUB OP_INT_SUB +#define OP_LLONG_MUL OP_INT_MUL +#define OP_LLONG_LT OP_INT_LT +#define OP_LLONG_LE OP_INT_LE +#define OP_LLONG_EQ OP_INT_EQ +#define OP_LLONG_NE OP_INT_NE +#define OP_LLONG_GT OP_INT_GT +#define OP_LLONG_GE OP_INT_GE +#define OP_LLONG_AND OP_INT_AND +#define OP_LLONG_OR OP_INT_OR +#define OP_LLONG_XOR OP_INT_XOR + +#define OP_ULLONG_IS_TRUE OP_LLONG_IS_TRUE +#define OP_ULLONG_INVERT OP_LLONG_INVERT +#define OP_ULLONG_ADD OP_LLONG_ADD +#define OP_ULLONG_SUB OP_LLONG_SUB +#define OP_ULLONG_MUL OP_LLONG_MUL +#define OP_ULLONG_LT OP_LLONG_LT +#define OP_ULLONG_LE OP_LLONG_LE +#define OP_ULLONG_EQ OP_LLONG_EQ +#define OP_ULLONG_NE OP_LLONG_NE +#define OP_ULLONG_GT OP_LLONG_GT +#define OP_ULLONG_GE OP_LLONG_GE +#define OP_ULLONG_AND OP_LLONG_AND +#define OP_ULLONG_OR OP_LLONG_OR +#define OP_ULLONG_XOR OP_LLONG_XOR + +/* ================================================== float.h ============ */ + +/*** unary operations ***/ + +#define OP_FLOAT_IS_TRUE(x,r) OP_FLOAT_NE(x,0.0,r) +#define OP_FLOAT_NEG(x,r) r = -x +#define OP_FLOAT_ABS(x,r) r = fabs(x) + +/*** binary operations ***/ + +#define OP_FLOAT_EQ(x,y,r) r = (x == y) +#define OP_FLOAT_NE(x,y,r) r = (x != y) +#define OP_FLOAT_LE(x,y,r) r = (x <= y) +#define OP_FLOAT_GT(x,y,r) r = (x > y) +#define OP_FLOAT_LT(x,y,r) r = (x < y) +#define OP_FLOAT_GE(x,y,r) r = (x >= y) + +#define OP_FLOAT_CMP(x,y,r) \ + r = ((x > y) - (x < y)) + +/* addition, subtraction */ + +#define OP_FLOAT_ADD(x,y,r) r = x + y +#define OP_FLOAT_SUB(x,y,r) r = x - y +#define OP_FLOAT_MUL(x,y,r) r = x * y +#define OP_FLOAT_TRUEDIV(x,y,r) r = x / y +#define OP_FLOAT_POW(x,y,r) r = pow(x, y) + +/*** conversions ***/ + +#define OP_CAST_FLOAT_TO_INT(x,r) r = (long)(x) +#define OP_CAST_FLOAT_TO_UINT(x,r) r = (unsigned long)(x) +#define OP_CAST_INT_TO_FLOAT(x,r) r = (double)(x) +#define OP_CAST_UINT_TO_FLOAT(x,r) r = (double)(x) +#define OP_CAST_LONGLONG_TO_FLOAT(x,r) r = (double)(x) +#define OP_CAST_BOOL_TO_FLOAT(x,r) r = (double)(x) + +#ifdef HAVE_LONG_LONG +#define OP_CAST_FLOAT_TO_LONGLONG(x,r) r = (long long)(x) +#endif + +/* ================================================== support.h ========== */ + +#define RPyField(ptr, name) NULL + From scipy-svn at scipy.org Sun Jun 29 20:07:42 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Sun, 29 Jun 2008 19:07:42 -0500 (CDT) Subject: [Scipy-svn] r4501 - trunk/scipy/sandbox/mkufunc Message-ID: <20080630000742.8B6CC39C246@scipy.org> Author: ilan Date: 2008-06-29 19:07:36 -0500 (Sun, 29 Jun 2008) New Revision: 4501 Added: trunk/scipy/sandbox/mkufunc/test_funcutil.py Modified: 
trunk/scipy/sandbox/mkufunc/funcutil.py Log: Moved tests into separate file Modified: trunk/scipy/sandbox/mkufunc/funcutil.py =================================================================== --- trunk/scipy/sandbox/mkufunc/funcutil.py 2008-06-29 22:21:36 UTC (rev 4500) +++ trunk/scipy/sandbox/mkufunc/funcutil.py 2008-06-30 00:07:36 UTC (rev 4501) @@ -3,7 +3,7 @@ def disassemble(co): """ Given a code object, return output from dis.disassemble as a string. - + (dis.disassemble writes its output to stdout.) """ tmp = sys.stdout @@ -13,10 +13,11 @@ sys.stdout = tmp return res + pat_norep = re.compile(r'<[^<>]*>') pat_white = re.compile(r'\s+') -def disassemble2(co): +def dis2(co): acc = cStringIO.StringIO() for line in disassemble(co).splitlines(): line = line[16:].strip() @@ -42,41 +43,18 @@ return res -def func_hash(f, extra=None): - txt = disassemble2(f.func_code) + repr(extra) - #print txt +def func_hash(f, salt=None, verbose=0): + """ Return the MD5 hash for a function object as string. + + 'salt' can be any object that has a representation + """ + txt = dis2(f.func_code) + repr(salt) + if verbose: + print txt + txt = pat_white.sub(' ', txt) return hashlib.md5(txt).hexdigest() -if __name__ == '__main__': -# import math -# from math import * - - md5sums = [] - - b = 3.14159 - g = lambda x: x - def h(n): - return n + 3 - - for a in xrange(2): - def f(x): - inner1 = lambda t: t/3.0 - def inner2(): return - t = b + g(42) + h(4) - return sin(pi * x) + a + t - md5sums.append(func_hash(f)) - - def f(x): - return math.sin(x) - md5sums.append(func_hash(f)) - - def f(x): - return sin(x) - md5sums.append(func_hash(f, float)) - - print md5sums - #assert md5sums == ['91d13599d610a554dccd6b44cb5ef1f0', - # 'be0c54b477180f897cbf7604fc565d18', - # '732d1ef6c1ce8cc92a7f28917496d292'] +if __name__ == '__main__': + pass Added: trunk/scipy/sandbox/mkufunc/test_funcutil.py =================================================================== --- trunk/scipy/sandbox/mkufunc/test_funcutil.py 2008-06-29 22:21:36 UTC (rev 4500) +++ trunk/scipy/sandbox/mkufunc/test_funcutil.py 2008-06-30 00:07:36 UTC (rev 4501) @@ -0,0 +1,47 @@ +import unittest + +from funcutil import func_hash + + +class Tests(unittest.TestCase): + + def test_simple(self): + + def f(x): + return 2.5 * x * x + 4.7 * x + + self.assertEqual(func_hash(f), + 'f8c94c2e2dd69226706f90c2f4293497') + + + def test_extra(self): + + def f(x): + return 2.5 * x * x + 4.7 * x + + self.assertEqual(func_hash(f, salt=[(int, int), (float, float)]), + 'd81db2e37ade51a430e47b72c55e197e') + + def test_const(self): + + def add_a(b): + return a + b # a in globals + + self.assertEqual(func_hash(add_a), + '55a68633f905a1373f61659b41402f02') + + def test_inner(self): + + def foo(x): + inner1 = lambda t: t/3.0 + def inner2(n): + return n + 3 + return inner1(x) + inner2(int(x)) + + #func_hash(foo, verbose=1) + self.assertEqual(func_hash(foo), + 'a836c2dbe1b202bd68e1fe3affe1ce7a') + + +if __name__ == '__main__': + unittest.main() From scipy-svn at scipy.org Sun Jun 29 21:32:42 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Sun, 29 Jun 2008 20:32:42 -0500 (CDT) Subject: [Scipy-svn] r4502 - trunk/scipy/sandbox/mkufunc Message-ID: <20080630013242.1FBE839C089@scipy.org> Author: ilan Date: 2008-06-29 20:32:41 -0500 (Sun, 29 Jun 2008) New Revision: 4502 Modified: trunk/scipy/sandbox/mkufunc/mkufunc.py trunk/scipy/sandbox/mkufunc/test_mkufunc.py Log: Refactoring Modified: trunk/scipy/sandbox/mkufunc/mkufunc.py
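The salt argument added to func_hash above is what lets one source function produce several cache entries, one per type signature: hashing the same bytecode with different salts yields different digests. Hypothetical usage, mirroring the new tests:

from funcutil import func_hash

def f(x):
    return 2.5 * x * x + 4.7 * x

# Same bytecode, different salt -> different cache keys.
assert func_hash(f, salt=[(int, int)]) != func_hash(f, salt=[(float, float)])
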
=================================================================== --- trunk/scipy/sandbox/mkufunc/mkufunc.py 2008-06-30 00:07:36 UTC (rev 4501) +++ trunk/scipy/sandbox/mkufunc/mkufunc.py 2008-06-30 01:32:41 UTC (rev 4502) @@ -5,7 +5,7 @@ """ import sys import re -import os.path +import os, os.path import cStringIO from types import FunctionType @@ -15,40 +15,28 @@ from funcutil import func_hash -verbose = False - def translate(f, argtypes): - cache_fname = os.path.join(weave.catalog.default_dir(), - 'pypy_%s.c' % func_hash(f, argtypes)) + """ Return pypy's C output for a given function and argument types. + + The cache files are in weave's directory. + """ + cache_file_name = os.path.join(weave.catalog.default_dir(), + 'pypy_%s.c' % func_hash(f, salt=argtypes)) try: - return open(cache_fname).read() - except IOError: - pass + return open(cache_file_name).read() - from interactive import Translation - if not verbose: - tmp = sys.stderr - sys.stderr = cStringIO.StringIO() + except IOError: + from interactive import Translation - t = Translation(f, backend='c') - t.annotate(argtypes) - t.source() - - if not verbose: - sys.stderr = tmp - - c_source_filename = t.driver.c_source_filename - assert c_source_filename.endswith('.c') - - src = open(c_source_filename, 'r').read() + t = Translation(f, backend='c') + t.annotate(argtypes) + t.source() + + os.rename(t.driver.c_source_filename, cache_file_name) + + return translate(f, argtypes) - fo = open(cache_fname, 'w') - fo.write(src) - fo.close() - return src - - class Ctype: def __init__(self, npy, c): self.npy = npy @@ -98,7 +86,7 @@ self.f = f self.n = n self.sig = signature - self.nin = f.func_code.co_argcount # input args + self.nin = f.func_code.co_argcount self.nout = len(self.sig) - self.nin assert self.nout == 1 # for now @@ -193,11 +181,12 @@ } ''' % locals() + pypyc = os.path.join(weave.catalog.default_temp_dir(), 'pypy.c') def write_pypyc(cfuncs): """ Given a list of Cfunc instances, write the C code containing the - functions into a file. + functions into a file. """ header = open(os.path.join(os.path.dirname(__file__), 'pypy_head.h')).read() @@ -209,30 +198,15 @@ fo.close() -def genufunc(f, signatures): - """ Given a Python function and its signatures, do the following: - - - Compile the function to C for each signature - - - Write the C code for all these functions to a file - - - Generate the support code for weave - - - Generate the code for weave. This contains the actual call to - PyUFuncGenericFunction - - - Return the Ufunc Python object +def support_code(cfuncs): + """ Given a list of Cfunc instances, return the support_code for weave. """ - signatures.sort(key=lambda sig: [numpy.dtype(typ).num for typ in sig]) + fname = cfuncs[0].f.__name__ - cfuncs = [Cfunc(f, sig, n) for n, sig in enumerate(signatures)] - - write_pypyc(cfuncs) - declarations = ''.join('\t%s\n' % cf.decl() for cf in cfuncs) - + func_support = ''.join(cf.ufunc_support_code() for cf in cfuncs) - + pyufuncs = ''.join('\tPyUFunc_%i,\n' % cf.n for cf in cfuncs) data = ''.join('\t(void *) wrap_%s,\n' % cf.cname for cf in cfuncs) @@ -240,10 +214,7 @@ types = ''.join('\t%s /* %i */\n' % (''.join(typedict[t].npy + ', ' for t in cf.sig), cf.n) for cf in cfuncs) - - fname = f.__name__ - - support_code = ''' + return ''' extern "C" { %(declarations)s} @@ -259,17 +230,22 @@ %(types)s}; ''' % locals() + +def code(f, signatures): + """ Return the code for weave. 
+ """ + nin = f.func_code.co_argcount ntypes = len(signatures) - nin = cfuncs[0].nin + fname = f.__name__ fhash = func_hash(f) - code = ''' + return ''' import_ufunc(); /**************************************************************************** ** function name: %(fname)s ** signatures: %(signatures)r -** bytecode hash: %(fhash)s +** fhash: %(fhash)s *****************************************************************************/ return_val = PyUFunc_FromFuncAndData( @@ -281,25 +257,29 @@ 1, /* nout */ PyUFunc_None, /* identity */ "%(fname)s", /* name */ - "UFunc made by mkufunc", /* doc */ + "UFunc created by mkufunc", /* doc */ 0); ''' % locals() + + +def genufunc(f, signatures): + """ Return the Ufunc Python object for given function and signatures. + """ + if len(signatures) == 0: + raise ValueError("At least one signature needed") - if 1: - fo = open(f.__name__ + '_code.cc', 'w'); - fo.write(code); - fo.close() - - fo = open(f.__name__ + '_support_code.cc', 'w'); - fo.write(support_code); - fo.close() + signatures.sort(key=lambda sig: [numpy.dtype(typ).num for typ in sig]) + cfuncs = [Cfunc(f, sig, n) for n, sig in enumerate(signatures)] + + write_pypyc(cfuncs) + ufunc_info = weave.base_info.custom_info() ufunc_info.add_header('"numpy/ufuncobject.h"') - return weave.inline(code, + return weave.inline(code(f, signatures), verbose=0, - support_code=support_code, + support_code=support_code(cfuncs), customize=ufunc_info, sources=[pypyc]) Modified: trunk/scipy/sandbox/mkufunc/test_mkufunc.py =================================================================== --- trunk/scipy/sandbox/mkufunc/test_mkufunc.py 2008-06-30 00:07:36 UTC (rev 4501) +++ trunk/scipy/sandbox/mkufunc/test_mkufunc.py 2008-06-30 01:32:41 UTC (rev 4502) @@ -102,6 +102,7 @@ self.assertRaises(TypeError, mkufunc([3*(float,)]), f) self.assertRaises(TypeError, mkufunc([{}]), f) self.assertRaises(TypeError, mkufunc([(int, {})]), f) + self.assertRaises(ValueError, mkufunc([]), f) class Math_Tests(unittest.TestCase, Util): From scipy-svn at scipy.org Sun Jun 29 23:19:30 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Sun, 29 Jun 2008 22:19:30 -0500 (CDT) Subject: [Scipy-svn] r4503 - trunk/scipy/sandbox/mkufunc Message-ID: <20080630031930.6A74739C080@scipy.org> Author: ilan Date: 2008-06-29 22:19:29 -0500 (Sun, 29 Jun 2008) New Revision: 4503 Modified: trunk/scipy/sandbox/mkufunc/mkufunc.py Log: PyPy output source now also in support code; no more pypy.c Modified: trunk/scipy/sandbox/mkufunc/mkufunc.py =================================================================== --- trunk/scipy/sandbox/mkufunc/mkufunc.py 2008-06-30 01:32:41 UTC (rev 4502) +++ trunk/scipy/sandbox/mkufunc/mkufunc.py 2008-06-30 03:19:29 UTC (rev 4503) @@ -105,17 +105,8 @@ assert len(found) == 1 res = found[0] res = res.replace(self._prefix + 'pypy_g_ll_math_ll_math_', '') - return res + '\n' + return 'inline ' + res + '\n' - def decl(self): - p = re.compile(r'^\w+[*\s\w]+' + self.cname + - r'\s*\([^)]*\);', - re.DOTALL | re.MULTILINE | re.VERBOSE) - - found = p.findall(self._allCsrc) - assert len(found) == 1 - return found[0] - def ufunc_support_code(self): # Unfortunately the code in here is very hard to read. 
# In order to make the code clearer, one would need a real template @@ -173,7 +164,7 @@ %(body1d_in)s %(rettype)s *out = (%(rettype)s *)op; - *out = f(%(ptrargs)s); + *out = (%(rettype)s) f(%(ptrargs)s); %(body1d_add)s op += os; @@ -182,30 +173,21 @@ ''' % locals() -pypyc = os.path.join(weave.catalog.default_temp_dir(), 'pypy.c') - -def write_pypyc(cfuncs): - """ Given a list of Cfunc instances, write the C code containing the - functions into a file. - """ - header = open(os.path.join(os.path.dirname(__file__), - 'pypy_head.h')).read() - fo = open(pypyc, 'w') - fo.write(header) - fo.write('/********************* end header ********************/\n\n') - for cf in cfuncs: - fo.write(cf.cfunc()) - fo.close() - - def support_code(cfuncs): """ Given a list of Cfunc instances, return the support_code for weave. """ - fname = cfuncs[0].f.__name__ + acc = cStringIO.StringIO() - declarations = ''.join('\t%s\n' % cf.decl() for cf in cfuncs) + acc.write('/********************* start pypy_head.h **************/\n\n') + acc.write(open(os.path.join(os.path.dirname(__file__), + 'pypy_head.h')).read()) + acc.write('/********************** end pypy_head.h ****************/\n\n') - func_support = ''.join(cf.ufunc_support_code() for cf in cfuncs) + for cf in cfuncs: + acc.write(cf.cfunc()) + acc.write(cf.ufunc_support_code()) + + fname = cfuncs[0].f.__name__ pyufuncs = ''.join('\tPyUFunc_%i,\n' % cf.n for cf in cfuncs) @@ -214,12 +196,8 @@ types = ''.join('\t%s /* %i */\n' % (''.join(typedict[t].npy + ', ' for t in cf.sig), cf.n) for cf in cfuncs) - return ''' -extern "C" { -%(declarations)s} - -%(func_support)s - + + acc.write(''' static PyUFuncGenericFunction %(fname)s_functions[] = { %(pyufuncs)s}; @@ -228,7 +206,9 @@ static char %(fname)s_types[] = { %(types)s}; -''' % locals() +''' % locals()) + + return acc.getvalue() def code(f, signatures): @@ -272,16 +252,13 @@ cfuncs = [Cfunc(f, sig, n) for n, sig in enumerate(signatures)] - write_pypyc(cfuncs) - ufunc_info = weave.base_info.custom_info() ufunc_info.add_header('"numpy/ufuncobject.h"') return weave.inline(code(f, signatures), verbose=0, support_code=support_code(cfuncs), - customize=ufunc_info, - sources=[pypyc]) + customize=ufunc_info) def mkufunc(arg0=[float]): From scipy-svn at scipy.org Mon Jun 30 10:08:54 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Mon, 30 Jun 2008 09:08:54 -0500 (CDT) Subject: [Scipy-svn] r4504 - trunk/scipy/sandbox/mkufunc Message-ID: <20080630140854.077EB39C192@scipy.org> Author: ilan Date: 2008-06-30 09:08:53 -0500 (Mon, 30 Jun 2008) New Revision: 4504 Modified: trunk/scipy/sandbox/mkufunc/test_mkufunc.py Log: Added test for function with no args Modified: trunk/scipy/sandbox/mkufunc/test_mkufunc.py =================================================================== --- trunk/scipy/sandbox/mkufunc/test_mkufunc.py 2008-06-30 03:19:29 UTC (rev 4503) +++ trunk/scipy/sandbox/mkufunc/test_mkufunc.py 2008-06-30 14:08:53 UTC (rev 4504) @@ -181,16 +181,23 @@ uf = mkufunc(f) x = arange(0, 2, 0.1) self.assertClose(uf(x), f(x)) + + +class FuncArg_Tests(unittest.TestCase, Util): - def f(x, y, z): - return x * y * z + def test_fargs0(self): + def f(): + return 42 + uf = mkufunc(f) - x = arange(0, 1, 0.1) - y = 2 * x - z = 3 * x - self.assertClose(uf(x, y, z), f(x, y, z)) + self.assertEqual(uf(), 42) + self.assert_(type(uf()).__name__.startswith('float')) + uf = mkufunc(int)(f) + self.assertEqual(uf(), 42) + self.assert_(type(uf()).__name__.startswith('int')) + class Control_Flow_Tests(unittest.TestCase): def 
test_if(self): From scipy-svn at scipy.org Mon Jun 30 12:21:16 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Mon, 30 Jun 2008 11:21:16 -0500 (CDT) Subject: [Scipy-svn] r4505 - trunk/scipy/optimize Message-ID: <20080630162116.E852439C6AF@scipy.org> Author: rkern Date: 2008-06-30 11:21:16 -0500 (Mon, 30 Jun 2008) New Revision: 4505 Modified: trunk/scipy/optimize/anneal.py Log: BUG: handle a broader range of function outputs. Thanks to Yosef Meller for the fix. Modified: trunk/scipy/optimize/anneal.py =================================================================== --- trunk/scipy/optimize/anneal.py 2008-06-30 14:08:53 UTC (rev 4504) +++ trunk/scipy/optimize/anneal.py 2008-06-30 16:21:16 UTC (rev 4505) @@ -9,6 +9,7 @@ __all__ = ['anneal'] +_double_min = numpy.finfo(float).min _double_max = numpy.finfo(float).max class base_schedule(object): def __init__(self): @@ -35,11 +36,25 @@ self.tests = 0 def getstart_temp(self, best_state): + """ Find a matching starting temperature and starting parameters vector + i.e. find x0 such that func(x0) = T0. + + Parameters + ---------- + best_state : _state + A _state object to store the function value and x0 found. + + Returns + ------- + x0 : array + The starting parameters vector. + """ + assert(not self.dims is None) lrange = self.lower urange = self.upper - fmax = -300e8 - fmin = 300e8 + fmax = _double_min + fmin = _double_max for _ in range(self.Ninit): x0 = random.uniform(size=self.dims)*(urange-lrange) + lrange fval = self.func(x0, *self.args) @@ -50,6 +65,7 @@ fmin = fval best_state.cost = fval best_state.x = array(x0) + self.T0 = (fmax-fmin)*1.5 return best_state.x From scipy-svn at scipy.org Mon Jun 30 15:19:56 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Mon, 30 Jun 2008 14:19:56 -0500 (CDT) Subject: [Scipy-svn] r4506 - trunk/scipy/sandbox/mkufunc Message-ID: <20080630191956.B648A39C6D9@scipy.org> Author: ilan Date: 2008-06-30 14:19:55 -0500 (Mon, 30 Jun 2008) New Revision: 4506 Added: trunk/scipy/sandbox/mkufunc/test_func_hash.py Removed: trunk/scipy/sandbox/mkufunc/funcutil.py trunk/scipy/sandbox/mkufunc/test_funcutil.py Modified: trunk/scipy/sandbox/mkufunc/mkufunc.py Log: Simplified func_hash and put into main file Deleted: trunk/scipy/sandbox/mkufunc/funcutil.py =================================================================== --- trunk/scipy/sandbox/mkufunc/funcutil.py 2008-06-30 16:21:16 UTC (rev 4505) +++ trunk/scipy/sandbox/mkufunc/funcutil.py 2008-06-30 19:19:55 UTC (rev 4506) @@ -1,60 +0,0 @@ -import sys, re, dis, hashlib, cStringIO - - -def disassemble(co): - """ Given a code object, return output from dis.disassemble as a string. - - (dis.disassemble writes its output to stdout.) - """ - tmp = sys.stdout - sys.stdout = cStringIO.StringIO() - dis.disassemble(co) - res = sys.stdout.getvalue() - sys.stdout = tmp - return res - - -pat_norep = re.compile(r'<[^<>]*>') -pat_white = re.compile(r'\s+') - -def dis2(co): - acc = cStringIO.StringIO() - for line in disassemble(co).splitlines(): - line = line[16:].strip() - if line: - acc.write(line+'\n') - - acc.write('co_names:\n') - for name in co.co_names: - try: - tmp = str(eval(name)) - except NameError: - tmp = 'EVAL_FAILED' - acc.write('%8s: %s\n' % (name, tmp)) - - res = acc.getvalue() - - while True: - tmp = pat_norep.sub('NO_REPRESENTATION', res) - if tmp == res: - break - res = tmp - - return res - - -def func_hash(f, salt=None, verbose=0): - """ Return the MD5 hash for a function object as string.
- - 'salt' can be any object that has a representation - """ - txt = dis2(f.func_code) + repr(salt) - if verbose: - print txt - - txt = pat_white.sub(' ', txt) - return hashlib.md5(txt).hexdigest() - - -if __name__ == '__main__': - pass Modified: trunk/scipy/sandbox/mkufunc/mkufunc.py =================================================================== --- trunk/scipy/sandbox/mkufunc/mkufunc.py 2008-06-30 16:21:16 UTC (rev 4505) +++ trunk/scipy/sandbox/mkufunc/mkufunc.py 2008-06-30 19:19:55 UTC (rev 4506) @@ -7,14 +7,23 @@ import re import os, os.path import cStringIO +import hashlib from types import FunctionType import numpy from scipy import weave -from funcutil import func_hash +verbose = 0 +def func_hash(f, salt=None): + """ Return a MD5 hash for a function object as string. + """ + co = f.func_code + return hashlib.md5(co.co_code + repr(co.co_names) + repr(salt) + ).hexdigest() + + def translate(f, argtypes): """ Return pypy's C output for a given function and argument types. @@ -174,7 +183,7 @@ def support_code(cfuncs): - """ Given a list of Cfunc instances, return the support_code for weave. + """ Given a list of Cfunc instances, return the support code for weave. """ acc = cStringIO.StringIO() @@ -207,7 +216,12 @@ static char %(fname)s_types[] = { %(types)s}; ''' % locals()) - + + if verbose: + print '------------------ start support_code -----------------' + print acc.getvalue() + print '------------------- end support_code ------------------' + return acc.getvalue() @@ -219,7 +233,7 @@ fname = f.__name__ fhash = func_hash(f) - return ''' + res = ''' import_ufunc(); /**************************************************************************** @@ -241,7 +255,14 @@ 0); ''' % locals() + if verbose: + print '---------------------- start code ---------------------' + print res + print '----------------------- end code ----------------------' + return res + + def genufunc(f, signatures): """ Return the Ufunc Python object for given function and signatures. 
""" @@ -256,7 +277,7 @@ ufunc_info.add_header('"numpy/ufuncobject.h"') return weave.inline(code(f, signatures), - verbose=0, + verbose=verbose, support_code=support_code(cfuncs), customize=ufunc_info) Added: trunk/scipy/sandbox/mkufunc/test_func_hash.py =================================================================== --- trunk/scipy/sandbox/mkufunc/test_func_hash.py 2008-06-30 16:21:16 UTC (rev 4505) +++ trunk/scipy/sandbox/mkufunc/test_func_hash.py 2008-06-30 19:19:55 UTC (rev 4506) @@ -0,0 +1,47 @@ +import unittest + +from mkufunc import func_hash + + +class Tests(unittest.TestCase): + + def test_simple(self): + + def f(x): + return 2.5 * x * x + 4.7 * x + + self.assertEqual(func_hash(f), + '5f12e97debf1d2cb9e0a2f92e045b1fb') + + + def test_extra(self): + + def f(x): + return 2.5 * x * x + 4.7 * x + + self.assertEqual(func_hash(f, salt=[(int, int), (float, float)]), + 'e637d9825ef20cb56d364041118ca72e') + + def test_const(self): + + def add_a(b): + return a + b # a in globals + + self.assertEqual(func_hash(add_a), + '9ff237f372bf233470ce940edd58f60d') + + def test_inner(self): + + def foo(x): + inner1 = lambda t: t/3.0 + def inner2(n): + return n + 3 + return inner1(x) + inner2(int(x)) + + #func_hash(foo, verbose=1) + self.assertEqual(func_hash(foo), + '814c113dfc77e7ebb52915dd3ce9c37a') + + +if __name__ == '__main__': + unittest.main() Deleted: trunk/scipy/sandbox/mkufunc/test_funcutil.py =================================================================== --- trunk/scipy/sandbox/mkufunc/test_funcutil.py 2008-06-30 16:21:16 UTC (rev 4505) +++ trunk/scipy/sandbox/mkufunc/test_funcutil.py 2008-06-30 19:19:55 UTC (rev 4506) @@ -1,47 +0,0 @@ -import unittest - -from funcutil import func_hash - - -class Tests(unittest.TestCase): - - def test_simple(self): - - def f(x): - return 2.5 * x * x + 4.7 * x - - self.assertEqual(func_hash(f), - 'f8c94c2e2dd69226706f90c2f4293497') - - - def test_extra(self): - - def f(x): - return 2.5 * x * x + 4.7 * x - - self.assertEqual(func_hash(f, salt=[(int, int), (float, float)]), - 'd81db2e37ade51a430e47b72c55e197e') - - def test_const(self): - - def add_a(b): - return a + b # a in globals - - self.assertEqual(func_hash(add_a), - '55a68633f905a1373f61659b41402f02') - - def test_inner(self): - - def foo(x): - inner1 = lambda t: t/3.0 - def inner2(n): - return n + 3 - return inner1(x) + inner2(int(x)) - - #func_hash(foo, verbose=1) - self.assertEqual(func_hash(foo), - 'a836c2dbe1b202bd68e1fe3affe1ce7a') - - -if __name__ == '__main__': - unittest.main() From scipy-svn at scipy.org Mon Jun 30 20:32:14 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Mon, 30 Jun 2008 19:32:14 -0500 (CDT) Subject: [Scipy-svn] r4507 - in trunk/scipy/sandbox/mkufunc: . 
examples mkufunc Message-ID: <20080701003214.3F5D639C4BD@scipy.org> Author: ilan Date: 2008-06-30 19:32:12 -0500 (Mon, 30 Jun 2008) New Revision: 4507 Added: trunk/scipy/sandbox/mkufunc/TODO.txt trunk/scipy/sandbox/mkufunc/docs/ trunk/scipy/sandbox/mkufunc/examples/ trunk/scipy/sandbox/mkufunc/examples/benchmark.py trunk/scipy/sandbox/mkufunc/examples/primes.py trunk/scipy/sandbox/mkufunc/mkufunc/ trunk/scipy/sandbox/mkufunc/mkufunc/__init__.py trunk/scipy/sandbox/mkufunc/mkufunc/api.py trunk/scipy/sandbox/mkufunc/mkufunc/driver.py trunk/scipy/sandbox/mkufunc/mkufunc/interactive.py trunk/scipy/sandbox/mkufunc/mkufunc/pypy.h trunk/scipy/sandbox/mkufunc/mkufunc/test_func_hash.py trunk/scipy/sandbox/mkufunc/mkufunc/test_mkufunc.py trunk/scipy/sandbox/mkufunc/setup.py Log: Made a package which uses setuptools Added: trunk/scipy/sandbox/mkufunc/TODO.txt =================================================================== Added: trunk/scipy/sandbox/mkufunc/examples/benchmark.py =================================================================== --- trunk/scipy/sandbox/mkufunc/examples/benchmark.py 2008-06-30 19:19:55 UTC (rev 4506) +++ trunk/scipy/sandbox/mkufunc/examples/benchmark.py 2008-07-01 00:32:12 UTC (rev 4507) @@ -0,0 +1,49 @@ +#!/usr/bin/env python +from math import sin, cos +import time + +from numpy import arange, vectorize, allclose +from scipy import weave + +from mkufunc.api import mkufunc + + +def f(x): + return 4.2 * x * x + 3.7 * x + 1.5 + + +vfunc = vectorize(f) + +ufunc = mkufunc([(float, float)])(f) + + +x = arange(0, 1000, 0.001) #print "x =", x, x.dtype + +start_time = time.time() +b_y = x.copy() +weave.blitz("b_y[:] = 4.2 * x[:] * x[:] + 3.7 * x[:] + 1.5") +b_time = time.time() - start_time +print 'blitz: %.6f sec' % b_time + +start_time = time.time() +n_y = f(x) +n_time = time.time() - start_time +print 'numpy: %.6f sec' % n_time + +start_time = time.time() +v_y = vfunc(x) +v_time = time.time() - start_time +print 'vectorize: %.6f sec' % v_time + +start_time = time.time() +u_y = ufunc(x) +u_time = time.time() - start_time +print 'mkufunc: %.6f sec' % u_time + +print "speedup over blitz:", b_time/u_time +print "speedup over numpy:", n_time/u_time +print "speedup over vectorize:", v_time/u_time + +assert allclose(b_y, n_y) +assert allclose(v_y, n_y) +assert allclose(u_y, n_y) Property changes on: trunk/scipy/sandbox/mkufunc/examples/benchmark.py ___________________________________________________________________ Name: svn:executable + * Added: trunk/scipy/sandbox/mkufunc/examples/primes.py =================================================================== --- trunk/scipy/sandbox/mkufunc/examples/primes.py 2008-06-30 19:19:55 UTC (rev 4506) +++ trunk/scipy/sandbox/mkufunc/examples/primes.py 2008-07-01 00:32:12 UTC (rev 4507) @@ -0,0 +1,29 @@ +#!/usr/bin/env python +from math import sqrt +import time + +from numpy import arange + +from mkufunc.api import mkufunc + + +def is_prime(n): + if n < 2: + return 0 + for i in xrange(2, min(n, int(sqrt(n)+2.0))): + if n %i == 0: + return 0 + return 1 + + +start_time = time.time() +assert sum(is_prime(n) for n in xrange(1000000)) == 78498 +print 'Python: %.6f sec' % (time.time() - start_time) + + +is_prime = mkufunc(int)(is_prime) + + +start_time = time.time() +assert is_prime(arange(1000000)).sum() == 78498 +print 'Compiled: %.6f sec' % (time.time() - start_time) Property changes on: trunk/scipy/sandbox/mkufunc/examples/primes.py ___________________________________________________________________ Name: svn:executable + * Added: 
trunk/scipy/sandbox/mkufunc/mkufunc/__init__.py =================================================================== Added: trunk/scipy/sandbox/mkufunc/mkufunc/api.py =================================================================== --- trunk/scipy/sandbox/mkufunc/mkufunc/api.py 2008-06-30 19:19:55 UTC (rev 4506) +++ trunk/scipy/sandbox/mkufunc/mkufunc/api.py 2008-07-01 00:32:12 UTC (rev 4507) @@ -0,0 +1,346 @@ +""" mkufunc (make U function) + + +Author: Ilan Schnell (with help from Travis Oliphant and Eric Jones) +""" +import sys +import re +import os, os.path +import cStringIO +import hashlib +from types import FunctionType + +import numpy +from scipy import weave + + +verbose = 0 + +def func_hash(f, salt=None): + """ Return a MD5 hash for a function object as string. + """ + co = f.func_code + return hashlib.md5(co.co_code + repr(co.co_names) + repr(salt) + ).hexdigest() + + +def translate(f, argtypes): + """ Return pypy's C output for a given function and argument types. + The cache files are in weave's directory. + """ + cache_file_name = os.path.join(weave.catalog.default_dir(), + 'pypy_%s.c' % func_hash(f, salt=argtypes)) + try: + return open(cache_file_name).read() + + except IOError: + from interactive import Translation + + t = Translation(f, backend='c') + t.annotate(argtypes) + t.source() + + os.rename(t.driver.c_source_filename, cache_file_name) + + return translate(f, argtypes) + + +class Ctype: + def __init__(self, npy, c): + self.npy = npy + self.c = c + +typedict = { + int: Ctype('NPY_LONG', 'long' ), + float: Ctype('NPY_DOUBLE', 'double'), +} + + +class Cfunc(object): + """ C compiled python functions + + >>> def sqr(x): + ... return x * x + + >>> signature = [int, int] # only the input arguments are used here + + compilation is done upon initialization + >>> x = Cfunc(sqr, signature, 123) + ... + >>> x.nin # number of input arguments + 1 + >>> x.nout # number of output arguments (must be 1 for now) + 1 + >>> x.sig + [<type 'int'>, <type 'int'>] + + Attributes: + f -- the Python function object + n -- id number + sig -- signature + nin -- number of input arguments + nout -- number of output arguments + cname -- name of the C function + + Methods: + decl() -- returns the C declaration for the function + cfunc() -- returns the C function (as string) + ufunc_support_code() + -- generate the C support code to make this + function part work with PyUFuncGenericFunction + """ + def __init__(self, f, signature, n): + self.f = f + self.n = n + self.sig = signature + self.nin = f.func_code.co_argcount + self.nout = len(self.sig) - self.nin + assert self.nout == 1 # for now + + src = translate(f, signature[:self.nin]) + + self._prefix = 'f%i_' % self.n + self._allCsrc = src.replace('pypy_', self._prefix + 'pypy_') + self.cname = self._prefix + 'pypy_g_' + f.__name__ + + def cfunc(self): + p = re.compile(r'^\w+[*\s\w]+' + self.cname + + r'\s*\([^)]*\)\s*\{.*?[\n\r]\}[\n\r]', + re.DOTALL | re.MULTILINE | re.VERBOSE) + + found = p.findall(self._allCsrc) + assert len(found) == 1 + res = found[0] + res = res.replace(self._prefix + 'pypy_g_ll_math_ll_math_', '') + return 'inline ' + res + '\n' + + def ufunc_support_code(self): + # Unfortunately the code in here is very hard to read. + # In order to make the code clearer, one would need a real template + # engine like Cheetah (http://cheetahtemplate.org/). + # However, something like that would be too much overhead for scipy.
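For orientation, here is a rough pure-Python sketch of what the generated PyUFuncGenericFunction loop does; treating the inputs as flat element sequences (rather than the char* byte pointers the real code walks) is a simplifying assumption:

def generic_loop(f, inputs, out, n):
    # apply f elementwise: one result per index, reading every input slot
    for i in xrange(n):
        out[i] = f(*[seq[i] for seq in inputs])

The C template assembled below performs the same walk with explicit pointer arithmetic: each ip%i advances by the byte stride steps[i], and op by the output stride os.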
+ n = self.n + nin = self.nin + cname = self.cname + + def varname(i): + return chr(i + ord('a')) + + declargs = ', '.join('%s %s' % (typedict[self.sig[i]].c, varname(i)) + for i in xrange(self.nin)) + + args = ', '.join(varname(i) for i in xrange(self.nin)) + + isn_steps = '\n\t'.join('npy_intp is%i = steps[%i];' % (i, i) + for i in xrange(self.nin)) + + ipn_args = '\n\t'.join('char *ip%i = args[%i];' % (i, i) + for i in xrange(self.nin)) + + body1d_in = '\n\t\t'.join('%s *in%i = (%s *)ip%i;' % + (2*(typedict[self.sig[i]].c, i)) + for i in xrange(self.nin)) + + body1d_add = '\n\t\t'.join('ip%i += is%i;' % (i, i) + for i in xrange(self.nin)) + + ptrargs = ', '.join('*in%i' % i for i in xrange(self.nin)) + + rettype = typedict[self.sig[-1]].c + + return ''' +static %(rettype)s wrap_%(cname)s(%(declargs)s) +{ + return %(cname)s(%(args)s); +} + +typedef %(rettype)s Func_%(n)i(%(declargs)s); + +static void +PyUFunc_%(n)i(char **args, npy_intp *dimensions, npy_intp *steps, void *func) +{ + npy_intp i, n; + %(isn_steps)s + npy_intp os = steps[%(nin)s]; + %(ipn_args)s + char *op = args[%(nin)s]; + Func_%(n)i *f = (Func_%(n)i *) func; + n = dimensions[0]; + + for(i = 0; i < n; i++) { + %(body1d_in)s + %(rettype)s *out = (%(rettype)s *)op; + + *out = (%(rettype)s) f(%(ptrargs)s); + + %(body1d_add)s + op += os; + } +} +''' % locals() + + +def support_code(cfuncs): + """ Given a list of Cfunc instances, return the support code for weave. + """ + acc = cStringIO.StringIO() + + acc.write('/********************* start pypy.h **************/\n\n') + acc.write(open(os.path.join(os.path.dirname(__file__), + 'pypy.h')).read()) + acc.write('/********************** end pypy.h ****************/\n\n') + + for cf in cfuncs: + acc.write(cf.cfunc()) + acc.write(cf.ufunc_support_code()) + + fname = cfuncs[0].f.__name__ + + pyufuncs = ''.join('\tPyUFunc_%i,\n' % cf.n for cf in cfuncs) + + data = ''.join('\t(void *) wrap_%s,\n' % cf.cname for cf in cfuncs) + + types = ''.join('\t%s /* %i */\n' % + (''.join(typedict[t].npy + ', ' for t in cf.sig), cf.n) + for cf in cfuncs) + + acc.write(''' +static PyUFuncGenericFunction %(fname)s_functions[] = { +%(pyufuncs)s}; + +static void *%(fname)s_data[] = { +%(data)s}; + +static char %(fname)s_types[] = { +%(types)s}; +''' % locals()) + + if verbose: + print '------------------ start support_code -----------------' + print acc.getvalue() + print '------------------- end support_code ------------------' + + return acc.getvalue() + + +def code(f, signatures): + """ Return the code for weave. + """ + nin = f.func_code.co_argcount + ntypes = len(signatures) + fname = f.__name__ + fhash = func_hash(f) + + res = ''' +import_ufunc(); + +/**************************************************************************** +** function name: %(fname)s +** signatures: %(signatures)r +** fhash: %(fhash)s +*****************************************************************************/ + +return_val = PyUFunc_FromFuncAndData( + %(fname)s_functions, + %(fname)s_data, + %(fname)s_types, + %(ntypes)i, /* ntypes */ + %(nin)i, /* nin */ + 1, /* nout */ + PyUFunc_None, /* identity */ + "%(fname)s", /* name */ + "UFunc created by mkufunc", /* doc */ + 0); +''' % locals() + + if verbose: + print '---------------------- start code ---------------------' + print res + print '----------------------- end code ----------------------' + + return res + + +def genufunc(f, signatures): + """ Return the Ufunc Python object for given function and signatures. 
+ """ + if len(signatures) == 0: + raise ValueError("At least one signature needed") + + signatures.sort(key=lambda sig: [numpy.dtype(typ).num for typ in sig]) + + cfuncs = [Cfunc(f, sig, n) for n, sig in enumerate(signatures)] + + ufunc_info = weave.base_info.custom_info() + ufunc_info.add_header('"numpy/ufuncobject.h"') + + return weave.inline(code(f, signatures), + verbose=verbose, + support_code=support_code(cfuncs), + customize=ufunc_info) + + +def mkufunc(arg0=[float]): + """ Python decorator which returns compiled UFunc of the function given. + + >>> from numpy import arange + >>> from mkufunc.api import mkufunc + >>> @mkufunc + ... def foo(x): + ... return 4.2 * x * x - x + 6.3 + ... + >>> a = arange(5) + >>> a + array([0, 1, 2, 3, 4]) + >>> foo(a) + array([ 6.3, 9.5, 21.1, 41.1, 69.5]) + """ + class UFunc(object): + + def __init__(self, f): + nin = f.func_code.co_argcount + nout = 1 + for i, sig in enumerate(signatures): + if isinstance(sig, tuple): + pass + elif sig in typedict.keys(): + signatures[i] = (nin + nout) * (sig,) + else: + raise TypeError("no match for %r" % sig) + + for sig in signatures: + assert isinstance(sig, tuple) + if len(sig) != nin + nout: + raise TypeError("signature %r does not match the " + "number of args of function %s" % + (sig, f.__name__)) + for t in sig: + if t not in typedict.keys(): + raise TypeError("no match for %r" % t) + + self.ufunc = genufunc(f, signatures) + + def __call__(self, *args): + return self.ufunc(*args) + + if isinstance(arg0, FunctionType): + f = arg0 + signatures = [float] + return UFunc(f) + + elif isinstance(arg0, list): + signatures = arg0 + return UFunc + + elif arg0 in typedict.keys(): + signatures = [arg0] + return UFunc + + else: + raise TypeError("first argument has to be a function, a type, or " + "a list of signatures") + + +if __name__ == '__main__': + import doctest + doctest.testmod() Added: trunk/scipy/sandbox/mkufunc/mkufunc/driver.py =================================================================== --- trunk/scipy/sandbox/mkufunc/mkufunc/driver.py 2008-06-30 19:19:55 UTC (rev 4506) +++ trunk/scipy/sandbox/mkufunc/mkufunc/driver.py 2008-07-01 00:32:12 UTC (rev 4507) @@ -0,0 +1,728 @@ +import sys, os + +from pypy.translator.translator import TranslationContext, graphof +from pypy.translator.tool.taskengine import SimpleTaskEngine +from pypy.translator.goal import query +from pypy.translator.goal.timing import Timer +from pypy.annotation import model as annmodel +from pypy.annotation.listdef import s_list_of_strings +from pypy.annotation import policy as annpolicy +from py.compat import optparse +from pypy.tool.udir import udir + +import py +from pypy.tool.ansi_print import ansi_log +log = py.log.Producer("translation") +py.log.setconsumer("translation", ansi_log) + +DEFAULTS = { + 'translation.gc': 'ref', + 'translation.cc': None, + 'translation.profopt': None, + + 'translation.thread': False, # influences GC policy + + 'translation.stackless': False, + 'translation.debug': True, + 'translation.insist': False, + 'translation.backend': 'c', + 'translation.fork_before': None, + 'translation.backendopt.raisingop2direct_call' : False, + 'translation.backendopt.merge_if_blocks': True, +} + + +def taskdef(taskfunc, deps, title, new_state=None, expected_states=[], + idemp=False, earlycheck=None): + taskfunc.task_deps = deps + taskfunc.task_title = title + taskfunc.task_newstate = None + taskfunc.task_expected_states = expected_states + taskfunc.task_idempotent = idemp + taskfunc.task_earlycheck = earlycheck + return 
taskfunc + +# TODO: +# sanity-checks using states + +_BACKEND_TO_TYPESYSTEM = { + 'c': 'lltype', + 'llvm': 'lltype' +} + +def backend_to_typesystem(backend): + return _BACKEND_TO_TYPESYSTEM.get(backend, 'ootype') + +# set of translation steps to profile +PROFILE = set([]) + +class Instrument(Exception): + pass + + +class ProfInstrument(object): + name = "profinstrument" + def __init__(self, datafile, compiler): + self.datafile = datafile + self.compiler = compiler + + def first(self): + self.compiler._build() + + def probe(self, exe, args): + from py.compat import subprocess + env = os.environ.copy() + env['_INSTRUMENT_COUNTERS'] = str(self.datafile) + subprocess.call("'%s' %s" % (exe, args), env=env, shell=True) + + def after(self): + # xxx + os._exit(0) + + +class TranslationDriver(SimpleTaskEngine): + + def __init__(self, setopts=None, default_goal=None, + disable=[], + exe_name=None, extmod_name=None, + config=None, overrides=None): + self.timer = Timer() + SimpleTaskEngine.__init__(self) + + self.log = log + + if config is None: + from pypy.config.pypyoption import get_pypy_config + config = get_pypy_config(DEFAULTS, translating=True) + self.config = config + if overrides is not None: + self.config.override(overrides) + + if setopts is not None: + self.config.set(**setopts) + + self.exe_name = exe_name + self.extmod_name = extmod_name + + self.done = {} + + self.disable(disable) + + if default_goal: + default_goal, = self.backend_select_goals([default_goal]) + if default_goal in self._maybe_skip(): + default_goal = None + + self.default_goal = default_goal + self.extra_goals = [] + self.exposed = [] + + # expose tasks + def expose_task(task, backend_goal=None): + if backend_goal is None: + backend_goal = task + def proc(): + return self.proceed(backend_goal) + self.exposed.append(task) + setattr(self, task, proc) + + backend, ts = self.get_backend_and_type_system() + for task in self.tasks: + explicit_task = task + parts = task.split('_') + if len(parts) == 1: + if task in ('annotate'): + expose_task(task) + else: + task, postfix = parts + if task in ('rtype', 'backendopt', 'llinterpret', + 'prehannotatebackendopt', 'hintannotate', + 'timeshift'): + if ts: + if ts == postfix: + expose_task(task, explicit_task) + else: + expose_task(explicit_task) + elif task in ('source', 'compile', 'run'): + if backend: + if backend == postfix: + expose_task(task, explicit_task) + elif ts: + if ts == backend_to_typesystem(postfix): + expose_task(explicit_task) + else: + expose_task(explicit_task) + + def set_extra_goals(self, goals): + self.extra_goals = goals + + def get_info(self): # XXX more? 
+ d = {'backend': self.config.translation.backend} + return d + + def get_backend_and_type_system(self): + type_system = self.config.translation.type_system + backend = self.config.translation.backend + return backend, type_system + + def backend_select_goals(self, goals): + backend, ts = self.get_backend_and_type_system() + postfixes = [''] + ['_'+p for p in (backend, ts) if p] + l = [] + for goal in goals: + for postfix in postfixes: + cand = "%s%s" % (goal, postfix) + if cand in self.tasks: + new_goal = cand + break + else: + raise Exception, "cannot infer complete goal from: %r" % goal + l.append(new_goal) + return l + + def disable(self, to_disable): + self._disabled = to_disable + + def _maybe_skip(self): + maybe_skip = [] + if self._disabled: + for goal in self.backend_select_goals(self._disabled): + maybe_skip.extend(self._depending_on_closure(goal)) + return dict.fromkeys(maybe_skip).keys() + + + def setup(self, entry_point, inputtypes, policy=None, extra={}, empty_translator=None): + standalone = inputtypes is None + self.standalone = standalone + + if standalone: + inputtypes = [s_list_of_strings] + self.inputtypes = inputtypes + + if policy is None: + policy = annpolicy.AnnotatorPolicy() + if standalone: + policy.allow_someobjects = False + self.policy = policy + + self.extra = extra + + if empty_translator: + translator = empty_translator + else: + translator = TranslationContext(config=self.config) + + self.entry_point = entry_point + self.translator = translator + self.libdef = None + + self.translator.driver_instrument_result = self.instrument_result + + def setup_library(self, libdef, policy=None, extra={}, empty_translator=None): + self.setup(None, None, policy, extra, empty_translator) + self.libdef = libdef + + def instrument_result(self, args): + backend, ts = self.get_backend_and_type_system() + if backend != 'c' or sys.platform == 'win32': + raise Exception("instrumentation requires the c backend" + " and unix for now") + from pypy.tool.udir import udir + + datafile = udir.join('_instrument_counters') + makeProfInstrument = lambda compiler: ProfInstrument(datafile, compiler) + + pid = os.fork() + if pid == 0: + # child compiling and running with instrumentation + self.config.translation.instrument = True + self.config.translation.instrumentctl = (makeProfInstrument, + args) + raise Instrument + else: + pid, status = os.waitpid(pid, 0) + if os.WIFEXITED(status): + status = os.WEXITSTATUS(status) + if status != 0: + raise Exception, "instrumentation child failed: %d" % status + else: + raise Exception, "instrumentation child aborted" + import array, struct + n = datafile.size()//struct.calcsize('L') + datafile = datafile.open('rb') + counters = array.array('L') + counters.fromfile(datafile, n) + datafile.close() + return counters + + def info(self, msg): + log.info(msg) + + def _profile(self, goal, func): + from cProfile import Profile + from pypy.tool.lsprofcalltree import KCacheGrind + d = {'func':func} + prof = Profile() + prof.runctx("res = func()", globals(), d) + KCacheGrind(prof).output(open(goal + ".out", "w")) + return d['res'] + + def _do(self, goal, func, *args, **kwds): + title = func.task_title + if goal in self.done: + self.log.info("already done: %s" % title) + return + else: + self.log.info("%s..." 
% title) + self.timer.start_event(goal) + try: + instrument = False + try: + if goal in PROFILE: + res = self._profile(goal, func) + else: + res = func() + except Instrument: + instrument = True + if not func.task_idempotent: + self.done[goal] = True + if instrument: + self.proceed('compile') + assert False, 'we should not get here' + finally: + self.timer.end_event(goal) + return res + + def task_annotate(self): + # includes annotation and annotatation simplifications + translator = self.translator + policy = self.policy + self.log.info('with policy: %s.%s' % + (policy.__class__.__module__, policy.__class__.__name__)) + + annmodel.DEBUG = self.config.translation.debug + annotator = translator.buildannotator(policy=policy) + + if self.entry_point: + s = annotator.build_types(self.entry_point, self.inputtypes) + + self.sanity_check_annotation() + if self.standalone and s.knowntype != int: + raise Exception("stand-alone program entry point must return an " + "int (and not, e.g., None or always raise an " + "exception).") + annotator.simplify() + return s + else: + assert self.libdef is not None + for func, inputtypes in self.libdef.functions: + annotator.build_types(func, inputtypes) + self.sanity_check_annotation() + annotator.simplify() + # + task_annotate = taskdef(task_annotate, [], "Annotating&simplifying") + + + def sanity_check_annotation(self): + translator = self.translator + irreg = query.qoutput(query.check_exceptblocks_qgen(translator)) + if irreg: + self.log.info("Some exceptblocks seem insane") + + lost = query.qoutput(query.check_methods_qgen(translator)) + assert not lost, "lost methods, something gone wrong with the annotation of method defs" + + so = query.qoutput(query.polluted_qgen(translator)) + tot = len(translator.graphs) + percent = int(tot and (100.0*so / tot) or 0) + # if there are a few SomeObjects even if the policy doesn't allow + # them, it means that they were put there in a controlled way + # and then it's not a warning. 
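        # Worked example for the percentage computed above: so == 3
        # polluted graphs out of tot == 120 gives
        #     int(120 and (100.0*3/120) or 0) == int(2.5) == 2
        # while tot == 0 makes the "tot and ... or 0" idiom short-circuit
        # to 0 instead of dividing by zero.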
+ if not translator.annotator.policy.allow_someobjects: + pr = self.log.info + elif percent == 0: + pr = self.log.info + else: + pr = log.WARNING + pr("-- someobjectness %2d%% (%d of %d functions polluted by SomeObjects)" % (percent, so, tot)) + + + + def task_rtype_lltype(self): + rtyper = self.translator.buildrtyper(type_system='lltype') + insist = not self.config.translation.insist + rtyper.specialize(dont_simplify_again=True, + crash_on_first_typeerror=insist) + # + task_rtype_lltype = taskdef(task_rtype_lltype, ['annotate'], "RTyping") + RTYPE = 'rtype_lltype' + + def task_rtype_ootype(self): + # Maybe type_system should simply be an option used in task_rtype + insist = not self.config.translation.insist + rtyper = self.translator.buildrtyper(type_system="ootype") + rtyper.specialize(dont_simplify_again=True, + crash_on_first_typeerror=insist) + # + task_rtype_ootype = taskdef(task_rtype_ootype, ['annotate'], "ootyping") + OOTYPE = 'rtype_ootype' + + def task_prehannotatebackendopt_lltype(self): + from pypy.translator.backendopt.all import backend_optimizations + backend_optimizations(self.translator, + inline_threshold=0, + merge_if_blocks=True, + constfold=True, + raisingop2direct_call=False, + remove_asserts=True) + # + task_prehannotatebackendopt_lltype = taskdef( + task_prehannotatebackendopt_lltype, + [RTYPE], + "Backendopt before Hint-annotate") + + def task_hintannotate_lltype(self): + from pypy.jit.hintannotator.annotator import HintAnnotator + from pypy.jit.hintannotator.model import OriginFlags + from pypy.jit.hintannotator.model import SomeLLAbstractConstant + + get_portal = self.extra['portal'] + PORTAL, POLICY = get_portal(self) + t = self.translator + self.portal_graph = graphof(t, PORTAL) + + hannotator = HintAnnotator(base_translator=t, policy=POLICY) + self.hint_translator = hannotator.translator + hs = hannotator.build_types(self.portal_graph, + [SomeLLAbstractConstant(v.concretetype, + {OriginFlags(): True}) + for v in self.portal_graph.getargs()]) + count = hannotator.bookkeeper.nonstuboriggraphcount + stubcount = hannotator.bookkeeper.stuboriggraphcount + self.log.info("The hint-annotator saw %d graphs" + " (and made stubs for %d graphs)." 
% (count, stubcount)) + n = len(list(hannotator.translator.graphs[0].iterblocks())) + self.log.info("portal has %d blocks" % n) + self.hannotator = hannotator + # + task_hintannotate_lltype = taskdef(task_hintannotate_lltype, + ['prehannotatebackendopt_lltype'], + "Hint-annotate") + + def task_timeshift_lltype(self): + from pypy.jit.timeshifter.hrtyper import HintRTyper + from pypy.jit.codegen import detect_cpu + cpu = detect_cpu.autodetect() + if cpu == 'i386': + from pypy.jit.codegen.i386.rgenop import RI386GenOp as RGenOp + RGenOp.MC_SIZE = 32 * 1024 * 1024 + elif cpu == 'ppc': + from pypy.jit.codegen.ppc.rgenop import RPPCGenOp as RGenOp + RGenOp.MC_SIZE = 32 * 1024 * 1024 + else: + raise Exception('Unsupported cpu %r'%cpu) + + del self.hint_translator + ha = self.hannotator + t = self.translator + # make the timeshifted graphs + hrtyper = HintRTyper(ha, t.rtyper, RGenOp) + hrtyper.specialize(origportalgraph=self.portal_graph, view=False) + # + task_timeshift_lltype = taskdef(task_timeshift_lltype, + ["hintannotate_lltype"], + "Timeshift") + + def task_backendopt_lltype(self): + from pypy.translator.backendopt.all import backend_optimizations + backend_optimizations(self.translator) + # + task_backendopt_lltype = taskdef(task_backendopt_lltype, + [RTYPE, + '??timeshift_lltype'], + "lltype back-end optimisations") + BACKENDOPT = 'backendopt_lltype' + + def task_backendopt_ootype(self): + from pypy.translator.backendopt.all import backend_optimizations + backend_optimizations(self.translator) + # + task_backendopt_ootype = taskdef(task_backendopt_ootype, + [OOTYPE], "ootype back-end optimisations") + OOBACKENDOPT = 'backendopt_ootype' + + + def task_stackcheckinsertion_lltype(self): + from pypy.translator.transform import insert_ll_stackcheck + count = insert_ll_stackcheck(self.translator) + self.log.info("inserted %d stack checks." % (count,)) + + task_stackcheckinsertion_lltype = taskdef( + task_stackcheckinsertion_lltype, + ['?'+BACKENDOPT, RTYPE, 'annotate'], + "inserting stack checks") + STACKCHECKINSERTION = 'stackcheckinsertion_lltype' + + def possibly_check_for_boehm(self): + if self.config.translation.gc == "boehm": + from pypy.translator.tool.cbuild import check_boehm_presence + from pypy.translator.tool.cbuild import CompilationError + try: + check_boehm_presence(noerr=False) + except CompilationError, e: + i = 'Boehm GC not installed. Try e.g.
"translate.py --gc=hybrid"' + raise CompilationError('%s\n--------------------\n%s' % (e, i)) + + def task_database_c(self): + translator = self.translator + if translator.annotator is not None: + translator.frozen = True + + standalone = self.standalone + + if standalone: + from pypy.translator.c.genc import CStandaloneBuilder as CBuilder + else: + from pypy.translator.c.genc import CExtModuleBuilder as CBuilder + cbuilder = CBuilder(self.translator, self.entry_point, + config=self.config) + cbuilder.stackless = self.config.translation.stackless + if not standalone: # xxx more messy + cbuilder.modulename = self.extmod_name + database = cbuilder.build_database() + self.log.info("database for generating C source was created") + self.cbuilder = cbuilder + self.database = database + # + task_database_c = taskdef(task_database_c, + [STACKCHECKINSERTION, '?'+BACKENDOPT, RTYPE, '?annotate'], + "Creating database for generating c source", + earlycheck = possibly_check_for_boehm) + + def task_source_c(self): # xxx messy + translator = self.translator + cbuilder = self.cbuilder + database = self.database + c_source_filename = cbuilder.generate_source(database) + self.log.info("written: %s" % (c_source_filename,)) + self.c_source_filename = str(c_source_filename) + # + task_source_c = taskdef(task_source_c, ['database_c'], "Generating c source") + + def task_compile_c(self): # xxx messy + cbuilder = self.cbuilder + cbuilder.compile() + + if self.standalone: + self.c_entryp = cbuilder.executable_name + self.create_exe() + else: + self.c_entryp = cbuilder.get_entry_point() + # + task_compile_c = taskdef(task_compile_c, ['source_c'], "Compiling c source") + + + def task_run_c(self): + self.backend_run('c') + # + task_run_c = taskdef(task_run_c, ['compile_c'], + "Running compiled c source", + idemp=True) + + def task_llinterpret_lltype(self): + from pypy.rpython.llinterp import LLInterpreter + py.log.setconsumer("llinterp operation", None) + + translator = self.translator + interp = LLInterpreter(translator.rtyper) + bk = translator.annotator.bookkeeper + graph = bk.getdesc(self.entry_point).getuniquegraph() + v = interp.eval_graph(graph, + self.extra.get('get_llinterp_args', + lambda: [])()) + + log.llinterpret.event("result -> %s" % v) + # + task_llinterpret_lltype = taskdef(task_llinterpret_lltype, + [STACKCHECKINSERTION, '?'+BACKENDOPT, RTYPE], + "LLInterpreting") + + def task_source_llvm(self): + translator = self.translator + if translator.annotator is None: + raise ValueError, "llvm requires annotation." 
+ + from pypy.translator.llvm import genllvm + + self.llvmgen = genllvm.GenLLVM(translator, self.standalone) + + llvm_filename = self.llvmgen.gen_source(self.entry_point) + self.log.info("written: %s" % (llvm_filename,)) + # + task_source_llvm = taskdef(task_source_llvm, + [STACKCHECKINSERTION, BACKENDOPT, RTYPE], + "Generating llvm source") + + def task_compile_llvm(self): + gen = self.llvmgen + if self.standalone: + exe_name = (self.exe_name or 'testing') % self.get_info() + self.c_entryp = gen.compile_standalone(exe_name) + self.create_exe() + else: + self.c_module, self.c_entryp = gen.compile_module() + # + task_compile_llvm = taskdef(task_compile_llvm, + ['source_llvm'], + "Compiling llvm source") + + def task_run_llvm(self): + self.backend_run('llvm') + # + task_run_llvm = taskdef(task_run_llvm, ['compile_llvm'], + "Running compiled llvm source", + idemp=True) + + def task_source_js(self): + from pypy.translator.js.js import JS + self.gen = JS(self.translator, functions=[self.entry_point], + stackless=self.config.translation.stackless) + filename = self.gen.write_source() + self.log.info("Wrote %s" % (filename,)) + task_source_js = taskdef(task_source_js, + [OOTYPE], + 'Generating Javascript source') + + def task_compile_js(self): + pass + task_compile_js = taskdef(task_compile_js, ['source_js'], + 'Skipping Javascript compilation') + + def task_run_js(self): + pass + task_run_js = taskdef(task_run_js, ['compile_js'], + 'Please manually run the generated code') + + def task_source_cli(self): + from pypy.translator.cli.gencli import GenCli + from pypy.translator.cli.entrypoint import get_entrypoint + + if self.entry_point is not None: # executable mode + entry_point_graph = self.translator.graphs[0] + entry_point = get_entrypoint(entry_point_graph) + else: + # library mode + assert self.libdef is not None + bk = self.translator.annotator.bookkeeper + entry_point = self.libdef.get_entrypoint(bk) + + self.gen = GenCli(udir, self.translator, entry_point, config=self.config) + filename = self.gen.generate_source() + self.log.info("Wrote %s" % (filename,)) + task_source_cli = taskdef(task_source_cli, ["?" + OOBACKENDOPT, OOTYPE], + 'Generating CLI source') + + def task_compile_cli(self): + from pypy.translator.oosupport.support import unpatch_os + from pypy.translator.cli.test.runtest import CliFunctionWrapper + filename = self.gen.build_exe() + self.c_entryp = CliFunctionWrapper(filename) + # restore original os values + if hasattr(self, 'old_cli_defs'): + unpatch_os(self.old_cli_defs) + + self.log.info("Compiled %s" % filename) + if self.standalone and self.exe_name: + self.copy_cli_exe() + task_compile_cli = taskdef(task_compile_cli, ['source_cli'], + 'Compiling CLI source') + + def task_run_cli(self): + pass + task_run_cli = taskdef(task_run_cli, ['compile_cli'], + 'XXX') + + def task_source_jvm(self): + from pypy.translator.jvm.genjvm import GenJvm + from pypy.translator.jvm.node import EntryPoint + + entry_point_graph = self.translator.graphs[0] + is_func = not self.standalone + entry_point = EntryPoint(entry_point_graph, is_func, is_func) + self.gen = GenJvm(udir, self.translator, entry_point) + self.jvmsource = self.gen.generate_source() + self.log.info("Wrote JVM code") + task_source_jvm = taskdef(task_source_jvm, ["?" 
+ OOBACKENDOPT, OOTYPE], + 'Generating JVM source') + + def task_compile_jvm(self): + from pypy.translator.oosupport.support import unpatch_os + from pypy.translator.jvm.test.runtest import JvmGeneratedSourceWrapper + self.jvmsource.compile() + self.c_entryp = JvmGeneratedSourceWrapper(self.jvmsource) + # restore original os values + if hasattr(self, 'old_cli_defs'): + unpatch_os(self.old_cli_defs) + self.log.info("Compiled JVM source") + if self.standalone and self.exe_name: + self.copy_jvm_jar() + task_compile_jvm = taskdef(task_compile_jvm, ['source_jvm'], + 'Compiling JVM source') + + def task_run_jvm(self): + pass + task_run_jvm = taskdef(task_run_jvm, ['compile_jvm'], + 'XXX') + + def proceed(self, goals): + if not goals: + if self.default_goal: + goals = [self.default_goal] + else: + self.log.info("nothing to do") + return + elif isinstance(goals, str): + goals = [goals] + goals.extend(self.extra_goals) + goals = self.backend_select_goals(goals) + return self._execute(goals, task_skip = self._maybe_skip()) + + def from_targetspec(targetspec_dic, config=None, args=None, + empty_translator=None, + disable=[], + default_goal=None): + if args is None: + args = [] + + driver = TranslationDriver(config=config, default_goal=default_goal, + disable=disable) + # patch some attributes of the os module to make sure they + # have the same value on every platform. + backend, ts = driver.get_backend_and_type_system() + if backend in ('cli', 'jvm'): + from pypy.translator.oosupport.support import patch_os + driver.old_cli_defs = patch_os() + + target = targetspec_dic['target'] + spec = target(driver, args) + + try: + entry_point, inputtypes, policy = spec + except ValueError: + entry_point, inputtypes = spec + policy = None + + driver.setup(entry_point, inputtypes, + policy=policy, + extra=targetspec_dic, + empty_translator=empty_translator) + + return driver + + from_targetspec = staticmethod(from_targetspec) + + def prereq_checkpt_rtype(self): + assert 'pypy.rpython.rmodel' not in sys.modules, ( + "cannot fork because the rtyper has already been imported") + prereq_checkpt_rtype_lltype = prereq_checkpt_rtype + prereq_checkpt_rtype_ootype = prereq_checkpt_rtype Added: trunk/scipy/sandbox/mkufunc/mkufunc/interactive.py =================================================================== --- trunk/scipy/sandbox/mkufunc/mkufunc/interactive.py 2008-06-30 19:19:55 UTC (rev 4506) +++ trunk/scipy/sandbox/mkufunc/mkufunc/interactive.py 2008-07-01 00:32:12 UTC (rev 4507) @@ -0,0 +1,115 @@ +import driver + +from pypy.translator.translator import TranslationContext + + +DEFAULTS = { + 'translation.backend': None, + 'translation.type_system': None, + 'translation.verbose': False, +} + +class Translation(object): + + def __init__(self, entry_point, argtypes=None, **kwds): + self.driver = driver.TranslationDriver(overrides=DEFAULTS) + self.config = self.driver.config + + self.entry_point = entry_point + self.context = TranslationContext(config=self.config) + + # hook into driver events + driver_own_event = self.driver._event + def _event(kind, goal, func): + self.driver_event(kind, goal, func) + driver_own_event(kind, goal, func) + self.driver._event = _event + self.driver_setup = False + + self.update_options(argtypes, kwds) + # for t.view() to work just after construction + graph = self.context.buildflowgraph(entry_point) + self.context._prebuilt_graphs[entry_point] = graph + + def driver_event(self, kind, goal, func): + if kind == 'pre': + self.ensure_setup() + + def ensure_setup(self, argtypes=None, 
policy=None, standalone=False): + if not self.driver_setup: + if standalone: + assert argtypes is None + else: + if argtypes is None: + argtypes = [] + self.driver.setup(self.entry_point, argtypes, policy, + empty_translator=self.context) + self.ann_argtypes = argtypes + self.ann_policy = policy + self.driver_setup = True + else: + # check consistency + if standalone: + assert argtypes is None + assert self.ann_argtypes is None + elif argtypes is not None and argtypes != self.ann_argtypes: + raise Exception("inconsistent argtype supplied") + if policy is not None and policy != self.ann_policy: + raise Exception("inconsistent annotation policy supplied") + + def update_options(self, argtypes, kwds): + if argtypes or kwds.get('policy') or kwds.get('standalone'): + self.ensure_setup(argtypes, kwds.get('policy'), + kwds.get('standalone')) + kwds.pop('policy', None) + kwds.pop('standalone', None) + self.config.translation.set(**kwds) + + def ensure_opt(self, name, value=None, fallback=None): + if value is not None: + self.update_options(None, {name: value}) + return value + val = getattr(self.config.translation, name, None) + if fallback is not None and val is None: + self.update_options(None, {name: fallback}) + return fallback + if val is not None: + return val + raise Exception( + "the %r option should have been specified at this point" %name) + + def ensure_type_system(self, type_system=None): + if self.config.translation.backend is not None: + return self.ensure_opt('type_system') + return self.ensure_opt('type_system', type_system, 'lltype') + + def ensure_backend(self, backend=None): + backend = self.ensure_opt('backend', backend) + self.ensure_type_system() + return backend + + # backend independent + + def annotate(self, argtypes=None, **kwds): + self.update_options(argtypes, kwds) + return self.driver.annotate() + + # type system dependent + + def rtype(self, argtypes=None, **kwds): + self.update_options(argtypes, kwds) + ts = self.ensure_type_system() + return getattr(self.driver, 'rtype_'+ts)() + + # backend dependent + + def source(self, argtypes=None, **kwds): + self.update_options(argtypes, kwds) + backend = self.ensure_backend() + self.driver.source_c() + + def compile(self, argtypes=None, **kwds): + self.update_options(argtypes, kwds) + backend = self.ensure_backend() + self.driver.compile_c() + return self.driver.c_entryp Added: trunk/scipy/sandbox/mkufunc/mkufunc/pypy.h =================================================================== --- trunk/scipy/sandbox/mkufunc/mkufunc/pypy.h 2008-06-30 19:19:55 UTC (rev 4506) +++ trunk/scipy/sandbox/mkufunc/mkufunc/pypy.h 2008-07-01 00:32:12 UTC (rev 4507) @@ -0,0 +1,381 @@ + +#include +#include +#include + +/* ================================================== g_prerequisite.h === */ + +typedef unsigned char bool_t; + +/* ================================================== exception.h ======== */ + +#define RPY_DEBUG_RETURN() /* nothing */ + + +/* ================================================== int.h ============== */ + +/*** unary operations ***/ + +#define OP_INT_IS_TRUE(x,r) OP_INT_NE(x,0,r) + +#define OP_INT_INVERT(x,r) r = ~((x)) + +#define OP_INT_NEG(x,r) r = -(x) + +#define OP_INT_NEG_OVF(x,r) \ + if ((x) == LONG_MIN) FAIL_OVF("integer negate"); \ + OP_INT_NEG(x,r) +#define OP_LLONG_NEG_OVF(x,r) \ + if ((x) == LLONG_MIN) FAIL_OVF("integer negate"); \ + OP_LLONG_NEG(x,r) + +#define OP_INT_ABS(x,r) r = (x) >= 0 ?
x : -(x) + +#define OP_INT_ABS_OVF(x,r) \ + if ((x) == LONG_MIN) FAIL_OVF("integer absolute"); \ + OP_INT_ABS(x,r) +#define OP_LLONG_ABS_OVF(x,r) \ + if ((x) == LLONG_MIN) FAIL_OVF("integer absolute"); \ + OP_LLONG_ABS(x,r) + +/*** binary operations ***/ + +#define OP_INT_EQ(x,y,r) r = ((x) == (y)) +#define OP_INT_NE(x,y,r) r = ((x) != (y)) +#define OP_INT_LE(x,y,r) r = ((x) <= (y)) +#define OP_INT_GT(x,y,r) r = ((x) > (y)) +#define OP_INT_LT(x,y,r) r = ((x) < (y)) +#define OP_INT_GE(x,y,r) r = ((x) >= (y)) + +/* addition, subtraction */ + +#define OP_INT_ADD(x,y,r) r = (x) + (y) + +#define OP_INT_ADD_OVF(x,y,r) \ + OP_INT_ADD(x,y,r); \ + if ((r^(x)) >= 0 || (r^(y)) >= 0); \ + else FAIL_OVF("integer addition") + +#define OP_INT_ADD_NONNEG_OVF(x,y,r) /* y can be assumed >= 0 */ \ + OP_INT_ADD(x,y,r); \ + if (r >= (x)); \ + else FAIL_OVF("integer addition") +/* XXX can a C compiler be too clever and think it can "prove" that + * r >= x always hold above? */ + +#define OP_INT_SUB(x,y,r) r = (x) - (y) + +#define OP_INT_SUB_OVF(x,y,r) \ + OP_INT_SUB(x,y,r); \ + if ((r^(x)) >= 0 || (r^~(y)) >= 0); \ + else FAIL_OVF("integer subtraction") + +#define OP_INT_MUL(x,y,r) r = (x) * (y) + +#if defined(HAVE_LONG_LONG) && SIZE_OF_LONG_LONG < SIZE_OF_LONG +# define OP_INT_MUL_OVF_LL 1 +#else +# define OP_INT_MUL_OVF_LL 0 +#endif + +#if !OP_INT_MUL_OVF_LL + +#define OP_INT_MUL_OVF(x,y,r) \ + if (op_int_mul_ovf(x,y,&r)); \ + else FAIL_OVF("integer multiplication") + +#else + +#define OP_INT_MUL_OVF(x,y,r) \ + { \ + PY_LONG_LONG lr = (PY_LONG_LONG)(x) * (PY_LONG_LONG)(y); \ + r = (long)lr; \ + if ((PY_LONG_LONG)r == lr); \ + else FAIL_OVF("integer multiplication"); \ + } +#endif + +/* shifting */ + +/* NB. shifting has same limitations as C: the shift count must be + >= 0 and < LONG_BITS.
*/ +#define OP_INT_RSHIFT(x,y,r) r = Py_ARITHMETIC_RIGHT_SHIFT(long, x, y) +#define OP_UINT_RSHIFT(x,y,r) r = (x) >> (y) +#define OP_LLONG_RSHIFT(x,y,r) r = Py_ARITHMETIC_RIGHT_SHIFT(PY_LONG_LONG,x,y) +#define OP_ULLONG_RSHIFT(x,y,r) r = (x) >> (y) + +#define OP_INT_LSHIFT(x,y,r) r = (x) << (y) +#define OP_UINT_LSHIFT(x,y,r) r = (x) << (y) +#define OP_LLONG_LSHIFT(x,y,r) r = (x) << (y) +#define OP_ULLONG_LSHIFT(x,y,r) r = (x) << (y) + +#define OP_INT_LSHIFT_OVF(x,y,r) \ + OP_INT_LSHIFT(x,y,r); \ + if ((x) != Py_ARITHMETIC_RIGHT_SHIFT(long, r, (y))) \ + FAIL_OVF("x<<y losing bits or changing sign") + +#define OP_INT_RSHIFT_VAL(x,y,r) \ + if ((y) >= 0) { OP_INT_RSHIFT(x,y,r); } \ + else FAIL_VAL("negative shift count") +#define OP_LLONG_RSHIFT_VAL(x,y,r) \ + if ((y) >= 0) { OP_LLONG_RSHIFT(x,y,r); } \ + else FAIL_VAL("negative shift count") + +#define OP_INT_LSHIFT_VAL(x,y,r) \ + if ((y) >= 0) { OP_INT_LSHIFT(x,y,r); } \ + else FAIL_VAL("negative shift count") +#define OP_LLONG_LSHIFT_VAL(x,y,r) \ + if ((y) >= 0) { OP_LLONG_LSHIFT(x,y,r); } \ + else FAIL_VAL("negative shift count") + +#define OP_INT_LSHIFT_OVF_VAL(x,y,r) \ + if ((y) >= 0) { OP_INT_LSHIFT_OVF(x,y,r); } \ + else FAIL_VAL("negative shift count") + +/* pff */ +#define OP_UINT_LSHIFT_VAL(x,y,r) \ + if ((y) >= 0) { OP_UINT_LSHIFT(x,y,r); } \ + else FAIL_VAL("negative shift count") +#define OP_ULLONG_LSHIFT_VAL(x,y,r) \ + if ((y) >= 0) { OP_ULLONG_LSHIFT(x,y,r); } \ + else FAIL_VAL("negative shift count") + +#define OP_UINT_RSHIFT_VAL(x,y,r) \ + if ((y) >= 0) { OP_UINT_RSHIFT(x,y,r); } \ + else FAIL_VAL("negative shift count") +#define OP_ULLONG_RSHIFT_VAL(x,y,r) \ + if ((y) >= 0) { OP_ULLONG_RSHIFT(x,y,r); } \ + else FAIL_VAL("negative shift count") + + +/* floor division */ + +#define OP_INT_FLOORDIV(x,y,r) r = (x) / (y) +#define OP_UINT_FLOORDIV(x,y,r) r = (x) / (y) +#define OP_LLONG_FLOORDIV(x,y,r) r = (x) / (y) +#define OP_ULLONG_FLOORDIV(x,y,r) r = (x) / (y) + +#define OP_INT_FLOORDIV_OVF(x,y,r) \ + if ((y) == -1 && (x) == LONG_MIN) \ + { FAIL_OVF("integer division"); } \ + else OP_INT_FLOORDIV(x,y,r) + +#define OP_INT_FLOORDIV_ZER(x,y,r) \ + if ((y)) { OP_INT_FLOORDIV(x,y,r); } \ + else FAIL_ZER("integer division") +#define OP_UINT_FLOORDIV_ZER(x,y,r) \ + if ((y)) { OP_UINT_FLOORDIV(x,y,r); } \ + else FAIL_ZER("unsigned integer division") +#define OP_LLONG_FLOORDIV_ZER(x,y,r) \ + if ((y)) { OP_LLONG_FLOORDIV(x,y,r); } \ + else FAIL_ZER("integer division") +#define OP_ULLONG_FLOORDIV_ZER(x,y,r) \ + if ((y)) { OP_ULLONG_FLOORDIV(x,y,r); } \ + else FAIL_ZER("unsigned integer division") + +#define OP_INT_FLOORDIV_OVF_ZER(x,y,r) \ + if ((y)) { OP_INT_FLOORDIV_OVF(x,y,r); } \ + else FAIL_ZER("integer division") + +/* modulus */ + +#define OP_INT_MOD(x,y,r) r = (x) % (y) +#define OP_UINT_MOD(x,y,r) r = (x) % (y) +#define OP_LLONG_MOD(x,y,r) r = (x) % (y) +#define OP_ULLONG_MOD(x,y,r) r = (x) % (y) + +#define OP_INT_MOD_OVF(x,y,r) \ + if ((y) == -1 && (x) == LONG_MIN) \ + { FAIL_OVF("integer modulo"); }\ + else OP_INT_MOD(x,y,r) + +#define OP_INT_MOD_ZER(x,y,r) \ + if ((y)) { OP_INT_MOD(x,y,r); } \ + else FAIL_ZER("integer modulo") +#define OP_UINT_MOD_ZER(x,y,r) \ + if ((y)) { OP_UINT_MOD(x,y,r); } \ + else FAIL_ZER("unsigned integer modulo") +#define OP_LLONG_MOD_ZER(x,y,r) \ + if ((y)) { OP_LLONG_MOD(x,y,r); } \ + else FAIL_ZER("integer modulo") +#define OP_ULLONG_MOD_ZER(x,y,r) \ + if ((y)) { OP_ULLONG_MOD(x,y,r); } \ + else FAIL_ZER("integer modulo") + +#define OP_INT_MOD_OVF_ZER(x,y,r) \ + if ((y)) { OP_INT_MOD_OVF(x,y,r); } \ + else FAIL_ZER("integer modulo") + +/* bit operations */ + +#define
OP_INT_AND(x,y,r) r = (x) & (y) +#define OP_INT_OR( x,y,r) r = (x) | (y) +#define OP_INT_XOR(x,y,r) r = (x) ^ (y) + +/*** conversions ***/ + +#define OP_CAST_BOOL_TO_INT(x,r) r = (long)(x) +#define OP_CAST_BOOL_TO_UINT(x,r) r = (unsigned long)(x) +#define OP_CAST_UINT_TO_INT(x,r) r = (long)(x) +#define OP_CAST_INT_TO_UINT(x,r) r = (unsigned long)(x) +#define OP_CAST_INT_TO_LONGLONG(x,r) r = (long long)(x) +#define OP_CAST_CHAR_TO_INT(x,r) r = (long)((unsigned char)(x)) +#define OP_CAST_INT_TO_CHAR(x,r) r = (char)(x) +#define OP_CAST_PTR_TO_INT(x,r) r = (long)(x) /* XXX */ + +#define OP_TRUNCATE_LONGLONG_TO_INT(x,r) r = (long)(x) + +#define OP_CAST_UNICHAR_TO_INT(x,r) r = (long)((unsigned long)(x)) /*?*/ +#define OP_CAST_INT_TO_UNICHAR(x,r) r = (unsigned int)(x) + +/* bool operations */ + +#define OP_BOOL_NOT(x, r) r = !(x) + +/* _________________ certain implementations __________________ */ + +#if !OP_INT_MUL_OVF_LL +/* adjusted from intobject.c, Python 2.3.3 */ + +/* prototypes */ + +int op_int_mul_ovf(long a, long b, long *longprod); + +/* implementations */ + +#ifndef PYPY_NOT_MAIN_FILE + +int +op_int_mul_ovf(long a, long b, long *longprod) +{ + double doubled_longprod; /* (double)longprod */ + double doubleprod; /* (double)a * (double)b */ + + *longprod = a * b; + doubleprod = (double)a * (double)b; + doubled_longprod = (double)*longprod; + + /* Fast path for normal case: small multiplicands, and no info + is lost in either method. */ + if (doubled_longprod == doubleprod) + return 1; + + /* Somebody somewhere lost info. Close enough, or way off? Note + that a != 0 and b != 0 (else doubled_longprod == doubleprod == 0). + The difference either is or isn't significant compared to the + true value (of which doubleprod is a good approximation). + */ + { + const double diff = doubled_longprod - doubleprod; + const double absdiff = diff >= 0.0 ? diff : -diff; + const double absprod = doubleprod >= 0.0 ? 
doubleprod : + -doubleprod; + /* absdiff/absprod <= 1/32 iff + 32 * absdiff <= absprod -- 5 good bits is "close enough" */ + if (32.0 * absdiff <= absprod) + return 1; + return 0; + } +} + +#endif /* PYPY_NOT_MAIN_FILE */ + +#endif /* !OP_INT_MUL_OVF_LL */ + +/* implementations */ + +#define OP_UINT_IS_TRUE OP_INT_IS_TRUE +#define OP_UINT_INVERT OP_INT_INVERT +#define OP_UINT_ADD OP_INT_ADD +#define OP_UINT_SUB OP_INT_SUB +#define OP_UINT_MUL OP_INT_MUL +#define OP_UINT_LT OP_INT_LT +#define OP_UINT_LE OP_INT_LE +#define OP_UINT_EQ OP_INT_EQ +#define OP_UINT_NE OP_INT_NE +#define OP_UINT_GT OP_INT_GT +#define OP_UINT_GE OP_INT_GE +#define OP_UINT_AND OP_INT_AND +#define OP_UINT_OR OP_INT_OR +#define OP_UINT_XOR OP_INT_XOR + +#define OP_LLONG_IS_TRUE OP_INT_IS_TRUE +#define OP_LLONG_NEG OP_INT_NEG +#define OP_LLONG_ABS OP_INT_ABS +#define OP_LLONG_INVERT OP_INT_INVERT + +#define OP_LLONG_ADD OP_INT_ADD +#define OP_LLONG_SUB OP_INT_SUB +#define OP_LLONG_MUL OP_INT_MUL +#define OP_LLONG_LT OP_INT_LT +#define OP_LLONG_LE OP_INT_LE +#define OP_LLONG_EQ OP_INT_EQ +#define OP_LLONG_NE OP_INT_NE +#define OP_LLONG_GT OP_INT_GT +#define OP_LLONG_GE OP_INT_GE +#define OP_LLONG_AND OP_INT_AND +#define OP_LLONG_OR OP_INT_OR +#define OP_LLONG_XOR OP_INT_XOR + +#define OP_ULLONG_IS_TRUE OP_LLONG_IS_TRUE +#define OP_ULLONG_INVERT OP_LLONG_INVERT +#define OP_ULLONG_ADD OP_LLONG_ADD +#define OP_ULLONG_SUB OP_LLONG_SUB +#define OP_ULLONG_MUL OP_LLONG_MUL +#define OP_ULLONG_LT OP_LLONG_LT +#define OP_ULLONG_LE OP_LLONG_LE +#define OP_ULLONG_EQ OP_LLONG_EQ +#define OP_ULLONG_NE OP_LLONG_NE +#define OP_ULLONG_GT OP_LLONG_GT +#define OP_ULLONG_GE OP_LLONG_GE +#define OP_ULLONG_AND OP_LLONG_AND +#define OP_ULLONG_OR OP_LLONG_OR +#define OP_ULLONG_XOR OP_LLONG_XOR + +/* ================================================== float.h ============ */ + +/*** unary operations ***/ + +#define OP_FLOAT_IS_TRUE(x,r) OP_FLOAT_NE(x,0.0,r) +#define OP_FLOAT_NEG(x,r) r = -x +#define OP_FLOAT_ABS(x,r) r = fabs(x) + +/*** binary operations ***/ + +#define OP_FLOAT_EQ(x,y,r) r = (x == y) +#define OP_FLOAT_NE(x,y,r) r = (x != y) +#define OP_FLOAT_LE(x,y,r) r = (x <= y) +#define OP_FLOAT_GT(x,y,r) r = (x > y) +#define OP_FLOAT_LT(x,y,r) r = (x < y) +#define OP_FLOAT_GE(x,y,r) r = (x >= y) + +#define OP_FLOAT_CMP(x,y,r) \ + r = ((x > y) - (x < y)) + +/* addition, subtraction */ + +#define OP_FLOAT_ADD(x,y,r) r = x + y +#define OP_FLOAT_SUB(x,y,r) r = x - y +#define OP_FLOAT_MUL(x,y,r) r = x * y +#define OP_FLOAT_TRUEDIV(x,y,r) r = x / y +#define OP_FLOAT_POW(x,y,r) r = pow(x, y) + +/*** conversions ***/ + +#define OP_CAST_FLOAT_TO_INT(x,r) r = (long)(x) +#define OP_CAST_FLOAT_TO_UINT(x,r) r = (unsigned long)(x) +#define OP_CAST_INT_TO_FLOAT(x,r) r = (double)(x) +#define OP_CAST_UINT_TO_FLOAT(x,r) r = (double)(x) +#define OP_CAST_LONGLONG_TO_FLOAT(x,r) r = (double)(x) +#define OP_CAST_BOOL_TO_FLOAT(x,r) r = (double)(x) + +#ifdef HAVE_LONG_LONG +#define OP_CAST_FLOAT_TO_LONGLONG(x,r) r = (long long)(x) +#endif + +/* ================================================== support.h ========== */ + +#define RPyField(ptr, name) NULL + Added: trunk/scipy/sandbox/mkufunc/mkufunc/test_func_hash.py =================================================================== --- trunk/scipy/sandbox/mkufunc/mkufunc/test_func_hash.py 2008-06-30 19:19:55 UTC (rev 4506) +++ trunk/scipy/sandbox/mkufunc/mkufunc/test_func_hash.py 2008-07-01 00:32:12 UTC (rev 4507) @@ -0,0 +1,48 @@ +import unittest + +from api import func_hash + + +class Tests(unittest.TestCase): + 
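    # Background for the digests asserted below: func_hash (defined in
    # api.py above) is simply
    #     hashlib.md5(co.co_code + repr(co.co_names) + repr(salt)).hexdigest()
    # over the function's code object, so every expected string is tied to
    # the exact bytecode the running interpreter produces.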
+ # These tests are very (Python) version specific. + + def test_simple(self): + + def f(x): + return 2.5 * x * x + 4.7 * x + + self.assertEqual(func_hash(f), + '5f12e97debf1d2cb9e0a2f92e045b1fb') + + + def test_extra(self): + + def f(x): + return 2.5 * x * x + 4.7 * x + + self.assertEqual(func_hash(f, salt=[(int, int), (float, float)]), + 'e637d9825ef20cb56d364041118ca72e') + + def test_const(self): + + def add_a(b): + return a + b # a in globals + + self.assertEqual(func_hash(add_a), + '9ff237f372bf233470ce940edd58f60d') + + def test_inner(self): + + def foo(x): + inner1 = lambda t: t/3.0 + def inner2(n): + return n + 3 + return inner1(x) + inner2(int(x)) + + self.assertEqual(func_hash(foo), + '814c113dfc77e7ebb52915dd3ce9c37a') + + +if __name__ == '__main__': + unittest.main() Added: trunk/scipy/sandbox/mkufunc/mkufunc/test_mkufunc.py =================================================================== --- trunk/scipy/sandbox/mkufunc/mkufunc/test_mkufunc.py 2008-06-30 19:19:55 UTC (rev 4506) +++ trunk/scipy/sandbox/mkufunc/mkufunc/test_mkufunc.py 2008-07-01 00:32:12 UTC (rev 4507) @@ -0,0 +1,270 @@ +import math +import unittest + +from numpy import array, arange, allclose + +from api import Cfunc, genufunc, mkufunc + + +class Util: + + def assertClose(self, x, y): + self.assert_(allclose(x, y), '%s != %s' % (x, y)) + + +class Internal_Tests(unittest.TestCase, Util): + + def test_Cfunc(self): + def sqr(x): + return x * x + cf = Cfunc(sqr, [int, int], 42) + self.assertEqual(cf.nin, 1) + self.assertEqual(cf.nout, 1) + self.assertEqual(cf.cname, 'f42_pypy_g_sqr') + + def test_genufunc(self): + def foo(x): + return x + 17 + uf = genufunc(foo, [ + (float, float), + (int, int), + ]) + self.assertEqual(uf(4), 21) + x = array([1.1, 2.3]) + y = uf(x) + self.assertClose(y, [18.1, 19.3]) + self.assert_(str(y.dtype).startswith('float')) + + x = array([1, 4]) + y = uf(x) + self.assertEqual(list(y), [18, 21]) + self.assert_(str(y.dtype).startswith('int')) + + +class Arg_Tests(unittest.TestCase, Util): + + def check_ufunc(self, f): + for arg in (array([0.0, 1.0, 2.5]), + [0.0, 1.0, 2.5], + (0.0, 1.0, 2.5)): + self.assertClose(f(arg), [0.0, 1.0, 6.25]) + + self.assertEqual(f(3), 9) + self.assert_(f(-2.5) - 6.25 < 1E-10) + + def test_direct(self): + @mkufunc + def f(x): + return x * x + self.check_ufunc(f) + + def test_noargs(self): + @mkufunc() + def f(x): + return x * x + self.check_ufunc(f) + + def test_varargs(self): + for arg in (float, + [float], + [(float, float)]): + @mkufunc(arg) + def f(x): + return x * x + self.check_ufunc(f) + + def test_int(self): + @mkufunc(int) + def f(x): + return x * x + self.assertEqual(f(3), 9) + self.assert_(isinstance(f(42), int)) + + def test_mixed(self): + @mkufunc([(int, float, int), float]) + def f(n, x): + return n + x * x + + y = f(2, 3.9) # Note that int(2 + 3.9 * 3.9) = 17 + self.assertEqual(y, 17) + self.assert_(isinstance(y, int)) + + y = f(2.0, 3.9) + self.assertClose(y, 17.21) + self.assert_(isinstance(y, float)) + + def test_exceptions(self): + def f(x): + return x + + self.assertRaises(TypeError, mkufunc, {}) + self.assertRaises(TypeError, mkufunc([(float,)]), f) + self.assertRaises(TypeError, mkufunc([3*(float,)]), f) + self.assertRaises(TypeError, mkufunc([{}]), f) + self.assertRaises(TypeError, mkufunc([(int, {})]), f) + self.assertRaises(ValueError, mkufunc([]), f) + + +class Math_Tests(unittest.TestCase, Util): + + def assertFuncsEqual(self, uf, f): + x = 0.4376 + a = uf(x) + b = f(x) + self.assertClose(a, b) + xx = arange(0.1, 0.9, 0.01) + a = 
uf(xx) + b = [f(x) for x in xx] + self.assertClose(a, b) + + def test_exp(self): + @mkufunc + def f(x): return math.exp(x) + self.assertFuncsEqual(f, math.exp) + + def test_log(self): + @mkufunc + def f(x): return math.log(x) + self.assertFuncsEqual(f, math.log) + + def test_sqrt(self): + @mkufunc + def f(x): return math.sqrt(x) + self.assertFuncsEqual(f, math.sqrt) + + def test_cos(self): + @mkufunc + def f(x): return math.cos(x) + self.assertFuncsEqual(f, math.cos) + + def test_sin(self): + @mkufunc + def f(x): return math.sin(x) + self.assertFuncsEqual(f, math.sin) + + def test_tan(self): + @mkufunc + def f(x): return math.tan(x) + self.assertFuncsEqual(f, math.tan) + + def test_acos(self): + @mkufunc + def f(x): return math.acos(x) + self.assertFuncsEqual(f, math.acos) + + def test_asin(self): + @mkufunc + def f(x): return math.asin(x) + self.assertFuncsEqual(f, math.asin) + + def test_atan(self): + @mkufunc + def f(x): return math.atan(x) + self.assertFuncsEqual(f, math.atan) + + def test_atan2(self): + @mkufunc + def f(x, y): + return math.atan2(x, y) + + self.assertClose(f(4, 5), math.atan2(4, 5)) + + xx = array([1.0, 3.0, -2.4, 3.1, -2.3]) + yy = array([1.0, 2.0, 7.5, -8.7, 0.0]) + a = f(xx, yy) + b = [math.atan2(x, y) for x, y in zip(xx, yy)] + self.assertClose(a, b) + + def test_arithmetic(self): + def f(x): + return (4 * x + 2) / (x * x - 7 * x + 1) + uf = mkufunc(f) + x = arange(0, 2, 0.1) + self.assertClose(uf(x), f(x)) + + +class FuncArg_Tests(unittest.TestCase, Util): + + def test_fargs0(self): + def f(): + return 42 + + uf = mkufunc(f) + self.assertEqual(uf(), 42) + self.assert_(type(uf()).__name__.startswith('float')) + + uf = mkufunc(int)(f) + self.assertEqual(uf(), 42) + self.assert_(type(uf()).__name__.startswith('int')) + + +class Control_Flow_Tests(unittest.TestCase): + + def test_if(self): + @mkufunc(int) + def f(n): + if n < 4: + return n + else: + return n * n + + self.assertEqual(f(3), 3) + self.assertEqual(f(4), 16) + + def test_switch(self): + @mkufunc(int) + def f(n): + if n < 4: + return n + elif n == 4: + return 42 + elif n == 5: + return 73 + else: + return n * n + + self.assertEqual(f(3), 3) + self.assertEqual(f(4), 42) + self.assertEqual(f(5), 73) + self.assertEqual(f(6), 36) + + def test_loop(self): + @mkufunc(int) + def f(n): + res = 0 + for i in xrange(n): + res += i*i + return res + + self.assertEqual(f(3), 5) + self.assertEqual(f(95), 281295) + + +class FreeVariable_Tests(unittest.TestCase, Util): + + def test_const(self): + a = 13.6 + @mkufunc + def f(x): + return a * x + + x = arange(0, 1, 0.1) + self.assertClose(f(x), a * x) + + def test_const2(self): + from math import sin, pi, sqrt + @mkufunc + def sin_deg(angle): + return sin(angle / 180.0 * pi) + + self.assertClose(sin_deg([0, 30, 45, 60, 90, 180, 270, 360]), + [0, 0.5, 1/sqrt(2), sqrt(3)/2, 1, 0, -1, 0]) + + +class Misc_Tests(unittest.TestCase, Util): + + pass + + +if __name__ == '__main__': + unittest.main() Added: trunk/scipy/sandbox/mkufunc/setup.py =================================================================== --- trunk/scipy/sandbox/mkufunc/setup.py 2008-06-30 19:19:55 UTC (rev 4506) +++ trunk/scipy/sandbox/mkufunc/setup.py 2008-07-01 00:32:12 UTC (rev 4507) @@ -0,0 +1,15 @@ +from setuptools import setup, find_packages + +setup( + author = 'Ilan Schnell', + author_email = 'ischnell at enthought.com', + description = 'C compiled UFuncs from python source', + + name = "mkufunc", + version = "0.1", + + zip_safe = False, + package_data = {'': ['*.h']}, + packages = find_packages(), + 
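# (why the two options above matter: support_code() in mkufunc.py reads
# the bundled header with
#     open(os.path.join(os.path.dirname(__file__), 'pypy_head.h')).read()
# so package_data = {'': ['*.h']} must ship pypy_head.h, and
# zip_safe = False keeps the installed package as real files on disk,
# where that __file__-relative lookup works.)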
install_requires = ['scipy >= 0.6.0'] + ) From scipy-svn at scipy.org Mon Jun 30 20:35:15 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Mon, 30 Jun 2008 19:35:15 -0500 (CDT) Subject: [Scipy-svn] r4508 - trunk/scipy/sandbox/mkufunc Message-ID: <20080701003515.C379939C4BD@scipy.org> Author: ilan Date: 2008-06-30 19:35:15 -0500 (Mon, 30 Jun 2008) New Revision: 4508 Removed: trunk/scipy/sandbox/mkufunc/driver.py trunk/scipy/sandbox/mkufunc/interactive.py trunk/scipy/sandbox/mkufunc/pypy_head.h trunk/scipy/sandbox/mkufunc/test_func_hash.py trunk/scipy/sandbox/mkufunc/test_mkufunc.py trunk/scipy/sandbox/mkufunc/test_speed.py Log: Moved into mkufunc/ Deleted: trunk/scipy/sandbox/mkufunc/driver.py =================================================================== --- trunk/scipy/sandbox/mkufunc/driver.py 2008-07-01 00:32:12 UTC (rev 4507) +++ trunk/scipy/sandbox/mkufunc/driver.py 2008-07-01 00:35:15 UTC (rev 4508) @@ -1,728 +0,0 @@ -import sys, os - -from pypy.translator.translator import TranslationContext, graphof -from pypy.translator.tool.taskengine import SimpleTaskEngine -from pypy.translator.goal import query -from pypy.translator.goal.timing import Timer -from pypy.annotation import model as annmodel -from pypy.annotation.listdef import s_list_of_strings -from pypy.annotation import policy as annpolicy -from py.compat import optparse -from pypy.tool.udir import udir - -import py -from pypy.tool.ansi_print import ansi_log -log = py.log.Producer("translation") -py.log.setconsumer("translation", ansi_log) - -DEFAULTS = { - 'translation.gc': 'ref', - 'translation.cc': None, - 'translation.profopt': None, - - 'translation.thread': False, # influences GC policy - - 'translation.stackless': False, - 'translation.debug': True, - 'translation.insist': False, - 'translation.backend': 'c', - 'translation.fork_before': None, - 'translation.backendopt.raisingop2direct_call' : False, - 'translation.backendopt.merge_if_blocks': True, -} - - -def taskdef(taskfunc, deps, title, new_state=None, expected_states=[], - idemp=False, earlycheck=None): - taskfunc.task_deps = deps - taskfunc.task_title = title - taskfunc.task_newstate = None - taskfunc.task_expected_states = expected_states - taskfunc.task_idempotent = idemp - taskfunc.task_earlycheck = earlycheck - return taskfunc - -# TODO: -# sanity-checks using states - -_BACKEND_TO_TYPESYSTEM = { - 'c': 'lltype', - 'llvm': 'lltype' -} - -def backend_to_typesystem(backend): - return _BACKEND_TO_TYPESYSTEM.get(backend, 'ootype') - -# set of translation steps to profile -PROFILE = set([]) - -class Instrument(Exception): - pass - - -class ProfInstrument(object): - name = "profinstrument" - def __init__(self, datafile, compiler): - self.datafile = datafile - self.compiler = compiler - - def first(self): - self.compiler._build() - - def probe(self, exe, args): - from py.compat import subprocess - env = os.environ.copy() - env['_INSTRUMENT_COUNTERS'] = str(self.datafile) - subprocess.call("'%s' %s" % (exe, args), env=env, shell=True) - - def after(self): - # xxx - os._exit(0) - - -class TranslationDriver(SimpleTaskEngine): - - def __init__(self, setopts=None, default_goal=None, - disable=[], - exe_name=None, extmod_name=None, - config=None, overrides=None): - self.timer = Timer() - SimpleTaskEngine.__init__(self) - - self.log = log - - if config is None: - from pypy.config.pypyoption import get_pypy_config - config = get_pypy_config(DEFAULTS, translating=True) - self.config = config - if overrides is not None: - self.config.override(overrides) - 
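        # (configuration precedence in this constructor: the module-level
        # DEFAULTS seed get_pypy_config(), an explicit `overrides` mapping
        # is applied on top via config.override() above, and per-call
        # `setopts` options are applied last through config.set() below.)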
- if setopts is not None: - self.config.set(**setopts) - - self.exe_name = exe_name - self.extmod_name = extmod_name - - self.done = {} - - self.disable(disable) - - if default_goal: - default_goal, = self.backend_select_goals([default_goal]) - if default_goal in self._maybe_skip(): - default_goal = None - - self.default_goal = default_goal - self.extra_goals = [] - self.exposed = [] - - # expose tasks - def expose_task(task, backend_goal=None): - if backend_goal is None: - backend_goal = task - def proc(): - return self.proceed(backend_goal) - self.exposed.append(task) - setattr(self, task, proc) - - backend, ts = self.get_backend_and_type_system() - for task in self.tasks: - explicit_task = task - parts = task.split('_') - if len(parts) == 1: - if task in ('annotate'): - expose_task(task) - else: - task, postfix = parts - if task in ('rtype', 'backendopt', 'llinterpret', - 'prehannotatebackendopt', 'hintannotate', - 'timeshift'): - if ts: - if ts == postfix: - expose_task(task, explicit_task) - else: - expose_task(explicit_task) - elif task in ('source', 'compile', 'run'): - if backend: - if backend == postfix: - expose_task(task, explicit_task) - elif ts: - if ts == backend_to_typesystem(postfix): - expose_task(explicit_task) - else: - expose_task(explicit_task) - - def set_extra_goals(self, goals): - self.extra_goals = goals - - def get_info(self): # XXX more? - d = {'backend': self.config.translation.backend} - return d - - def get_backend_and_type_system(self): - type_system = self.config.translation.type_system - backend = self.config.translation.backend - return backend, type_system - - def backend_select_goals(self, goals): - backend, ts = self.get_backend_and_type_system() - postfixes = [''] + ['_'+p for p in (backend, ts) if p] - l = [] - for goal in goals: - for postfix in postfixes: - cand = "%s%s" % (goal, postfix) - if cand in self.tasks: - new_goal = cand - break - else: - raise Exception, "cannot infer complete goal from: %r" % goal - l.append(new_goal) - return l - - def disable(self, to_disable): - self._disabled = to_disable - - def _maybe_skip(self): - maybe_skip = [] - if self._disabled: - for goal in self.backend_select_goals(self._disabled): - maybe_skip.extend(self._depending_on_closure(goal)) - return dict.fromkeys(maybe_skip).keys() - - - def setup(self, entry_point, inputtypes, policy=None, extra={}, empty_translator=None): - standalone = inputtypes is None - self.standalone = standalone - - if standalone: - inputtypes = [s_list_of_strings] - self.inputtypes = inputtypes - - if policy is None: - policy = annpolicy.AnnotatorPolicy() - if standalone: - policy.allow_someobjects = False - self.policy = policy - - self.extra = extra - - if empty_translator: - translator = empty_translator - else: - translator = TranslationContext(config=self.config) - - self.entry_point = entry_point - self.translator = translator - self.libdef = None - - self.translator.driver_instrument_result = self.instrument_result - - def setup_library(self, libdef, policy=None, extra={}, empty_translator=None): - self.setup(None, None, policy, extra, empty_translator) - self.libdef = libdef - - def instrument_result(self, args): - backend, ts = self.get_backend_and_type_system() - if backend != 'c' or sys.platform == 'win32': - raise Exception("instrumentation requires the c backend" - " and unix for now") - from pypy.tool.udir import udir - - datafile = udir.join('_instrument_counters') - makeProfInstrument = lambda compiler: ProfInstrument(datafile, compiler) - - pid = os.fork() - if pid == 
0:
-            # child compiling and running with instrumentation
-            self.config.translation.instrument = True
-            self.config.translation.instrumentctl = (makeProfInstrument,
-                                                     args)
-            raise Instrument
-        else:
-            pid, status = os.waitpid(pid, 0)
-            if os.WIFEXITED(status):
-                status = os.WEXITSTATUS(status)
-                if status != 0:
-                    raise Exception, "instrumentation child failed: %d" % status
-            else:
-                raise Exception, "instrumentation child aborted"
-            import array, struct
-            n = datafile.size()//struct.calcsize('L')
-            datafile = datafile.open('rb')
-            counters = array.array('L')
-            counters.fromfile(datafile, n)
-            datafile.close()
-            return counters
-
-    def info(self, msg):
-        log.info(msg)
-
-    def _profile(self, goal, func):
-        from cProfile import Profile
-        from pypy.tool.lsprofcalltree import KCacheGrind
-        d = {'func':func}
-        prof = Profile()
-        prof.runctx("res = func()", globals(), d)
-        KCacheGrind(prof).output(open(goal + ".out", "w"))
-        return d['res']
-
-    def _do(self, goal, func, *args, **kwds):
-        title = func.task_title
-        if goal in self.done:
-            self.log.info("already done: %s" % title)
-            return
-        else:
-            self.log.info("%s..." % title)
-        self.timer.start_event(goal)
-        try:
-            instrument = False
-            try:
-                if goal in PROFILE:
-                    res = self._profile(goal, func)
-                else:
-                    res = func()
-            except Instrument:
-                instrument = True
-            if not func.task_idempotent:
-                self.done[goal] = True
-            if instrument:
-                self.proceed('compile')
-                assert False, 'we should not get here'
-        finally:
-            self.timer.end_event(goal)
-        return res
-
-    def task_annotate(self):
-        # includes annotation and annotation simplifications
-        translator = self.translator
-        policy = self.policy
-        self.log.info('with policy: %s.%s' %
-                      (policy.__class__.__module__, policy.__class__.__name__))
-
-        annmodel.DEBUG = self.config.translation.debug
-        annotator = translator.buildannotator(policy=policy)
-
-        if self.entry_point:
-            s = annotator.build_types(self.entry_point, self.inputtypes)
-
-            self.sanity_check_annotation()
-            if self.standalone and s.knowntype != int:
-                raise Exception("stand-alone program entry point must return an "
-                                "int (and not, e.g., None or always raise an "
-                                "exception).")
-            annotator.simplify()
-            return s
-        else:
-            assert self.libdef is not None
-            for func, inputtypes in self.libdef.functions:
-                annotator.build_types(func, inputtypes)
-            self.sanity_check_annotation()
-            annotator.simplify()
-    #
-    task_annotate = taskdef(task_annotate, [], "Annotating&simplifying")
-
-
-    def sanity_check_annotation(self):
-        translator = self.translator
-        irreg = query.qoutput(query.check_exceptblocks_qgen(translator))
-        if irreg:
-            self.log.info("Some exceptblocks seem insane")
-
-        lost = query.qoutput(query.check_methods_qgen(translator))
-        assert not lost, "lost methods, something has gone wrong with the annotation of method defs"
-
-        so = query.qoutput(query.polluted_qgen(translator))
-        tot = len(translator.graphs)
-        percent = int(tot and (100.0*so / tot) or 0)
-        # if there are a few SomeObjects even if the policy doesn't allow
-        # them, it means that they were put there in a controlled way
-        # and then it's not a warning.
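        # (worked example with hypothetical numbers: so=14 polluted graphs
        # out of tot=200 gives percent = int(100.0*14/200) = 7, logged as
        # "-- someobjectness  7% (14 of 200 functions polluted by
        # SomeObjects)"; the "tot and ... or 0" form is the pre-Python-2.5
        # conditional idiom guarding the division when tot == 0.)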
- if not translator.annotator.policy.allow_someobjects: - pr = self.log.info - elif percent == 0: - pr = self.log.info - else: - pr = log.WARNING - pr("-- someobjectness %2d%% (%d of %d functions polluted by SomeObjects)" % (percent, so, tot)) - - - - def task_rtype_lltype(self): - rtyper = self.translator.buildrtyper(type_system='lltype') - insist = not self.config.translation.insist - rtyper.specialize(dont_simplify_again=True, - crash_on_first_typeerror=insist) - # - task_rtype_lltype = taskdef(task_rtype_lltype, ['annotate'], "RTyping") - RTYPE = 'rtype_lltype' - - def task_rtype_ootype(self): - # Maybe type_system should simply be an option used in task_rtype - insist = not self.config.translation.insist - rtyper = self.translator.buildrtyper(type_system="ootype") - rtyper.specialize(dont_simplify_again=True, - crash_on_first_typeerror=insist) - # - task_rtype_ootype = taskdef(task_rtype_ootype, ['annotate'], "ootyping") - OOTYPE = 'rtype_ootype' - - def task_prehannotatebackendopt_lltype(self): - from pypy.translator.backendopt.all import backend_optimizations - backend_optimizations(self.translator, - inline_threshold=0, - merge_if_blocks=True, - constfold=True, - raisingop2direct_call=False, - remove_asserts=True) - # - task_prehannotatebackendopt_lltype = taskdef( - task_prehannotatebackendopt_lltype, - [RTYPE], - "Backendopt before Hint-annotate") - - def task_hintannotate_lltype(self): - from pypy.jit.hintannotator.annotator import HintAnnotator - from pypy.jit.hintannotator.model import OriginFlags - from pypy.jit.hintannotator.model import SomeLLAbstractConstant - - get_portal = self.extra['portal'] - PORTAL, POLICY = get_portal(self) - t = self.translator - self.portal_graph = graphof(t, PORTAL) - - hannotator = HintAnnotator(base_translator=t, policy=POLICY) - self.hint_translator = hannotator.translator - hs = hannotator.build_types(self.portal_graph, - [SomeLLAbstractConstant(v.concretetype, - {OriginFlags(): True}) - for v in self.portal_graph.getargs()]) - count = hannotator.bookkeeper.nonstuboriggraphcount - stubcount = hannotator.bookkeeper.stuboriggraphcount - self.log.info("The hint-annotator saw %d graphs" - " (and made stubs for %d graphs)." 
% (count, stubcount))
-        n = len(list(hannotator.translator.graphs[0].iterblocks()))
-        self.log.info("portal has %d blocks" % n)
-        self.hannotator = hannotator
-    #
-    task_hintannotate_lltype = taskdef(task_hintannotate_lltype,
-                                       ['prehannotatebackendopt_lltype'],
-                                       "Hint-annotate")
-
-    def task_timeshift_lltype(self):
-        from pypy.jit.timeshifter.hrtyper import HintRTyper
-        from pypy.jit.codegen import detect_cpu
-        cpu = detect_cpu.autodetect()
-        if cpu == 'i386':
-            from pypy.jit.codegen.i386.rgenop import RI386GenOp as RGenOp
-            RGenOp.MC_SIZE = 32 * 1024 * 1024
-        elif cpu == 'ppc':
-            from pypy.jit.codegen.ppc.rgenop import RPPCGenOp as RGenOp
-            RGenOp.MC_SIZE = 32 * 1024 * 1024
-        else:
-            raise Exception('Unsupported cpu %r'%cpu)
-
-        del self.hint_translator
-        ha = self.hannotator
-        t = self.translator
-        # make the timeshifted graphs
-        hrtyper = HintRTyper(ha, t.rtyper, RGenOp)
-        hrtyper.specialize(origportalgraph=self.portal_graph, view=False)
-    #
-    task_timeshift_lltype = taskdef(task_timeshift_lltype,
-                                    ["hintannotate_lltype"],
-                                    "Timeshift")
-
-    def task_backendopt_lltype(self):
-        from pypy.translator.backendopt.all import backend_optimizations
-        backend_optimizations(self.translator)
-    #
-    task_backendopt_lltype = taskdef(task_backendopt_lltype,
-                                     [RTYPE,
-                                      '??timeshift_lltype'],
-                                     "lltype back-end optimisations")
-    BACKENDOPT = 'backendopt_lltype'
-
-    def task_backendopt_ootype(self):
-        from pypy.translator.backendopt.all import backend_optimizations
-        backend_optimizations(self.translator)
-    #
-    task_backendopt_ootype = taskdef(task_backendopt_ootype,
-                                     [OOTYPE], "ootype back-end optimisations")
-    OOBACKENDOPT = 'backendopt_ootype'
-
-
-    def task_stackcheckinsertion_lltype(self):
-        from pypy.translator.transform import insert_ll_stackcheck
-        count = insert_ll_stackcheck(self.translator)
-        self.log.info("inserted %d stack checks." % (count,))
-
-    task_stackcheckinsertion_lltype = taskdef(
-        task_stackcheckinsertion_lltype,
-        ['?'+BACKENDOPT, RTYPE, 'annotate'],
-        "inserting stack checks")
-    STACKCHECKINSERTION = 'stackcheckinsertion_lltype'
-
-    def possibly_check_for_boehm(self):
-        if self.config.translation.gc == "boehm":
-            from pypy.translator.tool.cbuild import check_boehm_presence
-            from pypy.translator.tool.cbuild import CompilationError
-            try:
-                check_boehm_presence(noerr=False)
-            except CompilationError, e:
-                i = 'Boehm GC not installed. Try e.g. 
"translate.py --gc=hybrid"' - raise CompilationError('%s\n--------------------\n%s' % (e, i)) - - def task_database_c(self): - translator = self.translator - if translator.annotator is not None: - translator.frozen = True - - standalone = self.standalone - - if standalone: - from pypy.translator.c.genc import CStandaloneBuilder as CBuilder - else: - from pypy.translator.c.genc import CExtModuleBuilder as CBuilder - cbuilder = CBuilder(self.translator, self.entry_point, - config=self.config) - cbuilder.stackless = self.config.translation.stackless - if not standalone: # xxx more messy - cbuilder.modulename = self.extmod_name - database = cbuilder.build_database() - self.log.info("database for generating C source was created") - self.cbuilder = cbuilder - self.database = database - # - task_database_c = taskdef(task_database_c, - [STACKCHECKINSERTION, '?'+BACKENDOPT, RTYPE, '?annotate'], - "Creating database for generating c source", - earlycheck = possibly_check_for_boehm) - - def task_source_c(self): # xxx messy - translator = self.translator - cbuilder = self.cbuilder - database = self.database - c_source_filename = cbuilder.generate_source(database) - self.log.info("written: %s" % (c_source_filename,)) - self.c_source_filename = str(c_source_filename) - # - task_source_c = taskdef(task_source_c, ['database_c'], "Generating c source") - - def task_compile_c(self): # xxx messy - cbuilder = self.cbuilder - cbuilder.compile() - - if self.standalone: - self.c_entryp = cbuilder.executable_name - self.create_exe() - else: - self.c_entryp = cbuilder.get_entry_point() - # - task_compile_c = taskdef(task_compile_c, ['source_c'], "Compiling c source") - - - def task_run_c(self): - self.backend_run('c') - # - task_run_c = taskdef(task_run_c, ['compile_c'], - "Running compiled c source", - idemp=True) - - def task_llinterpret_lltype(self): - from pypy.rpython.llinterp import LLInterpreter - py.log.setconsumer("llinterp operation", None) - - translator = self.translator - interp = LLInterpreter(translator.rtyper) - bk = translator.annotator.bookkeeper - graph = bk.getdesc(self.entry_point).getuniquegraph() - v = interp.eval_graph(graph, - self.extra.get('get_llinterp_args', - lambda: [])()) - - log.llinterpret.event("result -> %s" % v) - # - task_llinterpret_lltype = taskdef(task_llinterpret_lltype, - [STACKCHECKINSERTION, '?'+BACKENDOPT, RTYPE], - "LLInterpreting") - - def task_source_llvm(self): - translator = self.translator - if translator.annotator is None: - raise ValueError, "llvm requires annotation." 
- - from pypy.translator.llvm import genllvm - - self.llvmgen = genllvm.GenLLVM(translator, self.standalone) - - llvm_filename = self.llvmgen.gen_source(self.entry_point) - self.log.info("written: %s" % (llvm_filename,)) - # - task_source_llvm = taskdef(task_source_llvm, - [STACKCHECKINSERTION, BACKENDOPT, RTYPE], - "Generating llvm source") - - def task_compile_llvm(self): - gen = self.llvmgen - if self.standalone: - exe_name = (self.exe_name or 'testing') % self.get_info() - self.c_entryp = gen.compile_standalone(exe_name) - self.create_exe() - else: - self.c_module, self.c_entryp = gen.compile_module() - # - task_compile_llvm = taskdef(task_compile_llvm, - ['source_llvm'], - "Compiling llvm source") - - def task_run_llvm(self): - self.backend_run('llvm') - # - task_run_llvm = taskdef(task_run_llvm, ['compile_llvm'], - "Running compiled llvm source", - idemp=True) - - def task_source_js(self): - from pypy.translator.js.js import JS - self.gen = JS(self.translator, functions=[self.entry_point], - stackless=self.config.translation.stackless) - filename = self.gen.write_source() - self.log.info("Wrote %s" % (filename,)) - task_source_js = taskdef(task_source_js, - [OOTYPE], - 'Generating Javascript source') - - def task_compile_js(self): - pass - task_compile_js = taskdef(task_compile_js, ['source_js'], - 'Skipping Javascript compilation') - - def task_run_js(self): - pass - task_run_js = taskdef(task_run_js, ['compile_js'], - 'Please manually run the generated code') - - def task_source_cli(self): - from pypy.translator.cli.gencli import GenCli - from pypy.translator.cli.entrypoint import get_entrypoint - - if self.entry_point is not None: # executable mode - entry_point_graph = self.translator.graphs[0] - entry_point = get_entrypoint(entry_point_graph) - else: - # library mode - assert self.libdef is not None - bk = self.translator.annotator.bookkeeper - entry_point = self.libdef.get_entrypoint(bk) - - self.gen = GenCli(udir, self.translator, entry_point, config=self.config) - filename = self.gen.generate_source() - self.log.info("Wrote %s" % (filename,)) - task_source_cli = taskdef(task_source_cli, ["?" + OOBACKENDOPT, OOTYPE], - 'Generating CLI source') - - def task_compile_cli(self): - from pypy.translator.oosupport.support import unpatch_os - from pypy.translator.cli.test.runtest import CliFunctionWrapper - filename = self.gen.build_exe() - self.c_entryp = CliFunctionWrapper(filename) - # restore original os values - if hasattr(self, 'old_cli_defs'): - unpatch_os(self.old_cli_defs) - - self.log.info("Compiled %s" % filename) - if self.standalone and self.exe_name: - self.copy_cli_exe() - task_compile_cli = taskdef(task_compile_cli, ['source_cli'], - 'Compiling CLI source') - - def task_run_cli(self): - pass - task_run_cli = taskdef(task_run_cli, ['compile_cli'], - 'XXX') - - def task_source_jvm(self): - from pypy.translator.jvm.genjvm import GenJvm - from pypy.translator.jvm.node import EntryPoint - - entry_point_graph = self.translator.graphs[0] - is_func = not self.standalone - entry_point = EntryPoint(entry_point_graph, is_func, is_func) - self.gen = GenJvm(udir, self.translator, entry_point) - self.jvmsource = self.gen.generate_source() - self.log.info("Wrote JVM code") - task_source_jvm = taskdef(task_source_jvm, ["?" 
+ OOBACKENDOPT, OOTYPE], - 'Generating JVM source') - - def task_compile_jvm(self): - from pypy.translator.oosupport.support import unpatch_os - from pypy.translator.jvm.test.runtest import JvmGeneratedSourceWrapper - self.jvmsource.compile() - self.c_entryp = JvmGeneratedSourceWrapper(self.jvmsource) - # restore original os values - if hasattr(self, 'old_cli_defs'): - unpatch_os(self.old_cli_defs) - self.log.info("Compiled JVM source") - if self.standalone and self.exe_name: - self.copy_jvm_jar() - task_compile_jvm = taskdef(task_compile_jvm, ['source_jvm'], - 'Compiling JVM source') - - def task_run_jvm(self): - pass - task_run_jvm = taskdef(task_run_jvm, ['compile_jvm'], - 'XXX') - - def proceed(self, goals): - if not goals: - if self.default_goal: - goals = [self.default_goal] - else: - self.log.info("nothing to do") - return - elif isinstance(goals, str): - goals = [goals] - goals.extend(self.extra_goals) - goals = self.backend_select_goals(goals) - return self._execute(goals, task_skip = self._maybe_skip()) - - def from_targetspec(targetspec_dic, config=None, args=None, - empty_translator=None, - disable=[], - default_goal=None): - if args is None: - args = [] - - driver = TranslationDriver(config=config, default_goal=default_goal, - disable=disable) - # patch some attributes of the os module to make sure they - # have the same value on every platform. - backend, ts = driver.get_backend_and_type_system() - if backend in ('cli', 'jvm'): - from pypy.translator.oosupport.support import patch_os - driver.old_cli_defs = patch_os() - - target = targetspec_dic['target'] - spec = target(driver, args) - - try: - entry_point, inputtypes, policy = spec - except ValueError: - entry_point, inputtypes = spec - policy = None - - driver.setup(entry_point, inputtypes, - policy=policy, - extra=targetspec_dic, - empty_translator=empty_translator) - - return driver - - from_targetspec = staticmethod(from_targetspec) - - def prereq_checkpt_rtype(self): - assert 'pypy.rpython.rmodel' not in sys.modules, ( - "cannot fork because the rtyper has already been imported") - prereq_checkpt_rtype_lltype = prereq_checkpt_rtype - prereq_checkpt_rtype_ootype = prereq_checkpt_rtype Deleted: trunk/scipy/sandbox/mkufunc/interactive.py =================================================================== --- trunk/scipy/sandbox/mkufunc/interactive.py 2008-07-01 00:32:12 UTC (rev 4507) +++ trunk/scipy/sandbox/mkufunc/interactive.py 2008-07-01 00:35:15 UTC (rev 4508) @@ -1,115 +0,0 @@ -import driver - -from pypy.translator.translator import TranslationContext - - -DEFAULTS = { - 'translation.backend': None, - 'translation.type_system': None, - 'translation.verbose': False, -} - -class Translation(object): - - def __init__(self, entry_point, argtypes=None, **kwds): - self.driver = driver.TranslationDriver(overrides=DEFAULTS) - self.config = self.driver.config - - self.entry_point = entry_point - self.context = TranslationContext(config=self.config) - - # hook into driver events - driver_own_event = self.driver._event - def _event(kind, goal, func): - self.driver_event(kind, goal, func) - driver_own_event(kind, goal, func) - self.driver._event = _event - self.driver_setup = False - - self.update_options(argtypes, kwds) - # for t.view() to work just after construction - graph = self.context.buildflowgraph(entry_point) - self.context._prebuilt_graphs[entry_point] = graph - - def driver_event(self, kind, goal, func): - if kind == 'pre': - self.ensure_setup() - - def ensure_setup(self, argtypes=None, policy=None, 
standalone=False):
-        if not self.driver_setup:
-            if standalone:
-                assert argtypes is None
-            else:
-                if argtypes is None:
-                    argtypes = []
-            self.driver.setup(self.entry_point, argtypes, policy,
-                              empty_translator=self.context)
-            self.ann_argtypes = argtypes
-            self.ann_policy = policy
-            self.driver_setup = True
-        else:
-            # check consistency
-            if standalone:
-                assert argtypes is None
-                assert self.ann_argtypes is None
-            elif argtypes is not None and argtypes != self.ann_argtypes:
-                raise Exception("inconsistent argtype supplied")
-            if policy is not None and policy != self.ann_policy:
-                raise Exception("inconsistent annotation policy supplied")
-
-    def update_options(self, argtypes, kwds):
-        if argtypes or kwds.get('policy') or kwds.get('standalone'):
-            self.ensure_setup(argtypes, kwds.get('policy'),
-                              kwds.get('standalone'))
-        kwds.pop('policy', None)
-        kwds.pop('standalone', None)
-        self.config.translation.set(**kwds)
-
-    def ensure_opt(self, name, value=None, fallback=None):
-        if value is not None:
-            self.update_options(None, {name: value})
-            return value
-        val = getattr(self.config.translation, name, None)
-        if fallback is not None and val is None:
-            self.update_options(None, {name: fallback})
-            return fallback
-        if val is not None:
-            return val
-        raise Exception(
-            "the %r option should have been specified at this point" %name)
-
-    def ensure_type_system(self, type_system=None):
-        if self.config.translation.backend is not None:
-            return self.ensure_opt('type_system')
-        return self.ensure_opt('type_system', type_system, 'lltype')
-
-    def ensure_backend(self, backend=None):
-        backend = self.ensure_opt('backend', backend)
-        self.ensure_type_system()
-        return backend
-
-    # backend independent
-
-    def annotate(self, argtypes=None, **kwds):
-        self.update_options(argtypes, kwds)
-        return self.driver.annotate()
-
-    # type system dependent
-
-    def rtype(self, argtypes=None, **kwds):
-        self.update_options(argtypes, kwds)
-        ts = self.ensure_type_system()
-        return getattr(self.driver, 'rtype_'+ts)()
-
-    # backend dependent
-
-    def source(self, argtypes=None, **kwds):
-        self.update_options(argtypes, kwds)
-        backend = self.ensure_backend()
-        self.driver.source_c()
-
-    def compile(self, argtypes=None, **kwds):
-        self.update_options(argtypes, kwds)
-        backend = self.ensure_backend()
-        self.driver.compile_c()
-        return self.driver.c_entryp
Deleted: trunk/scipy/sandbox/mkufunc/pypy_head.h
===================================================================
--- trunk/scipy/sandbox/mkufunc/pypy_head.h	2008-07-01 00:32:12 UTC (rev 4507)
+++ trunk/scipy/sandbox/mkufunc/pypy_head.h	2008-07-01 00:35:15 UTC (rev 4508)
@@ -1,381 +0,0 @@
-
-#include 
-#include 
-#include 
-
-/* ================================================== g_prerequisite.h === */
-
-typedef unsigned char bool_t;
-
-/* ================================================== exception.h ======== */
-
-#define RPY_DEBUG_RETURN()  /* nothing */
-
-
-/* ================================================== int.h ============== */
-
-/*** unary operations ***/
-
-#define OP_INT_IS_TRUE(x,r)   OP_INT_NE(x,0,r)
-
-#define OP_INT_INVERT(x,r)    r = ~((x))
-
-#define OP_INT_NEG(x,r)    r = -(x)
-
-#define OP_INT_NEG_OVF(x,r) \
-    if ((x) == LONG_MIN) FAIL_OVF("integer negate"); \
-    OP_INT_NEG(x,r)
-#define OP_LLONG_NEG_OVF(x,r) \
-    if ((x) == LLONG_MIN) FAIL_OVF("integer negate"); \
-    OP_LLONG_NEG(x,r)
-
-#define OP_INT_ABS(x,r)    r = (x) >= 0 ? x : -(x)
-
-#define OP_INT_ABS_OVF(x,r) \
-    if ((x) == LONG_MIN) FAIL_OVF("integer absolute"); \
-    OP_INT_ABS(x,r)
-#define OP_LLONG_ABS_OVF(x,r) \
-    if ((x) == LLONG_MIN) FAIL_OVF("integer absolute"); \
-    OP_LLONG_ABS(x,r)
-
-/*** binary operations ***/
-
-#define OP_INT_EQ(x,y,r)      r = ((x) == (y))
-#define OP_INT_NE(x,y,r)      r = ((x) != (y))
-#define OP_INT_LE(x,y,r)      r = ((x) <= (y))
-#define OP_INT_GT(x,y,r)      r = ((x) >  (y))
-#define OP_INT_LT(x,y,r)      r = ((x) <  (y))
-#define OP_INT_GE(x,y,r)      r = ((x) >= (y))
-
-/* addition, subtraction */
-
-#define OP_INT_ADD(x,y,r)     r = (x) + (y)
-
-#define OP_INT_ADD_OVF(x,y,r) \
-    OP_INT_ADD(x,y,r); \
-    if ((r^(x)) >= 0 || (r^(y)) >= 0); \
-    else FAIL_OVF("integer addition")
-
-#define OP_INT_ADD_NONNEG_OVF(x,y,r)  /* y can be assumed >= 0 */ \
-    OP_INT_ADD(x,y,r); \
-    if (r >= (x)); \
-    else FAIL_OVF("integer addition")
-/* XXX can a C compiler be too clever and think it can "prove" that
- * r >= x always hold above? */
-
-#define OP_INT_SUB(x,y,r)     r = (x) - (y)
-
-#define OP_INT_SUB_OVF(x,y,r) \
-    OP_INT_SUB(x,y,r); \
-    if ((r^(x)) >= 0 || (r^~(y)) >= 0); \
-    else FAIL_OVF("integer subtraction")
-
-#define OP_INT_MUL(x,y,r)     r = (x) * (y)
-
-#if defined(HAVE_LONG_LONG) && SIZE_OF_LONG_LONG < SIZE_OF_LONG
-#  define OP_INT_MUL_OVF_LL      1
-#else
-#  define OP_INT_MUL_OVF_LL      0
-#endif
-
-#if !OP_INT_MUL_OVF_LL
-
-#define OP_INT_MUL_OVF(x,y,r) \
-    if (op_int_mul_ovf(x,y,&r)); \
-    else FAIL_OVF("integer multiplication")
-
-#else
-
-#define OP_INT_MUL_OVF(x,y,r) \
-    { \
-        PY_LONG_LONG lr = (PY_LONG_LONG)(x) * (PY_LONG_LONG)(y); \
-        r = (long)lr; \
-        if ((PY_LONG_LONG)r == lr); \
-        else FAIL_OVF("integer multiplication"); \
-    }
-#endif
-
-/* shifting */
-
-/* NB. shifting has same limitations as C: the shift count must be
-       >= 0 and < LONG_BITS. */
-#define OP_INT_RSHIFT(x,y,r)    r = Py_ARITHMETIC_RIGHT_SHIFT(long, x, y)
-#define OP_UINT_RSHIFT(x,y,r)   r = (x) >> (y)
-#define OP_LLONG_RSHIFT(x,y,r)  r = Py_ARITHMETIC_RIGHT_SHIFT(PY_LONG_LONG,x,y)
-#define OP_ULLONG_RSHIFT(x,y,r) r = (x) >> (y)
-
-#define OP_INT_LSHIFT(x,y,r)    r = (x) << (y)
-#define OP_UINT_LSHIFT(x,y,r)   r = (x) << (y)
-#define OP_LLONG_LSHIFT(x,y,r)  r = (x) << (y)
-#define OP_ULLONG_LSHIFT(x,y,r) r = (x) << (y)
-
-#define OP_INT_LSHIFT_OVF(x,y,r) \
-    OP_INT_LSHIFT(x,y,r); \
-    if ((x) != Py_ARITHMETIC_RIGHT_SHIFT(long, r, (y))) \
-        FAIL_OVF("x<<y losing bits or changing sign")
-
-#define OP_INT_RSHIFT_VAL(x,y,r) \
-    if ((y) >= 0) { OP_INT_RSHIFT(x,y,r); } \
-    else FAIL_VAL("negative shift count")
-#define OP_LLONG_RSHIFT_VAL(x,y,r) \
-    if ((y) >= 0) { OP_LLONG_RSHIFT(x,y,r); } \
-    else FAIL_VAL("negative shift count")
-
-#define OP_INT_LSHIFT_VAL(x,y,r) \
-    if ((y) >= 0) { OP_INT_LSHIFT(x,y,r); } \
-    else FAIL_VAL("negative shift count")
-#define OP_LLONG_LSHIFT_VAL(x,y,r) \
-    if ((y) >= 0) { OP_LLONG_LSHIFT(x,y,r); } \
-    else FAIL_VAL("negative shift count")
-
-#define OP_INT_LSHIFT_OVF_VAL(x,y,r) \
-    if ((y) >= 0) { OP_INT_LSHIFT_OVF(x,y,r); } \
-    else FAIL_VAL("negative shift count")
-
-/* pff */
-#define OP_UINT_LSHIFT_VAL(x,y,r) \
-    if ((y) >= 0) { OP_UINT_LSHIFT(x,y,r); } \
-    else FAIL_VAL("negative shift count")
-#define OP_ULLONG_LSHIFT_VAL(x,y,r) \
-    if ((y) >= 0) { OP_ULLONG_LSHIFT(x,y,r); } \
-    else FAIL_VAL("negative shift count")
-
-#define OP_UINT_RSHIFT_VAL(x,y,r) \
-    if ((y) >= 0) { OP_UINT_RSHIFT(x,y,r); } \
-    else FAIL_VAL("negative shift count")
-#define OP_ULLONG_RSHIFT_VAL(x,y,r) \
-    if ((y) >= 0) { OP_ULLONG_RSHIFT(x,y,r); } \
-    else FAIL_VAL("negative shift count")
-
-
-/* floor division */
-
-#define OP_INT_FLOORDIV(x,y,r)    r = (x) / (y)
-#define OP_UINT_FLOORDIV(x,y,r)   r = (x) / (y)
-#define OP_LLONG_FLOORDIV(x,y,r)  r = (x) / (y)
-#define OP_ULLONG_FLOORDIV(x,y,r) r = (x) / (y)
-
-#define OP_INT_FLOORDIV_OVF(x,y,r) \
-    if ((y) == -1 && (x) == LONG_MIN) \
-        { FAIL_OVF("integer division"); } \
-    else OP_INT_FLOORDIV(x,y,r)
-
-#define OP_INT_FLOORDIV_ZER(x,y,r) \
-    if ((y)) { OP_INT_FLOORDIV(x,y,r); } \
-    else FAIL_ZER("integer division")
-#define OP_UINT_FLOORDIV_ZER(x,y,r) \
-    if ((y)) { OP_UINT_FLOORDIV(x,y,r); } \
-    else FAIL_ZER("unsigned integer division")
-#define OP_LLONG_FLOORDIV_ZER(x,y,r) \
-    if ((y)) { OP_LLONG_FLOORDIV(x,y,r); } \
-    else FAIL_ZER("integer division")
-#define OP_ULLONG_FLOORDIV_ZER(x,y,r) \
-    if ((y)) { OP_ULLONG_FLOORDIV(x,y,r); } \
-    else FAIL_ZER("unsigned integer division")
-
-#define OP_INT_FLOORDIV_OVF_ZER(x,y,r) \
-    if ((y)) { OP_INT_FLOORDIV_OVF(x,y,r); } \
-    else FAIL_ZER("integer division")
-
-/* modulus */
-
-#define OP_INT_MOD(x,y,r)     r = (x) % (y)
-#define OP_UINT_MOD(x,y,r)    r = (x) % (y)
-#define OP_LLONG_MOD(x,y,r)   r = (x) % (y)
-#define OP_ULLONG_MOD(x,y,r)  r = (x) % (y)
-
-#define OP_INT_MOD_OVF(x,y,r) \
-    if ((y) == -1 && (x) == LONG_MIN) \
-        { FAIL_OVF("integer modulo"); }\
-    else OP_INT_MOD(x,y,r)
-
-#define OP_INT_MOD_ZER(x,y,r) \
-    if ((y)) { OP_INT_MOD(x,y,r); } \
-    else FAIL_ZER("integer modulo")
-#define OP_UINT_MOD_ZER(x,y,r) \
-    if ((y)) { OP_UINT_MOD(x,y,r); } \
-    else FAIL_ZER("unsigned integer modulo")
-#define OP_LLONG_MOD_ZER(x,y,r) \
-    if ((y)) { OP_LLONG_MOD(x,y,r); } \
-    else FAIL_ZER("integer modulo")
-#define OP_ULLONG_MOD_ZER(x,y,r) \
-    if ((y)) { OP_ULLONG_MOD(x,y,r); } \
-    else FAIL_ZER("integer modulo")
-
-#define OP_INT_MOD_OVF_ZER(x,y,r) \
-    if ((y)) { OP_INT_MOD_OVF(x,y,r); } \
-    else FAIL_ZER("integer modulo")
-
-/* bit operations */
-
-#define 
OP_INT_AND(x,y,r) r = (x) & (y) -#define OP_INT_OR( x,y,r) r = (x) | (y) -#define OP_INT_XOR(x,y,r) r = (x) ^ (y) - -/*** conversions ***/ - -#define OP_CAST_BOOL_TO_INT(x,r) r = (long)(x) -#define OP_CAST_BOOL_TO_UINT(x,r) r = (unsigned long)(x) -#define OP_CAST_UINT_TO_INT(x,r) r = (long)(x) -#define OP_CAST_INT_TO_UINT(x,r) r = (unsigned long)(x) -#define OP_CAST_INT_TO_LONGLONG(x,r) r = (long long)(x) -#define OP_CAST_CHAR_TO_INT(x,r) r = (long)((unsigned char)(x)) -#define OP_CAST_INT_TO_CHAR(x,r) r = (char)(x) -#define OP_CAST_PTR_TO_INT(x,r) r = (long)(x) /* XXX */ - -#define OP_TRUNCATE_LONGLONG_TO_INT(x,r) r = (long)(x) - -#define OP_CAST_UNICHAR_TO_INT(x,r) r = (long)((unsigned long)(x)) /*?*/ -#define OP_CAST_INT_TO_UNICHAR(x,r) r = (unsigned int)(x) - -/* bool operations */ - -#define OP_BOOL_NOT(x, r) r = !(x) - -/* _________________ certain implementations __________________ */ - -#if !OP_INT_MUL_OVF_LL -/* adjusted from intobject.c, Python 2.3.3 */ - -/* prototypes */ - -int op_int_mul_ovf(long a, long b, long *longprod); - -/* implementations */ - -#ifndef PYPY_NOT_MAIN_FILE - -int -op_int_mul_ovf(long a, long b, long *longprod) -{ - double doubled_longprod; /* (double)longprod */ - double doubleprod; /* (double)a * (double)b */ - - *longprod = a * b; - doubleprod = (double)a * (double)b; - doubled_longprod = (double)*longprod; - - /* Fast path for normal case: small multiplicands, and no info - is lost in either method. */ - if (doubled_longprod == doubleprod) - return 1; - - /* Somebody somewhere lost info. Close enough, or way off? Note - that a != 0 and b != 0 (else doubled_longprod == doubleprod == 0). - The difference either is or isn't significant compared to the - true value (of which doubleprod is a good approximation). - */ - { - const double diff = doubled_longprod - doubleprod; - const double absdiff = diff >= 0.0 ? diff : -diff; - const double absprod = doubleprod >= 0.0 ? 
doubleprod : - -doubleprod; - /* absdiff/absprod <= 1/32 iff - 32 * absdiff <= absprod -- 5 good bits is "close enough" */ - if (32.0 * absdiff <= absprod) - return 1; - return 0; - } -} - -#endif /* PYPY_NOT_MAIN_FILE */ - -#endif /* !OP_INT_MUL_OVF_LL */ - -/* implementations */ - -#define OP_UINT_IS_TRUE OP_INT_IS_TRUE -#define OP_UINT_INVERT OP_INT_INVERT -#define OP_UINT_ADD OP_INT_ADD -#define OP_UINT_SUB OP_INT_SUB -#define OP_UINT_MUL OP_INT_MUL -#define OP_UINT_LT OP_INT_LT -#define OP_UINT_LE OP_INT_LE -#define OP_UINT_EQ OP_INT_EQ -#define OP_UINT_NE OP_INT_NE -#define OP_UINT_GT OP_INT_GT -#define OP_UINT_GE OP_INT_GE -#define OP_UINT_AND OP_INT_AND -#define OP_UINT_OR OP_INT_OR -#define OP_UINT_XOR OP_INT_XOR - -#define OP_LLONG_IS_TRUE OP_INT_IS_TRUE -#define OP_LLONG_NEG OP_INT_NEG -#define OP_LLONG_ABS OP_INT_ABS -#define OP_LLONG_INVERT OP_INT_INVERT - -#define OP_LLONG_ADD OP_INT_ADD -#define OP_LLONG_SUB OP_INT_SUB -#define OP_LLONG_MUL OP_INT_MUL -#define OP_LLONG_LT OP_INT_LT -#define OP_LLONG_LE OP_INT_LE -#define OP_LLONG_EQ OP_INT_EQ -#define OP_LLONG_NE OP_INT_NE -#define OP_LLONG_GT OP_INT_GT -#define OP_LLONG_GE OP_INT_GE -#define OP_LLONG_AND OP_INT_AND -#define OP_LLONG_OR OP_INT_OR -#define OP_LLONG_XOR OP_INT_XOR - -#define OP_ULLONG_IS_TRUE OP_LLONG_IS_TRUE -#define OP_ULLONG_INVERT OP_LLONG_INVERT -#define OP_ULLONG_ADD OP_LLONG_ADD -#define OP_ULLONG_SUB OP_LLONG_SUB -#define OP_ULLONG_MUL OP_LLONG_MUL -#define OP_ULLONG_LT OP_LLONG_LT -#define OP_ULLONG_LE OP_LLONG_LE -#define OP_ULLONG_EQ OP_LLONG_EQ -#define OP_ULLONG_NE OP_LLONG_NE -#define OP_ULLONG_GT OP_LLONG_GT -#define OP_ULLONG_GE OP_LLONG_GE -#define OP_ULLONG_AND OP_LLONG_AND -#define OP_ULLONG_OR OP_LLONG_OR -#define OP_ULLONG_XOR OP_LLONG_XOR - -/* ================================================== float.h ============ */ - -/*** unary operations ***/ - -#define OP_FLOAT_IS_TRUE(x,r) OP_FLOAT_NE(x,0.0,r) -#define OP_FLOAT_NEG(x,r) r = -x -#define OP_FLOAT_ABS(x,r) r = fabs(x) - -/*** binary operations ***/ - -#define OP_FLOAT_EQ(x,y,r) r = (x == y) -#define OP_FLOAT_NE(x,y,r) r = (x != y) -#define OP_FLOAT_LE(x,y,r) r = (x <= y) -#define OP_FLOAT_GT(x,y,r) r = (x > y) -#define OP_FLOAT_LT(x,y,r) r = (x < y) -#define OP_FLOAT_GE(x,y,r) r = (x >= y) - -#define OP_FLOAT_CMP(x,y,r) \ - r = ((x > y) - (x < y)) - -/* addition, subtraction */ - -#define OP_FLOAT_ADD(x,y,r) r = x + y -#define OP_FLOAT_SUB(x,y,r) r = x - y -#define OP_FLOAT_MUL(x,y,r) r = x * y -#define OP_FLOAT_TRUEDIV(x,y,r) r = x / y -#define OP_FLOAT_POW(x,y,r) r = pow(x, y) - -/*** conversions ***/ - -#define OP_CAST_FLOAT_TO_INT(x,r) r = (long)(x) -#define OP_CAST_FLOAT_TO_UINT(x,r) r = (unsigned long)(x) -#define OP_CAST_INT_TO_FLOAT(x,r) r = (double)(x) -#define OP_CAST_UINT_TO_FLOAT(x,r) r = (double)(x) -#define OP_CAST_LONGLONG_TO_FLOAT(x,r) r = (double)(x) -#define OP_CAST_BOOL_TO_FLOAT(x,r) r = (double)(x) - -#ifdef HAVE_LONG_LONG -#define OP_CAST_FLOAT_TO_LONGLONG(x,r) r = (long long)(x) -#endif - -/* ================================================== support.h ========== */ - -#define RPyField(ptr, name) NULL - Deleted: trunk/scipy/sandbox/mkufunc/test_func_hash.py =================================================================== --- trunk/scipy/sandbox/mkufunc/test_func_hash.py 2008-07-01 00:32:12 UTC (rev 4507) +++ trunk/scipy/sandbox/mkufunc/test_func_hash.py 2008-07-01 00:35:15 UTC (rev 4508) @@ -1,47 +0,0 @@ -import unittest - -from mkufunc import func_hash - - -class Tests(unittest.TestCase): - - def 
test_simple(self): - - def f(x): - return 2.5 * x * x + 4.7 * x - - self.assertEqual(func_hash(f), - '5f12e97debf1d2cb9e0a2f92e045b1fb') - - - def test_extra(self): - - def f(x): - return 2.5 * x * x + 4.7 * x - - self.assertEqual(func_hash(f, salt=[(int, int), (float, float)]), - 'e637d9825ef20cb56d364041118ca72e') - - def test_const(self): - - def add_a(b): - return a + b # a in globals - - self.assertEqual(func_hash(add_a), - '9ff237f372bf233470ce940edd58f60d') - - def test_inner(self): - - def foo(x): - inner1 = lambda t: t/3.0 - def inner2(n): - return n + 3 - return inner1(x) + inner2(int(x)) - - #func_hash(foo, verbose=1) - self.assertEqual(func_hash(foo), - '814c113dfc77e7ebb52915dd3ce9c37a') - - -if __name__ == '__main__': - unittest.main() Deleted: trunk/scipy/sandbox/mkufunc/test_mkufunc.py =================================================================== --- trunk/scipy/sandbox/mkufunc/test_mkufunc.py 2008-07-01 00:32:12 UTC (rev 4507) +++ trunk/scipy/sandbox/mkufunc/test_mkufunc.py 2008-07-01 00:35:15 UTC (rev 4508) @@ -1,270 +0,0 @@ -import math -import unittest - -from numpy import array, arange, allclose - -from mkufunc import Cfunc, genufunc, mkufunc - - -class Util: - - def assertClose(self, x, y): - self.assert_(allclose(x, y), '%s != %s' % (x, y)) - - -class Internal_Tests(unittest.TestCase, Util): - - def test_Cfunc(self): - def sqr(x): - return x * x - cf = Cfunc(sqr, [int, int], 42) - self.assertEqual(cf.nin, 1) - self.assertEqual(cf.nout, 1) - self.assertEqual(cf.cname, 'f42_pypy_g_sqr') - - def test_genufunc(self): - def foo(x): - return x + 17 - uf = genufunc(foo, [ - (float, float), - (int, int), - ]) - self.assertEqual(uf(4), 21) - x = array([1.1, 2.3]) - y = uf(x) - self.assertClose(y, [18.1, 19.3]) - self.assert_(str(y.dtype).startswith('float')) - - x = array([1, 4]) - y = uf(x) - self.assertEqual(list(y), [18, 21]) - self.assert_(str(y.dtype).startswith('int')) - - -class Arg_Tests(unittest.TestCase, Util): - - def check_ufunc(self, f): - for arg in (array([0.0, 1.0, 2.5]), - [0.0, 1.0, 2.5], - (0.0, 1.0, 2.5)): - self.assertClose(f(arg), [0.0, 1.0, 6.25]) - - self.assertEqual(f(3), 9) - self.assert_(f(-2.5) - 6.25 < 1E-10) - - def test_direct(self): - @mkufunc - def f(x): - return x * x - self.check_ufunc(f) - - def test_noargs(self): - @mkufunc() - def f(x): - return x * x - self.check_ufunc(f) - - def test_varargs(self): - for arg in (float, - [float], - [(float, float)]): - @mkufunc(arg) - def f(x): - return x * x - self.check_ufunc(f) - - def test_int(self): - @mkufunc(int) - def f(x): - return x * x - self.assertEqual(f(3), 9) - self.assert_(isinstance(f(42), int)) - - def test_mixed(self): - @mkufunc([(int, float, int), float]) - def f(n, x): - return n + x * x - - y = f(2, 3.9) # Note that int(2 + 3.9 * 3.9) = 17 - self.assertEqual(y, 17) - self.assert_(isinstance(y, int)) - - y = f(2.0, 3.9) - self.assertClose(y, 17.21) - self.assert_(isinstance(y, float)) - - def test_exceptions(self): - def f(x): - return x - - self.assertRaises(TypeError, mkufunc, {}) - self.assertRaises(TypeError, mkufunc([(float,)]), f) - self.assertRaises(TypeError, mkufunc([3*(float,)]), f) - self.assertRaises(TypeError, mkufunc([{}]), f) - self.assertRaises(TypeError, mkufunc([(int, {})]), f) - self.assertRaises(ValueError, mkufunc([]), f) - - -class Math_Tests(unittest.TestCase, Util): - - def assertFuncsEqual(self, uf, f): - x = 0.4376 - a = uf(x) - b = f(x) - self.assertClose(a, b) - xx = arange(0.1, 0.9, 0.01) - a = uf(xx) - b = [f(x) for x in xx] - self.assertClose(a, 
b) - - def test_exp(self): - @mkufunc - def f(x): return math.exp(x) - self.assertFuncsEqual(f, math.exp) - - def test_log(self): - @mkufunc - def f(x): return math.log(x) - self.assertFuncsEqual(f, math.log) - - def test_sqrt(self): - @mkufunc - def f(x): return math.sqrt(x) - self.assertFuncsEqual(f, math.sqrt) - - def test_cos(self): - @mkufunc - def f(x): return math.cos(x) - self.assertFuncsEqual(f, math.cos) - - def test_sin(self): - @mkufunc - def f(x): return math.sin(x) - self.assertFuncsEqual(f, math.sin) - - def test_tan(self): - @mkufunc - def f(x): return math.tan(x) - self.assertFuncsEqual(f, math.tan) - - def test_acos(self): - @mkufunc - def f(x): return math.acos(x) - self.assertFuncsEqual(f, math.acos) - - def test_asin(self): - @mkufunc - def f(x): return math.asin(x) - self.assertFuncsEqual(f, math.asin) - - def test_atan(self): - @mkufunc - def f(x): return math.atan(x) - self.assertFuncsEqual(f, math.atan) - - def test_atan2(self): - @mkufunc - def f(x, y): - return math.atan2(x, y) - - self.assertClose(f(4, 5), math.atan2(4, 5)) - - xx = array([1.0, 3.0, -2.4, 3.1, -2.3]) - yy = array([1.0, 2.0, 7.5, -8.7, 0.0]) - a = f(xx, yy) - b = [math.atan2(x, y) for x, y in zip(xx, yy)] - self.assertClose(a, b) - - def test_arithmetic(self): - def f(x): - return (4 * x + 2) / (x * x - 7 * x + 1) - uf = mkufunc(f) - x = arange(0, 2, 0.1) - self.assertClose(uf(x), f(x)) - - -class FuncArg_Tests(unittest.TestCase, Util): - - def test_fargs0(self): - def f(): - return 42 - - uf = mkufunc(f) - self.assertEqual(uf(), 42) - self.assert_(type(uf()).__name__.startswith('float')) - - uf = mkufunc(int)(f) - self.assertEqual(uf(), 42) - self.assert_(type(uf()).__name__.startswith('int')) - - -class Control_Flow_Tests(unittest.TestCase): - - def test_if(self): - @mkufunc(int) - def f(n): - if n < 4: - return n - else: - return n * n - - self.assertEqual(f(3), 3) - self.assertEqual(f(4), 16) - - def test_switch(self): - @mkufunc(int) - def f(n): - if n < 4: - return n - elif n == 4: - return 42 - elif n == 5: - return 73 - else: - return n * n - - self.assertEqual(f(3), 3) - self.assertEqual(f(4), 42) - self.assertEqual(f(5), 73) - self.assertEqual(f(6), 36) - - def test_loop(self): - @mkufunc(int) - def f(n): - res = 0 - for i in xrange(n): - res += i*i - return res - - self.assertEqual(f(3), 5) - self.assertEqual(f(95), 281295) - - -class FreeVariable_Tests(unittest.TestCase, Util): - - def test_const(self): - a = 13.6 - @mkufunc - def f(x): - return a * x - - x = arange(0, 1, 0.1) - self.assertClose(f(x), a * x) - - def test_const2(self): - from math import sin, pi, sqrt - @mkufunc - def sin_deg(angle): - return sin(angle / 180.0 * pi) - - self.assertClose(sin_deg([0, 30, 45, 60, 90, 180, 270, 360]), - [0, 0.5, 1/sqrt(2), sqrt(3)/2, 1, 0, -1, 0]) - - -class Misc_Tests(unittest.TestCase, Util): - - pass - - -if __name__ == '__main__': - unittest.main() Deleted: trunk/scipy/sandbox/mkufunc/test_speed.py =================================================================== --- trunk/scipy/sandbox/mkufunc/test_speed.py 2008-07-01 00:32:12 UTC (rev 4507) +++ trunk/scipy/sandbox/mkufunc/test_speed.py 2008-07-01 00:35:15 UTC (rev 4508) @@ -1,49 +0,0 @@ -#!/usr/bin/env python -from math import sin, cos -import time - -from numpy import arange, vectorize, allclose -from scipy import weave - -from mkufunc import mkufunc - - -def f(x): - return 4.2 * x * x + 3.7 * x + 1.5 - - -vfunc = vectorize(f) - -ufunc = mkufunc([(float, float)])(f) - - -x = arange(0, 1000, 0.001) #print "x =", x, x.dtype - 
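# (scale of the benchmark below: x = arange(0, 1000, 0.001) holds 10**6
# doubles, so each timed call is a single pass over a million elements;
# every implementation -- weave.blitz, plain numpy, vectorize, and the
# mkufunc-compiled ufunc -- is bracketed with time.time(), and the
# "speedup over ..." figures are plain ratios such as b_time/u_time.)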
-start_time = time.time() -b_y = x.copy() -weave.blitz("b_y[:] = 4.2 * x[:] * x[:] + 3.7 * x[:] + 1.5") -b_time = time.time() - start_time -print 'blitz: %.6f sec' % b_time - -start_time = time.time() -n_y = f(x) -n_time = time.time() - start_time -print 'numpy: %.6f sec' % n_time - -start_time = time.time() -v_y = vfunc(x) -v_time = time.time() - start_time -print 'vectorize: %.6f sec' % v_time - -start_time = time.time() -u_y = ufunc(x) -u_time = time.time() - start_time -print 'mkufunc: %.6f sec' % u_time - -print "speedup over blitz:", b_time/u_time -print "speedup over numpy:", n_time/u_time -print "speedup over vectorize:", v_time/u_time - -assert allclose(b_y, n_y) -assert allclose(v_y, n_y) -assert allclose(u_y, n_y) From scipy-svn at scipy.org Mon Jun 30 20:36:02 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Mon, 30 Jun 2008 19:36:02 -0500 (CDT) Subject: [Scipy-svn] r4509 - trunk/scipy/sandbox/mkufunc Message-ID: <20080701003602.CAFE539C4BD@scipy.org> Author: ilan Date: 2008-06-30 19:36:02 -0500 (Mon, 30 Jun 2008) New Revision: 4509 Removed: trunk/scipy/sandbox/mkufunc/mkufunc.py Log: Moved into mkufunc/ Deleted: trunk/scipy/sandbox/mkufunc/mkufunc.py =================================================================== --- trunk/scipy/sandbox/mkufunc/mkufunc.py 2008-07-01 00:35:15 UTC (rev 4508) +++ trunk/scipy/sandbox/mkufunc/mkufunc.py 2008-07-01 00:36:02 UTC (rev 4509) @@ -1,337 +0,0 @@ -""" mkufunc (make U function) - - -Author: Ilan Schnell (with help from Travis Oliphant and Eric Jones) -""" -import sys -import re -import os, os.path -import cStringIO -import hashlib -from types import FunctionType - -import numpy -from scipy import weave - - -verbose = 0 - -def func_hash(f, salt=None): - """ Return a MD5 hash for a function object as string. - """ - co = f.func_code - return hashlib.md5(co.co_code + repr(co.co_names) + repr(salt) - ).hexdigest() - - -def translate(f, argtypes): - """ Return pypy's C output for a given function and argument types. - - The cache files are in weave's directory. - """ - cache_file_name = os.path.join(weave.catalog.default_dir(), - 'pypy_%s.c' % func_hash(f, salt=argtypes)) - try: - return open(cache_file_name).read() - - except IOError: - from interactive import Translation - - t = Translation(f, backend='c') - t.annotate(argtypes) - t.source() - - os.rename(t.driver.c_source_filename, cache_file_name) - - return translate(f, argtypes) - - -class Ctype: - def __init__(self, npy, c): - self.npy = npy - self.c = c - -typedict = { - int: Ctype('NPY_LONG', 'long' ), - float: Ctype('NPY_DOUBLE', 'double'), -} - - -class Cfunc(object): - """ C compiled python functions - - >>> def sqr(x): - ... 
return x * x
-
-    >>> signature = [int, int]    # only the input arguments are used here
-
-    compilation is done upon initialization
-    >>> x = Cfunc(sqr, signature, 123)
-
-    >>> x.nin           # number of input arguments
-    1
-    >>> x.nout          # number of output arguments (must be 1 for now)
-    1
-    >>> x.sig
-    [<type 'int'>, <type 'int'>]
-
-    Attributes:
-        f           -- the Python function object
-        n           -- id number
-        sig         -- signature
-        nin         -- number of input arguments
-        nout        -- number of output arguments
-        cname       -- name of the C function
-
-    Methods:
-
-        decl()      -- returns the C declaration for the function
-        cfunc()     -- returns the C function (as string)
-        ufunc_support_code()
-                    -- generate the C support code to make this
-                       function part work with PyUFuncGenericFunction
-    """
-    def __init__(self, f, signature, n):
-        self.f = f
-        self.n = n
-        self.sig = signature
-        self.nin = f.func_code.co_argcount
-        self.nout = len(self.sig) - self.nin
-        assert self.nout == 1                   # for now
-
-        src = translate(f, signature[:self.nin])
-
-        self._prefix = 'f%i_' % self.n
-        self._allCsrc = src.replace('pypy_', self._prefix + 'pypy_')
-        self.cname = self._prefix + 'pypy_g_' + f.__name__
-
-    def cfunc(self):
-        p = re.compile(r'^\w+[*\s\w]+' + self.cname +
-                       r'\s*\([^)]*\)\s*\{.*?[\n\r]\}[\n\r]',
-                       re.DOTALL | re.MULTILINE | re.VERBOSE)
-
-        found = p.findall(self._allCsrc)
-        assert len(found) == 1
-        res = found[0]
-        res = res.replace(self._prefix + 'pypy_g_ll_math_ll_math_', '')
-        return 'inline ' + res + '\n'
-
-    def ufunc_support_code(self):
-        # Unfortunately the code in here is very hard to read.
-        # In order to make the code clearer, one would need a real template
-        # engine like Cheetah (http://cheetahtemplate.org/).
-        # However, something like that would be too much overhead for scipy.
-        n = self.n
-        nin = self.nin
-        cname = self.cname
-
-        def varname(i):
-            return chr(i + ord('a'))
-
-        declargs = ', '.join('%s %s' % (typedict[self.sig[i]].c, varname(i))
-                             for i in xrange(self.nin))
-
-        args = ', '.join(varname(i) for i in xrange(self.nin))
-
-        isn_steps = '\n\t'.join('npy_intp is%i = steps[%i];' % (i, i)
-                                for i in xrange(self.nin))
-
-        ipn_args = '\n\t'.join('char *ip%i = args[%i];' % (i, i)
-                               for i in xrange(self.nin))
-
-        body1d_in = '\n\t\t'.join('%s *in%i = (%s *)ip%i;' %
-                                  (2*(typedict[self.sig[i]].c, i))
-                                  for i in xrange(self.nin))
-
-        body1d_add = '\n\t\t'.join('ip%i += is%i;' % (i, i)
-                                   for i in xrange(self.nin))
-
-        ptrargs = ', '.join('*in%i' % i for i in xrange(self.nin))
-
-        rettype = typedict[self.sig[-1]].c
-
-        return '''
-static %(rettype)s wrap_%(cname)s(%(declargs)s)
-{
-    return %(cname)s(%(args)s);
-}
-
-typedef %(rettype)s Func_%(n)i(%(declargs)s);
-
-static void
-PyUFunc_%(n)i(char **args, npy_intp *dimensions, npy_intp *steps, void *func)
-{
-    npy_intp i, n;
-    %(isn_steps)s
-    npy_intp os = steps[%(nin)s];
-    %(ipn_args)s
-    char *op = args[%(nin)s];
-    Func_%(n)i *f = (Func_%(n)i *) func;
-    n = dimensions[0];
-
-    for(i = 0; i < n; i++) {
-        %(body1d_in)s
-        %(rettype)s *out = (%(rettype)s *)op;
-
-        *out = (%(rettype)s) f(%(ptrargs)s);
-
-        %(body1d_add)s
-        op += os;
-    }
-}
-''' % locals()
-
-
-def support_code(cfuncs):
-    """ Given a list of Cfunc instances, return the support code for weave.
- """ - acc = cStringIO.StringIO() - - acc.write('/********************* start pypy_head.h **************/\n\n') - acc.write(open(os.path.join(os.path.dirname(__file__), - 'pypy_head.h')).read()) - acc.write('/********************** end pypy_head.h ****************/\n\n') - - for cf in cfuncs: - acc.write(cf.cfunc()) - acc.write(cf.ufunc_support_code()) - - fname = cfuncs[0].f.__name__ - - pyufuncs = ''.join('\tPyUFunc_%i,\n' % cf.n for cf in cfuncs) - - data = ''.join('\t(void *) wrap_%s,\n' % cf.cname for cf in cfuncs) - - types = ''.join('\t%s /* %i */\n' % - (''.join(typedict[t].npy + ', ' for t in cf.sig), cf.n) - for cf in cfuncs) - - acc.write(''' -static PyUFuncGenericFunction %(fname)s_functions[] = { -%(pyufuncs)s}; - -static void *%(fname)s_data[] = { -%(data)s}; - -static char %(fname)s_types[] = { -%(types)s}; -''' % locals()) - - if verbose: - print '------------------ start support_code -----------------' - print acc.getvalue() - print '------------------- end support_code ------------------' - - return acc.getvalue() - - -def code(f, signatures): - """ Return the code for weave. - """ - nin = f.func_code.co_argcount - ntypes = len(signatures) - fname = f.__name__ - fhash = func_hash(f) - - res = ''' -import_ufunc(); - -/**************************************************************************** -** function name: %(fname)s -** signatures: %(signatures)r -** fhash: %(fhash)s -*****************************************************************************/ - -return_val = PyUFunc_FromFuncAndData( - %(fname)s_functions, - %(fname)s_data, - %(fname)s_types, - %(ntypes)i, /* ntypes */ - %(nin)i, /* nin */ - 1, /* nout */ - PyUFunc_None, /* identity */ - "%(fname)s", /* name */ - "UFunc created by mkufunc", /* doc */ - 0); -''' % locals() - - if verbose: - print '---------------------- start code ---------------------' - print res - print '----------------------- end code ----------------------' - - return res - - -def genufunc(f, signatures): - """ Return the Ufunc Python object for given function and signatures. - """ - if len(signatures) == 0: - raise ValueError("At least one signature needed") - - signatures.sort(key=lambda sig: [numpy.dtype(typ).num for typ in sig]) - - cfuncs = [Cfunc(f, sig, n) for n, sig in enumerate(signatures)] - - ufunc_info = weave.base_info.custom_info() - ufunc_info.add_header('"numpy/ufuncobject.h"') - - return weave.inline(code(f, signatures), - verbose=verbose, - support_code=support_code(cfuncs), - customize=ufunc_info) - - -def mkufunc(arg0=[float]): - """ The actual API function, for use in decorator function. 
- - """ - class Compile(object): - - def __init__(self, f): - nin = f.func_code.co_argcount - nout = 1 - for i, sig in enumerate(signatures): - if isinstance(sig, tuple): - pass - elif sig in typedict.keys(): - signatures[i] = (nin + nout) * (sig,) - else: - raise TypeError("no match for %r" % sig) - - for sig in signatures: - assert isinstance(sig, tuple) - if len(sig) != nin + nout: - raise TypeError("signature %r does not match the " - "number of args of function %s" % - (sig, f.__name__)) - for t in sig: - if t not in typedict.keys(): - raise TypeError("no match for %r" % t) - - self.ufunc = genufunc(f, signatures) - - def __call__(self, *args): - return self.ufunc(*args) - - if isinstance(arg0, FunctionType): - f = arg0 - signatures = [float] - return Compile(f) - - elif isinstance(arg0, list): - signatures = arg0 - return Compile - - elif arg0 in typedict.keys(): - signatures = [arg0] - return Compile - - else: - raise TypeError("first argument has to be a function, a type, or " - "a list of signatures") - - -if __name__ == '__main__': - import doctest - doctest.testmod() From scipy-svn at scipy.org Mon Jun 30 20:46:27 2008 From: scipy-svn at scipy.org (scipy-svn at scipy.org) Date: Mon, 30 Jun 2008 19:46:27 -0500 (CDT) Subject: [Scipy-svn] r4510 - trunk/scipy/sandbox/mkufunc/mkufunc Message-ID: <20080701004627.1929639C32C@scipy.org> Author: ilan Date: 2008-06-30 19:46:27 -0500 (Mon, 30 Jun 2008) New Revision: 4510 Modified: trunk/scipy/sandbox/mkufunc/mkufunc/test_mkufunc.py Log: Test removed Modified: trunk/scipy/sandbox/mkufunc/mkufunc/test_mkufunc.py =================================================================== --- trunk/scipy/sandbox/mkufunc/mkufunc/test_mkufunc.py 2008-07-01 00:36:02 UTC (rev 4509) +++ trunk/scipy/sandbox/mkufunc/mkufunc/test_mkufunc.py 2008-07-01 00:46:27 UTC (rev 4510) @@ -183,21 +183,6 @@ self.assertClose(uf(x), f(x)) -class FuncArg_Tests(unittest.TestCase, Util): - - def test_fargs0(self): - def f(): - return 42 - - uf = mkufunc(f) - self.assertEqual(uf(), 42) - self.assert_(type(uf()).__name__.startswith('float')) - - uf = mkufunc(int)(f) - self.assertEqual(uf(), 42) - self.assert_(type(uf()).__name__.startswith('int')) - - class Control_Flow_Tests(unittest.TestCase): def test_if(self):