[Scipy-svn] r2182 - in trunk/Lib: cluster integrate io linalg optimize sandbox/arraysetops sandbox/models sandbox/plt sandbox/stats sandbox/xplt signal special stats tests
scipy-svn at scipy.org
Tue Aug 29 03:22:25 EDT 2006
Author: oliphant
Date: 2006-08-29 02:22:11 -0500 (Tue, 29 Aug 2006)
New Revision: 2182
Modified:
trunk/Lib/cluster/vq.py
trunk/Lib/integrate/quadrature.py
trunk/Lib/io/array_import.py
trunk/Lib/linalg/basic.py
trunk/Lib/optimize/minpack.py
trunk/Lib/optimize/optimize.py
trunk/Lib/sandbox/arraysetops/arraysetops.py
trunk/Lib/sandbox/models/utils.py
trunk/Lib/sandbox/plt/plot_objects.py
trunk/Lib/sandbox/stats/anova.py
trunk/Lib/sandbox/xplt/pl3d.py
trunk/Lib/sandbox/xplt/plwf.py
trunk/Lib/signal/signaltools.py
trunk/Lib/signal/wavelets.py
trunk/Lib/special/orthogonal.py
trunk/Lib/stats/distributions.py
trunk/Lib/tests/test_basic.py
Log:
Fix usages of take with no axis argument.
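For readers skimming the patch: every call gains an explicit axis because
numpy.take(a, indices) with no axis argument operates on the flattened
array, whereas the older Numeric-style default these call sites were
written against is axis=0. A minimal sketch of the difference
(illustrative only, not part of the patch; assumes a recent NumPy):

    import numpy as np

    a = np.array([[10, 20],
                  [30, 40]])
    idx = [1, 0]

    # No axis: take indexes the flattened array [10, 20, 30, 40]
    print(np.take(a, idx))      # -> [20 10]

    # axis=0: reorders whole rows, which is what the patched code expects
    print(np.take(a, idx, 0))   # -> [[30 40]
                                #     [10 20]]
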
Modified: trunk/Lib/cluster/vq.py
===================================================================
--- trunk/Lib/cluster/vq.py 2006-08-29 03:40:28 UTC (rev 2181)
+++ trunk/Lib/cluster/vq.py 2006-08-29 07:22:11 UTC (rev 2182)
@@ -20,7 +20,7 @@
from numpy.random import randint
from scipy.stats import std, mean
from numpy import shape, zeros, subtract, sqrt, argmin, minimum, array, \
- newaxis, arange, compress, equal, take, common_type, single, double
+ newaxis, arange, compress, equal, common_type, single, double, take
def whiten(obs):
""" Normalize a group of observations on a per feature basis
Modified: trunk/Lib/integrate/quadrature.py
===================================================================
--- trunk/Lib/integrate/quadrature.py 2006-08-29 03:40:28 UTC (rev 2181)
+++ trunk/Lib/integrate/quadrature.py 2006-08-29 07:22:11 UTC (rev 2182)
@@ -37,7 +37,7 @@
raise ValueError, "Gaussian quadrature is only available for " \
"finite limits."
y = (b-a)*(x+1)/2.0 + a
- return (b-a)/2.0*sum(w*func(y,*args)), None
+ return (b-a)/2.0*sum(w*func(y,*args),0), None
def vectorize1(func, args=(), vec_func=False):
if vec_func:
@@ -323,7 +323,7 @@
h = float(interval[1]-interval[0])/numtosum
lox = interval[0] + 0.5 * h;
points = lox + h * arange(0, numtosum)
- s = sum(function(points))
+ s = sum(function(points),0)
return s
def _romberg_diff(b, c, k):
Modified: trunk/Lib/io/array_import.py
===================================================================
--- trunk/Lib/io/array_import.py 2006-08-29 03:40:28 UTC (rev 2181)
+++ trunk/Lib/io/array_import.py 2006-08-29 07:22:11 UTC (rev 2182)
@@ -197,11 +197,11 @@
if len(collist) == 1:
toconvlist = arlist[::-collist[-1]]
else:
- toconvlist = take(arlist,collist[:-1])
+ toconvlist = take(arlist,collist[:-1],0)
toconvlist = concatenate((toconvlist,
arlist[(collist[-2]-collist[-1])::(-collist[-1])]))
else:
- toconvlist = take(arlist, collist)
+ toconvlist = take(arlist, collist,0)
return numpyio.convert_objectarray(toconvlist, atype, missing)
Modified: trunk/Lib/linalg/basic.py
===================================================================
--- trunk/Lib/linalg/basic.py 2006-08-29 03:40:28 UTC (rev 2181)
+++ trunk/Lib/linalg/basic.py 2006-08-29 07:22:11 UTC (rev 2182)
@@ -472,7 +472,7 @@
rows = mgrid[rN:0:-1]
indx = cols[:,newaxis]*ones((1,rN),dtype=int) + \
rows[newaxis,:]*ones((cN,1),dtype=int) - 1
- return take(vals, indx)
+ return take(vals, indx, 0)
def hankel(c,r=None):
@@ -503,7 +503,7 @@
rows = mgrid[0:rN]
indx = cols[:,newaxis]*ones((1,rN),dtype=int) + \
rows[newaxis,:]*ones((cN,1),dtype=int) - 1
- return take(vals, indx)
+ return take(vals, indx, 0)
def all_mat(*args):
return map(Matrix,args)
Modified: trunk/Lib/optimize/minpack.py
===================================================================
--- trunk/Lib/optimize/minpack.py 2006-08-29 03:40:28 UTC (rev 2181)
+++ trunk/Lib/optimize/minpack.py 2006-08-29 07:22:11 UTC (rev 2182)
@@ -264,7 +264,7 @@
mesg = errors[info][0]
if full_output:
import scipy.linalg as sl
- perm = take(eye(n),retval[1]['ipvt']-1)
+ perm = take(eye(n),retval[1]['ipvt']-1,0)
r = triu(transpose(retval[1]['fjac'])[:n,:])
R = dot(r, perm)
try:
Modified: trunk/Lib/optimize/optimize.py
===================================================================
--- trunk/Lib/optimize/optimize.py 2006-08-29 03:40:28 UTC (rev 2181)
+++ trunk/Lib/optimize/optimize.py 2006-08-29 07:22:11 UTC (rev 2182)
@@ -172,7 +172,8 @@
fsim[k+1] = f
ind = numpy.argsort(fsim)
- fsim = numpy.take(fsim,ind) # sort so sim[0,:] has the lowest function value
+ fsim = numpy.take(fsim,ind,0)
+ # sort so sim[0,:] has the lowest function value
sim = numpy.take(sim,ind,0)
iterations = 1
@@ -230,7 +231,7 @@
ind = numpy.argsort(fsim)
sim = numpy.take(sim,ind,0)
- fsim = numpy.take(fsim,ind)
+ fsim = numpy.take(fsim,ind,0)
if callback is not None:
callback(sim[0])
iterations += 1
Modified: trunk/Lib/sandbox/arraysetops/arraysetops.py
===================================================================
--- trunk/Lib/sandbox/arraysetops/arraysetops.py 2006-08-29 03:40:28 UTC (rev 2181)
+++ trunk/Lib/sandbox/arraysetops/arraysetops.py 2006-08-29 07:22:11 UTC (rev 2182)
@@ -62,7 +62,7 @@
ar = numpy.array( ar1 ).ravel()
if retIndx:
perm = numpy.argsort( ar )
- aux = numpy.take( ar, perm )
+ aux = numpy.take( ar, perm, 0 )
flag = ediff1d( aux, 1 ) != 0
return numpy.compress( flag, perm ), numpy.compress( flag, aux )
else:
@@ -104,8 +104,8 @@
tt = numpy.concatenate( (numpy.zeros_like( ar1 ),
numpy.zeros_like( ar2 ) + 1) )
perm = numpy.argsort( ar )
- aux = numpy.take( ar, perm )
- aux2 = numpy.take( tt, perm )
+ aux = numpy.take( ar, perm, 0)
+ aux2 = numpy.take( tt, perm, 0 )
flag = ediff1d( aux, 1 ) == 0
ii = numpy.where( flag * aux2 )
@@ -115,7 +115,7 @@
indx = numpy.argsort( perm )[:len( ar1 )]
- return numpy.take( flag, indx )
+ return numpy.take( flag, indx, 0 )
##
# 03.11.2005, c
Modified: trunk/Lib/sandbox/models/utils.py
===================================================================
--- trunk/Lib/sandbox/models/utils.py 2006-08-29 03:40:28 UTC (rev 2181)
+++ trunk/Lib/sandbox/models/utils.py 2006-08-29 07:22:11 UTC (rev 2182)
@@ -107,8 +107,8 @@
if not sorted:
asort = N.argsort(self.x)
- self.x = N.take(self.x, asort)
- self.y = N.take(self.y, asort)
+ self.x = N.take(self.x, asort, 0)
+ self.y = N.take(self.y, asort, 0)
self.n = self.x.shape[0]
def __call__(self, time):
Modified: trunk/Lib/sandbox/plt/plot_objects.py
===================================================================
--- trunk/Lib/sandbox/plt/plot_objects.py 2006-08-29 03:40:28 UTC (rev 2181)
+++ trunk/Lib/sandbox/plt/plot_objects.py 2006-08-29 07:22:11 UTC (rev 2182)
@@ -928,7 +928,7 @@
else:
cmap = colormap
- pixels = take( cmap, scaled_mag)
+ pixels = take( cmap, scaled_mag, 0)
del scaled_mag
# need to transpose pixels in memory...
bitmap = pixels.astype(UnsignedInt8).tostring()
Modified: trunk/Lib/sandbox/stats/anova.py
===================================================================
--- trunk/Lib/sandbox/stats/anova.py 2006-08-29 03:40:28 UTC (rev 2181)
+++ trunk/Lib/sandbox/stats/anova.py 2006-08-29 07:22:11 UTC (rev 2182)
@@ -74,7 +74,7 @@
Nwifactors = len(Wscols) - 1 # WAS len(Wcolumns)
#Nwlevels = take(array(Nlevels),Wscols) # no.lvls for each w/i subj fact
#Nbtwfactors = len(Bscols) - 1 # WASNfactors - Nwifactors + 1
- Nblevels = take(array(Nlevels),Bscols)
+ Nblevels = take(array(Nlevels),Bscols,0)
Nwsources = 2**Nwifactors - 1 # num within-subject factor-combos
#Nbsources = Nallsources - Nwsources
@@ -159,7 +159,7 @@
new = alluniqueslist[j].index(data[i][j])
idx.append(new)
DA[idx] = data[i][-1] # put this data point in proper place in DA
- btwidx = take(idx,array(Bscols))
+ btwidx = take(idx,array(Bscols),0)
subjslots[btwidx] = 1
# DONE CREATING DATA ARRAY, DA ... #dims = numfactors+1, dim 0=subjects
# dim -1=measured values, dummyval = values used to fill empty slots in DA
@@ -221,7 +221,7 @@
Lwithinsourcecol = map(Lsourceandbtws.index,Lwithinsourcecol)
# Now indxlist should hold a list of indices into the list of possible
# coefficients, one row per combo of coefficient. Next line PRESERVES dummyval
- dvarshape = array(take(mns.shape,Lwithinsourcecol[1:])) -1
+ dvarshape = array(take(mns.shape,Lwithinsourcecol[1:],0)) -1
idxarray = indices(dvarshape)
newshape = array([idxarray.shape[0],
multiply.reduce(idxarray.shape[1:])])
@@ -379,7 +379,7 @@
## Calc and save sums of squares for this source
SS = sum((effect**2 *sourceNarray) *
- multiply.reduce(take(Marray.shape,btwnonsourcedims)))
+ multiply.reduce(take(Marray.shape,btwnonsourcedims,0)))
## Save it so you don't have to calculate it again next time
SSlist.append(SS)
SSsources.append(source)
@@ -687,7 +687,7 @@
# Calc and save sums of squares for this source
SS = zeros((levels,levels),'f')
SS = sum((effect**2 *sourceDNarray) *
- multiply.reduce(take(DM[dindex].shape,btwnonsourcedims)),
+ multiply.reduce(take(DM[dindex].shape,btwnonsourcedims,0)),
range(len(sourceDMarray.shape)-1))
# Save it so you don't have to calculate it again next time
SSlist.append(SS)
@@ -739,7 +739,7 @@
idx[0] = -1 # compensate for pre-increment of 1st slot in incr()
# Get a list of the maximum values each factor can handle
- loopcap = take(array(Nlevels),sourcedims)-1
+ loopcap = take(array(Nlevels),sourcedims,0)-1
### WHILE STILL MORE GROUPS, CALCULATE GROUP MEAN FOR EACH D-VAR
while incr(idx,loopcap) != -1: # loop through source btw level-combos
Modified: trunk/Lib/sandbox/xplt/pl3d.py
===================================================================
--- trunk/Lib/sandbox/xplt/pl3d.py 2006-08-29 03:40:28 UTC (rev 2181)
+++ trunk/Lib/sandbox/xplt/pl3d.py 2006-08-29 07:22:11 UTC (rev 2182)
@@ -469,13 +469,13 @@
# (reduces to above medians for quads)
# (2) compute midpoints of first three sides
n2 = (nxyz [0] + 1) / 2
- c0 = (take(xyz, frst) + take(xyz, frst + 1)) / 2.
+ c0 = (take(xyz, frst, 0) + take(xyz, frst + 1, 0)) / 2.
i = frst + n2 - 1
- c1 = (take(xyz, i) + take(xyz, i + 1)) / 2.
+ c1 = (take(xyz, i, 0) + take(xyz, i + 1, 0)) / 2.
i = n2 / 2
- c2 = (take(xyz, frst + i) + take(xyz, frst + (i + 1) % nxyz [0])) / 2.
+ c2 = (take(xyz, frst + i, 0) + take(xyz, frst + (i + 1) % nxyz [0], 0)) / 2.
i = minimum (i + n2, nxyz [0]) - 1
- c3 = (take(xyz, frst + i) + take(xyz, frst + (i + 1) % nxyz [0])) / 2.
+ c3 = (take(xyz, frst + i, 0) + take(xyz, frst + (i + 1) % nxyz [0], 0)) / 2.
m1 = c1 - c0
m2 = c3 - c2
@@ -847,7 +847,7 @@
array_set (vlist, list, arange (len (list), dtype = Int))
# then reset the nlist values to that pre-sorted order, so that
# sort(nlist) will be the required vertex sorting list
- nlist = take(vlist, nlist)
+ nlist = take(vlist, nlist, 0)
# the final hitch is to ensure that the vertices within each polygon
# remain in their initial order (sort scrambles equal values)
# since the vertices of a polygon can be cyclically permuted,
Modified: trunk/Lib/sandbox/xplt/plwf.py
===================================================================
--- trunk/Lib/sandbox/xplt/plwf.py 2006-08-29 03:40:28 UTC (rev 2181)
+++ trunk/Lib/sandbox/xplt/plwf.py 2006-08-29 07:22:11 UTC (rev 2182)
@@ -243,9 +243,9 @@
ravel(add.outer (adders,zeros(nj-1, Int))) +
arange((ni-1)*(nj-1), dtype = Int),
array ( [[0, 1], [nj + 1, nj]])))
- xyz=array([take(ravel(xyz[0]),list),
- take(ravel(xyz[1]),list),
- take(ravel(xyz[2]),list)])
+ xyz=array([take(ravel(xyz[0]),list,0),
+ take(ravel(xyz[1]),list,0),
+ take(ravel(xyz[2]),list,0)])
nxyz= ones((ni-1)*(nj-1)) * 4;
The resulting array xyz is 3-by-(4*(nj-1)*(ni-1)).
xyz[0:3,4*i:4*(i+1)] are the clockwise coordinates of the
Modified: trunk/Lib/signal/signaltools.py
===================================================================
--- trunk/Lib/signal/signaltools.py 2006-08-29 03:40:28 UTC (rev 2181)
+++ trunk/Lib/signal/signaltools.py 2006-08-29 07:22:11 UTC (rev 2182)
@@ -887,7 +887,7 @@
indx = argsort(abs(p))
else:
indx = argsort(p)
- return take(p,indx), indx
+ return take(p,indx,0), indx
def unique_roots(p,tol=1e-3,rtype='min'):
"""Determine the unique roots and their multiplicities in two lists
@@ -959,7 +959,7 @@
"""
extra = k
p, indx = cmplx_sort(p)
- r = take(r,indx)
+ r = take(r,indx,0)
pout, mult = unique_roots(p,tol=tol,rtype=rtype)
p = []
for k in range(len(pout)):
@@ -1130,7 +1130,7 @@
"""
extra = asarray(k)
p, indx = cmplx_sort(p)
- r = take(r,indx)
+ r = take(r,indx,0)
pout, mult = unique_roots(p,tol=tol,rtype=rtype)
p = []
for k in range(len(pout)):
@@ -1341,7 +1341,7 @@
coef,resids,rank,s = linalg.lstsq(A,newdata[sl])
newdata[sl] = newdata[sl] - dot(A,coef)
# Put data back in original shape.
- tdshape = take(dshape,newdims)
+ tdshape = take(dshape,newdims,0)
ret = reshape(newdata,tuple(tdshape))
vals = range(1,rnk)
olddims = vals[:axis] + [0] + vals[axis:]
Modified: trunk/Lib/signal/wavelets.py
===================================================================
--- trunk/Lib/signal/wavelets.py 2006-08-29 03:40:28 UTC (rev 2181)
+++ trunk/Lib/signal/wavelets.py 2006-08-29 07:22:11 UTC (rev 2182)
@@ -111,10 +111,10 @@
indx1 = sb.clip(2*nn-kk,-1,N+1)
indx2 = sb.clip(2*nn-kk+1,-1,N+1)
m = sb.zeros((2,2,N,N),'d')
- m[0,0] = sb.take(thk,indx1)
- m[0,1] = sb.take(thk,indx2)
- m[1,0] = sb.take(tgk,indx1)
- m[1,1] = sb.take(tgk,indx2)
+ m[0,0] = sb.take(thk,indx1,0)
+ m[0,1] = sb.take(thk,indx2,0)
+ m[1,0] = sb.take(tgk,indx1,0)
+ m[1,1] = sb.take(tgk,indx2,0)
m *= s2
# construct the grid of points
Modified: trunk/Lib/special/orthogonal.py
===================================================================
--- trunk/Lib/special/orthogonal.py 2006-08-29 03:40:28 UTC (rev 2181)
+++ trunk/Lib/special/orthogonal.py 2006-08-29 07:22:11 UTC (rev 2182)
@@ -61,6 +61,7 @@
from __future__ import nested_scopes
from numpy import *
+from numpy.oldnumeric import take
import _cephes as cephes
_gam = cephes.gamma
from scipy.linalg import eig
Modified: trunk/Lib/stats/distributions.py
===================================================================
--- trunk/Lib/stats/distributions.py 2006-08-29 03:40:28 UTC (rev 2181)
+++ trunk/Lib/stats/distributions.py 2006-08-29 07:22:11 UTC (rev 2182)
@@ -3057,7 +3057,7 @@
c_bad = atleast_1d((b<=0) | (x != x))
indxiter = nonzero(c_xiter)
- xiter = take(x, indxiter)
+ xiter = take(x, indxiter, 0)
vals = ones(len(c_xsimple),float)
putmask(vals, c_bad, nan)
@@ -3066,7 +3066,7 @@
st = where(isnan(st),0.0,st)
putmask(vals, c_xnormal, norm.cdf(x, scale=st))
- biter = take(atleast_1d(b)*(x==x), indxiter)
+ biter = take(atleast_1d(b)*(x==x), indxiter, 0)
if len(xiter) > 0:
fac = special.i0(biter)
x2 = xiter
@@ -3191,7 +3191,7 @@
qk = 1.0*qk / sum(qk)
# If qk is zero anywhere, then unless pk is zero at those places
# too, the relative entropy is infinite.
- if any(take(pk,nonzero(qk==0.0))!=0.0):
+ if any(take(pk,nonzero(qk==0.0),0)!=0.0):
return inf
vec = where (pk == 0, 0.0, pk*log(pk / qk))
return -sum(vec)
@@ -3359,8 +3359,8 @@
self.xk, self.pk = values
self.return_integers = 0
indx = argsort(ravel(self.xk))
- self.xk = take(ravel(self.xk),indx)
- self.pk = take(ravel(self.pk),indx)
+ self.xk = take(ravel(self.xk),indx, 0)
+ self.pk = take(ravel(self.pk),indx, 0)
self.a = self.xk[0]
self.b = self.xk[-1]
self.P = make_dict(self.xk, self.pk)
Modified: trunk/Lib/tests/test_basic.py
===================================================================
--- trunk/Lib/tests/test_basic.py 2006-08-29 03:40:28 UTC (rev 2181)
+++ trunk/Lib/tests/test_basic.py 2006-08-29 07:22:11 UTC (rev 2182)
@@ -273,7 +273,7 @@
b = [[3,6.0, 9.0],
[4,10.0,5.0],
[8,3.0,2.0]]
- assert_equal(ptp(b),[5.0,7.0,7.0])
+ assert_equal(ptp(b,axis=0),[5.0,7.0,7.0])
assert_equal(ptp(b,axis=1),[6.0,6.0,6.0])