From numpy-svn at scipy.org Thu May 1 15:25:34 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Thu, 1 May 2008 14:25:34 -0500 (CDT) Subject: [Numpy-svn] r5117 - in trunk/numpy: . core/src core/tests Message-ID: <20080501192534.4546039C16C@new.scipy.org> Author: stefan Date: 2008-05-01 14:24:51 -0500 (Thu, 01 May 2008) New Revision: 5117 Modified: trunk/numpy/add_newdocs.py trunk/numpy/core/src/arraymethods.c trunk/numpy/core/tests/test_multiarray.py Log: Support for Python types in x.view. Modified: trunk/numpy/add_newdocs.py =================================================================== --- trunk/numpy/add_newdocs.py 2008-04-30 23:48:02 UTC (rev 5116) +++ trunk/numpy/add_newdocs.py 2008-05-01 19:24:51 UTC (rev 5117) @@ -2198,15 +2198,28 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('view', - """a.view(dtype=None) + """a.view(dtype=None, type=None) New view of array with the same data. Parameters ---------- - dtype : sub-type or data-descriptor - Data-type of the returned view. + dtype : data-type + Data-type descriptor of the returned view, e.g. float32 or int16. + type : python type + Type of the returned view, e.g. ndarray or matrix. + Examples + -------- + >>> x = np.array([(1,2)],dtype=[('a',np.int8),('b',np.int8)]) + >>> y = x.view(dtype=np.int16, type=np.matrix) + + >>> print y.dtype + int16 + + >>> print type(y) + + """)) add_newdoc('numpy.core.umath','geterrobj', Modified: trunk/numpy/core/src/arraymethods.c =================================================================== --- trunk/numpy/core/src/arraymethods.c 2008-04-30 23:48:02 UTC (rev 5116) +++ trunk/numpy/core/src/arraymethods.c 2008-05-01 19:24:51 UTC (rev 5117) @@ -103,26 +103,63 @@ static PyObject * array_view(PyArrayObject *self, PyObject *args, PyObject *kwds) { - PyObject *otype=NULL; - PyArray_Descr *type=NULL; + PyObject *out_dtype_or_type=NULL; + PyObject *out_dtype=NULL; + PyObject *out_type=NULL; + PyArray_Descr *dtype=NULL; - static char *kwlist[] = {"dtype", NULL}; - if (!PyArg_ParseTupleAndKeywords(args, kwds, "|O", kwlist, &otype)) + static char *kwlist[] = {"dtype_or_type", "dtype", "type", NULL}; + if (!PyArg_ParseTupleAndKeywords(args, kwds, "|OOO", kwlist, + &out_dtype_or_type, + &out_dtype, + &out_type)) return NULL; - if (otype) { - if (PyType_Check(otype) && \ - PyType_IsSubtype((PyTypeObject *)otype, + /* If user specified a positional argument, guess whether it + represents a type or a dtype for backward compatibility. */ + if (out_dtype_or_type) { + + /* type specified? 
*/ + if (PyType_Check(out_dtype_or_type) && + PyType_IsSubtype((PyTypeObject *)out_dtype_or_type, &PyArray_Type)) { - return PyArray_View(self, NULL, - (PyTypeObject *)otype); + if (out_type) { + PyErr_SetString(PyExc_ValueError, + "Cannot specify output type twice."); + return NULL; + } + + out_type = out_dtype_or_type; } + + /* dtype specified */ else { - if (PyArray_DescrConverter(otype, &type) == PY_FAIL) + if (out_dtype) { + PyErr_SetString(PyExc_ValueError, + "Cannot specify output dtype twice."); return NULL; + } + + out_dtype = out_dtype_or_type; } } - return PyArray_View(self, type, NULL); + + if ((out_type) && (!PyType_Check(out_type) || + !PyType_IsSubtype((PyTypeObject *)out_type, + &PyArray_Type))) { + PyErr_SetString(PyExc_ValueError, + "Type must be a Python type object"); + return NULL; + } + + if ((out_dtype) && + (PyArray_DescrConverter(out_dtype, &dtype) == PY_FAIL)) { + PyErr_SetString(PyExc_ValueError, + "Dtype must be a numpy data-type"); + return NULL; + } + + return PyArray_View(self, dtype, (PyTypeObject*)out_type); } static PyObject * Modified: trunk/numpy/core/tests/test_multiarray.py =================================================================== --- trunk/numpy/core/tests/test_multiarray.py 2008-04-30 23:48:02 UTC (rev 5116) +++ trunk/numpy/core/tests/test_multiarray.py 2008-05-01 19:24:51 UTC (rev 5117) @@ -842,7 +842,19 @@ assert_array_equal(y,z) assert_array_equal(y, [67305985, 134678021]) + def test_type(self): + x = np.array([1,2,3]) + assert(isinstance(x.view(np.matrix),np.matrix)) + def test_keywords(self): + x = np.array([(1,2)],dtype=[('a',np.int8),('b',np.int8)]) + y = x.view(dtype=np.int16, type=np.matrix) + assert_array_equal(y,[[513]]) + + assert(isinstance(y,np.matrix)) + assert_equal(y.dtype,np.int16) + + # Import tests without matching module names set_local_path() from test_unicode import * From numpy-svn at scipy.org Thu May 1 15:27:20 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Thu, 1 May 2008 14:27:20 -0500 (CDT) Subject: [Numpy-svn] r5118 - trunk/numpy/lib/tests Message-ID: <20080501192720.34E0239C16C@new.scipy.org> Author: stefan Date: 2008-05-01 14:26:58 -0500 (Thu, 01 May 2008) New Revision: 5118 Modified: trunk/numpy/lib/tests/test_machar.py Log: Suppress test output. Modified: trunk/numpy/lib/tests/test_machar.py =================================================================== --- trunk/numpy/lib/tests/test_machar.py 2008-05-01 19:24:51 UTC (rev 5117) +++ trunk/numpy/lib/tests/test_machar.py 2008-05-01 19:26:58 UTC (rev 5118) @@ -12,8 +12,7 @@ hiprec = ntypes.float96 machar = MachAr(lambda v:array([v], hiprec)) except AttributeError: - print "Skipping test: no nyptes.float96 available on this" \ - " platform." + "Skipping test: no nyptes.float96 available on this platform." def test_underlow(self): """Regression testing for #759: instanciating MachAr for dtype = From numpy-svn at scipy.org Thu May 1 16:02:39 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Thu, 1 May 2008 15:02:39 -0500 (CDT) Subject: [Numpy-svn] r5119 - trunk/numpy/core/src Message-ID: <20080501200239.15DD739C27C@new.scipy.org> Author: oliphant Date: 2008-05-01 15:02:38 -0500 (Thu, 01 May 2008) New Revision: 5119 Modified: trunk/numpy/core/src/arraymethods.c Log: Remove dtype_or_type key word in favor of simpler interface. 
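For readers skimming the thread, here is a minimal usage sketch of the x.view(dtype=..., type=...) interface added in r5117 and simplified in this revision. It is an illustrative aside rather than part of either commit; it assumes a NumPy build containing these changes, and the [[513]] value assumes a little-endian machine.

    import numpy as np

    # Two adjacent int8 fields occupy one two-byte record.
    x = np.array([(1, 2)], dtype=[('a', np.int8), ('b', np.int8)])

    # Reinterpret the same buffer as int16 and return the matrix subclass.
    y = x.view(dtype=np.int16, type=np.matrix)

    print(y.dtype)                    # int16
    print(isinstance(y, np.matrix))   # True
    print(y)                          # [[513]] on little-endian hardware (0x0201)

A single positional argument is still accepted for backward compatibility: the C code guesses whether it names an ndarray subclass or a data-type, so x.view(np.int16) and x.view(np.matrix) both keep working.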
Modified: trunk/numpy/core/src/arraymethods.c =================================================================== --- trunk/numpy/core/src/arraymethods.c 2008-05-01 19:26:58 UTC (rev 5118) +++ trunk/numpy/core/src/arraymethods.c 2008-05-01 20:02:38 UTC (rev 5119) @@ -103,52 +103,38 @@ static PyObject * array_view(PyArrayObject *self, PyObject *args, PyObject *kwds) { - PyObject *out_dtype_or_type=NULL; PyObject *out_dtype=NULL; PyObject *out_type=NULL; PyArray_Descr *dtype=NULL; - static char *kwlist[] = {"dtype_or_type", "dtype", "type", NULL}; - if (!PyArg_ParseTupleAndKeywords(args, kwds, "|OOO", kwlist, - &out_dtype_or_type, + static char *kwlist[] = {"dtype", "type", NULL}; + if (!PyArg_ParseTupleAndKeywords(args, kwds, "|OO", kwlist, &out_dtype, &out_type)) return NULL; /* If user specified a positional argument, guess whether it represents a type or a dtype for backward compatibility. */ - if (out_dtype_or_type) { - + if (out_dtype) { /* type specified? */ - if (PyType_Check(out_dtype_or_type) && - PyType_IsSubtype((PyTypeObject *)out_dtype_or_type, + if (PyType_Check(out_dtype) && + PyType_IsSubtype((PyTypeObject *)out_dtype, &PyArray_Type)) { - if (out_type) { + if (out_type) { PyErr_SetString(PyExc_ValueError, "Cannot specify output type twice."); return NULL; } - - out_type = out_dtype_or_type; + out_type = out_dtype; + out_dtype = NULL; } - - /* dtype specified */ - else { - if (out_dtype) { - PyErr_SetString(PyExc_ValueError, - "Cannot specify output dtype twice."); - return NULL; - } - - out_dtype = out_dtype_or_type; - } } if ((out_type) && (!PyType_Check(out_type) || !PyType_IsSubtype((PyTypeObject *)out_type, &PyArray_Type))) { PyErr_SetString(PyExc_ValueError, - "Type must be a Python type object"); + "Type must be a sub-type of ndarray type"); return NULL; } From numpy-svn at scipy.org Thu May 1 17:21:09 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Thu, 1 May 2008 16:21:09 -0500 (CDT) Subject: [Numpy-svn] r5120 - trunk/numpy/doc/swig Message-ID: <20080501212109.ED078C7C00C@new.scipy.org> Author: charris Date: 2008-05-01 16:21:08 -0500 (Thu, 01 May 2008) New Revision: 5120 Modified: trunk/numpy/doc/swig/numpy.i Log: Fix grammar. Modified: trunk/numpy/doc/swig/numpy.i =================================================================== --- trunk/numpy/doc/swig/numpy.i 2008-05-01 20:02:38 UTC (rev 5119) +++ trunk/numpy/doc/swig/numpy.i 2008-05-01 21:21:08 UTC (rev 5120) @@ -341,7 +341,7 @@ sprintf(s, " or %d", exact_dimensions[n-1]); strcat(dims_str,s); PyErr_Format(PyExc_TypeError, - "Array must be have %s dimensions. Given array has %d dimensions", + "Array must have %s dimensions. Given array has %d dimensions", dims_str, array_numdims(ary)); } return success; @@ -390,7 +390,7 @@ len = strlen(actual_dims); actual_dims[len-1] = ']'; PyErr_Format(PyExc_TypeError, - "Array must be have shape of %s. Given array has shape of %s", + "Array must have shape of %s. Given array has shape of %s", desired_dims, actual_dims); } return success; From numpy-svn at scipy.org Thu May 1 17:26:16 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Thu, 1 May 2008 16:26:16 -0500 (CDT) Subject: [Numpy-svn] r5121 - trunk/numpy/doc/swig Message-ID: <20080501212616.6A509C7C029@new.scipy.org> Author: charris Date: 2008-05-01 16:26:14 -0500 (Thu, 01 May 2008) New Revision: 5121 Modified: trunk/numpy/doc/swig/numpy.i Log: Remove trailing whitespace. 
Modified: trunk/numpy/doc/swig/numpy.i =================================================================== --- trunk/numpy/doc/swig/numpy.i 2008-05-01 21:21:08 UTC (rev 5120) +++ trunk/numpy/doc/swig/numpy.i 2008-05-01 21:26:14 UTC (rev 5121) @@ -161,8 +161,8 @@ { char* desired_type = typecode_string(typecode); char* actual_type = typecode_string(array_type(input)); - PyErr_Format(PyExc_TypeError, - "Array of type '%s' required. Array of type '%s' given", + PyErr_Format(PyExc_TypeError, + "Array of type '%s' required. Array of type '%s' given", desired_type, actual_type); ary = NULL; } @@ -170,8 +170,8 @@ { char * desired_type = typecode_string(typecode); char * actual_type = pytype_string(input); - PyErr_Format(PyExc_TypeError, - "Array of type '%s' required. A '%s' was given", + PyErr_Format(PyExc_TypeError, + "Array of type '%s' required. A '%s' was given", desired_type, actual_type); ary = NULL; } @@ -220,8 +220,8 @@ } else { - result = (PyArrayObject*) PyArray_ContiguousFromObject((PyObject*)ary, - array_type(ary), + result = (PyArrayObject*) PyArray_ContiguousFromObject((PyObject*)ary, + array_type(ary), min_dims, max_dims); *is_new_object = 1; @@ -241,7 +241,7 @@ int is_new1 = 0; int is_new2 = 0; PyArrayObject* ary2; - PyArrayObject* ary1 = obj_to_array_allow_conversion(input, typecode, + PyArrayObject* ary1 = obj_to_array_allow_conversion(input, typecode, &is_new1); if (ary1) { @@ -250,7 +250,7 @@ { Py_DECREF(ary1); } - ary1 = ary2; + ary1 = ary2; } *is_new_object = is_new1 || is_new2; return ary1; @@ -305,8 +305,8 @@ int success = 1; if (array_numdims(ary) != exact_dimensions) { - PyErr_Format(PyExc_TypeError, - "Array must have %d dimensions. Given array has %d dimensions", + PyErr_Format(PyExc_TypeError, + "Array must have %d dimensions. Given array has %d dimensions", exact_dimensions, array_numdims(ary)); success = 0; } @@ -335,17 +335,17 @@ { for (i = 0; i < n-1; i++) { - sprintf(s, "%d, ", exact_dimensions[i]); + sprintf(s, "%d, ", exact_dimensions[i]); strcat(dims_str,s); } - sprintf(s, " or %d", exact_dimensions[n-1]); + sprintf(s, " or %d", exact_dimensions[n-1]); strcat(dims_str,s); - PyErr_Format(PyExc_TypeError, + PyErr_Format(PyExc_TypeError, "Array must have %s dimensions. Given array has %d dimensions", dims_str, array_numdims(ary)); } return success; - } + } /* Require the given PyArrayObject to have a specified shape. If the * array has the specified shape, return 1. Otherwise, set the python @@ -363,7 +363,7 @@ { if (size[i] != -1 && size[i] != array_size(ary,i)) { - success = 0; + success = 0; } } if (!success) @@ -372,24 +372,24 @@ { if (size[i] == -1) { - sprintf(s, "*,"); + sprintf(s, "*,"); } else { - sprintf(s, "%ld,", (long int)size[i]); - } + sprintf(s, "%ld,", (long int)size[i]); + } strcat(desired_dims,s); } len = strlen(desired_dims); desired_dims[len-1] = ']'; for (i = 0; i < n; i++) { - sprintf(s, "%ld,", (long int)array_size(ary,i)); + sprintf(s, "%ld,", (long int)array_size(ary,i)); strcat(actual_dims,s); } len = strlen(actual_dims); actual_dims[len-1] = ']'; - PyErr_Format(PyExc_TypeError, + PyErr_Format(PyExc_TypeError, "Array must have shape of %s. 
Given array has shape of %s", desired_dims, actual_dims); } @@ -818,7 +818,7 @@ { Py_DECREF(array$argnum); } } -/* Typemap suite for (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, +/* Typemap suite for (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, * DATA_TYPE* IN_ARRAY3) */ %typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, @@ -880,7 +880,7 @@ { Py_DECREF(array$argnum); } } -/* Typemap suite for (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, +/* Typemap suite for (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, * DATA_TYPE* IN_FARRAY3) */ %typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, @@ -1134,7 +1134,7 @@ $4 = (DIM_TYPE) array_size(array,2); } -/* Typemap suite for (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, +/* Typemap suite for (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, * DATA_TYPE* INPLACE_ARRAY3) */ %typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, @@ -1182,7 +1182,7 @@ $4 = (DIM_TYPE) array_size(array,2); } -/* Typemap suite for (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, +/* Typemap suite for (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, * DATA_TYPE* INPLACE_FARRAY3) */ %typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, @@ -1239,8 +1239,8 @@ if (!PyInt_Check($input)) { char* typestring = pytype_string($input); - PyErr_Format(PyExc_TypeError, - "Int dimension expected. '%s' given.", + PyErr_Format(PyExc_TypeError, + "Int dimension expected. '%s' given.", typestring); SWIG_fail; } @@ -1267,8 +1267,8 @@ if (!PyInt_Check($input)) { char* typestring = pytype_string($input); - PyErr_Format(PyExc_TypeError, - "Int dimension expected. '%s' given.", + PyErr_Format(PyExc_TypeError, + "Int dimension expected. '%s' given.", typestring); SWIG_fail; } From numpy-svn at scipy.org Thu May 1 17:33:46 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Thu, 1 May 2008 16:33:46 -0500 (CDT) Subject: [Numpy-svn] r5122 - trunk/numpy/doc/swig Message-ID: <20080501213346.2E2DC39C1FE@new.scipy.org> Author: charris Date: 2008-05-01 16:33:44 -0500 (Thu, 01 May 2008) New Revision: 5122 Modified: trunk/numpy/doc/swig/pyfragments.swg Log: Remove trailing whitespace. Modified: trunk/numpy/doc/swig/pyfragments.swg =================================================================== --- trunk/numpy/doc/swig/pyfragments.swg 2008-05-01 21:26:14 UTC (rev 5121) +++ trunk/numpy/doc/swig/pyfragments.swg 2008-05-01 21:33:44 UTC (rev 5122) @@ -127,7 +127,7 @@ fragment="NumPy_Backward_Compatibility") { SWIGINTERN int - SWIG_AsVal_dec(unsigned long)(PyObject *obj, unsigned long *val) + SWIG_AsVal_dec(unsigned long)(PyObject *obj, unsigned long *val) { static PyArray_Descr * ulongDescr = PyArray_DescrNewFromType(NPY_ULONG); if (PyInt_Check(obj)) { From numpy-svn at scipy.org Thu May 1 17:34:12 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Thu, 1 May 2008 16:34:12 -0500 (CDT) Subject: [Numpy-svn] r5123 - trunk/numpy/doc/swig Message-ID: <20080501213412.DB12639C0B4@new.scipy.org> Author: charris Date: 2008-05-01 16:34:11 -0500 (Thu, 01 May 2008) New Revision: 5123 Modified: trunk/numpy/doc/swig/numpy.i Log: Replace tabs with spaces. 
Modified: trunk/numpy/doc/swig/numpy.i =================================================================== --- trunk/numpy/doc/swig/numpy.i 2008-05-01 21:33:44 UTC (rev 5122) +++ trunk/numpy/doc/swig/numpy.i 2008-05-01 21:34:11 UTC (rev 5123) @@ -118,14 +118,14 @@ */ char* typecode_string(int typecode) { static char* type_names[25] = {"bool", "byte", "unsigned byte", - "short", "unsigned short", "int", - "unsigned int", "long", "unsigned long", - "long long", "unsigned long long", - "float", "double", "long double", - "complex float", "complex double", - "complex long double", "object", - "string", "unicode", "void", "ntypes", - "notype", "char", "unknown"}; + "short", "unsigned short", "int", + "unsigned int", "long", "unsigned long", + "long long", "unsigned long long", + "float", "double", "long double", + "complex float", "complex double", + "complex long double", "object", + "string", "unicode", "void", "ntypes", + "notype", "char", "unknown"}; return typecode < 24 ? type_names[typecode] : type_names[24]; } @@ -141,9 +141,9 @@ /**********************************************************************/ %fragment("NumPy_Object_to_Array", "header", - fragment="NumPy_Backward_Compatibility", - fragment="NumPy_Macros", - fragment="NumPy_Utilities") + fragment="NumPy_Backward_Compatibility", + fragment="NumPy_Macros", + fragment="NumPy_Utilities") { /* Given a PyObject pointer, cast it to a PyArrayObject pointer if * legal. If not, set the python error string appropriately and @@ -153,7 +153,7 @@ { PyArrayObject* ary = NULL; if (is_array(input) && (typecode == NPY_NOTYPE || - PyArray_EquivTypenums(array_type(input), typecode))) + PyArray_EquivTypenums(array_type(input), typecode))) { ary = (PyArrayObject*) input; } @@ -162,8 +162,8 @@ char* desired_type = typecode_string(typecode); char* actual_type = typecode_string(array_type(input)); PyErr_Format(PyExc_TypeError, - "Array of type '%s' required. Array of type '%s' given", - desired_type, actual_type); + "Array of type '%s' required. Array of type '%s' given", + desired_type, actual_type); ary = NULL; } else @@ -171,8 +171,8 @@ char * desired_type = typecode_string(typecode); char * actual_type = pytype_string(input); PyErr_Format(PyExc_TypeError, - "Array of type '%s' required. A '%s' was given", - desired_type, actual_type); + "Array of type '%s' required. A '%s' was given", + desired_type, actual_type); ary = NULL; } return ary; @@ -184,12 +184,12 @@ * the routine returns NULL. */ PyArrayObject* obj_to_array_allow_conversion(PyObject* input, int typecode, - int* is_new_object) + int* is_new_object) { PyArrayObject* ary = NULL; PyObject* py_obj; if (is_array(input) && (typecode == NPY_NOTYPE || - PyArray_EquivTypenums(array_type(input),typecode))) + PyArray_EquivTypenums(array_type(input),typecode))) { ary = (PyArrayObject*) input; *is_new_object = 0; @@ -210,7 +210,7 @@ * flag it as a new object and return the pointer. */ PyArrayObject* make_contiguous(PyArrayObject* ary, int* is_new_object, - int min_dims, int max_dims) + int min_dims, int max_dims) { PyArrayObject* result; if (array_is_contiguous(ary)) @@ -221,9 +221,9 @@ else { result = (PyArrayObject*) PyArray_ContiguousFromObject((PyObject*)ary, - array_type(ary), - min_dims, - max_dims); + array_type(ary), + min_dims, + max_dims); *is_new_object = 1; } return result; @@ -235,20 +235,20 @@ * will be set. 
*/ PyArrayObject* obj_to_array_contiguous_allow_conversion(PyObject* input, - int typecode, - int* is_new_object) + int typecode, + int* is_new_object) { int is_new1 = 0; int is_new2 = 0; PyArrayObject* ary2; PyArrayObject* ary1 = obj_to_array_allow_conversion(input, typecode, - &is_new1); + &is_new1); if (ary1) { ary2 = make_contiguous(ary1, &is_new2, 0, 0); if ( is_new1 && is_new2) { - Py_DECREF(ary1); + Py_DECREF(ary1); } ary1 = ary2; } @@ -260,8 +260,8 @@ /**********************************************************************/ %fragment("NumPy_Array_Requirements", "header", - fragment="NumPy_Backward_Compatibility", - fragment="NumPy_Macros") + fragment="NumPy_Backward_Compatibility", + fragment="NumPy_Macros") { /* Test whether a python object is contiguous. If array is * contiguous, return 1. Otherwise, set the python error string and @@ -273,7 +273,7 @@ if (!array_is_contiguous(ary)) { PyErr_SetString(PyExc_TypeError, - "Array must be contiguous. A non-contiguous array was given"); + "Array must be contiguous. A non-contiguous array was given"); contiguous = 0; } return contiguous; @@ -289,8 +289,8 @@ if (!array_is_native(ary)) { PyErr_SetString(PyExc_TypeError, - "Array must have native byteorder. " - "A byte-swapped array was given"); + "Array must have native byteorder. " + "A byte-swapped array was given"); native = 0; } return native; @@ -306,8 +306,8 @@ if (array_numdims(ary) != exact_dimensions) { PyErr_Format(PyExc_TypeError, - "Array must have %d dimensions. Given array has %d dimensions", - exact_dimensions, array_numdims(ary)); + "Array must have %d dimensions. Given array has %d dimensions", + exact_dimensions, array_numdims(ary)); success = 0; } return success; @@ -328,21 +328,21 @@ { if (array_numdims(ary) == exact_dimensions[i]) { - success = 1; + success = 1; } } if (!success) { for (i = 0; i < n-1; i++) { - sprintf(s, "%d, ", exact_dimensions[i]); - strcat(dims_str,s); + sprintf(s, "%d, ", exact_dimensions[i]); + strcat(dims_str,s); } sprintf(s, " or %d", exact_dimensions[n-1]); strcat(dims_str,s); PyErr_Format(PyExc_TypeError, - "Array must have %s dimensions. Given array has %d dimensions", - dims_str, array_numdims(ary)); + "Array must have %s dimensions. Given array has %d dimensions", + dims_str, array_numdims(ary)); } return success; } @@ -363,35 +363,35 @@ { if (size[i] != -1 && size[i] != array_size(ary,i)) { - success = 0; + success = 0; } } if (!success) { for (i = 0; i < n; i++) { - if (size[i] == -1) - { - sprintf(s, "*,"); - } - else - { - sprintf(s, "%ld,", (long int)size[i]); - } - strcat(desired_dims,s); + if (size[i] == -1) + { + sprintf(s, "*,"); + } + else + { + sprintf(s, "%ld,", (long int)size[i]); + } + strcat(desired_dims,s); } len = strlen(desired_dims); desired_dims[len-1] = ']'; for (i = 0; i < n; i++) { - sprintf(s, "%ld,", (long int)array_size(ary,i)); - strcat(actual_dims,s); + sprintf(s, "%ld,", (long int)array_size(ary,i)); + strcat(actual_dims,s); } len = strlen(actual_dims); actual_dims[len-1] = ']'; PyErr_Format(PyExc_TypeError, - "Array must have shape of %s. Given array has shape of %s", - desired_dims, actual_dims); + "Array must have shape of %s. 
Given array has shape of %s", + desired_dims, actual_dims); } return success; } @@ -418,11 +418,11 @@ /* Combine all NumPy fragments into one for convenience */ %fragment("NumPy_Fragments", "header", - fragment="NumPy_Backward_Compatibility", - fragment="NumPy_Macros", - fragment="NumPy_Utilities", - fragment="NumPy_Object_to_Array", - fragment="NumPy_Array_Requirements") { } + fragment="NumPy_Backward_Compatibility", + fragment="NumPy_Macros", + fragment="NumPy_Utilities", + fragment="NumPy_Object_to_Array", + fragment="NumPy_Array_Requirements") { } /* End John Hunter translation (with modifications by Bill Spotz) */ @@ -537,19 +537,19 @@ /* Typemap suite for (DATA_TYPE IN_ARRAY1[ANY]) */ %typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, - fragment="NumPy_Macros") + fragment="NumPy_Macros") (DATA_TYPE IN_ARRAY1[ANY]) { $1 = is_array($input) || PySequence_Check($input); } %typemap(in, - fragment="NumPy_Fragments") + fragment="NumPy_Fragments") (DATA_TYPE IN_ARRAY1[ANY]) (PyArrayObject* array=NULL, int is_new_object=0) { npy_intp size[1] = { $1_dim0 }; array = obj_to_array_contiguous_allow_conversion($input, DATA_TYPECODE, - &is_new_object); + &is_new_object); if (!array || !require_dimensions(array, 1) || !require_size(array, size, 1)) SWIG_fail; $1 = ($1_ltype) array_data(array); @@ -564,19 +564,19 @@ /* Typemap suite for (DATA_TYPE* IN_ARRAY1, DIM_TYPE DIM1) */ %typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, - fragment="NumPy_Macros") + fragment="NumPy_Macros") (DATA_TYPE* IN_ARRAY1, DIM_TYPE DIM1) { $1 = is_array($input) || PySequence_Check($input); } %typemap(in, - fragment="NumPy_Fragments") + fragment="NumPy_Fragments") (DATA_TYPE* IN_ARRAY1, DIM_TYPE DIM1) (PyArrayObject* array=NULL, int is_new_object=0) { npy_intp size[1] = { -1 }; array = obj_to_array_contiguous_allow_conversion($input, DATA_TYPECODE, - &is_new_object); + &is_new_object); if (!array || !require_dimensions(array, 1) || !require_size(array, size, 1)) SWIG_fail; $1 = (DATA_TYPE*) array_data(array); @@ -592,19 +592,19 @@ /* Typemap suite for (DIM_TYPE DIM1, DATA_TYPE* IN_ARRAY1) */ %typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, - fragment="NumPy_Macros") + fragment="NumPy_Macros") (DIM_TYPE DIM1, DATA_TYPE* IN_ARRAY1) { $1 = is_array($input) || PySequence_Check($input); } %typemap(in, - fragment="NumPy_Fragments") + fragment="NumPy_Fragments") (DIM_TYPE DIM1, DATA_TYPE* IN_ARRAY1) (PyArrayObject* array=NULL, int is_new_object=0) { npy_intp size[1] = {-1}; array = obj_to_array_contiguous_allow_conversion($input, DATA_TYPECODE, - &is_new_object); + &is_new_object); if (!array || !require_dimensions(array, 1) || !require_size(array, size, 1)) SWIG_fail; $1 = (DIM_TYPE) array_size(array,0); @@ -620,19 +620,19 @@ /* Typemap suite for (DATA_TYPE IN_ARRAY2[ANY][ANY]) */ %typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, - fragment="NumPy_Macros") + fragment="NumPy_Macros") (DATA_TYPE IN_ARRAY2[ANY][ANY]) { $1 = is_array($input) || PySequence_Check($input); } %typemap(in, - fragment="NumPy_Fragments") + fragment="NumPy_Fragments") (DATA_TYPE IN_ARRAY2[ANY][ANY]) (PyArrayObject* array=NULL, int is_new_object=0) { npy_intp size[2] = { $1_dim0, $1_dim1 }; array = obj_to_array_contiguous_allow_conversion($input, DATA_TYPECODE, - &is_new_object); + &is_new_object); if (!array || !require_dimensions(array, 2) || !require_size(array, size, 2)) SWIG_fail; $1 = ($1_ltype) array_data(array); @@ -647,19 +647,19 @@ /* Typemap suite for (DATA_TYPE* IN_ARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2) */ %typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, - fragment="NumPy_Macros") + 
fragment="NumPy_Macros") (DATA_TYPE* IN_ARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2) { $1 = is_array($input) || PySequence_Check($input); } %typemap(in, - fragment="NumPy_Fragments") + fragment="NumPy_Fragments") (DATA_TYPE* IN_ARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2) (PyArrayObject* array=NULL, int is_new_object=0) { npy_intp size[2] = { -1, -1 }; array = obj_to_array_contiguous_allow_conversion($input, DATA_TYPECODE, - &is_new_object); + &is_new_object); if (!array || !require_dimensions(array, 2) || !require_size(array, size, 2)) SWIG_fail; $1 = (DATA_TYPE*) array_data(array); @@ -676,19 +676,19 @@ /* Typemap suite for (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* IN_ARRAY2) */ %typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, - fragment="NumPy_Macros") + fragment="NumPy_Macros") (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* IN_ARRAY2) { $1 = is_array($input) || PySequence_Check($input); } %typemap(in, - fragment="NumPy_Fragments") + fragment="NumPy_Fragments") (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* IN_ARRAY2) (PyArrayObject* array=NULL, int is_new_object=0) { npy_intp size[2] = { -1, -1 }; array = obj_to_array_contiguous_allow_conversion($input, DATA_TYPECODE, - &is_new_object); + &is_new_object); if (!array || !require_dimensions(array, 2) || !require_size(array, size, 2)) SWIG_fail; $1 = (DIM_TYPE) array_size(array,0); @@ -705,19 +705,19 @@ /* Typemap suite for (DATA_TYPE* IN_FARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2) */ %typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, - fragment="NumPy_Macros") + fragment="NumPy_Macros") (DATA_TYPE* IN_FARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2) { $1 = is_array($input) || PySequence_Check($input); } %typemap(in, - fragment="NumPy_Fragments") + fragment="NumPy_Fragments") (DATA_TYPE* IN_FARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2) (PyArrayObject* array=NULL, int is_new_object=0) { npy_intp size[2] = { -1, -1 }; array = obj_to_array_contiguous_allow_conversion($input, DATA_TYPECODE, - &is_new_object); + &is_new_object); if (!array || !require_dimensions(array, 2) || !require_size(array, size, 2) || !require_fortran(array)) SWIG_fail; $1 = (DATA_TYPE*) array_data(array); @@ -734,19 +734,19 @@ /* Typemap suite for (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* IN_FARRAY2) */ %typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, - fragment="NumPy_Macros") + fragment="NumPy_Macros") (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* IN_FARRAY2) { $1 = is_array($input) || PySequence_Check($input); } %typemap(in, - fragment="NumPy_Fragments") + fragment="NumPy_Fragments") (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* IN_FARRAY2) (PyArrayObject* array=NULL, int is_new_object=0) { npy_intp size[2] = { -1, -1 }; array = obj_to_array_contiguous_allow_conversion($input, DATA_TYPECODE, - &is_new_object); + &is_new_object); if (!array || !require_dimensions(array, 2) || !require_size(array, size, 2) || !require_fortran(array)) SWIG_fail; $1 = (DIM_TYPE) array_size(array,0); @@ -763,19 +763,19 @@ /* Typemap suite for (DATA_TYPE IN_ARRAY3[ANY][ANY][ANY]) */ %typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, - fragment="NumPy_Macros") + fragment="NumPy_Macros") (DATA_TYPE IN_ARRAY3[ANY][ANY][ANY]) { $1 = is_array($input) || PySequence_Check($input); } %typemap(in, - fragment="NumPy_Fragments") + fragment="NumPy_Fragments") (DATA_TYPE IN_ARRAY3[ANY][ANY][ANY]) (PyArrayObject* array=NULL, int is_new_object=0) { npy_intp size[3] = { $1_dim0, $1_dim1, $1_dim2 }; array = obj_to_array_contiguous_allow_conversion($input, DATA_TYPECODE, - &is_new_object); + &is_new_object); if (!array || !require_dimensions(array, 3) || !require_size(array, size, 3)) SWIG_fail; $1 
= ($1_ltype) array_data(array); @@ -791,19 +791,19 @@ * DIM_TYPE DIM3) */ %typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, - fragment="NumPy_Macros") + fragment="NumPy_Macros") (DATA_TYPE* IN_ARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3) { $1 = is_array($input) || PySequence_Check($input); } %typemap(in, - fragment="NumPy_Fragments") + fragment="NumPy_Fragments") (DATA_TYPE* IN_ARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3) (PyArrayObject* array=NULL, int is_new_object=0) { npy_intp size[3] = { -1, -1, -1 }; array = obj_to_array_contiguous_allow_conversion($input, DATA_TYPECODE, - &is_new_object); + &is_new_object); if (!array || !require_dimensions(array, 3) || !require_size(array, size, 3)) SWIG_fail; $1 = (DATA_TYPE*) array_data(array); @@ -822,19 +822,19 @@ * DATA_TYPE* IN_ARRAY3) */ %typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, - fragment="NumPy_Macros") + fragment="NumPy_Macros") (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DATA_TYPE* IN_ARRAY3) { $1 = is_array($input) || PySequence_Check($input); } %typemap(in, - fragment="NumPy_Fragments") + fragment="NumPy_Fragments") (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DATA_TYPE* IN_ARRAY3) (PyArrayObject* array=NULL, int is_new_object=0) { npy_intp size[3] = { -1, -1, -1 }; array = obj_to_array_contiguous_allow_conversion($input, DATA_TYPECODE, - &is_new_object); + &is_new_object); if (!array || !require_dimensions(array, 3) || !require_size(array, size, 3)) SWIG_fail; $1 = (DIM_TYPE) array_size(array,0); @@ -853,19 +853,19 @@ * DIM_TYPE DIM3) */ %typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, - fragment="NumPy_Macros") + fragment="NumPy_Macros") (DATA_TYPE* IN_FARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3) { $1 = is_array($input) || PySequence_Check($input); } %typemap(in, - fragment="NumPy_Fragments") + fragment="NumPy_Fragments") (DATA_TYPE* IN_FARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3) (PyArrayObject* array=NULL, int is_new_object=0) { npy_intp size[3] = { -1, -1, -1 }; array = obj_to_array_contiguous_allow_conversion($input, DATA_TYPECODE, - &is_new_object); + &is_new_object); if (!array || !require_dimensions(array, 3) || !require_size(array, size, 3) | !require_fortran(array)) SWIG_fail; $1 = (DATA_TYPE*) array_data(array); @@ -884,19 +884,19 @@ * DATA_TYPE* IN_FARRAY3) */ %typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, - fragment="NumPy_Macros") + fragment="NumPy_Macros") (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DATA_TYPE* IN_FARRAY3) { $1 = is_array($input) || PySequence_Check($input); } %typemap(in, - fragment="NumPy_Fragments") + fragment="NumPy_Fragments") (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DATA_TYPE* IN_FARRAY3) (PyArrayObject* array=NULL, int is_new_object=0) { npy_intp size[3] = { -1, -1, -1 }; array = obj_to_array_contiguous_allow_conversion($input, DATA_TYPECODE, - &is_new_object); + &is_new_object); if (!array || !require_dimensions(array, 3) || !require_size(array, size, 3) || !require_fortran(array)) SWIG_fail; $1 = (DIM_TYPE) array_size(array,0); @@ -918,14 +918,14 @@ /* Typemap suite for (DATA_TYPE INPLACE_ARRAY1[ANY]) */ %typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, - fragment="NumPy_Macros") + fragment="NumPy_Macros") (DATA_TYPE INPLACE_ARRAY1[ANY]) { $1 = is_array($input) && PyArray_EquivTypenums(array_type($input), - DATA_TYPECODE); + DATA_TYPECODE); } %typemap(in, - fragment="NumPy_Fragments") + fragment="NumPy_Fragments") (DATA_TYPE INPLACE_ARRAY1[ANY]) (PyArrayObject* array=NULL) { @@ -939,14 +939,14 @@ /* Typemap suite for (DATA_TYPE* INPLACE_ARRAY1, DIM_TYPE DIM1) */ 
%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, - fragment="NumPy_Macros") + fragment="NumPy_Macros") (DATA_TYPE* INPLACE_ARRAY1, DIM_TYPE DIM1) { $1 = is_array($input) && PyArray_EquivTypenums(array_type($input), - DATA_TYPECODE); + DATA_TYPECODE); } %typemap(in, - fragment="NumPy_Fragments") + fragment="NumPy_Fragments") (DATA_TYPE* INPLACE_ARRAY1, DIM_TYPE DIM1) (PyArrayObject* array=NULL, int i=1) { @@ -961,14 +961,14 @@ /* Typemap suite for (DIM_TYPE DIM1, DATA_TYPE* INPLACE_ARRAY1) */ %typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, - fragment="NumPy_Macros") + fragment="NumPy_Macros") (DIM_TYPE DIM1, DATA_TYPE* INPLACE_ARRAY1) { $1 = is_array($input) && PyArray_EquivTypenums(array_type($input), - DATA_TYPECODE); + DATA_TYPECODE); } %typemap(in, - fragment="NumPy_Fragments") + fragment="NumPy_Fragments") (DIM_TYPE DIM1, DATA_TYPE* INPLACE_ARRAY1) (PyArrayObject* array=NULL, int i=0) { @@ -983,14 +983,14 @@ /* Typemap suite for (DATA_TYPE INPLACE_ARRAY2[ANY][ANY]) */ %typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, - fragment="NumPy_Macros") + fragment="NumPy_Macros") (DATA_TYPE INPLACE_ARRAY2[ANY][ANY]) { $1 = is_array($input) && PyArray_EquivTypenums(array_type($input), - DATA_TYPECODE); + DATA_TYPECODE); } %typemap(in, - fragment="NumPy_Fragments") + fragment="NumPy_Fragments") (DATA_TYPE INPLACE_ARRAY2[ANY][ANY]) (PyArrayObject* array=NULL) { @@ -1004,14 +1004,14 @@ /* Typemap suite for (DATA_TYPE* INPLACE_ARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2) */ %typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, - fragment="NumPy_Macros") + fragment="NumPy_Macros") (DATA_TYPE* INPLACE_ARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2) { $1 = is_array($input) && PyArray_EquivTypenums(array_type($input), - DATA_TYPECODE); + DATA_TYPECODE); } %typemap(in, - fragment="NumPy_Fragments") + fragment="NumPy_Fragments") (DATA_TYPE* INPLACE_ARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2) (PyArrayObject* array=NULL) { @@ -1026,14 +1026,14 @@ /* Typemap suite for (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* INPLACE_ARRAY2) */ %typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, - fragment="NumPy_Macros") + fragment="NumPy_Macros") (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* INPLACE_ARRAY2) { $1 = is_array($input) && PyArray_EquivTypenums(array_type($input), - DATA_TYPECODE); + DATA_TYPECODE); } %typemap(in, - fragment="NumPy_Fragments") + fragment="NumPy_Fragments") (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* INPLACE_ARRAY2) (PyArrayObject* array=NULL) { @@ -1048,14 +1048,14 @@ /* Typemap suite for (DATA_TYPE* INPLACE_FARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2) */ %typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, - fragment="NumPy_Macros") + fragment="NumPy_Macros") (DATA_TYPE* INPLACE_FARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2) { $1 = is_array($input) && PyArray_EquivTypenums(array_type($input), - DATA_TYPECODE); + DATA_TYPECODE); } %typemap(in, - fragment="NumPy_Fragments") + fragment="NumPy_Fragments") (DATA_TYPE* INPLACE_FARRAY2, DIM_TYPE DIM1, DIM_TYPE DIM2) (PyArrayObject* array=NULL) { @@ -1070,14 +1070,14 @@ /* Typemap suite for (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* INPLACE_FARRAY2) */ %typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, - fragment="NumPy_Macros") + fragment="NumPy_Macros") (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* INPLACE_FARRAY2) { $1 = is_array($input) && PyArray_EquivTypenums(array_type($input), - DATA_TYPECODE); + DATA_TYPECODE); } %typemap(in, - fragment="NumPy_Fragments") + fragment="NumPy_Fragments") (DIM_TYPE DIM1, DIM_TYPE DIM2, DATA_TYPE* INPLACE_FARRAY2) (PyArrayObject* array=NULL) { @@ -1092,14 +1092,14 @@ /* Typemap suite for (DATA_TYPE INPLACE_ARRAY3[ANY][ANY][ANY]) */ 
%typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, - fragment="NumPy_Macros") + fragment="NumPy_Macros") (DATA_TYPE INPLACE_ARRAY3[ANY][ANY][ANY]) { $1 = is_array($input) && PyArray_EquivTypenums(array_type($input), - DATA_TYPECODE); + DATA_TYPECODE); } %typemap(in, - fragment="NumPy_Fragments") + fragment="NumPy_Fragments") (DATA_TYPE INPLACE_ARRAY3[ANY][ANY][ANY]) (PyArrayObject* array=NULL) { @@ -1114,14 +1114,14 @@ * DIM_TYPE DIM3) */ %typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, - fragment="NumPy_Macros") + fragment="NumPy_Macros") (DATA_TYPE* INPLACE_ARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3) { $1 = is_array($input) && PyArray_EquivTypenums(array_type($input), - DATA_TYPECODE); + DATA_TYPECODE); } %typemap(in, - fragment="NumPy_Fragments") + fragment="NumPy_Fragments") (DATA_TYPE* INPLACE_ARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3) (PyArrayObject* array=NULL) { @@ -1138,14 +1138,14 @@ * DATA_TYPE* INPLACE_ARRAY3) */ %typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, - fragment="NumPy_Macros") + fragment="NumPy_Macros") (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DATA_TYPE* INPLACE_ARRAY3) { $1 = is_array($input) && PyArray_EquivTypenums(array_type($input), - DATA_TYPECODE); + DATA_TYPECODE); } %typemap(in, - fragment="NumPy_Fragments") + fragment="NumPy_Fragments") (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DATA_TYPE* INPLACE_ARRAY3) (PyArrayObject* array=NULL) { @@ -1162,14 +1162,14 @@ * DIM_TYPE DIM3) */ %typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, - fragment="NumPy_Macros") + fragment="NumPy_Macros") (DATA_TYPE* INPLACE_FARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3) { $1 = is_array($input) && PyArray_EquivTypenums(array_type($input), - DATA_TYPECODE); + DATA_TYPECODE); } %typemap(in, - fragment="NumPy_Fragments") + fragment="NumPy_Fragments") (DATA_TYPE* INPLACE_FARRAY3, DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3) (PyArrayObject* array=NULL) { @@ -1186,14 +1186,14 @@ * DATA_TYPE* INPLACE_FARRAY3) */ %typecheck(SWIG_TYPECHECK_DOUBLE_ARRAY, - fragment="NumPy_Macros") + fragment="NumPy_Macros") (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DATA_TYPE* INPLACE_FARRAY3) { $1 = is_array($input) && PyArray_EquivTypenums(array_type($input), - DATA_TYPECODE); + DATA_TYPECODE); } %typemap(in, - fragment="NumPy_Fragments") + fragment="NumPy_Fragments") (DIM_TYPE DIM1, DIM_TYPE DIM2, DIM_TYPE DIM3, DATA_TYPE* INPLACE_FARRAY3) (PyArrayObject* array=NULL) { @@ -1213,7 +1213,7 @@ /* Typemap suite for (DATA_TYPE ARGOUT_ARRAY1[ANY]) */ %typemap(in,numinputs=0, - fragment="NumPy_Backward_Compatibility,NumPy_Macros") + fragment="NumPy_Backward_Compatibility,NumPy_Macros") (DATA_TYPE ARGOUT_ARRAY1[ANY]) (PyObject * array = NULL) { @@ -1231,7 +1231,7 @@ /* Typemap suite for (DATA_TYPE* ARGOUT_ARRAY1, DIM_TYPE DIM1) */ %typemap(in,numinputs=1, - fragment="NumPy_Fragments") + fragment="NumPy_Fragments") (DATA_TYPE* ARGOUT_ARRAY1, DIM_TYPE DIM1) (PyObject * array = NULL) { @@ -1240,8 +1240,8 @@ { char* typestring = pytype_string($input); PyErr_Format(PyExc_TypeError, - "Int dimension expected. '%s' given.", - typestring); + "Int dimension expected. '%s' given.", + typestring); SWIG_fail; } $2 = (DIM_TYPE) PyInt_AsLong($input); @@ -1259,7 +1259,7 @@ /* Typemap suite for (DIM_TYPE DIM1, DATA_TYPE* ARGOUT_ARRAY1) */ %typemap(in,numinputs=1, - fragment="NumPy_Fragments") + fragment="NumPy_Fragments") (DIM_TYPE DIM1, DATA_TYPE* ARGOUT_ARRAY1) (PyObject * array = NULL) { @@ -1268,8 +1268,8 @@ { char* typestring = pytype_string($input); PyErr_Format(PyExc_TypeError, - "Int dimension expected. 
'%s' given.", - typestring); + "Int dimension expected. '%s' given.", + typestring); SWIG_fail; } $1 = (DIM_TYPE) PyInt_AsLong($input); @@ -1287,7 +1287,7 @@ /* Typemap suite for (DATA_TYPE ARGOUT_ARRAY2[ANY][ANY]) */ %typemap(in,numinputs=0, - fragment="NumPy_Backward_Compatibility,NumPy_Macros") + fragment="NumPy_Backward_Compatibility,NumPy_Macros") (DATA_TYPE ARGOUT_ARRAY2[ANY][ANY]) (PyObject * array = NULL) { @@ -1305,7 +1305,7 @@ /* Typemap suite for (DATA_TYPE ARGOUT_ARRAY3[ANY][ANY][ANY]) */ %typemap(in,numinputs=0, - fragment="NumPy_Backward_Compatibility,NumPy_Macros") + fragment="NumPy_Backward_Compatibility,NumPy_Macros") (DATA_TYPE ARGOUT_ARRAY3[ANY][ANY][ANY]) (PyObject * array = NULL) { @@ -1334,7 +1334,7 @@ $2 = &dim_temp; } %typemap(argout, - fragment="NumPy_Backward_Compatibility") + fragment="NumPy_Backward_Compatibility") (DATA_TYPE** ARGOUTVIEW_ARRAY1, DIM_TYPE* DIM1) { npy_intp dims[1] = { *$2 }; @@ -1353,7 +1353,7 @@ $2 = &data_temp; } %typemap(argout, - fragment="NumPy_Backward_Compatibility") + fragment="NumPy_Backward_Compatibility") (DIM_TYPE* DIM1, DATA_TYPE** ARGOUTVIEW_ARRAY1) { npy_intp dims[1] = { *$1 }; @@ -1373,7 +1373,7 @@ $3 = &dim2_temp; } %typemap(argout, - fragment="NumPy_Backward_Compatibility") + fragment="NumPy_Backward_Compatibility") (DATA_TYPE** ARGOUTVIEW_ARRAY2, DIM_TYPE* DIM1, DIM_TYPE* DIM2) { npy_intp dims[2] = { *$2, *$3 }; @@ -1393,7 +1393,7 @@ $3 = &data_temp; } %typemap(argout, - fragment="NumPy_Backward_Compatibility") + fragment="NumPy_Backward_Compatibility") (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DATA_TYPE** ARGOUTVIEW_ARRAY2) { npy_intp dims[2] = { *$1, *$2 }; @@ -1413,7 +1413,7 @@ $3 = &dim2_temp; } %typemap(argout, - fragment="NumPy_Backward_Compatibility,NumPy_Array_Requirements") + fragment="NumPy_Backward_Compatibility,NumPy_Array_Requirements") (DATA_TYPE** ARGOUTVIEW_FARRAY2, DIM_TYPE* DIM1, DIM_TYPE* DIM2) { npy_intp dims[2] = { *$2, *$3 }; @@ -1434,7 +1434,7 @@ $3 = &data_temp; } %typemap(argout, - fragment="NumPy_Backward_Compatibility,NumPy_Array_Requirements") + fragment="NumPy_Backward_Compatibility,NumPy_Array_Requirements") (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DATA_TYPE** ARGOUTVIEW_FARRAY2) { npy_intp dims[2] = { *$1, *$2 }; @@ -1457,7 +1457,7 @@ $4 = &dim3_temp; } %typemap(argout, - fragment="NumPy_Backward_Compatibility") + fragment="NumPy_Backward_Compatibility") (DATA_TYPE** ARGOUTVIEW_ARRAY3, DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3) { npy_intp dims[3] = { *$2, *$3, *$4 }; @@ -1479,7 +1479,7 @@ $4 = &data_temp; } %typemap(argout, - fragment="NumPy_Backward_Compatibility") + fragment="NumPy_Backward_Compatibility") (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DATA_TYPE** ARGOUTVIEW_ARRAY3) { npy_intp dims[3] = { *$1, *$2, *$3 }; @@ -1501,7 +1501,7 @@ $4 = &dim3_temp; } %typemap(argout, - fragment="NumPy_Backward_Compatibility,NumPy_Array_Requirements") + fragment="NumPy_Backward_Compatibility,NumPy_Array_Requirements") (DATA_TYPE** ARGOUTVIEW_FARRAY3, DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3) { npy_intp dims[3] = { *$2, *$3, *$4 }; @@ -1524,7 +1524,7 @@ $4 = &data_temp; } %typemap(argout, - fragment="NumPy_Backward_Compatibility,NumPy_Array_Requirements") + fragment="NumPy_Backward_Compatibility,NumPy_Array_Requirements") (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DATA_TYPE** ARGOUTVIEW_FARRAY3) { npy_intp dims[3] = { *$1, *$2, *$3 }; From numpy-svn at scipy.org Thu May 1 19:56:10 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Thu, 1 May 2008 18:56:10 -0500 (CDT) Subject: 
[Numpy-svn] r5124 - in trunk/numpy/ma: . tests Message-ID: <20080501235610.6EE1F39C0F1@new.scipy.org> Author: pierregm Date: 2008-05-01 18:56:07 -0500 (Thu, 01 May 2008) New Revision: 5124 Modified: trunk/numpy/ma/core.py trunk/numpy/ma/mrecords.py trunk/numpy/ma/tests/test_subclassing.py Log: core : clean up update_from Modified: trunk/numpy/ma/core.py =================================================================== --- trunk/numpy/ma/core.py 2008-05-01 21:34:11 UTC (rev 5123) +++ trunk/numpy/ma/core.py 2008-05-01 23:56:07 UTC (rev 5124) @@ -1170,7 +1170,7 @@ # Process data............ _data = narray(data, dtype=dtype, copy=copy, subok=True, ndmin=ndmin) _baseclass = getattr(data, '_baseclass', type(_data)) - _basedict = getattr(data, '_basedict', getattr(data, '__dict__', None)) + _basedict = getattr(data, '_basedict', getattr(data, '__dict__', {})) if not isinstance(data, MaskedArray) or not subok: _data = _data.view(cls) else: @@ -1225,26 +1225,27 @@ # def _update_from(self, obj): """Copies some attributes of obj to self. - """ - self._hardmask = getattr(obj, '_hardmask', False) - self._sharedmask = getattr(obj, '_sharedmask', False) + """ if obj is not None: - self._baseclass = getattr(obj, '_baseclass', type(obj)) + _baseclass = type(obj) else: - self._baseclass = ndarray - self._fill_value = getattr(obj, '_fill_value', None) + _baseclass = ndarray + _basedict = getattr(obj,'_basedict',getattr(obj,'__dict__',{})) + _dict = dict(_fill_value=getattr(obj, '_fill_value', None), + _hardmask=getattr(obj, '_hardmask', False), + _sharedmask=getattr(obj, '_sharedmask', False), + _baseclass=getattr(obj,'_baseclass',_baseclass), + _basedict=_basedict,) + self.__dict__.update(_dict) + self.__dict__.update(_basedict) return #........................ def __array_finalize__(self,obj): """Finalizes the masked array. """ # Get main attributes ......... + self._update_from(obj) self._mask = getattr(obj, '_mask', nomask) - self._update_from(obj) - # Update special attributes ... - self._basedict = getattr(obj, '_basedict', getattr(obj, '__dict__', None)) - if self._basedict is not None: - self.__dict__.update(self._basedict) # Finalize the mask ........... if self._mask is not nomask: self._mask.shape = self.shape @@ -1305,6 +1306,7 @@ # if getmask(indx) is not nomask: # msg = "Masked arrays must be filled before they can be used as indices!" # raise IndexError, msg + dtest = ndarray.__getitem__(self, indx) dout = ndarray.__getitem__(self.view(ndarray), indx) m = self._mask if not getattr(dout,'ndim', False): Modified: trunk/numpy/ma/mrecords.py =================================================================== --- trunk/numpy/ma/mrecords.py 2008-05-01 21:34:11 UTC (rev 5123) +++ trunk/numpy/ma/mrecords.py 2008-05-01 23:56:07 UTC (rev 5124) @@ -180,9 +180,8 @@ self.__dict__.update(attrdict) # Finalize as a regular maskedarray ..... # Update special attributes ... - self._basedict = getattr(obj, '_basedict', getattr(obj,'__dict__',None)) - if self._basedict is not None: - self.__dict__.update(self._basedict) + self._basedict = getattr(obj, '_basedict', getattr(obj,'__dict__',{})) + self.__dict__.update(self._basedict) return #...................................................... 
def _getdata(self): Modified: trunk/numpy/ma/tests/test_subclassing.py =================================================================== --- trunk/numpy/ma/tests/test_subclassing.py 2008-05-01 21:34:11 UTC (rev 5123) +++ trunk/numpy/ma/tests/test_subclassing.py 2008-05-01 23:56:07 UTC (rev 5124) @@ -39,7 +39,7 @@ subarray = SubArray class MSubArray(SubArray,MaskedArray): - def __new__(cls, data, info=None, mask=nomask): + def __new__(cls, data, info={}, mask=nomask): subarr = SubArray(data, info) _data = MaskedArray.__new__(cls, data=subarr, mask=mask) _data.info = subarr.info From numpy-svn at scipy.org Thu May 1 20:04:30 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Thu, 1 May 2008 19:04:30 -0500 (CDT) Subject: [Numpy-svn] r5125 - trunk/numpy/ma Message-ID: <20080502000430.2FB1739C135@new.scipy.org> Author: pierregm Date: 2008-05-01 19:04:28 -0500 (Thu, 01 May 2008) New Revision: 5125 Modified: trunk/numpy/ma/core.py Log: Modified: trunk/numpy/ma/core.py =================================================================== --- trunk/numpy/ma/core.py 2008-05-01 23:56:07 UTC (rev 5124) +++ trunk/numpy/ma/core.py 2008-05-02 00:04:28 UTC (rev 5125) @@ -1306,7 +1306,6 @@ # if getmask(indx) is not nomask: # msg = "Masked arrays must be filled before they can be used as indices!" # raise IndexError, msg - dtest = ndarray.__getitem__(self, indx) dout = ndarray.__getitem__(self.view(ndarray), indx) m = self._mask if not getattr(dout,'ndim', False): From numpy-svn at scipy.org Mon May 5 15:22:08 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Mon, 5 May 2008 14:22:08 -0500 (CDT) Subject: [Numpy-svn] r5126 - in trunk/numpy/ma: . tests Message-ID: <20080505192208.B966539C330@new.scipy.org> Author: pierregm Date: 2008-05-05 14:22:01 -0500 (Mon, 05 May 2008) New Revision: 5126 Modified: trunk/numpy/ma/core.py trunk/numpy/ma/tests/test_core.py Log: core : force .compressed() to return a type(_baseclass) object (usually a ndarray) : fixed a bug in .compressed() when the _baseclass is a matrix Modified: trunk/numpy/ma/core.py =================================================================== --- trunk/numpy/ma/core.py 2008-05-02 00:04:28 UTC (rev 5125) +++ trunk/numpy/ma/core.py 2008-05-05 19:22:01 UTC (rev 5126) @@ -1307,6 +1307,10 @@ # msg = "Masked arrays must be filled before they can be used as indices!" # raise IndexError, msg dout = ndarray.__getitem__(self.view(ndarray), indx) + # We could directly use ndarray.__getitem__ on self... + # But then we would have to modify __array_finalize__ to prevent the + # mask of being reshaped if it hasn't been set up properly yet... + # So it's easier to stick to the current version m = self._mask if not getattr(dout,'ndim', False): # Just a scalar............ @@ -1577,10 +1581,9 @@ """Return a 1-D array of all the non-masked data. 
""" - data = ndarray.ravel(self._data).view(type(self)) - data._update_from(self) + data = ndarray.ravel(self._data) if self._mask is not nomask: - data = data[numpy.logical_not(ndarray.ravel(self._mask))] + data = data.compress(numpy.logical_not(ndarray.ravel(self._mask))) return data Modified: trunk/numpy/ma/tests/test_core.py =================================================================== --- trunk/numpy/ma/tests/test_core.py 2008-05-02 00:04:28 UTC (rev 5125) +++ trunk/numpy/ma/tests/test_core.py 2008-05-05 19:22:01 UTC (rev 5126) @@ -1342,6 +1342,13 @@ # Checs that small_mask is preserved a = array([1,2,3,4],mask=[0,0,0,0],shrink=False) assert_equal(a.ravel()._mask, [0,0,0,0]) + # Test that the fill_value is preserved + a.fill_value = -99 + a.shape = (2,2) + ar = a.ravel() + assert_equal(ar._mask, [0,0,0,0]) + assert_equal(ar._data, [1,2,3,4]) + assert_equal(ar.fill_value, -99) def test_reshape(self): "Tests reshape" @@ -1358,11 +1365,18 @@ a = array([1,2,3,4],mask=[0,0,0,0]) b = a.compressed() assert_equal(b, a) - assert_equal(b._mask, nomask) a[0] = masked b = a.compressed() - assert_equal(b._data, [2,3,4]) - assert_equal(b._mask, nomask) + assert_equal(b, [2,3,4]) + # + a = array(numpy.matrix([1,2,3,4]), mask=[0,0,0,0]) + b = a.compressed() + assert_equal(b,a) + assert(isinstance(b,numpy.matrix)) + a[0,0] = masked + b = a.compressed() + assert_equal(b, [[2,3,4]]) + def test_tolist(self): "Tests to list" From numpy-svn at scipy.org Tue May 6 01:17:20 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Tue, 6 May 2008 00:17:20 -0500 (CDT) Subject: [Numpy-svn] r5127 - branches Message-ID: <20080506051720.7BBA839C07A@new.scipy.org> Author: cdavid Date: 2008-05-06 00:17:15 -0500 (Tue, 06 May 2008) New Revision: 5127 Added: branches/aligned_alloca/ Log: Start branch to experiment with aligned allocators. 
Copied: branches/aligned_alloca (from rev 5126, trunk) From numpy-svn at scipy.org Tue May 6 01:20:05 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Tue, 6 May 2008 00:20:05 -0500 (CDT) Subject: [Numpy-svn] r5128 - trunk Message-ID: <20080506052005.B3EFE39C056@new.scipy.org> Author: cdavid Date: 2008-05-06 00:20:02 -0500 (Tue, 06 May 2008) New Revision: 5128 Modified: trunk/ Log: Initialized merge tracking via "svnmerge" with revisions "1-5127" from http://svn.scipy.org/svn/numpy/branches/aligned_alloca Property changes on: trunk ___________________________________________________________________ Name: svnmerge-integrated - /branches/build_with_scons:1-4676 /branches/cleanconfig_rtm:1-4677 /branches/distutils-revamp:1-2752 /branches/distutils_scons_command:1-4619 /branches/multicore:1-3687 /branches/numpy.scons:1-4484 /trunk:1-2871 + /branches/aligned_alloca:1-5127 /branches/build_with_scons:1-4676 /branches/cleanconfig_rtm:1-4677 /branches/distutils-revamp:1-2752 /branches/distutils_scons_command:1-4619 /branches/multicore:1-3687 /branches/numpy.scons:1-4484 /trunk:1-2871 From numpy-svn at scipy.org Tue May 6 01:21:42 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Tue, 6 May 2008 00:21:42 -0500 (CDT) Subject: [Numpy-svn] r5129 - branches/aligned_alloca Message-ID: <20080506052142.8259939C056@new.scipy.org> Author: cdavid Date: 2008-05-06 00:21:39 -0500 (Tue, 06 May 2008) New Revision: 5129 Modified: branches/aligned_alloca/ Log: Initialized merge tracking via "svnmerge" with revisions "1-5128" from http://svn.scipy.org/svn/numpy/trunk Property changes on: branches/aligned_alloca ___________________________________________________________________ Name: svnmerge-integrated - /branches/build_with_scons:1-4676 /branches/cleanconfig_rtm:1-4677 /branches/distutils-revamp:1-2752 /branches/distutils_scons_command:1-4619 /branches/multicore:1-3687 /branches/numpy.scons:1-4484 /trunk:1-2871 + /branches/build_with_scons:1-4676 /branches/cleanconfig_rtm:1-4677 /branches/distutils-revamp:1-2752 /branches/distutils_scons_command:1-4619 /branches/multicore:1-3687 /branches/numpy.scons:1-4484 /trunk:1-5128 From numpy-svn at scipy.org Tue May 6 02:23:42 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Tue, 6 May 2008 01:23:42 -0500 (CDT) Subject: [Numpy-svn] r5130 - branches/aligned_alloca/numpy/core/tests Message-ID: <20080506062342.9A6D5C7C063@new.scipy.org> Author: cdavid Date: 2008-05-06 01:23:37 -0500 (Tue, 06 May 2008) New Revision: 5130 Added: branches/aligned_alloca/numpy/core/tests/test_renew.py Log: add tests for function which use PyDataMem_RENEW. 
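The C entry points that this benchmark targets are reached from Python roughly through ndarray.resize/np.resize, np.fromiter, np.fromstring/np.loadtxt and np.fromfile. A minimal sketch of touching each reallocating path, under those assumptions and with a hypothetical scratch-file name, separate from the committed test below:

    import numpy as np

    # PyArray_Resize: grow an existing buffer in place.
    a = np.arange(1000, dtype=np.float64)
    a.resize(1010, refcheck=False)

    # PyArray_FromIter: build an array from an iterator of unknown length.
    b = np.fromiter((0.5 * i for i in xrange(1000)), dtype=np.float64)

    # array_from_text: parse separated text of unknown length.
    c = np.fromstring("1 2 3 4 5", dtype=np.float64, sep=" ")

    # PyArray_FromFile: read binary data of unknown length back from disk.
    f = open("scratch.bin", "wb")        # hypothetical scratch file
    np.arange(100, dtype=np.float64).tofile(f)
    f.close()
    d = np.fromfile("scratch.bin", dtype=np.float64)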
Added: branches/aligned_alloca/numpy/core/tests/test_renew.py =================================================================== --- branches/aligned_alloca/numpy/core/tests/test_renew.py 2008-05-06 05:21:39 UTC (rev 5129) +++ branches/aligned_alloca/numpy/core/tests/test_renew.py 2008-05-06 06:23:37 UTC (rev 5130) @@ -0,0 +1,88 @@ +import os +import tempfile + +from numpy.testing import * + +set_local_path() +import numpy as np +restore_path() + +nrepeat = 10 +deflevel = 5 + +# Here are the C functions which use PyDataMem_RENEW: +# - PyArray_Resize (2 calls) +# - PyArray_FromIter (2 calls) +# - array_from_text (2 calls) +# - PyArray_FromFile (1 call) + +class TestResize(NumpyTestCase): + def _test(self, sz, shift): + y = np.random.randn(sz) + + print '%8.2f' % self.measure('np.resize(y, sz + shift)', nrepeat) + + def test_small_up(self, level = deflevel): + sz = 1024 * 1024 * 10 + self._test(sz, 10) + + def test_small_down(self, level = deflevel): + sz = 1024 * 1024 * 10 + self._test(sz, -10) + + def test_half_up(self, level = deflevel): + sz = 1024 * 1024 * 10 + self._test(sz, sz/2) + + def test_half_down(self, level = deflevel): + sz = 1024 * 1024 * 10 + self._test(sz, -sz/2) + +class TestFromIter(NumpyTestCase): + def _test(self, sz, rep = nrepeat): + fid, name = tempfile.mkstemp('ypyp') + f = os.fdopen(fid, 'w') + a = '\n'.join(['1', '2', '3', '4', '5']) + for i in xrange(sz): + f.writelines(a) + + print '%8.2f' % self.measure('f.seek(0); np.fromiter(f, np.float64)', rep) + + def test1(self, level = deflevel): + self._test(1000, 100) + + def test2(self, level = deflevel): + self._test(100000) + +class TestLoadText(NumpyTestCase): + def _test(self, sz): + fid, name = tempfile.mkstemp('ypyp') + f = os.fdopen(fid, 'w') + a = np.random.randn(sz) + np.savetxt(f, a) + + print '%8.2f' % self.measure('f.seek(0); np.loadtxt(f)', nrepeat) + + def test1(self, level = deflevel): + self._test(1000) + + def test2(self, level = deflevel): + self._test(10000) + +class TestFromFile(NumpyTestCase): + def _test(self, sz, nrep = nrepeat): + fid, name = tempfile.mkstemp('ypyp') + f = os.fdopen(fid, 'w') + a = np.random.randn(sz) + a.tofile(f) + + print '%8.2f' % self.measure('f.seek(0); np.fromfile(f)', nrep) + + def test1(self, level = deflevel): + self._test(100000, 100) + + def test2(self, level = deflevel): + self._test(1000000, 100) + +if __name__ == "__main__": + NumpyTest().test(verbosity = 10, level = 5) From numpy-svn at scipy.org Tue May 6 03:20:08 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Tue, 6 May 2008 02:20:08 -0500 (CDT) Subject: [Numpy-svn] r5131 - in branches/aligned_alloca/numpy/core: include/numpy src Message-ID: <20080506072008.A222C39C2C6@new.scipy.org> Author: cdavid Date: 2008-05-06 02:19:58 -0500 (Tue, 06 May 2008) New Revision: 5131 Modified: branches/aligned_alloca/numpy/core/include/numpy/ndarrayobject.h branches/aligned_alloca/numpy/core/src/arrayobject.c branches/aligned_alloca/numpy/core/src/multiarraymodule.c Log: Do not use PyDataMem_RENEW anymore if NOUSE_PYDATAMEM_RENEW is defined. 
Modified: branches/aligned_alloca/numpy/core/include/numpy/ndarrayobject.h =================================================================== --- branches/aligned_alloca/numpy/core/include/numpy/ndarrayobject.h 2008-05-06 06:23:37 UTC (rev 5130) +++ branches/aligned_alloca/numpy/core/include/numpy/ndarrayobject.h 2008-05-06 07:19:58 UTC (rev 5131) @@ -983,7 +983,13 @@ /* Data buffer */ #define PyDataMem_NEW(size) ((char *)malloc(size)) #define PyDataMem_FREE(ptr) free(ptr) + +#ifdef NOUSE_PYDATAMEM_RENEW +#define PyDataMem_RENEW(ptr,size) DO_NOT_USE_PYDATAMEM_RENEW +#define SYS_REALLOC(ptr, size) realloc((ptr), (size)) +#else #define PyDataMem_RENEW(ptr,size) ((char *)realloc(ptr,size)) +#endif #define NPY_USE_PYMEM 1 Modified: branches/aligned_alloca/numpy/core/src/arrayobject.c =================================================================== --- branches/aligned_alloca/numpy/core/src/arrayobject.c 2008-05-06 06:23:37 UTC (rev 5130) +++ branches/aligned_alloca/numpy/core/src/arrayobject.c 2008-05-06 07:19:58 UTC (rev 5131) @@ -5717,7 +5717,33 @@ return; } +void* _fake_realloc(void* ptr, size_t new_sz, size_t old_sz) +{ + void* nptr; + //fprintf(stderr, "Using %s\n", __func__); + if (ptr == NULL) { + return PyDataMem_NEW(new_sz); + } + + if (new_sz == 0) { + free(ptr); + return NULL; + } + + if (new_sz == old_sz) { + return ptr; + } + + nptr = PyDataMem_NEW(new_sz); + if (nptr == NULL) { + return NULL; + } + memcpy(nptr, ptr, new_sz > old_sz ? old_sz : new_sz); + free(ptr); + return nptr; +} + /*OBJECT_API Resize (reallocate data). Only works if nothing else is referencing this array and it is contiguous. @@ -5792,7 +5818,11 @@ if (newsize == 0) sd = self->descr->elsize; else sd = newsize * self->descr->elsize; /* Reallocate space if needed */ +#if NOUSE_PYDATAMEM_RENEW + new_data = _fake_realloc(self->data, sd, PyArray_SIZE(self) * self->descr->elsize); +#else new_data = PyDataMem_RENEW(self->data, sd); +#endif if (new_data == NULL) { PyErr_SetString(PyExc_MemoryError, "cannot allocate memory for array"); Modified: branches/aligned_alloca/numpy/core/src/multiarraymodule.c =================================================================== --- branches/aligned_alloca/numpy/core/src/multiarraymodule.c 2008-05-06 06:23:37 UTC (rev 5130) +++ branches/aligned_alloca/numpy/core/src/multiarraymodule.c 2008-05-06 07:19:58 UTC (rev 5131) @@ -6124,7 +6124,11 @@ dptr += dtype->elsize; if (num < 0 && thisbuf == size) { totalbytes += bytes; +#ifdef NOUSE_PYDATAMEM_RENEW + tmp = SYS_REALLOC(r->data, totalbytes); +#else tmp = PyDataMem_RENEW(r->data, totalbytes); +#endif if (tmp == NULL) { err = 1; break; @@ -6136,6 +6140,15 @@ if (skip_sep(&stream, clean_sep, stream_data) < 0) break; } +#ifdef NOUSE_PYDATAMEM_RENEW + tmp = _fake_realloc(r->data, (*nread)*dtype->elsize, + PyArray_SIZE(r) * dtype->elsize); + if (tmp == NULL) err=1; + else { + PyArray_DIM(r,0) = *nread; + r->data = tmp; + } +#else if (num < 0) { tmp = PyDataMem_RENEW(r->data, (*nread)*dtype->elsize); if (tmp == NULL) err=1; @@ -6144,6 +6157,7 @@ r->data = tmp; } } +#endif NPY_END_ALLOW_THREADS; free(clean_sep); if (err == 1) PyErr_NoMemory(); @@ -6374,8 +6388,13 @@ fprintf(stderr, "%ld items requested but only %ld read\n", (long) num, (long) nread); /* Make sure realloc is > 0 */ +#ifdef NOUSE_PYDATAMEM_RENEW + tmp = _fake_realloc(ret->data, NPY_MAX(nread,1) * ret->descr->elsize, + PyArray_SIZE(ret) * ret->descr->elsize); +#else tmp = PyDataMem_RENEW(ret->data, NPY_MAX(nread,1) * ret->descr->elsize); +#endif /* FIXME: This should not 
raise a memory error when nread == 0 We should return an empty array or at least raise an EOF Error. */ @@ -6479,7 +6498,11 @@ */ elcount = (i >> 1) + (i < 4 ? 4 : 2) + i; if (elcount <= (intp)((~(size_t)0) / elsize)) +#ifdef NOUSE_PYDATAMEM_RENEW + new_data = SYS_REALLOC(ret->data, elcount * elsize); +#else new_data = PyDataMem_RENEW(ret->data, elcount * elsize); +#endif else new_data = NULL; if (new_data == NULL) { @@ -6511,7 +6534,11 @@ (assuming realloc is reasonably good about reusing space...) */ if (i==0) i = 1; +#ifdef NOUSE_PYDATAMEM_RENEW + new_data = _fake_realloc(ret->data, i * elsize, PyArray_SIZE(ret) * elsize); +#else new_data = PyDataMem_RENEW(ret->data, i * elsize); +#endif if (new_data == NULL) { PyErr_SetString(PyExc_MemoryError, "cannot allocate array memory"); goto done; From numpy-svn at scipy.org Tue May 6 03:29:52 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Tue, 6 May 2008 02:29:52 -0500 (CDT) Subject: [Numpy-svn] r5132 - branches/aligned_alloca/numpy/core/src Message-ID: <20080506072952.7CAF839C0B6@new.scipy.org> Author: cdavid Date: 2008-05-06 02:29:49 -0500 (Tue, 06 May 2008) New Revision: 5132 Modified: branches/aligned_alloca/numpy/core/src/arrayobject.c Log: Do not use free but PyDataMem_FREE in _fake_realloc. Modified: branches/aligned_alloca/numpy/core/src/arrayobject.c =================================================================== --- branches/aligned_alloca/numpy/core/src/arrayobject.c 2008-05-06 07:19:58 UTC (rev 5131) +++ branches/aligned_alloca/numpy/core/src/arrayobject.c 2008-05-06 07:29:49 UTC (rev 5132) @@ -5727,7 +5727,7 @@ } if (new_sz == 0) { - free(ptr); + PyDataMem_FREE(ptr); return NULL; } @@ -5740,7 +5740,7 @@ return NULL; } memcpy(nptr, ptr, new_sz > old_sz ? old_sz : new_sz); - free(ptr); + PyDataMem_FREE(ptr); return nptr; } From numpy-svn at scipy.org Tue May 6 08:12:20 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Tue, 6 May 2008 07:12:20 -0500 (CDT) Subject: [Numpy-svn] r5133 - in branches/aligned_alloca/numpy/core: code_generators include/numpy src Message-ID: <20080506121220.15C9439C10D@new.scipy.org> Author: cdavid Date: 2008-05-06 07:12:10 -0500 (Tue, 06 May 2008) New Revision: 5133 Added: branches/aligned_alloca/numpy/core/src/alignedalloc.c Modified: branches/aligned_alloca/numpy/core/code_generators/array_api_order.txt branches/aligned_alloca/numpy/core/include/numpy/ndarrayobject.h branches/aligned_alloca/numpy/core/src/arrayobject.c branches/aligned_alloca/numpy/core/src/multiarraymodule.c Log: Add aligned allocator; 16 bytes by default. 
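The allocator added below over-allocates, rounds the pointer up to the requested alignment, and stashes the original pointer just before the returned block. The same rounding idea can be demonstrated from Python with NumPy itself; aligned_empty is a hypothetical helper written for illustration and is not part of the commit:

import numpy as np

def aligned_empty(n, dtype=np.float64, alignment=16):
    # Hypothetical helper: over-allocate a byte buffer, then start the
    # array at the first address that is a multiple of `alignment`.
    itemsize = np.dtype(dtype).itemsize
    buf = np.empty(n * itemsize + alignment, dtype=np.uint8)
    addr = buf.__array_interface__['data'][0]
    offset = (-addr) % alignment
    return buf[offset:offset + n * itemsize].view(dtype)

a = aligned_empty(100)
assert a.__array_interface__['data'][0] % 16 == 0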
Modified: branches/aligned_alloca/numpy/core/code_generators/array_api_order.txt =================================================================== --- branches/aligned_alloca/numpy/core/code_generators/array_api_order.txt 2008-05-06 07:29:49 UTC (rev 5132) +++ branches/aligned_alloca/numpy/core/code_generators/array_api_order.txt 2008-05-06 12:12:10 UTC (rev 5133) @@ -83,3 +83,6 @@ PyArray_Item_INCREF PyArray_Item_XDECREF PyArray_FieldNames +PyArray_AlignedMalloc +PyArray_AlignedRealloc +PyArray_AlignedFree Modified: branches/aligned_alloca/numpy/core/include/numpy/ndarrayobject.h =================================================================== --- branches/aligned_alloca/numpy/core/include/numpy/ndarrayobject.h 2008-05-06 07:29:49 UTC (rev 5132) +++ branches/aligned_alloca/numpy/core/include/numpy/ndarrayobject.h 2008-05-06 12:12:10 UTC (rev 5133) @@ -981,15 +981,26 @@ */ /* Data buffer */ -#define PyDataMem_NEW(size) ((char *)malloc(size)) -#define PyDataMem_FREE(ptr) free(ptr) +/* + * default alignment (in bytes) of PyDataMem_ANEW Any pointer redurned by + * PyDataMem_ANEW is guaranteed to be at least NPY_DEF_ALIGNMENT bytes + * + * TODO: provides an API to get this value + */ +#define NPY_DEF_ALIGNMENT 16 +#define PyDataMem_NEW(size) PyArray_AlignedMalloc(size, NPY_DEF_ALIGNMENT) +#define PyDataMem_FREE(ptr) PyArray_AlignedFree(ptr) +#define PyDataMem_RENEW(ptr,size) \ + PyArray_AlignedRealloc(ptr, size, NPY_DEF_ALIGNMENT) + +#define PyDataAlignedMem_NEW(size, alignment) \ + PyArray_AlignedMalloc(size, alignment) +#define PyDataAlignedMem_FREE(ptr) \ + PyArray_AlignedFree(ptr) +#define PyDataAlignedMem_RENEW(ptr,size, alignment) \ + PyArray_AlignedRealloc(ptr, size, alignment) -#ifdef NOUSE_PYDATAMEM_RENEW -#define PyDataMem_RENEW(ptr,size) DO_NOT_USE_PYDATAMEM_RENEW -#define SYS_REALLOC(ptr, size) realloc((ptr), (size)) -#else -#define PyDataMem_RENEW(ptr,size) ((char *)realloc(ptr,size)) -#endif +#define SYS_REALLOC realloc #define NPY_USE_PYMEM 1 Added: branches/aligned_alloca/numpy/core/src/alignedalloc.c =================================================================== --- branches/aligned_alloca/numpy/core/src/alignedalloc.c 2008-05-06 07:29:49 UTC (rev 5132) +++ branches/aligned_alloca/numpy/core/src/alignedalloc.c 2008-05-06 12:12:10 UTC (rev 5133) @@ -0,0 +1,129 @@ +/* + * Cross platform memory allocator with optional alignment + * + * Most of the code is from Steven Johnson (FFTW). + * + * TODO: + * - Had some magic for debug aligned allocators (to detect mismatch) + * - some platforms set errno for errors, other not. According to man + * posix_memalign, this function does NOT set errno; according to MSDN, + * _align_memalloc does set errno. This seems logical seeing the different + * signatures of the functions, but I have not checked it. + */ + +#ifndef _MULTIARRAYMODULE +#error Mmmh, looke like Python.h was not already included ! +#endif + +/* Are those four headers always available ? 
*/ +#include +#include +#include /* ptrdiff_t */ +#include /* memmove */ + +#ifdef HAVE_STDINT_H +#include /* uintptr_t */ +#else +#define uintptr_t size_t +#endif + +#define NPY_ALIGNED_NOT_POWER_OF_TWO(n) (((n) & ((n) - 1))) +#define NPY_ALIGNED_UI(p) ((uintptr_t) (p)) +#define NPY_ALIGNED_CP(p) ((char *) p) + +#define NPY_ALIGNED_PTR_ALIGN(p0, alignment, offset) \ + ((void *) (((NPY_ALIGNED_UI(p0) + (alignment + sizeof(void*)) + offset) \ + & (~NPY_ALIGNED_UI(alignment - 1))) \ + - offset)) + +/* pointer must sometimes be aligned; assume sizeof(void*) is a power of two */ +#define NPY_ALIGNED_ORIG_PTR(p) \ + (*(((void **) (NPY_ALIGNED_UI(p) & (~NPY_ALIGNED_UI(sizeof(void*) - 1)))) - 1)) + +/* Default implementation: simply using malloc and co */ +static void *_aligned_offset_malloc(size_t size, size_t alignment, + size_t offset) +{ + void *p0, *p; + + if (NPY_ALIGNED_NOT_POWER_OF_TWO(alignment)) { + errno = EINVAL; + return ((void *) 0); + } + if (size == 0) { + return ((void *) 0); + } + if (alignment < sizeof(void *)) { + alignment = sizeof(void *); + } + + /* including the extra sizeof(void*) is overkill on a 32-bit + machine, since malloc is already 8-byte aligned, as long + as we enforce alignment >= 8 ...but oh well */ + + p0 = malloc(size + (alignment + sizeof(void *))); + if (!p0) { + return ((void *) 0); + } + p = NPY_ALIGNED_PTR_ALIGN(p0, alignment, offset); + NPY_ALIGNED_ORIG_PTR(p) = p0; + return p; +} + +void *_aligned_malloc(size_t size, size_t alignment) +{ + return _aligned_offset_malloc(size, alignment, 0); +} + +void _aligned_free(void *memblock) +{ + if (memblock) { + free(NPY_ALIGNED_ORIG_PTR(memblock)); + } +} + +void *_aligned_realloc(void *memblock, size_t size, size_t alignment) +{ + void *p0, *p; + ptrdiff_t shift; + + if (!memblock) { + return _aligned_malloc(size, alignment); + } + if (NPY_ALIGNED_NOT_POWER_OF_TWO(alignment)) { + goto bad; + } + if (size == 0) { + _aligned_free(memblock); + return ((void *) 0); + } + if (alignment < sizeof(void *)) { + alignment = sizeof(void *); + } + + p0 = NPY_ALIGNED_ORIG_PTR(memblock); + if (memblock != NPY_ALIGNED_PTR_ALIGN(p0, alignment, 0)) { + goto bad; /* it is an error for the alignment to change */ + } + shift = NPY_ALIGNED_CP(memblock) - NPY_ALIGNED_CP(p0); + + p0 = realloc(p0, size + (alignment + sizeof(void *))); + if (!p0) { + return ((void *) 0); + } + p = NPY_ALIGNED_PTR_ALIGN(p0, alignment, 0); + + /* relative shift of actual data may be different from before, ugh */ + if (shift != NPY_ALIGNED_CP(p) - NPY_ALIGNED_CP(p0)) { + /* ugh, moves more than necessary if size is increased */ + memmove(NPY_ALIGNED_CP(p), NPY_ALIGNED_CP(p0) + shift, + size); + } + + NPY_ALIGNED_ORIG_PTR(p) = p0; + return p; + +bad: + errno = EINVAL; + return ((void *) 0); +} Modified: branches/aligned_alloca/numpy/core/src/arrayobject.c =================================================================== --- branches/aligned_alloca/numpy/core/src/arrayobject.c 2008-05-06 07:29:49 UTC (rev 5132) +++ branches/aligned_alloca/numpy/core/src/arrayobject.c 2008-05-06 12:12:10 UTC (rev 5133) @@ -5818,11 +5818,8 @@ if (newsize == 0) sd = self->descr->elsize; else sd = newsize * self->descr->elsize; /* Reallocate space if needed */ -#if NOUSE_PYDATAMEM_RENEW - new_data = _fake_realloc(self->data, sd, PyArray_SIZE(self) * self->descr->elsize); -#else - new_data = PyDataMem_RENEW(self->data, sd); -#endif + new_data = _fake_realloc(self->data, sd, + PyArray_SIZE(self) * self->descr->elsize); if (new_data == NULL) { PyErr_SetString(PyExc_MemoryError, 
"cannot allocate memory for array"); Modified: branches/aligned_alloca/numpy/core/src/multiarraymodule.c =================================================================== --- branches/aligned_alloca/numpy/core/src/multiarraymodule.c 2008-05-06 07:29:49 UTC (rev 5132) +++ branches/aligned_alloca/numpy/core/src/multiarraymodule.c 2008-05-06 12:12:10 UTC (rev 5133) @@ -81,7 +81,33 @@ return NULL; } +/* + * Aligned memory allocators + */ + +#include "alignedalloc.c" + +/*OBJECT_API*/ +static void* +PyArray_AlignedMalloc(size_t size, size_t alignment) +{ + return _aligned_malloc(size, alignment); +} +/*OBJECT_API*/ +static void +PyArray_AlignedFree(void* ptr) +{ + _aligned_free(ptr); +} + +/*OBJECT_API*/ +static void* +PyArray_AlignedRealloc(void* ptr, size_t size, size_t alignment) +{ + return _aligned_realloc(ptr, size, alignment); +} + /* Including this file is the only way I know how to declare functions static in each file, and store the pointers from functions in both arrayobject.c and multiarraymodule.c for the C-API @@ -6124,11 +6150,7 @@ dptr += dtype->elsize; if (num < 0 && thisbuf == size) { totalbytes += bytes; -#ifdef NOUSE_PYDATAMEM_RENEW tmp = SYS_REALLOC(r->data, totalbytes); -#else - tmp = PyDataMem_RENEW(r->data, totalbytes); -#endif if (tmp == NULL) { err = 1; break; @@ -6140,7 +6162,8 @@ if (skip_sep(&stream, clean_sep, stream_data) < 0) break; } -#ifdef NOUSE_PYDATAMEM_RENEW + + /* XXX: replaces this when using real aligned allocator */ tmp = _fake_realloc(r->data, (*nread)*dtype->elsize, PyArray_SIZE(r) * dtype->elsize); if (tmp == NULL) err=1; @@ -6148,7 +6171,7 @@ PyArray_DIM(r,0) = *nread; r->data = tmp; } -#else +#if 0 if (num < 0) { tmp = PyDataMem_RENEW(r->data, (*nread)*dtype->elsize); if (tmp == NULL) err=1; @@ -6388,13 +6411,8 @@ fprintf(stderr, "%ld items requested but only %ld read\n", (long) num, (long) nread); /* Make sure realloc is > 0 */ -#ifdef NOUSE_PYDATAMEM_RENEW tmp = _fake_realloc(ret->data, NPY_MAX(nread,1) * ret->descr->elsize, PyArray_SIZE(ret) * ret->descr->elsize); -#else - tmp = PyDataMem_RENEW(ret->data, - NPY_MAX(nread,1) * ret->descr->elsize); -#endif /* FIXME: This should not raise a memory error when nread == 0 We should return an empty array or at least raise an EOF Error. */ @@ -6498,11 +6516,7 @@ */ elcount = (i >> 1) + (i < 4 ? 4 : 2) + i; if (elcount <= (intp)((~(size_t)0) / elsize)) -#ifdef NOUSE_PYDATAMEM_RENEW new_data = SYS_REALLOC(ret->data, elcount * elsize); -#else - new_data = PyDataMem_RENEW(ret->data, elcount * elsize); -#endif else new_data = NULL; if (new_data == NULL) { @@ -6534,11 +6548,7 @@ (assuming realloc is reasonably good about reusing space...) 
*/ if (i==0) i = 1; -#ifdef NOUSE_PYDATAMEM_RENEW new_data = _fake_realloc(ret->data, i * elsize, PyArray_SIZE(ret) * elsize); -#else - new_data = PyDataMem_RENEW(ret->data, i * elsize); -#endif if (new_data == NULL) { PyErr_SetString(PyExc_MemoryError, "cannot allocate array memory"); goto done; From numpy-svn at scipy.org Wed May 7 01:57:59 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Wed, 7 May 2008 00:57:59 -0500 (CDT) Subject: [Numpy-svn] r5134 - branches Message-ID: <20080507055759.39F7439C366@new.scipy.org> Author: jarrod.millman Date: 2008-05-07 00:57:53 -0500 (Wed, 07 May 2008) New Revision: 5134 Added: branches/1.1.x/ Log: branching r5133 for the stable 1.1 series Copied: branches/1.1.x (from rev 5133, trunk) From numpy-svn at scipy.org Wed May 7 02:10:12 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Wed, 7 May 2008 01:10:12 -0500 (CDT) Subject: [Numpy-svn] r5135 - trunk/numpy Message-ID: <20080507061012.17C3C39C018@new.scipy.org> Author: jarrod.millman Date: 2008-05-07 01:10:11 -0500 (Wed, 07 May 2008) New Revision: 5135 Modified: trunk/numpy/version.py Log: trunk open for 1.2 development Modified: trunk/numpy/version.py =================================================================== --- trunk/numpy/version.py 2008-05-07 05:57:53 UTC (rev 5134) +++ trunk/numpy/version.py 2008-05-07 06:10:11 UTC (rev 5135) @@ -1,4 +1,4 @@ -version='1.1.0' +version='1.2.0' release=False if not release: From numpy-svn at scipy.org Wed May 7 11:05:47 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Wed, 7 May 2008 10:05:47 -0500 (CDT) Subject: [Numpy-svn] r5136 - in branches/1.1.x/numpy/core: . tests Message-ID: <20080507150547.DE32A39C177@new.scipy.org> Author: peridot Date: 2008-05-07 10:05:45 -0500 (Wed, 07 May 2008) New Revision: 5136 Modified: branches/1.1.x/numpy/core/defmatrix.py branches/1.1.x/numpy/core/tests/test_defmatrix.py Log: Matrix indexing fix as discussed on the list and described in ticket #760;closes #760. Modified: branches/1.1.x/numpy/core/defmatrix.py =================================================================== --- branches/1.1.x/numpy/core/defmatrix.py 2008-05-07 06:10:11 UTC (rev 5135) +++ branches/1.1.x/numpy/core/defmatrix.py 2008-05-07 15:05:45 UTC (rev 5136) @@ -223,6 +223,8 @@ return def __getitem__(self, index): + if isscalar(index): + return self.__array__()[index] self._getitem = True try: Modified: branches/1.1.x/numpy/core/tests/test_defmatrix.py =================================================================== --- branches/1.1.x/numpy/core/tests/test_defmatrix.py 2008-05-07 06:10:11 UTC (rev 5135) +++ branches/1.1.x/numpy/core/tests/test_defmatrix.py 2008-05-07 15:05:45 UTC (rev 5136) @@ -190,6 +190,9 @@ ## assert_equal(x[0],0) ## assert_equal(x[:,0].shape,x.shape) + def check_scalar_indexing(self): + x = asmatrix(zeros((3,2),float)) + assert_equal(x[0,0],x[0][0]) if __name__ == "__main__": NumpyTest().run() From numpy-svn at scipy.org Wed May 7 15:06:31 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Wed, 7 May 2008 14:06:31 -0500 (CDT) Subject: [Numpy-svn] r5137 - in trunk/numpy/ma: . 
tests Message-ID: <20080507190631.63D0339C607@scipy.org> Author: pierregm Date: 2008-05-07 14:06:28 -0500 (Wed, 07 May 2008) New Revision: 5137 Modified: trunk/numpy/ma/core.py trunk/numpy/ma/tests/test_core.py Log: core : prevent power to mask negative values when it should not Modified: trunk/numpy/ma/core.py =================================================================== --- trunk/numpy/ma/core.py 2008-05-07 15:05:45 UTC (rev 5136) +++ trunk/numpy/ma/core.py 2008-05-07 19:06:28 UTC (rev 5137) @@ -2854,13 +2854,14 @@ fb = getdata(b) if fb.dtype.char in typecodes["Integer"]: return masked_array(umath.power(fa, fb), m) - md = make_mask((fa < 0), shrink=True) - m = mask_or(m, md) + if numpy.abs(fb) < 1.: + md = make_mask((fa < 0), shrink=True) + m = mask_or(m, md) if m is nomask: return masked_array(umath.power(fa, fb)) else: fa = fa.copy() - fa[(fa < 0)] = 1 + fa[m] = 1 return masked_array(umath.power(fa, fb), m) #.............................................................................. @@ -3375,4 +3376,4 @@ indices = numpy.indices ############################################################################### - \ No newline at end of file + Modified: trunk/numpy/ma/tests/test_core.py =================================================================== --- trunk/numpy/ma/tests/test_core.py 2008-05-07 15:05:45 UTC (rev 5136) +++ trunk/numpy/ma/tests/test_core.py 2008-05-07 19:06:28 UTC (rev 5137) @@ -1561,6 +1561,11 @@ a = identity(5) assert(isinstance(a, MaskedArray)) assert_equal(a, numpy.identity(5)) + # + def test_power(self): + x = -1.1 + assert_almost_equal(power(x,2.), 1.21) + assert_equal(power(x,0.5)._mask, 1) ############################################################################### From numpy-svn at scipy.org Wed May 7 16:12:58 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Wed, 7 May 2008 15:12:58 -0500 (CDT) Subject: [Numpy-svn] r5138 - branches/1.1.x/numpy/core/src Message-ID: <20080507201258.C586139C0AD@scipy.org> Author: oliphant Date: 2008-05-07 15:12:49 -0500 (Wed, 07 May 2008) New Revision: 5138 Modified: branches/1.1.x/numpy/core/src/arrayobject.c Log: Apply a fix to PyArray_ToList to work with subclasses better. 
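The observable effect of this fix, together with the matrix changes later in this batch, is that tolist() works on ndarray subclasses without special-casing. Roughly, as the tests added further down exercise it:

import numpy as np

m = np.matrix([[1, 2], [3, 4]])
# PyArray_ToList now walks subclasses through their own item access,
# so matrix no longer needs its own tolist():
assert m.tolist() == [[1, 2], [3, 4]]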
Modified: branches/1.1.x/numpy/core/src/arrayobject.c =================================================================== --- branches/1.1.x/numpy/core/src/arrayobject.c 2008-05-07 19:06:28 UTC (rev 5137) +++ branches/1.1.x/numpy/core/src/arrayobject.c 2008-05-07 20:12:49 UTC (rev 5138) @@ -1972,14 +1972,19 @@ sz = self->dimensions[0]; lp = PyList_New(sz); for(i = 0; i < sz; i++) { - v=(PyArrayObject *)array_big_item(self, i); - if (v->nd >= self->nd) { - PyErr_SetString(PyExc_RuntimeError, - "array_item not returning smaller-" \ - "dimensional array"); - Py_DECREF(v); - Py_DECREF(lp); - return NULL; + if (PyArray_CheckExact(self)) { + v=(PyArrayObject *)array_big_item(self, i); + } + else { + v = (PyArrayObject *)PySequence_GetItem((PyObject *)self, i); + if ((!PyArray_Check(v)) || (v->nd >= self->nd)) { + PyErr_SetString(PyExc_RuntimeError, + "array_item not returning smaller-" \ + "dimensional array"); + Py_DECREF(v); + Py_DECREF(lp); + return NULL; + } } PyList_SetItem(lp, i, PyArray_ToList(v)); Py_DECREF(v); From numpy-svn at scipy.org Wed May 7 16:13:45 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Wed, 7 May 2008 15:13:45 -0500 (CDT) Subject: [Numpy-svn] r5139 - branches/1.1.x/numpy/core Message-ID: <20080507201345.B9E6239C0AD@scipy.org> Author: oliphant Date: 2008-05-07 15:13:45 -0500 (Wed, 07 May 2008) New Revision: 5139 Modified: branches/1.1.x/numpy/core/defmatrix.py Log: Matrices don't need to special-case tolist now. Modified: branches/1.1.x/numpy/core/defmatrix.py =================================================================== --- branches/1.1.x/numpy/core/defmatrix.py 2008-05-07 20:12:49 UTC (rev 5138) +++ branches/1.1.x/numpy/core/defmatrix.py 2008-05-07 20:13:45 UTC (rev 5139) @@ -474,11 +474,6 @@ def ptp(self, axis=None, out=None): return N.ndarray.ptp(self, axis, out)._align(axis) - # Needed becase tolist method expects a[i] - # to have dimension a.ndim-1 - def tolist(self): - return self.__array__().tolist() - def getI(self): M,N = self.shape if M == N: From numpy-svn at scipy.org Wed May 7 16:24:37 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Wed, 7 May 2008 15:24:37 -0500 (CDT) Subject: [Numpy-svn] r5140 - in trunk/numpy/core: . blasdot src Message-ID: <20080507202437.72C7439C254@scipy.org> Author: oliphant Date: 2008-05-07 15:24:34 -0500 (Wed, 07 May 2008) New Revision: 5140 Modified: trunk/numpy/core/blasdot/_dotblas.c trunk/numpy/core/defmatrix.py trunk/numpy/core/src/arrayobject.c Log: * Make matrices return 1-dimensional array on item selection for 1.2\n * Remove (now un-needed) tolist method from matrices\n * For ticket #551, copy data if start of memory is not aligned on itemsize location in optimized blas wrapper. Modified: trunk/numpy/core/blasdot/_dotblas.c =================================================================== --- trunk/numpy/core/blasdot/_dotblas.c 2008-05-07 20:13:45 UTC (rev 5139) +++ trunk/numpy/core/blasdot/_dotblas.c 2008-05-07 20:24:34 UTC (rev 5140) @@ -176,6 +176,9 @@ } +/* This also makes sure that the data segment is aligned with + an itemsize address as well by returning one if not true. 
+*/ static int _bad_strides(PyArrayObject *ap) { @@ -183,6 +186,8 @@ register int i, N=PyArray_NDIM(ap); register intp *strides = PyArray_STRIDES(ap); + if (((intp)(ap->data) % itemsize) != 0) + return 1; for (i=0; idimensions[0]; lp = PyList_New(sz); for(i = 0; i < sz; i++) { - v=(PyArrayObject *)array_big_item(self, i); - if (v->nd >= self->nd) { - PyErr_SetString(PyExc_RuntimeError, - "array_item not returning smaller-" \ - "dimensional array"); - Py_DECREF(v); - Py_DECREF(lp); - return NULL; + if (PyArray_CheckExact(self)) { + v=(PyArrayObject *)array_big_item(self, i); + } + else { + v = PySequence_GetItem((PyObject *)self, i); + if ((!PyArray_Check(v)) || (v->nd >= self->nd)) { + PyErr_SetString(PyExc_RuntimeError, + "array_item not returning smaller-" \ + "dimensional array"); + Py_DECREF(v); + Py_DECREF(lp); + return NULL; + } } PyList_SetItem(lp, i, PyArray_ToList(v)); Py_DECREF(v); From numpy-svn at scipy.org Wed May 7 16:25:42 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Wed, 7 May 2008 15:25:42 -0500 (CDT) Subject: [Numpy-svn] r5141 - branches/1.1.x/numpy/core/blasdot Message-ID: <20080507202542.AB7A039C15C@scipy.org> Author: oliphant Date: 2008-05-07 15:25:42 -0500 (Wed, 07 May 2008) New Revision: 5141 Modified: branches/1.1.x/numpy/core/blasdot/_dotblas.c Log: Fix ticket #551 in 1.1.x branch. Modified: branches/1.1.x/numpy/core/blasdot/_dotblas.c =================================================================== --- branches/1.1.x/numpy/core/blasdot/_dotblas.c 2008-05-07 20:24:34 UTC (rev 5140) +++ branches/1.1.x/numpy/core/blasdot/_dotblas.c 2008-05-07 20:25:42 UTC (rev 5141) @@ -176,6 +176,9 @@ } +/* This also makes sure that the data segment is aligned with + an itemsize address as well by returning one if not true. 
+*/ static int _bad_strides(PyArrayObject *ap) { @@ -183,6 +186,8 @@ register int i, N=PyArray_NDIM(ap); register intp *strides = PyArray_STRIDES(ap); + if (((intp)(ap->data) % itemsize) != 0) + return 1; for (i=0; i Author: oliphant Date: 2008-05-07 16:18:55 -0500 (Wed, 07 May 2008) New Revision: 5142 Modified: branches/1.1.x/numpy/core/defmatrix.py Log: Remove un-needed attribute look-up Modified: branches/1.1.x/numpy/core/defmatrix.py =================================================================== --- branches/1.1.x/numpy/core/defmatrix.py 2008-05-07 20:25:42 UTC (rev 5141) +++ branches/1.1.x/numpy/core/defmatrix.py 2008-05-07 21:18:55 UTC (rev 5142) @@ -261,7 +261,7 @@ if isinstance(other,(N.ndarray, list, tuple)) : # This promotes 1-D vectors to row vectors return N.dot(self, asmatrix(other)) - if N.isscalar(other) or not hasattr(other, '__rmul__') : + if isscalar(other) or not hasattr(other, '__rmul__') : return N.dot(self, other) return NotImplemented From numpy-svn at scipy.org Wed May 7 17:19:32 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Wed, 7 May 2008 16:19:32 -0500 (CDT) Subject: [Numpy-svn] r5143 - trunk/numpy/core Message-ID: <20080507211932.80D1339C0E4@scipy.org> Author: oliphant Date: 2008-05-07 16:19:32 -0500 (Wed, 07 May 2008) New Revision: 5143 Modified: trunk/numpy/core/defmatrix.py Log: Remove un-needed attribute lookup in 1.2 Modified: trunk/numpy/core/defmatrix.py =================================================================== --- trunk/numpy/core/defmatrix.py 2008-05-07 21:18:55 UTC (rev 5142) +++ trunk/numpy/core/defmatrix.py 2008-05-07 21:19:32 UTC (rev 5143) @@ -261,7 +261,7 @@ if isinstance(other,(N.ndarray, list, tuple)) : # This promotes 1-D vectors to row vectors return N.dot(self, asmatrix(other)) - if N.isscalar(other) or not hasattr(other, '__rmul__') : + if isscalar(other) or not hasattr(other, '__rmul__') : return N.dot(self, other) return NotImplemented From numpy-svn at scipy.org Wed May 7 20:10:47 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Wed, 7 May 2008 19:10:47 -0500 (CDT) Subject: [Numpy-svn] r5144 - trunk/numpy/core/tests Message-ID: <20080508001047.1814439C032@scipy.org> Author: charris Date: 2008-05-07 19:10:46 -0500 (Wed, 07 May 2008) New Revision: 5144 Modified: trunk/numpy/core/tests/test_defmatrix.py Log: Add some tests for scalar indexing, tolist(), and fancy indexing. 
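The behaviour these tests pin down, sketched as a usage example; the commented results are those asserted by the tests below, on the 1.2 trunk after the item-selection change:

import numpy as np

a = np.matrix([[1, 2], [3, 4]])
a[0]                     # scalar index now yields a 1-d array of the first row
a[1, [0, 1, 0]]          # fancy indexing still returns a matrix: [[3, 4, 3]]
a[[1, 0]]                # matrix([[3, 4], [1, 2]])
a.tolist()               # [[1, 2], [3, 4]]
np.array([a, a]).shape   # (2, 2, 2)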
Modified: trunk/numpy/core/tests/test_defmatrix.py =================================================================== --- trunk/numpy/core/tests/test_defmatrix.py 2008-05-07 21:19:32 UTC (rev 5143) +++ trunk/numpy/core/tests/test_defmatrix.py 2008-05-08 00:10:46 UTC (rev 5144) @@ -179,6 +179,35 @@ x[:,1] = y>0.5 assert_equal(x, [[0,1],[0,0],[0,0]]) +class TestNewScalarIndexing(NumpyTestCase): + a = matrix([[1, 2],[3,4]]) + + def check_dimesions(self): + a = self.a + x = a[0] + assert_equal(x.ndim, 1) + + def check_array_from_matrix_list(self): + a = self.a + x = array([a, a]) + assert_equal(x.shape, [2,2,2]) + + def check_array_to_list(self): + a = self.a + assert a.tolist() == [[1, 2], [3, 4]] + + def check_fancy_indexing(self): + a = self.a + x = a[1, [0,1,0]] + assert isinstance(x, matrix) + assert_equal(x, matrix([[3, 4, 3]])) + x = a[[1,0]] + assert isinstance(x, matrix) + assert_equal(x, matrix([[3, 4], [1, 2]])) + x = a[[[1],[0]],[[1,0],[0,1]]] + assert isinstance(x, matrix) + assert_equal(x, matrix([[4, 3], [1, 2]])) + ## def check_vector_element(self): ## x = matrix([[1,2,3],[4,5,6]]) ## assert_equal(x[0][0],1) From numpy-svn at scipy.org Wed May 7 20:15:04 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Wed, 7 May 2008 19:15:04 -0500 (CDT) Subject: [Numpy-svn] r5145 - branches/1.1.x/numpy/core/tests Message-ID: <20080508001504.4925739C03D@scipy.org> Author: peridot Date: 2008-05-07 19:15:02 -0500 (Wed, 07 May 2008) New Revision: 5145 Modified: branches/1.1.x/numpy/core/tests/test_defmatrix.py Log: Additional matrix indexing tests for 1.1 branch Modified: branches/1.1.x/numpy/core/tests/test_defmatrix.py =================================================================== --- branches/1.1.x/numpy/core/tests/test_defmatrix.py 2008-05-08 00:10:46 UTC (rev 5144) +++ branches/1.1.x/numpy/core/tests/test_defmatrix.py 2008-05-08 00:15:02 UTC (rev 5145) @@ -2,6 +2,7 @@ set_package_path() import numpy.core;reload(numpy.core) from numpy.core import * +import numpy as np restore_path() class TestCtor(NumpyTestCase): @@ -193,6 +194,32 @@ def check_scalar_indexing(self): x = asmatrix(zeros((3,2),float)) assert_equal(x[0,0],x[0][0]) + def check_row_column_indexing(self): + x = asmatrix(np.eye(2)) + assert_array_equal(x[0,:],[[1,0]]) + assert_array_equal(x[1,:],[[0,1]]) + assert_array_equal(x[:,0],[[1],[0]]) + assert_array_equal(x[:,1],[[0],[1]]) + def check_boolean_indexing(self): + A = arange(6) + A.shape = (3,2) + x = asmatrix(A) + assert_array_equal(x[:,array([True,False])],x[:,0]) + assert_array_equal(x[array([True,False,False]),:],x[0,:]) + + def check_list_indexing(self): + A = arange(6) + A.shape = (3,2) + x = asmatrix(A) + assert_array_equal(x[:,[1,0]],x[:,::-1]) + assert_array_equal(x[[2,1,0],:],x[::-1,:]) + + def check_tolist(self): + x = asmatrix(np.eye(2)) + assert_equal(x.tolist(), [[1,0],[0,1]]) + + + if __name__ == "__main__": NumpyTest().run() From numpy-svn at scipy.org Wed May 7 20:33:45 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Wed, 7 May 2008 19:33:45 -0500 (CDT) Subject: [Numpy-svn] r5146 - trunk/numpy/core/tests Message-ID: <20080508003345.3DEA939C519@scipy.org> Author: peridot Date: 2008-05-07 19:33:43 -0500 (Wed, 07 May 2008) New Revision: 5146 Modified: trunk/numpy/core/tests/test_defmatrix.py Log: Additional tests of matrix indexing. 
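The additional cases cover row/column, boolean and list indexing of matrices; as a usage sketch of what the tests below assert:

import numpy as np

x = np.asmatrix(np.eye(2))
x[0, :]    # [[1., 0.]]  -- rows and columns keep matrix shape
x[:, 1]    # [[0.], [1.]]

y = np.asmatrix(np.arange(6).reshape(3, 2))
y[:, [1, 0]]                           # list indexing: columns reversed
y[np.array([True, False, False]), :]   # boolean indexing: first row only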
Modified: trunk/numpy/core/tests/test_defmatrix.py =================================================================== --- trunk/numpy/core/tests/test_defmatrix.py 2008-05-08 00:15:02 UTC (rev 5145) +++ trunk/numpy/core/tests/test_defmatrix.py 2008-05-08 00:33:43 UTC (rev 5146) @@ -2,6 +2,7 @@ set_package_path() import numpy.core;reload(numpy.core) from numpy.core import * +import numpy as np restore_path() class TestCtor(NumpyTestCase): @@ -180,7 +181,8 @@ assert_equal(x, [[0,1],[0,0],[0,0]]) class TestNewScalarIndexing(NumpyTestCase): - a = matrix([[1, 2],[3,4]]) + def setUp(self): + self.a = matrix([[1, 2],[3,4]]) def check_dimesions(self): a = self.a @@ -194,7 +196,7 @@ def check_array_to_list(self): a = self.a - assert a.tolist() == [[1, 2], [3, 4]] + assert_equal(a.tolist(),[[1, 2], [3, 4]]) def check_fancy_indexing(self): a = self.a @@ -219,6 +221,32 @@ ## assert_equal(x[0],0) ## assert_equal(x[:,0].shape,x.shape) + def check_scalar_indexing(self): + x = asmatrix(zeros((3,2),float)) + assert_equal(x[0,0],x[0][0]) + def check_row_column_indexing(self): + x = asmatrix(np.eye(2)) + assert_array_equal(x[0,:],[[1,0]]) + assert_array_equal(x[1,:],[[0,1]]) + assert_array_equal(x[:,0],[[1],[0]]) + assert_array_equal(x[:,1],[[0],[1]]) + + def check_boolean_indexing(self): + A = arange(6) + A.shape = (3,2) + x = asmatrix(A) + assert_array_equal(x[:,array([True,False])],x[:,0]) + assert_array_equal(x[array([True,False,False]),:],x[0,:]) + + def check_list_indexing(self): + A = arange(6) + A.shape = (3,2) + x = asmatrix(A) + assert_array_equal(x[:,[1,0]],x[:,::-1]) + assert_array_equal(x[[2,1,0],:],x[::-1,:]) + + + if __name__ == "__main__": NumpyTest().run() From numpy-svn at scipy.org Thu May 8 04:02:25 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Thu, 8 May 2008 03:02:25 -0500 (CDT) Subject: [Numpy-svn] r5147 - trunk/numpy/core/tests Message-ID: <20080508080225.AE70A39C603@scipy.org> Author: ptvirtan Date: 2008-05-08 03:02:15 -0500 (Thu, 08 May 2008) New Revision: 5147 Modified: trunk/numpy/core/tests/test_regression.py Log: Test for ticket #551 part of r5140 Modified: trunk/numpy/core/tests/test_regression.py =================================================================== --- trunk/numpy/core/tests/test_regression.py 2008-05-08 00:33:43 UTC (rev 5146) +++ trunk/numpy/core/tests/test_regression.py 2008-05-08 08:02:15 UTC (rev 5147) @@ -1016,6 +1016,14 @@ """Test for changeset r5065""" assert_array_equal(np.array([np.nan]), np.asfarray([None])) + def check_dot_alignment_sse2(self, level=rlevel): + """Test for ticket #551, changeset r5140""" + x = np.zeros((30,40)) + y = pickle.loads(pickle.dumps(x)) + # y is now typically not aligned on a 8-byte boundary + z = np.ones((1, y.shape[0])) + # This shouldn't cause a segmentation fault: + np.dot(z, y) if __name__ == "__main__": NumpyTest().run() From numpy-svn at scipy.org Thu May 8 04:09:01 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Thu, 8 May 2008 03:09:01 -0500 (CDT) Subject: [Numpy-svn] r5148 - branches/1.1.x/numpy/core/tests Message-ID: <20080508080901.212B839C4F1@scipy.org> Author: ptvirtan Date: 2008-05-08 03:08:52 -0500 (Thu, 08 May 2008) New Revision: 5148 Modified: branches/1.1.x/numpy/core/tests/test_regression.py Log: Test for ticket #551 part of r5141 Modified: branches/1.1.x/numpy/core/tests/test_regression.py =================================================================== --- branches/1.1.x/numpy/core/tests/test_regression.py 2008-05-08 08:02:15 UTC (rev 5147) +++ 
branches/1.1.x/numpy/core/tests/test_regression.py 2008-05-08 08:08:52 UTC (rev 5148) @@ -1016,6 +1016,14 @@ """Test for changeset r5065""" assert_array_equal(np.array([np.nan]), np.asfarray([None])) + def check_dot_alignment_sse2(self, level=rlevel): + """Test for ticket #551, changeset r5141""" + x = np.zeros((30,40)) + y = pickle.loads(pickle.dumps(x)) + # y is now typically not aligned on a 8-byte boundary + z = np.ones((1, y.shape[0])) + # This shouldn't cause a segmentation fault: + np.dot(z, y) if __name__ == "__main__": NumpyTest().run() From numpy-svn at scipy.org Thu May 8 22:51:16 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Thu, 8 May 2008 21:51:16 -0500 (CDT) Subject: [Numpy-svn] r5149 - trunk/numpy/core/src Message-ID: <20080509025116.A3A6439C04B@scipy.org> Author: peridot Date: 2008-05-08 21:51:14 -0500 (Thu, 08 May 2008) New Revision: 5149 Modified: trunk/numpy/core/src/ufuncobject.c Log: Docstrings for ufunc methods add, reduce, outer, and the arcane reduceat. Modified: trunk/numpy/core/src/ufuncobject.c =================================================================== --- trunk/numpy/core/src/ufuncobject.c 2008-05-08 08:08:52 UTC (rev 5148) +++ trunk/numpy/core/src/ufuncobject.c 2008-05-09 02:51:14 UTC (rev 5149) @@ -3951,12 +3951,174 @@ static struct PyMethodDef ufunc_methods[] = { - {"reduce", (PyCFunction)ufunc_reduce, METH_VARARGS | METH_KEYWORDS}, + {"reduce", (PyCFunction)ufunc_reduce, METH_VARARGS | METH_KEYWORDS, + "reduce(array,axis=0,dtype=None,out=None)\n" + "reduce applies the operator to all elements of the array producing\n" + "a single result.\n" + "\n" + "For a one-dimensional array, reduce produces results equivalent to:\n" + "r = op.identity\n" + "for i in xrange(len(A)):\n" + " r = op(r,A[i])\n" + "return r\n" + "\n" + "For example, add.reduce() is equivalent to sum().\n" + "\n" + "Parameters:\n" + "-----------\n" + "\n" + "array : array-like\n" + " The array to act on.\n" + "axis : integer\n" + " The axis along which to apply the reduction.\n" + "dtype : data type or None\n" + " The type used to represent the intermediate results. Defaults\n" + " to the data type of the output array if this is provided, or\n" + " the data type of the input array if no output array is provided.\n" + "out : array-like or None\n" + " A location into which the result is stored. If not provided a\n" + " freshly-allocated array is returned.\n" + "\n" + "Returns:\n" + "--------\n" + "\n" + "r : array\n" + " The reduced values. If out was supplied, r is equal to out.\n" + "\n" + "Example:\n" + "--------\n" + ">>> np.multiply.reduce([2,3,5])\n" + "30\n" + "\n" + }, {"accumulate", (PyCFunction)ufunc_accumulate, - METH_VARARGS | METH_KEYWORDS}, + METH_VARARGS | METH_KEYWORDS, + "accumulate(array,axis=None,dtype=None,out=None)\n" + "accumulate applies the operator to all elements of the array producing\n" + "cumulative results.\n" + "\n" + "For a one-dimensional array, accumulate produces results equivalent to:\n" + "r = np.empty(len(A))\n" + "t = op.identity\n" + "for i in xrange(len(A)):\n" + " t = op(t,A[i])\n" + " r[i] = t\n" + "return r\n" + "\n" + "For example, add.accumulate() is equivalent to cumsum().\n" + "\n" + "Parameters:\n" + "-----------\n" + "\n" + "array : array-like\n" + " The array to act on.\n" + "axis : integer\n" + " The axis along which to apply the accumulation.\n" + "dtype : data type or None\n" + " The type used to represent the intermediate results. 
Defaults\n" + " to the data type of the output array if this is provided, or\n" + " the data type of the input array if no output array is provided.\n" + "out : array-like or None\n" + " A location into which the result is stored. If not provided a\n" + " freshly-allocated array is returned.\n" + "\n" + "Returns:\n" + "--------\n" + "\n" + "r : array\n" + " The accumulated values. If out was supplied, r is equal to out.\n" + "\n" + "Example:\n" + "--------\n" + ">>> np.multiply.accumulate([2,3,5])\n" + "array([2,6,30])\n" + "\n" + }, {"reduceat", (PyCFunction)ufunc_reduceat, - METH_VARARGS | METH_KEYWORDS}, - {"outer", (PyCFunction)ufunc_outer, METH_VARARGS | METH_KEYWORDS}, + METH_VARARGS | METH_KEYWORDS, + "reduceat(self,array,indices,axis=None,dtype=None,out=None)\n" + "reduceat performs a reduce over an axis using the indices as a guide\n" + "\n" + "op.reduceat(array,indices) computes\n" + "op.reduce(array[indices[i]:indices[i+1]]\n" + "for i=0..end with an implicit indices[i+1]=len(array)\n" + "assumed when i=end-1\n" + "\n" + "if indices[i+1] <= indices[i]+1\n" + "then the result is array[indices[i]] for that value\n" + "\n" + "op.accumulate(array) is the same as\n" + "op.reduceat(array,indices)[::2]\n" + "where indices is range(len(array)-1) with a zero placed\n" + "in every other sample:\n" + "indices = zeros(len(array)*2-1)\n" + "indices[1::2] = range(1,len(array))\n" + "\n" + "output shape is based on the size of indices\n" + "\n" + "Parameters:\n" + "-----------\n" + "\n" + "array : array-like\n" + " The array to act on.\n" + "indices : array-like\n" + " Indices specifying ranges to reduce.\n" + "axis : integer\n" + " The axis along which to apply the reduceat.\n" + "dtype : data type or None\n" + " The type used to represent the intermediate results. Defaults\n" + " to the data type of the output array if this is provided, or\n" + " the data type of the input array if no output array is provided.\n" + "out : array-like or None\n" + " A location into which the result is stored. If not provided a\n" + " freshly-allocated array is returned.\n" + "\n" + "Returns:\n" + "--------\n" + "\n" + "r : array\n" + " The reduced values. 
If out was supplied, r is equal to out.\n" + "\n" + "Example:\n" + "--------\n" + "To take the running sum of four successive values:\n" + ">>> np.multiply.reduceat(np.arange(8),[0,4, 1,5, 2,6, 3,7])[::2]\n" + "array([ 6, 10, 14, 18])\n" + "\n" + }, + {"outer", (PyCFunction)ufunc_outer, METH_VARARGS | METH_KEYWORDS, + "outer(A,B)\n" + "Compute the result of applying op to all pairs (a,b)\n" + "\n" + "op.outer(A,B) is equivalent to\n" + "op(A[:,:,...,:,newaxis,...,newaxis]*B[newaxis,...,newaxis,:,...,:]\n" + "where A has B.ndim new axes appended and B has A.ndim new axes prepended.\n" + "\n" + "For A and B one-dimensional, this is equivalent to\n" + "r = empty(len(A),len(B))\n" + "for i in xrange(len(A)):\n" + " for j in xrange(len(B)):\n" + " r[i,j] = A[i]*B[j]\n" + "If A and B are higher-dimensional, the result has dimension A.ndim+B.ndim\n" + "\n" + "Parameters:\n" + "-----------\n" + "\n" + "A : array-like\n" + "B : array-like\n" + "\n" + "Returns:\n" + "--------\n" + "\n" + "r : array\n" + "Example:\n" + "--------\n" + ">>> np.multiply.outer([1,2,3],[4,5,6])\n" + "array([[ 4, 5, 6],\n" + " [ 8, 10, 12],\n" + " [12, 15, 18]])\n" + "\n" + }, {NULL, NULL} /* sentinel */ }; From numpy-svn at scipy.org Fri May 9 01:12:34 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Fri, 9 May 2008 00:12:34 -0500 (CDT) Subject: [Numpy-svn] r5150 - in trunk/numpy: . core/src Message-ID: <20080509051234.2AC3F39C2DC@scipy.org> Author: peridot Date: 2008-05-09 00:12:31 -0500 (Fri, 09 May 2008) New Revision: 5150 Modified: trunk/numpy/add_newdocs.py trunk/numpy/core/src/ufuncobject.c Log: Moved docstrings to add_newdoc.py; added docstring for the class as a whole which describes __call__. Modified: trunk/numpy/add_newdocs.py =================================================================== --- trunk/numpy/add_newdocs.py 2008-05-09 02:51:14 UTC (rev 5149) +++ trunk/numpy/add_newdocs.py 2008-05-09 05:12:31 UTC (rev 5150) @@ -2249,3 +2249,212 @@ seterrcall """) + +add_newdoc("numpy.core","ufunc","""Optimized functions make it possible to implement arithmetic with arrays efficiently + +Unary ufuncs: +============= + +op(X, out=None) +Apply op to X elementwise + +Parameters +---------- +X : array-like +out : array-like + An array to store the output. Must be the same shape as X. + +Returns +------- +r : array-like + r will have the same shape as X; if out is provided, r will be + equal to out. + +Binary ufuncs: +============== + +op(X, Y, out=None) +Apply op to X and Y elementwise. May "broadcast" to make +the shapes of X and Y congruent. + +The broadcasting rules are: +* Dimensions of length 1 may be prepended to either array +* Arrays may be repeated along dimensions of length 1 + +Parameters +---------- +X : array-like +Y : array-like +out : array-like + An array to store the output. Must be the same shape as the + output would have. + +Returns +------- +r : array-like + The return value; if out is provided, r will be equal to out. +""") +add_newdoc("numpy.core","ufunc", + [("reduce","""reduce(array,axis=0,dtype=None,out=None) +reduce applies the operator to all elements of the array producing +a single result. + +For a one-dimensional array, reduce produces results equivalent to: +r = op.identity +for i in xrange(len(A)): + r = op(r,A[i]) +return r + +For example, add.reduce() is equivalent to sum(). + +Parameters: +----------- + +array : array-like + The array to act on. +axis : integer + The axis along which to apply the reduction. 
+dtype : data type or None + The type used to represent the intermediate results. Defaults + to the data type of the output array if this is provided, or + the data type of the input array if no output array is provided. +out : array-like or None + A location into which the result is stored. If not provided a + freshly-allocated array is returned. + +Returns: +-------- + +r : array + The reduced values. If out was supplied, r is equal to out. + +Example: +-------- +>>> np.multiply.reduce([2,3,5]) +30 + + +"""), + ("accumulate","""accumulate(array,axis=None,dtype=None,out=None) +accumulate applies the operator to all elements of the array producing +cumulative results. + +For a one-dimensional array, accumulate produces results equivalent to: +r = np.empty(len(A)) +t = op.identity +for i in xrange(len(A)): + t = op(t,A[i]) + r[i] = t +return r + +For example, add.accumulate() is equivalent to cumsum(). + +Parameters: +----------- + +array : array-like + The array to act on. +axis : integer + The axis along which to apply the accumulation. +dtype : data type or None + The type used to represent the intermediate results. Defaults + to the data type of the output array if this is provided, or + the data type of the input array if no output array is provided. +out : array-like or None + A location into which the result is stored. If not provided a + freshly-allocated array is returned. + +Returns: +-------- + +r : array + The accumulated values. If out was supplied, r is equal to out. + +Example: +-------- +>>> np.multiply.accumulate([2,3,5]) +array([2,6,30]) + +"""), + ("reduceat","""reduceat(self,array,indices,axis=None,dtype=None,out=None) +reduceat performs a reduce over an axis using the indices as a guide + +op.reduceat(array,indices) computes +op.reduce(array[indices[i]:indices[i+1]]) +for i=0..end with an implicit indices[i+1]=len(array) +assumed when i=end-1 + +if indices[i+1] <= indices[i]+1 +then the result is array[indices[i]] for that value + +op.accumulate(array) is the same as +op.reduceat(array,indices)[::2] +where indices is range(len(array)-1) with a zero placed +in every other sample: +indices = zeros(len(array)*2-1) +indices[1::2] = range(1,len(array)) + +output shape is based on the size of indices + +Parameters: +----------- + +array : array-like + The array to act on. +indices : array-like + Indices specifying ranges to reduce. +axis : integer + The axis along which to apply the reduceat. +dtype : data type or None + The type used to represent the intermediate results. Defaults + to the data type of the output array if this is provided, or + the data type of the input array if no output array is provided. +out : array-like or None + A location into which the result is stored. If not provided a + freshly-allocated array is returned. + +Returns: +-------- + +r : array + The reduced values. If out was supplied, r is equal to out. + +Example: +-------- +To take the running sum of four successive values: +>>> np.multiply.reduceat(np.arange(8),[0,4, 1,5, 2,6, 3,7])[::2] +array([ 6, 10, 14, 18]) + +"""), + ("outer","""outer(A,B) +Compute the result of applying op to all pairs (a,b) + +op.outer(A,B) is equivalent to +op(A[:,:,...,:,newaxis,...,newaxis]*B[newaxis,...,newaxis,:,...,:]) +where A has B.ndim new axes appended and B has A.ndim new axes prepended. 
+ +For A and B one-dimensional, this is equivalent to +r = empty(len(A),len(B)) +for i in xrange(len(A)): + for j in xrange(len(B)): + r[i,j] = A[i]*B[j] +If A and B are higher-dimensional, the result has dimension A.ndim+B.ndim + +Parameters: +----------- + +A : array-like +B : array-like + +Returns: +-------- + +r : array +Example: +-------- +>>> np.multiply.outer([1,2,3],[4,5,6]) +array([[ 4, 5, 6], + [ 8, 10, 12], + [12, 15, 18]]) + +""")]) Modified: trunk/numpy/core/src/ufuncobject.c =================================================================== --- trunk/numpy/core/src/ufuncobject.c 2008-05-09 02:51:14 UTC (rev 5149) +++ trunk/numpy/core/src/ufuncobject.c 2008-05-09 05:12:31 UTC (rev 5150) @@ -3951,174 +3951,12 @@ static struct PyMethodDef ufunc_methods[] = { - {"reduce", (PyCFunction)ufunc_reduce, METH_VARARGS | METH_KEYWORDS, - "reduce(array,axis=0,dtype=None,out=None)\n" - "reduce applies the operator to all elements of the array producing\n" - "a single result.\n" - "\n" - "For a one-dimensional array, reduce produces results equivalent to:\n" - "r = op.identity\n" - "for i in xrange(len(A)):\n" - " r = op(r,A[i])\n" - "return r\n" - "\n" - "For example, add.reduce() is equivalent to sum().\n" - "\n" - "Parameters:\n" - "-----------\n" - "\n" - "array : array-like\n" - " The array to act on.\n" - "axis : integer\n" - " The axis along which to apply the reduction.\n" - "dtype : data type or None\n" - " The type used to represent the intermediate results. Defaults\n" - " to the data type of the output array if this is provided, or\n" - " the data type of the input array if no output array is provided.\n" - "out : array-like or None\n" - " A location into which the result is stored. If not provided a\n" - " freshly-allocated array is returned.\n" - "\n" - "Returns:\n" - "--------\n" - "\n" - "r : array\n" - " The reduced values. If out was supplied, r is equal to out.\n" - "\n" - "Example:\n" - "--------\n" - ">>> np.multiply.reduce([2,3,5])\n" - "30\n" - "\n" - }, + {"reduce", (PyCFunction)ufunc_reduce, METH_VARARGS | METH_KEYWORDS }, {"accumulate", (PyCFunction)ufunc_accumulate, - METH_VARARGS | METH_KEYWORDS, - "accumulate(array,axis=None,dtype=None,out=None)\n" - "accumulate applies the operator to all elements of the array producing\n" - "cumulative results.\n" - "\n" - "For a one-dimensional array, accumulate produces results equivalent to:\n" - "r = np.empty(len(A))\n" - "t = op.identity\n" - "for i in xrange(len(A)):\n" - " t = op(t,A[i])\n" - " r[i] = t\n" - "return r\n" - "\n" - "For example, add.accumulate() is equivalent to cumsum().\n" - "\n" - "Parameters:\n" - "-----------\n" - "\n" - "array : array-like\n" - " The array to act on.\n" - "axis : integer\n" - " The axis along which to apply the accumulation.\n" - "dtype : data type or None\n" - " The type used to represent the intermediate results. Defaults\n" - " to the data type of the output array if this is provided, or\n" - " the data type of the input array if no output array is provided.\n" - "out : array-like or None\n" - " A location into which the result is stored. If not provided a\n" - " freshly-allocated array is returned.\n" - "\n" - "Returns:\n" - "--------\n" - "\n" - "r : array\n" - " The accumulated values. 
If out was supplied, r is equal to out.\n" - "\n" - "Example:\n" - "--------\n" - ">>> np.multiply.accumulate([2,3,5])\n" - "array([2,6,30])\n" - "\n" - }, + METH_VARARGS | METH_KEYWORDS }, {"reduceat", (PyCFunction)ufunc_reduceat, - METH_VARARGS | METH_KEYWORDS, - "reduceat(self,array,indices,axis=None,dtype=None,out=None)\n" - "reduceat performs a reduce over an axis using the indices as a guide\n" - "\n" - "op.reduceat(array,indices) computes\n" - "op.reduce(array[indices[i]:indices[i+1]]\n" - "for i=0..end with an implicit indices[i+1]=len(array)\n" - "assumed when i=end-1\n" - "\n" - "if indices[i+1] <= indices[i]+1\n" - "then the result is array[indices[i]] for that value\n" - "\n" - "op.accumulate(array) is the same as\n" - "op.reduceat(array,indices)[::2]\n" - "where indices is range(len(array)-1) with a zero placed\n" - "in every other sample:\n" - "indices = zeros(len(array)*2-1)\n" - "indices[1::2] = range(1,len(array))\n" - "\n" - "output shape is based on the size of indices\n" - "\n" - "Parameters:\n" - "-----------\n" - "\n" - "array : array-like\n" - " The array to act on.\n" - "indices : array-like\n" - " Indices specifying ranges to reduce.\n" - "axis : integer\n" - " The axis along which to apply the reduceat.\n" - "dtype : data type or None\n" - " The type used to represent the intermediate results. Defaults\n" - " to the data type of the output array if this is provided, or\n" - " the data type of the input array if no output array is provided.\n" - "out : array-like or None\n" - " A location into which the result is stored. If not provided a\n" - " freshly-allocated array is returned.\n" - "\n" - "Returns:\n" - "--------\n" - "\n" - "r : array\n" - " The reduced values. If out was supplied, r is equal to out.\n" - "\n" - "Example:\n" - "--------\n" - "To take the running sum of four successive values:\n" - ">>> np.multiply.reduceat(np.arange(8),[0,4, 1,5, 2,6, 3,7])[::2]\n" - "array([ 6, 10, 14, 18])\n" - "\n" - }, - {"outer", (PyCFunction)ufunc_outer, METH_VARARGS | METH_KEYWORDS, - "outer(A,B)\n" - "Compute the result of applying op to all pairs (a,b)\n" - "\n" - "op.outer(A,B) is equivalent to\n" - "op(A[:,:,...,:,newaxis,...,newaxis]*B[newaxis,...,newaxis,:,...,:]\n" - "where A has B.ndim new axes appended and B has A.ndim new axes prepended.\n" - "\n" - "For A and B one-dimensional, this is equivalent to\n" - "r = empty(len(A),len(B))\n" - "for i in xrange(len(A)):\n" - " for j in xrange(len(B)):\n" - " r[i,j] = A[i]*B[j]\n" - "If A and B are higher-dimensional, the result has dimension A.ndim+B.ndim\n" - "\n" - "Parameters:\n" - "-----------\n" - "\n" - "A : array-like\n" - "B : array-like\n" - "\n" - "Returns:\n" - "--------\n" - "\n" - "r : array\n" - "Example:\n" - "--------\n" - ">>> np.multiply.outer([1,2,3],[4,5,6])\n" - "array([[ 4, 5, 6],\n" - " [ 8, 10, 12],\n" - " [12, 15, 18]])\n" - "\n" - }, + METH_VARARGS | METH_KEYWORDS }, + {"outer", (PyCFunction)ufunc_outer, METH_VARARGS | METH_KEYWORDS }, {NULL, NULL} /* sentinel */ }; @@ -4258,9 +4096,8 @@ #undef _typecharfromnum -static char Ufunctype__doc__[] = - "Optimized functions make it possible to implement arithmetic "\ - "with arrays efficiently"; +/* Docstring is now set from python */ +/* static char *Ufunctype__doc__ = NULL; */ static PyGetSetDef ufunc_getset[] = { {"__doc__", (getter)ufunc_get_doc, NULL, "documentation string"}, @@ -4297,7 +4134,7 @@ 0, /* tp_setattro */ 0, /* tp_as_buffer */ Py_TPFLAGS_DEFAULT, /* tp_flags */ - Ufunctype__doc__, /* tp_doc */ + NULL, /* tp_doc */ /* was 
Ufunctype__doc__ */ 0, /* tp_traverse */ 0, /* tp_clear */ 0, /* tp_richcompare */ From numpy-svn at scipy.org Fri May 9 13:03:40 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Fri, 9 May 2008 12:03:40 -0500 (CDT) Subject: [Numpy-svn] r5151 - in trunk/numpy/ma: . tests Message-ID: <20080509170340.2608C39C09F@scipy.org> Author: pierregm Date: 2008-05-09 12:03:36 -0500 (Fri, 09 May 2008) New Revision: 5151 Modified: trunk/numpy/ma/core.py trunk/numpy/ma/tests/test_core.py Log: core : power : mask all negative values when the exponent (b) doesn't satisfy (abs(b-int(b)) Author: pierregm Date: 2008-05-09 12:17:12 -0500 (Fri, 09 May 2008) New Revision: 5152 Modified: trunk/numpy/ma/core.py trunk/numpy/ma/tests/test_core.py Log: core : power : mask all negative values when the exponent (b) doesn't satisfy b==b.astype(int) Modified: trunk/numpy/ma/core.py =================================================================== --- trunk/numpy/ma/core.py 2008-05-09 17:03:36 UTC (rev 5151) +++ trunk/numpy/ma/core.py 2008-05-09 17:17:12 UTC (rev 5152) @@ -2852,14 +2852,13 @@ fb = getdata(b) if fb.dtype.char in typecodes["Integer"]: return masked_array(umath.power(fa, fb), m) - md = (abs(fb-int(fb)) < numpy.finfo(float).precision) - m = mask_or(m, md) + m = mask_or(m, (fa < 0) & (fb != fb.astype(int))) if m is nomask: return masked_array(umath.power(fa, fb)) else: fa = fa.copy() if m.all(): - fa[m] = 1 + fa.flat = 1 else: numpy.putmask(fa,m,1) return masked_array(umath.power(fa, fb), m) Modified: trunk/numpy/ma/tests/test_core.py =================================================================== --- trunk/numpy/ma/tests/test_core.py 2008-05-09 17:03:36 UTC (rev 5151) +++ trunk/numpy/ma/tests/test_core.py 2008-05-09 17:17:12 UTC (rev 5152) @@ -1567,6 +1567,11 @@ assert_almost_equal(power(x,2.), 1.21) assert_equal(power(x,0.5)._mask, 1) assert_equal(power(x,masked)._mask, 1) + x = array([-1.1,-1.1,1.1,1.1,0.]) + b = array([0.5,2.,0.5,2.,1.], mask=[0,0,0,0,1]) + y = power(x,b) + assert_almost_equal(y, [0, 1.21, 1.04880884817, 1.21, 0.] ) + assert_equal(y._mask, [1,0,0,0,1]) ############################################################################### From numpy-svn at scipy.org Fri May 9 20:18:04 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Fri, 9 May 2008 19:18:04 -0500 (CDT) Subject: [Numpy-svn] r5153 - in trunk/numpy/core: . tests Message-ID: <20080510001804.244B439C15A@scipy.org> Author: rkern Date: 2008-05-09 19:18:03 -0500 (Fri, 09 May 2008) New Revision: 5153 Modified: trunk/numpy/core/fromnumeric.py trunk/numpy/core/tests/test_numeric.py Log: Add the out= argument to the clip() function to bring it in line with the .clip() method. Modified: trunk/numpy/core/fromnumeric.py =================================================================== --- trunk/numpy/core/fromnumeric.py 2008-05-09 17:17:12 UTC (rev 5152) +++ trunk/numpy/core/fromnumeric.py 2008-05-10 00:18:03 UTC (rev 5153) @@ -889,30 +889,45 @@ return compress(condition, axis, out) -def clip(a, a_min, a_max): +def clip(a, a_min, a_max, out=None): """Return an array whose values are limited to [a_min, a_max]. Parameters ---------- a : {array_like} Array containing elements to clip. - a_min + a_min : Minimum value - a_max + a_max : Maximum value + out : array, optional + The results will be placed in this array. It may be the input array for + inplace clipping. 
Returns ------- clipped_array : {array} A new array whose elements are same as for a, but values < a_min are replaced with a_min, and > a_max with a_max. - + + Examples + -------- + >>> a = np.arange(10) + >>> np.clip(a, 1, 8) + array([1, 1, 2, 3, 4, 5, 6, 7, 8, 8]) + >>> a + array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) + >>> np.clip(a, 3, 6, out=a) + array([3, 3, 3, 3, 4, 5, 6, 6, 6, 6]) + >>> a + array([3, 3, 3, 3, 4, 5, 6, 6, 6, 6]) + """ try: clip = a.clip except AttributeError: - return _wrapit(a, 'clip', a_min, a_max) - return clip(a_min, a_max) + return _wrapit(a, 'clip', a_min, a_max, out) + return clip(a_min, a_max, out) def sum(a, axis=None, dtype=None, out=None): Modified: trunk/numpy/core/tests/test_numeric.py =================================================================== --- trunk/numpy/core/tests/test_numeric.py 2008-05-09 17:17:12 UTC (rev 5152) +++ trunk/numpy/core/tests/test_numeric.py 2008-05-10 00:18:03 UTC (rev 5153) @@ -668,6 +668,19 @@ self.clip(a, m, M, ac) assert_array_strict_equal(a, ac) + def test_clip_func_takes_out(self): + """ Ensure that the clip() function takes an out= argument. + """ + a = self._generate_data(self.nr, self.nc) + ac = a.copy() + m = -0.5 + M = 0.6 + a2 = clip(a, m, M, out=a) + self.clip(a, m, M, ac) + assert_array_strict_equal(a2, ac) + self.assert_(a2 is a) + + class test_allclose_inf(ParametricTestCase): rtol = 1e-5 atol = 1e-8 From numpy-svn at scipy.org Fri May 9 21:05:11 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Fri, 9 May 2008 20:05:11 -0500 (CDT) Subject: [Numpy-svn] r5154 - trunk/numpy Message-ID: <20080510010511.4139639C15A@scipy.org> Author: charris Date: 2008-05-09 20:05:05 -0500 (Fri, 09 May 2008) New Revision: 5154 Modified: trunk/numpy/add_newdocs.py Log: Break long line. Reformat a bit. Remove trailing whitespace. Modified: trunk/numpy/add_newdocs.py =================================================================== --- trunk/numpy/add_newdocs.py 2008-05-10 00:18:03 UTC (rev 5153) +++ trunk/numpy/add_newdocs.py 2008-05-10 01:05:05 UTC (rev 5154) @@ -319,12 +319,12 @@ add_newdoc('numpy.core.multiarray','fromfile', """fromfile(file=, dtype=float, count=-1, sep='') - + Return an array of the given data type from a text or binary file. - + Data written using the tofile() method can be conveniently recovered using this function. - + Parameters ---------- file : file or string @@ -342,21 +342,21 @@ See also -------- loadtxt : load data from text files - + Notes ----- WARNING: This function should be used sparingly as the binary files are not platform independent. In particular, they contain no endianess or datatype information. Nevertheless it can be useful for reading in simply formatted or binary data quickly. - + """) add_newdoc('numpy.core.multiarray','frombuffer', """frombuffer(buffer=, dtype=float, count=-1, offset=0) - + Returns a 1-d array of data type dtype from buffer. - + Parameters ---------- buffer @@ -367,13 +367,13 @@ Number of items to read. -1 means all data in the buffer. offset : int Number of bytes to jump from the start of the buffer before reading - + Notes ----- If the buffer has data that is not in machine byte-order, then use a proper data type descriptor. The data will not be byteswapped, but the array will manage it in future operations. 
- + """) add_newdoc('numpy.core.multiarray','concatenate', @@ -502,7 +502,7 @@ keys : (k,N) array or tuple of (N,) sequences Array containing values that the returned indices should sort, or a sequence of things that can be converted to arrays of the same shape. - + axis : integer Axis to be indirectly sorted. Default is -1 (i.e. last axis). @@ -802,7 +802,7 @@ out : {None, array}, optional Array into which the result can be placed. Its type is preserved and it must be of the right shape to hold the output. - + See Also -------- all : equivalent function @@ -814,7 +814,7 @@ """a.any(axis=None, out=None) Check if any of the elements of `a` are true. - + Performs a logical_or over the given axis and returns the result Parameters @@ -825,7 +825,7 @@ out : {None, array}, optional Array into which the result can be placed. Its type is preserved and it must be of the right shape to hold the output. - + See Also -------- any : equivalent function @@ -935,9 +935,9 @@ ============ ======= ============= ============ ======== kind speed worst case work space stable ============ ======= ============= ============ ======== - 'quicksort' 1 O(n^2) 0 no - 'mergesort' 2 O(n*log(n)) ~n/2 yes - 'heapsort' 3 O(n*log(n)) 0 no + 'quicksort' 1 O(n^2) 0 no + 'mergesort' 2 O(n*log(n)) ~n/2 yes + 'heapsort' 3 O(n*log(n)) 0 no ============ ======= ============= ============ ======== All the sort algorithms make temporary copies of the data when the @@ -1011,7 +1011,7 @@ array([20, 31, 12, 3]) >>> a.choose(choices, mode='wrap') array([20, 1, 12, 3]) - + """)) @@ -1019,7 +1019,7 @@ """a.clip(a_min, a_max, out=None) Return an array whose values are limited to [a_min, a_max]. - + Parameters ---------- a_min @@ -1036,7 +1036,7 @@ clipped_array : array A new array whose elements are same as for a, but values < a_min are replaced with a_min, and > a_max with a_max. - + """)) @@ -1072,7 +1072,7 @@ [3]]) >>> a.compress([0,1,1]) array([2, 3]) - + """)) @@ -1080,7 +1080,7 @@ """a.conj() Return an array with all complex-valued elements conjugated. - + """)) @@ -1088,7 +1088,7 @@ """a.conjugate() Return an array with all complex-valued elements conjugated. - + """)) @@ -1104,7 +1104,7 @@ If order is 'Fortran' (True) then the result has fortran order. If order is 'Any' (None) then the result has fortran order only if the array already is in fortran order. - + """)) @@ -1245,15 +1245,15 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('dump', """a.dump(file) - + Dump a pickle of the array to the specified file. The array can be read back with pickle.load or numpy.load. - + Parameters ---------- file : str A string naming the dump file. - + """)) @@ -1262,32 +1262,32 @@ Returns the pickle of the array as a string. pickle.loads or numpy.loads will convert the string back to an array. - + """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('fill', """a.fill(value) - + Fill the array with a scalar value. - + """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('flatten', """a.flatten([order]) - + Return a 1-d array (always copy) - + Parameters ---------- order : {'C', 'F'} Whether to flatten in C or Fortran order. - + Notes ----- a.flatten('F') == a.T.flatten('C') - + """)) @@ -1329,7 +1329,7 @@ amax : array_like New array holding the result. If ``out`` was specified, ``out`` is returned. 
- + """)) @@ -1399,21 +1399,21 @@ """a.newbyteorder(byteorder) Equivalent to a.view(a.dtype.newbytorder(byteorder)) - + """)) add_newdoc('numpy.core.multiarray', 'ndarray', ('nonzero', """a.nonzero() - + Returns a tuple of arrays, one for each dimension of a, containing the indices of the non-zero elements in that dimension. The corresponding non-zero values can be obtained with:: - + a[a.nonzero()]. To group the indices by element, rather than dimension, use:: - + transpose(a.nonzero()) instead. The result of this is always a 2d array, with a row for @@ -1475,10 +1475,10 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('ptp', """a.ptp(axis=None, out=None) - + Return (maximum - minimum) along the the given dimension (i.e. peak-to-peak value). - + Parameters ---------- axis : {None, int}, optional @@ -1488,13 +1488,13 @@ Alternative output array in which to place the result. It must have the same shape and buffer length as the expected output but the type will be cast if necessary. - + Returns ------- ptp : ndarray. A new array holding the result, unless ``out`` was specified, in which case a reference to ``out`` is returned. - + Examples -------- >>> x = np.arange(4).reshape((2,2)) @@ -1505,7 +1505,7 @@ array([2, 2]) >>> x.ptp(1) array([1, 1]) - + """)) @@ -1514,7 +1514,7 @@ Set a.flat[n] = values[n] for all n in indices. If values is shorter than indices, it will repeat. - + Parameters ---------- indices : array_like @@ -1549,9 +1549,9 @@ add_newdoc('numpy.core.multiarray', 'putmask', """putmask(a, mask, values) - + Sets a.flat[n] = values[n] for each n where mask.flat[n] is true. - + If values is not the same size as `a` and `mask` then it will repeat. This gives behavior different from a[mask] = values. @@ -1563,6 +1563,7 @@ Boolean mask array values : {array_like} Values to put + """) @@ -1604,9 +1605,9 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('repeat', """a.repeat(repeats, axis=None) - + Repeat elements of an array. - + Parameters ---------- a : {array_like} @@ -1666,7 +1667,7 @@ ------- reshaped_array : array A new view to the array. - + """)) @@ -1681,7 +1682,7 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('round', """a.round(decimals=0, out=None) - + Return an array rounded a to the given number of decimals. The real and imaginary parts of complex numbers are rounded separately. The @@ -1727,7 +1728,7 @@ array([ 1, 2, 3, 11]) >>> x.round(decimals=-1) array([ 0, 0, 0, 10]) - + """)) @@ -1763,7 +1764,7 @@ ----- The array a must be 1-d and is assumed to be sorted in ascending order. Searchsorted uses binary search to find the required insertion points. - + """)) @@ -1813,13 +1814,13 @@ sort keeps items with the same key in the same relative order. The three available algorithms have the following properties: - =========== ======= ============= ============ ======= + =========== ======= ============= ============ ======= kind speed worst case work space stable - =========== ======= ============= ============ ======= - 'quicksort' 1 O(n^2) 0 no - 'mergesort' 2 O(n*log(n)) ~n/2 yes - 'heapsort' 3 O(n*log(n)) 0 no - =========== ======= ============= ============ ======= + =========== ======= ============= ============ ======= + 'quicksort' 1 O(n^2) 0 no + 'mergesort' 2 O(n*log(n)) ~n/2 yes + 'heapsort' 3 O(n*log(n)) 0 no + =========== ======= ============= ============ ======= All the sort algorithms make temporary copies of the data when the sort is not along the last axis. 
Consequently, sorts along the last axis are faster @@ -1881,11 +1882,11 @@ Notes ----- The standard deviation is the square root of the average of the squared - deviations from the mean, i.e. var = sqrt(mean(abs(x - x.mean())**2)). - The computed standard deviation is computed by dividing by the number of - elements, N-ddof. The option ddof defaults to zero, that is, a - biased estimate. Note that for complex numbers std takes the absolute - value before squaring, so that the result is always real and nonnegative. + deviations from the mean, i.e. var = sqrt(mean(abs(x - x.mean())**2)). The + computed standard deviation is computed by dividing by the number of + elements, N-ddof. The option ddof defaults to zero, that is, a biased + estimate. Note that for complex numbers std takes the absolute value before + squaring, so that the result is always real and nonnegative. """)) @@ -1894,28 +1895,28 @@ """a.sum(axis=None, dtype=None, out=None) Return the sum of the array elements over the given axis - + Parameters ---------- axis : {None, integer} Axis over which the sum is taken. If None is used, then the sum is over all the array elements. dtype : {None, dtype}, optional - Determines the type of the returned array and of the accumulator - where the elements are summed. If dtype has the value None and the - type of a is an integer type of precision less than the default - platform integer, then the default platform integer precision is - used. Otherwise, the dtype is the same as that of a. + Determines the type of the returned array and of the accumulator where + the elements are summed. If dtype has the value None and the type of a + is an integer type of precision less than the default platform integer, + then the default platform integer precision is used. Otherwise, the + dtype is the same as that of a. out : {None, array}, optional - Array into which the sum can be placed. Its type is preserved and - it must be of the right shape to hold the output. + Array into which the sum can be placed. Its type is preserved and it + must be of the right shape to hold the output. Returns ------- sum_along_axis : {array, scalar}, see dtype parameter above. - Returns an array whose shape is the same as a with the specified - axis removed. Returns a 0d array when a is 1d or axis=None. - Returns a reference to the specified output array if specified. + Returns an array whose shape is the same as a with the specified axis + removed. Returns a 0d array when a is 1d or axis=None. Returns a + reference to the specified output array if specified. See Also -------- @@ -2011,7 +2012,7 @@ See Also -------- take : equivalent function - + """)) @@ -2026,10 +2027,9 @@ This is a convenience function for quick storage of array data. Information on endianess and precision is lost, so this method is not a - good choice for files intended to archive data or transport data - between machines with different endianess. Some of these problems can - be overcome by outputting the data as text files at the expense of - speed and file size. + good choice for files intended to archive data or transport data between + machines with different endianess. Some of these problems can be overcome + by outputting the data as text files at the expense of speed and file size. Parameters ---------- @@ -2049,7 +2049,7 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('tolist', """a.tolist() - + Return the array as nested lists. 
Copy the data portion of the array to a hierarchical Python list and return @@ -2060,15 +2060,15 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('tostring', """a.tostring(order='C') - + Construct a Python string containing the raw data bytes in the array. - + Parameters ---------- order : {'C', 'F', None} Order of the data for multidimensional arrays: C, Fortran, or the same as for the original array. - + """)) @@ -2077,12 +2077,13 @@ Return the sum along diagonals of the array. - If a is 2-d, returns the sum along the diagonal of self with the given offset, i.e., the - collection of elements of the form a[i,i+offset]. If a has more than two - dimensions, then the axes specified by axis1 and axis2 are used to determine - the 2-d subarray whose trace is returned. The shape of the resulting - array can be determined by removing axis1 and axis2 and appending an index - to the right equal to the size of the resulting diagonals. + If a is 2-d, returns the sum along the diagonal of self with the given + offset, i.e., the collection of elements of the form a[i,i+offset]. If a + has more than two dimensions, then the axes specified by axis1 and axis2 + are used to determine the 2-d subarray whose trace is returned. The shape + of the resulting array can be determined by removing axis1 and axis2 and + appending an index to the right equal to the size of the resulting + diagonals. Parameters ---------- @@ -2223,238 +2224,252 @@ """)) add_newdoc('numpy.core.umath','geterrobj', - """geterrobj() + """geterrobj() - Used internally by `geterr`. + Used internally by `geterr`. - Returns - ------- - errobj : list - Internal numpy buffer size, error mask, error callback function. + Returns + ------- + errobj : list + Internal numpy buffer size, error mask, error callback function. - """) + """) add_newdoc('numpy.core.umath','seterrobj', - """seterrobj() + """seterrobj() - Used internally by `seterr`. + Used internally by `seterr`. - Parameters - ---------- - errobj : list - [buffer_size, error_mask, callback_func] + Parameters + ---------- + errobj : list + [buffer_size, error_mask, callback_func] - See Also - -------- - seterrcall + See Also + -------- + seterrcall - """) + """) -add_newdoc("numpy.core","ufunc","""Optimized functions make it possible to implement arithmetic with arrays efficiently +add_newdoc("numpy.core","ufunc", + """Functions that operate element by element on whole arrays. -Unary ufuncs: -============= + Unary ufuncs: + ============= -op(X, out=None) -Apply op to X elementwise + op(X, out=None) + Apply op to X elementwise -Parameters ----------- -X : array-like -out : array-like - An array to store the output. Must be the same shape as X. + Parameters + ---------- + X : array-like + out : array-like + An array to store the output. Must be the same shape as X. -Returns -------- -r : array-like - r will have the same shape as X; if out is provided, r will be - equal to out. + Returns + ------- + r : array-like + r will have the same shape as X; if out is provided, r will be + equal to out. -Binary ufuncs: -============== + Binary ufuncs: + ============== -op(X, Y, out=None) -Apply op to X and Y elementwise. May "broadcast" to make -the shapes of X and Y congruent. + op(X, Y, out=None) + Apply op to X and Y elementwise. May "broadcast" to make + the shapes of X and Y congruent. 
-The broadcasting rules are: -* Dimensions of length 1 may be prepended to either array -* Arrays may be repeated along dimensions of length 1 + The broadcasting rules are: + * Dimensions of length 1 may be prepended to either array + * Arrays may be repeated along dimensions of length 1 -Parameters ----------- -X : array-like -Y : array-like -out : array-like - An array to store the output. Must be the same shape as the - output would have. + Parameters + ---------- + X : array-like + Y : array-like + out : array-like + An array to store the output. Must be the same shape as the + output would have. -Returns -------- -r : array-like - The return value; if out is provided, r will be equal to out. -""") -add_newdoc("numpy.core","ufunc", - [("reduce","""reduce(array,axis=0,dtype=None,out=None) -reduce applies the operator to all elements of the array producing -a single result. + Returns + ------- + r : array-like + The return value; if out is provided, r will be equal to out. -For a one-dimensional array, reduce produces results equivalent to: -r = op.identity -for i in xrange(len(A)): - r = op(r,A[i]) -return r + """) -For example, add.reduce() is equivalent to sum(). -Parameters: ------------ +add_newdoc("numpy.core","ufunc",("reduce", + """reduce(array,axis=0,dtype=None,out=None) -array : array-like - The array to act on. -axis : integer - The axis along which to apply the reduction. -dtype : data type or None - The type used to represent the intermediate results. Defaults - to the data type of the output array if this is provided, or - the data type of the input array if no output array is provided. -out : array-like or None - A location into which the result is stored. If not provided a - freshly-allocated array is returned. + Reduce applies the operator to all elements of the array producing + a single result. -Returns: --------- + For a one-dimensional array, reduce produces results equivalent to: + r = op.identity + for i in xrange(len(A)): + r = op(r,A[i]) + return r -r : array - The reduced values. If out was supplied, r is equal to out. + For example, add.reduce() is equivalent to sum(). -Example: --------- ->>> np.multiply.reduce([2,3,5]) -30 + Parameters: + ----------- + array : array-like + The array to act on. + axis : integer + The axis along which to apply the reduction. + dtype : data type or None + The type used to represent the intermediate results. Defaults + to the data type of the output array if this is provided, or + the data type of the input array if no output array is provided. + out : array-like or None + A location into which the result is stored. If not provided a + freshly-allocated array is returned. -"""), - ("accumulate","""accumulate(array,axis=None,dtype=None,out=None) -accumulate applies the operator to all elements of the array producing -cumulative results. + Returns: + -------- -For a one-dimensional array, accumulate produces results equivalent to: -r = np.empty(len(A)) -t = op.identity -for i in xrange(len(A)): - t = op(t,A[i]) - r[i] = t -return r + r : array + The reduced values. If out was supplied, r is equal to out. -For example, add.accumulate() is equivalent to cumsum(). + Example: + -------- + >>> np.multiply.reduce([2,3,5]) + 30 -Parameters: ------------ + """)) -array : array-like - The array to act on. -axis : integer - The axis along which to apply the accumulation. -dtype : data type or None - The type used to represent the intermediate results. 
Defaults - to the data type of the output array if this is provided, or - the data type of the input array if no output array is provided. -out : array-like or None - A location into which the result is stored. If not provided a - freshly-allocated array is returned. +add_newdoc("numpy.core","ufunc",("accumulate", + """accumulate(array,axis=None,dtype=None,out=None) -Returns: --------- + Accumulate applies the operator to all elements of the array producing + cumulative results. -r : array - The accumulated values. If out was supplied, r is equal to out. + For a one-dimensional array, accumulate produces results equivalent to: + r = np.empty(len(A)) + t = op.identity + for i in xrange(len(A)): + t = op(t,A[i]) + r[i] = t + return r -Example: --------- ->>> np.multiply.accumulate([2,3,5]) -array([2,6,30]) + For example, add.accumulate() is equivalent to cumsum(). -"""), - ("reduceat","""reduceat(self,array,indices,axis=None,dtype=None,out=None) -reduceat performs a reduce over an axis using the indices as a guide + Parameters: + ----------- -op.reduceat(array,indices) computes -op.reduce(array[indices[i]:indices[i+1]]) -for i=0..end with an implicit indices[i+1]=len(array) -assumed when i=end-1 + array : array-like + The array to act on. + axis : integer + The axis along which to apply the accumulation. + dtype : data type or None + The type used to represent the intermediate results. Defaults + to the data type of the output array if this is provided, or + the data type of the input array if no output array is provided. + out : array-like or None + A location into which the result is stored. If not provided a + freshly-allocated array is returned. -if indices[i+1] <= indices[i]+1 -then the result is array[indices[i]] for that value + Returns: + -------- -op.accumulate(array) is the same as -op.reduceat(array,indices)[::2] -where indices is range(len(array)-1) with a zero placed -in every other sample: -indices = zeros(len(array)*2-1) -indices[1::2] = range(1,len(array)) + r : array + The accumulated values. If out was supplied, r is equal to out. -output shape is based on the size of indices + Example: + -------- + >>> np.multiply.accumulate([2,3,5]) + array([2,6,30]) -Parameters: ------------ + """)) -array : array-like - The array to act on. -indices : array-like - Indices specifying ranges to reduce. -axis : integer - The axis along which to apply the reduceat. -dtype : data type or None - The type used to represent the intermediate results. Defaults - to the data type of the output array if this is provided, or - the data type of the input array if no output array is provided. -out : array-like or None - A location into which the result is stored. If not provided a - freshly-allocated array is returned. +add_newdoc("numpy.core","ufunc",("reduceat", + """reduceat(self,array,indices,axis=None,dtype=None,out=None) -Returns: --------- + Reduceat performs a reduce over an axis using the indices as a guide -r : array - The reduced values. If out was supplied, r is equal to out. 
+ op.reduceat(array,indices) computes + op.reduce(array[indices[i]:indices[i+1]]) + for i=0..end with an implicit indices[i+1]=len(array) + assumed when i=end-1 -Example: --------- -To take the running sum of four successive values: ->>> np.multiply.reduceat(np.arange(8),[0,4, 1,5, 2,6, 3,7])[::2] -array([ 6, 10, 14, 18]) + if indices[i+1] <= indices[i]+1 + then the result is array[indices[i]] for that value -"""), - ("outer","""outer(A,B) -Compute the result of applying op to all pairs (a,b) + op.accumulate(array) is the same as + op.reduceat(array,indices)[::2] + where indices is range(len(array)-1) with a zero placed + in every other sample: + indices = zeros(len(array)*2-1) + indices[1::2] = range(1,len(array)) -op.outer(A,B) is equivalent to -op(A[:,:,...,:,newaxis,...,newaxis]*B[newaxis,...,newaxis,:,...,:]) -where A has B.ndim new axes appended and B has A.ndim new axes prepended. + output shape is based on the size of indices -For A and B one-dimensional, this is equivalent to -r = empty(len(A),len(B)) -for i in xrange(len(A)): - for j in xrange(len(B)): - r[i,j] = A[i]*B[j] -If A and B are higher-dimensional, the result has dimension A.ndim+B.ndim + Parameters: + ----------- -Parameters: ------------ + array : array-like + The array to act on. + indices : array-like + Indices specifying ranges to reduce. + axis : integer + The axis along which to apply the reduceat. + dtype : data type or None + The type used to represent the intermediate results. Defaults + to the data type of the output array if this is provided, or + the data type of the input array if no output array is provided. + out : array-like or None + A location into which the result is stored. If not provided a + freshly-allocated array is returned. -A : array-like -B : array-like + Returns: + -------- -Returns: --------- + r : array + The reduced values. If out was supplied, r is equal to out. -r : array -Example: --------- ->>> np.multiply.outer([1,2,3],[4,5,6]) -array([[ 4, 5, 6], - [ 8, 10, 12], - [12, 15, 18]]) + Example: + -------- + To take the running sum of four successive values: + >>> np.multiply.reduceat(np.arange(8),[0,4, 1,5, 2,6, 3,7])[::2] + array([ 6, 10, 14, 18]) -""")]) + """)) + +add_newdoc("numpy.core","ufunc",("outer", + """outer(A,B) + + Compute the result of applying op to all pairs (a,b) + + op.outer(A,B) is equivalent to + op(A[:,:,...,:,newaxis,...,newaxis]*B[newaxis,...,newaxis,:,...,:]) + where A has B.ndim new axes appended and B has A.ndim new axes prepended. + + For A and B one-dimensional, this is equivalent to + r = empty(len(A),len(B)) + for i in xrange(len(A)): + for j in xrange(len(B)): + r[i,j] = A[i]*B[j] + If A and B are higher-dimensional, the result has dimension A.ndim+B.ndim + + Parameters: + ----------- + + A : array-like + B : array-like + + Returns: + -------- + + r : array + Example: + -------- + >>> np.multiply.outer([1,2,3],[4,5,6]) + array([[ 4, 5, 6], + [ 8, 10, 12], + [12, 15, 18]]) + + """)) + From numpy-svn at scipy.org Sat May 10 23:08:07 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sat, 10 May 2008 22:08:07 -0500 (CDT) Subject: [Numpy-svn] r5155 - trunk/numpy/core/src Message-ID: <20080511030807.01B2439C1B1@scipy.org> Author: oliphant Date: 2008-05-10 22:08:06 -0500 (Sat, 10 May 2008) New Revision: 5155 Modified: trunk/numpy/core/src/arraymethods.c Log: Use EquivTypes instead of equality testing for data-type comparison. 
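A minimal Python-level sketch of the data-type equivalence this change relies on (illustrative only, not part of the commit): two separately constructed descriptors can be distinct objects yet describe equivalent types, so the old pointer-equality test missed astype()'s plain-copy fast path that the diff below routes through PyArray_EquivTypes instead.

>>> import numpy as np
>>> d1 = np.dtype([('a', np.int8)])
>>> d2 = np.dtype([('a', np.int8)])
>>> d1 is d2          # distinct descriptor objects...
False
>>> d1 == d2          # ...but equivalent data-types
True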
Modified: trunk/numpy/core/src/arraymethods.c =================================================================== --- trunk/numpy/core/src/arraymethods.c 2008-05-10 01:05:05 UTC (rev 5154) +++ trunk/numpy/core/src/arraymethods.c 2008-05-11 03:08:06 UTC (rev 5155) @@ -685,7 +685,7 @@ return NULL; } - if (descr == self->descr) { + if (PyArray_EquivTypes(descr, self->descr)) { obj = _ARET(PyArray_NewCopy(self,NPY_ANYORDER)); Py_XDECREF(descr); return obj; From numpy-svn at scipy.org Mon May 12 11:48:30 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Mon, 12 May 2008 10:48:30 -0500 (CDT) Subject: [Numpy-svn] r5156 - in trunk/numpy/ma: . tests Message-ID: <20080512154830.3478D39C33F@scipy.org> Author: pierregm Date: 2008-05-12 10:48:27 -0500 (Mon, 12 May 2008) New Revision: 5156 Modified: trunk/numpy/ma/core.py trunk/numpy/ma/tests/test_core.py Log: core : power : use the quick-and-dirty approach: compute everything and mask afterwards : MaskedArray._update_from(obj) : ensure that _baseclass is a ndarray if obj wasn't one already : introduced clip in the namespace, just for convenience Modified: trunk/numpy/ma/core.py =================================================================== --- trunk/numpy/ma/core.py 2008-05-11 03:08:06 UTC (rev 5155) +++ trunk/numpy/ma/core.py 2008-05-12 15:48:27 UTC (rev 5156) @@ -26,8 +26,8 @@ 'arctanh', 'argmax', 'argmin', 'argsort', 'around', 'array', 'asarray','asanyarray', 'bitwise_and', 'bitwise_or', 'bitwise_xor', - 'ceil', 'choose', 'common_fill_value', 'compress', 'compressed', - 'concatenate', 'conjugate', 'cos', 'cosh', 'count', + 'ceil', 'choose', 'clip', 'common_fill_value', 'compress', + 'compressed', 'concatenate', 'conjugate', 'cos', 'cosh', 'count', 'default_fill_value', 'diagonal', 'divide', 'dump', 'dumps', 'empty', 'empty_like', 'equal', 'exp', 'fabs', 'fmod', 'filled', 'floor', 'floor_divide','fix_invalid', @@ -1226,7 +1226,7 @@ def _update_from(self, obj): """Copies some attributes of obj to self. """ - if obj is not None: + if obj is not None and isinstance(obj,ndarray): _baseclass = type(obj) else: _baseclass = ndarray @@ -2845,23 +2845,45 @@ """ if third is not None: raise MAError, "3-argument power not supported." 
+ # Get the masks ma = getmask(a) mb = getmask(b) m = mask_or(ma, mb) + # Get the rawdata fa = getdata(a) fb = getdata(b) - if fb.dtype.char in typecodes["Integer"]: - return masked_array(umath.power(fa, fb), m) - m = mask_or(m, (fa < 0) & (fb != fb.astype(int))) - if m is nomask: - return masked_array(umath.power(fa, fb)) + # Get the type of the result (so that we preserve subclasses) + if isinstance(a,MaskedArray): + basetype = type(a) else: - fa = fa.copy() - if m.all(): - fa.flat = 1 - else: - numpy.putmask(fa,m,1) - return masked_array(umath.power(fa, fb), m) + basetype = MaskedArray + # Get the result and view it as a (subclass of) MaskedArray + result = umath.power(fa,fb).view(basetype) + # Retrieve some extra attributes if needed + result._update_from(a) + # Find where we're in trouble w/ NaNs and Infs + invalid = numpy.logical_not(numpy.isfinite(result.view(ndarray))) + # Add the initial mask + if m is not nomask: + result._mask = m + # Fix the invalid parts + if invalid.any(): + result[invalid] = masked + result._data[invalid] = result.fill_value + return result + +# if fb.dtype.char in typecodes["Integer"]: +# return masked_array(umath.power(fa, fb), m) +# m = mask_or(m, (fa < 0) & (fb != fb.astype(int))) +# if m is nomask: +# return masked_array(umath.power(fa, fb)) +# else: +# fa = fa.copy() +# if m.all(): +# fa.flat = 1 +# else: +# numpy.putmask(fa,m,1) +# return masked_array(umath.power(fa, fb), m) #.............................................................................. def argsort(a, axis=None, kind='quicksort', order=None, fill_value=None): @@ -3373,6 +3395,7 @@ fromfunction = _convert2ma('fromfunction') identity = _convert2ma('identity') indices = numpy.indices +clip = numpy.clip ############################################################################### Modified: trunk/numpy/ma/tests/test_core.py =================================================================== --- trunk/numpy/ma/tests/test_core.py 2008-05-11 03:08:06 UTC (rev 5155) +++ trunk/numpy/ma/tests/test_core.py 2008-05-12 15:48:27 UTC (rev 5156) @@ -1571,7 +1571,11 @@ b = array([0.5,2.,0.5,2.,1.], mask=[0,0,0,0,1]) y = power(x,b) assert_almost_equal(y, [0, 1.21, 1.04880884817, 1.21, 0.] ) - assert_equal(y._mask, [1,0,0,0,1]) + assert_equal(y._mask, [1,0,0,0,1]) + b.mask = nomask + y = power(x,b) + assert_equal(y._mask, [1,0,0,0,1]) + ############################################################################### From numpy-svn at scipy.org Mon May 12 13:09:14 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Mon, 12 May 2008 12:09:14 -0500 (CDT) Subject: [Numpy-svn] r5157 - in trunk/numpy/ma: . tests Message-ID: <20080512170914.047DA39C731@scipy.org> Author: pierregm Date: 2008-05-12 12:09:08 -0500 (Mon, 12 May 2008) New Revision: 5157 Modified: trunk/numpy/ma/core.py trunk/numpy/ma/tests/test_core.py Log: power : fixed a bug when a scalar is the first argument MaskedArray.__pow__ : call power MaskedArray.__ipow__: works in place. Note that the _data part gets "fixed" (NaNs/Infs set to fill_value) Modified: trunk/numpy/ma/core.py =================================================================== --- trunk/numpy/ma/core.py 2008-05-12 15:48:27 UTC (rev 5156) +++ trunk/numpy/ma/core.py 2008-05-12 17:09:08 UTC (rev 5157) @@ -1685,7 +1685,7 @@ return multiply(self, other) # def __div__(self, other): - "Divides other into self, and return a new masked array." + "Divide other into self, and return a new masked array." 
return divide(self, other) # def __truediv__(self, other): @@ -1695,7 +1695,10 @@ def __floordiv__(self, other): "Divide other into self, and return a new masked array." return floor_divide(self, other) - + # + def __pow__(self, other): + "Raise self to the power other, masking the potential NaNs/Infs" + return power(self, other) #............................................ def __iadd__(self, other): "Add other to self in-place." @@ -1740,6 +1743,19 @@ ndarray.__idiv__(self._data, other_data) self._mask = mask_or(self._mask, new_mask) return self + #... + def __ipow__(self, other): + "Raise self to the power other, in place" + _data = self._data + other_data = getdata(other) + other_mask = getmask(other) + ndarray.__ipow__(_data, other_data) + invalid = numpy.logical_not(numpy.isfinite(_data)) + new_mask = mask_or(other_mask,invalid) + self._mask = mask_or(self._mask, new_mask) + # The following line is potentially problematic, as we change _data... + numpy.putmask(self._data,invalid,self.fill_value) + return self #............................................ def __float__(self): "Convert to float." @@ -2859,15 +2875,20 @@ basetype = MaskedArray # Get the result and view it as a (subclass of) MaskedArray result = umath.power(fa,fb).view(basetype) - # Retrieve some extra attributes if needed - result._update_from(a) # Find where we're in trouble w/ NaNs and Infs invalid = numpy.logical_not(numpy.isfinite(result.view(ndarray))) + # Retrieve some extra attributes if needed + if isinstance(result,MaskedArray): + result._update_from(a) # Add the initial mask if m is not nomask: + if numpy.isscalar(result): + return masked result._mask = m # Fix the invalid parts if invalid.any(): + if not result.ndim: + return masked result[invalid] = masked result._data[invalid] = result.fill_value return result Modified: trunk/numpy/ma/tests/test_core.py =================================================================== --- trunk/numpy/ma/tests/test_core.py 2008-05-12 15:48:27 UTC (rev 5156) +++ trunk/numpy/ma/tests/test_core.py 2008-05-12 17:09:08 UTC (rev 5157) @@ -1565,16 +1565,23 @@ def test_power(self): x = -1.1 assert_almost_equal(power(x,2.), 1.21) - assert_equal(power(x,0.5)._mask, 1) - assert_equal(power(x,masked)._mask, 1) + assert(power(x,masked) is masked) x = array([-1.1,-1.1,1.1,1.1,0.]) - b = array([0.5,2.,0.5,2.,1.], mask=[0,0,0,0,1]) + b = array([0.5,2.,0.5,2.,-1.], mask=[0,0,0,0,1]) y = power(x,b) assert_almost_equal(y, [0, 1.21, 1.04880884817, 1.21, 0.] ) assert_equal(y._mask, [1,0,0,0,1]) b.mask = nomask y = power(x,b) assert_equal(y._mask, [1,0,0,0,1]) + z = x**b + assert_equal(z._mask, y._mask) + assert_almost_equal(z,y) + assert_almost_equal(z._data,y._data) + x **= b + assert_equal(x._mask, y._mask) + assert_almost_equal(x,y) + assert_almost_equal(x._data,y._data) From numpy-svn at scipy.org Mon May 12 17:59:08 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Mon, 12 May 2008 16:59:08 -0500 (CDT) Subject: [Numpy-svn] r5158 - trunk/numpy/ma Message-ID: <20080512215908.3799839C0CA@scipy.org> Author: stefan Date: 2008-05-12 16:58:48 -0500 (Mon, 12 May 2008) New Revision: 5158 Modified: trunk/numpy/ma/API_CHANGES.txt Log: Fix reST markup. 
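A small usage sketch of the masked power() behavior converged on in the commits above (r5156/r5157): invalid results, such as a negative base raised to a non-integer exponent, come back masked rather than raising or leaking NaNs, and the ** operator routes through the same code. Illustrative only; the sample data are made up, and exact repr formatting varies between numpy versions, so only the masks are checked.

>>> import numpy.ma as ma
>>> x = ma.array([-1.1, -1.1, 1.1, 0.5], mask=[0, 0, 0, 1])
>>> y = ma.power(x, 0.5)            # (-1.1)**0.5 is not finite -> masked
>>> y.mask.tolist()
[True, True, False, True]
>>> (x ** 0.5).mask.tolist() == y.mask.tolist()
True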
Modified: trunk/numpy/ma/API_CHANGES.txt =================================================================== --- trunk/numpy/ma/API_CHANGES.txt 2008-05-12 17:09:08 UTC (rev 5157) +++ trunk/numpy/ma/API_CHANGES.txt 2008-05-12 21:58:48 UTC (rev 5158) @@ -7,7 +7,8 @@ Masked arrays are subclasses of ndarray --------------------------------------- -Contrary to the original implementation, masked arrays are now regular ndarrays:: +Contrary to the original implementation, masked arrays are now regular +ndarrays:: >>> x = masked_array([1,2,3],mask=[0,0,1]) >>> print isinstance(x, numpy.ndarray) @@ -38,7 +39,7 @@ ``filled(x)`` can return a subclass of ndarray -------------- +---------------------------------------------- The function ``filled(a)`` returns an array of the same type as ``a._data``:: >>> x = masked_array(numpy.matrix([[1,2],[3,4]]),mask=[[0,0],[0,1]]) @@ -122,9 +123,14 @@ ``mr_`` mimics the behavior of ``r_`` for masked arrays:: + >>> np.ma.mr_[3,4,5] + masked_array(data = [3 4 5], + mask = False, + fill_value=999999) + + ``anom`` -------- The ``anom`` method returns the deviations from the average (anomalies). - From numpy-svn at scipy.org Mon May 12 23:54:31 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Mon, 12 May 2008 22:54:31 -0500 (CDT) Subject: [Numpy-svn] r5159 - branches/1.1.x/numpy/core Message-ID: <20080513035431.8C1B239C064@scipy.org> Author: oliphant Date: 2008-05-12 22:54:30 -0500 (Mon, 12 May 2008) New Revision: 5159 Modified: branches/1.1.x/numpy/core/defmatrix.py Log: Back out scalar indexing change for 1.1 matrices. Modified: branches/1.1.x/numpy/core/defmatrix.py =================================================================== --- branches/1.1.x/numpy/core/defmatrix.py 2008-05-12 21:58:48 UTC (rev 5158) +++ branches/1.1.x/numpy/core/defmatrix.py 2008-05-13 03:54:30 UTC (rev 5159) @@ -223,8 +223,6 @@ return def __getitem__(self, index): - if isscalar(index): - return self.__array__()[index] self._getitem = True try: @@ -304,6 +302,10 @@ else: raise ValueError, "unsupported axis" + # To overcome dimension non-reduction of x[0] + def tolist(self): + return self.__array__().tolist() + # To preserve orientation of result... def sum(self, axis=None, dtype=None, out=None): """Sum the matrix over the given axis. If the axis is None, sum From numpy-svn at scipy.org Tue May 13 12:40:20 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Tue, 13 May 2008 11:40:20 -0500 (CDT) Subject: [Numpy-svn] r5160 - in trunk/numpy: core/src core/tests lib Message-ID: <20080513164020.D61B039C293@scipy.org> Author: oliphant Date: 2008-05-13 11:40:19 -0500 (Tue, 13 May 2008) New Revision: 5160 Modified: trunk/numpy/core/src/arrayobject.c trunk/numpy/core/src/multiarraymodule.c trunk/numpy/core/tests/test_multiarray.py trunk/numpy/lib/io.py Log: Fix ticket #791. 
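The ticket #791 fix below swaps PyArray_EnsureArray for PyArray_EnsureAnyArray in the mean/std/var machinery so that ndarray subclasses survive those reductions. A hedged sketch of the behavior the new TestStats case exercises (the Tagged class and its tag attribute are invented here for illustration):

import numpy as np

class Tagged(np.ndarray):                     # hypothetical subclass
    def __array_finalize__(self, obj):
        # carry a metadata attribute across views and reductions
        self.tag = getattr(obj, 'tag', None)

a = np.arange(8.).reshape(2, 4).view(Tagged)
a.tag = 'jubba'
for res in (a.mean(1), a.std(1), a.var(1)):
    assert isinstance(res, Tagged)
    assert res.tag == 'jubba'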
Modified: trunk/numpy/core/src/arrayobject.c =================================================================== --- trunk/numpy/core/src/arrayobject.c 2008-05-13 03:54:30 UTC (rev 5159) +++ trunk/numpy/core/src/arrayobject.c 2008-05-13 16:40:19 UTC (rev 5160) @@ -1976,7 +1976,7 @@ v=(PyArrayObject *)array_big_item(self, i); } else { - v = PySequence_GetItem((PyObject *)self, i); + v = (PyArrayObject *)PySequence_GetItem((PyObject *)self, i); if ((!PyArray_Check(v)) || (v->nd >= self->nd)) { PyErr_SetString(PyExc_RuntimeError, "array_item not returning smaller-" \ Modified: trunk/numpy/core/src/multiarraymodule.c =================================================================== --- trunk/numpy/core/src/multiarraymodule.c 2008-05-13 03:54:30 UTC (rev 5159) +++ trunk/numpy/core/src/multiarraymodule.c 2008-05-13 16:40:19 UTC (rev 5160) @@ -849,7 +849,7 @@ if ((new = _check_axis(self, &axis, 0))==NULL) return NULL; /* Compute and reshape mean */ - obj1 = PyArray_EnsureArray(PyArray_Mean((PyAO *)new, axis, rtype, NULL)); + obj1 = PyArray_EnsureAnyArray(PyArray_Mean((PyAO *)new, axis, rtype, NULL)); if (obj1 == NULL) {Py_DECREF(new); return NULL;} n = PyArray_NDIM(new); newshape = PyTuple_New(n); @@ -865,7 +865,7 @@ if (obj2 == NULL) {Py_DECREF(new); return NULL;} /* Compute x = x - mx */ - obj1 = PyArray_EnsureArray(PyNumber_Subtract((PyObject *)new, obj2)); + obj1 = PyArray_EnsureAnyArray(PyNumber_Subtract((PyObject *)new, obj2)); Py_DECREF(obj2); if (obj1 == NULL) {Py_DECREF(new); return NULL;} @@ -878,7 +878,7 @@ Py_INCREF(obj1); } if (obj3 == NULL) {Py_DECREF(new); return NULL;} - obj2 = PyArray_EnsureArray \ + obj2 = PyArray_EnsureAnyArray \ (PyArray_GenericBinaryFunction((PyAO *)obj1, obj3, n_ops.multiply)); Py_DECREF(obj1); Py_DECREF(obj3); @@ -921,7 +921,7 @@ Py_DECREF(obj2); if (!variance) { - obj1 = PyArray_EnsureArray(ret); + obj1 = PyArray_EnsureAnyArray(ret); /* sqrt() */ ret = PyArray_GenericUnaryFunction((PyAO *)obj1, n_ops.sqrt); Modified: trunk/numpy/core/tests/test_multiarray.py =================================================================== --- trunk/numpy/core/tests/test_multiarray.py 2008-05-13 03:54:30 UTC (rev 5159) +++ trunk/numpy/core/tests/test_multiarray.py 2008-05-13 16:40:19 UTC (rev 5160) @@ -854,6 +854,23 @@ assert(isinstance(y,np.matrix)) assert_equal(y.dtype,np.int16) +class TestStats(NumpyTestCase): + def test_subclass(self): + class TestArray(np.ndarray): + def __new__(cls, data, info): + result = np.array(data) + result = result.view(cls) + result.info = info + return result + def __array_finalize__(self, obj): + self.info = getattr(obj, "info", '') + dat = TestArray([[1,2,3,4],[5,6,7,8]], 'jubba') + res = dat.mean(1) + assert res.info == dat.info + res = dat.std(1) + assert res.info == dat.info + res = dat.var(1) + assert res.info == dat.info # Import tests without matching module names set_local_path() Modified: trunk/numpy/lib/io.py =================================================================== --- trunk/numpy/lib/io.py 2008-05-13 03:54:30 UTC (rev 5159) +++ trunk/numpy/lib/io.py 2008-05-13 16:40:19 UTC (rev 5160) @@ -428,7 +428,7 @@ import re def fromregex(file, regexp, dtype): - """Construct a record array from a text file, using regular-expressions parsing. + """Construct an array from a text file, using regular-expressions parsing. Array is constructed from all matches of the regular expression in the file. Groups in the regular expression are converted to fields. 
@@ -440,7 +440,7 @@ regexp : str or regexp Regular expression to use to parse the file dtype : dtype or dtype list - Dtype for the record array + Dtype for the structured array Example ------- From numpy-svn at scipy.org Tue May 13 12:44:05 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Tue, 13 May 2008 11:44:05 -0500 (CDT) Subject: [Numpy-svn] r5161 - trunk/numpy/core Message-ID: <20080513164405.7D68A39C293@scipy.org> Author: oliphant Date: 2008-05-13 11:44:03 -0500 (Tue, 13 May 2008) New Revision: 5161 Modified: trunk/numpy/core/defmatrix.py Log: Back-out matrix changes from trunk. This breaks some test which need to be fixed differently. Modified: trunk/numpy/core/defmatrix.py =================================================================== --- trunk/numpy/core/defmatrix.py 2008-05-13 16:40:19 UTC (rev 5160) +++ trunk/numpy/core/defmatrix.py 2008-05-13 16:44:03 UTC (rev 5161) @@ -223,8 +223,6 @@ return def __getitem__(self, index): - if isscalar(index): - return self.__array__()[index] self._getitem = True try: @@ -304,6 +302,11 @@ else: raise ValueError, "unsupported axis" + # Necessary because base-class tolist expects dimension + # reduction by x[0] + def tolist(self): + return self.__array__().tolist() + # To preserve orientation of result... def sum(self, axis=None, dtype=None, out=None): """Sum the matrix over the given axis. If the axis is None, sum From numpy-svn at scipy.org Tue May 13 17:08:08 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Tue, 13 May 2008 16:08:08 -0500 (CDT) Subject: [Numpy-svn] r5162 - in trunk/numpy/ma: . tests Message-ID: <20080513210808.51AC239C457@scipy.org> Author: pierregm Date: 2008-05-13 16:08:01 -0500 (Tue, 13 May 2008) New Revision: 5162 Modified: trunk/numpy/ma/extras.py trunk/numpy/ma/tests/test_extras.py Log: extras: introduced mvander and mpolyfit Modified: trunk/numpy/ma/extras.py =================================================================== --- trunk/numpy/ma/extras.py 2008-05-13 16:44:03 UTC (rev 5161) +++ trunk/numpy/ma/extras.py 2008-05-13 21:08:01 UTC (rev 5162) @@ -20,7 +20,7 @@ 'flatnotmasked_contiguous','flatnotmasked_edges', 'hsplit','hstack', 'mask_cols','mask_rowcols','mask_rows','masked_all','masked_all_like', - 'median','mediff1d','mr_', + 'median','mediff1d','mpolyfit','mr_','mvander', 'notmasked_contiguous','notmasked_edges', 'row_stack', 'vstack', @@ -29,18 +29,16 @@ from itertools import groupby import core -from core import * +from core import MaskedArray, MAError, add, array, asarray, concatenate, count,\ + filled, getmask, getmaskarray, masked, masked_array, mask_or, nomask, ones,\ + sort, zeros +#from core import * -import numpy -from numpy import float_ +import numpy as np +from numpy import ndarray, array as nxarray import numpy.core.umath as umath -import numpy.core.numeric as numeric -from numpy.core.numeric import ndarray -from numpy.core.numeric import array as nxarray -from numpy.core.fromnumeric import asarray as nxasarray - from numpy.lib.index_tricks import AxisConcatenator -import numpy.lib.function_base as function_base +from numpy.lib.polynomial import _lstsq, _single_eps, _double_eps #............................................................................... def issequence(seq): @@ -66,7 +64,7 @@ m = getmaskarray(arr) return m.sum(axis) -def masked_all(shape, dtype=float_): +def masked_all(shape, dtype=float): """Return an empty masked array of the given shape and dtype, where all the data are masked. @@ -76,8 +74,8 @@ Data type of the output. 
""" - a = masked_array(numeric.empty(shape, dtype), - mask=numeric.ones(shape, bool_)) + a = masked_array(np.empty(shape, dtype), + mask=np.ones(shape, bool)) return a def masked_all_like(arr): @@ -85,8 +83,8 @@ the array `a`, where all the data are masked. """ - a = masked_array(numeric.empty_like(arr), - mask=numeric.ones(arr.shape, bool_)) + a = masked_array(np.empty_like(arr), + mask=np.ones(arr.shape, bool)) return a @@ -100,18 +98,18 @@ self.__doc__ = self.getdoc() def getdoc(self): "Retrieves the __doc__ string from the function." - return getattr(numpy, self._function).__doc__ +\ + return getattr(np, self._function).__doc__ +\ "*Notes*:\n (The function is applied to both the _data and the _mask, if any.)" def __call__(self, *args, **params): - func = getattr(numpy, self._function) + func = getattr(np, self._function) if len(args)==1: x = args[0] - if isinstance(x,ndarray): + if isinstance(x, ndarray): _d = func(nxasarray(x), **params) _m = func(getmaskarray(x), **params) return masked_array(_d, mask=_m) elif isinstance(x, tuple) or isinstance(x, list): - _d = func(tuple([nxasarray(a) for a in x]), **params) + _d = func(tuple([np.asarray(a) for a in x]), **params) _m = func(tuple([getmaskarray(a) for a in x]), **params) return masked_array(_d, mask=_m) else: @@ -121,7 +119,7 @@ arrays.append(args.pop(0)) res = [] for x in arrays: - _d = func(nxasarray(x), *args, **params) + _d = func(np.asarray(x), *args, **params) _m = func(getmaskarray(x), *args, **params) res.append(masked_array(_d, mask=_m)) return res @@ -141,12 +139,12 @@ """Expands the shape of a by including newaxis before axis. """ if not isinstance(a, MaskedArray): - return numpy.expand_dims(a,axis) + return np.expand_dims(a, axis) elif getmask(a) is nomask: - return numpy.expand_dims(a,axis).view(MaskedArray) + return np.expand_dims(a, axis).view(MaskedArray) m = getmaskarray(a) - return masked_array(numpy.expand_dims(a,axis), - mask=numpy.expand_dims(m,axis)) + return masked_array(np.expand_dims(a, axis), + mask=np.expand_dims(m, axis)) #####-------------------------------------------------------------------------- #---- @@ -161,7 +159,7 @@ return seq -def apply_along_axis(func1d,axis,arr,*args,**kwargs): +def apply_along_axis(func1d, axis, arr, *args, **kwargs): """Execute func1d(arr[i],*args) where func1d takes 1-D arrays and arr is an N-d array. i varies so as to apply the function along the given axis for each 1-d subarray in arr. @@ -174,16 +172,16 @@ raise ValueError("axis must be less than arr.ndim; axis=%d, rank=%d." 
% (axis,nd)) ind = [0]*(nd-1) - i = numeric.zeros(nd,'O') + i = np.zeros(nd,'O') indlist = range(nd) indlist.remove(axis) i[axis] = slice(None,None) - outshape = numeric.asarray(arr.shape).take(indlist) + outshape = np.asarray(arr.shape).take(indlist) i.put(indlist, ind) j = i.copy() - res = func1d(arr[tuple(i.tolist())],*args,**kwargs) + res = func1d(arr[tuple(i.tolist())], *args, **kwargs) # if res is a number, then we have a smaller output array - asscalar = numeric.isscalar(res) + asscalar = np.isscalar(res) if not asscalar: try: len(res) @@ -194,10 +192,10 @@ #...we'll just take the largest, to avoid some downcasting dtypes = [] if asscalar: - dtypes.append(numeric.asarray(res).dtype) - outarr = zeros(outshape, object_) + dtypes.append(np.asarray(res).dtype) + outarr = zeros(outshape, object) outarr[tuple(ind)] = res - Ntot = numeric.product(outshape) + Ntot = np.product(outshape) k = 1 while k < Ntot: # increment the index @@ -207,17 +205,17 @@ ind[n-1] += 1 ind[n] = 0 n -= 1 - i.put(indlist,ind) - res = func1d(arr[tuple(i.tolist())],*args,**kwargs) + i.put(indlist, ind) + res = func1d(arr[tuple(i.tolist())], *args, **kwargs) outarr[tuple(ind)] = res dtypes.append(asarray(res).dtype) k += 1 else: res = core.array(res, copy=False, subok=True) j = i.copy() - j[axis] = ([slice(None,None)] * res.ndim) + j[axis] = ([slice(None, None)] * res.ndim) j.put(indlist, ind) - Ntot = numeric.product(outshape) + Ntot = np.product(outshape) holdshape = outshape outshape = list(arr.shape) outshape[axis] = res.shape @@ -236,13 +234,13 @@ n -= 1 i.put(indlist, ind) j.put(indlist, ind) - res = func1d(arr[tuple(i.tolist())],*args,**kwargs) + res = func1d(arr[tuple(i.tolist())], *args, **kwargs) outarr[tuple(flatten_inplace(j.tolist()))] = res dtypes.append(asarray(res).dtype) k += 1 - max_dtypes = numeric.dtype(numeric.asarray(dtypes).max()) + max_dtypes = np.dtype(np.asarray(dtypes).max()) if not hasattr(arr, '_mask'): - result = numeric.asarray(outarr, dtype=max_dtypes) + result = np.asarray(outarr, dtype=max_dtypes) else: result = core.asarray(outarr, dtype=max_dtypes) result.fill_value = core.default_fill_value(result) @@ -284,7 +282,7 @@ else: if weights is None: n = a.filled(0).sum(axis=None) - d = umath.add.reduce((-mask).ravel().astype(int_)) + d = umath.add.reduce((-mask).ravel().astype(int)) else: w = array(filled(weights, 0.0), float, mask=mask).ravel() n = add.reduce(a.ravel() * w) @@ -294,14 +292,14 @@ if mask is nomask: if weights is None: d = ash[axis] * 1.0 - n = add.reduce(a._data, axis, dtype=float_) + n = add.reduce(a._data, axis, dtype=float) else: w = filled(weights, 0.0) wsh = w.shape if wsh == (): wsh = (1,) if wsh == ash: - w = numeric.array(w, float_, copy=0) + w = np.array(w, float, copy=0) n = add.reduce(a*w, axis) d = add.reduce(w, axis) del w @@ -310,32 +308,32 @@ r = [None]*len(ash) r[axis] = slice(None, None, 1) w = eval ("w["+ repr(tuple(r)) + "] * ones(ash, float)") - n = add.reduce(a*w, axis, dtype=float_) - d = add.reduce(w, axis, dtype=float_) + n = add.reduce(a*w, axis, dtype=float) + d = add.reduce(w, axis, dtype=float) del w, r else: raise ValueError, 'average: weights wrong shape.' 
else: if weights is None: - n = add.reduce(a, axis, dtype=float_) - d = umath.add.reduce((-mask), axis=axis, dtype=float_) + n = add.reduce(a, axis, dtype=float) + d = umath.add.reduce((-mask), axis=axis, dtype=float) else: w = filled(weights, 0.0) wsh = w.shape if wsh == (): wsh = (1,) if wsh == ash: - w = array(w, dtype=float_, mask=mask, copy=0) - n = add.reduce(a*w, axis, dtype=float_) - d = add.reduce(w, axis, dtype=float_) + w = array(w, dtype=float, mask=mask, copy=0) + n = add.reduce(a*w, axis, dtype=float) + d = add.reduce(w, axis, dtype=float) elif wsh == (ash[axis],): ni = ash[axis] r = [None]*len(ash) r[axis] = slice(None, None, 1) w = eval ("w["+ repr(tuple(r)) + \ "] * masked_array(ones(ash, float), mask)") - n = add.reduce(a*w, axis, dtype=float_) - d = add.reduce(w, axis, dtype=float_) + n = add.reduce(a*w, axis, dtype=float) + d = add.reduce(w, axis, dtype=float) else: raise ValueError, 'average: weights wrong shape.' del w @@ -344,15 +342,15 @@ result = n/d del n - if isMaskedArray(result): + if isinstance(result, MaskedArray): if ((axis is None) or (axis==0 and a.ndim == 1)) and \ (result.mask is nomask): result = result._data if returned: - if not isMaskedArray(d): + if not isinstance(d, MaskedArray): d = masked_array(d) if isinstance(d, ndarray) and (not d.shape == result.shape): - d = ones(result.shape, dtype=float_) * d + d = ones(result.shape, dtype=float) * d if returned: return result, d else: @@ -410,12 +408,12 @@ """ def _median1D(data): - counts = filled(count(data,axis),0) - (idx,rmd) = divmod(counts, 2) + counts = filled(count(data, axis),0) + (idx, rmd) = divmod(counts, 2) if rmd: - choice = slice(idx,idx+1) + choice = slice(idx, idx+1) else: - choice = slice(idx-1,idx+1) + choice = slice(idx-1, idx+1) return data[choice].mean(0) # if overwrite_input: @@ -471,10 +469,10 @@ (idxr, idxc) = (range(len(x)), range(x.shape[1])) masked = m.nonzero() if not axis: - for i in function_base.unique(masked[0]): + for i in np.unique(masked[0]): idxr.remove(i) if axis in [None, 1, -1]: - for j in function_base.unique(masked[1]): + for j in np.unique(masked[1]): idxc.remove(j) return x._data[idxr][:,idxc] @@ -482,13 +480,13 @@ """Suppress whole rows of a 2D array that contain masked values. """ - return compress_rowcols(a,0) + return compress_rowcols(a, 0) def compress_cols(a): """Suppress whole columnss of a 2D array that contain masked values. """ - return compress_rowcols(a,1) + return compress_rowcols(a, 1) def mask_rowcols(a, axis=None): """Mask whole rows and/or columns of a 2D array that contain @@ -520,9 +518,9 @@ maskedval = m.nonzero() a._mask = a._mask.copy() if not axis: - a[function_base.unique(maskedval[0])] = masked + a[np.unique(maskedval[0])] = masked if axis in [None, 1, -1]: - a[:,function_base.unique(maskedval[1])] = masked + a[:,np.unique(maskedval[1])] = masked return a def mask_rows(a, axis=None): @@ -573,11 +571,11 @@ a = mask_rows(a) b = mask_cols(b) # - d = numpy.dot(filled(a, 0), filled(b, 0)) + d = np.dot(filled(a, 0), filled(b, 0)) # am = (~getmaskarray(a)) bm = (~getmaskarray(b)) - m = ~numpy.dot(am,bm) + m = ~np.dot(am, bm) return masked_array(d, mask=m) #............................................................................... 
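The compress_rowcols/mask_rowcols helpers touched above are easiest to read from a tiny example (illustrative usage, not part of the diff):

>>> import numpy as np
>>> import numpy.ma as ma
>>> x = ma.array(np.arange(9).reshape(3, 3),
...              mask=[[0, 0, 0], [0, 1, 0], [0, 0, 0]])
>>> ma.compress_rowcols(x)     # drop every row and column holding a masked value
array([[0, 2],
       [6, 8]])
>>> ma.compress_rowcols(x, 0)  # axis=0: drop offending rows only
array([[0, 1, 2],
       [6, 7, 8]])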
@@ -618,15 +616,15 @@ if to_begin is not None: to_begin = asarray(to_begin) nbegin = to_begin.size - r_data = numeric.empty((n+nend+nbegin,), dtype=a.dtype) - r_mask = numeric.zeros((n+nend+nbegin,), dtype=bool_) + r_data = np.empty((n+nend+nbegin,), dtype=a.dtype) + r_mask = np.zeros((n+nend+nbegin,), dtype=bool) r_data[:nbegin] = to_begin._data r_mask[:nbegin] = to_begin._mask r_data[nbegin:-nend] = dd r_mask[nbegin:-nend] = dm else: - r_data = numeric.empty((n+nend,), dtype=a.dtype) - r_mask = numeric.zeros((n+nend,), dtype=bool_) + r_data = np.empty((n+nend,), dtype=a.dtype) + r_mask = np.zeros((n+nend,), dtype=bool) r_data[:-nend] = dd r_mask[:-nend] = dm r_data[-nend:] = to_end._data @@ -635,8 +633,8 @@ elif to_begin is not None: to_begin = asarray(to_begin) nbegin = to_begin.size - r_data = numeric.empty((n+nbegin,), dtype=a.dtype) - r_mask = numeric.zeros((n+nbegin,), dtype=bool_) + r_data = np.empty((n+nbegin,), dtype=a.dtype) + r_mask = np.zeros((n+nbegin,), dtype=bool) r_data[:nbegin] = to_begin._data r_mask[:nbegin] = to_begin._mask r_data[nbegin:] = dd @@ -682,9 +680,9 @@ step = 1 if type(step) is type(1j): size = int(abs(step)) - newobj = function_base.linspace(start, stop, num=size) + newobj = np.linspace(start, stop, num=size) else: - newobj = numeric.arange(start, stop, step) + newobj = np.arange(start, stop, step) elif type(key[k]) is str: if (key[k] in 'rc'): self.matrix = True @@ -695,14 +693,14 @@ continue except (ValueError, TypeError): raise ValueError, "Unknown special directive" - elif type(key[k]) in numeric.ScalarType: + elif type(key[k]) in np.ScalarType: newobj = asarray([key[k]]) scalars.append(k) scalar = True else: newobj = key[k] objs.append(newobj) - if isinstance(newobj, numeric.ndarray) and not scalar: + if isinstance(newobj, ndarray) and not scalar: if final_dtypedescr is None: final_dtypedescr = newobj.dtype elif newobj.dtype > final_dtypedescr: @@ -727,7 +725,7 @@ mr_ = mr_class() #####-------------------------------------------------------------------------- -#---- --- +#---- Find unmasked data --- #####-------------------------------------------------------------------------- def flatnotmasked_edges(a): @@ -736,9 +734,9 @@ """ m = getmask(a) - if m is nomask or not numpy.any(m): + if m is nomask or not np.any(m): return [0,-1] - unmasked = numeric.flatnonzero(~m) + unmasked = np.flatnonzero(~m) if len(unmasked) > 0: return unmasked[[0,-1]] else: @@ -762,7 +760,7 @@ if axis is None or a.ndim == 1: return flatnotmasked_edges(a) m = getmask(a) - idx = array(numpy.indices(a.shape), mask=nxasarray([m]*a.ndim)) + idx = array(np.indices(a.shape), mask=np.asarray([m]*a.ndim)) return [tuple([idx[i].min(axis).compressed() for i in range(a.ndim)]), tuple([idx[i].max(axis).compressed() for i in range(a.ndim)]),] @@ -775,14 +773,14 @@ m = getmask(a) if m is nomask: return (a.size, [0,-1]) - unmasked = numeric.flatnonzero(~m) + unmasked = np.flatnonzero(~m) if len(unmasked) == 0: return None result = [] for k, group in groupby(enumerate(unmasked), lambda (i,x):i-x): - tmp = numpy.array([g[1] for g in group], int_) + tmp = np.array([g[1] for g in group], int) # result.append((tmp.size, tuple(tmp[[0,-1]]))) - result.append( slice(tmp[0],tmp[-1]) ) + result.append( slice(tmp[0], tmp[-1]) ) result.sort() return result @@ -798,7 +796,7 @@ Returns ------- - a sorted sequence of slices (start index, end index). + A sorted sequence of slices (start index, end index). 
Notes ----- @@ -816,11 +814,88 @@ # other = (axis+1)%2 idx = [0,0] - idx[axis] = slice(None,None) + idx[axis] = slice(None, None) # for i in range(a.shape[other]): idx[other] = i result.append( flatnotmasked_contiguous(a[idx]) ) return result + +#####-------------------------------------------------------------------------- +#---- Polynomial fit --- +#####-------------------------------------------------------------------------- + +def mvander(x, n=None): + """%s + Notes + ----- + Masked values in x will result in rows of zeros. + """ + _vander = np.vander(x, n) + m = getmask(x) + if m is not nomask: + _vander[m] = 0 + return _vander + + +def mpolyfit(x, y, deg, rcond=None, full=False): + """%s + + Notes + ----- + Any masked values in x is propagated in y, and vice-versa. + """ + order = int(deg) + 1 + x = asarray(x) + mx = getmask(x) + y = asarray(y) + if y.ndim == 1: + m = mask_or(mx, getmask(y)) + elif y.ndim == 2: + y = mask_rows(y) + my = getmask(y) + if my is not nomask: + m = mask_or(mx, my[:,0]) + else: + m = mx + else: + raise TypeError,"Expected a 1D or 2D array for y!" + if m is not nomask: + x[m] = y[m] = masked + # Set rcond + if rcond is None : + if x.dtype in (np.single, np.csingle): + rcond = len(x)*_single_eps + else : + rcond = len(x)*_double_eps + # Scale x to improve condition number + scale = abs(x).max() + if scale != 0 : + x = x / scale + # solve least squares equation for powers of x + v = mvander(x, order) + c, resids, rank, s = _lstsq(v, y.filled(0), rcond) + # warn on rank reduction, which indicates an ill conditioned matrix + if rank != order and not full: + warnings.warn("Polyfit may be poorly conditioned", np.RankWarning) + # scale returned coefficients + if scale != 0 : + if c.ndim == 1 : + c /= np.vander([scale], order)[0] + else : + c /= np.vander([scale], order).T + if full : + return c, resids, rank, s, rcond + else : + return c + +_g = globals() +for nfunc in ('vander', 'polyfit'): + mfunc = "m%s" % nfunc + _g[mfunc].func_doc = _g[mfunc].func_doc % getattr(np,nfunc).__doc__ + + + + ################################################################################ Modified: trunk/numpy/ma/tests/test_extras.py =================================================================== --- trunk/numpy/ma/tests/test_extras.py 2008-05-13 16:44:03 UTC (rev 5161) +++ trunk/numpy/ma/tests/test_extras.py 2008-05-13 21:08:01 UTC (rev 5162) @@ -355,6 +355,36 @@ assert_equal(median(x,0), [[12,10],[8,9],[16,17]]) +class TestPolynomial(NumpyTestCase): + # + def test_polyfit(self): + "Tests polyfit" + # On ndarrays + x = numpy.random.rand(10) + y = numpy.random.rand(20).reshape(-1,2) + assert_almost_equal(mpolyfit(x,y,3),numpy.polyfit(x,y,3)) + # ON 1D maskedarrays + x = x.view(MaskedArray) + x[0] = masked + y = y.view(MaskedArray) + y[0,0] = y[-1,-1] = masked + # + (C,R,K,S,D) = mpolyfit(x,y[:,0],3,full=True) + (c,r,k,s,d) = numpy.polyfit(x[1:], y[1:,0].compressed(), 3, full=True) + for (a,a_) in zip((C,R,K,S,D),(c,r,k,s,d)): + assert_almost_equal(a, a_) + # + (C,R,K,S,D) = mpolyfit(x,y[:,-1],3,full=True) + (c,r,k,s,d) = numpy.polyfit(x[1:-1], y[1:-1,-1], 3, full=True) + for (a,a_) in zip((C,R,K,S,D),(c,r,k,s,d)): + assert_almost_equal(a, a_) + # + (C,R,K,S,D) = mpolyfit(x,y,3,full=True) + (c,r,k,s,d) = numpy.polyfit(x[1:-1], y[1:-1,:], 3, full=True) + for (a,a_) in zip((C,R,K,S,D),(c,r,k,s,d)): + assert_almost_equal(a, a_) + + ############################################################################### 
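As a rough usage sketch of the masked polynomial fit added above (exported under the plain name ``polyfit`` in numpy.ma a few revisions later in this batch), a fit with one masked sample should agree with an ordinary ``numpy.polyfit`` on the remaining points, as the new tests check. The data below are invented for illustration and the code is not part of the commit.

    import numpy as np
    import numpy.ma as ma

    x = np.linspace(-1.0, 1.0, 10)
    y = 2.0 * x**2 - 1.0

    xm = ma.array(x)
    xm[0] = ma.masked    # masking x[0] also drops the pair (x[0], y[0]) from the fit

    c_masked = ma.polyfit(xm, y, 2)
    c_plain = np.polyfit(x[1:], y[1:], 2)

    # The masked sample contributes a zero row to the Vandermonde matrix, so the
    # two coefficient sets should agree to rounding error.
    assert np.allclose(c_masked, c_plain)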
#------------------------------------------------------------------------------ if __name__ == "__main__": From numpy-svn at scipy.org Tue May 13 20:52:08 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Tue, 13 May 2008 19:52:08 -0500 (CDT) Subject: [Numpy-svn] r5163 - branches Message-ID: <20080514005208.574D439C245@scipy.org> Author: jarrod.millman Date: 2008-05-13 19:52:06 -0500 (Tue, 13 May 2008) New Revision: 5163 Removed: branches/1.1.x/ Log: this branch turned out to be premature From numpy-svn at scipy.org Tue May 13 20:52:48 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Tue, 13 May 2008 19:52:48 -0500 (CDT) Subject: [Numpy-svn] r5164 - trunk/numpy Message-ID: <20080514005248.B153939C00F@scipy.org> Author: jarrod.millman Date: 2008-05-13 19:52:46 -0500 (Tue, 13 May 2008) New Revision: 5164 Modified: trunk/numpy/version.py Log: the trunk is still cooking 1.1 Modified: trunk/numpy/version.py =================================================================== --- trunk/numpy/version.py 2008-05-14 00:52:06 UTC (rev 5163) +++ trunk/numpy/version.py 2008-05-14 00:52:46 UTC (rev 5164) @@ -1,4 +1,4 @@ -version='1.2.0' +version='1.1.0' release=False if not release: From numpy-svn at scipy.org Wed May 14 01:57:54 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Wed, 14 May 2008 00:57:54 -0500 (CDT) Subject: [Numpy-svn] r5165 - trunk Message-ID: <20080514055754.BAFC339C093@scipy.org> Author: jarrod.millman Date: 2008-05-14 00:57:52 -0500 (Wed, 14 May 2008) New Revision: 5165 Modified: trunk/TEST_COMMIT Log: test Modified: trunk/TEST_COMMIT =================================================================== --- trunk/TEST_COMMIT 2008-05-14 00:52:46 UTC (rev 5164) +++ trunk/TEST_COMMIT 2008-05-14 05:57:52 UTC (rev 5165) @@ -12,3 +12,4 @@ dubois: no sasha: yes tim_hochberg: yes +jarrod.millman: yes From numpy-svn at scipy.org Wed May 14 08:51:44 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Wed, 14 May 2008 07:51:44 -0500 (CDT) Subject: [Numpy-svn] r5166 - in trunk/numpy: . core lib Message-ID: <20080514125144.F242739C0A0@scipy.org> Author: stefan Date: 2008-05-14 07:51:23 -0500 (Wed, 14 May 2008) New Revision: 5166 Modified: trunk/numpy/__init__.py trunk/numpy/core/defmatrix.py trunk/numpy/lib/io.py Log: Merge docstrings from wiki. Modified: trunk/numpy/__init__.py =================================================================== --- trunk/numpy/__init__.py 2008-05-14 05:57:52 UTC (rev 5165) +++ trunk/numpy/__init__.py 2008-05-14 12:51:23 UTC (rev 5166) @@ -1,14 +1,65 @@ """ NumPy -========== +===== + Provides - 1) An array object of arbitrary homogeneous items - 2) Fast mathematical operations over arrays - 3) Linear Algebra, Fourier Transforms, Random Number Generation + 1. An array object of arbitrary homogeneous items + 2. Fast mathematical operations over arrays + 3. Linear Algebra, Fourier Transforms, Random Number Generation -Documentation is available in the docstrings and at +Documentation is available in the docstrings and at http://www.scipy.org -http://www.scipy.org +Available subpackages +--------------------- +core + Defines a multi-dimensional array and useful procedures + for Numerical computation. +lib + Basic functions used by several sub-packages and useful + to have in the main name-space. 
+random + Core Random Tools +linalg + Core Linear Algebra Tools +fft + Core FFT routines +testing + Numpy testing tools + +The following sub-packages must be explicitly imported: + +f2py + Fortran to Python Interface Generator. +distutils + Enhancements to distutils with support for + Fortran compilers support and more. + + +Global symbols from subpackages +------------------------------- +======== ================================= +core all (use numpy.* not numpy.core.*) +lib all (use numpy.* not numpy.lib.*) +testing NumpyTest +======== ================================= + + +Utility tools +------------- + +test + Run numpy unittests +pkgload + Load numpy packages +show_config + Show numpy build configuration +dual + Overwrite certain functions with high-performance Scipy tools +matlib + Make everything matrices. +__version__ + Numpy version string + """ # We first need to detect if we're being called as part of the numpy setup Modified: trunk/numpy/core/defmatrix.py =================================================================== --- trunk/numpy/core/defmatrix.py 2008-05-14 05:57:52 UTC (rev 5165) +++ trunk/numpy/core/defmatrix.py 2008-05-14 12:51:23 UTC (rev 5166) @@ -125,19 +125,20 @@ class matrix(N.ndarray): - """mat = matrix(data, dtype=None, copy=True) + """ + mat = matrix(data, dtype=None, copy=True) Returns a matrix from an array-like object, or a string of data. A matrix is a specialized 2-d array that retains - it's 2-d nature through operations and where '*' means matrix + its 2-d nature through operations and where '*' means matrix multiplication and '**' means matrix power. Parameters ---------- data : array-like or string If data is a string, then interpret the string as a matrix - with commas or spaces separating columns and semicolons - separating rows. + with commas or spaces separating columns and semicolons + separating rows. If data is array-like than convert the array to a matrix. dtype : data-type Anything that can be interpreted as a NumPy datatype. @@ -152,6 +153,7 @@ >>> print a [[1 2] [3 4]] + """ __array_priority__ = 10.0 def __new__(subtype, data, dtype=None, copy=True): @@ -532,18 +534,22 @@ def bmat(obj, ldict=None, gdict=None): - """Build a matrix object from string, nested sequence, or array. + """ + Build a matrix object from string, nested sequence, or array. - Example + Examples -------- - F = bmat('A, B; C, D') - F = bmat([[A,B],[C,D]]) - F = bmat(r_[c_[A,B],c_[C,D]]) + >>> F = bmat('A, B; C, D') + >>> F = bmat([[A,B],[C,D]]) + >>> F = bmat(r_[c_[A,B],c_[C,D]]) - all produce the same Matrix Object [ A B ] - [ C D ] + All of these produce the same matrix:: + [ A B ] + [ C D ] + if A, B, C, and D are appropriately shaped 2-d arrays. + """ if isinstance(obj, str): if gdict is None: Modified: trunk/numpy/lib/io.py =================================================================== --- trunk/numpy/lib/io.py 2008-05-14 05:57:52 UTC (rev 5165) +++ trunk/numpy/lib/io.py 2008-05-14 12:51:23 UTC (rev 5166) @@ -428,7 +428,9 @@ import re def fromregex(file, regexp, dtype): - """Construct an array from a text file, using regular-expressions parsing. + """ + Construct a record array from a text file, using + regular-expressions parsing. Array is constructed from all matches of the regular expression in the file. Groups in the regular expression are converted to fields. @@ -436,19 +438,20 @@ Parameters ---------- file : str or file - File name or file object to read + File name or file object to read. 
regexp : str or regexp - Regular expression to use to parse the file + Regular expression used to parse the file. + Groups in the regular expression correspond to fields in the dtype. dtype : dtype or dtype list Dtype for the structured array - Example - ------- - >>> import numpy as np + Examples + -------- >>> f = open('test.dat', 'w') - >>> f.write("1312 foo\n1534 bar\n 444 qux") + >>> f.write("1312 foo\\n1534 bar\\n444 qux") >>> f.close() - >>> np.fromregex('test.dat', r"(\d+)\s+(...)", [('num', np.int64), ('key', 'S3')]) + >>> np.fromregex('test.dat', r"(\\d+)\\s+(...)", + ... [('num', np.int64), ('key', 'S3')]) array([(1312L, 'foo'), (1534L, 'bar'), (444L, 'qux')], dtype=[('num', ' Author: stefan Date: 2008-05-14 15:01:18 -0500 (Wed, 14 May 2008) New Revision: 5167 Modified: trunk/numpy/lib/io.py Log: Restore change to docstring overwritten by wiki merge. Modified: trunk/numpy/lib/io.py =================================================================== --- trunk/numpy/lib/io.py 2008-05-14 12:51:23 UTC (rev 5166) +++ trunk/numpy/lib/io.py 2008-05-14 20:01:18 UTC (rev 5167) @@ -429,8 +429,8 @@ import re def fromregex(file, regexp, dtype): """ - Construct a record array from a text file, using - regular-expressions parsing. + Construct an array from a text file, using regular-expressions + parsing. Array is constructed from all matches of the regular expression in the file. Groups in the regular expression are converted to fields. From numpy-svn at scipy.org Wed May 14 18:08:37 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Wed, 14 May 2008 17:08:37 -0500 (CDT) Subject: [Numpy-svn] r5168 - in trunk/numpy/ma: . tests Message-ID: <20080514220837.0389B39C455@scipy.org> Author: pierregm Date: 2008-05-14 17:08:33 -0500 (Wed, 14 May 2008) New Revision: 5168 Modified: trunk/numpy/ma/core.py trunk/numpy/ma/extras.py trunk/numpy/ma/mrecords.py trunk/numpy/ma/tests/test_extras.py trunk/numpy/ma/tests/test_mrecords.py Log: extras : dropped the m prefix in mediff1d, mvander, mpolyfit mrecords: fixed __setitem__ to update the mask if needed. Modified: trunk/numpy/ma/core.py =================================================================== --- trunk/numpy/ma/core.py 2008-05-14 20:01:18 UTC (rev 5167) +++ trunk/numpy/ma/core.py 2008-05-14 22:08:33 UTC (rev 5168) @@ -1354,7 +1354,8 @@ # raise IndexError, msg if isinstance(indx, basestring): ndarray.__setitem__(self._data,indx, getdata(value)) - warnings.warn("The mask is NOT affected!") + warnings.warn("MaskedArray.__setitem__ on fields: "\ + "The mask is NOT affected!") return #.... if value is masked: Modified: trunk/numpy/ma/extras.py =================================================================== --- trunk/numpy/ma/extras.py 2008-05-14 20:01:18 UTC (rev 5167) +++ trunk/numpy/ma/extras.py 2008-05-14 22:08:33 UTC (rev 5168) @@ -16,14 +16,15 @@ 'column_stack','compress_cols','compress_rowcols', 'compress_rows', 'count_masked', 'dot','dstack', - 'expand_dims', + 'ediff1d','expand_dims', 'flatnotmasked_contiguous','flatnotmasked_edges', 'hsplit','hstack', 'mask_cols','mask_rowcols','mask_rows','masked_all','masked_all_like', - 'median','mediff1d','mpolyfit','mr_','mvander', + 'median','mr_', 'notmasked_contiguous','notmasked_edges', + 'polyfit', 'row_stack', - 'vstack', + 'vander','vstack', ] from itertools import groupby @@ -579,7 +580,7 @@ return masked_array(d, mask=m) #............................................................................... 
-def mediff1d(array, to_end=None, to_begin=None): +def ediff1d(array, to_end=None, to_begin=None): """Return the differences between consecutive elements of an array, possibly with prefixed and/or appended values. @@ -826,7 +827,7 @@ #---- Polynomial fit --- #####-------------------------------------------------------------------------- -def mvander(x, n=None): +def vander(x, n=None): """%s Notes ----- @@ -839,7 +840,7 @@ return _vander -def mpolyfit(x, y, deg, rcond=None, full=False): +def polyfit(x, y, deg, rcond=None, full=False): """%s Notes @@ -874,7 +875,7 @@ if scale != 0 : x = x / scale # solve least squares equation for powers of x - v = mvander(x, order) + v = vander(x, order) c, resids, rank, s = _lstsq(v, y.filled(0), rcond) # warn on rank reduction, which indicates an ill conditioned matrix if rank != order and not full: @@ -892,8 +893,7 @@ _g = globals() for nfunc in ('vander', 'polyfit'): - mfunc = "m%s" % nfunc - _g[mfunc].func_doc = _g[mfunc].func_doc % getattr(np,nfunc).__doc__ + _g[nfunc].func_doc = _g[nfunc].func_doc % getattr(np,nfunc).__doc__ Modified: trunk/numpy/ma/mrecords.py =================================================================== --- trunk/numpy/ma/mrecords.py 2008-05-14 20:01:18 UTC (rev 5167) +++ trunk/numpy/ma/mrecords.py 2008-05-14 22:08:33 UTC (rev 5168) @@ -349,6 +349,9 @@ def __setitem__(self, indx, value): "Sets the given record to value." MaskedArray.__setitem__(self, indx, value) + if isinstance(indx, basestring): + self._fieldmask[indx] = ma.getmaskarray(value) + #............................................ def __setslice__(self, i, j, value): "Sets the slice described by [i,j] to `value`." Modified: trunk/numpy/ma/tests/test_extras.py =================================================================== --- trunk/numpy/ma/tests/test_extras.py 2008-05-14 20:01:18 UTC (rev 5167) +++ trunk/numpy/ma/tests/test_extras.py 2008-05-14 22:08:33 UTC (rev 5168) @@ -282,37 +282,37 @@ c = dot(b,a,False) assert_equal(c, N.dot(b.filled(0),a.filled(0))) - def test_mediff1d(self): + def test_ediff1d(self): "Tests mediff1d" x = masked_array(N.arange(5), mask=[1,0,0,0,1]) difx_d = (x._data[1:]-x._data[:-1]) difx_m = (x._mask[1:]-x._mask[:-1]) - dx = mediff1d(x) + dx = ediff1d(x) assert_equal(dx._data, difx_d) assert_equal(dx._mask, difx_m) # - dx = mediff1d(x, to_begin=masked) + dx = ediff1d(x, to_begin=masked) assert_equal(dx._data, N.r_[0,difx_d]) assert_equal(dx._mask, N.r_[1,difx_m]) - dx = mediff1d(x, to_begin=[1,2,3]) + dx = ediff1d(x, to_begin=[1,2,3]) assert_equal(dx._data, N.r_[[1,2,3],difx_d]) assert_equal(dx._mask, N.r_[[0,0,0],difx_m]) # - dx = mediff1d(x, to_end=masked) + dx = ediff1d(x, to_end=masked) assert_equal(dx._data, N.r_[difx_d,0]) assert_equal(dx._mask, N.r_[difx_m,1]) - dx = mediff1d(x, to_end=[1,2,3]) + dx = ediff1d(x, to_end=[1,2,3]) assert_equal(dx._data, N.r_[difx_d,[1,2,3]]) assert_equal(dx._mask, N.r_[difx_m,[0,0,0]]) # - dx = mediff1d(x, to_end=masked, to_begin=masked) + dx = ediff1d(x, to_end=masked, to_begin=masked) assert_equal(dx._data, N.r_[0,difx_d,0]) assert_equal(dx._mask, N.r_[1,difx_m,1]) - dx = mediff1d(x, to_end=[1,2,3], to_begin=masked) + dx = ediff1d(x, to_end=[1,2,3], to_begin=masked) assert_equal(dx._data, N.r_[0,difx_d,[1,2,3]]) assert_equal(dx._mask, N.r_[1,difx_m,[0,0,0]]) # - dx = mediff1d(x._data, to_end=masked, to_begin=masked) + dx = ediff1d(x._data, to_end=masked, to_begin=masked) assert_equal(dx._data, N.r_[0,difx_d,0]) assert_equal(dx._mask, N.r_[1,0,0,0,0,1]) @@ -362,24 +362,24 @@ # On ndarrays x = 
numpy.random.rand(10) y = numpy.random.rand(20).reshape(-1,2) - assert_almost_equal(mpolyfit(x,y,3),numpy.polyfit(x,y,3)) + assert_almost_equal(polyfit(x,y,3),numpy.polyfit(x,y,3)) # ON 1D maskedarrays x = x.view(MaskedArray) x[0] = masked y = y.view(MaskedArray) y[0,0] = y[-1,-1] = masked # - (C,R,K,S,D) = mpolyfit(x,y[:,0],3,full=True) + (C,R,K,S,D) = polyfit(x,y[:,0],3,full=True) (c,r,k,s,d) = numpy.polyfit(x[1:], y[1:,0].compressed(), 3, full=True) for (a,a_) in zip((C,R,K,S,D),(c,r,k,s,d)): assert_almost_equal(a, a_) # - (C,R,K,S,D) = mpolyfit(x,y[:,-1],3,full=True) + (C,R,K,S,D) = polyfit(x,y[:,-1],3,full=True) (c,r,k,s,d) = numpy.polyfit(x[1:-1], y[1:-1,-1], 3, full=True) for (a,a_) in zip((C,R,K,S,D),(c,r,k,s,d)): assert_almost_equal(a, a_) # - (C,R,K,S,D) = mpolyfit(x,y,3,full=True) + (C,R,K,S,D) = polyfit(x,y,3,full=True) (c,r,k,s,d) = numpy.polyfit(x[1:-1], y[1:-1,:], 3, full=True) for (a,a_) in zip((C,R,K,S,D),(c,r,k,s,d)): assert_almost_equal(a, a_) Modified: trunk/numpy/ma/tests/test_mrecords.py =================================================================== --- trunk/numpy/ma/tests/test_mrecords.py 2008-05-14 20:01:18 UTC (rev 5167) +++ trunk/numpy/ma/tests/test_mrecords.py 2008-05-14 22:08:33 UTC (rev 5168) @@ -120,6 +120,16 @@ mbase.b[3:] = masked assert_equal(mbase.b, base['b']) assert_equal(mbase.b._mask, [0,1,0,1,1]) + # Set fields globally.......................... + ndtype = [('alpha','|S1'),('num',int)] + data = ma.array([('a',1),('b',2),('c',3)], dtype=ndtype) + rdata = data.view(MaskedRecords) + val = ma.array([10,20,30], mask=[1,0,0]) + # + rdata['num'] = val + assert_equal(rdata.num, val) + assert_equal(rdata.num.mask, [1,0,0]) + # def test_set_mask(self): base = self.base.copy() From numpy-svn at scipy.org Wed May 14 18:41:37 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Wed, 14 May 2008 17:41:37 -0500 (CDT) Subject: [Numpy-svn] r5169 - trunk/numpy/linalg Message-ID: <20080514224137.49FF139C0E4@scipy.org> Author: stefan Date: 2008-05-14 17:41:16 -0500 (Wed, 14 May 2008) New Revision: 5169 Modified: trunk/numpy/linalg/linalg.py Log: Merge docstrings from wiki. Modified: trunk/numpy/linalg/linalg.py =================================================================== --- trunk/numpy/linalg/linalg.py 2008-05-14 22:08:33 UTC (rev 5168) +++ trunk/numpy/linalg/linalg.py 2008-05-14 22:41:16 UTC (rev 5169) @@ -335,10 +335,11 @@ # Cholesky decomposition def cholesky(a): - """Compute the Cholesky decomposition of a matrix. + """ + Compute the Cholesky decomposition of a matrix. - Returns the Cholesky decomposition, :lm:`A = L L^*` of a Hermitian - positive-definite matrix :lm:`A`. + Returns the Cholesky decomposition, :math:`A = L L^*` of a Hermitian + positive-definite matrix :math:`A`. Parameters ---------- @@ -354,9 +355,8 @@ Examples -------- - >>> from numpy import array, linalg - >>> a = array([[1,-2j],[2j,5]]) - >>> L = linalg.cholesky(a) + >>> A = np.array([[1,-2j],[2j,5]]) + >>> L = np.linalg.cholesky(A) >>> L array([[ 1.+0.j, 0.+0.j], [ 0.+2.j, 1.+0.j]]) @@ -636,7 +636,7 @@ determinant and I is the identity matrix. 
""" - a, wrap = _makearray(a) + a, wrap = _makearray(a) _assertRank2(a) _assertSquareness(a) t, result_t = _commonType(a) From numpy-svn at scipy.org Wed May 14 18:42:35 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Wed, 14 May 2008 17:42:35 -0500 (CDT) Subject: [Numpy-svn] r5170 - trunk/numpy Message-ID: <20080514224235.5121939C4DC@scipy.org> Author: stefan Date: 2008-05-14 17:42:18 -0500 (Wed, 14 May 2008) New Revision: 5170 Modified: trunk/numpy/add_newdocs.py Log: Minor reST cleanup in docstrings. Modified: trunk/numpy/add_newdocs.py =================================================================== --- trunk/numpy/add_newdocs.py 2008-05-14 22:41:16 UTC (rev 5169) +++ trunk/numpy/add_newdocs.py 2008-05-14 22:42:18 UTC (rev 5170) @@ -920,8 +920,8 @@ indices : integer array Array of indices that sort 'a' along the specified axis. - SeeAlso - ------- + See Also + -------- lexsort : indirect stable sort with multiple keys sort : inplace sort @@ -1212,8 +1212,8 @@ If a is 2-d, then a 1-d array containing the diagonal is returned. If a is n-d, n > 2, then an array of diagonals is returned. - SeeAlso - ------- + See Also + -------- diag : matlab workalike for 1-d and 2-d arrays. diagflat : creates diagonal arrays trace : sum along diagonals @@ -1324,7 +1324,7 @@ Alternative output array in which to place the result. Must be of the same shape and buffer length as the expected output. - Results + Returns ------- amax : array_like New array holding the result. @@ -1359,8 +1359,8 @@ A new array holding the result is returned unless out is specified, in which case a reference to out is returned. - SeeAlso - ------- + See Also + -------- var : variance std : standard deviation @@ -1386,7 +1386,7 @@ Alternative output array in which to place the result. Must be of the same shape and buffer length as the expected output. - Results + Returns ------- amin : array_like New array holding the result. @@ -1800,8 +1800,8 @@ which fields to compare first, second, etc. Not all fields need be specified. - SeeAlso - ------- + See Also + -------- argsort : indirect sort lexsort : indirect stable sort on multiple keys searchsorted : find keys in sorted array @@ -1874,8 +1874,8 @@ A new array holding the result is returned unless out is specified, in which case a reference to out is returned. - SeeAlso - ------- + See Also + -------- var : variance mean : average @@ -2181,8 +2181,8 @@ A new array holding the result is returned unless out is specified, in which case a reference to out is returned. 
- SeeAlso - ------- + See Also + -------- std : standard deviation mean: average From numpy-svn at scipy.org Wed May 14 18:52:30 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Wed, 14 May 2008 17:52:30 -0500 (CDT) Subject: [Numpy-svn] r5171 - trunk/numpy Message-ID: <20080514225230.6C28A39C0E4@scipy.org> Author: ptvirtan Date: 2008-05-14 17:52:08 -0500 (Wed, 14 May 2008) New Revision: 5171 Modified: trunk/numpy/__init__.py Log: Don't assemble numpy module docstring from several parts; removes duplication Modified: trunk/numpy/__init__.py =================================================================== --- trunk/numpy/__init__.py 2008-05-14 22:42:18 UTC (rev 5170) +++ trunk/numpy/__init__.py 2008-05-14 22:52:08 UTC (rev 5171) @@ -118,33 +118,6 @@ __all__.extend(lib.__all__) __all__.extend(['linalg', 'fft', 'random', 'ctypeslib']) - if __doc__ is not None: - __doc__ += """ - -Available subpackages ---------------------- -core --- Defines a multi-dimensional array and useful procedures - for Numerical computation. -lib --- Basic functions used by several sub-packages and useful - to have in the main name-space. -random --- Core Random Tools -linalg --- Core Linear Algebra Tools -fft --- Core FFT routines -testing --- Numpy testing tools - - These packages require explicit import -f2py --- Fortran to Python Interface Generator. -distutils --- Enhancements to distutils with support for - Fortran compilers support and more. - - -Global symbols from subpackages -------------------------------- -core --> * (use numpy.* not numpy.core.*) -lib --> * (use numpy.* not numpy.lib.*) -testing --> NumpyTest -""" - def test(*args, **kw): import os, sys print 'Numpy is installed in %s' % (os.path.split(__file__)[0],) @@ -153,16 +126,3 @@ return NumpyTest().test(*args, **kw) test.__doc__ = NumpyTest.test.__doc__ - if __doc__ is not None: - __doc__ += """ - -Utility tools -------------- - - test --- Run numpy unittests - pkgload --- Load numpy packages - show_config --- Show numpy build configuration - dual --- Overwrite certain functions with high-performance Scipy tools - matlib --- Make everything matrices. - __version__ --- Numpy version string -""" From numpy-svn at scipy.org Wed May 14 22:06:45 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Wed, 14 May 2008 21:06:45 -0500 (CDT) Subject: [Numpy-svn] r5172 - in trunk/numpy/core: src tests Message-ID: <20080515020645.4358A39C0EB@scipy.org> Author: oliphant Date: 2008-05-14 21:06:28 -0500 (Wed, 14 May 2008) New Revision: 5172 Modified: trunk/numpy/core/src/arrayobject.c trunk/numpy/core/tests/test_defmatrix.py Log: Fix the unit tests and eliminate the dimensionality reduction assumption for non base-class arrays. 
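The intent of this change can be sketched in a few lines; this illustrates the behaviour the updated tests check and is not code from the commit:

    import numpy as np

    m = np.matrix([[1, 2], [3, 4]])

    # Indexing a single row of a matrix must not reduce it to one dimension.
    row = m[0]
    assert row.ndim == 2

    # Building an ndarray from a list of matrices should likewise keep each
    # element's full 2-d shape instead of silently collapsing it.
    stacked = np.array([m, m])
    assert stacked.shape == (2, 2, 2)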
Modified: trunk/numpy/core/src/arrayobject.c =================================================================== --- trunk/numpy/core/src/arrayobject.c 2008-05-14 22:52:08 UTC (rev 5171) +++ trunk/numpy/core/src/arrayobject.c 2008-05-15 02:06:28 UTC (rev 5172) @@ -7082,6 +7082,11 @@ int n, r, i; PyObject *e; + if (PyArray_Check(s)) { + *itemsize = MAX(*itemsize, PyArray_ITEMSIZE(s)); + return 0; + } + n = PyObject_Length(s); if ((nd == 0) || PyString_Check(s) || @@ -7112,6 +7117,14 @@ PyObject *e; int r, n, i, n_lower; + + if (PyArray_Check(s)) { + for (i=0; i a->nd) { PyErr_Format(PyExc_ValueError, "setArrayFromSequence: sequence/array dimensions mismatch."); return -1; } + slen = PySequence_Length(s); + if (slen != a->dimensions[dim]) { PyErr_Format(PyExc_ValueError, "setArrayFromSequence: sequence/array shape mismatch."); Modified: trunk/numpy/core/tests/test_defmatrix.py =================================================================== --- trunk/numpy/core/tests/test_defmatrix.py 2008-05-14 22:52:08 UTC (rev 5171) +++ trunk/numpy/core/tests/test_defmatrix.py 2008-05-15 02:06:28 UTC (rev 5172) @@ -187,7 +187,7 @@ def check_dimesions(self): a = self.a x = a[0] - assert_equal(x.ndim, 1) + assert_equal(x.ndim, 2) def check_array_from_matrix_list(self): a = self.a From numpy-svn at scipy.org Thu May 15 03:56:10 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Thu, 15 May 2008 02:56:10 -0500 (CDT) Subject: [Numpy-svn] r5173 - trunk/numpy/f2py/src Message-ID: <20080515075610.31FA739C080@scipy.org> Author: pearu Date: 2008-05-15 02:56:05 -0500 (Thu, 15 May 2008) New Revision: 5173 Modified: trunk/numpy/f2py/src/fortranobject.c Log: Changed an idiom of appending strings to a buffer. Fixes numpy ticket 792. Modified: trunk/numpy/f2py/src/fortranobject.c =================================================================== --- trunk/numpy/f2py/src/fortranobject.c 2008-05-15 02:06:28 UTC (rev 5172) +++ trunk/numpy/f2py/src/fortranobject.c 2008-05-15 07:56:05 UTC (rev 5173) @@ -100,44 +100,71 @@ static PyObject * fortran_doc (FortranDataDef def) { char *p; + /* + p is used as a buffer to hold generated documentation strings. + A common operation in generating the documentation strings, is + appending a string to the buffer p. Earlier, the following + idiom was: + + sprintf(p, "%s", p); + + but this does not work when _FORTIFY_SOURCE=2 is enabled: instead + of appending the string, the string is inserted. 
+ + As a fix, the following idiom should be used for appending + strings to a buffer p: + + sprintf(p + strlen(p), ""); + */ PyObject *s = NULL; int i; unsigned size=100; if (def.doc!=NULL) size += strlen(def.doc); p = (char*)malloc (size); + p[0] = '\0'; /* make sure that the buffer has zero length */ if (sprintf(p,"%s - ",def.name)==0) goto fail; if (def.rank==-1) { if (def.doc==NULL) { - if (sprintf(p,"%sno docs available",p)==0) + if (sprintf(p+strlen(p),"no docs available")==0) goto fail; } else { - if (sprintf(p,"%s%s",p,def.doc)==0) + if (sprintf(p+strlen(p),"%s",def.doc)==0) goto fail; } } else { PyArray_Descr *d = PyArray_DescrFromType(def.type); - if (sprintf(p,"%s'%c'-",p,d->type)==0) {Py_DECREF(d); goto fail;} + if (sprintf(p+strlen(p),"'%c'-",d->type)==0) { + Py_DECREF(d); + goto fail; + } Py_DECREF(d); if (def.data==NULL) { - if (sprintf(p,"%sarray(%" NPY_INTP_FMT,p,def.dims.d[0])==0) goto fail; + if (sprintf(p+strlen(p),"array(%" NPY_INTP_FMT,def.dims.d[0])==0) + goto fail; for(i=1;i0) { - if (sprintf(p,"%sarray(%"NPY_INTP_FMT,p,def.dims.d[0])==0) goto fail; + if (sprintf(p+strlen(p),"array(%"NPY_INTP_FMT,def.dims.d[0])==0) + goto fail; for(i=1;isize) { - fprintf(stderr,"fortranobject.c:fortran_doc:len(p)=%zd>%d(size): too long doc string required, increase size\n",strlen(p),size); + fprintf(stderr,"fortranobject.c:fortran_doc:len(p)=%zd>%d(size):"\ + " too long doc string required, increase size\n",\ + strlen(p),size); goto fail; } s = PyString_FromString(p); From numpy-svn at scipy.org Thu May 15 05:51:50 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Thu, 15 May 2008 04:51:50 -0500 (CDT) Subject: [Numpy-svn] r5174 - in trunk/numpy/core: . tests tests/data Message-ID: <20080515095150.8AD4439C25E@scipy.org> Author: stefan Date: 2008-05-15 04:50:58 -0500 (Thu, 15 May 2008) New Revision: 5174 Added: trunk/numpy/core/tests/data/ trunk/numpy/core/tests/data/astype_copy.pkl trunk/numpy/core/tests/data/recarray_from_file.fits Removed: trunk/numpy/core/tests/testdata.fits Modified: trunk/numpy/core/setup.py trunk/numpy/core/tests/test_records.py trunk/numpy/core/tests/test_regression.py Log: Add test for #788 [patch by Eric Firing]. 
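The property exercised by the new regression test can be demonstrated with ordinary arrays as well; note that the original ticket involved a pickled array whose float64 dtype had ``isbuiltin == 0``, which this sketch does not reproduce:

    import numpy as np

    x = np.arange(5, dtype=np.float64)
    y = x.astype(np.float64)

    # astype returns a copy even when the requested dtype is equivalent to the
    # existing one, so the two arrays must not share a data buffer.
    assert y.__array_interface__['data'][0] != x.__array_interface__['data'][0]

    y[0] = 99.0
    assert x[0] == 0.0     # the original array is untouched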
Modified: trunk/numpy/core/setup.py =================================================================== --- trunk/numpy/core/setup.py 2008-05-15 07:56:05 UTC (rev 5173) +++ trunk/numpy/core/setup.py 2008-05-15 09:50:58 UTC (rev 5174) @@ -307,6 +307,8 @@ config.add_data_dir('tests') + config.add_data_dir('tests/data') + config.make_svn_version_py() return config Added: trunk/numpy/core/tests/data/astype_copy.pkl =================================================================== (Binary files differ) Property changes on: trunk/numpy/core/tests/data/astype_copy.pkl ___________________________________________________________________ Name: svn:mime-type + application/octet-stream Copied: trunk/numpy/core/tests/data/recarray_from_file.fits (from rev 5168, trunk/numpy/core/tests/testdata.fits) Modified: trunk/numpy/core/tests/test_records.py =================================================================== --- trunk/numpy/core/tests/test_records.py 2008-05-15 07:56:05 UTC (rev 5173) +++ trunk/numpy/core/tests/test_records.py 2008-05-15 09:50:58 UTC (rev 5174) @@ -1,7 +1,7 @@ from numpy.testing import * set_package_path() -import os as _os +from os import path import numpy.core;reload(numpy.core) from numpy.core import * restore_path() @@ -33,8 +33,8 @@ assert_equal(r.a,array([1,2,3,4])) def check_recarray_fromfile(self): - __path__ = _os.path.split(__file__) - filename = _os.path.join(__path__[0], "testdata.fits") + data_dir = path.join(path.dirname(__file__),'data') + filename = path.join(data_dir,'recarray_from_file.fits') fd = open(filename) fd.seek(2880*2) r = rec.fromfile(fd, formats='f8,i4,a5', shape=3, byteorder='big') Modified: trunk/numpy/core/tests/test_regression.py =================================================================== --- trunk/numpy/core/tests/test_regression.py 2008-05-15 07:56:05 UTC (rev 5173) +++ trunk/numpy/core/tests/test_regression.py 2008-05-15 09:50:58 UTC (rev 5174) @@ -1,7 +1,9 @@ from numpy.testing import * + from StringIO import StringIO import pickle import sys +from os import path set_local_path() import numpy as np @@ -1025,5 +1027,16 @@ # This shouldn't cause a segmentation fault: np.dot(z, y) + def check_astype_copy(self, level=rlevel): + """Ticket 788, changeset r5155""" + # The test data file was generated by scipy.io.savemat. + # The dtype is float64, but the isbuiltin attribute is 0. + data_dir = path.join(path.dirname(__file__), 'data') + filename = path.join(data_dir, "astype_copy.pkl") + xp = pickle.load(open(filename)) + xpd = xp.astype(np.float64) + assert (xp.__array_interface__['data'][0] != + xpd.__array_interface__['data'][0]) + if __name__ == "__main__": NumpyTest().run() Deleted: trunk/numpy/core/tests/testdata.fits =================================================================== --- trunk/numpy/core/tests/testdata.fits 2008-05-15 07:56:05 UTC (rev 5173) +++ trunk/numpy/core/tests/testdata.fits 2008-05-15 09:50:58 UTC (rev 5174) @@ -1 +0,0 @@ -SIMPLE = T / file does conform to FITS standard BITPIX = 16 / number of bits per data pixel NAXIS = 0 / number of data axes EXTEND = T / FITS dataset may contain extensions COMMENT FITS (Flexible Image Transport System) format defined in Astronomy andCOMMENT Astrophysics Supplement Series v44/p363, v44/p371, v73/p359, v73/p365.COMMENT Contact the NASA Science Office of Standards and Technology for the COMMENT FITS Definition document #100 and other FITS information. 
ORIGIN = 'STScI-STSDAS/TABLES' / Tables version 1999-09-07 FILENAME= 'tb.fits ' / name of file NEXTEND = 1 / number of extensions in file END XTENSION= 'BINTABLE' / binary table extension BITPIX = 8 / 8-bit bytes NAXIS = 2 / 2-dimensional binary table NAXIS1 = 17 / width of table in bytes NAXIS2 = 3 PCOUNT = 0 / size of special data area GCOUNT = 1 / one data group (required keyword) TFIELDS = 3 TTYPE1 = 'a ' / label for field 1 TFORM1 = '1D ' / data format of field: 8-byte DOUBLE TTYPE2 = 'b ' / label for field 2 TFORM2 = '1J ' / data format of field: 4-byte INTEGER TTYPE3 = 'c ' / label for field 3 TFORM3 = '5A ' / data format of field: ASCII Character TDISP1 = 'G25.16 ' / display format TDISP2 = 'I11 ' / display format TNULL2 = -2147483647 / undefined value for column TDISP3 = 'A5 ' / display format HISTORY Created Fri 16:25:07 22-Jun-2001 END @fffffg=abcde@??????>fghij@333334?kl \ No newline at end of file From numpy-svn at scipy.org Thu May 15 10:02:33 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Thu, 15 May 2008 09:02:33 -0500 (CDT) Subject: [Numpy-svn] r5175 - trunk/numpy Message-ID: <20080515140233.5ECB039C4FD@scipy.org> Author: stefan Date: 2008-05-15 09:02:11 -0500 (Thu, 15 May 2008) New Revision: 5175 Modified: trunk/numpy/add_newdocs.py Log: Merge `choose` docstring fix from wiki. Modified: trunk/numpy/add_newdocs.py =================================================================== --- trunk/numpy/add_newdocs.py 2008-05-15 09:50:58 UTC (rev 5174) +++ trunk/numpy/add_newdocs.py 2008-05-15 14:02:11 UTC (rev 5175) @@ -967,8 +967,8 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('choose', - """a.choose(choices, out=None, mode='raise') - a.choose(*choices, out=None, mode='raise') + """ + a.choose(choices, out=None, mode='raise') Use an index array to construct a new array from a set of choices. @@ -2472,4 +2472,3 @@ [12, 15, 18]]) """)) - From numpy-svn at scipy.org Thu May 15 10:37:07 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Thu, 15 May 2008 09:37:07 -0500 (CDT) Subject: [Numpy-svn] r5176 - trunk/numpy/f2py Message-ID: <20080515143707.5EE7039C080@scipy.org> Author: pearu Date: 2008-05-15 09:37:02 -0500 (Thu, 15 May 2008) New Revision: 5176 Modified: trunk/numpy/f2py/crackfortran.py Log: f2py: disallow matching module procedure-s as module-s. 
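The effect of the amended pattern can be seen with a stripped-down version of the regular expression; the real ``crackfortran`` pattern carries additional context around it, so this is only an illustration and the Fortran lines are made up:

    import re

    # Negative lookahead: "module" opens a module block unless it is
    # immediately followed by "procedure".
    begin_module = re.compile(r'\bmodule(?!\s*procedure)\b', re.I)

    assert begin_module.search('module foo')                            # a real module
    assert begin_module.search('MODULE  constants')                     # case-insensitive
    assert begin_module.search('module procedure solve_real') is None   # no longer treated as a module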
Modified: trunk/numpy/f2py/crackfortran.py =================================================================== --- trunk/numpy/f2py/crackfortran.py 2008-05-15 14:02:11 UTC (rev 5175) +++ trunk/numpy/f2py/crackfortran.py 2008-05-15 14:37:02 UTC (rev 5176) @@ -456,7 +456,7 @@ # groupbegins77=r'program|block\s*data' beginpattern77=re.compile(beforethisafter%('',groupbegins77,groupbegins77,'.*'),re.I),'begin' -groupbegins90=groupbegins77+r'|module|python\s*module|interface|type(?!\s*\()' +groupbegins90=groupbegins77+r'|module(?!\s*procedure)|python\s*module|interface|type(?!\s*\()' beginpattern90=re.compile(beforethisafter%('',groupbegins90,groupbegins90,'.*'),re.I),'begin' groupends=r'end|endprogram|endblockdata|endmodule|endpythonmodule|endinterface' endpattern=re.compile(beforethisafter%('',groupends,groupends,'[\w\s]*'),re.I),'end' From numpy-svn at scipy.org Fri May 16 04:56:06 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Fri, 16 May 2008 03:56:06 -0500 (CDT) Subject: [Numpy-svn] r5177 - trunk/numpy/core Message-ID: <20080516085606.2246E39C789@scipy.org> Author: pearu Date: 2008-05-16 03:56:03 -0500 (Fri, 16 May 2008) New Revision: 5177 Modified: trunk/numpy/core/setup.py Log: get_build_architecture is not available in python 2.4 and older, so use numpy.distutils one. This should fix undiscovered/not reported bug of building numpy with python 2.4 on windows systems. Modified: trunk/numpy/core/setup.py =================================================================== --- trunk/numpy/core/setup.py 2008-05-15 14:37:02 UTC (rev 5176) +++ trunk/numpy/core/setup.py 2008-05-16 08:56:03 UTC (rev 5177) @@ -115,7 +115,7 @@ moredefs.append('__NPY_PRIVATE_NO_SIGNAL') if sys.platform=='win32' or os.name=='nt': - from distutils.msvccompiler import get_build_architecture + from numpy.distutils.misc_util import get_build_architecture a = get_build_architecture() print 'BUILD_ARCHITECTURE: %r, os.name=%r, sys.platform=%r' % (a, os.name, sys.platform) if a == 'AMD64': From numpy-svn at scipy.org Fri May 16 05:16:51 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Fri, 16 May 2008 04:16:51 -0500 (CDT) Subject: [Numpy-svn] r5178 - trunk/numpy/lib Message-ID: <20080516091651.2507D39C12A@scipy.org> Author: stefan Date: 2008-05-16 04:16:36 -0500 (Fri, 16 May 2008) New Revision: 5178 Modified: trunk/numpy/lib/function_base.py Log: Fix ReST markup in docstrings. Modified: trunk/numpy/lib/function_base.py =================================================================== --- trunk/numpy/lib/function_base.py 2008-05-16 08:56:03 UTC (rev 5177) +++ trunk/numpy/lib/function_base.py 2008-05-16 09:16:36 UTC (rev 5178) @@ -39,33 +39,37 @@ endpoint is True, the last sample is stop. If retstep is True then return (seq, step_value), where step_value used. - :Parameters: - start : {float} - The value the sequence starts at. - stop : {float} - The value the sequence stops at. If ``endpoint`` is false, then - this is not included in the sequence. Otherwise it is - guaranteed to be the last value. - num : {integer} - Number of samples to generate. Default is 50. - endpoint : {boolean} - If true, ``stop`` is the last sample. Otherwise, it is not - included. Default is true. - retstep : {boolean} - If true, return ``(samples, step)``, where ``step`` is the - spacing used in generating the samples. + Parameters + ---------- + start : {float} + The value the sequence starts at. + stop : {float} + The value the sequence stops at. 
If ``endpoint`` is false, then + this is not included in the sequence. Otherwise it is + guaranteed to be the last value. + num : {integer} + Number of samples to generate. Default is 50. + endpoint : {boolean} + If true, ``stop`` is the last sample. Otherwise, it is not + included. Default is true. + retstep : {boolean} + If true, return ``(samples, step)``, where ``step`` is the + spacing used in generating the samples. - :Returns: - samples : {array} - ``num`` equally spaced samples from the range [start, stop] - or [start, stop). - step : {float} (Only if ``retstep`` is true) - Size of spacing between samples. + Returns + ------- + samples : {array} + ``num`` equally spaced samples from the range [start, stop] + or [start, stop). + step : {float} (Only if ``retstep`` is true) + Size of spacing between samples. - :See Also: - `arange` : Similiar to linspace, however, when used with - a float endpoint, that endpoint may or may not be included. - `logspace` + See Also + -------- + arange : Similiar to linspace, however, when used with + a float endpoint, that endpoint may or may not be included. + logspace + """ num = int(num) if num <= 0: @@ -103,63 +107,62 @@ Parameters ---------- - a : array - The data to histogram. + The data to histogram. bins : int or sequence - If an int, then the number of equal-width bins in the given range. - If new=True, bins can also be the bin edges, allowing for non-constant - bin widths. + If an int, then the number of equal-width bins in the given + range. If new=True, bins can also be the bin edges, allowing + for non-constant bin widths. range : (float, float) - The lower and upper range of the bins. If not provided, then - range is simply (a.min(), a.max()). Using new=False, lower than range - are ignored, and values higher than range are tallied in the rightmost - bin. Using new=True, both lower and upper outliers are ignored. + The lower and upper range of the bins. If not provided, range + is simply (a.min(), a.max()). Using new=False, lower than + range are ignored, and values higher than range are tallied in + the rightmost bin. Using new=True, both lower and upper + outliers are ignored. normed : bool - If False, the result array will contain the number of samples in - each bin. If True, the result array is the value of the - probability *density* function at the bin normalized such that the - *integral* over the range is 1. Note that the sum of all of the - histogram values will not usually be 1; it is not a probability - *mass* function. + If False, the result array will contain the number of samples + in each bin. If True, the result array is the value of the + probability *density* function at the bin normalized such that + the *integral* over the range is 1. Note that the sum of all + of the histogram values will not usually be 1; it is not a + probability *mass* function. weights : array - An array of weights, the same shape as a. If normed is False, the - histogram is computed by summing the weights of the values falling into - each bin. If normed is True, the weights are normalized, so that the - integral of the density over the range is 1. This option is only - available with new=True. - + An array of weights, the same shape as a. If normed is False, + the histogram is computed by summing the weights of the values + falling into each bin. If normed is True, the weights are + normalized, so that the integral of the density over the range + is 1. This option is only available with new=True. 
+ new : bool - Compatibility argument to transition from the old version (v1.1) to - the new version (v1.2). - - - Return - ------ + Compatibility argument to transition from the old version + (v1.1) to the new version (v1.2). + + Returns + ------- hist : array - The values of the histogram. See `normed` and `weights` for a + The values of the histogram. See `normed` and `weights` for a description of the possible semantics. bin_edges : float array With new=False, return the left bin edges (length(hist)). - With new=True, return the bin edges (length(hist)+1). + With new=True, return the bin edges (length(hist)+1). - SeeAlso: + See Also + -------- + histogramdd - histogramdd - """ # Old behavior if new is False: warnings.warn(""" - The semantics of histogram will be modified in - release 1.2 to improve outlier handling. The new behavior can be - obtained using new=True. Note that the new version accepts/returns - the bin edges instead of the left bin edges. + The semantics of histogram will be modified in + release 1.2 to improve outlier handling. The new behavior can be + obtained using new=True. Note that the new version accepts/returns + the bin edges instead of the left bin edges. Please read the docstring for more information.""", FutureWarning) a = asarray(a).ravel() @@ -168,13 +171,13 @@ if (mn > mx): raise AttributeError, \ 'max must be larger than min in range parameter.' - + if not iterable(bins): if range is None: range = (a.min(), a.max()) else: warnings.warn(""" - Outliers handling will change in version 1.2. + Outliers handling will change in version 1.2. Please read the docstring for details.""", FutureWarning) mn, mx = [mi+0.0 for mi in range] if mn == mx: @@ -182,20 +185,20 @@ mx += 0.5 bins = linspace(mn, mx, bins, endpoint=False) else: - if normed: + if normed: raise ValueError, 'Use new=True to pass bin edges explicitly.' warnings.warn(""" - The semantic for bins will change in version 1.2. + The semantic for bins will change in version 1.2. The bins will become the bin edges, instead of the left bin edges. """, FutureWarning) bins = asarray(bins) if (np.diff(bins) < 0).any(): raise AttributeError, 'bins must increase monotonically.' - - + + if weights is not None: raise ValueError, 'weights are only available with new=True.' - + # best block size probably depends on processor cache size block = 65536 n = sort(a[:block]).searchsorted(bins) @@ -203,15 +206,15 @@ n += sort(a[i:i+block]).searchsorted(bins) n = concatenate([n, [len(a)]]) n = n[1:]-n[:-1] - + if normed: db = bins[1] - bins[0] return 1.0/(a.size*db) * n, bins else: return n, bins - - + + # New behavior elif new is True: a = asarray(a) @@ -221,13 +224,13 @@ raise ValueError, 'weights should have the same shape as a.' weights = weights.ravel() a = a.ravel() - + if (range is not None): mn, mx = range if (mn > mx): raise AttributeError, \ 'max must be larger than min in range parameter.' - + if not iterable(bins): if range is None: range = (a.min(), a.max()) @@ -240,7 +243,7 @@ bins = asarray(bins) if (np.diff(bins) < 0).any(): raise AttributeError, 'bins must increase monotonically.' - + # Histogram is an integer or a float array depending on the weights. 
if weights is None: ntype = int @@ -261,64 +264,64 @@ tmp_w = weights[i:i+block] sorting_index = np.argsort(tmp_a) sa = tmp_a[sorting_index] - sw = tmp_w[sorting_index] + sw = tmp_w[sorting_index] cw = np.concatenate(([zero,], sw.cumsum())) bin_index = np.r_[sa.searchsorted(bins[:-1], 'left'), \ sa.searchsorted(bins[-1], 'right')] n += cw[bin_index] - + n = np.diff(n) - + if normed is False: return n, bins elif normed is True: db = array(np.diff(bins), float) return n/(n*db).sum(), bins - + def histogramdd(sample, bins=10, range=None, normed=False, weights=None): """histogramdd(sample, bins=10, range=None, normed=False, weights=None) Return the N-dimensional histogram of the sample. - Parameters: + Parameters + ---------- + sample : sequence or array + A sequence containing N arrays or an NxM array. Input data. - sample : sequence or array - A sequence containing N arrays or an NxM array. Input data. + bins : sequence or scalar + A sequence of edge arrays, a sequence of bin counts, or a scalar + which is the bin count for all dimensions. Default is 10. - bins : sequence or scalar - A sequence of edge arrays, a sequence of bin counts, or a scalar - which is the bin count for all dimensions. Default is 10. + range : sequence + A sequence of lower and upper bin edges. Default is [min, max]. - range : sequence - A sequence of lower and upper bin edges. Default is [min, max]. + normed : boolean + If False, return the number of samples in each bin, if True, + returns the density. - normed : boolean - If False, return the number of samples in each bin, if True, - returns the density. + weights : array + Array of weights. The weights are normed only if normed is True. + Should the sum of the weights not equal N, the total bin count will + not be equal to the number of samples. - weights : array - Array of weights. The weights are normed only if normed is True. - Should the sum of the weights not equal N, the total bin count will - not be equal to the number of samples. + Returns + ------- + hist : array + Histogram array. - Returns: + edges : list + List of arrays defining the lower bin edges. - hist : array - Histogram array. + See Also + -------- + histogram - edges : list - List of arrays defining the lower bin edges. + Examples + -------- + >>> x = random.randn(100,3) + >>> hist3d, edges = histogramdd(x, bins = (5, 6, 7)) - SeeAlso: - - histogram - - Example - - >>> x = random.randn(100,3) - >>> hist3d, edges = histogramdd(x, bins = (5, 6, 7)) - """ try: @@ -338,7 +341,8 @@ try: M = len(bins) if M != D: - raise AttributeError, 'The dimension of bins must be a equal to the dimension of the sample x.' + raise AttributeError, 'The dimension of bins must be equal ' \ + 'to the dimension of the sample x.' except TypeError: bins = D*[bins] @@ -384,7 +388,8 @@ # Rounding precision decimal = int(-log10(dedges[i].min())) +6 # Find which points are on the rightmost edge. - on_edge = where(around(sample[:,i], decimal) == around(edges[i][-1], decimal))[0] + on_edge = where(around(sample[:,i], decimal) == around(edges[i][-1], + decimal))[0] # Shift these points one bin to the left. Ncount[i][on_edge] -= 1 @@ -399,7 +404,8 @@ xy += Ncount[ni[i]] * nbin[ni[i+1:]].prod() xy += Ncount[ni[-1]] - # Compute the number of repetitions in xy and assign it to the flattened histmat. + # Compute the number of repetitions in xy and assign it to the + # flattened histmat. if len(xy) == 0: return zeros(nbin-2, int), edges @@ -463,18 +469,18 @@ sum_of_weights is has the same type as the average. 
- Example - ------- + Examples + -------- >>> average(range(1,11), weights=range(10,0,-1)) 4.0 - Exceptions - ---------- + Raises + ------ ZeroDivisionError - Raised when all weights along axis are zero. See numpy.ma.average for a + When all weights along axis are zero. See numpy.ma.average for a version robust to this type of error. TypeError - Raised when the length of 1D weights is not the same as the shape of a + When the length of 1D weights is not the same as the shape of a along axis. """ @@ -857,11 +863,12 @@ def trim_zeros(filt, trim='fb'): """ Trim the leading and trailing zeros from a 1D array. - Example: - >>> import numpy - >>> a = array((0, 0, 0, 1, 2, 3, 2, 1, 0)) - >>> numpy.trim_zeros(a) - array([1, 2, 3, 2, 1]) + Examples + -------- + >>> import numpy + >>> a = array((0, 0, 0, 1, 2, 3, 2, 1, 0)) + >>> numpy.trim_zeros(a) + array([1, 2, 3, 2, 1]) """ first = 0 @@ -884,7 +891,8 @@ def unique(x): """Return sorted unique items from an array or sequence. - Example: + Examples + -------- >>> unique([5,2,4,0,4,4,2,2,1]) array([0, 1, 2, 4, 5]) @@ -1004,10 +1012,9 @@ class vectorize(object): """ - vectorize(somefunction, otypes=None, doc=None) - Generalized Function class. + vectorize(somefunction, otypes=None, doc=None) - Description: + Generalized function class. Define a vectorized function which takes nested sequence of objects or numpy arrays as inputs and returns a @@ -1021,12 +1028,13 @@ of data-types specifiers. There should be one data-type specifier for each output. - Input: + Parameters + ---------- + f : callable + A Python function or method. - somefunction -- a Python function or method - - Example: - + Examples + -------- >>> def myfunc(a, b): ... if a > b: ... return a-b @@ -1510,7 +1518,8 @@ If axis is None, then ravel the array first. - Example: + Examples + -------- >>> arr = [[3,4,5], ... [1,2,3], ... [6,7,8]] @@ -1613,7 +1622,8 @@ The obj argument can be an integer, a slice, or a sequence of integers. - Example: + Examples + -------- >>> a = array([[1,2,3], ... [4,5,6], ... [7,8,9]]) From numpy-svn at scipy.org Fri May 16 10:57:08 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Fri, 16 May 2008 09:57:08 -0500 (CDT) Subject: [Numpy-svn] r5179 - trunk/numpy/doc Message-ID: <20080516145708.3201439C771@scipy.org> Author: stefan Date: 2008-05-16 09:56:52 -0500 (Fri, 16 May 2008) New Revision: 5179 Added: trunk/numpy/doc/HOWTO_BUILD_DOCS.txt Modified: trunk/numpy/doc/HOWTO_DOCUMENT.txt Log: Update documentation standard. Added: trunk/numpy/doc/HOWTO_BUILD_DOCS.txt =================================================================== --- trunk/numpy/doc/HOWTO_BUILD_DOCS.txt 2008-05-16 09:16:36 UTC (rev 5178) +++ trunk/numpy/doc/HOWTO_BUILD_DOCS.txt 2008-05-16 14:56:52 UTC (rev 5179) @@ -0,0 +1,60 @@ +========================================= +Building the NumPy API and reference docs +========================================= + +Using Epydoc_ +------------- + +Currently, we recommend that you build epydoc from the trunk:: + + svn co https://epydoc.svn.sf.net/svnroot/epydoc/trunk/epydoc epydoc + cd epydoc/src + sudo python setup.py install + +The appearance of some elements can be changed in the epydoc.css +style sheet. + +Emphasized text appearance can be controlled by the definition of the +tag. 
For instance, to make them bold, insert:: + + em {font-weight: bold;} + +The variables' types are in a span of class rst-classifier, hence can be +changed by inserting something like:: + + span.rst-classifier {font-weight: normal;} + +The first line of the signature should **not** copy the signature unless +the function is written in C, in which case it is mandatory. If the function +signature is generic (uses ``*args`` or ``**kwds``), then a function signature +may be included. + +Use optional in the "type" field for parameters that are non-keyword +optional for C-functions. + +Epydoc depends on Docutils for reStructuredText parsing. You can +download Docutils from the `Docutils sourceforge +page. `_. The version in SVN is +broken, so use 0.4 or the patched version from Debian. You may also +be able to use a package manager like yum to install it:: + + $ sudo yum install python-docutils + + +Example +------- +Here is a short example module, +`plain text `_ +or +`rendered `_ in HTML. + +To try this yourself, simply download the example.py:: + + svn co http://svn.scipy.org/svn/numpy/trunk/numpy/doc/example.py . + +Then, run epydoc:: + + $ epydoc --docformat=restructuredtext example.py + +The output is placed in ``./html``, and may be viewed by loading the +``index.html`` file into your browser. Modified: trunk/numpy/doc/HOWTO_DOCUMENT.txt =================================================================== --- trunk/numpy/doc/HOWTO_DOCUMENT.txt 2008-05-16 09:16:36 UTC (rev 5178) +++ trunk/numpy/doc/HOWTO_DOCUMENT.txt 2008-05-16 14:56:52 UTC (rev 5179) @@ -4,111 +4,169 @@ .. Contents:: -.. Attention:: +.. Note:: - This document is slightly out of date. During the December 2007 sprint, - Travis Oliphant made some changes to the NumPy/SciPy docstring standard. - The changes are relatively minor, but the standard no longer follows the - epydoc/restructured text standards. The changes brings our docstring - standard more in line with the ETS standard; in addition, it also - conserves horizontal real-estate and arguably looks better when printed as - plain text. Unfortunately, these changes mean that currently it isn't - possible to render the docstrings as desired. Travis has committed to - writing something to render the docstrings. At that point, we will update - this document to correspond with the new standard. For now, just refer - to: `example.py - `__ + For an accompanying example, see `example.py + `_. Overview -------- In general, we follow the standard Python style conventions as described here: - * `Style Guide for C Code `__ - * `Style Guide for Python Code `__ - * `Docstring Conventions `__ + * `Style Guide for C Code `_ + * `Style Guide for Python Code `_ + * `Docstring Conventions `_ Additional PEPs of interest regarding documentation of code: - * `Docstring Processing Framework `__ - * `Docutils Design Specification `__ + * `Docstring Processing Framework `_ + * `Docutils Design Specification `_ Use a code checker: - * `pylint `__ + * `pylint `_ * `pyflakes` easy_install pyflakes - * `pep8.py `__ + * `pep8.py `_ -Common import standards:: +If you prefer the use of abbreviated module names, we suggest +the following commonly used import conventions:: import numpy as np import scipy as sp + import matplotlib as mpl + import matplotlib.pyplot as plt +It is still perfectly acceptable to use unnabreviated module names. + Docstring Standard ------------------ - A documentation string (docstring) is a string that describes a module, function, class, or method definition. 
The docstring is a special attribute of the object (``object.__doc__``) and, for consistency, is surrounded by -triple double quotes. +triple double quotes, i.e.:: -It is highly desireable that both NumPy and SciPy_ and scikits to -follow a common -convention for docstrings that provide for consistency while also -allowing epydoc_ to produce nicely-formatted reference guides. This -document describes the current community consensus for this standard. -If you have suggestions for improvements, post them on the -`numpy-discussion list`_, together with the epydoc output. + """This is the form of a docstring. -Our docstring standard uses `reST -`__ syntax and is rendered -using something like epydoc_ (+ a pre-processor which understands the -particular documentation style we are using). The markup in this -proposal is as basic as possible which still looks reasonable when the -text is just printed. In particular, it avoids too much cruft in the -reST syntax and other epydoc_-isms. + It can be spread over several lines. -The guiding principle is that human readers of the text itself are -given precedence over contorting the docstring so that epydoc_ -produces nice output. In order to improve the rendered output we -should work on making pre-processor tools to assist epydoc_ or another -similar tool, rather than making human readers conform to a particular -computer-imposed style. + """ +NumPy, SciPy_, and the scikits follow a common convention for +docstrings that provides for consistency, while also allowing our +toolchain to produce well-formatted reference guides. This document +describes the current community consensus for such a standard. If you +have suggestions for improvements, post them on the `numpy-discussion +list`_, together with the epydoc output. + +Our docstring standard uses `re-structured text (reST) +`_ syntax and is rendered +using tools like epydoc_ or sphinx_ (pre-processors that understand +the particular documentation style we are using). While a rich set of +markup is available, we limit ourselves to a very basic subset, in +order to provide docstrings that are easy to read on text-only +terminals. + +A guiding principle is that human readers of the text are given +precedence over contorting docstrings so our tools produce nice +output. Rather than sacrificing the readability of the docstrings, we +have chosen to write pre-processors to assist tools like epydoc_ or +sphinx_ in their task. + Status ------ +We are busy converting existing docstrings to the new format, +expanding them where they are lacking, as well as writing new ones for +undocumented functions. Volunteers are welcome to join the effort on +our new wiki-based documentation system (see the `Developer Zone +`_). -We are currently trying to convert existing docstrings to the new -format and write them for those that currently lack docstrings. - -We are also trying to improve the rendered output either using a -pre-processor to epydoc or another tool similar to epydoc. - Sections -------- +The sections of the docstring are: -The proposed sections of the docstring are: +1. **Short summary** -1. **Short summary:** - A one-line summary not using variable names or the function name - (unless a C-function). + A one-line summary that does not use variable names or the function + name, e.g. -2. **Extended summary:** - A few sentences giving an extended description. + :: -3. **Parameters:** + def add(a,b): + """Return the sum of two numbers. 
+ + """ + + The function signature is normally found by introspection and + displayed by the help function. For some functions (notably those + written in C) the signature is not available, so we have to specify + it as the first line of the docstring:: + + """ + add(a,b) + + Return the sum of two numbers. + + """ + +2. **Extended summary** + + A few sentences giving an extended description. This section + should be used to clarify *functionality*, not to discuss + implementation detail or background theory, which should rather be + explored in the **notes** section below. You may refer to the + parameters and the function name, but parameter descriptions still + belong in the **parameters** section. + +3. **Parameters** + Description of the function arguments, keywords and their respective types. -4. **Returns:** - Explanation of the returned values and their types. + :: -5. **Other parameters:** - An optional section used to describe little used parameters so that - functions with a large number of keyword argument can still be well - documented without cluttering the main parameters' list. + Parameters + ---------- + x : type + Description of parameter `x`. -6. **Raises:** - An optional section detailing which errors get raised under what - conditions. + Enclose variables in single back-tics. If it is not necessary to + specify a keyword argument, use ``optional``:: -7. **See also:** + x : int, optional + + Optional keyword parameters have default values, which are + displayed as part of the function signature. They can also be + detailed in the description:: + + Description of parameter `x` (the default is -1, which implies summation + over all axes). + + When a parameter can only assume one of a fixed set of values, + those values can be listed in braces :: + + x : {True, False} + Description of `x`. + +4. **Returns** + + Explanation of the returned values and their types, of the same + format as **parameters**. + +5. **Other parameters** + + An optional section used to describe infrequently used parameters. + It should only be used if a function has a large number of keyword + prameters, to prevent cluttering the **parameters** section. + +6. **Raises** + + An optional section detailing which errors get raised and under + what conditions:: + + Raises + ------ + LinAlgException + If the matrix is not numerically invertible. + +7. **See Also** + An optional section used to refer to related code. This section can be very useful, but should be used judiciously. The goal is to direct users to other functions they may not be aware of, or have @@ -116,117 +174,158 @@ example). Routines whose docstrings further explain parameters used by this function are good candidates. -8. **Notes:** + As an example, for ``numpy.mean`` we would have:: + + See Also + -------- + average : Weighted average + +8. **Notes** + An optional section that provides additional information about the code, possibly including a discussion of the algorithm. This - section may include mathematical equations, possibly written in - `LaTeX `__. + section may include mathematical equations, written in + `LaTeX `_ format:: -9. **Examples:** - An optional section for examples, using the `doctest - `__ format. It - can provide an inline mini-tutorial as well as additional - regression testing. While optional, this section is very strongly - encouraged. You can run the tests by doing:: + The FFT is a fast implementation of the discrete Fourier transform: - >>> import doctest - >>> doctest.testfile('example.py') + .. 
math:: X(e^{j\omega } ) = x(n)e^{ - j\omega n} - Blank lines are used to seperate doctests. When they occur in the - expected output, they should be replaced by ```` (see - `doctest options - `_), e.g. + Equations can also be typeset underneath the math directive:: + The discrete-time Fourier time-convolution property states that + + .. math:: + + x(n) * y(n) \Leftrightarrow X(e^{j\omega } )Y(e^{j\omega } )\\ + another equation here + + Math can furthermore be used inline, i.e. + :: - >>> print "a\n\nb" - a - - b + The value of :math:`omega` is larger than 5. -Common reST concepts --------------------- + Note that LaTeX is not particularly easy to read, so use equations + sparingly. -For paragraphs, indentation is significant and indicates indentation in the -output. New paragraphs are marked with blank line. + Images are allowed, but should not be central to the explanation; + users viewing the docstring as text must be able to comprehend its + meaning without resorting to an image viewer. These additional + illustrations are included using:: -Use *italics*, **bold**, and ``courier`` if needed in any explanations (but -not for variable names and doctest code or multi-line code) + .. image:: filename -Use ``:lm:`eqn``` for in-line math in latex format (remember to use the -raw-format for your text string or escape any '\' symbols). Use ``:m:`eqn``` -for non-latex math. + where filename is a path relative to the reference guide source + directory. -A more extensive example of reST markup can be found here: -http://docutils.sourceforge.net/docs/user/rst/demo.txt -Line spacing and indentation are significant and should -be carefully followed. +9. **References** + References cited in the **notes** section may be listed here, + e.g. if you cited the article below using the text ``[1]``, include it as + in the list as follows:: + .. [1] O. McNoleg, "The integration of GIS, remote sensing, + expert systems and adaptive co-kriging for environmental habitat + modelling of the Highland Haggis using object-oriented, fuzzy-logic + and neural-network techniques," Computers & Geosciences, vol. 22, + pp. 585-588, 1996. -Using Epydoc_ -------------- + which renders as -Currently, we recommend that you build epydoc from the trunk:: + .. [1] O. McNoleg, "The integration of GIS, remote sensing, + expert systems and adaptive co-kriging for environmental habitat + modelling of the Highland Haggis using object-oriented, fuzzy-logic + and neural-network techniques," Computers & Geosciences, vol. 22, + pp. 585-588, 1996. - svn co https://epydoc.svn.sourceforge.net/svnroot/epydoc/trunk/epydoc epydoc - cd epydoc/src - sudo python setup.py install + Referencing sources of a temporary nature, like web pages, is + discouraged. References are meant to augment the docstring, but + should not be required to understand it. Follow the `citation + format of the IEEE + `_, which + states that references are numbered, starting from one, in the + order in which they are cited. -Since we use reST-formatted docstrings instead of the epytext markup, you will -need to include the following line near the top of your module:: +10. **Examples** - __docformat__ = "restructuredtext en" + An optional section for examples, using the `doctest + `_ format. + This section is meant to illustrate usage, not to provide a + testing framework -- for that, use the ``tests/`` directory. + While optional, this section is very strongly encouraged. 
You can + run these examples by doing:: -The appearance of some elements can be changed in the epydoc.css -style sheet. + >>> import doctest + >>> doctest.testfile('example.py') -Emphasized text appearance can be controlled by the definition of the -tag. For instance, to make them bold, insert:: + or, using nose, - em {font-weight: bold;} + :: -The variables' types are in a span of class rst-classifier, hence can be -changed by inserting something like:: + $ nosetests --with-doctest example.py - span.rst-classifier {font-weight: normal;} + Blank lines are used to seperate doctests. When they occur in the + expected output, they should be replaced by ```` (see + `doctest options + `_ for other such + special strings), e.g. -The first line of the signature should **not** copy the signature unless -the function is written in C, in which case it is mandatory. If the function -signature is generic (uses ``*args`` or ``**kwds``), then a function signature -may be included. + :: -Use optional in the "type" field for parameters that are non-keyword -optional for C-functions. + >>> print "a\n\nb" + a + + b -Epydoc depends on Docutils for reStructuredText parsing. You can download -Docutils from the -`Docutils sourceforge page. `__ -You may also be able to use a package manager like yum to install a -current version:: +11. **Indexing tags*** - $ sudo yum install python-docutils + Each function needs to be categorised for indexing purposes. Use + the ``index`` directive:: + .. index:: + :refguide: ufunc, trigonometry -Example -------- + To index a function as a sub-category of a class, separate index + entries by a semi-colon, e.g. -Here is a short example module, -`plain text `__ -or -`rendered `__ in HTML. + :: -To try this yourself, simply download the example.py:: + :refguide: ufunc, numpy;reshape, other - svn co http://svn.scipy.org/svn/numpy/trunk/numpy/doc/example.py . + A `list of available categories + `_ is + available. -Then, run epydoc:: - $ epydoc example.py +Common reST concepts +-------------------- +For paragraphs, indentation is significant and indicates indentation in the +output. New paragraphs are marked with a blank line. -The output is placed in ``./html``, and may be viewed by loading the -``index.html`` file into your browser. +Use *italics*, **bold**, and ``courier`` if needed in any explanations +(but not for variable names and doctest code or multi-line code). +Variable, module and class names should be written between single +backticks (```numpy```). +A more extensive example of reST markup can be found in `this example +document `_; +the `quick reference +`_ is +useful while editing. + +Line spacing and indentation are significant and should be carefully +followed. + +Conclusion +---------- + +`An example +`_ of the +format shown here is available. Refer to `How to Build API/Reference +Documentation `_ on how to use epydoc_ or sphinx_ to +construct a manual and web page. + This document itself was written in ReStructuredText, and may be converted to HTML using:: @@ -235,3 +334,4 @@ .. _SciPy: http://www.scipy.org .. _numpy-discussion list: http://www.scipy.org/Mailing_Lists .. _epydoc: http://epydoc.sourceforge.net/ +.. 
_sphinx: http://sphinx.pocoo.org From numpy-svn at scipy.org Fri May 16 11:13:45 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Fri, 16 May 2008 10:13:45 -0500 (CDT) Subject: [Numpy-svn] r5180 - trunk/numpy/doc Message-ID: <20080516151345.7E74939C0B6@scipy.org> Author: stefan Date: 2008-05-16 10:13:32 -0500 (Fri, 16 May 2008) New Revision: 5180 Modified: trunk/numpy/doc/HOWTO_DOCUMENT.txt trunk/numpy/doc/example.py Log: Update example. Fix reference. Modified: trunk/numpy/doc/HOWTO_DOCUMENT.txt =================================================================== --- trunk/numpy/doc/HOWTO_DOCUMENT.txt 2008-05-16 14:56:52 UTC (rev 5179) +++ trunk/numpy/doc/HOWTO_DOCUMENT.txt 2008-05-16 15:13:32 UTC (rev 5180) @@ -222,8 +222,8 @@ 9. **References** References cited in the **notes** section may be listed here, - e.g. if you cited the article below using the text ``[1]``, include it as - in the list as follows:: + e.g. if you cited the article below using the text ``[1]_``, + include it as in the list as follows:: .. [1] O. McNoleg, "The integration of GIS, remote sensing, expert systems and adaptive co-kriging for environmental habitat Modified: trunk/numpy/doc/example.py =================================================================== --- trunk/numpy/doc/example.py 2008-05-16 14:56:52 UTC (rev 5179) +++ trunk/numpy/doc/example.py 2008-05-16 15:13:32 UTC (rev 5180) @@ -8,10 +8,6 @@ a line by itself, preferably preceeded by a blank line. """ -# Make sure this line is here such that epydoc 3 can parse the docstrings for -# auto-generated documentation. -__docformat__ = "restructuredtext en" - import os # standard library imports first import numpy as np # related third party imports next @@ -19,6 +15,8 @@ import matplotlib as mpl # imports should usually be on separate lines import matplotlib.pyplot as plt +from my_module import my_func, other_func + def foo(var1, var2, long_var_name='hi') : """One-line summary or signature. @@ -53,6 +51,11 @@ common_parametrs_listed_above : type Explanation + Raises + ------ + BadException + Because you shouldn't have done that. + See Also -------- otherfunc : relationship (optional) @@ -64,34 +67,37 @@ This can have multiple paragraphs as can all sections. - Examples - -------- - examples in doctest format + You may include some math: - >>> a=[1,2,3] - >>> [x + 3 for x in a] - [4, 5, 6] + .. math:: X(e^{j\omega } ) = x(n)e^{ - j\omega n} - """ + And even use a greek symbol like :math:`omega` inline. - pass + References + ---------- + Cite the relevant literature, e.g. [1]_. You may also cite these + references in the notes section above. + .. [1] O. McNoleg, "The integration of GIS, remote sensing, + expert systems and adaptive co-kriging for environmental habitat + modelling of the Highland Haggis using object-oriented, fuzzy-logic + and neural-network techniques," Computers & Geosciences, vol. 22, + pp. 585-588, 1996. -def newfunc() : - """Do nothing. + Examples + -------- + These are written in doctest format, and should illustrate how to + use the function. - I never saw a purple cow. + >>> a=[1,2,3] + >>> print [x + 3 for x in a] + [4, 5, 6] + >>> print "a\n\nb" + a + + b """ pass - -def otherfunc() : - """Do nothing. - - I never hope to see one. - - """ - - pass From numpy-svn at scipy.org Fri May 16 11:45:04 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Fri, 16 May 2008 10:45:04 -0500 (CDT) Subject: [Numpy-svn] r5181 - in trunk/numpy/ma: . 
tests Message-ID: <20080516154504.D5C5C39C769@scipy.org> Author: pierregm Date: 2008-05-16 10:45:01 -0500 (Fri, 16 May 2008) New Revision: 5181 Modified: trunk/numpy/ma/extras.py trunk/numpy/ma/mrecords.py trunk/numpy/ma/tests/test_mrecords.py Log: mrecords : fixed fromarrays when importing only one record extras : fixed a bug in the naming convention Modified: trunk/numpy/ma/extras.py =================================================================== --- trunk/numpy/ma/extras.py 2008-05-16 15:13:32 UTC (rev 5180) +++ trunk/numpy/ma/extras.py 2008-05-16 15:45:01 UTC (rev 5181) @@ -106,7 +106,7 @@ if len(args)==1: x = args[0] if isinstance(x, ndarray): - _d = func(nxasarray(x), **params) + _d = func(np.asarray(x), **params) _m = func(getmaskarray(x), **params) return masked_array(_d, mask=_m) elif isinstance(x, tuple) or isinstance(x, list): @@ -222,7 +222,7 @@ outshape[axis] = res.shape dtypes.append(asarray(res).dtype) outshape = flatten_inplace(outshape) - outarr = zeros(outshape, object_) + outarr = zeros(outshape, object) outarr[tuple(flatten_inplace(j.tolist()))] = res k = 1 while k < Ntot: @@ -252,17 +252,17 @@ Parameters ---------- - axis : int, optional - Axis along which to perform the operation. - If None, applies to a flattened version of the array. - weights : sequence, optional - Sequence of weights. - The weights must have the shape of a, or be 1D with length - the size of a along the given axis. - If no weights are given, weights are assumed to be 1. - returned : bool - Flag indicating whether a tuple (result, sum of weights/counts) - should be returned as output (True), or just the result (False). + axis : {None,int}, optional + Axis along which to perform the operation. + If None, applies to a flattened version of the array. + weights : {None, sequence}, optional + Sequence of weights. + The weights must have the shape of a, or be 1D with length + the size of a along the given axis. + If no weights are given, weights are assumed to be 1. + returned : {False, True}, optional + Flag indicating whether a tuple (result, sum of weights/counts) + should be returned as output (True), or just the result (False). """ a = asarray(a) @@ -374,12 +374,10 @@ Axis along which the medians are computed. The default is to compute the median along the first dimension. axis=None returns the median of the flattened array - - out : ndarray, optional + out : {None, ndarray}, optional Alternative output array in which to place the result. It must have the same shape and buffer length as the expected output but the type will be cast if necessary. - overwrite_input : {False, True}, optional If True, then allow use of memory of input array (a) for calculations. The input array will be modified by the call to @@ -419,17 +417,19 @@ # if overwrite_input: if axis is None: - sorted = a.ravel() - sorted.sort() + asorted = a.ravel() + asorted.sort() else: a.sort(axis=axis) - sorted = a + asorted = a else: - sorted = sort(a, axis=axis) + asorted = sort(a, axis=axis) if axis is None: - result = _median1D(sorted) + result = _median1D(asorted) else: - result = apply_along_axis(_median1D, axis, sorted) + result = apply_along_axis(_median1D, axis, asorted) + if out is not None: + out = result return result Modified: trunk/numpy/ma/mrecords.py =================================================================== --- trunk/numpy/ma/mrecords.py 2008-05-16 15:13:32 UTC (rev 5180) +++ trunk/numpy/ma/mrecords.py 2008-05-16 15:45:01 UTC (rev 5181) @@ -546,24 +546,18 @@ A list of (masked) arrays. 
Each element of the sequence is first converted to a masked array if needed. If a 2D array is passed as argument, it is processed line by line - dtype : numeric.dtype + dtype : {None, dtype}, optional Data type descriptor. - shape : integer + shape : {None, integer}, optional Number of records. If None, shape is defined from the shape of the first array in the list. - formats : sequence + formats : {None, sequence}, optional Sequence of formats for each individual field. If None, the formats will be autodetected by inspecting the fields and selecting the highest dtype possible. - names : sequence + names : {None, sequence}, optional Sequence of the names of each field. - titles : sequence - (Description to write) - aligned : boolean - (Description to write, not used anyway) - byteorder: boolean - (Description to write, not used anyway) - fill_value : sequence + fill_value : {None, sequence}, optional Sequence of data to be used as filling values. Notes @@ -571,12 +565,12 @@ Lists of tuples should be preferred over lists of lists for faster processing. """ datalist = [getdata(x) for x in arraylist] - masklist = [getmaskarray(x) for x in arraylist] + masklist = [np.atleast_1d(getmaskarray(x)) for x in arraylist] _array = recfromarrays(datalist, dtype=dtype, shape=shape, formats=formats, names=names, titles=titles, aligned=aligned, byteorder=byteorder).view(mrecarray) - _array._fieldmask[:] = zip(*masklist) + _array._fieldmask.flat = zip(*masklist) if fill_value is not None: _array.fill_value = fill_value return _array @@ -590,30 +584,24 @@ Parameters ---------- - arraylist : sequence - A list of (masked) arrays. Each element of the sequence is first converted + reclist : sequence + A list of records. Each element of the sequence is first converted to a masked array if needed. If a 2D array is passed as argument, it is processed line by line - dtype : numeric.dtype + dtype : {None, dtype}, optional Data type descriptor. - shape : integer + shape : {None,int}, optional Number of records. If None, ``shape`` is defined from the shape of the first array in the list. - formats : sequence + formats : {None, sequence}, optional Sequence of formats for each individual field. If None, the formats will be autodetected by inspecting the fields and selecting the highest dtype possible. - names : sequence + names : {None, sequence}, optional Sequence of the names of each field. - titles : sequence - (Description to write) - aligned : boolean - (Description to write, not used anyway) - byteorder: boolean - (Description to write, not used anyway) - fill_value : sequence + fill_value : {None, sequence}, optional Sequence of data to be used as filling values. - mask : sequence or boolean. + mask : {nomask, sequence}, optional. External mask to apply on the data. *Notes*: @@ -703,20 +691,21 @@ varnames=None, vartypes=None): """Creates a mrecarray from data stored in the file `filename`. -*Parameters* : + Parameters + ---------- filename : {file name/handle} Handle of an opened file. - delimitor : {string} + delimitor : {None, string}, optional Alphanumeric character used to separate columns in the file. If None, any (group of) white spacestring(s) will be used. - commentchar : {string} + commentchar : {'#', string}, optional Alphanumeric character used to mark the start of a comment. - missingchar` : {string} + missingchar : {'', string}, optional String indicating missing data, and used to create the masks. - varnames : {sequence} + varnames : {None, sequence}, optional Sequence of the variable names. 
If None, a list will be created from the first non empty line of the file. - vartypes : {sequence} + vartypes : {None, sequence}, optional Sequence of the variables dtypes. If None, it will be estimated from the first non-commented line. Modified: trunk/numpy/ma/tests/test_mrecords.py =================================================================== --- trunk/numpy/ma/tests/test_mrecords.py 2008-05-16 15:13:32 UTC (rev 5180) +++ trunk/numpy/ma/tests/test_mrecords.py 2008-05-16 15:45:01 UTC (rev 5181) @@ -294,6 +294,10 @@ (mrec, nrec, _) = self.data for (f,l) in zip(('a','b','c'),(_a,_b,_c)): assert_equal(getattr(mrec,f)._mask, l._mask) + # One record only + _x = ma.array([1,1.1,'one'], mask=[1,0,0],) + assert_equal_records(fromarrays(_x, dtype=mrec.dtype), mrec[0]) + def test_fromrecords(self): From numpy-svn at scipy.org Fri May 16 14:10:52 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Fri, 16 May 2008 13:10:52 -0500 (CDT) Subject: [Numpy-svn] r5182 - trunk/numpy/doc Message-ID: <20080516181052.53D5C39C7EE@scipy.org> Author: stefan Date: 2008-05-16 13:10:28 -0500 (Fri, 16 May 2008) New Revision: 5182 Modified: trunk/numpy/doc/HOWTO_DOCUMENT.txt trunk/numpy/doc/example.py Log: Update documentation format and example. Modified: trunk/numpy/doc/HOWTO_DOCUMENT.txt =================================================================== --- trunk/numpy/doc/HOWTO_DOCUMENT.txt 2008-05-16 15:45:01 UTC (rev 5181) +++ trunk/numpy/doc/HOWTO_DOCUMENT.txt 2008-05-16 18:10:28 UTC (rev 5182) @@ -25,16 +25,15 @@ * `pyflakes` easy_install pyflakes * `pep8.py `_ -If you prefer the use of abbreviated module names, we suggest -the following commonly used import conventions:: +For documentation purposes, use unabbreviated module names. If you +prefer the use of abbreviated module names in code (*not* the +docstrings), we suggest the import conventions used by NumPy itself:: import numpy as np import scipy as sp import matplotlib as mpl import matplotlib.pyplot as plt -It is still perfectly acceptable to use unnabreviated module names. - Docstring Standard ------------------ A documentation string (docstring) is a string that describes a module, @@ -89,7 +88,7 @@ :: def add(a,b): - """Return the sum of two numbers. + """The sum of two numbers. """ @@ -101,7 +100,7 @@ """ add(a,b) - Return the sum of two numbers. + The sum of two numbers. """ Modified: trunk/numpy/doc/example.py =================================================================== --- trunk/numpy/doc/example.py 2008-05-16 15:45:01 UTC (rev 5181) +++ trunk/numpy/doc/example.py 2008-05-16 18:10:28 UTC (rev 5182) @@ -8,8 +8,30 @@ a line by itself, preferably preceeded by a blank line. """ -import os # standard library imports first +import os # standard library imports first +# Do NOT import using *, e.g. from numpy import * +# +# Import the module using +# +# import numpy +# +# instead or import individual functions as needed, e.g +# +# from numpy import array, zeros +# +# If you prefer the use of abbreviated module names, we suggest the +# convention used by NumPy itself:: +# +# import numpy as np +# import scipy as sp +# import matplotlib as mpl +# import matplotlib.pyplot as plt +# +# These abbreviated names are not to be used in docstrings; users must +# be able to paste and execute docstrings after importing only the +# numpy module itself, unabbreviated. 
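(A quick way to verify the point above -- a sketch for illustration only, not part of the committed example.py -- is to paste a docstring example into a fresh interpreter where only the unabbreviated module has been imported::

    >>> import numpy
    >>> numpy.asarray([1, 2, 3]).sum()
    6

Had the example been written against the ``np`` abbreviation instead, the paste would fail with a NameError.)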
+ import numpy as np # related third party imports next import scipy as sp # imports should be at the top of the module import matplotlib as mpl # imports should usually be on separate lines @@ -18,7 +40,8 @@ from my_module import my_func, other_func def foo(var1, var2, long_var_name='hi') : - """One-line summary or signature. + """A one-line summary that does not use variable names or the + function name. Several sentences providing an extended description. You can put text in mono-spaced type like so: ``var``. @@ -27,28 +50,31 @@ ---------- var1 : array_like Array_like means all those objects -- lists, nested lists, etc. -- - that can be converted to an array. - var2 : integer - Write out the full type - long_variable_name : {'hi', 'ho'}, optional + that can be converted to an array. We can also refer to + variables like `var1`. + var2 : int + The type above can either refer to an actual Python type + (e.g. ``int``), or describe the type of the variable in more + detail, e.g. ``(N,) ndarray`` or ``array_like``. + Long_variable_name : {'hi', 'ho'}, optional Choices in brackets, default first when optional. Returns ------- - named : type + describe : type Explanation - list + output Explanation - of + tuple Explanation - outputs + items even more explaining Other Parameters ---------------- only_seldom_used_keywords : type Explanation - common_parametrs_listed_above : type + common_parameters_listed_above : type Explanation Raises @@ -65,7 +91,7 @@ ----- Notes about the implementation algorithm (if needed). - This can have multiple paragraphs as can all sections. + This can have multiple paragraphs. You may include some math: From numpy-svn at scipy.org Fri May 16 14:14:02 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Fri, 16 May 2008 13:14:02 -0500 (CDT) Subject: [Numpy-svn] r5183 - trunk/numpy/doc Message-ID: <20080516181402.2675539C7EE@scipy.org> Author: stefan Date: 2008-05-16 13:13:44 -0500 (Fri, 16 May 2008) New Revision: 5183 Modified: trunk/numpy/doc/example.py Log: Minor update to example docstring. Modified: trunk/numpy/doc/example.py =================================================================== --- trunk/numpy/doc/example.py 2008-05-16 18:10:28 UTC (rev 5182) +++ trunk/numpy/doc/example.py 2008-05-16 18:13:44 UTC (rev 5183) @@ -22,29 +22,24 @@ # # If you prefer the use of abbreviated module names, we suggest the # convention used by NumPy itself:: -# -# import numpy as np -# import scipy as sp -# import matplotlib as mpl -# import matplotlib.pyplot as plt -# + +import numpy as np +import scipy as sp +import matplotlib as mpl +import matplotlib.pyplot as plt + # These abbreviated names are not to be used in docstrings; users must # be able to paste and execute docstrings after importing only the # numpy module itself, unabbreviated. -import numpy as np # related third party imports next -import scipy as sp # imports should be at the top of the module -import matplotlib as mpl # imports should usually be on separate lines -import matplotlib.pyplot as plt - from my_module import my_func, other_func def foo(var1, var2, long_var_name='hi') : """A one-line summary that does not use variable names or the function name. - Several sentences providing an extended description. You can put - text in mono-spaced type like so: ``var``. + Several sentences providing an extended description. Refer to + variables using back-ticks, e.g. `var`. 
Parameters ---------- From numpy-svn at scipy.org Sat May 17 23:49:26 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sat, 17 May 2008 22:49:26 -0500 (CDT) Subject: [Numpy-svn] r5184 - tags Message-ID: <20080518034926.864B039C05F@scipy.org> Author: jarrod.millman Date: 2008-05-17 22:49:17 -0500 (Sat, 17 May 2008) New Revision: 5184 Added: tags/0.1.1rc1/ Log: creating release candidate Copied: tags/0.1.1rc1 (from rev 5183, trunk) From numpy-svn at scipy.org Sat May 17 23:51:13 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sat, 17 May 2008 22:51:13 -0500 (CDT) Subject: [Numpy-svn] r5185 - tags Message-ID: <20080518035113.3071439C05F@scipy.org> Author: jarrod.millman Date: 2008-05-17 22:51:01 -0500 (Sat, 17 May 2008) New Revision: 5185 Added: tags/1.1.0rc1/ Log: creating release candidate Copied: tags/1.1.0rc1 (from rev 5184, trunk) From numpy-svn at scipy.org Sat May 17 23:51:31 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sat, 17 May 2008 22:51:31 -0500 (CDT) Subject: [Numpy-svn] r5186 - tags Message-ID: <20080518035131.A3D8139C05F@scipy.org> Author: jarrod.millman Date: 2008-05-17 22:51:28 -0500 (Sat, 17 May 2008) New Revision: 5186 Removed: tags/0.1.1rc1/ Log: removing (typo) From numpy-svn at scipy.org Sat May 17 23:56:22 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sat, 17 May 2008 22:56:22 -0500 (CDT) Subject: [Numpy-svn] r5187 - tags/1.1.0rc1/numpy Message-ID: <20080518035622.6FA6639C05F@scipy.org> Author: jarrod.millman Date: 2008-05-17 22:56:19 -0500 (Sat, 17 May 2008) New Revision: 5187 Modified: tags/1.1.0rc1/numpy/version.py Log: changing version for release candidate Modified: tags/1.1.0rc1/numpy/version.py =================================================================== --- tags/1.1.0rc1/numpy/version.py 2008-05-18 03:51:28 UTC (rev 5186) +++ tags/1.1.0rc1/numpy/version.py 2008-05-18 03:56:19 UTC (rev 5187) @@ -1,5 +1,5 @@ -version='1.1.0' -release=False +version='1.1.0rc1' +release=True if not release: version += '.dev' From numpy-svn at scipy.org Sun May 18 04:37:28 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sun, 18 May 2008 03:37:28 -0500 (CDT) Subject: [Numpy-svn] r5188 - trunk/numpy/distutils/command Message-ID: <20080518083728.4108039C14B@scipy.org> Author: cdavid Date: 2008-05-18 03:37:24 -0500 (Sun, 18 May 2008) New Revision: 5188 Modified: trunk/numpy/distutils/command/scons.py Log: Add --package-list to scons command, to speed-up no-op when working on scipy. Modified: trunk/numpy/distutils/command/scons.py =================================================================== --- trunk/numpy/distutils/command/scons.py 2008-05-18 03:56:19 UTC (rev 5187) +++ trunk/numpy/distutils/command/scons.py 2008-05-18 08:37:24 UTC (rev 5188) @@ -173,6 +173,29 @@ # already quoted path, for example). return '"' + path + '"' +def parse_package_list(pkglist): + return pkglist.split(",") + +def find_common(seq1, seq2): + """Given two list, return the index of the common items. + + The index are relative to seq1. + + Note: do not handle duplicate items.""" + dict2 = dict([(i, None) for i in seq2]) + + return [i for i in range(len(seq1)) if dict2.has_key(seq1[i])] + +def select_packages(sconspkg, pkglist): + """Given a list of packages in pkglist, return the list of packages which + match this list.""" + common = find_common(sconspkg, pkglist) + if not len(common) == len(pkglist): + msg = "the package list contains a package not found in "\ + "the current list. 
The current list is %s" % sconspkg + raise ValueError(msg) + return common + class scons(old_build_ext): # XXX: add an option to the scons command for configuration (auto/force/cache). description = "Scons builder" @@ -182,7 +205,10 @@ ('scons-tool-path=', None, 'specify additional path '\ '(absolute) to look for scons tools'), ('silent=', None, 'specify whether scons output should less verbose'\ - '(1), silent (2), super silent (3) or not (0, default)')] + '(1), silent (2), super silent (3) or not (0, default)'), + ('package-list=', None, 'If specified, only run scons on the given '\ + 'packages (example: --package-list=scipy.cluster). If empty, '\ + 'no package is built')] def initialize_options(self): old_build_ext.initialize_options(self) @@ -197,6 +223,8 @@ self.scons_compiler_path = None self.scons_fcompiler = None + self.package_list = None + def finalize_options(self): old_build_ext.finalize_options(self) if self.distribution.has_scons_scripts(): @@ -260,6 +288,9 @@ cxxcompiler.customize_cmd(self) self.cxxcompiler = cxxcompiler.cxx_compiler() #print self.cxxcompiler.compiler_cxx[0] + + if self.package_list: + self.package_list = parse_package_list(self.package_list) def run(self): if len(self.sconscripts) > 0: @@ -283,9 +314,21 @@ scons_exec = get_python_exec_invoc() scons_exec += ' ' + protect_path(pjoin(get_scons_local_path(), 'scons.py')) - for sconscript, pre_hook, post_hook, pkg_name in zip(self.sconscripts, - self.pre_hooks, self.post_hooks, - self.pkg_names): + if self.package_list is not None: + id = select_packages(self.pkg_names, self.package_list) + sconscripts = [self.sconscripts[i] for i in id] + pre_hooks = [self.pre_hooks[i] for i in id] + post_hooks = [self.post_hooks[i] for i in id] + pkg_names = [self.pkg_names[i] for i in id] + else: + sconscripts = self.sconscripts + pre_hooks = self.pre_hooks + post_hooks = self.post_hooks + pkg_names = self.pkg_names + + for sconscript, pre_hook, post_hook, pkg_name in zip(sconscripts, + pre_hooks, post_hooks, + pkg_names): if pre_hook: pre_hook() From numpy-svn at scipy.org Sun May 18 09:24:47 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sun, 18 May 2008 08:24:47 -0500 (CDT) Subject: [Numpy-svn] r5189 - trunk Message-ID: <20080518132447.2932339C148@scipy.org> Author: cdavid Date: 2008-05-18 08:24:43 -0500 (Sun, 18 May 2008) New Revision: 5189 Modified: trunk/site.cfg.example Log: Put one example for mkl 10.0 in site.cfg.example. Modified: trunk/site.cfg.example =================================================================== --- trunk/site.cfg.example 2008-05-18 08:37:24 UTC (rev 5188) +++ trunk/site.cfg.example 2008-05-18 13:24:43 UTC (rev 5189) @@ -127,3 +127,9 @@ # [mkl] # library_dirs = /opt/intel/mkl/9.1.023/lib/32/ # lapack_libs = mkl_lapack +# +# For 10.*, on 32 bits machines: +# [mkl] +# library_dirs = /opt/intel/mkl/10.0.1.014/lib/32/ +# lapack_libs = mkl_lapack +# mkl_libs = mkl, guide From numpy-svn at scipy.org Mon May 19 06:45:57 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Mon, 19 May 2008 05:45:57 -0500 (CDT) Subject: [Numpy-svn] r5190 - in trunk/numpy: . core lib Message-ID: <20080519104557.B778139C633@scipy.org> Author: stefan Date: 2008-05-19 05:45:14 -0500 (Mon, 19 May 2008) New Revision: 5190 Modified: trunk/numpy/add_newdocs.py trunk/numpy/core/fromnumeric.py trunk/numpy/lib/arraysetops.py trunk/numpy/lib/financial.py trunk/numpy/lib/function_base.py trunk/numpy/lib/index_tricks.py Log: Merge documentation changes from wiki. 
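(An aside on the --package-list helpers added in r5188 above: they can be sanity-checked in isolation. A minimal sketch, reproducing the pure-Python find_common helper from that diff under Python 2, with an illustrative package list::

    >>> def find_common(seq1, seq2):
    ...     dict2 = dict([(i, None) for i in seq2])
    ...     return [i for i in range(len(seq1)) if dict2.has_key(seq1[i])]
    >>> pkg_names = ['scipy.cluster', 'scipy.fftpack', 'scipy.linalg']
    >>> find_common(pkg_names, ['scipy.fftpack'])
    [1]

select_packages then uses these indices to run only the matching SConscript files, which is what speeds up no-op builds.)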
Modified: trunk/numpy/add_newdocs.py =================================================================== --- trunk/numpy/add_newdocs.py 2008-05-18 13:24:43 UTC (rev 5189) +++ trunk/numpy/add_newdocs.py 2008-05-19 10:45:14 UTC (rev 5190) @@ -569,28 +569,33 @@ add_newdoc('numpy.core.multiarray', 'ndarray', - """An array object represents a multidimensional, homogeneous array + """ + An array object represents a multidimensional, homogeneous array of fixed-size items. An associated data-type-descriptor object details the data-type in an array (including byteorder and any - fields). An array can be constructed using the numpy.array + fields). An array can be constructed using the `numpy.array` command. Arrays are sequence, mapping and numeric objects. More information is available in the numpy module and by looking at the methods and attributes of an array. - ndarray.__new__(subtype, shape=, dtype=float, buffer=None, - offset=0, strides=None, order=None) + :: - There are two modes of creating an array using __new__: - 1) If buffer is None, then only shape, dtype, and order - are used - 2) If buffer is an object exporting the buffer interface, then - all keywords are interpreted. - The dtype parameter can be any object that can be interpreted - as a numpy.dtype object. + ndarray.__new__(subtype, shape=, dtype=float, buffer=None, + offset=0, strides=None, order=None) - No __init__ method is needed because the array is fully - initialized after the __new__ method. + There are two modes of creating an array using __new__: + 1. If buffer is None, then only shape, dtype, and order + are used + 2. If buffer is an object exporting the buffer interface, then + all keywords are interpreted. + + The dtype parameter can be any object that can be interpreted as + a numpy.dtype object. + + No __init__ method is needed because the array is fully initialized + after the __new__ method. + """) @@ -1109,7 +1114,8 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('cumprod', - """a.cumprod(axis=None, dtype=None, out=None) + """ + a.cumprod(axis=None, dtype=None, out=None) Return the cumulative product of the elements along the given axis. @@ -1120,7 +1126,7 @@ ---------- axis : {None, -1, int}, optional Axis along which the product is computed. The default - (``axis``= None) is to compute over the flattened array. + (`axis` = None) is to compute over the flattened array. dtype : {None, dtype}, optional Determines the type of the returned array and of the accumulator where the elements are multiplied. If dtype has the value None and @@ -1158,7 +1164,7 @@ ---------- axis : {None, -1, int}, optional Axis along which the sum is computed. The default - (``axis``= None) is to compute over the flattened array. + (`axis` = None) is to compute over the flattened array. dtype : {None, dtype}, optional Determines the type of the returned array and of the accumulator where the elements are summed. If dtype has the value None and @@ -1185,7 +1191,7 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('diagonal', - """a.diagonal(offset=0, axis1=0, axis2=1) -> diagonals + """a.diagonal(offset=0, axis1=0, axis2=1) If a is 2-d, return the diagonal of self with the given offset, i.e., the collection of elements of the form a[i,i+offset]. If a is n-d with n > 2, @@ -1233,7 +1239,7 @@ >>> a array([[[0, 1], [2, 3]], - + [[4, 5], [6, 7]]]) >>> a.diagonal(0,-2,-1) @@ -1410,7 +1416,7 @@ the indices of the non-zero elements in that dimension. The corresponding non-zero values can be obtained with:: - a[a.nonzero()]. 
+ a[a.nonzero()] To group the indices by element, rather than dimension, use:: @@ -1647,7 +1653,6 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('reshape', """a.reshape(shape, order='C') - a.reshape(*shape, order='C') Returns an array containing the data of a, but with a new shape. @@ -1967,13 +1972,13 @@ >>> x array([[[0, 1], [2, 3]], - + [[4, 5], [6, 7]]]) >>> x.swapaxes(0,2) array([[[0, 4], [2, 6]], - + [[1, 5], [3, 7]]]) Modified: trunk/numpy/core/fromnumeric.py =================================================================== --- trunk/numpy/core/fromnumeric.py 2008-05-18 13:24:43 UTC (rev 5189) +++ trunk/numpy/core/fromnumeric.py 2008-05-19 10:45:14 UTC (rev 5190) @@ -257,7 +257,7 @@ def swapaxes(a, axis1, axis2): """Return a view of array a with axis1 and axis2 interchanged. - + Parameters ---------- a : array_like @@ -369,13 +369,13 @@ order. The three available algorithms have the following properties: - =========== ======= ============= ============ ======= + =========== ======= ============= ============ ======= kind speed worst case work space stable - =========== ======= ============= ============ ======= - 'quicksort' 1 O(n^2) 0 no - 'mergesort' 2 O(n*log(n)) ~n/2 yes - 'heapsort' 3 O(n*log(n)) 0 no - =========== ======= ============= ============ ======= + =========== ======= ============= ============ ======= + 'quicksort' 1 O(n^2) 0 no + 'mergesort' 2 O(n*log(n)) ~n/2 yes + 'heapsort' 3 O(n*log(n)) 0 no + =========== ======= ============= ============ ======= All the sort algorithms make temporary copies of the data when the sort is not along the last axis. Consequently, sorts along @@ -880,7 +880,7 @@ [3]]) >>> np.compress([0,1,1], a) array([2, 3]) - + """ try: compress = a.compress @@ -1051,22 +1051,59 @@ return prod(axis, dtype, out) -def sometrue (a, axis=None, out=None): - """Check if any of the elements of `a` are true. +def sometrue(a, axis=None, out=None): + """ + Assert whether some values are true. - Performs a logical_or over the given axis and returns the result + `sometrue` performs a logical_or over the given axis. Parameters ---------- - a : {array_like} - Array on which to operate + a : array_like + Array on which to operate. axis : {None, integer} Axis to perform the operation over. + If `None` (default), perform over flattened array. + out : {None, array}, optional + Array into which the product can be placed. Its type is preserved + and it must be of the right shape to hold the output. + + See Also + -------- + ndarray.any : equivalent method + + Examples + -------- + >>> b = numpy.array([True, False, True, True]) + >>> numpy.sometrue(b) + True + >>> a = numpy.array([1, 5, 2, 7]) + >>> numpy.sometrue(a >= 5) + True + + """ + try: + any = a.any + except AttributeError: + return _wrapit(a, 'any', axis, out) + return any(axis, out) + + +def alltrue (a, axis=None, out=None): + """Check if all of the elements of `a` are true. + + Performs a logical_and over the given axis and returns the result + + Parameters + ---------- + a : array_like + axis : {None, integer} + Axis to perform the operation over. If None, perform over flattened array. out : {None, array}, optional Array into which the product can be placed. Its type is preserved and it must be of the right shape to hold the output. - + See Also -------- ndarray.any : equivalent method @@ -1086,14 +1123,14 @@ Parameters ---------- - a : {array_like} + a : array_like axis : {None, integer} Axis to perform the operation over. If None, perform over flattened array. 
out : {None, array}, optional Array into which the product can be placed. Its type is preserved and it must be of the right shape to hold the output. - + See Also -------- ndarray.all : equivalent method @@ -1108,19 +1145,19 @@ def any(a,axis=None, out=None): """Check if any of the elements of `a` are true. - + Performs a logical_or over the given axis and returns the result Parameters ---------- - a : {array_like} + a : array_like axis : {None, integer} Axis to perform the operation over. If None, perform over flattened array and return a scalar. out : {None, array}, optional Array into which the product can be placed. Its type is preserved and it must be of the right shape to hold the output. - + See Also -------- ndarray.any : equivalent method @@ -1135,19 +1172,19 @@ def all(a,axis=None, out=None): """Check if all of the elements of `a` are true. - + Performs a logical_and over the given axis and returns the result Parameters ---------- - a : {array_like} + a : array_like axis : {None, integer} Axis to perform the operation over. If None, perform over flattened array and return a scalar. out : {None, array}, optional Array into which the product can be placed. Its type is preserved and it must be of the right shape to hold the output. - + See Also -------- ndarray.all : equivalent method @@ -1161,24 +1198,22 @@ def cumsum (a, axis=None, dtype=None, out=None): - """Return the cumulative sum of the elements along the given axis. + """ + Return the cumulative sum of the elements along a given axis. - The cumulative sum is calculated over the flattened array by - default, otherwise over the specified axis. - Parameters ---------- a : array-like Input array or object that can be converted to an array. axis : {None, -1, int}, optional Axis along which the sum is computed. The default - (``axis``= None) is to compute over the flattened array. + (`axis` = `None`) is to compute over the flattened array. dtype : {None, dtype}, optional - Determines the type of the returned array and of the accumulator - where the elements are summed. If dtype has the value None and - the type of a is an integer type of precision less than the default - platform integer, then the default platform integer precision is - used. Otherwise, the dtype is the same as that of a. + Type of the returned array and of the accumulator in which the + elements are summed. If `dtype` is not specified, it defaults + to the dtype of `a`, unless `a` has an integer dtype with a + precision less than that of the default platform integer. In + that case, the default platform integer is used. out : ndarray, optional Alternative output array in which to place the result. It must have the same shape and buffer length as the expected output @@ -1187,14 +1222,30 @@ Returns ------- cumsum : ndarray. - A new array holding the result is returned unless ``out`` is - specified, in which case a reference to ``out`` is returned. + A new array holding the result is returned unless `out` is + specified, in which case a reference to `out` is returned. Notes ----- Arithmetic is modular when using integer types, and no error is raised on overflow. + + Examples + -------- + >>> import numpy + >>> a=numpy.array([[1,2,3],[4,5,6]]) + >>> numpy.cumsum(a) # cumulative sum = intermediate summing results & total sum. Default axis=None results in raveling the array first. 
+ array([ 1, 3, 6, 10, 15, 21]) + >>> numpy.cumsum(a,dtype=float) # specifies type of output value(s) + array([ 1., 3., 6., 10., 15., 21.]) + >>> numpy.cumsum(a,axis=0) # sum over rows for each of the 3 columns + array([[1, 2, 3], + [5, 7, 9]]) + >>> numpy.cumsum(a,axis=1) # sum over columns for each of the 2 rows + array([[ 1, 3, 6], + [ 4, 9, 15]]) + """ try: cumsum = a.cumsum @@ -1273,7 +1324,7 @@ Alternative output array in which to place the result. Must be of the same shape and buffer length as the expected output. - Results + Returns ------- amax : array_like New array holding the result, unless ``out`` was specified. @@ -1311,7 +1362,7 @@ Alternative output array in which to place the result. Must be of the same shape and buffer length as the expected output. - Results + Returns ------- amin : array_like New array holding the result, unless ``out`` was specified. @@ -1336,7 +1387,8 @@ def alen(a): - """Return the length of a Python object interpreted as an array + """ + Return the length of a Python object interpreted as an array of at least 1 dimension. Parameters @@ -1346,14 +1398,14 @@ Returns ------- alen : int - Length of the first dimension of a. + Length of the first dimension of `a`. Examples -------- - >>> z = np.zeros((7,4,5)) + >>> z = numpy.zeros((7,4,5)) >>> z.shape[0] 7 - >>> np.alen(z) + >>> numpy.alen(z) 7 """ @@ -1421,7 +1473,8 @@ def cumprod(a, axis=None, dtype=None, out=None): - """Return the cumulative product of the elements along the given axis. + """ + Return the cumulative product of the elements along the given axis. The cumulative product is taken over the flattened array by default, otherwise over the specified axis. @@ -1432,13 +1485,13 @@ Input array or object that can be converted to an array. axis : {None, -1, int}, optional Axis along which the product is computed. The default - (``axis``= None) is to compute over the flattened array. + (`axis` = `None`) is to compute over the flattened array. dtype : {None, dtype}, optional - Determines the type of the returned array and of the accumulator - where the elements are multiplied. If dtype has the value None and - the type of a is an integer type of precision less than the default + Type of the returned array and of the accumulator + where the elements are multiplied. If `dtype` has the value `None` and + the type of `a` is an integer type of precision less than the default platform integer, then the default platform integer precision is - used. Otherwise, the dtype is the same as that of a. + used. Otherwise, the `dtype` is the same as that of `a`. out : ndarray, optional Alternative output array in which to place the result. It must have the same shape and buffer length as the expected output @@ -1447,7 +1500,7 @@ Returns ------- cumprod : ndarray. - A new array holding the result is returned unless out is + A new array holding the result is returned unless `out` is specified, in which case a reference to out is returned. Notes @@ -1455,6 +1508,25 @@ Arithmetic is modular when using integer types, and no error is raised on overflow. + Examples + -------- + >>> a=numpy.array([[1,2,3],[4,5,6]]) + >>> a=numpy.array([1,2,3]) + >>> numpy.cumprod(a) # intermediate results 1, 1*2 + ... # total product 1*2*3 = 6 + array([1, 2, 6]) + >>> a=numpy.array([[1,2,3],[4,5,6]]) + >>> numpy.cumprod(a,dtype=float) # specify type of output + array([ 1., 2., 6., 24., 120., 720.]) + >>> numpy.cumprod(a,axis=0) # for each of the 3 columns: + ... 
# product and intermediate results + array([[ 1, 2, 3], + [ 4, 10, 18]]) + >>> numpy.cumprod(a,axis=1) # for each of the two rows: + ... # product and intermediate results + array([[ 1, 2, 6], + [ 4, 20, 120]]) + """ try: cumprod = a.cumprod Modified: trunk/numpy/lib/arraysetops.py =================================================================== --- trunk/numpy/lib/arraysetops.py 2008-05-18 13:24:43 UTC (rev 5189) +++ trunk/numpy/lib/arraysetops.py 2008-05-19 10:45:14 UTC (rev 5190) @@ -38,23 +38,26 @@ import time import numpy as nm -def ediff1d(ary, to_end = None, to_begin = None): +def ediff1d(ary, to_end=None, to_begin=None): """The differences between consecutive elements of an array, possibly with prefixed and/or appended values. - :Parameters: - - `ary` : array + Parameters + ---------- + ary : array This array will be flattened before the difference is taken. - - `to_end` : number, optional + to_end : number, optional If provided, this number will be tacked onto the end of the returned differences. - - `to_begin` : number, optional + to_begin : number, optional If provided, this number will be taked onto the beginning of the returned differences. - :Returns: - - `ed` : array + Returns + ------- + ed : array The differences. Loosely, this will be (ary[1:] - ary[:-1]). + """ ary = nm.asarray(ary).flat ed = ary[1:] - ary[:-1] @@ -77,22 +80,26 @@ Most of the other array set operations operate on the unique arrays generated by this function. - :Parameters: - - `ar1` : array + Parameters + ---------- + ar1 : array This array will be flattened if it is not already 1D. - - `return_index` : bool, optional + return_index : bool, optional If True, also return the indices against ar1 that result in the unique array. - :Returns: - - `unique` : array + Returns + ------- + unique : array The unique values. - - `unique_indices` : int array, optional + unique_indices : int array, optional The indices of the unique values. Only provided if return_index is True. - :See also: - numpy.lib.arraysetops has a number of other functions for performing set - operations on arrays. + See Also + -------- + numpy.lib.arraysetops : Module with a number of other functions + for performing set operations on arrays. + """ ar = nm.asarray(ar1).flatten() if ar.size == 0: @@ -110,66 +117,78 @@ flag = nm.concatenate( ([True], ar[1:] != ar[:-1]) ) return ar[flag] -def intersect1d( ar1, ar2 ): +def intersect1d(ar1, ar2): """Intersection of 1D arrays with unique elements. Use unique1d() to generate arrays with only unique elements to use as inputs to this function. Alternatively, use intersect1d_nu() which will find the unique values for you. - :Parameters: - - `ar1` : array - - `ar2` : array + Parameters + ---------- + ar1 : array + ar2 : array - :Returns: - - `intersection` : array + Returns + ------- + intersection : array - :See also: - numpy.lib.arraysetops has a number of other functions for performing set - operations on arrays. + See Also + -------- + numpy.lib.arraysetops : Module with a number of other functions for + performing set operations on arrays. + """ aux = nm.concatenate((ar1,ar2)) aux.sort() return aux[aux[1:] == aux[:-1]] -def intersect1d_nu( ar1, ar2 ): +def intersect1d_nu(ar1, ar2): """Intersection of 1D arrays with any elements. The input arrays do not have unique elements like intersect1d() requires. 
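(To make the distinction concrete -- an illustrative sketch against the arraysetops helpers of this vintage, not text from the module itself::

    >>> from numpy.lib.arraysetops import unique1d, intersect1d, intersect1d_nu
    >>> intersect1d(unique1d([1, 2, 2, 3]), unique1d([2, 3, 4]))
    array([2, 3])
    >>> intersect1d_nu([1, 2, 2, 3], [2, 3, 4])
    array([2, 3])

intersect1d assumes its inputs are already unique, while intersect1d_nu calls unique1d on them first.)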
- :Parameters: - - `ar1` : array - - `ar2` : array + Parameters + ---------- + ar1 : array + ar2 : array - :Returns: - - `intersection` : array + Returns + ------- + intersection : array - :See also: - numpy.lib.arraysetops has a number of other functions for performing set - operations on arrays. + See Also + -------- + numpy.lib.arraysetops : Module with a number of other functions for + performing set operations on arrays. + """ # Might be faster than unique1d( intersect1d( ar1, ar2 ) )? aux = nm.concatenate((unique1d(ar1), unique1d(ar2))) aux.sort() return aux[aux[1:] == aux[:-1]] -def setxor1d( ar1, ar2 ): +def setxor1d(ar1, ar2): """Set exclusive-or of 1D arrays with unique elements. Use unique1d() to generate arrays with only unique elements to use as inputs to this function. - :Parameters: - - `ar1` : array - - `ar2` : array + Parameters + ---------- + ar1 : array + ar2 : array - :Returns: - - `xor` : array + Returns + ------- + xor : array The values that are only in one, but not both, of the input arrays. - :See also: - numpy.lib.arraysetops has a number of other functions for performing set - operations on arrays. + See Also + -------- + numpy.lib.arraysetops : Module with a number of other functions for + performing set operations on arrays. + """ aux = nm.concatenate((ar1, ar2)) if aux.size == 0: @@ -182,24 +201,28 @@ flag2 = flag[1:] == flag[:-1] return aux[flag2] -def setmember1d( ar1, ar2 ): +def setmember1d(ar1, ar2): """Return a boolean array of shape of ar1 containing True where the elements of ar1 are in ar2 and False otherwise. Use unique1d() to generate arrays with only unique elements to use as inputs to this function. - :Parameters: - - `ar1` : array - - `ar2` : array + Parameters + ---------- + ar1 : array + ar2 : array - :Returns: - - `mask` : bool array + Returns + ------- + mask : bool array The values ar1[mask] are in ar2. - :See also: - numpy.lib.arraysetops has a number of other functions for performing set - operations on arrays. + See Also + -------- + numpy.lib.arraysetops : Module with a number of other functions for + performing set operations on arrays. + """ ar1 = nm.asarray( ar1 ) ar2 = nm.asarray( ar2 ) @@ -225,42 +248,51 @@ return flag[indx] -def union1d( ar1, ar2 ): - """Union of 1D arrays with unique elements. +def union1d(ar1, ar2): + """ + Union of 1D arrays with unique elements. Use unique1d() to generate arrays with only unique elements to use as inputs to this function. - :Parameters: - - `ar1` : array - - `ar2` : array + Parameters + ---------- + ar1 : array + ar2 : array - :Returns: - - `union` : array + Returns + ------- + union : array - :See also: - numpy.lib.arraysetops has a number of other functions for performing set - operations on arrays. + See also + -------- + numpy.lib.arraysetops : Module with a number of other functions for + performing set operations on arrays. + """ return unique1d( nm.concatenate( (ar1, ar2) ) ) -def setdiff1d( ar1, ar2 ): +def setdiff1d(ar1, ar2): """Set difference of 1D arrays with unique elements. Use unique1d() to generate arrays with only unique elements to use as inputs to this function. - :Parameters: - - `ar1` : array - - `ar2` : array + Parameters + ---------- + ar1 : array + ar2 : array - :Returns: - - `difference` : array + Returns + ------- + difference : array The values in ar1 that are not in ar2. - :See also: - numpy.lib.arraysetops has a number of other functions for performing set - operations on arrays. 
+ See Also + -------- + numpy.lib.arraysetops : Module with a number of other functions for + performing set operations on arrays. + """ aux = setmember1d(ar1,ar2) if aux.size == 0: Modified: trunk/numpy/lib/financial.py =================================================================== --- trunk/numpy/lib/financial.py 2008-05-18 13:24:43 UTC (rev 5189) +++ trunk/numpy/lib/financial.py 2008-05-19 10:45:14 UTC (rev 5190) @@ -88,8 +88,8 @@ fact = np.where(rate==zer, nper+zer, (1+rate*when)*(temp-1)/rate+zer) return -(fv + pv*temp) / fact pmt.__doc__ += eqstr + """ -Example -------- +Examples +-------- What would the monthly payment need to be to pay off a $200,000 loan in 15 years at an annual interest rate of 7.5%? @@ -116,8 +116,8 @@ zer = np.zeros(miter.shape) return np.where(rate==zer, A+zer, B+zer) + 0.0 nper.__doc__ += eqstr + """ -Example -------- +Examples +-------- If you only had $150 to spend as payment, how long would it take to pay-off a loan of $8,000 at 7% annual interest? Modified: trunk/numpy/lib/function_base.py =================================================================== --- trunk/numpy/lib/function_base.py 2008-05-18 13:24:43 UTC (rev 5189) +++ trunk/numpy/lib/function_base.py 2008-05-19 10:45:14 UTC (rev 5190) @@ -809,8 +809,17 @@ def angle(z, deg=0): - """Return the angle of the complex argument z. """ + Return the angle of the complex argument z. + + Examples + -------- + >>> numpy.angle(1+1j) # in radians + 0.78539816339744828 + >>> numpy.angle(1+1j,deg=True) # in degrees + 45.0 + + """ if deg: fact = 180/pi else: @@ -889,11 +898,12 @@ from sets import Set as set def unique(x): - """Return sorted unique items from an array or sequence. + """ + Return sorted unique items from an array or sequence. Examples -------- - >>> unique([5,2,4,0,4,4,2,2,1]) + >>> numpy.unique([5,2,4,0,4,4,2,2,1]) array([0, 1, 2, 4, 5]) """ @@ -1187,8 +1197,88 @@ return 0.42-0.5*cos(2.0*pi*n/(M-1)) + 0.08*cos(4.0*pi*n/(M-1)) def bartlett(M): - """bartlett(M) returns the M-point Bartlett window. """ + Return the Bartlett window. + + The Bartlett window is very similar to a triangular window, except + that the end points are at zero. It is often used in signal + processing for tapering a signal, without generating too much + ripple in the frequency domain. + + Parameters + ---------- + M : int + Number of points in the output window. If zero or less, an + empty array is returned. + + Returns + ------- + out : array + The triangular window, normalized to one (the value one + appears only if the number of samples is odd), with the first + and last samples equal to zero. + + See Also + -------- + blackman, hamming, hanning, kaiser + + Notes + ----- + The Bartlett window is defined as + + .. math:: w(n) = \frac{2}{M-1} (\frac{M-1}{2} - |n - \frac{M-1}{2}|) + + Most references to the Bartlett window come from the signal + processing literature, where it is used as one of many windowing + functions for smoothing values. Note that convolution with this + window produces linear interpolation. It is also known as an + apodization (which means"removing the foot", i.e. smoothing + discontinuities at the beginning and end of the sampled signal) or + tapering function. + + References + ---------- + .. [1] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra", + Biometrika 37, 1-16, 1950. + .. [2] A.V. Oppenheim and R.W. Schafer, "Discrete-Time Signal + Processing", Prentice-Hall, 1999, pp. 468-471. + .. [3] Wikipedia, "Window function", + http://en.wikipedia.org/wiki/Window_function + .. 
[4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling, + "Numerical Recipes", Cambridge University Press, 1986, page 429. + + Examples + -------- + >>> from numpy import bartlett + >>> bartlett(12) + array([ 0. , 0.18181818, 0.36363636, 0.54545455, 0.72727273, + 0.90909091, 0.90909091, 0.72727273, 0.54545455, 0.36363636, + 0.18181818, 0. ]) + + # Plot the window and the frequency response of it. + >>> from numpy import clip, log10, array, bartlett + >>> from scipy.fftpack import fft + >>> from matplotlib import pyplot as plt + + >>> window = bartlett(51) + >>> plt.plot(window) + >>> plt.title("Bartlett window") + >>> plt.ylabel("Amplitude") + >>> plt.xlabel("Sample") + >>> plt.show() + + >>> A = fft(window, 2048) / 25.5 + >>> mag = abs(fftshift(A)) + >>> freq = linspace(-0.5,0.5,len(A)) + >>> response = 20*log10(mag) + >>> response = clip(response,-100,100) + >>> plt.plot(freq, response) + >>> plt.title("Frequency response of Bartlett window") + >>> plt.ylabel("Magnitude [dB]") + >>> plt.xlabel("Normalized frequency [cycles per sample]") + >>> plt.axis('tight'); plt.show() + + """ if M < 1: return array([]) if M == 1: Modified: trunk/numpy/lib/index_tricks.py =================================================================== --- trunk/numpy/lib/index_tricks.py 2008-05-18 13:24:43 UTC (rev 5189) +++ trunk/numpy/lib/index_tricks.py 2008-05-19 10:45:14 UTC (rev 5190) @@ -83,48 +83,48 @@ return tuple(out) class nd_grid(object): - """ Construct a "meshgrid" in N-dimensions. + """ + Construct a multi-dimensional "meshgrid". - grid = nd_grid() creates an instance which will return a mesh-grid - when indexed. The dimension and number of the output arrays are equal - to the number of indexing dimensions. If the step length is not a - complex number, then the stop is not inclusive. + grid = nd_grid() creates an instance which will return a mesh-grid + when indexed. The dimension and number of the output arrays are equal + to the number of indexing dimensions. If the step length is not a + complex number, then the stop is not inclusive. - However, if the step length is a COMPLEX NUMBER (e.g. 5j), then the - integer part of it's magnitude is interpreted as specifying the - number of points to create between the start and stop values, where - the stop value IS INCLUSIVE. + However, if the step length is a **complex number** (e.g. 5j), then the + integer part of it's magnitude is interpreted as specifying the + number of points to create between the start and stop values, where + the stop value **is inclusive**. - If instantiated with an argument of sparse=True, the mesh-grid is - open (or not fleshed out) so that only one-dimension of each returned - argument is greater than 1 + If instantiated with an argument of sparse=True, the mesh-grid is + open (or not fleshed out) so that only one-dimension of each returned + argument is greater than 1 - Example: + Examples + -------- + >>> mgrid = nd_grid() + >>> mgrid[0:5,0:5] + array([[[0, 0, 0, 0, 0], + [1, 1, 1, 1, 1], + [2, 2, 2, 2, 2], + [3, 3, 3, 3, 3], + [4, 4, 4, 4, 4]], + + [[0, 1, 2, 3, 4], + [0, 1, 2, 3, 4], + [0, 1, 2, 3, 4], + [0, 1, 2, 3, 4], + [0, 1, 2, 3, 4]]]) + >>> mgrid[-1:1:5j] + array([-1. , -0.5, 0. , 0.5, 1. 
]) + >>> ogrid = nd_grid(sparse=True) + >>> ogrid[0:5,0:5] + [array([[0], + [1], + [2], + [3], + [4]]), array([[0, 1, 2, 3, 4]])] - >>> mgrid = nd_grid() - >>> mgrid[0:5,0:5] - array([[[0, 0, 0, 0, 0], - [1, 1, 1, 1, 1], - [2, 2, 2, 2, 2], - [3, 3, 3, 3, 3], - [4, 4, 4, 4, 4]], - - [[0, 1, 2, 3, 4], - [0, 1, 2, 3, 4], - [0, 1, 2, 3, 4], - [0, 1, 2, 3, 4], - [0, 1, 2, 3, 4]]]) - >>> mgrid[-1:1:5j] - array([-1. , -0.5, 0. , 0.5, 1. ]) - - >>> ogrid = nd_grid(sparse=True) - >>> ogrid[0:5,0:5] - [array([[0], - [1], - [2], - [3], - [4]]), array([[0, 1, 2, 3, 4]])] - """ def __init__(self, sparse=False): self.sparse = sparse From numpy-svn at scipy.org Mon May 19 11:26:58 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Mon, 19 May 2008 10:26:58 -0500 (CDT) Subject: [Numpy-svn] r5191 - trunk/numpy/doc Message-ID: <20080519152658.C126F39C8E0@scipy.org> Author: stefan Date: 2008-05-19 10:26:37 -0500 (Mon, 19 May 2008) New Revision: 5191 Modified: trunk/numpy/doc/HOWTO_DOCUMENT.txt Log: Fix typo in indexing tag. Modified: trunk/numpy/doc/HOWTO_DOCUMENT.txt =================================================================== --- trunk/numpy/doc/HOWTO_DOCUMENT.txt 2008-05-19 10:45:14 UTC (rev 5190) +++ trunk/numpy/doc/HOWTO_DOCUMENT.txt 2008-05-19 15:26:37 UTC (rev 5191) @@ -283,14 +283,14 @@ the ``index`` directive:: .. index:: - :refguide: ufunc, trigonometry + refguide: ufunc, trigonometry To index a function as a sub-category of a class, separate index entries by a semi-colon, e.g. :: - :refguide: ufunc, numpy;reshape, other + refguide: ufunc, numpy;reshape, other A `list of available categories `_ is From numpy-svn at scipy.org Mon May 19 11:30:22 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Mon, 19 May 2008 10:30:22 -0500 (CDT) Subject: [Numpy-svn] r5192 - trunk/numpy/doc Message-ID: <20080519153022.8B97939C8F9@scipy.org> Author: stefan Date: 2008-05-19 10:30:03 -0500 (Mon, 19 May 2008) New Revision: 5192 Modified: trunk/numpy/doc/HOWTO_DOCUMENT.txt Log: Indexing tag was correct. Revert. Modified: trunk/numpy/doc/HOWTO_DOCUMENT.txt =================================================================== --- trunk/numpy/doc/HOWTO_DOCUMENT.txt 2008-05-19 15:26:37 UTC (rev 5191) +++ trunk/numpy/doc/HOWTO_DOCUMENT.txt 2008-05-19 15:30:03 UTC (rev 5192) @@ -283,14 +283,14 @@ the ``index`` directive:: .. index:: - refguide: ufunc, trigonometry + :refguide: ufunc, trigonometry To index a function as a sub-category of a class, separate index entries by a semi-colon, e.g. :: - refguide: ufunc, numpy;reshape, other + :refguide: ufunc, numpy;reshape, other A `list of available categories `_ is From numpy-svn at scipy.org Mon May 19 15:52:25 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Mon, 19 May 2008 14:52:25 -0500 (CDT) Subject: [Numpy-svn] r5193 - trunk/numpy/core Message-ID: <20080519195225.CF43A39C018@scipy.org> Author: stefan Date: 2008-05-19 14:52:04 -0500 (Mon, 19 May 2008) New Revision: 5193 Modified: trunk/numpy/core/fromnumeric.py Log: Remove duplicate `alltrue`. Modified: trunk/numpy/core/fromnumeric.py =================================================================== --- trunk/numpy/core/fromnumeric.py 2008-05-19 15:30:03 UTC (rev 5192) +++ trunk/numpy/core/fromnumeric.py 2008-05-19 19:52:04 UTC (rev 5193) @@ -1116,33 +1116,6 @@ return any(axis, out) -def alltrue (a, axis=None, out=None): - """Check if all of the elements of `a` are true. 
- - Performs a logical_and over the given axis and returns the result - - Parameters - ---------- - a : array_like - axis : {None, integer} - Axis to perform the operation over. - If None, perform over flattened array. - out : {None, array}, optional - Array into which the product can be placed. Its type is preserved - and it must be of the right shape to hold the output. - - See Also - -------- - ndarray.all : equivalent method - - """ - try: - all = a.all - except AttributeError: - return _wrapit(a, 'all', axis, out) - return all(axis, out) - - def any(a,axis=None, out=None): """Check if any of the elements of `a` are true. From numpy-svn at scipy.org Mon May 19 15:53:32 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Mon, 19 May 2008 14:53:32 -0500 (CDT) Subject: [Numpy-svn] r5194 - trunk/numpy/lib Message-ID: <20080519195332.832BC39C018@scipy.org> Author: stefan Date: 2008-05-19 14:53:13 -0500 (Mon, 19 May 2008) New Revision: 5194 Modified: trunk/numpy/lib/function_base.py Log: Fix math in Bartlett docstring. Modified: trunk/numpy/lib/function_base.py =================================================================== --- trunk/numpy/lib/function_base.py 2008-05-19 19:52:04 UTC (rev 5193) +++ trunk/numpy/lib/function_base.py 2008-05-19 19:53:13 UTC (rev 5194) @@ -1226,7 +1226,7 @@ ----- The Bartlett window is defined as - .. math:: w(n) = \frac{2}{M-1} (\frac{M-1}{2} - |n - \frac{M-1}{2}|) + .. math:: w(n) = \\frac{2}{M-1} (\\frac{M-1}{2} - |n - \\frac{M-1}{2}|) Most references to the Bartlett window come from the signal processing literature, where it is used as one of many windowing From numpy-svn at scipy.org Mon May 19 16:26:07 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Mon, 19 May 2008 15:26:07 -0500 (CDT) Subject: [Numpy-svn] r5195 - trunk/numpy/core Message-ID: <20080519202607.4BC7E39C8EE@scipy.org> Author: ptvirtan Date: 2008-05-19 15:25:50 -0500 (Mon, 19 May 2008) New Revision: 5195 Modified: trunk/numpy/core/fromnumeric.py Log: Fix accidentally replaced (r5190, r5193) alltrue code. Diff shows that fromnumeric.py has only docstrings changes from r5189 now. Modified: trunk/numpy/core/fromnumeric.py =================================================================== --- trunk/numpy/core/fromnumeric.py 2008-05-19 19:53:13 UTC (rev 5194) +++ trunk/numpy/core/fromnumeric.py 2008-05-19 20:25:50 UTC (rev 5195) @@ -1106,14 +1106,14 @@ See Also -------- - ndarray.any : equivalent method + ndarray.all : equivalent method """ try: - any = a.any + all = a.all except AttributeError: - return _wrapit(a, 'any', axis, out) - return any(axis, out) + return _wrapit(a, 'all', axis, out) + return all(axis, out) def any(a,axis=None, out=None): From numpy-svn at scipy.org Mon May 19 16:40:56 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Mon, 19 May 2008 15:40:56 -0500 (CDT) Subject: [Numpy-svn] r5196 - trunk/numpy/core/tests Message-ID: <20080519204056.9665F39C8F8@scipy.org> Author: rkern Date: 2008-05-19 15:40:56 -0500 (Mon, 19 May 2008) New Revision: 5196 Modified: trunk/numpy/core/tests/test_multiarray.py Log: Use endian-specific dtypes in tests which construct cross-type views. 
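Why the test has to pin the byte order (r5196): reinterpreting four int8 fields as one 32-bit integer gives a number that depends on which end of the word the platform reads first, so a bare np.int32 only matches the hard-coded expected values on little-endian machines. A quick sketch of the difference, using the same record layout as the test; the session is illustrative, and the integers follow directly from the byte layout:

>>> import numpy as np
>>> x = np.array([(1,2,3,4),(5,6,7,8)],
...              dtype=[('r',np.int8),('g',np.int8),('b',np.int8),('a',np.int8)])
>>> [int(v) for v in x.view('<i4')]     # little-endian reading of the bytes
[67305985, 134678021]
>>> int(x.view('>i4')[0])               # big-endian reading of the same bytes
16909060
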
Modified: trunk/numpy/core/tests/test_multiarray.py =================================================================== --- trunk/numpy/core/tests/test_multiarray.py 2008-05-19 20:25:50 UTC (rev 5195) +++ trunk/numpy/core/tests/test_multiarray.py 2008-05-19 20:40:56 UTC (rev 5196) @@ -837,7 +837,8 @@ def test_basic(self): x = np.array([(1,2,3,4),(5,6,7,8)],dtype=[('r',np.int8),('g',np.int8), ('b',np.int8),('a',np.int8)]) - y = x.view(dtype=np.int32) + # We must be specific about the endianness here: + y = x.view(dtype=' Author: pearu Date: 2008-05-19 17:10:36 -0500 (Mon, 19 May 2008) New Revision: 5198 Modified: trunk/numpy/f2py/cfuncs.py trunk/numpy/f2py/crackfortran.py Log: f2py: Allow expressions that contain max/min calls, be used as dimension specifications. Defined macros min/max that are needed when --lower is used. Typical usage case: real a(min(1,n)). Modified: trunk/numpy/f2py/cfuncs.py =================================================================== --- trunk/numpy/f2py/cfuncs.py 2008-05-19 21:42:16 UTC (rev 5197) +++ trunk/numpy/f2py/cfuncs.py 2008-05-19 22:10:36 UTC (rev 5198) @@ -228,6 +228,8 @@ \tfprintf(stderr,\"\\n\"); """ cppmacros['MINMAX']="""\ +#define max(a,b) ((a > b) ? (a) : (b)) +#define min(a,b) ((a < b) ? (a) : (b)) #ifndef MAX #define MAX(a,b) ((a > b) ? (a) : (b)) #endif Modified: trunk/numpy/f2py/crackfortran.py =================================================================== --- trunk/numpy/f2py/crackfortran.py 2008-05-19 21:42:16 UTC (rev 5197) +++ trunk/numpy/f2py/crackfortran.py 2008-05-19 22:10:36 UTC (rev 5198) @@ -208,6 +208,7 @@ 'struct','static','register','new','break','do','goto','switch', 'continue','else','inline','extern','delete','const','auto', 'len','rank','shape','index','slen','size','_i', + 'max', 'min', 'flen','fshape', 'string','complex_double','float_double','stdin','stderr','stdout', 'type','default']: @@ -1732,7 +1733,7 @@ r = eval(e,g,l) if type(r) in [type(0),type(0.0)]: return r - raise ValueError,'r=%r' % (r) + raise ValueError('r=%r' % (r)) getlincoef_re_1 = re.compile(r'\A\b\w+\b\Z',re.I) def getlincoef(e,xset): # e = a*x+b ; x in xset @@ -1745,6 +1746,9 @@ len_e = len(e) for x in xset: if len(x)>len_e: continue + if re.search(r'\w\s*\([^)]*\b'+x+r'\b', e): + # skip function calls having x as an argument, e.g max(1, x) + continue re_1 = re.compile(r'(?P.*?)\b'+x+r'\b(?P.*)',re.I) m = re_1.match(e) if m: @@ -1764,7 +1768,13 @@ ee = '%s(%s)%s'%(m1.group('before'),0.5,m1.group('after')) m1 = re_1.match(ee) c = myeval(ee,{},{}) - if (a*0.5+b==c): + # computing another point to be sure that expression is linear + m1 = re_1.match(e) + while m1: + ee = '%s(%s)%s'%(m1.group('before'),1.5,m1.group('after')) + m1 = re_1.match(ee) + c2 = myeval(ee,{},{}) + if (a*0.5+b==c and a*1.5+b==c2): return a,b,x except: pass break From numpy-svn at scipy.org Mon May 19 23:11:41 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Mon, 19 May 2008 22:11:41 -0500 (CDT) Subject: [Numpy-svn] r5200 - in trunk: . tools tools/win32build Message-ID: <20080520031141.27EDF39CA47@scipy.org> Author: cdavid Date: 2008-05-19 22:11:36 -0500 (Mon, 19 May 2008) New Revision: 5200 Added: trunk/tools/ trunk/tools/win32build/ trunk/tools/win32build/build.py Log: Add a tools directory, to hold various things related to build, etc... Add a python script to build win32 binaries. 
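A side note on the f2py change in r5198 above: the second sample point added to crackfortran's linearity check matters because some non-linear expressions happen to agree with the guessed a*x+b form at a single probe. A rough Python rendering of the idea (illustration only, not the committed getlincoef code; looks_linear is a made-up name):

def looks_linear(expr, name):
    # Probe the expression at a few points and accept it as a*x + b only if a
    # third and fourth point agree with the coefficients guessed from the first two.
    ev = lambda v: eval(expr, {}, {name: v})
    b = ev(0)
    a = ev(1) - b
    return ev(0.5) == a*0.5 + b and ev(1.5) == a*1.5 + b

>>> looks_linear('2*n + 1', 'n')
True
>>> looks_linear('min(1, n)', 'n')   # agrees at 0.5 but not at 1.5
False
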
Added: trunk/tools/win32build/build.py =================================================================== --- trunk/tools/win32build/build.py 2008-05-19 22:57:04 UTC (rev 5199) +++ trunk/tools/win32build/build.py 2008-05-20 03:11:36 UTC (rev 5200) @@ -0,0 +1,74 @@ +import sys +from subprocess import call +import os +import shutil + +PYEXECS = {"2.5" : "C:\python25\python.exe", + "2.4" : "C:\python24\python2.4.exe"} + +_SSE3_CFG = """[atlas] +library_dirs = C:\local\lib\yop\sse3""" +_SSE2_CFG = """[atlas] +library_dirs = C:\local\lib\yop\sse2""" +_NOSSE_CFG = """[DEFAULT] +library_dirs = C:\local\lib\yop\nosse""" + +SITECFG = {"sse2" : _SSE2_CFG, "sse3" : _SSE3_CFG, "nosse" : _NOSSE_CFG} + +def get_python_exec(ver): + """Return the executable of python for the given version.""" + # XXX Check that the file actually exists + try: + return PYEXECS[ver] + except KeyError: + raise ValueError("Version %s not supported/recognized" % ver) + +def get_windist_name(ver): + pass + +def get_clean(): + if os.path.exists("build"): + shutil.rmtree("build") + if os.path.exists("dist"): + shutil.rmtree("dist") + +def write_site_cfg(arch): + if os.path.exists("site.cfg"): + os.remove("site.cfg") + f = open("site.cfg", 'w') + f.writelines(SITECFG[arch]) + f.close() + +def build(arch, pyver): + get_clean() + write_site_cfg(arch) + + cmd = "%s setup.py build -c mingw32 bdist_wininst" % get_python_exec(pyver) + call(cmd, shell = True, + + +def get_numpy_version(): + import __builtin__ + __builtin__.__NUMPY_SETUP__ = True + from numpy.version import version + return version + +def get_windist_exec(pyver): + """Return the name of the installer built by wininst command.""" + # Yeah, the name logic is harcoded in distutils. We have to reproduce it + # here + name = "numpy-%s.win32-%s.exe" % (get_numpy_version(), pyver) + return name + +USAGE = """build.py ARCH PYTHON_VERSION + +Example: build.py sse2 2.4.""" + +if __name__ == '__main__': + if len(sys.argv) < 3: + raise ValueError(Usage) + sys.exit(-1) + + arch = sys.argv[1] + pyver = sys.argv[2] + build(arch, pyver) From numpy-svn at scipy.org Mon May 19 23:33:32 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Mon, 19 May 2008 22:33:32 -0500 (CDT) Subject: [Numpy-svn] r5201 - trunk/tools/win32build Message-ID: <20080520033332.9262139C15A@scipy.org> Author: cdavid Date: 2008-05-19 22:33:24 -0500 (Mon, 19 May 2008) New Revision: 5201 Modified: trunk/tools/win32build/build.py Log: - win32 build script is now functional. Still does not handle svn version, though - Fix indentation on the win32 build script. Modified: trunk/tools/win32build/build.py =================================================================== --- trunk/tools/win32build/build.py 2008-05-20 03:11:36 UTC (rev 5200) +++ trunk/tools/win32build/build.py 2008-05-20 03:33:24 UTC (rev 5201) @@ -1,74 +1,98 @@ +"""Python script to build windows binaries to be fed to the "superpack". 
+ +The script is pretty dumb: it assumes python executables are installed the +standard way, and the location for blas/lapack/atlas is harcoded.""" + +# TODO: +# - integrate the x86analysis script to check built binaries +# - make the config configurable with a file import sys -from subprocess import call +import subprocess import os import shutil PYEXECS = {"2.5" : "C:\python25\python.exe", - "2.4" : "C:\python24\python2.4.exe"} + "2.4" : "C:\python24\python2.4.exe"} -_SSE3_CFG = """[atlas] +_SSE3_CFG = r"""[atlas] library_dirs = C:\local\lib\yop\sse3""" -_SSE2_CFG = """[atlas] +_SSE2_CFG = r"""[atlas] library_dirs = C:\local\lib\yop\sse2""" -_NOSSE_CFG = """[DEFAULT] +_NOSSE_CFG = r"""[DEFAULT] library_dirs = C:\local\lib\yop\nosse""" SITECFG = {"sse2" : _SSE2_CFG, "sse3" : _SSE3_CFG, "nosse" : _NOSSE_CFG} def get_python_exec(ver): - """Return the executable of python for the given version.""" - # XXX Check that the file actually exists - try: - return PYEXECS[ver] - except KeyError: - raise ValueError("Version %s not supported/recognized" % ver) + """Return the executable of python for the given version.""" + # XXX Check that the file actually exists + try: + return PYEXECS[ver] + except KeyError: + raise ValueError("Version %s not supported/recognized" % ver) -def get_windist_name(ver): - pass - def get_clean(): - if os.path.exists("build"): - shutil.rmtree("build") - if os.path.exists("dist"): - shutil.rmtree("dist") + if os.path.exists("build"): + shutil.rmtree("build") + if os.path.exists("dist"): + shutil.rmtree("dist") def write_site_cfg(arch): - if os.path.exists("site.cfg"): - os.remove("site.cfg") - f = open("site.cfg", 'w') - f.writelines(SITECFG[arch]) - f.close() + if os.path.exists("site.cfg"): + os.remove("site.cfg") + f = open("site.cfg", 'w') + f.writelines(SITECFG[arch]) + f.close() def build(arch, pyver): - get_clean() - write_site_cfg(arch) + print "Building numpy binary for python %s, arch is %s" % (get_python_exec(pyver), arch) + get_clean() + write_site_cfg(arch) - cmd = "%s setup.py build -c mingw32 bdist_wininst" % get_python_exec(pyver) - call(cmd, shell = True, + cmd = "%s setup.py build -c mingw32 bdist_wininst" % get_python_exec(pyver) + build_log = "build-%s-%s.log" % (arch, pyver) + f = open(build_log, 'w') + try: + try: + print "Executing command %s" % cmd + subprocess.check_call(cmd, shell = True, stderr = subprocess.STDOUT, stdout = f) + finally: + f.close() + except subprocess.CalledProcessError, e: + msg = """ +There was an error while executing the following command: + %s + +Error was : %s + +Look at the build log (%s).""" % (cmd, str(e), build_log) + raise Exception(msg) + def get_numpy_version(): - import __builtin__ - __builtin__.__NUMPY_SETUP__ = True - from numpy.version import version - return version + import __builtin__ + __builtin__.__NUMPY_SETUP__ = True + from numpy.version import version + return version def get_windist_exec(pyver): - """Return the name of the installer built by wininst command.""" - # Yeah, the name logic is harcoded in distutils. We have to reproduce it - # here - name = "numpy-%s.win32-%s.exe" % (get_numpy_version(), pyver) - return name + """Return the name of the installer built by wininst command.""" + # Yeah, the name logic is harcoded in distutils. 
We have to reproduce it + # here + name = "numpy-%s.win32-%s.exe" % (get_numpy_version(), pyver) + return name USAGE = """build.py ARCH PYTHON_VERSION Example: build.py sse2 2.4.""" if __name__ == '__main__': - if len(sys.argv) < 3: - raise ValueError(Usage) - sys.exit(-1) + if len(sys.argv) < 3: + raise ValueError(Usage) + sys.exit(-1) - arch = sys.argv[1] - pyver = sys.argv[2] - build(arch, pyver) + #arch = sys.argv[1] + pyver = sys.argv[2] + for arch in SITECFG.keys(): + build(arch, pyver) From numpy-svn at scipy.org Tue May 20 00:03:21 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Mon, 19 May 2008 23:03:21 -0500 (CDT) Subject: [Numpy-svn] r5202 - trunk/tools/win32build Message-ID: <20080520040321.A369C39C15A@scipy.org> Author: cdavid Date: 2008-05-19 23:03:17 -0500 (Mon, 19 May 2008) New Revision: 5202 Modified: trunk/tools/win32build/build.py Log: Update build script. Modified: trunk/tools/win32build/build.py =================================================================== --- trunk/tools/win32build/build.py 2008-05-20 03:33:24 UTC (rev 5201) +++ trunk/tools/win32build/build.py 2008-05-20 04:03:17 UTC (rev 5202) @@ -55,7 +55,6 @@ try: try: - print "Executing command %s" % cmd subprocess.check_call(cmd, shell = True, stderr = subprocess.STDOUT, stdout = f) finally: f.close() @@ -70,17 +69,29 @@ Look at the build log (%s).""" % (cmd, str(e), build_log) raise Exception(msg) + move_binary(arch, pyver) + +def move_binary(arch, pyver): + if not os.path.exists("binaries"): + os.makedirs("binaries") + + shutil.move(os.path.join('dist', get_windist_exec(pyver)), + os.path.join("binaries", get_binary_name(arch))) + def get_numpy_version(): import __builtin__ __builtin__.__NUMPY_SETUP__ = True from numpy.version import version return version +def get_binary_name(arch): + return "numpy-%s-%s.exe" % (get_numpy_version(), arch) + def get_windist_exec(pyver): """Return the name of the installer built by wininst command.""" # Yeah, the name logic is harcoded in distutils. We have to reproduce it # here - name = "numpy-%s.win32-%s.exe" % (get_numpy_version(), pyver) + name = "numpy-%s.win32-py%s.exe" % (get_numpy_version(), pyver) return name USAGE = """build.py ARCH PYTHON_VERSION @@ -89,10 +100,11 @@ if __name__ == '__main__': if len(sys.argv) < 3: - raise ValueError(Usage) + raise ValueError(USAGE) sys.exit(-1) - #arch = sys.argv[1] + arch = sys.argv[1] pyver = sys.argv[2] + #build(arch, pyver) for arch in SITECFG.keys(): build(arch, pyver) From numpy-svn at scipy.org Tue May 20 04:10:56 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Tue, 20 May 2008 03:10:56 -0500 (CDT) Subject: [Numpy-svn] r5203 - branches Message-ID: <20080520081056.C520239CA6A@scipy.org> Author: cdavid Date: 2008-05-20 03:10:47 -0500 (Tue, 20 May 2008) New Revision: 5203 Added: branches/cdavid/ Log: Creating a personal branch to avoid cluttering the trunk during release freeze. 
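For readers following the build-script churn above (r5200-r5202): the renaming step exists because distutils' bdist_wininst hard-codes its output file name, so the script has to reconstruct that name before it can move the installer to an architecture-tagged one under binaries/. Roughly, with placeholder values for the version, Python version and arch:

>>> version, pyver, arch = '1.1.0', '2.5', 'sse2'     # placeholder values only
>>> 'numpy-%s.win32-py%s.exe' % (version, pyver)      # what bdist_wininst writes
'numpy-1.1.0.win32-py2.5.exe'
>>> 'numpy-%s-%s.exe' % (version, arch)               # what ends up in binaries/
'numpy-1.1.0-sse2.exe'
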
Copied: branches/cdavid (from rev 5202, trunk) From numpy-svn at scipy.org Tue May 20 04:12:23 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Tue, 20 May 2008 03:12:23 -0500 (CDT) Subject: [Numpy-svn] r5204 - trunk Message-ID: <20080520081223.92EEA39CA78@scipy.org> Author: cdavid Date: 2008-05-20 03:12:20 -0500 (Tue, 20 May 2008) New Revision: 5204 Modified: trunk/ Log: Initialized merge tracking via "svnmerge" with revisions "1-5203" from http://svn.scipy.org/svn/numpy/branches/cdavid Property changes on: trunk ___________________________________________________________________ Name: svnmerge-integrated - /branches/aligned_alloca:1-5127 /branches/build_with_scons:1-4676 /branches/cleanconfig_rtm:1-4677 /branches/distutils-revamp:1-2752 /branches/distutils_scons_command:1-4619 /branches/multicore:1-3687 /branches/numpy.scons:1-4484 /trunk:1-2871 + /branches/aligned_alloca:1-5127 /branches/build_with_scons:1-4676 /branches/cdavid:1-5203 /branches/cleanconfig_rtm:1-4677 /branches/distutils-revamp:1-2752 /branches/distutils_scons_command:1-4619 /branches/multicore:1-3687 /branches/numpy.scons:1-4484 /trunk:1-2871 From numpy-svn at scipy.org Tue May 20 04:14:33 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Tue, 20 May 2008 03:14:33 -0500 (CDT) Subject: [Numpy-svn] r5205 - branches/cdavid Message-ID: <20080520081433.1048139CA70@scipy.org> Author: cdavid Date: 2008-05-20 03:14:30 -0500 (Tue, 20 May 2008) New Revision: 5205 Modified: branches/cdavid/ Log: Initialized merge tracking via "svnmerge" with revisions "1-5204" from http://svn.scipy.org/svn/numpy/trunk Property changes on: branches/cdavid ___________________________________________________________________ Name: svnmerge-integrated - /branches/aligned_alloca:1-5127 /branches/build_with_scons:1-4676 /branches/cleanconfig_rtm:1-4677 /branches/distutils-revamp:1-2752 /branches/distutils_scons_command:1-4619 /branches/multicore:1-3687 /branches/numpy.scons:1-4484 /trunk:1-2871 + /branches/aligned_alloca:1-5127 /branches/build_with_scons:1-4676 /branches/cleanconfig_rtm:1-4677 /branches/distutils-revamp:1-2752 /branches/distutils_scons_command:1-4619 /branches/multicore:1-3687 /branches/numpy.scons:1-4484 /trunk:1-5204 From numpy-svn at scipy.org Tue May 20 04:17:31 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Tue, 20 May 2008 03:17:31 -0500 (CDT) Subject: [Numpy-svn] r5206 - branches/cdavid/numpy/distutils/command Message-ID: <20080520081731.7C34739C09F@scipy.org> Author: cdavid Date: 2008-05-20 03:17:27 -0500 (Tue, 20 May 2008) New Revision: 5206 Modified: branches/cdavid/numpy/distutils/command/scons.py Log: Current handling of bootstrapping is flawed: I should handle it at the distutils level, not at the scons level. This is the first step to detect bootstrapping at distutils level, and pass its state to scons through command line. Modified: branches/cdavid/numpy/distutils/command/scons.py =================================================================== --- branches/cdavid/numpy/distutils/command/scons.py 2008-05-20 08:14:30 UTC (rev 5205) +++ branches/cdavid/numpy/distutils/command/scons.py 2008-05-20 08:17:27 UTC (rev 5206) @@ -196,6 +196,15 @@ raise ValueError(msg) return common +def is_bootstrapping(): + import __builtin__ + try: + __builtin__.__NUMPY_SETUP__ + return True + except AttributeError: + return False + __NUMPY_SETUP__ = False + class scons(old_build_ext): # XXX: add an option to the scons command for configuration (auto/force/cache). 
description = "Scons builder" @@ -303,6 +312,8 @@ else: # nothing to do, just leave it here. return + + print "is bootstrapping ? %s" % is_bootstrapping() # XXX: when a scons script is missing, scons only prints warnings, and # does not return a failure (status is 0). We have to detect this from # distutils (this cannot work for recursive scons builds...) @@ -326,6 +337,11 @@ post_hooks = self.post_hooks pkg_names = self.pkg_names + if is_bootstrapping(): + bootstrap = 1 + else: + bootstrap = 0 + for sconscript, pre_hook, post_hook, pkg_name in zip(sconscripts, pre_hooks, post_hooks, pkg_names): @@ -364,6 +380,7 @@ elif int(self.silent) == 3: cmd.append('-s') cmd.append('silent=%d' % int(self.silent)) + cmd.append('boostrapping=%d' % bootstrap) cmdstr = ' '.join(cmd) if int(self.silent) < 1: log.info("Executing scons command (pkg is %s): %s ", pkg_name, cmdstr) From numpy-svn at scipy.org Tue May 20 04:35:06 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Tue, 20 May 2008 03:35:06 -0500 (CDT) Subject: [Numpy-svn] r5207 - branches/cdavid/numpy/distutils/command Message-ID: <20080520083506.7AC9939C995@scipy.org> Author: cdavid Date: 2008-05-20 03:35:01 -0500 (Tue, 20 May 2008) New Revision: 5207 Modified: branches/cdavid/numpy/distutils/command/scons.py Log: Fix typo when passing bootstrapping option to scons. Modified: branches/cdavid/numpy/distutils/command/scons.py =================================================================== --- branches/cdavid/numpy/distutils/command/scons.py 2008-05-20 08:17:27 UTC (rev 5206) +++ branches/cdavid/numpy/distutils/command/scons.py 2008-05-20 08:35:01 UTC (rev 5207) @@ -380,7 +380,7 @@ elif int(self.silent) == 3: cmd.append('-s') cmd.append('silent=%d' % int(self.silent)) - cmd.append('boostrapping=%d' % bootstrap) + cmd.append('bootstrapping=%d' % bootstrap) cmdstr = ' '.join(cmd) if int(self.silent) < 1: log.info("Executing scons command (pkg is %s): %s ", pkg_name, cmdstr) From numpy-svn at scipy.org Tue May 20 04:41:24 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Tue, 20 May 2008 03:41:24 -0500 (CDT) Subject: [Numpy-svn] r5208 - in branches/cdavid/numpy: fft lib linalg numarray random Message-ID: <20080520084124.C243239C273@scipy.org> Author: cdavid Date: 2008-05-20 03:41:11 -0500 (Tue, 20 May 2008) New Revision: 5208 Modified: branches/cdavid/numpy/fft/SConstruct branches/cdavid/numpy/lib/SConstruct branches/cdavid/numpy/linalg/SConstruct branches/cdavid/numpy/numarray/SConstruct branches/cdavid/numpy/random/SConstruct Log: Do not mess with __NUMPY_SETUP__ in scons scripts anymore: this is handled in numscons. 
Modified: branches/cdavid/numpy/fft/SConstruct =================================================================== --- branches/cdavid/numpy/fft/SConstruct 2008-05-20 08:35:01 UTC (rev 5207) +++ branches/cdavid/numpy/fft/SConstruct 2008-05-20 08:41:11 UTC (rev 5208) @@ -1,8 +1,5 @@ -# Last Change: Thu Oct 18 09:00 PM 2007 J +# Last Change: Tue May 20 05:00 PM 2008 J # vim:syntax=python -import __builtin__ -__builtin__.__NUMPY_SETUP__ = True -from numpy.distutils.misc_util import get_numpy_include_dirs from numscons import GetNumpyEnvironment, scons_get_paths env = GetNumpyEnvironment(ARGUMENTS) Modified: branches/cdavid/numpy/lib/SConstruct =================================================================== --- branches/cdavid/numpy/lib/SConstruct 2008-05-20 08:35:01 UTC (rev 5207) +++ branches/cdavid/numpy/lib/SConstruct 2008-05-20 08:41:11 UTC (rev 5208) @@ -1,8 +1,5 @@ -# Last Change: Thu Oct 18 09:00 PM 2007 J +# Last Change: Tue May 20 05:00 PM 2008 J # vim:syntax=python -import __builtin__ -__builtin__.__NUMPY_SETUP__ = True -from numpy.distutils.misc_util import get_numpy_include_dirs from numscons import GetNumpyEnvironment, scons_get_paths env = GetNumpyEnvironment(ARGUMENTS) Modified: branches/cdavid/numpy/linalg/SConstruct =================================================================== --- branches/cdavid/numpy/linalg/SConstruct 2008-05-20 08:35:01 UTC (rev 5207) +++ branches/cdavid/numpy/linalg/SConstruct 2008-05-20 08:41:11 UTC (rev 5208) @@ -1,11 +1,7 @@ -# Last Change: Fri Nov 16 05:00 PM 2007 J +# Last Change: Tue May 20 05:00 PM 2008 J # vim:syntax=python import os.path -import __builtin__ -__builtin__.__NUMPY_SETUP__ = True - -from numpy.distutils.misc_util import get_numpy_include_dirs, get_mathlibs from numscons import GetNumpyEnvironment, scons_get_paths, \ scons_get_mathlib from numscons import CheckF77LAPACK Modified: branches/cdavid/numpy/numarray/SConstruct =================================================================== --- branches/cdavid/numpy/numarray/SConstruct 2008-05-20 08:35:01 UTC (rev 5207) +++ branches/cdavid/numpy/numarray/SConstruct 2008-05-20 08:41:11 UTC (rev 5208) @@ -1,8 +1,5 @@ -# Last Change: Fri Oct 19 09:00 AM 2007 J +# Last Change: Tue May 20 05:00 PM 2008 J # vim:syntax=python -import __builtin__ -__builtin__.__NUMPY_SETUP__ = True -from numpy.distutils.misc_util import get_numpy_include_dirs from numscons import GetNumpyEnvironment, scons_get_paths env = GetNumpyEnvironment(ARGUMENTS) Modified: branches/cdavid/numpy/random/SConstruct =================================================================== --- branches/cdavid/numpy/random/SConstruct 2008-05-20 08:35:01 UTC (rev 5207) +++ branches/cdavid/numpy/random/SConstruct 2008-05-20 08:41:11 UTC (rev 5208) @@ -1,11 +1,7 @@ -# Last Change: Tue Nov 13 11:00 PM 2007 J +# Last Change: Tue May 20 05:00 PM 2008 J # vim:syntax=python import os -import __builtin__ -__builtin__.__NUMPY_SETUP__ = True - -from numpy.distutils.misc_util import get_numpy_include_dirs, get_mathlibs from numscons import GetNumpyEnvironment, scons_get_paths, \ scons_get_mathlib From numpy-svn at scipy.org Tue May 20 04:43:50 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Tue, 20 May 2008 03:43:50 -0500 (CDT) Subject: [Numpy-svn] r5209 - branches/cdavid/numpy/linalg Message-ID: <20080520084350.9D2BD39C75E@scipy.org> Author: cdavid Date: 2008-05-20 03:43:46 -0500 (Tue, 20 May 2008) New Revision: 5209 Modified: branches/cdavid/numpy/linalg/SConstruct Log: Forgot one file in lapack_lite when no LAPACK is 
available. Modified: branches/cdavid/numpy/linalg/SConstruct =================================================================== --- branches/cdavid/numpy/linalg/SConstruct 2008-05-20 08:41:11 UTC (rev 5208) +++ branches/cdavid/numpy/linalg/SConstruct 2008-05-20 08:43:46 UTC (rev 5209) @@ -23,7 +23,7 @@ sources = ['lapack_litemodule.c'] if not use_lapack: - sources.extend(['zlapack_lite.c', 'dlapack_lite.c', 'blas_lite.c', - 'dlamch.c', 'f2c_lite.c']) + sources.extend(['python_xerbla.c', 'zlapack_lite.c', 'dlapack_lite.c', + 'blas_lite.c', 'dlamch.c', 'f2c_lite.c']) lapack_lite = env.NumpyPythonExtension('lapack_lite', source = sources) From numpy-svn at scipy.org Tue May 20 05:24:57 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Tue, 20 May 2008 04:24:57 -0500 (CDT) Subject: [Numpy-svn] r5210 - branches/cdavid/numpy/distutils/command Message-ID: <20080520092457.DA1CC39C765@scipy.org> Author: cdavid Date: 2008-05-20 04:24:38 -0500 (Tue, 20 May 2008) New Revision: 5210 Modified: branches/cdavid/numpy/distutils/command/scons.py Log: Handle fortran compiler on open-solaris Modified: branches/cdavid/numpy/distutils/command/scons.py =================================================================== --- branches/cdavid/numpy/distutils/command/scons.py 2008-05-20 08:43:46 UTC (rev 5209) +++ branches/cdavid/numpy/distutils/command/scons.py 2008-05-20 09:24:38 UTC (rev 5210) @@ -85,6 +85,8 @@ return 'g77' elif compiler.compiler_type == 'gnu95': return 'gfortran' + elif compiler.compiler_type == 'sun': + return 'sunf77' else: # XXX: Just give up for now, and use generic fortran compiler return 'fortran' From numpy-svn at scipy.org Tue May 20 12:07:51 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Tue, 20 May 2008 11:07:51 -0500 (CDT) Subject: [Numpy-svn] r5211 - trunk/numpy/testing Message-ID: <20080520160751.960D339CACD@scipy.org> Author: stefan Date: 2008-05-20 11:07:23 -0500 (Tue, 20 May 2008) New Revision: 5211 Modified: trunk/numpy/testing/numpytest.py Log: Fix unit test capturing under Python 2.6. Modified: trunk/numpy/testing/numpytest.py =================================================================== --- trunk/numpy/testing/numpytest.py 2008-05-20 09:24:38 UTC (rev 5210) +++ trunk/numpy/testing/numpytest.py 2008-05-20 16:07:23 UTC (rev 5211) @@ -109,6 +109,8 @@ self.data.append(message) def writeln(self,message): self.write(message+'\n') + def flush(self): + self.stream.flush() class NumpyTestCase (unittest.TestCase): From numpy-svn at scipy.org Wed May 21 16:18:41 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Wed, 21 May 2008 15:18:41 -0500 (CDT) Subject: [Numpy-svn] r5212 - trunk/numpy/core/tests Message-ID: <20080521201841.4762E39C288@scipy.org> Author: rkern Date: 2008-05-21 15:18:40 -0500 (Wed, 21 May 2008) New Revision: 5212 Modified: trunk/numpy/core/tests/test_multiarray.py Log: Try again to fix the endianness tests. Modified: trunk/numpy/core/tests/test_multiarray.py =================================================================== --- trunk/numpy/core/tests/test_multiarray.py 2008-05-20 16:07:23 UTC (rev 5211) +++ trunk/numpy/core/tests/test_multiarray.py 2008-05-21 20:18:40 UTC (rev 5212) @@ -839,8 +839,9 @@ ('b',np.int8),('a',np.int8)]) # We must be specific about the endianness here: y = x.view(dtype=' Author: charris Date: 2008-05-21 15:25:14 -0500 (Wed, 21 May 2008) New Revision: 5213 Modified: trunk/numpy/ma/tests/test_old_ma.py Log: Fix one small error in test(all=1). 
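The one-line flush() added in r5211 above is easy to miss: the output-capturing stream that numpytest wraps around the real stream only provided write/writeln, and under Python 2.6 the unittest machinery apparently also calls flush() on whatever object it is handed. A minimal sketch of such a wrapper (the class name here is made up; the real one lives in numpytest.py):

class _CapturedStream(object):          # hypothetical name
    def __init__(self, stream):
        self.stream = stream
        self.data = []
    def write(self, message):
        self.data.append(message)
    def writeln(self, message):
        self.write(message + '\n')
    def flush(self):
        # Forward flush to the underlying stream so callers that flush
        # do not fail with AttributeError.
        self.stream.flush()
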
Modified: trunk/numpy/ma/tests/test_old_ma.py =================================================================== --- trunk/numpy/ma/tests/test_old_ma.py 2008-05-21 20:18:40 UTC (rev 5212) +++ trunk/numpy/ma/tests/test_old_ma.py 2008-05-21 20:25:14 UTC (rev 5213) @@ -2,6 +2,7 @@ import types, time from numpy.ma import * from numpy.core.numerictypes import float32 +from numpy.ma.core import umath from numpy.testing import NumpyTestCase, NumpyTest pi = numpy.pi def eq(v,w, msg=''): From numpy-svn at scipy.org Wed May 21 17:38:14 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Wed, 21 May 2008 16:38:14 -0500 (CDT) Subject: [Numpy-svn] r5214 - trunk/numpy/linalg/tests Message-ID: <20080521213814.71AAD39C23D@scipy.org> Author: charris Date: 2008-05-21 16:38:11 -0500 (Wed, 21 May 2008) New Revision: 5214 Modified: trunk/numpy/linalg/tests/test_regression.py Log: Fix ordering assumption in regression test. Modified: trunk/numpy/linalg/tests/test_regression.py =================================================================== --- trunk/numpy/linalg/tests/test_regression.py 2008-05-21 20:25:14 UTC (rev 5213) +++ trunk/numpy/linalg/tests/test_regression.py 2008-05-21 21:38:11 UTC (rev 5214) @@ -12,7 +12,7 @@ class TestRegression(NumpyTestCase): def test_eig_build(self, level = rlevel): """Ticket #652""" - rva = [1.03221168e+02 +0.j, + rva = array([1.03221168e+02 +0.j, -1.91843603e+01 +0.j, -6.04004526e-01+15.84422474j, -6.04004526e-01-15.84422474j, @@ -24,11 +24,13 @@ 7.80732773e+00 +0.j , -7.65390898e-01 +0.j, 1.51971555e-15 +0.j , - -1.51308713e-15 +0.j] + -1.51308713e-15 +0.j]) a = arange(13*13, dtype = float64) a.shape = (13,13) a = a%17 va, ve = linalg.eig(a) + va.sort() + rva.sort() assert_array_almost_equal(va, rva) def test_eigh_build(self, level = rlevel): From numpy-svn at scipy.org Wed May 21 17:53:37 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Wed, 21 May 2008 16:53:37 -0500 (CDT) Subject: [Numpy-svn] r5215 - in trunk/numpy/core: src tests Message-ID: <20080521215337.C082D39C23D@scipy.org> Author: oliphant Date: 2008-05-21 16:53:36 -0500 (Wed, 21 May 2008) New Revision: 5215 Modified: trunk/numpy/core/src/arrayobject.c trunk/numpy/core/tests/test_regression.py Log: Fix the logic testing for potential problems with array subclasses. 
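The sort() calls added in r5214 above are there because LAPACK does not guarantee any particular ordering of the eigenvalues it returns, so comparing against a hard-coded list element by element can fail on a different LAPACK build even when the values themselves are correct. Sketch of the pattern (illustrative only):

import numpy as np

a = np.arange(13*13, dtype=np.float64).reshape(13, 13) % 17
va, ve = np.linalg.eig(a)

va = np.sort(va)    # put the computed values into a canonical order, then sort
                    # the reference values the same way before comparing them
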
Modified: trunk/numpy/core/src/arrayobject.c =================================================================== --- trunk/numpy/core/src/arrayobject.c 2008-05-21 21:38:11 UTC (rev 5214) +++ trunk/numpy/core/src/arrayobject.c 2008-05-21 21:53:36 UTC (rev 5215) @@ -1972,20 +1972,15 @@ sz = self->dimensions[0]; lp = PyList_New(sz); for(i = 0; i < sz; i++) { - if (PyArray_CheckExact(self)) { - v=(PyArrayObject *)array_big_item(self, i); + v = (PyArrayObject *)array_big_item(self, i); + if (PyArray_Check(v) && (v->nd >= self->nd)) { + PyErr_SetString(PyExc_RuntimeError, + "array_item not returning smaller-" \ + "dimensional array"); + Py_DECREF(v); + Py_DECREF(lp); + return NULL; } - else { - v = (PyArrayObject *)PySequence_GetItem((PyObject *)self, i); - if ((!PyArray_Check(v)) || (v->nd >= self->nd)) { - PyErr_SetString(PyExc_RuntimeError, - "array_item not returning smaller-" \ - "dimensional array"); - Py_DECREF(v); - Py_DECREF(lp); - return NULL; - } - } PyList_SetItem(lp, i, PyArray_ToList(v)); Py_DECREF(v); } Modified: trunk/numpy/core/tests/test_regression.py =================================================================== --- trunk/numpy/core/tests/test_regression.py 2008-05-21 21:38:11 UTC (rev 5214) +++ trunk/numpy/core/tests/test_regression.py 2008-05-21 21:53:36 UTC (rev 5215) @@ -1038,5 +1038,14 @@ assert (xp.__array_interface__['data'][0] != xpd.__array_interface__['data'][0]) + def check_recarray_tolist(self, level=rlevel): + """Ticket #783 + """ + a = np.recarray(2, formats="i4,f8,f8", names="id,x,y") + b = a.tolist() + assert( a[0].tolist() == b[0]) + assert( a[1].tolist() == b[1]) + + if __name__ == "__main__": NumpyTest().run() From numpy-svn at scipy.org Wed May 21 17:54:29 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Wed, 21 May 2008 16:54:29 -0500 (CDT) Subject: [Numpy-svn] r5216 - trunk/numpy/core/tests Message-ID: <20080521215429.29EE239C4C7@scipy.org> Author: oliphant Date: 2008-05-21 16:54:28 -0500 (Wed, 21 May 2008) New Revision: 5216 Modified: trunk/numpy/core/tests/test_regression.py Log: Fix comments in tests. Modified: trunk/numpy/core/tests/test_regression.py =================================================================== --- trunk/numpy/core/tests/test_regression.py 2008-05-21 21:53:36 UTC (rev 5215) +++ trunk/numpy/core/tests/test_regression.py 2008-05-21 21:54:28 UTC (rev 5216) @@ -1028,7 +1028,7 @@ np.dot(z, y) def check_astype_copy(self, level=rlevel): - """Ticket 788, changeset r5155""" + """Ticket #788, changeset r5155""" # The test data file was generated by scipy.io.savemat. # The dtype is float64, but the isbuiltin attribute is 0. data_dir = path.join(path.dirname(__file__), 'data') @@ -1039,7 +1039,7 @@ xpd.__array_interface__['data'][0]) def check_recarray_tolist(self, level=rlevel): - """Ticket #783 + """Ticket #793, changeset r5215 """ a = np.recarray(2, formats="i4,f8,f8", names="id,x,y") b = a.tolist() From numpy-svn at scipy.org Wed May 21 19:16:04 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Wed, 21 May 2008 18:16:04 -0500 (CDT) Subject: [Numpy-svn] r5217 - trunk/numpy/testing Message-ID: <20080521231604.1B29039C663@scipy.org> Author: charris Date: 2008-05-21 18:16:02 -0500 (Wed, 21 May 2008) New Revision: 5217 Modified: trunk/numpy/testing/numpytest.py Log: Make test(all=True) the default. 
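Practical effect of r5217, stated as a sketch: since numpy.test() simply forwards to NumpyTest().test(), the module-level call now picks up the full suite by default.

import numpy
numpy.test()            # now equivalent to ...
numpy.test(all=True)    # ... the previously explicit full run
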
Modified: trunk/numpy/testing/numpytest.py =================================================================== --- trunk/numpy/testing/numpytest.py 2008-05-21 21:54:28 UTC (rev 5216) +++ trunk/numpy/testing/numpytest.py 2008-05-21 23:16:02 UTC (rev 5217) @@ -527,7 +527,7 @@ all_tests = unittest.TestSuite(suite_list) return all_tests - def test(self, level=1, verbosity=1, all=False, sys_argv=[], + def test(self, level=1, verbosity=1, all=True, sys_argv=[], testcase_pattern='.*'): """Run Numpy module test suite with level and verbosity. From numpy-svn at scipy.org Wed May 21 22:02:42 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Wed, 21 May 2008 21:02:42 -0500 (CDT) Subject: [Numpy-svn] r5218 - in trunk/numpy/core: src tests Message-ID: <20080522020242.2DD1039C401@scipy.org> Author: cdavid Date: 2008-05-21 21:02:36 -0500 (Wed, 21 May 2008) New Revision: 5218 Modified: trunk/numpy/core/src/multiarraymodule.c trunk/numpy/core/tests/test_regression.py Log: Fix #789 by Alan Mcintyre. Modified: trunk/numpy/core/src/multiarraymodule.c =================================================================== --- trunk/numpy/core/src/multiarraymodule.c 2008-05-21 23:16:02 UTC (rev 5217) +++ trunk/numpy/core/src/multiarraymodule.c 2008-05-22 02:02:36 UTC (rev 5218) @@ -3895,7 +3895,16 @@ } obj = (PyArrayObject *)PyArray_FromArray(ret, self->descr, flags); - if (obj != ret) copyret = 1; + if (obj == NULL) { + PyErr_SetString(PyExc_ValueError, + "unable to create array of proper type from output array"); + ret = NULL; + Py_DECREF(self->descr); + goto fail; + } + else if (obj != ret) { + copyret = 1; + } ret = obj; } Modified: trunk/numpy/core/tests/test_regression.py =================================================================== --- trunk/numpy/core/tests/test_regression.py 2008-05-21 23:16:02 UTC (rev 5217) +++ trunk/numpy/core/tests/test_regression.py 2008-05-22 02:02:36 UTC (rev 5218) @@ -1038,6 +1038,22 @@ assert (xp.__array_interface__['data'][0] != xpd.__array_interface__['data'][0]) + def check_compress_small_type(self, level=rlevel): + """Ticket #789, changeset 5217. + """ + # compress with out argument segfaulted if cannot cast safely + import numpy as np + a = np.array([[1, 2], [3, 4]]) + b = np.zeros((2, 1), dtype = np.single) + try: + a.compress([True, False], axis = 1, out = b) + raise AssertionError("compress with an out which cannot be " \ + "safely casted should not return "\ + "successfully") + except ValueError: + pass + + def check_recarray_tolist(self, level=rlevel): """Ticket #793, changeset r5215 """ From numpy-svn at scipy.org Wed May 21 23:14:58 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Wed, 21 May 2008 22:14:58 -0500 (CDT) Subject: [Numpy-svn] r5219 - trunk/numpy/core/src Message-ID: <20080522031458.95E6339C114@scipy.org> Author: cdavid Date: 2008-05-21 22:14:53 -0500 (Wed, 21 May 2008) New Revision: 5219 Modified: trunk/numpy/core/src/multiarraymodule.c Log: Remove trailing space. 
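What the r5218 fix above guards against, as a sketch: an integer compress result cannot be safely cast into a float32 output array, and before this change the attempt could crash instead of raising. The exact exception class was still being adjusted in the follow-up commit, so the sketch catches both candidates:

import numpy as np

a = np.array([[1, 2], [3, 4]])
b = np.zeros((2, 1), dtype=np.single)
try:
    a.compress([True, False], axis=1, out=b)
except (ValueError, TypeError):
    pass    # expected: the unsafe out array is rejected instead of segfaulting
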
Modified: trunk/numpy/core/src/multiarraymodule.c =================================================================== --- trunk/numpy/core/src/multiarraymodule.c 2008-05-22 02:02:36 UTC (rev 5218) +++ trunk/numpy/core/src/multiarraymodule.c 2008-05-22 03:14:53 UTC (rev 5219) @@ -3895,15 +3895,15 @@ } obj = (PyArrayObject *)PyArray_FromArray(ret, self->descr, flags); - if (obj == NULL) { - PyErr_SetString(PyExc_ValueError, - "unable to create array of proper type from output array"); - ret = NULL; - Py_DECREF(self->descr); - goto fail; - } + if (obj == NULL) { + PyErr_SetString(PyExc_ValueError, + "unable to create array of proper type from output array"); + ret = NULL; + Py_DECREF(self->descr); + goto fail; + } else if (obj != ret) { - copyret = 1; + copyret = 1; } ret = obj; } From numpy-svn at scipy.org Wed May 21 23:43:57 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Wed, 21 May 2008 22:43:57 -0500 (CDT) Subject: [Numpy-svn] r5220 - in trunk/numpy/core: src tests Message-ID: <20080522034357.3DDAD39C11E@scipy.org> Author: oliphant Date: 2008-05-21 22:43:55 -0500 (Wed, 21 May 2008) New Revision: 5220 Modified: trunk/numpy/core/src/multiarraymodule.c trunk/numpy/core/tests/test_regression.py Log: Fix ticket #789 again. Modified: trunk/numpy/core/src/multiarraymodule.c =================================================================== --- trunk/numpy/core/src/multiarraymodule.c 2008-05-22 03:14:53 UTC (rev 5219) +++ trunk/numpy/core/src/multiarraymodule.c 2008-05-22 03:43:55 UTC (rev 5220) @@ -3895,17 +3895,9 @@ } obj = (PyArrayObject *)PyArray_FromArray(ret, self->descr, flags); - if (obj == NULL) { - PyErr_SetString(PyExc_ValueError, - "unable to create array of proper type from output array"); - ret = NULL; - Py_DECREF(self->descr); - goto fail; - } - else if (obj != ret) { - copyret = 1; - } + if (obj != ret) copyret = 1; ret = obj; + if (ret == NULL) goto fail; } max_item = self->dimensions[axis]; Modified: trunk/numpy/core/tests/test_regression.py =================================================================== --- trunk/numpy/core/tests/test_regression.py 2008-05-22 03:14:53 UTC (rev 5219) +++ trunk/numpy/core/tests/test_regression.py 2008-05-22 03:43:55 UTC (rev 5220) @@ -1050,7 +1050,7 @@ raise AssertionError("compress with an out which cannot be " \ "safely casted should not return "\ "successfully") - except ValueError: + except TypeError: pass From numpy-svn at scipy.org Thu May 22 02:34:35 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Thu, 22 May 2008 01:34:35 -0500 (CDT) Subject: [Numpy-svn] r5221 - in trunk/numpy/core: code_generators src tests Message-ID: <20080522063435.D772C39C053@scipy.org> Author: oliphant Date: 2008-05-22 01:34:33 -0500 (Thu, 22 May 2008) New Revision: 5221 Modified: trunk/numpy/core/code_generators/multiarray_api_order.txt trunk/numpy/core/src/arrayobject.c trunk/numpy/core/src/multiarraymodule.c trunk/numpy/core/tests/test_regression.py Log: Fix bug reported on SciPy mailing list which arose when the results of a broadcast were too large to fit in memory and the simple MultiplyList function is not doing overflow detection. Create a new funtion that does Overflow detection but apply it sparingly. 
morarge broadcast results could caus Modified: trunk/numpy/core/code_generators/multiarray_api_order.txt =================================================================== --- trunk/numpy/core/code_generators/multiarray_api_order.txt 2008-05-22 03:43:55 UTC (rev 5220) +++ trunk/numpy/core/code_generators/multiarray_api_order.txt 2008-05-22 06:34:33 UTC (rev 5221) @@ -82,3 +82,4 @@ PyArray_DescrAlignConverter2 PyArray_SearchsideConverter PyArray_CheckAxis +PyArray_OverflowMultiplyList Modified: trunk/numpy/core/src/arrayobject.c =================================================================== --- trunk/numpy/core/src/arrayobject.c 2008-05-22 03:43:55 UTC (rev 5220) +++ trunk/numpy/core/src/arrayobject.c 2008-05-22 06:34:33 UTC (rev 5221) @@ -5564,7 +5564,7 @@ return NULL; } size *= dims[i]; - if (size > largest) { + if (size > largest || size < 0) { PyErr_SetString(PyExc_ValueError, "dimensions too large."); Py_DECREF(descr); @@ -10158,8 +10158,13 @@ /* Reset the iterator dimensions and strides of each iterator object -- using 0 valued strides for broadcasting */ - - tmp = PyArray_MultiplyList(mit->dimensions, mit->nd); + /* Need to check for overflow */ + tmp = PyArray_OverflowMultiplyList(mit->dimensions, mit->nd); + if (tmp < 0) { + PyErr_SetString(PyExc_ValueError, + "broadcast dimensions too large."); + return -1; + } mit->size = tmp; for(i=0; inumiter; i++) { it = mit->iters[i]; @@ -10408,7 +10413,12 @@ } finish: /* Here check the indexes (now that we have iteraxes) */ - mit->size = PyArray_MultiplyList(mit->dimensions, mit->nd); + mit->size = PyArray_OverflowMultiplyList(mit->dimensions, mit->nd); + if (mit->size < 0) { + PyErr_SetString(PyExc_ValueError, + "dimensions too large in fancy indexing"); + goto fail; + } if (mit->ait->size == 0 && mit->size != 0) { PyErr_SetString(PyExc_ValueError, "invalid index into a 0-size array"); Modified: trunk/numpy/core/src/multiarraymodule.c =================================================================== --- trunk/numpy/core/src/multiarraymodule.c 2008-05-22 03:43:55 UTC (rev 5220) +++ trunk/numpy/core/src/multiarraymodule.c 2008-05-22 06:34:33 UTC (rev 5221) @@ -123,6 +123,22 @@ } /*MULTIARRAY_API + Multiply a List of Non-negative numbers with over-flow detection. +*/ +static intp +PyArray_OverflowMultiplyList(register intp *l1, register int n) +{ + register intp s=1; + while (n--) { + if (*l1 == 0) return 0; + if ((s > MAX_INTP / *l1) || (*l1 > MAX_INTP / s)) + return -1; + s *= (*l1++); + } + return s; +} + +/*MULTIARRAY_API Produce a pointer into array */ static void * Modified: trunk/numpy/core/tests/test_regression.py =================================================================== --- trunk/numpy/core/tests/test_regression.py 2008-05-22 03:43:55 UTC (rev 5220) +++ trunk/numpy/core/tests/test_regression.py 2008-05-22 06:34:33 UTC (rev 5221) @@ -1061,7 +1061,17 @@ b = a.tolist() assert( a[0].tolist() == b[0]) assert( a[1].tolist() == b[1]) - + def check_large_fancy_indexing(self, level=rlevel): + # Large enough to fail on 64-bit. + nbits = np.dtype(np.intp).itemsize * 8 + thesize = int((2**nbits)**(1.0/5.0)+1) + def dp(): + n = 3 + a = np.ones((n,)*5) + i = np.random.randint(0,n,size=thesize) + a[np.ix_(i,i,i,i,i)] = 0 + self.failUnlessRaises(ValueError, dp) + if __name__ == "__main__": NumpyTest().run() From numpy-svn at scipy.org Thu May 22 02:43:31 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Thu, 22 May 2008 01:43:31 -0500 (CDT) Subject: [Numpy-svn] r5222 - in trunk/numpy: . 
core core/tests distutils distutils/command doc lib/tests ma ma/tests Message-ID: <20080522064331.6133A39C4F4@scipy.org> Author: jarrod.millman Date: 2008-05-22 01:43:22 -0500 (Thu, 22 May 2008) New Revision: 5222 Modified: trunk/numpy/__init__.py trunk/numpy/core/scons_support.py trunk/numpy/core/tests/test_defmatrix.py trunk/numpy/core/tests/test_regression.py trunk/numpy/distutils/command/scons.py trunk/numpy/distutils/conv_template.py trunk/numpy/doc/example.py trunk/numpy/lib/tests/test_function_base.py trunk/numpy/ma/core.py trunk/numpy/ma/extras.py trunk/numpy/ma/mrecords.py trunk/numpy/ma/tests/test_core.py trunk/numpy/ma/tests/test_mrecords.py trunk/numpy/ma/testutils.py Log: fixed whitespace w/ reindent Modified: trunk/numpy/__init__.py =================================================================== --- trunk/numpy/__init__.py 2008-05-22 06:34:33 UTC (rev 5221) +++ trunk/numpy/__init__.py 2008-05-22 06:43:22 UTC (rev 5222) @@ -125,4 +125,3 @@ print 'Python version %s' % (sys.version.replace('\n', '',),) return NumpyTest().test(*args, **kw) test.__doc__ = NumpyTest.test.__doc__ - Modified: trunk/numpy/core/scons_support.py =================================================================== --- trunk/numpy/core/scons_support.py 2008-05-22 06:34:33 UTC (rev 5221) +++ trunk/numpy/core/scons_support.py 2008-05-22 06:43:22 UTC (rev 5222) @@ -188,17 +188,17 @@ nosmp = 0 return nosmp == 1 -array_api_gen_bld = Builder(action = Action(do_generate_array_api, '$ARRAPIGENCOMSTR'), +array_api_gen_bld = Builder(action = Action(do_generate_array_api, '$ARRAPIGENCOMSTR'), emitter = [generate_api_emitter, distutils_dirs_emitter]) -ufunc_api_gen_bld = Builder(action = Action(do_generate_ufunc_api, '$UFUNCAPIGENCOMSTR'), +ufunc_api_gen_bld = Builder(action = Action(do_generate_ufunc_api, '$UFUNCAPIGENCOMSTR'), emitter = [generate_api_emitter, distutils_dirs_emitter]) -template_bld = Builder(action = Action(generate_from_template, '$TEMPLATECOMSTR'), +template_bld = Builder(action = Action(generate_from_template, '$TEMPLATECOMSTR'), emitter = [generate_from_template_emitter, distutils_dirs_emitter]) -umath_bld = Builder(action = Action(generate_umath, '$UMATHCOMSTR'), +umath_bld = Builder(action = Action(generate_umath, '$UMATHCOMSTR'), emitter = [generate_umath_emitter, distutils_dirs_emitter]) Modified: trunk/numpy/core/tests/test_defmatrix.py =================================================================== --- trunk/numpy/core/tests/test_defmatrix.py 2008-05-22 06:34:33 UTC (rev 5221) +++ trunk/numpy/core/tests/test_defmatrix.py 2008-05-22 06:43:22 UTC (rev 5222) @@ -247,6 +247,6 @@ assert_array_equal(x[[2,1,0],:],x[::-1,:]) - + if __name__ == "__main__": NumpyTest().run() Modified: trunk/numpy/core/tests/test_regression.py =================================================================== --- trunk/numpy/core/tests/test_regression.py 2008-05-22 06:34:33 UTC (rev 5221) +++ trunk/numpy/core/tests/test_regression.py 2008-05-22 06:43:22 UTC (rev 5222) @@ -1053,7 +1053,7 @@ except TypeError: pass - + def check_recarray_tolist(self, level=rlevel): """Ticket #793, changeset r5215 """ Modified: trunk/numpy/distutils/command/scons.py =================================================================== --- trunk/numpy/distutils/command/scons.py 2008-05-22 06:34:33 UTC (rev 5221) +++ trunk/numpy/distutils/command/scons.py 2008-05-22 06:43:22 UTC (rev 5222) @@ -180,7 +180,7 @@ """Given two list, return the index of the common items. The index are relative to seq1. 
- + Note: do not handle duplicate items.""" dict2 = dict([(i, None) for i in seq2]) @@ -288,7 +288,7 @@ cxxcompiler.customize_cmd(self) self.cxxcompiler = cxxcompiler.cxx_compiler() #print self.cxxcompiler.compiler_cxx[0] - + if self.package_list: self.package_list = parse_package_list(self.package_list) @@ -314,7 +314,7 @@ scons_exec = get_python_exec_invoc() scons_exec += ' ' + protect_path(pjoin(get_scons_local_path(), 'scons.py')) - if self.package_list is not None: + if self.package_list is not None: id = select_packages(self.pkg_names, self.package_list) sconscripts = [self.sconscripts[i] for i in id] pre_hooks = [self.pre_hooks[i] for i in id] Modified: trunk/numpy/distutils/conv_template.py =================================================================== --- trunk/numpy/distutils/conv_template.py 2008-05-22 06:34:33 UTC (rev 5221) +++ trunk/numpy/distutils/conv_template.py 2008-05-22 06:43:22 UTC (rev 5222) @@ -286,4 +286,3 @@ allstr = fid.read() writestr = process_str(allstr) outfile.write(writestr) - Modified: trunk/numpy/doc/example.py =================================================================== --- trunk/numpy/doc/example.py 2008-05-22 06:34:33 UTC (rev 5221) +++ trunk/numpy/doc/example.py 2008-05-22 06:43:22 UTC (rev 5222) @@ -121,4 +121,3 @@ """ pass - Modified: trunk/numpy/lib/tests/test_function_base.py =================================================================== --- trunk/numpy/lib/tests/test_function_base.py 2008-05-22 06:34:33 UTC (rev 5221) +++ trunk/numpy/lib/tests/test_function_base.py 2008-05-22 06:43:22 UTC (rev 5222) @@ -453,7 +453,7 @@ assert_array_equal(a, 10) def check_normed_new(self): - # Check that the integral of the density equals 1. + # Check that the integral of the density equals 1. n = 100 v = rand(n) a,b = histogram(v, normed=True, new=True) @@ -466,49 +466,49 @@ a,b = histogram(v, bins, normed=True, new=True) area = sum(a*diff(b)) assert_almost_equal(area, 1) - - + + def check_outliers_new(self): # Check that outliers are not tallied a = arange(10)+.5 - + # Lower outliers h,b = histogram(a, range=[0,9], new=True) assert_equal(h.sum(),9) - + # Upper outliers h,b = histogram(a, range=[1,10], new=True) assert_equal(h.sum(),9) - + # Normalization h,b = histogram(a, range=[1,9], normed=True, new=True) assert_equal((h*diff(b)).sum(),1) - + # Weights w = arange(10)+.5 h,b = histogram(a, range=[1,9], weights=w, normed=True, new=True) assert_equal((h*diff(b)).sum(),1) - + h,b = histogram(a, bins=8, range=[1,9], weights=w, new=True) assert_equal(h, w[1:-1]) - - + + def check_type_new(self): # Check the type of the returned histogram a = arange(10)+.5 h,b = histogram(a, new=True) assert(issubdtype(h.dtype, int)) - + h,b = histogram(a, normed=True, new=True) assert(issubdtype(h.dtype, float)) - + h,b = histogram(a, weights=ones(10, int), new=True) assert(issubdtype(h.dtype, int)) - + h,b = histogram(a, weights=ones(10, float), new=True) assert(issubdtype(h.dtype, float)) - - + + def check_weights_new(self): v = rand(100) w = ones(100)*5 @@ -518,19 +518,19 @@ nwa,nwb = histogram(v, weights=w, normed=True, new=True) assert_array_almost_equal(a*5, wa) assert_array_almost_equal(na, nwa) - + # Check weights are properly applied. 
v = linspace(0,10,10) w = concatenate((zeros(5), ones(5))) wa,wb = histogram(v, bins=arange(11),weights=w, new=True) assert_array_almost_equal(wa, w) - + # Check with integer weights wa, wb = histogram([1,2,2,4], bins=4, weights=[4,3,2,1], new=True) assert_array_equal(wa, [4,5,0,1]) wa, wb = histogram([1,2,2,4], bins=4, weights=[4,3,2,1], normed=True, new=True) assert_array_equal(wa, array([4,5,0,1])/10./3.*4) - + class TestHistogramdd(NumpyTestCase): def check_simple(self): x = array([[-.5, .5, 1.5], [-.5, 1.5, 2.5], [-.5, 2.5, .5], \ Modified: trunk/numpy/ma/core.py =================================================================== --- trunk/numpy/ma/core.py 2008-05-22 06:34:33 UTC (rev 5221) +++ trunk/numpy/ma/core.py 2008-05-22 06:43:22 UTC (rev 5222) @@ -1225,7 +1225,7 @@ # def _update_from(self, obj): """Copies some attributes of obj to self. - """ + """ if obj is not None and isinstance(obj,ndarray): _baseclass = type(obj) else: @@ -1237,7 +1237,7 @@ _baseclass=getattr(obj,'_baseclass',_baseclass), _basedict=_basedict,) self.__dict__.update(_dict) - self.__dict__.update(_basedict) + self.__dict__.update(_basedict) return #........................ def __array_finalize__(self,obj): @@ -1368,7 +1368,7 @@ return #.... dval = narray(value, copy=False, dtype=self.dtype) - valmask = getmask(value) + valmask = getmask(value) if self._mask is nomask: # Set the data, then the mask ndarray.__setitem__(self._data,indx,dval) @@ -2893,17 +2893,17 @@ result[invalid] = masked result._data[invalid] = result.fill_value return result - + # if fb.dtype.char in typecodes["Integer"]: # return masked_array(umath.power(fa, fb), m) -# m = mask_or(m, (fa < 0) & (fb != fb.astype(int))) +# m = mask_or(m, (fa < 0) & (fb != fb.astype(int))) # if m is nomask: # return masked_array(umath.power(fa, fb)) # else: # fa = fa.copy() # if m.all(): # fa.flat = 1 -# else: +# else: # numpy.putmask(fa,m,1) # return masked_array(umath.power(fa, fb), m) @@ -3420,4 +3420,3 @@ clip = numpy.clip ############################################################################### - Modified: trunk/numpy/ma/extras.py =================================================================== --- trunk/numpy/ma/extras.py 2008-05-22 06:34:33 UTC (rev 5221) +++ trunk/numpy/ma/extras.py 2008-05-22 06:43:22 UTC (rev 5222) @@ -842,7 +842,7 @@ def polyfit(x, y, deg, rcond=None, full=False): """%s - + Notes ----- Any masked values in x is propagated in y, and vice-versa. @@ -876,7 +876,7 @@ x = x / scale # solve least squares equation for powers of x v = vander(x, order) - c, resids, rank, s = _lstsq(v, y.filled(0), rcond) + c, resids, rank, s = _lstsq(v, y.filled(0), rcond) # warn on rank reduction, which indicates an ill conditioned matrix if rank != order and not full: warnings.warn("Polyfit may be poorly conditioned", np.RankWarning) @@ -890,7 +890,7 @@ return c, resids, rank, s, rcond else : return c - + _g = globals() for nfunc in ('vander', 'polyfit'): _g[nfunc].func_doc = _g[nfunc].func_doc % getattr(np,nfunc).__doc__ Modified: trunk/numpy/ma/mrecords.py =================================================================== --- trunk/numpy/ma/mrecords.py 2008-05-22 06:34:33 UTC (rev 5221) +++ trunk/numpy/ma/mrecords.py 2008-05-22 06:43:22 UTC (rev 5222) @@ -350,8 +350,8 @@ "Sets the given record to value." MaskedArray.__setitem__(self, indx, value) if isinstance(indx, basestring): - self._fieldmask[indx] = ma.getmaskarray(value) - + self._fieldmask[indx] = ma.getmaskarray(value) + #............................................ 
def __setslice__(self, i, j, value): "Sets the slice described by [i,j] to `value`." @@ -780,4 +780,3 @@ return newdata ############################################################################### - Modified: trunk/numpy/ma/tests/test_core.py =================================================================== --- trunk/numpy/ma/tests/test_core.py 2008-05-22 06:34:33 UTC (rev 5221) +++ trunk/numpy/ma/tests/test_core.py 2008-05-22 06:43:22 UTC (rev 5222) @@ -260,7 +260,7 @@ # Make sure we don't lose the shape in some circumstances xm = array((0,0))/0. assert_equal(xm.shape,(2,)) - assert_equal(xm.mask,[1,1]) + assert_equal(xm.mask,[1,1]) #......................... def test_basic_ufuncs (self): "Test various functions such as sin, cos." @@ -1376,8 +1376,8 @@ a[0,0] = masked b = a.compressed() assert_equal(b, [[2,3,4]]) - + def test_tolist(self): "Tests to list" x = array(numpy.arange(12)) @@ -1582,9 +1582,9 @@ assert_equal(x._mask, y._mask) assert_almost_equal(x,y) assert_almost_equal(x._data,y._data) - + ############################################################################### #------------------------------------------------------------------------------ if __name__ == "__main__": Modified: trunk/numpy/ma/tests/test_mrecords.py =================================================================== --- trunk/numpy/ma/tests/test_mrecords.py 2008-05-22 06:34:33 UTC (rev 5221) +++ trunk/numpy/ma/tests/test_mrecords.py 2008-05-22 06:43:22 UTC (rev 5222) @@ -129,7 +129,7 @@ rdata['num'] = val assert_equal(rdata.num, val) assert_equal(rdata.num.mask, [1,0,0]) - + # def test_set_mask(self): base = self.base.copy() @@ -297,9 +297,9 @@ # One record only _x = ma.array([1,1.1,'one'], mask=[1,0,0],) assert_equal_records(fromarrays(_x, dtype=mrec.dtype), mrec[0]) - + def test_fromrecords(self): "Test construction from records." (mrec, nrec, ddtype) = self.data Modified: trunk/numpy/ma/testutils.py =================================================================== --- trunk/numpy/ma/testutils.py 2008-05-22 06:34:33 UTC (rev 5221) +++ trunk/numpy/ma/testutils.py 2008-05-22 06:43:22 UTC (rev 5222) @@ -55,8 +55,8 @@ y = filled(masked_array(d2, copy=False, mask=m), 1).astype(float_) d = N.around(N.abs(x-y),decimal) <= 10.0**(-decimal) return d.ravel() - + #................................................ def _assert_equal_on_sequences(actual, desired, err_msg=''): "Asserts the equality of two non-array sequences." From numpy-svn at scipy.org Thu May 22 11:09:29 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Thu, 22 May 2008 10:09:29 -0500 (CDT) Subject: [Numpy-svn] r5223 - trunk/numpy/core/tests Message-ID: <20080522150929.B01EE39C68A@scipy.org> Author: oliphant Date: 2008-05-22 10:09:28 -0500 (Thu, 22 May 2008) New Revision: 5223 Modified: trunk/numpy/core/tests/test_regression.py Log: Add one-more test case using getmap to supplement the setmap test. 
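As background for the diff that follows: np.ix_ builds an open mesh from 1-D index arrays, so five length-k index arrays address a k**5-element block. The existing dp() case makes that product overflow on assignment; the new dp2() case checks the read path the same way. A minimal sketch of the behaviour being guarded (illustrative, not part of the commit; a small k is shown here, while the test uses an index length defined elsewhere that forces the overflow):

    import numpy as np

    a = np.ones((3,) * 5)
    i = np.array([0, 2, 1])
    mesh = np.ix_(i, i, i, i, i)   # open mesh of five 1-D index arrays
    block = a[mesh]                # "get" path, mirrored by dp2()
    a[mesh] = 0                    # "set" path, mirrored by dp()
    assert block.shape == (3, 3, 3, 3, 3)

With index arrays long enough that the resulting block cannot be represented, both paths are expected to raise ValueError rather than crash, which is what the two failUnlessRaises calls assert.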
Modified: trunk/numpy/core/tests/test_regression.py =================================================================== --- trunk/numpy/core/tests/test_regression.py 2008-05-22 06:43:22 UTC (rev 5222) +++ trunk/numpy/core/tests/test_regression.py 2008-05-22 15:09:28 UTC (rev 5223) @@ -1071,7 +1071,13 @@ a = np.ones((n,)*5) i = np.random.randint(0,n,size=thesize) a[np.ix_(i,i,i,i,i)] = 0 + def dp2(): + n = 3 + a = np.ones((n,)*5) + i = np.random.randint(0,n,size=thesize) + g = a[np.ix_(i,i,i,i,i)] self.failUnlessRaises(ValueError, dp) + self.failUnlessRaises(ValueError, dp2) if __name__ == "__main__": NumpyTest().run() From numpy-svn at scipy.org Thu May 22 13:18:17 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Thu, 22 May 2008 12:18:17 -0500 (CDT) Subject: [Numpy-svn] r5224 - trunk/numpy/ma/tests Message-ID: <20080522171817.2814139C634@scipy.org> Author: pierregm Date: 2008-05-22 12:18:16 -0500 (Thu, 22 May 2008) New Revision: 5224 Modified: trunk/numpy/ma/tests/test_mrecords.py Log: test_set_fields: filter out the warning Modified: trunk/numpy/ma/tests/test_mrecords.py =================================================================== --- trunk/numpy/ma/tests/test_mrecords.py 2008-05-22 15:09:28 UTC (rev 5223) +++ trunk/numpy/ma/tests/test_mrecords.py 2008-05-22 17:18:16 UTC (rev 5224) @@ -126,6 +126,8 @@ rdata = data.view(MaskedRecords) val = ma.array([10,20,30], mask=[1,0,0]) # + import warnings + warnings.simplefilter("ignore") rdata['num'] = val assert_equal(rdata.num, val) assert_equal(rdata.num.mask, [1,0,0]) From numpy-svn at scipy.org Thu May 22 14:06:55 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Thu, 22 May 2008 13:06:55 -0500 (CDT) Subject: [Numpy-svn] r5225 - in trunk/numpy/core: code_generators src Message-ID: <20080522180655.A999439C053@scipy.org> Author: charris Date: 2008-05-22 13:06:53 -0500 (Thu, 22 May 2008) New Revision: 5225 Modified: trunk/numpy/core/code_generators/array_api_order.txt trunk/numpy/core/src/arrayobject.c Log: Add PyArray_CompareString to the API. Modified: trunk/numpy/core/code_generators/array_api_order.txt =================================================================== --- trunk/numpy/core/code_generators/array_api_order.txt 2008-05-22 17:18:16 UTC (rev 5224) +++ trunk/numpy/core/code_generators/array_api_order.txt 2008-05-22 18:06:53 UTC (rev 5225) @@ -83,3 +83,4 @@ PyArray_Item_INCREF PyArray_Item_XDECREF PyArray_FieldNames +PyArray_CompareString Modified: trunk/numpy/core/src/arrayobject.c =================================================================== --- trunk/numpy/core/src/arrayobject.c 2008-05-22 17:18:16 UTC (rev 5224) +++ trunk/numpy/core/src/arrayobject.c 2008-05-22 18:06:53 UTC (rev 5225) @@ -4433,7 +4433,7 @@ return 0; } -/* +/*OBJECT_API */ static int PyArray_CompareString(char *s1, char *s2, size_t len) From numpy-svn at scipy.org Thu May 22 18:07:22 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Thu, 22 May 2008 17:07:22 -0500 (CDT) Subject: [Numpy-svn] r5226 - in trunk/numpy/core: code_generators src Message-ID: <20080522220722.9336339C536@scipy.org> Author: charris Date: 2008-05-22 17:07:16 -0500 (Thu, 22 May 2008) New Revision: 5226 Modified: trunk/numpy/core/code_generators/array_api_order.txt trunk/numpy/core/code_generators/multiarray_api_order.txt trunk/numpy/core/src/arrayobject.c Log: Add PyArray_CompareString to multiarray_api instead of array_api so as not to disturb the current order of the API. 
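Why the order matters: the code generators turn these text files into a positional pointer table (PyArray_API), and extensions compiled against one release address functions by slot index, so existing entries must keep their positions and new names can only be appended. A simplified, hypothetical illustration of that positional mapping (the real slot numbers also account for reserved type-object entries, so the indices below are not the actual ones, and api_slots is an editor-invented helper, not project code):

    # Simplified sketch: each non-comment line in an order file gets a
    # fixed slot; appending preserves existing slots, inserting does not.
    def api_slots(path, first_slot=0):
        names = [line.strip() for line in open(path)
                 if line.strip() and not line.startswith('#')]
        return dict((name, first_slot + k) for k, name in enumerate(names))

    slots = api_slots('multiarray_api_order.txt')
    # slots['PyArray_CompareString'] lands in the last slot; had the name
    # been inserted earlier in either file, every later function would
    # have shifted and binaries built against the old table would break.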
Modified: trunk/numpy/core/code_generators/array_api_order.txt =================================================================== --- trunk/numpy/core/code_generators/array_api_order.txt 2008-05-22 18:06:53 UTC (rev 5225) +++ trunk/numpy/core/code_generators/array_api_order.txt 2008-05-22 22:07:16 UTC (rev 5226) @@ -1,5 +1,6 @@ -# The functions in the numpy_core C API -# They are defined here so that the order is set. +# The functions in the numpy_core C API. They are defined +# here so that the order is set. Do not append to this +# list, append to multiarray_api_order.txt instead. PyArray_SetNumericOps PyArray_GetNumericOps PyArray_INCREF @@ -83,4 +84,3 @@ PyArray_Item_INCREF PyArray_Item_XDECREF PyArray_FieldNames -PyArray_CompareString Modified: trunk/numpy/core/code_generators/multiarray_api_order.txt =================================================================== --- trunk/numpy/core/code_generators/multiarray_api_order.txt 2008-05-22 18:06:53 UTC (rev 5225) +++ trunk/numpy/core/code_generators/multiarray_api_order.txt 2008-05-22 22:07:16 UTC (rev 5226) @@ -83,3 +83,4 @@ PyArray_SearchsideConverter PyArray_CheckAxis PyArray_OverflowMultiplyList +PyArray_CompareString Modified: trunk/numpy/core/src/arrayobject.c =================================================================== --- trunk/numpy/core/src/arrayobject.c 2008-05-22 18:06:53 UTC (rev 5225) +++ trunk/numpy/core/src/arrayobject.c 2008-05-22 22:07:16 UTC (rev 5226) @@ -4433,7 +4433,7 @@ return 0; } -/*OBJECT_API +/*MULTIARRAY_API */ static int PyArray_CompareString(char *s1, char *s2, size_t len) From numpy-svn at scipy.org Sat May 24 04:17:48 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sat, 24 May 2008 03:17:48 -0500 (CDT) Subject: [Numpy-svn] r5227 - branches Message-ID: <20080524081748.A252539C7B6@scipy.org> Author: jarrod.millman Date: 2008-05-24 03:17:45 -0500 (Sat, 24 May 2008) New Revision: 5227 Added: branches/1.1.x/ Log: creating final 1.1 maintenance branch Copied: branches/1.1.x (from rev 5226, trunk) From numpy-svn at scipy.org Sat May 24 04:19:23 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sat, 24 May 2008 03:19:23 -0500 (CDT) Subject: [Numpy-svn] r5228 - trunk/numpy Message-ID: <20080524081923.71A2639C7B6@scipy.org> Author: jarrod.millman Date: 2008-05-24 03:19:21 -0500 (Sat, 24 May 2008) New Revision: 5228 Modified: trunk/numpy/version.py Log: trunk open for 1.2 development series Modified: trunk/numpy/version.py =================================================================== --- trunk/numpy/version.py 2008-05-24 08:17:45 UTC (rev 5227) +++ trunk/numpy/version.py 2008-05-24 08:19:21 UTC (rev 5228) @@ -1,4 +1,4 @@ -version='1.1.0' +version='1.2.0' release=False if not release: From numpy-svn at scipy.org Sat May 24 10:19:42 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sat, 24 May 2008 09:19:42 -0500 (CDT) Subject: [Numpy-svn] r5229 - in trunk/numpy/core: . code_generators src Message-ID: <20080524141942.40662C7C020@scipy.org> Author: charris Date: 2008-05-24 09:19:36 -0500 (Sat, 24 May 2008) New Revision: 5229 Added: trunk/numpy/core/code_generators/numpy_api_order.txt Modified: trunk/numpy/core/SConstruct trunk/numpy/core/code_generators/generate_array_api.py trunk/numpy/core/src/arraymethods.c trunk/numpy/core/src/arrayobject.c trunk/numpy/core/src/arraytypes.inc.src trunk/numpy/core/src/multiarraymodule.c trunk/numpy/core/src/scalartypes.inc.src Log: Merge OBJECT_API and MULTIARRAY_API as NUMPY_API. 
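In the C sources this is a pure relabelling: functions previously marked with /*OBJECT_API ...*/ or /*MULTIARRAY_API ...*/ comments now all carry /*NUMPY_API ...*/, and the generator reads a single ordering file instead of two. A condensed sketch of the consolidated generator call, following the generate_array_api.py hunk in the diff below (meant to be run from numpy/core/code_generators, where genapi.py lives):

    import genapi

    # One tag, one ordering file, one list of exported functions.
    numpyapi_list = genapi.get_api_functions('NUMPY_API',
                                             'numpy_api_order.txt')

This replaces the earlier pair of get_api_functions('OBJECT_API', ...) and get_api_functions('MULTIARRAY_API', ...) calls.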
Modified: trunk/numpy/core/SConstruct =================================================================== --- trunk/numpy/core/SConstruct 2008-05-24 08:19:21 UTC (rev 5228) +++ trunk/numpy/core/SConstruct 2008-05-24 14:19:36 UTC (rev 5229) @@ -7,7 +7,7 @@ from numscons import get_python_inc, get_pythonlib_dir from numscons import GetNumpyEnvironment -from numscons import CheckCBLAS +from numscons import CheckCBLAS from numscons import write_info from scons_support import CheckBrokenMathlib, define_no_smp, \ @@ -32,7 +32,7 @@ # numpyconfig_sym will keep the values of some configuration variables, the one # needed for the public numpy API. - + # Convention: list of tuples (definition, value). value: # - 0: #undef definition # - 1: #define definition @@ -63,12 +63,12 @@ # We check declaration AND type because that's how distutils does it. if config.CheckDeclaration('PY_LONG_LONG', includes = '#include \n'): - st = config.CheckTypeSize('PY_LONG_LONG', + st = config.CheckTypeSize('PY_LONG_LONG', includes = '#include \n') assert not st == 0 - numpyconfig_sym.append(('DEFINE_NPY_SIZEOF_LONGLONG', + numpyconfig_sym.append(('DEFINE_NPY_SIZEOF_LONGLONG', '#define NPY_SIZEOF_LONGLONG %d' % st)) - numpyconfig_sym.append(('DEFINE_NPY_SIZEOF_PY_LONG_LONG', + numpyconfig_sym.append(('DEFINE_NPY_SIZEOF_PY_LONG_LONG', '#define NPY_SIZEOF_PY_LONG_LONG %d' % st)) else: numpyconfig_sym.append(('DEFINE_NPY_SIZEOF_LONGLONG', '')) @@ -84,7 +84,7 @@ #---------------------- if is_npy_no_signal(): numpyconfig_sym.append(('DEFINE_NPY_NO_SIGNAL', '#define NPY_NO_SIGNAL\n')) - config.Define('__NPY_PRIVATE_NO_SIGNAL', + config.Define('__NPY_PRIVATE_NO_SIGNAL', comment = "define to 1 to disable SMP support ") else: numpyconfig_sym.append(('DEFINE_NPY_NO_SIGNAL', '')) @@ -99,11 +99,11 @@ numpyconfig_sym.append(('NPY_NO_SMP', nosmp)) #---------------------- -# Checking the mathlib +# Checking the mathlib #---------------------- mlibs = [[], ['m'], ['cpml']] mathlib = os.environ.get('MATHLIB') -if mathlib: +if mathlib: mlibs.insert(0, mathlib) mlib = check_mlibs(config, mlibs) @@ -157,10 +157,10 @@ #------------------------------------------------------- # Define the function PyOS_ascii_strod if not available #------------------------------------------------------- -if not config.CheckDeclaration('PyOS_ascii_strtod', +if not config.CheckDeclaration('PyOS_ascii_strtod', includes = "#include "): if config.CheckFunc('strtod'): - config.Define('PyOS_ascii_strtod', 'strtod', + config.Define('PyOS_ascii_strtod', 'strtod', "Define to a function to use as a replacement for "\ "PyOS_ascii_strtod if not available in python header") @@ -175,7 +175,7 @@ (a, os.name, sys.platform) if a == 'AMD64': distutils_use_sdk = 1 - config.Define('DISTUTILS_USE_SDK', distutils_use_sdk, + config.Define('DISTUTILS_USE_SDK', distutils_use_sdk, "define to 1 to disable SMP support ") #-------------- @@ -203,7 +203,7 @@ env['SUBST_DICT'] = config_dict include_dir = 'include/numpy' -env.SubstInFile(pjoin(env['build_dir'], 'numpyconfig.h'), +env.SubstInFile(pjoin(env['build_dir'], 'numpyconfig.h'), pjoin(env['src_dir'], include_dir, 'numpyconfig.h.in')) env['CONFIG_H_GEN'] = numpyconfig_sym @@ -229,11 +229,10 @@ umath = env.GenerateUmath('__umath_generated', pjoin('code_generators', 'generate_umath.py')) -multiarray_api = env.GenerateMultiarrayApi('multiarray_api', - [ pjoin('code_generators', 'array_api_order.txt'), - pjoin('code_generators', 'multiarray_api_order.txt')]) +multiarray_api = env.GenerateMultiarrayApi('multiarray_api', + [ 
pjoin('code_generators', 'numpy_api_order.txt')]) -ufunc_api = env.GenerateUfuncApi('ufunc_api', +ufunc_api = env.GenerateUfuncApi('ufunc_api', pjoin('code_generators', 'ufunc_api_order.txt')) env.Append(CPPPATH = [pjoin(env['src_dir'], 'include'), env['build_dir']]) @@ -257,7 +256,7 @@ #------------------------ # Build scalarmath module #------------------------ -scalarmathmodule = env.NumpyPythonExtension('scalarmath', +scalarmathmodule = env.NumpyPythonExtension('scalarmath', source = scalarmathmodule_src) #---------------------- Modified: trunk/numpy/core/code_generators/generate_array_api.py =================================================================== --- trunk/numpy/core/code_generators/generate_array_api.py 2008-05-24 08:19:21 UTC (rev 5228) +++ trunk/numpy/core/code_generators/generate_array_api.py 2008-05-24 14:19:36 UTC (rev 5229) @@ -118,6 +118,12 @@ }; """ +c_api_header = """ +=========== +Numpy C-API +=========== +""" + def generate_api(output_dir, force=False): basename = 'multiarray_api' @@ -125,7 +131,7 @@ c_file = os.path.join(output_dir, '__%s.c' % basename) d_file = os.path.join(output_dir, '%s.txt' % basename) targets = (h_file, c_file, d_file) - sources = ['array_api_order.txt', 'multiarray_api_order.txt'] + sources = ['numpy_api_order.txt'] if (not force and not genapi.should_rebuild(targets, sources + [__file__])): return targets @@ -139,17 +145,11 @@ c_file = targets[1] doc_file = targets[2] - objectapi_list = genapi.get_api_functions('OBJECT_API', - sources[0]) - multiapi_list = genapi.get_api_functions('MULTIARRAY_API', - sources[1]) - # API fixes for __arrayobject_api.h + numpyapi_list = genapi.get_api_functions('NUMPY_API', sources[0]) + # API fixes for __arrayobject_api.h fixed = 10 numtypes = len(types) + fixed - numobject = len(objectapi_list) + numtypes - nummulti = len(multiapi_list) - numtotal = numobject + nummulti module_list = [] extension_list = [] @@ -167,14 +167,9 @@ extension_list.append(astr) # set up object API - genapi.add_api_list(numtypes, 'PyArray_API', objectapi_list, + genapi.add_api_list(numtypes, 'PyArray_API', numpyapi_list, module_list, extension_list, init_list) - # set up multiarray module API - genapi.add_api_list(numobject, 'PyArray_API', multiapi_list, - module_list, extension_list, init_list) - - # Write to header fid = open(header_file, 'w') s = h_template % ('\n'.join(module_list), '\n'.join(extension_list)) @@ -189,25 +184,10 @@ # write to documentation fid = open(doc_file, 'w') - fid.write(''' -=========== -Numpy C-API -=========== - -Object API -========== -''') - for func in objectapi_list: + fid.write(c_api_header) + for func in numpyapi_list: fid.write(func.to_ReST()) fid.write('\n\n') - fid.write(''' - -Multiarray API -============== -''') - for func in multiapi_list: - fid.write(func.to_ReST()) - fid.write('\n\n') fid.close() return targets Added: trunk/numpy/core/code_generators/numpy_api_order.txt =================================================================== --- trunk/numpy/core/code_generators/numpy_api_order.txt 2008-05-24 08:19:21 UTC (rev 5228) +++ trunk/numpy/core/code_generators/numpy_api_order.txt 2008-05-24 14:19:36 UTC (rev 5229) @@ -0,0 +1,172 @@ +# The functions in the numpy C API. They are defined +# here so that the order is set. Append new functions +# to the end of the list. 
+PyArray_SetNumericOps +PyArray_GetNumericOps +PyArray_INCREF +PyArray_XDECREF +PyArray_SetStringFunction +PyArray_DescrFromType +PyArray_TypeObjectFromType +PyArray_Zero +PyArray_One +PyArray_CastToType +PyArray_CastTo +PyArray_CastAnyTo +PyArray_CanCastSafely +PyArray_CanCastTo +PyArray_ObjectType +PyArray_DescrFromObject +PyArray_ConvertToCommonType +PyArray_DescrFromScalar +PyArray_DescrFromTypeObject +PyArray_Size +PyArray_Scalar +PyArray_FromScalar +PyArray_ScalarAsCtype +PyArray_CastScalarToCtype +PyArray_CastScalarDirect +PyArray_ScalarFromObject +PyArray_GetCastFunc +PyArray_FromDims +PyArray_FromDimsAndDataAndDescr +PyArray_FromAny +PyArray_EnsureArray +PyArray_EnsureAnyArray +PyArray_FromFile +PyArray_FromString +PyArray_FromBuffer +PyArray_FromIter +PyArray_Return +PyArray_GetField +PyArray_SetField +PyArray_Byteswap +PyArray_Resize +PyArray_MoveInto +PyArray_CopyInto +PyArray_CopyAnyInto +PyArray_CopyObject +PyArray_NewCopy +PyArray_ToList +PyArray_ToString +PyArray_ToFile +PyArray_Dump +PyArray_Dumps +PyArray_ValidType +PyArray_UpdateFlags +PyArray_New +PyArray_NewFromDescr +PyArray_DescrNew +PyArray_DescrNewFromType +PyArray_GetPriority +PyArray_IterNew +PyArray_MultiIterNew +PyArray_PyIntAsInt +PyArray_PyIntAsIntp +PyArray_Broadcast +PyArray_FillObjectArray +PyArray_FillWithScalar +PyArray_CheckStrides +PyArray_DescrNewByteorder +PyArray_IterAllButAxis +PyArray_CheckFromAny +PyArray_FromArray +PyArray_FromInterface +PyArray_FromStructInterface +PyArray_FromArrayAttr +PyArray_ScalarKind +PyArray_CanCoerceScalar +PyArray_NewFlagsObject +PyArray_CanCastScalar +PyArray_CompareUCS4 +PyArray_RemoveSmallest +PyArray_ElementStrides +PyArray_Item_INCREF +PyArray_Item_XDECREF +PyArray_FieldNames +PyArray_Transpose +PyArray_TakeFrom +PyArray_PutTo +PyArray_PutMask +PyArray_Repeat +PyArray_Choose +PyArray_Sort +PyArray_ArgSort +PyArray_SearchSorted +PyArray_ArgMax +PyArray_ArgMin +PyArray_Reshape +PyArray_Newshape +PyArray_Squeeze +PyArray_View +PyArray_SwapAxes +PyArray_Max +PyArray_Min +PyArray_Ptp +PyArray_Mean +PyArray_Trace +PyArray_Diagonal +PyArray_Clip +PyArray_Conjugate +PyArray_Nonzero +PyArray_Std +PyArray_Sum +PyArray_CumSum +PyArray_Prod +PyArray_CumProd +PyArray_All +PyArray_Any +PyArray_Compress +PyArray_Flatten +PyArray_Ravel +PyArray_MultiplyList +PyArray_MultiplyIntList +PyArray_GetPtr +PyArray_CompareLists +PyArray_AsCArray +PyArray_As1D +PyArray_As2D +PyArray_Free +PyArray_Converter +PyArray_IntpFromSequence +PyArray_Concatenate +PyArray_InnerProduct +PyArray_MatrixProduct +PyArray_CopyAndTranspose +PyArray_Correlate +PyArray_TypestrConvert +PyArray_DescrConverter +PyArray_DescrConverter2 +PyArray_IntpConverter +PyArray_BufferConverter +PyArray_AxisConverter +PyArray_BoolConverter +PyArray_ByteorderConverter +PyArray_OrderConverter +PyArray_EquivTypes +PyArray_Zeros +PyArray_Empty +PyArray_Where +PyArray_Arange +PyArray_ArangeObj +PyArray_SortkindConverter +PyArray_LexSort +PyArray_Round +PyArray_EquivTypenums +PyArray_RegisterDataType +PyArray_RegisterCastFunc +PyArray_RegisterCanCast +PyArray_InitArrFuncs +PyArray_IntTupleFromIntp +PyArray_TypeNumFromName +PyArray_ClipmodeConverter +PyArray_OutputConverter +PyArray_BroadcastToShape +_PyArray_SigintHandler +_PyArray_GetSigintBuf +PyArray_DescrAlignConverter +PyArray_DescrAlignConverter2 +PyArray_SearchsideConverter +PyArray_CheckAxis +PyArray_OverflowMultiplyList +PyArray_CompareString Modified: trunk/numpy/core/src/arraymethods.c =================================================================== --- 
trunk/numpy/core/src/arraymethods.c 2008-05-24 08:19:21 UTC (rev 5228) +++ trunk/numpy/core/src/arraymethods.c 2008-05-24 14:19:36 UTC (rev 5229) @@ -246,7 +246,7 @@ /* steals typed reference */ -/*OBJECT_API +/*NUMPY_API Get a subset of bytes from each element of the array */ static PyObject * @@ -295,7 +295,7 @@ } -/*OBJECT_API +/*NUMPY_API Set a subset of bytes from each element of the array */ static int @@ -351,7 +351,7 @@ /* This doesn't change the descriptor just the actual data... */ -/*OBJECT_API*/ +/*NUMPY_API*/ static PyObject * PyArray_Byteswap(PyArrayObject *self, Bool inplace) { @@ -1351,7 +1351,7 @@ return Py_None; } -/*OBJECT_API*/ +/*NUMPY_API*/ static int PyArray_Dump(PyObject *self, PyObject *file, int protocol) { @@ -1376,7 +1376,7 @@ return 0; } -/*OBJECT_API*/ +/*NUMPY_API*/ static PyObject * PyArray_Dumps(PyObject *self, int protocol) { Modified: trunk/numpy/core/src/arrayobject.c =================================================================== --- trunk/numpy/core/src/arrayobject.c 2008-05-24 08:19:21 UTC (rev 5228) +++ trunk/numpy/core/src/arrayobject.c 2008-05-24 14:19:36 UTC (rev 5229) @@ -21,7 +21,7 @@ */ /*#include */ -/*OBJECT_API +/*NUMPY_API * Get Priority from object */ static double @@ -68,7 +68,7 @@ */ -/*OBJECT_API +/*NUMPY_API Get pointer to zero of correct type for array. */ static char * @@ -103,7 +103,7 @@ return zeroval; } -/*OBJECT_API +/*NUMPY_API Get pointer to one of correct type for array */ static char * @@ -149,7 +149,7 @@ /* Incref all objects found at this record */ -/*OBJECT_API +/*NUMPY_API */ static void PyArray_Item_INCREF(char *data, PyArray_Descr *descr) @@ -181,7 +181,7 @@ } /* XDECREF all objects found at this record */ -/*OBJECT_API +/*NUMPY_API */ static void PyArray_Item_XDECREF(char *data, PyArray_Descr *descr) @@ -216,7 +216,7 @@ /* Used for arrays of python objects to increment the reference count of */ /* every python object in the array. */ -/*OBJECT_API +/*NUMPY_API For object arrays, increment all internal references. */ static int @@ -272,7 +272,7 @@ return 0; } -/*OBJECT_API +/*NUMPY_API Decrement all internal references for object arrays. (or arrays with object fields) */ @@ -535,7 +535,7 @@ /* Helper functions */ -/*OBJECT_API*/ +/*NUMPY_API*/ static intp PyArray_PyIntAsIntp(PyObject *o) { @@ -635,7 +635,7 @@ static PyObject *array_int(PyArrayObject *v); -/*OBJECT_API*/ +/*NUMPY_API*/ static int PyArray_PyIntAsInt(PyObject *o) { @@ -745,7 +745,7 @@ return NULL; } -/*OBJECT_API +/*NUMPY_API Compute the size of an array (in number of items) */ static intp @@ -1137,7 +1137,7 @@ } } -/*OBJECT_API +/*NUMPY_API * Copy an Array into another array -- memory must not overlap * Does not require src and dest to have "broadcastable" shapes * (only the same number of elements). @@ -1216,7 +1216,7 @@ return 0; } -/*OBJECT_API +/*NUMPY_API * Copy an Array into another array -- memory must not overlap. */ static int @@ -1226,7 +1226,7 @@ } -/*OBJECT_API +/*NUMPY_API * Move the memory of one array into another. */ static int @@ -1236,7 +1236,7 @@ } -/*OBJECT_API*/ +/*NUMPY_API*/ static int PyArray_CopyObject(PyArrayObject *dest, PyObject *src_object) { @@ -1300,7 +1300,7 @@ /* They all zero-out the memory as previously done */ /* steals reference to descr -- and enforces native byteorder on it.*/ -/*OBJECT_API +/*NUMPY_API Like FromDimsAndData but uses the Descr structure instead of typecode as input. 
*/ @@ -1333,7 +1333,7 @@ return ret; } -/*OBJECT_API +/*NUMPY_API Construct an empty array from dimensions and typenum */ static PyObject * @@ -1356,7 +1356,7 @@ /* end old calls */ -/*OBJECT_API +/*NUMPY_API Copy an array. */ static PyObject * @@ -1388,7 +1388,7 @@ static PyObject *array_big_item(PyArrayObject *, intp); /* Does nothing with descr (cannot be NULL) */ -/*OBJECT_API +/*NUMPY_API Get scalar-equivalent to a region of memory described by a descriptor. */ static PyObject * @@ -1542,7 +1542,7 @@ /* Return Array Scalar if 0-d array object is encountered */ -/*OBJECT_API +/*NUMPY_API Return either an array or the appropriate Python object if the array is 0d and matches a Python type. */ @@ -1572,7 +1572,7 @@ } -/*MULTIARRAY_API +/*NUMPY_API Initialize arrfuncs to NULL */ static void @@ -1643,7 +1643,7 @@ found. Only works for user-defined data-types. */ -/*MULTIARRAY_API +/*NUMPY_API */ static int PyArray_TypeNumFromName(char *str) @@ -1665,7 +1665,7 @@ needs the userdecrs table and PyArray_NUMUSER variables defined in arraytypes.inc */ -/*MULTIARRAY_API +/*NUMPY_API Register Data type Does not change the reference count of descr */ @@ -1717,7 +1717,7 @@ return typenum; } -/*MULTIARRAY_API +/*NUMPY_API Register Casting Function Replaces any function currently stored. */ @@ -1762,7 +1762,7 @@ return newtypes; } -/*MULTIARRAY_API +/*NUMPY_API Register a type number indicating that a descriptor can be cast to it safely */ @@ -1811,7 +1811,7 @@ This will need the addition of a Fortran-order iterator. */ -/*OBJECT_API +/*NUMPY_API To File */ static int @@ -1952,7 +1952,7 @@ return 0; } -/*OBJECT_API +/*NUMPY_API * To List */ static PyObject * @@ -1987,7 +1987,7 @@ return lp; } -/*OBJECT_API*/ +/*NUMPY_API*/ static PyObject * PyArray_ToString(PyArrayObject *self, NPY_ORDER order) { @@ -3365,7 +3365,7 @@ } -/*OBJECT_API +/*NUMPY_API Set internal structure with number functions that all arrays will use */ int @@ -3413,7 +3413,7 @@ (PyDict_SetItemString(dict, #op, n_ops.op)==-1)) \ goto fail; -/*OBJECT_API +/*NUMPY_API Get dictionary showing number functions that all arrays will use */ static PyObject * @@ -4362,7 +4362,7 @@ static PyObject *PyArray_StrFunction=NULL; static PyObject *PyArray_ReprFunction=NULL; -/*OBJECT_API +/*NUMPY_API Set the array print function to be a Python function. */ static void @@ -4417,7 +4417,7 @@ -/*OBJECT_API +/*NUMPY_API */ static int PyArray_CompareUCS4(npy_ucs4 *s1, npy_ucs4 *s2, register size_t len) @@ -4433,7 +4433,7 @@ return 0; } -/*MULTIARRAY_API +/*NUMPY_API */ static int PyArray_CompareString(char *s1, char *s2, size_t len) @@ -5028,7 +5028,7 @@ } -/*MULTIARRAY_API +/*NUMPY_API PyArray_CheckAxis */ static PyObject * @@ -5079,7 +5079,7 @@ #include "arraymethods.c" /* Lifted from numarray */ -/*MULTIARRAY_API +/*NUMPY_API PyArray_IntTupleFromIntp */ static PyObject * @@ -5107,7 +5107,7 @@ /* Returns the number of dimensions or -1 if an error occurred */ /* vals must be large enough to hold maxvals */ -/*MULTIARRAY_API +/*NUMPY_API PyArray_IntpFromSequence */ static int @@ -5255,7 +5255,7 @@ } -/*OBJECT_API +/*NUMPY_API */ static int PyArray_ElementStrides(PyObject *arr) @@ -5271,7 +5271,7 @@ return 1; } -/*OBJECT_API +/*NUMPY_API Update Several Flags at once. */ static void @@ -5321,7 +5321,7 @@ or negative). 
*/ -/*OBJECT_API*/ +/*NUMPY_API*/ static Bool PyArray_CheckStrides(int elsize, int nd, intp numbytes, intp offset, intp *dims, intp *newstrides) @@ -5389,7 +5389,7 @@ return itemsize; } -/*OBJECT_API +/*NUMPY_API Generic new array creation routine. */ static PyObject * @@ -5492,7 +5492,7 @@ /* steals a reference to descr (even on failure) */ -/*OBJECT_API +/*NUMPY_API Generic new array creation routine. */ static PyObject * @@ -5718,7 +5718,7 @@ } -/*OBJECT_API +/*NUMPY_API Resize (reallocate data). Only works if nothing else is referencing this array and it is contiguous. If refcheck is 0, then the reference count is not checked @@ -5888,7 +5888,7 @@ } /* Assumes contiguous */ -/*OBJECT_API*/ +/*NUMPY_API*/ static void PyArray_FillObjectArray(PyArrayObject *arr, PyObject *obj) { @@ -5920,7 +5920,7 @@ } } -/*OBJECT_API*/ +/*NUMPY_API*/ static int PyArray_FillWithScalar(PyArrayObject *arr, PyObject *obj) { @@ -7738,7 +7738,7 @@ } -/*OBJECT_API +/*NUMPY_API Is the typenum valid? */ static int @@ -7758,7 +7758,7 @@ /* For backward compatibility */ /* steals reference to at --- cannot be NULL*/ -/*OBJECT_API +/*NUMPY_API *Cast an array using typecode structure. */ static PyObject * @@ -7819,7 +7819,7 @@ } -/*OBJECT_API +/*NUMPY_API Get a cast function to cast from the input descriptor to the output type_number (must be a registered data-type). Returns NULL if un-successful. @@ -8017,7 +8017,7 @@ * as the size of the casting buffer. */ -/*OBJECT_API +/*NUMPY_API * Cast to an already created array. */ static int @@ -8181,7 +8181,7 @@ return retval; } -/*OBJECT_API +/*NUMPY_API Cast to an already created array. Arrays don't have to be "broadcastable" Only requirement is they have the same number of elements. */ @@ -8231,7 +8231,7 @@ /* steals reference to newtype --- acc. 
NULL */ -/*OBJECT_API*/ +/*NUMPY_API*/ static PyObject * PyArray_FromArray(PyArrayObject *arr, PyArray_Descr *newtype, int flags) { @@ -8489,7 +8489,7 @@ return descr; } -/*OBJECT_API */ +/*NUMPY_API */ static PyObject * PyArray_FromStructInterface(PyObject *input) { @@ -8545,7 +8545,7 @@ #define PyIntOrLong_Check(obj) (PyInt_Check(obj) || PyLong_Check(obj)) -/*OBJECT_API*/ +/*NUMPY_API*/ static PyObject * PyArray_FromInterface(PyObject *input) { @@ -8696,7 +8696,7 @@ return NULL; } -/*OBJECT_API*/ +/*NUMPY_API*/ static PyObject * PyArray_FromArrayAttr(PyObject *op, PyArray_Descr *typecode, PyObject *context) { @@ -8745,7 +8745,7 @@ /* Does not check for ENSURECOPY and NOTSWAPPED in flags */ /* Steals a reference to newtype --- which can be NULL */ -/*OBJECT_API*/ +/*NUMPY_API*/ static PyObject * PyArray_FromAny(PyObject *op, PyArray_Descr *newtype, int min_depth, int max_depth, int flags, PyObject *context) @@ -8873,14 +8873,14 @@ } /* new reference -- accepts NULL for mintype*/ -/*OBJECT_API*/ +/*NUMPY_API*/ static PyArray_Descr * PyArray_DescrFromObject(PyObject *op, PyArray_Descr *mintype) { return _array_find_type(op, mintype, MAX_DIMS); } -/*OBJECT_API +/*NUMPY_API Return the typecode of the array a Python object would be converted to */ @@ -8943,7 +8943,7 @@ /* steals a reference to descr -- accepts NULL */ -/*OBJECT_API*/ +/*NUMPY_API*/ static PyObject * PyArray_CheckFromAny(PyObject *op, PyArray_Descr *descr, int min_depth, int max_depth, int requires, PyObject *context) @@ -8984,7 +8984,7 @@ /* Because it decrefs op if any conversion needs to take place so it can be used like PyArray_EnsureArray(some_function(...)) */ -/*OBJECT_API*/ +/*NUMPY_API*/ static PyObject * PyArray_EnsureArray(PyObject *op) { @@ -9009,7 +9009,7 @@ return new; } -/*OBJECT_API*/ +/*NUMPY_API*/ static PyObject * PyArray_EnsureAnyArray(PyObject *op) { @@ -9017,7 +9017,7 @@ return PyArray_EnsureArray(op); } -/*OBJECT_API +/*NUMPY_API Check the type coercion rules. */ static int @@ -9124,7 +9124,7 @@ } /* leaves reference count alone --- cannot be NULL*/ -/*OBJECT_API*/ +/*NUMPY_API*/ static Bool PyArray_CanCastTo(PyArray_Descr *from, PyArray_Descr *to) { @@ -9156,7 +9156,7 @@ return ret; } -/*OBJECT_API +/*NUMPY_API See if array scalars can be cast. */ static Bool @@ -9177,7 +9177,7 @@ /* Aided by Peter J. Verveer's nd_image package and numpy's arraymap ****/ /* and Python's array iterator ***/ -/*OBJECT_API +/*NUMPY_API Get Iterator. */ static PyObject * @@ -9221,7 +9221,7 @@ return (PyObject *)it; } -/*MULTIARRAY_API +/*NUMPY_API Get Iterator broadcast to a particular shape */ static PyObject * @@ -9288,7 +9288,7 @@ -/*OBJECT_API +/*NUMPY_API Get Iterator that iterates over all but one axis (don't use this with PyArray_ITER_GOTO1D). The axis will be over-written if negative with the axis having the smallest stride. @@ -9338,7 +9338,7 @@ /* don't use with PyArray_ITER_GOTO1D because factors are not adjusted */ -/*OBJECT_API +/*NUMPY_API Adjusts previously broadcasted iterators so that the axis with the smallest sum of iterator strides is not iterated over. Returns dimension which is smallest in the range [0,multi->nd). @@ -10119,7 +10119,7 @@ /* Adjust dimensionality and strides for index object iterators --- i.e. 
broadcast */ -/*OBJECT_API*/ +/*NUMPY_API*/ static int PyArray_Broadcast(PyArrayMultiIterObject *mit) { @@ -10761,7 +10761,7 @@ /** END of Subscript Iterator **/ -/*OBJECT_API +/*NUMPY_API Get MultiIterator, */ static PyObject * @@ -11033,7 +11033,7 @@ 0 /* tp_weaklist */ }; -/*OBJECT_API*/ +/*NUMPY_API*/ static PyArray_Descr * PyArray_DescrNewFromType(int type_num) { @@ -11060,7 +11060,7 @@ **/ /* base cannot be NULL */ -/*OBJECT_API*/ +/*NUMPY_API*/ static PyArray_Descr * PyArray_DescrNew(PyArray_Descr *base) { @@ -11701,7 +11701,7 @@ byte-order is not changed but any fields are: */ -/*OBJECT_API +/*NUMPY_API Deep bytorder change of a data-type descriptor *** Leaves reference count of self unchanged --- does not DECREF self *** */ @@ -12088,7 +12088,7 @@ /** Array Flags Object **/ -/*OBJECT_API +/*NUMPY_API Get New ArrayFlagsObject */ static PyObject * Modified: trunk/numpy/core/src/arraytypes.inc.src =================================================================== --- trunk/numpy/core/src/arraytypes.inc.src 2008-05-24 08:19:21 UTC (rev 5228) +++ trunk/numpy/core/src/arraytypes.inc.src 2008-05-24 14:19:36 UTC (rev 5229) @@ -2458,7 +2458,7 @@ &VOID_Descr, }; -/*OBJECT_API +/*NUMPY_API Get the PyArray_Descr structure for a type. */ static PyArray_Descr * Modified: trunk/numpy/core/src/multiarraymodule.c =================================================================== --- trunk/numpy/core/src/multiarraymodule.c 2008-05-24 08:19:21 UTC (rev 5228) +++ trunk/numpy/core/src/multiarraymodule.c 2008-05-24 14:19:36 UTC (rev 5229) @@ -100,7 +100,7 @@ /* An Error object -- rarely used? */ static PyObject *MultiArrayError; -/*MULTIARRAY_API +/*NUMPY_API Multiply a List of ints */ static int @@ -111,7 +111,7 @@ return s; } -/*MULTIARRAY_API +/*NUMPY_API Multiply a List */ static intp @@ -122,7 +122,7 @@ return s; } -/*MULTIARRAY_API +/*NUMPY_API Multiply a List of Non-negative numbers with over-flow detection. */ static intp @@ -138,7 +138,7 @@ return s; } -/*MULTIARRAY_API +/*NUMPY_API Produce a pointer into array */ static void * @@ -152,7 +152,7 @@ return (void *)dptr; } -/*MULTIARRAY_API +/*NUMPY_API Get axis from an object (possibly None) -- a converter function, */ static int @@ -170,7 +170,7 @@ return PY_SUCCEED; } -/*MULTIARRAY_API +/*NUMPY_API Compare Lists */ static int @@ -184,7 +184,7 @@ } /* steals a reference to type -- accepts NULL */ -/*MULTIARRAY_API +/*NUMPY_API View */ static PyObject * @@ -222,7 +222,7 @@ /* Returns a contiguous array */ -/*MULTIARRAY_API +/*NUMPY_API Ravel */ static PyObject * @@ -260,7 +260,7 @@ return ret; } -/*MULTIARRAY_API +/*NUMPY_API Round */ static PyObject * @@ -382,7 +382,7 @@ } -/*MULTIARRAY_API +/*NUMPY_API Flatten */ static PyObject * @@ -416,7 +416,7 @@ / * Not recommended */ -/*MULTIARRAY_API +/*NUMPY_API Reshape an array */ static PyObject * @@ -624,7 +624,7 @@ copy-only-if-necessary */ -/*MULTIARRAY_API +/*NUMPY_API New shape for an array */ static PyObject * @@ -771,7 +771,7 @@ return the same array. 
*/ -/*MULTIARRAY_API*/ +/*NUMPY_API*/ static PyObject * PyArray_Squeeze(PyArrayObject *self) { @@ -811,7 +811,7 @@ } -/*MULTIARRAY_API +/*NUMPY_API Mean */ static PyObject * @@ -843,7 +843,7 @@ } /* Set variance to 1 to by-pass square-root calculation and return variance */ -/*MULTIARRAY_API +/*NUMPY_API Std */ static PyObject * @@ -962,7 +962,7 @@ } -/*MULTIARRAY_API +/*NUMPY_API Sum */ static PyObject * @@ -978,7 +978,7 @@ return ret; } -/*MULTIARRAY_API +/*NUMPY_API Prod */ static PyObject * @@ -994,7 +994,7 @@ return ret; } -/*MULTIARRAY_API +/*NUMPY_API CumSum */ static PyObject * @@ -1010,7 +1010,7 @@ return ret; } -/*MULTIARRAY_API +/*NUMPY_API CumProd */ static PyObject * @@ -1027,7 +1027,7 @@ return ret; } -/*MULTIARRAY_API +/*NUMPY_API Any */ static PyObject * @@ -1044,7 +1044,7 @@ return ret; } -/*MULTIARRAY_API +/*NUMPY_API All */ static PyObject * @@ -1062,7 +1062,7 @@ } -/*MULTIARRAY_API +/*NUMPY_API Compress */ static PyObject * @@ -1091,7 +1091,7 @@ return ret; } -/*MULTIARRAY_API +/*NUMPY_API Nonzero */ static PyObject * @@ -1188,7 +1188,7 @@ return res2; } -/*MULTIARRAY_API +/*NUMPY_API Clip */ static PyObject * @@ -1417,7 +1417,7 @@ } -/*MULTIARRAY_API +/*NUMPY_API Conjugate */ static PyObject * @@ -1447,7 +1447,7 @@ } } -/*MULTIARRAY_API +/*NUMPY_API Trace */ static PyObject * @@ -1463,7 +1463,7 @@ return ret; } -/*MULTIARRAY_API +/*NUMPY_API Diagonal */ static PyObject * @@ -1598,7 +1598,7 @@ */ /* steals a reference to typedescr -- can be NULL*/ -/*MULTIARRAY_API +/*NUMPY_API Simulat a C-array */ static int @@ -1657,7 +1657,7 @@ /* Deprecated --- Use PyArray_AsCArray instead */ -/*MULTIARRAY_API +/*NUMPY_API Convert to a 1D C-array */ static int @@ -1673,7 +1673,7 @@ return 0; } -/*MULTIARRAY_API +/*NUMPY_API Convert to a 2D C-array */ static int @@ -1693,7 +1693,7 @@ /* End Deprecated */ -/*MULTIARRAY_API +/*NUMPY_API Free pointers created if As2D is called */ static int @@ -1748,7 +1748,7 @@ /* If axis is MAX_DIMS or bigger, then each sequence object will be flattened before concatenation */ -/*MULTIARRAY_API +/*NUMPY_API Concatenate an arbitrary Python sequence into an array. */ static PyObject * @@ -1859,7 +1859,7 @@ return NULL; } -/*MULTIARRAY_API +/*NUMPY_API SwapAxes */ static PyObject * @@ -1906,7 +1906,7 @@ return ret; } -/*MULTIARRAY_API +/*NUMPY_API Return Transpose. */ static PyObject * @@ -1978,7 +1978,7 @@ return (PyObject *)ret; } -/*MULTIARRAY_API +/*NUMPY_API Repeat the array. */ static PyObject * @@ -2101,7 +2101,7 @@ } -/*OBJECT_API*/ +/*NUMPY_API*/ static NPY_SCALARKIND PyArray_ScalarKind(int typenum, PyArrayObject **arr) { @@ -2128,7 +2128,7 @@ return PyArray_OBJECT_SCALAR; } -/*OBJECT_API*/ +/*NUMPY_API*/ static int PyArray_CanCoerceScalar(int thistype, int neededtype, NPY_SCALARKIND scalar) @@ -2168,7 +2168,7 @@ } -/*OBJECT_API*/ +/*NUMPY_API*/ static PyArrayObject ** PyArray_ConvertToCommonType(PyObject *op, int *retn) { @@ -2284,7 +2284,7 @@ return NULL; } -/*MULTIARRAY_API +/*NUMPY_API */ static PyObject * PyArray_Choose(PyArrayObject *ip, PyObject *op, PyArrayObject *ret, @@ -2645,7 +2645,7 @@ } \ } -/*MULTIARRAY_API +/*NUMPY_API Sort an array in-place */ static int @@ -2731,7 +2731,7 @@ global_obj); } -/*MULTIARRAY_API +/*NUMPY_API ArgSort an array */ static PyObject * @@ -2821,7 +2821,7 @@ } -/*MULTIARRAY_API +/*NUMPY_API LexSort an array providing indices that will sort a collection of arrays lexicographically. 
The first key is sorted on first, followed by the second key -- requires that arg"merge"sort is available for each sort_key @@ -3086,7 +3086,7 @@ } -/*MULTIARRAY_API +/*NUMPY_API Convert object to searchsorted side */ static int @@ -3114,7 +3114,7 @@ } -/*MULTIARRAY_API +/*NUMPY_API Numeric.searchsorted(a,v) */ static PyObject * @@ -3214,7 +3214,7 @@ /* Could perhaps be redone to not make contiguous arrays */ -/*MULTIARRAY_API +/*NUMPY_API Numeric.innerproduct(a,v) */ static PyObject * @@ -3326,7 +3326,7 @@ /* just like inner product but does the swapaxes stuff on the fly */ -/*MULTIARRAY_API +/*NUMPY_API Numeric.matrixproduct(a,v) */ static PyObject * @@ -3458,7 +3458,7 @@ return NULL; } -/*MULTIARRAY_API +/*NUMPY_API Fast Copy and Transpose */ static PyObject * @@ -3521,7 +3521,7 @@ return ret; } -/*MULTIARRAY_API +/*NUMPY_API Numeric.correlate(a1,a2,mode) */ static PyObject * @@ -3631,7 +3631,7 @@ } -/*MULTIARRAY_API +/*NUMPY_API ArgMin */ static PyObject * @@ -3661,7 +3661,7 @@ return ret; } -/*MULTIARRAY_API +/*NUMPY_API Max */ static PyObject * @@ -3678,7 +3678,7 @@ return ret; } -/*MULTIARRAY_API +/*NUMPY_API Min */ static PyObject * @@ -3695,7 +3695,7 @@ return ret; } -/*MULTIARRAY_API +/*NUMPY_API Ptp */ static PyObject * @@ -3730,7 +3730,7 @@ } -/*MULTIARRAY_API +/*NUMPY_API ArgMax */ static PyObject * @@ -3839,7 +3839,7 @@ } -/*MULTIARRAY_API +/*NUMPY_API Take */ static PyObject * @@ -4001,7 +4001,7 @@ return NULL; } -/*MULTIARRAY_API +/*NUMPY_API Put values into an array */ static PyObject * @@ -4167,7 +4167,7 @@ return PyArray_PutMask((PyArrayObject *)array, values, mask); } -/*MULTIARRAY_API +/*NUMPY_API Put values into an array according to a mask. */ static PyObject * @@ -4276,7 +4276,7 @@ as you get a new reference to it. */ -/*MULTIARRAY_API +/*NUMPY_API Useful to pass as converter function for O& processing in PyArgs_ParseTuple. */ @@ -4295,7 +4295,7 @@ } } -/*MULTIARRAY_API +/*NUMPY_API Useful to pass as converter function for O& processing in PyArgs_ParseTuple for output arrays */ @@ -4319,7 +4319,7 @@ } -/*MULTIARRAY_API +/*NUMPY_API Convert an object to true / false */ static int @@ -4333,7 +4333,7 @@ return PY_SUCCEED; } -/*MULTIARRAY_API +/*NUMPY_API Convert an object to FORTRAN / C / ANY */ static int @@ -4372,7 +4372,7 @@ return PY_SUCCEED; } -/*MULTIARRAY_API +/*NUMPY_API Convert an object to NPY_RAISE / NPY_CLIP / NPY_WRAP */ static int @@ -4418,7 +4418,7 @@ -/*MULTIARRAY_API +/*NUMPY_API Typestr converter */ static int @@ -4551,7 +4551,7 @@ */ -/*MULTIARRAY_API +/*NUMPY_API Get buffer chunk from object */ static int @@ -4591,7 +4591,7 @@ PyDimMem_FREE(seq.ptr)** */ -/*MULTIARRAY_API +/*NUMPY_API Get intp chunk from sequence */ static int @@ -5238,7 +5238,7 @@ */ -/*MULTIARRAY_API +/*NUMPY_API Get type-descriptor from an object forcing alignment if possible None goes to DEFAULT type. */ @@ -5267,7 +5267,7 @@ return PY_SUCCEED; } -/*MULTIARRAY_API +/*NUMPY_API Get type-descriptor from an object forcing alignment if possible None goes to NULL. 
*/ @@ -5297,7 +5297,7 @@ } -/*MULTIARRAY_API +/*NUMPY_API Get typenum from an object -- None goes to NULL */ static int @@ -5321,7 +5321,7 @@ */ /* new reference in *at */ -/*MULTIARRAY_API +/*NUMPY_API Get typenum from an object -- None goes to PyArray_DEFAULT */ static int @@ -5505,7 +5505,7 @@ return PY_FAIL; } -/*MULTIARRAY_API +/*NUMPY_API Convert object to endian */ static int @@ -5543,7 +5543,7 @@ return PY_SUCCEED; } -/*MULTIARRAY_API +/*NUMPY_API Convert object to sort kind */ static int @@ -5596,7 +5596,7 @@ equivalent (same basic kind and same itemsize). */ -/*MULTIARRAY_API*/ +/*NUMPY_API*/ static unsigned char PyArray_EquivTypes(PyArray_Descr *typ1, PyArray_Descr *typ2) { @@ -5618,7 +5618,7 @@ return (typ1->kind == typ2->kind); } -/*MULTIARRAY_API*/ +/*NUMPY_API*/ static unsigned char PyArray_EquivTypenums(int typenum1, int typenum2) { @@ -5768,7 +5768,7 @@ /* accepts NULL type */ /* steals referenct to type */ -/*MULTIARRAY_API +/*NUMPY_API Empty */ static PyObject * @@ -5889,7 +5889,7 @@ /* steal a reference */ /* accepts NULL type */ -/*MULTIARRAY_API +/*NUMPY_API Zeros */ static PyObject * @@ -6172,7 +6172,7 @@ } #undef FROM_BUFFER_SIZE -/*OBJECT_API +/*NUMPY_API Given a pointer to a string ``data``, a string length ``slen``, and a ``PyArray_Descr``, return an array corresponding to the data @@ -6332,7 +6332,7 @@ return r; } -/*OBJECT_API +/*NUMPY_API Given a ``FILE *`` pointer ``fp``, and a ``PyArray_Descr``, return an array corresponding to the data encoded in that file. @@ -6452,7 +6452,7 @@ /* steals a reference to dtype (which cannot be NULL) */ -/*OBJECT_API */ +/*NUMPY_API */ static PyObject * PyArray_FromIter(PyObject *obj, PyArray_Descr *dtype, intp count) { @@ -6567,7 +6567,7 @@ } -/*OBJECT_API*/ +/*NUMPY_API*/ static PyObject * PyArray_FromBuffer(PyObject *buf, PyArray_Descr *type, intp count, intp offset) @@ -6737,7 +6737,7 @@ } -/*MULTIARRAY_API +/*NUMPY_API Arange, */ static PyObject * @@ -6841,7 +6841,7 @@ } /* this doesn't change the references */ -/*MULTIARRAY_API +/*NUMPY_API ArangeObj, */ static PyObject * @@ -7065,7 +7065,7 @@ } -/*MULTIARRAY_API +/*NUMPY_API Where */ static PyObject * @@ -7373,7 +7373,7 @@ SIGJMP_BUF _NPY_SIGINT_BUF; -/*MULTIARRAY_API +/*NUMPY_API */ static void _PyArray_SigintHandler(int signum) @@ -7382,7 +7382,7 @@ SIGLONGJMP(_NPY_SIGINT_BUF, signum); } -/*MULTIARRAY_API +/*NUMPY_API */ static void* _PyArray_GetSigintBuf(void) Modified: trunk/numpy/core/src/scalartypes.inc.src =================================================================== --- trunk/numpy/core/src/scalartypes.inc.src 2008-05-24 08:19:21 UTC (rev 5228) +++ trunk/numpy/core/src/scalartypes.inc.src 2008-05-24 14:19:36 UTC (rev 5229) @@ -131,7 +131,7 @@ /* no error checking is performed -- ctypeptr must be same type as scalar */ /* in case of flexible type, the data is not copied into ctypeptr which is expected to be a pointer to pointer */ -/*OBJECT_API +/*NUMPY_API Convert to c-type */ static void @@ -160,7 +160,7 @@ /* This may not work right on narrow builds for NumPy unicode scalars. */ -/*OBJECT_API +/*NUMPY_API Cast Scalar to c-type */ static int @@ -197,7 +197,7 @@ return 0; } -/*OBJECT_API +/*NUMPY_API Cast Scalar to c-type */ static int @@ -220,7 +220,7 @@ */ /* steals reference to outcode */ -/*OBJECT_API +/*NUMPY_API Get 0-dim array from scalar */ static PyObject * @@ -292,7 +292,7 @@ return ret; } -/*OBJECT_API +/*NUMPY_API Get an Array Scalar From a Python Object Returns NULL if unsuccessful but error is only set if another error occurred. 
Currently only Numeric-like @@ -2720,7 +2720,7 @@ } /*New reference */ -/*OBJECT_API +/*NUMPY_API */ static PyArray_Descr * PyArray_DescrFromTypeObject(PyObject *type) @@ -2785,7 +2785,7 @@ return _descr_from_subtype(type); } -/*OBJECT_API +/*NUMPY_API Return the tuple of ordered field names from a dictionary. */ static PyObject * @@ -2812,7 +2812,7 @@ } /* New reference */ -/*OBJECT_API +/*NUMPY_API Return descr object from array scalar. */ static PyArray_Descr * @@ -2856,7 +2856,7 @@ } /* New reference */ -/*OBJECT_API +/*NUMPY_API Get a typeobject from a type-number -- can return NULL. */ static PyObject * From numpy-svn at scipy.org Sat May 24 11:07:34 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sat, 24 May 2008 10:07:34 -0500 (CDT) Subject: [Numpy-svn] r5230 - trunk/numpy/core/code_generators Message-ID: <20080524150734.1092D39C615@scipy.org> Author: charris Date: 2008-05-24 10:07:31 -0500 (Sat, 24 May 2008) New Revision: 5230 Removed: trunk/numpy/core/code_generators/array_api_order.txt trunk/numpy/core/code_generators/multiarray_api_order.txt Log: Remove now unused files. They have been merged into numpy_api_order.txt. Deleted: trunk/numpy/core/code_generators/array_api_order.txt =================================================================== --- trunk/numpy/core/code_generators/array_api_order.txt 2008-05-24 14:19:36 UTC (rev 5229) +++ trunk/numpy/core/code_generators/array_api_order.txt 2008-05-24 15:07:31 UTC (rev 5230) @@ -1,86 +0,0 @@ -# The functions in the numpy_core C API. They are defined -# here so that the order is set. Do not append to this -# list, append to multiarray_api_order.txt instead. -PyArray_SetNumericOps -PyArray_GetNumericOps -PyArray_INCREF -PyArray_XDECREF -PyArray_SetStringFunction -PyArray_DescrFromType -PyArray_TypeObjectFromType -PyArray_Zero -PyArray_One -PyArray_CastToType -PyArray_CastTo -PyArray_CastAnyTo -PyArray_CanCastSafely -PyArray_CanCastTo -PyArray_ObjectType -PyArray_DescrFromObject -PyArray_ConvertToCommonType -PyArray_DescrFromScalar -PyArray_DescrFromTypeObject -PyArray_Size -PyArray_Scalar -PyArray_FromScalar -PyArray_ScalarAsCtype -PyArray_CastScalarToCtype -PyArray_CastScalarDirect -PyArray_ScalarFromObject -PyArray_GetCastFunc -PyArray_FromDims -PyArray_FromDimsAndDataAndDescr -PyArray_FromAny -PyArray_EnsureArray -PyArray_EnsureAnyArray -PyArray_FromFile -PyArray_FromString -PyArray_FromBuffer -PyArray_FromIter -PyArray_Return -PyArray_GetField -PyArray_SetField -PyArray_Byteswap -PyArray_Resize -PyArray_MoveInto -PyArray_CopyInto -PyArray_CopyAnyInto -PyArray_CopyObject -PyArray_NewCopy -PyArray_ToList -PyArray_ToString -PyArray_ToFile -PyArray_Dump -PyArray_Dumps -PyArray_ValidType -PyArray_UpdateFlags -PyArray_New -PyArray_NewFromDescr -PyArray_DescrNew -PyArray_DescrNewFromType -PyArray_GetPriority -PyArray_IterNew -PyArray_MultiIterNew -PyArray_PyIntAsInt -PyArray_PyIntAsIntp -PyArray_Broadcast -PyArray_FillObjectArray -PyArray_FillWithScalar -PyArray_CheckStrides -PyArray_DescrNewByteorder -PyArray_IterAllButAxis -PyArray_CheckFromAny -PyArray_FromArray -PyArray_FromInterface -PyArray_FromStructInterface -PyArray_FromArrayAttr -PyArray_ScalarKind -PyArray_CanCoerceScalar -PyArray_NewFlagsObject -PyArray_CanCastScalar -PyArray_CompareUCS4 -PyArray_RemoveSmallest -PyArray_ElementStrides -PyArray_Item_INCREF -PyArray_Item_XDECREF -PyArray_FieldNames Deleted: trunk/numpy/core/code_generators/multiarray_api_order.txt =================================================================== --- 
trunk/numpy/core/code_generators/multiarray_api_order.txt 2008-05-24 14:19:36 UTC (rev 5229) +++ trunk/numpy/core/code_generators/multiarray_api_order.txt 2008-05-24 15:07:31 UTC (rev 5230) @@ -1,86 +0,0 @@ -PyArray_Transpose -PyArray_TakeFrom -PyArray_PutTo -PyArray_PutMask -PyArray_Repeat -PyArray_Choose -PyArray_Sort -PyArray_ArgSort -PyArray_SearchSorted -PyArray_ArgMax -PyArray_ArgMin -PyArray_Reshape -PyArray_Newshape -PyArray_Squeeze -PyArray_View -PyArray_SwapAxes -PyArray_Max -PyArray_Min -PyArray_Ptp -PyArray_Mean -PyArray_Trace -PyArray_Diagonal -PyArray_Clip -PyArray_Conjugate -PyArray_Nonzero -PyArray_Std -PyArray_Sum -PyArray_CumSum -PyArray_Prod -PyArray_CumProd -PyArray_All -PyArray_Any -PyArray_Compress -PyArray_Flatten -PyArray_Ravel -PyArray_MultiplyList -PyArray_MultiplyIntList -PyArray_GetPtr -PyArray_CompareLists -PyArray_AsCArray -PyArray_As1D -PyArray_As2D -PyArray_Free -PyArray_Converter -PyArray_IntpFromSequence -PyArray_Concatenate -PyArray_InnerProduct -PyArray_MatrixProduct -PyArray_CopyAndTranspose -PyArray_Correlate -PyArray_TypestrConvert -PyArray_DescrConverter -PyArray_DescrConverter2 -PyArray_IntpConverter -PyArray_BufferConverter -PyArray_AxisConverter -PyArray_BoolConverter -PyArray_ByteorderConverter -PyArray_OrderConverter -PyArray_EquivTypes -PyArray_Zeros -PyArray_Empty -PyArray_Where -PyArray_Arange -PyArray_ArangeObj -PyArray_SortkindConverter -PyArray_LexSort -PyArray_Round -PyArray_EquivTypenums -PyArray_RegisterDataType -PyArray_RegisterCastFunc -PyArray_RegisterCanCast -PyArray_InitArrFuncs -PyArray_IntTupleFromIntp -PyArray_TypeNumFromName -PyArray_ClipmodeConverter -PyArray_OutputConverter -PyArray_BroadcastToShape -_PyArray_SigintHandler -_PyArray_GetSigintBuf -PyArray_DescrAlignConverter -PyArray_DescrAlignConverter2 -PyArray_SearchsideConverter -PyArray_CheckAxis -PyArray_OverflowMultiplyList -PyArray_CompareString From numpy-svn at scipy.org Sat May 24 12:41:20 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sat, 24 May 2008 11:41:20 -0500 (CDT) Subject: [Numpy-svn] r5231 - trunk/numpy/core/src Message-ID: <20080524164120.017F939C62F@scipy.org> Author: charris Date: 2008-05-24 11:41:19 -0500 (Sat, 24 May 2008) New Revision: 5231 Modified: trunk/numpy/core/src/_sortmodule.c.src Log: Define copy_string to memcpy. Closes ticket #666. Modified: trunk/numpy/core/src/_sortmodule.c.src =================================================================== --- trunk/numpy/core/src/_sortmodule.c.src 2008-05-24 15:07:31 UTC (rev 5230) +++ trunk/numpy/core/src/_sortmodule.c.src 2008-05-24 16:41:19 UTC (rev 5231) @@ -389,35 +389,10 @@ * for strings and unicode is compiled with proper flags. */ -static int -compare_string(char *s1, char *s2, size_t len) -{ - const unsigned char *c1 = (unsigned char *)s1; - const unsigned char *c2 = (unsigned char *)s2; - size_t i; +#define copy_string memcpy - for(i = 0; i < len; ++i) { - if (c1[i] != c2[i]) { - return (c1[i] > c2[i]) ? 
1 : -1; - } - } - return 0; -} static void -copy_string(char *s1, char *s2, size_t len) -{ - if (len < SMALL_STRING) { - while(len--) { - *s1++ = *s2++; - } - } - else { - memcpy(s1, s2, len); - } -} - -static void swap_string(char *s1, char *s2, size_t len) { while(len--) { @@ -429,13 +404,15 @@ static int -compare_ucs4(npy_ucs4 *s1, npy_ucs4 *s2, size_t len) +compare_string(char *s1, char *s2, size_t len) { + const unsigned char *c1 = (unsigned char *)s1; + const unsigned char *c2 = (unsigned char *)s2; size_t i; for(i = 0; i < len; ++i) { - if (s1[i] != s2[i]) { - return (s1[i] > s2[i]) ? 1 : -1; + if (c1[i] != c2[i]) { + return (c1[i] > c2[i]) ? 1 : -1; } } return 0; @@ -461,6 +438,21 @@ } } + +static int +compare_ucs4(npy_ucs4 *s1, npy_ucs4 *s2, size_t len) +{ + size_t i; + + for(i = 0; i < len; ++i) { + if (s1[i] != s2[i]) { + return (s1[i] > s2[i]) ? 1 : -1; + } + } + return 0; +} + + /**begin repeat #TYPE=STRING, UNICODE# #type=char, PyArray_UCS4# From numpy-svn at scipy.org Sat May 24 18:44:12 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sat, 24 May 2008 17:44:12 -0500 (CDT) Subject: [Numpy-svn] r5232 - trunk/numpy/core/tests Message-ID: <20080524224412.4269939C840@scipy.org> Author: charris Date: 2008-05-24 17:44:09 -0500 (Sat, 24 May 2008) New Revision: 5232 Modified: trunk/numpy/core/tests/test_ufunc.py Log: Start work on testing ufuncs. Modified: trunk/numpy/core/tests/test_ufunc.py =================================================================== --- trunk/numpy/core/tests/test_ufunc.py 2008-05-24 16:41:19 UTC (rev 5231) +++ trunk/numpy/core/tests/test_ufunc.py 2008-05-24 22:44:09 UTC (rev 5232) @@ -147,5 +147,78 @@ # check PyUFunc_On_Om # fixme -- I don't know how to do this yet + def check_all_ufunc(self) : + """Try to check presence and results of all ufuncs. + + The list of ufuncs comes from generate_umath.py and is as follows: + + add + subtract + multiply + divide + floor_divide + true_divide + conjugate + fmod + square + reciprocal + ones_like + power + absolute + negative + sign + greater + greate_equal + less + less_equal + equal + not_equal + logical_and + logical_or + logical_xor + maximum + minimum + bitwise_and + bitwise_or + bitwise_xor + invert + left_shift + right_shift + degrees + radians + arccos + arccosh + arcsin + arcsinh + arctan + arctanh + cos + sin + tan + cosh + sinh + tanh + exp + expm1 + log + log10 + log1p + sqrt + ceil + fabs + floor + rint + arctan2 + remainder + hypot + isnan + isinf + isfinite + signbit + modf + + """ + pass + if __name__ == "__main__": NumpyTest().run() From numpy-svn at scipy.org Sat May 24 19:02:59 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sat, 24 May 2008 18:02:59 -0500 (CDT) Subject: [Numpy-svn] r5233 - in trunk/numpy/core: . code_generators Message-ID: <20080524230259.4EB9939C814@scipy.org> Author: charris Date: 2008-05-24 18:02:56 -0500 (Sat, 24 May 2008) New Revision: 5233 Added: trunk/numpy/core/code_generators/generate_numpy_api.py Modified: trunk/numpy/core/scons_support.py trunk/numpy/core/setup.py Log: Rename generate_array_api to generate_numpy_api. 
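The renamed module keeps the generate_api(output_dir, force=False) entry point shown in the new file below, returning the (header, C source, documentation) target paths, so callers such as scons_support.py and setup.py only need to change their import. A hedged usage sketch, run from a source checkout (the output directory is illustrative and assumed to already exist):

    import sys
    sys.path.insert(0, 'numpy/core/code_generators')

    import generate_numpy_api

    h_file, c_file, doc_file = generate_numpy_api.generate_api('build/codegen')
    # Writes __multiarray_api.h, __multiarray_api.c and multiarray_api.txt
    # into the given directory; with force=False they are regenerated only
    # when the order file or the generator itself has changed.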
Added: trunk/numpy/core/code_generators/generate_numpy_api.py =================================================================== --- trunk/numpy/core/code_generators/generate_numpy_api.py 2008-05-24 22:44:09 UTC (rev 5232) +++ trunk/numpy/core/code_generators/generate_numpy_api.py 2008-05-24 23:02:56 UTC (rev 5233) @@ -0,0 +1,193 @@ +import os +import genapi + +types = ['Generic','Number','Integer','SignedInteger','UnsignedInteger', + 'Inexact', + 'Floating', 'ComplexFloating', 'Flexible', 'Character', + 'Byte','Short','Int', 'Long', 'LongLong', 'UByte', 'UShort', + 'UInt', 'ULong', 'ULongLong', 'Float', 'Double', 'LongDouble', + 'CFloat', 'CDouble', 'CLongDouble', 'Object', 'String', 'Unicode', + 'Void'] + +h_template = r""" +#ifdef _MULTIARRAYMODULE + +typedef struct { + PyObject_HEAD + npy_bool obval; +} PyBoolScalarObject; + + +static unsigned int PyArray_GetNDArrayCVersion (void); +static PyTypeObject PyBigArray_Type; +static PyTypeObject PyArray_Type; +static PyTypeObject PyArrayDescr_Type; +static PyTypeObject PyArrayFlags_Type; +static PyTypeObject PyArrayIter_Type; +static PyTypeObject PyArrayMapIter_Type; +static PyTypeObject PyArrayMultiIter_Type; +static int NPY_NUMUSERTYPES=0; +static PyTypeObject PyBoolArrType_Type; +static PyBoolScalarObject _PyArrayScalar_BoolValues[2]; + +%s + +#else + +#if defined(PY_ARRAY_UNIQUE_SYMBOL) +#define PyArray_API PY_ARRAY_UNIQUE_SYMBOL +#endif + +#if defined(NO_IMPORT) || defined(NO_IMPORT_ARRAY) +extern void **PyArray_API; +#else +#if defined(PY_ARRAY_UNIQUE_SYMBOL) +void **PyArray_API; +#else +static void **PyArray_API=NULL; +#endif +#endif + +#define PyArray_GetNDArrayCVersion (*(unsigned int (*)(void)) PyArray_API[0]) +#define PyBigArray_Type (*(PyTypeObject *)PyArray_API[1]) +#define PyArray_Type (*(PyTypeObject *)PyArray_API[2]) +#define PyArrayDescr_Type (*(PyTypeObject *)PyArray_API[3]) +#define PyArrayFlags_Type (*(PyTypeObject *)PyArray_API[4]) +#define PyArrayIter_Type (*(PyTypeObject *)PyArray_API[5]) +#define PyArrayMultiIter_Type (*(PyTypeObject *)PyArray_API[6]) +#define NPY_NUMUSERTYPES (*(int *)PyArray_API[7]) +#define PyBoolArrType_Type (*(PyTypeObject *)PyArray_API[8]) +#define _PyArrayScalar_BoolValues ((PyBoolScalarObject *)PyArray_API[9]) + +%s + +#if !defined(NO_IMPORT_ARRAY) && !defined(NO_IMPORT) +static int +_import_array(void) +{ + PyObject *numpy = PyImport_ImportModule("numpy.core.multiarray"); + PyObject *c_api = NULL; + if (numpy == NULL) return -1; + c_api = PyObject_GetAttrString(numpy, "_ARRAY_API"); + if (c_api == NULL) {Py_DECREF(numpy); return -1;} + if (PyCObject_Check(c_api)) { + PyArray_API = (void **)PyCObject_AsVoidPtr(c_api); + } + Py_DECREF(c_api); + Py_DECREF(numpy); + if (PyArray_API == NULL) return -1; + /* Perform runtime check of C API version */ + if (NPY_VERSION != PyArray_GetNDArrayCVersion()) { + PyErr_Format(PyExc_RuntimeError, "module compiled against "\ + "version %%x of C-API but this version of numpy is %%x", \ + (int) NPY_VERSION, (int) PyArray_GetNDArrayCVersion()); + return -1; + } + return 0; +} + +#define import_array() {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, "numpy.core.multiarray failed to import"); return; } } + +#define import_array1(ret) {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, "numpy.core.multiarray failed to import"); return ret; } } + +#define import_array2(msg, ret) {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, msg); return ret; } } + +#endif + +#endif +""" + + 
+c_template = r""" +/* These pointers will be stored in the C-object for use in other + extension modules +*/ + +void *PyArray_API[] = { + (void *) PyArray_GetNDArrayCVersion, + (void *) &PyBigArray_Type, + (void *) &PyArray_Type, + (void *) &PyArrayDescr_Type, + (void *) &PyArrayFlags_Type, + (void *) &PyArrayIter_Type, + (void *) &PyArrayMultiIter_Type, + (int *) &NPY_NUMUSERTYPES, + (void *) &PyBoolArrType_Type, + (void *) &_PyArrayScalar_BoolValues, +%s +}; +""" + +c_api_header = """ +=========== +Numpy C-API +=========== +""" + +def generate_api(output_dir, force=False): + basename = 'multiarray_api' + + h_file = os.path.join(output_dir, '__%s.h' % basename) + c_file = os.path.join(output_dir, '__%s.c' % basename) + d_file = os.path.join(output_dir, '%s.txt' % basename) + targets = (h_file, c_file, d_file) + sources = ['numpy_api_order.txt'] + + if (not force and not genapi.should_rebuild(targets, sources + [__file__])): + return targets + else: + do_generate_api(targets, sources) + + return targets + +def do_generate_api(targets, sources): + header_file = targets[0] + c_file = targets[1] + doc_file = targets[2] + + numpyapi_list = genapi.get_api_functions('NUMPY_API', sources[0]) + + # API fixes for __arrayobject_api.h + fixed = 10 + numtypes = len(types) + fixed + + module_list = [] + extension_list = [] + init_list = [] + + # setup types + for k, atype in enumerate(types): + num = fixed + k + astr = " (void *) &Py%sArrType_Type," % types[k] + init_list.append(astr) + astr = "static PyTypeObject Py%sArrType_Type;" % types[k] + module_list.append(astr) + astr = "#define Py%sArrType_Type (*(PyTypeObject *)PyArray_API[%d])" % \ + (types[k], num) + extension_list.append(astr) + + # set up object API + genapi.add_api_list(numtypes, 'PyArray_API', numpyapi_list, + module_list, extension_list, init_list) + + # Write to header + fid = open(header_file, 'w') + s = h_template % ('\n'.join(module_list), '\n'.join(extension_list)) + fid.write(s) + fid.close() + + # Write to c-code + fid = open(c_file, 'w') + s = c_template % '\n'.join(init_list) + fid.write(s) + fid.close() + + # write to documentation + fid = open(doc_file, 'w') + fid.write(c_api_header) + for func in numpyapi_list: + fid.write(func.to_ReST()) + fid.write('\n\n') + fid.close() + + return targets Modified: trunk/numpy/core/scons_support.py =================================================================== --- trunk/numpy/core/scons_support.py 2008-05-24 22:44:09 UTC (rev 5232) +++ trunk/numpy/core/scons_support.py 2008-05-24 23:02:56 UTC (rev 5233) @@ -9,8 +9,8 @@ from os.path import join as pjoin, dirname as pdirname, basename as pbasename from copy import deepcopy -from code_generators.generate_array_api import \ - do_generate_api as nowrap_do_generate_array_api +from code_generators.generate_numpy_api import \ + do_generate_api as nowrap_do_generate_numpy_api from code_generators.generate_ufunc_api import \ do_generate_api as nowrap_do_generate_ufunc_api @@ -35,8 +35,8 @@ #------------------------------------ # Ufunc and multiarray API generators #------------------------------------ -def do_generate_array_api(target, source, env): - nowrap_do_generate_array_api([str(i) for i in target], +def do_generate_numpy_api(target, source, env): + nowrap_do_generate_numpy_api([str(i) for i in target], [str(i) for i in source]) return 0 @@ -188,7 +188,7 @@ nosmp = 0 return nosmp == 1 -array_api_gen_bld = Builder(action = Action(do_generate_array_api, '$ARRAPIGENCOMSTR'), +array_api_gen_bld = Builder(action = 
Action(do_generate_numpy_api, '$ARRAPIGENCOMSTR'), emitter = [generate_api_emitter, distutils_dirs_emitter]) Modified: trunk/numpy/core/setup.py =================================================================== --- trunk/numpy/core/setup.py 2008-05-24 22:44:09 UTC (rev 5232) +++ trunk/numpy/core/setup.py 2008-05-24 23:02:56 UTC (rev 5233) @@ -206,7 +206,7 @@ return (h_file,) return generate_api - generate_array_api = generate_api_func('generate_array_api') + generate_numpy_api = generate_api_func('generate_numpy_api') generate_ufunc_api = generate_api_func('generate_ufunc_api') def generate_umath_c(ext,build_dir): @@ -246,10 +246,10 @@ sources = [join('src','multiarraymodule.c'), generate_config_h, generate_numpyconfig_h, - generate_array_api, + generate_numpy_api, join('src','scalartypes.inc.src'), join('src','arraytypes.inc.src'), - join(codegen_dir,'generate_array_api.py'), + join(codegen_dir,'generate_numpy_api.py'), join('*.py') ], depends = deps, @@ -274,7 +274,7 @@ sources=[join('src','_sortmodule.c.src'), generate_config_h, generate_numpyconfig_h, - generate_array_api, + generate_numpy_api, ], ) @@ -282,7 +282,7 @@ sources=[join('src','scalarmathmodule.c.src'), generate_config_h, generate_numpyconfig_h, - generate_array_api, + generate_numpy_api, generate_ufunc_api], ) From numpy-svn at scipy.org Sat May 24 19:04:08 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sat, 24 May 2008 18:04:08 -0500 (CDT) Subject: [Numpy-svn] r5234 - trunk/numpy/core/code_generators Message-ID: <20080524230408.B3DC439C814@scipy.org> Author: charris Date: 2008-05-24 18:04:06 -0500 (Sat, 24 May 2008) New Revision: 5234 Removed: trunk/numpy/core/code_generators/generate_array_api.py Log: Delete generate_array_api.py Deleted: trunk/numpy/core/code_generators/generate_array_api.py =================================================================== --- trunk/numpy/core/code_generators/generate_array_api.py 2008-05-24 23:02:56 UTC (rev 5233) +++ trunk/numpy/core/code_generators/generate_array_api.py 2008-05-24 23:04:06 UTC (rev 5234) @@ -1,193 +0,0 @@ -import os -import genapi - -types = ['Generic','Number','Integer','SignedInteger','UnsignedInteger', - 'Inexact', - 'Floating', 'ComplexFloating', 'Flexible', 'Character', - 'Byte','Short','Int', 'Long', 'LongLong', 'UByte', 'UShort', - 'UInt', 'ULong', 'ULongLong', 'Float', 'Double', 'LongDouble', - 'CFloat', 'CDouble', 'CLongDouble', 'Object', 'String', 'Unicode', - 'Void'] - -h_template = r""" -#ifdef _MULTIARRAYMODULE - -typedef struct { - PyObject_HEAD - npy_bool obval; -} PyBoolScalarObject; - - -static unsigned int PyArray_GetNDArrayCVersion (void); -static PyTypeObject PyBigArray_Type; -static PyTypeObject PyArray_Type; -static PyTypeObject PyArrayDescr_Type; -static PyTypeObject PyArrayFlags_Type; -static PyTypeObject PyArrayIter_Type; -static PyTypeObject PyArrayMapIter_Type; -static PyTypeObject PyArrayMultiIter_Type; -static int NPY_NUMUSERTYPES=0; -static PyTypeObject PyBoolArrType_Type; -static PyBoolScalarObject _PyArrayScalar_BoolValues[2]; - -%s - -#else - -#if defined(PY_ARRAY_UNIQUE_SYMBOL) -#define PyArray_API PY_ARRAY_UNIQUE_SYMBOL -#endif - -#if defined(NO_IMPORT) || defined(NO_IMPORT_ARRAY) -extern void **PyArray_API; -#else -#if defined(PY_ARRAY_UNIQUE_SYMBOL) -void **PyArray_API; -#else -static void **PyArray_API=NULL; -#endif -#endif - -#define PyArray_GetNDArrayCVersion (*(unsigned int (*)(void)) PyArray_API[0]) -#define PyBigArray_Type (*(PyTypeObject *)PyArray_API[1]) -#define PyArray_Type (*(PyTypeObject 
*)PyArray_API[2]) -#define PyArrayDescr_Type (*(PyTypeObject *)PyArray_API[3]) -#define PyArrayFlags_Type (*(PyTypeObject *)PyArray_API[4]) -#define PyArrayIter_Type (*(PyTypeObject *)PyArray_API[5]) -#define PyArrayMultiIter_Type (*(PyTypeObject *)PyArray_API[6]) -#define NPY_NUMUSERTYPES (*(int *)PyArray_API[7]) -#define PyBoolArrType_Type (*(PyTypeObject *)PyArray_API[8]) -#define _PyArrayScalar_BoolValues ((PyBoolScalarObject *)PyArray_API[9]) - -%s - -#if !defined(NO_IMPORT_ARRAY) && !defined(NO_IMPORT) -static int -_import_array(void) -{ - PyObject *numpy = PyImport_ImportModule("numpy.core.multiarray"); - PyObject *c_api = NULL; - if (numpy == NULL) return -1; - c_api = PyObject_GetAttrString(numpy, "_ARRAY_API"); - if (c_api == NULL) {Py_DECREF(numpy); return -1;} - if (PyCObject_Check(c_api)) { - PyArray_API = (void **)PyCObject_AsVoidPtr(c_api); - } - Py_DECREF(c_api); - Py_DECREF(numpy); - if (PyArray_API == NULL) return -1; - /* Perform runtime check of C API version */ - if (NPY_VERSION != PyArray_GetNDArrayCVersion()) { - PyErr_Format(PyExc_RuntimeError, "module compiled against "\ - "version %%x of C-API but this version of numpy is %%x", \ - (int) NPY_VERSION, (int) PyArray_GetNDArrayCVersion()); - return -1; - } - return 0; -} - -#define import_array() {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, "numpy.core.multiarray failed to import"); return; } } - -#define import_array1(ret) {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, "numpy.core.multiarray failed to import"); return ret; } } - -#define import_array2(msg, ret) {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, msg); return ret; } } - -#endif - -#endif -""" - - -c_template = r""" -/* These pointers will be stored in the C-object for use in other - extension modules -*/ - -void *PyArray_API[] = { - (void *) PyArray_GetNDArrayCVersion, - (void *) &PyBigArray_Type, - (void *) &PyArray_Type, - (void *) &PyArrayDescr_Type, - (void *) &PyArrayFlags_Type, - (void *) &PyArrayIter_Type, - (void *) &PyArrayMultiIter_Type, - (int *) &NPY_NUMUSERTYPES, - (void *) &PyBoolArrType_Type, - (void *) &_PyArrayScalar_BoolValues, -%s -}; -""" - -c_api_header = """ -=========== -Numpy C-API -=========== -""" - -def generate_api(output_dir, force=False): - basename = 'multiarray_api' - - h_file = os.path.join(output_dir, '__%s.h' % basename) - c_file = os.path.join(output_dir, '__%s.c' % basename) - d_file = os.path.join(output_dir, '%s.txt' % basename) - targets = (h_file, c_file, d_file) - sources = ['numpy_api_order.txt'] - - if (not force and not genapi.should_rebuild(targets, sources + [__file__])): - return targets - else: - do_generate_api(targets, sources) - - return targets - -def do_generate_api(targets, sources): - header_file = targets[0] - c_file = targets[1] - doc_file = targets[2] - - numpyapi_list = genapi.get_api_functions('NUMPY_API', sources[0]) - - # API fixes for __arrayobject_api.h - fixed = 10 - numtypes = len(types) + fixed - - module_list = [] - extension_list = [] - init_list = [] - - # setup types - for k, atype in enumerate(types): - num = fixed + k - astr = " (void *) &Py%sArrType_Type," % types[k] - init_list.append(astr) - astr = "static PyTypeObject Py%sArrType_Type;" % types[k] - module_list.append(astr) - astr = "#define Py%sArrType_Type (*(PyTypeObject *)PyArray_API[%d])" % \ - (types[k], num) - extension_list.append(astr) - - # set up object API - genapi.add_api_list(numtypes, 'PyArray_API', numpyapi_list, - 
module_list, extension_list, init_list) - - # Write to header - fid = open(header_file, 'w') - s = h_template % ('\n'.join(module_list), '\n'.join(extension_list)) - fid.write(s) - fid.close() - - # Write to c-code - fid = open(c_file, 'w') - s = c_template % '\n'.join(init_list) - fid.write(s) - fid.close() - - # write to documentation - fid = open(doc_file, 'w') - fid.write(c_api_header) - for func in numpyapi_list: - fid.write(func.to_ReST()) - fid.write('\n\n') - fid.close() - - return targets From numpy-svn at scipy.org Sun May 25 02:35:54 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sun, 25 May 2008 01:35:54 -0500 (CDT) Subject: [Numpy-svn] r5235 - trunk/numpy/core/tests Message-ID: <20080525063554.C6E4C39C8B7@scipy.org> Author: charris Date: 2008-05-25 01:35:51 -0500 (Sun, 25 May 2008) New Revision: 5235 Modified: trunk/numpy/core/tests/test_ufunc.py Log: Save preliminary work on testing ufuncs. Modified: trunk/numpy/core/tests/test_ufunc.py =================================================================== --- trunk/numpy/core/tests/test_ufunc.py 2008-05-24 23:04:06 UTC (rev 5234) +++ trunk/numpy/core/tests/test_ufunc.py 2008-05-25 06:35:51 UTC (rev 5235) @@ -152,71 +152,83 @@ The list of ufuncs comes from generate_umath.py and is as follows: - add - subtract - multiply - divide - floor_divide - true_divide - conjugate - fmod - square - reciprocal - ones_like - power - absolute - negative - sign - greater - greate_equal - less - less_equal - equal - not_equal - logical_and - logical_or - logical_xor - maximum - minimum - bitwise_and - bitwise_or - bitwise_xor - invert - left_shift - right_shift - degrees - radians - arccos - arccosh - arcsin - arcsinh - arctan - arctanh - cos - sin - tan - cosh - sinh - tanh - exp - expm1 - log - log10 - log1p - sqrt - ceil - fabs - floor - rint - arctan2 - remainder - hypot - isnan - isinf - isfinite - signbit - modf + ===== ============= =============== ======================== + done function types notes + ===== ============= =============== ======================== + n add bool + nums + O boolean + is || + n subtract bool + nums + O boolean - is ^ + n multiply bool + nums + O boolean * is & + n divide nums + O + n floor_divide nums + O + n true_divide nums + O bBhH -> f, iIlLqQ -> d + n conjugate nums + O + n fmod nums + M + n square nums + O + n reciprocal nums + O + n ones_like nums + O + n power nums + O + n absolute nums + O complex -> real + n negative nums + O + n sign nums + O -> int + n greater bool + nums + O -> bool + n greater_equal bool + nums + O -> bool + n less bool + nums + O -> bool + n less_equal bool + nums + O -> bool + n equal bool + nums + O -> bool + n not_equal bool + nums + O -> bool + n logical_and bool + nums + M -> bool + n logical_not bool + nums + M -> bool + n logical_or bool + nums + M -> bool + n logical_xor bool + nums + M -> bool + n maximum bool + nums + O + n minimum bool + nums + O + n bitwise_and bool + ints + O flts raise an error + n bitwise_or bool + ints + O flts raise an error + n bitwise_xor bool + ints + O flts raise an error + n invert bool + ints + O flts raise an error + n left_shift ints + O flts raise an error + n right_shift ints + O flts raise an error + n degrees real + M cmplx raise an error + n radians real + M cmplx raise an error + n arccos flts + M + n arccosh flts + M + n arcsin flts + M + n arcsinh flts + M + n arctan flts + M + n arctanh flts + M + n cos flts + M + n sin flts + M + n tan flts + M + n cosh flts + M + n sinh flts + M + n tanh flts + M + n exp flts 
+ M + n expm1 flts + M + n log flts + M + n log10 flts + M + n log1p flts + M + n sqrt flts + M real x < 0 raises error + n ceil real + M + n floor real + M + n fabs real + M + n rint flts + M + n arctan2 real + M + n remainder ints + real + O + n hypot real + M + n isnan flts -> bool + n isinf flts -> bool + n isfinite flts -> bool + n signbit real -> bool + n modf real -> (frac, int) + ===== ============= =============== ======================== + Types other than those listed will be accepted, but they are cast to + the smallest compatible type for which the function is defined. The + casting rules are: + + bool -> int8 -> float32 + ints -> double + """ pass From numpy-svn at scipy.org Sun May 25 05:28:02 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sun, 25 May 2008 04:28:02 -0500 (CDT) Subject: [Numpy-svn] r5236 - in trunk/numpy: . tests Message-ID: <20080525092802.8305339C8EE@scipy.org> Author: cdavid Date: 2008-05-25 04:27:57 -0500 (Sun, 25 May 2008) New Revision: 5236 Modified: trunk/numpy/ctypeslib.py trunk/numpy/tests/test_ctypeslib.py Log: Handle library with extension in their name for ctypes.load_library. Modified: trunk/numpy/ctypeslib.py =================================================================== --- trunk/numpy/ctypeslib.py 2008-05-25 06:35:51 UTC (rev 5235) +++ trunk/numpy/ctypeslib.py 2008-05-25 09:27:57 UTC (rev 5236) @@ -29,7 +29,10 @@ import warnings warnings.warn("All features of ctypes interface may not work " \ "with ctypes < 1.0.1") - if '.' not in libname: + + ext = os.path.splitext(libname)[1] + + if not ext: # Try to load library with platform-specific name, otherwise # default to libname.[so|pyd]. Sometimes, these files are built # erroneously on non-linux platforms. @@ -38,6 +41,8 @@ libname_ext.insert(0, '%s.dll' % libname) elif sys.platform == 'darwin': libname_ext.insert(0, '%s.dylib' % libname) + else: + libname_ext = [libname] loader_path = os.path.abspath(loader_path) if not os.path.isdir(loader_path): Modified: trunk/numpy/tests/test_ctypeslib.py =================================================================== --- trunk/numpy/tests/test_ctypeslib.py 2008-05-25 06:35:51 UTC (rev 5235) +++ trunk/numpy/tests/test_ctypeslib.py 2008-05-25 09:27:57 UTC (rev 5236) @@ -12,6 +12,22 @@ " (import error was: %s)" % str(e) print msg + def check_basic2(self): + """Regression for #801: load_library with a full library name + (including extension) does not work.""" + try: + try: + from distutils import sysconfig + so = sysconfig.get_config_var('SO') + cdll = load_library('multiarray%s' % so, + np.core.multiarray.__file__) + except ImportError: + print "No distutils available, skipping test." 
+ except ImportError, e: + msg = "ctypes is not available on this python: skipping the test" \ + " (import error was: %s)" % str(e) + print msg + class TestNdpointer(NumpyTestCase): def check_dtype(self): dt = np.intc From numpy-svn at scipy.org Mon May 26 03:39:07 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Mon, 26 May 2008 02:39:07 -0500 (CDT) Subject: [Numpy-svn] r5237 - tags Message-ID: <20080526073907.41EE739C330@scipy.org> Author: jarrod.millman Date: 2008-05-26 02:39:03 -0500 (Mon, 26 May 2008) New Revision: 5237 Added: tags/1.1.0/ Log: tagging 1.1.0 final Copied: tags/1.1.0 (from rev 5236, branches/1.1.x) From numpy-svn at scipy.org Mon May 26 03:41:37 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Mon, 26 May 2008 02:41:37 -0500 (CDT) Subject: [Numpy-svn] r5238 - tags/1.1.0/numpy Message-ID: <20080526074137.2C2A039C364@scipy.org> Author: jarrod.millman Date: 2008-05-26 02:41:29 -0500 (Mon, 26 May 2008) New Revision: 5238 Modified: tags/1.1.0/numpy/version.py Log: updating version info Modified: tags/1.1.0/numpy/version.py =================================================================== --- tags/1.1.0/numpy/version.py 2008-05-26 07:39:03 UTC (rev 5237) +++ tags/1.1.0/numpy/version.py 2008-05-26 07:41:29 UTC (rev 5238) @@ -1,5 +1,5 @@ version='1.1.0' -release=False +release=True if not release: version += '.dev' From numpy-svn at scipy.org Mon May 26 03:42:24 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Mon, 26 May 2008 02:42:24 -0500 (CDT) Subject: [Numpy-svn] r5239 - branches/1.1.x/numpy Message-ID: <20080526074224.A87C939C257@scipy.org> Author: jarrod.millman Date: 2008-05-26 02:42:22 -0500 (Mon, 26 May 2008) New Revision: 5239 Modified: branches/1.1.x/numpy/version.py Log: 1.1.1 development Modified: branches/1.1.x/numpy/version.py =================================================================== --- branches/1.1.x/numpy/version.py 2008-05-26 07:41:29 UTC (rev 5238) +++ branches/1.1.x/numpy/version.py 2008-05-26 07:42:22 UTC (rev 5239) @@ -1,4 +1,4 @@ -version='1.1.0' +version='1.1.1' release=False if not release: From numpy-svn at scipy.org Mon May 26 07:29:46 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Mon, 26 May 2008 06:29:46 -0500 (CDT) Subject: [Numpy-svn] r5240 - in trunk/tools/win32build: . cpuid nsis_scripts Message-ID: <20080526112946.0B8DD39CA07@scipy.org> Author: cdavid Date: 2008-05-26 06:29:37 -0500 (Mon, 26 May 2008) New Revision: 5240 Added: trunk/tools/win32build/README.txt trunk/tools/win32build/cpuid/ trunk/tools/win32build/cpuid/SConstruct trunk/tools/win32build/cpuid/cpuid.c trunk/tools/win32build/cpuid/cpuid.h trunk/tools/win32build/cpuid/test.c trunk/tools/win32build/nsis_scripts/ trunk/tools/win32build/nsis_scripts/numpy-superinstaller-2.4.nsi trunk/tools/win32build/nsis_scripts/numpy-superinstaller-2.5.nsi Log: Add cpuid + nsis scripts to build win32 installer. Added: trunk/tools/win32build/README.txt =================================================================== --- trunk/tools/win32build/README.txt 2008-05-26 07:42:22 UTC (rev 5239) +++ trunk/tools/win32build/README.txt 2008-05-26 11:29:37 UTC (rev 5240) @@ -0,0 +1,9 @@ +This directory contains various scripts and code to build installers for +windows + - cpuid: contains a mini lib to detect SSE. + - cpucaps: nsis plugin to add the ability to detect SSE for installers. 
+ - *nsi scripts: actual nsis scripts to build the installer + - build.py: script to build various versions of python binaries + (several archs, several python versions) + +To build the binaries, you need blas/lapack/atlas for all architectures. Added: trunk/tools/win32build/cpuid/SConstruct =================================================================== --- trunk/tools/win32build/cpuid/SConstruct 2008-05-26 07:42:22 UTC (rev 5239) +++ trunk/tools/win32build/cpuid/SConstruct 2008-05-26 11:29:37 UTC (rev 5240) @@ -0,0 +1,5 @@ +env = Environment(tools = ['mingw']) + +#libcpuid = env.SharedLibrary('cpuid', source = ['cpuid.c']) +#test = env.Program('test', source = ['test.c'], LIBS = libcpuid, RPATH = ['.']) +test = env.Program('test', source = ['test.c', 'cpuid.c']) Added: trunk/tools/win32build/cpuid/cpuid.c =================================================================== --- trunk/tools/win32build/cpuid/cpuid.c 2008-05-26 07:42:22 UTC (rev 5239) +++ trunk/tools/win32build/cpuid/cpuid.c 2008-05-26 11:29:37 UTC (rev 5240) @@ -0,0 +1,169 @@ +/* + * TODO: + * - test for cpuid availability + * - test for OS support (tricky) + */ + +#include +#include +#include + +#include "cpuid.h" + +#ifndef __GNUC__ +#error "Sorry, this code can only be compiled with gcc for now" +#endif + +/* + * SIMD: SSE 1, 2 and 3, MMX + */ +#define CPUID_FLAG_MMX 1 << 23 /* in edx */ +#define CPUID_FLAG_SSE 1 << 25 /* in edx */ +#define CPUID_FLAG_SSE2 1 << 26 /* in edx */ +#define CPUID_FLAG_SSE3 1 << 0 /* in ecx */ + +/* + * long mode (AMD64 instruction set) + */ +#define CPUID_FLAGS_LONG_MODE 1 << 29 /* in edx */ + +/* + * struct reprensenting the cpuid flags as put in the register + */ +typedef struct { + uint32_t eax; + uint32_t ebx; + uint32_t ecx; + uint32_t edx; +} cpuid_t; + +/* + * Union to read bytes in 32 (intel) bits registers + */ +union _le_reg { + uint8_t ccnt[4]; + uint32_t reg; +} __attribute__ ((packed)); +typedef union _le_reg le_reg_t ; + +/* + * can_cpuid and read_cpuid are the two only functions using asm + */ +static int can_cpuid(void) +{ + int has_cpuid = 0 ; + + /* + * See intel doc on cpuid (pdf) + */ + asm volatile ( + "pushfl \n\t" + "popl %%eax \n\t" + "movl %%eax, %%ecx \n\t" + "xorl $0x200000, %%eax \n\t" + "pushl %%eax \n\t" + "popfl \n\t" + "pushfl \n\t" + "popl %%eax \n\t" + "xorl %%ecx, %%eax \n\t" + "andl $0x200000, %%eax \n\t" + "movl %%eax,%0 \n\t" + :"=m" (has_cpuid) + : /*no input*/ + : "eax","ecx","cc"); + + return (has_cpuid != 0) ; +} + +/* + * func is the "level" of cpuid. 
See for cpuid.txt + */ +static cpuid_t read_cpuid(unsigned int func) +{ + cpuid_t res; + + /* we save ebx because it is used when compiled by -fPIC */ + asm volatile( + "pushl %%ebx \n\t" /* save %ebx */ + "cpuid \n\t" + "movl %%ebx, %1 \n\t" /* save what cpuid just put in %ebx */ + "popl %%ebx \n\t" /* restore the old %ebx */ + : "=a"(res.eax), "=r"(res.ebx), + "=c"(res.ecx), "=d"(res.edx) + : "a"(func) + : "cc"); + + return res; +} + +static uint32_t get_max_func() +{ + cpuid_t cpuid; + + cpuid = read_cpuid(0); + return cpuid.eax; +} + +/* + * vendor should have at least CPUID_VENDOR_STRING_LEN characters + */ +static int get_vendor_string(cpuid_t cpuid, char vendor[]) +{ + int i; + le_reg_t treg; + + treg.reg = cpuid.ebx; + for (i = 0; i < 4; ++i) { + vendor[i] = treg.ccnt[i]; + } + + treg.reg = cpuid.edx; + for (i = 0; i < 4; ++i) { + vendor[i+4] = treg.ccnt[i]; + } + + treg.reg = cpuid.ecx; + for (i = 0; i < 4; ++i) { + vendor[i+8] = treg.ccnt[i]; + } + vendor[12] = '\0'; + return 0; +} + +int cpuid_get_caps(cpu_caps_t *cpu) +{ + cpuid_t cpuid; + int max; + + memset(cpu, 0, sizeof(*cpu)); + + if (!can_cpuid()) { + return 0; + } + + max = get_max_func(); + + /* Read vendor string */ + cpuid = read_cpuid(0); + get_vendor_string(cpuid, cpu->vendor); + + if (max < 0x00000001) { + return 0; + } + cpuid = read_cpuid(0x00000001); + + /* We can read mmx, sse 1 2 and 3 when cpuid level >= 0x00000001 */ + if (cpuid.edx & CPUID_FLAG_MMX) { + cpu->has_mmx = 1; + } + if (cpuid.edx & CPUID_FLAG_SSE) { + cpu->has_sse = 1; + } + if (cpuid.edx & CPUID_FLAG_SSE2) { + cpu->has_sse2 = 1; + } + if (cpuid.ecx & CPUID_FLAG_SSE3) { + cpu->has_sse3 = 1; + } + return 0; +} Added: trunk/tools/win32build/cpuid/cpuid.h =================================================================== --- trunk/tools/win32build/cpuid/cpuid.h 2008-05-26 07:42:22 UTC (rev 5239) +++ trunk/tools/win32build/cpuid/cpuid.h 2008-05-26 11:29:37 UTC (rev 5240) @@ -0,0 +1,20 @@ +#ifndef _GABOU_CPUID_H +#define _GABOU_CPUID_H + +#include + +#define CPUID_VENDOR_STRING_LEN 12 + +struct _cpu_caps { + int has_cpuid; + int has_mmx; + int has_sse; + int has_sse2; + int has_sse3; + char vendor[CPUID_VENDOR_STRING_LEN+1]; +}; +typedef struct _cpu_caps cpu_caps_t; + +int cpuid_get_caps(cpu_caps_t *cpuinfo); + +#endif Added: trunk/tools/win32build/cpuid/test.c =================================================================== --- trunk/tools/win32build/cpuid/test.c 2008-05-26 07:42:22 UTC (rev 5239) +++ trunk/tools/win32build/cpuid/test.c 2008-05-26 11:29:37 UTC (rev 5240) @@ -0,0 +1,44 @@ +#include + +#include "cpuid.h" + +int main() +{ + cpu_caps_t *cpuinfo; + + cpuinfo = malloc(sizeof(*cpuinfo)); + + if (cpuinfo == NULL) { + fprintf(stderr, "Error allocating\n"); + } + + cpuid_get_caps(cpuinfo); + printf("This cpu string is %s\n", cpuinfo->vendor); + + if (cpuinfo->has_mmx) { + printf("This cpu has mmx instruction set\n"); + } else { + printf("This cpu does NOT have mmx instruction set\n"); + } + + if (cpuinfo->has_sse) { + printf("This cpu has sse instruction set\n"); + } else { + printf("This cpu does NOT have sse instruction set\n"); + } + + if (cpuinfo->has_sse2) { + printf("This cpu has sse2 instruction set\n"); + } else { + printf("This cpu does NOT have sse2 instruction set\n"); + } + + if (cpuinfo->has_sse3) { + printf("This cpu has sse3 instruction set\n"); + } else { + printf("This cpu does NOT have sse3 instruction set\n"); + } + + free(cpuinfo); + return 0; +} Added: trunk/tools/win32build/nsis_scripts/numpy-superinstaller-2.4.nsi 
=================================================================== --- trunk/tools/win32build/nsis_scripts/numpy-superinstaller-2.4.nsi 2008-05-26 07:42:22 UTC (rev 5239) +++ trunk/tools/win32build/nsis_scripts/numpy-superinstaller-2.4.nsi 2008-05-26 11:29:37 UTC (rev 5240) @@ -0,0 +1,120 @@ +;-------------------------------- +;Include Modern UI + +!include "MUI2.nsh" + +;SetCompress off ; Useful to disable compression under development + +;-------------------------------- +;General + +;Name and file +Name "Numpy super installer" +OutFile "numpy-1.1.0-win32-superpack-python2.4.exe" + +;Default installation folder +InstallDir "$TEMP" + +;-------------------------------- +;Interface Settings + +!define MUI_ABORTWARNING + +;-------------------------------- +;Pages + +;!insertmacro MUI_PAGE_LICENSE "${NSISDIR}\Docs\Modern UI\License.txt" +;!insertmacro MUI_PAGE_COMPONENTS +;!insertmacro MUI_PAGE_DIRECTORY +;!insertmacro MUI_PAGE_INSTFILES + +;!insertmacro MUI_UNPAGE_CONFIRM +;!insertmacro MUI_UNPAGE_INSTFILES + +;-------------------------------- +;Languages + +!insertmacro MUI_LANGUAGE "English" + +;-------------------------------- +;Component Sections + +!include 'Sections.nsh' +!include LogicLib.nsh + +Var HasSSE2 +Var HasSSE3 +Var CPUSSE + +Section "Core" SecCore + + ;SectionIn RO + SetOutPath "$INSTDIR" + + ;Create uninstaller + ;WriteUninstaller "$INSTDIR\Uninstall.exe" + + DetailPrint "Install dir for actual installers is $INSTDIR" + + StrCpy $CPUSSE "0" + CpuCaps::hasSSE2 + Pop $0 + StrCpy $HasSSE2 $0 + + CpuCaps::hasSSE3 + Pop $0 + StrCpy $HasSSE3 $0 + + ; Debug + StrCmp $HasSSE2 "Y" include_sse2 no_include_sse2 + include_sse2: + DetailPrint '"Target CPU handles SSE2"' + StrCpy $CPUSSE "2" + goto done_sse2 + no_include_sse2: + DetailPrint '"Target CPU does NOT handle SSE2"' + goto done_sse2 + done_sse2: + + StrCmp $HasSSE3 "Y" include_sse3 no_include_sse3 + include_sse3: + DetailPrint '"Target CPU handles SSE3"' + StrCpy $CPUSSE "3" + goto done_sse3 + no_include_sse3: + DetailPrint '"Target CPU does NOT handle SSE3"' + goto done_sse3 + done_sse3: + + ClearErrors + + ; Install files conditionaly on detected cpu + ${Switch} $CPUSSE + ${Case} "3" + DetailPrint '"Install SSE 3"' + File "numpy-1.1.0-sse3.exe" + ExecWait '"$INSTDIR\numpy-1.1.0-sse3.exe"' + ${Break} + ${Case} "2" + DetailPrint '"Install SSE 2"' + File "numpy-1.1.0-sse2.exe" + ExecWait '"$INSTDIR\numpy-1.1.0-sse2.exe"' + ${Break} + ${Default} + DetailPrint '"Install NO SSE"' + File "numpy-1.1.0-nosse.exe" + ExecWait '"$INSTDIR\numpy-1.1.0-nosse.exe"' + ${Break} + ${EndSwitch} + + ; Handle errors when executing installers + IfErrors error no_error + + error: + messageBox MB_OK "Executing numpy installer failed" + goto done + no_error: + goto done + done: + +SectionEnd Added: trunk/tools/win32build/nsis_scripts/numpy-superinstaller-2.5.nsi =================================================================== --- trunk/tools/win32build/nsis_scripts/numpy-superinstaller-2.5.nsi 2008-05-26 07:42:22 UTC (rev 5239) +++ trunk/tools/win32build/nsis_scripts/numpy-superinstaller-2.5.nsi 2008-05-26 11:29:37 UTC (rev 5240) @@ -0,0 +1,120 @@ +;-------------------------------- +;Include Modern UI + +!include "MUI2.nsh" + +;SetCompress off ; Useful to disable compression under development + +;-------------------------------- +;General + +;Name and file +Name "Numpy super installer" +OutFile "numpy-1.1.0-win32-superpack-python2.5.exe" + +;Default installation folder +InstallDir "$TEMP" + +;-------------------------------- +;Interface Settings + 
+!define MUI_ABORTWARNING + +;-------------------------------- +;Pages + +;!insertmacro MUI_PAGE_LICENSE "${NSISDIR}\Docs\Modern UI\License.txt" +;!insertmacro MUI_PAGE_COMPONENTS +;!insertmacro MUI_PAGE_DIRECTORY +;!insertmacro MUI_PAGE_INSTFILES + +;!insertmacro MUI_UNPAGE_CONFIRM +;!insertmacro MUI_UNPAGE_INSTFILES + +;-------------------------------- +;Languages + +!insertmacro MUI_LANGUAGE "English" + +;-------------------------------- +;Component Sections + +!include 'Sections.nsh' +!include LogicLib.nsh + +Var HasSSE2 +Var HasSSE3 +Var CPUSSE + +Section "Core" SecCore + + ;SectionIn RO + SetOutPath "$INSTDIR" + + ;Create uninstaller + ;WriteUninstaller "$INSTDIR\Uninstall.exe" + + DetailPrint "Install dir for actual installers is $INSTDIR" + + StrCpy $CPUSSE "0" + CpuCaps::hasSSE2 + Pop $0 + StrCpy $HasSSE2 $0 + + CpuCaps::hasSSE3 + Pop $0 + StrCpy $HasSSE3 $0 + + ; Debug + StrCmp $HasSSE2 "Y" include_sse2 no_include_sse2 + include_sse2: + DetailPrint '"Target CPU handles SSE2"' + StrCpy $CPUSSE "2" + goto done_sse2 + no_include_sse2: + DetailPrint '"Target CPU does NOT handle SSE2"' + goto done_sse2 + done_sse2: + + StrCmp $HasSSE3 "Y" include_sse3 no_include_sse3 + include_sse3: + DetailPrint '"Target CPU handles SSE3"' + StrCpy $CPUSSE "3" + goto done_sse3 + no_include_sse3: + DetailPrint '"Target CPU does NOT handle SSE3"' + goto done_sse3 + done_sse3: + + ClearErrors + + ; Install files conditionaly on detected cpu + ${Switch} $CPUSSE + ${Case} "3" + DetailPrint '"Install SSE 3"' + File "numpy-1.1.0-sse3.exe" + ExecWait '"$INSTDIR\numpy-1.1.0-sse3.exe"' + ${Break} + ${Case} "2" + DetailPrint '"Install SSE 2"' + File "numpy-1.1.0-sse2.exe" + ExecWait '"$INSTDIR\numpy-1.1.0-sse2.exe"' + ${Break} + ${Default} + DetailPrint '"Install NO SSE"' + File "numpy-1.1.0-nosse.exe" + ExecWait '"$INSTDIR\numpy-1.1.0-nosse.exe"' + ${Break} + ${EndSwitch} + + ; Handle errors when executing installers + IfErrors error no_error + + error: + messageBox MB_OK "Executing numpy installer failed" + goto done + no_error: + goto done + done: + +SectionEnd From numpy-svn at scipy.org Mon May 26 18:08:38 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Mon, 26 May 2008 17:08:38 -0500 (CDT) Subject: [Numpy-svn] r5241 - in trunk/numpy/core: src tests Message-ID: <20080526220838.E2F2F39C318@scipy.org> Author: charris Date: 2008-05-26 17:08:35 -0500 (Mon, 26 May 2008) New Revision: 5241 Modified: trunk/numpy/core/src/arrayobject.c trunk/numpy/core/tests/test_regression.py Log: Fix regression in dtype='c' array creation. 
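In user-visible terms, the fix restores splitting a plain Python string into one character per element when dtype='c' is requested. A minimal sketch of that behaviour, mirroring the regression test added below (numpy imported as np, as in the test suite):

    >>> import numpy as np
    >>> a = np.array('123', dtype='c')    # one single-character element per character
    >>> b = np.array(['1', '2', '3'])
    >>> np.all(a == b)
    True
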
Modified: trunk/numpy/core/src/arrayobject.c =================================================================== --- trunk/numpy/core/src/arrayobject.c 2008-05-26 11:29:37 UTC (rev 5240) +++ trunk/numpy/core/src/arrayobject.c 2008-05-26 22:08:35 UTC (rev 5241) @@ -13,7 +13,8 @@ Travis Oliphant, oliphant at ee.byu.edu Brigham Young Univeristy - maintainer email: oliphant.travis at ieee.org +:8613 +maintainer email: oliphant.travis at ieee.org Numarray design (which provided guidance) by Space Science Telescope Institute @@ -8801,7 +8802,7 @@ else if (newtype->type_num == PyArray_OBJECT) { isobject = 1; } - if (!PyString_Check(op) && PySequence_Check(op)) { + if (PySequence_Check(op)) { PyObject *thiserr = NULL; /* necessary but not sufficient */ Modified: trunk/numpy/core/tests/test_regression.py =================================================================== --- trunk/numpy/core/tests/test_regression.py 2008-05-26 11:29:37 UTC (rev 5240) +++ trunk/numpy/core/tests/test_regression.py 2008-05-26 22:08:35 UTC (rev 5241) @@ -1079,5 +1079,10 @@ self.failUnlessRaises(ValueError, dp) self.failUnlessRaises(ValueError, dp2) + def check_char_array_creation(self, level=rlevel): + a = np.array('123', dtype='c') + b = np.array(['1','2','3']) + assert_equal(a,b) + if __name__ == "__main__": NumpyTest().run() From numpy-svn at scipy.org Mon May 26 18:15:32 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Mon, 26 May 2008 17:15:32 -0500 (CDT) Subject: [Numpy-svn] r5242 - in trunk/numpy/ma: . tests Message-ID: <20080526221532.43D8A39C6F2@scipy.org> Author: pierregm Date: 2008-05-26 17:15:29 -0500 (Mon, 26 May 2008) New Revision: 5242 Modified: trunk/numpy/ma/core.py trunk/numpy/ma/mrecords.py trunk/numpy/ma/tests/test_core.py Log: core : __new__: keep the fill_value of the initializing object by default mrecords: force _guessvartypes to return numpy.dtypes instead of types Modified: trunk/numpy/ma/core.py =================================================================== --- trunk/numpy/ma/core.py 2008-05-26 22:08:35 UTC (rev 5241) +++ trunk/numpy/ma/core.py 2008-05-26 22:15:29 UTC (rev 5242) @@ -1214,8 +1214,9 @@ else: _data._mask = umath.logical_or(mask, _data._mask) _data._sharedmask = False - # Update fill_value....... + if fill_value is None: + fill_value = getattr(data,'_fill_value', None) _data._fill_value = _check_fill_value(fill_value, _data.dtype) # Process extra options .. _data._hardmask = hard_mask Modified: trunk/numpy/ma/mrecords.py =================================================================== --- trunk/numpy/ma/mrecords.py 2008-05-26 22:08:35 UTC (rev 5241) +++ trunk/numpy/ma/mrecords.py 2008-05-26 22:15:29 UTC (rev 5242) @@ -664,11 +664,11 @@ except ValueError: vartypes.append(arr.dtype) else: - vartypes.append(complex) + vartypes.append(np.dtype(complex)) else: - vartypes.append(float) + vartypes.append(np.dtype(float)) else: - vartypes.append(int) + vartypes.append(np.dtype(int)) return vartypes def openfile(fname): @@ -738,11 +738,12 @@ vartypes = _guessvartypes(_variables[0]) # Construct the descriptor .................. mdescr = [(n,f) for (n,f) in zip(varnames, vartypes)] + mfillv = [ma.default_fill_value(f) for f in vartypes] # Get the data and the mask ................. # We just need a list of masked_arrays. 
It's easier to create it like that: _mask = (_variables.T == missingchar) - _datalist = [masked_array(a,mask=m,dtype=t) - for (a,m,t) in zip(_variables.T, _mask, vartypes)] + _datalist = [masked_array(a,mask=m,dtype=t,fill_value=f) + for (a,m,t,f) in zip(_variables.T, _mask, vartypes, mfillv)] return fromarrays(_datalist, dtype=mdescr) #.................................................................... Modified: trunk/numpy/ma/tests/test_core.py =================================================================== --- trunk/numpy/ma/tests/test_core.py 2008-05-26 22:08:35 UTC (rev 5241) +++ trunk/numpy/ma/tests/test_core.py 2008-05-26 22:15:29 UTC (rev 5242) @@ -811,9 +811,11 @@ # def test_asarray(self): (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d + xm.fill_value = -9999 xmm = asarray(xm) assert_equal(xmm._data, xm._data) assert_equal(xmm._mask, xm._mask) + assert_equal(xmm.fill_value, xm.fill_value) # def test_fix_invalid(self): "Checks fix_invalid." @@ -1588,4 +1590,4 @@ ############################################################################### #------------------------------------------------------------------------------ if __name__ == "__main__": - NumpyTest().run() + NumpyTest('numpy.ma.core').run() From numpy-svn at scipy.org Mon May 26 18:21:16 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Mon, 26 May 2008 17:21:16 -0500 (CDT) Subject: [Numpy-svn] r5243 - in branches/1.1.x/numpy/core: src tests Message-ID: <20080526222116.2B9C139C6F2@scipy.org> Author: charris Date: 2008-05-26 17:21:13 -0500 (Mon, 26 May 2008) New Revision: 5243 Modified: branches/1.1.x/numpy/core/src/arrayobject.c branches/1.1.x/numpy/core/tests/test_regression.py Log: Fix regression in dtype='c' array creation. Modified: branches/1.1.x/numpy/core/src/arrayobject.c =================================================================== --- branches/1.1.x/numpy/core/src/arrayobject.c 2008-05-26 22:15:29 UTC (rev 5242) +++ branches/1.1.x/numpy/core/src/arrayobject.c 2008-05-26 22:21:13 UTC (rev 5243) @@ -8801,7 +8801,7 @@ else if (newtype->type_num == PyArray_OBJECT) { isobject = 1; } - if (!PyString_Check(op) && PySequence_Check(op)) { + if (PySequence_Check(op)) { PyObject *thiserr = NULL; /* necessary but not sufficient */ Modified: branches/1.1.x/numpy/core/tests/test_regression.py =================================================================== --- branches/1.1.x/numpy/core/tests/test_regression.py 2008-05-26 22:15:29 UTC (rev 5242) +++ branches/1.1.x/numpy/core/tests/test_regression.py 2008-05-26 22:21:13 UTC (rev 5243) @@ -1079,5 +1079,11 @@ self.failUnlessRaises(ValueError, dp) self.failUnlessRaises(ValueError, dp2) + def check_char_array_creation(self, level=rlevel): + a = np.array('123', dtype='c') + b = np.array(['1','2','3']) + assert_equal(a,b) + + if __name__ == "__main__": NumpyTest().run() From numpy-svn at scipy.org Wed May 28 22:31:36 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Wed, 28 May 2008 21:31:36 -0500 (CDT) Subject: [Numpy-svn] r5244 - in trunk/numpy/ma: . tests Message-ID: <20080529023136.C7B7AC7C09B@scipy.org> Author: pierregm Date: 2008-05-28 21:31:28 -0500 (Wed, 28 May 2008) New Revision: 5244 Modified: trunk/numpy/ma/core.py trunk/numpy/ma/extras.py trunk/numpy/ma/mrecords.py trunk/numpy/ma/tests/test_mrecords.py Log: mrecords : Make sure a field shares its mask with the whole array mrecords : IMPORTANT : the mask of a field is no longer set to nomask when it's full of False, which simplifies masking specific fields. 
extras : Reorganized personal comments Modified: trunk/numpy/ma/core.py =================================================================== --- trunk/numpy/ma/core.py 2008-05-26 22:21:13 UTC (rev 5243) +++ trunk/numpy/ma/core.py 2008-05-29 02:31:28 UTC (rev 5244) @@ -1633,7 +1633,7 @@ else: return str(self._data) # convert to object array to make filled work -#CHECK: the two lines below seem more robust than the self._data.astype +#!!!: the two lines below seem more robust than the self._data.astype # res = numeric.empty(self._data.shape, object_) # numeric.putmask(res,~m,self._data) res = self._data.astype("|O8") @@ -2032,7 +2032,7 @@ indicated `axis1` and `axis2`. """ - # TODO: What are we doing with `out`? + #!!!: implement out + test! m = self._mask if m is nomask: result = super(MaskedArray, self).trace(offset=offset, axis1=axis1, @@ -2207,7 +2207,7 @@ """ if self._mask is nomask: - # TODO: Do we keep super, or var _data and take a view ? + #???: Do we keep super, or var _data and take a view ? return super(MaskedArray, self).var(axis=axis, dtype=dtype, ddof=ddof) else: @@ -2676,7 +2676,7 @@ for convenience. And backwards compatibility... """ - #TODO: we should try to put 'order' somwehere + #!!!: we should try to put 'order' somwehere return MaskedArray(data, mask=mask, dtype=dtype, copy=copy, subok=subok, keep_mask=keep_mask, hard_mask=hard_mask, fill_value=fill_value, ndmin=ndmin, shrink=shrink) @@ -3186,7 +3186,7 @@ def choose (indices, t, out=None, mode='raise'): "Return array shaped like indices with elements chosen from t" - #TODO: implement options `out` and `mode`, if possible. + #!!!: implement options `out` and `mode`, if possible + test. def fmask (x): "Returns the filled array, or True if masked." if x is masked: Modified: trunk/numpy/ma/extras.py =================================================================== --- trunk/numpy/ma/extras.py 2008-05-26 22:21:13 UTC (rev 5243) +++ trunk/numpy/ma/extras.py 2008-05-29 02:31:28 UTC (rev 5244) @@ -567,7 +567,7 @@ The first argument is not conjugated. """ - #TODO: Works only with 2D arrays. There should be a way to get it to run with higher dimension + #!!!: Works only with 2D arrays. There should be a way to get it to run with higher dimension if strict and (a.ndim == 2) and (b.ndim == 2): a = mask_rows(a) b = mask_cols(b) Modified: trunk/numpy/ma/mrecords.py =================================================================== --- trunk/numpy/ma/mrecords.py 2008-05-26 22:21:13 UTC (rev 5243) +++ trunk/numpy/ma/mrecords.py 2008-05-29 02:31:28 UTC (rev 5244) @@ -6,11 +6,11 @@ :author: Pierre Gerard-Marchant """ -#TODO: We should make sure that no field is called '_mask','mask','_fieldmask', -#TODO: ...or whatever restricted keywords. -#TODO: An idea would be to no bother in the first place, and then rename the -#TODO: invalid fields with a trailing underscore... -#TODO: Maybe we could just overload the parser function ? +#!!!: * We should make sure that no field is called '_mask','mask','_fieldmask', +#!!!: or whatever restricted keywords. +#!!!: An idea would be to no bother in the first place, and then rename the +#!!!: invalid fields with a trailing underscore... +#!!!: Maybe we could just overload the parser function ? __author__ = "Pierre GF Gerard-Marchant" @@ -51,9 +51,6 @@ formats = '' for obj in data: obj = np.asarray(obj) -# if not isinstance(obj, ndarray): -## if not isinstance(obj, ndarray): -# raise ValueError, "item in the array list must be an ndarray." 
formats += _typestr[obj.dtype.type] if issubclass(obj.dtype.type, ntypes.flexible): formats += `obj.itemsize` @@ -124,7 +121,6 @@ self = recarray.__new__(cls, shape, dtype=dtype, buf=buf, offset=offset, strides=strides, formats=formats, byteorder=byteorder, aligned=aligned,) -# self = self.view(cls) # mdtype = [(k,'|b1') for (k,_) in self.dtype.descr] if mask is nomask or not np.size(mask): @@ -331,11 +327,13 @@ _data = self._data # We want a field ........ if isinstance(indx, basestring): + #NB: Make sure _sharedmask is True to propagate back to _fieldmask + #NB: Don't use _set_mask, there are some copies being made that break propagation + #NB: Don't force the mask to nomask, that wrecks easy masking obj = _data[indx].view(MaskedArray) - obj._set_mask(_fieldmask[indx]) - # Force to nomask if the mask is empty - if not obj._mask.any(): - obj._mask = nomask +# obj._set_mask(_fieldmask[indx]) + obj._mask = _fieldmask[indx] + obj._sharedmask = True # Force to masked if the mask is True if not obj.ndim and obj._mask: return masked @@ -780,4 +778,3 @@ newdata._fieldmask = newmask return newdata -############################################################################### Modified: trunk/numpy/ma/tests/test_mrecords.py =================================================================== --- trunk/numpy/ma/tests/test_mrecords.py 2008-05-26 22:21:13 UTC (rev 5243) +++ trunk/numpy/ma/tests/test_mrecords.py 2008-05-29 02:31:28 UTC (rev 5244) @@ -132,6 +132,20 @@ assert_equal(rdata.num, val) assert_equal(rdata.num.mask, [1,0,0]) + def test_set_fields_mask(self): + "Tests setting the mask of a field." + base = self.base.copy() + # This one has already a mask.... + mbase = base.view(mrecarray) + mbase['a'][-2] = masked + assert_equal(mbase.a, [1,2,3,4,5]) + assert_equal(mbase.a._mask, [0,1,0,1,1]) + # This one has not yet + mbase = fromarrays([np.arange(5), np.random.rand(5)], + dtype=[('a',int),('b',float)]) + mbase['a'][-2] = masked + assert_equal(mbase.a, [0,1,2,3,4]) + assert_equal(mbase.a._mask, [0,0,0,1,0]) # def test_set_mask(self): base = self.base.copy() @@ -231,7 +245,8 @@ mbase.soften_mask() assert(not mbase._hardmask) mbase._mask = nomask - assert(mbase['b']._mask is nomask) + # So, the mask of a field is no longer set to nomask... + assert(ma.make_mask(mbase['b']._mask) is nomask) assert_equal(mbase['a']._mask,mbase['b']._mask) # def test_pickling(self): @@ -365,7 +380,7 @@ f.write(fcontent) f.close() mrectxt = fromtextfile(fname,delimitor=',',varnames='ABCDEFG') - os.unlink(fname) + os.remove(fname) # assert(isinstance(mrectxt, MaskedRecords)) assert_equal(mrectxt.F, [1,1,1,1]) From numpy-svn at scipy.org Thu May 29 11:15:54 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Thu, 29 May 2008 10:15:54 -0500 (CDT) Subject: [Numpy-svn] r5245 - trunk/numpy/core/src Message-ID: <20080529151554.4C14739C5F8@scipy.org> Author: oliphant Date: 2008-05-29 10:15:45 -0500 (Thu, 29 May 2008) New Revision: 5245 Modified: trunk/numpy/core/src/ufuncobject.c Log: Use memmove when memory areas can overlap. 
Modified: trunk/numpy/core/src/ufuncobject.c =================================================================== --- trunk/numpy/core/src/ufuncobject.c 2008-05-29 02:31:28 UTC (rev 5244) +++ trunk/numpy/core/src/ufuncobject.c 2008-05-29 15:15:45 UTC (rev 5245) @@ -2742,7 +2742,7 @@ while(loop->index < loop->size) { if (loop->obj) Py_INCREF(*((PyObject **)loop->it->dataptr)); - memcpy(loop->bufptr[0], loop->it->dataptr, + memmove(loop->bufptr[0], loop->it->dataptr, loop->outsize); PyArray_ITER_NEXT(loop->it); loop->bufptr[0] += loop->outsize; @@ -2755,7 +2755,7 @@ /* Copy first element to output */ if (loop->obj) Py_INCREF(*((PyObject **)loop->it->dataptr)); - memcpy(loop->bufptr[0], loop->it->dataptr, + memmove(loop->bufptr[0], loop->it->dataptr, loop->outsize); /* Adjust input pointer */ loop->bufptr[1] = loop->it->dataptr+loop->steps[1]; From numpy-svn at scipy.org Sat May 31 20:53:59 2008 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sat, 31 May 2008 19:53:59 -0500 (CDT) Subject: [Numpy-svn] r5246 - trunk/numpy/doc Message-ID: <20080601005359.4833F39C59A@scipy.org> Author: ptvirtan Date: 2008-05-31 19:53:50 -0500 (Sat, 31 May 2008) New Revision: 5246 Modified: trunk/numpy/doc/HOWTO_DOCUMENT.txt Log: Spell out namespace convention in Examples and See Also sections in docstrings Modified: trunk/numpy/doc/HOWTO_DOCUMENT.txt =================================================================== --- trunk/numpy/doc/HOWTO_DOCUMENT.txt 2008-05-29 15:15:45 UTC (rev 5245) +++ trunk/numpy/doc/HOWTO_DOCUMENT.txt 2008-06-01 00:53:50 UTC (rev 5246) @@ -73,7 +73,7 @@ We are busy converting existing docstrings to the new format, expanding them where they are lacking, as well as writing new ones for undocumented functions. Volunteers are welcome to join the effort on -our new wiki-based documentation system (see the `Developer Zone +our new documentation system (see the `Developer Zone `_). Sections @@ -177,7 +177,10 @@ See Also -------- - average : Weighted average + numpy.average : Weighted average + + Preferably, use the full namespace prefixes. For targets in the same + module as the documented object, the prefix can be omitted. 8. **Notes** @@ -277,6 +280,10 @@ b + The examples may assume that ``import numpy`` is executed before + the example code in *numpy*, and ``import scipy`` in *scipy*, but + other modules used should be explicitly imported. + 11. **Indexing tags*** Each function needs to be categorised for indexing purposes. Use