From numpy-svn at scipy.org Sun May 2 16:02:14 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sun, 2 May 2010 15:02:14 -0500 (CDT) Subject: [Numpy-svn] r8375 - in trunk/numpy/lib: . tests Message-ID: <20100502200214.3653C39C4B4@scipy.org> Author: stefan Date: 2010-05-02 15:02:14 -0500 (Sun, 02 May 2010) New Revision: 8375 Modified: trunk/numpy/lib/npyio.py trunk/numpy/lib/tests/test_io.py Log: BUG: loadtxt should handle universal newlines. Modified: trunk/numpy/lib/npyio.py =================================================================== --- trunk/numpy/lib/npyio.py 2010-04-30 07:06:02 UTC (rev 8374) +++ trunk/numpy/lib/npyio.py 2010-05-02 20:02:14 UTC (rev 8375) @@ -584,7 +584,7 @@ import bz2 fh = bz2.BZ2File(fname) else: - fh = file(fname) + fh = file(fname, 'U') elif hasattr(fname, 'readline'): fh = fname else: Modified: trunk/numpy/lib/tests/test_io.py =================================================================== --- trunk/numpy/lib/tests/test_io.py 2010-04-30 07:06:02 UTC (rev 8374) +++ trunk/numpy/lib/tests/test_io.py 2010-05-02 20:02:14 UTC (rev 8375) @@ -403,8 +403,18 @@ dtype=ndtype) assert_equal(test, control) + def test_universal_newline(self): + f, name = mkstemp() + os.write(f, asbytes('1 21\r3 42\r')) + os.close(f) + try: + data = np.loadtxt(name) + assert_array_equal(data, [[1, 21], [3, 42]]) + finally: + os.unlink(name) + class Testfromregex(TestCase): def test_record(self): c = StringIO() From numpy-svn at scipy.org Mon May 3 03:50:02 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Mon, 3 May 2010 02:50:02 -0500 (CDT) Subject: [Numpy-svn] r8376 - in trunk/numpy: core/code_generators core/include/numpy core/src/multiarray core/src/private core/src/umath f2py/src lib numarray numarray/include/numpy random/mtrand Message-ID: <20100503075002.A41B539C4B4@scipy.org> Author: charris Date: 2010-05-03 02:50:02 -0500 (Mon, 03 May 2010) New Revision: 8376 Modified: 
trunk/numpy/core/code_generators/generate_numpy_api.py trunk/numpy/core/code_generators/generate_ufunc_api.py trunk/numpy/core/include/numpy/ndarrayobject.h trunk/numpy/core/src/multiarray/arraytypes.c.src trunk/numpy/core/src/multiarray/common.c trunk/numpy/core/src/multiarray/convert_datatype.c trunk/numpy/core/src/multiarray/ctors.c trunk/numpy/core/src/multiarray/descriptor.c trunk/numpy/core/src/multiarray/getset.c trunk/numpy/core/src/multiarray/multiarraymodule.c trunk/numpy/core/src/multiarray/scalarapi.c trunk/numpy/core/src/multiarray/scalartypes.c.src trunk/numpy/core/src/multiarray/scalartypes.h trunk/numpy/core/src/multiarray/usertypes.c trunk/numpy/core/src/private/npy_3kcompat.h trunk/numpy/core/src/umath/ufunc_object.c trunk/numpy/core/src/umath/umathmodule.c.src trunk/numpy/f2py/src/fortranobject.c trunk/numpy/f2py/src/fortranobject.h trunk/numpy/lib/type_check.py trunk/numpy/numarray/_capi.c trunk/numpy/numarray/include/numpy/libnumarray.h trunk/numpy/random/mtrand/Python.pxi Log: ENH, BUG: PyCObject will be deprecated in python 2.7. So use the NpyCapsule compatibility functions in npy_3kcompat.h to replace the current calls. This gets rid of a number of version checks and is easier to maintain. Fix bug that was present in the ufunc _loop1d_list_free destructor in the python3k case. 
Modified: trunk/numpy/core/code_generators/generate_numpy_api.py =================================================================== --- trunk/numpy/core/code_generators/generate_numpy_api.py 2010-05-02 20:02:14 UTC (rev 8375) +++ trunk/numpy/core/code_generators/generate_numpy_api.py 2010-05-03 07:50:02 UTC (rev 8376) @@ -64,7 +64,7 @@ } Py_DECREF(numpy); -#if PY_VERSION_HEX >= 0x03010000 +#if PY_VERSION_HEX >= 0x02070000 if (!PyCapsule_CheckExact(c_api)) { PyErr_SetString(PyExc_RuntimeError, "_ARRAY_API is not PyCapsule object"); Py_DECREF(c_api); Modified: trunk/numpy/core/code_generators/generate_ufunc_api.py =================================================================== --- trunk/numpy/core/code_generators/generate_ufunc_api.py 2010-05-02 20:02:14 UTC (rev 8375) +++ trunk/numpy/core/code_generators/generate_ufunc_api.py 2010-05-03 07:50:02 UTC (rev 8376) @@ -53,7 +53,7 @@ } Py_DECREF(numpy); -#if PY_VERSION_HEX >= 0x03010000 +#if PY_VERSION_HEX >= 0x02070000 if (!PyCapsule_CheckExact(c_api)) { PyErr_SetString(PyExc_RuntimeError, "_UFUNC_API is not PyCapsule object"); Py_DECREF(c_api); Modified: trunk/numpy/core/include/numpy/ndarrayobject.h =================================================================== --- trunk/numpy/core/include/numpy/ndarrayobject.h 2010-05-02 20:02:14 UTC (rev 8375) +++ trunk/numpy/core/include/numpy/ndarrayobject.h 2010-05-03 07:50:02 UTC (rev 8376) @@ -598,9 +598,19 @@ int sec, us, ps, as; } npy_timedeltastruct; +#if PY_VERSION_HEX >= 0x02070000 +#define PyDataType_GetDatetimeMetaData(descr) \ + ((descr->metadata == NULL) ? NULL : \ + ((PyArray_DatetimeMetaData *)(PyCapsule_GetPointer( \ + PyDict_GetItemString( \ + descr->metadata, NPY_METADATA_DTSTR), NULL)))) +#else +#define PyDataType_GetDatetimeMetaData(descr) \ + ((descr->metadata == NULL) ? 
NULL : \ + ((PyArray_DatetimeMetaData *)(PyCObject_AsVoidPtr( \ + PyDict_GetItemString(descr->metadata, NPY_METADATA_DTSTR))))) +#endif -#define PyDataType_GetDatetimeMetaData(descr) ((descr->metadata == NULL) ? NULL : ((PyArray_DatetimeMetaData *)(PyCObject_AsVoidPtr(PyDict_GetItemString(descr->metadata, NPY_METADATA_DTSTR))))) - typedef int (PyArray_FinalizeFunc)(PyArrayObject *, PyObject *); /* Means c-style contiguous (last index varies the fastest). The Modified: trunk/numpy/core/src/multiarray/arraytypes.c.src =================================================================== --- trunk/numpy/core/src/multiarray/arraytypes.c.src 2010-05-02 20:02:14 UTC (rev 8375) +++ trunk/numpy/core/src/multiarray/arraytypes.c.src 2010-05-03 07:50:02 UTC (rev 8376) @@ -3537,14 +3537,7 @@ * There is no error check here and no way to indicate an error * until the metadata turns up NULL. */ -#if defined(NPY_PY3K) - cobj = PyCapsule_New((void *)dt_data, NULL, simple_capsule_dtor); - if (cobj == NULL) { - PyErr_Clear(); - } -#else - cobj = PyCObject_FromVoidPtr((void *)dt_data, simple_capsule_dtor); -#endif + cobj = NpyCapsule_FromVoidPtr((void *)dt_data, simple_capsule_dtor); descr->metadata = PyDict_New(); PyDict_SetItemString(descr->metadata, NPY_METADATA_DTSTR, cobj); Py_DECREF(cobj); Modified: trunk/numpy/core/src/multiarray/common.c =================================================================== --- trunk/numpy/core/src/multiarray/common.c 2010-05-02 20:02:14 UTC (rev 8375) +++ trunk/numpy/core/src/multiarray/common.c 2010-05-03 07:50:02 UTC (rev 8376) @@ -258,25 +258,14 @@ PyArrayInterface *inter; char buf[40]; -#if defined(NPY_PY3K) - if (PyCapsule_CheckExact(ip)) { - inter = (PyArrayInterface *)PyCapsule_GetPointer(ip, NULL); + if (NpyCapsule_Check(ip)) { + inter = (PyArrayInterface *)NpyCapsule_AsVoidPtr(ip); if (inter->two == 2) { PyOS_snprintf(buf, sizeof(buf), "|%c%d", inter->typekind, inter->itemsize); chktype = _array_typedescr_fromstr(buf); } } -#else - if 
(PyCObject_Check(ip)) { - inter = (PyArrayInterface *)PyCObject_AsVoidPtr(ip); - if (inter->two == 2) { - PyOS_snprintf(buf, sizeof(buf), - "|%c%d", inter->typekind, inter->itemsize); - chktype = _array_typedescr_fromstr(buf); - } - } -#endif Py_DECREF(ip); if (chktype) { goto finish; Modified: trunk/numpy/core/src/multiarray/convert_datatype.c =================================================================== --- trunk/numpy/core/src/multiarray/convert_datatype.c 2010-05-02 20:02:14 UTC (rev 8375) +++ trunk/numpy/core/src/multiarray/convert_datatype.c 2010-05-03 07:50:02 UTC (rev 8376) @@ -102,15 +102,9 @@ key = PyInt_FromLong(type_num); cobj = PyDict_GetItem(obj, key); Py_DECREF(key); -#if defined(NPY_PY3K) - if (PyCapsule_CheckExact(cobj)) { - castfunc = PyCapsule_GetPointer(cobj, NULL); - } -#else - if (PyCObject_Check(cobj)) { + if (NpyCapsule_Check(cobj)) { castfunc = PyCObject_AsVoidPtr(cobj); } -#endif } } if (PyTypeNum_ISCOMPLEX(descr->type_num) && Modified: trunk/numpy/core/src/multiarray/ctors.c =================================================================== --- trunk/numpy/core/src/multiarray/ctors.c 2010-05-02 20:02:14 UTC (rev 8375) +++ trunk/numpy/core/src/multiarray/ctors.c 2010-05-03 07:50:02 UTC (rev 8376) @@ -1140,18 +1140,9 @@ #endif if ((e = PyObject_GetAttrString(s, "__array_struct__")) != NULL) { d = -1; -#if defined(NPY_PY3K) - if (PyCapsule_CheckExact(e)) { + if (NpyCapsule_Check(e)) { PyArrayInterface *inter; - inter = (PyArrayInterface *)PyCapsule_GetPointer(e, NULL); - if (inter == NULL) { - PyErr_Clear(); - } -#else - if (PyCObject_Check(e)) { - PyArrayInterface *inter; - inter = (PyArrayInterface *)PyCObject_AsVoidPtr(e); -#endif + inter = (PyArrayInterface *)NpyCapsule_AsVoidPtr(e); if (inter->two == 2) { d = inter->nd; } @@ -1574,20 +1565,10 @@ */ PyArray_UpdateFlags(self, UPDATE_ALL); } -#if defined(NPY_PY3K) - if PyCapsule_CheckExact(func) { + if (NpyCapsule_Check(func)) { /* A C-function is stored here */ PyArray_FinalizeFunc 
*cfunc; - cfunc = PyCapsule_GetPointer(func, NULL); - if (cfunc == NULL) { - PyErr_Clear(); - } -#else - if PyCObject_Check(func) { - /* A C-function is stored here */ - PyArray_FinalizeFunc *cfunc; - cfunc = PyCObject_AsVoidPtr(func); -#endif + cfunc = NpyCapsule_AsVoidPtr(func); Py_DECREF(func); if (cfunc(self, obj) < 0) { goto fail; @@ -2129,20 +2110,10 @@ PyErr_Clear(); return Py_NotImplemented; } -#if defined(NPY_PY3K) - if (!PyCapsule_CheckExact(attr)) { + if (!NpyCapsule_Check(attr)) { goto fail; } - inter = PyCapsule_GetPointer(attr, NULL); - if (inter == NULL) { - PyErr_Clear(); - } -#else - if (!PyCObject_Check(attr)) { - goto fail; - } - inter = PyCObject_AsVoidPtr(attr); -#endif + inter = NpyCapsule_AsVoidPtr(attr); if (inter->two != 2) { goto fail; } Modified: trunk/numpy/core/src/multiarray/descriptor.c =================================================================== --- trunk/numpy/core/src/multiarray/descriptor.c 2010-05-02 20:02:14 UTC (rev 8375) +++ trunk/numpy/core/src/multiarray/descriptor.c 2010-05-03 07:50:02 UTC (rev 8376) @@ -646,14 +646,7 @@ PyArray_DatetimeMetaData *dt_data; PyObject *dt_tuple; -#if defined(NPY_PY3K) - dt_data = PyCapsule_GetPointer(cobj, NULL); - if (dt_data == NULL) { - PyErr_Clear(); - } -#else - dt_data = PyCObject_AsVoidPtr(cobj); -#endif + dt_data = NpyCapsule_AsVoidPtr(cobj); dt_tuple = PyTuple_New(4); PyTuple_SET_ITEM(dt_tuple, 0, @@ -692,14 +685,7 @@ /* FIXME * There is no error handling here. 
*/ -#if defined(NPY_PY3K) - ret = PyCapsule_New((void *)dt_data, NULL, simple_capsule_dtor); - if (ret == NULL) { - PyErr_Clear(); - } -#else - ret = PyCObject_FromVoidPtr((void *)dt_data, simple_capsule_dtor); -#endif + ret = NpyCapsule_FromVoidPtr((void *)dt_data, simple_capsule_dtor); return ret; } @@ -1563,14 +1549,7 @@ return ret; } tmp = PyDict_GetItemString(self->metadata, NPY_METADATA_DTSTR); -#if defined(NPY_PY3K) - dt_data = PyCapsule_GetPointer(tmp, NULL); - if (dt_data == NULL) { - PyErr_Clear(); - } -#else - dt_data = PyCObject_AsVoidPtr(tmp); -#endif + dt_data = NpyCapsule_AsVoidPtr(tmp); num = dt_data->num; den = dt_data->den; events = dt_data->events; Modified: trunk/numpy/core/src/multiarray/getset.c =================================================================== --- trunk/numpy/core/src/multiarray/getset.c 2010-05-02 20:02:14 UTC (rev 8375) +++ trunk/numpy/core/src/multiarray/getset.c 2010-05-03 07:50:02 UTC (rev 8376) @@ -552,19 +552,7 @@ inter->descr = NULL; } Py_INCREF(self); -#if defined(NPY_PY3K) - ret = PyCapsule_New(inter, NULL, gentype_struct_free); - if (ret == NULL) { - PyErr_Clear(); - } - else if (PyCapsule_SetContext(ret, self) != 0) { - PyErr_Clear(); - Py_DECREF(ret); - ret = NULL; - } -#else - ret = PyCObject_FromVoidPtrAndDesc(inter, self, gentype_struct_free); -#endif + ret = NpyCapsule_FromVoidPtrAndDesc(inter, self, gentype_struct_free); return ret; } Modified: trunk/numpy/core/src/multiarray/multiarraymodule.c =================================================================== --- trunk/numpy/core/src/multiarray/multiarraymodule.c 2010-05-02 20:02:14 UTC (rev 8375) +++ trunk/numpy/core/src/multiarray/multiarraymodule.c 2010-05-03 07:50:02 UTC (rev 8376) @@ -1353,19 +1353,8 @@ /* FIXME * There is no err handling here. 
*/ -#if defined(NPY_PY3K) - data1 = PyCapsule_GetPointer(cobj1, NULL); - if (data1 == NULL) { - PyErr_Clear(); - } - data2 = PyCapsule_GetPointer(cobj2, NULL); - if (data2 == NULL) { - PyErr_Clear(); - } -#else - data1 = PyCObject_AsVoidPtr(cobj1); - data2 = PyCObject_AsVoidPtr(cobj2); -#endif + data1 = NpyCapsule_AsVoidPtr(cobj1); + data2 = NpyCapsule_AsVoidPtr(cobj2); return ((data1->base == data2->base) && (data1->num == data2->num) && (data1->den == data2->den) @@ -3062,14 +3051,7 @@ /* FIXME * There is no error handling here */ -#if defined(NPY_PY3K) - c_api = PyCapsule_New((void *)PyArray_API, NULL, NULL); - if (c_api == NULL) { - PyErr_Clear(); - } -#else - c_api = PyCObject_FromVoidPtr((void *)PyArray_API, NULL); -#endif + c_api = NpyCapsule_FromVoidPtr((void *)PyArray_API, NULL); PyDict_SetItemString(d, "_ARRAY_API", c_api); Py_DECREF(c_api); if (PyErr_Occurred()) { @@ -3103,14 +3085,7 @@ /* FIXME * There is no error handling here */ -#if defined(NPY_PY3K) - s = PyCapsule_New((void *)_datetime_strings, NULL, NULL); - if (s == NULL) { - PyErr_Clear(); - } -#else - s = PyCObject_FromVoidPtr((void *)_datetime_strings, NULL); -#endif + s = NpyCapsule_FromVoidPtr((void *)_datetime_strings, NULL); PyDict_SetItemString(d, "DATETIMEUNITS", s); Py_DECREF(s); Modified: trunk/numpy/core/src/multiarray/scalarapi.c =================================================================== --- trunk/numpy/core/src/multiarray/scalarapi.c 2010-05-02 20:02:14 UTC (rev 8375) +++ trunk/numpy/core/src/multiarray/scalarapi.c 2010-05-03 07:50:02 UTC (rev 8376) @@ -534,14 +534,7 @@ memcpy(dt_data, &((PyTimedeltaScalarObject *)sc)->obmeta, sizeof(PyArray_DatetimeMetaData)); } -#if defined(NPY_PY3K) - cobj = PyCapsule_New((void *)dt_data, NULL, simple_capsule_dtor); - if (cobj == NULL) { - PyErr_Clear(); - } -#else - cobj = PyCObject_FromVoidPtr((void *)dt_data, simple_capsule_dtor); -#endif + cobj = NpyCapsule_FromVoidPtr((void *)dt_data, simple_capsule_dtor); /* Add correct meta-data 
to the data-type */ if (descr == NULL) { @@ -677,14 +670,7 @@ /* FIXME * There is no error handling here. */ -#if defined(NPY_PY3K) - dt_data = PyCapsule_GetPointer(cobj, NULL); - if (dt_data == NULL) { - PyErr_Clear(); - } -#else - dt_data = PyCObject_AsVoidPtr(cobj); -#endif + dt_data = NpyCapsule_AsVoidPtr(cobj); memcpy(&(((PyDatetimeScalarObject *)obj)->obmeta), dt_data, sizeof(PyArray_DatetimeMetaData)); } Modified: trunk/numpy/core/src/multiarray/scalartypes.c.src =================================================================== --- trunk/numpy/core/src/multiarray/scalartypes.c.src 2010-05-02 20:02:14 UTC (rev 8375) +++ trunk/numpy/core/src/multiarray/scalartypes.c.src 2010-05-03 07:50:02 UTC (rev 8376) @@ -810,7 +810,7 @@ return PyInt_FromLong(1); } -#if defined(NPY_PY3K) +#if PY_VERSION_HEX >= 0x02070000 NPY_NO_EXPORT void gentype_struct_free(PyObject *ptr) { @@ -857,18 +857,7 @@ inter->data = arr->data; inter->descr = NULL; -#if defined(NPY_PY3K) - ret = PyCapsule_New(inter, NULL, gentype_struct_free); - if (ret == NULL) { - PyErr_Clear(); - } - else if (PyCapsule_SetContext(ret, arr) != 0) { - PyErr_Clear(); - ret == NULL; - } -#else - ret = PyCObject_FromVoidPtrAndDesc(inter, arr, gentype_struct_free); -#endif + ret = NpyCapsule_FromVoidPtrAndDesc(inter, arr, gentype_struct_free); return ret; } Modified: trunk/numpy/core/src/multiarray/scalartypes.h =================================================================== --- trunk/numpy/core/src/multiarray/scalartypes.h 2010-05-02 20:02:14 UTC (rev 8375) +++ trunk/numpy/core/src/multiarray/scalartypes.h 2010-05-03 07:50:02 UTC (rev 8376) @@ -7,7 +7,7 @@ NPY_NO_EXPORT void format_longdouble(char *buf, size_t buflen, longdouble val, unsigned int prec); -#if defined(NPY_PY3K) +#if PY_VERSION_HEX >= 0x02070000 NPY_NO_EXPORT void gentype_struct_free(PyObject *ptr); #else Modified: trunk/numpy/core/src/multiarray/usertypes.c =================================================================== --- 
trunk/numpy/core/src/multiarray/usertypes.c 2010-05-02 20:02:14 UTC (rev 8375) +++ trunk/numpy/core/src/multiarray/usertypes.c 2010-05-03 07:50:02 UTC (rev 8376) @@ -206,15 +206,8 @@ if (PyErr_Occurred()) { return -1; } -#if defined(NPY_PY3K) - cobj = PyCapsule_New((void *)castfunc, NULL, NULL); + cobj = NpyCapsule_FromVoidPtr((void *)castfunc, NULL); if (cobj == NULL) { - PyErr_Clear(); - } -#else - cobj = PyCObject_FromVoidPtr((void *)castfunc, NULL); -#endif - if (cobj == NULL) { Py_DECREF(key); return -1; } Modified: trunk/numpy/core/src/private/npy_3kcompat.h =================================================================== --- trunk/numpy/core/src/private/npy_3kcompat.h 2010-05-02 20:02:14 UTC (rev 8375) +++ trunk/numpy/core/src/private/npy_3kcompat.h 2010-05-03 07:50:02 UTC (rev 8376) @@ -219,7 +219,7 @@ * The main job here is to get rid of the improved error handling * of PyCapsules. It's a shame... */ -#if defined(NPY_PY3K) +#if PY_VERSION_HEX >= 0x02070000 static NPY_INLINE PyObject * NpyCapsule_FromVoidPtr(void *ptr, void (*dtor)(PyObject *)) Modified: trunk/numpy/core/src/umath/ufunc_object.c =================================================================== --- trunk/numpy/core/src/umath/ufunc_object.c 2010-05-02 20:02:14 UTC (rev 8375) +++ trunk/numpy/core/src/umath/ufunc_object.c 2010-05-03 07:50:02 UTC (rev 8376) @@ -372,14 +372,7 @@ PyUFunc_Loop1d *funcdata; int i; -#if defined(NPY_PY3K) - funcdata = (PyUFunc_Loop1d *)PyCapsule_GetPointer(obj, NULL); - if (funcdata == NULL) { - PyErr_Clear(); - } -#else - funcdata = (PyUFunc_Loop1d *)PyCObject_AsVoidPtr(obj); -#endif + funcdata = (PyUFunc_Loop1d *)NpyCapsule_AsVoidPtr(obj); while (funcdata != NULL) { for (i = 0; i < nin; i++) { if (!PyArray_CanCoerceScalar(arg_types[i], @@ -524,14 +517,7 @@ * extract the correct function * data and argtypes */ -#if defined(NPY_PY3K) - funcdata = (PyUFunc_Loop1d *)PyCapsule_GetPointer(obj, NULL); - if (funcdata == NULL) { - PyErr_Clear(); - } -#else - funcdata = 
(PyUFunc_Loop1d *)PyCObject_AsVoidPtr(obj); -#endif + funcdata = (PyUFunc_Loop1d *)NpyCapsule_AsVoidPtr(obj); while (funcdata != NULL) { if (n != 1) { for (i = 0; i < nargs; i++) { @@ -3890,36 +3876,31 @@ * This frees the linked-list structure when the CObject * is destroyed (removed from the internal dictionary) */ -#if defined(NPY_PY3K) static void -_loop1d_list_free(PyObject *ptr) +_free_loop1d_list(PyUFunc_Loop1d *data) { - PyUFunc_Loop1d *funcdata; - - funcdata = (PyUFunc_Loop1d *)PyCapsule_GetPointer(ptr, NULL); - if (funcdata == NULL) { + if (data == NULL) { return; } - _pya_free(funcdata->arg_types); - _loop1d_list_free(funcdata->next); - _pya_free(funcdata); + _pya_free(data->arg_types); + _free_loop1d_list(data->next); + _pya_free(data); } +#if PY_VERSION_HEX >= 0x02070000 +static void +_loop1d_list_free(PyObject *ptr) +{ + PyUFunc_Loop1d *data = (PyUFunc_Loop1d *)PyCapsule_GetPointer(ptr, NULL); + _free_loop1d_list(data); +} + #else static void _loop1d_list_free(void *ptr) { - PyUFunc_Loop1d *funcdata; - if (ptr == NULL) { - return; - } - funcdata = (PyUFunc_Loop1d *)ptr; - if (funcdata == NULL) { - return; - } - _pya_free(funcdata->arg_types); - _loop1d_list_free(funcdata->next); - _pya_free(funcdata); + PyUFunc_Loop1d *data = (PyUFunc_Loop1d *)ptr; + _free_loop1d_list(data); } #endif @@ -3980,15 +3961,8 @@ cobj = PyDict_GetItem(ufunc->userloops, key); /* If it's not there, then make one and return. */ if (cobj == NULL) { -#if defined(NPY_PY3K) - cobj = PyCapsule_New((void *)funcdata, NULL, _loop1d_list_free); + cobj = NpyCapsule_FromVoidPtr((void *)funcdata, _loop1d_list_free); if (cobj == NULL) { - PyErr_Clear(); - } -#else - cobj = PyCObject_FromVoidPtr((void *)funcdata, _loop1d_list_free); -#endif - if (cobj == NULL) { goto fail; } PyDict_SetItem(ufunc->userloops, key, cobj); @@ -4005,14 +3979,7 @@ * is exactly like this one, then just replace. * Otherwise insert. 
*/ -#if defined(NPY_PY3K) - current = (PyUFunc_Loop1d *)PyCapsule_GetPointer(cobj, NULL); - if (current == NULL) { - PyErr_Clear(); - } -#else - current = (PyUFunc_Loop1d *)PyCObject_AsVoidPtr(cobj); -#endif + current = (PyUFunc_Loop1d *)NpyCapsule_AsVoidPtr(cobj); while (current != NULL) { cmp = cmp_arg_types(current->arg_types, newtypes, ufunc->nargs); if (cmp >= 0) { Modified: trunk/numpy/core/src/umath/umathmodule.c.src =================================================================== --- trunk/numpy/core/src/umath/umathmodule.c.src 2010-05-02 20:02:14 UTC (rev 8375) +++ trunk/numpy/core/src/umath/umathmodule.c.src 2010-05-03 07:50:02 UTC (rev 8376) @@ -304,14 +304,7 @@ /* Add some symbolic constants to the module */ d = PyModule_GetDict(m); -#if defined(NPY_PY3K) - c_api = PyCapsule_New((void *)PyUFunc_API, NULL, NULL); - if (c_api == NULL) { - PyErr_Clear(); - } -#else - c_api = PyCObject_FromVoidPtr((void *)PyUFunc_API, NULL); -#endif + c_api = NpyCapsule_FromVoidPtr((void *)PyUFunc_API, NULL); if (PyErr_Occurred()) { goto err; } Modified: trunk/numpy/f2py/src/fortranobject.c =================================================================== --- trunk/numpy/f2py/src/fortranobject.c 2010-05-02 20:02:14 UTC (rev 8375) +++ trunk/numpy/f2py/src/fortranobject.c 2010-05-03 07:50:02 UTC (rev 8376) @@ -913,11 +913,11 @@ return PyArray_CopyInto(out, (PyArrayObject *)arr); } -/*******************************************/ -/* Compatibility functions for Python 3.1 */ -/*******************************************/ +/*********************************************/ +/* Compatibility functions for Python >= 2.7 */ +/*********************************************/ -#if PY_VERSION_HEX >= 0X03010000 +#if PY_VERSION_HEX >= 0X02070000 PyObject * F2PyCapsule_FromVoidPtr(void *ptr, void (*dtor)(PyObject *)) Modified: trunk/numpy/f2py/src/fortranobject.h =================================================================== --- trunk/numpy/f2py/src/fortranobject.h 2010-05-02 
20:02:14 UTC (rev 8375) +++ trunk/numpy/f2py/src/fortranobject.h 2010-05-03 07:50:02 UTC (rev 8376) @@ -76,7 +76,7 @@ 123456789-123456789-123456789-123456789-123456789-123456789-123456789-12 PyFortranObject represents various Fortran objects: -Fortran (module) routines, COMMON blocks, module data. +Fortran (module) routines, COMMON blocks, module data. Author: Pearu Peterson */ @@ -121,7 +121,7 @@ extern PyObject * PyFortranObject_New(FortranDataDef* defs, f2py_void_func init); extern PyObject * PyFortranObject_NewAsAttr(FortranDataDef* defs); -#if PY_VERSION_HEX >= 0x03010000 +#if PY_VERSION_HEX >= 0x02070000 PyObject * F2PyCapsule_FromVoidPtr(void *ptr, void (*dtor)(PyObject *)); void * F2PyCapsule_AsVoidPtr(PyObject *obj); Modified: trunk/numpy/lib/type_check.py =================================================================== --- trunk/numpy/lib/type_check.py 2010-05-02 20:02:14 UTC (rev 8375) +++ trunk/numpy/lib/type_check.py 2010-05-03 07:50:02 UTC (rev 8376) @@ -620,7 +620,7 @@ ('events', ctypes.c_int)] import sys - if sys.version_info[:2] >= (3,1): + if sys.version_info[:2] >= (2,7): func = ctypes.pythonapi.PyCapsule_GetPointer func.argtypes = [ctypes.py_object, ctypes.c_char_p] func.restype = ctypes.c_void_p Modified: trunk/numpy/numarray/_capi.c =================================================================== --- trunk/numpy/numarray/_capi.c 2010-05-02 20:02:14 UTC (rev 8375) +++ trunk/numpy/numarray/_capi.c 2010-05-03 07:50:02 UTC (rev 8376) @@ -3402,7 +3402,7 @@ _Error = PyErr_NewException("numpy.numarray._capi.error", NULL, NULL); /* Create a CObject containing the API pointer array's address */ -#if PY_VERSION_HEX >= 0x03010000 +#if PY_VERSION_HEX >= 0x02070000 m = PyModule_Create(&moduledef); c_api_object = PyCapsule_New((void *)libnumarray_API, NULL, NULL); if (c_api_object == NULL) { Modified: trunk/numpy/numarray/include/numpy/libnumarray.h =================================================================== --- 
trunk/numpy/numarray/include/numpy/libnumarray.h 2010-05-02 20:02:14 UTC (rev 8375) +++ trunk/numpy/numarray/include/numpy/libnumarray.h 2010-05-03 07:50:02 UTC (rev 8376) @@ -40,7 +40,7 @@ #endif #endif -#if PY_VERSION_HEX >= 0x03010000 +#if PY_VERSION_HEX >= 0x02070000 #define _import_libnumarray() \ { \ PyObject *module = PyImport_ImportModule("numpy.numarray._capi"); \ Modified: trunk/numpy/random/mtrand/Python.pxi =================================================================== --- trunk/numpy/random/mtrand/Python.pxi 2010-05-02 20:02:14 UTC (rev 8375) +++ trunk/numpy/random/mtrand/Python.pxi 2010-05-03 07:50:02 UTC (rev 8376) @@ -29,6 +29,9 @@ void Py_XINCREF(object obj) # CObject API +# If this is uncommented it needs to be fixed to use PyCapsule +# for Python >= 2.7 +# # ctypedef void (*destructor1)(void* cobj) # ctypedef void (*destructor2)(void* cobj, void* desc) # int PyCObject_Check(object p) From numpy-svn at scipy.org Mon May 3 13:08:22 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Mon, 3 May 2010 12:08:22 -0500 (CDT) Subject: [Numpy-svn] r8377 - trunk/numpy/core/src/multiarray Message-ID: <20100503170822.17BB239CAEC@scipy.org> Author: charris Date: 2010-05-03 12:08:21 -0500 (Mon, 03 May 2010) New Revision: 8377 Modified: trunk/numpy/core/src/multiarray/methods.c Log: ENH,BUG: Simplify array_choose with NpyArg_ParseKeywords. Fix some uses of int where Py_ssize_t should be used. 
Modified: trunk/numpy/core/src/multiarray/methods.c =================================================================== --- trunk/numpy/core/src/multiarray/methods.c 2010-05-03 07:50:02 UTC (rev 8376) +++ trunk/numpy/core/src/multiarray/methods.c 2010-05-03 17:08:21 UTC (rev 8377) @@ -104,14 +104,13 @@ PyArray_Dims newshape; PyObject *ret; PyArray_ORDER order = PyArray_CORDER; - int n; + Py_ssize_t n = PyTuple_Size(args); if (!NpyArg_ParseKeywords(kwds, "|O&", keywords, PyArray_OrderConverter, &order)) { return NULL; } - n = PyTuple_Size(args); if (n <= 1) { if (PyTuple_GET_ITEM(args, 0) == Py_None) { return PyArray_View(self, NULL, NULL); @@ -929,7 +928,7 @@ array_resize(PyArrayObject *self, PyObject *args, PyObject *kwds) { static char *kwlist[] = {"refcheck", NULL}; - intp size = PyTuple_Size(args); + Py_ssize_t size = PyTuple_Size(args); int refcheck = 1; PyArray_Dims newshape; PyObject *ret, *obj; @@ -985,12 +984,12 @@ static PyObject * array_choose(PyArrayObject *self, PyObject *args, PyObject *kwds) { + static char *keywords[] = {"out", "mode"}; PyObject *choices; - int n; PyArrayObject *out = NULL; NPY_CLIPMODE clipmode = NPY_RAISE; + Py_ssize_t n = PyTuple_Size(args); - n = PyTuple_Size(args); if (n <= 1) { if (!PyArg_ParseTuple(args, "O", &choices)) { return NULL; @@ -999,16 +998,11 @@ else { choices = args; } - /*FIXME: use NpyArg_ParseKeywords*/ - if (kwds && PyDict_Check(kwds)) { - if (PyArray_OutputConverter(PyDict_GetItemString(kwds, "out"), - &out) == PY_FAIL) { - return NULL; - } - if (PyArray_ClipmodeConverter(PyDict_GetItemString(kwds, "mode"), - &clipmode) == PY_FAIL) { - return NULL; - } + + if (!NpyArg_ParseKeywords(kwds, "|O&O&", keywords, + PyArray_OutputConverter, &out, + PyArray_ClipmodeConverter, &clipmode)) { + return NULL; } return _ARET(PyArray_Choose(self, choices, out, clipmode)); @@ -1623,11 +1617,10 @@ array_transpose(PyArrayObject *self, PyObject *args) { PyObject *shape = Py_None; - int n; + Py_ssize_t n = PyTuple_Size(args); 
PyArray_Dims permute; PyObject *ret; - n = PyTuple_Size(args); if (n > 1) { shape = args; } From numpy-svn at scipy.org Mon May 3 13:08:25 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Mon, 3 May 2010 12:08:25 -0500 (CDT) Subject: [Numpy-svn] r8378 - trunk/numpy/core/src/multiarray Message-ID: <20100503170825.88FED39CAF8@scipy.org> Author: charris Date: 2010-05-03 12:08:25 -0500 (Mon, 03 May 2010) New Revision: 8378 Modified: trunk/numpy/core/src/multiarray/iterators.c Log: BUG: Fix use of int type where Py_ssize_t is appropriate. Modified: trunk/numpy/core/src/multiarray/iterators.c =================================================================== --- trunk/numpy/core/src/multiarray/iterators.c 2010-05-03 17:08:21 UTC (rev 8377) +++ trunk/numpy/core/src/multiarray/iterators.c 2010-05-03 17:08:25 UTC (rev 8378) @@ -1532,7 +1532,7 @@ arraymultiter_new(PyTypeObject *NPY_UNUSED(subtype), PyObject *args, PyObject *kwds) { - int n, i; + Py_ssize_t n, i; PyArrayMultiIterObject *multi; PyObject *arr; From numpy-svn at scipy.org Mon May 3 14:10:39 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Mon, 3 May 2010 13:10:39 -0500 (CDT) Subject: [Numpy-svn] r8379 - trunk/numpy/lib Message-ID: <20100503181039.E407039C4B4@scipy.org> Author: charris Date: 2010-05-03 13:10:39 -0500 (Mon, 03 May 2010) New Revision: 8379 Modified: trunk/numpy/lib/npyio.py Log: BUG: The builtin file function goes away in python 3k, use open instead. 
Modified: trunk/numpy/lib/npyio.py =================================================================== --- trunk/numpy/lib/npyio.py 2010-05-03 17:08:25 UTC (rev 8378) +++ trunk/numpy/lib/npyio.py 2010-05-03 18:10:39 UTC (rev 8379) @@ -584,7 +584,7 @@ import bz2 fh = bz2.BZ2File(fname) else: - fh = file(fname, 'U') + fh = open(fname, 'U') elif hasattr(fname, 'readline'): fh = fname else: From numpy-svn at scipy.org Tue May 4 02:24:05 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Tue, 4 May 2010 01:24:05 -0500 (CDT) Subject: [Numpy-svn] r8380 - trunk/numpy/core/src/umath Message-ID: <20100504062405.6060539C4B4@scipy.org> Author: charris Date: 2010-05-04 01:24:05 -0500 (Tue, 04 May 2010) New Revision: 8380 Modified: trunk/numpy/core/src/umath/ufunc_object.c Log: ENH: Simplify list destructor. Modified: trunk/numpy/core/src/umath/ufunc_object.c =================================================================== --- trunk/numpy/core/src/umath/ufunc_object.c 2010-05-03 18:10:39 UTC (rev 8379) +++ trunk/numpy/core/src/umath/ufunc_object.c 2010-05-04 06:24:05 UTC (rev 8380) @@ -3876,15 +3876,15 @@ * This frees the linked-list structure when the CObject * is destroyed (removed from the internal dictionary) */ -static void +static NPY_INLINE void _free_loop1d_list(PyUFunc_Loop1d *data) { - if (data == NULL) { - return; + while (data != NULL) { + PyUFunc_Loop1d *next = data->next; + _pya_free(data->arg_types); + _pya_free(data); + data = next; } - _pya_free(data->arg_types); - _free_loop1d_list(data->next); - _pya_free(data); } #if PY_VERSION_HEX >= 0x02070000 From numpy-svn at scipy.org Tue May 4 02:24:07 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Tue, 4 May 2010 01:24:07 -0500 (CDT) Subject: [Numpy-svn] r8381 - trunk/numpy/lib Message-ID: <20100504062407.D583639C4B4@scipy.org> Author: charris Date: 2010-05-04 01:24:07 -0500 (Tue, 04 May 2010) New Revision: 8381 Modified: trunk/numpy/lib/type_check.py Log: BUG: Fix datetime_data for 
python versions >= 2.7. Modified: trunk/numpy/lib/type_check.py =================================================================== --- trunk/numpy/lib/type_check.py 2010-05-04 06:24:05 UTC (rev 8380) +++ trunk/numpy/lib/type_check.py 2010-05-04 06:24:07 UTC (rev 8381) @@ -638,7 +638,10 @@ # FIXME: This needs to be kept consistent with enum in ndarrayobject.h from numpy.core.multiarray import DATETIMEUNITS obj = ctypes.py_object(DATETIMEUNITS) - result = func(obj) + if sys.version_info[:2] >= (2,7): + result = func(obj, ctypes.c_char_p(None)) + else: + result = func(obj) _unitnum2name = ctypes.cast(ctypes.c_void_p(result), ctypes.POINTER(ctypes.c_char_p)) return (_unitnum2name[base], struct.num, struct.den, struct.events) From numpy-svn at scipy.org Tue May 4 22:50:00 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Tue, 4 May 2010 21:50:00 -0500 (CDT) Subject: [Numpy-svn] r8382 - trunk/numpy/lib Message-ID: <20100505025000.91B9F39CAE9@scipy.org> Author: charris Date: 2010-05-04 21:50:00 -0500 (Tue, 04 May 2010) New Revision: 8382 Modified: trunk/numpy/lib/polynomial.py Log: BUG: Make polyder return a poly1d for the zeroeth order derivative when the input is a poly1d. Fixes ticket #1249. 
Modified: trunk/numpy/lib/polynomial.py =================================================================== --- trunk/numpy/lib/polynomial.py 2010-05-04 06:24:07 UTC (rev 8381) +++ trunk/numpy/lib/polynomial.py 2010-05-05 02:50:00 UTC (rev 8382) @@ -378,19 +378,20 @@ """ m = int(m) + if m < 0: + raise ValueError, "Order of derivative must be positive (see polyint)" + truepoly = isinstance(p, poly1d) p = NX.asarray(p) - n = len(p)-1 + n = len(p) - 1 y = p[:-1] * NX.arange(n, 0, -1) - if m < 0: - raise ValueError, "Order of derivative must be positive (see polyint)" if m == 0: - return p + val = p else: - val = polyder(y, m-1) - if truepoly: - val = poly1d(val) - return val + val = polyder(y, m - 1) + if truepoly: + val = poly1d(val) + return val def polyfit(x, y, deg, rcond=None, full=False): """ From numpy-svn at scipy.org Tue May 4 22:50:03 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Tue, 4 May 2010 21:50:03 -0500 (CDT) Subject: [Numpy-svn] r8383 - trunk/numpy/lib/tests Message-ID: <20100505025003.3355739CAE9@scipy.org> Author: charris Date: 2010-05-04 21:50:03 -0500 (Tue, 04 May 2010) New Revision: 8383 Modified: trunk/numpy/lib/tests/test_regression.py Log: ENH: Add test of polyder return type. 
Modified: trunk/numpy/lib/tests/test_regression.py =================================================================== --- trunk/numpy/lib/tests/test_regression.py 2010-05-05 02:50:00 UTC (rev 8382) +++ trunk/numpy/lib/tests/test_regression.py 2010-05-05 02:50:03 UTC (rev 8383) @@ -181,5 +181,13 @@ assert isinstance(path, (str, unicode)) assert path != '' + def test_polyder_return_type(self): + """Ticket #1249""" + assert_(isinstance(np.polyder(np.poly1d([1]), 0), np.poly1d)) + assert_(isinstance(np.polyder([1], 0), np.ndarray)) + assert_(isinstance(np.polyder(np.poly1d([1]), 1), np.poly1d)) + assert_(isinstance(np.polyder([1], 1), np.ndarray)) + + if __name__ == "__main__": run_module_suite() From numpy-svn at scipy.org Tue May 4 23:34:47 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Tue, 4 May 2010 22:34:47 -0500 (CDT) Subject: [Numpy-svn] r8384 - in trunk/numpy/linalg: . tests Message-ID: <20100505033447.4936C39CAE9@scipy.org> Author: charris Date: 2010-05-04 22:34:47 -0500 (Tue, 04 May 2010) New Revision: 8384 Modified: trunk/numpy/linalg/__init__.py trunk/numpy/linalg/linalg.py trunk/numpy/linalg/tests/test_linalg.py Log: ENH: Add slogdet to the linalg module. The patch is from njs with slogdet substituted for sign_log_det. Closes ticket #1402. 
Modified: trunk/numpy/linalg/__init__.py =================================================================== --- trunk/numpy/linalg/__init__.py 2010-05-05 02:50:03 UTC (rev 8383) +++ trunk/numpy/linalg/__init__.py 2010-05-05 03:34:47 UTC (rev 8384) @@ -9,6 +9,7 @@ inv Inverse of a square matrix solve Solve a linear system of equations det Determinant of a square matrix +slogdet Logarithm of the determinant of a square matrix lstsq Solve linear least-squares problem pinv Pseudo-inverse (Moore-Penrose) calculated using a singular value decomposition Modified: trunk/numpy/linalg/linalg.py =================================================================== --- trunk/numpy/linalg/linalg.py 2010-05-05 02:50:03 UTC (rev 8383) +++ trunk/numpy/linalg/linalg.py 2010-05-05 03:34:47 UTC (rev 8384) @@ -10,15 +10,15 @@ """ __all__ = ['matrix_power', 'solve', 'tensorsolve', 'tensorinv', 'inv', - 'cholesky', 'eigvals', 'eigvalsh', 'pinv', 'det', 'svd', - 'eig', 'eigh','lstsq', 'norm', 'qr', 'cond', 'matrix_rank', + 'cholesky', 'eigvals', 'eigvalsh', 'pinv', 'slogdet', 'det', + 'svd', 'eig', 'eigh','lstsq', 'norm', 'qr', 'cond', 'matrix_rank', 'LinAlgError'] from numpy.core import array, asarray, zeros, empty, transpose, \ intc, single, double, csingle, cdouble, inexact, complexfloating, \ newaxis, ravel, all, Inf, dot, add, multiply, identity, sqrt, \ maximum, flatnonzero, diagonal, arange, fastCopyAndTranspose, sum, \ - isfinite, size, finfo + isfinite, size, finfo, absolute, log, exp from numpy.lib import triu from numpy.linalg import lapack_lite from numpy.matrixlib.defmatrix import matrix_power @@ -1533,10 +1533,15 @@ # Determinant -def det(a): +def slogdet(a): """ - Compute the determinant of an array. + Compute the sign and (natural) logarithm of the determinant of an array. + If an array has a very small or very large determinant, than a call to + `det` may overflow or underflow. 
This routine is more robust against such + issues, because it computes the logarithm of the determinant rather than + the determinant itself. + Parameters ---------- a : array_like, shape (M, M) @@ -1544,9 +1549,16 @@ Returns ------- - det : ndarray - Determinant of `a`. + sign : float or complex + A number representing the sign of the determinant. For a real matrix, + this is 1, 0, or -1. For a complex matrix, this is a complex number + with absolute value 1 (i.e., it is on the unit circle), or else 0. + logdet : float + The natural log of the absolute value of the determinant. + If the determinant is zero, then `sign` will be 0 and `logdet` will be + -Inf. In all cases, the determinant is equal to `sign * np.exp(logdet)`. + Notes ----- The determinant is computed via LU factorization using the LAPACK @@ -1557,9 +1569,23 @@ The determinant of a 2-D array [[a, b], [c, d]] is ad - bc: >>> a = np.array([[1, 2], [3, 4]]) - >>> np.linalg.det(a) + >>> (sign, logdet) = np.linalg.slogdet(a) + >>> (sign, logdet) + (-1, 0.69314718055994529) + >>> sign * np.exp(logdet) -2.0 + This routine succeeds where ordinary `det` does not: + + >>> np.linalg.det(np.eye(500) * 0.1) + 0.0 + >>> np.linalg.slogdet(np.eye(500) * 0.1) + (1, -1151.2925464970228) + + See Also + -------- + det + """ a = asarray(a) _assertRank2(a) @@ -1577,11 +1603,51 @@ if (info < 0): raise TypeError, "Illegal input to Fortran routine" elif (info > 0): - return 0.0 - sign = add.reduce(pivots != arange(1, n+1)) % 2 - return (1.-2.*sign)*multiply.reduce(diagonal(a), axis=-1) + return (t(0.0), _realType(t)(-Inf)) + sign = 1. - 2. * (add.reduce(pivots != arange(1, n + 1)) % 2) + d = diagonal(a) + absd = absolute(d) + sign *= multiply.reduce(d / absd) + log(absd, absd) + logdet = add.reduce(absd, axis=-1) + return sign, logdet +def det(a): + """ + Compute the determinant of an array. + Parameters + ---------- + a : array_like, shape (M, M) + Input array. + + Returns + ------- + det : ndarray + Determinant of `a`. 
+ + Notes + ----- + The determinant is computed via LU factorization using the LAPACK + routine z/dgetrf. + + Examples + -------- + The determinant of a 2-D array [[a, b], [c, d]] is ad - bc: + + >>> a = np.array([[1, 2], [3, 4]]) + >>> np.linalg.det(a) + -2.0 + + See Also + -------- + slogdet : Another way to representing the determinant, more suitable + for large matrices where underflow/overflow may occur. + + """ + sign, logdet = slogdet(a) + return sign * exp(logdet) + # Linear Least Squares def lstsq(a, b, rcond=-1): Modified: trunk/numpy/linalg/tests/test_linalg.py =================================================================== --- trunk/numpy/linalg/tests/test_linalg.py 2010-05-05 02:50:03 UTC (rev 8383) +++ trunk/numpy/linalg/tests/test_linalg.py 2010-05-05 03:34:47 UTC (rev 8384) @@ -127,13 +127,32 @@ class TestDet(LinalgTestCase, TestCase): def do(self, a, b): d = linalg.det(a) + (s, ld) = linalg.slogdet(a) if asarray(a).dtype.type in (single, double): ad = asarray(a).astype(double) else: ad = asarray(a).astype(cdouble) ev = linalg.eigvals(ad) assert_almost_equal(d, multiply.reduce(ev)) + assert_almost_equal(s * np.exp(ld), multiply.reduce(ev)) + if s != 0: + assert_almost_equal(np.abs(s), 1) + else: + assert_equal(ld, -inf) + def test_zero(self): + assert_equal(linalg.det([[0.0]]), 0.0) + assert_equal(type(linalg.det([[0.0]])), double) + assert_equal(linalg.det([[0.0j]]), 0.0) + assert_equal(type(linalg.det([[0.0j]])), cdouble) + + assert_equal(linalg.slogdet([[0.0]]), (0.0, -inf)) + assert_equal(type(linalg.slogdet([[0.0]])[0]), double) + assert_equal(type(linalg.slogdet([[0.0]])[1]), double) + assert_equal(linalg.slogdet([[0.0j]]), (0.0j, -inf)) + assert_equal(type(linalg.slogdet([[0.0j]])[0]), cdouble) + assert_equal(type(linalg.slogdet([[0.0j]])[1]), double) + class TestLstsq(LinalgTestCase, TestCase): def do(self, a, b): u, s, vt = linalg.svd(a, 0) From numpy-svn at scipy.org Tue May 4 23:34:51 2010 From: numpy-svn at scipy.org (numpy-svn at 
scipy.org) Date: Tue, 4 May 2010 22:34:51 -0500 (CDT) Subject: [Numpy-svn] r8385 - in trunk: doc/source/reference numpy/linalg Message-ID: <20100505033451.3FF0E39CAEE@scipy.org> Author: charris Date: 2010-05-04 22:34:51 -0500 (Tue, 04 May 2010) New Revision: 8385 Modified: trunk/doc/source/reference/routines.linalg.rst trunk/numpy/linalg/linalg.py Log: ENH: Do some housekeeping for the newly added slogdet function. Modified: trunk/doc/source/reference/routines.linalg.rst =================================================================== --- trunk/doc/source/reference/routines.linalg.rst 2010-05-05 03:34:47 UTC (rev 8384) +++ trunk/doc/source/reference/routines.linalg.rst 2010-05-05 03:34:51 UTC (rev 8385) @@ -45,6 +45,7 @@ linalg.norm linalg.cond linalg.det + linalg.slogdet trace Solving equations and inverting matrices Modified: trunk/numpy/linalg/linalg.py =================================================================== --- trunk/numpy/linalg/linalg.py 2010-05-05 03:34:47 UTC (rev 8384) +++ trunk/numpy/linalg/linalg.py 2010-05-05 03:34:51 UTC (rev 8385) @@ -1564,6 +1564,8 @@ The determinant is computed via LU factorization using the LAPACK routine z/dgetrf. + .. versionadded:: 2.0.0. + Examples -------- The determinant of a 2-D array [[a, b], [c, d]] is ad - bc: From numpy-svn at scipy.org Wed May 5 11:55:25 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Wed, 5 May 2010 10:55:25 -0500 (CDT) Subject: [Numpy-svn] r8386 - trunk/doc/release Message-ID: <20100505155525.AE93F39CAEA@scipy.org> Author: charris Date: 2010-05-05 10:55:25 -0500 (Wed, 05 May 2010) New Revision: 8386 Modified: trunk/doc/release/2.0.0-notes.rst Log: Add slogdet to the release notes for 2.0.0. 
Modified: trunk/doc/release/2.0.0-notes.rst =================================================================== --- trunk/doc/release/2.0.0-notes.rst 2010-05-05 03:34:51 UTC (rev 8385) +++ trunk/doc/release/2.0.0-notes.rst 2010-05-05 15:55:25 UTC (rev 8386) @@ -48,3 +48,10 @@ >>> np.dot(a, np.dot(b, c)) +linalg.slogdet function +~~~~~~~~~~~~~~~~~~~~~~~ + +The slogdet function returns the sign and logarithm of the determinant +of a matrix. Because the determinant may involve the product of many +small/large values, the result is often more accurate than that obtained +by simple multiplication. From numpy-svn at scipy.org Wed May 5 15:27:46 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Wed, 5 May 2010 14:27:46 -0500 (CDT) Subject: [Numpy-svn] r8387 - trunk/numpy/core/src/multiarray Message-ID: <20100505192746.4ADFA39CAEA@scipy.org> Author: ptvirtan Date: 2010-05-05 14:27:46 -0500 (Wed, 05 May 2010) New Revision: 8387 Modified: trunk/numpy/core/src/multiarray/numpyos.c Log: BUG: use PyOS_string_to_double on Python 2.7 Related to #1345. This works around PyOS_ascii_strtod raising a PendingDeprecationWarning when GIL is released, which results to a crash. The full fix would be to re-acquire GIL, but I believe we will not want to do that for performance reasons, as it doesn't otherwise seem to be necessary. 
Modified: trunk/numpy/core/src/multiarray/numpyos.c =================================================================== --- trunk/numpy/core/src/multiarray/numpyos.c 2010-05-05 15:55:25 UTC (rev 8386) +++ trunk/numpy/core/src/multiarray/numpyos.c 2010-05-05 19:27:46 UTC (rev 8387) @@ -508,7 +508,7 @@ } memcpy(buffer, s, n); buffer[n] = '\0'; -#if defined(NPY_PY3K) +#if PY_VERSION_HEX >= 0x02070000 result = PyOS_string_to_double(buffer, &q, NULL); #else result = PyOS_ascii_strtod(buffer, &q); @@ -521,7 +521,7 @@ } /* End of ##2 */ -#if defined(NPY_PY3K) +#if PY_VERSION_HEX >= 0x02070000 return PyOS_string_to_double(s, endptr, NULL); #else return PyOS_ascii_strtod(s, endptr); From numpy-svn at scipy.org Wed May 5 15:28:02 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Wed, 5 May 2010 14:28:02 -0500 (CDT) Subject: [Numpy-svn] r8388 - trunk/numpy/core/src/multiarray Message-ID: <20100505192802.5C86739CAEA@scipy.org> Author: ptvirtan Date: 2010-05-05 14:28:02 -0500 (Wed, 05 May 2010) New Revision: 8388 Modified: trunk/numpy/core/src/multiarray/methods.c Log: BUG: core: ensure keywords[] list is NULL terminated in array_choose (might be related to #1476) Modified: trunk/numpy/core/src/multiarray/methods.c =================================================================== --- trunk/numpy/core/src/multiarray/methods.c 2010-05-05 19:27:46 UTC (rev 8387) +++ trunk/numpy/core/src/multiarray/methods.c 2010-05-05 19:28:02 UTC (rev 8388) @@ -984,7 +984,7 @@ static PyObject * array_choose(PyArrayObject *self, PyObject *args, PyObject *kwds) { - static char *keywords[] = {"out", "mode"}; + static char *keywords[] = {"out", "mode", NULL}; PyObject *choices; PyArrayObject *out = NULL; NPY_CLIPMODE clipmode = NPY_RAISE; From numpy-svn at scipy.org Thu May 6 02:41:55 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Thu, 6 May 2010 01:41:55 -0500 (CDT) Subject: [Numpy-svn] r8389 - in trunk/numpy/lib: . 
tests Message-ID: <20100506064155.8F79139CAED@scipy.org> Author: charris Date: 2010-05-06 01:41:55 -0500 (Thu, 06 May 2010) New Revision: 8389 Modified: trunk/numpy/lib/function_base.py trunk/numpy/lib/tests/test_function_base.py Log: Merge branch 'vectorize' Modified: trunk/numpy/lib/function_base.py =================================================================== --- trunk/numpy/lib/function_base.py 2010-05-05 19:28:02 UTC (rev 8388) +++ trunk/numpy/lib/function_base.py 2010-05-06 06:41:55 UTC (rev 8389) @@ -117,15 +117,16 @@ if weights is not None: weights = asarray(weights) if np.any(weights.shape != a.shape): - raise ValueError, 'weights should have the same shape as a.' + raise ValueError( + 'weights should have the same shape as a.') weights = weights.ravel() a = a.ravel() if (range is not None): mn, mx = range if (mn > mx): - raise AttributeError, \ - 'max must be larger than min in range parameter.' + raise AttributeError( + 'max must be larger than min in range parameter.') if not iterable(bins): if range is None: @@ -138,7 +139,8 @@ else: bins = asarray(bins) if (np.diff(bins) < 0).any(): - raise AttributeError, 'bins must increase monotonically.' + raise AttributeError( + 'bins must increase monotonically.') # Histogram is an integer or a float array depending on the weights. if weights is None: @@ -244,8 +246,9 @@ try: M = len(bins) if M != D: - raise AttributeError, 'The dimension of bins must be equal ' \ - 'to the dimension of the sample x.' 
+ raise AttributeError( + 'The dimension of bins must be equal'\ + ' to the dimension of the sample x.') except TypeError: bins = D*[bins] @@ -334,12 +337,13 @@ s = hist.sum() for i in arange(D): shape = ones(D, int) - shape[i] = nbin[i]-2 + shape[i] = nbin[i] - 2 hist = hist / dedges[i].reshape(shape) hist /= s - if (hist.shape != nbin-2).any(): - raise RuntimeError('Internal Shape Error') + if (hist.shape != nbin - 2).any(): + raise RuntimeError( + "Internal Shape Error") return hist, edges @@ -429,23 +433,30 @@ # Sanity checks if a.shape != wgt.shape : if axis is None : - raise TypeError, "Axis must be specified when shapes of a and weights differ." + raise TypeError( + "Axis must be specified when shapes of a "\ + "and weights differ.") if wgt.ndim != 1 : - raise TypeError, "1D weights expected when shapes of a and weights differ." + raise TypeError( + "1D weights expected when shapes of a and "\ + "weights differ.") if wgt.shape[0] != a.shape[axis] : - raise ValueError, "Length of weights not compatible with specified axis." 
+ raise ValueError( + "Length of weights not compatible with "\ + "specified axis.") # setup wgt to broadcast along axis - wgt = np.array(wgt, copy=0, ndmin=a.ndim).swapaxes(-1,axis) + wgt = np.array(wgt, copy=0, ndmin=a.ndim).swapaxes(-1, axis) scl = wgt.sum(axis=axis) if (scl == 0.0).any(): - raise ZeroDivisionError, "Weights sum to zero, can't be normalized" + raise ZeroDivisionError( + "Weights sum to zero, can't be normalized") - avg = np.multiply(a,wgt).sum(axis)/scl + avg = np.multiply(a, wgt).sum(axis)/scl if returned: - scl = np.multiply(avg,0) + scl + scl = np.multiply(avg, 0) + scl return avg, scl else: return avg @@ -513,7 +524,8 @@ a = asarray(a) if (a.dtype.char in typecodes['AllFloat']) \ and (_nx.isnan(a).any() or _nx.isinf(a).any()): - raise ValueError, "array must not contain infs or NaNs" + raise ValueError( + "array must not contain infs or NaNs") return a def piecewise(x, condlist, funclist, *args, **kw): @@ -612,8 +624,8 @@ condlist.append(~totlist) n += 1 if (n != n2): - raise ValueError, "function list and condition list " \ - "must be the same" + raise ValueError( + "function list and condition list must be the same") zerod = False # This is a hack to work around problems with NumPy's # handling of 0-d arrays and boolean indexing with @@ -683,7 +695,8 @@ n = len(condlist) n2 = len(choicelist) if n2 != n: - raise ValueError, "list of cases must be same length as list of conditions" + raise ValueError( + "list of cases must be same length as list of conditions") choicelist = [default] + choicelist S = 0 pfac = 1 @@ -791,7 +804,8 @@ elif n == N: dx = list(varargs) else: - raise SyntaxError, "invalid number of arguments" + raise SyntaxError( + "invalid number of arguments") # use central differences on interior and first differences on endpoints @@ -885,7 +899,8 @@ if n == 0: return a if n < 0: - raise ValueError, 'order must be non-negative but got ' + repr(n) + raise ValueError( + "order must be non-negative but got " + repr(n)) a = 
asanyarray(a) nd = len(a.shape) slice1 = [slice(None)]*nd @@ -1596,22 +1611,49 @@ # return number of input arguments and # number of default arguments -import re + def _get_nargs(obj): + import re + + terr = re.compile(r'.*? takes (exactly|at least) (?P(\d+)|(\w+))' + + r' argument(s|) \((?P(\d+)|(\w+)) given\)') + def _convert_to_int(strval): + try: + result = int(strval) + except ValueError: + if strval=='zero': + result = 0 + elif strval=='one': + result = 1 + elif strval=='two': + result = 2 + # How high to go? English only? + else: + raise + return result + if not callable(obj): - raise TypeError, "Object is not callable." + raise TypeError( + "Object is not callable.") if sys.version_info[0] >= 3: + # inspect currently fails for binary extensions + # like math.cos. So fall back to other methods if + # it fails. import inspect - spec = inspect.getargspec(obj) - nargs = len(spec.args) - if spec.defaults: - ndefaults = len(spec.defaults) - else: - ndefaults = 0 - if inspect.ismethod(obj): - nargs -= 1 - return nargs, ndefaults - elif hasattr(obj,'func_code'): + try: + spec = inspect.getargspec(obj) + nargs = len(spec.args) + if spec.defaults: + ndefaults = len(spec.defaults) + else: + ndefaults = 0 + if inspect.ismethod(obj): + nargs -= 1 + return nargs, ndefaults + except: + pass + + if hasattr(obj,'func_code'): fcode = obj.func_code nargs = fcode.co_argcount if obj.func_defaults is not None: @@ -1621,21 +1663,23 @@ if isinstance(obj, types.MethodType): nargs -= 1 return nargs, ndefaults - terr = re.compile(r'.*? 
takes exactly (?P\d+) argument(s|) \((?P\d+) given\)') + try: obj() return 0, 0 except TypeError, msg: m = terr.match(str(msg)) if m: - nargs = int(m.group('exargs')) - ndefaults = int(m.group('gargs')) + nargs = _convert_to_int(m.group('exargs')) + ndefaults = _convert_to_int(m.group('gargs')) if isinstance(obj, types.MethodType): nargs -= 1 return nargs, ndefaults - raise ValueError, 'failed to determine the number of arguments for %s' % (obj) + raise ValueError( + "failed to determine the number of arguments for %s" % (obj)) + class vectorize(object): """ vectorize(pyfunc, otypes='', doc=None) @@ -1717,11 +1761,13 @@ self.otypes = otypes for char in self.otypes: if char not in typecodes['All']: - raise ValueError, "invalid otype specified" + raise ValueError( + "invalid otype specified") elif iterable(otypes): self.otypes = ''.join([_nx.dtype(x).char for x in otypes]) else: - raise ValueError, "output types must be a string of typecode characters or a list of data-types" + raise ValueError( + "Invalid otype specification") self.lastcallargs = 0 def __call__(self, *args): @@ -1730,8 +1776,8 @@ nargs = len(args) if self.nin: if (nargs > self.nin) or (nargs < self.nin_wo_defaults): - raise ValueError, "mismatch between python function inputs"\ - " and received arguments" + raise ValueError( + "Invalid number of arguments") # we need a new ufunc if this is being called with more arguments. 
if (self.lastcallargs != nargs): @@ -3052,7 +3098,8 @@ if isinstance(obj, (int, long, integer)): if (obj < 0): obj += N if (obj < 0 or obj >=N): - raise ValueError, "invalid entry" + raise ValueError( + "invalid entry") newshape[axis]-=1; new = empty(newshape, arr.dtype, arr.flags.fnc) slobj[axis] = slice(None, obj) @@ -3197,8 +3244,9 @@ if isinstance(obj, (int, long, integer)): if (obj < 0): obj += N if obj < 0 or obj > N: - raise ValueError, "index (%d) out of range (0<=index<=%d) "\ - "in dimension %d" % (obj, N, axis) + raise ValueError( + "index (%d) out of range (0<=index<=%d) "\ + "in dimension %d" % (obj, N, axis)) newshape[axis] += 1; new = empty(newshape, arr.dtype, arr.flags.fnc) slobj[axis] = slice(None, obj) Modified: trunk/numpy/lib/tests/test_function_base.py =================================================================== --- trunk/numpy/lib/tests/test_function_base.py 2010-05-05 19:28:02 UTC (rev 8388) +++ trunk/numpy/lib/tests/test_function_base.py 2010-05-06 06:41:55 UTC (rev 8389) @@ -23,6 +23,7 @@ assert_array_equal(sometrue(y1, axis=0), [1, 1, 0]) assert_array_equal(sometrue(y1, axis=1), [0, 1, 1]) + class TestAll(TestCase): def test_basic(self): y1 = [0, 1, 1, 0] @@ -39,6 +40,7 @@ assert_array_equal(alltrue(y1, axis=0), [0, 0, 1]) assert_array_equal(alltrue(y1, axis=1), [0, 0, 1]) + class TestAverage(TestCase): def test_basic(self): y1 = array([1, 2, 3]) @@ -64,31 +66,30 @@ def test_weights(self): y = arange(10) w = arange(10) - assert_almost_equal(average(y, weights=w), (arange(10) ** 2).sum()*1. / arange(10).sum()) - + actual = average(y, weights=w) + desired = (arange(10) ** 2).sum()*1. 
/ arange(10).sum() + assert_almost_equal(actual, desired) + y1 = array([[1, 2, 3], [4, 5, 6]]) w0 = [1, 2] actual = average(y1, weights=w0, axis=0) desired = array([3., 4., 5.]) assert_almost_equal(actual, desired) - w1 = [0, 0, 1] + actual = average(y1, weights=w1, axis=1) desired = array([3., 6.]) - assert_almost_equal(average(y1, weights=w1, axis=1), desired) + assert_almost_equal(actual, desired) # This should raise an error. Can we test for that ? # assert_equal(average(y1, weights=w1), 9./2.) - # 2D Case w2 = [[0, 0, 1], [0, 0, 2]] desired = array([3., 6.]) assert_array_equal(average(y1, weights=w2, axis=1), desired) - assert_equal(average(y1, weights=w2), 5.) - def test_returned(self): y = array([[1, 2, 3], [4, 5, 6]]) @@ -136,6 +137,7 @@ assert_equal(len(choices), 3) assert_equal(len(conditions), 3) + class TestInsert(TestCase): def test_basic(self): a = [1, 2, 3] @@ -143,6 +145,7 @@ assert_equal(insert(a, 3, 1), [1, 2, 3, 1]) assert_equal(insert(a, [1, 1, 1], [1, 2, 3]), [1, 1, 2, 3, 2, 3]) + class TestAmax(TestCase): def test_basic(self): a = [3, 4, 5, 10, -3, -5, 6.0] @@ -153,6 +156,7 @@ assert_equal(amax(b, axis=0), [8.0, 10.0, 9.0]) assert_equal(amax(b, axis=1), [9.0, 10.0, 8.0]) + class TestAmin(TestCase): def test_basic(self): a = [3, 4, 5, 10, -3, -5, 6.0] @@ -163,6 +167,7 @@ assert_equal(amin(b, axis=0), [3.0, 3.0, 2.0]) assert_equal(amin(b, axis=1), [3.0, 4.0, 2.0]) + class TestPtp(TestCase): def test_basic(self): a = [3, 4, 5, 10, -3, -5, 6.0] @@ -173,6 +178,7 @@ assert_equal(ptp(b, axis=0), [5.0, 7.0, 7.0]) assert_equal(ptp(b, axis= -1), [6.0, 6.0, 6.0]) + class TestCumsum(TestCase): def test_basic(self): ba = [1, 2, 10, 11, 6, 5, 4] @@ -189,6 +195,7 @@ [5, 11, 18, 27], [10, 13, 17, 22]], ctype)) + class TestProd(TestCase): def test_basic(self): ba = [1, 2, 10, 11, 6, 5, 4] @@ -207,6 +214,7 @@ array([50, 36, 84, 180], ctype)) assert_array_equal(prod(a2, axis= -1), array([24, 1890, 600], ctype)) + class TestCumprod(TestCase): def 
test_basic(self): ba = [1, 2, 10, 11, 6, 5, 4] @@ -232,6 +240,7 @@ [ 5, 30, 210, 1890], [10, 30, 120, 600]], ctype)) + class TestDiff(TestCase): def test_basic(self): x = [1, 4, 6, 7, 12] @@ -253,6 +262,7 @@ assert_array_equal(diff(x, axis=0), out3) assert_array_equal(diff(x, n=2, axis=0), out4) + class TestGradient(TestCase): def test_basic(self): x = array([[1, 1], [3, 4]]) @@ -271,6 +281,7 @@ x = np.ma.array([[1, 1], [3, 4]]) assert_equal(type(gradient(x)[0]), type(x)) + class TestAngle(TestCase): def test_basic(self): x = [1 + 3j, sqrt(2) / 2.0 + 1j * sqrt(2) / 2, 1, 1j, -1, -1j, 1 - 3j, -1 + 3j] @@ -282,6 +293,7 @@ assert_array_almost_equal(y, yo, 11) assert_array_almost_equal(z, zo, 11) + class TestTrimZeros(TestCase): """ only testing for integer splits. """ @@ -289,10 +301,12 @@ a = array([0, 0, 1, 2, 3, 4, 0]) res = trim_zeros(a) assert_array_equal(res, array([1, 2, 3, 4])) + def test_leading_skip(self): a = array([0, 0, 1, 0, 2, 3, 4, 0]) res = trim_zeros(a) assert_array_equal(res, array([1, 0, 2, 3, 4])) + def test_trailing_skip(self): a = array([0, 0, 1, 0, 2, 3, 0, 4, 0]) res = trim_zeros(a) @@ -304,10 +318,12 @@ a = array([1, 3, 2, 1, 2, 3, 3]) b = extract(a > 1, a) assert_array_equal(b, [3, 2, 2, 3, 3]) + def test_place(self): a = array([1, 4, 3, 2, 5, 8, 7]) place(a, [0, 1, 0, 1, 0, 1, 0], [2, 4, 6]) assert_array_equal(a, [1, 2, 3, 4, 5, 6, 7]) + def test_both(self): a = rand(10) mask = a > 0.5 @@ -317,6 +333,7 @@ place(a, mask, c) assert_array_equal(a, ac) + class TestVectorize(TestCase): def test_simple(self): def addsubtract(a, b): @@ -327,6 +344,7 @@ f = vectorize(addsubtract) r = f([0, 3, 6, 9], [1, 3, 5, 7]) assert_array_equal(r, [1, 6, 1, 2]) + def test_scalar(self): def addsubtract(a, b): if a > b: @@ -336,12 +354,45 @@ f = vectorize(addsubtract) r = f([0, 3, 6, 9], 5) assert_array_equal(r, [5, 8, 1, 4]) + def test_large(self): x = linspace(-3, 2, 10000) f = vectorize(lambda x: x) y = f(x) assert_array_equal(y, x) + def test_ufunc(self): + 
import math + f = vectorize(math.cos) + args = array([0, 0.5*pi, pi, 1.5*pi, 2*pi]) + r1 = f(args) + r2 = cos(args) + assert_array_equal(r1, r2) + + def test_keywords(self): + import math + def foo(a, b=1): + return a + b + f = vectorize(foo) + args = array([1,2,3]) + r1 = f(args) + r2 = array([2,3,4]) + assert_array_equal(r1, r2) + r1 = f(args, 2) + r2 = array([3,4,5]) + assert_array_equal(r1, r2) + + def test_keywords_no_func_code(self): + # This needs to test a function that has keywords but + # no func_code attribute, since otherwise vectorize will + # inspect the func_code. + import random + try: + f = vectorize(random.randrange) + except: + raise AssertionError() + + class TestDigitize(TestCase): def test_forward(self): x = arange(-6, 5) @@ -358,6 +409,7 @@ bin = linspace(x.min(), x.max(), 10) assert all(digitize(x, bin) != 0) + class TestUnwrap(TestCase): def test_simple(self): #check that unwrap removes jumps greather that 2*pi @@ -447,6 +499,7 @@ #check symmetry assert_array_almost_equal(w, flipud(w), 7) + class TestHistogram(TestCase): def setUp(self): pass @@ -486,7 +539,6 @@ area = sum(a * diff(b)) assert_almost_equal(area, 1) - def test_outliers(self): # Check that outliers are not tallied a = arange(10) + .5 @@ -511,7 +563,6 @@ h, b = histogram(a, bins=8, range=[1, 9], weights=w) assert_equal(h, w[1:-1]) - def test_type(self): # Check the type of the returned histogram a = arange(10) + .5 @@ -527,7 +578,6 @@ h, b = histogram(a, weights=ones(10, float)) assert(issubdtype(h.dtype, float)) - def test_weights(self): v = rand(100) w = ones(100) * 5 @@ -550,6 +600,7 @@ wa, wb = histogram([1, 2, 2, 4], bins=4, weights=[4, 3, 2, 1], normed=True) assert_array_equal(wa, array([4, 5, 0, 1]) / 10. / 3. * 4) + class TestHistogramdd(TestCase): def test_simple(self): x = array([[-.5, .5, 1.5], [-.5, 1.5, 2.5], [-.5, 2.5, .5], \ @@ -622,6 +673,7 @@ hist, edges = histogramdd(x, bins=2) assert_array_equal(edges[0], array([-0.5, 0. 
, 0.5])) + class TestUnique(TestCase): def test_simple(self): x = array([4, 3, 2, 1, 1, 2, 3, 4, 0]) @@ -642,6 +694,7 @@ assert_raises(ValueError, numpy.lib.asarray_chkfinite, b) assert_raises(ValueError, numpy.lib.asarray_chkfinite, c) + class TestNaNFuncts(TestCase): def setUp(self): self.A = array([[[ nan, 0.01319214, 0.01620964], @@ -728,7 +781,6 @@ assert_equal(np.isinf(a), np.zeros((2, 4), dtype=bool)) - class TestCorrCoef(TestCase): def test_simple(self): A = array([[ 0.15391142, 0.18045767, 0.14197213], @@ -756,7 +808,6 @@ - 0.66173113, 0.98317823, 1. ]])) - class Test_i0(TestCase): def test_simple(self): assert_almost_equal(i0(0.5), array(1.0634833707413234)) @@ -775,6 +826,7 @@ [ 1.03352052, 1.13557954], [ 1.0588429 , 1.06432317]])) + class TestKaiser(TestCase): def test_simple(self): assert_almost_equal(kaiser(0, 1.0), array([])) @@ -790,6 +842,7 @@ def test_int_beta(self): kaiser(3, 4) + class TestMsort(TestCase): def test_simple(self): A = array([[ 0.44567325, 0.79115165, 0.5490053 ], @@ -800,6 +853,7 @@ [ 0.44567325, 0.52929049, 0.5490053 ], [ 0.64864341, 0.79115165, 0.96098397]])) + class TestMeshgrid(TestCase): def test_simple(self): [X, Y] = meshgrid([1, 2, 3], [4, 5, 6, 7]) @@ -857,6 +911,7 @@ assert y.ndim == 0 assert y == 0 + class TestBincount(TestCase): def test_simple(self): y = np.bincount(np.arange(4)) @@ -878,9 +933,11 @@ y = np.bincount(x, w) assert_array_equal(y, np.array([0, 0.2, 0.5, 0, 0.5, 0.1])) + def compare_results(res, desired): for i in range(len(desired)): assert_array_equal(res[i], desired[i]) + if __name__ == "__main__": run_module_suite() From numpy-svn at scipy.org Thu May 6 04:05:05 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Thu, 6 May 2010 03:05:05 -0500 (CDT) Subject: [Numpy-svn] r8390 - in trunk/doc/numpy.scipy.org: . 
_theme/scipy _theme/scipy/static Message-ID: <20100506080505.BE99A39CAED@scipy.org> Author: ptvirtan Date: 2010-05-06 03:05:05 -0500 (Thu, 06 May 2010) New Revision: 8390 Added: trunk/doc/numpy.scipy.org/license.rst Modified: trunk/doc/numpy.scipy.org/_theme/scipy/index.html trunk/doc/numpy.scipy.org/_theme/scipy/static/scipy.css trunk/doc/numpy.scipy.org/content.rst Log: numpy.scipy.org: add license information (#1478), tidy up CSS Modified: trunk/doc/numpy.scipy.org/_theme/scipy/index.html =================================================================== --- trunk/doc/numpy.scipy.org/_theme/scipy/index.html 2010-05-06 06:41:55 UTC (rev 8389) +++ trunk/doc/numpy.scipy.org/_theme/scipy/index.html 2010-05-06 08:05:05 UTC (rev 8390) @@ -46,6 +46,8 @@ multi-dimensional container of generic data. Arbitrary data-types can be defined. This allows NumPy to seamlessly and speedily integrate with a wide variety of databases.

+

Numpy is licensed under the BSD license, + enabling reuse with few restrictions.

Getting Started?

Modified: trunk/doc/numpy.scipy.org/_theme/scipy/static/scipy.css =================================================================== --- trunk/doc/numpy.scipy.org/_theme/scipy/static/scipy.css 2010-05-06 06:41:55 UTC (rev 8389) +++ trunk/doc/numpy.scipy.org/_theme/scipy/static/scipy.css 2010-05-06 08:05:05 UTC (rev 8390) @@ -13,6 +13,12 @@ width: 100%; } +div.documentwrapper { + border-left: 1px solid #ccc; + border-right: 1px solid #ccc; + border-bottom: 1px solid #ccc; +} + div.body p, div.body dd, div.body li { line-height: 125%; } @@ -181,9 +187,10 @@ */ body div.top-logo-header { text-align: left; - background-color: #8CAAE6; + background-color: #a2bae8; border-bottom: 8px solid #003399; - border-top: 10px solid #6487dc; + margin-top: 10px; + border-top: 1px solid #bbb; } /** Modified: trunk/doc/numpy.scipy.org/content.rst =================================================================== --- trunk/doc/numpy.scipy.org/content.rst 2010-05-06 06:41:55 UTC (rev 8389) +++ trunk/doc/numpy.scipy.org/content.rst 2010-05-06 08:05:05 UTC (rev 8390) @@ -5,3 +5,5 @@ :maxdepth: 2 old_array_packages.rst + license.rst + Added: trunk/doc/numpy.scipy.org/license.rst =================================================================== --- trunk/doc/numpy.scipy.org/license.rst (rev 0) +++ trunk/doc/numpy.scipy.org/license.rst 2010-05-06 08:05:05 UTC (rev 8390) @@ -0,0 +1,33 @@ +Numpy license +============= + +| Copyright ? 2005-2010, NumPy Developers. +| All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. 
+ + * Neither the name of the NumPy Developers nor the names of any + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. From numpy-svn at scipy.org Thu May 6 22:25:08 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Thu, 6 May 2010 21:25:08 -0500 (CDT) Subject: [Numpy-svn] r8391 - in trunk/numpy/lib: . tests Message-ID: <20100507022508.7788F39CAED@scipy.org> Author: charris Date: 2010-05-06 21:25:08 -0500 (Thu, 06 May 2010) New Revision: 8391 Modified: trunk/numpy/lib/function_base.py trunk/numpy/lib/tests/test_function_base.py Log: BUG: Make interp handle zero dimensional ndarrays as interpolation points. Add some tests for interp. Fixes ticket #1177. unc_api.txt.tmp Modified: trunk/numpy/lib/function_base.py =================================================================== --- trunk/numpy/lib/function_base.py 2010-05-06 08:05:05 UTC (rev 8390) +++ trunk/numpy/lib/function_base.py 2010-05-07 02:25:08 UTC (rev 8391) @@ -55,7 +55,7 @@ range : (float, float), optional The lower and upper range of the bins. If not provided, range is simply ``(a.min(), a.max())``. Values outside the range are - ignored. + ignored. 
normed : bool, optional If False, the result will contain the number of samples in each bin. If True, the result is the value of the @@ -68,7 +68,7 @@ only contributes its associated weight towards the bin count (instead of 1). If `normed` is True, the weights are normalized, so that the integral of the density over the range remains 1 - + Returns ------- hist : array @@ -76,8 +76,8 @@ description of the possible semantics. bin_edges : array of dtype float Return the bin edges ``(length(hist)+1)``. - + See Also -------- histogramdd, bincount, searchsorted @@ -112,7 +112,7 @@ 1.0 """ - + a = asarray(a) if weights is not None: weights = asarray(weights) @@ -985,6 +985,8 @@ """ if isinstance(x, (float, int, number)): return compiled_interp([x], xp, fp, left, right).item() + elif isinstance(x, np.ndarray) and x.ndim == 0: + return compiled_interp([x], xp, fp, left, right).item() else: return compiled_interp(x, xp, fp, left, right) @@ -2043,7 +2045,7 @@ >>> plt.xlabel("Sample") >>> plt.show() - + >>> plt.figure() >>> A = fft(window, 2048) / 25.5 @@ -2152,7 +2154,7 @@ >>> plt.xlabel("Sample") >>> plt.show() - + >>> plt.figure() >>> A = fft(window, 2048) / 25.5 Modified: trunk/numpy/lib/tests/test_function_base.py =================================================================== --- trunk/numpy/lib/tests/test_function_base.py 2010-05-06 08:05:05 UTC (rev 8390) +++ trunk/numpy/lib/tests/test_function_base.py 2010-05-07 02:25:08 UTC (rev 8391) @@ -69,7 +69,7 @@ actual = average(y, weights=w) desired = (arange(10) ** 2).sum()*1. 
/ arange(10).sum() assert_almost_equal(actual, desired) - + y1 = array([[1, 2, 3], [4, 5, 6]]) w0 = [1, 2] actual = average(y1, weights=w0, axis=0) @@ -934,6 +934,34 @@ assert_array_equal(y, np.array([0, 0.2, 0.5, 0, 0.5, 0.1])) +class TestInterp(TestCase): + def test_basic(self): + x = np.linspace(0, 1, 5) + y = np.linspace(0, 1, 5) + x0 = np.linspace(0, 1, 50) + assert_almost_equal(np.interp(x0, x, y), x0) + + def test_scalar_interpolation_point(self): + x = np.linspace(0, 1, 5) + y = np.linspace(0, 1, 5) + x0 = 0 + assert_almost_equal(np.interp(x0, x, y), x0) + x0 = .3 + assert_almost_equal(np.interp(x0, x, y), x0) + x0 = np.float32(.3) + assert_almost_equal(np.interp(x0, x, y), x0) + x0 = np.float64(.3) + assert_almost_equal(np.interp(x0, x, y), x0) + + def test_zero_dimensional_interpolation_point(self): + x = np.linspace(0, 1, 5) + y = np.linspace(0, 1, 5) + x0 = np.array(.3) + assert_almost_equal(np.interp(x0, x, y), x0) + x0 = np.array(.3, dtype=object) + assert_almost_equal(np.interp(x0, x, y), .3) + + def compare_results(res, desired): for i in range(len(desired)): assert_array_equal(res[i], desired[i]) From numpy-svn at scipy.org Sat May 8 07:08:32 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sat, 8 May 2010 06:08:32 -0500 (CDT) Subject: [Numpy-svn] r8392 - trunk/doc Message-ID: <20100508110832.F225539CAED@scipy.org> Author: rgommers Date: 2010-05-08 06:08:32 -0500 (Sat, 08 May 2010) New Revision: 8392 Added: trunk/doc/HOWTO_RELEASE.txt Log: DOC: add a description of the release process and setup needed. Added: trunk/doc/HOWTO_RELEASE.txt =================================================================== --- trunk/doc/HOWTO_RELEASE.txt (rev 0) +++ trunk/doc/HOWTO_RELEASE.txt 2010-05-08 11:08:32 UTC (rev 8392) @@ -0,0 +1,248 @@ +This file gives an overview of what is necessary to build binary releases for +NumPy on OS X. Windows binaries are built here using Wine, they can of course +also be built on Windows itself. 
Building OS X binaries on another platform is +not possible. + +Current build and release info +============================== +The current info on building and releasing NumPy and SciPy is scattered in +several places. It should be summarized in one place, updated and where +necessary described in more detail. The sections below list all places where +useful info can be found. + +Source tree +----------- +* INSTALL.txt +* release.sh +* pavement.py + +NumPy Trac +---------- +* http://projects.scipy.org/numpy/wiki/MakingReleases +* http://projects.scipy.org/numpy/wiki/MicrosoftToolchainSupport +* http://projects.scipy.org/numpy/wiki/ApiDeprecation + +SciPy.org wiki +-------------- +* http://www.scipy.org/Installing_SciPy and links on that page. +* http://new.scipy.org/building/windows.html + +Doc wiki +-------- +* http://docs.scipy.org/numpy/docs/numpy-docs/user/install.rst/ + + +Supported platforms and versions +================================ +Python 2.4-2.6 are the currently supported versions on all platforms. +NumPy 2.0 should include support for Python >=3.1. + +OS X +---- +OS X 10.4 (Tiger), 10.5 (Leopard) and 10.6 (Snow Leopard) are supported for the 1.4.x +releases. Note that only the Python from `python.org `_ is +supported. Binaries do *not* support Apple Python. + +Windows +------- +Windows XP and Windows Vista are supported by the 1.3.0 release, the 1.4.x +releases also support Windows 7. + +Linux +----- +Many distributions include NumPy. Building from source is also relatively +straightforward. Only tarballs are created for Linux, no specific binary +installers are provided (yet). + +BSD / Solaris +------------- +No binaries are provided, but succesful builds on Solaris and BSD have been +reported. + + +Tool chain +========== +Compilers +--------- +The same gcc version is used as the one with which Python itself is built on +each platform. 
At the moment this means: + +* OS X uses gcc 4.0 +* Windows build uses latest released version from MinGW, now at 3.4.5. + +Cython >= 0.12 is needed. + +Fortran: on OS X gfortran from `this site `_ +is used. On Windows g77 (included in MinGW) is the current default, in the future +this may shift to gfortran as well. + +Python +------ +* Python from `python.org `_ +* virtualenv +* paver +* bdist_mpkg +* `numpy-macosx-installer `_ + +Building docs +------------- +* Sphinx +* numpydoc +* Matplotlib +* Texlive (or MikTeX on Windows) + +Wine +---- +For building Windows binaries on OS X Wine can be used. In Wine the following +needs to be installed: + +* Python 2.5 +* Python 2.6 +* MakeNsis +* CpuId plugin for MakeNsis : this can be found in the NumPy source tree under + tools/win32build/cpucaps and has to be built with MinGW (see SConstruct file in + that dir for details) +* MinGW +* ATLAS, 3x ([No SSE, SSE2, SSE3] for superpack installer) : ATLAS does not + compile under wine or on Windows out of the box. Binaries for ATLAS can be + found in svn under vendor/. + +To install Wine on OS X Snow Leopard the current options are to compile a +current unstable version ,``_, or to use +an install script from `here `_. For +me, the former option did not work (everything compiled, but after installing +Python the command ``import tempfile`` resulted in an exception. The latter +option did work. + +After successful installation and an invocation of the wine executable, a +~/.wine folder exists - new programs will be installed there in +~/.wine/drive_c. Installing Windows programs with .exe executables is done by +running + + $ wine yourprog.exe + +and MSI installers can be installed with + + $ msiexec /i yourprog.msi + +For the above to work you probably need to put the wine-1.x.x/bin directory in +your PATH. + +To install MinGW, the easiest option is to use the automated installer on the +MinGW download page. 
This will give you (at this moment) GCC 3.4.5; GCC 4.x is +still not supported officially by MinGW. + +To be able to use gcc and MakeNsis in Wine, the locations of gcc.exe and +makensis.exe should be added to the Windows environment variable PATH. This can +easily be done by running + + $ wine regedit + +add adding a PATH variable in HKEY_CURRENT_USER/Environment. + +Virtualenv +---------- +Virtualenv is a very useful tool to keep several versions of packages around. +It is also used in the Paver script to build the docs. The use of the +``--no-site-packages`` option in the Paver script is probably not necessary, +and may prevent successful building of the docs. If doc building fails because +Matplotlib can not be found, remove this option. + + +What is released +================ + +Binaries +-------- +Windows binaries in "superpack" form for Python 2.5 and 2.6. A superpack +contains three builds, for SSE2, SSE3 and no SSE. + +OS X binaries are made in dmg format, targeting only the Python from +`python.org `_ + +For Python 2.4 binaries are not built. It should be checked however +that the source release works with Python 2.4. + +Other +----- +* Release Notes +* Changelog + +Source distribution +------------------- +A source release in both .zip and .tar.gz formats is released. + + +Release process +=============== + +Check the buildbot +------------------ +The buildbot is located at ``_. + +Make sure current trunk builds a package correctly +-------------------------------------------------- +:: + + python setup.py bdist + python setup.py sdist + +To actually build the binaries after everything is set up correctly, the +release.sh script can be used. For details of the build process itself it is +best to read the pavement.py script. + +.. note:: The following steps are repeated for the beta(s), release + candidates(s) and the final release. 
+ +Create the release "tag" +------------------------ +:: + + svn cp http://svn.scipy.org/svn/numpy/trunk http://svn.scipy.org/svn/numpy/tags/ + +Update the version of the trunk +------------------------------- +Increment the release number in setup.py. Release candidates should have "rc1" +(or "rc2", "rcN") appended to the X.Y.Z format. + +Update the version of the tag +----------------------------- +Switch to the tag:: + + svn switch http://svn.scipy.org/svn/numpy/tags/ + +Set ``release=True`` in setup.py. + +Make the release +---------------- +The tar-files and binary releases for distribution should be uploaded to SourceForge, +together with the Release Notes and the Changelog. Uploading can be done +through a web interface or, more efficiently, through scp/sftp/rsync as +described `here `_. +For example:: + + scp ,numpy at frs.sourceforge.net:/home/frs/project/n/nu/numpy/NumPy// + +Update PyPi +----------- +There are two ways to update PyPi, the first one is:: + + $ python setup.py sdist upload + +and the second one is to upload the PKG_INFO file inside the sdist dir in the +web interface of PyPi. The source tarball can also be uploaded through this +interface. + +Update scipy.org +---------------- +A release announcement with a link to the download site should be placed in the +sidebar of the front page of scipy.org. + +Announce to the lists +--------------------- +The release should be announced on the mailing lists of +NumPy and SciPy, and possibly also those of Matplotlib,IPython and/or Pygame. + +During the beta/RC phase an explicit request for testing the binaries with +several other libraries (SciPy/Matplotlib/Pygame) should be posted on the +mailing list. 
From numpy-svn at scipy.org Sat May 8 07:14:47 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sat, 8 May 2010 06:14:47 -0500 (CDT) Subject: [Numpy-svn] r8393 - trunk/doc Message-ID: <20100508111447.9EB5839CAED@scipy.org> Author: rgommers Date: 2010-05-08 06:14:47 -0500 (Sat, 08 May 2010) New Revision: 8393 Modified: trunk/doc/HOWTO_RELEASE.txt Log: DOC: fix ReST warning. Modified: trunk/doc/HOWTO_RELEASE.txt =================================================================== --- trunk/doc/HOWTO_RELEASE.txt 2010-05-08 11:08:32 UTC (rev 8392) +++ trunk/doc/HOWTO_RELEASE.txt 2010-05-08 11:14:47 UTC (rev 8393) @@ -218,7 +218,8 @@ The tar-files and binary releases for distribution should be uploaded to SourceForge, together with the Release Notes and the Changelog. Uploading can be done through a web interface or, more efficiently, through scp/sftp/rsync as -described `here `_. +described in the SourceForge +`upload guide `_. For example:: scp ,numpy at frs.sourceforge.net:/home/frs/project/n/nu/numpy/NumPy// From numpy-svn at scipy.org Sat May 8 12:34:28 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sat, 8 May 2010 11:34:28 -0500 (CDT) Subject: [Numpy-svn] r8394 - in trunk/numpy/core: src/multiarray tests Message-ID: <20100508163428.A231239CAF8@scipy.org> Author: ptvirtan Date: 2010-05-08 11:34:28 -0500 (Sat, 08 May 2010) New Revision: 8394 Modified: trunk/numpy/core/src/multiarray/numpyos.c trunk/numpy/core/tests/test_regression.py Log: BUG: core: ensure we have GIL before calling PyOS_string_to_double (fixes #1345) In Python >= 2.7, the PyOS_ascii_strtod has been deprecated and replaced with PyOS_string_to_double. However, the latter may raise Python exceptions, which requires that GIL is held when calling it. 
Modified: trunk/numpy/core/src/multiarray/numpyos.c =================================================================== --- trunk/numpy/core/src/multiarray/numpyos.c 2010-05-08 11:14:47 UTC (rev 8393) +++ trunk/numpy/core/src/multiarray/numpyos.c 2010-05-08 16:34:28 UTC (rev 8394) @@ -420,6 +420,32 @@ return 0; } +/* + * _NumPyOS_ascii_strtod_plain: + * + * PyOS_ascii_strtod work-alike, with no enhanced features, + * for forward compatibility with Python >= 2.7 + */ +static double +NumPyOS_ascii_strtod_plain(const char *s, char** endptr) +{ + double result; +#if PY_VERSION_HEX >= 0x02070000 + NPY_ALLOW_C_API_DEF + NPY_ALLOW_C_API + result = PyOS_string_to_double(s, endptr, NULL); + if (PyErr_Occurred()) { + if (endptr) { + *endptr = (char*)s; + } + PyErr_Clear(); + } + NPY_DISABLE_C_API +#else + result = PyOS_ascii_strtod(s, endptr); +#endif + return result; +} /* * NumPyOS_ascii_strtod: @@ -508,11 +534,7 @@ } memcpy(buffer, s, n); buffer[n] = '\0'; -#if PY_VERSION_HEX >= 0x02070000 - result = PyOS_string_to_double(buffer, &q, NULL); -#else - result = PyOS_ascii_strtod(buffer, &q); -#endif + result = NumPyOS_ascii_strtod_plain(buffer, &q); if (endptr != NULL) { *endptr = (char*)(s + (q - buffer)); } @@ -521,11 +543,7 @@ } /* End of ##2 */ -#if PY_VERSION_HEX >= 0x02070000 - return PyOS_string_to_double(s, endptr, NULL); -#else - return PyOS_ascii_strtod(s, endptr); -#endif + return NumPyOS_ascii_strtod_plain(s, endptr); } Modified: trunk/numpy/core/tests/test_regression.py =================================================================== --- trunk/numpy/core/tests/test_regression.py 2010-05-08 11:14:47 UTC (rev 8393) +++ trunk/numpy/core/tests/test_regression.py 2010-05-08 16:34:28 UTC (rev 8394) @@ -1298,5 +1298,9 @@ assert np.alltrue(a == np.array([0,1,2,3,4,5,6,7,8,9])) assert np.alltrue(b == np.array([0,1,2,3,4,5,6,7,8,9])) + def test_fromstring_crash(self): + # Ticket #1345: the following should not cause a crash + np.fromstring(asbytes('aa, aa, 1.0'), 
sep=',') + if __name__ == "__main__": run_module_suite() From numpy-svn at scipy.org Sat May 8 12:34:39 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sat, 8 May 2010 11:34:39 -0500 (CDT) Subject: [Numpy-svn] r8395 - trunk/numpy/lib Message-ID: <20100508163439.E173239CAF8@scipy.org> Author: ptvirtan Date: 2010-05-08 11:34:39 -0500 (Sat, 08 May 2010) New Revision: 8395 Modified: trunk/numpy/lib/npyio.py Log: BUG: lib: make loadtxt work on Py3 when fh returns unicode (fixes #1479) Modified: trunk/numpy/lib/npyio.py =================================================================== --- trunk/numpy/lib/npyio.py 2010-05-08 16:34:28 UTC (rev 8394) +++ trunk/numpy/lib/npyio.py 2010-05-08 16:34:39 UTC (rev 8395) @@ -608,7 +608,7 @@ def split_line(line): """Chop off comments, strip, and split at delimiter.""" - line = line.split(comments)[0].strip() + line = asbytes(line).split(comments)[0].strip() if line: return line.split(delimiter) else: From numpy-svn at scipy.org Sat May 8 12:34:52 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sat, 8 May 2010 11:34:52 -0500 (CDT) Subject: [Numpy-svn] r8396 - trunk/numpy/lib/tests Message-ID: <20100508163452.0990839CAF8@scipy.org> Author: ptvirtan Date: 2010-05-08 11:34:51 -0500 (Sat, 08 May 2010) New Revision: 8396 Modified: trunk/numpy/lib/tests/test_type_check.py Log: BUG: lib: remember in a test that datetime specifiers are bytes strings Modified: trunk/numpy/lib/tests/test_type_check.py =================================================================== --- trunk/numpy/lib/tests/test_type_check.py 2010-05-08 16:34:39 UTC (rev 8395) +++ trunk/numpy/lib/tests/test_type_check.py 2010-05-08 16:34:51 UTC (rev 8396) @@ -1,6 +1,7 @@ from numpy.testing import * from numpy.lib import * from numpy.core import * +from numpy.compat import asbytes def assert_all(x): assert(all(x)), x @@ -379,7 +380,7 @@ def test_basic(self): a = array(['1980-03-23'], dtype=datetime64) - assert_equal(datetime_data(a.dtype), 
('us', 1, 1, 1)) + assert_equal(datetime_data(a.dtype), (asbytes('us'), 1, 1, 1)) if __name__ == "__main__": From numpy-svn at scipy.org Sat May 8 20:04:55 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sat, 8 May 2010 19:04:55 -0500 (CDT) Subject: [Numpy-svn] r8397 - in trunk: doc/release numpy/core/include/numpy Message-ID: <20100509000455.5A49B39CAE7@scipy.org> Author: charris Date: 2010-05-08 19:04:55 -0500 (Sat, 08 May 2010) New Revision: 8397 Added: trunk/numpy/core/include/numpy/ndarraytypes.h Modified: trunk/doc/release/2.0.0-notes.rst trunk/numpy/core/include/numpy/ndarrayobject.h Log: ENH: Break out parts of ndarrayobject.h that don't reference the ndarray c-api into the header ndarraytypes.h. Modified: trunk/doc/release/2.0.0-notes.rst =================================================================== --- trunk/doc/release/2.0.0-notes.rst 2010-05-08 16:34:51 UTC (rev 8396) +++ trunk/doc/release/2.0.0-notes.rst 2010-05-09 00:04:55 UTC (rev 8397) @@ -55,3 +55,10 @@ of a matrix. Because the determinant may involve the product of many small/large values, the result is often more accurate than that obtained by simple multiplication. + +new header +~~~~~~~~~~ + +There is a new header ndarraytypes.h that provides needed ndarray types +that don't reference the ndarray c-api. Some folks might find this useful. 
+ Modified: trunk/numpy/core/include/numpy/ndarrayobject.h =================================================================== --- trunk/numpy/core/include/numpy/ndarrayobject.h 2010-05-08 16:34:51 UTC (rev 8396) +++ trunk/numpy/core/include/numpy/ndarrayobject.h 2010-05-09 00:04:55 UTC (rev 8397) @@ -14,1256 +14,8 @@ everything when you're typing */ #endif -/* This is auto-generated by the installer */ -#include "numpyconfig.h" +#include "ndarraytypes.h" -#ifdef NPY_ENABLE_SEPARATE_COMPILATION - #define NPY_NO_EXPORT NPY_VISIBILITY_HIDDEN -#else - #define NPY_NO_EXPORT static -#endif - -/* Only use thread if configured in config and python supports it */ -#if defined WITH_THREAD && !NPY_NO_SMP - #define NPY_ALLOW_THREADS 1 -#else - #define NPY_ALLOW_THREADS 0 -#endif - -#include "npy_endian.h" - -#include "utils.h" - -/* There are several places in the code where an array of dimensions is - * allocated statically. This is the size of that static allocation. - * - * The array creation itself could have arbitrary dimensions but - * all the places where static allocation is used would need to - * be changed to dynamic (including inside of several structures) - */ - -#define NPY_MAXDIMS 32 -#define NPY_MAXARGS 32 - -/* Used for Converter Functions "O&" code in ParseTuple */ -#define NPY_FAIL 0 -#define NPY_SUCCEED 1 - -/* Binary compatibility version number. This number is increased - whenever the C-API is changed such that binary compatibility is - broken, i.e. whenever a recompile of extension modules is - needed. */ -#define NPY_VERSION NPY_ABI_VERSION - -/* Minor API version. This number is increased whenever a change is - made to the C-API -- whether it breaks binary compatibility or not. - Some changes, such as adding a function pointer to the end of the - function table, can be made without breaking binary compatibility. - In this case, only the NPY_FEATURE_VERSION (*not* NPY_VERSION) - would be increased. 
Whenever binary compatibility is broken, both - NPY_VERSION and NPY_FEATURE_VERSION should be increased. - */ -#define NPY_FEATURE_VERSION NPY_API_VERSION - -#include "npy_common.h" - -enum NPY_TYPES { NPY_BOOL=0, - NPY_BYTE, NPY_UBYTE, - NPY_SHORT, NPY_USHORT, - NPY_INT, NPY_UINT, - NPY_LONG, NPY_ULONG, - NPY_LONGLONG, NPY_ULONGLONG, - NPY_FLOAT, NPY_DOUBLE, NPY_LONGDOUBLE, - NPY_CFLOAT, NPY_CDOUBLE, NPY_CLONGDOUBLE, - NPY_DATETIME, NPY_TIMEDELTA, - NPY_OBJECT=19, - NPY_STRING, NPY_UNICODE, - NPY_VOID, - NPY_NTYPES, - NPY_NOTYPE, - NPY_CHAR, /* special flag */ - NPY_USERDEF=256 /* leave room for characters */ -}; - -#define NPY_METADATA_DTSTR "__frequency__" - -/* basetype array priority */ -#define NPY_PRIORITY 0.0 - -/* default subtype priority */ -#define NPY_SUBTYPE_PRIORITY 1.0 - -/* default scalar priority */ -#define NPY_SCALAR_PRIORITY -1000000.0 - -/* How many floating point types are there */ -#define NPY_NUM_FLOATTYPE 3 - -/* We need to match npy_intp to a signed integer of the same size as - a pointer variable. 
npy_uintp to the equivalent unsigned integer -*/ - - -/* These characters correspond to the array type and the - struct module */ - -/* except 'p' -- signed integer for pointer type */ - -enum NPY_TYPECHAR { NPY_BOOLLTR = '?', - NPY_BYTELTR = 'b', - NPY_UBYTELTR = 'B', - NPY_SHORTLTR = 'h', - NPY_USHORTLTR = 'H', - NPY_INTLTR = 'i', - NPY_UINTLTR = 'I', - NPY_LONGLTR = 'l', - NPY_ULONGLTR = 'L', - NPY_LONGLONGLTR = 'q', - NPY_ULONGLONGLTR = 'Q', - NPY_FLOATLTR = 'f', - NPY_DOUBLELTR = 'd', - NPY_LONGDOUBLELTR = 'g', - NPY_CFLOATLTR = 'F', - NPY_CDOUBLELTR = 'D', - NPY_CLONGDOUBLELTR = 'G', - NPY_OBJECTLTR = 'O', - NPY_STRINGLTR = 'S', - NPY_STRINGLTR2 = 'a', - NPY_UNICODELTR = 'U', - NPY_VOIDLTR = 'V', - NPY_DATETIMELTR = 'M', - NPY_TIMEDELTALTR = 'm', - NPY_CHARLTR = 'c', - - /* No Descriptor, just a define -- this let's - Python users specify an array of integers - large enough to hold a pointer on the platform*/ - NPY_INTPLTR = 'p', - NPY_UINTPLTR = 'P', - - NPY_GENBOOLLTR ='b', - NPY_SIGNEDLTR = 'i', - NPY_UNSIGNEDLTR = 'u', - NPY_FLOATINGLTR = 'f', - NPY_COMPLEXLTR = 'c' -}; - -typedef enum { - NPY_QUICKSORT=0, - NPY_HEAPSORT=1, - NPY_MERGESORT=2 -} NPY_SORTKIND; -#define NPY_NSORTS (NPY_MERGESORT + 1) - - -typedef enum { - NPY_SEARCHLEFT=0, - NPY_SEARCHRIGHT=1 -} NPY_SEARCHSIDE; -#define NPY_NSEARCHSIDES (NPY_SEARCHRIGHT + 1) - - -typedef enum { - NPY_NOSCALAR=-1, - NPY_BOOL_SCALAR, - NPY_INTPOS_SCALAR, - NPY_INTNEG_SCALAR, - NPY_FLOAT_SCALAR, - NPY_COMPLEX_SCALAR, - NPY_OBJECT_SCALAR -} NPY_SCALARKIND; -#define NPY_NSCALARKINDS (NPY_OBJECT_SCALAR + 1) - -typedef enum { - NPY_ANYORDER=-1, - NPY_CORDER=0, - NPY_FORTRANORDER=1 -} NPY_ORDER; - - -typedef enum { - NPY_CLIP=0, - NPY_WRAP=1, - NPY_RAISE=2 -} NPY_CLIPMODE; - -typedef enum { - NPY_FR_Y, - NPY_FR_M, - NPY_FR_W, - NPY_FR_B, - NPY_FR_D, - NPY_FR_h, - NPY_FR_m, - NPY_FR_s, - NPY_FR_ms, - NPY_FR_us, - NPY_FR_ns, - NPY_FR_ps, - NPY_FR_fs, - NPY_FR_as -} NPY_DATETIMEUNIT; - -#define NPY_DATETIME_NUMUNITS 
(NPY_FR_as + 1) -#define NPY_DATETIME_DEFAULTUNIT NPY_FR_us - -#define NPY_STR_Y "Y" -#define NPY_STR_M "M" -#define NPY_STR_W "W" -#define NPY_STR_B "B" -#define NPY_STR_D "D" -#define NPY_STR_h "h" -#define NPY_STR_m "m" -#define NPY_STR_s "s" -#define NPY_STR_ms "ms" -#define NPY_STR_us "us" -#define NPY_STR_ns "ns" -#define NPY_STR_ps "ps" -#define NPY_STR_fs "fs" -#define NPY_STR_as "as" - - -/* This is to typedef npy_intp to the appropriate pointer size for this - * platform. Py_intptr_t, Py_uintptr_t are defined in pyport.h. */ -typedef Py_intptr_t npy_intp; -typedef Py_uintptr_t npy_uintp; -#define NPY_SIZEOF_INTP NPY_SIZEOF_PY_INTPTR_T -#define NPY_SIZEOF_UINTP NPY_SIZEOF_PY_INTPTR_T - -#ifdef constchar -#undef constchar -#endif - -#if (PY_VERSION_HEX < 0x02050000) - #ifndef PY_SSIZE_T_MIN - typedef int Py_ssize_t; - #define PY_SSIZE_T_MAX INT_MAX - #define PY_SSIZE_T_MIN INT_MIN - #endif -#define NPY_SSIZE_T_PYFMT "i" -#undef PyIndex_Check -#define constchar const char -#define PyIndex_Check(op) 0 -#else -#define NPY_SSIZE_T_PYFMT "n" -#define constchar char -#endif - -#if NPY_SIZEOF_PY_INTPTR_T == NPY_SIZEOF_INT - #define NPY_INTP NPY_INT - #define NPY_UINTP NPY_UINT - #define PyIntpArrType_Type PyIntArrType_Type - #define PyUIntpArrType_Type PyUIntArrType_Type - #define NPY_MAX_INTP NPY_MAX_INT - #define NPY_MIN_INTP NPY_MIN_INT - #define NPY_MAX_UINTP NPY_MAX_UINT - #define NPY_INTP_FMT "d" -#elif NPY_SIZEOF_PY_INTPTR_T == NPY_SIZEOF_LONG - #define NPY_INTP NPY_LONG - #define NPY_UINTP NPY_ULONG - #define PyIntpArrType_Type PyLongArrType_Type - #define PyUIntpArrType_Type PyULongArrType_Type - #define NPY_MAX_INTP NPY_MAX_LONG - #define NPY_MIN_INTP MIN_LONG - #define NPY_MAX_UINTP NPY_MAX_ULONG - #define NPY_INTP_FMT "ld" -#elif defined(PY_LONG_LONG) && (NPY_SIZEOF_PY_INTPTR_T == NPY_SIZEOF_LONGLONG) - #define NPY_INTP NPY_LONGLONG - #define NPY_UINTP NPY_ULONGLONG - #define PyIntpArrType_Type PyLongLongArrType_Type - #define PyUIntpArrType_Type 
PyULongLongArrType_Type - #define NPY_MAX_INTP NPY_MAX_LONGLONG - #define NPY_MIN_INTP NPY_MIN_LONGLONG - #define NPY_MAX_UINTP NPY_MAX_ULONGLONG - #define NPY_INTP_FMT "Ld" -#endif - -/* We can only use C99 formats for npy_int_p if it is the same as intp_t, hence - * the condition on HAVE_UNITPTR_T */ -#if (NPY_USE_C99_FORMATS) == 1 \ - && (defined HAVE_UINTPTR_T) \ - && (defined HAVE_INTTYPES_H) - #include - #undef NPY_INTP_FMT - #define NPY_INTP_FMT PRIdPTR -#endif - -#define NPY_ERR(str) fprintf(stderr, #str); fflush(stderr); -#define NPY_ERR2(str) fprintf(stderr, str); fflush(stderr); - -#define NPY_STRINGIFY(x) #x -#define NPY_TOSTRING(x) NPY_STRINGIFY(x) - - /* Macros to define how array, and dimension/strides data is - allocated. - */ - - /* Data buffer */ -#define PyDataMem_NEW(size) ((char *)malloc(size)) -#define PyDataMem_FREE(ptr) free(ptr) -#define PyDataMem_RENEW(ptr,size) ((char *)realloc(ptr,size)) - -#define NPY_USE_PYMEM 1 - -#if NPY_USE_PYMEM == 1 -#define PyArray_malloc PyMem_Malloc -#define PyArray_free PyMem_Free -#define PyArray_realloc PyMem_Realloc -#else -#define PyArray_malloc malloc -#define PyArray_free free -#define PyArray_realloc realloc -#endif - -/* Dimensions and strides */ -#define PyDimMem_NEW(size) \ - ((npy_intp *)PyArray_malloc(size*sizeof(npy_intp))) - -#define PyDimMem_FREE(ptr) PyArray_free(ptr) - -#define PyDimMem_RENEW(ptr,size) \ - ((npy_intp *)PyArray_realloc(ptr,size*sizeof(npy_intp))) - -/* forward declaration */ -struct _PyArray_Descr; - - /* These must deal with unaligned and swapped data if necessary */ -typedef PyObject * (PyArray_GetItemFunc) (void *, void *); -typedef int (PyArray_SetItemFunc)(PyObject *, void *, void *); - -typedef void (PyArray_CopySwapNFunc)(void *, npy_intp, void *, npy_intp, - npy_intp, int, void *); - -typedef void (PyArray_CopySwapFunc)(void *, void *, int, void *); -typedef npy_bool (PyArray_NonzeroFunc)(void *, void *); - - - /* These assume aligned and notswapped data -- a buffer 
will be - used before or contiguous data will be obtained - */ -typedef int (PyArray_CompareFunc)(const void *, const void *, void *); -typedef int (PyArray_ArgFunc)(void*, npy_intp, npy_intp*, void *); - -typedef void (PyArray_DotFunc)(void *, npy_intp, void *, npy_intp, void *, - npy_intp, void *); - -typedef void (PyArray_VectorUnaryFunc)(void *, void *, npy_intp, void *, - void *); - -/* XXX the ignore argument should be removed next time the API version - is bumped. It used to be the separator. */ -typedef int (PyArray_ScanFunc)(FILE *fp, void *dptr, - char *ignore, struct _PyArray_Descr *); -typedef int (PyArray_FromStrFunc)(char *s, void *dptr, char **endptr, - struct _PyArray_Descr *); - -typedef int (PyArray_FillFunc)(void *, npy_intp, void *); - -typedef int (PyArray_SortFunc)(void *, npy_intp, void *); -typedef int (PyArray_ArgSortFunc)(void *, npy_intp *, npy_intp, void *); - -typedef int (PyArray_FillWithScalarFunc)(void *, npy_intp, void *, void *); - -typedef int (PyArray_ScalarKindFunc)(void *); - -typedef void (PyArray_FastClipFunc)(void *in, npy_intp n_in, void *min, - void *max, void *out); -typedef void (PyArray_FastPutmaskFunc)(void *in, void *mask, npy_intp n_in, - void *values, npy_intp nv); -typedef int (PyArray_FastTakeFunc)(void *dest, void *src, npy_intp *indarray, - npy_intp nindarray, npy_intp n_outer, - npy_intp m_middle, npy_intp nelem, - NPY_CLIPMODE clipmode); - -typedef struct { - npy_intp *ptr; - int len; -} PyArray_Dims; - -typedef struct { - /* The next four functions *cannot* be NULL */ - - /* Functions to get and set items with standard - Python types -- not array scalars */ - PyArray_GetItemFunc *getitem; - PyArray_SetItemFunc *setitem; - - /* Copy and/or swap data. 
Memory areas may not overlap */ - /* Use memmove first if they might */ - PyArray_CopySwapNFunc *copyswapn; - PyArray_CopySwapFunc *copyswap; - - /* Function to compare items */ - /* Can be NULL - */ - PyArray_CompareFunc *compare; - - /* Function to select largest - Can be NULL - */ - PyArray_ArgFunc *argmax; - - /* Function to compute dot product */ - /* Can be NULL */ - PyArray_DotFunc *dotfunc; - - /* Function to scan an ASCII file and - place a single value plus possible separator - Can be NULL - */ - PyArray_ScanFunc *scanfunc; - - /* Function to read a single value from a string */ - /* and adjust the pointer; Can be NULL */ - PyArray_FromStrFunc *fromstr; - - /* Function to determine if data is zero or not */ - /* If NULL a default version is */ - /* used at Registration time. */ - PyArray_NonzeroFunc *nonzero; - - /* Used for arange. Can be NULL.*/ - PyArray_FillFunc *fill; - - /* Function to fill arrays with scalar values - Can be NULL*/ - PyArray_FillWithScalarFunc *fillwithscalar; - - /* Sorting functions; Can be NULL*/ - PyArray_SortFunc *sort[NPY_NSORTS]; - PyArray_ArgSortFunc *argsort[NPY_NSORTS]; - - /* Dictionary of additional casting functions - PyArray_VectorUnaryFuncs - which can be populated to support casting - to other registered types. Can be NULL*/ - PyObject *castdict; - - /* Functions useful for generalizing - the casting rules. Can be NULL; - */ - PyArray_ScalarKindFunc *scalarkind; - int **cancastscalarkindto; - int *cancastto; - - PyArray_FastClipFunc *fastclip; - PyArray_FastPutmaskFunc *fastputmask; - PyArray_FastTakeFunc *fasttake; - - /* A little room to grow --- should use generic function interface for most additions */ - void *pad1; - void *pad2; - void *pad3; - void *pad4; - - /* Functions to cast to all other standard types*/ - /* Can have some NULL entries */ - PyArray_VectorUnaryFunc *cast[NPY_NTYPES]; - -} PyArray_ArrFuncs; - -/* The item must be reference counted when it is inserted or extracted. 
*/ -#define NPY_ITEM_REFCOUNT 0x01 -/* Same as needing REFCOUNT */ -#define NPY_ITEM_HASOBJECT 0x01 -/* Convert to list for pickling */ -#define NPY_LIST_PICKLE 0x02 -/* The item is a POINTER */ -#define NPY_ITEM_IS_POINTER 0x04 -/* memory needs to be initialized for this data-type */ -#define NPY_NEEDS_INIT 0x08 -/* operations need Python C-API so don't give-up thread. */ -#define NPY_NEEDS_PYAPI 0x10 -/* Use f.getitem when extracting elements of this data-type */ -#define NPY_USE_GETITEM 0x20 -/* Use f.setitem when setting creating 0-d array from this data-type.*/ -#define NPY_USE_SETITEM 0x40 -/* define NPY_IS_COMPLEX */ - -/* These are inherited for global data-type if any data-types in the field - have them */ -#define NPY_FROM_FIELDS (NPY_NEEDS_INIT | NPY_LIST_PICKLE | \ - NPY_ITEM_REFCOUNT | NPY_NEEDS_PYAPI) - -#define NPY_OBJECT_DTYPE_FLAGS (NPY_LIST_PICKLE | NPY_USE_GETITEM | \ - NPY_ITEM_IS_POINTER | NPY_ITEM_REFCOUNT | \ - NPY_NEEDS_INIT | NPY_NEEDS_PYAPI) - -#define PyDataType_FLAGCHK(dtype, flag) \ - (((dtype)->flags & (flag)) == (flag)) - -#define PyDataType_REFCHK(dtype) \ - PyDataType_FLAGCHK(dtype, NPY_ITEM_REFCOUNT) - -typedef struct _PyArray_Descr { - PyObject_HEAD - PyTypeObject *typeobj; /* the type object representing an - instance of this type -- should not - be two type_numbers with the same type - object. */ - char kind; /* kind for this type */ - char type; /* unique-character representing this type */ - char byteorder; /* '>' (big), '<' (little), '|' - (not-applicable), or '=' (native). 
*/ - char unused; - int flags; /* flag describing data type */ - int type_num; /* number representing this type */ - int elsize; /* element size for this type */ - int alignment; /* alignment needed for this type */ - struct _arr_descr \ - *subarray; /* Non-NULL if this type is - is an array (C-contiguous) - of some other type - */ - PyObject *fields; /* The fields dictionary for this type */ - /* For statically defined descr this - is always Py_None */ - - PyObject *names; /* An ordered tuple of field names or NULL - if no fields are defined */ - - PyArray_ArrFuncs *f; /* a table of functions specific for each - basic data descriptor */ - - PyObject *metadata; /* Metadata about this dtype */ -} PyArray_Descr; - -typedef struct _arr_descr { - PyArray_Descr *base; - PyObject *shape; /* a tuple */ -} PyArray_ArrayDescr; - -/* - The main array object structure. It is recommended to use the macros - defined below (PyArray_DATA and friends) access fields here, instead - of the members themselves. 
- */ - -typedef struct PyArrayObject { - PyObject_HEAD - char *data; /* pointer to raw data buffer */ - int nd; /* number of dimensions, also called ndim */ - npy_intp *dimensions; /* size in each dimension */ - npy_intp *strides; /* bytes to jump to get to the - next element in each dimension */ - PyObject *base; /* This object should be decref'd - upon deletion of array */ - /* For views it points to the original array */ - /* For creation from buffer object it points - to an object that shold be decref'd on - deletion */ - /* For UPDATEIFCOPY flag this is an array - to-be-updated upon deletion of this one */ - PyArray_Descr *descr; /* Pointer to type structure */ - int flags; /* Flags describing array -- see below*/ - PyObject *weakreflist; /* For weakreferences */ -} PyArrayObject; - -#define NPY_AO PyArrayObject - -#define fortran fortran_ /* For some compilers */ - -/* Array Flags Object */ -typedef struct PyArrayFlagsObject { - PyObject_HEAD - PyObject *arr; - int flags; -} PyArrayFlagsObject; - -/* Mirrors buffer object to ptr */ - -typedef struct { - PyObject_HEAD - PyObject *base; - void *ptr; - npy_intp len; - int flags; -} PyArray_Chunk; - - -typedef struct { - NPY_DATETIMEUNIT base; - int num; - int den; /* Converted to 1 on input for now -- an input-only mechanism */ - int events; -} PyArray_DatetimeMetaData; - -typedef struct { - npy_longlong year; - int month, day, hour, min, sec, us, ps, as; -} npy_datetimestruct; - -typedef struct { - npy_longlong day; - int sec, us, ps, as; -} npy_timedeltastruct; - -#if PY_VERSION_HEX >= 0x02070000 -#define PyDataType_GetDatetimeMetaData(descr) \ - ((descr->metadata == NULL) ? NULL : \ - ((PyArray_DatetimeMetaData *)(PyCapsule_GetPointer( \ - PyDict_GetItemString( \ - descr->metadata, NPY_METADATA_DTSTR), NULL)))) -#else -#define PyDataType_GetDatetimeMetaData(descr) \ - ((descr->metadata == NULL) ? 
NULL : \ - ((PyArray_DatetimeMetaData *)(PyCObject_AsVoidPtr( \ - PyDict_GetItemString(descr->metadata, NPY_METADATA_DTSTR))))) -#endif - -typedef int (PyArray_FinalizeFunc)(PyArrayObject *, PyObject *); - -/* Means c-style contiguous (last index varies the fastest). The - data elements right after each other. */ -#define NPY_CONTIGUOUS 0x0001 -/* set if array is a contiguous Fortran array: the first index - varies the fastest in memory (strides array is reverse of - C-contiguous array)*/ -#define NPY_FORTRAN 0x0002 - -#define NPY_C_CONTIGUOUS NPY_CONTIGUOUS -#define NPY_F_CONTIGUOUS NPY_FORTRAN - -/* - Note: all 0-d arrays are CONTIGUOUS and FORTRAN contiguous. If a - 1-d array is CONTIGUOUS it is also FORTRAN contiguous -*/ - -/* If set, the array owns the data: it will be free'd when the array - is deleted. */ -#define NPY_OWNDATA 0x0004 - -/* An array never has the next four set; they're only used as parameter - flags to the the various FromAny functions */ - -/* Cause a cast to occur regardless of whether or not it is safe. */ -#define NPY_FORCECAST 0x0010 - -/* Always copy the array. Returned arrays are always CONTIGUOUS, ALIGNED, - and WRITEABLE. */ -#define NPY_ENSURECOPY 0x0020 - -/* Make sure the returned array is a base-class ndarray */ -#define NPY_ENSUREARRAY 0x0040 - -/* Make sure that the strides are in units of the element size - Needed for some operations with record-arrays. 
-*/ -#define NPY_ELEMENTSTRIDES 0x0080 - -/* Array data is aligned on the appropiate memory address for the - type stored according to how the compiler would align things - (e.g., an array of integers (4 bytes each) starts on - a memory address that's a multiple of 4) */ -#define NPY_ALIGNED 0x0100 -/* Array data has the native endianness */ -#define NPY_NOTSWAPPED 0x0200 -/* Array data is writeable */ -#define NPY_WRITEABLE 0x0400 -/* If this flag is set, then base contains a pointer to an array of - the same size that should be updated with the current contents of - this array when this array is deallocated -*/ -#define NPY_UPDATEIFCOPY 0x1000 - -/* This flag is for the array interface */ -#define NPY_ARR_HAS_DESCR 0x0800 - - -#define NPY_BEHAVED (NPY_ALIGNED | NPY_WRITEABLE) -#define NPY_BEHAVED_NS (NPY_ALIGNED | NPY_WRITEABLE | NPY_NOTSWAPPED) -#define NPY_CARRAY (NPY_CONTIGUOUS | NPY_BEHAVED) -#define NPY_CARRAY_RO (NPY_CONTIGUOUS | NPY_ALIGNED) -#define NPY_FARRAY (NPY_FORTRAN | NPY_BEHAVED) -#define NPY_FARRAY_RO (NPY_FORTRAN | NPY_ALIGNED) -#define NPY_DEFAULT NPY_CARRAY -#define NPY_IN_ARRAY NPY_CARRAY_RO -#define NPY_OUT_ARRAY NPY_CARRAY -#define NPY_INOUT_ARRAY (NPY_CARRAY | NPY_UPDATEIFCOPY) -#define NPY_IN_FARRAY NPY_FARRAY_RO -#define NPY_OUT_FARRAY NPY_FARRAY -#define NPY_INOUT_FARRAY (NPY_FARRAY | NPY_UPDATEIFCOPY) - -#define NPY_UPDATE_ALL (NPY_CONTIGUOUS | NPY_FORTRAN | NPY_ALIGNED) - - -/* Size of internal buffers used for alignment */ -/* Make BUFSIZE a multiple of sizeof(cdouble) -- ususally 16 */ -/* So that ufunc buffers are aligned */ -#define NPY_MIN_BUFSIZE ((int)sizeof(cdouble)) -#define NPY_MAX_BUFSIZE (((int)sizeof(cdouble))*1000000) -#define NPY_BUFSIZE 10000 -/* #define NPY_BUFSIZE 80*/ - -#define PyArray_MAX(a,b) (((a)>(b))?(a):(b)) -#define PyArray_MIN(a,b) (((a)<(b))?(a):(b)) -#define PyArray_CLT(p,q) ((((p).real==(q).real) ? ((p).imag < (q).imag) : \ - ((p).real < (q).real))) -#define PyArray_CGT(p,q) ((((p).real==(q).real) ? 
((p).imag > (q).imag) : \ - ((p).real > (q).real))) -#define PyArray_CLE(p,q) ((((p).real==(q).real) ? ((p).imag <= (q).imag) : \ - ((p).real <= (q).real))) -#define PyArray_CGE(p,q) ((((p).real==(q).real) ? ((p).imag >= (q).imag) : \ - ((p).real >= (q).real))) -#define PyArray_CEQ(p,q) (((p).real==(q).real) && ((p).imag == (q).imag)) -#define PyArray_CNE(p,q) (((p).real!=(q).real) || ((p).imag != (q).imag)) - -/* - * C API: consists of Macros and functions. The MACROS are defined here. - */ - - -#define PyArray_CHKFLAGS(m, FLAGS) \ - ((((PyArrayObject *)(m))->flags & (FLAGS)) == (FLAGS)) - -#define PyArray_ISCONTIGUOUS(m) PyArray_CHKFLAGS(m, NPY_CONTIGUOUS) -#define PyArray_ISWRITEABLE(m) PyArray_CHKFLAGS(m, NPY_WRITEABLE) -#define PyArray_ISALIGNED(m) PyArray_CHKFLAGS(m, NPY_ALIGNED) - - -#if NPY_ALLOW_THREADS -#define NPY_BEGIN_ALLOW_THREADS Py_BEGIN_ALLOW_THREADS -#define NPY_END_ALLOW_THREADS Py_END_ALLOW_THREADS -#define NPY_BEGIN_THREADS_DEF PyThreadState *_save=NULL; -#define NPY_BEGIN_THREADS _save = PyEval_SaveThread(); -#define NPY_END_THREADS do {if (_save) PyEval_RestoreThread(_save);} while (0); - -#define NPY_BEGIN_THREADS_DESCR(dtype) \ - do {if (!(PyDataType_FLAGCHK(dtype, NPY_NEEDS_PYAPI))) \ - NPY_BEGIN_THREADS;} while (0); - -#define NPY_END_THREADS_DESCR(dtype) \ - do {if (!(PyDataType_FLAGCHK(dtype, NPY_NEEDS_PYAPI))) \ - NPY_END_THREADS; } while (0); - -#define NPY_ALLOW_C_API_DEF PyGILState_STATE __save__; -#define NPY_ALLOW_C_API __save__ = PyGILState_Ensure(); -#define NPY_DISABLE_C_API PyGILState_Release(__save__); -#else -#define NPY_BEGIN_ALLOW_THREADS -#define NPY_END_ALLOW_THREADS -#define NPY_BEGIN_THREADS_DEF -#define NPY_BEGIN_THREADS -#define NPY_END_THREADS -#define NPY_BEGIN_THREADS_DESCR(dtype) -#define NPY_END_THREADS_DESCR(dtype) -#define NPY_ALLOW_C_API_DEF -#define NPY_ALLOW_C_API -#define NPY_DISABLE_C_API -#endif - -/***************************** - * Basic iterator object - *****************************/ - -/* FWD 
declaration */ -typedef struct PyArrayIterObject_tag PyArrayIterObject; - -/* type of the function which translates a set of coordinates to a pointer to - * the data */ -typedef char* (*npy_iter_get_dataptr_t)(PyArrayIterObject* iter, npy_intp*); - -struct PyArrayIterObject_tag { - PyObject_HEAD - int nd_m1; /* number of dimensions - 1 */ - npy_intp index, size; - npy_intp coordinates[NPY_MAXDIMS];/* N-dimensional loop */ - npy_intp dims_m1[NPY_MAXDIMS]; /* ao->dimensions - 1 */ - npy_intp strides[NPY_MAXDIMS]; /* ao->strides or fake */ - npy_intp backstrides[NPY_MAXDIMS];/* how far to jump back */ - npy_intp factors[NPY_MAXDIMS]; /* shape factors */ - PyArrayObject *ao; - char *dataptr; /* pointer to current item*/ - npy_bool contiguous; - - npy_intp bounds[NPY_MAXDIMS][2]; - npy_intp limits[NPY_MAXDIMS][2]; - npy_intp limits_sizes[NPY_MAXDIMS]; - npy_iter_get_dataptr_t translate; -} ; - - -/* Iterator API */ -#define PyArrayIter_Check(op) PyObject_TypeCheck(op, &PyArrayIter_Type) - -#define _PyAIT(it) ((PyArrayIterObject *)(it)) -#define PyArray_ITER_RESET(it) { \ - _PyAIT(it)->index = 0; \ - _PyAIT(it)->dataptr = _PyAIT(it)->ao->data; \ - memset(_PyAIT(it)->coordinates, 0, \ - (_PyAIT(it)->nd_m1+1)*sizeof(npy_intp)); \ -} - -#define _PyArray_ITER_NEXT1(it) { \ - (it)->dataptr += _PyAIT(it)->strides[0]; \ - (it)->coordinates[0]++; \ -} - -#define _PyArray_ITER_NEXT2(it) { \ - if ((it)->coordinates[1] < (it)->dims_m1[1]) { \ - (it)->coordinates[1]++; \ - (it)->dataptr += (it)->strides[1]; \ - } \ - else { \ - (it)->coordinates[1] = 0; \ - (it)->coordinates[0]++; \ - (it)->dataptr += (it)->strides[0] - \ - (it)->backstrides[1]; \ - } \ -} - -#define _PyArray_ITER_NEXT3(it) { \ - if ((it)->coordinates[2] < (it)->dims_m1[2]) { \ - (it)->coordinates[2]++; \ - (it)->dataptr += (it)->strides[2]; \ - } \ - else { \ - (it)->coordinates[2] = 0; \ - (it)->dataptr -= (it)->backstrides[2]; \ - if ((it)->coordinates[1] < (it)->dims_m1[1]) { \ - (it)->coordinates[1]++; \ - 
(it)->dataptr += (it)->strides[1]; \ - } \ - else { \ - (it)->coordinates[1] = 0; \ - (it)->coordinates[0]++; \ - (it)->dataptr += (it)->strides[0] - \ - (it)->backstrides[1]; \ - } \ - } \ -} - -#define PyArray_ITER_NEXT(it) { \ - _PyAIT(it)->index++; \ - if (_PyAIT(it)->nd_m1 == 0) { \ - _PyArray_ITER_NEXT1(_PyAIT(it)); \ - } \ - else if (_PyAIT(it)->contiguous) \ - _PyAIT(it)->dataptr += _PyAIT(it)->ao->descr->elsize; \ - else if (_PyAIT(it)->nd_m1 == 1) { \ - _PyArray_ITER_NEXT2(_PyAIT(it)); \ - } \ - else { \ - int __npy_i; \ - for (__npy_i=_PyAIT(it)->nd_m1; __npy_i >= 0; __npy_i--) { \ - if (_PyAIT(it)->coordinates[__npy_i] < \ - _PyAIT(it)->dims_m1[__npy_i]) { \ - _PyAIT(it)->coordinates[__npy_i]++; \ - _PyAIT(it)->dataptr += \ - _PyAIT(it)->strides[__npy_i]; \ - break; \ - } \ - else { \ - _PyAIT(it)->coordinates[__npy_i] = 0; \ - _PyAIT(it)->dataptr -= \ - _PyAIT(it)->backstrides[__npy_i]; \ - } \ - } \ - } \ -} - -#define PyArray_ITER_GOTO(it, destination) { \ - int __npy_i; \ - _PyAIT(it)->index = 0; \ - _PyAIT(it)->dataptr = _PyAIT(it)->ao->data; \ - for (__npy_i = _PyAIT(it)->nd_m1; __npy_i>=0; __npy_i--) { \ - if (destination[__npy_i] < 0) { \ - destination[__npy_i] += \ - _PyAIT(it)->dims_m1[__npy_i]+1; \ - } \ - _PyAIT(it)->dataptr += destination[__npy_i] * \ - _PyAIT(it)->strides[__npy_i]; \ - _PyAIT(it)->coordinates[__npy_i] = \ - destination[__npy_i]; \ - _PyAIT(it)->index += destination[__npy_i] * \ - ( __npy_i==_PyAIT(it)->nd_m1 ? 
1 : \ - _PyAIT(it)->dims_m1[__npy_i+1]+1) ; \ - } \ -} - -#define PyArray_ITER_GOTO1D(it, ind) { \ - int __npy_i; \ - npy_intp __npy_ind = (npy_intp) (ind); \ - if (__npy_ind < 0) __npy_ind += _PyAIT(it)->size; \ - _PyAIT(it)->index = __npy_ind; \ - if (_PyAIT(it)->nd_m1 == 0) { \ - _PyAIT(it)->dataptr = _PyAIT(it)->ao->data + \ - __npy_ind * _PyAIT(it)->strides[0]; \ - } \ - else if (_PyAIT(it)->contiguous) \ - _PyAIT(it)->dataptr = _PyAIT(it)->ao->data + \ - __npy_ind * _PyAIT(it)->ao->descr->elsize; \ - else { \ - _PyAIT(it)->dataptr = _PyAIT(it)->ao->data; \ - for (__npy_i = 0; __npy_i<=_PyAIT(it)->nd_m1; \ - __npy_i++) { \ - _PyAIT(it)->dataptr += \ - (__npy_ind / _PyAIT(it)->factors[__npy_i]) \ - * _PyAIT(it)->strides[__npy_i]; \ - __npy_ind %= _PyAIT(it)->factors[__npy_i]; \ - } \ - } \ -} - -#define PyArray_ITER_DATA(it) ((void *)(_PyAIT(it)->dataptr)) - -#define PyArray_ITER_NOTDONE(it) (_PyAIT(it)->index < _PyAIT(it)->size) - - -/* - Any object passed to PyArray_Broadcast must be binary compatible with - this structure. 
-*/ - -typedef struct { - PyObject_HEAD - int numiter; /* number of iters */ - npy_intp size; /* broadcasted size */ - npy_intp index; /* current index */ - int nd; /* number of dims */ - npy_intp dimensions[NPY_MAXDIMS]; /* dimensions */ - PyArrayIterObject *iters[NPY_MAXARGS]; /* iterators */ -} PyArrayMultiIterObject; - -#define _PyMIT(m) ((PyArrayMultiIterObject *)(m)) -#define PyArray_MultiIter_RESET(multi) { \ - int __npy_mi; \ - _PyMIT(multi)->index = 0; \ - for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \ - PyArray_ITER_RESET(_PyMIT(multi)->iters[__npy_mi]); \ - } \ -} - -#define PyArray_MultiIter_NEXT(multi) { \ - int __npy_mi; \ - _PyMIT(multi)->index++; \ - for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \ - PyArray_ITER_NEXT(_PyMIT(multi)->iters[__npy_mi]); \ - } \ -} - -#define PyArray_MultiIter_GOTO(multi, dest) { \ - int __npy_mi; \ - for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \ - PyArray_ITER_GOTO(_PyMIT(multi)->iters[__npy_mi], dest); \ - } \ - _PyMIT(multi)->index = _PyMIT(multi)->iters[0]->index; \ -} - -#define PyArray_MultiIter_GOTO1D(multi, ind) { \ - int __npy_mi; \ - for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \ - PyArray_ITER_GOTO1D(_PyMIT(multi)->iters[__npy_mi], ind); \ - } \ - _PyMIT(multi)->index = _PyMIT(multi)->iters[0]->index; \ -} - -#define PyArray_MultiIter_DATA(multi, i) \ - ((void *)(_PyMIT(multi)->iters[i]->dataptr)) - -#define PyArray_MultiIter_NEXTi(multi, i) \ - PyArray_ITER_NEXT(_PyMIT(multi)->iters[i]) - -#define PyArray_MultiIter_NOTDONE(multi) \ - (_PyMIT(multi)->index < _PyMIT(multi)->size) - -/* Store the information needed for fancy-indexing over an array */ - -typedef struct { - PyObject_HEAD - /* Multi-iterator portion --- needs to be present in this order to - work with PyArray_Broadcast */ - - int numiter; /* number of index-array - iterators */ - npy_intp size; /* size of broadcasted - result */ - npy_intp index; /* current index */ 
- int nd; /* number of dims */ - npy_intp dimensions[NPY_MAXDIMS]; /* dimensions */ - PyArrayIterObject *iters[NPY_MAXDIMS]; /* index object - iterators */ - PyArrayIterObject *ait; /* flat Iterator for - underlying array */ - - /* flat iterator for subspace (when numiter < nd) */ - PyArrayIterObject *subspace; - - /* if subspace iteration, then this is the array of - axes in the underlying array represented by the - index objects */ - int iteraxes[NPY_MAXDIMS]; - /* if subspace iteration, the these are the coordinates - to the start of the subspace. - */ - npy_intp bscoord[NPY_MAXDIMS]; - - PyObject *indexobj; /* creating obj */ - int consec; - char *dataptr; - -} PyArrayMapIterObject; - -enum { - NPY_NEIGHBORHOOD_ITER_ZERO_PADDING, - NPY_NEIGHBORHOOD_ITER_ONE_PADDING, - NPY_NEIGHBORHOOD_ITER_CONSTANT_PADDING, - NPY_NEIGHBORHOOD_ITER_CIRCULAR_PADDING, - NPY_NEIGHBORHOOD_ITER_MIRROR_PADDING -}; - -typedef struct { - PyObject_HEAD - - /* - * PyArrayIterObject part: keep this in this exact order - */ - int nd_m1; /* number of dimensions - 1 */ - npy_intp index, size; - npy_intp coordinates[NPY_MAXDIMS];/* N-dimensional loop */ - npy_intp dims_m1[NPY_MAXDIMS]; /* ao->dimensions - 1 */ - npy_intp strides[NPY_MAXDIMS]; /* ao->strides or fake */ - npy_intp backstrides[NPY_MAXDIMS];/* how far to jump back */ - npy_intp factors[NPY_MAXDIMS]; /* shape factors */ - PyArrayObject *ao; - char *dataptr; /* pointer to current item*/ - npy_bool contiguous; - - npy_intp bounds[NPY_MAXDIMS][2]; - npy_intp limits[NPY_MAXDIMS][2]; - npy_intp limits_sizes[NPY_MAXDIMS]; - npy_iter_get_dataptr_t translate; - - /* - * New members - */ - npy_intp nd; - - /* Dimensions is the dimension of the array */ - npy_intp dimensions[NPY_MAXDIMS]; - - /* Neighborhood points coordinates are computed relatively to the point pointed - * by _internal_iter */ - PyArrayIterObject* _internal_iter; - /* To keep a reference to the representation of the constant value for - * constant padding */ - char* 
constant; - - int mode; -} PyArrayNeighborhoodIterObject; - -/* - * Neighborhood iterator API - */ - -/* General: those work for any mode */ -static NPY_INLINE int -PyArrayNeighborhoodIter_Reset(PyArrayNeighborhoodIterObject* iter); -static NPY_INLINE int -PyArrayNeighborhoodIter_Next(PyArrayNeighborhoodIterObject* iter); -#if 0 -static NPY_INLINE int -PyArrayNeighborhoodIter_Next2D(PyArrayNeighborhoodIterObject* iter); -#endif - -/* Include inline implementations - functions defined there are not considered - * public API */ -#define _NPY_INCLUDE_NEIGHBORHOOD_IMP -#include "_neighborhood_iterator_imp.h" -#undef _NPY_INCLUDE_NEIGHBORHOOD_IMP - -/* The default array type - */ -#define NPY_DEFAULT_TYPE NPY_DOUBLE -#define PyArray_DEFAULT NPY_DEFAULT_TYPE -/* All sorts of useful ways to look into a PyArrayObject. - These are the recommended over casting to PyArrayObject and accessing - the members directly. - */ - -#define PyArray_NDIM(obj) (((PyArrayObject *)(obj))->nd) -#define PyArray_ISONESEGMENT(m) (PyArray_NDIM(m) == 0 || \ - PyArray_CHKFLAGS(m, NPY_CONTIGUOUS) || \ - PyArray_CHKFLAGS(m, NPY_FORTRAN)) - -#define PyArray_ISFORTRAN(m) (PyArray_CHKFLAGS(m, NPY_FORTRAN) && \ - (PyArray_NDIM(m) > 1)) - -#define PyArray_FORTRAN_IF(m) ((PyArray_CHKFLAGS(m, NPY_FORTRAN) ? 
\ - NPY_FORTRAN : 0)) - -#define FORTRAN_IF PyArray_FORTRAN_IF -#define PyArray_DATA(obj) ((void *)(((PyArrayObject *)(obj))->data)) -#define PyArray_BYTES(obj) (((PyArrayObject *)(obj))->data) -#define PyArray_DIMS(obj) (((PyArrayObject *)(obj))->dimensions) -#define PyArray_STRIDES(obj) (((PyArrayObject *)(obj))->strides) -#define PyArray_DIM(obj,n) (PyArray_DIMS(obj)[n]) -#define PyArray_STRIDE(obj,n) (PyArray_STRIDES(obj)[n]) -#define PyArray_BASE(obj) (((PyArrayObject *)(obj))->base) -#define PyArray_DESCR(obj) (((PyArrayObject *)(obj))->descr) -#define PyArray_FLAGS(obj) (((PyArrayObject *)(obj))->flags) -#define PyArray_ITEMSIZE(obj) (((PyArrayObject *)(obj))->descr->elsize) -#define PyArray_TYPE(obj) (((PyArrayObject *)(obj))->descr->type_num) - -#define PyArray_GETITEM(obj,itemptr) \ - ((PyArrayObject *)(obj))->descr->f->getitem((char *)(itemptr), \ - (PyArrayObject *)(obj)) - -#define PyArray_SETITEM(obj,itemptr,v) \ - ((PyArrayObject *)(obj))->descr->f->setitem((PyObject *)(v), \ - (char *)(itemptr), \ - (PyArrayObject *)(obj)) - - -#define PyTypeNum_ISBOOL(type) ((type) == NPY_BOOL) - -#define PyTypeNum_ISUNSIGNED(type) (((type) == NPY_UBYTE) || \ - ((type) == NPY_USHORT) || \ - ((type) == NPY_UINT) || \ - ((type) == NPY_ULONG) || \ - ((type) == NPY_ULONGLONG)) - -#define PyTypeNum_ISSIGNED(type) (((type) == NPY_BYTE) || \ - ((type) == NPY_SHORT) || \ - ((type) == NPY_INT) || \ - ((type) == NPY_LONG) || \ - ((type) == NPY_LONGLONG)) - -#define PyTypeNum_ISINTEGER(type) (((type) >= NPY_BYTE) && \ - ((type) <= NPY_ULONGLONG)) - -#define PyTypeNum_ISFLOAT(type) (((type) >= NPY_FLOAT) && \ - ((type) <= NPY_LONGDOUBLE)) - -#define PyTypeNum_ISNUMBER(type) ((type) <= NPY_CLONGDOUBLE) - -#define PyTypeNum_ISSTRING(type) (((type) == NPY_STRING) || \ - ((type) == NPY_UNICODE)) - -#define PyTypeNum_ISCOMPLEX(type) (((type) >= NPY_CFLOAT) && \ - ((type) <= NPY_CLONGDOUBLE)) - -#define PyTypeNum_ISPYTHON(type) (((type) == NPY_LONG) || \ - ((type) == NPY_DOUBLE) || 
\ - ((type) == NPY_CDOUBLE) || \ - ((type) == NPY_BOOL) || \ - ((type) == NPY_OBJECT )) - -#define PyTypeNum_ISFLEXIBLE(type) (((type) >=NPY_STRING) && \ - ((type) <=NPY_VOID)) - -#define PyTypeNum_ISDATETIME(type) (((type) >=NPY_DATETIME) && \ - ((type) <=NPY_TIMEDELTA)) - -#define PyTypeNum_ISUSERDEF(type) (((type) >= NPY_USERDEF) && \ - ((type) < NPY_USERDEF+ \ - NPY_NUMUSERTYPES)) - -#define PyTypeNum_ISEXTENDED(type) (PyTypeNum_ISFLEXIBLE(type) || \ - PyTypeNum_ISUSERDEF(type)) - -#define PyTypeNum_ISOBJECT(type) ((type) == NPY_OBJECT) - - -#define PyDataType_ISBOOL(obj) PyTypeNum_ISBOOL(_PyADt(obj)) -#define PyDataType_ISUNSIGNED(obj) PyTypeNum_ISUNSIGNED(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_ISSIGNED(obj) PyTypeNum_ISSIGNED(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_ISINTEGER(obj) PyTypeNum_ISINTEGER(((PyArray_Descr*)(obj))->type_num ) -#define PyDataType_ISFLOAT(obj) PyTypeNum_ISFLOAT(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_ISNUMBER(obj) PyTypeNum_ISNUMBER(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_ISSTRING(obj) PyTypeNum_ISSTRING(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_ISCOMPLEX(obj) PyTypeNum_ISCOMPLEX(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_ISPYTHON(obj) PyTypeNum_ISPYTHON(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_ISFLEXIBLE(obj) PyTypeNum_ISFLEXIBLE(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_ISDATETIME(obj) PyTypeNum_ISDATETIME(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_ISUSERDEF(obj) PyTypeNum_ISUSERDEF(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_ISEXTENDED(obj) PyTypeNum_ISEXTENDED(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_ISOBJECT(obj) PyTypeNum_ISOBJECT(((PyArray_Descr*)(obj))->type_num) -#define PyDataType_HASFIELDS(obj) (((PyArray_Descr *)(obj))->names != NULL) - -#define PyArray_ISBOOL(obj) PyTypeNum_ISBOOL(PyArray_TYPE(obj)) -#define PyArray_ISUNSIGNED(obj) PyTypeNum_ISUNSIGNED(PyArray_TYPE(obj)) 
-#define PyArray_ISSIGNED(obj) PyTypeNum_ISSIGNED(PyArray_TYPE(obj)) -#define PyArray_ISINTEGER(obj) PyTypeNum_ISINTEGER(PyArray_TYPE(obj)) -#define PyArray_ISFLOAT(obj) PyTypeNum_ISFLOAT(PyArray_TYPE(obj)) -#define PyArray_ISNUMBER(obj) PyTypeNum_ISNUMBER(PyArray_TYPE(obj)) -#define PyArray_ISSTRING(obj) PyTypeNum_ISSTRING(PyArray_TYPE(obj)) -#define PyArray_ISCOMPLEX(obj) PyTypeNum_ISCOMPLEX(PyArray_TYPE(obj)) -#define PyArray_ISPYTHON(obj) PyTypeNum_ISPYTHON(PyArray_TYPE(obj)) -#define PyArray_ISFLEXIBLE(obj) PyTypeNum_ISFLEXIBLE(PyArray_TYPE(obj)) -#define PyArray_ISDATETIME(obj) PyTypeNum_ISDATETIME(PyArray_TYPE(obj)) -#define PyArray_ISUSERDEF(obj) PyTypeNum_ISUSERDEF(PyArray_TYPE(obj)) -#define PyArray_ISEXTENDED(obj) PyTypeNum_ISEXTENDED(PyArray_TYPE(obj)) -#define PyArray_ISOBJECT(obj) PyTypeNum_ISOBJECT(PyArray_TYPE(obj)) -#define PyArray_HASFIELDS(obj) PyDataType_HASFIELDS(PyArray_DESCR(obj)) - - /* FIXME: This should check for a flag on the data-type - that states whether or not it is variable length. - Because the ISFLEXIBLE check is hard-coded to the - built-in data-types. 
- */ -#define PyArray_ISVARIABLE(obj) PyTypeNum_ISFLEXIBLE(PyArray_TYPE(obj)) - -#define PyArray_SAFEALIGNEDCOPY(obj) (PyArray_ISALIGNED(obj) && !PyArray_ISVARIABLE(obj)) - - -#define NPY_LITTLE '<' -#define NPY_BIG '>' -#define NPY_NATIVE '=' -#define NPY_SWAP 's' -#define NPY_IGNORE '|' - -#if NPY_BYTE_ORDER == NPY_BIG_ENDIAN -#define NPY_NATBYTE NPY_BIG -#define NPY_OPPBYTE NPY_LITTLE -#else -#define NPY_NATBYTE NPY_LITTLE -#define NPY_OPPBYTE NPY_BIG -#endif - -#define PyArray_ISNBO(arg) ((arg) != NPY_OPPBYTE) -#define PyArray_IsNativeByteOrder PyArray_ISNBO -#define PyArray_ISNOTSWAPPED(m) PyArray_ISNBO(PyArray_DESCR(m)->byteorder) -#define PyArray_ISBYTESWAPPED(m) (!PyArray_ISNOTSWAPPED(m)) - -#define PyArray_FLAGSWAP(m, flags) (PyArray_CHKFLAGS(m, flags) && \ - PyArray_ISNOTSWAPPED(m)) - -#define PyArray_ISCARRAY(m) PyArray_FLAGSWAP(m, NPY_CARRAY) -#define PyArray_ISCARRAY_RO(m) PyArray_FLAGSWAP(m, NPY_CARRAY_RO) -#define PyArray_ISFARRAY(m) PyArray_FLAGSWAP(m, NPY_FARRAY) -#define PyArray_ISFARRAY_RO(m) PyArray_FLAGSWAP(m, NPY_FARRAY_RO) -#define PyArray_ISBEHAVED(m) PyArray_FLAGSWAP(m, NPY_BEHAVED) -#define PyArray_ISBEHAVED_RO(m) PyArray_FLAGSWAP(m, NPY_ALIGNED) - - -#define PyDataType_ISNOTSWAPPED(d) PyArray_ISNBO(((PyArray_Descr *)(d))->byteorder) -#define PyDataType_ISBYTESWAPPED(d) (!PyDataType_ISNOTSWAPPED(d)) - - -/* This is the form of the struct that's returned pointed by the - PyCObject attribute of an array __array_struct__. See - http://numpy.scipy.org/array_interface.shtml for the full - documentation. */ -typedef struct { - int two; /* contains the integer 2 as a sanity check */ - int nd; /* number of dimensions */ - char typekind; /* kind in array --- character code of typestr */ - int itemsize; /* size of each element */ - int flags; /* how should be data interpreted. Valid - flags are CONTIGUOUS (1), FORTRAN (2), - ALIGNED (0x100), NOTSWAPPED (0x200), and - WRITEABLE (0x400). 
- ARR_HAS_DESCR (0x800) states that arrdescr - field is present in structure */ - npy_intp *shape; /* A length-nd array of shape information */ - npy_intp *strides; /* A length-nd array of stride information */ - void *data; /* A pointer to the first element of the array */ - PyObject *descr; /* A list of fields or NULL (ignored if flags - does not have ARR_HAS_DESCR flag set) */ -} PyArrayInterface; - - /* Includes the "function" C-API -- these are all stored in a list of pointers --- one for each file The two lists are concatenated into one in multiarray. Copied: trunk/numpy/core/include/numpy/ndarraytypes.h (from rev 8396, trunk/numpy/core/include/numpy/ndarrayobject.h) =================================================================== --- trunk/numpy/core/include/numpy/ndarraytypes.h (rev 0) +++ trunk/numpy/core/include/numpy/ndarraytypes.h 2010-05-09 00:04:55 UTC (rev 8397) @@ -0,0 +1,1253 @@ +#ifndef NPY_ARRAYTYPES_H +#define NPY_ARRAYTYPES_H + +/* This is auto-generated by the installer */ +#include "numpyconfig.h" + +#ifdef NPY_ENABLE_SEPARATE_COMPILATION + #define NPY_NO_EXPORT NPY_VISIBILITY_HIDDEN +#else + #define NPY_NO_EXPORT static +#endif + +/* Only use thread if configured in config and python supports it */ +#if defined WITH_THREAD && !NPY_NO_SMP + #define NPY_ALLOW_THREADS 1 +#else + #define NPY_ALLOW_THREADS 0 +#endif + +#include "npy_endian.h" + +#include "utils.h" + +/* There are several places in the code where an array of dimensions is + * allocated statically. This is the size of that static allocation. + * + * The array creation itself could have arbitrary dimensions but + * all the places where static allocation is used would need to + * be changed to dynamic (including inside of several structures) + */ + +#define NPY_MAXDIMS 32 +#define NPY_MAXARGS 32 + +/* Used for Converter Functions "O&" code in ParseTuple */ +#define NPY_FAIL 0 +#define NPY_SUCCEED 1 + +/* Binary compatibility version number. 
This number is increased + whenever the C-API is changed such that binary compatibility is + broken, i.e. whenever a recompile of extension modules is + needed. */ +#define NPY_VERSION NPY_ABI_VERSION + +/* Minor API version. This number is increased whenever a change is + made to the C-API -- whether it breaks binary compatibility or not. + Some changes, such as adding a function pointer to the end of the + function table, can be made without breaking binary compatibility. + In this case, only the NPY_FEATURE_VERSION (*not* NPY_VERSION) + would be increased. Whenever binary compatibility is broken, both + NPY_VERSION and NPY_FEATURE_VERSION should be increased. + */ +#define NPY_FEATURE_VERSION NPY_API_VERSION + +#include "npy_common.h" + +enum NPY_TYPES { NPY_BOOL=0, + NPY_BYTE, NPY_UBYTE, + NPY_SHORT, NPY_USHORT, + NPY_INT, NPY_UINT, + NPY_LONG, NPY_ULONG, + NPY_LONGLONG, NPY_ULONGLONG, + NPY_FLOAT, NPY_DOUBLE, NPY_LONGDOUBLE, + NPY_CFLOAT, NPY_CDOUBLE, NPY_CLONGDOUBLE, + NPY_DATETIME, NPY_TIMEDELTA, + NPY_OBJECT=19, + NPY_STRING, NPY_UNICODE, + NPY_VOID, + NPY_NTYPES, + NPY_NOTYPE, + NPY_CHAR, /* special flag */ + NPY_USERDEF=256 /* leave room for characters */ +}; + +#define NPY_METADATA_DTSTR "__frequency__" + +/* basetype array priority */ +#define NPY_PRIORITY 0.0 + +/* default subtype priority */ +#define NPY_SUBTYPE_PRIORITY 1.0 + +/* default scalar priority */ +#define NPY_SCALAR_PRIORITY -1000000.0 + +/* How many floating point types are there */ +#define NPY_NUM_FLOATTYPE 3 + +/* We need to match npy_intp to a signed integer of the same size as + a pointer variable. 
npy_uintp to the equivalent unsigned integer +*/ + + +/* These characters correspond to the array type and the + struct module */ + +/* except 'p' -- signed integer for pointer type */ + +enum NPY_TYPECHAR { NPY_BOOLLTR = '?', + NPY_BYTELTR = 'b', + NPY_UBYTELTR = 'B', + NPY_SHORTLTR = 'h', + NPY_USHORTLTR = 'H', + NPY_INTLTR = 'i', + NPY_UINTLTR = 'I', + NPY_LONGLTR = 'l', + NPY_ULONGLTR = 'L', + NPY_LONGLONGLTR = 'q', + NPY_ULONGLONGLTR = 'Q', + NPY_FLOATLTR = 'f', + NPY_DOUBLELTR = 'd', + NPY_LONGDOUBLELTR = 'g', + NPY_CFLOATLTR = 'F', + NPY_CDOUBLELTR = 'D', + NPY_CLONGDOUBLELTR = 'G', + NPY_OBJECTLTR = 'O', + NPY_STRINGLTR = 'S', + NPY_STRINGLTR2 = 'a', + NPY_UNICODELTR = 'U', + NPY_VOIDLTR = 'V', + NPY_DATETIMELTR = 'M', + NPY_TIMEDELTALTR = 'm', + NPY_CHARLTR = 'c', + + /* No Descriptor, just a define -- this let's + Python users specify an array of integers + large enough to hold a pointer on the platform*/ + NPY_INTPLTR = 'p', + NPY_UINTPLTR = 'P', + + NPY_GENBOOLLTR ='b', + NPY_SIGNEDLTR = 'i', + NPY_UNSIGNEDLTR = 'u', + NPY_FLOATINGLTR = 'f', + NPY_COMPLEXLTR = 'c' +}; + +typedef enum { + NPY_QUICKSORT=0, + NPY_HEAPSORT=1, + NPY_MERGESORT=2 +} NPY_SORTKIND; +#define NPY_NSORTS (NPY_MERGESORT + 1) + + +typedef enum { + NPY_SEARCHLEFT=0, + NPY_SEARCHRIGHT=1 +} NPY_SEARCHSIDE; +#define NPY_NSEARCHSIDES (NPY_SEARCHRIGHT + 1) + + +typedef enum { + NPY_NOSCALAR=-1, + NPY_BOOL_SCALAR, + NPY_INTPOS_SCALAR, + NPY_INTNEG_SCALAR, + NPY_FLOAT_SCALAR, + NPY_COMPLEX_SCALAR, + NPY_OBJECT_SCALAR +} NPY_SCALARKIND; +#define NPY_NSCALARKINDS (NPY_OBJECT_SCALAR + 1) + +typedef enum { + NPY_ANYORDER=-1, + NPY_CORDER=0, + NPY_FORTRANORDER=1 +} NPY_ORDER; + + +typedef enum { + NPY_CLIP=0, + NPY_WRAP=1, + NPY_RAISE=2 +} NPY_CLIPMODE; + +typedef enum { + NPY_FR_Y, + NPY_FR_M, + NPY_FR_W, + NPY_FR_B, + NPY_FR_D, + NPY_FR_h, + NPY_FR_m, + NPY_FR_s, + NPY_FR_ms, + NPY_FR_us, + NPY_FR_ns, + NPY_FR_ps, + NPY_FR_fs, + NPY_FR_as +} NPY_DATETIMEUNIT; + +#define NPY_DATETIME_NUMUNITS 
(NPY_FR_as + 1) +#define NPY_DATETIME_DEFAULTUNIT NPY_FR_us + +#define NPY_STR_Y "Y" +#define NPY_STR_M "M" +#define NPY_STR_W "W" +#define NPY_STR_B "B" +#define NPY_STR_D "D" +#define NPY_STR_h "h" +#define NPY_STR_m "m" +#define NPY_STR_s "s" +#define NPY_STR_ms "ms" +#define NPY_STR_us "us" +#define NPY_STR_ns "ns" +#define NPY_STR_ps "ps" +#define NPY_STR_fs "fs" +#define NPY_STR_as "as" + + +/* This is to typedef npy_intp to the appropriate pointer size for this + * platform. Py_intptr_t, Py_uintptr_t are defined in pyport.h. */ +typedef Py_intptr_t npy_intp; +typedef Py_uintptr_t npy_uintp; +#define NPY_SIZEOF_INTP NPY_SIZEOF_PY_INTPTR_T +#define NPY_SIZEOF_UINTP NPY_SIZEOF_PY_INTPTR_T + +#ifdef constchar +#undef constchar +#endif + +#if (PY_VERSION_HEX < 0x02050000) + #ifndef PY_SSIZE_T_MIN + typedef int Py_ssize_t; + #define PY_SSIZE_T_MAX INT_MAX + #define PY_SSIZE_T_MIN INT_MIN + #endif +#define NPY_SSIZE_T_PYFMT "i" +#undef PyIndex_Check +#define constchar const char +#define PyIndex_Check(op) 0 +#else +#define NPY_SSIZE_T_PYFMT "n" +#define constchar char +#endif + +#if NPY_SIZEOF_PY_INTPTR_T == NPY_SIZEOF_INT + #define NPY_INTP NPY_INT + #define NPY_UINTP NPY_UINT + #define PyIntpArrType_Type PyIntArrType_Type + #define PyUIntpArrType_Type PyUIntArrType_Type + #define NPY_MAX_INTP NPY_MAX_INT + #define NPY_MIN_INTP NPY_MIN_INT + #define NPY_MAX_UINTP NPY_MAX_UINT + #define NPY_INTP_FMT "d" +#elif NPY_SIZEOF_PY_INTPTR_T == NPY_SIZEOF_LONG + #define NPY_INTP NPY_LONG + #define NPY_UINTP NPY_ULONG + #define PyIntpArrType_Type PyLongArrType_Type + #define PyUIntpArrType_Type PyULongArrType_Type + #define NPY_MAX_INTP NPY_MAX_LONG + #define NPY_MIN_INTP MIN_LONG + #define NPY_MAX_UINTP NPY_MAX_ULONG + #define NPY_INTP_FMT "ld" +#elif defined(PY_LONG_LONG) && (NPY_SIZEOF_PY_INTPTR_T == NPY_SIZEOF_LONGLONG) + #define NPY_INTP NPY_LONGLONG + #define NPY_UINTP NPY_ULONGLONG + #define PyIntpArrType_Type PyLongLongArrType_Type + #define PyUIntpArrType_Type 
PyULongLongArrType_Type + #define NPY_MAX_INTP NPY_MAX_LONGLONG + #define NPY_MIN_INTP NPY_MIN_LONGLONG + #define NPY_MAX_UINTP NPY_MAX_ULONGLONG + #define NPY_INTP_FMT "Ld" +#endif + +/* We can only use C99 formats for npy_int_p if it is the same as intp_t, hence + * the condition on HAVE_UNITPTR_T */ +#if (NPY_USE_C99_FORMATS) == 1 \ + && (defined HAVE_UINTPTR_T) \ + && (defined HAVE_INTTYPES_H) + #include + #undef NPY_INTP_FMT + #define NPY_INTP_FMT PRIdPTR +#endif + +#define NPY_ERR(str) fprintf(stderr, #str); fflush(stderr); +#define NPY_ERR2(str) fprintf(stderr, str); fflush(stderr); + +#define NPY_STRINGIFY(x) #x +#define NPY_TOSTRING(x) NPY_STRINGIFY(x) + + /* Macros to define how array, and dimension/strides data is + allocated. + */ + + /* Data buffer */ +#define PyDataMem_NEW(size) ((char *)malloc(size)) +#define PyDataMem_FREE(ptr) free(ptr) +#define PyDataMem_RENEW(ptr,size) ((char *)realloc(ptr,size)) + +#define NPY_USE_PYMEM 1 + +#if NPY_USE_PYMEM == 1 +#define PyArray_malloc PyMem_Malloc +#define PyArray_free PyMem_Free +#define PyArray_realloc PyMem_Realloc +#else +#define PyArray_malloc malloc +#define PyArray_free free +#define PyArray_realloc realloc +#endif + +/* Dimensions and strides */ +#define PyDimMem_NEW(size) \ + ((npy_intp *)PyArray_malloc(size*sizeof(npy_intp))) + +#define PyDimMem_FREE(ptr) PyArray_free(ptr) + +#define PyDimMem_RENEW(ptr,size) \ + ((npy_intp *)PyArray_realloc(ptr,size*sizeof(npy_intp))) + +/* forward declaration */ +struct _PyArray_Descr; + + /* These must deal with unaligned and swapped data if necessary */ +typedef PyObject * (PyArray_GetItemFunc) (void *, void *); +typedef int (PyArray_SetItemFunc)(PyObject *, void *, void *); + +typedef void (PyArray_CopySwapNFunc)(void *, npy_intp, void *, npy_intp, + npy_intp, int, void *); + +typedef void (PyArray_CopySwapFunc)(void *, void *, int, void *); +typedef npy_bool (PyArray_NonzeroFunc)(void *, void *); + + + /* These assume aligned and notswapped data -- a buffer 
will be + used before or contiguous data will be obtained + */ +typedef int (PyArray_CompareFunc)(const void *, const void *, void *); +typedef int (PyArray_ArgFunc)(void*, npy_intp, npy_intp*, void *); + +typedef void (PyArray_DotFunc)(void *, npy_intp, void *, npy_intp, void *, + npy_intp, void *); + +typedef void (PyArray_VectorUnaryFunc)(void *, void *, npy_intp, void *, + void *); + +/* XXX the ignore argument should be removed next time the API version + is bumped. It used to be the separator. */ +typedef int (PyArray_ScanFunc)(FILE *fp, void *dptr, + char *ignore, struct _PyArray_Descr *); +typedef int (PyArray_FromStrFunc)(char *s, void *dptr, char **endptr, + struct _PyArray_Descr *); + +typedef int (PyArray_FillFunc)(void *, npy_intp, void *); + +typedef int (PyArray_SortFunc)(void *, npy_intp, void *); +typedef int (PyArray_ArgSortFunc)(void *, npy_intp *, npy_intp, void *); + +typedef int (PyArray_FillWithScalarFunc)(void *, npy_intp, void *, void *); + +typedef int (PyArray_ScalarKindFunc)(void *); + +typedef void (PyArray_FastClipFunc)(void *in, npy_intp n_in, void *min, + void *max, void *out); +typedef void (PyArray_FastPutmaskFunc)(void *in, void *mask, npy_intp n_in, + void *values, npy_intp nv); +typedef int (PyArray_FastTakeFunc)(void *dest, void *src, npy_intp *indarray, + npy_intp nindarray, npy_intp n_outer, + npy_intp m_middle, npy_intp nelem, + NPY_CLIPMODE clipmode); + +typedef struct { + npy_intp *ptr; + int len; +} PyArray_Dims; + +typedef struct { + /* The next four functions *cannot* be NULL */ + + /* Functions to get and set items with standard + Python types -- not array scalars */ + PyArray_GetItemFunc *getitem; + PyArray_SetItemFunc *setitem; + + /* Copy and/or swap data. 
Memory areas may not overlap */ + /* Use memmove first if they might */ + PyArray_CopySwapNFunc *copyswapn; + PyArray_CopySwapFunc *copyswap; + + /* Function to compare items */ + /* Can be NULL + */ + PyArray_CompareFunc *compare; + + /* Function to select largest + Can be NULL + */ + PyArray_ArgFunc *argmax; + + /* Function to compute dot product */ + /* Can be NULL */ + PyArray_DotFunc *dotfunc; + + /* Function to scan an ASCII file and + place a single value plus possible separator + Can be NULL + */ + PyArray_ScanFunc *scanfunc; + + /* Function to read a single value from a string */ + /* and adjust the pointer; Can be NULL */ + PyArray_FromStrFunc *fromstr; + + /* Function to determine if data is zero or not */ + /* If NULL a default version is */ + /* used at Registration time. */ + PyArray_NonzeroFunc *nonzero; + + /* Used for arange. Can be NULL.*/ + PyArray_FillFunc *fill; + + /* Function to fill arrays with scalar values + Can be NULL*/ + PyArray_FillWithScalarFunc *fillwithscalar; + + /* Sorting functions; Can be NULL*/ + PyArray_SortFunc *sort[NPY_NSORTS]; + PyArray_ArgSortFunc *argsort[NPY_NSORTS]; + + /* Dictionary of additional casting functions + PyArray_VectorUnaryFuncs + which can be populated to support casting + to other registered types. Can be NULL*/ + PyObject *castdict; + + /* Functions useful for generalizing + the casting rules. Can be NULL; + */ + PyArray_ScalarKindFunc *scalarkind; + int **cancastscalarkindto; + int *cancastto; + + PyArray_FastClipFunc *fastclip; + PyArray_FastPutmaskFunc *fastputmask; + PyArray_FastTakeFunc *fasttake; + + /* A little room to grow --- should use generic function interface for most additions */ + void *pad1; + void *pad2; + void *pad3; + void *pad4; + + /* Functions to cast to all other standard types*/ + /* Can have some NULL entries */ + PyArray_VectorUnaryFunc *cast[NPY_NTYPES]; + +} PyArray_ArrFuncs; + +/* The item must be reference counted when it is inserted or extracted. 
*/ +#define NPY_ITEM_REFCOUNT 0x01 +/* Same as needing REFCOUNT */ +#define NPY_ITEM_HASOBJECT 0x01 +/* Convert to list for pickling */ +#define NPY_LIST_PICKLE 0x02 +/* The item is a POINTER */ +#define NPY_ITEM_IS_POINTER 0x04 +/* memory needs to be initialized for this data-type */ +#define NPY_NEEDS_INIT 0x08 +/* operations need Python C-API so don't give-up thread. */ +#define NPY_NEEDS_PYAPI 0x10 +/* Use f.getitem when extracting elements of this data-type */ +#define NPY_USE_GETITEM 0x20 +/* Use f.setitem when setting creating 0-d array from this data-type.*/ +#define NPY_USE_SETITEM 0x40 +/* define NPY_IS_COMPLEX */ + +/* These are inherited for global data-type if any data-types in the field + have them */ +#define NPY_FROM_FIELDS (NPY_NEEDS_INIT | NPY_LIST_PICKLE | \ + NPY_ITEM_REFCOUNT | NPY_NEEDS_PYAPI) + +#define NPY_OBJECT_DTYPE_FLAGS (NPY_LIST_PICKLE | NPY_USE_GETITEM | \ + NPY_ITEM_IS_POINTER | NPY_ITEM_REFCOUNT | \ + NPY_NEEDS_INIT | NPY_NEEDS_PYAPI) + +#define PyDataType_FLAGCHK(dtype, flag) \ + (((dtype)->flags & (flag)) == (flag)) + +#define PyDataType_REFCHK(dtype) \ + PyDataType_FLAGCHK(dtype, NPY_ITEM_REFCOUNT) + +typedef struct _PyArray_Descr { + PyObject_HEAD + PyTypeObject *typeobj; /* the type object representing an + instance of this type -- should not + be two type_numbers with the same type + object. */ + char kind; /* kind for this type */ + char type; /* unique-character representing this type */ + char byteorder; /* '>' (big), '<' (little), '|' + (not-applicable), or '=' (native). 
*/ + char unused; + int flags; /* flag describing data type */ + int type_num; /* number representing this type */ + int elsize; /* element size for this type */ + int alignment; /* alignment needed for this type */ + struct _arr_descr \ + *subarray; /* Non-NULL if this type is + is an array (C-contiguous) + of some other type + */ + PyObject *fields; /* The fields dictionary for this type */ + /* For statically defined descr this + is always Py_None */ + + PyObject *names; /* An ordered tuple of field names or NULL + if no fields are defined */ + + PyArray_ArrFuncs *f; /* a table of functions specific for each + basic data descriptor */ + + PyObject *metadata; /* Metadata about this dtype */ +} PyArray_Descr; + +typedef struct _arr_descr { + PyArray_Descr *base; + PyObject *shape; /* a tuple */ +} PyArray_ArrayDescr; + +/* + The main array object structure. It is recommended to use the macros + defined below (PyArray_DATA and friends) access fields here, instead + of the members themselves. 
+ */ + +typedef struct PyArrayObject { + PyObject_HEAD + char *data; /* pointer to raw data buffer */ + int nd; /* number of dimensions, also called ndim */ + npy_intp *dimensions; /* size in each dimension */ + npy_intp *strides; /* bytes to jump to get to the + next element in each dimension */ + PyObject *base; /* This object should be decref'd + upon deletion of array */ + /* For views it points to the original array */ + /* For creation from buffer object it points + to an object that shold be decref'd on + deletion */ + /* For UPDATEIFCOPY flag this is an array + to-be-updated upon deletion of this one */ + PyArray_Descr *descr; /* Pointer to type structure */ + int flags; /* Flags describing array -- see below*/ + PyObject *weakreflist; /* For weakreferences */ +} PyArrayObject; + +#define NPY_AO PyArrayObject + +#define fortran fortran_ /* For some compilers */ + +/* Array Flags Object */ +typedef struct PyArrayFlagsObject { + PyObject_HEAD + PyObject *arr; + int flags; +} PyArrayFlagsObject; + +/* Mirrors buffer object to ptr */ + +typedef struct { + PyObject_HEAD + PyObject *base; + void *ptr; + npy_intp len; + int flags; +} PyArray_Chunk; + + +typedef struct { + NPY_DATETIMEUNIT base; + int num; + int den; /* Converted to 1 on input for now -- an input-only mechanism */ + int events; +} PyArray_DatetimeMetaData; + +typedef struct { + npy_longlong year; + int month, day, hour, min, sec, us, ps, as; +} npy_datetimestruct; + +typedef struct { + npy_longlong day; + int sec, us, ps, as; +} npy_timedeltastruct; + +#if PY_VERSION_HEX >= 0x02070000 +#define PyDataType_GetDatetimeMetaData(descr) \ + ((descr->metadata == NULL) ? NULL : \ + ((PyArray_DatetimeMetaData *)(PyCapsule_GetPointer( \ + PyDict_GetItemString( \ + descr->metadata, NPY_METADATA_DTSTR), NULL)))) +#else +#define PyDataType_GetDatetimeMetaData(descr) \ + ((descr->metadata == NULL) ? 
NULL : \ + ((PyArray_DatetimeMetaData *)(PyCObject_AsVoidPtr( \ + PyDict_GetItemString(descr->metadata, NPY_METADATA_DTSTR))))) +#endif + +typedef int (PyArray_FinalizeFunc)(PyArrayObject *, PyObject *); + +/* Means c-style contiguous (last index varies the fastest). The + data elements right after each other. */ +#define NPY_CONTIGUOUS 0x0001 +/* set if array is a contiguous Fortran array: the first index + varies the fastest in memory (strides array is reverse of + C-contiguous array)*/ +#define NPY_FORTRAN 0x0002 + +#define NPY_C_CONTIGUOUS NPY_CONTIGUOUS +#define NPY_F_CONTIGUOUS NPY_FORTRAN + +/* + Note: all 0-d arrays are CONTIGUOUS and FORTRAN contiguous. If a + 1-d array is CONTIGUOUS it is also FORTRAN contiguous +*/ + +/* If set, the array owns the data: it will be free'd when the array + is deleted. */ +#define NPY_OWNDATA 0x0004 + +/* An array never has the next four set; they're only used as parameter + flags to the the various FromAny functions */ + +/* Cause a cast to occur regardless of whether or not it is safe. */ +#define NPY_FORCECAST 0x0010 + +/* Always copy the array. Returned arrays are always CONTIGUOUS, ALIGNED, + and WRITEABLE. */ +#define NPY_ENSURECOPY 0x0020 + +/* Make sure the returned array is a base-class ndarray */ +#define NPY_ENSUREARRAY 0x0040 + +/* Make sure that the strides are in units of the element size + Needed for some operations with record-arrays. 
+*/ +#define NPY_ELEMENTSTRIDES 0x0080 + +/* Array data is aligned on the appropiate memory address for the + type stored according to how the compiler would align things + (e.g., an array of integers (4 bytes each) starts on + a memory address that's a multiple of 4) */ +#define NPY_ALIGNED 0x0100 +/* Array data has the native endianness */ +#define NPY_NOTSWAPPED 0x0200 +/* Array data is writeable */ +#define NPY_WRITEABLE 0x0400 +/* If this flag is set, then base contains a pointer to an array of + the same size that should be updated with the current contents of + this array when this array is deallocated +*/ +#define NPY_UPDATEIFCOPY 0x1000 + +/* This flag is for the array interface */ +#define NPY_ARR_HAS_DESCR 0x0800 + + +#define NPY_BEHAVED (NPY_ALIGNED | NPY_WRITEABLE) +#define NPY_BEHAVED_NS (NPY_ALIGNED | NPY_WRITEABLE | NPY_NOTSWAPPED) +#define NPY_CARRAY (NPY_CONTIGUOUS | NPY_BEHAVED) +#define NPY_CARRAY_RO (NPY_CONTIGUOUS | NPY_ALIGNED) +#define NPY_FARRAY (NPY_FORTRAN | NPY_BEHAVED) +#define NPY_FARRAY_RO (NPY_FORTRAN | NPY_ALIGNED) +#define NPY_DEFAULT NPY_CARRAY +#define NPY_IN_ARRAY NPY_CARRAY_RO +#define NPY_OUT_ARRAY NPY_CARRAY +#define NPY_INOUT_ARRAY (NPY_CARRAY | NPY_UPDATEIFCOPY) +#define NPY_IN_FARRAY NPY_FARRAY_RO +#define NPY_OUT_FARRAY NPY_FARRAY +#define NPY_INOUT_FARRAY (NPY_FARRAY | NPY_UPDATEIFCOPY) + +#define NPY_UPDATE_ALL (NPY_CONTIGUOUS | NPY_FORTRAN | NPY_ALIGNED) + + +/* Size of internal buffers used for alignment */ +/* Make BUFSIZE a multiple of sizeof(cdouble) -- ususally 16 */ +/* So that ufunc buffers are aligned */ +#define NPY_MIN_BUFSIZE ((int)sizeof(cdouble)) +#define NPY_MAX_BUFSIZE (((int)sizeof(cdouble))*1000000) +#define NPY_BUFSIZE 10000 +/* #define NPY_BUFSIZE 80*/ + +#define PyArray_MAX(a,b) (((a)>(b))?(a):(b)) +#define PyArray_MIN(a,b) (((a)<(b))?(a):(b)) +#define PyArray_CLT(p,q) ((((p).real==(q).real) ? ((p).imag < (q).imag) : \ + ((p).real < (q).real))) +#define PyArray_CGT(p,q) ((((p).real==(q).real) ? 
((p).imag > (q).imag) : \ + ((p).real > (q).real))) +#define PyArray_CLE(p,q) ((((p).real==(q).real) ? ((p).imag <= (q).imag) : \ + ((p).real <= (q).real))) +#define PyArray_CGE(p,q) ((((p).real==(q).real) ? ((p).imag >= (q).imag) : \ + ((p).real >= (q).real))) +#define PyArray_CEQ(p,q) (((p).real==(q).real) && ((p).imag == (q).imag)) +#define PyArray_CNE(p,q) (((p).real!=(q).real) || ((p).imag != (q).imag)) + +/* + * C API: consists of Macros and functions. The MACROS are defined here. + */ + + +#define PyArray_CHKFLAGS(m, FLAGS) \ + ((((PyArrayObject *)(m))->flags & (FLAGS)) == (FLAGS)) + +#define PyArray_ISCONTIGUOUS(m) PyArray_CHKFLAGS(m, NPY_CONTIGUOUS) +#define PyArray_ISWRITEABLE(m) PyArray_CHKFLAGS(m, NPY_WRITEABLE) +#define PyArray_ISALIGNED(m) PyArray_CHKFLAGS(m, NPY_ALIGNED) + + +#if NPY_ALLOW_THREADS +#define NPY_BEGIN_ALLOW_THREADS Py_BEGIN_ALLOW_THREADS +#define NPY_END_ALLOW_THREADS Py_END_ALLOW_THREADS +#define NPY_BEGIN_THREADS_DEF PyThreadState *_save=NULL; +#define NPY_BEGIN_THREADS _save = PyEval_SaveThread(); +#define NPY_END_THREADS do {if (_save) PyEval_RestoreThread(_save);} while (0); + +#define NPY_BEGIN_THREADS_DESCR(dtype) \ + do {if (!(PyDataType_FLAGCHK(dtype, NPY_NEEDS_PYAPI))) \ + NPY_BEGIN_THREADS;} while (0); + +#define NPY_END_THREADS_DESCR(dtype) \ + do {if (!(PyDataType_FLAGCHK(dtype, NPY_NEEDS_PYAPI))) \ + NPY_END_THREADS; } while (0); + +#define NPY_ALLOW_C_API_DEF PyGILState_STATE __save__; +#define NPY_ALLOW_C_API __save__ = PyGILState_Ensure(); +#define NPY_DISABLE_C_API PyGILState_Release(__save__); +#else +#define NPY_BEGIN_ALLOW_THREADS +#define NPY_END_ALLOW_THREADS +#define NPY_BEGIN_THREADS_DEF +#define NPY_BEGIN_THREADS +#define NPY_END_THREADS +#define NPY_BEGIN_THREADS_DESCR(dtype) +#define NPY_END_THREADS_DESCR(dtype) +#define NPY_ALLOW_C_API_DEF +#define NPY_ALLOW_C_API +#define NPY_DISABLE_C_API +#endif + +/***************************** + * Basic iterator object + *****************************/ + +/* FWD 
declaration */ +typedef struct PyArrayIterObject_tag PyArrayIterObject; + +/* type of the function which translates a set of coordinates to a pointer to + * the data */ +typedef char* (*npy_iter_get_dataptr_t)(PyArrayIterObject* iter, npy_intp*); + +struct PyArrayIterObject_tag { + PyObject_HEAD + int nd_m1; /* number of dimensions - 1 */ + npy_intp index, size; + npy_intp coordinates[NPY_MAXDIMS];/* N-dimensional loop */ + npy_intp dims_m1[NPY_MAXDIMS]; /* ao->dimensions - 1 */ + npy_intp strides[NPY_MAXDIMS]; /* ao->strides or fake */ + npy_intp backstrides[NPY_MAXDIMS];/* how far to jump back */ + npy_intp factors[NPY_MAXDIMS]; /* shape factors */ + PyArrayObject *ao; + char *dataptr; /* pointer to current item*/ + npy_bool contiguous; + + npy_intp bounds[NPY_MAXDIMS][2]; + npy_intp limits[NPY_MAXDIMS][2]; + npy_intp limits_sizes[NPY_MAXDIMS]; + npy_iter_get_dataptr_t translate; +} ; + + +/* Iterator API */ +#define PyArrayIter_Check(op) PyObject_TypeCheck(op, &PyArrayIter_Type) + +#define _PyAIT(it) ((PyArrayIterObject *)(it)) +#define PyArray_ITER_RESET(it) { \ + _PyAIT(it)->index = 0; \ + _PyAIT(it)->dataptr = _PyAIT(it)->ao->data; \ + memset(_PyAIT(it)->coordinates, 0, \ + (_PyAIT(it)->nd_m1+1)*sizeof(npy_intp)); \ +} + +#define _PyArray_ITER_NEXT1(it) { \ + (it)->dataptr += _PyAIT(it)->strides[0]; \ + (it)->coordinates[0]++; \ +} + +#define _PyArray_ITER_NEXT2(it) { \ + if ((it)->coordinates[1] < (it)->dims_m1[1]) { \ + (it)->coordinates[1]++; \ + (it)->dataptr += (it)->strides[1]; \ + } \ + else { \ + (it)->coordinates[1] = 0; \ + (it)->coordinates[0]++; \ + (it)->dataptr += (it)->strides[0] - \ + (it)->backstrides[1]; \ + } \ +} + +#define _PyArray_ITER_NEXT3(it) { \ + if ((it)->coordinates[2] < (it)->dims_m1[2]) { \ + (it)->coordinates[2]++; \ + (it)->dataptr += (it)->strides[2]; \ + } \ + else { \ + (it)->coordinates[2] = 0; \ + (it)->dataptr -= (it)->backstrides[2]; \ + if ((it)->coordinates[1] < (it)->dims_m1[1]) { \ + (it)->coordinates[1]++; \ + 
(it)->dataptr += (it)->strides[1]; \ + } \ + else { \ + (it)->coordinates[1] = 0; \ + (it)->coordinates[0]++; \ + (it)->dataptr += (it)->strides[0] - \ + (it)->backstrides[1]; \ + } \ + } \ +} + +#define PyArray_ITER_NEXT(it) { \ + _PyAIT(it)->index++; \ + if (_PyAIT(it)->nd_m1 == 0) { \ + _PyArray_ITER_NEXT1(_PyAIT(it)); \ + } \ + else if (_PyAIT(it)->contiguous) \ + _PyAIT(it)->dataptr += _PyAIT(it)->ao->descr->elsize; \ + else if (_PyAIT(it)->nd_m1 == 1) { \ + _PyArray_ITER_NEXT2(_PyAIT(it)); \ + } \ + else { \ + int __npy_i; \ + for (__npy_i=_PyAIT(it)->nd_m1; __npy_i >= 0; __npy_i--) { \ + if (_PyAIT(it)->coordinates[__npy_i] < \ + _PyAIT(it)->dims_m1[__npy_i]) { \ + _PyAIT(it)->coordinates[__npy_i]++; \ + _PyAIT(it)->dataptr += \ + _PyAIT(it)->strides[__npy_i]; \ + break; \ + } \ + else { \ + _PyAIT(it)->coordinates[__npy_i] = 0; \ + _PyAIT(it)->dataptr -= \ + _PyAIT(it)->backstrides[__npy_i]; \ + } \ + } \ + } \ +} + +#define PyArray_ITER_GOTO(it, destination) { \ + int __npy_i; \ + _PyAIT(it)->index = 0; \ + _PyAIT(it)->dataptr = _PyAIT(it)->ao->data; \ + for (__npy_i = _PyAIT(it)->nd_m1; __npy_i>=0; __npy_i--) { \ + if (destination[__npy_i] < 0) { \ + destination[__npy_i] += \ + _PyAIT(it)->dims_m1[__npy_i]+1; \ + } \ + _PyAIT(it)->dataptr += destination[__npy_i] * \ + _PyAIT(it)->strides[__npy_i]; \ + _PyAIT(it)->coordinates[__npy_i] = \ + destination[__npy_i]; \ + _PyAIT(it)->index += destination[__npy_i] * \ + ( __npy_i==_PyAIT(it)->nd_m1 ? 
1 : \ + _PyAIT(it)->dims_m1[__npy_i+1]+1) ; \ + } \ +} + +#define PyArray_ITER_GOTO1D(it, ind) { \ + int __npy_i; \ + npy_intp __npy_ind = (npy_intp) (ind); \ + if (__npy_ind < 0) __npy_ind += _PyAIT(it)->size; \ + _PyAIT(it)->index = __npy_ind; \ + if (_PyAIT(it)->nd_m1 == 0) { \ + _PyAIT(it)->dataptr = _PyAIT(it)->ao->data + \ + __npy_ind * _PyAIT(it)->strides[0]; \ + } \ + else if (_PyAIT(it)->contiguous) \ + _PyAIT(it)->dataptr = _PyAIT(it)->ao->data + \ + __npy_ind * _PyAIT(it)->ao->descr->elsize; \ + else { \ + _PyAIT(it)->dataptr = _PyAIT(it)->ao->data; \ + for (__npy_i = 0; __npy_i<=_PyAIT(it)->nd_m1; \ + __npy_i++) { \ + _PyAIT(it)->dataptr += \ + (__npy_ind / _PyAIT(it)->factors[__npy_i]) \ + * _PyAIT(it)->strides[__npy_i]; \ + __npy_ind %= _PyAIT(it)->factors[__npy_i]; \ + } \ + } \ +} + +#define PyArray_ITER_DATA(it) ((void *)(_PyAIT(it)->dataptr)) + +#define PyArray_ITER_NOTDONE(it) (_PyAIT(it)->index < _PyAIT(it)->size) + + +/* + Any object passed to PyArray_Broadcast must be binary compatible with + this structure. 
+*/ + +typedef struct { + PyObject_HEAD + int numiter; /* number of iters */ + npy_intp size; /* broadcasted size */ + npy_intp index; /* current index */ + int nd; /* number of dims */ + npy_intp dimensions[NPY_MAXDIMS]; /* dimensions */ + PyArrayIterObject *iters[NPY_MAXARGS]; /* iterators */ +} PyArrayMultiIterObject; + +#define _PyMIT(m) ((PyArrayMultiIterObject *)(m)) +#define PyArray_MultiIter_RESET(multi) { \ + int __npy_mi; \ + _PyMIT(multi)->index = 0; \ + for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \ + PyArray_ITER_RESET(_PyMIT(multi)->iters[__npy_mi]); \ + } \ +} + +#define PyArray_MultiIter_NEXT(multi) { \ + int __npy_mi; \ + _PyMIT(multi)->index++; \ + for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \ + PyArray_ITER_NEXT(_PyMIT(multi)->iters[__npy_mi]); \ + } \ +} + +#define PyArray_MultiIter_GOTO(multi, dest) { \ + int __npy_mi; \ + for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \ + PyArray_ITER_GOTO(_PyMIT(multi)->iters[__npy_mi], dest); \ + } \ + _PyMIT(multi)->index = _PyMIT(multi)->iters[0]->index; \ +} + +#define PyArray_MultiIter_GOTO1D(multi, ind) { \ + int __npy_mi; \ + for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \ + PyArray_ITER_GOTO1D(_PyMIT(multi)->iters[__npy_mi], ind); \ + } \ + _PyMIT(multi)->index = _PyMIT(multi)->iters[0]->index; \ +} + +#define PyArray_MultiIter_DATA(multi, i) \ + ((void *)(_PyMIT(multi)->iters[i]->dataptr)) + +#define PyArray_MultiIter_NEXTi(multi, i) \ + PyArray_ITER_NEXT(_PyMIT(multi)->iters[i]) + +#define PyArray_MultiIter_NOTDONE(multi) \ + (_PyMIT(multi)->index < _PyMIT(multi)->size) + +/* Store the information needed for fancy-indexing over an array */ + +typedef struct { + PyObject_HEAD + /* Multi-iterator portion --- needs to be present in this order to + work with PyArray_Broadcast */ + + int numiter; /* number of index-array + iterators */ + npy_intp size; /* size of broadcasted + result */ + npy_intp index; /* current index */ 
+ int nd; /* number of dims */ + npy_intp dimensions[NPY_MAXDIMS]; /* dimensions */ + PyArrayIterObject *iters[NPY_MAXDIMS]; /* index object + iterators */ + PyArrayIterObject *ait; /* flat Iterator for + underlying array */ + + /* flat iterator for subspace (when numiter < nd) */ + PyArrayIterObject *subspace; + + /* if subspace iteration, then this is the array of + axes in the underlying array represented by the + index objects */ + int iteraxes[NPY_MAXDIMS]; + /* if subspace iteration, the these are the coordinates + to the start of the subspace. + */ + npy_intp bscoord[NPY_MAXDIMS]; + + PyObject *indexobj; /* creating obj */ + int consec; + char *dataptr; + +} PyArrayMapIterObject; + +enum { + NPY_NEIGHBORHOOD_ITER_ZERO_PADDING, + NPY_NEIGHBORHOOD_ITER_ONE_PADDING, + NPY_NEIGHBORHOOD_ITER_CONSTANT_PADDING, + NPY_NEIGHBORHOOD_ITER_CIRCULAR_PADDING, + NPY_NEIGHBORHOOD_ITER_MIRROR_PADDING +}; + +typedef struct { + PyObject_HEAD + + /* + * PyArrayIterObject part: keep this in this exact order + */ + int nd_m1; /* number of dimensions - 1 */ + npy_intp index, size; + npy_intp coordinates[NPY_MAXDIMS];/* N-dimensional loop */ + npy_intp dims_m1[NPY_MAXDIMS]; /* ao->dimensions - 1 */ + npy_intp strides[NPY_MAXDIMS]; /* ao->strides or fake */ + npy_intp backstrides[NPY_MAXDIMS];/* how far to jump back */ + npy_intp factors[NPY_MAXDIMS]; /* shape factors */ + PyArrayObject *ao; + char *dataptr; /* pointer to current item*/ + npy_bool contiguous; + + npy_intp bounds[NPY_MAXDIMS][2]; + npy_intp limits[NPY_MAXDIMS][2]; + npy_intp limits_sizes[NPY_MAXDIMS]; + npy_iter_get_dataptr_t translate; + + /* + * New members + */ + npy_intp nd; + + /* Dimensions is the dimension of the array */ + npy_intp dimensions[NPY_MAXDIMS]; + + /* Neighborhood points coordinates are computed relatively to the point pointed + * by _internal_iter */ + PyArrayIterObject* _internal_iter; + /* To keep a reference to the representation of the constant value for + * constant padding */ + char* 
constant; + + int mode; +} PyArrayNeighborhoodIterObject; + +/* + * Neighborhood iterator API + */ + +/* General: those work for any mode */ +static NPY_INLINE int +PyArrayNeighborhoodIter_Reset(PyArrayNeighborhoodIterObject* iter); +static NPY_INLINE int +PyArrayNeighborhoodIter_Next(PyArrayNeighborhoodIterObject* iter); +#if 0 +static NPY_INLINE int +PyArrayNeighborhoodIter_Next2D(PyArrayNeighborhoodIterObject* iter); +#endif + +/* Include inline implementations - functions defined there are not considered + * public API */ +#define _NPY_INCLUDE_NEIGHBORHOOD_IMP +#include "_neighborhood_iterator_imp.h" +#undef _NPY_INCLUDE_NEIGHBORHOOD_IMP + +/* The default array type + */ +#define NPY_DEFAULT_TYPE NPY_DOUBLE +#define PyArray_DEFAULT NPY_DEFAULT_TYPE +/* All sorts of useful ways to look into a PyArrayObject. + These are the recommended over casting to PyArrayObject and accessing + the members directly. + */ + +#define PyArray_NDIM(obj) (((PyArrayObject *)(obj))->nd) +#define PyArray_ISONESEGMENT(m) (PyArray_NDIM(m) == 0 || \ + PyArray_CHKFLAGS(m, NPY_CONTIGUOUS) || \ + PyArray_CHKFLAGS(m, NPY_FORTRAN)) + +#define PyArray_ISFORTRAN(m) (PyArray_CHKFLAGS(m, NPY_FORTRAN) && \ + (PyArray_NDIM(m) > 1)) + +#define PyArray_FORTRAN_IF(m) ((PyArray_CHKFLAGS(m, NPY_FORTRAN) ? 
\ + NPY_FORTRAN : 0)) + +#define FORTRAN_IF PyArray_FORTRAN_IF +#define PyArray_DATA(obj) ((void *)(((PyArrayObject *)(obj))->data)) +#define PyArray_BYTES(obj) (((PyArrayObject *)(obj))->data) +#define PyArray_DIMS(obj) (((PyArrayObject *)(obj))->dimensions) +#define PyArray_STRIDES(obj) (((PyArrayObject *)(obj))->strides) +#define PyArray_DIM(obj,n) (PyArray_DIMS(obj)[n]) +#define PyArray_STRIDE(obj,n) (PyArray_STRIDES(obj)[n]) +#define PyArray_BASE(obj) (((PyArrayObject *)(obj))->base) +#define PyArray_DESCR(obj) (((PyArrayObject *)(obj))->descr) +#define PyArray_FLAGS(obj) (((PyArrayObject *)(obj))->flags) +#define PyArray_ITEMSIZE(obj) (((PyArrayObject *)(obj))->descr->elsize) +#define PyArray_TYPE(obj) (((PyArrayObject *)(obj))->descr->type_num) + +#define PyArray_GETITEM(obj,itemptr) \ + ((PyArrayObject *)(obj))->descr->f->getitem((char *)(itemptr), \ + (PyArrayObject *)(obj)) + +#define PyArray_SETITEM(obj,itemptr,v) \ + ((PyArrayObject *)(obj))->descr->f->setitem((PyObject *)(v), \ + (char *)(itemptr), \ + (PyArrayObject *)(obj)) + + +#define PyTypeNum_ISBOOL(type) ((type) == NPY_BOOL) + +#define PyTypeNum_ISUNSIGNED(type) (((type) == NPY_UBYTE) || \ + ((type) == NPY_USHORT) || \ + ((type) == NPY_UINT) || \ + ((type) == NPY_ULONG) || \ + ((type) == NPY_ULONGLONG)) + +#define PyTypeNum_ISSIGNED(type) (((type) == NPY_BYTE) || \ + ((type) == NPY_SHORT) || \ + ((type) == NPY_INT) || \ + ((type) == NPY_LONG) || \ + ((type) == NPY_LONGLONG)) + +#define PyTypeNum_ISINTEGER(type) (((type) >= NPY_BYTE) && \ + ((type) <= NPY_ULONGLONG)) + +#define PyTypeNum_ISFLOAT(type) (((type) >= NPY_FLOAT) && \ + ((type) <= NPY_LONGDOUBLE)) + +#define PyTypeNum_ISNUMBER(type) ((type) <= NPY_CLONGDOUBLE) + +#define PyTypeNum_ISSTRING(type) (((type) == NPY_STRING) || \ + ((type) == NPY_UNICODE)) + +#define PyTypeNum_ISCOMPLEX(type) (((type) >= NPY_CFLOAT) && \ + ((type) <= NPY_CLONGDOUBLE)) + +#define PyTypeNum_ISPYTHON(type) (((type) == NPY_LONG) || \ + ((type) == NPY_DOUBLE) || 
\ + ((type) == NPY_CDOUBLE) || \ + ((type) == NPY_BOOL) || \ + ((type) == NPY_OBJECT )) + +#define PyTypeNum_ISFLEXIBLE(type) (((type) >=NPY_STRING) && \ + ((type) <=NPY_VOID)) + +#define PyTypeNum_ISDATETIME(type) (((type) >=NPY_DATETIME) && \ + ((type) <=NPY_TIMEDELTA)) + +#define PyTypeNum_ISUSERDEF(type) (((type) >= NPY_USERDEF) && \ + ((type) < NPY_USERDEF+ \ + NPY_NUMUSERTYPES)) + +#define PyTypeNum_ISEXTENDED(type) (PyTypeNum_ISFLEXIBLE(type) || \ + PyTypeNum_ISUSERDEF(type)) + +#define PyTypeNum_ISOBJECT(type) ((type) == NPY_OBJECT) + + +#define PyDataType_ISBOOL(obj) PyTypeNum_ISBOOL(_PyADt(obj)) +#define PyDataType_ISUNSIGNED(obj) PyTypeNum_ISUNSIGNED(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISSIGNED(obj) PyTypeNum_ISSIGNED(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISINTEGER(obj) PyTypeNum_ISINTEGER(((PyArray_Descr*)(obj))->type_num ) +#define PyDataType_ISFLOAT(obj) PyTypeNum_ISFLOAT(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISNUMBER(obj) PyTypeNum_ISNUMBER(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISSTRING(obj) PyTypeNum_ISSTRING(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISCOMPLEX(obj) PyTypeNum_ISCOMPLEX(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISPYTHON(obj) PyTypeNum_ISPYTHON(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISFLEXIBLE(obj) PyTypeNum_ISFLEXIBLE(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISDATETIME(obj) PyTypeNum_ISDATETIME(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISUSERDEF(obj) PyTypeNum_ISUSERDEF(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISEXTENDED(obj) PyTypeNum_ISEXTENDED(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISOBJECT(obj) PyTypeNum_ISOBJECT(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_HASFIELDS(obj) (((PyArray_Descr *)(obj))->names != NULL) + +#define PyArray_ISBOOL(obj) PyTypeNum_ISBOOL(PyArray_TYPE(obj)) +#define PyArray_ISUNSIGNED(obj) PyTypeNum_ISUNSIGNED(PyArray_TYPE(obj)) 
+#define PyArray_ISSIGNED(obj) PyTypeNum_ISSIGNED(PyArray_TYPE(obj)) +#define PyArray_ISINTEGER(obj) PyTypeNum_ISINTEGER(PyArray_TYPE(obj)) +#define PyArray_ISFLOAT(obj) PyTypeNum_ISFLOAT(PyArray_TYPE(obj)) +#define PyArray_ISNUMBER(obj) PyTypeNum_ISNUMBER(PyArray_TYPE(obj)) +#define PyArray_ISSTRING(obj) PyTypeNum_ISSTRING(PyArray_TYPE(obj)) +#define PyArray_ISCOMPLEX(obj) PyTypeNum_ISCOMPLEX(PyArray_TYPE(obj)) +#define PyArray_ISPYTHON(obj) PyTypeNum_ISPYTHON(PyArray_TYPE(obj)) +#define PyArray_ISFLEXIBLE(obj) PyTypeNum_ISFLEXIBLE(PyArray_TYPE(obj)) +#define PyArray_ISDATETIME(obj) PyTypeNum_ISDATETIME(PyArray_TYPE(obj)) +#define PyArray_ISUSERDEF(obj) PyTypeNum_ISUSERDEF(PyArray_TYPE(obj)) +#define PyArray_ISEXTENDED(obj) PyTypeNum_ISEXTENDED(PyArray_TYPE(obj)) +#define PyArray_ISOBJECT(obj) PyTypeNum_ISOBJECT(PyArray_TYPE(obj)) +#define PyArray_HASFIELDS(obj) PyDataType_HASFIELDS(PyArray_DESCR(obj)) + + /* FIXME: This should check for a flag on the data-type + that states whether or not it is variable length. + Because the ISFLEXIBLE check is hard-coded to the + built-in data-types. 
+ */ +#define PyArray_ISVARIABLE(obj) PyTypeNum_ISFLEXIBLE(PyArray_TYPE(obj)) + +#define PyArray_SAFEALIGNEDCOPY(obj) (PyArray_ISALIGNED(obj) && !PyArray_ISVARIABLE(obj)) + + +#define NPY_LITTLE '<' +#define NPY_BIG '>' +#define NPY_NATIVE '=' +#define NPY_SWAP 's' +#define NPY_IGNORE '|' + +#if NPY_BYTE_ORDER == NPY_BIG_ENDIAN +#define NPY_NATBYTE NPY_BIG +#define NPY_OPPBYTE NPY_LITTLE +#else +#define NPY_NATBYTE NPY_LITTLE +#define NPY_OPPBYTE NPY_BIG +#endif + +#define PyArray_ISNBO(arg) ((arg) != NPY_OPPBYTE) +#define PyArray_IsNativeByteOrder PyArray_ISNBO +#define PyArray_ISNOTSWAPPED(m) PyArray_ISNBO(PyArray_DESCR(m)->byteorder) +#define PyArray_ISBYTESWAPPED(m) (!PyArray_ISNOTSWAPPED(m)) + +#define PyArray_FLAGSWAP(m, flags) (PyArray_CHKFLAGS(m, flags) && \ + PyArray_ISNOTSWAPPED(m)) + +#define PyArray_ISCARRAY(m) PyArray_FLAGSWAP(m, NPY_CARRAY) +#define PyArray_ISCARRAY_RO(m) PyArray_FLAGSWAP(m, NPY_CARRAY_RO) +#define PyArray_ISFARRAY(m) PyArray_FLAGSWAP(m, NPY_FARRAY) +#define PyArray_ISFARRAY_RO(m) PyArray_FLAGSWAP(m, NPY_FARRAY_RO) +#define PyArray_ISBEHAVED(m) PyArray_FLAGSWAP(m, NPY_BEHAVED) +#define PyArray_ISBEHAVED_RO(m) PyArray_FLAGSWAP(m, NPY_ALIGNED) + + +#define PyDataType_ISNOTSWAPPED(d) PyArray_ISNBO(((PyArray_Descr *)(d))->byteorder) +#define PyDataType_ISBYTESWAPPED(d) (!PyDataType_ISNOTSWAPPED(d)) + + +/* This is the form of the struct that's returned pointed by the + PyCObject attribute of an array __array_struct__. See + http://numpy.scipy.org/array_interface.shtml for the full + documentation. */ +typedef struct { + int two; /* contains the integer 2 as a sanity check */ + int nd; /* number of dimensions */ + char typekind; /* kind in array --- character code of typestr */ + int itemsize; /* size of each element */ + int flags; /* how should be data interpreted. Valid + flags are CONTIGUOUS (1), FORTRAN (2), + ALIGNED (0x100), NOTSWAPPED (0x200), and + WRITEABLE (0x400). 
+ ARR_HAS_DESCR (0x800) states that arrdescr + field is present in structure */ + npy_intp *shape; /* A length-nd array of shape information */ + npy_intp *strides; /* A length-nd array of stride information */ + void *data; /* A pointer to the first element of the array */ + PyObject *descr; /* A list of fields or NULL (ignored if flags + does not have ARR_HAS_DESCR flag set) */ +} PyArrayInterface; + +#endif /* NPY_ARRAYTYPES_H */ From numpy-svn at scipy.org Sat May 8 20:04:58 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sat, 8 May 2010 19:04:58 -0500 (CDT) Subject: [Numpy-svn] r8398 - trunk/numpy/core/include/numpy Message-ID: <20100509000458.31D4239CAE7@scipy.org> Author: charris Date: 2010-05-08 19:04:58 -0500 (Sat, 08 May 2010) New Revision: 8398 Modified: trunk/numpy/core/include/numpy/ndarraytypes.h Log: ENH: Make guard in ndarraytypes.h match the file name. Modified: trunk/numpy/core/include/numpy/ndarraytypes.h =================================================================== --- trunk/numpy/core/include/numpy/ndarraytypes.h 2010-05-09 00:04:55 UTC (rev 8397) +++ trunk/numpy/core/include/numpy/ndarraytypes.h 2010-05-09 00:04:58 UTC (rev 8398) @@ -1,5 +1,5 @@ -#ifndef NPY_ARRAYTYPES_H -#define NPY_ARRAYTYPES_H +#ifndef NDARRAYTYPES_H +#define NDARRAYTYPES_H /* This is auto-generated by the installer */ #include "numpyconfig.h" From numpy-svn at scipy.org Sat May 8 21:11:42 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sat, 8 May 2010 20:11:42 -0500 (CDT) Subject: [Numpy-svn] r8399 - trunk/numpy/core/src/multiarray Message-ID: <20100509011142.C765539CAFE@scipy.org> Author: charris Date: 2010-05-08 20:11:42 -0500 (Sat, 08 May 2010) New Revision: 8399 Modified: trunk/numpy/core/src/multiarray/datetime.c Log: ENH: Remove obsolete keyword 'register'. 
Modified: trunk/numpy/core/src/multiarray/datetime.c =================================================================== --- trunk/numpy/core/src/multiarray/datetime.c 2010-05-09 00:04:58 UTC (rev 8398) +++ trunk/numpy/core/src/multiarray/datetime.c 2010-05-09 01:11:42 UTC (rev 8399) @@ -59,7 +59,7 @@ /* Return 1/0 iff year points to a leap year in calendar. */ static int -is_leapyear(register long year) +is_leapyear(long year) { return (year % 4 == 0) && ((year % 100 != 0) || (year % 400 == 0)); } @@ -88,7 +88,7 @@ * 31.12.(year-1) since 31.12.1969 in the proleptic Gregorian calendar. */ static npy_longlong -year_offset(register npy_longlong year) +year_offset(npy_longlong year) { /* Note that 477 == 1969/4 - 1969/100 + 1969/400 */ year--; @@ -173,7 +173,7 @@ days_to_ymdstruct(npy_datetime dlong) { ymdstruct ymd; - register long year; + long year; npy_longlong yearoffset; int leap, dayoffset; int month = 1, day = 1; From numpy-svn at scipy.org Sun May 9 08:19:06 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sun, 9 May 2010 07:19:06 -0500 (CDT) Subject: [Numpy-svn] r8400 - trunk/numpy/core/tests Message-ID: <20100509121906.913D239CAF6@scipy.org> Author: ptvirtan Date: 2010-05-09 07:19:06 -0500 (Sun, 09 May 2010) New Revision: 8400 Modified: trunk/numpy/core/tests/test_datetime.py Log: BUG: core: fix a datetime test case (fixes #1468) Modified: trunk/numpy/core/tests/test_datetime.py =================================================================== --- trunk/numpy/core/tests/test_datetime.py 2010-05-09 01:11:42 UTC (rev 8399) +++ trunk/numpy/core/tests/test_datetime.py 2010-05-09 12:19:06 UTC (rev 8400) @@ -59,7 +59,7 @@ def test_creation_overflow(self): date = '1980-03-23 20:00:00' - timesteps = np.array([date], dtype='datetime64[s]')[0].astype(int) + timesteps = np.array([date], dtype='datetime64[s]')[0].astype(np.int64) for unit in ['ms', 'us', 'ns']: timesteps *= 1000 x = np.array([date], dtype='datetime64[%s]' % unit) @@ -67,7 +67,7 @@ 
 assert_equal(timesteps, x[0].astype(np.int64), err_msg='Datetime conversion error for unit %s' % unit) - assert_equal(x[0].astype(int), 322689600000000000) + assert_equal(x[0].astype(np.int64), 322689600000000000) if __name__ == "__main__": run_module_suite() From numpy-svn at scipy.org Sun May 9 11:45:43 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sun, 9 May 2010 10:45:43 -0500 (CDT) Subject: [Numpy-svn] r8401 - trunk/doc/release Message-ID: <20100509154543.73FDA39CAF2@scipy.org> Author: charris Date: 2010-05-09 10:45:43 -0500 (Sun, 09 May 2010) New Revision: 8401 Modified: trunk/doc/release/2.0.0-notes.rst Log: Add better summary of ndarraytype.h to release notes. Modified: trunk/doc/release/2.0.0-notes.rst =================================================================== --- trunk/doc/release/2.0.0-notes.rst 2010-05-09 12:19:06 UTC (rev 8400) +++ trunk/doc/release/2.0.0-notes.rst 2010-05-09 15:45:43 UTC (rev 8401) @@ -59,6 +59,11 @@ new header ~~~~~~~~~~ -There is a new header ndarraytypes.h that provides needed ndarray types -that don't reference the ndarray c-api. Some folks might find this useful. + The new header file ndarraytypes.h contains the symbols from + ndarrayobject.h that do not depend on the PY_ARRAY_UNIQUE_SYMBOL and + NO_IMPORT/_ARRAY macros. Broadly, these symbols are types, typedefs, and + enumerations; the array function calls are left in ndarrayobject.h. This + allows users to include array-related types and enumerations without + needing to concern themselves with the macro expansions and their side- + effects. 
From numpy-svn at scipy.org Sun May 9 12:19:08 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sun, 9 May 2010 11:19:08 -0500 (CDT) Subject: [Numpy-svn] r8402 - trunk/numpy/core/include/numpy Message-ID: <20100509161908.F0DA339CAE6@scipy.org> Author: charris Date: 2010-05-09 11:19:08 -0500 (Sun, 09 May 2010) New Revision: 8402 Modified: trunk/numpy/core/include/numpy/ndarraytypes.h Log: STY: Some cleanups of ndarraytypes.h. Modified: trunk/numpy/core/include/numpy/ndarraytypes.h =================================================================== --- trunk/numpy/core/include/numpy/ndarraytypes.h 2010-05-09 15:45:43 UTC (rev 8401) +++ trunk/numpy/core/include/numpy/ndarraytypes.h 2010-05-09 16:19:08 UTC (rev 8402) @@ -4,6 +4,10 @@ /* This is auto-generated by the installer */ #include "numpyconfig.h" +#include "npy_common.h" +#include "npy_endian.h" +#include "utils.h" + #ifdef NPY_ENABLE_SEPARATE_COMPILATION #define NPY_NO_EXPORT NPY_VISIBILITY_HIDDEN #else @@ -17,16 +21,16 @@ #define NPY_ALLOW_THREADS 0 #endif -#include "npy_endian.h" -#include "utils.h" -/* There are several places in the code where an array of dimensions is - * allocated statically. This is the size of that static allocation. +/* + * There are several places in the code where an array of dimensions + * is allocated statically. This is the size of that static + * allocation. * - * The array creation itself could have arbitrary dimensions but - * all the places where static allocation is used would need to - * be changed to dynamic (including inside of several structures) + * The array creation itself could have arbitrary dimensions but all + * the places where static allocation is used would need to be changed + * to dynamic (including inside of several structures) */ #define NPY_MAXDIMS 32 @@ -36,24 +40,24 @@ #define NPY_FAIL 0 #define NPY_SUCCEED 1 -/* Binary compatibility version number. 
This number is increased - whenever the C-API is changed such that binary compatibility is - broken, i.e. whenever a recompile of extension modules is - needed. */ +/* + * Binary compatibility version number. This number is increased + * whenever the C-API is changed such that binary compatibility is + * broken, i.e. whenever a recompile of extension modules is needed. + */ #define NPY_VERSION NPY_ABI_VERSION -/* Minor API version. This number is increased whenever a change is - made to the C-API -- whether it breaks binary compatibility or not. - Some changes, such as adding a function pointer to the end of the - function table, can be made without breaking binary compatibility. - In this case, only the NPY_FEATURE_VERSION (*not* NPY_VERSION) - would be increased. Whenever binary compatibility is broken, both - NPY_VERSION and NPY_FEATURE_VERSION should be increased. +/* + * Minor API version. This number is increased whenever a change is + * made to the C-API -- whether it breaks binary compatibility or not. + * Some changes, such as adding a function pointer to the end of the + * function table, can be made without breaking binary compatibility. + * In this case, only the NPY_FEATURE_VERSION (*not* NPY_VERSION) + * would be increased. Whenever binary compatibility is broken, both + * NPY_VERSION and NPY_FEATURE_VERSION should be increased. */ #define NPY_FEATURE_VERSION NPY_API_VERSION -#include "npy_common.h" - enum NPY_TYPES { NPY_BOOL=0, NPY_BYTE, NPY_UBYTE, NPY_SHORT, NPY_USHORT, @@ -86,13 +90,16 @@ /* How many floating point types are there */ #define NPY_NUM_FLOATTYPE 3 -/* We need to match npy_intp to a signed integer of the same size as - a pointer variable. npy_uintp to the equivalent unsigned integer -*/ +/* + * We need to match npy_intp to a signed integer of the same size as a + * pointer variable. 
npy_uintp to the equivalent unsigned integer + */ -/* These characters correspond to the array type and the - struct module */ +/* + * These characters correspond to the array type and the struct + * module + */ /* except 'p' -- signed integer for pointer type */ @@ -122,9 +129,12 @@ NPY_TIMEDELTALTR = 'm', NPY_CHARLTR = 'c', - /* No Descriptor, just a define -- this let's - Python users specify an array of integers - large enough to hold a pointer on the platform*/ + /* + * No Descriptor, just a define -- this let's + * Python users specify an array of integers + * large enough to hold a pointer on the + * platform + */ NPY_INTPLTR = 'p', NPY_UINTPLTR = 'P', @@ -210,8 +220,10 @@ #define NPY_STR_as "as" -/* This is to typedef npy_intp to the appropriate pointer size for this - * platform. Py_intptr_t, Py_uintptr_t are defined in pyport.h. */ +/* + * This is to typedef npy_intp to the appropriate pointer size for + * this platform. Py_intptr_t, Py_uintptr_t are defined in pyport.h. + */ typedef Py_intptr_t npy_intp; typedef Py_uintptr_t npy_uintp; #define NPY_SIZEOF_INTP NPY_SIZEOF_PY_INTPTR_T @@ -265,8 +277,10 @@ #define NPY_INTP_FMT "Ld" #endif -/* We can only use C99 formats for npy_int_p if it is the same as intp_t, hence - * the condition on HAVE_UNITPTR_T */ +/* + * We can only use C99 formats for npy_int_p if it is the same as + * intp_t, hence the condition on HAVE_UNITPTR_T + */ #if (NPY_USE_C99_FORMATS) == 1 \ && (defined HAVE_UINTPTR_T) \ && (defined HAVE_INTTYPES_H) @@ -281,9 +295,10 @@ #define NPY_STRINGIFY(x) #x #define NPY_TOSTRING(x) NPY_STRINGIFY(x) - /* Macros to define how array, and dimension/strides data is - allocated. - */ + /* + * Macros to define how array, and dimension/strides data is + * allocated. 
+ */ /* Data buffer */ #define PyDataMem_NEW(size) ((char *)malloc(size)) @@ -314,7 +329,7 @@ /* forward declaration */ struct _PyArray_Descr; - /* These must deal with unaligned and swapped data if necessary */ +/* These must deal with unaligned and swapped data if necessary */ typedef PyObject * (PyArray_GetItemFunc) (void *, void *); typedef int (PyArray_SetItemFunc)(PyObject *, void *, void *); @@ -325,9 +340,11 @@ typedef npy_bool (PyArray_NonzeroFunc)(void *, void *); - /* These assume aligned and notswapped data -- a buffer will be - used before or contiguous data will be obtained - */ +/* + * These assume aligned and notswapped data -- a buffer will be used + * before or contiguous data will be obtained + */ + typedef int (PyArray_CompareFunc)(const void *, const void *, void *); typedef int (PyArray_ArgFunc)(void*, npy_intp, npy_intp*, void *); @@ -337,8 +354,10 @@ typedef void (PyArray_VectorUnaryFunc)(void *, void *, npy_intp, void *, void *); -/* XXX the ignore argument should be removed next time the API version - is bumped. It used to be the separator. */ +/* + * XXX the ignore argument should be removed next time the API version + * is bumped. It used to be the separator. + */ typedef int (PyArray_ScanFunc)(FILE *fp, void *dptr, char *ignore, struct _PyArray_Descr *); typedef int (PyArray_FromStrFunc)(char *s, void *dptr, char **endptr, @@ -370,65 +389,90 @@ typedef struct { /* The next four functions *cannot* be NULL */ - /* Functions to get and set items with standard - Python types -- not array scalars */ + /* + * Functions to get and set items with standard Python types + * -- not array scalars + */ PyArray_GetItemFunc *getitem; PyArray_SetItemFunc *setitem; - /* Copy and/or swap data. Memory areas may not overlap */ - /* Use memmove first if they might */ + /* + * Copy and/or swap data. 
Memory areas may not overlap + * Use memmove first if they might + */ PyArray_CopySwapNFunc *copyswapn; PyArray_CopySwapFunc *copyswap; - /* Function to compare items */ - /* Can be NULL + /* + * Function to compare items + * Can be NULL */ PyArray_CompareFunc *compare; - /* Function to select largest - Can be NULL - */ + /* + * Function to select largest + * Can be NULL + */ PyArray_ArgFunc *argmax; - /* Function to compute dot product */ - /* Can be NULL */ + /* + * Function to compute dot product + * Can be NULL + */ PyArray_DotFunc *dotfunc; - /* Function to scan an ASCII file and - place a single value plus possible separator - Can be NULL - */ + /* + * Function to scan an ASCII file and + * place a single value plus possible separator + * Can be NULL + */ PyArray_ScanFunc *scanfunc; - /* Function to read a single value from a string */ - /* and adjust the pointer; Can be NULL */ + /* + * Function to read a single value from a string + * and adjust the pointer; Can be NULL + */ PyArray_FromStrFunc *fromstr; - /* Function to determine if data is zero or not */ - /* If NULL a default version is */ - /* used at Registration time. */ + /* + * Function to determine if data is zero or not + * If NULL a default version is + * used at Registration time. + */ PyArray_NonzeroFunc *nonzero; - /* Used for arange. Can be NULL.*/ + /* + * Used for arange. + * Can be NULL. + */ PyArray_FillFunc *fill; - /* Function to fill arrays with scalar values - Can be NULL*/ + /* + * Function to fill arrays with scalar values + * Can be NULL + */ PyArray_FillWithScalarFunc *fillwithscalar; - /* Sorting functions; Can be NULL*/ + /* + * Sorting functions + * Can be NULL + */ PyArray_SortFunc *sort[NPY_NSORTS]; PyArray_ArgSortFunc *argsort[NPY_NSORTS]; - /* Dictionary of additional casting functions - PyArray_VectorUnaryFuncs - which can be populated to support casting - to other registered types. 
Can be NULL*/ + /* + * Dictionary of additional casting functions + * PyArray_VectorUnaryFuncs + * which can be populated to support casting + * to other registered types. Can be NULL + */ PyObject *castdict; - /* Functions useful for generalizing - the casting rules. Can be NULL; - */ + /* + * Functions useful for generalizing + * the casting rules. + * Can be NULL; + */ PyArray_ScalarKindFunc *scalarkind; int **cancastscalarkindto; int *cancastto; @@ -437,14 +481,19 @@ PyArray_FastPutmaskFunc *fastputmask; PyArray_FastTakeFunc *fasttake; - /* A little room to grow --- should use generic function interface for most additions */ + /* + * A little room to grow --- should use generic function + * interface for most additions + */ void *pad1; void *pad2; void *pad3; void *pad4; - /* Functions to cast to all other standard types*/ - /* Can have some NULL entries */ + /* + * Functions to cast to all other standard types + * Can have some NULL entries + */ PyArray_VectorUnaryFunc *cast[NPY_NTYPES]; } PyArray_ArrFuncs; @@ -467,8 +516,10 @@ #define NPY_USE_SETITEM 0x40 /* define NPY_IS_COMPLEX */ -/* These are inherited for global data-type if any data-types in the field - have them */ +/* + *These are inherited for global data-type if any data-types in the + * field have them + */ #define NPY_FROM_FIELDS (NPY_NEEDS_INIT | NPY_LIST_PICKLE | \ NPY_ITEM_REFCOUNT | NPY_NEEDS_PYAPI) @@ -484,33 +535,43 @@ typedef struct _PyArray_Descr { PyObject_HEAD - PyTypeObject *typeobj; /* the type object representing an - instance of this type -- should not - be two type_numbers with the same type - object. */ + PyTypeObject *typeobj; /* + * the type object representing an + * instance of this type -- should not + * be two type_numbers with the same type + * object. + */ char kind; /* kind for this type */ char type; /* unique-character representing this type */ - char byteorder; /* '>' (big), '<' (little), '|' - (not-applicable), or '=' (native). 
*/ - char unused; + char byteorder; /* + * '>' (big), '<' (little), '|' + * (not-applicable), or '=' (native). + */ + char unused; int flags; /* flag describing data type */ int type_num; /* number representing this type */ int elsize; /* element size for this type */ int alignment; /* alignment needed for this type */ struct _arr_descr \ - *subarray; /* Non-NULL if this type is - is an array (C-contiguous) - of some other type - */ - PyObject *fields; /* The fields dictionary for this type */ - /* For statically defined descr this - is always Py_None */ + *subarray; /* + * Non-NULL if this type is + * is an array (C-contiguous) + * of some other type + */ + PyObject *fields; /* The fields dictionary for this type + * For statically defined descr this + * is always Py_None + */ - PyObject *names; /* An ordered tuple of field names or NULL - if no fields are defined */ + PyObject *names; /* + * An ordered tuple of field names or NULL + * if no fields are defined + */ - PyArray_ArrFuncs *f; /* a table of functions specific for each - basic data descriptor */ + PyArray_ArrFuncs *f; /* + * a table of functions specific for each + * basic data descriptor + */ PyObject *metadata; /* Metadata about this dtype */ } PyArray_Descr; @@ -521,28 +582,37 @@ } PyArray_ArrayDescr; /* - The main array object structure. It is recommended to use the macros - defined below (PyArray_DATA and friends) access fields here, instead - of the members themselves. + * The main array object structure. It is recommended to use the macros + * defined below (PyArray_DATA and friends) access fields here, instead + * of the members themselves. 
*/ typedef struct PyArrayObject { PyObject_HEAD char *data; /* pointer to raw data buffer */ int nd; /* number of dimensions, also called ndim */ - npy_intp *dimensions; /* size in each dimension */ - npy_intp *strides; /* bytes to jump to get to the - next element in each dimension */ - PyObject *base; /* This object should be decref'd - upon deletion of array */ - /* For views it points to the original array */ - /* For creation from buffer object it points - to an object that shold be decref'd on - deletion */ - /* For UPDATEIFCOPY flag this is an array - to-be-updated upon deletion of this one */ + npy_intp *dimensions; /* size in each dimension */ + npy_intp *strides; /* + * bytes to jump to get to the + * next element in each dimension + */ + PyObject *base; /* + * This object should be decref'd upon + * deletion of array + * + * For views it points to the original + * array + * + * For creation from buffer object it + * points to an object that shold be + * decref'd on deletion + * + * For UPDATEIFCOPY flag this is an + * array to-be-updated upon deletion + * of this one + */ PyArray_Descr *descr; /* Pointer to type structure */ - int flags; /* Flags describing array -- see below*/ + int flags; /* Flags describing array -- see below */ PyObject *weakreflist; /* For weakreferences */ } PyArrayObject; @@ -571,7 +641,10 @@ typedef struct { NPY_DATETIMEUNIT base; int num; - int den; /* Converted to 1 on input for now -- an input-only mechanism */ + int den; /* + * Converted to 1 on input for now -- an + * input-only mechanism + */ int events; } PyArray_DatetimeMetaData; @@ -600,57 +673,75 @@ typedef int (PyArray_FinalizeFunc)(PyArrayObject *, PyObject *); -/* Means c-style contiguous (last index varies the fastest). The - data elements right after each other. */ +/* + * Means c-style contiguous (last index varies the fastest). The data + * elements right after each other. 
+ */ #define NPY_CONTIGUOUS 0x0001 -/* set if array is a contiguous Fortran array: the first index - varies the fastest in memory (strides array is reverse of - C-contiguous array)*/ + +/* + * set if array is a contiguous Fortran array: the first index varies + * the fastest in memory (strides array is reverse of C-contiguous + * array) + */ #define NPY_FORTRAN 0x0002 #define NPY_C_CONTIGUOUS NPY_CONTIGUOUS #define NPY_F_CONTIGUOUS NPY_FORTRAN /* - Note: all 0-d arrays are CONTIGUOUS and FORTRAN contiguous. If a - 1-d array is CONTIGUOUS it is also FORTRAN contiguous -*/ + * Note: all 0-d arrays are CONTIGUOUS and FORTRAN contiguous. If a + * 1-d array is CONTIGUOUS it is also FORTRAN contiguous + */ -/* If set, the array owns the data: it will be free'd when the array - is deleted. */ +/* + * If set, the array owns the data: it will be free'd when the array + * is deleted. + */ #define NPY_OWNDATA 0x0004 -/* An array never has the next four set; they're only used as parameter - flags to the the various FromAny functions */ +/* + * An array never has the next four set; they're only used as parameter + * flags to the the various FromAny functions + */ /* Cause a cast to occur regardless of whether or not it is safe. */ #define NPY_FORCECAST 0x0010 -/* Always copy the array. Returned arrays are always CONTIGUOUS, ALIGNED, - and WRITEABLE. */ +/* + * Always copy the array. Returned arrays are always CONTIGUOUS, + * ALIGNED, and WRITEABLE. + */ #define NPY_ENSURECOPY 0x0020 /* Make sure the returned array is a base-class ndarray */ #define NPY_ENSUREARRAY 0x0040 -/* Make sure that the strides are in units of the element size - Needed for some operations with record-arrays. -*/ +/* + * Make sure that the strides are in units of the element size Needed + * for some operations with record-arrays. 
+ */ #define NPY_ELEMENTSTRIDES 0x0080 -/* Array data is aligned on the appropiate memory address for the - type stored according to how the compiler would align things - (e.g., an array of integers (4 bytes each) starts on - a memory address that's a multiple of 4) */ +/* + * Array data is aligned on the appropiate memory address for the type + * stored according to how the compiler would align things (e.g., an + * array of integers (4 bytes each) starts on a memory address that's + * a multiple of 4) + */ #define NPY_ALIGNED 0x0100 + /* Array data has the native endianness */ #define NPY_NOTSWAPPED 0x0200 + /* Array data is writeable */ #define NPY_WRITEABLE 0x0400 -/* If this flag is set, then base contains a pointer to an array of - the same size that should be updated with the current contents of - this array when this array is deallocated -*/ + +/* + * If this flag is set, then base contains a pointer to an array of + * the same size that should be updated with the current contents of + * this array when this array is deallocated + */ #define NPY_UPDATEIFCOPY 0x1000 /* This flag is for the array interface */ @@ -674,9 +765,10 @@ #define NPY_UPDATE_ALL (NPY_CONTIGUOUS | NPY_FORTRAN | NPY_ALIGNED) -/* Size of internal buffers used for alignment */ -/* Make BUFSIZE a multiple of sizeof(cdouble) -- ususally 16 */ -/* So that ufunc buffers are aligned */ +/* + * Size of internal buffers used for alignment Make BUFSIZE a multiple + * of sizeof(cdouble) -- ususally 16 so that ufunc buffers are aligned + */ #define NPY_MIN_BUFSIZE ((int)sizeof(cdouble)) #define NPY_MAX_BUFSIZE (((int)sizeof(cdouble))*1000000) #define NPY_BUFSIZE 10000 @@ -696,7 +788,8 @@ #define PyArray_CNE(p,q) (((p).real!=(q).real) || ((p).imag != (q).imag)) /* - * C API: consists of Macros and functions. The MACROS are defined here. + * C API: consists of Macros and functions. The MACROS are defined + * here. 
*/ @@ -746,8 +839,10 @@ /* FWD declaration */ typedef struct PyArrayIterObject_tag PyArrayIterObject; -/* type of the function which translates a set of coordinates to a pointer to - * the data */ +/* + * type of the function which translates a set of coordinates to a + * pointer to the data + */ typedef char* (*npy_iter_get_dataptr_t)(PyArrayIterObject* iter, npy_intp*); struct PyArrayIterObject_tag { @@ -898,9 +993,9 @@ /* - Any object passed to PyArray_Broadcast must be binary compatible with - this structure. -*/ + * Any object passed to PyArray_Broadcast must be binary compatible + * with this structure. + */ typedef struct { PyObject_HEAD @@ -958,8 +1053,10 @@ typedef struct { PyObject_HEAD - /* Multi-iterator portion --- needs to be present in this order to - work with PyArray_Broadcast */ + /* + * Multi-iterator portion --- needs to be present in this + * order to work with PyArray_Broadcast + */ int numiter; /* number of index-array iterators */ @@ -976,13 +1073,15 @@ /* flat iterator for subspace (when numiter < nd) */ PyArrayIterObject *subspace; - /* if subspace iteration, then this is the array of - axes in the underlying array represented by the - index objects */ + /* + * if subspace iteration, then this is the array of axes in + * the underlying array represented by the index objects + */ int iteraxes[NPY_MAXDIMS]; - /* if subspace iteration, the these are the coordinates - to the start of the subspace. - */ + /* + * if subspace iteration, the these are the coordinates to the + * start of the subspace. 
+ */ npy_intp bscoord[NPY_MAXDIMS]; PyObject *indexobj; /* creating obj */ @@ -1029,11 +1128,15 @@ /* Dimensions is the dimension of the array */ npy_intp dimensions[NPY_MAXDIMS]; - /* Neighborhood points coordinates are computed relatively to the point pointed - * by _internal_iter */ + /* + * Neighborhood points coordinates are computed relatively to the + * point pointed by _internal_iter + */ PyArrayIterObject* _internal_iter; - /* To keep a reference to the representation of the constant value for - * constant padding */ + /* + * To keep a reference to the representation of the constant value + * for constant padding + */ char* constant; int mode; @@ -1053,19 +1156,22 @@ PyArrayNeighborhoodIter_Next2D(PyArrayNeighborhoodIterObject* iter); #endif -/* Include inline implementations - functions defined there are not considered - * public API */ +/* + * Include inline implementations - functions defined there are not + * considered public API + */ #define _NPY_INCLUDE_NEIGHBORHOOD_IMP #include "_neighborhood_iterator_imp.h" #undef _NPY_INCLUDE_NEIGHBORHOOD_IMP -/* The default array type - */ +/* The default array type */ #define NPY_DEFAULT_TYPE NPY_DOUBLE #define PyArray_DEFAULT NPY_DEFAULT_TYPE -/* All sorts of useful ways to look into a PyArrayObject. - These are the recommended over casting to PyArrayObject and accessing - the members directly. + +/* + * All sorts of useful ways to look into a PyArrayObject. These are + * the recommended over casting to PyArrayObject and accessing the + * members directly. */ #define PyArray_NDIM(obj) (((PyArrayObject *)(obj))->nd) @@ -1184,10 +1290,10 @@ #define PyArray_ISOBJECT(obj) PyTypeNum_ISOBJECT(PyArray_TYPE(obj)) #define PyArray_HASFIELDS(obj) PyDataType_HASFIELDS(PyArray_DESCR(obj)) - /* FIXME: This should check for a flag on the data-type - that states whether or not it is variable length. - Because the ISFLEXIBLE check is hard-coded to the - built-in data-types. 
+ /* + * FIXME: This should check for a flag on the data-type that + * states whether or not it is variable length. Because the + * ISFLEXIBLE check is hard-coded to the built-in data-types. */ #define PyArray_ISVARIABLE(obj) PyTypeNum_ISFLEXIBLE(PyArray_TYPE(obj)) @@ -1228,26 +1334,49 @@ #define PyDataType_ISBYTESWAPPED(d) (!PyDataType_ISNOTSWAPPED(d)) -/* This is the form of the struct that's returned pointed by the - PyCObject attribute of an array __array_struct__. See - http://numpy.scipy.org/array_interface.shtml for the full - documentation. */ +/* + * This is the form of the struct that's returned pointed by the + * PyCObject attribute of an array __array_struct__. See + * http://numpy.scipy.org/array_interface.shtml for the full + * documentation. + */ typedef struct { - int two; /* contains the integer 2 as a sanity check */ + int two; /* + * contains the integer 2 as a sanity + * check + */ + int nd; /* number of dimensions */ - char typekind; /* kind in array --- character code of typestr */ + + char typekind; /* + * kind in array --- character code of + * typestr + */ + int itemsize; /* size of each element */ - int flags; /* how should be data interpreted. Valid - flags are CONTIGUOUS (1), FORTRAN (2), - ALIGNED (0x100), NOTSWAPPED (0x200), and - WRITEABLE (0x400). - ARR_HAS_DESCR (0x800) states that arrdescr - field is present in structure */ - npy_intp *shape; /* A length-nd array of shape information */ - npy_intp *strides; /* A length-nd array of stride information */ + + int flags; /* + * how should be data interpreted. Valid + * flags are CONTIGUOUS (1), FORTRAN (2), + * ALIGNED (0x100), NOTSWAPPED (0x200), and + * WRITEABLE (0x400). 
ARR_HAS_DESCR (0x800) + * states that arrdescr field is present in + * structure + */ + + npy_intp *shape; /* + * A length-nd array of shape + * information + */ + + npy_intp *strides; /* A length-nd array of stride information */ + void *data; /* A pointer to the first element of the array */ - PyObject *descr; /* A list of fields or NULL (ignored if flags - does not have ARR_HAS_DESCR flag set) */ + + PyObject *descr; /* + * A list of fields or NULL (ignored if flags + * does not have ARR_HAS_DESCR flag set) + */ } PyArrayInterface; #endif /* NPY_ARRAYTYPES_H */ From numpy-svn at scipy.org Sun May 9 23:41:02 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sun, 9 May 2010 22:41:02 -0500 (CDT) Subject: [Numpy-svn] r8403 - in trunk/numpy/fft: . tests Message-ID: <20100510034102.E280039C4B4@scipy.org> Author: charris Date: 2010-05-09 22:41:02 -0500 (Sun, 09 May 2010) New Revision: 8403 Modified: trunk/numpy/fft/helper.py trunk/numpy/fft/tests/test_helper.py Log: BUG: Make fftshift and ifftshift accept integer arguments for the axes value. The functions now match their documentation. Fixes ticket #1182, patch from rgommers. 
Modified: trunk/numpy/fft/helper.py =================================================================== --- trunk/numpy/fft/helper.py 2010-05-09 16:19:08 UTC (rev 8402) +++ trunk/numpy/fft/helper.py 2010-05-10 03:41:02 UTC (rev 8403) @@ -7,6 +7,7 @@ from numpy.core import asarray, concatenate, arange, take, \ integer, empty +import numpy.core.numerictypes as nt import types def fftshift(x,axes=None): @@ -57,6 +58,8 @@ ndim = len(tmp.shape) if axes is None: axes = range(ndim) + elif isinstance(axes, (int, nt.integer)): + axes = (axes,) y = tmp for k in axes: n = tmp.shape[k] @@ -103,6 +106,8 @@ ndim = len(tmp.shape) if axes is None: axes = range(ndim) + elif isinstance(axes, (int, nt.integer)): + axes = (axes,) y = tmp for k in axes: n = tmp.shape[k] Modified: trunk/numpy/fft/tests/test_helper.py =================================================================== --- trunk/numpy/fft/tests/test_helper.py 2010-05-09 16:19:08 UTC (rev 8402) +++ trunk/numpy/fft/tests/test_helper.py 2010-05-10 03:41:02 UTC (rev 8403) @@ -26,6 +26,14 @@ for n in [1,4,9,100,211]: x = random((n,)) assert_array_almost_equal(ifftshift(fftshift(x)),x) + + def test_axes_keyword(self): + freqs = [[ 0, 1, 2], [ 3, 4, -4], [-3, -2, -1]] + shifted = [[-1, -3, -2], [ 2, 0, 1], [-4, 3, 4]] + assert_array_almost_equal(fftshift(freqs, axes=(0, 1)), shifted) + assert_array_almost_equal(fftshift(freqs, axes=0), fftshift(freqs, axes=(0,))) + assert_array_almost_equal(ifftshift(shifted, axes=(0, 1)), freqs) + assert_array_almost_equal(ifftshift(shifted, axes=0), ifftshift(shifted, axes=(0,))) class TestFFTFreq(TestCase): From numpy-svn at scipy.org Mon May 10 02:48:17 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Mon, 10 May 2010 01:48:17 -0500 (CDT) Subject: [Numpy-svn] r8404 - trunk/doc/release Message-ID: <20100510064817.84D1039CAF5@scipy.org> Author: stefan Date: 2010-05-10 01:48:17 -0500 (Mon, 10 May 2010) New Revision: 8404 Modified: trunk/doc/release/2.0.0-notes.rst Log: DOC: Fix 
typo in 2.0.0 release notes. Modified: trunk/doc/release/2.0.0-notes.rst =================================================================== --- trunk/doc/release/2.0.0-notes.rst 2010-05-10 03:41:02 UTC (rev 8403) +++ trunk/doc/release/2.0.0-notes.rst 2010-05-10 06:48:17 UTC (rev 8404) @@ -59,11 +59,11 @@ new header ~~~~~~~~~~ - The new header file ndarraytypes.h contains the symbols from - ndarrayobject.h that no not depend on the PY_ARRAY_UNIQUE_SYMBOL and - NO_IMPORT/_ARRAY macros. Broadly, these symbols are types, typedefs, and - enumerations; the array function calls are left in ndarrayobject.h. This - allows users to include array-related types and enumerations without - needing to concern themselves with the macro expansions and their side- - effects. +The new header file ndarraytypes.h contains the symbols from +ndarrayobject.h that do not depend on the PY_ARRAY_UNIQUE_SYMBOL and +NO_IMPORT/_ARRAY macros. Broadly, these symbols are types, typedefs, +and enumerations; the array function calls are left in +ndarrayobject.h. This allows users to include array-related types and +enumerations without needing to concern themselves with the macro +expansions and their side- effects. From numpy-svn at scipy.org Tue May 11 16:42:42 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Tue, 11 May 2010 15:42:42 -0500 (CDT) Subject: [Numpy-svn] r8405 - in trunk/numpy: compat distutils Message-ID: <20100511204242.5B5E239CAEA@scipy.org> Author: ptvirtan Date: 2010-05-11 15:42:42 -0500 (Tue, 11 May 2010) New Revision: 8405 Modified: trunk/numpy/compat/py3k.py trunk/numpy/distutils/exec_command.py Log: BUG/3K: distutils: do not assume that files output e.g. 
by gcc can be read in ascii codec Modified: trunk/numpy/compat/py3k.py =================================================================== --- trunk/numpy/compat/py3k.py 2010-05-10 06:48:17 UTC (rev 8404) +++ trunk/numpy/compat/py3k.py 2010-05-11 20:42:42 UTC (rev 8405) @@ -5,7 +5,7 @@ __all__ = ['bytes', 'asbytes', 'isfileobj', 'getexception', 'strchar', 'unicode', 'asunicode', 'asbytes_nested', 'asunicode_nested', - 'asstr'] + 'asstr', 'open_latin1'] import sys @@ -24,6 +24,8 @@ return s.decode('latin1') def isfileobj(f): return isinstance(f, io.FileIO) + def open_latin1(filename, mode='r'): + return open(f, mode=mode, encoding='iso-8859-1') strchar = 'U' else: bytes = str @@ -37,6 +39,8 @@ if isinstance(s, unicode): return s return s.decode('ascii') + def open_latin1(filename, mode='r'): + return open(filename, mode=mode) def getexception(): return sys.exc_info()[1] Modified: trunk/numpy/distutils/exec_command.py =================================================================== --- trunk/numpy/distutils/exec_command.py 2010-05-10 06:48:17 UTC (rev 8404) +++ trunk/numpy/distutils/exec_command.py 2010-05-11 20:42:42 UTC (rev 8405) @@ -55,6 +55,8 @@ from numpy.distutils import log from numpy.distutils.compat import get_exception +from numpy.compat import open_latin1 + def temp_file_name(): fo, name = make_temp_file() fo.close() @@ -248,13 +250,13 @@ return _exec_command(command, use_shell=use_shell, **env) if stsfile is not None: - f = open(stsfile,'r') + f = open_latin1(stsfile,'r') status_text = f.read() status = int(status_text) f.close() os.remove(stsfile) - f = open(tmpfile,'r') + f = open_latin1(tmpfile,'r') text = f.read() f.close() os.remove(tmpfile) @@ -293,12 +295,12 @@ raise RuntimeError("%r failed" % (cmd,)) os.remove(cmdfile) - f = open(stsfile,'r') + f = open_latin1(stsfile,'r') status = int(f.read()) f.close() os.remove(stsfile) - f = open(outfile,'r') + f = open_latin1(outfile,'r') text = f.read() f.close() os.remove(outfile) @@ -390,14 +392,14 @@ 
os.dup2(se_dup,se_fileno) fout.close() - fout = open(outfile,'r') + fout = open_latin1(outfile,'r') text = fout.read() fout.close() os.remove(outfile) if using_command: ferr.close() - ferr = open(errfile,'r') + ferr = open_latin1(errfile,'r') errmess = ferr.read() ferr.close() os.remove(errfile) From numpy-svn at scipy.org Tue May 11 16:44:05 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Tue, 11 May 2010 15:44:05 -0500 (CDT) Subject: [Numpy-svn] r8406 - trunk/numpy/linalg Message-ID: <20100511204405.65F6639CAEA@scipy.org> Author: ptvirtan Date: 2010-05-11 15:44:05 -0500 (Tue, 11 May 2010) New Revision: 8406 Modified: trunk/numpy/linalg/lapack_litemodule.c Log: BUG: linalg: check array byte order before passing it to lapack_lite (fixes #1482) Modified: trunk/numpy/linalg/lapack_litemodule.c =================================================================== --- trunk/numpy/linalg/lapack_litemodule.c 2010-05-11 20:42:42 UTC (rev 8405) +++ trunk/numpy/linalg/lapack_litemodule.c 2010-05-11 20:44:05 UTC (rev 8406) @@ -115,6 +115,12 @@ "Parameter %s is not of type %s in lapack_lite.%s", obname, tname, funname); return 0; + } else if (((PyArrayObject *)ob)->descr->byteorder != '=' && + ((PyArrayObject *)ob)->descr->byteorder != '|') { + PyErr_Format(LapackError, + "Parameter %s has non-native byte order in lapack_lite.%s", + obname, funname); + return 0; } else { return 1; } From numpy-svn at scipy.org Tue May 11 16:45:02 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Tue, 11 May 2010 15:45:02 -0500 (CDT) Subject: [Numpy-svn] r8407 - in trunk/numpy/linalg: . 
tests Message-ID: <20100511204502.BCE2939CAEA@scipy.org> Author: ptvirtan Date: 2010-05-11 15:45:02 -0500 (Tue, 11 May 2010) New Revision: 8407 Modified: trunk/numpy/linalg/linalg.py trunk/numpy/linalg/tests/test_regression.py Log: ENH: linalg: convert non-native endian arrays to native-endian before handing them to lapack_lite Modified: trunk/numpy/linalg/linalg.py =================================================================== --- trunk/numpy/linalg/linalg.py 2010-05-11 20:44:05 UTC (rev 8406) +++ trunk/numpy/linalg/linalg.py 2010-05-11 20:45:02 UTC (rev 8407) @@ -124,6 +124,18 @@ _fastCT = fastCopyAndTranspose +def _to_native_byte_order(*arrays): + ret = [] + for arr in arrays: + if arr.dtype.byteorder not in ('=', '|'): + ret.append(asarray(arr, dtype=arr.dtype.newbyteorder('='))) + else: + ret.append(arr) + if len(ret) == 1: + return ret[0] + else: + return ret + def _fastCopyAndTranspose(type, *arrays): cast_arrays = () for a in arrays: @@ -309,6 +321,7 @@ else: lapack_routine = lapack_lite.dgesv a, b = _fastCopyAndTranspose(t, a, b) + a, b = _to_native_byte_order(a, b) pivots = zeros(n_eq, fortran_int) results = lapack_routine(n_eq, n_rhs, a, n_eq, pivots, b, n_eq, 0) if results['info'] > 0: @@ -505,6 +518,7 @@ _assertSquareness(a) t, result_t = _commonType(a) a = _fastCopyAndTranspose(t, a) + a = _to_native_byte_order(a) m = a.shape[0] n = a.shape[1] if isComplexType(t): @@ -623,6 +637,7 @@ m, n = a.shape t, result_t = _commonType(a) a = _fastCopyAndTranspose(t, a) + a = _to_native_byte_order(a) mn = min(m, n) tau = zeros((mn,), t) if isComplexType(t): @@ -762,6 +777,7 @@ t, result_t = _commonType(a) real_t = _linalgRealType(t) a = _fastCopyAndTranspose(t, a) + a = _to_native_byte_order(a) n = a.shape[0] dummy = zeros((1,), t) if isComplexType(t): @@ -853,6 +869,7 @@ t, result_t = _commonType(a) real_t = _linalgRealType(t) a = _fastCopyAndTranspose(t, a) + a = _to_native_byte_order(a) n = a.shape[0] liwork = 5*n+3 iwork = zeros((liwork,), fortran_int) 
@@ -1008,6 +1025,7 @@ _assertSquareness(a) _assertFinite(a) a, t, result_t = _convertarray(a) # convert to double or cdouble type + a = _to_native_byte_order(a) real_t = _linalgRealType(t) n = a.shape[0] dummy = zeros((1,), t) @@ -1144,6 +1162,7 @@ t, result_t = _commonType(a) real_t = _linalgRealType(t) a = _fastCopyAndTranspose(t, a) + a = _to_native_byte_order(a) n = a.shape[0] liwork = 5*n+3 iwork = zeros((liwork,), fortran_int) @@ -1254,6 +1273,7 @@ t, result_t = _commonType(a) real_t = _linalgRealType(t) a = _fastCopyAndTranspose(t, a) + a = _to_native_byte_order(a) s = zeros((min(n, m),), real_t) if compute_uv: if full_matrices: @@ -1594,6 +1614,7 @@ _assertSquareness(a) t, result_t = _commonType(a) a = _fastCopyAndTranspose(t, a) + a = _to_native_byte_order(a) n = a.shape[0] if isComplexType(t): lapack_routine = lapack_lite.zgetrf @@ -1752,6 +1773,7 @@ bstar = zeros((ldb, n_rhs), t) bstar[:b.shape[0],:n_rhs] = b.copy() a, bstar = _fastCopyAndTranspose(t, a, bstar) + a, bstar = _to_native_byte_order(a, bstar) s = zeros((min(m, n),), real_t) nlvl = max( 0, int( math.log( float(min(m, n))/2. 
) ) + 1 ) iwork = zeros((3*min(m, n)*nlvl+11*min(m, n),), fortran_int) Modified: trunk/numpy/linalg/tests/test_regression.py =================================================================== --- trunk/numpy/linalg/tests/test_regression.py 2010-05-11 20:44:05 UTC (rev 8406) +++ trunk/numpy/linalg/tests/test_regression.py 2010-05-11 20:45:02 UTC (rev 8407) @@ -57,6 +57,15 @@ TypeError.""" self.assertRaises(ValueError, linalg.norm, array([1., 2., 3.]), 'fro') + def test_lapack_endian(self): + # For bug #1482 + a = array([[5.7998084, -2.1825367 ], + [-2.1825367, 9.85910595]], dtype='>f8') + b = array(a, dtype=' Author: ptvirtan Date: 2010-05-13 05:54:44 -0500 (Thu, 13 May 2010) New Revision: 8408 Modified: trunk/numpy/core/code_generators/generate_ufunc_api.py Log: BUG: core: remove a double-DECREF in _import_umath (fixes #1483) Modified: trunk/numpy/core/code_generators/generate_ufunc_api.py =================================================================== --- trunk/numpy/core/code_generators/generate_ufunc_api.py 2010-05-11 20:45:02 UTC (rev 8407) +++ trunk/numpy/core/code_generators/generate_ufunc_api.py 2010-05-13 10:54:44 UTC (rev 8408) @@ -72,7 +72,6 @@ } PyUFunc_API = (void **)PyCObject_AsVoidPtr(c_api); Py_DECREF(c_api); - Py_DECREF(numpy); if (PyUFunc_API == NULL) { PyErr_SetString(PyExc_RuntimeError, "_UFUNC_API is NULL pointer"); return -1; From numpy-svn at scipy.org Thu May 13 06:54:57 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Thu, 13 May 2010 05:54:57 -0500 (CDT) Subject: [Numpy-svn] r8409 - trunk/numpy/core/code_generators Message-ID: <20100513105457.BA56F39CAF1@scipy.org> Author: ptvirtan Date: 2010-05-13 05:54:57 -0500 (Thu, 13 May 2010) New Revision: 8409 Modified: trunk/numpy/core/code_generators/generate_numpy_api.py trunk/numpy/core/code_generators/generate_ufunc_api.py Log: STY: core: reduce code duplication in _import_umath and _import_array Modified: trunk/numpy/core/code_generators/generate_numpy_api.py 
=================================================================== --- trunk/numpy/core/code_generators/generate_numpy_api.py 2010-05-13 10:54:44 UTC (rev 8408) +++ trunk/numpy/core/code_generators/generate_numpy_api.py 2010-05-13 10:54:57 UTC (rev 8409) @@ -57,12 +57,11 @@ return -1; } c_api = PyObject_GetAttrString(numpy, "_ARRAY_API"); + Py_DECREF(numpy); if (c_api == NULL) { PyErr_SetString(PyExc_AttributeError, "_ARRAY_API not found"); - Py_DECREF(numpy); return -1; } - Py_DECREF(numpy); #if PY_VERSION_HEX >= 0x02070000 if (!PyCapsule_CheckExact(c_api)) { @@ -71,10 +70,6 @@ return -1; } PyArray_API = (void **)PyCapsule_GetPointer(c_api, NULL); - Py_DECREF(c_api); - if (PyArray_API == NULL) { - return -1; - } #else if (!PyCObject_Check(c_api)) { PyErr_SetString(PyExc_RuntimeError, "_ARRAY_API is not PyCObject object"); @@ -82,12 +77,13 @@ return -1; } PyArray_API = (void **)PyCObject_AsVoidPtr(c_api); +#endif Py_DECREF(c_api); if (PyArray_API == NULL) { PyErr_SetString(PyExc_RuntimeError, "_ARRAY_API is NULL pointer"); return -1; } -#endif + /* Perform runtime check of C API version */ if (NPY_VERSION != PyArray_GetNDArrayCVersion()) { PyErr_Format(PyExc_RuntimeError, "module compiled against "\ Modified: trunk/numpy/core/code_generators/generate_ufunc_api.py =================================================================== --- trunk/numpy/core/code_generators/generate_ufunc_api.py 2010-05-13 10:54:44 UTC (rev 8408) +++ trunk/numpy/core/code_generators/generate_ufunc_api.py 2010-05-13 10:54:57 UTC (rev 8409) @@ -46,12 +46,11 @@ return -1; } c_api = PyObject_GetAttrString(numpy, "_UFUNC_API"); + Py_DECREF(numpy); if (c_api == NULL) { PyErr_SetString(PyExc_AttributeError, "_UFUNC_API not found"); - Py_DECREF(numpy); return -1; } - Py_DECREF(numpy); #if PY_VERSION_HEX >= 0x02070000 if (!PyCapsule_CheckExact(c_api)) { @@ -60,10 +59,6 @@ return -1; } PyUFunc_API = (void **)PyCapsule_GetPointer(c_api, NULL); - Py_DECREF(c_api); - if (PyUFunc_API == NULL) { - 
return -1; - } #else if (!PyCObject_Check(c_api)) { PyErr_SetString(PyExc_RuntimeError, "_UFUNC_API is not PyCObject object"); @@ -71,12 +66,12 @@ return -1; } PyUFunc_API = (void **)PyCObject_AsVoidPtr(c_api); +#endif Py_DECREF(c_api); if (PyUFunc_API == NULL) { PyErr_SetString(PyExc_RuntimeError, "_UFUNC_API is NULL pointer"); return -1; } -#endif return 0; } From numpy-svn at scipy.org Thu May 13 06:55:09 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Thu, 13 May 2010 05:55:09 -0500 (CDT) Subject: [Numpy-svn] r8410 - trunk/numpy/compat Message-ID: <20100513105509.597B139CAF1@scipy.org> Author: ptvirtan Date: 2010-05-13 05:55:09 -0500 (Thu, 13 May 2010) New Revision: 8410 Modified: trunk/numpy/compat/py3k.py Log: BUG: compat: fix a bug in open_latin1 Modified: trunk/numpy/compat/py3k.py =================================================================== --- trunk/numpy/compat/py3k.py 2010-05-13 10:54:57 UTC (rev 8409) +++ trunk/numpy/compat/py3k.py 2010-05-13 10:55:09 UTC (rev 8410) @@ -25,7 +25,7 @@ def isfileobj(f): return isinstance(f, io.FileIO) def open_latin1(filename, mode='r'): - return open(f, mode=mode, encoding='iso-8859-1') + return open(filename, mode=mode, encoding='iso-8859-1') strchar = 'U' else: bytes = str From numpy-svn at scipy.org Thu May 13 08:47:23 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Thu, 13 May 2010 07:47:23 -0500 (CDT) Subject: [Numpy-svn] r8412 - trunk/numpy/core/tests Message-ID: <20100513124723.B2F0939CAE6@scipy.org> Author: ptvirtan Date: 2010-05-13 07:47:23 -0500 (Thu, 13 May 2010) New Revision: 8412 Modified: trunk/numpy/core/tests/test_print.py Log: ENH: core/test: try more locales out in_foreign_locale to make it work also on modern Linux systems Modified: trunk/numpy/core/tests/test_print.py =================================================================== --- trunk/numpy/core/tests/test_print.py 2010-05-13 12:47:04 UTC (rev 8411) +++ trunk/numpy/core/tests/test_print.py 2010-05-13 
12:47:23 UTC (rev 8412) @@ -200,19 +200,26 @@ # Locale tests: scalar types formatting should be independent of the locale def in_foreign_locale(func): - # XXX: How to query locale on a given system ? + """ + Swap LC_NUMERIC locale to one in which the decimal point is ',' and not '.' + If not possible, raise nose.SkipTest - # French is one language where the decimal is ',' not '.', and should be - # relatively common on many systems + """ + if sys.platform == 'win32': + locales = ['FRENCH'] + else: + locales = ['fr_FR', 'fr_FR.UTF-8', 'fi_FI', 'fi_FI.UTF-8'] + def wrapper(*args, **kwargs): curloc = locale.getlocale(locale.LC_NUMERIC) try: - try: - if not sys.platform == 'win32': - locale.setlocale(locale.LC_NUMERIC, 'fr_FR') - else: - locale.setlocale(locale.LC_NUMERIC, 'FRENCH') - except locale.Error: + for loc in locales: + try: + locale.setlocale(locale.LC_NUMERIC, loc) + break + except locale.Error: + pass + else: raise nose.SkipTest("Skipping locale test, because " "French locale not found") return func(*args, **kwargs) From numpy-svn at scipy.org Thu May 13 08:47:07 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Thu, 13 May 2010 07:47:07 -0500 (CDT) Subject: [Numpy-svn] r8411 - in trunk/numpy/lib: . 
tests Message-ID: <20100513124707.6CB7B39C4B4@scipy.org> Author: ptvirtan Date: 2010-05-13 07:47:04 -0500 (Thu, 13 May 2010) New Revision: 8411 Modified: trunk/numpy/lib/npyio.py trunk/numpy/lib/tests/test_io.py Log: BUG/3K: lib: make savetxt work with filenames Modified: trunk/numpy/lib/npyio.py =================================================================== --- trunk/numpy/lib/npyio.py 2010-05-13 10:55:09 UTC (rev 8410) +++ trunk/numpy/lib/npyio.py 2010-05-13 12:47:04 UTC (rev 8411) @@ -27,7 +27,6 @@ else: from cStringIO import StringIO as BytesIO -_file = open _string_like = _is_string_like def seek_gzip_factory(f): @@ -285,7 +284,7 @@ import gzip if isinstance(file, basestring): - fid = _file(file, "rb") + fid = open(file, "rb") elif isinstance(file, gzip.GzipFile): fid = seek_gzip_factory(file) else: @@ -792,9 +791,9 @@ fh = gzip.open(fname, 'wb') else: if sys.version_info[0] >= 3: - fh = file(fname, 'wb') + fh = open(fname, 'wb') else: - fh = file(fname, 'w') + fh = open(fname, 'w') elif hasattr(fname, 'seek'): fh = fname else: Modified: trunk/numpy/lib/tests/test_io.py =================================================================== --- trunk/numpy/lib/tests/test_io.py 2010-05-13 10:55:09 UTC (rev 8410) +++ trunk/numpy/lib/tests/test_io.py 2010-05-13 12:47:04 UTC (rev 8411) @@ -225,7 +225,18 @@ lines = c.readlines() assert_equal(lines, asbytes_nested(['01 : 2.0\n', '03 : 4.0\n'])) + def test_file_roundtrip(self): + f, name = mkstemp() + os.close(f) + try: + a = np.array([(1, 2), (3, 4)]) + np.savetxt(name, a) + b = np.loadtxt(name) + assert_array_equal(a, b) + finally: + os.unlink(name) + class TestLoadTxt(TestCase): def test_record(self): c = StringIO() From numpy-svn at scipy.org Sat May 15 18:11:10 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sat, 15 May 2010 17:11:10 -0500 (CDT) Subject: [Numpy-svn] r8413 - trunk/numpy/lib Message-ID: <20100515221110.B151139CAFC@scipy.org> Author: oliphant Date: 2010-05-15 17:11:10 -0500 (Sat, 
15 May 2010) New Revision: 8413 Modified: trunk/numpy/lib/function_base.py Log: Add percentile function. Modified: trunk/numpy/lib/function_base.py =================================================================== --- trunk/numpy/lib/function_base.py 2010-05-13 12:47:23 UTC (rev 8412) +++ trunk/numpy/lib/function_base.py 2010-05-15 22:11:10 UTC (rev 8413) @@ -1,6 +1,6 @@ __docformat__ = "restructuredtext en" __all__ = ['select', 'piecewise', 'trim_zeros', - 'copy', 'iterable', + 'copy', 'iterable', 'percentile', 'diff', 'gradient', 'angle', 'unwrap', 'sort_complex', 'disp', 'extract', 'place', 'nansum', 'nanmax', 'nanargmax', 'nanargmin', 'nanmin', 'vectorize', 'asarray_chkfinite', 'average', @@ -2804,7 +2804,7 @@ See Also -------- - mean + mean, percentile Notes ----- @@ -2863,6 +2863,132 @@ # and check, use out array. return mean(sorted[indexer], axis=axis, out=out) +def percentile(a, q, axis=None, out=None, overwrite_input=False): + """ + Compute the qth percentile of the data along the specified axis. + + Returns the qth percentile of the array elements. + + Parameters + ---------- + a : array_like + Input array or object that can be converted to an array. + q : float in range of [0,100] (or sequence of floats) + percentile to compute which must be between 0 and 100 inclusive + axis : {None, int}, optional + Axis along which the percentiles are computed. The default (axis=None) + is to compute the median along a flattened version of the array. + out : ndarray, optional + Alternative output array in which to place the result. It must + have the same shape and buffer length as the expected output, + but the type (of the output) will be cast if necessary. + overwrite_input : {False, True}, optional + If True, then allow use of memory of input array (a) for + calculations. The input array will be modified by the call to + median. This will save memory when you do not need to preserve + the contents of the input array. 
Treat the input as undefined, + but it will probably be fully or partially sorted. Default is + False. Note that, if `overwrite_input` is True and the input + is not already an ndarray, an error will be raised. + + Returns + ------- + pcntile : ndarray + A new array holding the result (unless `out` is specified, in + which case that array is returned instead). If the input contains + integers, or floats of smaller precision than 64, then the output + data-type is float64. Otherwise, the output data-type is the same + as that of the input. + + See Also + -------- + mean, median + + Notes + ----- + Given a vector V of length N, the qth percentile of V is the qth ranked + value in a sorted copy of V. A weighted average of the two nearest neighbors + is used if the normalized ranking does not match q exactly. + The same as the median if q is 0.5; the same as the min if q is 0; + and the same as the max if q is 1 + + Examples + -------- + >>> a = np.array([[10, 7, 4], [3, 2, 1]]) + >>> a + array([[10, 7, 4], + [ 3, 2, 1]]) + >>> np.percentile(a, 0.5) + 3.5 + >>> np.percentile(a, 0.5, axis=0) + array([ 6.5, 4.5, 2.5]) + >>> np.percentile(a, 0.5, axis=1) + array([ 7., 2.]) + >>> m = np.percentile(a, 0.5, axis=0) + >>> out = np.zeros_like(m) + >>> np.percentile(a, 0.5, axis=0, out=m) + array([ 6.5, 4.5, 2.5]) + >>> m + array([ 6.5, 4.5, 2.5]) + >>> b = a.copy() + >>> np.percentile(b, 0.5, axis=1, overwrite_input=True) + array([ 7., 2.]) + >>> assert not np.all(a==b) + >>> b = a.copy() + >>> np.percentile(b, 0.5, axis=None, overwrite_input=True) + 3.5 + >>> assert not np.all(a==b) + + """ + if q == 0: + return a.min(axis=axis, out=out) + elif q == 100: + return a.max(axis=axis, out=out) + + if overwrite_input: + if axis is None: + sorted = a.ravel() + sorted.sort() + else: + a.sort(axis=axis) + sorted = a + else: + sorted = sort(a, axis=axis) + if axis is None: + axis = 0 + + return _compute_qth_percentile(sorted, q, axis, out) + +# handle sequence of q's without calling 
sort multiple times +def _compute_qth_percentile(sorted, q, axis, out): + if not isscalar(q): + return [_compute_qth_percentile(sorted, qi, axis, out) + for qi in q] + q = q / 100.0 + if (q < 0) or (q > 1): + raise ValueError, "percentile must be either in the range [0,100]" + + indexer = [slice(None)] * sorted.ndim + Nx = sorted.shape[axis] + index = q*(Nx-1) + i = int(index) + if i == index: + indexer[axis] = slice(i, i+1) + weights = array(1) + sumval = 1.0 + else: + indexer[axis] = slice(i, i+2) + j = i + 1 + weights = array([(j - index), (index - i)],float) + wshape = [1]*sorted.ndim + wshape[axis] = 2 + weights.shape = wshape + sumval = weights.sum() + + # Use add.reduce in both cases to coerce data type as well as + # check and use out array. + return add.reduce(sorted[indexer]*weights, axis=axis, out=out)/sumval + def trapz(y, x=None, dx=1.0, axis=-1): """ Integrate along the given axis using the composite trapezoidal rule. From numpy-svn at scipy.org Sun May 16 04:30:47 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sun, 16 May 2010 03:30:47 -0500 (CDT) Subject: [Numpy-svn] r8414 - in trunk/numpy/lib: . tests Message-ID: <20100516083047.942C239CAE7@scipy.org> Author: stefan Date: 2010-05-16 03:30:47 -0500 (Sun, 16 May 2010) New Revision: 8414 Modified: trunk/numpy/lib/function_base.py trunk/numpy/lib/tests/test_function_base.py Log: BUG: Allow any array-like input to percentile. 
Modified: trunk/numpy/lib/function_base.py =================================================================== --- trunk/numpy/lib/function_base.py 2010-05-15 22:11:10 UTC (rev 8413) +++ trunk/numpy/lib/function_base.py 2010-05-16 08:30:47 UTC (rev 8414) @@ -2940,6 +2940,8 @@ >>> assert not np.all(a==b) """ + a = np.asarray(a) + if q == 0: return a.min(axis=axis, out=out) elif q == 100: Modified: trunk/numpy/lib/tests/test_function_base.py =================================================================== --- trunk/numpy/lib/tests/test_function_base.py 2010-05-15 22:11:10 UTC (rev 8413) +++ trunk/numpy/lib/tests/test_function_base.py 2010-05-16 08:30:47 UTC (rev 8414) @@ -967,5 +967,8 @@ assert_array_equal(res[i], desired[i]) +def test_percentile_list(): + assert_equal(np.percentile([1,2,3], 0), 1) + if __name__ == "__main__": run_module_suite() From numpy-svn at scipy.org Sun May 16 04:31:03 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sun, 16 May 2010 03:31:03 -0500 (CDT) Subject: [Numpy-svn] r8415 - in trunk/numpy/lib: . tests Message-ID: <20100516083103.2F5A839CAE7@scipy.org> Author: stefan Date: 2010-05-16 03:31:03 -0500 (Sun, 16 May 2010) New Revision: 8415 Modified: trunk/numpy/lib/function_base.py trunk/numpy/lib/tests/test_function_base.py Log: BUG: Correctly handle in-place output in percentile. 
Modified: trunk/numpy/lib/function_base.py =================================================================== --- trunk/numpy/lib/function_base.py 2010-05-16 08:30:47 UTC (rev 8414) +++ trunk/numpy/lib/function_base.py 2010-05-16 08:31:03 UTC (rev 8415) @@ -2964,9 +2964,15 @@ # handle sequence of q's without calling sort multiple times def _compute_qth_percentile(sorted, q, axis, out): if not isscalar(q): - return [_compute_qth_percentile(sorted, qi, axis, out) - for qi in q] - q = q / 100.0 + p = [_compute_qth_percentile(sorted, qi, axis, None) + for qi in q] + + if out is not None: + out.flat = p + + return p + + q = q / 100.0 if (q < 0) or (q > 1): raise ValueError, "percentile must be either in the range [0,100]" Modified: trunk/numpy/lib/tests/test_function_base.py =================================================================== --- trunk/numpy/lib/tests/test_function_base.py 2010-05-16 08:30:47 UTC (rev 8414) +++ trunk/numpy/lib/tests/test_function_base.py 2010-05-16 08:31:03 UTC (rev 8415) @@ -968,7 +968,26 @@ def test_percentile_list(): - assert_equal(np.percentile([1,2,3], 0), 1) + assert_equal(np.percentile([1, 2, 3], 0), 1) +def test_percentile_out(): + x = np.array([1, 2, 3]) + y = np.zeros((3,)) + p = (1, 2, 3) + np.percentile(x, p, out=y) + assert_equal(y, np.percentile(x, p)) + + x = np.array([[1, 2, 3], + [4, 5, 6]]) + + y = np.zeros((3, 3)) + np.percentile(x, p, axis=0, out=y) + assert_equal(y, np.percentile(x, p, axis=0)) + + y = np.zeros((3, 2)) + np.percentile(x, p, axis=1, out=y) + assert_equal(y, np.percentile(x, p, axis=1)) + + if __name__ == "__main__": run_module_suite() From numpy-svn at scipy.org Sun May 16 19:05:31 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sun, 16 May 2010 18:05:31 -0500 (CDT) Subject: [Numpy-svn] r8416 - in trunk/numpy/lib: . 
tests Message-ID: <20100516230531.35C0739CAF8@scipy.org> Author: pierregm Date: 2010-05-16 18:05:30 -0500 (Sun, 16 May 2010) New Revision: 8416 Modified: trunk/numpy/lib/_iotools.py trunk/numpy/lib/npyio.py trunk/numpy/lib/tests/test_io.py Log: * add a `replace_space` option to NameValidator * Force a file to be opened in 'U' mode (bug #1473) Modified: trunk/numpy/lib/_iotools.py =================================================================== --- trunk/numpy/lib/_iotools.py 2010-05-16 08:31:03 UTC (rev 8415) +++ trunk/numpy/lib/_iotools.py 2010-05-16 23:05:30 UTC (rev 8416) @@ -258,6 +258,8 @@ * If 'lower', field names are converted to lower case. The default value is True. + replace_space: '_', optional + Character(s) used in replacement of white spaces. Notes ----- @@ -281,7 +283,8 @@ defaultexcludelist = ['return', 'file', 'print'] defaultdeletechars = set("""~!@#$%^&*()-=+~\|]}[{';: /?.>,<""") # - def __init__(self, excludelist=None, deletechars=None, case_sensitive=None): + def __init__(self, excludelist=None, deletechars=None, + case_sensitive=None, replace_space='_'): # Process the exclusion list .. if excludelist is None: excludelist = [] @@ -303,6 +306,8 @@ self.case_converter = lambda x: x.lower() else: self.case_converter = lambda x: x + # + self.replace_space = replace_space def validate(self, names, defaultfmt="f%i", nbfields=None): """ @@ -347,14 +352,16 @@ deletechars = self.deletechars excludelist = self.excludelist case_converter = self.case_converter + replace_space = self.replace_space # Initializes some variables ... 
validatednames = [] seen = dict() nbempty = 0 # for item in names: - item = case_converter(item) - item = item.strip().replace(' ', '_') + item = case_converter(item).strip() + if replace_space: + item = item.replace(' ', replace_space) item = ''.join([c for c in item if c not in deletechars]) if item == '': item = defaultfmt % nbempty Modified: trunk/numpy/lib/npyio.py =================================================================== --- trunk/numpy/lib/npyio.py 2010-05-16 08:31:03 UTC (rev 8415) +++ trunk/numpy/lib/npyio.py 2010-05-16 23:05:30 UTC (rev 8416) @@ -69,6 +69,8 @@ return f + + class BagObj(object): """ BagObj(obj) @@ -100,6 +102,8 @@ except KeyError: raise AttributeError, key + + class NpzFile(object): """ NpzFile(fid) @@ -921,7 +925,8 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None, skiprows=0, skip_header=0, skip_footer=0, converters=None, missing='', missing_values=None, filling_values=None, - usecols=None, names=None, excludelist=None, deletechars=None, + usecols=None, names=None, + excludelist=None, deletechars=None, replace_space='_', autostrip=False, case_sensitive=True, defaultfmt="f%i", unpack=None, usemask=False, loose=True, invalid_raise=True): """ @@ -978,6 +983,9 @@ A format used to define default field names, such as "f%i" or "f_%02i". autostrip : bool, optional Whether to automatically strip white spaces from the variables. + replace_space : char, optional + Character(s) used in replacement of white spaces in the variables names. + By default, use a '_'. case_sensitive : {True, False, 'upper', 'lower'}, optional If True, field names are case sensitive. If False or 'upper', field names are converted to upper case. @@ -1076,7 +1084,7 @@ # Initialize the filehandle, the LineSplitter and the NameValidator if isinstance(fname, basestring): - fhd = np.lib._datasource.open(fname) + fhd = np.lib._datasource.open(fname, 'U') elif not hasattr(fname, 'read'): raise TypeError("The input should be a string or a filehandle. 
"\ "(got %s instead)" % type(fname)) @@ -1086,7 +1094,8 @@ autostrip=autostrip)._handyman validate_names = NameValidator(excludelist=excludelist, deletechars=deletechars, - case_sensitive=case_sensitive) + case_sensitive=case_sensitive, + replace_space=replace_space) # Get the first valid lines after the first skiprows ones .. if skiprows: Modified: trunk/numpy/lib/tests/test_io.py =================================================================== --- trunk/numpy/lib/tests/test_io.py 2010-05-16 08:31:03 UTC (rev 8415) +++ trunk/numpy/lib/tests/test_io.py 2010-05-16 23:05:30 UTC (rev 8416) @@ -1062,6 +1062,30 @@ dtype=[('f0', '|S10'), ('f1', float), ('f2', '|S5')]) assert_equal(mtest, ctrl) + def test_replace_space(self): + "Test the 'replace_space' option" + txt = "A.A, B (B), C:C\n1, 2, 3.14" + # Test default: replace ' ' by '_' and delete non-alphanum chars + test = np.genfromtxt(StringIO(txt), + delimiter=",", names=True, dtype=None) + ctrl_dtype = [("AA", int), ("B_B", int), ("CC", float)] + ctrl = np.array((1, 2, 3.14), dtype=ctrl_dtype) + assert_equal(test, ctrl) + # Test: no replace, no delete + test = np.genfromtxt(StringIO(txt), + delimiter=",", names=True, dtype=None, + replace_space='', deletechars='') + ctrl_dtype = [("A.A", int), ("B (B)", int), ("C:C", float)] + ctrl = np.array((1, 2, 3.14), dtype=ctrl_dtype) + assert_equal(test, ctrl) + # Test: no delete (spaces are replaced by _) + test = np.genfromtxt(StringIO(txt), + delimiter=",", names=True, dtype=None, + deletechars='') + ctrl_dtype = [("A.A", int), ("B_(B)", int), ("C:C", float)] + ctrl = np.array((1, 2, 3.14), dtype=ctrl_dtype) + assert_equal(test, ctrl) + def test_incomplete_names(self): "Test w/ incomplete names" data = "A,,C\n0,1,2\n3,4,5" From numpy-svn at scipy.org Sun May 16 19:31:21 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sun, 16 May 2010 18:31:21 -0500 (CDT) Subject: [Numpy-svn] r8417 - in trunk/numpy/ma: . 
tests Message-ID: <20100516233121.8B4D739CAF8@scipy.org> Author: pierregm Date: 2010-05-16 18:31:21 -0500 (Sun, 16 May 2010) New Revision: 8417 Modified: trunk/numpy/ma/extras.py trunk/numpy/ma/tests/test_extras.py Log: * Added `apply_over_axes` as requested in ticket #1480 Modified: trunk/numpy/ma/extras.py =================================================================== --- trunk/numpy/ma/extras.py 2010-05-16 23:05:30 UTC (rev 8416) +++ trunk/numpy/ma/extras.py 2010-05-16 23:31:21 UTC (rev 8417) @@ -11,14 +11,14 @@ __author__ = "Pierre GF Gerard-Marchant ($Author: jarrod.millman $)" __version__ = '1.0' __revision__ = "$Revision: 3473 $" -__date__ = '$Date: 2007-10-29 17:18:13 +0200 (Mon, 29 Oct 2007) $' +__date__ = '$Date: 2007-10-29 17:18:13 +0200 (Mon, 29 Oct 2007) $' -__all__ = ['apply_along_axis', 'atleast_1d', 'atleast_2d', 'atleast_3d', - 'average', +__all__ = ['apply_along_axis', 'apply_over_axes', 'atleast_1d', 'atleast_2d', + 'atleast_3d', 'average', 'clump_masked', 'clump_unmasked', 'column_stack', 'compress_cols', 'compress_rowcols', 'compress_rows', 'count_masked', 'corrcoef', 'cov', - 'diagflat', 'dot','dstack', + 'diagflat', 'dot', 'dstack', 'ediff1d', 'flatnotmasked_contiguous', 'flatnotmasked_edges', 'hsplit', 'hstack', @@ -37,8 +37,8 @@ import warnings import core as ma -from core import MaskedArray, MAError, add, array, asarray, concatenate, count,\ - filled, getmask, getmaskarray, make_mask_descr, masked, masked_array,\ +from core import MaskedArray, MAError, add, array, asarray, concatenate, count, \ + filled, getmask, getmaskarray, make_mask_descr, masked, masked_array, \ mask_or, nomask, ones, sort, zeros #from core import * @@ -271,7 +271,7 @@ def __call__(self, *args, **params): func = getattr(np, self.__name__) - if len(args)==1: + if len(args) == 1: x = args[0] if isinstance(x, ndarray): _d = func(np.asarray(x), **params) @@ -284,7 +284,7 @@ else: arrays = [] args = list(args) - while len(args)>0 and issequence(args[0]): + while 
len(args) > 0 and issequence(args[0]): arrays.append(args.pop(0)) res = [] for x in arrays: @@ -317,8 +317,8 @@ """Flatten a sequence in place.""" k = 0 while (k != len(seq)): - while hasattr(seq[k],'__iter__'): - seq[k:(k+1)] = seq[k] + while hasattr(seq[k], '__iter__'): + seq[k:(k + 1)] = seq[k] k += 1 return seq @@ -333,12 +333,12 @@ axis += nd if (axis >= nd): raise ValueError("axis must be less than arr.ndim; axis=%d, rank=%d." - % (axis,nd)) - ind = [0]*(nd-1) - i = np.zeros(nd,'O') + % (axis, nd)) + ind = [0] * (nd - 1) + i = np.zeros(nd, 'O') indlist = range(nd) indlist.remove(axis) - i[axis] = slice(None,None) + i[axis] = slice(None, None) outshape = np.asarray(arr.shape).take(indlist) i.put(indlist, ind) j = i.copy() @@ -364,8 +364,8 @@ # increment the index ind[-1] += 1 n = -1 - while (ind[n] >= outshape[n]) and (n > (1-nd)): - ind[n-1] += 1 + while (ind[n] >= outshape[n]) and (n > (1 - nd)): + ind[n - 1] += 1 ind[n] = 0 n -= 1 i.put(indlist, ind) @@ -391,8 +391,8 @@ # increment the index ind[-1] += 1 n = -1 - while (ind[n] >= holdshape[n]) and (n > (1-nd)): - ind[n-1] += 1 + while (ind[n] >= holdshape[n]) and (n > (1 - nd)): + ind[n - 1] += 1 ind[n] = 0 n -= 1 i.put(indlist, ind) @@ -411,6 +411,32 @@ apply_along_axis.__doc__ = np.apply_along_axis.__doc__ +def apply_over_axes(func, a, axes): + """ + (This docstring will be overwritten) + """ + val = np.asarray(a) + msk = getmaskarray(a) + N = a.ndim + if array(axes).ndim == 0: + axes = (axes,) + for axis in axes: + if axis < 0: axis = N + axis + args = (val, axis) + res = ma.array(func(*(val, axis)), mask=func(*(msk, axis))) + if res.ndim == val.ndim: + (val, msk) = (res._data, res._mask) + else: + res = ma.expand_dims(res, axis) + if res.ndim == val.ndim: + (val, msk) = (res._data, res._mask) + else: + raise ValueError("Function is not returning"\ + " an array of correct shape") + return val +apply_over_axes.__doc__ = np.apply_over_axes.__doc__ + + def average(a, axis=None, weights=None, 
returned=False): """ Return the weighted average of array over the given axis. @@ -496,15 +522,15 @@ wsh = (1,) if wsh == ash: w = np.array(w, float, copy=0) - n = add.reduce(a*w, axis) + n = add.reduce(a * w, axis) d = add.reduce(w, axis) del w elif wsh == (ash[axis],): ni = ash[axis] - r = [None]*len(ash) + r = [None] * len(ash) r[axis] = slice(None, None, 1) - w = eval ("w["+ repr(tuple(r)) + "] * ones(ash, float)") - n = add.reduce(a*w, axis, dtype=float) + w = eval ("w[" + repr(tuple(r)) + "] * ones(ash, float)") + n = add.reduce(a * w, axis, dtype=float) d = add.reduce(w, axis, dtype=float) del w, r else: @@ -520,26 +546,26 @@ wsh = (1,) if wsh == ash: w = array(w, dtype=float, mask=mask, copy=0) - n = add.reduce(a*w, axis, dtype=float) + n = add.reduce(a * w, axis, dtype=float) d = add.reduce(w, axis, dtype=float) elif wsh == (ash[axis],): ni = ash[axis] - r = [None]*len(ash) + r = [None] * len(ash) r[axis] = slice(None, None, 1) - w = eval ("w["+ repr(tuple(r)) + \ + w = eval ("w[" + repr(tuple(r)) + \ "] * masked_array(ones(ash, float), mask)") - n = add.reduce(a*w, axis, dtype=float) + n = add.reduce(a * w, axis, dtype=float) d = add.reduce(w, axis, dtype=float) else: raise ValueError, 'average: weights wrong shape.' 
del w if n is masked or d is masked: return masked - result = n/d + result = n / d del n if isinstance(result, MaskedArray): - if ((axis is None) or (axis==0 and a.ndim == 1)) and \ + if ((axis is None) or (axis == 0 and a.ndim == 1)) and \ (result.mask is nomask): result = result._data if returned: @@ -615,12 +641,12 @@ """ def _median1D(data): - counts = filled(count(data),0) + counts = filled(count(data), 0) (idx, rmd) = divmod(counts, 2) if rmd: - choice = slice(idx, idx+1) + choice = slice(idx, idx + 1) else: - choice = slice(idx-1, idx+1) + choice = slice(idx - 1, idx + 1) return data[choice].mean(0) # if overwrite_input: @@ -710,7 +736,7 @@ if axis in [None, 1, -1]: for j in np.unique(masked[1]): idxc.remove(j) - return x._data[idxr][:,idxc] + return x._data[idxr][:, idxc] def compress_rows(a): """ @@ -827,7 +853,7 @@ if not axis: a[np.unique(maskedval[0])] = masked if axis in [None, 1, -1]: - a[:,np.unique(maskedval[1])] = masked + a[:, np.unique(maskedval[1])] = masked return a def mask_rows(a, axis=None): @@ -921,7 +947,7 @@ return mask_rowcols(a, 1) -def dot(a,b, strict=False): +def dot(a, b, strict=False): """ Return the dot product of two arrays. @@ -1114,15 +1140,15 @@ ar1, rev_idx = unique(ar1, return_inverse=True) ar2 = unique(ar2) - ar = ma.concatenate( (ar1, ar2) ) + ar = ma.concatenate((ar1, ar2)) # We need this to be a stable sort, so always use 'mergesort' # here. The values from the first array should always come before # the values from the second array. order = ar.argsort(kind='mergesort') sar = ar[order] equal_adj = (sar[1:] == sar[:-1]) - flag = ma.concatenate( (equal_adj, [False] ) ) - indx = order.argsort(kind='mergesort')[:len( ar1 )] + flag = ma.concatenate((equal_adj, [False])) + indx = order.argsort(kind='mergesort')[:len(ar1)] if assume_unique: return flag[indx] @@ -1199,10 +1225,10 @@ def setmember1d(ar1, ar2): """ This function is deprecated. 
Use ma.in1d() instead.""" ar1 = ma.asanyarray(ar1) - ar2 = ma.asanyarray( ar2 ) - ar = ma.concatenate((ar1, ar2 )) - b1 = ma.zeros(ar1.shape, dtype = np.int8) - b2 = ma.ones(ar2.shape, dtype = np.int8) + ar2 = ma.asanyarray(ar2) + ar = ma.concatenate((ar1, ar2)) + b1 = ma.zeros(ar1.shape, dtype=np.int8) + b2 = ma.ones(ar2.shape, dtype=np.int8) tt = ma.concatenate((b1, b2)) # We need this to be a stable sort, so always use 'mergesort' here. The @@ -1213,12 +1239,12 @@ aux2 = tt[perm] # flag = ediff1d( aux, 1 ) == 0 flag = ma.concatenate((aux[1:] == aux[:-1], [False])) - ii = ma.where( flag * aux2 )[0] - aux = perm[ii+1] - perm[ii+1] = perm[ii] + ii = ma.where(flag * aux2)[0] + aux = perm[ii + 1] + perm[ii + 1] = perm[ii] perm[ii] = aux # - indx = perm.argsort(kind='mergesort')[:len( ar1 )] + indx = perm.argsort(kind='mergesort')[:len(ar1)] # return flag[indx] @@ -1246,7 +1272,7 @@ rowvar = True # Make sure that rowvar is either 0 or 1 rowvar = int(bool(rowvar)) - axis = 1-rowvar + axis = 1 - rowvar if rowvar: tup = (slice(None), None) else: @@ -1267,7 +1293,7 @@ x.unshare_mask() y.unshare_mask() xmask = x._mask = y._mask = ymask = common_mask - x = ma.concatenate((x,y),axis) + x = ma.concatenate((x, y), axis) xnotmask = np.logical_not(np.concatenate((xmask, ymask), axis)).astype(int) x -= x.mean(axis=rowvar)[tup] return (x, xnotmask, rowvar) @@ -1321,10 +1347,10 @@ """ (x, xnotmask, rowvar) = _covhelper(x, y, rowvar, allow_masked) if not rowvar: - fact = np.dot(xnotmask.T, xnotmask)*1. - (1 - bool(bias)) + fact = np.dot(xnotmask.T, xnotmask) * 1. - (1 - bool(bias)) result = (dot(x.T, x.conj(), strict=False) / fact).squeeze() else: - fact = np.dot(xnotmask, xnotmask.T)*1. - (1 - bool(bias)) + fact = np.dot(xnotmask, xnotmask.T) * 1. 
- (1 - bool(bias)) result = (dot(x, x.T.conj(), strict=False) / fact).squeeze() return result @@ -1369,10 +1395,10 @@ (x, xnotmask, rowvar) = _covhelper(x, y, rowvar, allow_masked) # Compute the covariance matrix if not rowvar: - fact = np.dot(xnotmask.T, xnotmask)*1. - (1 - bool(bias)) + fact = np.dot(xnotmask.T, xnotmask) * 1. - (1 - bool(bias)) c = (dot(x.T, x.conj(), strict=False) / fact).squeeze() else: - fact = np.dot(xnotmask, xnotmask.T)*1. - (1 - bool(bias)) + fact = np.dot(xnotmask, xnotmask.T) * 1. - (1 - bool(bias)) c = (dot(x, x.T.conj(), strict=False) / fact).squeeze() # Check whether we have a scalar try: @@ -1384,20 +1410,20 @@ _denom = ma.sqrt(ma.multiply.outer(diag, diag)) else: _denom = diagflat(diag) - n = x.shape[1-rowvar] + n = x.shape[1 - rowvar] if rowvar: - for i in range(n-1): - for j in range(i+1,n): + for i in range(n - 1): + for j in range(i + 1, n): _x = mask_cols(vstack((x[i], x[j]))).var(axis=1, - ddof=1-bias) - _denom[i,j] = _denom[j,i] = ma.sqrt(ma.multiply.reduce(_x)) + ddof=1 - bias) + _denom[i, j] = _denom[j, i] = ma.sqrt(ma.multiply.reduce(_x)) else: - for i in range(n-1): - for j in range(i+1,n): - _x = mask_cols(vstack((x[:,i], x[:,j]))).var(axis=1, - ddof=1-bias) - _denom[i,j] = _denom[j,i] = ma.sqrt(ma.multiply.reduce(_x)) - return c/_denom + for i in range(n - 1): + for j in range(i + 1, n): + _x = mask_cols(vstack((x[:, i], x[:, j]))).var(axis=1, + ddof=1 - bias) + _denom[i, j] = _denom[j, i] = ma.sqrt(ma.multiply.reduce(_x)) + return c / _denom #####-------------------------------------------------------------------------- #---- --- Concatenation helpers --- @@ -1418,7 +1444,7 @@ def __init__(self, axis=0): AxisConcatenator.__init__(self, axis, matrix=False) - def __getitem__(self,key): + def __getitem__(self, key): if isinstance(key, str): raise MAError, "Unavailable for masked array." 
if type(key) is not tuple: @@ -1466,7 +1492,7 @@ if final_dtypedescr is not None: for k in scalars: objs[k] = objs[k].astype(final_dtypedescr) - res = concatenate(tuple(objs),axis=self.axis) + res = concatenate(tuple(objs), axis=self.axis) return self._retval(res) class mr_class(MAxisConcatenator): @@ -1538,10 +1564,10 @@ """ m = getmask(a) if m is nomask or not np.any(m): - return [0,-1] + return [0, -1] unmasked = np.flatnonzero(~m) if len(unmasked) > 0: - return unmasked[[0,-1]] + return unmasked[[0, -1]] else: return None @@ -1591,9 +1617,9 @@ if axis is None or a.ndim == 1: return flatnotmasked_edges(a) m = getmaskarray(a) - idx = array(np.indices(a.shape), mask=np.asarray([m]*a.ndim)) + idx = array(np.indices(a.shape), mask=np.asarray([m] * a.ndim)) return [tuple([idx[i].min(axis).compressed() for i in range(a.ndim)]), - tuple([idx[i].max(axis).compressed() for i in range(a.ndim)]),] + tuple([idx[i].max(axis).compressed() for i in range(a.ndim)]), ] def flatnotmasked_contiguous(a): @@ -1635,15 +1661,15 @@ """ m = getmask(a) if m is nomask: - return (a.size, [0,-1]) + return (a.size, [0, -1]) unmasked = np.flatnonzero(~m) if len(unmasked) == 0: return None result = [] - for (k, group) in itertools.groupby(enumerate(unmasked), lambda (i,x):i-x): + for (k, group) in itertools.groupby(enumerate(unmasked), lambda (i, x):i - x): tmp = np.array([g[1] for g in group], int) # result.append((tmp.size, tuple(tmp[[0,-1]]))) - result.append( slice(tmp[0], tmp[-1]) ) + result.append(slice(tmp[0], tmp[-1])) result.sort() return result @@ -1690,19 +1716,19 @@ a = asarray(a) nd = a.ndim if nd > 2: - raise NotImplementedError,"Currently limited to atmost 2D array." + raise NotImplementedError, "Currently limited to atmost 2D array." 
if axis is None or nd == 1: return flatnotmasked_contiguous(a) # result = [] # - other = (axis+1)%2 + other = (axis + 1) % 2 idx = [0, 0] idx[axis] = slice(None, None) # for i in range(a.shape[other]): idx[other] = i - result.append( flatnotmasked_contiguous(a[idx]) ) + result.append(flatnotmasked_contiguous(a[idx])) return result @@ -1831,16 +1857,16 @@ y = mask_rows(y) my = getmask(y) if my is not nomask: - m = mask_or(mx, my[:,0]) + m = mask_or(mx, my[:, 0]) else: m = mx else: - raise TypeError,"Expected a 1D or 2D array for y!" + raise TypeError, "Expected a 1D or 2D array for y!" if m is not nomask: x[m] = y[m] = masked # Set rcond if rcond is None : - rcond = len(x)*np.finfo(x.dtype).eps + rcond = len(x) * np.finfo(x.dtype).eps # Scale x to improve condition number scale = abs(x).max() if scale != 0 : Modified: trunk/numpy/ma/tests/test_extras.py =================================================================== --- trunk/numpy/ma/tests/test_extras.py 2010-05-16 23:05:30 UTC (rev 8416) +++ trunk/numpy/ma/tests/test_extras.py 2010-05-16 23:31:21 UTC (rev 8417) @@ -9,7 +9,7 @@ __author__ = "Pierre GF Gerard-Marchant ($Author: jarrod.millman $)" __version__ = '1.0' __revision__ = "$Revision: 3473 $" -__date__ = '$Date: 2007-10-29 17:18:13 +0200 (Mon, 29 Oct 2007) $' +__date__ = '$Date: 2007-10-29 17:18:13 +0200 (Mon, 29 Oct 2007) $' import numpy as np from numpy.testing import TestCase, run_module_suite @@ -31,13 +31,13 @@ test = masked_all((2,), dtype=dt) control = array([(0, 0), (0, 0)], mask=[(1, 1), (1, 1)], dtype=dt) assert_equal(test, control) - test = masked_all((2,2), dtype=dt) + test = masked_all((2, 2), dtype=dt) control = array([[(0, 0), (0, 0)], [(0, 0), (0, 0)]], mask=[[(1, 1), (1, 1)], [(1, 1), (1, 1)]], dtype=dt) assert_equal(test, control) # Nested dtype - dt = np.dtype([('a','f'), ('b', [('ba', 'f'), ('bb', 'f')])]) + dt = np.dtype([('a', 'f'), ('b', [('ba', 'f'), ('bb', 'f')])]) test = masked_all((2,), dtype=dt) control = array([(1, (1, 1)), 
(1, (1, 1))], mask=[(1, (1, 1)), (1, (1, 1))], dtype=dt) @@ -46,7 +46,7 @@ control = array([(1, (1, 1)), (1, (1, 1))], mask=[(1, (1, 1)), (1, (1, 1))], dtype=dt) assert_equal(test, control) - test = masked_all((1,1), dtype=dt) + test = masked_all((1, 1), dtype=dt) control = array([[(1, (1, 1))]], mask=[[(1, (1, 1))]], dtype=dt) assert_equal(test, control) @@ -65,7 +65,7 @@ control = array([(10, 10), (10, 10)], mask=[(1, 1), (1, 1)], dtype=dt) assert_equal(test, control) # Nested dtype - dt = np.dtype([('a','f'), ('b', [('ba', 'f'), ('bb', 'f')])]) + dt = np.dtype([('a', 'f'), ('b', [('ba', 'f'), ('bb', 'f')])]) control = array([(1, (1, 1)), (1, (1, 1))], mask=[(1, (1, 1)), (1, (1, 1))], dtype=dt) test = masked_all_like(control) @@ -85,7 +85,7 @@ a = masked_array(np.arange(10)) a[[0, 1, 2, 6, 8, 9]] = masked test = clump_unmasked(a) - control = [slice(3, 6), slice(7, 8),] + control = [slice(3, 6), slice(7, 8), ] assert_equal(test, control) @@ -94,7 +94,7 @@ "Several tests of average. Why so many ? Good point..." def test_testAverage1(self): "Test of average." - ott = array([0.,1.,2.,3.], mask=[True, False, False, False]) + ott = array([0., 1., 2., 3.], mask=[True, False, False, False]) assert_equal(2.0, average(ott, axis=0)) assert_equal(2.0, average(ott, weights=[1., 1., 2., 1.])) result, wts = average(ott, weights=[1., 1., 2., 1.], returned=1) @@ -104,28 +104,28 @@ assert_equal(average(ott, axis=0).mask, [True]) ott = array([0., 1., 2., 3.], mask=[True, False, False, False]) ott = ott.reshape(2, 2) - ott[:,1] = masked + ott[:, 1] = masked assert_equal(average(ott, axis=0), [2.0, 0.0]) assert_equal(average(ott, axis=1).mask[0], [True]) - assert_equal([2.,0.], average(ott, axis=0)) + assert_equal([2., 0.], average(ott, axis=0)) result, wts = average(ott, axis=0, returned=1) assert_equal(wts, [1., 0.]) def test_testAverage2(self): "More tests of average." 
- w1 = [0,1,1,1,1,0] - w2 = [[0,1,1,1,1,0],[1,0,0,0,0,1]] + w1 = [0, 1, 1, 1, 1, 0] + w2 = [[0, 1, 1, 1, 1, 0], [1, 0, 0, 0, 0, 1]] x = arange(6, dtype=float_) assert_equal(average(x, axis=0), 2.5) assert_equal(average(x, axis=0, weights=w1), 2.5) - y = array([arange(6, dtype=float_), 2.0*arange(6)]) - assert_equal(average(y, None), np.add.reduce(np.arange(6))*3./12.) - assert_equal(average(y, axis=0), np.arange(6) * 3./2.) + y = array([arange(6, dtype=float_), 2.0 * arange(6)]) + assert_equal(average(y, None), np.add.reduce(np.arange(6)) * 3. / 12.) + assert_equal(average(y, axis=0), np.arange(6) * 3. / 2.) assert_equal(average(y, axis=1), [average(x, axis=0), average(x, axis=0) * 2.0]) - assert_equal(average(y, None, weights=w2), 20./6.) + assert_equal(average(y, None, weights=w2), 20. / 6.) assert_equal(average(y, axis=0, weights=w2), - [0.,1.,2.,3.,4.,10.]) + [0., 1., 2., 3., 4., 10.]) assert_equal(average(y, axis=1), [average(x, axis=0), average(x, axis=0) * 2.0]) m1 = zeros(6) @@ -139,11 +139,11 @@ assert_equal(average(masked_array(x, m5), axis=0), 0.0) assert_equal(count(average(masked_array(x, m4), axis=0)), 0) z = masked_array(y, m3) - assert_equal(average(z, None), 20./6.) - assert_equal(average(z, axis=0), [0.,1.,99.,99.,4.0, 7.5]) + assert_equal(average(z, None), 20. / 6.) + assert_equal(average(z, axis=0), [0., 1., 99., 99., 4.0, 7.5]) assert_equal(average(z, axis=1), [2.5, 5.0]) - assert_equal(average(z,axis=0, weights=w2), - [0.,1., 99., 99., 4.0, 10.0]) + assert_equal(average(z, axis=0, weights=w2), + [0., 1., 99., 99., 4.0, 10.0]) def test_testAverage3(self): "Yet more tests of average!" 
@@ -159,13 +159,13 @@ r2, w2 = average(ones((2, 2, 3)), weights=ones((2, 2, 3)), returned=1) assert_equal(shape(w2), shape(r2)) a2d = array([[1, 2], [0, 4]], float) - a2dm = masked_array(a2d, [[False, False],[True, False]]) + a2dm = masked_array(a2d, [[False, False], [True, False]]) a2da = average(a2d, axis=0) assert_equal(a2da, [0.5, 3.0]) a2dma = average(a2dm, axis=0) assert_equal(a2dma, [1.0, 3.0]) a2dma = average(a2dm, axis=None) - assert_equal(a2dma, 7./3.) + assert_equal(a2dma, 7. / 3.) a2dma = average(a2dm, axis=1) assert_equal(a2dma, [1.5, 4.0]) @@ -184,33 +184,33 @@ def test_1d(self): "Tests mr_ on 1D arrays." - assert_array_equal(mr_[1,2,3,4,5,6],array([1,2,3,4,5,6])) + assert_array_equal(mr_[1, 2, 3, 4, 5, 6], array([1, 2, 3, 4, 5, 6])) b = ones(5) - m = [1,0,0,0,0] - d = masked_array(b,mask=m) - c = mr_[d,0,0,d] - self.assertTrue(isinstance(c,MaskedArray) or isinstance(c,core.MaskedArray)) - assert_array_equal(c,[1,1,1,1,1,0,0,1,1,1,1,1]) - assert_array_equal(c.mask, mr_[m,0,0,m]) + m = [1, 0, 0, 0, 0] + d = masked_array(b, mask=m) + c = mr_[d, 0, 0, d] + self.assertTrue(isinstance(c, MaskedArray) or isinstance(c, core.MaskedArray)) + assert_array_equal(c, [1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1]) + assert_array_equal(c.mask, mr_[m, 0, 0, m]) def test_2d(self): "Tests mr_ on 2D arrays." 
- a_1 = rand(5,5) - a_2 = rand(5,5) - m_1 = np.round_(rand(5,5),0) - m_2 = np.round_(rand(5,5),0) - b_1 = masked_array(a_1,mask=m_1) - b_2 = masked_array(a_2,mask=m_2) - d = mr_['1',b_1,b_2] # append columns - self.assertTrue(d.shape == (5,10)) - assert_array_equal(d[:,:5],b_1) - assert_array_equal(d[:,5:],b_2) - assert_array_equal(d.mask, np.r_['1',m_1,m_2]) - d = mr_[b_1,b_2] - self.assertTrue(d.shape == (10,5)) - assert_array_equal(d[:5,:],b_1) - assert_array_equal(d[5:,:],b_2) - assert_array_equal(d.mask, np.r_[m_1,m_2]) + a_1 = rand(5, 5) + a_2 = rand(5, 5) + m_1 = np.round_(rand(5, 5), 0) + m_2 = np.round_(rand(5, 5), 0) + b_1 = masked_array(a_1, mask=m_1) + b_2 = masked_array(a_2, mask=m_2) + d = mr_['1', b_1, b_2] # append columns + self.assertTrue(d.shape == (5, 10)) + assert_array_equal(d[:, :5], b_1) + assert_array_equal(d[:, 5:], b_2) + assert_array_equal(d.mask, np.r_['1', m_1, m_2]) + d = mr_[b_1, b_2] + self.assertTrue(d.shape == (10, 5)) + assert_array_equal(d[:5, :], b_1) + assert_array_equal(d[5:, :], b_2) + assert_array_equal(d.mask, np.r_[m_1, m_2]) @@ -256,26 +256,26 @@ def test_contiguous(self): "Tests notmasked_contiguous" - a = masked_array(np.arange(24).reshape(3,8), - mask=[[0,0,0,0,1,1,1,1], - [1,1,1,1,1,1,1,1], - [0,0,0,0,0,0,1,0],]) + a = masked_array(np.arange(24).reshape(3, 8), + mask=[[0, 0, 0, 0, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 1, 1, 1], + [0, 0, 0, 0, 0, 0, 1, 0], ]) tmp = notmasked_contiguous(a, None) - assert_equal(tmp[-1], slice(23,23,None)) - assert_equal(tmp[-2], slice(16,21,None)) - assert_equal(tmp[-3], slice(0,3,None)) + assert_equal(tmp[-1], slice(23, 23, None)) + assert_equal(tmp[-2], slice(16, 21, None)) + assert_equal(tmp[-3], slice(0, 3, None)) # tmp = notmasked_contiguous(a, 0) self.assertTrue(len(tmp[-1]) == 1) self.assertTrue(tmp[-2] is None) - assert_equal(tmp[-3],tmp[-1]) + assert_equal(tmp[-3], tmp[-1]) self.assertTrue(len(tmp[0]) == 2) # tmp = notmasked_contiguous(a, 1) - assert_equal(tmp[0][-1], slice(0,3,None)) 
+ assert_equal(tmp[0][-1], slice(0, 3, None)) self.assertTrue(tmp[1] is None) - assert_equal(tmp[2][-1], slice(7,7,None)) - assert_equal(tmp[2][-2], slice(0,5,None)) + assert_equal(tmp[2][-1], slice(7, 7, None)) + assert_equal(tmp[2][-2], slice(0, 5, None)) @@ -283,114 +283,114 @@ "Tests 2D functions" def test_compress2d(self): "Tests compress2d" - x = array(np.arange(9).reshape(3,3), mask=[[1,0,0],[0,0,0],[0,0,0]]) - assert_equal(compress_rowcols(x), [[4,5],[7,8]] ) - assert_equal(compress_rowcols(x,0), [[3,4,5],[6,7,8]] ) - assert_equal(compress_rowcols(x,1), [[1,2],[4,5],[7,8]] ) - x = array(x._data, mask=[[0,0,0],[0,1,0],[0,0,0]]) - assert_equal(compress_rowcols(x), [[0,2],[6,8]] ) - assert_equal(compress_rowcols(x,0), [[0,1,2],[6,7,8]] ) - assert_equal(compress_rowcols(x,1), [[0,2],[3,5],[6,8]] ) - x = array(x._data, mask=[[1,0,0],[0,1,0],[0,0,0]]) - assert_equal(compress_rowcols(x), [[8]] ) - assert_equal(compress_rowcols(x,0), [[6,7,8]] ) - assert_equal(compress_rowcols(x,1,), [[2],[5],[8]] ) - x = array(x._data, mask=[[1,0,0],[0,1,0],[0,0,1]]) - assert_equal(compress_rowcols(x).size, 0 ) - assert_equal(compress_rowcols(x,0).size, 0 ) - assert_equal(compress_rowcols(x,1).size, 0 ) + x = array(np.arange(9).reshape(3, 3), mask=[[1, 0, 0], [0, 0, 0], [0, 0, 0]]) + assert_equal(compress_rowcols(x), [[4, 5], [7, 8]]) + assert_equal(compress_rowcols(x, 0), [[3, 4, 5], [6, 7, 8]]) + assert_equal(compress_rowcols(x, 1), [[1, 2], [4, 5], [7, 8]]) + x = array(x._data, mask=[[0, 0, 0], [0, 1, 0], [0, 0, 0]]) + assert_equal(compress_rowcols(x), [[0, 2], [6, 8]]) + assert_equal(compress_rowcols(x, 0), [[0, 1, 2], [6, 7, 8]]) + assert_equal(compress_rowcols(x, 1), [[0, 2], [3, 5], [6, 8]]) + x = array(x._data, mask=[[1, 0, 0], [0, 1, 0], [0, 0, 0]]) + assert_equal(compress_rowcols(x), [[8]]) + assert_equal(compress_rowcols(x, 0), [[6, 7, 8]]) + assert_equal(compress_rowcols(x, 1,), [[2], [5], [8]]) + x = array(x._data, mask=[[1, 0, 0], [0, 1, 0], [0, 0, 1]]) + 
assert_equal(compress_rowcols(x).size, 0) + assert_equal(compress_rowcols(x, 0).size, 0) + assert_equal(compress_rowcols(x, 1).size, 0) # def test_mask_rowcols(self): "Tests mask_rowcols." - x = array(np.arange(9).reshape(3,3), mask=[[1,0,0],[0,0,0],[0,0,0]]) - assert_equal(mask_rowcols(x).mask, [[1,1,1],[1,0,0],[1,0,0]] ) - assert_equal(mask_rowcols(x,0).mask, [[1,1,1],[0,0,0],[0,0,0]] ) - assert_equal(mask_rowcols(x,1).mask, [[1,0,0],[1,0,0],[1,0,0]] ) - x = array(x._data, mask=[[0,0,0],[0,1,0],[0,0,0]]) - assert_equal(mask_rowcols(x).mask, [[0,1,0],[1,1,1],[0,1,0]] ) - assert_equal(mask_rowcols(x,0).mask, [[0,0,0],[1,1,1],[0,0,0]] ) - assert_equal(mask_rowcols(x,1).mask, [[0,1,0],[0,1,0],[0,1,0]] ) - x = array(x._data, mask=[[1,0,0],[0,1,0],[0,0,0]]) - assert_equal(mask_rowcols(x).mask, [[1,1,1],[1,1,1],[1,1,0]] ) - assert_equal(mask_rowcols(x,0).mask, [[1,1,1],[1,1,1],[0,0,0]] ) - assert_equal(mask_rowcols(x,1,).mask, [[1,1,0],[1,1,0],[1,1,0]] ) - x = array(x._data, mask=[[1,0,0],[0,1,0],[0,0,1]]) + x = array(np.arange(9).reshape(3, 3), mask=[[1, 0, 0], [0, 0, 0], [0, 0, 0]]) + assert_equal(mask_rowcols(x).mask, [[1, 1, 1], [1, 0, 0], [1, 0, 0]]) + assert_equal(mask_rowcols(x, 0).mask, [[1, 1, 1], [0, 0, 0], [0, 0, 0]]) + assert_equal(mask_rowcols(x, 1).mask, [[1, 0, 0], [1, 0, 0], [1, 0, 0]]) + x = array(x._data, mask=[[0, 0, 0], [0, 1, 0], [0, 0, 0]]) + assert_equal(mask_rowcols(x).mask, [[0, 1, 0], [1, 1, 1], [0, 1, 0]]) + assert_equal(mask_rowcols(x, 0).mask, [[0, 0, 0], [1, 1, 1], [0, 0, 0]]) + assert_equal(mask_rowcols(x, 1).mask, [[0, 1, 0], [0, 1, 0], [0, 1, 0]]) + x = array(x._data, mask=[[1, 0, 0], [0, 1, 0], [0, 0, 0]]) + assert_equal(mask_rowcols(x).mask, [[1, 1, 1], [1, 1, 1], [1, 1, 0]]) + assert_equal(mask_rowcols(x, 0).mask, [[1, 1, 1], [1, 1, 1], [0, 0, 0]]) + assert_equal(mask_rowcols(x, 1,).mask, [[1, 1, 0], [1, 1, 0], [1, 1, 0]]) + x = array(x._data, mask=[[1, 0, 0], [0, 1, 0], [0, 0, 1]]) self.assertTrue(mask_rowcols(x).all() is masked) - 
self.assertTrue(mask_rowcols(x,0).all() is masked) - self.assertTrue(mask_rowcols(x,1).all() is masked) + self.assertTrue(mask_rowcols(x, 0).all() is masked) + self.assertTrue(mask_rowcols(x, 1).all() is masked) self.assertTrue(mask_rowcols(x).mask.all()) - self.assertTrue(mask_rowcols(x,0).mask.all()) - self.assertTrue(mask_rowcols(x,1).mask.all()) + self.assertTrue(mask_rowcols(x, 0).mask.all()) + self.assertTrue(mask_rowcols(x, 1).mask.all()) # def test_dot(self): "Tests dot product" - n = np.arange(1,7) + n = np.arange(1, 7) # - m = [1,0,0,0,0,0] - a = masked_array(n, mask=m).reshape(2,3) - b = masked_array(n, mask=m).reshape(3,2) - c = dot(a,b,True) - assert_equal(c.mask, [[1,1],[1,0]]) - c = dot(b,a,True) - assert_equal(c.mask, [[1,1,1],[1,0,0],[1,0,0]]) - c = dot(a,b,False) + m = [1, 0, 0, 0, 0, 0] + a = masked_array(n, mask=m).reshape(2, 3) + b = masked_array(n, mask=m).reshape(3, 2) + c = dot(a, b, True) + assert_equal(c.mask, [[1, 1], [1, 0]]) + c = dot(b, a, True) + assert_equal(c.mask, [[1, 1, 1], [1, 0, 0], [1, 0, 0]]) + c = dot(a, b, False) assert_equal(c, np.dot(a.filled(0), b.filled(0))) - c = dot(b,a,False) + c = dot(b, a, False) assert_equal(c, np.dot(b.filled(0), a.filled(0))) # - m = [0,0,0,0,0,1] - a = masked_array(n, mask=m).reshape(2,3) - b = masked_array(n, mask=m).reshape(3,2) - c = dot(a,b,True) - assert_equal(c.mask,[[0,1],[1,1]]) - c = dot(b,a,True) - assert_equal(c.mask, [[0,0,1],[0,0,1],[1,1,1]]) - c = dot(a,b,False) + m = [0, 0, 0, 0, 0, 1] + a = masked_array(n, mask=m).reshape(2, 3) + b = masked_array(n, mask=m).reshape(3, 2) + c = dot(a, b, True) + assert_equal(c.mask, [[0, 1], [1, 1]]) + c = dot(b, a, True) + assert_equal(c.mask, [[0, 0, 1], [0, 0, 1], [1, 1, 1]]) + c = dot(a, b, False) assert_equal(c, np.dot(a.filled(0), b.filled(0))) - assert_equal(c, dot(a,b)) - c = dot(b,a,False) + assert_equal(c, dot(a, b)) + c = dot(b, a, False) assert_equal(c, np.dot(b.filled(0), a.filled(0))) # - m = [0,0,0,0,0,0] - a = masked_array(n, 
mask=m).reshape(2,3) - b = masked_array(n, mask=m).reshape(3,2) - c = dot(a,b) - assert_equal(c.mask,nomask) - c = dot(b,a) - assert_equal(c.mask,nomask) + m = [0, 0, 0, 0, 0, 0] + a = masked_array(n, mask=m).reshape(2, 3) + b = masked_array(n, mask=m).reshape(3, 2) + c = dot(a, b) + assert_equal(c.mask, nomask) + c = dot(b, a) + assert_equal(c.mask, nomask) # - a = masked_array(n, mask=[1,0,0,0,0,0]).reshape(2,3) - b = masked_array(n, mask=[0,0,0,0,0,0]).reshape(3,2) - c = dot(a,b,True) - assert_equal(c.mask,[[1,1],[0,0]]) - c = dot(a,b,False) - assert_equal(c, np.dot(a.filled(0),b.filled(0))) - c = dot(b,a,True) - assert_equal(c.mask,[[1,0,0],[1,0,0],[1,0,0]]) - c = dot(b,a,False) - assert_equal(c, np.dot(b.filled(0),a.filled(0))) + a = masked_array(n, mask=[1, 0, 0, 0, 0, 0]).reshape(2, 3) + b = masked_array(n, mask=[0, 0, 0, 0, 0, 0]).reshape(3, 2) + c = dot(a, b, True) + assert_equal(c.mask, [[1, 1], [0, 0]]) + c = dot(a, b, False) + assert_equal(c, np.dot(a.filled(0), b.filled(0))) + c = dot(b, a, True) + assert_equal(c.mask, [[1, 0, 0], [1, 0, 0], [1, 0, 0]]) + c = dot(b, a, False) + assert_equal(c, np.dot(b.filled(0), a.filled(0))) # - a = masked_array(n, mask=[0,0,0,0,0,1]).reshape(2,3) - b = masked_array(n, mask=[0,0,0,0,0,0]).reshape(3,2) - c = dot(a,b,True) - assert_equal(c.mask,[[0,0],[1,1]]) - c = dot(a,b) - assert_equal(c, np.dot(a.filled(0),b.filled(0))) - c = dot(b,a,True) - assert_equal(c.mask,[[0,0,1],[0,0,1],[0,0,1]]) - c = dot(b,a,False) + a = masked_array(n, mask=[0, 0, 0, 0, 0, 1]).reshape(2, 3) + b = masked_array(n, mask=[0, 0, 0, 0, 0, 0]).reshape(3, 2) + c = dot(a, b, True) + assert_equal(c.mask, [[0, 0], [1, 1]]) + c = dot(a, b) + assert_equal(c, np.dot(a.filled(0), b.filled(0))) + c = dot(b, a, True) + assert_equal(c.mask, [[0, 0, 1], [0, 0, 1], [0, 0, 1]]) + c = dot(b, a, False) assert_equal(c, np.dot(b.filled(0), a.filled(0))) # - a = masked_array(n, mask=[0,0,0,0,0,1]).reshape(2,3) - b = masked_array(n, 
mask=[0,0,1,0,0,0]).reshape(3,2) - c = dot(a,b,True) - assert_equal(c.mask,[[1,0],[1,1]]) - c = dot(a,b,False) - assert_equal(c, np.dot(a.filled(0),b.filled(0))) - c = dot(b,a,True) - assert_equal(c.mask,[[0,0,1],[1,1,1],[0,0,1]]) - c = dot(b,a,False) - assert_equal(c, np.dot(b.filled(0),a.filled(0))) + a = masked_array(n, mask=[0, 0, 0, 0, 0, 1]).reshape(2, 3) + b = masked_array(n, mask=[0, 0, 1, 0, 0, 0]).reshape(3, 2) + c = dot(a, b, True) + assert_equal(c.mask, [[1, 0], [1, 1]]) + c = dot(a, b, False) + assert_equal(c, np.dot(a.filled(0), b.filled(0))) + c = dot(b, a, True) + assert_equal(c.mask, [[0, 0, 1], [1, 1, 1], [0, 0, 1]]) + c = dot(b, a, False) + assert_equal(c, np.dot(b.filled(0), a.filled(0))) @@ -398,51 +398,63 @@ # "Tests 2D functions" def test_3d(self): - a = arange(12.).reshape(2,2,3) + a = arange(12.).reshape(2, 2, 3) def myfunc(b): return b[1] - xa = apply_along_axis(myfunc,2,a) - assert_equal(xa,[[1,4],[7,10]]) + xa = apply_along_axis(myfunc, 2, a) + assert_equal(xa, [[1, 4], [7, 10]]) +class TestApplyOverAxes(TestCase): + "Tests apply_over_axes" + def test_basic(self): + a = arange(24).reshape(2, 3, 4) + test = apply_over_axes(np.sum, a, [0, 2]) + ctrl = np.array([[[ 60], [ 92], [124]]]) + assert_equal(test, ctrl) + a[(a % 2).astype(np.bool)] = masked + test = apply_over_axes(np.sum, a, [0, 2]) + ctrl = np.array([[[ 30], [ 44], [60]]]) + + class TestMedian(TestCase): # def test_2d(self): "Tests median w/ 2D" - (n,p) = (101,30) - x = masked_array(np.linspace(-1.,1.,n),) + (n, p) = (101, 30) + x = masked_array(np.linspace(-1., 1., n),) x[:10] = x[-10:] = masked - z = masked_array(np.empty((n,p), dtype=float)) - z[:,0] = x[:] + z = masked_array(np.empty((n, p), dtype=float)) + z[:, 0] = x[:] idx = np.arange(len(x)) - for i in range(1,p): + for i in range(1, p): np.random.shuffle(idx) - z[:,i] = x[idx] - assert_equal(median(z[:,0]), 0) + z[:, i] = x[idx] + assert_equal(median(z[:, 0]), 0) assert_equal(median(z), 0) assert_equal(median(z, axis=0), 
np.zeros(p)) assert_equal(median(z.T, axis=1), np.zeros(p)) # def test_2d_waxis(self): "Tests median w/ 2D arrays and different axis." - x = masked_array(np.arange(30).reshape(10,3)) + x = masked_array(np.arange(30).reshape(10, 3)) x[:3] = x[-3:] = masked assert_equal(median(x), 14.5) - assert_equal(median(x, axis=0), [13.5,14.5,15.5]) - assert_equal(median(x,axis=1), [0,0,0,10,13,16,19,0,0,0]) - assert_equal(median(x,axis=1).mask, [1,1,1,0,0,0,0,1,1,1]) + assert_equal(median(x, axis=0), [13.5, 14.5, 15.5]) + assert_equal(median(x, axis=1), [0, 0, 0, 10, 13, 16, 19, 0, 0, 0]) + assert_equal(median(x, axis=1).mask, [1, 1, 1, 0, 0, 0, 0, 1, 1, 1]) # def test_3d(self): "Tests median w/ 3D" - x = np.ma.arange(24).reshape(3,4,2) - x[x%3==0] = masked - assert_equal(median(x,0), [[12,9],[6,15],[12,9],[18,15]]) - x.shape = (4,3,2) - assert_equal(median(x,0),[[99,10],[11,99],[13,14]]) - x = np.ma.arange(24).reshape(4,3,2) - x[x%5==0] = masked - assert_equal(median(x,0), [[12,10],[8,9],[16,17]]) + x = np.ma.arange(24).reshape(3, 4, 2) + x[x % 3 == 0] = masked + assert_equal(median(x, 0), [[12, 9], [6, 15], [12, 9], [18, 15]]) + x.shape = (4, 3, 2) + assert_equal(median(x, 0), [[99, 10], [11, 99], [13, 14]]) + x = np.ma.arange(24).reshape(4, 3, 2) + x[x % 5 == 0] = masked + assert_equal(median(x, 0), [[12, 10], [8, 9], [16, 17]]) @@ -461,7 +473,7 @@ def test_2d_wo_missing(self): "Test cov on 1 2D variable w/o missing values" - x = self.data.reshape(3,4) + x = self.data.reshape(3, 4) assert_almost_equal(np.cov(x), cov(x)) assert_almost_equal(np.cov(x, rowvar=False), cov(x, rowvar=False)) assert_almost_equal(np.cov(x, rowvar=False, bias=True), @@ -495,19 +507,19 @@ "Test cov on 2D variable w/ missing value" x = self.data x[-1] = masked - x = x.reshape(3,4) + x = x.reshape(3, 4) valid = np.logical_not(getmaskarray(x)).astype(int) frac = np.dot(valid, valid.T) - xf = (x - x.mean(1)[:,None]).filled(0) - assert_almost_equal(cov(x), np.cov(xf) * (x.shape[1]-1) / (frac - 1.)) + xf = 
(x - x.mean(1)[:, None]).filled(0) + assert_almost_equal(cov(x), np.cov(xf) * (x.shape[1] - 1) / (frac - 1.)) assert_almost_equal(cov(x, bias=True), np.cov(xf, bias=True) * x.shape[1] / frac) frac = np.dot(valid.T, valid) xf = (x - x.mean(0)).filled(0) assert_almost_equal(cov(x, rowvar=False), - np.cov(xf, rowvar=False) * (x.shape[0]-1)/(frac - 1.)) + np.cov(xf, rowvar=False) * (x.shape[0] - 1) / (frac - 1.)) assert_almost_equal(cov(x, rowvar=False, bias=True), - np.cov(xf, rowvar=False, bias=True) * x.shape[0]/frac) + np.cov(xf, rowvar=False, bias=True) * x.shape[0] / frac) @@ -527,7 +539,7 @@ def test_2d_wo_missing(self): "Test corrcoef on 1 2D variable w/o missing values" - x = self.data.reshape(3,4) + x = self.data.reshape(3, 4) assert_almost_equal(np.corrcoef(x), corrcoef(x)) assert_almost_equal(np.corrcoef(x, rowvar=False), corrcoef(x, rowvar=False)) @@ -562,11 +574,11 @@ "Test corrcoef on 2D variable w/ missing value" x = self.data x[-1] = masked - x = x.reshape(3,4) + x = x.reshape(3, 4) test = corrcoef(x) control = np.corrcoef(x) - assert_almost_equal(test[:-1,:-1], control[:-1,:-1]) + assert_almost_equal(test[:-1, :-1], control[:-1, :-1]) @@ -576,27 +588,27 @@ "Tests polyfit" # On ndarrays x = np.random.rand(10) - y = np.random.rand(20).reshape(-1,2) - assert_almost_equal(polyfit(x,y,3),np.polyfit(x,y,3)) + y = np.random.rand(20).reshape(-1, 2) + assert_almost_equal(polyfit(x, y, 3), np.polyfit(x, y, 3)) # ON 1D maskedarrays x = x.view(MaskedArray) x[0] = masked y = y.view(MaskedArray) - y[0,0] = y[-1,-1] = masked + y[0, 0] = y[-1, -1] = masked # - (C,R,K,S,D) = polyfit(x,y[:,0],3,full=True) - (c,r,k,s,d) = np.polyfit(x[1:], y[1:,0].compressed(), 3, full=True) - for (a,a_) in zip((C,R,K,S,D),(c,r,k,s,d)): + (C, R, K, S, D) = polyfit(x, y[:, 0], 3, full=True) + (c, r, k, s, d) = np.polyfit(x[1:], y[1:, 0].compressed(), 3, full=True) + for (a, a_) in zip((C, R, K, S, D), (c, r, k, s, d)): assert_almost_equal(a, a_) # - (C,R,K,S,D) = 
polyfit(x,y[:,-1],3,full=True) - (c,r,k,s,d) = np.polyfit(x[1:-1], y[1:-1,-1], 3, full=True) - for (a,a_) in zip((C,R,K,S,D),(c,r,k,s,d)): + (C, R, K, S, D) = polyfit(x, y[:, -1], 3, full=True) + (c, r, k, s, d) = np.polyfit(x[1:-1], y[1:-1, -1], 3, full=True) + for (a, a_) in zip((C, R, K, S, D), (c, r, k, s, d)): assert_almost_equal(a, a_) # - (C,R,K,S,D) = polyfit(x,y,3,full=True) - (c,r,k,s,d) = np.polyfit(x[1:-1], y[1:-1,:], 3, full=True) - for (a,a_) in zip((C,R,K,S,D),(c,r,k,s,d)): + (C, R, K, S, D) = polyfit(x, y, 3, full=True) + (c, r, k, s, d) = np.polyfit(x[1:-1], y[1:-1, :], 3, full=True) + for (a, a_) in zip((C, R, K, S, D), (c, r, k, s, d)): assert_almost_equal(a, a_) @@ -632,7 +644,7 @@ "Test all masked" data = masked_array([1, 1, 1], mask=True) test = unique(data, return_index=True, return_inverse=True) - assert_equal(test[0], masked_array([1,], mask=[True])) + assert_equal(test[0], masked_array([1, ], mask=[True])) assert_equal(test[1], [0]) assert_equal(test[2], [0, 0, 0]) # @@ -642,10 +654,10 @@ assert_equal(test[0], masked_array(masked)) assert_equal(test[1], [0]) assert_equal(test[2], [0]) - + def test_ediff1d(self): "Tests mediff1d" - x = masked_array(np.arange(5), mask=[1,0,0,0,1]) + x = masked_array(np.arange(5), mask=[1, 0, 0, 0, 1]) control = array([1, 1, 1, 4], mask=[1, 0, 0, 1]) test = ediff1d(x) assert_equal(test, control) @@ -654,14 +666,14 @@ # def test_ediff1d_tobegin(self): "Test ediff1d w/ to_begin" - x = masked_array(np.arange(5), mask=[1,0,0,0,1]) + x = masked_array(np.arange(5), mask=[1, 0, 0, 0, 1]) test = ediff1d(x, to_begin=masked) control = array([0, 1, 1, 1, 4], mask=[1, 1, 0, 0, 1]) assert_equal(test, control) assert_equal(test.data, control.data) assert_equal(test.mask, control.mask) # - test = ediff1d(x, to_begin=[1,2,3]) + test = ediff1d(x, to_begin=[1, 2, 3]) control = array([1, 2, 3, 1, 1, 1, 4], mask=[0, 0, 0, 1, 0, 0, 1]) assert_equal(test, control) assert_equal(test.data, control.data) @@ -669,14 +681,14 @@ # def 
test_ediff1d_toend(self): "Test ediff1d w/ to_end" - x = masked_array(np.arange(5), mask=[1,0,0,0,1]) + x = masked_array(np.arange(5), mask=[1, 0, 0, 0, 1]) test = ediff1d(x, to_end=masked) control = array([1, 1, 1, 4, 0], mask=[1, 0, 0, 1, 1]) assert_equal(test, control) assert_equal(test.data, control.data) assert_equal(test.mask, control.mask) # - test = ediff1d(x, to_end=[1,2,3]) + test = ediff1d(x, to_end=[1, 2, 3]) control = array([1, 1, 1, 4, 1, 2, 3], mask=[1, 0, 0, 1, 0, 0, 0]) assert_equal(test, control) assert_equal(test.data, control.data) @@ -684,14 +696,14 @@ # def test_ediff1d_tobegin_toend(self): "Test ediff1d w/ to_begin and to_end" - x = masked_array(np.arange(5), mask=[1,0,0,0,1]) + x = masked_array(np.arange(5), mask=[1, 0, 0, 0, 1]) test = ediff1d(x, to_end=masked, to_begin=masked) control = array([0, 1, 1, 1, 4, 0], mask=[1, 1, 0, 0, 1, 1]) assert_equal(test, control) assert_equal(test.data, control.data) assert_equal(test.mask, control.mask) # - test = ediff1d(x, to_end=[1,2,3], to_begin=masked) + test = ediff1d(x, to_end=[1, 2, 3], to_begin=masked) control = array([0, 1, 1, 1, 4, 1, 2, 3], mask=[1, 1, 0, 0, 1, 0, 0, 0]) assert_equal(test, control) assert_equal(test.data, control.data) @@ -735,8 +747,8 @@ test = setxor1d(a, b) assert_equal(test, array([3, 4, 7, -1], mask=[0, 0, 0, 1])) # - a = array( [1, 2, 3] ) - b = array( [6, 5, 4] ) + a = array([1, 2, 3]) + b = array([6, 5, 4]) test = setxor1d(a, b) assert(isinstance(test, MaskedArray)) assert_equal(test, [1, 2, 3, 4, 5, 6]) @@ -747,10 +759,10 @@ assert(isinstance(test, MaskedArray)) assert_equal(test, [1, 2, 3, 4, 5, 6]) # - assert_array_equal([], setxor1d([],[])) + assert_array_equal([], setxor1d([], [])) - def test_in1d( self ): + def test_in1d(self): "Test in1d" a = array([1, 2, 5, 7, -1], mask=[0, 0, 0, 0, 1]) b = array([1, 2, 3, 4, 5, -1], mask=[0, 0, 0, 0, 0, 1]) @@ -762,10 +774,10 @@ test = in1d(a, b) assert_equal(test, [True, True, False, True, True]) # - assert_array_equal([], 
in1d([],[])) + assert_array_equal([], in1d([], [])) - def test_union1d( self ): + def test_union1d(self): "Test union1d" a = array([1, 2, 5, 7, 5, -1], mask=[0, 0, 0, 0, 0, 1]) b = array([1, 2, 3, 4, 5, -1], mask=[0, 0, 0, 0, 0, 1]) @@ -773,10 +785,10 @@ control = array([1, 2, 3, 4, 5, 7, -1], mask=[0, 0, 0, 0, 0, 0, 1]) assert_equal(test, control) # - assert_array_equal([], union1d([],[])) + assert_array_equal([], union1d([], [])) - def test_setdiff1d( self ): + def test_setdiff1d(self): "Test setdiff1d" a = array([6, 5, 4, 7, 7, 1, 2, 1], mask=[0, 0, 0, 0, 0, 0, 0, 1]) b = array([2, 4, 3, 3, 2, 1, 5]) @@ -790,9 +802,9 @@ def test_setdiff1d_char_array(self): "Test setdiff1d_charray" - a = np.array(['a','b','c']) - b = np.array(['a','b','s']) - assert_array_equal(setdiff1d(a,b), np.array(['c'])) + a = np.array(['a', 'b', 'c']) + b = np.array(['a', 'b', 's']) + assert_array_equal(setdiff1d(a, b), np.array(['c'])) class TestShapeBase(TestCase): From numpy-svn at scipy.org Tue May 18 05:48:25 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Tue, 18 May 2010 04:48:25 -0500 (CDT) Subject: [Numpy-svn] r8418 - in trunk/numpy/core/src: multiarray umath Message-ID: <20100518094825.EFF4439CAEA@scipy.org> Author: ptvirtan Date: 2010-05-18 04:48:25 -0500 (Tue, 18 May 2010) New Revision: 8418 Modified: trunk/numpy/core/src/multiarray/common.c trunk/numpy/core/src/multiarray/descriptor.c trunk/numpy/core/src/multiarray/scalarapi.c trunk/numpy/core/src/umath/loops.c.src Log: STY: core: add some parentheses Modified: trunk/numpy/core/src/multiarray/common.c =================================================================== --- trunk/numpy/core/src/multiarray/common.c 2010-05-16 23:31:21 UTC (rev 8417) +++ trunk/numpy/core/src/multiarray/common.c 2010-05-18 09:48:25 UTC (rev 8418) @@ -602,7 +602,7 @@ * and unpickled array can be set and reset writeable * -- could be abused -- */ - if PyString_Check(base) { + if (PyString_Check(base)) { return TRUE; } if 
(PyObject_AsWriteBuffer(base, &dummy, &n) < 0) { Modified: trunk/numpy/core/src/multiarray/descriptor.c =================================================================== --- trunk/numpy/core/src/multiarray/descriptor.c 2010-05-16 23:31:21 UTC (rev 8417) +++ trunk/numpy/core/src/multiarray/descriptor.c 2010-05-18 09:48:25 UTC (rev 8418) @@ -209,7 +209,7 @@ type->elsize = itemsize; } } - else if PyDict_Check(val) { + else if (PyDict_Check(val)) { /* Assume it's a metadata dictionary */ if (PyDict_Merge(type->metadata, val, 0) == -1) { Py_DECREF(type); Modified: trunk/numpy/core/src/multiarray/scalarapi.c =================================================================== --- trunk/numpy/core/src/multiarray/scalarapi.c 2010-05-16 23:31:21 UTC (rev 8417) +++ trunk/numpy/core/src/multiarray/scalarapi.c 2010-05-18 09:48:25 UTC (rev 8418) @@ -658,7 +658,7 @@ if (obj == NULL) { return NULL; } - if PyTypeNum_ISDATETIME(type_num) { + if (PyTypeNum_ISDATETIME(type_num)) { /* * We need to copy the resolution information over to the scalar * Get the void * from the metadata dictionary @@ -674,7 +674,7 @@ memcpy(&(((PyDatetimeScalarObject *)obj)->obmeta), dt_data, sizeof(PyArray_DatetimeMetaData)); } - if PyTypeNum_ISFLEXIBLE(type_num) { + if (PyTypeNum_ISFLEXIBLE(type_num)) { if (type_num == PyArray_STRING) { destptr = PyString_AS_STRING(obj); ((PyStringObject *)obj)->ob_shash = -1; Modified: trunk/numpy/core/src/umath/loops.c.src =================================================================== --- trunk/numpy/core/src/umath/loops.c.src 2010-05-16 23:31:21 UTC (rev 8417) +++ trunk/numpy/core/src/umath/loops.c.src 2010-05-18 09:48:25 UTC (rev 8418) @@ -416,7 +416,7 @@ if (result == NULL) { return; } - if PyTuple_Check(result) { + if (PyTuple_Check(result)) { if (nout != PyTuple_Size(result)) { Py_DECREF(result); return; From numpy-svn at scipy.org Fri May 21 01:35:58 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Fri, 21 May 2010 00:35:58 -0500 (CDT) 
Subject: [Numpy-svn] r8419 - in trunk/numpy/polynomial: . tests Message-ID: <20100521053558.F1C7139CAF2@scipy.org> Author: charris Date: 2010-05-21 00:35:58 -0500 (Fri, 21 May 2010) New Revision: 8419 Modified: trunk/numpy/polynomial/chebyshev.py trunk/numpy/polynomial/polynomial.py trunk/numpy/polynomial/tests/test_chebyshev.py trunk/numpy/polynomial/tests/test_polynomial.py Log: ENH: 1) Let {poly,cheb}int accept 0 for the number of integrations. 2) Let {poly,cheb}(int,der} accept floating integers for number of integrations or derivations, raise ValueError otherwise. 3) Add tests for same. Modified: trunk/numpy/polynomial/chebyshev.py =================================================================== --- trunk/numpy/polynomial/chebyshev.py 2010-05-18 09:48:25 UTC (rev 8418) +++ trunk/numpy/polynomial/chebyshev.py 2010-05-21 05:35:58 UTC (rev 8419) @@ -846,25 +846,29 @@ array([ 12., 96.]) """ - # cs is a trimmed copy - [cs] = pu.as_series([cs]) - if m < 0 : - raise ValueError, "The order of derivation must be positive" + cnt = int(m) + + if cnt != m: + raise ValueError, "The order of derivation must be integer" + if cnt < 0 : + raise ValueError, "The order of derivation must be non-negative" if not np.isscalar(scl) : raise ValueError, "The scl parameter must be a scalar" - if m == 0 : + # cs is a trimmed copy + [cs] = pu.as_series([cs]) + if cnt == 0: return cs - elif m >= len(cs) : + elif cnt >= len(cs): return cs[:1]*0 else : zs = _cseries_to_zseries(cs) - for i in range(m) : + for i in range(cnt): zs = _zseries_der(zs)*scl return _zseries_to_cseries(zs) -def chebint(cs, m=1, k=[], lbnd=0, scl=1) : +def chebint(cs, m=1, k=[], lbnd=0, scl=1): """ Integrate a Chebyshev series. 
@@ -941,11 +945,15 @@ array([-1., 1., -1., -1.]) """ + cnt = int(m) if np.isscalar(k) : k = [k] - if m < 1 : - raise ValueError, "The order of integration must be positive" - if len(k) > m : + + if cnt != m: + raise ValueError, "The order of integration must be integer" + if cnt < 0 : + raise ValueError, "The order of integration must be non-negative" + if len(k) > cnt : raise ValueError, "Too many integration constants" if not np.isscalar(lbnd) : raise ValueError, "The lbnd parameter must be a scalar" @@ -954,13 +962,16 @@ # cs is a trimmed copy [cs] = pu.as_series([cs]) - k = list(k) + [0]*(m - len(k)) - for i in range(m) : - zs = _cseries_to_zseries(cs)*scl - zs = _zseries_int(zs) - cs = _zseries_to_cseries(zs) - cs[0] += k[i] - chebval(lbnd, cs) - return cs + if cnt == 0: + return cs + else: + k = list(k) + [0]*(cnt - len(k)) + for i in range(cnt) : + zs = _cseries_to_zseries(cs)*scl + zs = _zseries_int(zs) + cs = _zseries_to_cseries(zs) + cs[0] += k[i] - chebval(lbnd, cs) + return cs def chebval(x, cs): """Evaluate a Chebyshev series. Modified: trunk/numpy/polynomial/polynomial.py =================================================================== --- trunk/numpy/polynomial/polynomial.py 2010-05-18 09:48:25 UTC (rev 8418) +++ trunk/numpy/polynomial/polynomial.py 2010-05-21 05:35:58 UTC (rev 8419) @@ -410,7 +410,7 @@ prd = np.convolve(prd, cs) return prd -def polyder(cs, m=1, scl=1) : +def polyder(cs, m=1, scl=1): """ Differentiate a polynomial. 
@@ -454,25 +454,29 @@ array([ 6., 24.]) """ + cnt = int(m) + + if cnt != m: + raise ValueError, "The order of derivation must be integer" + if cnt < 0: + raise ValueError, "The order of derivation must be non-negative" + if not np.isscalar(scl): + raise ValueError, "The scl parameter must be a scalar" + # cs is a trimmed copy [cs] = pu.as_series([cs]) - if m < 0 : - raise ValueError, "The order of derivation must be positive" - if not np.isscalar(scl) : - raise ValueError, "The scl parameter must be a scalar" - - if m == 0 : + if cnt == 0: return cs - elif m >= len(cs) : + elif cnt >= len(cs): return cs[:1]*0 else : n = len(cs) d = np.arange(n)*scl - for i in range(m) : + for i in range(cnt): cs[i:] *= d[:n-i] return cs[i+1:].copy() -def polyint(cs, m=1, k=[], lbnd=0, scl=1) : +def polyint(cs, m=1, k=[], lbnd=0, scl=1): """ Integrate a polynomial. @@ -544,11 +548,15 @@ array([ 0., -2., -2., -2.]) """ + cnt = int(m) if np.isscalar(k) : k = [k] - if m < 1 : - raise ValueError, "The order of integration must be positive" - if len(k) > m : + + if cnt != m: + raise ValueError, "The order of integration must be integer" + if cnt < 0 : + raise ValueError, "The order of integration must be non-negative" + if len(k) > cnt : raise ValueError, "Too many integration constants" if not np.isscalar(lbnd) : raise ValueError, "The lbnd parameter must be a scalar" @@ -557,14 +565,17 @@ # cs is a trimmed copy [cs] = pu.as_series([cs]) - k = list(k) + [0]*(m - len(k)) - fac = np.arange(1, len(cs) + m)/scl - ret = np.zeros(len(cs) + m, dtype=cs.dtype) - ret[m:] = cs - for i in range(m) : - ret[m - i:] /= fac[:len(cs) + i] - ret[m - i - 1] += k[i] - polyval(lbnd, ret[m - i - 1:]) - return ret + if cnt == 0: + return cs + else: + k = list(k) + [0]*(cnt - len(k)) + fac = np.arange(1, len(cs) + cnt)/scl + ret = np.zeros(len(cs) + cnt, dtype=cs.dtype) + ret[cnt:] = cs + for i in range(cnt) : + ret[cnt - i:] /= fac[:len(cs) + i] + ret[cnt - i - 1] += k[i] - polyval(lbnd, ret[cnt - i - 1:]) + 
return ret def polyval(x, cs): """ Modified: trunk/numpy/polynomial/tests/test_chebyshev.py =================================================================== --- trunk/numpy/polynomial/tests/test_chebyshev.py 2010-05-18 09:48:25 UTC (rev 8418) +++ trunk/numpy/polynomial/tests/test_chebyshev.py 2010-05-21 05:35:58 UTC (rev 8419) @@ -136,6 +136,7 @@ def test_chebint(self) : # check exceptions + assert_raises(ValueError, ch.chebint, [0], .5) assert_raises(ValueError, ch.chebint, [0], -1) assert_raises(ValueError, ch.chebint, [0], 1, [0,0]) assert_raises(ValueError, ch.chebint, [0], 1, lbnd=[0,0]) @@ -211,18 +212,22 @@ def test_chebder(self) : # check exceptions + assert_raises(ValueError, ch.chebder, [0], .5) assert_raises(ValueError, ch.chebder, [0], -1) + # check that zeroth deriviative does nothing for i in range(5) : tgt = [1] + [0]*i res = ch.chebder(tgt, m=0) assert_equal(trim(res), trim(tgt)) + # check that derivation is the inverse of integration for i in range(5) : for j in range(2,5) : tgt = [1] + [0]*i res = ch.chebder(ch.chebint(tgt, m=j), m=j) assert_almost_equal(trim(res), trim(tgt)) + # check derivation with scaling for i in range(5) : for j in range(2,5) : @@ -258,6 +263,7 @@ for i in range(4) : coef = [0]*i + [1] assert_almost_equal(v[...,i], ch.chebval(x, coef)) + # check for 2d x x = np.array([[1,2],[3,4],[5,6]]) v = ch.chebvander(x, 3) @@ -269,6 +275,7 @@ def test_chebfit(self) : def f(x) : return x*(x - 1)*(x - 2) + # Test exceptions assert_raises(ValueError, ch.chebfit, [1], [1], -1) assert_raises(TypeError, ch.chebfit, [[1]], [1], 0) @@ -276,6 +283,7 @@ assert_raises(TypeError, ch.chebfit, [1], [[[1]]], 0) assert_raises(TypeError, ch.chebfit, [1, 2], [1], 0) assert_raises(TypeError, ch.chebfit, [1], [1, 2], 0) + # Test fit x = np.linspace(0,2) y = f(x) @@ -290,8 +298,10 @@ def test_chebtrim(self) : coef = [2, -1, 1, 0] + # Test exceptions assert_raises(ValueError, ch.chebtrim, coef, -1) + # Test results assert_equal(ch.chebtrim(coef), 
coef[:-1]) assert_equal(ch.chebtrim(coef, 1), coef[:-3]) Modified: trunk/numpy/polynomial/tests/test_polynomial.py =================================================================== --- trunk/numpy/polynomial/tests/test_polynomial.py 2010-05-18 09:48:25 UTC (rev 8418) +++ trunk/numpy/polynomial/tests/test_polynomial.py 2010-05-21 05:35:58 UTC (rev 8419) @@ -121,6 +121,7 @@ def test_polyint(self) : # check exceptions + assert_raises(ValueError, poly.polyint, [0], .5) assert_raises(ValueError, poly.polyint, [0], -1) assert_raises(ValueError, poly.polyint, [0], 1, [0,0]) assert_raises(ValueError, poly.polyint, [0], 1, lbnd=[0,0]) @@ -191,6 +192,7 @@ def test_polyder(self) : # check exceptions + assert_raises(ValueError, poly.polyder, [0], .5) assert_raises(ValueError, poly.polyder, [0], -1) # check that zeroth deriviative does nothing @@ -241,6 +243,7 @@ for i in range(4) : coef = [0]*i + [1] assert_almost_equal(v[...,i], poly.polyval(x, coef)) + # check for 2d x x = np.array([[1,2],[3,4],[5,6]]) v = poly.polyvander(x, 3) @@ -252,6 +255,7 @@ def test_polyfit(self) : def f(x) : return x*(x - 1)*(x - 2) + # Test exceptions assert_raises(ValueError, poly.polyfit, [1], [1], -1) assert_raises(TypeError, poly.polyfit, [[1]], [1], 0) @@ -259,6 +263,7 @@ assert_raises(TypeError, poly.polyfit, [1], [[[1]]], 0) assert_raises(TypeError, poly.polyfit, [1, 2], [1], 0) assert_raises(TypeError, poly.polyfit, [1], [1, 2], 0) + # Test fit x = np.linspace(0,2) y = f(x) @@ -273,8 +278,10 @@ def test_polytrim(self) : coef = [2, -1, 1, 0] + # Test exceptions assert_raises(ValueError, poly.polytrim, coef, -1) + # Test results assert_equal(poly.polytrim(coef), coef[:-1]) assert_equal(poly.polytrim(coef, 1), coef[:-3]) From numpy-svn at scipy.org Fri May 21 01:36:01 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Fri, 21 May 2010 00:36:01 -0500 (CDT) Subject: [Numpy-svn] r8420 - in trunk/numpy/polynomial: . 
tests Message-ID: <20100521053601.BB21339CB24@scipy.org> Author: charris Date: 2010-05-21 00:36:01 -0500 (Fri, 21 May 2010) New Revision: 8420 Modified: trunk/numpy/polynomial/polytemplate.py trunk/numpy/polynomial/tests/test_polynomial.py Log: ENH: Add degree method to the Chebyshev and Polynomial classes. Modified: trunk/numpy/polynomial/polytemplate.py =================================================================== --- trunk/numpy/polynomial/polytemplate.py 2010-05-21 05:35:58 UTC (rev 8419) +++ trunk/numpy/polynomial/polytemplate.py 2010-05-21 05:36:01 UTC (rev 8420) @@ -303,6 +303,10 @@ # Extra numeric functions. # + def degree(self) : + """The degree of the series.""" + return len(self) - 1 + def convert(self, domain=None, kind=None) : """Convert to different class and/or domain. Modified: trunk/numpy/polynomial/tests/test_polynomial.py =================================================================== --- trunk/numpy/polynomial/tests/test_polynomial.py 2010-05-21 05:35:58 UTC (rev 8419) +++ trunk/numpy/polynomial/tests/test_polynomial.py 2010-05-21 05:36:01 UTC (rev 8420) @@ -370,6 +370,9 @@ xx = 2*x - 1 assert_almost_equal(self.p2(x), self.p1(xx)) + def test_degree(self) : + assert_equal(self.p1.degree(), 2) + def test_convert(self) : x = np.linspace(-1,1) p = self.p1.convert(domain=[0,1]) From numpy-svn at scipy.org Fri May 21 01:36:05 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Fri, 21 May 2010 00:36:05 -0500 (CDT) Subject: [Numpy-svn] r8421 - in trunk/numpy/polynomial: . tests Message-ID: <20100521053605.EA26B39CB30@scipy.org> Author: charris Date: 2010-05-21 00:36:05 -0500 (Fri, 21 May 2010) New Revision: 8421 Modified: trunk/numpy/polynomial/polytemplate.py trunk/numpy/polynomial/tests/test_chebyshev.py trunk/numpy/polynomial/tests/test_polynomial.py Log: CHG: Change the truncate method of the Chebyshev and Polynomial classes to take degree instead of length. This seems to fit better with normal usage. 
I feel this change is safe at this time because these new classes seem to be little used as yet. Modified: trunk/numpy/polynomial/polytemplate.py =================================================================== --- trunk/numpy/polynomial/polytemplate.py 2010-05-21 05:36:01 UTC (rev 8420) +++ trunk/numpy/polynomial/polytemplate.py 2010-05-21 05:36:05 UTC (rev 8421) @@ -391,19 +391,20 @@ """ return self.__class__(pu.trimcoef(self.coef, tol), self.domain) - def truncate(self, size) : - """Truncate series by discarding trailing coefficients. + def truncate(self, deg) : + """Truncate series to degree `deg`. - Reduce the $name series to length `size` by removing trailing - coefficients. The value of `size` must be greater than zero. This - is most likely to be useful in least squares fits when the high - order coefficients are very small. + Return a $name series obtained from the current instance by discarding + all terms of degree greater than `deg`. The value of `deg` must be + non-negative. This operation is most likely to be useful in least squares + fits when the high order coefficients are very small. Parameters: ----------- - size : int - The series is reduced to length `size` by discarding trailing - coefficients. The value of `size` must be greater than zero. + deg : non-negative int + The series is reduced to degree `deg` by discarding the + coefficients of the higher degree terms. The value of `deg` + must be non-negative. Returns: ------- @@ -411,9 +412,10 @@ New instance of $name with truncated coefficients. 
""" - if size < 1 : - raise ValueError("size must be > 0") - if size >= len(self.coef) : + size = int(deg) + 1 + if size != deg + 1 or size < 1 : + raise ValueError("deg must be a non-negative integer") + if size >= len(self) : return self.__class__(self.coef, self.domain) else : return self.__class__(self.coef[:size], self.domain) Modified: trunk/numpy/polynomial/tests/test_chebyshev.py =================================================================== --- trunk/numpy/polynomial/tests/test_chebyshev.py 2010-05-21 05:36:01 UTC (rev 8420) +++ trunk/numpy/polynomial/tests/test_chebyshev.py 2010-05-21 05:36:05 UTC (rev 8421) @@ -416,11 +416,12 @@ assert_equal(p.trim(1e-5).coef, coef[:1]) def test_truncate(self) : - assert_raises(ValueError, self.p1.truncate, 0) - assert_equal(len(self.p1.truncate(4)), 3) + assert_raises(ValueError, self.p1.truncate, .5) + assert_raises(ValueError, self.p1.truncate, -1) assert_equal(len(self.p1.truncate(3)), 3) - assert_equal(len(self.p1.truncate(2)), 2) - assert_equal(len(self.p1.truncate(1)), 1) + assert_equal(len(self.p1.truncate(2)), 3) + assert_equal(len(self.p1.truncate(1)), 2) + assert_equal(len(self.p1.truncate(0)), 1) def test_copy(self) : p = self.p1.copy() Modified: trunk/numpy/polynomial/tests/test_polynomial.py =================================================================== --- trunk/numpy/polynomial/tests/test_polynomial.py 2010-05-21 05:36:01 UTC (rev 8420) +++ trunk/numpy/polynomial/tests/test_polynomial.py 2010-05-21 05:36:05 UTC (rev 8421) @@ -390,11 +390,12 @@ assert_equal(p.trim(1e-5).coef, coef[:1]) def test_truncate(self) : - assert_raises(ValueError, self.p1.truncate, 0) - assert_equal(len(self.p1.truncate(4)), 3) + assert_raises(ValueError, self.p1.truncate, .5) + assert_raises(ValueError, self.p1.truncate, -1) assert_equal(len(self.p1.truncate(3)), 3) - assert_equal(len(self.p1.truncate(2)), 2) - assert_equal(len(self.p1.truncate(1)), 1) + assert_equal(len(self.p1.truncate(2)), 3) + 
assert_equal(len(self.p1.truncate(1)), 2) + assert_equal(len(self.p1.truncate(0)), 1) def test_copy(self) : p = self.p1.copy() From numpy-svn at scipy.org Fri May 21 01:36:08 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Fri, 21 May 2010 00:36:08 -0500 (CDT) Subject: [Numpy-svn] r8422 - trunk/numpy/polynomial Message-ID: <20100521053608.5FA0339CAF2@scipy.org> Author: charris Date: 2010-05-21 00:36:08 -0500 (Fri, 21 May 2010) New Revision: 8422 Modified: trunk/numpy/polynomial/polytemplate.py Log: ENH: Change deriv and integ method documentation of the Chebyshev and Polynomial classes to reflect the enhanced behaviour of the base functions. Modified: trunk/numpy/polynomial/polytemplate.py =================================================================== --- trunk/numpy/polynomial/polytemplate.py 2010-05-21 05:36:05 UTC (rev 8421) +++ trunk/numpy/polynomial/polytemplate.py 2010-05-21 05:36:08 UTC (rev 8422) @@ -442,7 +442,7 @@ Parameters: ----------- - m : positive integer + m : non-negative integer The number of integrations to perform. k : array_like Integration constants. The first constant is applied to the @@ -455,8 +455,7 @@ Returns: -------- integral : $name - The integral of the original series defined with the same - domain. + The integral of the original series with the same domain. See Also -------- @@ -480,14 +479,13 @@ Parameters: ----------- - m : positive integer + m : non-negative integer The number of integrations to perform. Returns: -------- derivative : $name - The derivative of the original series defined with the same - domain. + The derivative of the original series with the same domain. 
See Also -------- From numpy-svn at scipy.org Fri May 21 01:36:10 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Fri, 21 May 2010 00:36:10 -0500 (CDT) Subject: [Numpy-svn] r8423 - trunk/numpy/polynomial/tests Message-ID: <20100521053610.D476439CB25@scipy.org> Author: charris Date: 2010-05-21 00:36:10 -0500 (Fri, 21 May 2010) New Revision: 8423 Modified: trunk/numpy/polynomial/tests/test_chebyshev.py Log: ENH: Add test for the degree method added to the Chebyshev Modified: trunk/numpy/polynomial/tests/test_chebyshev.py =================================================================== --- trunk/numpy/polynomial/tests/test_chebyshev.py 2010-05-21 05:36:08 UTC (rev 8422) +++ trunk/numpy/polynomial/tests/test_chebyshev.py 2010-05-21 05:36:10 UTC (rev 8423) @@ -399,6 +399,9 @@ xx = 2*x - 1 assert_almost_equal(self.p2(x), self.p1(xx)) + def test_degree(self) : + assert_equal(self.p1.degree(), 2) + def test_convert(self) : x = np.linspace(-1,1) p = self.p1.convert(domain=[0,1]) From numpy-svn at scipy.org Fri May 21 01:36:13 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Fri, 21 May 2010 00:36:13 -0500 (CDT) Subject: [Numpy-svn] r8424 - in trunk/numpy/polynomial: . tests Message-ID: <20100521053613.AB7B239CAF2@scipy.org> Author: charris Date: 2010-05-21 00:36:13 -0500 (Fri, 21 May 2010) New Revision: 8424 Modified: trunk/numpy/polynomial/polytemplate.py trunk/numpy/polynomial/tests/test_chebyshev.py trunk/numpy/polynomial/tests/test_polynomial.py Log: CHG: Change the default domain for the fit class method of the Chebyshev and Polynomial classes to None. Add 'default' as a possible value of the domain argument to specify the default domain. This change fits better with my experience with this method. I feel it is safe to make this change at this late date because the functions seem little used as yet and I would like to get them 'right' before folks catch on to their presence. 
Modified: trunk/numpy/polynomial/polytemplate.py =================================================================== --- trunk/numpy/polynomial/polytemplate.py 2010-05-21 05:36:10 UTC (rev 8423) +++ trunk/numpy/polynomial/polytemplate.py 2010-05-21 05:36:13 UTC (rev 8424) @@ -514,7 +514,7 @@ return pu.mapdomain(roots, $domain, self.domain) @staticmethod - def fit(x, y, deg, domain=$domain, rcond=None, full=False) : + def fit(x, y, deg, domain=None, rcond=None, full=False) : """Least squares fit to data. Return a `$name` instance that is the least squares fit to the data @@ -533,10 +533,11 @@ passing in a 2D-array that contains one dataset per column. deg : int Degree of the fitting polynomial - domain : {None, [beg, end]}, optional + domain : {None, [beg, end], 'default'}, optional Domain to use for the returned $name instance. If ``None``, - then a minimal domain that covers the points `x` is chosen. The - default value is ``$domain``. + then a minimal domain that covers the points `x` is chosen. + If ``'default'`` the default domain ``$domain`` is used. The + default value is ``None``. rcond : float, optional Relative condition number of the fit. 
Singular values smaller than this relative to the largest singular value will be @@ -568,6 +569,8 @@ """ if domain is None : domain = pu.getdomain(x) + elif domain == 'default' : + domain = $domain xnew = pu.mapdomain(x, domain, $domain) res = ${nick}fit(xnew, y, deg, rcond=None, full=full) if full : Modified: trunk/numpy/polynomial/tests/test_chebyshev.py =================================================================== --- trunk/numpy/polynomial/tests/test_chebyshev.py 2010-05-21 05:36:10 UTC (rev 8423) +++ trunk/numpy/polynomial/tests/test_chebyshev.py 2010-05-21 05:36:13 UTC (rev 8424) @@ -463,7 +463,13 @@ return x*(x - 1)*(x - 2) x = np.linspace(0,3) y = f(x) + + # test default value of domain p = ch.Chebyshev.fit(x, y, 3) + assert_almost_equal(p.domain, [0,3]) + + # test that fit works in given domains + p = ch.Chebyshev.fit(x, y, 3, 'default') assert_almost_equal(p(x), y) p = ch.Chebyshev.fit(x, y, 3, None) assert_almost_equal(p(x), y) Modified: trunk/numpy/polynomial/tests/test_polynomial.py =================================================================== --- trunk/numpy/polynomial/tests/test_polynomial.py 2010-05-21 05:36:10 UTC (rev 8423) +++ trunk/numpy/polynomial/tests/test_polynomial.py 2010-05-21 05:36:13 UTC (rev 8424) @@ -434,7 +434,13 @@ return x*(x - 1)*(x - 2) x = np.linspace(0,3) y = f(x) + + # test default value of domain p = poly.Polynomial.fit(x, y, 3) + assert_almost_equal(p.domain, [0,3]) + + # test that fit works in given domains + p = poly.Polynomial.fit(x, y, 3, 'default') assert_almost_equal(p(x), y) p = poly.Polynomial.fit(x, y, 3, None) assert_almost_equal(p(x), y) From numpy-svn at scipy.org Fri May 21 01:36:15 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Fri, 21 May 2010 00:36:15 -0500 (CDT) Subject: [Numpy-svn] r8425 - trunk/doc/release Message-ID: <20100521053615.D775139CB29@scipy.org> Author: charris Date: 2010-05-21 00:36:15 -0500 (Fri, 21 May 2010) New Revision: 8425 Modified: 
trunk/doc/release/2.0.0-notes.rst Log: Note the changes in polynomial.polynomial and polynomial.chebyshev in the release notes. Modified: trunk/doc/release/2.0.0-notes.rst =================================================================== --- trunk/doc/release/2.0.0-notes.rst 2010-05-21 05:36:13 UTC (rev 8424) +++ trunk/doc/release/2.0.0-notes.rst 2010-05-21 05:36:15 UTC (rev 8425) @@ -67,3 +67,28 @@ enumerations without needing to concern themselves with the macro expansions and their side- effects. +Changes +======= + +polynomial.polynomial +--------------------- + +* The polyint and polyder functions now check that the specified number integrations or + derivations is a non-negative integer. The number 0 is a valid value for both + functions. +* A degree method has been added to the Polynomial class. +* The fit class function of the Polynomial class now uses None as the default + domain for the fit. The domain can be specified as 'default' to use the + Polynomial default domain [-1, 1]. + +polynomial.chebyshev +-------------------- + +* The chebint and chebder functions now check that the specified number integrations or + derivations is a non-negative integer. The number 0 is a valid value for both + functions. +* A degree method has been added to the Chebyshev class. +* The fit class function of the Chebyshev class now uses None as the default + domain for the fit. The domain can be specified as 'default' to use the + Chebyshev default domain [-1, 1]. + From numpy-svn at scipy.org Fri May 21 01:36:18 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Fri, 21 May 2010 00:36:18 -0500 (CDT) Subject: [Numpy-svn] r8426 - trunk/doc/release Message-ID: <20100521053618.404C439CB23@scipy.org> Author: charris Date: 2010-05-21 00:36:18 -0500 (Fri, 21 May 2010) New Revision: 8426 Modified: trunk/doc/release/2.0.0-notes.rst Log: Add some more info to the release notes for 2.0.0. 
Modified: trunk/doc/release/2.0.0-notes.rst =================================================================== --- trunk/doc/release/2.0.0-notes.rst 2010-05-21 05:36:15 UTC (rev 8425) +++ trunk/doc/release/2.0.0-notes.rst 2010-05-21 05:36:18 UTC (rev 8426) @@ -77,6 +77,9 @@ derivations is a non-negative integer. The number 0 is a valid value for both functions. * A degree method has been added to the Polynomial class. +* The truncate method of the Polynomial class now takes the degree of the + desired result as an argument instead of the number of coefficients. This + seems more natural. * The fit class function of the Polynomial class now uses None as the default domain for the fit. The domain can be specified as 'default' to use the Polynomial default domain [-1, 1]. @@ -88,6 +91,9 @@ derivations is a non-negative integer. The number 0 is a valid value for both functions. * A degree method has been added to the Chebyshev class. +* The truncate method of the Chebyshev class now takes the degree of the + desired result as an argument instead of the number of coefficients. This + seems more natural. * The fit class function of the Chebyshev class now uses None as the default domain for the fit. The domain can be specified as 'default' to use the Chebyshev default domain [-1, 1]. From numpy-svn at scipy.org Sun May 23 18:02:08 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sun, 23 May 2010 17:02:08 -0500 (CDT) Subject: [Numpy-svn] r8427 - in trunk: doc/release numpy/polynomial numpy/polynomial/tests Message-ID: <20100523220208.6913C39CAE7@scipy.org> Author: charris Date: 2010-05-23 17:02:08 -0500 (Sun, 23 May 2010) New Revision: 8427 Modified: trunk/doc/release/2.0.0-notes.rst trunk/numpy/polynomial/polytemplate.py trunk/numpy/polynomial/tests/test_chebyshev.py trunk/numpy/polynomial/tests/test_polynomial.py Log: REV: Revert the changes to the truncate method of Polynomial and Chebyshev. 
On second thought it was a bad idea to make such a radical change to existing behaviour. It was also hard to document the variations ;) Modified: trunk/doc/release/2.0.0-notes.rst =================================================================== --- trunk/doc/release/2.0.0-notes.rst 2010-05-21 05:36:18 UTC (rev 8426) +++ trunk/doc/release/2.0.0-notes.rst 2010-05-23 22:02:08 UTC (rev 8427) @@ -77,9 +77,6 @@ derivations is a non-negative integer. The number 0 is a valid value for both functions. * A degree method has been added to the Polynomial class. -* The truncate method of the Polynomial class now takes the degree of the - desired result as an argument instead of the number of coefficients. This - seems more natural. * The fit class function of the Polynomial class now uses None as the default domain for the fit. The domain can be specified as 'default' to use the Polynomial default domain [-1, 1]. @@ -91,9 +88,6 @@ derivations is a non-negative integer. The number 0 is a valid value for both functions. * A degree method has been added to the Chebyshev class. -* The truncate method of the Chebyshev class now takes the degree of the - desired result as an argument instead of the number of coefficients. This - seems more natural. * The fit class function of the Chebyshev class now uses None as the default domain for the fit. The domain can be specified as 'default' to use the Chebyshev default domain [-1, 1]. Modified: trunk/numpy/polynomial/polytemplate.py =================================================================== --- trunk/numpy/polynomial/polytemplate.py 2010-05-21 05:36:18 UTC (rev 8426) +++ trunk/numpy/polynomial/polytemplate.py 2010-05-23 22:02:08 UTC (rev 8427) @@ -391,20 +391,19 @@ """ return self.__class__(pu.trimcoef(self.coef, tol), self.domain) - def truncate(self, deg) : - """Truncate series to degree `deg`. + def truncate(self, size) : + """Truncate series to length `size`. 
- Return a $name series obtained from the current instance by discarding - all terms of degree greater than `deg`. The value of `deg` must be - non-negative. This operation is most likely to be useful in least squares - fits when the high order coefficients are very small. + Reduce the $name series to length `size` by discarding the high + degree terms. The value of `size` must be a positive integer. This + can be useful in least squares where the coefficients of the + high degree terms may be very small. Parameters: ----------- - deg : non-negative int - The series is reduced to degree `deg` by discarding the - coefficients of the higher degree terms. The value of `deg` - must be non-negative. + size : positive int + The series is reduced to length `size` by discarding the high + degree terms. The value of `size` must be a positive integer. Returns: ------- @@ -412,13 +411,13 @@ New instance of $name with truncated coefficients. """ - size = int(deg) + 1 - if size != deg + 1 or size < 1 : - raise ValueError("deg must be a non-negative integer") - if size >= len(self) : + isize = int(size) + if isize != size or isize < 1 : + raise ValueError("size must be a positive integer") + if isize >= len(self.coef) : return self.__class__(self.coef, self.domain) else : - return self.__class__(self.coef[:size], self.domain) + return self.__class__(self.coef[:isize], self.domain) def copy(self) : """Return a copy. @@ -442,7 +441,7 @@ Parameters: ----------- - m : non-negative integer + m : non-negative int The number of integrations to perform. k : array_like Integration constants. The first constant is applied to the @@ -455,7 +454,7 @@ Returns: -------- integral : $name - The integral of the original series with the same domain. + The integral of the series using the same domain. See Also -------- @@ -479,13 +478,13 @@ Parameters: ----------- - m : non-negative integer + m : non-negative int The number of integrations to perform. 
Returns: -------- derivative : $name - The derivative of the original series with the same domain. + The derivative of the series using the same domain. See Also -------- Modified: trunk/numpy/polynomial/tests/test_chebyshev.py =================================================================== --- trunk/numpy/polynomial/tests/test_chebyshev.py 2010-05-21 05:36:18 UTC (rev 8426) +++ trunk/numpy/polynomial/tests/test_chebyshev.py 2010-05-23 22:02:08 UTC (rev 8427) @@ -420,11 +420,11 @@ def test_truncate(self) : assert_raises(ValueError, self.p1.truncate, .5) - assert_raises(ValueError, self.p1.truncate, -1) + assert_raises(ValueError, self.p1.truncate, 0) + assert_equal(len(self.p1.truncate(4)), 3) assert_equal(len(self.p1.truncate(3)), 3) - assert_equal(len(self.p1.truncate(2)), 3) - assert_equal(len(self.p1.truncate(1)), 2) - assert_equal(len(self.p1.truncate(0)), 1) + assert_equal(len(self.p1.truncate(2)), 2) + assert_equal(len(self.p1.truncate(1)), 1) def test_copy(self) : p = self.p1.copy() Modified: trunk/numpy/polynomial/tests/test_polynomial.py =================================================================== --- trunk/numpy/polynomial/tests/test_polynomial.py 2010-05-21 05:36:18 UTC (rev 8426) +++ trunk/numpy/polynomial/tests/test_polynomial.py 2010-05-23 22:02:08 UTC (rev 8427) @@ -391,11 +391,11 @@ def test_truncate(self) : assert_raises(ValueError, self.p1.truncate, .5) - assert_raises(ValueError, self.p1.truncate, -1) + assert_raises(ValueError, self.p1.truncate, 0) + assert_equal(len(self.p1.truncate(4)), 3) assert_equal(len(self.p1.truncate(3)), 3) - assert_equal(len(self.p1.truncate(2)), 3) - assert_equal(len(self.p1.truncate(1)), 2) - assert_equal(len(self.p1.truncate(0)), 1) + assert_equal(len(self.p1.truncate(2)), 2) + assert_equal(len(self.p1.truncate(1)), 1) def test_copy(self) : p = self.p1.copy() From numpy-svn at scipy.org Sun May 23 18:02:11 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sun, 23 May 2010 17:02:11 -0500 
(CDT) Subject: [Numpy-svn] r8428 - in trunk/numpy/polynomial: . tests Message-ID: <20100523220211.5530C39CAF3@scipy.org> Author: charris Date: 2010-05-23 17:02:11 -0500 (Sun, 23 May 2010) New Revision: 8428 Modified: trunk/numpy/polynomial/polytemplate.py trunk/numpy/polynomial/tests/test_chebyshev.py trunk/numpy/polynomial/tests/test_polynomial.py Log: ENH: Add reduce method to polynomial.Chebyshev and polynomial.Polynomial. This method behaves like truncate except it takes the degree of the result instead of the number of coefficients. Modified: trunk/numpy/polynomial/polytemplate.py =================================================================== --- trunk/numpy/polynomial/polytemplate.py 2010-05-23 22:02:08 UTC (rev 8427) +++ trunk/numpy/polynomial/polytemplate.py 2010-05-23 22:02:11 UTC (rev 8428) @@ -304,9 +304,42 @@ # def degree(self) : - """The degree of the series.""" + """The degree of the series. + + Notes + ----- + .. versionadded:: 2.0.0 + + """ return len(self) - 1 + def reduce(self, deg) : + """Reduce the degree of the series. + + Reduce the degree of the $name series to `deg` by discarding the + high order terms. If `deg` is greater than the current degree a + copy of the current series is returned. This can be useful in least + squares where the coefficients of the high degree terms may be very + small. + + Parameters: + ----------- + deg : non-negative int + The series is reduced to degree `deg` by discarding the high + order terms. The value of `deg` must be a non-negative integer. + + Returns: + ------- + new_instance : $name + New instance of $name with reduced degree. + + Notes + ----- + .. versionadded:: 2.0.0 + + """ + return self.truncate(deg + 1) + def convert(self, domain=None, kind=None) : """Convert to different class and/or domain. 
Modified: trunk/numpy/polynomial/tests/test_chebyshev.py =================================================================== --- trunk/numpy/polynomial/tests/test_chebyshev.py 2010-05-23 22:02:08 UTC (rev 8427) +++ trunk/numpy/polynomial/tests/test_chebyshev.py 2010-05-23 22:02:11 UTC (rev 8428) @@ -402,6 +402,14 @@ def test_degree(self) : assert_equal(self.p1.degree(), 2) + def test_reduce(self) : + assert_raises(ValueError, self.p1.reduce, .5) + assert_raises(ValueError, self.p1.reduce, -1) + assert_equal(len(self.p1.reduce(3)), 3) + assert_equal(len(self.p1.reduce(2)), 3) + assert_equal(len(self.p1.reduce(1)), 2) + assert_equal(len(self.p1.reduce(0)), 1) + def test_convert(self) : x = np.linspace(-1,1) p = self.p1.convert(domain=[0,1]) Modified: trunk/numpy/polynomial/tests/test_polynomial.py =================================================================== --- trunk/numpy/polynomial/tests/test_polynomial.py 2010-05-23 22:02:08 UTC (rev 8427) +++ trunk/numpy/polynomial/tests/test_polynomial.py 2010-05-23 22:02:11 UTC (rev 8428) @@ -373,6 +373,14 @@ def test_degree(self) : assert_equal(self.p1.degree(), 2) + def test_reduce(self) : + assert_raises(ValueError, self.p1.reduce, .5) + assert_raises(ValueError, self.p1.reduce, -1) + assert_equal(len(self.p1.reduce(3)), 3) + assert_equal(len(self.p1.reduce(2)), 3) + assert_equal(len(self.p1.reduce(1)), 2) + assert_equal(len(self.p1.reduce(0)), 1) + def test_convert(self) : x = np.linspace(-1,1) p = self.p1.convert(domain=[0,1]) From numpy-svn at scipy.org Sun May 23 18:02:14 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sun, 23 May 2010 17:02:14 -0500 (CDT) Subject: [Numpy-svn] r8429 - in trunk/numpy/polynomial: . 
tests Message-ID: <20100523220214.06ACA39CAE7@scipy.org> Author: charris Date: 2010-05-23 17:02:13 -0500 (Sun, 23 May 2010) New Revision: 8429 Modified: trunk/numpy/polynomial/polytemplate.py trunk/numpy/polynomial/tests/test_chebyshev.py trunk/numpy/polynomial/tests/test_polynomial.py Log: CHG: Use [] instead of 'default' to specify the default domain in Chebyshev.fit and Polynomial.fit. Document the change from numpy 1.4.x. Modified: trunk/numpy/polynomial/polytemplate.py =================================================================== --- trunk/numpy/polynomial/polytemplate.py 2010-05-23 22:02:11 UTC (rev 8428) +++ trunk/numpy/polynomial/polytemplate.py 2010-05-23 22:02:13 UTC (rev 8429) @@ -565,11 +565,12 @@ passing in a 2D-array that contains one dataset per column. deg : int Degree of the fitting polynomial - domain : {None, [beg, end], 'default'}, optional + domain : {None, [], [beg, end]}, optional Domain to use for the returned $name instance. If ``None``, then a minimal domain that covers the points `x` is chosen. - If ``'default'`` the default domain ``$domain`` is used. The - default value is ``None``. + If ``[]`` the default domain ``$domain`` is used. The + default value is $domain in numpy 1.4.x and ``None`` in + numpy 2.0.0. The keyword value ``[]`` was added in numpy 2.0.0. rcond : float, optional Relative condition number of the fit. 
Singular values smaller than this relative to the largest singular value will be @@ -601,7 +602,7 @@ """ if domain is None : domain = pu.getdomain(x) - elif domain == 'default' : + elif domain == [] : domain = $domain xnew = pu.mapdomain(x, domain, $domain) res = ${nick}fit(xnew, y, deg, rcond=None, full=full) Modified: trunk/numpy/polynomial/tests/test_chebyshev.py =================================================================== --- trunk/numpy/polynomial/tests/test_chebyshev.py 2010-05-23 22:02:11 UTC (rev 8428) +++ trunk/numpy/polynomial/tests/test_chebyshev.py 2010-05-23 22:02:13 UTC (rev 8429) @@ -477,11 +477,12 @@ assert_almost_equal(p.domain, [0,3]) # test that fit works in given domains - p = ch.Chebyshev.fit(x, y, 3, 'default') - assert_almost_equal(p(x), y) p = ch.Chebyshev.fit(x, y, 3, None) assert_almost_equal(p(x), y) assert_almost_equal(p.domain, [0,3]) + p = ch.Chebyshev.fit(x, y, 3, []) + assert_almost_equal(p(x), y) + assert_almost_equal(p.domain, [-1, 1]) def test_identity(self) : x = np.linspace(0,3) Modified: trunk/numpy/polynomial/tests/test_polynomial.py =================================================================== --- trunk/numpy/polynomial/tests/test_polynomial.py 2010-05-23 22:02:11 UTC (rev 8428) +++ trunk/numpy/polynomial/tests/test_polynomial.py 2010-05-23 22:02:13 UTC (rev 8429) @@ -448,11 +448,12 @@ assert_almost_equal(p.domain, [0,3]) # test that fit works in given domains - p = poly.Polynomial.fit(x, y, 3, 'default') - assert_almost_equal(p(x), y) p = poly.Polynomial.fit(x, y, 3, None) assert_almost_equal(p(x), y) assert_almost_equal(p.domain, [0,3]) + p = poly.Polynomial.fit(x, y, 3, []) + assert_almost_equal(p(x), y) + assert_almost_equal(p.domain, [-1, 1]) def test_identity(self) : x = np.linspace(0,3) From numpy-svn at scipy.org Sun May 23 18:02:16 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sun, 23 May 2010 17:02:16 -0500 (CDT) Subject: [Numpy-svn] r8430 - trunk/doc/release Message-ID: 
<20100523220216.5FAA239CAE7@scipy.org> Author: charris Date: 2010-05-23 17:02:16 -0500 (Sun, 23 May 2010) New Revision: 8430 Modified: trunk/doc/release/2.0.0-notes.rst Log: Update 2.0.0 release documentation to reflect changes in the Chebyshev and Polynomial classes. Modified: trunk/doc/release/2.0.0-notes.rst =================================================================== --- trunk/doc/release/2.0.0-notes.rst 2010-05-23 22:02:13 UTC (rev 8429) +++ trunk/doc/release/2.0.0-notes.rst 2010-05-23 22:02:16 UTC (rev 8430) @@ -77,9 +77,12 @@ derivations is a non-negative integer. The number 0 is a valid value for both functions. * A degree method has been added to the Polynomial class. +* A reduce method has been added to the Polynomial class. It operates like + truncate except that the argument is the desired degree of the result, + not the number of coefficients. * The fit class function of the Polynomial class now uses None as the default - domain for the fit. The domain can be specified as 'default' to use the - Polynomial default domain [-1, 1]. + domain for the fit. The default Polynomial domain can be specified by using + [] as the domain value. polynomial.chebyshev -------------------- @@ -88,7 +91,10 @@ derivations is a non-negative integer. The number 0 is a valid value for both functions. * A degree method has been added to the Chebyshev class. +* A reduce method has been added to the Chebyshev class. It operates like + truncate except that the argument is the desired degree of the result, + not the number of coefficients. * The fit class function of the Chebyshev class now uses None as the default - domain for the fit. The domain can be specified as 'default' to use the - Chebyshev default domain [-1, 1]. + domain for the fit. The default Chebyshev domain can be specified by using + [] as the domain value. 
From numpy-svn at scipy.org Mon May 24 11:47:24 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Mon, 24 May 2010 10:47:24 -0500 (CDT) Subject: [Numpy-svn] r8431 - trunk/numpy/numarray Message-ID: <20100524154724.7D3B339CAE6@scipy.org> Author: charris Date: 2010-05-24 10:47:24 -0500 (Mon, 24 May 2010) New Revision: 8431 Modified: trunk/numpy/numarray/_capi.c Log: BUG: Fix problem with numarray _capi.c and python 2.7. Modified: trunk/numpy/numarray/_capi.c =================================================================== --- trunk/numpy/numarray/_capi.c 2010-05-23 22:02:16 UTC (rev 8430) +++ trunk/numpy/numarray/_capi.c 2010-05-24 15:47:24 UTC (rev 8431) @@ -3402,14 +3402,18 @@ _Error = PyErr_NewException("numpy.numarray._capi.error", NULL, NULL); /* Create a CObject containing the API pointer array's address */ +#if PY_VERSION_HEX >= 0x03010000 + m = PyModule_Create(&moduledef); +#else + m = Py_InitModule("_capi", _libnumarrayMethods); +#endif + #if PY_VERSION_HEX >= 0x02070000 - m = PyModule_Create(&moduledef); c_api_object = PyCapsule_New((void *)libnumarray_API, NULL, NULL); if (c_api_object == NULL) { PyErr_Clear(); } #else - m = Py_InitModule("_capi", _libnumarrayMethods); c_api_object = PyCObject_FromVoidPtr((void *)libnumarray_API, NULL); #endif From numpy-svn at scipy.org Mon May 24 11:47:27 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Mon, 24 May 2010 10:47:27 -0500 (CDT) Subject: [Numpy-svn] r8432 - trunk/numpy/core/src/multiarray Message-ID: <20100524154727.3F6AB39CAF8@scipy.org> Author: charris Date: 2010-05-24 10:47:27 -0500 (Mon, 24 May 2010) New Revision: 8432 Modified: trunk/numpy/core/src/multiarray/item_selection.c Log: ENH: Make searchsorted faster by actually dividing the interval in the middle. 
Modified: trunk/numpy/core/src/multiarray/item_selection.c =================================================================== --- trunk/numpy/core/src/multiarray/item_selection.c 2010-05-24 15:47:24 UTC (rev 8431) +++ trunk/numpy/core/src/multiarray/item_selection.c 2010-05-24 15:47:27 UTC (rev 8432) @@ -1387,7 +1387,7 @@ intp imin = 0; intp imax = nelts; while (imin < imax) { - intp imid = imin + ((imax - imin) >> 2); + intp imid = imin + ((imax - imin) >> 1); if (compare(parr + elsize*imid, pkey, key) < 0) { imin = imid + 1; } @@ -1430,7 +1430,7 @@ intp imin = 0; intp imax = nelts; while (imin < imax) { - intp imid = imin + ((imax - imin) >> 2); + intp imid = imin + ((imax - imin) >> 1); if (compare(parr + elsize*imid, pkey, key) <= 0) { imin = imid + 1; } From numpy-svn at scipy.org Mon May 24 11:47:29 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Mon, 24 May 2010 10:47:29 -0500 (CDT) Subject: [Numpy-svn] r8433 - trunk/numpy/lib/src Message-ID: <20100524154729.D2FA039CAFF@scipy.org> Author: charris Date: 2010-05-24 10:47:29 -0500 (Mon, 24 May 2010) New Revision: 8433 Modified: trunk/numpy/lib/src/_compiled_base.c Log: BUG: Make interp handle 'right' keyword correctly. Add check for empty arrays of sample points. Closes ticket #1064. Modified: trunk/numpy/lib/src/_compiled_base.c =================================================================== --- trunk/numpy/lib/src/_compiled_base.c 2010-05-24 15:47:27 UTC (rev 8432) +++ trunk/numpy/lib/src/_compiled_base.c 2010-05-24 15:47:29 UTC (rev 8433) @@ -407,45 +407,39 @@ return NULL; } -/* - * binary_search accepts three arguments: a numeric value and - * a numeric array and its length. It assumes that the array is sorted in - * increasing order. It returns the index of the array's - * largest element which is <= the value. It will return -1 if - * the value is less than the least element of the array. - * self is not used +/** @brief Use bisection on a sorted array to find first entry > key. 
+ * + * Use bisection to find an index i s.t. arr[i] <= key < arr[i + 1]. If there is + * no such i the error returns are: + * key < arr[0] -- -1 + * key == arr[len - 1] -- len - 1 + * key > arr[len - 1] -- len + * The array is assumed contiguous and sorted in ascending order. + * + * @param key key value. + * @param arr contiguous sorted array to be searched. + * @param len length of the array. + * @return index */ static npy_intp -binary_search(double dval, double dlist [], npy_intp len) +binary_search(double key, double arr [], npy_intp len) { - npy_intp bottom , top , middle, result; + npy_intp imin = 0; + npy_intp imax = len; - if (dval < dlist [0]) { - result = -1; + if (key > arr[len - 1]) { + return len; } - else { - bottom = 0; - top = len - 1; - while (bottom < top) { - middle = (top + bottom) / 2; - if (dlist [middle] < dval) { - bottom = middle + 1; - } - else if (dlist [middle] > dval) { - top = middle - 1; - } - else { - return middle; - } + while (imin < imax) { + npy_intp imid = imin + ((imax - imin) >> 1); + if (key >= arr[imid]) { + imin = imid + 1; } - if (dlist [bottom] > dval) { - result = bottom - 1; - } else { - result = bottom; + imax = imid; } } - return result; + return imin - 1; } static PyObject * @@ -478,8 +472,12 @@ if (ax == NULL) { goto fail; } - lenxp = axp->dimensions[0]; + if (lenxp == 0) { + PyErr_SetString(PyExc_ValueError, + "array of sample points is empty"); + goto fail; + } if (afp->dimensions[0] != lenxp) { PyErr_SetString(PyExc_ValueError, "fp and xp are not of the same length."); @@ -517,20 +515,23 @@ } } - slopes = (double *) PyDataMem_NEW((lenxp-1)*sizeof(double)); + slopes = (double *) PyDataMem_NEW((lenxp - 1)*sizeof(double)); for (i = 0; i < lenxp - 1; i++) { slopes[i] = (dy[i + 1] - dy[i])/(dx[i + 1] - dx[i]); } for (i = 0; i < lenx; i++) { indx = binary_search(dz[i], dx, lenxp); - if (indx < 0) { + if (indx == -1) { dres[i] = lval; } - else if (indx >= lenxp - 1) { + else if (indx == lenxp - 1) { + dres[i] = 
dy[indx]; + } + else if (indx == lenxp) { dres[i] = rval; } else { - dres[i] = slopes[indx]*(dz[i]-dx[indx]) + dy[indx]; + dres[i] = slopes[indx]*(dz[i] - dx[indx]) + dy[indx]; } } From numpy-svn at scipy.org Mon May 24 11:47:32 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Mon, 24 May 2010 10:47:32 -0500 (CDT) Subject: [Numpy-svn] r8434 - trunk/numpy/lib/tests Message-ID: <20100524154732.E1C0C39CAE6@scipy.org> Author: charris Date: 2010-05-24 10:47:32 -0500 (Mon, 24 May 2010) New Revision: 8434 Modified: trunk/numpy/lib/tests/test_function_base.py Log: ENH: Test the exceptions and the left, right keywords of the interp function. Modified: trunk/numpy/lib/tests/test_function_base.py =================================================================== --- trunk/numpy/lib/tests/test_function_base.py 2010-05-24 15:47:29 UTC (rev 8433) +++ trunk/numpy/lib/tests/test_function_base.py 2010-05-24 15:47:32 UTC (rev 8434) @@ -935,12 +935,22 @@ class TestInterp(TestCase): + def test_exceptions(self): + assert_raises(ValueError, interp, 0, [], []) + assert_raises(ValueError, interp, 0, [0], [1, 2]) + def test_basic(self): x = np.linspace(0, 1, 5) y = np.linspace(0, 1, 5) x0 = np.linspace(0, 1, 50) assert_almost_equal(np.interp(x0, x, y), x0) + def test_right_left_behavior(self): + assert_equal(interp([-1, 0, 1], [0], [1]), [1,1,1]) + assert_equal(interp([-1, 0, 1], [0], [1], left=0), [0,1,1]) + assert_equal(interp([-1, 0, 1], [0], [1], right=0), [1,1,0]) + assert_equal(interp([-1, 0, 1], [0], [1], left=0, right=0), [0,1,0]) + def test_scalar_interpolation_point(self): x = np.linspace(0, 1, 5) y = np.linspace(0, 1, 5) From numpy-svn at scipy.org Mon May 24 13:03:42 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Mon, 24 May 2010 12:03:42 -0500 (CDT) Subject: [Numpy-svn] r8435 - trunk/numpy/core Message-ID: <20100524170342.1A15939CAF8@scipy.org> Author: rkern Date: 2010-05-24 12:03:41 -0500 (Mon, 24 May 2010) New Revision: 8435 Modified: 
trunk/numpy/core/setup.py Log: BUG: typo. Modified: trunk/numpy/core/setup.py =================================================================== --- trunk/numpy/core/setup.py 2010-05-24 15:47:32 UTC (rev 8434) +++ trunk/numpy/core/setup.py 2010-05-24 17:03:41 UTC (rev 8435) @@ -252,7 +252,7 @@ result = config_cmd.check_header('Python.h') if not result: raise SystemError( - "Cannot compiler 'Python.h'. Perhaps you need to "\ + "Cannot compile 'Python.h'. Perhaps you need to "\ "install python-dev|python-devel.") # Check basic types sizes From numpy-svn at scipy.org Mon May 24 13:04:09 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Mon, 24 May 2010 12:04:09 -0500 (CDT) Subject: [Numpy-svn] r8436 - in trunk/numpy/core: src/multiarray tests Message-ID: <20100524170409.A487339CAF8@scipy.org> Author: rkern Date: 2010-05-24 12:04:09 -0500 (Mon, 24 May 2010) New Revision: 8436 Modified: trunk/numpy/core/src/multiarray/descriptor.c trunk/numpy/core/tests/test_dtype.py Log: BUG: Add some guards for bad inputs to the dtype constructor. 
Modified: trunk/numpy/core/src/multiarray/descriptor.c =================================================================== --- trunk/numpy/core/src/multiarray/descriptor.c 2010-05-24 17:03:41 UTC (rev 8435) +++ trunk/numpy/core/src/multiarray/descriptor.c 2010-05-24 17:04:09 UTC (rev 8436) @@ -1010,6 +1010,9 @@ } tup = PyTuple_New(len); descr = PyObject_GetItem(descrs, index); + if (!descr) { + goto fail; + } ret = PyArray_DescrConverter(descr, &newdescr); Py_DECREF(descr); if (ret == PY_FAIL) { @@ -1025,6 +1028,9 @@ if (offsets) { long offset; off = PyObject_GetItem(offsets, index); + if (!off) { + goto fail; + } offset = PyInt_AsLong(off); PyTuple_SET_ITEM(tup, 1, off); if (offset < totalsize) { @@ -1046,6 +1052,9 @@ PyTuple_SET_ITEM(tup, 2, item); } name = PyObject_GetItem(names, index); + if (!name) { + goto fail; + } Py_DECREF(index); #if defined(NPY_PY3K) if (!PyUString_Check(name)) { Modified: trunk/numpy/core/tests/test_dtype.py =================================================================== --- trunk/numpy/core/tests/test_dtype.py 2010-05-24 17:03:41 UTC (rev 8435) +++ trunk/numpy/core/tests/test_dtype.py 2010-05-24 17:04:09 UTC (rev 8436) @@ -33,6 +33,15 @@ self.assertTrue(hash(a) != hash(b), "%s and %s hash the same !" % (a, b)) + def test_not_lists(self): + """Test if an appropriate exception is raised when passing bad values to + the dtype constructor. 
+ """ + self.assertRaises(TypeError, np.dtype, + dict(names=set(['A', 'B']), formats=['f8', 'i4'])) + self.assertRaises(TypeError, np.dtype, + dict(names=['A', 'B'], formats=set(['f8', 'i4']))) + class TestSubarray(TestCase): def test_single_subarray(self): a = np.dtype((np.int, (2))) From numpy-svn at scipy.org Mon May 24 23:45:12 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Mon, 24 May 2010 22:45:12 -0500 (CDT) Subject: [Numpy-svn] r8437 - trunk/numpy/distutils Message-ID: <20100525034512.20D0739CAE6@scipy.org> Author: stefan Date: 2010-05-24 22:45:11 -0500 (Mon, 24 May 2010) New Revision: 8437 Modified: trunk/numpy/distutils/system_info.py Log: BUG: Use correct include path when multiple copies of NumPy is installed. Modified: trunk/numpy/distutils/system_info.py =================================================================== --- trunk/numpy/distutils/system_info.py 2010-05-24 17:04:09 UTC (rev 8436) +++ trunk/numpy/distutils/system_info.py 2010-05-25 03:45:11 UTC (rev 8437) @@ -1532,6 +1532,13 @@ if name=='lib': break prefix.append(name) + + # Ask numpy for its own include path before attempting anything else + try: + include_dirs.append(getattr(module, 'get_include')()) + except AttributeError: + pass + include_dirs.append(distutils.sysconfig.get_python_inc( prefix=os.sep.join(prefix))) except ImportError: From numpy-svn at scipy.org Tue May 25 09:32:03 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Tue, 25 May 2010 08:32:03 -0500 (CDT) Subject: [Numpy-svn] r8438 - in trunk/numpy/core: src/umath tests Message-ID: <20100525133203.7440D39CAEF@scipy.org> Author: dhuard Date: 2010-05-25 08:32:03 -0500 (Tue, 25 May 2010) New Revision: 8438 Modified: trunk/numpy/core/src/umath/umathmodule.c.src trunk/numpy/core/tests/test_umath.py Log: Fix for ticket #866 (ldexp on 64 bits). Tested on ubuntu 64 bits. 
Modified: trunk/numpy/core/src/umath/umathmodule.c.src =================================================================== --- trunk/numpy/core/src/umath/umathmodule.c.src 2010-05-25 03:45:11 UTC (rev 8437) +++ trunk/numpy/core/src/umath/umathmodule.c.src 2010-05-25 13:32:03 UTC (rev 8438) @@ -188,9 +188,9 @@ #ifdef HAVE_LDEXPF PyArray_FLOAT, PyArray_INT, PyArray_FLOAT, #endif - PyArray_DOUBLE, PyArray_INT, PyArray_DOUBLE + PyArray_DOUBLE, PyArray_LONG, PyArray_DOUBLE #ifdef HAVE_LDEXPL - ,PyArray_LONGDOUBLE, PyArray_INT, PyArray_LONGDOUBLE + ,PyArray_LONGDOUBLE, PyArray_LONG, PyArray_LONGDOUBLE #endif }; Modified: trunk/numpy/core/tests/test_umath.py =================================================================== --- trunk/numpy/core/tests/test_umath.py 2010-05-25 03:45:11 UTC (rev 8437) +++ trunk/numpy/core/tests/test_umath.py 2010-05-25 13:32:03 UTC (rev 8438) @@ -353,6 +353,13 @@ assert_arctan2_isnan(np.nan, np.nan) +class TestLdexp(TestCase): + def test_ldexp(self): + assert_almost_equal(ncu.ldexp(2., 3), 16.) + assert_almost_equal(ncu.ldexp(np.array(2., np.float32), np.array(3, np.int16)), 16.) + assert_almost_equal(ncu.ldexp(np.array(2., np.float32), np.array(3, np.int32)), 16.) + assert_almost_equal(ncu.ldexp(np.array(2., np.float64), np.array(3, np.int32)), 16.) + class TestMaximum(TestCase): def test_reduce_complex(self): assert_equal(np.maximum.reduce([1,2j]),1) From numpy-svn at scipy.org Tue May 25 12:48:09 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Tue, 25 May 2010 11:48:09 -0500 (CDT) Subject: [Numpy-svn] r8439 - trunk/numpy/core/tests Message-ID: <20100525164809.B0EAE39CAF1@scipy.org> Author: dhuard Date: 2010-05-25 11:48:09 -0500 (Tue, 25 May 2010) New Revision: 8439 Modified: trunk/numpy/core/tests/test_umath.py Log: Added tests for float96 for ldexp (ticket #866). 
Modified: trunk/numpy/core/tests/test_umath.py =================================================================== --- trunk/numpy/core/tests/test_umath.py 2010-05-25 13:32:03 UTC (rev 8438) +++ trunk/numpy/core/tests/test_umath.py 2010-05-25 16:48:09 UTC (rev 8439) @@ -358,8 +358,12 @@ assert_almost_equal(ncu.ldexp(2., 3), 16.) assert_almost_equal(ncu.ldexp(np.array(2., np.float32), np.array(3, np.int16)), 16.) assert_almost_equal(ncu.ldexp(np.array(2., np.float32), np.array(3, np.int32)), 16.) + assert_almost_equal(ncu.ldexp(np.array(2., np.float64), np.array(3, np.int16)), 16.) assert_almost_equal(ncu.ldexp(np.array(2., np.float64), np.array(3, np.int32)), 16.) + assert_almost_equal(ncu.ldexp(np.array(2., np.float96), np.array(3, np.int16)), 16.) + assert_almost_equal(ncu.ldexp(np.array(2., np.float96), np.array(3, np.int32)), 16.) + class TestMaximum(TestCase): def test_reduce_complex(self): assert_equal(np.maximum.reduce([1,2j]),1) From numpy-svn at scipy.org Tue May 25 13:22:51 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Tue, 25 May 2010 12:22:51 -0500 (CDT) Subject: [Numpy-svn] r8440 - trunk/numpy/core/src/multiarray Message-ID: <20100525172251.1A3CD39CAE6@scipy.org> Author: charris Date: 2010-05-25 12:22:50 -0500 (Tue, 25 May 2010) New Revision: 8440 Modified: trunk/numpy/core/src/multiarray/methods.c Log: STY: Remove some hard tabs. Modified: trunk/numpy/core/src/multiarray/methods.c =================================================================== --- trunk/numpy/core/src/multiarray/methods.c 2010-05-25 16:48:09 UTC (rev 8439) +++ trunk/numpy/core/src/multiarray/methods.c 2010-05-25 17:22:50 UTC (rev 8440) @@ -28,20 +28,20 @@ static int NpyArg_ParseKeywords(PyObject *keys, const char *format, char **kwlist, ...) 
{ - PyObject *args = PyTuple_New(0); - int ret; - va_list va; + PyObject *args = PyTuple_New(0); + int ret; + va_list va; - if (args == NULL) { - PyErr_SetString(PyExc_RuntimeError, - "Failed to allocate new tuple"); - return 0; - } - va_start(va, kwlist); - ret = PyArg_VaParseTupleAndKeywords(args, keys, format, kwlist, va); - va_end(va); - Py_DECREF(args); - return ret; + if (args == NULL) { + PyErr_SetString(PyExc_RuntimeError, + "Failed to allocate new tuple"); + return 0; + } + va_start(va, kwlist); + ret = PyArg_VaParseTupleAndKeywords(args, keys, format, kwlist, va); + va_end(va); + Py_DECREF(args); + return ret; } /* Should only be used if x is known to be an nd-array */ From numpy-svn at scipy.org Tue May 25 13:38:56 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Tue, 25 May 2010 12:38:56 -0500 (CDT) Subject: [Numpy-svn] r8441 - trunk/numpy/core/tests Message-ID: <20100525173856.E87B439CAE9@scipy.org> Author: charris Date: 2010-05-25 12:38:56 -0500 (Tue, 25 May 2010) New Revision: 8441 Modified: trunk/numpy/core/tests/test_umath.py Log: BUG: Make test using np.float96 portable by using np.longdouble instead. Modified: trunk/numpy/core/tests/test_umath.py =================================================================== --- trunk/numpy/core/tests/test_umath.py 2010-05-25 17:22:50 UTC (rev 8440) +++ trunk/numpy/core/tests/test_umath.py 2010-05-25 17:38:56 UTC (rev 8441) @@ -360,8 +360,8 @@ assert_almost_equal(ncu.ldexp(np.array(2., np.float32), np.array(3, np.int32)), 16.) assert_almost_equal(ncu.ldexp(np.array(2., np.float64), np.array(3, np.int16)), 16.) assert_almost_equal(ncu.ldexp(np.array(2., np.float64), np.array(3, np.int32)), 16.) - assert_almost_equal(ncu.ldexp(np.array(2., np.float96), np.array(3, np.int16)), 16.) - assert_almost_equal(ncu.ldexp(np.array(2., np.float96), np.array(3, np.int32)), 16.) + assert_almost_equal(ncu.ldexp(np.array(2., np.longdouble), np.array(3, np.int16)), 16.) 
+ assert_almost_equal(ncu.ldexp(np.array(2., np.longdouble), np.array(3, np.int32)), 16.) class TestMaximum(TestCase): From numpy-svn at scipy.org Tue May 25 22:26:12 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Tue, 25 May 2010 21:26:12 -0500 (CDT) Subject: [Numpy-svn] r8442 - trunk/numpy/random/mtrand Message-ID: <20100526022612.A86B539CAE6@scipy.org> Author: charris Date: 2010-05-25 21:26:12 -0500 (Tue, 25 May 2010) New Revision: 8442 Modified: trunk/numpy/random/mtrand/initarray.c trunk/numpy/random/mtrand/randomkit.c Log: STY: Some c coding style cleanups. Modified: trunk/numpy/random/mtrand/initarray.c =================================================================== --- trunk/numpy/random/mtrand/initarray.c 2010-05-25 17:38:56 UTC (rev 8441) +++ trunk/numpy/random/mtrand/initarray.c 2010-05-26 02:26:12 UTC (rev 8442) @@ -1,76 +1,77 @@ -/* These function have been adapted from Python 2.4.1's _randommodule.c +/* + * These function have been adapted from Python 2.4.1's _randommodule.c + * + * The following changes have been made to it in 2005 by Robert Kern: + * + * * init_by_array has been declared extern, has a void return, and uses the + * rk_state structure to hold its data. + * + * The original file has the following verbatim comments: + * + * ------------------------------------------------------------------ + * The code in this module was based on a download from: + * http://www.math.keio.ac.jp/~matumoto/MT2002/emt19937ar.html + * + * It was modified in 2002 by Raymond Hettinger as follows: + * + * * the principal computational lines untouched except for tabbing. + * + * * renamed genrand_res53() to random_random() and wrapped + * in python calling/return code. + * + * * genrand_int32() and the helper functions, init_genrand() + * and init_by_array(), were declared static, wrapped in + * Python calling/return code. also, their global data + * references were replaced with structure references. 
+ * + * * unused functions from the original were deleted. + * new, original C python code was added to implement the + * Random() interface. + * + * The following are the verbatim comments from the original code: + * + * A C-program for MT19937, with initialization improved 2002/1/26. + * Coded by Takuji Nishimura and Makoto Matsumoto. + * + * Before using, initialize the state by using init_genrand(seed) + * or init_by_array(init_key, key_length). + * + * Copyright (C) 1997 - 2002, Makoto Matsumoto and Takuji Nishimura, + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * 3. The names of its contributors may not be used to endorse or promote + * products derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * + * Any feedback is very welcome. + * http://www.math.keio.ac.jp/matumoto/emt.html + * email: matumoto at math.keio.ac.jp + */ - The following changes have been made to it in 2005 by Robert Kern: - - * init_by_array has been declared extern, has a void return, and uses the - rk_state structure to hold its data. - - The original file has the following verbatim comments: - - ------------------------------------------------------------------ - The code in this module was based on a download from: - http://www.math.keio.ac.jp/~matumoto/MT2002/emt19937ar.html - - It was modified in 2002 by Raymond Hettinger as follows: - - * the principal computational lines untouched except for tabbing. - - * renamed genrand_res53() to random_random() and wrapped - in python calling/return code. - - * genrand_int32() and the helper functions, init_genrand() - and init_by_array(), were declared static, wrapped in - Python calling/return code. also, their global data - references were replaced with structure references. - - * unused functions from the original were deleted. - new, original C python code was added to implement the - Random() interface. - - The following are the verbatim comments from the original code: - - A C-program for MT19937, with initialization improved 2002/1/26. - Coded by Takuji Nishimura and Makoto Matsumoto. - - Before using, initialize the state by using init_genrand(seed) - or init_by_array(init_key, key_length). 
- - Copyright (C) 1997 - 2002, Makoto Matsumoto and Takuji Nishimura, - All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - 1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - 2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - - 3. The names of its contributors may not be used to endorse or promote - products derived from this software without specific prior written - permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR - CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, - EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR - PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF - LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING - NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - - Any feedback is very welcome. - http://www.math.keio.ac.jp/matumoto/emt.html - email: matumoto at math.keio.ac.jp -*/ - #include "initarray.h" static void @@ -81,56 +82,69 @@ init_genrand(rk_state *self, unsigned long s) { int mti; - unsigned long *mt; + unsigned long *mt = self->key; - mt = self->key; - mt[0]= s & 0xffffffffUL; - for (mti=1; mti> 30)) + mti); - /* See Knuth TAOCP Vol2. 3rd Ed. P.106 for multiplier. 
*/ - /* In the previous versions, MSBs of the seed affect */ - /* only MSBs of the array mt[]. */ - /* 2002/01/09 modified by Makoto Matsumoto */ + mt[0] = s & 0xffffffffUL; + for (mti = 1; mti < RK_STATE_LEN; mti++) { + /* + * See Knuth TAOCP Vol2. 3rd Ed. P.106 for multiplier. + * In the previous versions, MSBs of the seed affect + * only MSBs of the array mt[]. + * 2002/01/09 modified by Makoto Matsumoto + */ + mt[mti] = (1812433253UL * (mt[mti-1] ^ (mt[mti-1] >> 30)) + mti); + /* for > 32 bit machines */ mt[mti] &= 0xffffffffUL; - /* for >32 bit machines */ } self->pos = mti; return; } -/* initialize by an array with array-length */ -/* init_key is the array for initializing keys */ -/* key_length is its length */ +/* + * initialize by an array with array-length + * init_key is the array for initializing keys + * key_length is its length + */ extern void init_by_array(rk_state *self, unsigned long init_key[], unsigned long key_length) { - unsigned int i, j, k; /* was signed in the original code. RDH 12/16/2002 */ - unsigned long *mt; + /* was signed in the original code. RDH 12/16/2002 */ + unsigned int i = 1; + unsigned int j = 0; + unsigned long *mt = self->key; + unsigned int k; - mt = self->key; init_genrand(self, 19650218UL); - i=1; j=0; - k = (RK_STATE_LEN>key_length ? RK_STATE_LEN : key_length); + k = (RK_STATE_LEN > key_length ? 
RK_STATE_LEN : key_length); for (; k; k--) { - mt[i] = (mt[i] ^ ((mt[i-1] ^ (mt[i-1] >> 30)) * 1664525UL)) - + init_key[j] + j; /* non linear */ - mt[i] &= 0xffffffffUL; /* for WORDSIZE > 32 machines */ - i++; j++; - if (i>=RK_STATE_LEN) { mt[0] = mt[RK_STATE_LEN-1]; i=1; } - if (j>=key_length) j=0; + /* non linear */ + mt[i] = (mt[i] ^ ((mt[i - 1] ^ (mt[i - 1] >> 30)) * 1664525UL)) + + init_key[j] + j; + /* for > 32 bit machines */ + mt[i] &= 0xffffffffUL; + i++; + j++; + if (i >= RK_STATE_LEN) { + mt[0] = mt[RK_STATE_LEN - 1]; + i = 1; + } + if (j >= key_length) { + j = 0; + } } - for (k=RK_STATE_LEN-1; k; k--) { + for (k = RK_STATE_LEN - 1; k; k--) { mt[i] = (mt[i] ^ ((mt[i-1] ^ (mt[i-1] >> 30)) * 1566083941UL)) - i; /* non linear */ mt[i] &= 0xffffffffUL; /* for WORDSIZE > 32 machines */ i++; - if (i>=RK_STATE_LEN) { mt[0] = mt[RK_STATE_LEN-1]; i=1; } + if (i >= RK_STATE_LEN) { + mt[0] = mt[RK_STATE_LEN - 1]; + i = 1; + } } mt[0] = 0x80000000UL; /* MSB is 1; assuring non-zero initial array */ self->has_gauss = 0; self->has_binomial = 0; } - Modified: trunk/numpy/random/mtrand/randomkit.c =================================================================== --- trunk/numpy/random/mtrand/randomkit.c 2010-05-25 17:38:56 UTC (rev 8441) +++ trunk/numpy/random/mtrand/randomkit.c 2010-05-26 02:26:12 UTC (rev 8442) @@ -3,7 +3,7 @@ /* * Copyright (c) 2003-2005, Jean-Sebastien Roy (js at jeannot.org) * - * The rk_random and rk_seed functions algorithms and the original design of + * The rk_random and rk_seed functions algorithms and the original design of * the Mersenne Twister RNG: * * Copyright (C) 1997 - 2002, Makoto Matsumoto and Takuji Nishimura, @@ -12,7 +12,7 @@ * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: - * + * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. 
* @@ -35,13 +35,13 @@ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * + * * Original algorithm for the implementation of rk_interval function from * Richard J. Wagner's implementation of the Mersenne Twister RNG, optimised by * Magnus Jonsson. * * Constants used in the rk_double implementation by Isaku Wada. - * + * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including @@ -49,10 +49,10 @@ * distribute, sublicense, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: - * + * * The above copyright notice and this permission notice shall be included * in all copies or substantial portions of the Software. - * + * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. @@ -72,16 +72,23 @@ #include #ifdef _WIN32 -/* Windows */ -/* XXX: we have to use this ugly defined(__GNUC__) because it is not easy to - * detect the compiler used in distutils itself */ +/* + * Windows + * XXX: we have to use this ugly defined(__GNUC__) because it is not easy to + * detect the compiler used in distutils itself + */ #if (defined(__GNUC__) && defined(NPY_NEEDS_MINGW_TIME_WORKAROUND)) -/* FIXME: ideally, we should set this to the real version of MSVCRT. We need - * something higher than 0x601 to enable _ftime64 and co */ + +/* + * FIXME: ideally, we should set this to the real version of MSVCRT. 
We need + * something higher than 0x601 to enable _ftime64 and co + */ #define __MSVCRT_VERSION__ 0x0700 #include #include -/* mingw msvcr lib import wrongly export _ftime, which does not exist in the + +/* + * mingw msvcr lib import wrongly export _ftime, which does not exist in the * actual msvc runtime for version >= 8; we make it an alias to _ftime64, which * is available in those versions of the runtime */ @@ -131,7 +138,7 @@ seed &= 0xffffffffUL; /* Knuth's PRNG as used in the Mersenne Twister reference implementation */ - for (pos=0; poskey[pos] = seed; seed = (1812433253UL * (seed ^ (seed >> 30)) + pos + 1) & 0xffffffffUL; } @@ -157,20 +164,21 @@ #ifndef _WIN32 struct timeval tv; #else - struct _timeb tv; + struct _timeb tv; #endif int i; if (rk_devfill(state->key, sizeof(state->key), 0) == RK_NOERR) { - state->key[0] |= 0x80000000UL; /* ensures non-zero key */ - state->pos = RK_STATE_LEN; - state->has_gauss = 0; - state->has_binomial = 0; + /* ensures non-zero key */ + state->key[0] |= 0x80000000UL; + state->pos = RK_STATE_LEN; + state->has_gauss = 0; + state->has_binomial = 0; - for (i = 0; i < 624; i++) { - state->key[i] &= 0xffffffffUL; - } - return RK_NOERR; + for (i = 0; i < 624; i++) { + state->key[i] &= 0xffffffffUL; + } + return RK_NOERR; } #ifndef _WIN32 @@ -196,10 +204,10 @@ unsigned long rk_random(rk_state *state) { unsigned long y; - + if (state->pos == RK_STATE_LEN) { int i; - + for (i = 0; i < N - M; i++) { y = (state->key[i] & UPPER_MASK) | (state->key[i+1] & LOWER_MASK); state->key[i] = state->key[i+M] ^ (y>>1) ^ (-(y & 1) & MATRIX_A); @@ -208,19 +216,19 @@ y = (state->key[i] & UPPER_MASK) | (state->key[i+1] & LOWER_MASK); state->key[i] = state->key[i+(M-N)] ^ (y>>1) ^ (-(y & 1) & MATRIX_A); } - y = (state->key[N-1] & UPPER_MASK) | (state->key[0] & LOWER_MASK); - state->key[N-1] = state->key[M-1] ^ (y>>1) ^ (-(y & 1) & MATRIX_A); - + y = (state->key[N - 1] & UPPER_MASK) | (state->key[0] & LOWER_MASK); + state->key[N - 1] = state->key[M - 1] 
^ (y >> 1) ^ (-(y & 1) & MATRIX_A); + state->pos = 0; } y = state->key[state->pos++]; - + /* Tempering */ y ^= (y >> 11); y ^= (y << 7) & 0x9d2c5680UL; y ^= (y << 15) & 0xefc60000UL; y ^= (y >> 18); - + return y; } @@ -242,8 +250,9 @@ { unsigned long mask = max, value; - if (max == 0) return 0; - + if (max == 0) { + return 0; + } /* Smallest bit mask >= max */ mask |= mask >> 1; mask |= mask >> 2; @@ -265,7 +274,6 @@ #else while ((value = (rk_ulong(state) & mask)) > max); #endif - return value; } @@ -280,7 +288,7 @@ { unsigned long r; unsigned char *buf = buffer; - + for (; size >= 4; size -= 4) { r = rk_random(state); *(buf++) = r & 0xFF; @@ -288,7 +296,7 @@ *(buf++) = (r >> 16) & 0xFF; *(buf++) = (r >> 24) & 0xFF; } - + if (!size) { return; } @@ -298,7 +306,8 @@ } } -rk_error rk_devfill(void *buffer, size_t size, int strong) +rk_error +rk_devfill(void *buffer, size_t size, int strong) { #ifndef _WIN32 FILE *rfile; @@ -339,7 +348,8 @@ return RK_ENODEV; } -rk_error rk_altfill(void *buffer, size_t size, int strong, rk_state *state) +rk_error +rk_altfill(void *buffer, size_t size, int strong, rk_state *state) { rk_error err; @@ -350,7 +360,8 @@ return err; } -double rk_gauss(rk_state *state) +double +rk_gauss(rk_state *state) { if (state->has_gauss) { state->has_gauss = 0; @@ -365,7 +376,7 @@ r2 = x1*x1 + x2*x2; } while (r2 >= 1.0 || r2 == 0.0); - + /* Box-Muller transform */ f = sqrt(-2.0*log(r2)/r2); state->has_gauss = 1; @@ -374,5 +385,3 @@ return f*x2; } } - - From numpy-svn at scipy.org Tue May 25 22:26:15 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Tue, 25 May 2010 21:26:15 -0500 (CDT) Subject: [Numpy-svn] r8443 - trunk/numpy/random/mtrand Message-ID: <20100526022615.1FB4A39CAFC@scipy.org> Author: charris Date: 2010-05-25 21:26:14 -0500 (Tue, 25 May 2010) New Revision: 8443 Modified: trunk/numpy/random/mtrand/randomkit.c Log: STY: A Few more coding style cleanups. 
Modified: trunk/numpy/random/mtrand/randomkit.c =================================================================== --- trunk/numpy/random/mtrand/randomkit.c 2010-05-26 02:26:12 UTC (rev 8442) +++ trunk/numpy/random/mtrand/randomkit.c 2010-05-26 02:26:14 UTC (rev 8443) @@ -98,6 +98,7 @@ #include #define _FTIME(x) _ftime((x)) #endif + #ifndef RK_NO_WINCRYPT /* Windows crypto */ #ifndef _WIN32_WINNT @@ -106,6 +107,7 @@ #include #include #endif + #else /* Unix */ #include @@ -132,7 +134,8 @@ /* static functions */ static unsigned long rk_hash(unsigned long key); -void rk_seed(unsigned long seed, rk_state *state) +void +rk_seed(unsigned long seed, rk_state *state) { int pos; seed &= 0xffffffffUL; @@ -148,7 +151,8 @@ } /* Thomas Wang 32 bits integer hash function */ -unsigned long rk_hash(unsigned long key) +unsigned long +rk_hash(unsigned long key) { key += ~(key << 15); key ^= (key >> 10); @@ -159,7 +163,8 @@ return key; } -rk_error rk_randomseed(rk_state *state) +rk_error +rk_randomseed(rk_state *state) { #ifndef _WIN32 struct timeval tv; @@ -201,7 +206,8 @@ #define LOWER_MASK 0x7fffffffUL /* Slightly optimised reference implementation of the Mersenne Twister */ -unsigned long rk_random(rk_state *state) +unsigned long +rk_random(rk_state *state) { unsigned long y; @@ -232,12 +238,14 @@ return y; } -long rk_long(rk_state *state) +long +rk_long(rk_state *state) { return rk_ulong(state) >> 1; } -unsigned long rk_ulong(rk_state *state) +unsigned long +rk_ulong(rk_state *state) { #if ULONG_MAX <= 0xffffffffUL return rk_random(state); @@ -246,7 +254,8 @@ #endif } -unsigned long rk_interval(unsigned long max, rk_state *state) +unsigned long +rk_interval(unsigned long max, rk_state *state) { unsigned long mask = max, value; @@ -277,14 +286,16 @@ return value; } -double rk_double(rk_state *state) +double +rk_double(rk_state *state) { /* shifts : 67108864 = 0x4000000, 9007199254740992 = 0x20000000000000 */ long a = rk_random(state) >> 5, b = rk_random(state) >> 6; return (a * 
67108864.0 + b) / 9007199254740992.0; } -void rk_fill(void *buffer, size_t size, rk_state *state) +void +rk_fill(void *buffer, size_t size, rk_state *state) { unsigned long r; unsigned char *buf = buffer; From numpy-svn at scipy.org Tue May 25 22:26:17 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Tue, 25 May 2010 21:26:17 -0500 (CDT) Subject: [Numpy-svn] r8444 - trunk/numpy/random/mtrand Message-ID: <20100526022617.B644339CAE9@scipy.org> Author: charris Date: 2010-05-25 21:26:17 -0500 (Tue, 25 May 2010) New Revision: 8444 Modified: trunk/numpy/random/mtrand/initarray.c trunk/numpy/random/mtrand/randomkit.c Log: BUG, STY: Make gaussian random number generators with identical behaviour have identical pickles. Modified: trunk/numpy/random/mtrand/initarray.c =================================================================== --- trunk/numpy/random/mtrand/initarray.c 2010-05-26 02:26:14 UTC (rev 8443) +++ trunk/numpy/random/mtrand/initarray.c 2010-05-26 02:26:17 UTC (rev 8444) @@ -145,6 +145,7 @@ } mt[0] = 0x80000000UL; /* MSB is 1; assuring non-zero initial array */ + self->gauss = 0; self->has_gauss = 0; self->has_binomial = 0; } Modified: trunk/numpy/random/mtrand/randomkit.c =================================================================== --- trunk/numpy/random/mtrand/randomkit.c 2010-05-26 02:26:14 UTC (rev 8443) +++ trunk/numpy/random/mtrand/randomkit.c 2010-05-26 02:26:17 UTC (rev 8444) @@ -127,8 +127,8 @@ char *rk_strerror[RK_ERR_MAX] = { - "no error", - "random device unvavailable" + "no error", + "random device unvavailable" }; /* static functions */ @@ -137,17 +137,18 @@ void rk_seed(unsigned long seed, rk_state *state) { - int pos; - seed &= 0xffffffffUL; + int pos; + seed &= 0xffffffffUL; - /* Knuth's PRNG as used in the Mersenne Twister reference implementation */ - for (pos = 0; pos < RK_STATE_LEN; pos++) { - state->key[pos] = seed; - seed = (1812433253UL * (seed ^ (seed >> 30)) + pos + 1) & 0xffffffffUL; - } - state->pos = 
RK_STATE_LEN; - state->has_gauss = 0; - state->has_binomial = 0; + /* Knuth's PRNG as used in the Mersenne Twister reference implementation */ + for (pos = 0; pos < RK_STATE_LEN; pos++) { + state->key[pos] = seed; + seed = (1812433253UL * (seed ^ (seed >> 30)) + pos + 1) & 0xffffffffUL; + } + state->pos = RK_STATE_LEN; + state->gauss = 0; + state->has_gauss = 0; + state->has_binomial = 0; } /* Thomas Wang 32 bits integer hash function */ @@ -177,6 +178,7 @@ /* ensures non-zero key */ state->key[0] |= 0x80000000UL; state->pos = RK_STATE_LEN; + state->gauss = 0; state->has_gauss = 0; state->has_binomial = 0; @@ -375,8 +377,10 @@ rk_gauss(rk_state *state) { if (state->has_gauss) { + const double tmp = state->gauss; + state->gauss = 0; state->has_gauss = 0; - return state->gauss; + return tmp; } else { double f, x1, x2, r2; @@ -390,9 +394,9 @@ /* Box-Muller transform */ f = sqrt(-2.0*log(r2)/r2); - state->has_gauss = 1; /* Keep for next call */ state->gauss = f*x1; + state->has_gauss = 1; return f*x2; } } From numpy-svn at scipy.org Tue May 25 22:55:10 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Tue, 25 May 2010 21:55:10 -0500 (CDT) Subject: [Numpy-svn] r8445 - trunk/numpy/lib Message-ID: <20100526025510.D2FDB39C4B4@scipy.org> Author: charris Date: 2010-05-25 21:55:09 -0500 (Tue, 25 May 2010) New Revision: 8445 Modified: trunk/numpy/lib/function_base.py Log: BUG: Fix failed detection of unsigned integers in _nanop. Fixes ticket #1300. 
Modified: trunk/numpy/lib/function_base.py =================================================================== --- trunk/numpy/lib/function_base.py 2010-05-26 02:26:17 UTC (rev 8444) +++ trunk/numpy/lib/function_base.py 2010-05-26 02:55:09 UTC (rev 8445) @@ -1298,7 +1298,7 @@ mask = isnan(a) # We only need to take care of NaN's in floating point arrays - if not np.issubdtype(y.dtype, int): + if not np.issubdtype(y.dtype, np.integer): # y[mask] = fill # We can't use fancy indexing here as it'll mess w/ MaskedArrays # Instead, let's fill the array directly... From numpy-svn at scipy.org Fri May 28 11:53:17 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Fri, 28 May 2010 10:53:17 -0500 (CDT) Subject: [Numpy-svn] r8446 - trunk/numpy/core/src/npymath Message-ID: <20100528155317.AE02F39C4B4@scipy.org> Author: charris Date: 2010-05-28 10:53:17 -0500 (Fri, 28 May 2010) New Revision: 8446 Modified: trunk/numpy/core/src/npymath/npy_math_complex.c.src Log: BUG: Fix typo. Closes ticket #1496. Modified: trunk/numpy/core/src/npymath/npy_math_complex.c.src =================================================================== --- trunk/numpy/core/src/npymath/npy_math_complex.c.src 2010-05-26 02:55:09 UTC (rev 8445) +++ trunk/numpy/core/src/npymath/npy_math_complex.c.src 2010-05-28 15:53:17 UTC (rev 8446) @@ -242,7 +242,7 @@ /**begin repeat1 * #kind = cabs,carg# - * #KIND = CABS,carg# + * #KIND = CABS,CARG# */ #ifdef HAVE_ at KIND@@C@ @type@ npy_ at kind@@c@(@ctype@ z) From numpy-svn at scipy.org Fri May 28 21:39:44 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Fri, 28 May 2010 20:39:44 -0500 (CDT) Subject: [Numpy-svn] r8447 - trunk/numpy/lib/tests Message-ID: <20100529013944.72A5539C4B4@scipy.org> Author: charris Date: 2010-05-28 20:39:44 -0500 (Fri, 28 May 2010) New Revision: 8447 Modified: trunk/numpy/lib/tests/test_function_base.py Log: ENH: Add tests for nan[arg]min, nan[arg]max) for various integer types. 
Modified: trunk/numpy/lib/tests/test_function_base.py =================================================================== --- trunk/numpy/lib/tests/test_function_base.py 2010-05-28 15:53:17 UTC (rev 8446) +++ trunk/numpy/lib/tests/test_function_base.py 2010-05-29 01:39:44 UTC (rev 8447) @@ -781,6 +781,38 @@ assert_equal(np.isinf(a), np.zeros((2, 4), dtype=bool)) +class TestNanFunctsIntTypes(TestCase): + + int_types = (int8, int16, int32, int64, uint8, uint16, uint32, uint64) + + def setUp(self, *args, **kwargs): + self.A = array([127, 39, 93, 87, 46]) + + def integer_arrays(self): + for dtype in self.int_types: + yield self.A.astype(dtype) + + def test_nanmin(self): + min_value = min(self.A) + for A in self.integer_arrays(): + assert_equal(nanmin(A), min_value) + + def test_nanmax(self): + max_value = max(self.A) + for A in self.integer_arrays(): + assert_equal(nanmax(A), max_value) + + def test_nanargmin(self): + min_arg = argmin(self.A) + for A in self.integer_arrays(): + assert_equal(nanargmin(A), min_arg) + + def test_nanargmax(self): + max_arg = argmax(self.A) + for A in self.integer_arrays(): + assert_equal(nanargmax(A), max_arg) + + class TestCorrCoef(TestCase): def test_simple(self): A = array([[ 0.15391142, 0.18045767, 0.14197213], From numpy-svn at scipy.org Sun May 30 12:03:52 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sun, 30 May 2010 11:03:52 -0500 (CDT) Subject: [Numpy-svn] r8448 - trunk/numpy/core/src/multiarray Message-ID: <20100530160352.1380D39CAF5@scipy.org> Author: charris Date: 2010-05-30 11:03:51 -0500 (Sun, 30 May 2010) New Revision: 8448 Modified: trunk/numpy/core/src/multiarray/scalarapi.c Log: BUG: Don't directly access Python object internals. 
Modified: trunk/numpy/core/src/multiarray/scalarapi.c =================================================================== --- trunk/numpy/core/src/multiarray/scalarapi.c 2010-05-29 01:39:44 UTC (rev 8447) +++ trunk/numpy/core/src/multiarray/scalarapi.c 2010-05-30 16:03:51 UTC (rev 8448) @@ -371,10 +371,8 @@ if (ret == NULL) { return NULL; } - PyArrayScalar_VAL(ret, CDouble).real = - ((PyComplexObject *)object)->cval.real; - PyArrayScalar_VAL(ret, CDouble).imag = - ((PyComplexObject *)object)->cval.imag; + PyArrayScalar_VAL(ret, CDouble).real = PyComplex_RealAsDouble(object); + PyArrayScalar_VAL(ret, CDouble).imag = PyComplex_ImagAsDouble(object); } else if (PyLong_Check(object)) { longlong val;