From numpy-svn at scipy.org Wed Sep 1 19:22:30 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Wed, 1 Sep 2010 18:22:30 -0500 (CDT) Subject: [Numpy-svn] r8679 - trunk/doc/sphinxext Message-ID: <20100901232230.83AB939CD37@scipy.org> Author: ptvirtan Date: 2010-09-01 18:22:30 -0500 (Wed, 01 Sep 2010) New Revision: 8679 Modified: trunk/doc/sphinxext/docscrape.py Log: sphinxext: more lenient parsing for See Also sections Modified: trunk/doc/sphinxext/docscrape.py =================================================================== --- trunk/doc/sphinxext/docscrape.py 2010-08-31 14:02:59 UTC (rev 8678) +++ trunk/doc/sphinxext/docscrape.py 2010-09-01 23:22:30 UTC (rev 8679) @@ -232,7 +232,8 @@ current_func = None if ',' in line: for func in line.split(','): - push_item(func, []) + if func.strip(): + push_item(func, []) elif line.strip(): current_func = line elif current_func is not None: From numpy-svn at scipy.org Thu Sep 2 16:32:43 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Thu, 2 Sep 2010 15:32:43 -0500 (CDT) Subject: [Numpy-svn] r8680 - branches/1.5.x/doc/sphinxext Message-ID: <20100902203243.709B639CD43@scipy.org> Author: ptvirtan Date: 2010-09-02 15:32:43 -0500 (Thu, 02 Sep 2010) New Revision: 8680 Modified: branches/1.5.x/doc/sphinxext/plot_directive.py Log: sphinxext: (backport r8605) bug fix + some features in plot_directive Modified: branches/1.5.x/doc/sphinxext/plot_directive.py =================================================================== --- branches/1.5.x/doc/sphinxext/plot_directive.py 2010-09-01 23:22:30 UTC (rev 8679) +++ branches/1.5.x/doc/sphinxext/plot_directive.py 2010-09-02 20:32:43 UTC (rev 8680) @@ -64,6 +64,9 @@ that determine the file format and the DPI. For entries whose DPI was omitted, sensible defaults are chosen. + plot_html_show_formats + Whether to show links to the files in HTML. 
+ TODO ---- @@ -96,6 +99,7 @@ app.add_config_value('plot_include_source', False, True) app.add_config_value('plot_formats', ['png', 'hires.png', 'pdf'], True) app.add_config_value('plot_basedir', None, True) + app.add_config_value('plot_html_show_formats', True, True) app.add_directive('plot', plot_directive, True, (0, 1, False), **plot_directive_options) @@ -174,19 +178,24 @@ {{ option }} {% endfor %} - ( {%- if not source_code -%} - `Source code <{{source_link}}>`__ + (`Source code <{{source_link}}>`__ + {%- if html_show_formats -%} {%- for fmt in img.formats -%} , `{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__ {%- endfor -%} + {%- endif -%} + ) {%- else -%} + {%- if html_show_formats -%} + ( {%- for fmt in img.formats -%} {%- if not loop.first -%}, {% endif -%} `{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__ {%- endfor -%} + ) + {%- endif -%} {%- endif -%} - ) {% endfor %} {{ only_latex }} @@ -321,12 +330,12 @@ only_latex=only_latex, options=opts, images=images, - source_code=source_code) + source_code=source_code, + html_show_formats=config.plot_html_show_formats) lines = result.split("\n") if len(lines): - state_machine.insert_input( - lines, state_machine.input_lines.source(0)) + state_machine.insert_input(lines, source=source_file_name) # copy image files to builder's output directory if not os.path.exists(dest_dir): From numpy-svn at scipy.org Thu Sep 2 16:33:09 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Thu, 2 Sep 2010 15:33:09 -0500 (CDT) Subject: [Numpy-svn] r8681 - branches/1.5.x/doc/sphinxext Message-ID: <20100902203309.9E73139CD43@scipy.org> Author: ptvirtan Date: 2010-09-02 15:33:09 -0500 (Thu, 02 Sep 2010) New Revision: 8681 Modified: branches/1.5.x/doc/sphinxext/plot_directive.py Log: sphinxext: (backport r8610) plot_directive: insert figures at points where plt.show() is called in the text Modified: branches/1.5.x/doc/sphinxext/plot_directive.py =================================================================== --- branches/1.5.x/doc/sphinxext/plot_directive.py 2010-09-02 20:32:43 UTC (rev 8680) +++ branches/1.5.x/doc/sphinxext/plot_directive.py 2010-09-02 20:33:09 UTC (rev 8681) @@ -164,40 +164,38 @@ {{ only_html }} - {% if source_code %} - (`Source code <{{ source_link }}>`__) + {% if source_link or (html_show_formats and not multi_image) %} + ( + {%- if source_link -%} + `Source code <{{ source_link }}>`__ + {%- endif -%} + {%- if html_show_formats and not multi_image -%} + {%- for img in images -%} + {%- for fmt in img.formats -%} + {%- if source_link or not loop.first -%}, {% endif -%} + `{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__ + {%- endfor -%} + {%- endfor -%} + {%- endif -%} + ) + {% endif %} - .. admonition:: Output - :class: plot-output + {% for img in images %} + .. figure:: {{ build_dir }}/{{ img.basename }}.png + {%- for option in options %} + {{ option }} + {% endfor %} - {% endif %} + {% if html_show_formats and multi_image -%} + ( + {%- for fmt in img.formats -%} + {%- if not loop.first -%}, {% endif -%} + `{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__ + {%- endfor -%} + ) + {%- endif -%} + {% endfor %} - {% for img in images %} - .. 
figure:: {{ build_dir }}/{{ img.basename }}.png - {%- for option in options %} - {{ option }} - {% endfor %} - - {%- if not source_code -%} - (`Source code <{{source_link}}>`__ - {%- if html_show_formats -%} - {%- for fmt in img.formats -%} - , `{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__ - {%- endfor -%} - {%- endif -%} - ) - {%- else -%} - {%- if html_show_formats -%} - ( - {%- for fmt in img.formats -%} - {%- if not loop.first -%}, {% endif -%} - `{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__ - {%- endfor -%} - ) - {%- endif -%} - {%- endif -%} - {% endfor %} - {{ only_latex }} {% for img in images %} @@ -290,60 +288,75 @@ # make figures try: - images = makefig(code, source_file_name, build_dir, output_base, - config) + results = makefig(code, source_file_name, build_dir, output_base, + config) + errors = [] except PlotError, err: reporter = state.memo.reporter sm = reporter.system_message( - 3, "Exception occurred in plotting %s: %s" % (output_base, err), + 2, "Exception occurred in plotting %s: %s" % (output_base, err), line=lineno) - return [sm] + results = [(code, [])] + errors = [sm] # generate output restructuredtext - if options['include-source']: - if is_doctest: - lines = [''] - lines += [row.rstrip() for row in code.split('\n')] + total_lines = [] + for j, (code_piece, images) in enumerate(results): + if options['include-source']: + if is_doctest: + lines = [''] + lines += [row.rstrip() for row in code_piece.split('\n')] + else: + lines = ['.. code-block:: python', ''] + lines += [' %s' % row.rstrip() + for row in code_piece.split('\n')] + source_code = "\n".join(lines) else: - lines = ['.. code-block:: python', ''] - lines += [' %s' % row.rstrip() for row in code.split('\n')] - source_code = "\n".join(lines) - else: - source_code = "" + source_code = "" - opts = [':%s: %s' % (key, val) for key, val in options.items() - if key in ('alt', 'height', 'width', 'scale', 'align', 'class')] + opts = [':%s: %s' % (key, val) for key, val in options.items() + if key in ('alt', 'height', 'width', 'scale', 'align', 'class')] - if sphinx.__version__ >= "0.6": - only_html = ".. only:: html" - only_latex = ".. only:: latex" - else: - only_html = ".. htmlonly::" - only_latex = ".. latexonly::" + if sphinx.__version__ >= "0.6": + only_html = ".. only:: html" + only_latex = ".. only:: latex" + else: + only_html = ".. htmlonly::" + only_latex = ".. 
latexonly::" - result = format_template( - TEMPLATE, - dest_dir=dest_dir_link, - build_dir=build_dir_link, - source_link=source_link, - only_html=only_html, - only_latex=only_latex, - options=opts, - images=images, - source_code=source_code, - html_show_formats=config.plot_html_show_formats) + if j == 0: + src_link = source_link + else: + src_link = None - lines = result.split("\n") - if len(lines): - state_machine.insert_input(lines, source=source_file_name) + result = format_template( + TEMPLATE, + dest_dir=dest_dir_link, + build_dir=build_dir_link, + source_link=src_link, + multi_image=len(images) > 1, + only_html=only_html, + only_latex=only_latex, + options=opts, + images=images, + source_code=source_code, + html_show_formats=config.plot_html_show_formats) + total_lines.extend(result.split("\n")) + total_lines.extend("\n") + + if total_lines: + state_machine.insert_input(total_lines, source=source_file_name) + # copy image files to builder's output directory if not os.path.exists(dest_dir): os.makedirs(dest_dir) - for img in images: - for fn in img.filenames(): - shutil.copyfile(fn, os.path.join(dest_dir, os.path.basename(fn))) + for code_piece, images in results: + for img in images: + for fn in img.filenames(): + shutil.copyfile(fn, os.path.join(dest_dir, + os.path.basename(fn))) # copy script (if necessary) if source_file_name == rst_file: @@ -352,7 +365,7 @@ f.write(unescape_doctest(code)) f.close() - return [] + return errors #------------------------------------------------------------------------------ @@ -398,10 +411,32 @@ code += "\n" return code +def split_code_at_show(text): + """ + Split code at plt.show() + + """ + + parts = [] + is_doctest = contains_doctest(text) + + part = [] + for line in text.split("\n"): + if (not is_doctest and line.strip() == 'plt.show()') or \ + (is_doctest and line.strip() == '>>> plt.show()'): + part.append(line) + parts.append("\n".join(part)) + part = [] + else: + part.append(line) + if "\n".join(part).strip(): + parts.append("\n".join(part)) + return parts + class PlotError(RuntimeError): pass -def run_code(code, code_path): +def run_code(code, code_path, ns=None): # Change the working directory to the directory of the example, so # it can get at its data files, if any. 
pwd = os.getcwd() @@ -422,8 +457,10 @@ try: try: code = unescape_doctest(code) - ns = {} - exec setup.config.plot_pre_code in ns + if ns is None: + ns = {} + if not ns: + exec setup.config.plot_pre_code in ns exec code in ns except (Exception, SystemExit), err: raise PlotError(traceback.format_exc()) @@ -468,6 +505,8 @@ # -- Try to determine if all images already exist + code_pieces = split_code_at_show(code) + # Look for single-figure output files first all_exists = True img = ImageFile(output_base, output_dir) @@ -478,56 +517,68 @@ img.formats.append(format) if all_exists: - return [img] + return [(code, [img])] # Then look for multi-figure output files - images = [] + results = [] all_exists = True - for i in xrange(1000): - img = ImageFile('%s_%02d' % (output_base, i), output_dir) - for format, dpi in formats: - if out_of_date(code_path, img.filename(format)): - all_exists = False + for i, code_piece in enumerate(code_pieces): + images = [] + for j in xrange(1000): + img = ImageFile('%s_%02d_%02d' % (output_base, i, j), output_dir) + for format, dpi in formats: + if out_of_date(code_path, img.filename(format)): + all_exists = False + break + img.formats.append(format) + + # assume that if we have one, we have them all + if not all_exists: + all_exists = (j > 0) break - img.formats.append(format) - - # assume that if we have one, we have them all + images.append(img) if not all_exists: - all_exists = (i > 0) break - images.append(img) + results.append((code_piece, images)) if all_exists: - return images + return results # -- We didn't find the files, so build them - # Clear between runs - plt.close('all') + results = [] + ns = {} - # Run code - run_code(code, code_path) + for i, code_piece in enumerate(code_pieces): + # Clear between runs + plt.close('all') - # Collect images - images = [] + # Run code + run_code(code_piece, code_path, ns) - fig_managers = _pylab_helpers.Gcf.get_all_fig_managers() - for i, figman in enumerate(fig_managers): - if len(fig_managers) == 1: - img = ImageFile(output_base, output_dir) - else: - img = ImageFile("%s_%02d" % (output_base, i), output_dir) - images.append(img) - for format, dpi in formats: - try: - figman.canvas.figure.savefig(img.filename(format), dpi=dpi) - except exceptions.BaseException, err: - raise PlotError(traceback.format_exc()) - img.formats.append(format) + # Collect images + images = [] + fig_managers = _pylab_helpers.Gcf.get_all_fig_managers() + for j, figman in enumerate(fig_managers): + if len(fig_managers) == 1 and len(code_pieces) == 1: + img = ImageFile(output_base, output_dir) + else: + img = ImageFile("%s_%02d_%02d" % (output_base, i, j), + output_dir) + images.append(img) + for format, dpi in formats: + try: + figman.canvas.figure.savefig(img.filename(format), dpi=dpi) + except exceptions.BaseException, err: + raise PlotError(traceback.format_exc()) + img.formats.append(format) - return images + # Results + results.append((code_piece, images)) + return results + #------------------------------------------------------------------------------ # Relative pathnames #------------------------------------------------------------------------------ From numpy-svn at scipy.org Thu Sep 2 16:33:24 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Thu, 2 Sep 2010 15:33:24 -0500 (CDT) Subject: [Numpy-svn] r8682 - branches/1.5.x/doc/sphinxext Message-ID: <20100902203324.DDA1239CD43@scipy.org> Author: ptvirtan Date: 2010-09-02 15:33:24 -0500 (Thu, 02 Sep 2010) New Revision: 8682 Modified: 
branches/1.5.x/doc/sphinxext/docscrape.py Log: sphinxext: (backport r8679) more lenient parsing for See Also sections Modified: branches/1.5.x/doc/sphinxext/docscrape.py =================================================================== --- branches/1.5.x/doc/sphinxext/docscrape.py 2010-09-02 20:33:09 UTC (rev 8681) +++ branches/1.5.x/doc/sphinxext/docscrape.py 2010-09-02 20:33:24 UTC (rev 8682) @@ -232,7 +232,8 @@ current_func = None if ',' in line: for func in line.split(','): - push_item(func, []) + if func.strip(): + push_item(func, []) elif line.strip(): current_func = line elif current_func is not None: From numpy-svn at scipy.org Sat Sep 4 05:51:57 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sat, 4 Sep 2010 04:51:57 -0500 (CDT) Subject: [Numpy-svn] r8683 - trunk Message-ID: <20100904095157.E167939CC3F@scipy.org> Author: rgommers Date: 2010-09-04 04:51:57 -0500 (Sat, 04 Sep 2010) New Revision: 8683 Modified: trunk/setup.py Log: Add doc/swig, doc/cython and doc/pyrex dirs back. Closes #1088. Modified: trunk/setup.py =================================================================== --- trunk/setup.py 2010-09-02 20:33:24 UTC (rev 8682) +++ trunk/setup.py 2010-09-04 09:51:57 UTC (rev 8683) @@ -150,6 +150,12 @@ config.add_subpackage('numpy') + # we want these files also in binaries/installed files, so it belongs here + # instead of in Manifest.in + config.add_data_files(('doc/cython/'), + ('doc/pyrex/'), + ('doc/swig/')) + config.get_version('numpy/version.py') # sets config.version return config From numpy-svn at scipy.org Sat Sep 4 05:52:24 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sat, 4 Sep 2010 04:52:24 -0500 (CDT) Subject: [Numpy-svn] r8684 - in trunk/doc: source/reference swig/doc Message-ID: <20100904095224.7E1F639CC3F@scipy.org> Author: rgommers Date: 2010-09-04 04:52:24 -0500 (Sat, 04 Sep 2010) New Revision: 8684 Added: trunk/doc/source/reference/swig.interface-file.rst trunk/doc/source/reference/swig.rst trunk/doc/source/reference/swig.testing.rst Removed: trunk/doc/swig/doc/numpy_swig.txt trunk/doc/swig/doc/testing.txt Modified: trunk/doc/source/reference/index.rst Log: DOC: integrate doc/swig/doc documentation with reference guide. Modified: trunk/doc/source/reference/index.rst =================================================================== --- trunk/doc/source/reference/index.rst 2010-09-04 09:51:57 UTC (rev 8683) +++ trunk/doc/source/reference/index.rst 2010-09-04 09:52:24 UTC (rev 8684) @@ -24,6 +24,7 @@ distutils c-api internals + swig Acknowledgements Copied: trunk/doc/source/reference/swig.interface-file.rst (from rev 8683, trunk/doc/swig/doc/numpy_swig.txt) =================================================================== --- trunk/doc/source/reference/swig.interface-file.rst (rev 0) +++ trunk/doc/source/reference/swig.interface-file.rst 2010-09-04 09:52:24 UTC (rev 8684) @@ -0,0 +1,930 @@ +Numpy.i: a SWIG Interface File for NumPy +======================================== + +Introduction +------------ + +The Simple Wrapper and Interface Generator (or `SWIG +`_) is a powerful tool for generating wrapper +code for interfacing to a wide variety of scripting languages. +`SWIG`_ can parse header files, and using only the code prototypes, +create an interface to the target language. But `SWIG`_ is not +omnipotent. For example, it cannot know from the prototype:: + + double rms(double* seq, int n); + +what exactly ``seq`` is. Is it a single value to be altered in-place? +Is it an array, and if so what is its length? 
Is it input-only? +Output-only? Input-output? `SWIG`_ cannot determine these details, +and does not attempt to do so. + +If we designed ``rms``, we probably made it a routine that takes an +input-only array of length ``n`` of ``double`` values called ``seq`` +and returns the root mean square. The default behavior of `SWIG`_, +however, will be to create a wrapper function that compiles, but is +nearly impossible to use from the scripting language in the way the C +routine was intended. + +For Python, the preferred way of handling contiguous (or technically, +*strided*) blocks of homogeneous data is with NumPy, which provides full +object-oriented access to multidimensial arrays of data. Therefore, the most +logical Python interface for the ``rms`` function would be (including doc +string):: + + def rms(seq): + """ + rms: return the root mean square of a sequence + rms(numpy.ndarray) -> double + rms(list) -> double + rms(tuple) -> double + """ + +where ``seq`` would be a NumPy array of ``double`` values, and its +length ``n`` would be extracted from ``seq`` internally before being +passed to the C routine. Even better, since NumPy supports +construction of arrays from arbitrary Python sequences, ``seq`` +itself could be a nearly arbitrary sequence (so long as each element +can be converted to a ``double``) and the wrapper code would +internally convert it to a NumPy array before extracting its data +and length. + +`SWIG`_ allows these types of conversions to be defined via a +mechanism called typemaps. This document provides information on how +to use ``numpy.i``, a `SWIG`_ interface file that defines a series of +typemaps intended to make the type of array-related conversions +described above relatively simple to implement. For example, suppose +that the ``rms`` function prototype defined above was in a header file +named ``rms.h``. To obtain the Python interface discussed above, +your `SWIG`_ interface file would need the following:: + + %{ + #define SWIG_FILE_WITH_INIT + #include "rms.h" + %} + + %include "numpy.i" + + %init %{ + import_array(); + %} + + %apply (double* IN_ARRAY1, int DIM1) {(double* seq, int n)}; + %include "rms.h" + +Typemaps are keyed off a list of one or more function arguments, +either by type or by type and name. We will refer to such lists as +*signatures*. One of the many typemaps defined by ``numpy.i`` is used +above and has the signature ``(double* IN_ARRAY1, int DIM1)``. The +argument names are intended to suggest that the ``double*`` argument +is an input array of one dimension and that the ``int`` represents +that dimension. This is precisely the pattern in the ``rms`` +prototype. + +Most likely, no actual prototypes to be wrapped will have the argument +names ``IN_ARRAY1`` and ``DIM1``. We use the ``%apply`` directive to +apply the typemap for one-dimensional input arrays of type ``double`` +to the actual prototype used by ``rms``. Using ``numpy.i`` +effectively, therefore, requires knowing what typemaps are available +and what they do. 
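The payoff of all this is on the Python side. As a hedged sketch (the
module name ``rms`` below is an assumption about how the interface is
compiled, not something fixed by ``numpy.i``), the wrapped routine could
then be used as::

    import numpy as np
    import rms                        # hypothetical SWIG-generated module

    data = np.array([1.0, 2.0, 3.0])
    print rms.rms(data)               # a NumPy array of doubles
    print rms.rms([3.0, 4.0, 5.0])    # any sequence of doubles also works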
+ +A `SWIG`_ interface file that includes the `SWIG`_ directives given +above will produce wrapper code that looks something like:: + + 1 PyObject *_wrap_rms(PyObject *args) { + 2 PyObject *resultobj = 0; + 3 double *arg1 = (double *) 0 ; + 4 int arg2 ; + 5 double result; + 6 PyArrayObject *array1 = NULL ; + 7 int is_new_object1 = 0 ; + 8 PyObject * obj0 = 0 ; + 9 + 10 if (!PyArg_ParseTuple(args,(char *)"O:rms",&obj0)) SWIG_fail; + 11 { + 12 array1 = obj_to_array_contiguous_allow_conversion( + 13 obj0, NPY_DOUBLE, &is_new_object1); + 14 npy_intp size[1] = { + 15 -1 + 16 }; + 17 if (!array1 || !require_dimensions(array1, 1) || + 18 !require_size(array1, size, 1)) SWIG_fail; + 19 arg1 = (double*) array1->data; + 20 arg2 = (int) array1->dimensions[0]; + 21 } + 22 result = (double)rms(arg1,arg2); + 23 resultobj = SWIG_From_double((double)(result)); + 24 { + 25 if (is_new_object1 && array1) Py_DECREF(array1); + 26 } + 27 return resultobj; + 28 fail: + 29 { + 30 if (is_new_object1 && array1) Py_DECREF(array1); + 31 } + 32 return NULL; + 33 } + +The typemaps from ``numpy.i`` are responsible for the following lines +of code: 12--20, 25 and 30. Line 10 parses the input to the ``rms`` +function. From the format string ``"O:rms"``, we can see that the +argument list is expected to be a single Python object (specified +by the ``O`` before the colon) and whose pointer is stored in +``obj0``. A number of functions, supplied by ``numpy.i``, are called +to make and check the (possible) conversion from a generic Python +object to a NumPy array. These functions are explained in the +section `Helper Functions`_, but hopefully their names are +self-explanatory. At line 12 we use ``obj0`` to construct a NumPy +array. At line 17, we check the validity of the result: that it is +non-null and that it has a single dimension of arbitrary length. Once +these states are verified, we extract the data buffer and length in +lines 19 and 20 so that we can call the underlying C function at line +22. Line 25 performs memory management for the case where we have +created a new array that is no longer needed. + +This code has a significant amount of error handling. Note the +``SWIG_fail`` is a macro for ``goto fail``, refering to the label at +line 28. If the user provides the wrong number of arguments, this +will be caught at line 10. If construction of the NumPy array +fails or produces an array with the wrong number of dimensions, these +errors are caught at line 17. And finally, if an error is detected, +memory is still managed correctly at line 30. + +Note that if the C function signature was in a different order:: + + double rms(int n, double* seq); + +that `SWIG`_ would not match the typemap signature given above with +the argument list for ``rms``. Fortunately, ``numpy.i`` has a set of +typemaps with the data pointer given last:: + + %apply (int DIM1, double* IN_ARRAY1) {(int n, double* seq)}; + +This simply has the effect of switching the definitions of ``arg1`` +and ``arg2`` in lines 3 and 4 of the generated code above, and their +assignments in lines 19 and 20. + +Using numpy.i +------------- + +The ``numpy.i`` file is currently located in the ``numpy/docs/swig`` +sub-directory under the ``numpy`` installation directory. Typically, +you will want to copy it to the directory where you are developing +your wrappers. If it is ever adopted by `SWIG`_ developers, then it +will be installed in a standard place where `SWIG`_ can find it. 
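``numpy.i`` does not ship a build script. As a rough, hedged sketch (the
file names ``rms.i``, ``rms.c``, ``rms_wrap.c`` and the module name
``_rms`` are assumptions for illustration only), a minimal ``setup.py``
that compiles a SWIG-generated wrapper against the NumPy headers could
look like::

    # generate the wrapper first with:  swig -python rms.i
    from distutils.core import setup, Extension
    import numpy

    setup(name='rms',
          ext_modules=[Extension('_rms',
                                 sources=['rms_wrap.c', 'rms.c'],
                                 include_dirs=[numpy.get_include()])])

The only NumPy-specific ingredient is ``numpy.get_include()``, which points
the compiler at the headers that the ``numpy.i`` typemaps rely on.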
+ +A simple module that only uses a single `SWIG`_ interface file should +include the following:: + + %{ + #define SWIG_FILE_WITH_INIT + %} + %include "numpy.i" + %init %{ + import_array(); + %} + +Within a compiled Python module, ``import_array()`` should only get +called once. This could be in a C/C++ file that you have written and +is linked to the module. If this is the case, then none of your +interface files should ``#define SWIG_FILE_WITH_INIT`` or call +``import_array()``. Or, this initialization call could be in a +wrapper file generated by `SWIG`_ from an interface file that has the +``%init`` block as above. If this is the case, and you have more than +one `SWIG`_ interface file, then only one interface file should +``#define SWIG_FILE_WITH_INIT`` and call ``import_array()``. + +Available Typemaps +------------------ + +The typemap directives provided by ``numpy.i`` for arrays of different +data types, say ``double`` and ``int``, and dimensions of different +types, say ``int`` or ``long``, are identical to one another except +for the C and NumPy type specifications. The typemaps are +therefore implemented (typically behind the scenes) via a macro:: + + %numpy_typemaps(DATA_TYPE, DATA_TYPECODE, DIM_TYPE) + +that can be invoked for appropriate ``(DATA_TYPE, DATA_TYPECODE, +DIM_TYPE)`` triplets. For example:: + + %numpy_typemaps(double, NPY_DOUBLE, int) + %numpy_typemaps(int, NPY_INT , int) + +The ``numpy.i`` interface file uses the ``%numpy_typemaps`` macro to +implement typemaps for the following C data types and ``int`` +dimension types: + + * ``signed char`` + * ``unsigned char`` + * ``short`` + * ``unsigned short`` + * ``int`` + * ``unsigned int`` + * ``long`` + * ``unsigned long`` + * ``long long`` + * ``unsigned long long`` + * ``float`` + * ``double`` + +In the following descriptions, we reference a generic ``DATA_TYPE``, which +could be any of the C data types listed above, and ``DIM_TYPE`` which +should be one of the many types of integers. + +The typemap signatures are largely differentiated on the name given to +the buffer pointer. Names with ``FARRAY`` are for FORTRAN-ordered +arrays, and names with ``ARRAY`` are for C-ordered (or 1D arrays). + +Input Arrays +```````````` + +Input arrays are defined as arrays of data that are passed into a +routine but are not altered in-place or returned to the user. The +Python input array is therefore allowed to be almost any Python +sequence (such as a list) that can be converted to the requested type +of array. The input array signatures are + +1D: + + * ``( DATA_TYPE IN_ARRAY1[ANY] )`` + * ``( DATA_TYPE* IN_ARRAY1, int DIM1 )`` + * ``( int DIM1, DATA_TYPE* IN_ARRAY1 )`` + +2D: + + * ``( DATA_TYPE IN_ARRAY2[ANY][ANY] )`` + * ``( DATA_TYPE* IN_ARRAY2, int DIM1, int DIM2 )`` + * ``( int DIM1, int DIM2, DATA_TYPE* IN_ARRAY2 )`` + * ``( DATA_TYPE* IN_FARRAY2, int DIM1, int DIM2 )`` + * ``( int DIM1, int DIM2, DATA_TYPE* IN_FARRAY2 )`` + +3D: + + * ``( DATA_TYPE IN_ARRAY3[ANY][ANY][ANY] )`` + * ``( DATA_TYPE* IN_ARRAY3, int DIM1, int DIM2, int DIM3 )`` + * ``( int DIM1, int DIM2, int DIM3, DATA_TYPE* IN_ARRAY3 )`` + * ``( DATA_TYPE* IN_FARRAY3, int DIM1, int DIM2, int DIM3 )`` + * ``( int DIM1, int DIM2, int DIM3, DATA_TYPE* IN_FARRAY3 )`` + +The first signature listed, ``( DATA_TYPE IN_ARRAY[ANY] )`` is for +one-dimensional arrays with hard-coded dimensions. Likewise, +``( DATA_TYPE IN_ARRAY2[ANY][ANY] )`` is for two-dimensional arrays +with hard-coded dimensions, and similarly for three-dimensional. 
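Because input arrays only need to be convertible to the requested type,
the Python caller has considerable freedom. As a hedged illustration
(``example`` and ``det`` are hypothetical names for a module wrapped with
the ``( DATA_TYPE* IN_ARRAY2, int DIM1, int DIM2 )`` signature)::

    import numpy as np
    import example

    a = np.array([[1.0, 2.0], [3.0, 4.0]])
    print example.det(a)                   # an existing ndarray is used directly
    print example.det([[1, 2], [3, 4]])    # nested sequences are converted first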
+ +In-Place Arrays +``````````````` + +In-place arrays are defined as arrays that are modified in-place. The +input values may or may not be used, but the values at the time the +function returns are significant. The provided Python argument +must therefore be a NumPy array of the required type. The in-place +signatures are + +1D: + + * ``( DATA_TYPE INPLACE_ARRAY1[ANY] )`` + * ``( DATA_TYPE* INPLACE_ARRAY1, int DIM1 )`` + * ``( int DIM1, DATA_TYPE* INPLACE_ARRAY1 )`` + +2D: + + * ``( DATA_TYPE INPLACE_ARRAY2[ANY][ANY] )`` + * ``( DATA_TYPE* INPLACE_ARRAY2, int DIM1, int DIM2 )`` + * ``( int DIM1, int DIM2, DATA_TYPE* INPLACE_ARRAY2 )`` + * ``( DATA_TYPE* INPLACE_FARRAY2, int DIM1, int DIM2 )`` + * ``( int DIM1, int DIM2, DATA_TYPE* INPLACE_FARRAY2 )`` + +3D: + + * ``( DATA_TYPE INPLACE_ARRAY3[ANY][ANY][ANY] )`` + * ``( DATA_TYPE* INPLACE_ARRAY3, int DIM1, int DIM2, int DIM3 )`` + * ``( int DIM1, int DIM2, int DIM3, DATA_TYPE* INPLACE_ARRAY3 )`` + * ``( DATA_TYPE* INPLACE_FARRAY3, int DIM1, int DIM2, int DIM3 )`` + * ``( int DIM1, int DIM2, int DIM3, DATA_TYPE* INPLACE_FARRAY3 )`` + +These typemaps now check to make sure that the ``INPLACE_ARRAY`` +arguments use native byte ordering. If not, an exception is raised. + +Argout Arrays +````````````` + +Argout arrays are arrays that appear in the input arguments in C, but +are in fact output arrays. This pattern occurs often when there is +more than one output variable and the single return argument is +therefore not sufficient. In Python, the convential way to return +multiple arguments is to pack them into a sequence (tuple, list, etc.) +and return the sequence. This is what the argout typemaps do. If a +wrapped function that uses these argout typemaps has more than one +return argument, they are packed into a tuple or list, depending on +the version of Python. The Python user does not pass these +arrays in, they simply get returned. For the case where a dimension +is specified, the python user must provide that dimension as an +argument. The argout signatures are + +1D: + + * ``( DATA_TYPE ARGOUT_ARRAY1[ANY] )`` + * ``( DATA_TYPE* ARGOUT_ARRAY1, int DIM1 )`` + * ``( int DIM1, DATA_TYPE* ARGOUT_ARRAY1 )`` + +2D: + + * ``( DATA_TYPE ARGOUT_ARRAY2[ANY][ANY] )`` + +3D: + + * ``( DATA_TYPE ARGOUT_ARRAY3[ANY][ANY][ANY] )`` + +These are typically used in situations where in C/C++, you would +allocate a(n) array(s) on the heap, and call the function to fill the +array(s) values. In Python, the arrays are allocated for you and +returned as new array objects. + +Note that we support ``DATA_TYPE*`` argout typemaps in 1D, but not 2D +or 3D. This is because of a quirk with the `SWIG`_ typemap syntax and +cannot be avoided. Note that for these types of 1D typemaps, the +Python function will take a single argument representing ``DIM1``. + +Argoutview Arrays +````````````````` + +Argoutview arrays are for when your C code provides you with a view of +its internal data and does not require any memory to be allocated by +the user. This can be dangerous. There is almost no way to guarantee +that the internal data from the C code will remain in existence for +the entire lifetime of the NumPy array that encapsulates it. If +the user destroys the object that provides the view of the data before +destroying the NumPy array, then using that array my result in bad +memory references or segmentation faults. Nevertheless, there are +situations, working with large data sets, where you simply have no +other choice. 
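That lifetime hazard can be stated in Python terms. In this hedged sketch,
``example.DataHolder`` is a hypothetical wrapped class whose accessor uses
an argoutview typemap::

    import example

    holder = example.DataHolder()
    view = holder.get_data()    # ndarray wrapping memory owned by 'holder'
    del holder                  # the underlying buffer may now be freed
    total = view.sum()          # further use of 'view' is then undefined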
+ +The C code to be wrapped for argoutview arrays are characterized by +pointers: pointers to the dimensions and double pointers to the data, +so that these values can be passed back to the user. The argoutview +typemap signatures are therefore + +1D: + + * ``( DATA_TYPE** ARGOUTVIEW_ARRAY1, DIM_TYPE* DIM1 )`` + * ``( DIM_TYPE* DIM1, DATA_TYPE** ARGOUTVIEW_ARRAY1 )`` + +2D: + + * ``( DATA_TYPE** ARGOUTVIEW_ARRAY2, DIM_TYPE* DIM1, DIM_TYPE* DIM2 )`` + * ``( DIM_TYPE* DIM1, DIM_TYPE* DIM2, DATA_TYPE** ARGOUTVIEW_ARRAY2 )`` + * ``( DATA_TYPE** ARGOUTVIEW_FARRAY2, DIM_TYPE* DIM1, DIM_TYPE* DIM2 )`` + * ``( DIM_TYPE* DIM1, DIM_TYPE* DIM2, DATA_TYPE** ARGOUTVIEW_FARRAY2 )`` + +3D: + + * ``( DATA_TYPE** ARGOUTVIEW_ARRAY3, DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3)`` + * ``( DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DATA_TYPE** ARGOUTVIEW_ARRAY3)`` + * ``( DATA_TYPE** ARGOUTVIEW_FARRAY3, DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3)`` + * ``( DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DATA_TYPE** ARGOUTVIEW_FARRAY3)`` + +Note that arrays with hard-coded dimensions are not supported. These +cannot follow the double pointer signatures of these typemaps. + +Output Arrays +````````````` + +The ``numpy.i`` interface file does not support typemaps for output +arrays, for several reasons. First, C/C++ return arguments are +limited to a single value. This prevents obtaining dimension +information in a general way. Second, arrays with hard-coded lengths +are not permitted as return arguments. In other words:: + + double[3] newVector(double x, double y, double z); + +is not legal C/C++ syntax. Therefore, we cannot provide typemaps of +the form:: + + %typemap(out) (TYPE[ANY]); + +If you run into a situation where a function or method is returning a +pointer to an array, your best bet is to write your own version of the +function to be wrapped, either with ``%extend`` for the case of class +methods or ``%ignore`` and ``%rename`` for the case of functions. + +Other Common Types: bool +```````````````````````` + +Note that C++ type ``bool`` is not supported in the list in the +`Available Typemaps`_ section. NumPy bools are a single byte, while +the C++ ``bool`` is four bytes (at least on my system). Therefore:: + + %numpy_typemaps(bool, NPY_BOOL, int) + +will result in typemaps that will produce code that reference +improper data lengths. You can implement the following macro +expansion:: + + %numpy_typemaps(bool, NPY_UINT, int) + +to fix the data length problem, and `Input Arrays`_ will work fine, +but `In-Place Arrays`_ might fail type-checking. + +Other Common Types: complex +``````````````````````````` + +Typemap conversions for complex floating-point types is also not +supported automatically. This is because Python and NumPy are +written in C, which does not have native complex types. Both +Python and NumPy implement their own (essentially equivalent) +``struct`` definitions for complex variables:: + + /* Python */ + typedef struct {double real; double imag;} Py_complex; + + /* NumPy */ + typedef struct {float real, imag;} npy_cfloat; + typedef struct {double real, imag;} npy_cdouble; + +We could have implemented:: + + %numpy_typemaps(Py_complex , NPY_CDOUBLE, int) + %numpy_typemaps(npy_cfloat , NPY_CFLOAT , int) + %numpy_typemaps(npy_cdouble, NPY_CDOUBLE, int) + +which would have provided automatic type conversions for arrays of +type ``Py_complex``, ``npy_cfloat`` and ``npy_cdouble``. 
However, it +seemed unlikely that there would be any independent (non-Python, +non-NumPy) application code that people would be using `SWIG`_ to +generate a Python interface to, that also used these definitions +for complex types. More likely, these application codes will define +their own complex types, or in the case of C++, use ``std::complex``. +Assuming these data structures are compatible with Python and +NumPy complex types, ``%numpy_typemap`` expansions as above (with +the user's complex type substituted for the first argument) should +work. + +NumPy Array Scalars and SWIG +---------------------------- + +`SWIG`_ has sophisticated type checking for numerical types. For +example, if your C/C++ routine expects an integer as input, the code +generated by `SWIG`_ will check for both Python integers and +Python long integers, and raise an overflow error if the provided +Python integer is too big to cast down to a C integer. With the +introduction of NumPy scalar arrays into your Python code, you +might conceivably extract an integer from a NumPy array and attempt +to pass this to a `SWIG`_-wrapped C/C++ function that expects an +``int``, but the `SWIG`_ type checking will not recognize the NumPy +array scalar as an integer. (Often, this does in fact work -- it +depends on whether NumPy recognizes the integer type you are using +as inheriting from the Python integer type on the platform you are +using. Sometimes, this means that code that works on a 32-bit machine +will fail on a 64-bit machine.) + +If you get a Python error that looks like the following:: + + TypeError: in method 'MyClass_MyMethod', argument 2 of type 'int' + +and the argument you are passing is an integer extracted from a +NumPy array, then you have stumbled upon this problem. The +solution is to modify the `SWIG`_ type conversion system to accept +`Numpy`_ array scalars in addition to the standard integer types. +Fortunately, this capabilitiy has been provided for you. Simply copy +the file:: + + pyfragments.swg + +to the working build directory for you project, and this problem will +be fixed. It is suggested that you do this anyway, as it only +increases the capabilities of your Python interface. + +Why is There a Second File? +``````````````````````````` + +The `SWIG`_ type checking and conversion system is a complicated +combination of C macros, `SWIG`_ macros, `SWIG`_ typemaps and `SWIG`_ +fragments. Fragments are a way to conditionally insert code into your +wrapper file if it is needed, and not insert it if not needed. If +multiple typemaps require the same fragment, the fragment only gets +inserted into your wrapper code once. + +There is a fragment for converting a Python integer to a C +``long``. There is a different fragment that converts a Python +integer to a C ``int``, that calls the rountine defined in the +``long`` fragment. We can make the changes we want here by changing +the definition for the ``long`` fragment. `SWIG`_ determines the +active definition for a fragment using a "first come, first served" +system. That is, we need to define the fragment for ``long`` +conversions prior to `SWIG`_ doing it internally. `SWIG`_ allows us +to do this by putting our fragment definitions in the file +``pyfragments.swg``. If we were to put the new fragment definitions +in ``numpy.i``, they would be ignored. + +Helper Functions +---------------- + +The ``numpy.i`` file containes several macros and routines that it +uses internally to build its typemaps. 
However, these functions may +be useful elsewhere in your interface file. These macros and routines +are implemented as fragments, which are described briefly in the +previous section. If you try to use one or more of the following +macros or functions, but your compiler complains that it does not +recognize the symbol, then you need to force these fragments to appear +in your code using:: + + %fragment("NumPy_Fragments"); + +in your `SWIG`_ interface file. + +Macros +`````` + + **is_array(a)** + Evaluates as true if ``a`` is non-``NULL`` and can be cast to a + ``PyArrayObject*``. + + **array_type(a)** + Evaluates to the integer data type code of ``a``, assuming ``a`` can + be cast to a ``PyArrayObject*``. + + **array_numdims(a)** + Evaluates to the integer number of dimensions of ``a``, assuming + ``a`` can be cast to a ``PyArrayObject*``. + + **array_dimensions(a)** + Evaluates to an array of type ``npy_intp`` and length + ``array_numdims(a)``, giving the lengths of all of the dimensions + of ``a``, assuming ``a`` can be cast to a ``PyArrayObject*``. + + **array_size(a,i)** + Evaluates to the ``i``-th dimension size of ``a``, assuming ``a`` + can be cast to a ``PyArrayObject*``. + + **array_data(a)** + Evaluates to a pointer of type ``void*`` that points to the data + buffer of ``a``, assuming ``a`` can be cast to a ``PyArrayObject*``. + + **array_is_contiguous(a)** + Evaluates as true if ``a`` is a contiguous array. Equivalent to + ``(PyArray_ISCONTIGUOUS(a))``. + + **array_is_native(a)** + Evaluates as true if the data buffer of ``a`` uses native byte + order. Equivalent to ``(PyArray_ISNOTSWAPPED(a))``. + + **array_is_fortran(a)** + Evaluates as true if ``a`` is FORTRAN ordered. + +Routines +```````` + + **pytype_string()** + + Return type: ``char*`` + + Arguments: + + * ``PyObject* py_obj``, a general Python object. + + Return a string describing the type of ``py_obj``. + + + **typecode_string()** + + Return type: ``char*`` + + Arguments: + + * ``int typecode``, a NumPy integer typecode. + + Return a string describing the type corresponding to the NumPy + ``typecode``. + + **type_match()** + + Return type: ``int`` + + Arguments: + + * ``int actual_type``, the NumPy typecode of a NumPy array. + + * ``int desired_type``, the desired NumPy typecode. + + Make sure that ``actual_type`` is compatible with + ``desired_type``. For example, this allows character and + byte types, or int and long types, to match. This is now + equivalent to ``PyArray_EquivTypenums()``. + + + **obj_to_array_no_conversion()** + + Return type: ``PyArrayObject*`` + + Arguments: + + * ``PyObject* input``, a general Python object. + + * ``int typecode``, the desired NumPy typecode. + + Cast ``input`` to a ``PyArrayObject*`` if legal, and ensure that + it is of type ``typecode``. If ``input`` cannot be cast, or the + ``typecode`` is wrong, set a Python error and return ``NULL``. + + + **obj_to_array_allow_conversion()** + + Return type: ``PyArrayObject*`` + + Arguments: + + * ``PyObject* input``, a general Python object. + + * ``int typecode``, the desired NumPy typecode of the resulting + array. + + * ``int* is_new_object``, returns a value of 0 if no conversion + performed, else 1. + + Convert ``input`` to a NumPy array with the given ``typecode``. + On success, return a valid ``PyArrayObject*`` with the correct + type. On failure, the Python error string will be set and the + routine returns ``NULL``. 
+ + + **make_contiguous()** + + Return type: ``PyArrayObject*`` + + Arguments: + + * ``PyArrayObject* ary``, a NumPy array. + + * ``int* is_new_object``, returns a value of 0 if no conversion + performed, else 1. + + * ``int min_dims``, minimum allowable dimensions. + + * ``int max_dims``, maximum allowable dimensions. + + Check to see if ``ary`` is contiguous. If so, return the input + pointer and flag it as not a new object. If it is not contiguous, + create a new ``PyArrayObject*`` using the original data, flag it + as a new object and return the pointer. + + + **obj_to_array_contiguous_allow_conversion()** + + Return type: ``PyArrayObject*`` + + Arguments: + + * ``PyObject* input``, a general Python object. + + * ``int typecode``, the desired NumPy typecode of the resulting + array. + + * ``int* is_new_object``, returns a value of 0 if no conversion + performed, else 1. + + Convert ``input`` to a contiguous ``PyArrayObject*`` of the + specified type. If the input object is not a contiguous + ``PyArrayObject*``, a new one will be created and the new object + flag will be set. + + + **require_contiguous()** + + Return type: ``int`` + + Arguments: + + * ``PyArrayObject* ary``, a NumPy array. + + Test whether ``ary`` is contiguous. If so, return 1. Otherwise, + set a Python error and return 0. + + + **require_native()** + + Return type: ``int`` + + Arguments: + + * ``PyArray_Object* ary``, a NumPy array. + + Require that ``ary`` is not byte-swapped. If the array is not + byte-swapped, return 1. Otherwise, set a Python error and + return 0. + + **require_dimensions()** + + Return type: ``int`` + + Arguments: + + * ``PyArrayObject* ary``, a NumPy array. + + * ``int exact_dimensions``, the desired number of dimensions. + + Require ``ary`` to have a specified number of dimensions. If the + array has the specified number of dimensions, return 1. + Otherwise, set a Python error and return 0. + + + **require_dimensions_n()** + + Return type: ``int`` + + Arguments: + + * ``PyArrayObject* ary``, a NumPy array. + + * ``int* exact_dimensions``, an array of integers representing + acceptable numbers of dimensions. + + * ``int n``, the length of ``exact_dimensions``. + + Require ``ary`` to have one of a list of specified number of + dimensions. If the array has one of the specified number of + dimensions, return 1. Otherwise, set the Python error string + and return 0. + + + **require_size()** + + Return type: ``int`` + + Arguments: + + * ``PyArrayObject* ary``, a NumPy array. + + * ``npy_int* size``, an array representing the desired lengths of + each dimension. + + * ``int n``, the length of ``size``. + + Require ``ary`` to have a specified shape. If the array has the + specified shape, return 1. Otherwise, set the Python error + string and return 0. + + + **require_fortran()** + + Return type: ``int`` + + Arguments: + + * ``PyArrayObject* ary``, a NumPy array. + + Require the given ``PyArrayObject`` to to be FORTRAN ordered. If + the the ``PyArrayObject`` is already FORTRAN ordered, do nothing. + Else, set the FORTRAN ordering flag and recompute the strides. + + +Beyond the Provided Typemaps +---------------------------- + +There are many C or C++ array/NumPy array situations not covered by +a simple ``%include "numpy.i"`` and subsequent ``%apply`` directives. 
+ +A Common Example +```````````````` + +Consider a reasonable prototype for a dot product function:: + + double dot(int len, double* vec1, double* vec2); + +The Python interface that we want is:: + + def dot(vec1, vec2): + """ + dot(PyObject,PyObject) -> double + """ + +The problem here is that there is one dimension argument and two array +arguments, and our typemaps are set up for dimensions that apply to a +single array (in fact, `SWIG`_ does not provide a mechanism for +associating ``len`` with ``vec2`` that takes two Python input +arguments). The recommended solution is the following:: + + %apply (int DIM1, double* IN_ARRAY1) {(int len1, double* vec1), + (int len2, double* vec2)} + %rename (dot) my_dot; + %exception my_dot { + $action + if (PyErr_Occurred()) SWIG_fail; + } + %inline %{ + double my_dot(int len1, double* vec1, int len2, double* vec2) { + if (len1 != len2) { + PyErr_Format(PyExc_ValueError, + "Arrays of lengths (%d,%d) given", + len1, len2); + return 0.0; + } + return dot(len1, vec1, vec2); + } + %} + +If the header file that contains the prototype for ``double dot()`` +also contains other prototypes that you want to wrap, so that you need +to ``%include`` this header file, then you will also need a ``%ignore +dot;`` directive, placed after the ``%rename`` and before the +``%include`` directives. Or, if the function in question is a class +method, you will want to use ``%extend`` rather than ``%inline`` in +addition to ``%ignore``. + +**A note on error handling:** Note that ``my_dot`` returns a +``double`` but that it can also raise a Python error. The +resulting wrapper function will return a Python float +representation of 0.0 when the vector lengths do not match. Since +this is not ``NULL``, the Python interpreter will not know to check +for an error. For this reason, we add the ``%exception`` directive +above for ``my_dot`` to get the behavior we want (note that +``$action`` is a macro that gets expanded to a valid call to +``my_dot``). In general, you will probably want to write a `SWIG`_ +macro to perform this task. + +Other Situations +```````````````` + +There are other wrapping situations in which ``numpy.i`` may be +helpful when you encounter them. + + * In some situations, it is possible that you could use the + ``%numpy_templates`` macro to implement typemaps for your own + types. See the `Other Common Types: bool`_ or `Other Common + Types: complex`_ sections for examples. Another situation is if + your dimensions are of a type other than ``int`` (say ``long`` for + example):: + + %numpy_typemaps(double, NPY_DOUBLE, long) + + * You can use the code in ``numpy.i`` to write your own typemaps. + For example, if you had a four-dimensional array as a function + argument, you could cut-and-paste the appropriate + three-dimensional typemaps into your interface file. The + modifications for the fourth dimension would be trivial. + + * Sometimes, the best approach is to use the ``%extend`` directive + to define new methods for your classes (or overload existing ones) + that take a ``PyObject*`` (that either is or can be converted to a + ``PyArrayObject*``) instead of a pointer to a buffer. In this + case, the helper routines in ``numpy.i`` can be very useful. + + * Writing typemaps can be a bit nonintuitive. If you have specific + questions about writing `SWIG`_ typemaps for NumPy, the + developers of ``numpy.i`` do monitor the + `Numpy-discussion `_ and + `Swig-user `_ mail lists. 
+ +A Final Note +```````````` + +When you use the ``%apply`` directive, as is usually necessary to use +``numpy.i``, it will remain in effect until you tell `SWIG`_ that it +shouldn't be. If the arguments to the functions or methods that you +are wrapping have common names, such as ``length`` or ``vector``, +these typemaps may get applied in situations you do not expect or +want. Therefore, it is always a good idea to add a ``%clear`` +directive after you are done with a specific typemap:: + + %apply (double* IN_ARRAY1, int DIM1) {(double* vector, int length)} + %include "my_header.h" + %clear (double* vector, int length); + +In general, you should target these typemap signatures specifically +where you want them, and then clear them after you are done. + +Summary +------- + +Out of the box, ``numpy.i`` provides typemaps that support conversion +between NumPy arrays and C arrays: + + * That can be one of 12 different scalar types: ``signed char``, + ``unsigned char``, ``short``, ``unsigned short``, ``int``, + ``unsigned int``, ``long``, ``unsigned long``, ``long long``, + ``unsigned long long``, ``float`` and ``double``. + + * That support 41 different argument signatures for each data type, + including: + + + One-dimensional, two-dimensional and three-dimensional arrays. + + + Input-only, in-place, argout and argoutview behavior. + + + Hard-coded dimensions, data-buffer-then-dimensions + specification, and dimensions-then-data-buffer specification. + + + Both C-ordering ("last dimension fastest") or FORTRAN-ordering + ("first dimension fastest") support for 2D and 3D arrays. + +The ``numpy.i`` interface file also provides additional tools for +wrapper developers, including: + + * A `SWIG`_ macro (``%numpy_typemaps``) with three arguments for + implementing the 41 argument signatures for the user's choice of + (1) C data type, (2) NumPy data type (assuming they match), and + (3) dimension type. + + * Nine C macros and 13 C functions that can be used to write + specialized typemaps, extensions, or inlined functions that handle + cases not covered by the provided typemaps. + Added: trunk/doc/source/reference/swig.rst =================================================================== --- trunk/doc/source/reference/swig.rst (rev 0) +++ trunk/doc/source/reference/swig.rst 2010-09-04 09:52:24 UTC (rev 8684) @@ -0,0 +1,12 @@ +************** +Numpy and SWIG +************** + +.. sectionauthor:: Bill Spotz + + +.. toctree:: + :maxdepth: 2 + + swig.interface-file + swig.testing Copied: trunk/doc/source/reference/swig.testing.rst (from rev 8683, trunk/doc/swig/doc/testing.txt) =================================================================== --- trunk/doc/source/reference/swig.testing.rst (rev 0) +++ trunk/doc/source/reference/swig.testing.rst 2010-09-04 09:52:24 UTC (rev 8684) @@ -0,0 +1,164 @@ +Testing the numpy.i Typemaps +============================ + +Introduction +------------ + +Writing tests for the ``numpy.i`` `SWIG `_ +interface file is a combinatorial headache. At present, 12 different +data types are supported, each with 23 different argument signatures, +for a total of 276 typemaps supported "out of the box". Each of these +typemaps, in turn, might require several unit tests in order to verify +expected behavior for both proper and improper inputs. Currently, +this results in 1,020 individual unit tests that are performed when +``make test`` is run in the ``numpy/docs/swig`` subdirectory. 
+ +To facilitate this many similar unit tests, some high-level +programming techniques are employed, including C and `SWIG`_ macros, +as well as Python inheritance. The purpose of this document is to describe +the testing infrastructure employed to verify that the ``numpy.i`` +typemaps are working as expected. + +Testing Organization +-------------------- + +There are three indepedent testing frameworks supported, for one-, +two-, and three-dimensional arrays respectively. For one-dimensional +arrays, there are two C++ files, a header and a source, named:: + + Vector.h + Vector.cxx + +that contain prototypes and code for a variety of functions that have +one-dimensional arrays as function arguments. The file:: + + Vector.i + +is a `SWIG`_ interface file that defines a python module ``Vector`` +that wraps the functions in ``Vector.h`` while utilizing the typemaps +in ``numpy.i`` to correctly handle the C arrays. + +The ``Makefile`` calls ``swig`` to generate ``Vector.py`` and +``Vector_wrap.cxx``, and also executes the ``setup.py`` script that +compiles ``Vector_wrap.cxx`` and links together the extension module +``_Vector.so`` or ``_Vector.dylib``, depending on the platform. This +extension module and the proxy file ``Vector.py`` are both placed in a +subdirectory under the ``build`` directory. + +The actual testing takes place with a Python script named:: + + testVector.py + +that uses the standard Python library module ``unittest``, which +performs several tests of each function defined in ``Vector.h`` for +each data type supported. + +Two-dimensional arrays are tested in exactly the same manner. The +above description applies, but with ``Matrix`` substituted for +``Vector``. For three-dimensional tests, substitute ``Tensor`` for +``Vector``. For the descriptions that follow, we will reference the +``Vector`` tests, but the same information applies to ``Matrix`` and +``Tensor`` tests. + +The command ``make test`` will ensure that all of the test software is +built and then run all three test scripts. + +Testing Header Files +-------------------- + +``Vector.h`` is a C++ header file that defines a C macro called +``TEST_FUNC_PROTOS`` that takes two arguments: ``TYPE``, which is a +data type name such as ``unsigned int``; and ``SNAME``, which is a +short name for the same data type with no spaces, e.g. ``uint``. This +macro defines several function prototypes that have the prefix +``SNAME`` and have at least one argument that is an array of type +``TYPE``. Those functions that have return arguments return a +``TYPE`` value. + +``TEST_FUNC_PROTOS`` is then implemented for all of the data types +supported by ``numpy.i``: + + * ``signed char`` + * ``unsigned char`` + * ``short`` + * ``unsigned short`` + * ``int`` + * ``unsigned int`` + * ``long`` + * ``unsigned long`` + * ``long long`` + * ``unsigned long long`` + * ``float`` + * ``double`` + +Testing Source Files +-------------------- + +``Vector.cxx`` is a C++ source file that implements compilable code +for each of the function prototypes specified in ``Vector.h``. It +defines a C macro ``TEST_FUNCS`` that has the same arguments and works +in the same way as ``TEST_FUNC_PROTOS`` does in ``Vector.h``. +``TEST_FUNCS`` is implemented for each of the 12 data types as above. + +Testing SWIG Interface Files +---------------------------- + +``Vector.i`` is a `SWIG`_ interface file that defines python module +``Vector``. It follows the conventions for using ``numpy.i`` as +described in this chapter. 
It defines a `SWIG`_ macro +``%apply_numpy_typemaps`` that has a single argument ``TYPE``. +It uses the `SWIG`_ directive ``%apply`` to apply the provided +typemaps to the argument signatures found in ``Vector.h``. This macro +is then implemented for all of the data types supported by +``numpy.i``. It then does a ``%include "Vector.h"`` to wrap all of +the function prototypes in ``Vector.h`` using the typemaps in +``numpy.i``. + +Testing Python Scripts +---------------------- + +After ``make`` is used to build the testing extension modules, +``testVector.py`` can be run to execute the tests. As with other +scripts that use ``unittest`` to facilitate unit testing, +``testVector.py`` defines a class that inherits from +``unittest.TestCase``:: + + class VectorTestCase(unittest.TestCase): + +However, this class is not run directly. Rather, it serves as a base +class to several other python classes, each one specific to a +particular data type. The ``VectorTestCase`` class stores two strings +for typing information: + + **self.typeStr** + A string that matches one of the ``SNAME`` prefixes used in + ``Vector.h`` and ``Vector.cxx``. For example, ``"double"``. + + **self.typeCode** + A short (typically single-character) string that represents a + data type in numpy and corresponds to ``self.typeStr``. For + example, if ``self.typeStr`` is ``"double"``, then + ``self.typeCode`` should be ``"d"``. + +Each test defined by the ``VectorTestCase`` class extracts the python +function it is trying to test by accessing the ``Vector`` module's +dictionary:: + + length = Vector.__dict__[self.typeStr + "Length"] + +In the case of double precision tests, this will return the python +function ``Vector.doubleLength``. + +We then define a new test case class for each supported data type with +a short definition such as:: + + class doubleTestCase(VectorTestCase): + def __init__(self, methodName="runTest"): + VectorTestCase.__init__(self, methodName) + self.typeStr = "double" + self.typeCode = "d" + +Each of these 12 classes is collected into a ``unittest.TestSuite``, +which is then executed. Errors and failures are summed together and +returned as the exit argument. Any non-zero result indicates that at +least one test did not pass. Deleted: trunk/doc/swig/doc/numpy_swig.txt =================================================================== --- trunk/doc/swig/doc/numpy_swig.txt 2010-09-04 09:51:57 UTC (rev 8683) +++ trunk/doc/swig/doc/numpy_swig.txt 2010-09-04 09:52:24 UTC (rev 8684) @@ -1,950 +0,0 @@ -========================================== - numpy.i: a SWIG Interface File for NumPy -========================================== - -:Author: Bill Spotz -:Institution: Sandia National Laboratories -:Date: 1 December, 2007 - -.. contents:: - -Introduction -============ - -The Simple Wrapper and Interface Generator (or `SWIG -`_) is a powerful tool for generating wrapper -code for interfacing to a wide variety of scripting languages. -`SWIG`_ can parse header files, and using only the code prototypes, -create an interface to the target language. But `SWIG`_ is not -omnipotent. For example, it cannot know from the prototype:: - - double rms(double* seq, int n); - -what exactly ``seq`` is. Is it a single value to be altered in-place? -Is it an array, and if so what is its length? Is it input-only? -Output-only? Input-output? `SWIG`_ cannot determine these details, -and does not attempt to do so. 
- -If we designed ``rms``, we probably made it a routine that takes an -input-only array of length ``n`` of ``double`` values called ``seq`` -and returns the root mean square. The default behavior of `SWIG`_, -however, will be to create a wrapper function that compiles, but is -nearly impossible to use from the scripting language in the way the C -routine was intended. - -For `python `_, the preferred way of handling -contiguous (or technically, *strided*) blocks of homogeneous data is -with the module `NumPy `_, which provides full -object-oriented access to multidimensional arrays of data. Therefore, -the most logical `python`_ interface for the ``rms`` function would be -(including doc string):: - - def rms(seq): - """ - rms: return the root mean square of a sequence - rms(numpy.ndarray) -> double - rms(list) -> double - rms(tuple) -> double - """ - -where ``seq`` would be a `NumPy`_ array of ``double`` values, and its -length ``n`` would be extracted from ``seq`` internally before being -passed to the C routine. Even better, since `NumPy`_ supports -construction of arrays from arbitrary `python`_ sequences, ``seq`` -itself could be a nearly arbitrary sequence (so long as each element -can be converted to a ``double``) and the wrapper code would -internally convert it to a `NumPy`_ array before extracting its data -and length. - -`SWIG`_ allows these types of conversions to be defined via a -mechanism called typemaps. This document provides information on how -to use ``numpy.i``, a `SWIG`_ interface file that defines a series of -typemaps intended to make the type of array-related conversions -described above relatively simple to implement. For example, suppose -that the ``rms`` function prototype defined above was in a header file -named ``rms.h``. To obtain the `python`_ interface discussed above, -your `SWIG`_ interface file would need the following:: - - %{ - #define SWIG_FILE_WITH_INIT - #include "rms.h" - %} - - %include "numpy.i" - - %init %{ - import_array(); - %} - - %apply (double* IN_ARRAY1, int DIM1) {(double* seq, int n)}; - %include "rms.h" - -Typemaps are keyed off a list of one or more function arguments, -either by type or by type and name. We will refer to such lists as -*signatures*. One of the many typemaps defined by ``numpy.i`` is used -above and has the signature ``(double* IN_ARRAY1, int DIM1)``. The -argument names are intended to suggest that the ``double*`` argument -is an input array of one dimension and that the ``int`` represents -that dimension. This is precisely the pattern in the ``rms`` -prototype. - -Most likely, no actual prototypes to be wrapped will have the argument -names ``IN_ARRAY1`` and ``DIM1``. We use the ``%apply`` directive to -apply the typemap for one-dimensional input arrays of type ``double`` -to the actual prototype used by ``rms``. Using ``numpy.i`` -effectively, therefore, requires knowing what typemaps are available -and what they do.
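Purely as an illustrative aside, here is a rough pure-Python sketch of the conversion that the ``(double* IN_ARRAY1, int DIM1)`` typemap performs before the C routine is reached; it is not part of ``numpy.i`` or of the wrapper code, and the function body simply mimics the intended behavior with `NumPy`_ calls::

    import numpy as np

    def rms(seq):
        """Accept any sequence of numbers, as the wrapped rms would."""
        # The typemap converts the argument to a contiguous 1-D array of doubles
        arr = np.ascontiguousarray(seq, dtype=np.float64)
        if arr.ndim != 1:
            raise TypeError("expected a one-dimensional sequence")
        n = arr.shape[0]                      # extracted as the DIM1 argument
        return np.sqrt(np.dot(arr, arr) / n)  # root mean square

    print(rms([1.0, 2.0, 3.0]))               # lists, tuples and arrays all work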
- -A `SWIG`_ interface file that includes the `SWIG`_ directives given -above will produce wrapper code that looks something like:: - -  1 PyObject *_wrap_rms(PyObject *args) { -  2   PyObject *resultobj = 0; -  3   double *arg1 = (double *) 0 ; -  4   int arg2 ; -  5   double result; -  6   PyArrayObject *array1 = NULL ; -  7   int is_new_object1 = 0 ; -  8   PyObject * obj0 = 0 ; -  9 - 10   if (!PyArg_ParseTuple(args,(char *)"O:rms",&obj0)) SWIG_fail; - 11   { - 12     array1 = obj_to_array_contiguous_allow_conversion( - 13                  obj0, NPY_DOUBLE, &is_new_object1); - 14     npy_intp size[1] = { - 15       -1 - 16     }; - 17     if (!array1 || !require_dimensions(array1, 1) || - 18         !require_size(array1, size, 1)) SWIG_fail; - 19     arg1 = (double*) array1->data; - 20     arg2 = (int) array1->dimensions[0]; - 21   } - 22   result = (double)rms(arg1,arg2); - 23   resultobj = SWIG_From_double((double)(result)); - 24   { - 25     if (is_new_object1 && array1) Py_DECREF(array1); - 26   } - 27   return resultobj; - 28 fail: - 29   { - 30     if (is_new_object1 && array1) Py_DECREF(array1); - 31   } - 32   return NULL; - 33 } - -The typemaps from ``numpy.i`` are responsible for the following lines -of code: 12--20, 25 and 30. Line 10 parses the input to the ``rms`` -function. From the format string ``"O:rms"``, we can see that the -argument list is expected to be a single `python`_ object (specified -by the ``O`` before the colon) whose pointer is stored in -``obj0``. A number of functions, supplied by ``numpy.i``, are called -to make and check the (possible) conversion from a generic `python`_ -object to a `NumPy`_ array. These functions are explained in the -section `Helper Functions`_, but hopefully their names are -self-explanatory. At line 12 we use ``obj0`` to construct a `NumPy`_ -array. At line 17, we check the validity of the result: that it is -non-null and that it has a single dimension of arbitrary length. Once -these states are verified, we extract the data buffer and length in -lines 19 and 20 so that we can call the underlying C function at line -22. Line 25 performs memory management for the case where we have -created a new array that is no longer needed. - -This code has a significant amount of error handling. Note that -``SWIG_fail`` is a macro for ``goto fail``, referring to the label at -line 28. If the user provides the wrong number of arguments, this -will be caught at line 10. If construction of the `NumPy`_ array -fails or produces an array with the wrong number of dimensions, these -errors are caught at line 17. And finally, if an error is detected, -memory is still managed correctly at line 30. - -Note that if the C function signature was in a different order:: - - double rms(int n, double* seq); - -then `SWIG`_ would not match the typemap signature given above with -the argument list for ``rms``. Fortunately, ``numpy.i`` has a set of -typemaps with the data pointer given last:: - - %apply (int DIM1, double* IN_ARRAY1) {(int n, double* seq)}; - -This simply has the effect of switching the definitions of ``arg1`` -and ``arg2`` in lines 3 and 4 of the generated code above, and their -assignments in lines 19 and 20. - -Using numpy.i -============= - -The ``numpy.i`` file is currently located in the ``numpy/docs/swig`` -sub-directory under the ``numpy`` installation directory. Typically, -you will want to copy it to the directory where you are developing -your wrappers. If it is ever adopted by `SWIG`_ developers, then it -will be installed in a standard place where `SWIG`_ can find it.
- -A simple module that only uses a single `SWIG`_ interface file should -include the following:: - - %{ - #define SWIG_FILE_WITH_INIT - %} - %include "numpy.i" - %init %{ - import_array(); - %} - -Within a compiled `python`_ module, ``import_array()`` should only get -called once. This could be in a C/C++ file that you have written and -is linked to the module. If this is the case, then none of your -interface files should ``#define SWIG_FILE_WITH_INIT`` or call -``import_array()``. Or, this initialization call could be in a -wrapper file generated by `SWIG`_ from an interface file that has the -``%init`` block as above. If this is the case, and you have more than -one `SWIG`_ interface file, then only one interface file should -``#define SWIG_FILE_WITH_INIT`` and call ``import_array()``. - -Available Typemaps -================== - -The typemap directives provided by ``numpy.i`` for arrays of different -data types, say ``double`` and ``int``, and dimensions of different -types, say ``int`` or ``long``, are identical to one another except -for the C and `NumPy`_ type specifications. The typemaps are -therefore implemented (typically behind the scenes) via a macro:: - - %numpy_typemaps(DATA_TYPE, DATA_TYPECODE, DIM_TYPE) - -that can be invoked for appropriate ``(DATA_TYPE, DATA_TYPECODE, -DIM_TYPE)`` triplets. For example:: - - %numpy_typemaps(double, NPY_DOUBLE, int) - %numpy_typemaps(int, NPY_INT , int) - -The ``numpy.i`` interface file uses the ``%numpy_typemaps`` macro to -implement typemaps for the following C data types and ``int`` -dimension types: - - * ``signed char`` - * ``unsigned char`` - * ``short`` - * ``unsigned short`` - * ``int`` - * ``unsigned int`` - * ``long`` - * ``unsigned long`` - * ``long long`` - * ``unsigned long long`` - * ``float`` - * ``double`` - -In the following descriptions, we reference a generic ``DATA_TYPE``, which -could be any of the C data types listed above, and ``DIM_TYPE``, which -should be one of the many types of integers. - -The typemap signatures are largely differentiated on the name given to -the buffer pointer. Names with ``FARRAY`` are for FORTRAN-ordered -arrays, and names with ``ARRAY`` are for C-ordered (or 1D) arrays. - -Input Arrays ------------- - -Input arrays are defined as arrays of data that are passed into a -routine but are not altered in-place or returned to the user. The -`python`_ input array is therefore allowed to be almost any `python`_ -sequence (such as a list) that can be converted to the requested type -of array. The input array signatures are - -1D: - - * ``( DATA_TYPE IN_ARRAY1[ANY] )`` - * ``( DATA_TYPE* IN_ARRAY1, int DIM1 )`` - * ``( int DIM1, DATA_TYPE* IN_ARRAY1 )`` - -2D: - - * ``( DATA_TYPE IN_ARRAY2[ANY][ANY] )`` - * ``( DATA_TYPE* IN_ARRAY2, int DIM1, int DIM2 )`` - * ``( int DIM1, int DIM2, DATA_TYPE* IN_ARRAY2 )`` - * ``( DATA_TYPE* IN_FARRAY2, int DIM1, int DIM2 )`` - * ``( int DIM1, int DIM2, DATA_TYPE* IN_FARRAY2 )`` - -3D: - - * ``( DATA_TYPE IN_ARRAY3[ANY][ANY][ANY] )`` - * ``( DATA_TYPE* IN_ARRAY3, int DIM1, int DIM2, int DIM3 )`` - * ``( int DIM1, int DIM2, int DIM3, DATA_TYPE* IN_ARRAY3 )`` - * ``( DATA_TYPE* IN_FARRAY3, int DIM1, int DIM2, int DIM3 )`` - * ``( int DIM1, int DIM2, int DIM3, DATA_TYPE* IN_FARRAY3 )`` - -The first signature listed, ``( DATA_TYPE IN_ARRAY1[ANY] )``, is for -one-dimensional arrays with hard-coded dimensions. Likewise, -``( DATA_TYPE IN_ARRAY2[ANY][ANY] )`` is for two-dimensional arrays -with hard-coded dimensions, and similarly for three-dimensional.
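As a purely illustrative aside (not SWIG output), the distinction between the ``ARRAY`` and ``FARRAY`` names can be seen from the `python`_ side using nothing but `NumPy`_: a nested list is acceptable input to either family of input typemaps, which convert it to the required memory layout behind the scenes::

    import numpy as np

    data = [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]   # any nested sequence will do

    c_style = np.ascontiguousarray(data, dtype=np.float64)  # layout expected by ARRAY typemaps
    f_style = np.asfortranarray(data, dtype=np.float64)     # layout expected by FARRAY typemaps

    print(c_style.flags['C_CONTIGUOUS'], f_style.flags['F_CONTIGUOUS'])  # True True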
- -In-Place Arrays --------------- - -In-place arrays are defined as arrays that are modified in-place. The -input values may or may not be used, but the values at the time the -function returns are significant. The provided `python`_ argument -must therefore be a `NumPy`_ array of the required type. The in-place -signatures are - -1D: - - * ``( DATA_TYPE INPLACE_ARRAY1[ANY] )`` - * ``( DATA_TYPE* INPLACE_ARRAY1, int DIM1 )`` - * ``( int DIM1, DATA_TYPE* INPLACE_ARRAY1 )`` - -2D: - - * ``( DATA_TYPE INPLACE_ARRAY2[ANY][ANY] )`` - * ``( DATA_TYPE* INPLACE_ARRAY2, int DIM1, int DIM2 )`` - * ``( int DIM1, int DIM2, DATA_TYPE* INPLACE_ARRAY2 )`` - * ``( DATA_TYPE* INPLACE_FARRAY2, int DIM1, int DIM2 )`` - * ``( int DIM1, int DIM2, DATA_TYPE* INPLACE_FARRAY2 )`` - -3D: - - * ``( DATA_TYPE INPLACE_ARRAY3[ANY][ANY][ANY] )`` - * ``( DATA_TYPE* INPLACE_ARRAY3, int DIM1, int DIM2, int DIM3 )`` - * ``( int DIM1, int DIM2, int DIM3, DATA_TYPE* INPLACE_ARRAY3 )`` - * ``( DATA_TYPE* INPLACE_FARRAY3, int DIM1, int DIM2, int DIM3 )`` - * ``( int DIM1, int DIM2, int DIM3, DATA_TYPE* INPLACE_FARRAY3 )`` - -These typemaps now check to make sure that the ``INPLACE_ARRAY`` -arguments use native byte ordering. If not, an exception is raised. - -Argout Arrays ------------- - -Argout arrays are arrays that appear in the input arguments in C, but -are in fact output arrays. This pattern occurs often when there is -more than one output variable and the single return argument is -therefore not sufficient. In `python`_, the conventional way to return -multiple arguments is to pack them into a sequence (tuple, list, etc.) -and return the sequence. This is what the argout typemaps do. If a -wrapped function that uses these argout typemaps has more than one -return argument, they are packed into a tuple or list, depending on -the version of `python`_. The `python`_ user does not pass these -arrays in, they simply get returned. For the case where a dimension -is specified, the python user must provide that dimension as an -argument. The argout signatures are - -1D: - - * ``( DATA_TYPE ARGOUT_ARRAY1[ANY] )`` - * ``( DATA_TYPE* ARGOUT_ARRAY1, int DIM1 )`` - * ``( int DIM1, DATA_TYPE* ARGOUT_ARRAY1 )`` - -2D: - - * ``( DATA_TYPE ARGOUT_ARRAY2[ANY][ANY] )`` - -3D: - - * ``( DATA_TYPE ARGOUT_ARRAY3[ANY][ANY][ANY] )`` - -These are typically used in situations where in C/C++, you would -allocate one or more arrays on the heap and call the function to fill -in their values. In `python`_, the arrays are allocated for you and -returned as new array objects. - -Note that we support ``DATA_TYPE*`` argout typemaps in 1D, but not 2D -or 3D. This is because of a quirk with the `SWIG`_ typemap syntax and -cannot be avoided. Note that for these types of 1D typemaps, the -`python`_ function will take a single argument representing ``DIM1``. - -Argoutview Arrays ----------------- - -Argoutview arrays are for when your C code provides you with a view of -its internal data and does not require any memory to be allocated by -the user. This can be dangerous. There is almost no way to guarantee -that the internal data from the C code will remain in existence for -the entire lifetime of the `NumPy`_ array that encapsulates it. If -the user destroys the object that provides the view of the data before -destroying the `NumPy`_ array, then using that array may result in bad -memory references or segmentation faults. Nevertheless, there are -situations, working with large data sets, where you simply have no -other choice.
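As an illustrative aside (not from ``numpy.i``), the ownership issue behind argoutview arrays can be pictured with ordinary `NumPy`_ objects; here the ``bytearray`` is merely a stand-in for memory owned by the wrapped C library::

    import numpy as np

    owner = bytearray(4 * 8)                       # stand-in for memory owned elsewhere
    view = np.frombuffer(owner, dtype=np.float64)  # array that merely views that memory

    # The array does not own its data, much like an ARGOUTVIEW result;
    # it is only safe to use while the owning object is still alive.
    print(view.flags['OWNDATA'])                   # False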
- -The C code to be wrapped for argoutview arrays is characterized by -pointers: pointers to the dimensions and double pointers to the data, -so that these values can be passed back to the user. The argoutview -typemap signatures are therefore - -1D: - - * ``( DATA_TYPE** ARGOUTVIEW_ARRAY1, DIM_TYPE* DIM1 )`` - * ``( DIM_TYPE* DIM1, DATA_TYPE** ARGOUTVIEW_ARRAY1 )`` - -2D: - - * ``( DATA_TYPE** ARGOUTVIEW_ARRAY2, DIM_TYPE* DIM1, DIM_TYPE* DIM2 )`` - * ``( DIM_TYPE* DIM1, DIM_TYPE* DIM2, DATA_TYPE** ARGOUTVIEW_ARRAY2 )`` - * ``( DATA_TYPE** ARGOUTVIEW_FARRAY2, DIM_TYPE* DIM1, DIM_TYPE* DIM2 )`` - * ``( DIM_TYPE* DIM1, DIM_TYPE* DIM2, DATA_TYPE** ARGOUTVIEW_FARRAY2 )`` - -3D: - - * ``( DATA_TYPE** ARGOUTVIEW_ARRAY3, DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3)`` - * ``( DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DATA_TYPE** ARGOUTVIEW_ARRAY3)`` - * ``( DATA_TYPE** ARGOUTVIEW_FARRAY3, DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3)`` - * ``( DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DATA_TYPE** ARGOUTVIEW_FARRAY3)`` - -Note that arrays with hard-coded dimensions are not supported. These -cannot follow the double pointer signatures of these typemaps. - -Output Arrays ------------- - -The ``numpy.i`` interface file does not support typemaps for output -arrays, for several reasons. First, C/C++ return arguments are -limited to a single value. This prevents obtaining dimension -information in a general way. Second, arrays with hard-coded lengths -are not permitted as return arguments. In other words:: - - double[3] newVector(double x, double y, double z); - -is not legal C/C++ syntax. Therefore, we cannot provide typemaps of -the form:: - - %typemap(out) (TYPE[ANY]); - -If you run into a situation where a function or method is returning a -pointer to an array, your best bet is to write your own version of the -function to be wrapped, either with ``%extend`` for the case of class -methods or ``%ignore`` and ``%rename`` for the case of functions. - -Other Common Types: bool ------------------------- - -Note that C++ type ``bool`` is not supported in the list in the -`Available Typemaps`_ section. NumPy bools are a single byte, while -the C++ ``bool`` is four bytes (at least on my system). Therefore:: - - %numpy_typemaps(bool, NPY_BOOL, int) - -will result in typemaps that will produce code that references -improper data lengths. You can implement the following macro -expansion:: - - %numpy_typemaps(bool, NPY_UINT, int) - -to fix the data length problem, and `Input Arrays`_ will work fine, -but `In-Place Arrays`_ might fail type-checking. - -Other Common Types: complex ---------------------------- - -Typemap conversions for complex floating-point types are also not -supported automatically. This is because `python`_ and `NumPy`_ are -written in C, which does not have native complex types. Both -`python`_ and `NumPy`_ implement their own (essentially equivalent) -``struct`` definitions for complex variables:: - - /* Python */ - typedef struct {double real; double imag;} Py_complex; - - /* NumPy */ - typedef struct {float real, imag;} npy_cfloat; - typedef struct {double real, imag;} npy_cdouble; - -We could have implemented:: - - %numpy_typemaps(Py_complex , NPY_CDOUBLE, int) - %numpy_typemaps(npy_cfloat , NPY_CFLOAT , int) - %numpy_typemaps(npy_cdouble, NPY_CDOUBLE, int) - -which would have provided automatic type conversions for arrays of -type ``Py_complex``, ``npy_cfloat`` and ``npy_cdouble``.
However, it -seemed unlikely that there would be any independent (non-`python`_, -non-`NumPy`_) application code that people would be using `SWIG`_ to -generate a `python`_ interface to, that also used these definitions -for complex types. More likely, these application codes will define -their own complex types, or in the case of C++, use ``std::complex``. -Assuming these data structures are compatible with `python`_ and -`NumPy`_ complex types, ``%numpy_typemap`` expansions as above (with -the user's complex type substituted for the first argument) should -work. - -NumPy Array Scalars and SWIG -============================ - -`SWIG`_ has sophisticated type checking for numerical types. For -example, if your C/C++ routine expects an integer as input, the code -generated by `SWIG`_ will check for both `python`_ integers and -`python`_ long integers, and raise an overflow error if the provided -`python`_ integer is too big to cast down to a C integer. With the -introduction of `NumPy`_ array scalars into your `python`_ code, you -might conceivably extract an integer from a `NumPy`_ array and attempt -to pass this to a `SWIG`_-wrapped C/C++ function that expects an -``int``, but the `SWIG`_ type checking will not recognize the `NumPy`_ -array scalar as an integer. (Often, this does in fact work -- it -depends on whether `NumPy`_ recognizes the integer type you are using -as inheriting from the `python`_ integer type on the platform you are -using. Sometimes, this means that code that works on a 32-bit machine -will fail on a 64-bit machine.) - -If you get a `python`_ error that looks like the following:: - - TypeError: in method 'MyClass_MyMethod', argument 2 of type 'int' - -and the argument you are passing is an integer extracted from a -`NumPy`_ array, then you have stumbled upon this problem. The -solution is to modify the `SWIG`_ type conversion system to accept -`NumPy`_ array scalars in addition to the standard integer types. -Fortunately, this capability has been provided for you. Simply copy -the file:: - - pyfragments.swg - -to the working build directory for your project, and this problem will -be fixed. It is suggested that you do this anyway, as it only -increases the capabilities of your `python`_ interface. - -Why is There a Second File? ---------------------------- - -The `SWIG`_ type checking and conversion system is a complicated -combination of C macros, `SWIG`_ macros, `SWIG`_ typemaps and `SWIG`_ -fragments. Fragments are a way to conditionally insert code into your -wrapper file if it is needed, and not insert it if not needed. If -multiple typemaps require the same fragment, the fragment only gets -inserted into your wrapper code once. - -There is a fragment for converting a `python`_ integer to a C -``long``. There is a different fragment that converts a `python`_ -integer to a C ``int``, which calls the routine defined in the -``long`` fragment. We can make the changes we want here by changing -the definition for the ``long`` fragment. `SWIG`_ determines the -active definition for a fragment using a "first come, first served" -system. That is, we need to define the fragment for ``long`` -conversions prior to `SWIG`_ doing it internally. `SWIG`_ allows us -to do this by putting our fragment definitions in the file -``pyfragments.swg``. If we were to put the new fragment definitions -in ``numpy.i``, they would be ignored. - -Helper Functions -================ - -The ``numpy.i`` file contains several macros and routines that it -uses internally to build its typemaps.
However, these functions may -be useful elsewhere in your interface file. These macros and routines -are implemented as fragments, which are described briefly in the -previous section. If you try to use one or more of the following -macros or functions, but your compiler complains that it does not -recognize the symbol, then you need to force these fragments to appear -in your code using:: - - %fragment("NumPy_Fragments"); - -in your `SWIG`_ interface file. - -Macros ------- - - **is_array(a)** - Evaluates as true if ``a`` is non-``NULL`` and can be cast to a - ``PyArrayObject*``. - - **array_type(a)** - Evaluates to the integer data type code of ``a``, assuming ``a`` can - be cast to a ``PyArrayObject*``. - - **array_numdims(a)** - Evaluates to the integer number of dimensions of ``a``, assuming - ``a`` can be cast to a ``PyArrayObject*``. - - **array_dimensions(a)** - Evaluates to an array of type ``npy_intp`` and length - ``array_numdims(a)``, giving the lengths of all of the dimensions - of ``a``, assuming ``a`` can be cast to a ``PyArrayObject*``. - - **array_size(a,i)** - Evaluates to the ``i``-th dimension size of ``a``, assuming ``a`` - can be cast to a ``PyArrayObject*``. - - **array_data(a)** - Evaluates to a pointer of type ``void*`` that points to the data - buffer of ``a``, assuming ``a`` can be cast to a ``PyArrayObject*``. - - **array_is_contiguous(a)** - Evaluates as true if ``a`` is a contiguous array. Equivalent to - ``(PyArray_ISCONTIGUOUS(a))``. - - **array_is_native(a)** - Evaluates as true if the data buffer of ``a`` uses native byte - order. Equivalent to ``(PyArray_ISNOTSWAPPED(a))``. - - **array_is_fortran(a)** - Evaluates as true if ``a`` is FORTRAN ordered. - -Routines --------- - - **pytype_string()** - - Return type: ``char*`` - - Arguments: - - * ``PyObject* py_obj``, a general `python`_ object. - - Return a string describing the type of ``py_obj``. - - - **typecode_string()** - - Return type: ``char*`` - - Arguments: - - * ``int typecode``, a `NumPy`_ integer typecode. - - Return a string describing the type corresponding to the `NumPy`_ - ``typecode``. - - **type_match()** - - Return type: ``int`` - - Arguments: - - * ``int actual_type``, the `NumPy`_ typecode of a `NumPy`_ array. - - * ``int desired_type``, the desired `NumPy`_ typecode. - - Make sure that ``actual_type`` is compatible with - ``desired_type``. For example, this allows character and - byte types, or int and long types, to match. This is now - equivalent to ``PyArray_EquivTypenums()``. - - - **obj_to_array_no_conversion()** - - Return type: ``PyArrayObject*`` - - Arguments: - - * ``PyObject* input``, a general `python`_ object. - - * ``int typecode``, the desired `NumPy`_ typecode. - - Cast ``input`` to a ``PyArrayObject*`` if legal, and ensure that - it is of type ``typecode``. If ``input`` cannot be cast, or the - ``typecode`` is wrong, set a `python`_ error and return ``NULL``. - - - **obj_to_array_allow_conversion()** - - Return type: ``PyArrayObject*`` - - Arguments: - - * ``PyObject* input``, a general `python`_ object. - - * ``int typecode``, the desired `NumPy`_ typecode of the resulting - array. - - * ``int* is_new_object``, returns a value of 0 if no conversion - performed, else 1. - - Convert ``input`` to a `NumPy`_ array with the given ``typecode``. - On success, return a valid ``PyArrayObject*`` with the correct - type. On failure, the `python`_ error string will be set and the - routine returns ``NULL``. 
- - - **make_contiguous()** - - Return type: ``PyArrayObject*`` - - Arguments: - - * ``PyArrayObject* ary``, a `NumPy`_ array. - - * ``int* is_new_object``, returns a value of 0 if no conversion - performed, else 1. - - * ``int min_dims``, minimum allowable dimensions. - - * ``int max_dims``, maximum allowable dimensions. - - Check to see if ``ary`` is contiguous. If so, return the input - pointer and flag it as not a new object. If it is not contiguous, - create a new ``PyArrayObject*`` using the original data, flag it - as a new object and return the pointer. - - - **obj_to_array_contiguous_allow_conversion()** - - Return type: ``PyArrayObject*`` - - Arguments: - - * ``PyObject* input``, a general `python`_ object. - - * ``int typecode``, the desired `NumPy`_ typecode of the resulting - array. - - * ``int* is_new_object``, returns a value of 0 if no conversion - performed, else 1. - - Convert ``input`` to a contiguous ``PyArrayObject*`` of the - specified type. If the input object is not a contiguous - ``PyArrayObject*``, a new one will be created and the new object - flag will be set. - - - **require_contiguous()** - - Return type: ``int`` - - Arguments: - - * ``PyArrayObject* ary``, a `NumPy`_ array. - - Test whether ``ary`` is contiguous. If so, return 1. Otherwise, - set a `python`_ error and return 0. - - - **require_native()** - - Return type: ``int`` - - Arguments: - - * ``PyArrayObject* ary``, a `NumPy`_ array. - - Require that ``ary`` is not byte-swapped. If the array is not - byte-swapped, return 1. Otherwise, set a `python`_ error and - return 0. - - **require_dimensions()** - - Return type: ``int`` - - Arguments: - - * ``PyArrayObject* ary``, a `NumPy`_ array. - - * ``int exact_dimensions``, the desired number of dimensions. - - Require ``ary`` to have a specified number of dimensions. If the - array has the specified number of dimensions, return 1. - Otherwise, set a `python`_ error and return 0. - - - **require_dimensions_n()** - - Return type: ``int`` - - Arguments: - - * ``PyArrayObject* ary``, a `NumPy`_ array. - - * ``int* exact_dimensions``, an array of integers representing - acceptable numbers of dimensions. - - * ``int n``, the length of ``exact_dimensions``. - - Require ``ary`` to have one of a list of specified numbers of - dimensions. If the array has one of the specified numbers of - dimensions, return 1. Otherwise, set the `python`_ error string - and return 0. - - - **require_size()** - - Return type: ``int`` - - Arguments: - - * ``PyArrayObject* ary``, a `NumPy`_ array. - - * ``npy_intp* size``, an array representing the desired lengths of - each dimension. - - * ``int n``, the length of ``size``. - - Require ``ary`` to have a specified shape. If the array has the - specified shape, return 1. Otherwise, set the `python`_ error - string and return 0. - - - **require_fortran()** - - Return type: ``int`` - - Arguments: - - * ``PyArrayObject* ary``, a `NumPy`_ array. - - Require the given ``PyArrayObject`` to be FORTRAN ordered. If - the ``PyArrayObject`` is already FORTRAN ordered, do nothing. - Else, set the FORTRAN ordering flag and recompute the strides. - - -Beyond the Provided Typemaps -============================ - -There are many C or C++ array/`NumPy`_ array situations not covered by -a simple ``%include "numpy.i"`` and subsequent ``%apply`` directives.
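As a purely illustrative aside, the kind of validation that the ``require_*`` routines above perform can be sketched in plain `python`_ with `NumPy`_; the function name and error messages below are made up for the example and are not part of ``numpy.i``::

    import numpy as np

    def require(ary, dtype=np.float64, ndim=1):
        """Rough Python analogue of the require_* helper checks."""
        a = np.asarray(ary)
        if a.dtype != np.dtype(dtype):
            raise TypeError("array of the wrong data type")
        if not a.flags['C_CONTIGUOUS']:
            raise TypeError("array is not contiguous")
        if not a.dtype.isnative:
            raise TypeError("array is not in native byte order")
        if a.ndim != ndim:
            raise TypeError("array has the wrong number of dimensions")
        return a

    require(np.zeros(3))   # passes; np.zeros((2, 2)) would raise TypeError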
- -A Common Example ---------------- - -Consider a reasonable prototype for a dot product function:: - - double dot(int len, double* vec1, double* vec2); - -The `python`_ interface that we want is:: - - def dot(vec1, vec2): - """ - dot(PyObject,PyObject) -> double - """ - -The problem here is that there is one dimension argument and two array -arguments, and our typemaps are set up for dimensions that apply to a -single array (in fact, `SWIG`_ does not provide a mechanism for -associating ``len`` with ``vec2`` that takes two `python`_ input -arguments). The recommended solution is the following:: - - %apply (int DIM1, double* IN_ARRAY1) {(int len1, double* vec1), - (int len2, double* vec2)} - %rename (dot) my_dot; - %exception my_dot { - $action - if (PyErr_Occurred()) SWIG_fail; - } - %inline %{ - double my_dot(int len1, double* vec1, int len2, double* vec2) { - if (len1 != len2) { - PyErr_Format(PyExc_ValueError, - "Arrays of lengths (%d,%d) given", - len1, len2); - return 0.0; - } - return dot(len1, vec1, vec2); - } - %} - -If the header file that contains the prototype for ``double dot()`` -also contains other prototypes that you want to wrap, so that you need -to ``%include`` this header file, then you will also need a ``%ignore -dot;`` directive, placed after the ``%rename`` and before the -``%include`` directives. Or, if the function in question is a class -method, you will want to use ``%extend`` rather than ``%inline`` in -addition to ``%ignore``. - -**A note on error handling:** Note that ``my_dot`` returns a -``double`` but that it can also raise a `python`_ error. The -resulting wrapper function will return a `python`_ float -representation of 0.0 when the vector lengths do not match. Since -this is not ``NULL``, the `python`_ interpreter will not know to check -for an error. For this reason, we add the ``%exception`` directive -above for ``my_dot`` to get the behavior we want (note that -``$action`` is a macro that gets expanded to a valid call to -``my_dot``). In general, you will probably want to write a `SWIG`_ -macro to perform this task. - -Other Situations ---------------- - -There are other wrapping situations you may encounter in which -``numpy.i`` can be helpful. - - * In some situations, it is possible that you could use the - ``%numpy_typemaps`` macro to implement typemaps for your own - types. See the `Other Common Types: bool`_ or `Other Common - Types: complex`_ sections for examples. Another situation is if - your dimensions are of a type other than ``int`` (say ``long`` for - example):: - - %numpy_typemaps(double, NPY_DOUBLE, long) - - * You can use the code in ``numpy.i`` to write your own typemaps. - For example, if you had a four-dimensional array as a function - argument, you could cut-and-paste the appropriate - three-dimensional typemaps into your interface file. The - modifications for the fourth dimension would be trivial. - - * Sometimes, the best approach is to use the ``%extend`` directive - to define new methods for your classes (or overload existing ones) - that take a ``PyObject*`` (that either is or can be converted to a - ``PyArrayObject*``) instead of a pointer to a buffer. In this - case, the helper routines in ``numpy.i`` can be very useful. - - * Writing typemaps can be a bit nonintuitive. If you have specific - questions about writing `SWIG`_ typemaps for `NumPy`_, the - developers of ``numpy.i`` do monitor the - `Numpy-discussion `_ and - `Swig-user `_ mail lists.
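Purely for illustration (this is not SWIG-generated code), the `python`_-level behaviour intended for the wrapped ``dot`` above, including the length-mismatch error, can be mimicked directly with `NumPy`_::

    import numpy as np

    def dot(vec1, vec2):
        """Mirror of the behaviour intended for the wrapped my_dot."""
        v1 = np.ascontiguousarray(vec1, dtype=np.float64)
        v2 = np.ascontiguousarray(vec2, dtype=np.float64)
        if v1.shape != v2.shape:
            raise ValueError("Arrays of lengths (%d,%d) given"
                             % (v1.shape[0], v2.shape[0]))
        return float(np.dot(v1, v2))

    print(dot([1, 2, 3], [4, 5, 6]))   # 32.0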
- -A Final Note ------------- - -When you use the ``%apply`` directive, as is usually necessary to use -``numpy.i``, it will remain in effect until you tell `SWIG`_ that it -shouldn't be. If the arguments to the functions or methods that you -are wrapping have common names, such as ``length`` or ``vector``, -these typemaps may get applied in situations you do not expect or -want. Therefore, it is always a good idea to add a ``%clear`` -directive after you are done with a specific typemap:: - - %apply (double* IN_ARRAY1, int DIM1) {(double* vector, int length)} - %include "my_header.h" - %clear (double* vector, int length); - -In general, you should target these typemap signatures specifically -where you want them, and then clear them after you are done. - -Summary -======= - -Out of the box, ``numpy.i`` provides typemaps that support conversion -between `NumPy`_ arrays and C arrays: - - * That can be one of 12 different scalar types: ``signed char``, - ``unsigned char``, ``short``, ``unsigned short``, ``int``, - ``unsigned int``, ``long``, ``unsigned long``, ``long long``, - ``unsigned long long``, ``float`` and ``double``. - - * That support 41 different argument signatures for each data type, - including: - - + One-dimensional, two-dimensional and three-dimensional arrays. - - + Input-only, in-place, argout and argoutview behavior. - - + Hard-coded dimensions, data-buffer-then-dimensions - specification, and dimensions-then-data-buffer specification. - - + Both C-ordering ("last dimension fastest") or FORTRAN-ordering - ("first dimension fastest") support for 2D and 3D arrays. - -The ``numpy.i`` interface file also provides additional tools for -wrapper developers, including: - - * A `SWIG`_ macro (``%numpy_typemaps``) with three arguments for - implementing the 41 argument signatures for the user's choice of - (1) C data type, (2) `NumPy`_ data type (assuming they match), and - (3) dimension type. - - * Nine C macros and 13 C functions that can be used to write - specialized typemaps, extensions, or inlined functions that handle - cases not covered by the provided typemaps. - -Acknowledgements -================ - -Many people have worked to glue `SWIG`_ and `NumPy`_ together (as well -as `SWIG`_ and the predecessors of `NumPy`_, Numeric and numarray). -The effort to standardize this work into ``numpy.i`` began at the 2005 -`SciPy `_ Conference with a conversation between -Fernando Perez and myself. Fernando collected helper functions and -typemaps from Eric Jones, Michael Hunter, Anna Omelchenko and Michael -Sanner. Sebastian Hasse and Georg Holzmann have also provided -additional error checking and use cases. The work of these -contributors has made this end result possible. Deleted: trunk/doc/swig/doc/testing.txt =================================================================== --- trunk/doc/swig/doc/testing.txt 2010-09-04 09:51:57 UTC (rev 8683) +++ trunk/doc/swig/doc/testing.txt 2010-09-04 09:52:24 UTC (rev 8684) @@ -1,173 +0,0 @@ -============================ -Testing the numpy.i Typemaps -============================ - -:Author: Bill Spotz -:Institution: Sandia National Laboratories -:Date: 6 April, 2007 - -.. contents:: - -Introduction -============ - -Writing tests for the ``numpy.i`` `SWIG `_ -interface file is a combinatorial headache. At present, 12 different -data types are supported, each with 23 different argument signatures, -for a total of 276 typemaps supported "out of the box". 
Each of these -typemaps, in turn, might require several unit tests in order to verify -expected behavior for both proper and improper inputs. Currently, -this results in 1,020 individual unit tests that are performed when -``make test`` is run in the ``numpy/docs/swig`` subdirectory. - -To facilitate this many similar unit tests, some high-level -programming techniques are employed, including C and `SWIG`_ macros, -as well as `python `_ inheritance. The -purpose of this document is to describe the testing infrastructure -employed to verify that the ``numpy.i`` typemaps are working as -expected. - -Testing Organization -==================== - -There are three independent testing frameworks supported, for one-, -two-, and three-dimensional arrays respectively. For one-dimensional -arrays, there are two C++ files, a header and a source, named:: - - Vector.h - Vector.cxx - -that contain prototypes and code for a variety of functions that have -one-dimensional arrays as function arguments. The file:: - - Vector.i - -is a `SWIG`_ interface file that defines a python module ``Vector`` -that wraps the functions in ``Vector.h`` while utilizing the typemaps -in ``numpy.i`` to correctly handle the C arrays. - -The ``Makefile`` calls ``swig`` to generate ``Vector.py`` and -``Vector_wrap.cxx``, and also executes the ``setup.py`` script that -compiles ``Vector_wrap.cxx`` and links together the extension module -``_Vector.so`` or ``_Vector.dylib``, depending on the platform. This -extension module and the proxy file ``Vector.py`` are both placed in a -subdirectory under the ``build`` directory. - -The actual testing takes place with a `python`_ script named:: - - testVector.py - -that uses the standard `python`_ library module ``unittest``, which -performs several tests of each function defined in ``Vector.h`` for -each data type supported. - -Two-dimensional arrays are tested in exactly the same manner. The -above description applies, but with ``Matrix`` substituted for -``Vector``. For three-dimensional tests, substitute ``Tensor`` for -``Vector``. For the descriptions that follow, we will reference the -``Vector`` tests, but the same information applies to ``Matrix`` and -``Tensor`` tests. - -The command ``make test`` will ensure that all of the test software is -built and then run all three test scripts. - -Testing Header Files -==================== - -``Vector.h`` is a C++ header file that defines a C macro called -``TEST_FUNC_PROTOS`` that takes two arguments: ``TYPE``, which is a -data type name such as ``unsigned int``; and ``SNAME``, which is a -short name for the same data type with no spaces, e.g. ``uint``. This -macro defines several function prototypes that have the prefix -``SNAME`` and have at least one argument that is an array of type -``TYPE``. Those functions that have return arguments return a -``TYPE`` value. - -``TEST_FUNC_PROTOS`` is then implemented for all of the data types -supported by ``numpy.i``: - - * ``signed char`` - * ``unsigned char`` - * ``short`` - * ``unsigned short`` - * ``int`` - * ``unsigned int`` - * ``long`` - * ``unsigned long`` - * ``long long`` - * ``unsigned long long`` - * ``float`` - * ``double`` - -Testing Source Files -==================== - -``Vector.cxx`` is a C++ source file that implements compilable code -for each of the function prototypes specified in ``Vector.h``. It -defines a C macro ``TEST_FUNCS`` that has the same arguments and works -in the same way as ``TEST_FUNC_PROTOS`` does in ``Vector.h``.
-``TEST_FUNCS`` is implemented for each of the 12 data types as above. - -Testing SWIG Interface Files -============================ - -``Vector.i`` is a `SWIG`_ interface file that defines python module -``Vector``. It follows the conventions for using ``numpy.i`` as -described in the `numpy.i documentation `_. It -defines a `SWIG`_ macro ``%apply_numpy_typemaps`` that has a single -argument ``TYPE``. It uses the `SWIG`_ directive ``%apply`` as -described in the `numpy.i documentation`_ to apply the provided -typemaps to the argument signatures found in ``Vector.h``. This macro -is then implemented for all of the data types supported by -``numpy.i``. It then does a ``%include "Vector.h"`` to wrap all of -the function prototypes in ``Vector.h`` using the typemaps in -``numpy.i``. - -Testing Python Scripts -====================== - -After ``make`` is used to build the testing extension modules, -``testVector.py`` can be run to execute the tests. As with other -scripts that use ``unittest`` to facilitate unit testing, -``testVector.py`` defines a class that inherits from -``unittest.TestCase``:: - - class VectorTestCase(unittest.TestCase): - -However, this class is not run directly. Rather, it serves as a base -class to several other python classes, each one specific to a -particular data type. The ``VectorTestCase`` class stores two strings -for typing information: - - **self.typeStr** - A string that matches one of the ``SNAME`` prefixes used in - ``Vector.h`` and ``Vector.cxx``. For example, ``"double"``. - - **self.typeCode** - A short (typically single-character) string that represents a - data type in numpy and corresponds to ``self.typeStr``. For - example, if ``self.typeStr`` is ``"double"``, then - ``self.typeCode`` should be ``"d"``. - -Each test defined by the ``VectorTestCase`` class extracts the python -function it is trying to test by accessing the ``Vector`` module's -dictionary:: - - length = Vector.__dict__[self.typeStr + "Length"] - -In the case of double precision tests, this will return the python -function ``Vector.doubleLength``. - -We then define a new test case class for each supported data type with -a short definition such as:: - - class doubleTestCase(VectorTestCase): - def __init__(self, methodName="runTest"): - VectorTestCase.__init__(self, methodName) - self.typeStr = "double" - self.typeCode = "d" - -Each of these 12 classes is collected into a ``unittest.TestSuite``, -which is then executed. Errors and failures are summed together and -returned as the exit argument. Any non-zero result indicates that at -least one test did not pass. From numpy-svn at scipy.org Sat Sep 4 05:52:44 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sat, 4 Sep 2010 04:52:44 -0500 (CDT) Subject: [Numpy-svn] r8685 - trunk/doc/swig/doc Message-ID: <20100904095244.CF71F39CC3F@scipy.org> Author: rgommers Date: 2010-09-04 04:52:44 -0500 (Sat, 04 Sep 2010) New Revision: 8685 Removed: trunk/doc/swig/doc/Makefile trunk/doc/swig/doc/numpy_swig.html trunk/doc/swig/doc/numpy_swig.pdf trunk/doc/swig/doc/testing.html trunk/doc/swig/doc/testing.pdf Log: DOC: remove built separate swig docs. 
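As a self-contained illustration of this pattern (the test method and the two subclasses below are made up for the example and are not the actual contents of ``testVector.py``), the per-type test classes can be collected into a suite and the combined error and failure count returned as the exit status like so::

    import sys
    import unittest

    class VectorTestCase(unittest.TestCase):
        typeStr = "double"                 # overridden by the per-type subclasses
        def testTypeStrIsSet(self):
            self.assertTrue(isinstance(self.typeStr, str))

    class doubleTestCase(VectorTestCase):
        typeStr = "double"

    class intTestCase(VectorTestCase):
        typeStr = "int"

    suite = unittest.TestSuite()
    suite.addTest(unittest.makeSuite(doubleTestCase))
    suite.addTest(unittest.makeSuite(intTestCase))

    result = unittest.TextTestRunner(verbosity=1).run(suite)
    # Exit status is errors plus failures, so any non-zero value means a test did not pass
    sys.exit(len(result.errors) + len(result.failures))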
Deleted: trunk/doc/swig/doc/Makefile =================================================================== --- trunk/doc/swig/doc/Makefile 2010-09-04 09:52:24 UTC (rev 8684) +++ trunk/doc/swig/doc/Makefile 2010-09-04 09:52:44 UTC (rev 8685) @@ -1,51 +0,0 @@ -# ReStructured Text -RST2HTML = rst2html.py -RST2LATEX = rst2latex.py -RFLAGS = --generator --time -HTML_FLAGS = --no-xml-declaration -LATEX_FLAGS = -LATEX = pdflatex - -# Web pages that need to be made -WEB_PAGES = numpy_swig.html testing.html - -# LaTeX files that need to be made -LATEX_FILES = numpy_swig.tex testing.tex - -# PDF files that need to be made -PDF_FILES = numpy_swig.pdf testing.pdf - -# Default target: documentation -.PHONY : doc -doc: html pdf - -# HTML target -.PHONY : html -html: $(WEB_PAGES) - -# Rule: %.txt -> %.html -%.html: %.txt - $(RST2HTML) $(RFLAGS) $(HTML_FLAGS) $< $@ - -# LaTeX target -.PHONY : tex -tex: $(LATEX_FILES) - -# Rule: %.txt -> %.tex -%.tex: %.txt - $(RST2LATEX) $(RFLAGS) $(LATEX_FLAGS) $< $@ - -# PDF target -.PHONY : pdf -pdf: $(PDF_FILES) - -# Rule: %.tex -> %.pdf -%.pdf: %.tex - $(LATEX) $< - $(LATEX) $< - -# Clean target -.PHONY : clean -clean: - $(RM) $(LATEX_FILES) - $(RM) *.pyc *.aux *.dvi *.log *.out *~ Deleted: trunk/doc/swig/doc/numpy_swig.html =================================================================== --- trunk/doc/swig/doc/numpy_swig.html 2010-09-04 09:52:24 UTC (rev 8684) +++ trunk/doc/swig/doc/numpy_swig.html 2010-09-04 09:52:44 UTC (rev 8685) @@ -1,1244 +0,0 @@ - - - - - -numpy.i: a SWIG Interface File for NumPy - - - - - -
-

numpy.i: a SWIG Interface File for NumPy

- --- - - - - - - - -
Author:Bill Spotz
Institution:Sandia National Laboratories
Date:1 December, 2007
- -
-

Introduction

-

The Simple Wrapper and Interface Generator (or SWIG) is a powerful tool for generating wrapper -code for interfacing to a wide variety of scripting languages. -SWIG can parse header files, and using only the code prototypes, -create an interface to the target language. But SWIG is not -omnipotent. For example, it cannot know from the prototype:

-
-double rms(double* seq, int n);
-
-

what exactly seq is. Is it a single value to be altered in-place? -Is it an array, and if so what is its length? Is it input-only? -Output-only? Input-output? SWIG cannot determine these details, -and does not attempt to do so.

-

If we designed rms, we probably made it a routine that takes an -input-only array of length n of double values called seq -and returns the root mean square. The default behavior of SWIG, -however, will be to create a wrapper function that compiles, but is -nearly impossible to use from the scripting language in the way the C -routine was intended.

-

For python, the preferred way of handling -contiguous (or technically, strided) blocks of homogeneous data is -with the module NumPy, which provides full -object-oriented access to multidimensial arrays of data. Therefore, -the most logical python interface for the rms function would be -(including doc string):

-
-def rms(seq):
-    """
-    rms: return the root mean square of a sequence
-    rms(numpy.ndarray) -> double
-    rms(list) -> double
-    rms(tuple) -> double
-    """
-
-

where seq would be a NumPy array of double values, and its -length n would be extracted from seq internally before being -passed to the C routine. Even better, since NumPy supports -construction of arrays from arbitrary python sequences, seq -itself could be a nearly arbitrary sequence (so long as each element -can be converted to a double) and the wrapper code would -internally convert it to a NumPy array before extracting its data -and length.

-

SWIG allows these types of conversions to be defined via a -mechanism called typemaps. This document provides information on how -to use numpy.i, a SWIG interface file that defines a series of -typemaps intended to make the type of array-related conversions -described above relatively simple to implement. For example, suppose -that the rms function prototype defined above was in a header file -named rms.h. To obtain the python interface discussed above, -your SWIG interface file would need the following:

-
-%{
-#define SWIG_FILE_WITH_INIT
-#include "rms.h"
-%}
-
-%include "numpy.i"
-
-%init %{
-import_array();
-%}
-
-%apply (double* IN_ARRAY1, int DIM1) {(double* seq, int n)};
-%include "rms.h"
-
-

Typemaps are keyed off a list of one or more function arguments, -either by type or by type and name. We will refer to such lists as -signatures. One of the many typemaps defined by numpy.i is used -above and has the signature (double* IN_ARRAY1, int DIM1). The -argument names are intended to suggest that the double* argument -is an input array of one dimension and that the int represents -that dimension. This is precisely the pattern in the rms -prototype.

-

Most likely, no actual prototypes to be wrapped will have the argument -names IN_ARRAY1 and DIM1. We use the %apply directive to -apply the typemap for one-dimensional input arrays of type double -to the actual prototype used by rms. Using numpy.i -effectively, therefore, requires knowing what typemaps are available -and what they do.

-

A SWIG interface file that includes the SWIG directives given -above will produce wrapper code that looks something like:

-
- 1 PyObject *_wrap_rms(PyObject *args) {
- 2   PyObject *resultobj = 0;
- 3   double *arg1 = (double *) 0 ;
- 4   int arg2 ;
- 5   double result;
- 6   PyArrayObject *array1 = NULL ;
- 7   int is_new_object1 = 0 ;
- 8   PyObject * obj0 = 0 ;
- 9
-10   if (!PyArg_ParseTuple(args,(char *)"O:rms",&obj0)) SWIG_fail;
-11   {
-12     array1 = obj_to_array_contiguous_allow_conversion(
-13                  obj0, NPY_DOUBLE, &is_new_object1);
-14     npy_intp size[1] = {
-15       -1
-16     };
-17     if (!array1 || !require_dimensions(array1, 1) ||
-18         !require_size(array1, size, 1)) SWIG_fail;
-19     arg1 = (double*) array1->data;
-20     arg2 = (int) array1->dimensions[0];
-21   }
-22   result = (double)rms(arg1,arg2);
-23   resultobj = SWIG_From_double((double)(result));
-24   {
-25     if (is_new_object1 && array1) Py_DECREF(array1);
-26   }
-27   return resultobj;
-28 fail:
-29   {
-30     if (is_new_object1 && array1) Py_DECREF(array1);
-31   }
-32   return NULL;
-33 }
-
-

The typemaps from numpy.i are responsible for the following lines -of code: 12--20, 25 and 30. Line 10 parses the input to the rms -function. From the format string "O:rms", we can see that the -argument list is expected to be a single python object (specified -by the O before the colon) and whose pointer is stored in -obj0. A number of functions, supplied by numpy.i, are called -to make and check the (possible) conversion from a generic python -object to a NumPy array. These functions are explained in the -section Helper Functions, but hopefully their names are -self-explanatory. At line 12 we use obj0 to construct a NumPy -array. At line 17, we check the validity of the result: that it is -non-null and that it has a single dimension of arbitrary length. Once -these states are verified, we extract the data buffer and length in -lines 19 and 20 so that we can call the underlying C function at line -22. Line 25 performs memory management for the case where we have -created a new array that is no longer needed.

-

This code has a significant amount of error handling. Note the -SWIG_fail is a macro for goto fail, refering to the label at -line 28. If the user provides the wrong number of arguments, this -will be caught at line 10. If construction of the NumPy array -fails or produces an array with the wrong number of dimensions, these -errors are caught at line 17. And finally, if an error is detected, -memory is still managed correctly at line 30.

-

Note that if the C function signature was in a different order:

-
-double rms(int n, double* seq);
-
-

that SWIG would not match the typemap signature given above with -the argument list for rms. Fortunately, numpy.i has a set of -typemaps with the data pointer given last:

-
-%apply (int DIM1, double* IN_ARRAY1) {(int n, double* seq)};
-
-

This simply has the effect of switching the definitions of arg1 -and arg2 in lines 3 and 4 of the generated code above, and their -assignments in lines 19 and 20.

-
-
-

Using numpy.i

-

The numpy.i file is currently located in the numpy/docs/swig -sub-directory under the numpy installation directory. Typically, -you will want to copy it to the directory where you are developing -your wrappers. If it is ever adopted by SWIG developers, then it -will be installed in a standard place where SWIG can find it.

-

A simple module that only uses a single SWIG interface file should -include the following:

-
-%{
-#define SWIG_FILE_WITH_INIT
-%}
-%include "numpy.i"
-%init %{
-import_array();
-%}
-
-

Within a compiled python module, import_array() should only get -called once. This could be in a C/C++ file that you have written and -is linked to the module. If this is the case, then none of your -interface files should #define SWIG_FILE_WITH_INIT or call -import_array(). Or, this initialization call could be in a -wrapper file generated by SWIG from an interface file that has the -%init block as above. If this is the case, and you have more than -one SWIG interface file, then only one interface file should -#define SWIG_FILE_WITH_INIT and call import_array().

-
-
-

Available Typemaps

-

The typemap directives provided by numpy.i for arrays of different -data types, say double and int, and dimensions of different -types, say int or long, are identical to one another except -for the C and NumPy type specifications. The typemaps are -therefore implemented (typically behind the scenes) via a macro:

-
-%numpy_typemaps(DATA_TYPE, DATA_TYPECODE, DIM_TYPE)
-
-

that can be invoked for appropriate (DATA_TYPE, DATA_TYPECODE, -DIM_TYPE) triplets. For example:

-
-%numpy_typemaps(double, NPY_DOUBLE, int)
-%numpy_typemaps(int,    NPY_INT   , int)
-
-

The numpy.i interface file uses the %numpy_typemaps macro to -implement typemaps for the following C data types and int -dimension types:

-
-
    -
  • signed char
  • -
  • unsigned char
  • -
  • short
  • -
  • unsigned short
  • -
  • int
  • -
  • unsigned int
  • -
  • long
  • -
  • unsigned long
  • -
  • long long
  • -
  • unsigned long long
  • -
  • float
  • -
  • double
  • -
-
-

In the following descriptions, we reference a generic DATA_TYPE, which -could be any of the C data types listed above, and DIM_TYPE which -should be one of the many types of integers.

-

The typemap signatures are largely differentiated on the name given to -the buffer pointer. Names with FARRAY are for FORTRAN-ordered -arrays, and names with ARRAY are for C-ordered (or 1D arrays).

-
-

Input Arrays

-

Input arrays are defined as arrays of data that are passed into a -routine but are not altered in-place or returned to the user. The -python input array is therefore allowed to be almost any python -sequence (such as a list) that can be converted to the requested type -of array. The input array signatures are

-

1D:

-
-
    -
  • ( DATA_TYPE IN_ARRAY1[ANY] )
  • -
  • ( DATA_TYPE* IN_ARRAY1, int DIM1 )
  • -
  • ( int DIM1, DATA_TYPE* IN_ARRAY1 )
  • -
-
-

2D:

-
-
    -
  • ( DATA_TYPE IN_ARRAY2[ANY][ANY] )
  • -
  • ( DATA_TYPE* IN_ARRAY2, int DIM1, int DIM2 )
  • -
  • ( int DIM1, int DIM2, DATA_TYPE* IN_ARRAY2 )
  • -
  • ( DATA_TYPE* IN_FARRAY2, int DIM1, int DIM2 )
  • -
  • ( int DIM1, int DIM2, DATA_TYPE* IN_FARRAY2 )
  • -
-
-

3D:

-
-
    -
  • ( DATA_TYPE IN_ARRAY3[ANY][ANY][ANY] )
  • -
  • ( DATA_TYPE* IN_ARRAY3, int DIM1, int DIM2, int DIM3 )
  • -
  • ( int DIM1, int DIM2, int DIM3, DATA_TYPE* IN_ARRAY3 )
  • -
  • ( DATA_TYPE* IN_FARRAY3, int DIM1, int DIM2, int DIM3 )
  • -
  • ( int DIM1, int DIM2, int DIM3, DATA_TYPE* IN_FARRAY3 )
  • -
-
-

The first signature listed, ( DATA_TYPE IN_ARRAY[ANY] ) is for -one-dimensional arrays with hard-coded dimensions. Likewise, -( DATA_TYPE IN_ARRAY2[ANY][ANY] ) is for two-dimensional arrays -with hard-coded dimensions, and similarly for three-dimensional.

-
-
-

In-Place Arrays

-

In-place arrays are defined as arrays that are modified in-place. The -input values may or may not be used, but the values at the time the -function returns are significant. The provided python argument -must therefore be a NumPy array of the required type. The in-place -signatures are

-

1D:

-
-
    -
  • ( DATA_TYPE INPLACE_ARRAY1[ANY] )
  • -
  • ( DATA_TYPE* INPLACE_ARRAY1, int DIM1 )
  • -
  • ( int DIM1, DATA_TYPE* INPLACE_ARRAY1 )
  • -
-
-

2D:

-
-
    -
  • ( DATA_TYPE INPLACE_ARRAY2[ANY][ANY] )
  • -
  • ( DATA_TYPE* INPLACE_ARRAY2, int DIM1, int DIM2 )
  • -
  • ( int DIM1, int DIM2, DATA_TYPE* INPLACE_ARRAY2 )
  • -
  • ( DATA_TYPE* INPLACE_FARRAY2, int DIM1, int DIM2 )
  • -
  • ( int DIM1, int DIM2, DATA_TYPE* INPLACE_FARRAY2 )
  • -
-
-

3D:

-
-
    -
  • ( DATA_TYPE INPLACE_ARRAY3[ANY][ANY][ANY] )
  • -
  • ( DATA_TYPE* INPLACE_ARRAY3, int DIM1, int DIM2, int DIM3 )
  • -
  • ( int DIM1, int DIM2, int DIM3, DATA_TYPE* INPLACE_ARRAY3 )
  • -
  • ( DATA_TYPE* INPLACE_FARRAY3, int DIM1, int DIM2, int DIM3 )
  • -
  • ( int DIM1, int DIM2, int DIM3, DATA_TYPE* INPLACE_FARRAY3 )
  • -
-
-

These typemaps now check to make sure that the INPLACE_ARRAY -arguments use native byte ordering. If not, an exception is raised.

-
-
-

Argout Arrays

-

Argout arrays are arrays that appear in the input arguments in C, but -are in fact output arrays. This pattern occurs often when there is -more than one output variable and the single return argument is -therefore not sufficient. In python, the convential way to return -multiple arguments is to pack them into a sequence (tuple, list, etc.) -and return the sequence. This is what the argout typemaps do. If a -wrapped function that uses these argout typemaps has more than one -return argument, they are packed into a tuple or list, depending on -the version of python. The python user does not pass these -arrays in, they simply get returned. For the case where a dimension -is specified, the python user must provide that dimension as an -argument. The argout signatures are

-

1D:

-
-
    -
  • ( DATA_TYPE ARGOUT_ARRAY1[ANY] )
  • -
  • ( DATA_TYPE* ARGOUT_ARRAY1, int DIM1 )
  • -
  • ( int DIM1, DATA_TYPE* ARGOUT_ARRAY1 )
  • -
-
-

2D:

-
-
    -
  • ( DATA_TYPE ARGOUT_ARRAY2[ANY][ANY] )
  • -
-
-

3D:

-
-
    -
  • ( DATA_TYPE ARGOUT_ARRAY3[ANY][ANY][ANY] )
  • -
-
-

These are typically used in situations where in C/C++, you would -allocate a(n) array(s) on the heap, and call the function to fill the -array(s) values. In python, the arrays are allocated for you and -returned as new array objects.

-

Note that we support DATA_TYPE* argout typemaps in 1D, but not 2D -or 3D. This is because of a quirk with the SWIG typemap syntax and -cannot be avoided. Note that for these types of 1D typemaps, the -python function will take a single argument representing DIM1.

-
-
-

Argoutview Arrays

-

Argoutview arrays are for when your C code provides you with a view of -its internal data and does not require any memory to be allocated by -the user. This can be dangerous. There is almost no way to guarantee -that the internal data from the C code will remain in existence for -the entire lifetime of the NumPy array that encapsulates it. If -the user destroys the object that provides the view of the data before -destroying the NumPy array, then using that array my result in bad -memory references or segmentation faults. Nevertheless, there are -situations, working with large data sets, where you simply have no -other choice.

-

The C code to be wrapped for argoutview arrays are characterized by -pointers: pointers to the dimensions and double pointers to the data, -so that these values can be passed back to the user. The argoutview -typemap signatures are therefore

1D:

  • ( DATA_TYPE** ARGOUTVIEW_ARRAY1, DIM_TYPE* DIM1 )
  • ( DIM_TYPE* DIM1, DATA_TYPE** ARGOUTVIEW_ARRAY1 )

2D:

  • ( DATA_TYPE** ARGOUTVIEW_ARRAY2, DIM_TYPE* DIM1, DIM_TYPE* DIM2 )
  • ( DIM_TYPE* DIM1, DIM_TYPE* DIM2, DATA_TYPE** ARGOUTVIEW_ARRAY2 )
  • ( DATA_TYPE** ARGOUTVIEW_FARRAY2, DIM_TYPE* DIM1, DIM_TYPE* DIM2 )
  • ( DIM_TYPE* DIM1, DIM_TYPE* DIM2, DATA_TYPE** ARGOUTVIEW_FARRAY2 )

3D:

  • ( DATA_TYPE** ARGOUTVIEW_ARRAY3, DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3 )
  • ( DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DATA_TYPE** ARGOUTVIEW_ARRAY3 )
  • ( DATA_TYPE** ARGOUTVIEW_FARRAY3, DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3 )
  • ( DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DATA_TYPE** ARGOUTVIEW_FARRAY3 )

Note that arrays with hard-coded dimensions are not supported. These cannot follow the double pointer signatures of these typemaps.
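
A sketch of a typical use, assuming a hypothetical routine that exposes a pointer to an internal buffer together with its length (the default numpy.i instantiations use int as the dimension type):

    /* Hypothetical C prototype: void get_view(double** data, int* len); */
    %apply (double** ARGOUTVIEW_ARRAY1, int* DIM1) {(double** data, int* len)};
    %include "get_view.h"

The returned NumPy array is only a view; it becomes invalid if the underlying C data is freed, as discussed above.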


Output Arrays

The numpy.i interface file does not support typemaps for output arrays, for several reasons. First, C/C++ return arguments are limited to a single value. This prevents obtaining dimension information in a general way. Second, arrays with hard-coded lengths are not permitted as return arguments. In other words:

    double[3] newVector(double x, double y, double z);

is not legal C/C++ syntax. Therefore, we cannot provide typemaps of the form:

    %typemap(out) (TYPE[ANY]);

If you run into a situation where a function or method is returning a pointer to an array, your best bet is to write your own version of the function to be wrapped, either with %extend for the case of class methods or %ignore and %rename for the case of functions.
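
As a sketch of the %extend approach (the class and accessor names below are hypothetical), you can add a wrapper method that copies the internal data into an argout array:

    %extend MyClass {
      /* Copy the first DIM1 internal values into a new NumPy array.
         getValues() is a hypothetical accessor returning double*.  */
      void get_values(double* ARGOUT_ARRAY1, int DIM1) {
        double* src = $self->getValues();
        for (int i = 0; i < DIM1; ++i) ARGOUT_ARRAY1[i] = src[i];
      }
    }

Because the argument names match the ARGOUT_ARRAY1 signature, the corresponding typemaps should be applied without an explicit %apply.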


Other Common Types: bool

Note that the C++ type bool is not supported in the list in the Available Typemaps section. NumPy bools are a single byte, while the C++ bool is four bytes (at least on my system). Therefore:

    %numpy_typemaps(bool, NPY_BOOL, int)

will result in typemaps that will produce code that references improper data lengths. You can implement the following macro expansion:

    %numpy_typemaps(bool, NPY_UINT, int)

to fix the data length problem, and Input Arrays will work fine, but In-Place Arrays might fail type-checking.

Other Common Types: complex

Typemap conversions for complex floating-point types are also not supported automatically. This is because python and NumPy are written in C, which does not have native complex types. Both python and NumPy implement their own (essentially equivalent) struct definitions for complex variables:

    /* Python */
    typedef struct {double real; double imag;} Py_complex;

    /* NumPy */
    typedef struct {float  real, imag;} npy_cfloat;
    typedef struct {double real, imag;} npy_cdouble;

We could have implemented:

    %numpy_typemaps(Py_complex , NPY_CDOUBLE, int)
    %numpy_typemaps(npy_cfloat , NPY_CFLOAT , int)
    %numpy_typemaps(npy_cdouble, NPY_CDOUBLE, int)

which would have provided automatic type conversions for arrays of type Py_complex, npy_cfloat and npy_cdouble. However, it seemed unlikely that there would be any independent (non-python, non-NumPy) application code that people would be using SWIG to generate a python interface to, that also used these definitions for complex types. More likely, these application codes will define their own complex types, or in the case of C++, use std::complex. Assuming these data structures are compatible with python and NumPy complex types, %numpy_typemaps expansions as above (with the user's complex type substituted for the first argument) should work.
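
For instance, a sketch for C++ code that uses std::complex<double> (assuming its memory layout is compatible with npy_cdouble, which holds on typical platforms) might be:

    %{
    #include <complex>
    %}
    %numpy_typemaps(std::complex<double>, NPY_CDOUBLE, int)

    /* Hypothetical prototype now covered by the expansion above:
       void conj_inplace(std::complex<double>* INPLACE_ARRAY1, int DIM1); */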


NumPy Array Scalars and SWIG

SWIG has sophisticated type checking for numerical types. For example, if your C/C++ routine expects an integer as input, the code generated by SWIG will check for both python integers and python long integers, and raise an overflow error if the provided python integer is too big to cast down to a C integer. With the introduction of NumPy scalar arrays into your python code, you might conceivably extract an integer from a NumPy array and attempt to pass this to a SWIG-wrapped C/C++ function that expects an int, but the SWIG type checking will not recognize the NumPy array scalar as an integer. (Often, this does in fact work -- it depends on whether NumPy recognizes the integer type you are using as inheriting from the python integer type on the platform you are using. Sometimes, this means that code that works on a 32-bit machine will fail on a 64-bit machine.)


If you get a python error that looks like the following:

    TypeError: in method 'MyClass_MyMethod', argument 2 of type 'int'

and the argument you are passing is an integer extracted from a NumPy array, then you have stumbled upon this problem. The solution is to modify the SWIG type conversion system to accept NumPy array scalars in addition to the standard integer types. Fortunately, this capability has been provided for you. Simply copy the file:

    pyfragments.swg

to the working build directory for your project, and this problem will be fixed. It is suggested that you do this anyway, as it only increases the capabilities of your python interface.

Why is There a Second File?

The SWIG type checking and conversion system is a complicated combination of C macros, SWIG macros, SWIG typemaps and SWIG fragments. Fragments are a way to conditionally insert code into your wrapper file if it is needed, and not insert it if not needed. If multiple typemaps require the same fragment, the fragment only gets inserted into your wrapper code once.

There is a fragment for converting a python integer to a C long. There is a different fragment that converts a python integer to a C int, which calls the routine defined in the long fragment. We can make the changes we want here by changing the definition for the long fragment. SWIG determines the active definition for a fragment using a "first come, first served" system. That is, we need to define the fragment for long conversions prior to SWIG doing it internally. SWIG allows us to do this by putting our fragment definitions in the file pyfragments.swg. If we were to put the new fragment definitions in numpy.i, they would be ignored.


Helper Functions

The numpy.i file contains several macros and routines that it uses internally to build its typemaps. However, these functions may be useful elsewhere in your interface file. These macros and routines are implemented as fragments, which are described briefly in the previous section. If you try to use one or more of the following macros or functions, but your compiler complains that it does not recognize the symbol, then you need to force these fragments to appear in your code using:

    %fragment("NumPy_Fragments");

in your SWIG interface file.


Macros

is_array(a)
    Evaluates as true if a is non-NULL and can be cast to a PyArrayObject*.

array_type(a)
    Evaluates to the integer data type code of a, assuming a can be cast to a PyArrayObject*.

array_numdims(a)
    Evaluates to the integer number of dimensions of a, assuming a can be cast to a PyArrayObject*.

array_dimensions(a)
    Evaluates to an array of type npy_intp and length array_numdims(a), giving the lengths of all of the dimensions of a, assuming a can be cast to a PyArrayObject*.

array_size(a,i)
    Evaluates to the i-th dimension size of a, assuming a can be cast to a PyArrayObject*.

array_data(a)
    Evaluates to a pointer of type void* that points to the data buffer of a, assuming a can be cast to a PyArrayObject*.

array_is_contiguous(a)
    Evaluates as true if a is a contiguous array. Equivalent to (PyArray_ISCONTIGUOUS(a)).

array_is_native(a)
    Evaluates as true if the data buffer of a uses native byte order. Equivalent to (PyArray_ISNOTSWAPPED(a)).

array_is_fortran(a)
    Evaluates as true if a is FORTRAN ordered.
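
As a small illustration (a hypothetical helper, not part of numpy.i), these macros can be used inside an %inline block once the fragments have been forced into the wrapper:

    %fragment("NumPy_Fragments");
    %inline %{
    /* Return the number of dimensions of obj, or -1 if it is not an array. */
    int ndims_or_minus_one(PyObject* obj) {
      if (!is_array(obj)) return -1;
      return array_numdims(obj);
    }
    %}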

Routines


pytype_string()

Return type: char*

Arguments:

  • PyObject* py_obj, a general python object.

Return a string describing the type of py_obj.

typecode_string()

Return type: char*

Arguments:

  • int typecode, a NumPy integer typecode.

Return a string describing the type corresponding to the NumPy typecode.

type_match()

Return type: int

Arguments:

  • int actual_type, the NumPy typecode of a NumPy array.
  • int desired_type, the desired NumPy typecode.

Make sure that actual_type is compatible with desired_type. For example, this allows character and byte types, or int and long types, to match. This is now equivalent to PyArray_EquivTypenums().

obj_to_array_no_conversion()

Return type: PyArrayObject*

Arguments:

  • PyObject* input, a general python object.
  • int typecode, the desired NumPy typecode.

Cast input to a PyArrayObject* if legal, and ensure that it is of type typecode. If input cannot be cast, or the typecode is wrong, set a python error and return NULL.

obj_to_array_allow_conversion()

Return type: PyArrayObject*

Arguments:

  • PyObject* input, a general python object.
  • int typecode, the desired NumPy typecode of the resulting array.
  • int* is_new_object, returns a value of 0 if no conversion performed, else 1.

Convert input to a NumPy array with the given typecode. On success, return a valid PyArrayObject* with the correct type. On failure, the python error string will be set and the routine returns NULL.

make_contiguous()

Return type: PyArrayObject*

Arguments:

  • PyArrayObject* ary, a NumPy array.
  • int* is_new_object, returns a value of 0 if no conversion performed, else 1.
  • int min_dims, minimum allowable dimensions.
  • int max_dims, maximum allowable dimensions.

Check to see if ary is contiguous. If so, return the input pointer and flag it as not a new object. If it is not contiguous, create a new PyArrayObject* using the original data, flag it as a new object and return the pointer.

obj_to_array_contiguous_allow_conversion()

Return type: PyArrayObject*

Arguments:

  • PyObject* input, a general python object.
  • int typecode, the desired NumPy typecode of the resulting array.
  • int* is_new_object, returns a value of 0 if no conversion performed, else 1.

Convert input to a contiguous PyArrayObject* of the specified type. If the input object is not a contiguous PyArrayObject*, a new one will be created and the new object flag will be set.

require_contiguous()

Return type: int

Arguments:

  • PyArrayObject* ary, a NumPy array.

Test whether ary is contiguous. If so, return 1. Otherwise, set a python error and return 0.

require_native()

Return type: int

Arguments:

  • PyArrayObject* ary, a NumPy array.

Require that ary is not byte-swapped. If the array is not byte-swapped, return 1. Otherwise, set a python error and return 0.

require_dimensions()

Return type: int

Arguments:

  • PyArrayObject* ary, a NumPy array.
  • int exact_dimensions, the desired number of dimensions.

Require ary to have a specified number of dimensions. If the array has the specified number of dimensions, return 1. Otherwise, set a python error and return 0.

require_dimensions_n()

Return type: int

Arguments:

  • PyArrayObject* ary, a NumPy array.
  • int* exact_dimensions, an array of integers representing acceptable numbers of dimensions.
  • int n, the length of exact_dimensions.

Require ary to have one of a list of specified numbers of dimensions. If the array has one of the specified numbers of dimensions, return 1. Otherwise, set the python error string and return 0.

require_size()

Return type: int

Arguments:

  • PyArrayObject* ary, a NumPy array.
  • npy_intp* size, an array representing the desired lengths of each dimension.
  • int n, the length of size.

Require ary to have a specified shape. If the array has the specified shape, return 1. Otherwise, set the python error string and return 0.

require_fortran()

Return type: int

Arguments:

  • PyArrayObject* ary, a NumPy array.

Require the given PyArrayObject to be FORTRAN ordered. If the PyArrayObject is already FORTRAN ordered, do nothing. Else, set the FORTRAN ordering flag and recompute the strides.
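
For example, here is a sketch of a hand-written typemap built from these routines (the argument pair (double* weights, int nw) is hypothetical); a matching %typemap(freearg) would be needed to release any temporary array created by the conversion:

    %fragment("NumPy_Fragments");
    %typemap(in) (double* weights, int nw) {
      int is_new_object = 0;
      PyArrayObject* ary =
        obj_to_array_contiguous_allow_conversion($input, NPY_DOUBLE, &is_new_object);
      if (!ary || !require_dimensions(ary, 1) || !require_native(ary)) SWIG_fail;
      $1 = (double*) array_data(ary);
      $2 = (int) array_size(ary, 0);
    }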


Beyond the Provided Typemaps

There are many C or C++ array/NumPy array situations not covered by a simple %include "numpy.i" and subsequent %apply directives.

A Common Example


Consider a reasonable prototype for a dot product function:

    double dot(int len, double* vec1, double* vec2);

The python interface that we want is:

    def dot(vec1, vec2):
        """
        dot(PyObject,PyObject) -> double
        """

The problem here is that there is one dimension argument and two array arguments, and our typemaps are set up for dimensions that apply to a single array (in fact, SWIG does not provide a mechanism for associating len with vec2 that takes two python input arguments). The recommended solution is the following:

    %apply (int DIM1, double* IN_ARRAY1) {(int len1, double* vec1),
                                          (int len2, double* vec2)}
    %rename (dot) my_dot;
    %exception my_dot {
        $action
        if (PyErr_Occurred()) SWIG_fail;
    }
    %inline %{
    double my_dot(int len1, double* vec1, int len2, double* vec2) {
        if (len1 != len2) {
            PyErr_Format(PyExc_ValueError,
                         "Arrays of lengths (%d,%d) given",
                         len1, len2);
            return 0.0;
        }
        return dot(len1, vec1, vec2);
    }
    %}

If the header file that contains the prototype for double dot() also contains other prototypes that you want to wrap, so that you need to %include this header file, then you will also need a %ignore dot; directive, placed after the %rename and before the %include directives. Or, if the function in question is a class method, you will want to use %extend rather than %inline in addition to %ignore.


A note on error handling: Note that my_dot returns a double but that it can also raise a python error. The resulting wrapper function will return a python float representation of 0.0 when the vector lengths do not match. Since this is not NULL, the python interpreter will not know to check for an error. For this reason, we add the %exception directive above for my_dot to get the behavior we want (note that $action is a macro that gets expanded to a valid call to my_dot). In general, you will probably want to write a SWIG macro to perform this task.
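
A sketch of such a macro (the name %raise_on_error is hypothetical) might look like:

    %define %raise_on_error(FUNCNAME)
    %exception FUNCNAME {
      $action
      if (PyErr_Occurred()) SWIG_fail;
    }
    %enddef

    %raise_on_error(my_dot)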


Other Situations

There are other wrapping situations in which numpy.i may be helpful when you encounter them.

  • In some situations, it is possible that you could use the %numpy_typemaps macro to implement typemaps for your own types. See the Other Common Types: bool or Other Common Types: complex sections for examples. Another situation is if your dimensions are of a type other than int (say long for example):

        %numpy_typemaps(double, NPY_DOUBLE, long)

  • You can use the code in numpy.i to write your own typemaps. For example, if you had a four-dimensional array as a function argument, you could cut-and-paste the appropriate three-dimensional typemaps into your interface file. The modifications for the fourth dimension would be trivial.

  • Sometimes, the best approach is to use the %extend directive to define new methods for your classes (or overload existing ones) that take a PyObject* (that either is or can be converted to a PyArrayObject*) instead of a pointer to a buffer. In this case, the helper routines in numpy.i can be very useful.

  • Writing typemaps can be a bit nonintuitive. If you have specific questions about writing SWIG typemaps for NumPy, the developers of numpy.i do monitor the Numpy-discussion and Swig-user mail lists.

A Final Note

When you use the %apply directive, as is usually necessary to use numpy.i, it will remain in effect until you tell SWIG that it shouldn't be. If the arguments to the functions or methods that you are wrapping have common names, such as length or vector, these typemaps may get applied in situations you do not expect or want. Therefore, it is always a good idea to add a %clear directive after you are done with a specific typemap:

    %apply (double* IN_ARRAY1, int DIM1) {(double* vector, int length)}
    %include "my_header.h"
    %clear (double* vector, int length);

In general, you should target these typemap signatures specifically where you want them, and then clear them after you are done.

Summary

Out of the box, numpy.i provides typemaps that support conversion between NumPy arrays and C arrays:

  • That can be one of 12 different scalar types: signed char, unsigned char, short, unsigned short, int, unsigned int, long, unsigned long, long long, unsigned long long, float and double.

  • That support 41 different argument signatures for each data type, including:

      • One-dimensional, two-dimensional and three-dimensional arrays.
      • Input-only, in-place, argout and argoutview behavior.
      • Hard-coded dimensions, data-buffer-then-dimensions specification, and dimensions-then-data-buffer specification.
      • Both C-ordering ("last dimension fastest") and FORTRAN-ordering ("first dimension fastest") support for 2D and 3D arrays.

The numpy.i interface file also provides additional tools for wrapper developers, including:

  • A SWIG macro (%numpy_typemaps) with three arguments for implementing the 41 argument signatures for the user's choice of (1) C data type, (2) NumPy data type (assuming they match), and (3) dimension type.

  • Nine C macros and 13 C functions that can be used to write specialized typemaps, extensions, or inlined functions that handle cases not covered by the provided typemaps.

Acknowledgements

Many people have worked to glue SWIG and NumPy together (as well as SWIG and the predecessors of NumPy, Numeric and numarray). The effort to standardize this work into numpy.i began at the 2005 SciPy Conference with a conversation between Fernando Perez and myself. Fernando collected helper functions and typemaps from Eric Jones, Michael Hunter, Anna Omelchenko and Michael Sanner. Sebastian Hasse and Georg Holzmann have also provided additional error checking and use cases. The work of these contributors has made this end result possible.
- - - Deleted: trunk/doc/swig/doc/numpy_swig.pdf =================================================================== (Binary files differ) Deleted: trunk/doc/swig/doc/testing.html =================================================================== --- trunk/doc/swig/doc/testing.html 2010-09-04 09:52:24 UTC (rev 8684) +++ trunk/doc/swig/doc/testing.html 2010-09-04 09:52:44 UTC (rev 8685) @@ -1,482 +0,0 @@ - - - - - -Testing the numpy.i Typemaps - - - - - -

Testing the numpy.i Typemaps

Author:       Bill Spotz
Institution:  Sandia National Laboratories
Date:         6 April, 2007

Introduction

Writing tests for the numpy.i SWIG interface file is a combinatorial headache. At present, 12 different data types are supported, each with 23 different argument signatures, for a total of 276 typemaps supported "out of the box". Each of these typemaps, in turn, might require several unit tests in order to verify expected behavior for both proper and improper inputs. Currently, this results in 1,020 individual unit tests that are performed when make test is run in the numpy/docs/swig subdirectory.

To facilitate this many similar unit tests, some high-level programming techniques are employed, including C and SWIG macros, as well as python inheritance. The purpose of this document is to describe the testing infrastructure employed to verify that the numpy.i typemaps are working as expected.

Testing Organization

There are three independent testing frameworks supported, for one-, two-, and three-dimensional arrays respectively. For one-dimensional arrays, there are two C++ files, a header and a source, named:

    Vector.h
    Vector.cxx

that contain prototypes and code for a variety of functions that have one-dimensional arrays as function arguments. The file:

    Vector.i

is a SWIG interface file that defines a python module Vector that wraps the functions in Vector.h while utilizing the typemaps in numpy.i to correctly handle the C arrays.

The Makefile calls swig to generate Vector.py and Vector_wrap.cxx, and also executes the setup.py script that compiles Vector_wrap.cxx and links together the extension module _Vector.so or _Vector.dylib, depending on the platform. This extension module and the proxy file Vector.py are both placed in a subdirectory under the build directory.

The actual testing takes place with a python script named:

    testVector.py

that uses the standard python library module unittest, which performs several tests of each function defined in Vector.h for each data type supported.

Two-dimensional arrays are tested in exactly the same manner. The above description applies, but with Matrix substituted for Vector. For three-dimensional tests, substitute Tensor for Vector. For the descriptions that follow, we will reference the Vector tests, but the same information applies to Matrix and Tensor tests.

The command make test will ensure that all of the test software is built and then run all three test scripts.

Testing Header Files

Vector.h is a C++ header file that defines a C macro called TEST_FUNC_PROTOS that takes two arguments: TYPE, which is a data type name such as unsigned int; and SNAME, which is a short name for the same data type with no spaces, e.g. uint. This macro defines several function prototypes that have the prefix SNAME and have at least one argument that is an array of type TYPE. Those functions that have return arguments return a TYPE value.
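
A condensed sketch of the pattern (the actual Vector.h defines more functions and may differ in detail):

    #define TEST_FUNC_PROTOS(TYPE, SNAME) \
    TYPE SNAME ## Length(TYPE vector[3]); \
    TYPE SNAME ## Prod(TYPE* series, int size); \
    void SNAME ## Ones(TYPE* array, int size);

    TEST_FUNC_PROTOS(unsigned int, uint)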


TEST_FUNC_PROTOS is then implemented for all of the data types supported by numpy.i:

  • signed char
  • unsigned char
  • short
  • unsigned short
  • int
  • unsigned int
  • long
  • unsigned long
  • long long
  • unsigned long long
  • float
  • double

Testing Source Files

Vector.cxx is a C++ source file that implements compilable code for each of the function prototypes specified in Vector.h. It defines a C macro TEST_FUNCS that has the same arguments and works in the same way as TEST_FUNC_PROTOS does in Vector.h. TEST_FUNCS is implemented for each of the 12 data types as above.

Testing SWIG Interface Files

Vector.i is a SWIG interface file that defines python module Vector. It follows the conventions for using numpy.i as described in the numpy.i documentation. It defines a SWIG macro %apply_numpy_typemaps that has a single argument TYPE. It uses the SWIG directive %apply as described in the numpy.i documentation to apply the provided typemaps to the argument signatures found in Vector.h. This macro is then implemented for all of the data types supported by numpy.i. It then does a %include "Vector.h" to wrap all of the function prototypes in Vector.h using the typemaps in numpy.i.
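
A sketch of what such a macro looks like (the argument signatures shown here are illustrative; the actual Vector.i covers every signature used in Vector.h):

    %define %apply_numpy_typemaps(TYPE)

    %apply (TYPE IN_ARRAY1[ANY]) {(TYPE vector[3])};
    %apply (TYPE* IN_ARRAY1, int DIM1) {(TYPE* series, int size)};
    %apply (TYPE* INPLACE_ARRAY1, int DIM1) {(TYPE* array, int size)};

    %enddef    /* %apply_numpy_typemaps() macro */

    %apply_numpy_typemaps(double)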


Testing Python Scripts

After make is used to build the testing extension modules, testVector.py can be run to execute the tests. As with other scripts that use unittest to facilitate unit testing, testVector.py defines a class that inherits from unittest.TestCase:

    class VectorTestCase(unittest.TestCase):

However, this class is not run directly. Rather, it serves as a base class to several other python classes, each one specific to a particular data type. The VectorTestCase class stores two strings for typing information:

self.typeStr
    A string that matches one of the SNAME prefixes used in Vector.h and Vector.cxx. For example, "double".

self.typeCode
    A short (typically single-character) string that represents a data type in numpy and corresponds to self.typeStr. For example, if self.typeStr is "double", then self.typeCode should be "d".

Each test defined by the VectorTestCase class extracts the python function it is trying to test by accessing the Vector module's dictionary:

    length = Vector.__dict__[self.typeStr + "Length"]

In the case of double precision tests, this will return the python function Vector.doubleLength.
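
A typical test method in VectorTestCase then looks something like the following sketch (the exact tests in testVector.py may differ):

    def testLength(self):
        "Test the length function"
        length = Vector.__dict__[self.typeStr + "Length"]
        self.assertEquals(length([5, 12, 0]), 13)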


We then define a new test case class for each supported data type with a short definition such as:

    class doubleTestCase(VectorTestCase):
        def __init__(self, methodName="runTest"):
            VectorTestCase.__init__(self, methodName)
            self.typeStr  = "double"
            self.typeCode = "d"

Each of these 12 classes is collected into a unittest.TestSuite, which is then executed. Errors and failures are summed together and returned as the exit argument. Any non-zero result indicates that at least one test did not pass.
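
The collection and execution step is a sketch along these lines (the real testVector.py contains one addTest call per supported data type):

    if __name__ == "__main__":
        suite = unittest.TestSuite()
        suite.addTest(unittest.makeSuite(doubleTestCase))
        # ... one addTest call per supported data type ...
        result = unittest.TextTestRunner(verbosity=2).run(suite)
        sys.exit(len(result.errors) + len(result.failures))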

- - - Deleted: trunk/doc/swig/doc/testing.pdf =================================================================== (Binary files differ) From numpy-svn at scipy.org Sat Sep 4 05:53:01 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sat, 4 Sep 2010 04:53:01 -0500 (CDT) Subject: [Numpy-svn] r8686 - trunk/doc/swig Message-ID: <20100904095301.AD19039CC3F@scipy.org> Author: rgommers Date: 2010-09-04 04:53:01 -0500 (Sat, 04 Sep 2010) New Revision: 8686 Modified: trunk/doc/swig/Makefile trunk/doc/swig/README Log: DOC: update doc/swig makefile and readme after moving swig/doc into refguide. Modified: trunk/doc/swig/Makefile =================================================================== --- trunk/doc/swig/Makefile 2010-09-04 09:52:44 UTC (rev 8685) +++ trunk/doc/swig/Makefile 2010-09-04 09:53:01 UTC (rev 8686) @@ -20,11 +20,6 @@ test: cd $@ && make $@ -# Target doc -.PHONY : doc -doc: - cd $@ && make - # Target clean .PHONY : clean clean: Modified: trunk/doc/swig/README =================================================================== --- trunk/doc/swig/README 2010-09-04 09:52:44 UTC (rev 8685) +++ trunk/doc/swig/README 2010-09-04 09:53:01 UTC (rev 8686) @@ -9,24 +9,9 @@ Documentation ------------- -Documentation for how to use numpy.i is in the doc directory. The -primary source file here is numpy_swig.txt, a restructured text file -that documents how to use numpy.i. The Makefile in doc allows for the -conversion of numpy_swig.txt to HTML (if you have docutils installed) -and to PDF (if you have docutils and latex/pdftex installed). This -should not be necessary, however, as numpy_swig.html and -numpy_swig.pdf are stored in the repository. +Documentation for how to use numpy.i, as well as for the testing system +used here, can be found in the NumPy reference guide. -The same is true for a file called doc/testing.txt, which describes -the testing system used here. - -If you have the prerequisites installed and wish to build the HTML and -PDF documentation, this can be achieved by calling:: - - $ make doc - -from the shell. - Testing ------- The tests are a good example of what we are trying to do with numpy.i. From numpy-svn at scipy.org Sat Sep 4 06:10:42 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sat, 4 Sep 2010 05:10:42 -0500 (CDT) Subject: [Numpy-svn] r8687 - in trunk: . doc/release Message-ID: <20100904101042.035E339CC3F@scipy.org> Author: rgommers Date: 2010-09-04 05:10:42 -0500 (Sat, 04 Sep 2010) New Revision: 8687 Modified: trunk/doc/release/1.5.0-notes.rst trunk/pavement.py Log: REL: Apply changes to release notes and paver script in 1.5.x branch to trunk. Modified: trunk/doc/release/1.5.0-notes.rst =================================================================== --- trunk/doc/release/1.5.0-notes.rst 2010-09-04 09:53:01 UTC (rev 8686) +++ trunk/doc/release/1.5.0-notes.rst 2010-09-04 10:10:42 UTC (rev 8687) @@ -12,8 +12,12 @@ This is the first NumPy release which is compatible with Python 3. Support for Python 3 and Python 2 is done from a single code base. Extensive notes on changes can be found at -``_. +``_. +Note that the Numpy testing framework relies on nose, which does not have a +Python 3 compatible release yet. A working Python 3 branch of nose can be found +at ``_ however. + Porting of SciPy to Python 3 is expected to be completed soon. 
:pep:`3118` compatibility Modified: trunk/pavement.py =================================================================== --- trunk/pavement.py 2010-09-04 09:53:01 UTC (rev 8686) +++ trunk/pavement.py 2010-09-04 10:10:42 UTC (rev 8687) @@ -106,7 +106,9 @@ MPKG_PYTHON = { "2.5": ["/Library/Frameworks/Python.framework/Versions/2.5/bin/python"], - "2.6": ["/Library/Frameworks/Python.framework/Versions/2.6/bin/python"] + "2.6": ["/Library/Frameworks/Python.framework/Versions/2.6/bin/python"], + "2.7": ["/Library/Frameworks/Python.framework/Versions/2.7/bin/python"], + "3.1": ["/Library/Frameworks/Python.framework/Versions/3.1/bin/python3"], } SSE3_CFG = {'ATLAS': r'C:\local\lib\yop\sse3'} @@ -117,6 +119,8 @@ if sys.platform =="darwin": WINDOWS_PYTHON = { + "3.1": ["wine", os.environ['HOME'] + "/.wine/drive_c/Python31/python.exe"], + "2.7": ["wine", os.environ['HOME'] + "/.wine/drive_c/Python27/python.exe"], "2.6": ["wine", os.environ['HOME'] + "/.wine/drive_c/Python26/python.exe"], "2.5": ["wine", os.environ['HOME'] + "/.wine/drive_c/Python25/python.exe"] } @@ -125,6 +129,8 @@ MAKENSIS = ["wine", "makensis"] elif sys.platform == "win32": WINDOWS_PYTHON = { + "3.1": ["C:\Python31\python3.exe"], + "2.7": ["C:\Python27\python.exe"], "2.6": ["C:\Python26\python.exe"], "2.5": ["C:\Python25\python.exe"], } @@ -134,6 +140,8 @@ MAKENSIS = ["makensis"] else: WINDOWS_PYTHON = { + "3.1": ["wine", os.environ['HOME'] + "/.wine/drive_c/Python31/python.exe"], + "2.7": ["wine", os.environ['HOME'] + "/.wine/drive_c/Python27/python.exe"], "2.6": ["wine", os.environ['HOME'] + "/.wine/drive_c/Python26/python.exe"], "2.5": ["wine", os.environ['HOME'] + "/.wine/drive_c/Python25/python.exe"] } @@ -194,7 +202,7 @@ pyver = options.python_version def copy_bdist(arch): # Copy the wininst in dist into the release directory - if pyver[0] >= 3: + if int(pyver[0]) >= 3: source = os.path.join('build', 'py3k', 'dist', wininst_name(pyver)) else: source = os.path.join('dist', wininst_name(pyver)) From numpy-svn at scipy.org Sat Sep 4 06:17:19 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sat, 4 Sep 2010 05:17:19 -0500 (CDT) Subject: [Numpy-svn] r8688 - trunk/numpy/core/tests Message-ID: <20100904101719.6002D39CC3F@scipy.org> Author: rgommers Date: 2010-09-04 05:17:19 -0500 (Sat, 04 Sep 2010) New Revision: 8688 Modified: trunk/numpy/core/tests/test_regression.py Log: TST: Change outdated message for knownfail described in #1081. 
Modified: trunk/numpy/core/tests/test_regression.py =================================================================== --- trunk/numpy/core/tests/test_regression.py 2010-09-04 10:10:42 UTC (rev 8687) +++ trunk/numpy/core/tests/test_regression.py 2010-09-04 10:17:19 UTC (rev 8688) @@ -1134,7 +1134,7 @@ t = ((1,), np.array(1)) assert_raises(ValueError, lambda: np.array(t)) - @dec.knownfailureif(True, "Fix this for 1.5.0.") + @dec.knownfailureif(True, "This is a corner case, see ticket #1081.") def test_array_from_sequence_scalar_array2(self): """Ticket #1081: weird array with strange input...""" t = np.array([np.array([]), np.array(0, object)]) From numpy-svn at scipy.org Sat Sep 4 09:08:27 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sat, 4 Sep 2010 08:08:27 -0500 (CDT) Subject: [Numpy-svn] r8689 - trunk/numpy/distutils/command Message-ID: <20100904130827.7A9FA39CC3F@scipy.org> Author: ptvirtan Date: 2010-09-04 08:08:27 -0500 (Sat, 04 Sep 2010) New Revision: 8689 Modified: trunk/numpy/distutils/command/config.py Log: BUG: distutils: use // in a binary search (fixes #1604 on Python 3) Modified: trunk/numpy/distutils/command/config.py =================================================================== --- trunk/numpy/distutils/command/config.py 2010-09-04 10:17:19 UTC (rev 8688) +++ trunk/numpy/distutils/command/config.py 2010-09-04 13:08:27 UTC (rev 8689) @@ -271,7 +271,7 @@ high = mid # Binary search: while low != high: - mid = (high - low) / 2 + low + mid = (high - low) // 2 + low try: self._compile(body % {'type': type_name, 'size': mid}, headers, include_dirs, 'c') From numpy-svn at scipy.org Sat Sep 4 09:08:43 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sat, 4 Sep 2010 08:08:43 -0500 (CDT) Subject: [Numpy-svn] r8690 - trunk/doc/sphinxext Message-ID: <20100904130843.2681139CC3F@scipy.org> Author: ptvirtan Date: 2010-09-04 08:08:43 -0500 (Sat, 04 Sep 2010) New Revision: 8690 Modified: trunk/doc/sphinxext/numpydoc.py Log: sphinxext: be explicit about Sphinx version requirements Modified: trunk/doc/sphinxext/numpydoc.py =================================================================== --- trunk/doc/sphinxext/numpydoc.py 2010-09-04 13:08:27 UTC (rev 8689) +++ trunk/doc/sphinxext/numpydoc.py 2010-09-04 13:08:43 UTC (rev 8690) @@ -16,6 +16,11 @@ """ +import sphinx + +if sphinx.__version__ < '1.0.1': + raise RuntimeError("Sphinx 1.0.1 or newer is required") + import os, re, pydoc from docscrape_sphinx import get_doc_object, SphinxDocString from sphinx.util.compat import Directive From numpy-svn at scipy.org Sat Sep 4 09:28:24 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sat, 4 Sep 2010 08:28:24 -0500 (CDT) Subject: [Numpy-svn] r8691 - branches/1.5.x/numpy/distutils/command Message-ID: <20100904132824.0535939CC3F@scipy.org> Author: ptvirtan Date: 2010-09-04 08:28:23 -0500 (Sat, 04 Sep 2010) New Revision: 8691 Modified: branches/1.5.x/numpy/distutils/command/config.py Log: BUG (backport r8689): distutils: use // in a binary search (fixes #1604 on Python 3) Modified: branches/1.5.x/numpy/distutils/command/config.py =================================================================== --- branches/1.5.x/numpy/distutils/command/config.py 2010-09-04 13:08:43 UTC (rev 8690) +++ branches/1.5.x/numpy/distutils/command/config.py 2010-09-04 13:28:23 UTC (rev 8691) @@ -271,7 +271,7 @@ high = mid # Binary search: while low != high: - mid = (high - low) / 2 + low + mid = (high - low) // 2 + low try: self._compile(body % {'type': type_name, 'size': 
mid}, headers, include_dirs, 'c') From numpy-svn at scipy.org Sun Sep 5 00:40:37 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sat, 4 Sep 2010 23:40:37 -0500 (CDT) Subject: [Numpy-svn] r8692 - in trunk/doc: . release Message-ID: <20100905044037.430EC39CD56@scipy.org> Author: rgommers Date: 2010-09-04 23:40:37 -0500 (Sat, 04 Sep 2010) New Revision: 8692 Added: trunk/doc/release/2.0.0-notes.rst Modified: trunk/doc/HOWTO_RELEASE.txt Log: DOC: some more details on release process, and blank 2.0.0 notes. Modified: trunk/doc/HOWTO_RELEASE.txt =================================================================== --- trunk/doc/HOWTO_RELEASE.txt 2010-09-04 13:28:23 UTC (rev 8691) +++ trunk/doc/HOWTO_RELEASE.txt 2010-09-05 04:40:37 UTC (rev 8692) @@ -194,6 +194,12 @@ .. note:: The following steps are repeated for the beta(s), release candidates(s) and the final release. +Merge doc wiki edits +-------------------- +The edits in the documentation wiki suitable for merging should be merged, +ideally just before making the release branch. How to do this is described in +detail in doc/HOWTO_MERGE_WIKI_DOCS.txt. + Check deprecations ------------------ Before the release branch is made, it should be checked that all deprecated @@ -211,6 +217,9 @@ - for SciPy, supported NumPy version(s) - outlook for the near future +Also make sure that as soon as the branch is made, there is a new release notes +file in trunk for the next release. + Create the release "tag" ------------------------ :: @@ -268,3 +277,12 @@ During the beta/RC phase an explicit request for testing the binaries with several other libraries (SciPy/Matplotlib/Pygame) should be posted on the mailing list. + +After the final release +----------------------- +After the final release is announced, a few administrative tasks are left to be +done: + + - Forward port changes in the release branch to release notes and release + scripts, if any, to trunk. + - Update the Milestones in Trac. Added: trunk/doc/release/2.0.0-notes.rst =================================================================== --- trunk/doc/release/2.0.0-notes.rst (rev 0) +++ trunk/doc/release/2.0.0-notes.rst 2010-09-05 04:40:37 UTC (rev 8692) @@ -0,0 +1,16 @@ +========================= +NumPy 2.0.0 Release Notes +========================= + + +Highlights +========== + + +New features +============ + + +Changes +======= + From numpy-svn at scipy.org Sun Sep 5 09:24:16 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sun, 5 Sep 2010 08:24:16 -0500 (CDT) Subject: [Numpy-svn] r8693 - trunk/doc/sphinxext Message-ID: <20100905132416.BCF5F39CD62@scipy.org> Author: ptvirtan Date: 2010-09-05 08:24:16 -0500 (Sun, 05 Sep 2010) New Revision: 8693 Modified: trunk/doc/sphinxext/plot_directive.py Log: sphinxext/plot_directive: more robust relpath Modified: trunk/doc/sphinxext/plot_directive.py =================================================================== --- trunk/doc/sphinxext/plot_directive.py 2010-09-05 04:40:37 UTC (rev 8692) +++ trunk/doc/sphinxext/plot_directive.py 2010-09-05 13:24:16 UTC (rev 8693) @@ -586,38 +586,55 @@ try: from os.path import relpath except ImportError: - def relpath(target, base=os.curdir): - """ - Return a relative path to the target from either the current - dir or an optional base dir. Base can be a directory - specified either as absolute or relative to current dir. 
- """ + # Copied from Python 2.7 + if 'posix' in sys.builtin_module_names: + def relpath(path, start=os.path.curdir): + """Return a relative version of a path""" + from os.path import sep, curdir, join, abspath, commonprefix, \ + pardir - if not os.path.exists(target): - raise OSError, 'Target does not exist: '+target + if not path: + raise ValueError("no path specified") - if not os.path.isdir(base): - raise OSError, 'Base is not a directory or does not exist: '+base + start_list = abspath(start).split(sep) + path_list = abspath(path).split(sep) - base_list = (os.path.abspath(base)).split(os.sep) - target_list = (os.path.abspath(target)).split(os.sep) + # Work out how much of the filepath is shared by start and path. + i = len(commonprefix([start_list, path_list])) - # On the windows platform the target may be on a completely - # different drive from the base. - if os.name in ['nt','dos','os2'] and base_list[0] <> target_list[0]: - raise OSError, 'Target is on a different drive to base. Target: '+target_list[0].upper()+', base: '+base_list[0].upper() + rel_list = [pardir] * (len(start_list)-i) + path_list[i:] + if not rel_list: + return curdir + return join(*rel_list) + elif 'nt' in sys.builtin_module_names: + def relpath(path, start=os.path.curdir): + """Return a relative version of a path""" + from os.path import sep, curdir, join, abspath, commonprefix, \ + pardir, splitunc - # Starting from the filepath root, work out how much of the - # filepath is shared by base and target. - for i in range(min(len(base_list), len(target_list))): - if base_list[i] <> target_list[i]: break - else: - # If we broke out of the loop, i is pointing to the first - # differing path elements. If we didn't break out of the - # loop, i is pointing to identical path elements. - # Increment i so that in all cases it points to the first - # differing path elements. - i+=1 + if not path: + raise ValueError("no path specified") + start_list = abspath(start).split(sep) + path_list = abspath(path).split(sep) + if start_list[0].lower() != path_list[0].lower(): + unc_path, rest = splitunc(path) + unc_start, rest = splitunc(start) + if bool(unc_path) ^ bool(unc_start): + raise ValueError("Cannot mix UNC and non-UNC paths (%s and %s)" + % (path, start)) + else: + raise ValueError("path is on drive %s, start on drive %s" + % (path_list[0], start_list[0])) + # Work out how much of the filepath is shared by start and path. 
+ for i in range(min(len(start_list), len(path_list))): + if start_list[i].lower() != path_list[i].lower(): + break + else: + i += 1 - rel_list = [os.pardir] * (len(base_list)-i) + target_list[i:] - return os.path.join(*rel_list) + rel_list = [pardir] * (len(start_list)-i) + path_list[i:] + if not rel_list: + return curdir + return join(*rel_list) + else: + raise RuntimeError("Unsupported platform (no relpath available!)") From numpy-svn at scipy.org Sun Sep 5 09:36:35 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sun, 5 Sep 2010 08:36:35 -0500 (CDT) Subject: [Numpy-svn] r8694 - trunk/doc Message-ID: <20100905133635.EAF8C39CD62@scipy.org> Author: ptvirtan Date: 2010-09-05 08:36:35 -0500 (Sun, 05 Sep 2010) New Revision: 8694 Modified: trunk/doc/HOWTO_RELEASE.txt Log: DOC: add instructions for updating the docs to HOWTO_RELEASE.txt Modified: trunk/doc/HOWTO_RELEASE.txt =================================================================== --- trunk/doc/HOWTO_RELEASE.txt 2010-09-05 13:24:16 UTC (rev 8693) +++ trunk/doc/HOWTO_RELEASE.txt 2010-09-05 13:36:35 UTC (rev 8694) @@ -200,6 +200,15 @@ ideally just before making the release branch. How to do this is described in detail in doc/HOWTO_MERGE_WIKI_DOCS.txt. +Check that docs can be built +---------------------------- +Do:: + + cd doc/ + make dist + +to check that the documentation is in a buildable state. + Check deprecations ------------------ Before the release branch is made, it should be checked that all deprecated @@ -263,6 +272,35 @@ ``bdist_wininst``, should also be uploaded to PyPi so ``easy_install numpy`` works. +Update docs.scipy.org +--------------------- +Do the following (or ask the doc people to take care of it): + +Rebuild and upload documentation: + +- ``cd numpy/doc`` +- ``make dist`` +- Check that the built documentation is OK. +- ``touch output-is-fine`` +- ``make upload UPLOAD_TARGET=USERNAME at docs.scipy.org:/home/docserver/www-root/doc/numpy-1.5.x/`` + +where USERNAME should be replaced by your account on +``docs.scipy.org``, and ``numpy-1.5.x`` by the version number of the +*release series*. For instance, for Numpy 1.5.1, it should be +``numpy-1.5.x`` and for Numpy 2.0.0 ``numpy-2.0.x``. + +Rebuild and upload ``docs.scipy.org`` front page, if the release +series is a new one. The front page sources are located in the Scipy +repository: + +- ``cd scipy/doc/frontpage`` +- Edit ``_templates/indexcontent.html`` to add links to the new release series. +- ``make dist`` +- Check that the built documentation is OK. +- ``touch output-is-fine`` +- ``make upload USER=USERNAME`` + + Update scipy.org ---------------- A release announcement with a link to the download site should be placed in the From numpy-svn at scipy.org Sun Sep 5 09:54:07 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sun, 5 Sep 2010 08:54:07 -0500 (CDT) Subject: [Numpy-svn] r8695 - trunk/numpy/distutils Message-ID: <20100905135407.73EF039CD62@scipy.org> Author: cdavid Date: 2010-09-05 08:54:07 -0500 (Sun, 05 Sep 2010) New Revision: 8695 Modified: trunk/numpy/distutils/npy_pkg_config.py Log: BUG: do not add empty string when interpolating values in LibraryInfo. This adds superflous space in strings, which cause issues when executing commands outside shell control (e.g. 
'-I/usr/include ' will not add '/usr/include' but '/usr/include ' into the search path of compilers) Modified: trunk/numpy/distutils/npy_pkg_config.py =================================================================== --- trunk/numpy/distutils/npy_pkg_config.py 2010-09-05 13:36:35 UTC (rev 8694) +++ trunk/numpy/distutils/npy_pkg_config.py 2010-09-05 13:54:07 UTC (rev 8695) @@ -313,7 +313,8 @@ # Update sec dict for oname, ovalue in nsections[rname].items(): - sections[rname][oname] += ' %s' % ovalue + if ovalue: + sections[rname][oname] += ' %s' % ovalue return meta, vars, sections, reqs From numpy-svn at scipy.org Sun Sep 5 11:23:40 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sun, 5 Sep 2010 10:23:40 -0500 (CDT) Subject: [Numpy-svn] r8696 - trunk/numpy/core/src/multiarray Message-ID: <20100905152340.0B46139CD66@scipy.org> Author: ptvirtan Date: 2010-09-05 10:23:39 -0500 (Sun, 05 Sep 2010) New Revision: 8696 Modified: trunk/numpy/core/src/multiarray/buffer.c Log: BUG: core/buffer: ensure that array_dealloc runs correctly even when PyErr flag is set (fixes #1605) Modified: trunk/numpy/core/src/multiarray/buffer.c =================================================================== --- trunk/numpy/core/src/multiarray/buffer.c 2010-09-05 13:54:07 UTC (rev 8695) +++ trunk/numpy/core/src/multiarray/buffer.c 2010-09-05 15:23:39 UTC (rev 8696) @@ -671,7 +671,23 @@ NPY_NO_EXPORT void _array_dealloc_buffer_info(PyArrayObject *self) { + int reset_error_state = 0; + PyObject *ptype, *pvalue, *ptraceback; + + /* This function may be called when processing an exception -- + * we need to stash the error state to avoid confusing PyDict + */ + + if (PyErr_Occurred()) { + reset_error_state = 1; + PyErr_Fetch(&ptype, &pvalue, &ptraceback); + } + _buffer_clear_info((PyObject*)self); + + if (reset_error_state) { + PyErr_Restore(ptype, pvalue, ptraceback); + } } #else From numpy-svn at scipy.org Sun Sep 5 11:29:22 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sun, 5 Sep 2010 10:29:22 -0500 (CDT) Subject: [Numpy-svn] r8697 - branches/1.5.x/numpy/core/src/multiarray Message-ID: <20100905152922.EF1B139CD67@scipy.org> Author: ptvirtan Date: 2010-09-05 10:29:22 -0500 (Sun, 05 Sep 2010) New Revision: 8697 Modified: branches/1.5.x/numpy/core/src/multiarray/buffer.c Log: BUG: core/buffer: ensure that array_dealloc runs correctly even when PyErr flag is set (fixes #1605) Modified: branches/1.5.x/numpy/core/src/multiarray/buffer.c =================================================================== --- branches/1.5.x/numpy/core/src/multiarray/buffer.c 2010-09-05 15:23:39 UTC (rev 8696) +++ branches/1.5.x/numpy/core/src/multiarray/buffer.c 2010-09-05 15:29:22 UTC (rev 8697) @@ -671,7 +671,23 @@ NPY_NO_EXPORT void _array_dealloc_buffer_info(PyArrayObject *self) { + int reset_error_state = 0; + PyObject *ptype, *pvalue, *ptraceback; + + /* This function may be called when processing an exception -- + * we need to stash the error state to avoid confusing PyDict + */ + + if (PyErr_Occurred()) { + reset_error_state = 1; + PyErr_Fetch(&ptype, &pvalue, &ptraceback); + } + _buffer_clear_info((PyObject*)self); + + if (reset_error_state) { + PyErr_Restore(ptype, pvalue, ptraceback); + } } #else From numpy-svn at scipy.org Tue Sep 7 08:58:57 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Tue, 7 Sep 2010 07:58:57 -0500 (CDT) Subject: [Numpy-svn] r8698 - trunk/tools/numpy-macosx-installer Message-ID: <20100907125857.630D039CCF3@scipy.org> Author: rgommers Date: 2010-09-07 
07:58:57 -0500 (Tue, 07 Sep 2010) New Revision: 8698 Modified: trunk/tools/numpy-macosx-installer/new-create-dmg Log: REL: forward-port change in size of dmg image from 1.5.x branch. Modified: trunk/tools/numpy-macosx-installer/new-create-dmg =================================================================== --- trunk/tools/numpy-macosx-installer/new-create-dmg 2010-09-05 15:29:22 UTC (rev 8697) +++ trunk/tools/numpy-macosx-installer/new-create-dmg 2010-09-07 12:58:57 UTC (rev 8698) @@ -58,7 +58,7 @@ DMG_NAME="$(basename "$DMG_PATH")" DMG_TEMP_NAME="$DMG_DIR/rw.${DMG_NAME}" SRC_FOLDER="$(cd "$2" > /dev/null; pwd)" -DMG_SIZE=10m +DMG_SIZE=12m test -z "$VOLUME_NAME" && VOLUME_NAME="$(basename "$DMG_PATH" .dmg)" # AUX_PATH="$(cd "$(dirname $0)"; pwd)/support" From numpy-svn at scipy.org Sat Sep 11 10:52:07 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sat, 11 Sep 2010 09:52:07 -0500 (CDT) Subject: [Numpy-svn] r8699 - in trunk: numpy/core/tests tools Message-ID: <20100911145207.B5B9739CCC0@scipy.org> Author: ptvirtan Date: 2010-09-11 09:52:07 -0500 (Sat, 11 Sep 2010) New Revision: 8699 Modified: trunk/numpy/core/tests/test_blasdot.py trunk/tools/py3tool.py Log: BUG: core: fix _dotblas usage on Py3 (fixes #1609) Modified: trunk/numpy/core/tests/test_blasdot.py =================================================================== --- trunk/numpy/core/tests/test_blasdot.py 2010-09-07 12:58:57 UTC (rev 8698) +++ trunk/numpy/core/tests/test_blasdot.py 2010-09-11 14:52:07 UTC (rev 8699) @@ -1,5 +1,5 @@ from numpy.core import zeros, float64 -from numpy.testing import TestCase, assert_almost_equal +from numpy.testing import dec, TestCase, assert_almost_equal, assert_ from numpy.core.multiarray import inner as inner_ DECPREC = 14 @@ -12,3 +12,17 @@ a = zeros(shape = (1, 80), dtype = float64) p = inner_(a, a) assert_almost_equal(p, 0, decimal = DECPREC) + +try: + import numpy.core._dotblas as _dotblas +except ImportError: + _dotblas = None + + at dec.skipif(_dotblas is None, "Numpy is not compiled with _dotblas") +def test_blasdot_used(): + from numpy.core import dot, vdot, inner, alterdot, restoredot + assert_(dot is _dotblas.dot) + assert_(vdot is _dotblas.vdot) + assert_(inner is _dotblas.inner) + assert_(alterdot is _dotblas.alterdot) + assert_(restoredot is _dotblas.restoredot) Modified: trunk/tools/py3tool.py =================================================================== --- trunk/tools/py3tool.py 2010-09-07 12:58:57 UTC (rev 8698) +++ trunk/tools/py3tool.py 2010-09-11 14:52:07 UTC (rev 8699) @@ -157,7 +157,7 @@ for mod in ['multiarray', 'scalarmath', 'umath', '_sort', '_compiled_base', 'core', 'lib', 'testing', 'fft', 'polynomial', 'random', 'ma', 'linalg', 'compat', - 'mtrand']: + 'mtrand', '_dotblas']: text = re.sub(r'^(\s*)import %s' % mod, r'\1from . 
import %s' % mod, text, flags=re.M) From numpy-svn at scipy.org Sat Sep 11 11:13:42 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sat, 11 Sep 2010 10:13:42 -0500 (CDT) Subject: [Numpy-svn] r8700 - trunk/numpy/testing Message-ID: <20100911151342.A7E3D39CCC0@scipy.org> Author: ptvirtan Date: 2010-09-11 10:13:42 -0500 (Sat, 11 Sep 2010) New Revision: 8700 Modified: trunk/numpy/testing/nosetester.py Log: TST: disable --detailed-errors by default -- the output it gives is more confusing than useful Modified: trunk/numpy/testing/nosetester.py =================================================================== --- trunk/numpy/testing/nosetester.py 2010-09-11 14:52:07 UTC (rev 8699) +++ trunk/numpy/testing/nosetester.py 2010-09-11 15:13:42 UTC (rev 8700) @@ -230,9 +230,6 @@ argv+=['--cover-package=%s' % self.package_name, '--with-coverage', '--cover-tests', '--cover-inclusive', '--cover-erase'] - # enable assert introspection - argv += ['--detailed-errors'] - # bypass these samples under distutils argv += ['--exclude','f2py_ext'] argv += ['--exclude','f2py_f90_ext'] From numpy-svn at scipy.org Sat Sep 11 11:47:22 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sat, 11 Sep 2010 10:47:22 -0500 (CDT) Subject: [Numpy-svn] r8701 - in trunk/numpy/core: include/numpy src/multiarray tests Message-ID: <20100911154722.DCBC239CCC0@scipy.org> Author: ptvirtan Date: 2010-09-11 10:47:22 -0500 (Sat, 11 Sep 2010) New Revision: 8701 Modified: trunk/numpy/core/include/numpy/npy_3kcompat.h trunk/numpy/core/src/multiarray/methods.c trunk/numpy/core/src/multiarray/multiarraymodule.c trunk/numpy/core/tests/test_regression.py Log: BUG: core: sync Python 3 file handle position in tofile/fromfile (fixes #1610) Modified: trunk/numpy/core/include/numpy/npy_3kcompat.h =================================================================== --- trunk/numpy/core/include/numpy/npy_3kcompat.h 2010-09-11 15:13:42 UTC (rev 8700) +++ trunk/numpy/core/include/numpy/npy_3kcompat.h 2010-09-11 15:47:22 UTC (rev 8701) @@ -149,14 +149,19 @@ #endif /* - * PyFile_AsFile + * PyFile_* compatibility */ #if defined(NPY_PY3K) + +/* + * Get a FILE* handle to the file represented by the Python object + */ static NPY_INLINE FILE* npy_PyFile_Dup(PyObject *file, char *mode) { int fd, fd2; PyObject *ret, *os; + FILE *handle; /* Flush first to ensure things end up in the file in the correct order */ ret = PyObject_CallMethod(file, "flush", ""); if (ret == NULL) { @@ -179,11 +184,62 @@ fd2 = PyNumber_AsSsize_t(ret, NULL); Py_DECREF(ret); #ifdef _WIN32 - return _fdopen(fd2, mode); + handle = _fdopen(fd2, mode); #else - return fdopen(fd2, mode); + handle = fdopen(fd2, mode); #endif + if (handle == NULL) { + PyErr_SetString(PyExc_IOError, + "Getting a FILE* from a Python file object failed"); + } + return handle; } + +/* + * Close the dup-ed file handle, and seek the Python one to the current position + */ +static NPY_INLINE int +npy_PyFile_DupClose(PyObject *file, FILE* handle) +{ + PyObject *ret; + long position; + position = ftell(handle); + fclose(handle); + + ret = PyObject_CallMethod(file, "seek", "li", position, 0); + if (ret == NULL) { + return -1; + } + Py_DECREF(ret); + return 0; +} + +static int +npy_PyFile_Check(PyObject *file) +{ + static PyTypeObject *fileio = NULL; + + if (fileio == NULL) { + PyObject *mod; + mod = PyImport_ImportModule("io"); + if (mod == NULL) { + return 0; + } + fileio = (PyTypeObject*)PyObject_GetAttrString(mod, "FileIO"); + Py_DECREF(mod); + } + + if (fileio != NULL) { + return 
PyObject_TypeCheck(file, fileio); + } +} + +#else + +#define npy_PyFile_Dup(file, mode) PyFile_AsFile(file) +#define npy_PyFile_DupClose(file, handle) (0) +#define npy_PyFile_Check PyFile_Check + #endif static NPY_INLINE PyObject* Modified: trunk/numpy/core/src/multiarray/methods.c =================================================================== --- trunk/numpy/core/src/multiarray/methods.c 2010-09-11 15:13:42 UTC (rev 8700) +++ trunk/numpy/core/src/multiarray/methods.c 2010-09-11 15:47:22 UTC (rev 8701) @@ -496,7 +496,7 @@ static PyObject * array_tofile(PyArrayObject *self, PyObject *args, PyObject *kwds) { - int ret; + int ret, ret2; PyObject *file; FILE *fd; char *sep = ""; @@ -517,11 +517,7 @@ else { Py_INCREF(file); } -#if defined(NPY_PY3K) fd = npy_PyFile_Dup(file, "wb"); -#else - fd = PyFile_AsFile(file); -#endif if (fd == NULL) { PyErr_SetString(PyExc_IOError, "first argument must be a " \ "string or open file"); @@ -529,11 +525,9 @@ return NULL; } ret = PyArray_ToFile(self, fd, sep, format); -#if defined(NPY_PY3K) - fclose(fd); -#endif + ret2 = npy_PyFile_DupClose(file, fd); Py_DECREF(file); - if (ret < 0) { + if (ret < 0 || ret2 < 0) { return NULL; } Py_INCREF(Py_None); Modified: trunk/numpy/core/src/multiarray/multiarraymodule.c =================================================================== --- trunk/numpy/core/src/multiarray/multiarraymodule.c 2010-09-11 15:13:42 UTC (rev 8700) +++ trunk/numpy/core/src/multiarray/multiarraymodule.c 2010-09-11 15:47:22 UTC (rev 8701) @@ -1706,6 +1706,7 @@ array_fromfile(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *keywds) { PyObject *file = NULL, *ret; + int ok; FILE *fp; char *sep = ""; Py_ssize_t nin = -1; @@ -1727,11 +1728,7 @@ else { Py_INCREF(file); } -#if defined(NPY_PY3K) fp = npy_PyFile_Dup(file, "rb"); -#else - fp = PyFile_AsFile(file); -#endif if (fp == NULL) { PyErr_SetString(PyExc_IOError, "first argument must be an open file"); @@ -1742,10 +1739,12 @@ type = PyArray_DescrFromType(PyArray_DEFAULT); } ret = PyArray_FromFile(fp, type, (intp) nin, sep); -#if defined(NPY_PY3K) - fclose(fp); -#endif + ok = npy_PyFile_DupClose(file, fp); Py_DECREF(file); + if (ok < 0) { + Py_DECREF(ret); + return NULL; + } return ret; } Modified: trunk/numpy/core/tests/test_regression.py =================================================================== --- trunk/numpy/core/tests/test_regression.py 2010-09-11 15:13:42 UTC (rev 8700) +++ trunk/numpy/core/tests/test_regression.py 2010-09-11 15:47:22 UTC (rev 8701) @@ -7,6 +7,7 @@ from numpy.testing import * from numpy.testing.utils import _assert_valid_refcount from numpy.compat import asbytes, asunicode, asbytes_nested +import tempfile import numpy as np if sys.version_info[0] >= 3: @@ -1373,5 +1374,26 @@ c2 = sys.getrefcount(rgba) assert_equal(c1, c2) + def test_fromfile_tofile_seeks(self): + # On Python 3, tofile/fromfile used to get (#1610) the Python + # file handle out of sync + f = tempfile.TemporaryFile() + f.write(np.arange(255, dtype='u1').tostring()) + + f.seek(20) + ret = np.fromfile(f, count=4, dtype='u1') + assert_equal(ret, np.array([20, 21, 22, 23], dtype='u1')) + assert_equal(f.tell(), 24) + + f.seek(40) + np.array([1, 2, 3], dtype='u1').tofile(f) + assert_equal(f.tell(), 43) + + f.seek(40) + data = f.read(3) + assert_equal(data, asbytes("\x01\x02\x03")) + + f.close() + if __name__ == "__main__": run_module_suite() From numpy-svn at scipy.org Sat Sep 11 11:56:32 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sat, 11 Sep 2010 10:56:32 -0500 (CDT) 
Subject: [Numpy-svn] r8702 - trunk/numpy/core/include/numpy Message-ID: <20100911155632.6EA3739CCC0@scipy.org> Author: ptvirtan Date: 2010-09-11 10:56:32 -0500 (Sat, 11 Sep 2010) New Revision: 8702 Modified: trunk/numpy/core/include/numpy/npy_3kcompat.h Log: BUG: core: fix a missing return branch in npy_3kcompat.h Modified: trunk/numpy/core/include/numpy/npy_3kcompat.h =================================================================== --- trunk/numpy/core/include/numpy/npy_3kcompat.h 2010-09-11 15:47:22 UTC (rev 8701) +++ trunk/numpy/core/include/numpy/npy_3kcompat.h 2010-09-11 15:56:32 UTC (rev 8702) @@ -232,6 +232,7 @@ if (fileio != NULL) { return PyObject_TypeCheck(file, fileio); } + return 0; } #else From numpy-svn at scipy.org Sat Sep 11 12:08:55 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sat, 11 Sep 2010 11:08:55 -0500 (CDT) Subject: [Numpy-svn] r8703 - in branches/1.5.x: numpy/core/tests tools Message-ID: <20100911160855.AE1DE39CCC0@scipy.org> Author: ptvirtan Date: 2010-09-11 11:08:55 -0500 (Sat, 11 Sep 2010) New Revision: 8703 Modified: branches/1.5.x/numpy/core/tests/test_blasdot.py branches/1.5.x/tools/py3tool.py Log: BUG: (backport r8699) core: fix _dotblas usage on Py3 (fixes #1609) Modified: branches/1.5.x/numpy/core/tests/test_blasdot.py =================================================================== --- branches/1.5.x/numpy/core/tests/test_blasdot.py 2010-09-11 15:56:32 UTC (rev 8702) +++ branches/1.5.x/numpy/core/tests/test_blasdot.py 2010-09-11 16:08:55 UTC (rev 8703) @@ -1,5 +1,5 @@ from numpy.core import zeros, float64 -from numpy.testing import TestCase, assert_almost_equal +from numpy.testing import dec, TestCase, assert_almost_equal, assert_ from numpy.core.multiarray import inner as inner_ DECPREC = 14 @@ -12,3 +12,17 @@ a = zeros(shape = (1, 80), dtype = float64) p = inner_(a, a) assert_almost_equal(p, 0, decimal = DECPREC) + +try: + import numpy.core._dotblas as _dotblas +except ImportError: + _dotblas = None + + at dec.skipif(_dotblas is None, "Numpy is not compiled with _dotblas") +def test_blasdot_used(): + from numpy.core import dot, vdot, inner, alterdot, restoredot + assert_(dot is _dotblas.dot) + assert_(vdot is _dotblas.vdot) + assert_(inner is _dotblas.inner) + assert_(alterdot is _dotblas.alterdot) + assert_(restoredot is _dotblas.restoredot) Modified: branches/1.5.x/tools/py3tool.py =================================================================== --- branches/1.5.x/tools/py3tool.py 2010-09-11 15:56:32 UTC (rev 8702) +++ branches/1.5.x/tools/py3tool.py 2010-09-11 16:08:55 UTC (rev 8703) @@ -157,7 +157,7 @@ for mod in ['multiarray', 'scalarmath', 'umath', '_sort', '_compiled_base', 'core', 'lib', 'testing', 'fft', 'polynomial', 'random', 'ma', 'linalg', 'compat', - 'mtrand']: + 'mtrand', '_dotblas']: text = re.sub(r'^(\s*)import %s' % mod, r'\1from . 
import %s' % mod, text, flags=re.M) From numpy-svn at scipy.org Sat Sep 11 12:09:12 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sat, 11 Sep 2010 11:09:12 -0500 (CDT) Subject: [Numpy-svn] r8704 - branches/1.5.x/numpy/testing Message-ID: <20100911160912.715DF39CCC0@scipy.org> Author: ptvirtan Date: 2010-09-11 11:09:12 -0500 (Sat, 11 Sep 2010) New Revision: 8704 Modified: branches/1.5.x/numpy/testing/nosetester.py Log: TST: (backport r8700) disable --detailed-errors by default -- the output it gives is more confusing than useful Modified: branches/1.5.x/numpy/testing/nosetester.py =================================================================== --- branches/1.5.x/numpy/testing/nosetester.py 2010-09-11 16:08:55 UTC (rev 8703) +++ branches/1.5.x/numpy/testing/nosetester.py 2010-09-11 16:09:12 UTC (rev 8704) @@ -230,9 +230,6 @@ argv+=['--cover-package=%s' % self.package_name, '--with-coverage', '--cover-tests', '--cover-inclusive', '--cover-erase'] - # enable assert introspection - argv += ['--detailed-errors'] - # bypass these samples under distutils argv += ['--exclude','f2py_ext'] argv += ['--exclude','f2py_f90_ext'] From numpy-svn at scipy.org Sat Sep 11 12:09:34 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sat, 11 Sep 2010 11:09:34 -0500 (CDT) Subject: [Numpy-svn] r8705 - in branches/1.5.x/numpy/core: include/numpy src/multiarray tests Message-ID: <20100911160934.57C3739CCC0@scipy.org> Author: ptvirtan Date: 2010-09-11 11:09:34 -0500 (Sat, 11 Sep 2010) New Revision: 8705 Modified: branches/1.5.x/numpy/core/include/numpy/npy_3kcompat.h branches/1.5.x/numpy/core/src/multiarray/methods.c branches/1.5.x/numpy/core/src/multiarray/multiarraymodule.c branches/1.5.x/numpy/core/tests/test_regression.py Log: BUG: (backport r8701) core: sync Python 3 file handle position in tofile/fromfile (fixes #1610) Modified: branches/1.5.x/numpy/core/include/numpy/npy_3kcompat.h =================================================================== --- branches/1.5.x/numpy/core/include/numpy/npy_3kcompat.h 2010-09-11 16:09:12 UTC (rev 8704) +++ branches/1.5.x/numpy/core/include/numpy/npy_3kcompat.h 2010-09-11 16:09:34 UTC (rev 8705) @@ -149,14 +149,19 @@ #endif /* - * PyFile_AsFile + * PyFile_* compatibility */ #if defined(NPY_PY3K) + +/* + * Get a FILE* handle to the file represented by the Python object + */ static NPY_INLINE FILE* npy_PyFile_Dup(PyObject *file, char *mode) { int fd, fd2; PyObject *ret, *os; + FILE *handle; /* Flush first to ensure things end up in the file in the correct order */ ret = PyObject_CallMethod(file, "flush", ""); if (ret == NULL) { @@ -179,11 +184,62 @@ fd2 = PyNumber_AsSsize_t(ret, NULL); Py_DECREF(ret); #ifdef _WIN32 - return _fdopen(fd2, mode); + handle = _fdopen(fd2, mode); #else - return fdopen(fd2, mode); + handle = fdopen(fd2, mode); #endif + if (handle == NULL) { + PyErr_SetString(PyExc_IOError, + "Getting a FILE* from a Python file object failed"); + } + return handle; } + +/* + * Close the dup-ed file handle, and seek the Python one to the current position + */ +static NPY_INLINE int +npy_PyFile_DupClose(PyObject *file, FILE* handle) +{ + PyObject *ret; + long position; + position = ftell(handle); + fclose(handle); + + ret = PyObject_CallMethod(file, "seek", "li", position, 0); + if (ret == NULL) { + return -1; + } + Py_DECREF(ret); + return 0; +} + +static int +npy_PyFile_Check(PyObject *file) +{ + static PyTypeObject *fileio = NULL; + + if (fileio == NULL) { + PyObject *mod; + mod = PyImport_ImportModule("io"); + if (mod == NULL) 
{ + return 0; + } + fileio = (PyTypeObject*)PyObject_GetAttrString(mod, "FileIO"); + Py_DECREF(mod); + } + + if (fileio != NULL) { + return PyObject_TypeCheck(file, fileio); + } +} + +#else + +#define npy_PyFile_Dup(file, mode) PyFile_AsFile(file) +#define npy_PyFile_DupClose(file, handle) (0) +#define npy_PyFile_Check PyFile_Check + #endif static NPY_INLINE PyObject* Modified: branches/1.5.x/numpy/core/src/multiarray/methods.c =================================================================== --- branches/1.5.x/numpy/core/src/multiarray/methods.c 2010-09-11 16:09:12 UTC (rev 8704) +++ branches/1.5.x/numpy/core/src/multiarray/methods.c 2010-09-11 16:09:34 UTC (rev 8705) @@ -496,7 +496,7 @@ static PyObject * array_tofile(PyArrayObject *self, PyObject *args, PyObject *kwds) { - int ret; + int ret, ret2; PyObject *file; FILE *fd; char *sep = ""; @@ -517,11 +517,7 @@ else { Py_INCREF(file); } -#if defined(NPY_PY3K) fd = npy_PyFile_Dup(file, "wb"); -#else - fd = PyFile_AsFile(file); -#endif if (fd == NULL) { PyErr_SetString(PyExc_IOError, "first argument must be a " \ "string or open file"); @@ -529,11 +525,9 @@ return NULL; } ret = PyArray_ToFile(self, fd, sep, format); -#if defined(NPY_PY3K) - fclose(fd); -#endif + ret2 = npy_PyFile_DupClose(file, fd); Py_DECREF(file); - if (ret < 0) { + if (ret < 0 || ret2 < 0) { return NULL; } Py_INCREF(Py_None); Modified: branches/1.5.x/numpy/core/src/multiarray/multiarraymodule.c =================================================================== --- branches/1.5.x/numpy/core/src/multiarray/multiarraymodule.c 2010-09-11 16:09:12 UTC (rev 8704) +++ branches/1.5.x/numpy/core/src/multiarray/multiarraymodule.c 2010-09-11 16:09:34 UTC (rev 8705) @@ -1669,6 +1669,7 @@ array_fromfile(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *keywds) { PyObject *file = NULL, *ret; + int ok; FILE *fp; char *sep = ""; Py_ssize_t nin = -1; @@ -1690,11 +1691,7 @@ else { Py_INCREF(file); } -#if defined(NPY_PY3K) fp = npy_PyFile_Dup(file, "rb"); -#else - fp = PyFile_AsFile(file); -#endif if (fp == NULL) { PyErr_SetString(PyExc_IOError, "first argument must be an open file"); @@ -1705,10 +1702,12 @@ type = PyArray_DescrFromType(PyArray_DEFAULT); } ret = PyArray_FromFile(fp, type, (intp) nin, sep); -#if defined(NPY_PY3K) - fclose(fp); -#endif + ok = npy_PyFile_DupClose(file, fp); Py_DECREF(file); + if (ok < 0) { + Py_DECREF(ret); + return NULL; + } return ret; } Modified: branches/1.5.x/numpy/core/tests/test_regression.py =================================================================== --- branches/1.5.x/numpy/core/tests/test_regression.py 2010-09-11 16:09:12 UTC (rev 8704) +++ branches/1.5.x/numpy/core/tests/test_regression.py 2010-09-11 16:09:34 UTC (rev 8705) @@ -7,6 +7,7 @@ from numpy.testing import * from numpy.testing.utils import _assert_valid_refcount from numpy.compat import asbytes, asunicode, asbytes_nested +import tempfile import numpy as np if sys.version_info[0] >= 3: @@ -1377,5 +1378,26 @@ c2 = sys.getrefcount(rgba) assert_equal(c1, c2) + def test_fromfile_tofile_seeks(self): + # On Python 3, tofile/fromfile used to get (#1610) the Python + # file handle out of sync + f = tempfile.TemporaryFile() + f.write(np.arange(255, dtype='u1').tostring()) + + f.seek(20) + ret = np.fromfile(f, count=4, dtype='u1') + assert_equal(ret, np.array([20, 21, 22, 23], dtype='u1')) + assert_equal(f.tell(), 24) + + f.seek(40) + np.array([1, 2, 3], dtype='u1').tofile(f) + assert_equal(f.tell(), 43) + + f.seek(40) + data = f.read(3) + assert_equal(data, asbytes("\x01\x02\x03")) 
+ + f.close() + if __name__ == "__main__": run_module_suite() From numpy-svn at scipy.org Sat Sep 11 12:09:49 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sat, 11 Sep 2010 11:09:49 -0500 (CDT) Subject: [Numpy-svn] r8706 - branches/1.5.x/numpy/core/include/numpy Message-ID: <20100911160949.F006239CCC0@scipy.org> Author: ptvirtan Date: 2010-09-11 11:09:49 -0500 (Sat, 11 Sep 2010) New Revision: 8706 Modified: branches/1.5.x/numpy/core/include/numpy/npy_3kcompat.h Log: BUG: (backport r8702) core: fix a missing return branch in npy_3kcompat.h Modified: branches/1.5.x/numpy/core/include/numpy/npy_3kcompat.h =================================================================== --- branches/1.5.x/numpy/core/include/numpy/npy_3kcompat.h 2010-09-11 16:09:34 UTC (rev 8705) +++ branches/1.5.x/numpy/core/include/numpy/npy_3kcompat.h 2010-09-11 16:09:49 UTC (rev 8706) @@ -232,6 +232,7 @@ if (fileio != NULL) { return PyObject_TypeCheck(file, fileio); } + return 0; } #else From numpy-svn at scipy.org Sat Sep 11 12:56:14 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sat, 11 Sep 2010 11:56:14 -0500 (CDT) Subject: [Numpy-svn] r8707 - in trunk/numpy/core: include/numpy tests Message-ID: <20100911165614.5188A39CCC0@scipy.org> Author: ptvirtan Date: 2010-09-11 11:56:14 -0500 (Sat, 11 Sep 2010) New Revision: 8707 Modified: trunk/numpy/core/include/numpy/npy_3kcompat.h trunk/numpy/core/tests/test_regression.py Log: BUG: core: on Python3, seek file handle to the current position in npy_PyFile_Dup (fixing #1610) Modified: trunk/numpy/core/include/numpy/npy_3kcompat.h =================================================================== --- trunk/numpy/core/include/numpy/npy_3kcompat.h 2010-09-11 16:09:49 UTC (rev 8706) +++ trunk/numpy/core/include/numpy/npy_3kcompat.h 2010-09-11 16:56:14 UTC (rev 8707) @@ -161,6 +161,7 @@ { int fd, fd2; PyObject *ret, *os; + Py_ssize_t pos; FILE *handle; /* Flush first to ensure things end up in the file in the correct order */ ret = PyObject_CallMethod(file, "flush", ""); @@ -192,6 +193,18 @@ PyErr_SetString(PyExc_IOError, "Getting a FILE* from a Python file object failed"); } + ret = PyObject_CallMethod(file, "tell", ""); + if (ret == NULL) { + fclose(handle); + return NULL; + } + pos = PyNumber_AsSsize_t(ret, PyExc_OverflowError); + Py_DECREF(ret); + if (PyErr_Occurred()) { + fclose(handle); + return NULL; + } + fseek(handle, pos, SEEK_SET); return handle; } Modified: trunk/numpy/core/tests/test_regression.py =================================================================== --- trunk/numpy/core/tests/test_regression.py 2010-09-11 16:09:49 UTC (rev 8706) +++ trunk/numpy/core/tests/test_regression.py 2010-09-11 16:56:14 UTC (rev 8707) @@ -1393,6 +1393,11 @@ data = f.read(3) assert_equal(data, asbytes("\x01\x02\x03")) + f.seek(80) + f.read(4) + data = np.fromfile(f, dtype='u1', count=4) + assert_equal(data, np.array([84, 85, 86, 87], dtype='u1')) + f.close() if __name__ == "__main__": From numpy-svn at scipy.org Sat Sep 11 13:05:45 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sat, 11 Sep 2010 12:05:45 -0500 (CDT) Subject: [Numpy-svn] r8708 - in branches/1.5.x/numpy/core: include/numpy tests Message-ID: <20100911170545.9FF8639CD34@scipy.org> Author: ptvirtan Date: 2010-09-11 12:05:45 -0500 (Sat, 11 Sep 2010) New Revision: 8708 Modified: branches/1.5.x/numpy/core/include/numpy/npy_3kcompat.h branches/1.5.x/numpy/core/tests/test_regression.py Log: BUG: (backport r8707) core: on Python3, seek file handle to the current position 
in npy_PyFile_Dup (fixing #1610) Modified: branches/1.5.x/numpy/core/include/numpy/npy_3kcompat.h =================================================================== --- branches/1.5.x/numpy/core/include/numpy/npy_3kcompat.h 2010-09-11 16:56:14 UTC (rev 8707) +++ branches/1.5.x/numpy/core/include/numpy/npy_3kcompat.h 2010-09-11 17:05:45 UTC (rev 8708) @@ -161,6 +161,7 @@ { int fd, fd2; PyObject *ret, *os; + Py_ssize_t pos; FILE *handle; /* Flush first to ensure things end up in the file in the correct order */ ret = PyObject_CallMethod(file, "flush", ""); @@ -192,6 +193,18 @@ PyErr_SetString(PyExc_IOError, "Getting a FILE* from a Python file object failed"); } + ret = PyObject_CallMethod(file, "tell", ""); + if (ret == NULL) { + fclose(handle); + return NULL; + } + pos = PyNumber_AsSsize_t(ret, PyExc_OverflowError); + Py_DECREF(ret); + if (PyErr_Occurred()) { + fclose(handle); + return NULL; + } + fseek(handle, pos, SEEK_SET); return handle; } Modified: branches/1.5.x/numpy/core/tests/test_regression.py =================================================================== --- branches/1.5.x/numpy/core/tests/test_regression.py 2010-09-11 16:56:14 UTC (rev 8707) +++ branches/1.5.x/numpy/core/tests/test_regression.py 2010-09-11 17:05:45 UTC (rev 8708) @@ -1397,6 +1397,11 @@ data = f.read(3) assert_equal(data, asbytes("\x01\x02\x03")) + f.seek(80) + f.read(4) + data = np.fromfile(f, dtype='u1', count=4) + assert_equal(data, np.array([84, 85, 86, 87], dtype='u1')) + f.close() if __name__ == "__main__": From numpy-svn at scipy.org Sat Sep 11 14:11:57 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sat, 11 Sep 2010 13:11:57 -0500 (CDT) Subject: [Numpy-svn] r8709 - trunk/numpy/core/include/numpy Message-ID: <20100911181157.3D2FA39CCC0@scipy.org> Author: ptvirtan Date: 2010-09-11 13:11:57 -0500 (Sat, 11 Sep 2010) New Revision: 8709 Modified: trunk/numpy/core/include/numpy/npy_3kcompat.h Log: BUG: core: fix npy_PyFile_Check implementation on Py3 Modified: trunk/numpy/core/include/numpy/npy_3kcompat.h =================================================================== --- trunk/numpy/core/include/numpy/npy_3kcompat.h 2010-09-11 17:05:45 UTC (rev 8708) +++ trunk/numpy/core/include/numpy/npy_3kcompat.h 2010-09-11 18:11:57 UTC (rev 8709) @@ -227,25 +227,16 @@ return 0; } -static int +static NPY_INLINE int npy_PyFile_Check(PyObject *file) { - static PyTypeObject *fileio = NULL; - - if (fileio == NULL) { - PyObject *mod; - mod = PyImport_ImportModule("io"); - if (mod == NULL) { - return 0; - } - fileio = (PyTypeObject*)PyObject_GetAttrString(mod, "FileIO"); - Py_DECREF(mod); + int fd; + fd = PyObject_AsFileDescriptor(file); + if (fd == -1) { + PyErr_Clear(); + return 0; } - - if (fileio != NULL) { - return PyObject_TypeCheck(file, fileio); - } - return 0; + return 1; } #else From numpy-svn at scipy.org Sat Sep 11 14:13:18 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sat, 11 Sep 2010 13:13:18 -0500 (CDT) Subject: [Numpy-svn] r8710 - branches/1.5.x/numpy/core/include/numpy Message-ID: <20100911181318.0805439CCC0@scipy.org> Author: ptvirtan Date: 2010-09-11 13:13:17 -0500 (Sat, 11 Sep 2010) New Revision: 8710 Modified: branches/1.5.x/numpy/core/include/numpy/npy_3kcompat.h Log: BUG: (backport r8709) core: fix npy_PyFile_Check implementation on Py3 Modified: branches/1.5.x/numpy/core/include/numpy/npy_3kcompat.h =================================================================== --- branches/1.5.x/numpy/core/include/numpy/npy_3kcompat.h 2010-09-11 18:11:57 UTC (rev 8709) +++ 
branches/1.5.x/numpy/core/include/numpy/npy_3kcompat.h 2010-09-11 18:13:17 UTC (rev 8710) @@ -227,25 +227,16 @@ return 0; } -static int +static NPY_INLINE int npy_PyFile_Check(PyObject *file) { - static PyTypeObject *fileio = NULL; - - if (fileio == NULL) { - PyObject *mod; - mod = PyImport_ImportModule("io"); - if (mod == NULL) { - return 0; - } - fileio = (PyTypeObject*)PyObject_GetAttrString(mod, "FileIO"); - Py_DECREF(mod); + int fd; + fd = PyObject_AsFileDescriptor(file); + if (fd == -1) { + PyErr_Clear(); + return 0; } - - if (fileio != NULL) { - return PyObject_TypeCheck(file, fileio); - } - return 0; + return 1; } #else From numpy-svn at scipy.org Sat Sep 11 17:34:37 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sat, 11 Sep 2010 16:34:37 -0500 (CDT) Subject: [Numpy-svn] r8711 - trunk/numpy/distutils/fcompiler Message-ID: <20100911213437.356C439CC7C@scipy.org> Author: ptvirtan Date: 2010-09-11 16:34:37 -0500 (Sat, 11 Sep 2010) New Revision: 8711 Modified: trunk/numpy/distutils/fcompiler/__init__.py Log: BUG: distutils: make is_free_format et al. not choke on non-ascii Fortran files Modified: trunk/numpy/distutils/fcompiler/__init__.py =================================================================== --- trunk/numpy/distutils/fcompiler/__init__.py 2010-09-11 18:13:17 UTC (rev 8710) +++ trunk/numpy/distutils/fcompiler/__init__.py 2010-09-11 21:34:37 UTC (rev 8711) @@ -25,6 +25,8 @@ except NameError: from sets import Set as set +from numpy.compat import open_latin1 + from distutils.sysconfig import get_config_var, get_python_lib from distutils.fancy_getopt import FancyGetopt from distutils.errors import DistutilsModuleError, \ @@ -911,7 +913,7 @@ # f90 allows both fixed and free format, assuming fixed unless # signs of free format are detected. result = 0 - f = open(file,'r') + f = open_latin1(file,'r') line = f.readline() n = 10000 # the number of non-comment lines to scan for hints if _has_f_header(line): @@ -931,7 +933,7 @@ return result def has_f90_header(src): - f = open(src,'r') + f = open_latin1(src,'r') line = f.readline() f.close() return _has_f90_header(line) or _has_fix_header(line) @@ -944,7 +946,7 @@ Return a dictionary {:}. """ flags = {} - f = open(src,'r') + f = open_latin1(src,'r') i = 0 for line in f.readlines(): i += 1 From numpy-svn at scipy.org Sat Sep 11 17:36:23 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sat, 11 Sep 2010 16:36:23 -0500 (CDT) Subject: [Numpy-svn] r8712 - branches/1.5.x/numpy/distutils/fcompiler Message-ID: <20100911213623.37B5139CC7C@scipy.org> Author: ptvirtan Date: 2010-09-11 16:36:23 -0500 (Sat, 11 Sep 2010) New Revision: 8712 Modified: branches/1.5.x/numpy/distutils/fcompiler/__init__.py Log: BUG: (backport r8711) distutils: make is_free_format et al. not choke on non-ascii Fortran files Modified: branches/1.5.x/numpy/distutils/fcompiler/__init__.py =================================================================== --- branches/1.5.x/numpy/distutils/fcompiler/__init__.py 2010-09-11 21:34:37 UTC (rev 8711) +++ branches/1.5.x/numpy/distutils/fcompiler/__init__.py 2010-09-11 21:36:23 UTC (rev 8712) @@ -25,6 +25,8 @@ except NameError: from sets import Set as set +from numpy.compat import open_latin1 + from distutils.sysconfig import get_config_var, get_python_lib from distutils.fancy_getopt import FancyGetopt from distutils.errors import DistutilsModuleError, \ @@ -911,7 +913,7 @@ # f90 allows both fixed and free format, assuming fixed unless # signs of free format are detected. 
result = 0 - f = open(file,'r') + f = open_latin1(file,'r') line = f.readline() n = 10000 # the number of non-comment lines to scan for hints if _has_f_header(line): @@ -931,7 +933,7 @@ return result def has_f90_header(src): - f = open(src,'r') + f = open_latin1(src,'r') line = f.readline() f.close() return _has_f90_header(line) or _has_fix_header(line) @@ -944,7 +946,7 @@ Return a dictionary {:}. """ flags = {} - f = open(src,'r') + f = open_latin1(src,'r') i = 0 for line in f.readlines(): i += 1 From numpy-svn at scipy.org Mon Sep 13 08:34:38 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Mon, 13 Sep 2010 07:34:38 -0500 (CDT) Subject: [Numpy-svn] r8713 - in trunk/numpy/lib: . tests Message-ID: <20100913123438.4EDB939CC9C@scipy.org> Author: pierregm Date: 2010-09-13 07:34:37 -0500 (Mon, 13 Sep 2010) New Revision: 8713 Modified: trunk/numpy/lib/_iotools.py trunk/numpy/lib/tests/test__iotools.py Log: * fixed 'flatten_dtype' to support fields w/ titles (bug #1591). Thx to Stefan vdW for the fix. * added a unittest for flatten_dtype Modified: trunk/numpy/lib/_iotools.py =================================================================== --- trunk/numpy/lib/_iotools.py 2010-09-11 21:36:23 UTC (rev 8712) +++ trunk/numpy/lib/_iotools.py 2010-09-13 12:34:37 UTC (rev 8713) @@ -132,8 +132,8 @@ else: types = [] for field in names: - (typ, _) = ndtype.fields[field] - flat_dt = flatten_dtype(typ, flatten_base) + info = ndtype.fields[field] + flat_dt = flatten_dtype(info[0], flatten_base) types.extend(flat_dt) return types Modified: trunk/numpy/lib/tests/test__iotools.py =================================================================== --- trunk/numpy/lib/tests/test__iotools.py 2010-09-11 21:36:23 UTC (rev 8712) +++ trunk/numpy/lib/tests/test__iotools.py 2010-09-13 12:34:37 UTC (rev 8713) @@ -10,8 +10,8 @@ import time import numpy as np -from numpy.lib._iotools import LineSplitter, NameValidator, StringConverter,\ - has_nested_fields, easy_dtype +from numpy.lib._iotools import LineSplitter, NameValidator, StringConverter, \ + has_nested_fields, easy_dtype, flatten_dtype from numpy.testing import * from numpy.compat import asbytes, asbytes_nested @@ -37,10 +37,10 @@ def test_tab_delimiter(self): "Test tab delimiter" - strg= asbytes(" 1\t 2\t 3\t 4\t 5 6") + strg = asbytes(" 1\t 2\t 3\t 4\t 5 6") test = LineSplitter(asbytes('\t'))(strg) assert_equal(test, asbytes_nested(['1', '2', '3', '4', '5 6'])) - strg= asbytes(" 1 2\t 3 4\t 5 6") + strg = asbytes(" 1 2\t 3 4\t 5 6") test = LineSplitter(asbytes('\t'))(strg) assert_equal(test, asbytes_nested(['1 2', '3 4', '5 6'])) @@ -70,11 +70,11 @@ def test_variable_fixed_width(self): strg = asbytes(" 1 3 4 5 6# test") - test = LineSplitter((3,6,6,3))(strg) + test = LineSplitter((3, 6, 6, 3))(strg) assert_equal(test, asbytes_nested(['1', '3', '4 5', '6'])) # strg = asbytes(" 1 3 4 5 6# test") - test = LineSplitter((6,6,9))(strg) + test = LineSplitter((6, 6, 9))(strg) assert_equal(test, asbytes_nested(['1', '3 4', '5 6'])) @@ -97,7 +97,7 @@ def test_excludelist(self): "Test excludelist" names = ['dates', 'data', 'Other Data', 'mask'] - validator = NameValidator(excludelist = ['dates', 'data', 'mask']) + validator = NameValidator(excludelist=['dates', 'data', 'mask']) test = validator.validate(names) assert_equal(test, ['dates_', 'data_', 'Other_Data', 'mask_']) # @@ -117,7 +117,7 @@ "Test validate nb names" namelist = ('a', 'b', 'c') validator = NameValidator() - assert_equal(validator(namelist, nbfields=1), ('a', )) + 
assert_equal(validator(namelist, nbfields=1), ('a',)) assert_equal(validator(namelist, nbfields=5, defaultfmt="g%i"), ['a', 'b', 'c', 'g0', 'g1']) # @@ -159,7 +159,7 @@ converter.upgrade(asbytes('0j')) assert_equal(converter._status, 3) converter.upgrade(asbytes('a')) - assert_equal(converter._status, len(converter._mapper)-1) + assert_equal(converter._status, len(converter._mapper) - 1) # def test_missing(self): "Tests the use of missing values." @@ -178,7 +178,7 @@ def test_upgrademapper(self): "Tests updatemapper" dateparser = _bytes_to_date - StringConverter.upgrade_mapper(dateparser, date(2000,1,1)) + StringConverter.upgrade_mapper(dateparser, date(2000, 1, 1)) convert = StringConverter(dateparser, date(2000, 1, 1)) test = convert(asbytes('2001-01-01')) assert_equal(test, date(2001, 01, 01)) @@ -196,7 +196,7 @@ def test_keep_default(self): "Make sure we don't lose an explicit default" converter = StringConverter(None, missing_values=asbytes(''), - default=-999) + default= -999) converter.upgrade(asbytes('3.14159265')) assert_equal(converter.default, -999) assert_equal(converter.type, np.dtype(float)) @@ -287,3 +287,25 @@ assert_equal(easy_dtype(ndtype, names=['', '', ''], defaultfmt="f%02i"), np.dtype([(_, float) for _ in ('f00', 'f01', 'f02')])) + + def test_flatten_dtype(self): + "Testing flatten_dtype" + # Standard dtype + dt = np.dtype([("a", "f8"), ("b", "f8")]) + dt_flat = flatten_dtype(dt) + assert_equal(dt_flat, [float, float]) + # Recursive dtype + dt = np.dtype([("a", [("aa", '|S1'), ("ab", '|S2')]), ("b", int)]) + dt_flat = flatten_dtype(dt) + assert_equal(dt_flat, [np.dtype('|S1'), np.dtype('|S2'), int]) + # dtype with shaped fields + dt = np.dtype([("a", (float, 2)), ("b", (int, 3))]) + dt_flat = flatten_dtype(dt) + assert_equal(dt_flat, [float, int]) + dt_flat = flatten_dtype(dt, True) + assert_equal(dt_flat, [float] * 2 + [int] * 3) + # dtype w/ titles + dt = np.dtype([(("a", "A"), "f8"), (("b", "B"), "f8")]) + dt_flat = flatten_dtype(dt) + assert_equal(dt_flat, [float, float]) + From numpy-svn at scipy.org Mon Sep 13 11:43:27 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Mon, 13 Sep 2010 10:43:27 -0500 (CDT) Subject: [Numpy-svn] r8714 - in trunk/numpy/ma: . 
tests Message-ID: <20100913154327.7ADAE39CC9C@scipy.org> Author: pierregm Date: 2010-09-13 10:43:27 -0500 (Mon, 13 Sep 2010) New Revision: 8714 Modified: trunk/numpy/ma/core.py trunk/numpy/ma/tests/test_core.py Log: * ma.core._print_templates: switched the keys 'short' and 'long' to 'short_std' and 'long_std' respectively (bug #1586) * Fixed incorrect broadcasting in ma.power (bug #1606) Modified: trunk/numpy/ma/core.py =================================================================== --- trunk/numpy/ma/core.py 2010-09-13 12:34:37 UTC (rev 8713) +++ trunk/numpy/ma/core.py 2010-09-13 15:43:27 UTC (rev 8714) @@ -2296,14 +2296,14 @@ np.putmask(curdata, curmask, printopt) return -_print_templates = dict(long="""\ +_print_templates = dict(long_std="""\ masked_%(name)s(data = %(data)s, %(nlen)s mask = %(mask)s, %(nlen)s fill_value = %(fill)s) """, - short="""\ + short_std="""\ masked_%(name)s(data = %(data)s, %(nlen)s mask = %(mask)s, %(nlen)s fill_value = %(fill)s) @@ -3574,8 +3574,8 @@ return _print_templates['short_flx'] % parameters return _print_templates['long_flx'] % parameters elif n <= 1: - return _print_templates['short'] % parameters - return _print_templates['long'] % parameters + return _print_templates['short_std'] % parameters + return _print_templates['long_std'] % parameters def __eq__(self, other): @@ -5972,7 +5972,7 @@ ids = _frommethod('ids') maximum = _maximum_operation() mean = _frommethod('mean') -minimum = _minimum_operation () +minimum = _minimum_operation() nonzero = _frommethod('nonzero') prod = _frommethod('prod') product = _frommethod('prod') @@ -6040,8 +6040,7 @@ if m is not nomask: if not (result.ndim): return masked - m |= invalid - result._mask = m + result._mask = np.logical_or(m, invalid) # Fix the invalid parts if invalid.any(): if not result.ndim: Modified: trunk/numpy/ma/tests/test_core.py =================================================================== --- trunk/numpy/ma/tests/test_core.py 2010-09-13 12:34:37 UTC (rev 8713) +++ trunk/numpy/ma/tests/test_core.py 2010-09-13 15:43:27 UTC (rev 8714) @@ -2975,7 +2975,40 @@ assert_almost_equal(x, y) assert_almost_equal(x._data, y._data) + def test_power_w_broadcasting(self): + "Test power w/ broadcasting" + a2 = np.array([[1., 2., 3.], [4., 5., 6.]]) + a2m = array(a2, mask=[[1, 0, 0], [0, 0, 1]]) + b1 = np.array([2, 4, 3]) + b1m = array(b1, mask=[0, 1, 0]) + b2 = np.array([b1, b1]) + b2m = array(b2, mask=[[0, 1, 0], [0, 1, 0]]) + # + ctrl = array([[1 ** 2, 2 ** 4, 3 ** 3], [4 ** 2, 5 ** 4, 6 ** 3]], + mask=[[1, 1, 0], [0, 1, 1]]) + # No broadcasting, base & exp w/ mask + test = a2m ** b2m + assert_equal(test, ctrl) + assert_equal(test.mask, ctrl.mask) + # No broadcasting, base w/ mask, exp w/o mask + test = a2m ** b2 + assert_equal(test, ctrl) + assert_equal(test.mask, a2m.mask) + # No broadcasting, base w/o mask, exp w/ mask + test = a2 ** b2m + assert_equal(test, ctrl) + assert_equal(test.mask, b2m.mask) + # + ctrl = array([[2 ** 2, 4 ** 4, 3 ** 3], [2 ** 2, 4 ** 4, 3 ** 3]], + mask=[[0, 1, 0], [0, 1, 0]]) + test = b1 ** b2m + assert_equal(test, ctrl) + assert_equal(test.mask, ctrl.mask) + test = b2m ** b1 + assert_equal(test, ctrl) + assert_equal(test.mask, ctrl.mask) + def test_where(self): "Test the where function" x = np.array([1., 1., 1., -2., pi / 2.0, 4., 5., -10., 10., 1., 2., 3.]) From numpy-svn at scipy.org Mon Sep 13 19:54:54 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Mon, 13 Sep 2010 18:54:54 -0500 (CDT) Subject: [Numpy-svn] r8715 - in trunk/numpy/lib: . 
tests Message-ID: <20100913235454.A5F5239CD02@scipy.org> Author: pierregm Date: 2010-09-13 18:54:54 -0500 (Mon, 13 Sep 2010) New Revision: 8715 Modified: trunk/numpy/lib/npyio.py trunk/numpy/lib/tests/test_io.py Log: * fixed the behavior of {{{skip_footer}}} in {{{genfromtxt}}} when some invalid lines are present (bug #1593) Modified: trunk/numpy/lib/npyio.py =================================================================== --- trunk/numpy/lib/npyio.py 2010-09-13 15:43:27 UTC (rev 8714) +++ trunk/numpy/lib/npyio.py 2010-09-13 23:54:54 UTC (rev 8715) @@ -1343,10 +1343,10 @@ try: values = [values[_] for _ in usecols] except IndexError: - append_to_invalid((i, nbvalues)) + append_to_invalid((i + skip_header + 1, nbvalues)) continue elif nbvalues != nbcols: - append_to_invalid((i, nbvalues)) + append_to_invalid((i + skip_header + 1, nbvalues)) continue # Store the values append_to_rows(tuple(values)) @@ -1354,11 +1354,6 @@ append_to_masks(tuple([v.strip() in m for (v, m) in zip(values, missing_values)])) - # Strip the last skip_footer data - if skip_footer > 0: - rows = rows[:-skip_footer] - if usemask: - masks = masks[:-skip_footer] # Upgrade the converters (if needed) if dtype is None: @@ -1378,17 +1373,23 @@ raise ConverterError(errmsg) # Check that we don't have invalid values - if len(invalid) > 0: - nbrows = len(rows) + nbinvalid = len(invalid) + if nbinvalid > 0: + nbrows = len(rows) + nbinvalid - skip_footer # Construct the error message template = " Line #%%i (got %%i columns instead of %i)" % nbcols if skip_footer > 0: - nbrows -= skip_footer - errmsg = [template % (i + skip_header + 1, nb) - for (i, nb) in invalid if i < nbrows] - else: - errmsg = [template % (i + skip_header + 1, nb) - for (i, nb) in invalid] + nbinvalid_skipped = len([_ for _ in invalid + if _[0] > nbrows + skip_header]) + invalid = invalid[:nbinvalid - nbinvalid_skipped] + skip_footer -= nbinvalid_skipped +# +# nbrows -= skip_footer +# errmsg = [template % (i, nb) +# for (i, nb) in invalid if i < nbrows] +# else: + errmsg = [template % (i, nb) + for (i, nb) in invalid] if len(errmsg): errmsg.insert(0, "Some errors were detected !") errmsg = "\n".join(errmsg) @@ -1399,6 +1400,13 @@ else: warnings.warn(errmsg, ConversionWarning) + # Strip the last skip_footer data + if skip_footer > 0: + rows = rows[:-skip_footer] + if usemask: + masks = masks[:-skip_footer] + + # Convert each value according to the converter: # We want to modify the list in place to avoid creating a new one... 
# if loose: Modified: trunk/numpy/lib/tests/test_io.py =================================================================== --- trunk/numpy/lib/tests/test_io.py 2010-09-13 15:43:27 UTC (rev 8714) +++ trunk/numpy/lib/tests/test_io.py 2010-09-13 23:54:54 UTC (rev 8715) @@ -539,10 +539,33 @@ data[-1] = "99,99" kwargs = dict(delimiter=",", names=True, skip_header=5, skip_footer=10) test = np.genfromtxt(StringIO(asbytes("\n".join(data))), **kwargs) - ctrl = np.array([("%f" % i, "%f" % i, "%f" % i) for i in range(40)], + ctrl = np.array([("%f" % i, "%f" % i, "%f" % i) for i in range(41)], dtype=[(_, float) for _ in "ABC"]) assert_equal(test, ctrl) + def test_skip_footer_with_invalid(self): + import warnings + basestr = '1 1\n2 2\n3 3\n4 4\n5 \n6 \n7 \n' + warnings.filterwarnings("ignore") + # Footer too small to get rid of all invalid values + assert_raises(ValueError, np.genfromtxt, + StringIO(basestr), skip_footer=1) +# except ValueError: +# pass + a = np.genfromtxt(StringIO(basestr), skip_footer=1, invalid_raise=False) + assert_equal(a, np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]])) + # + a = np.genfromtxt(StringIO(basestr), skip_footer=3) + assert_equal(a, np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]])) + # + basestr = '1 1\n2 \n3 3\n4 4\n5 \n6 6\n7 7\n' + a = np.genfromtxt(StringIO(basestr), skip_footer=1, invalid_raise=False) + assert_equal(a, np.array([[1., 1.], [3., 3.], [4., 4.], [6., 6.]])) + a = np.genfromtxt(StringIO(basestr), skip_footer=3, invalid_raise=False) + assert_equal(a, np.array([[1., 1.], [3., 3.], [4., 4.]])) + warnings.resetwarnings() + + def test_header(self): "Test retrieving a header" data = StringIO('gender age weight\nM 64.0 75.0\nF 25.0 60.0')
From numpy-svn at scipy.org Wed Sep 15 17:07:05 2010 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Wed, 15 Sep 2010 16:07:05 -0500 (CDT) Subject: [Numpy-svn] r8716 - trunk Message-ID: <20100915210705.DC53B39CD50@scipy.org> Author: ptvirtan Date: 2010-09-15 16:07:05 -0500 (Wed, 15 Sep 2010) New Revision: 8716 Modified: trunk/README.txt Log: Point README.txt to the Git repository Modified: trunk/README.txt =================================================================== --- trunk/README.txt 2010-09-13 23:54:54 UTC (rev 8715) +++ trunk/README.txt 2010-09-15 21:07:05 UTC (rev 8716) @@ -17,7 +17,7 @@ python -c 'import numpy; numpy.test()' The most current development version is always available from our -subversion repository: +Git repository: -http://svn.scipy.org/svn/numpy/trunk +http://github.com/numpy/numpy
From noreply at github.com Thu Sep 23 13:44:42 2010 From: noreply at github.com (noreply at github.com) Date: Thu, 23 Sep 2010 10:44:42 -0700 Subject: [Numpy-svn] [numpy/numpy] 29cccb: BUG: Fix generated f2py bootstrap script to work w... Message-ID: <20100923174442.8549042316@smtp1.rs.github.com> Branch: refs/heads/master Home: http://github.com/numpy/numpy Commit: 29cccb69fd9f4177d06b5e74e719cad11abb014b http://github.com/numpy/numpy/commit/29cccb69fd9f4177d06b5e74e719cad11abb014b Author: Charles Harris Date: 2010-09-22 (Wed, 22 Sep 2010) Changed paths: M numpy/f2py/setup.py Log Message: ----------- BUG: Fix generated f2py bootstrap script to work with python 3k. Thanks to Lisandro Dalcin. 
Commit: 8f6114b11efc2a01fe1158a444aeb788ddea6b01 http://github.com/numpy/numpy/commit/8f6114b11efc2a01fe1158a444aeb788ddea6b01 Author: Charles Harris Date: 2010-09-22 (Wed, 22 Sep 2010) Changed paths: M numpy/f2py/setup.py Log Message: ----------- BUG: Fix previous fix to numpy/g2py/setup.py. Commit: 50479e9a6f6d07e92fa2f16dbb8abfadf83c332d http://github.com/numpy/numpy/commit/50479e9a6f6d07e92fa2f16dbb8abfadf83c332d Author: Charles Harris Date: 2010-09-22 (Wed, 22 Sep 2010) Changed paths: M numpy/f2py/setup.py Log Message: ----------- FIX: And add missing \n for error message. From noreply at github.com Thu Sep 23 22:47:55 2010 From: noreply at github.com (noreply at github.com) Date: Thu, 23 Sep 2010 19:47:55 -0700 Subject: [Numpy-svn] [numpy/numpy] 346678: BUGLET: Need "\\n" in string template. Message-ID: <20100924024755.28151422D7@smtp1.rs.github.com> Branch: refs/heads/master Home: http://github.com/numpy/numpy Commit: 34667890e3927d79c5b716de9ecb17480198d9c1 http://github.com/numpy/numpy/commit/34667890e3927d79c5b716de9ecb17480198d9c1 Author: Charles Harris Date: 2010-09-23 (Thu, 23 Sep 2010) Changed paths: M numpy/f2py/setup.py Log Message: ----------- BUGLET: Need "\\n" in string template. From noreply at github.com Thu Sep 23 22:48:15 2010 From: noreply at github.com (noreply at github.com) Date: Thu, 23 Sep 2010 19:48:15 -0700 Subject: [Numpy-svn] [numpy/numpy] b288b5: BUGLET: Need "\\n" in string template. Message-ID: <20100924024815.48CA3422D7@smtp1.rs.github.com> Branch: refs/heads/maintenance/1.5.x Home: http://github.com/numpy/numpy Commit: b288b511bd2ab937167f9c08c3ff54c5c3a3966c http://github.com/numpy/numpy/commit/b288b511bd2ab937167f9c08c3ff54c5c3a3966c Author: Charles Harris Date: 2010-09-23 (Thu, 23 Sep 2010) Changed paths: M numpy/f2py/setup.py Log Message: ----------- BUGLET: Need "\\n" in string template. From noreply at github.com Sat Sep 25 09:49:45 2010 From: noreply at github.com (noreply at github.com) Date: Sat, 25 Sep 2010 06:49:45 -0700 Subject: [Numpy-svn] [numpy/numpy] a1d54a: ENH: Update absoft fortran flags for v11.0. Message-ID: <20100925134945.22005422E4@smtp1.rs.github.com> Branch: refs/heads/master Home: http://github.com/numpy/numpy Commit: a1d54a888bfa33d9f97881f78cafe92a39839b3f http://github.com/numpy/numpy/commit/a1d54a888bfa33d9f97881f78cafe92a39839b3f Author: Charles Harris Date: 2010-09-25 (Sat, 25 Sep 2010) Changed paths: M numpy/distutils/fcompiler/absoft.py Log Message: ----------- ENH: Update absoft fortran flags for v11.0. Commit: 75cebc1b71a161b18a45f291b97595c1f391ca46 http://github.com/numpy/numpy/commit/75cebc1b71a161b18a45f291b97595c1f391ca46 Author: Charles Harris Date: 2010-09-25 (Sat, 25 Sep 2010) Changed paths: M numpy/f2py/setup.py Log Message: ----------- Merge branch 'master' into fixes From noreply at github.com Sat Sep 25 09:49:55 2010 From: noreply at github.com (noreply at github.com) Date: Sat, 25 Sep 2010 06:49:55 -0700 Subject: [Numpy-svn] [numpy/numpy] 288178: ENH: Update absoft fortran flags for v11.0. Message-ID: <20100925134955.7ADAA422E4@smtp1.rs.github.com> Branch: refs/heads/maintenance/1.5.x Home: http://github.com/numpy/numpy Commit: 288178ee4a8939603177175e7ec0727bf80fb4f7 http://github.com/numpy/numpy/commit/288178ee4a8939603177175e7ec0727bf80fb4f7 Author: Charles Harris Date: 2010-09-25 (Sat, 25 Sep 2010) Changed paths: M numpy/distutils/fcompiler/absoft.py Log Message: ----------- ENH: Update absoft fortran flags for v11.0.
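
For reference, the user-visible behaviour targeted by the tofile/fromfile fixes above (r8701, r8705, r8707/r8708, ticket #1610) can be sketched as a short Python session. This is only an illustration, not part of any commit: it mirrors the test_fromfile_tofile_seeks regression test quoted earlier in this digest, assumes a NumPy build that already includes those revisions, and uses the era-appropriate tostring() API from the test rather than anything new.

    import tempfile
    import numpy as np

    # After the fix, the Python file object's position stays in sync with
    # the bytes actually read/written through the dup-ed C-level FILE*.
    f = tempfile.TemporaryFile()
    f.write(np.arange(255, dtype='u1').tostring())   # bytes 0..254

    f.seek(20)
    ret = np.fromfile(f, count=4, dtype='u1')        # consumes bytes 20..23
    assert list(ret) == [20, 21, 22, 23]
    assert f.tell() == 24                            # position advanced past the read

    f.seek(40)
    np.array([1, 2, 3], dtype='u1').tofile(f)        # writes 3 bytes at offset 40
    assert f.tell() == 43                            # position advanced past the write

    f.close()

Before r8701/r8707, the second and fourth assertions could fail on Python 3 because the duplicated FILE* handle started at offset 0 and its final position was never propagated back to the Python file object.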