From numpy-svn at scipy.org Mon Feb 2 00:20:23 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sun, 1 Feb 2009 23:20:23 -0600 (CST) Subject: [Numpy-svn] r6337 - in trunk/numpy/lib: . tests Message-ID: <20090202052023.C785EC7C012@scipy.org> Author: pierregm Date: 2009-02-01 23:20:17 -0600 (Sun, 01 Feb 2009) New Revision: 6337 Modified: trunk/numpy/lib/recfunctions.py trunk/numpy/lib/tests/test_recfunctions.py Log: * Added a 'autoconvert' option to stack_arrays. * Fixed 'stack_arrays' to work with fields with titles. Modified: trunk/numpy/lib/recfunctions.py =================================================================== --- trunk/numpy/lib/recfunctions.py 2009-01-30 00:26:44 UTC (rev 6336) +++ trunk/numpy/lib/recfunctions.py 2009-02-02 05:20:17 UTC (rev 6337) @@ -628,7 +628,8 @@ -def stack_arrays(arrays, defaults=None, usemask=True, asrecarray=False): +def stack_arrays(arrays, defaults=None, usemask=True, asrecarray=False, + autoconvert=False): """ Superposes arrays fields by fields @@ -644,6 +645,8 @@ asrecarray : {False, True}, optional Whether to return a recarray (or MaskedRecords if `usemask==True`) or just a flexible-type ndarray. + autoconvert : {False, True}, optional + Whether automatically cast the type of the field to the maximum. Examples -------- @@ -673,16 +676,24 @@ # dtype_l = ndtype[0] newdescr = dtype_l.descr - names = list(dtype_l.names or ()) or [''] + names = [_[0] for _ in newdescr] for dtype_n in ndtype[1:]: for descr in dtype_n.descr: name = descr[0] or '' if name not in names: newdescr.append(descr) names.append(name) - elif descr[1] != dict(newdescr)[name]: - raise TypeError("Incompatible type '%s' <> '%s'" %\ - (dict(newdescr)[name], descr[1])) + else: + nameidx = names.index(name) + current_descr = newdescr[nameidx] + if autoconvert: + if np.dtype(descr[1]) > np.dtype(current_descr[-1]): + current_descr = list(current_descr) + current_descr[-1] = descr[1] + newdescr[nameidx] = tuple(current_descr) + elif descr[1] != current_descr[-1]: + raise TypeError("Incompatible type '%s' <> '%s'" %\ + (dict(newdescr)[name], descr[1])) # Only one field: use concatenate if len(newdescr) == 1: output = ma.concatenate(seqarrays) Modified: trunk/numpy/lib/tests/test_recfunctions.py =================================================================== --- trunk/numpy/lib/tests/test_recfunctions.py 2009-01-30 00:26:44 UTC (rev 6336) +++ trunk/numpy/lib/tests/test_recfunctions.py 2009-02-02 05:20:17 UTC (rev 6337) @@ -485,7 +485,6 @@ assert_equal(test.mask, control.mask) - # def test_defaults(self): "Test defaults: no exception raised if keys of defaults are not fields." (_, _, _, z) = self.data @@ -503,7 +502,38 @@ assert_equal(test.mask, control.mask) + def test_autoconversion(self): + "Tests autoconversion" + adtype = [('A', int), ('B', bool), ('C', float)] + a = ma.array([(1, 2, 3)], mask=[(0, 1, 0)], dtype=adtype) + bdtype = [('A', int), ('B', float), ('C', float)] + b = ma.array([(4, 5, 6)], dtype=bdtype) + control = ma.array([(1, 2, 3), (4, 5, 6)], mask=[(0, 1, 0), (0, 0, 0)], + dtype=bdtype) + test = stack_arrays((a, b), autoconvert=True) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + try: + test = stack_arrays((a, b), autoconvert=False) + except TypeError: + pass + else: + raise AssertionError + + def test_checktitles(self): + "Test using titles in the field names" + adtype = [(('a', 'A'), int), (('b', 'B'), bool), (('c', 'C'), float)] + a = ma.array([(1, 2, 3)], mask=[(0, 1, 0)], dtype=adtype) + bdtype = [(('a', 'A'), int), (('b', 'B'), bool), (('c', 'C'), float)] + b = ma.array([(4, 5, 6)], dtype=bdtype) + test = stack_arrays((a, b)) + control = ma.array([(1, 2, 3), (4, 5, 6)], mask=[(0, 1, 0), (0, 0, 0)], + dtype=bdtype) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + + class TestJoinBy(TestCase): # def test_base(self): From numpy-svn at scipy.org Tue Feb 3 12:11:48 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Tue, 3 Feb 2009 11:11:48 -0600 (CST) Subject: [Numpy-svn] r6338 - in trunk/numpy/lib: . tests Message-ID: <20090203171148.2074DC7C015@scipy.org> Author: pierregm Date: 2009-02-03 11:11:44 -0600 (Tue, 03 Feb 2009) New Revision: 6338 Modified: trunk/numpy/lib/_iotools.py trunk/numpy/lib/tests/test_io.py Log: * Make sure that StringConverter.update sets the type to object if it can't define it. Modified: trunk/numpy/lib/_iotools.py =================================================================== --- trunk/numpy/lib/_iotools.py 2009-02-02 05:20:17 UTC (rev 6337) +++ trunk/numpy/lib/_iotools.py 2009-02-03 17:11:44 UTC (rev 6338) @@ -468,6 +468,11 @@ for val in missing_values: self.missing_values.add(val) else: - self.missing_values = [] # Update the type - self.type = self._getsubdtype(func('0')) + self.missing_values = [] + # Update the type + try: + tester = func('0') + except ValueError: + tester = None + self.type = self._getsubdtype(tester) Modified: trunk/numpy/lib/tests/test_io.py =================================================================== --- trunk/numpy/lib/tests/test_io.py 2009-02-02 05:20:17 UTC (rev 6337) +++ trunk/numpy/lib/tests/test_io.py 2009-02-03 17:11:44 UTC (rev 6338) @@ -535,7 +535,18 @@ dtype=[('A', '|S4'), ('C', int), ('D', float)]) assert_equal(test, control) + def test_converters_cornercases(self): + "Test the conversion to datetime." + from datetime import datetime + converter = {'date':lambda s: datetime.strptime(s,'%Y-%m-%d %H:%M:%SZ')} + data = StringIO.StringIO('2009-02-03 12:00:00Z, 72214.0') + test = np.ndfromtxt(data, delimiter=',', dtype=None, + names=['date','stid'], converters=converter) + control = np.array((datetime(2009,02,03,12,0), 72214.), + dtype=[('date', np.object_), ('stid', float)]) + assert_equal(test, control) + def test_unused_converter(self): "Test whether unused converters are forgotten" data = StringIO.StringIO("1 21\n 3 42\n") From numpy-svn at scipy.org Wed Feb 4 15:52:40 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Wed, 4 Feb 2009 14:52:40 -0600 (CST) Subject: [Numpy-svn] r6339 - in trunk/numpy: lib/tests ma Message-ID: <20090204205240.ED31AC7C026@scipy.org> Author: pierregm Date: 2009-02-04 14:52:36 -0600 (Wed, 04 Feb 2009) New Revision: 6339 Modified: trunk/numpy/lib/tests/test__iotools.py trunk/numpy/ma/core.py Log: * test__iotools : prevent test_upgrademapper if dateutil is not installed * MaskedArray.__rmul__ : switch to multiply(self, other) Modified: trunk/numpy/lib/tests/test__iotools.py =================================================================== --- trunk/numpy/lib/tests/test__iotools.py 2009-02-03 17:11:44 UTC (rev 6338) +++ trunk/numpy/lib/tests/test__iotools.py 2009-02-04 20:52:36 UTC (rev 6339) @@ -130,11 +130,14 @@ # def test_upgrademapper(self): "Tests updatemapper" - import dateutil.parser - import datetime - dateparser = dateutil.parser.parse - StringConverter.upgrade_mapper(dateparser, datetime.date(2000,1,1)) - convert = StringConverter(dateparser, datetime.date(2000, 1, 1)) - test = convert('2001-01-01') - assert_equal(test, datetime.datetime(2001, 01, 01, 00, 00, 00)) + try: + import dateutil.parser + import datetime + dateparser = dateutil.parser.parse + StringConverter.upgrade_mapper(dateparser, datetime.date(2000,1,1)) + convert = StringConverter(dateparser, datetime.date(2000, 1, 1)) + test = convert('2001-01-01') + assert_equal(test, datetime.datetime(2001, 01, 01, 00, 00, 00)) + except ImportError: + pass Modified: trunk/numpy/ma/core.py =================================================================== --- trunk/numpy/ma/core.py 2009-02-03 17:11:44 UTC (rev 6338) +++ trunk/numpy/ma/core.py 2009-02-04 20:52:36 UTC (rev 6339) @@ -2466,7 +2466,7 @@ # def __rmul__(self, other): "Multiply other by self, and return a new masked array." - return multiply(other, self) + return multiply(self, other) # def __div__(self, other): "Divide other into self, and return a new masked array." From numpy-svn at scipy.org Wed Feb 4 16:53:46 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Wed, 4 Feb 2009 15:53:46 -0600 (CST) Subject: [Numpy-svn] r6340 - trunk/numpy/lib/tests Message-ID: <20090204215346.54698C7C025@scipy.org> Author: pierregm Date: 2009-02-04 15:53:05 -0600 (Wed, 04 Feb 2009) New Revision: 6340 Modified: trunk/numpy/lib/tests/test__iotools.py Log: test_upgrademapper : got rid of the dateutil import Modified: trunk/numpy/lib/tests/test__iotools.py =================================================================== --- trunk/numpy/lib/tests/test__iotools.py 2009-02-04 20:52:36 UTC (rev 6339) +++ trunk/numpy/lib/tests/test__iotools.py 2009-02-04 21:53:05 UTC (rev 6340) @@ -130,14 +130,15 @@ # def test_upgrademapper(self): "Tests updatemapper" - try: - import dateutil.parser - import datetime - dateparser = dateutil.parser.parse - StringConverter.upgrade_mapper(dateparser, datetime.date(2000,1,1)) - convert = StringConverter(dateparser, datetime.date(2000, 1, 1)) - test = convert('2001-01-01') - assert_equal(test, datetime.datetime(2001, 01, 01, 00, 00, 00)) - except ImportError: - pass + from datetime import date + import time + dateparser = lambda s : date(*time.strptime(s, "%Y-%m-%d")[:3]) + StringConverter.upgrade_mapper(dateparser, date(2000,1,1)) + convert = StringConverter(dateparser, date(2000, 1, 1)) + test = convert('2001-01-01') + assert_equal(test, date(2001, 01, 01)) + test = convert('2009-01-01') + assert_equal(test, date(2009, 01, 01)) + test = convert('') + assert_equal(test, date(2000, 01, 01)) From numpy-svn at scipy.org Wed Feb 4 23:31:55 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Wed, 4 Feb 2009 22:31:55 -0600 (CST) Subject: [Numpy-svn] r6341 - in trunk/numpy/lib: . tests Message-ID: <20090205043155.BBD3BC7C02A@scipy.org> Author: pierregm Date: 2009-02-04 22:31:51 -0600 (Wed, 04 Feb 2009) New Revision: 6341 Modified: trunk/numpy/lib/_iotools.py trunk/numpy/lib/io.py trunk/numpy/lib/tests/test__iotools.py trunk/numpy/lib/tests/test_io.py Log: * genfromtxt : Fixed when a dtype involving objects is explicitly given. Raise a NotImplementedError if the dtype is nested. * _iotools : make sure StringConverter gets properly initiated when a function returning a np.object is used as input parameter. Modified: trunk/numpy/lib/_iotools.py =================================================================== --- trunk/numpy/lib/_iotools.py 2009-02-04 21:53:05 UTC (rev 6340) +++ trunk/numpy/lib/_iotools.py 2009-02-05 04:31:51 UTC (rev 6341) @@ -54,6 +54,16 @@ return fhd +def has_nested_fields(ndtype): + """ + Returns whether one or several fields of a structured array are nested. + """ + for name in ndtype.names or (): + if ndtype[name].names: + return True + return False + + def flatten_dtype(ndtype): """ Unpack a structured data-type. @@ -71,7 +81,6 @@ return types - class LineSplitter: """ Defines a function to split a string at a given delimiter or at given places. @@ -377,11 +386,17 @@ default = None ttype = self._getsubdtype(default) # Set the status according to the dtype + _status = -1 for (i, (deftype, func, default_def)) in enumerate(self._mapper): if np.issubdtype(ttype, deftype): - self._status = i + _status = i self.default = default or default_def break + if _status == -1: + # We never found a match in the _mapper... + _status = 0 + self.default = default + self._status = _status # If the input was a dtype, set the function to the last we saw if self.func is None: self.func = func Modified: trunk/numpy/lib/io.py =================================================================== --- trunk/numpy/lib/io.py 2009-02-04 21:53:05 UTC (rev 6340) +++ trunk/numpy/lib/io.py 2009-02-05 04:31:51 UTC (rev 6341) @@ -17,7 +17,7 @@ from _compiled_base import packbits, unpackbits from _iotools import LineSplitter, NameValidator, StringConverter, \ - _is_string_like, flatten_dtype + _is_string_like, has_nested_fields, flatten_dtype _file = file _string_like = _is_string_like @@ -703,6 +703,11 @@ there must not be any header in the file (else a :exc:ValueError exception is raised). + Warnings + -------- + * Individual values are not stripped of spaces by default. + When using a custom converter, make sure the function does remove spaces. + See Also -------- numpy.loadtxt : equivalent function when no data is missing. @@ -918,8 +923,15 @@ # First, create the array using a flattened dtype: # [('a', int), ('b1', int), ('b2', float)] # Then, view the array using the specified dtype. - rows = np.array(data, dtype=[('', t) for t in flatdtypes]) - output = rows.view(dtype) + if has_nested_fields(dtype): + if 'O' in (_.char for _ in flatdtypes): + errmsg = "Nested fields involving objects "\ + "are not supported..." + raise NotImplementedError(errmsg) + rows = np.array(data, dtype=[('', t) for t in flatdtypes]) + output = rows.view(dtype) + else: + output = np.array(data, dtype=dtype) # Now, process the rowmasks the same way if usemask: rowmasks = np.array(masks, Modified: trunk/numpy/lib/tests/test__iotools.py =================================================================== --- trunk/numpy/lib/tests/test__iotools.py 2009-02-04 21:53:05 UTC (rev 6340) +++ trunk/numpy/lib/tests/test__iotools.py 2009-02-05 04:31:51 UTC (rev 6341) @@ -2,7 +2,8 @@ import StringIO import numpy as np -from numpy.lib._iotools import LineSplitter, NameValidator, StringConverter +from numpy.lib._iotools import LineSplitter, NameValidator, StringConverter,\ + has_nested_fields from numpy.testing import * class TestLineSplitter(TestCase): @@ -142,3 +143,17 @@ test = convert('') assert_equal(test, date(2000, 01, 01)) + +#------------------------------------------------------------------------------- + +class TestMiscFunctions(TestCase): + # + def test_has_nested_dtype(self): + "Test has_nested_dtype" + ndtype = np.dtype(np.float) + assert_equal(has_nested_fields(ndtype), False) + ndtype = np.dtype([('A', '|S3'), ('B', float)]) + assert_equal(has_nested_fields(ndtype), False) + ndtype = np.dtype([('A', int), ('B', [('BA', float), ('BB', '|S1')])]) + assert_equal(has_nested_fields(ndtype), True) + Modified: trunk/numpy/lib/tests/test_io.py =================================================================== --- trunk/numpy/lib/tests/test_io.py 2009-02-04 21:53:05 UTC (rev 6340) +++ trunk/numpy/lib/tests/test_io.py 2009-02-05 04:31:51 UTC (rev 6341) @@ -573,6 +573,35 @@ assert_equal(test, control) + def test_dtype_with_object(self): + "Test using an explicit dtype qith an object" + from datetime import date + import time + data = """ + 1; 2001-01-01 + 2; 2002-01-31 + """ + ndtype = [('idx', int), ('code', np.object)] + func = lambda s: date(*(time.strptime(s.strip(), "%Y-%m-%d")[:3])) + converters = {1: func} + test = np.genfromtxt(StringIO.StringIO(data), delimiter=";", dtype=ndtype, + converters=converters) + control = np.array([(1, date(2001,1,1)), (2, date(2002,1,31))], + dtype=ndtype) + assert_equal(test, control) + # + ndtype = [('nest', [('idx', int), ('code', np.object)])] + try: + test = np.genfromtxt(StringIO.StringIO(data), delimiter=";", + dtype=ndtype, converters=converters) + except NotImplementedError: + pass + else: + errmsg = "Nested dtype involving objects should be supported." + raise AssertionError(errmsg) + + + def test_spacedelimiter(self): "Test space delimiter" data = StringIO.StringIO("1 2 3 4 5\n6 7 8 9 10") From numpy-svn at scipy.org Thu Feb 5 15:11:45 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Thu, 5 Feb 2009 14:11:45 -0600 (CST) Subject: [Numpy-svn] r6342 - in trunk/numpy/testing: . tests Message-ID: <20090205201145.46F8FC7C03A@scipy.org> Author: alan.mcintyre Date: 2009-02-05 14:11:40 -0600 (Thu, 05 Feb 2009) New Revision: 6342 Added: trunk/numpy/testing/tests/test_decorators.py Modified: trunk/numpy/testing/decorators.py Log: Issue #957: - Fix problems with test decorators when used on test generators. - The skip/fail arguments for skipif and knownfailureif can now be either a bool or a callable that returns a bool. - Added tests for the test decorators. Modified: trunk/numpy/testing/decorators.py =================================================================== --- trunk/numpy/testing/decorators.py 2009-02-05 04:31:51 UTC (rev 6341) +++ trunk/numpy/testing/decorators.py 2009-02-05 20:11:40 UTC (rev 6342) @@ -51,8 +51,11 @@ Parameters --------- - skip_condition : bool - Flag to determine whether to skip test (True) or not (False) + skip_condition : bool or callable. + Flag to determine whether to skip test. If the condition is a + callable, it is used at runtime to dynamically make the decision. This + is useful for tests that may require costly imports, to delay the cost + until the test suite is actually executed. msg : string Message to give on raising a SkipTest exception @@ -69,28 +72,66 @@ decorator with the nose.tools.make_decorator function in order to transmit function name, and various other metadata. ''' - if msg is None: - msg = 'Test skipped due to test condition' + def skip_decorator(f): # Local import to avoid a hard nose dependency and only incur the # import time overhead at actual test-time. import nose - def skipper(*args, **kwargs): - if skip_condition: - raise nose.SkipTest, msg + + # Allow for both boolean or callable skip conditions. + if callable(skip_condition): + skip_val = lambda : skip_condition() + else: + skip_val = lambda : skip_condition + + def get_msg(func,msg=None): + """Skip message with information about function being skipped.""" + if msg is None: + out = 'Test skipped due to test condition' + else: + out = '\n'+msg + + return "Skipping test: %s%s" % (func.__name__,out) + + # We need to define *two* skippers because Python doesn't allow both + # return with value and yield inside the same function. + def skipper_func(*args, **kwargs): + """Skipper for normal test functions.""" + if skip_val(): + raise nose.SkipTest(get_msg(f,msg)) else: return f(*args, **kwargs) + + def skipper_gen(*args, **kwargs): + """Skipper for test generators.""" + if skip_val(): + raise nose.SkipTest(get_msg(f,msg)) + else: + for x in f(*args, **kwargs): + yield x + + # Choose the right skipper to use when building the actual decorator. + if nose.util.isgenerator(f): + skipper = skipper_gen + else: + skipper = skipper_func + return nose.tools.make_decorator(f)(skipper) + return skip_decorator -def knownfailureif(skip_condition, msg=None): - ''' Make function raise KnownFailureTest exception if skip_condition is true +def knownfailureif(fail_condition, msg=None): + ''' Make function raise KnownFailureTest exception if fail_condition is true + Parameters --------- - skip_condition : bool + fail_condition : bool or callable. Flag to determine whether to mark test as known failure (True) - or not (False) + or not (False). If the condition is a callable, it is used at + runtime to dynamically make the decision. This is useful for + tests that may require costly imports, to delay the cost + until the test suite is actually executed. msg : string Message to give on raising a KnownFailureTest exception @@ -109,15 +150,23 @@ ''' if msg is None: msg = 'Test skipped due to known failure' - def skip_decorator(f): + + # Allow for both boolean or callable known failure conditions. + if callable(fail_condition): + fail_val = lambda : fail_condition() + else: + fail_val = lambda : fail_condition + + def knownfail_decorator(f): # Local import to avoid a hard nose dependency and only incur the # import time overhead at actual test-time. import nose from noseclasses import KnownFailureTest - def skipper(*args, **kwargs): - if skip_condition: + def knownfailer(*args, **kwargs): + if fail_val(): raise KnownFailureTest, msg else: return f(*args, **kwargs) - return nose.tools.make_decorator(f)(skipper) - return skip_decorator + return nose.tools.make_decorator(f)(knownfailer) + + return knownfail_decorator Added: trunk/numpy/testing/tests/test_decorators.py =================================================================== --- trunk/numpy/testing/tests/test_decorators.py 2009-02-05 04:31:51 UTC (rev 6341) +++ trunk/numpy/testing/tests/test_decorators.py 2009-02-05 20:11:40 UTC (rev 6342) @@ -0,0 +1,156 @@ +import numpy as np +from numpy.testing import * +from numpy.testing.noseclasses import KnownFailureTest +import nose + +def test_slow(): + @dec.slow + def slow_func(x,y,z): + pass + + assert(slow_func.slow) + +def test_setastest(): + @dec.setastest() + def f_default(a): + pass + + @dec.setastest(True) + def f_istest(a): + pass + + @dec.setastest(False) + def f_isnottest(a): + pass + + assert(f_default.__test__) + assert(f_istest.__test__) + assert(not f_isnottest.__test__) + +class DidntSkipException(Exception): + pass + +def test_skip_functions_hardcoded(): + @dec.skipif(True) + def f1(x): + raise DidntSkipException + + try: + f1('a') + except DidntSkipException: + raise Exception('Failed to skip') + except nose.SkipTest: + pass + + @dec.skipif(False) + def f2(x): + raise DidntSkipException + + try: + f2('a') + except DidntSkipException: + pass + except nose.SkipTest: + raise Exception('Skipped when not expected to') + + +def test_skip_functions_callable(): + def skip_tester(): + return skip_flag == 'skip me!' + + @dec.skipif(skip_tester) + def f1(x): + raise DidntSkipException + + try: + skip_flag = 'skip me!' + f1('a') + except DidntSkipException: + raise Exception('Failed to skip') + except nose.SkipTest: + pass + + @dec.skipif(skip_tester) + def f2(x): + raise DidntSkipException + + try: + skip_flag = 'five is right out!' + f2('a') + except DidntSkipException: + pass + except nose.SkipTest: + raise Exception('Skipped when not expected to') + + +def test_skip_generators_hardcoded(): + @dec.knownfailureif(True, "This test is known to fail") + def g1(x): + for i in xrange(x): + yield i + + try: + for j in g1(10): + pass + except KnownFailureTest: + pass + else: + raise Exception('Failed to mark as known failure') + + + @dec.knownfailureif(False, "This test is NOT known to fail") + def g2(x): + for i in xrange(x): + yield i + raise DidntSkipException('FAIL') + + try: + for j in g2(10): + pass + except KnownFailureTest: + raise Exception('Marked incorretly as known failure') + except DidntSkipException: + pass + + +def test_skip_generators_callable(): + def skip_tester(): + return skip_flag == 'skip me!' + + @dec.knownfailureif(skip_tester, "This test is known to fail") + def g1(x): + for i in xrange(x): + yield i + + try: + skip_flag = 'skip me!' + for j in g1(10): + pass + except KnownFailureTest: + pass + else: + raise Exception('Failed to mark as known failure') + + + @dec.knownfailureif(skip_tester, "This test is NOT known to fail") + def g2(x): + for i in xrange(x): + yield i + raise DidntSkipException('FAIL') + + try: + skip_flag = 'do not skip' + for j in g2(10): + pass + except KnownFailureTest: + raise Exception('Marked incorretly as known failure') + except DidntSkipException: + pass + + +if __name__ == '__main__': + run_module_suite() + + + + From numpy-svn at scipy.org Thu Feb 5 19:27:32 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Thu, 5 Feb 2009 18:27:32 -0600 (CST) Subject: [Numpy-svn] r6343 - trunk/doc/sphinxext Message-ID: <20090206002732.8EADEC7C074@scipy.org> Author: ptvirtan Date: 2009-02-05 18:27:08 -0600 (Thu, 05 Feb 2009) New Revision: 6343 Modified: trunk/doc/sphinxext/docscrape.py trunk/doc/sphinxext/docscrape_sphinx.py trunk/doc/sphinxext/numpydoc.py Log: doc/numpydoc: work better together with Sphinx's config option Modified: trunk/doc/sphinxext/docscrape.py =================================================================== --- trunk/doc/sphinxext/docscrape.py 2009-02-05 20:11:40 UTC (rev 6342) +++ trunk/doc/sphinxext/docscrape.py 2009-02-06 00:27:08 UTC (rev 6343) @@ -406,11 +406,13 @@ class FunctionDoc(NumpyDocString): - def __init__(self, func, role='func'): + def __init__(self, func, role='func', doc=None): self._f = func self._role = role # e.g. "func" or "meth" + if doc is None: + doc = inspect.getdoc(func) or '' try: - NumpyDocString.__init__(self,inspect.getdoc(func) or '') + NumpyDocString.__init__(self, doc) except ValueError, e: print '*'*78 print "ERROR: '%s' while parsing `%s`" % (e, self._f) @@ -459,7 +461,7 @@ class ClassDoc(NumpyDocString): - def __init__(self,cls,modulename='',func_doc=FunctionDoc): + def __init__(self,cls,modulename='',func_doc=FunctionDoc,doc=None): if not inspect.isclass(cls): raise ValueError("Initialise using a class. Got %r" % cls) self._cls = cls @@ -470,8 +472,11 @@ self._name = cls.__name__ self._func_doc = func_doc - NumpyDocString.__init__(self, pydoc.getdoc(cls)) + if doc is None: + doc = pydoc.getdoc(cls) + NumpyDocString.__init__(self, doc) + @property def methods(self): return [name for name,func in inspect.getmembers(self._cls) Modified: trunk/doc/sphinxext/docscrape_sphinx.py =================================================================== --- trunk/doc/sphinxext/docscrape_sphinx.py 2009-02-05 20:11:40 UTC (rev 6342) +++ trunk/doc/sphinxext/docscrape_sphinx.py 2009-02-06 00:27:08 UTC (rev 6343) @@ -115,7 +115,7 @@ class SphinxClassDoc(SphinxDocString, ClassDoc): pass -def get_doc_object(obj, what=None): +def get_doc_object(obj, what=None, doc=None): if what is None: if inspect.isclass(obj): what = 'class' @@ -126,8 +126,11 @@ else: what = 'object' if what == 'class': - return SphinxClassDoc(obj, '', func_doc=SphinxFunctionDoc) + return SphinxClassDoc(obj, '', func_doc=SphinxFunctionDoc, doc=doc) elif what in ('function', 'method'): - return SphinxFunctionDoc(obj, '') + return SphinxFunctionDoc(obj, '', doc=doc) else: - return SphinxDocString(pydoc.getdoc(obj)) + if doc is None: + doc = pydoc.getdoc(obj) + return SphinxDocString(doc) + Modified: trunk/doc/sphinxext/numpydoc.py =================================================================== --- trunk/doc/sphinxext/numpydoc.py 2009-02-05 20:11:40 UTC (rev 6342) +++ trunk/doc/sphinxext/numpydoc.py 2009-02-06 00:27:08 UTC (rev 6343) @@ -28,7 +28,7 @@ re.I|re.S) lines[:] = title_re.sub('', "\n".join(lines)).split("\n") else: - doc = get_doc_object(obj, what) + doc = get_doc_object(obj, what, "\n".join(lines)) lines[:] = str(doc).split("\n") if app.config.numpydoc_edit_link and hasattr(obj, '__name__') and \ From numpy-svn at scipy.org Thu Feb 5 19:54:31 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Thu, 5 Feb 2009 18:54:31 -0600 (CST) Subject: [Numpy-svn] r6344 - trunk/doc/source/reference Message-ID: <20090206005431.353AEC7C074@scipy.org> Author: ptvirtan Date: 2009-02-05 18:51:41 -0600 (Thu, 05 Feb 2009) New Revision: 6344 Modified: trunk/doc/source/reference/arrays.classes.rst trunk/doc/source/reference/arrays.rst trunk/doc/source/reference/maskedarray.rst Log: doc: Move maskedarray docs upward in TOC Modified: trunk/doc/source/reference/arrays.classes.rst =================================================================== --- trunk/doc/source/reference/arrays.classes.rst 2009-02-06 00:27:08 UTC (rev 6343) +++ trunk/doc/source/reference/arrays.classes.rst 2009-02-06 00:51:41 UTC (rev 6344) @@ -263,24 +263,6 @@ .. seealso:: :ref:`maskedarray` -Masked arrays are arrays that may have missing or invalid entries. -The :mod:`numpy.ma` module provides a nearly work-alike replacement for numpy -that supports data arrays with masks. - - -.. XXX: masked array documentation should be improved - -.. currentmodule:: numpy - -.. index:: - single: masked arrays - -.. toctree:: - :maxdepth: 2 - - maskedarray - - Standard container class ======================== Modified: trunk/doc/source/reference/arrays.rst =================================================================== --- trunk/doc/source/reference/arrays.rst 2009-02-06 00:27:08 UTC (rev 6343) +++ trunk/doc/source/reference/arrays.rst 2009-02-06 00:51:41 UTC (rev 6344) @@ -43,4 +43,5 @@ arrays.dtypes arrays.indexing arrays.classes + maskedarray arrays.interface Modified: trunk/doc/source/reference/maskedarray.rst =================================================================== --- trunk/doc/source/reference/maskedarray.rst 2009-02-06 00:27:08 UTC (rev 6343) +++ trunk/doc/source/reference/maskedarray.rst 2009-02-06 00:51:41 UTC (rev 6344) @@ -1,14 +1,19 @@ .. _maskedarray: +************* +Masked arrays +************* Masked arrays are arrays that may have missing or invalid entries. The :mod:`numpy.ma` module provides a nearly work-alike replacement for numpy that supports data arrays with masks. +.. index:: + single: masked arrays .. toctree:: :maxdepth: 2 maskedarray.generic maskedarray.baseclass - routines.ma \ No newline at end of file + routines.ma From numpy-svn at scipy.org Fri Feb 6 01:26:06 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Fri, 6 Feb 2009 00:26:06 -0600 (CST) Subject: [Numpy-svn] r6345 - trunk/numpy/lib Message-ID: <20090206062606.21445C7C0E4@scipy.org> Author: oliphant Date: 2009-02-06 00:25:50 -0600 (Fri, 06 Feb 2009) New Revision: 6345 Modified: trunk/numpy/lib/io.py Log: Avoid re-creating the sequence when there is only one field in the regular expression. Modified: trunk/numpy/lib/io.py =================================================================== --- trunk/numpy/lib/io.py 2009-02-06 00:51:41 UTC (rev 6344) +++ trunk/numpy/lib/io.py 2009-02-06 06:25:50 UTC (rev 6345) @@ -604,10 +604,15 @@ seq = regexp.findall(file.read()) if seq and not isinstance(seq[0], tuple): - # make sure np.array doesn't interpret strings as binary data - # by always producing a list of tuples - seq = [(x,) for x in seq] - output = np.array(seq, dtype=dtype) + # Only one group is in the regexp. + # Create the new array as a single data-type and then + # re-interpret as a single-field structured array. + newdtype = np.dtype(dtype[dtype.names[0]]) + output = np.array(seq, dtype=newdtype) + output.dtype = dtype + else: + output = np.array(seq, dtype=dtype) + return output From numpy-svn at scipy.org Fri Feb 6 01:31:13 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Fri, 6 Feb 2009 00:31:13 -0600 (CST) Subject: [Numpy-svn] r6346 - trunk/numpy/lib/tests Message-ID: <20090206063113.562EFC7C0D1@scipy.org> Author: oliphant Date: 2009-02-06 00:31:11 -0600 (Fri, 06 Feb 2009) New Revision: 6346 Modified: trunk/numpy/lib/tests/test_io.py Log: Removed an unneccessary return statement in a unit test. Modified: trunk/numpy/lib/tests/test_io.py =================================================================== --- trunk/numpy/lib/tests/test_io.py 2009-02-06 06:25:50 UTC (rev 6345) +++ trunk/numpy/lib/tests/test_io.py 2009-02-06 06:31:11 UTC (rev 6346) @@ -336,7 +336,6 @@ assert_array_equal(x, a) def test_record_2(self): - return # pass this test until #736 is resolved c = StringIO.StringIO() c.write('1312 foo\n1534 bar\n4444 qux') c.seek(0) From numpy-svn at scipy.org Fri Feb 6 09:37:09 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Fri, 6 Feb 2009 08:37:09 -0600 (CST) Subject: [Numpy-svn] r6347 - trunk/numpy/distutils Message-ID: <20090206143709.A73E1C7C011@scipy.org> Author: pearu Date: 2009-02-06 08:36:58 -0600 (Fri, 06 Feb 2009) New Revision: 6347 Modified: trunk/numpy/distutils/system_info.py Log: Fix a bug: python system_info.py failed because _pkg_config_info defined section to be None. Modified: trunk/numpy/distutils/system_info.py =================================================================== --- trunk/numpy/distutils/system_info.py 2009-02-06 06:31:11 UTC (rev 6346) +++ trunk/numpy/distutils/system_info.py 2009-02-06 14:36:58 UTC (rev 6347) @@ -400,8 +400,9 @@ self.files.extend(get_standard_file('.numpy-site.cfg')) self.files.extend(get_standard_file('site.cfg')) self.parse_config_files() - self.search_static_first = self.cp.getboolean(self.section, - 'search_static_first') + if self.section is not None: + self.search_static_first = self.cp.getboolean(self.section, + 'search_static_first') assert isinstance(self.search_static_first, int) def parse_config_files(self): From numpy-svn at scipy.org Fri Feb 6 09:39:01 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Fri, 6 Feb 2009 08:39:01 -0600 (CST) Subject: [Numpy-svn] r6348 - trunk/numpy/distutils Message-ID: <20090206143901.8AC8BC7C011@scipy.org> Author: pearu Date: 2009-02-06 08:38:57 -0600 (Fri, 06 Feb 2009) New Revision: 6348 Modified: trunk/numpy/distutils/system_info.py Log: Fix another bug, see last commit. Modified: trunk/numpy/distutils/system_info.py =================================================================== --- trunk/numpy/distutils/system_info.py 2009-02-06 14:36:58 UTC (rev 6347) +++ trunk/numpy/distutils/system_info.py 2009-02-06 14:38:57 UTC (rev 6348) @@ -408,7 +408,8 @@ def parse_config_files(self): self.cp.read(self.files) if not self.cp.has_section(self.section): - self.cp.add_section(self.section) + if self.section is not None: + self.cp.add_section(self.section) def calc_libraries_info(self): libs = self.get_libraries() From numpy-svn at scipy.org Sat Feb 7 04:19:14 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sat, 7 Feb 2009 03:19:14 -0600 (CST) Subject: [Numpy-svn] r6349 - in trunk/numpy/ma: . tests Message-ID: <20090207091914.05A4FC7C00B@scipy.org> Author: pierregm Date: 2009-02-07 03:19:12 -0600 (Sat, 07 Feb 2009) New Revision: 6349 Modified: trunk/numpy/ma/core.py trunk/numpy/ma/tests/test_core.py Log: MaskedArray.resize : systematically raise a TypeError exception, as a masked array never owns its data MaskedIterator : fixed to allow .flat on masked matrices Modified: trunk/numpy/ma/core.py =================================================================== --- trunk/numpy/ma/core.py 2009-02-06 14:38:57 UTC (rev 6348) +++ trunk/numpy/ma/core.py 2009-02-07 09:19:12 UTC (rev 6349) @@ -1478,9 +1478,8 @@ "Define an interator." def __init__(self, ma): self.ma = ma - self.ma1d = ma.ravel() - self.ma_iter = np.asarray(ma).flat - + self.dataiter = ma._data.flat + # if ma._mask is nomask: self.maskiter = None else: @@ -1490,15 +1489,23 @@ return self def __getitem__(self, indx): - return self.ma1d.__getitem__(indx) + result = self.dataiter.__getitem__(indx).view(type(self.ma)) + if self.maskiter is not None: + _mask = self.maskiter.__getitem__(indx) + _mask.shape = result.shape + result._mask = _mask + return result ### This won't work is ravel makes a copy def __setitem__(self, index, value): - self.ma1d[index] = value + self.dataiter[index] = getdata(value) + if self.maskiter is not None: + self.maskiter[index] = getmaskarray(value) +# self.ma1d[index] = value def next(self): "Returns the next element of the iterator." - d = self.ma_iter.next() + d = self.dataiter.next() if self.maskiter is not None and self.maskiter.next(): d = masked return d @@ -2707,25 +2714,24 @@ return result # def resize(self, newshape, refcheck=True, order=False): - """Attempt to modify the size and the shape of the array in place. + """ + Change shape and size of array in-place. - The array must own its own memory and not be referenced by - other arrays. - - Returns - ------- - None. - """ - try: - self._data.resize(newshape, refcheck, order) - if self.mask is not nomask: - self._mask.resize(newshape, refcheck, order) - except ValueError: - raise ValueError("Cannot resize an array that has been referenced " - "or is referencing another array in this way.\n" - "Use the resize function.") - return None + # Note : the 'order' keyword looks broken, let's just drop it +# try: +# ndarray.resize(self, newshape, refcheck=refcheck) +# if self.mask is not nomask: +# self._mask.resize(newshape, refcheck=refcheck) +# except ValueError: +# raise ValueError("Cannot resize an array that has been referenced " +# "or is referencing another array in this way.\n" +# "Use the numpy.ma.resize function.") +# return None + errmsg = "A masked array does not own its data "\ + "and therefore cannot be resized.\n" \ + "Use the numpy.ma.resize function instead." + raise ValueError(errmsg) # def put(self, indices, values, mode='raise'): """ Modified: trunk/numpy/ma/tests/test_core.py =================================================================== --- trunk/numpy/ma/tests/test_core.py 2009-02-06 14:38:57 UTC (rev 6348) +++ trunk/numpy/ma/tests/test_core.py 2009-02-07 09:19:12 UTC (rev 6349) @@ -1122,6 +1122,17 @@ a[1] = 1 assert_equal(a._mask, zeros(10)) + def test_flat(self): + "Test flat on masked_matrices" + test = ma.array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1]) + test.flat = ma.array([3, 2, 1], mask=[1, 0, 0]) + control = ma.array(np.matrix([[3, 2, 1]]), mask=[1, 0, 0]) + assert_equal(test, control) + # + test = ma.array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1]) + testflat = test.flat + testflat[:] = testflat[[2, 1, 0]] + assert_equal(test, control) #------------------------------------------------------------------------------ From numpy-svn at scipy.org Sat Feb 7 13:51:33 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sat, 7 Feb 2009 12:51:33 -0600 (CST) Subject: [Numpy-svn] r6350 - trunk/numpy/ma/tests Message-ID: <20090207185133.B38DBC7C016@scipy.org> Author: pierregm Date: 2009-02-07 12:51:31 -0600 (Sat, 07 Feb 2009) New Revision: 6350 Modified: trunk/numpy/ma/tests/test_core.py Log: Modified: trunk/numpy/ma/tests/test_core.py =================================================================== --- trunk/numpy/ma/tests/test_core.py 2009-02-07 09:19:12 UTC (rev 6349) +++ trunk/numpy/ma/tests/test_core.py 2009-02-07 18:51:31 UTC (rev 6350) @@ -1124,12 +1124,12 @@ def test_flat(self): "Test flat on masked_matrices" - test = ma.array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1]) - test.flat = ma.array([3, 2, 1], mask=[1, 0, 0]) - control = ma.array(np.matrix([[3, 2, 1]]), mask=[1, 0, 0]) + test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1]) + test.flat = masked_array([3, 2, 1], mask=[1, 0, 0]) + control = masked_array(np.matrix([[3, 2, 1]]), mask=[1, 0, 0]) assert_equal(test, control) # - test = ma.array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1]) + test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1]) testflat = test.flat testflat[:] = testflat[[2, 1, 0]] assert_equal(test, control) From numpy-svn at scipy.org Mon Feb 9 15:18:37 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Mon, 9 Feb 2009 14:18:37 -0600 (CST) Subject: [Numpy-svn] r6351 - in trunk/numpy/core: src tests Message-ID: <20090209201837.74464C7C017@scipy.org> Author: ptvirtan Date: 2009-02-09 14:18:08 -0600 (Mon, 09 Feb 2009) New Revision: 6351 Modified: trunk/numpy/core/src/scalarmathmodule.c.src trunk/numpy/core/tests/test_regression.py Log: Fix #955: fix errobj leak in scalarmath floating point error handling Modified: trunk/numpy/core/src/scalarmathmodule.c.src =================================================================== --- trunk/numpy/core/src/scalarmathmodule.c.src 2009-02-07 18:51:31 UTC (rev 6350) +++ trunk/numpy/core/src/scalarmathmodule.c.src 2009-02-09 20:18:08 UTC (rev 6351) @@ -636,8 +636,11 @@ &errobj) < 0) return NULL; first = 1; - if (PyUFunc_handlefperr(errmask, errobj, retstatus, &first)) + if (PyUFunc_handlefperr(errmask, errobj, retstatus, &first)) { + Py_XDECREF(errobj); return NULL; + } + Py_XDECREF(errobj); } #endif @@ -736,8 +739,11 @@ &errobj) < 0) return NULL; first = 1; - if (PyUFunc_handlefperr(errmask, errobj, retstatus, &first)) + if (PyUFunc_handlefperr(errmask, errobj, retstatus, &first)) { + Py_XDECREF(errobj); return NULL; + } + Py_XDECREF(errobj); } #if @isint@ Modified: trunk/numpy/core/tests/test_regression.py =================================================================== --- trunk/numpy/core/tests/test_regression.py 2009-02-07 18:51:31 UTC (rev 6350) +++ trunk/numpy/core/tests/test_regression.py 2009-02-09 20:18:08 UTC (rev 6351) @@ -1,7 +1,7 @@ - from StringIO import StringIO import pickle import sys +import gc from os import path from numpy.testing import * import numpy as np @@ -1208,5 +1208,17 @@ a = np.array(1) self.failUnlessRaises(ValueError, lambda x: x.choose([]), a) + def test_errobj_reference_leak(self, level=rlevel): + """Ticket #955""" + z = int(0) + p = np.int32(-1) + + gc.collect() + n_before = len(gc.get_objects()) + z**p # this shouldn't leak a reference to errobj + gc.collect() + n_after = len(gc.get_objects()) + assert n_before >= n_after, (n_before, n_after) + if __name__ == "__main__": run_module_suite() From numpy-svn at scipy.org Mon Feb 9 19:42:43 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Mon, 9 Feb 2009 18:42:43 -0600 (CST) Subject: [Numpy-svn] r6352 - in trunk/numpy/ma: . tests Message-ID: <20090210004243.06254C7C018@scipy.org> Author: pierregm Date: 2009-02-09 18:42:40 -0600 (Mon, 09 Feb 2009) New Revision: 6352 Modified: trunk/numpy/ma/core.py trunk/numpy/ma/tests/test_core.py Log: * prevent modifications to the mask to be back-propagated w/ __array_wrap__ Modified: trunk/numpy/ma/core.py =================================================================== --- trunk/numpy/ma/core.py 2009-02-09 20:18:08 UTC (rev 6351) +++ trunk/numpy/ma/core.py 2009-02-10 00:42:40 UTC (rev 6352) @@ -1817,7 +1817,8 @@ if d is not nomask: m = d else: - m |= d + # Don't modify inplace, we risk back-propagation + m = (m | d) # Make sure the mask has the proper size if result.shape == () and m: return masked Modified: trunk/numpy/ma/tests/test_core.py =================================================================== --- trunk/numpy/ma/tests/test_core.py 2009-02-09 20:18:08 UTC (rev 6351) +++ trunk/numpy/ma/tests/test_core.py 2009-02-10 00:42:40 UTC (rev 6352) @@ -1025,6 +1025,23 @@ assert_equal(test.mask, [False, False]) + def test_numpyarithmetics(self): + "Check that the mask is not back-propagated when using numpy functions" + a = masked_array([-1, 0, 1, 2, 3], mask=[0, 0, 0, 0, 1]) + control = masked_array([np.nan, np.nan, 0, np.log(2), -1], + mask=[1, 1, 0, 0, 1]) + # + test = log(a) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + assert_equal(a.mask, [0, 0, 0, 0, 1]) + # + test = np.log(a) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + assert_equal(a.mask, [0, 0, 0, 0, 1]) + # + #------------------------------------------------------------------------------ class TestMaskedArrayAttributes(TestCase): From numpy-svn at scipy.org Mon Feb 9 19:49:42 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Mon, 9 Feb 2009 18:49:42 -0600 (CST) Subject: [Numpy-svn] r6353 - in branches/1.2.x/numpy/ma: . tests Message-ID: <20090210004942.B87C8C7C018@scipy.org> Author: pierregm Date: 2009-02-09 18:49:40 -0600 (Mon, 09 Feb 2009) New Revision: 6353 Modified: branches/1.2.x/numpy/ma/core.py branches/1.2.x/numpy/ma/tests/test_core.py Log: *backporting bugfix r6352 Modified: branches/1.2.x/numpy/ma/core.py =================================================================== --- branches/1.2.x/numpy/ma/core.py 2009-02-10 00:42:40 UTC (rev 6352) +++ branches/1.2.x/numpy/ma/core.py 2009-02-10 00:49:40 UTC (rev 6353) @@ -1598,7 +1598,8 @@ if d is not nomask: m = d else: - m |= d + # Don't modify inplace, we risk back-propagation + m = (m | d) # Make sure the mask has the proper size if result.shape == () and m: return masked Modified: branches/1.2.x/numpy/ma/tests/test_core.py =================================================================== --- branches/1.2.x/numpy/ma/tests/test_core.py 2009-02-10 00:42:40 UTC (rev 6352) +++ branches/1.2.x/numpy/ma/tests/test_core.py 2009-02-10 00:49:40 UTC (rev 6353) @@ -825,6 +825,25 @@ self.failUnless(result is output) self.failUnless(output[0] is masked) + + + def test_numpyarithmetics(self): + "Check that the mask is not back-propagated when using numpy functions" + a = masked_array([-1, 0, 1, 2, 3], mask=[0, 0, 0, 0, 1]) + control = masked_array([np.nan, np.nan, 0, np.log(2), -1], + mask=[1, 1, 0, 0, 1]) + # + test = log(a) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + assert_equal(a.mask, [0, 0, 0, 0, 1]) + # + test = np.log(a) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + assert_equal(a.mask, [0, 0, 0, 0, 1]) + # + #------------------------------------------------------------------------------ class TestMaskedArrayAttributes(TestCase): From numpy-svn at scipy.org Tue Feb 10 05:45:40 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Tue, 10 Feb 2009 04:45:40 -0600 (CST) Subject: [Numpy-svn] r6354 - trunk/numpy/f2py Message-ID: <20090210104540.451D2C7C01D@scipy.org> Author: cdavid Date: 2009-02-10 04:44:01 -0600 (Tue, 10 Feb 2009) New Revision: 6354 Modified: trunk/numpy/f2py/f2py.1 Log: Fix hyphen (patch from debian package). Modified: trunk/numpy/f2py/f2py.1 =================================================================== --- trunk/numpy/f2py/f2py.1 2009-02-10 00:49:40 UTC (rev 6353) +++ trunk/numpy/f2py/f2py.1 2009-02-10 10:44:01 UTC (rev 6354) @@ -20,7 +20,7 @@ This program generates a Python C/API file (module.c) that contains wrappers for given Fortran or C functions so that they can be called from Python. -With the -c option the corresponding +With the \-c option the corresponding extension modules are built. .SH OPTIONS .TP @@ -49,8 +49,8 @@ \'untitled\'. .TP .B \-\-[no\-]lower -Do [not] lower the cases in . By default, --lower is -assumed with -h key, and --no-lower without -h key. +Do [not] lower the cases in . By default, \-\-lower is +assumed with \-h key, and \-\-no\-lower without \-h key. .TP .B \-\-build\-dir All f2py generated files are created in . Default is tempfile.mktemp(). @@ -59,14 +59,14 @@ Overwrite existing signature file. .TP .B \-\-[no\-]latex\-doc -Create (or not) module.tex. Default is --no-latex-doc. +Create (or not) module.tex. Default is \-\-no\-latex\-doc. .TP .B \-\-short\-latex Create 'incomplete' LaTeX document (without commands \\documentclass, \\tableofcontents, and \\begin{document}, \\end{document}). .TP .B \-\-[no\-]rest\-doc -Create (or not) module.rst. Default is --no-rest-doc. +Create (or not) module.rst. Default is \-\-no\-rest\-doc. .TP .B \-\-debug\-capi Create C/API code that reports the state of the wrappers during @@ -81,12 +81,12 @@ .TP .B \-\-[no\-]wrap\-functions Create Fortran subroutine wrappers to Fortran 77 -functions. --wrap-functions is default because it ensures maximum +functions. \-\-wrap\-functions is default because it ensures maximum portability/compiler independence. .TP .B \-\-help\-link [..] List system resources found by system_info.py. [..] may contain -a list of resources names. See also --link- switch below. +a list of resources names. See also \-\-link\- switch below. .TP .B \-\-quiet Run quietly. @@ -100,7 +100,7 @@ .B \-\-include_paths path1:path2:... Search include files (that f2py will scan) from the given directories. .SH "CONFIG_FC OPTIONS" -The following options are effective only when -c switch is used. +The following options are effective only when \-c switch is used. .TP .B \-\-help-compiler List available Fortran compilers [DEPRECIATED]. @@ -147,13 +147,13 @@ .B \-\-debug Compile with debugging information. .SH "EXTRA OPTIONS" -The following options are effective only when -c switch is used. +The following options are effective only when \-c switch is used. .TP .B \-\-link- Link extension module with as defined by numpy_distutils/system_info.py. E.g. to link with optimized LAPACK libraries (vecLib on MacOSX, ATLAS elsewhere), use ---link-lapack_opt. See also --help-link switch. +\-\-link\-lapack_opt. See also \-\-help\-link switch. .TP .B -L/path/to/lib/ -l From numpy-svn at scipy.org Tue Feb 10 05:58:53 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Tue, 10 Feb 2009 04:58:53 -0600 (CST) Subject: [Numpy-svn] r6355 - branches/1.2.x/numpy/f2py Message-ID: <20090210105853.89132C7C01D@scipy.org> Author: cdavid Date: 2009-02-10 04:57:41 -0600 (Tue, 10 Feb 2009) New Revision: 6355 Modified: branches/1.2.x/numpy/f2py/f2py.1 Log: Merge 6354 from trunk. Modified: branches/1.2.x/numpy/f2py/f2py.1 =================================================================== --- branches/1.2.x/numpy/f2py/f2py.1 2009-02-10 10:44:01 UTC (rev 6354) +++ branches/1.2.x/numpy/f2py/f2py.1 2009-02-10 10:57:41 UTC (rev 6355) @@ -20,7 +20,7 @@ This program generates a Python C/API file (module.c) that contains wrappers for given Fortran or C functions so that they can be called from Python. -With the -c option the corresponding +With the \-c option the corresponding extension modules are built. .SH OPTIONS .TP @@ -49,8 +49,8 @@ \'untitled\'. .TP .B \-\-[no\-]lower -Do [not] lower the cases in . By default, --lower is -assumed with -h key, and --no-lower without -h key. +Do [not] lower the cases in . By default, \-\-lower is +assumed with \-h key, and \-\-no\-lower without \-h key. .TP .B \-\-build\-dir All f2py generated files are created in . Default is tempfile.mktemp(). @@ -59,14 +59,14 @@ Overwrite existing signature file. .TP .B \-\-[no\-]latex\-doc -Create (or not) module.tex. Default is --no-latex-doc. +Create (or not) module.tex. Default is \-\-no\-latex\-doc. .TP .B \-\-short\-latex Create 'incomplete' LaTeX document (without commands \\documentclass, \\tableofcontents, and \\begin{document}, \\end{document}). .TP .B \-\-[no\-]rest\-doc -Create (or not) module.rst. Default is --no-rest-doc. +Create (or not) module.rst. Default is \-\-no\-rest\-doc. .TP .B \-\-debug\-capi Create C/API code that reports the state of the wrappers during @@ -81,12 +81,12 @@ .TP .B \-\-[no\-]wrap\-functions Create Fortran subroutine wrappers to Fortran 77 -functions. --wrap-functions is default because it ensures maximum +functions. \-\-wrap\-functions is default because it ensures maximum portability/compiler independence. .TP .B \-\-help\-link [..] List system resources found by system_info.py. [..] may contain -a list of resources names. See also --link- switch below. +a list of resources names. See also \-\-link\- switch below. .TP .B \-\-quiet Run quietly. @@ -100,7 +100,7 @@ .B \-\-include_paths path1:path2:... Search include files (that f2py will scan) from the given directories. .SH "CONFIG_FC OPTIONS" -The following options are effective only when -c switch is used. +The following options are effective only when \-c switch is used. .TP .B \-\-help-compiler List available Fortran compilers [DEPRECIATED]. @@ -147,13 +147,13 @@ .B \-\-debug Compile with debugging information. .SH "EXTRA OPTIONS" -The following options are effective only when -c switch is used. +The following options are effective only when \-c switch is used. .TP .B \-\-link- Link extension module with as defined by numpy_distutils/system_info.py. E.g. to link with optimized LAPACK libraries (vecLib on MacOSX, ATLAS elsewhere), use ---link-lapack_opt. See also --help-link switch. +\-\-link\-lapack_opt. See also \-\-help\-link switch. .TP .B -L/path/to/lib/ -l From numpy-svn at scipy.org Tue Feb 10 20:52:32 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Tue, 10 Feb 2009 19:52:32 -0600 (CST) Subject: [Numpy-svn] r6356 - in trunk/numpy/ma: . tests Message-ID: <20090211015232.B21FAC7C009@scipy.org> Author: pierregm Date: 2009-02-10 19:51:28 -0600 (Tue, 10 Feb 2009) New Revision: 6356 Modified: trunk/numpy/ma/core.py trunk/numpy/ma/tests/test_core.py Log: * MaskedArray.__array_wrap__ : forces the domain (if any) to a ndarray (fill with True) Modified: trunk/numpy/ma/core.py =================================================================== --- trunk/numpy/ma/core.py 2009-02-10 10:57:41 UTC (rev 6355) +++ trunk/numpy/ma/core.py 2009-02-11 01:51:28 UTC (rev 6356) @@ -1796,10 +1796,11 @@ # Get the domain mask................ domain = ufunc_domain.get(func, None) if domain is not None: + # Take the domain, and make sure it's a ndarray if len(args) > 2: - d = reduce(domain, args) + d = filled(reduce(domain, args), True) else: - d = domain(*args) + d = filled(domain(*args), True) # Fill the result where the domain is wrong try: # Binary domain: take the last value Modified: trunk/numpy/ma/tests/test_core.py =================================================================== --- trunk/numpy/ma/tests/test_core.py 2009-02-10 10:57:41 UTC (rev 6355) +++ trunk/numpy/ma/tests/test_core.py 2009-02-11 01:51:28 UTC (rev 6356) @@ -1040,7 +1040,6 @@ assert_equal(test, control) assert_equal(test.mask, control.mask) assert_equal(a.mask, [0, 0, 0, 0, 1]) - # #------------------------------------------------------------------------------ @@ -1375,7 +1374,17 @@ self.failUnless(amask.max(1)[0].mask) self.failUnless(amask.min(1)[0].mask) + def test_ndarray_mask(self): + "Check that the mask of the result is a ndarray (not a MaskedArray...)" + a = masked_array([-1, 0, 1, 2, 3], mask=[0, 0, 0, 0, 1]) + test = np.sqrt(a) + control = masked_array([-1, 0, 1, np.sqrt(2), -1], + mask=[1, 0, 0, 0, 1]) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + self.failUnless(not isinstance(test.mask, MaskedArray)) + #------------------------------------------------------------------------------ class TestMaskedArrayInPlaceArithmetics(TestCase): From numpy-svn at scipy.org Tue Feb 10 20:53:09 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Tue, 10 Feb 2009 19:53:09 -0600 (CST) Subject: [Numpy-svn] r6357 - in branches/1.2.x/numpy/ma: . tests Message-ID: <20090211015309.45BE2C7C020@scipy.org> Author: pierregm Date: 2009-02-10 19:52:37 -0600 (Tue, 10 Feb 2009) New Revision: 6357 Modified: branches/1.2.x/numpy/ma/core.py branches/1.2.x/numpy/ma/tests/test_core.py Log: * MaskedArray.__array_wrap__ : forces the domain (if any) to a ndarray (fill with True) Modified: branches/1.2.x/numpy/ma/core.py =================================================================== --- branches/1.2.x/numpy/ma/core.py 2009-02-11 01:51:28 UTC (rev 6356) +++ branches/1.2.x/numpy/ma/core.py 2009-02-11 01:52:37 UTC (rev 6357) @@ -1577,10 +1577,11 @@ # Get the domain mask................ domain = ufunc_domain.get(func, None) if domain is not None: + # Take the domain, and make sure it's a ndarray if len(args) > 2: - d = reduce(domain, args) + d = filled(reduce(domain, args), True) else: - d = domain(*args) + d = filled(domain(*args), True) # Fill the result where the domain is wrong try: # Binary domain: take the last value Modified: branches/1.2.x/numpy/ma/tests/test_core.py =================================================================== --- branches/1.2.x/numpy/ma/tests/test_core.py 2009-02-11 01:51:28 UTC (rev 6356) +++ branches/1.2.x/numpy/ma/tests/test_core.py 2009-02-11 01:52:37 UTC (rev 6357) @@ -826,7 +826,6 @@ self.failUnless(output[0] is masked) - def test_numpyarithmetics(self): "Check that the mask is not back-propagated when using numpy functions" a = masked_array([-1, 0, 1, 2, 3], mask=[0, 0, 0, 0, 1]) @@ -1143,7 +1142,17 @@ self.failUnless(amask.max(1)[0].mask) self.failUnless(amask.min(1)[0].mask) + def test_ndarray_mask(self): + "Check that the mask of the result is a ndarray (not a MaskedArray...)" + a = masked_array([-1, 0, 1, 2, 3], mask=[0, 0, 0, 0, 1]) + test = np.sqrt(a) + control = masked_array([-1, 0, 1, np.sqrt(2), -1], + mask=[1, 0, 0, 0, 1]) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + self.failUnless(not isinstance(test.mask, MaskedArray)) + #------------------------------------------------------------------------------ class TestMaskedArrayInPlaceArithmetics(TestCase): From numpy-svn at scipy.org Wed Feb 11 23:22:07 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Wed, 11 Feb 2009 22:22:07 -0600 (CST) Subject: [Numpy-svn] r6358 - in trunk/numpy/core: . src tests Message-ID: <20090212042207.26D43C7C011@scipy.org> Author: oliphant Date: 2009-02-11 22:22:03 -0600 (Wed, 11 Feb 2009) New Revision: 6358 Modified: trunk/numpy/core/_internal.py trunk/numpy/core/src/arrayobject.c trunk/numpy/core/tests/test_numerictypes.py Log: Add multiple-field access by making a copy of the array and filling with the selected fields. Modified: trunk/numpy/core/_internal.py =================================================================== --- trunk/numpy/core/_internal.py 2009-02-11 01:52:37 UTC (rev 6357) +++ trunk/numpy/core/_internal.py 2009-02-12 04:22:03 UTC (rev 6358) @@ -292,3 +292,22 @@ raise ValueError, "unknown field name: %s" % (name,) return tuple(list(order) + nameslist) raise ValueError, "unsupported order value: %s" % (order,) + +# Given an array with fields and a sequence of field names +# construct a new array with just those fields copied over +def _index_fields(ary, fields): + from multiarray import empty, dtype + dt = ary.dtype + new_dtype = [(name, dt[name]) for name in dt.names if name in fields] + if ary.flags.f_contiguous: + order = 'F' + else: + order = 'C' + + newarray = empty(ary.shape, dtype=new_dtype, order=order) + + for name in fields: + newarray[name] = ary[name] + + return newarray + Modified: trunk/numpy/core/src/arrayobject.c =================================================================== --- trunk/numpy/core/src/arrayobject.c 2009-02-11 01:52:37 UTC (rev 6357) +++ trunk/numpy/core/src/arrayobject.c 2009-02-12 04:22:03 UTC (rev 6358) @@ -2827,10 +2827,10 @@ int nd, fancy; PyArrayObject *other; PyArrayMapIterObject *mit; + PyObject *obj; if (PyString_Check(op) || PyUnicode_Check(op)) { if (self->descr->names) { - PyObject *obj; obj = PyDict_GetItem(self->descr->fields, op); if (obj != NULL) { PyArray_Descr *descr; @@ -2852,6 +2852,34 @@ return NULL; } + /* Check for multiple field access + */ + if (self->descr->names && PySequence_Check(op) && !PyTuple_Check(op)) { + int seqlen, i; + seqlen = PySequence_Size(op); + for (i=0; i 0) && (i == seqlen)); + if (fancy) { + PyObject *_numpy_internal; + _numpy_internal = PyImport_ImportModule("numpy.core._internal"); + if (_numpy_internal == NULL) return NULL; + obj = PyObject_CallMethod(_numpy_internal, "_index_fields", + "OO", self, op); + Py_DECREF(_numpy_internal); + return obj; + } + } + if (op == Py_Ellipsis) { Py_INCREF(self); return (PyObject *)self; Modified: trunk/numpy/core/tests/test_numerictypes.py =================================================================== --- trunk/numpy/core/tests/test_numerictypes.py 2009-02-11 01:52:37 UTC (rev 6357) +++ trunk/numpy/core/tests/test_numerictypes.py 2009-02-12 04:22:03 UTC (rev 6358) @@ -353,6 +353,16 @@ res = np.find_common_type(['u8','i8','i8'],['f8']) assert(res == 'f8') +class TestMultipleFields(TestCase): + def setUp(self): + self.ary = np.array([(1,2,3,4),(5,6,7,8)], dtype='i4,f4,i2,c8') + def _bad_call(self): + return self.ary['f0','f1'] + def test_no_tuple(self): + self.failUnlessRaises(ValueError, self._bad_call) + def test_return(self): + res = self.ary[['f0','f2']].tolist() + assert(res == [(1,3), (5,7)]) if __name__ == "__main__": run_module_suite() From numpy-svn at scipy.org Thu Feb 12 00:44:17 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Wed, 11 Feb 2009 23:44:17 -0600 (CST) Subject: [Numpy-svn] r6359 - trunk/numpy/distutils/fcompiler Message-ID: <20090212054417.25E4BC7C01E@scipy.org> Author: stefan Date: 2009-02-11 23:44:07 -0600 (Wed, 11 Feb 2009) New Revision: 6359 Modified: trunk/numpy/distutils/fcompiler/gnu.py Log: Trust user's specification of MACOSX_DEPLOYMENT_TARGET [patch by Brian Granger]. Modified: trunk/numpy/distutils/fcompiler/gnu.py =================================================================== --- trunk/numpy/distutils/fcompiler/gnu.py 2009-02-12 04:22:03 UTC (rev 6358) +++ trunk/numpy/distutils/fcompiler/gnu.py 2009-02-12 05:44:07 UTC (rev 6359) @@ -87,21 +87,29 @@ def get_flags_linker_so(self): opt = self.linker_so[1:] if sys.platform=='darwin': - # MACOSX_DEPLOYMENT_TARGET must be at least 10.3. This is - # a reasonable default value even when building on 10.4 when using - # the official Python distribution and those derived from it (when - # not broken). target = os.environ.get('MACOSX_DEPLOYMENT_TARGET', None) - if target is None or target == '': - target = '10.3' - major, minor = target.split('.') - if int(minor) < 3: - minor = '3' - warnings.warn('Environment variable ' - 'MACOSX_DEPLOYMENT_TARGET reset to %s.%s' % (major, minor)) - os.environ['MACOSX_DEPLOYMENT_TARGET'] = '%s.%s' % (major, - minor) - + # If MACOSX_DEPLOYMENT_TARGET is set, we simply trust the value + # and leave it alone. But, distutils will complain if the + # environment's value is different from the one in the Python + # Makefile used to build Python. We let disutils handle this + # error checking. + if not target: + # If MACOSX_DEPLOYMENT_TARGET is not set in the environment, + # we try to get it first from the Python Makefile and then we + # fall back to setting it to 10.3 to maximize the set of + # versions we can work with. This is a reasonable default + # even when using the official Python dist and those derived + # from it. + import distutils.sysconfig as sc + g = {} + filename = sc.get_makefile_filename() + sc.parse_makefile(filename, g) + target = g.get('MACOSX_DEPLOYMENT_TARGET', '10.3') + os.environ['MACOSX_DEPLOYMENT_TARGET'] = target + if target == '10.3': + s = 'Env. variable MACOSX_DEPLOYMENT_TARGET set to 10.3' + warnings.warn(s) + opt.extend(['-undefined', 'dynamic_lookup', '-bundle']) else: opt.append("-shared") From numpy-svn at scipy.org Sat Feb 14 09:54:41 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sat, 14 Feb 2009 08:54:41 -0600 (CST) Subject: [Numpy-svn] r6360 - in trunk: . numpy/core numpy/core/src numpy/core/tests Message-ID: <20090214145441.270C5C7C011@scipy.org> Author: cdavid Date: 2009-02-14 08:54:26 -0600 (Sat, 14 Feb 2009) New Revision: 6360 Added: trunk/numpy/core/src/numpyos.c Modified: trunk/ trunk/numpy/core/SConscript trunk/numpy/core/setup.py trunk/numpy/core/src/arraytypes.inc.src trunk/numpy/core/src/multiarraymodule.c trunk/numpy/core/src/scalartypes.inc.src trunk/numpy/core/tests/test_multiarray.py trunk/numpy/core/tests/test_print.py Log: Merge fix_float_format branch into the trunk. Property changes on: trunk ___________________________________________________________________ Name: svnmerge-integrated - /branches/distutils-revamp:1-2752 /branches/dynamic_cpu_configuration:1-6101 /branches/fix_float_format:1-6222,6233-6234,6247-6249 /branches/multicore:1-3687 /branches/numpy-mingw-w64:1-6150 /branches/visualstudio_manifest:1-6077 /trunk:1-2871 + /branches/distutils-revamp:1-2752 /branches/dynamic_cpu_configuration:1-6101 /branches/fix_float_format:1-6359 /branches/multicore:1-3687 /branches/numpy-mingw-w64:1-6150 /branches/visualstudio_manifest:1-6077 /trunk:1-2871 Modified: trunk/numpy/core/SConscript =================================================================== --- trunk/numpy/core/SConscript 2009-02-12 05:44:07 UTC (rev 6359) +++ trunk/numpy/core/SConscript 2009-02-14 14:54:26 UTC (rev 6360) @@ -211,6 +211,10 @@ config.Define('DISTUTILS_USE_SDK', distutils_use_sdk, "define to 1 to disable SMP support ") + if a == "Intel": + config.Define('FORCE_NO_LONG_DOUBLE_FORMATTING', 1, + "define to 1 to force long double format string to the" \ + " same as double (Lg -> g)") #-------------- # Checking Blas #-------------- Modified: trunk/numpy/core/setup.py =================================================================== --- trunk/numpy/core/setup.py 2009-02-12 05:44:07 UTC (rev 6359) +++ trunk/numpy/core/setup.py 2009-02-14 14:54:26 UTC (rev 6360) @@ -187,6 +187,14 @@ headers=['stdlib.h']): moredefs.append(('PyOS_ascii_strtod', 'strtod')) + if sys.platform == "win32": + from numpy.distutils.misc_util import get_build_architecture + # On win32, force long double format string to be 'g', not + # 'Lg', since the MS runtime does not support long double whose + # size is > sizeof(double) + if get_build_architecture()=="Intel": + moredefs.append('FORCE_NO_LONG_DOUBLE_FORMATTING') + target_f = open(target,'a') for d in moredefs: if isinstance(d,str): @@ -330,6 +338,7 @@ deps = [join('src','arrayobject.c'), join('src','arraymethods.c'), join('src','scalartypes.inc.src'), + join('src','numpyos.c'), join('src','arraytypes.inc.src'), join('src','_signbit.c'), join('src','ucsnarrow.c'), Modified: trunk/numpy/core/src/arraytypes.inc.src =================================================================== --- trunk/numpy/core/src/arraytypes.inc.src 2009-02-12 05:44:07 UTC (rev 6359) +++ trunk/numpy/core/src/arraytypes.inc.src 2009-02-14 14:54:26 UTC (rev 6360) @@ -2,41 +2,17 @@ #include "config.h" static double -_getNAN(void) { -#ifdef NAN - return NAN; -#else - static double nan=0; - - if (nan == 0) { - double mul = 1e100; - double tmp = 0.0; - double pinf=0; - pinf = mul; - for (;;) { - pinf *= mul; - if (pinf == tmp) break; - tmp = pinf; - } - nan = pinf / pinf; - } - return nan; -#endif -} - - -static double MyPyFloat_AsDouble(PyObject *obj) { double ret = 0; PyObject *num; if (obj == Py_None) { - return _getNAN(); + return NumPyOS_NAN; } num = PyNumber_Float(obj); if (num == NULL) { - return _getNAN(); + return NumPyOS_NAN; } ret = PyFloat_AsDouble(num); Py_DECREF(num); @@ -192,7 +168,7 @@ op2 = op; Py_INCREF(op); } if (op2 == Py_None) { - oop.real = oop.imag = _getNAN(); + oop.real = oop.imag = NumPyOS_NAN; } else { oop = PyComplex_AsCComplex (op2); @@ -897,17 +873,30 @@ */ /**begin repeat - -#fname=SHORT,USHORT,INT,UINT,LONG,ULONG,LONGLONG,ULONGLONG,FLOAT,DOUBLE,LONGDOUBLE# -#type=short,ushort,int,uint,long,ulong,longlong,ulonglong,float,double,longdouble# -#format="hd","hu","d","u","ld","lu",LONGLONG_FMT,ULONGLONG_FMT,"f","lf","Lf"# +#fname=SHORT,USHORT,INT,UINT,LONG,ULONG,LONGLONG,ULONGLONG# +#type=short,ushort,int,uint,long,ulong,longlong,ulonglong# +#format="hd","hu","d","u","ld","lu",LONGLONG_FMT,ULONGLONG_FMT# */ static int @fname at _scan (FILE *fp, @type@ *ip, void *NPY_UNUSED(ignore), PyArray_Descr *NPY_UNUSED(ignored)) { return fscanf(fp, "%"@format@, ip); } +/**end repeat**/ +/**begin repeat +#fname=FLOAT,DOUBLE,LONGDOUBLE# +#type=float,double,longdouble# +*/ +static int + at fname@_scan (FILE *fp, @type@ *ip, void *NPY_UNUSED(ignore), PyArray_Descr *NPY_UNUSED(ignored)) +{ + double result; + int ret; + ret = NumPyOS_ascii_ftolf(fp, &result); + *ip = (@type@) result; + return ret; +} /**end repeat**/ /**begin repeat @@ -966,19 +955,15 @@ #fname=FLOAT,DOUBLE,LONGDOUBLE# #type=float,double,longdouble# */ -#if (PY_VERSION_HEX >= 0x02040000) || defined(PyOS_ascii_strtod) static int @fname at _fromstr(char *str, @type@ *ip, char **endptr, PyArray_Descr *NPY_UNUSED(ignore)) { double result; - result = PyOS_ascii_strtod(str, endptr); + result = NumPyOS_ascii_strtod(str, endptr); *ip = (@type@) result; return 0; } -#else -#define @fname at _fromstr NULL -#endif /**end repeat**/ Modified: trunk/numpy/core/src/multiarraymodule.c =================================================================== --- trunk/numpy/core/src/multiarraymodule.c 2009-02-12 05:44:07 UTC (rev 6359) +++ trunk/numpy/core/src/multiarraymodule.c 2009-02-14 14:54:26 UTC (rev 6360) @@ -7705,6 +7705,9 @@ PyObject *m, *d, *s; PyObject *c_api; + /* Initialize constants etc. */ + NumPyOS_init(); + /* Create the module and add the functions */ m = Py_InitModule("multiarray", array_module_methods); if (!m) goto err; Copied: trunk/numpy/core/src/numpyos.c (from rev 6319, branches/fix_float_format/numpy/core/src/numpyos.c) Modified: trunk/numpy/core/src/scalartypes.inc.src =================================================================== --- trunk/numpy/core/src/scalartypes.inc.src 2009-02-12 05:44:07 UTC (rev 6359) +++ trunk/numpy/core/src/scalartypes.inc.src 2009-02-14 14:54:26 UTC (rev 6360) @@ -5,6 +5,9 @@ #endif #include "numpy/arrayscalars.h" +#include "config.h" +#include "numpyos.c" + static PyBoolScalarObject _PyArrayScalar_BoolValues[2] = { {PyObject_HEAD_INIT(&PyBoolArrType_Type) 0}, {PyObject_HEAD_INIT(&PyBoolArrType_Type) 1}, @@ -604,23 +607,36 @@ return ret; } +#ifdef FORCE_NO_LONG_DOUBLE_FORMATTING +#undef NPY_LONGDOUBLE_FMT +#define NPY_LONGDOUBLE_FMT NPY_DOUBLE_FMT +#endif + /**begin repeat * #name=float, double, longdouble# * #NAME=FLOAT, DOUBLE, LONGDOUBLE# + * #type=f, d, l# */ -#define FMT "%.*" NPY_ at NAME@_FMT -#define CFMT1 "%.*" NPY_ at NAME@_FMT "j" -#define CFMT2 "(%.*" NPY_ at NAME@_FMT "%+.*" NPY_ at NAME@_FMT "j)" +#define _FMT1 "%%.%i" NPY_ at NAME@_FMT +#define _FMT2 "%%+.%i" NPY_ at NAME@_FMT static void format_ at name@(char *buf, size_t buflen, @name@ val, unsigned int prec) { - int cnt, i; + /* XXX: Find a correct size here for format string */ + char format[64], *res; + int i, cnt; - cnt = PyOS_snprintf(buf, buflen, FMT, prec, val); + PyOS_snprintf(format, sizeof(format), _FMT1, prec); + res = NumPyOS_ascii_format at type@(buf, buflen, format, val, 0); + if (res == NULL) { + fprintf(stderr, "Error while formatting\n"); + return; + } /* If nothing but digits after sign, append ".0" */ + cnt = strlen(buf); for (i = (val < 0) ? 1 : 0; i < cnt; ++i) { if (!isdigit(Py_CHARMASK(buf[i]))) { break; @@ -634,17 +650,39 @@ static void format_c at name@(char *buf, size_t buflen, c at name@ val, unsigned int prec) { + /* XXX: Find a correct size here for format string */ + char format[64]; + char *res; if (val.real == 0.0) { - PyOS_snprintf(buf, buflen, CFMT1, prec, val.imag); + PyOS_snprintf(format, sizeof(format), _FMT1, prec); + res = NumPyOS_ascii_format at type@(buf, buflen-1, format, val.imag, 0); + if (res == NULL) { + fprintf(stderr, "Error while formatting\n"); + return; + } + strncat(buf, "j", 1); } else { - PyOS_snprintf(buf, buflen, CFMT2, prec, val.real, prec, val.imag); + char re[64], im[64]; + PyOS_snprintf(format, sizeof(format), _FMT1, prec); + res = NumPyOS_ascii_format at type@(re, sizeof(re), format, val.real, 0); + if (res == NULL) { + fprintf(stderr, "Error while formatting\n"); + return; + } + + PyOS_snprintf(format, sizeof(format), _FMT2, prec); + res = NumPyOS_ascii_format at type@(im, sizeof(im), format, val.imag, 0); + if (res == NULL) { + fprintf(stderr, "Error while formatting\n"); + return; + } + PyOS_snprintf(buf, buflen, "(%s%sj)", re, im); } } -#undef FMT -#undef CFMT1 -#undef CFMT2 +#undef _FMT1 +#undef _FMT2 /**end repeat**/ @@ -736,7 +774,47 @@ /**end repeat1**/ /**end repeat**/ +/* + * float type print (control print a, where a is a float type instance) + */ +/**begin repeat + * #name=float, double, longdouble# + * #Name=Float, Double, LongDouble# + * #NAME=FLOAT, DOUBLE, LONGDOUBLE# + */ +static int + at name@type_print(PyObject *v, FILE *fp, int flags) +{ + char buf[100]; + @name@ val = ((Py at Name@ScalarObject *)v)->obval; + + format_ at name@(buf, sizeof(buf), val, + (flags & Py_PRINT_RAW) ? @NAME at PREC_STR : @NAME at PREC_REPR); + Py_BEGIN_ALLOW_THREADS + fputs(buf, fp); + Py_END_ALLOW_THREADS + return 0; +} + +static int +c at name@type_print(PyObject *v, FILE *fp, int flags) +{ + /* Size of buf: twice sizeof(real) + 2 (for the parenthesis) */ + char buf[202]; + c at name@ val = ((PyC at Name@ScalarObject *)v)->obval; + + format_c at name@(buf, sizeof(buf), val, + (flags & Py_PRINT_RAW) ? @NAME at PREC_STR : @NAME at PREC_REPR); + Py_BEGIN_ALLOW_THREADS + fputs(buf, fp); + Py_END_ALLOW_THREADS + return 0; +} + +/**end repeat**/ + + /* * Could improve this with a PyLong_FromLongDouble(longdouble ldval) * but this would need some more work... @@ -3077,6 +3155,14 @@ PyCDoubleArrType_Type.tp_ at name@ = cdoubletype_ at name@; /**end repeat**/ + PyFloatArrType_Type.tp_print = floattype_print; + PyDoubleArrType_Type.tp_print = doubletype_print; + PyLongDoubleArrType_Type.tp_print = longdoubletype_print; + + PyCFloatArrType_Type.tp_print = cfloattype_print; + PyCDoubleArrType_Type.tp_print = cdoubletype_print; + PyCLongDoubleArrType_Type.tp_print = clongdoubletype_print; + /* These need to be coded specially because getitem does not return a normal Python type */ Modified: trunk/numpy/core/tests/test_multiarray.py =================================================================== --- trunk/numpy/core/tests/test_multiarray.py 2009-02-12 05:44:07 UTC (rev 6359) +++ trunk/numpy/core/tests/test_multiarray.py 2009-02-14 14:54:26 UTC (rev 6360) @@ -5,6 +5,8 @@ from numpy.testing import * from numpy.core import * +from test_print import in_foreign_locale + class TestFlags(TestCase): def setUp(self): self.a = arange(10) @@ -114,41 +116,6 @@ d2 = dtype('f8') assert_equal(d2, dtype(float64)) - -class TestFromstring(TestCase): - def test_binary(self): - a = fromstring('\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@',dtype=' 4: + _test_redirected_print(float(1e10), tp) + else: + if sys.platform == 'win32' and sys.version_info[0] <= 2 and \ + sys.version_info[1] <= 5: + ref = '1e+010' + else: + ref = '1e+10' + _test_redirected_print(float(1e10), tp, ref) + +#@dec.knownfailureif(True, "formatting tests are known to fail") def check_complex_type_print(tp): # We do not create complex with inf/nan directly because the feature is # missing in python < 2.6 - for x in [0, 1, -1, 1e10, 1e20, complex(np.inf, 1), - complex(np.nan, 1), complex(-np.inf, 1)] : + for x in [0, 1, -1, 1e20]: _test_redirected_print(complex(x), tp) - at dec.knownfailureif(True, "formatting tests are known to fail") + if tp(1e10).itemsize > 8: + _test_redirected_print(complex(1e10), tp) + else: + if sys.platform == 'win32' and sys.version_info[0] <= 2 and \ + sys.version_info[1] <= 5: + ref = '(1e+010+0j)' + else: + ref = '(1e+10+0j)' + _test_redirected_print(complex(1e10), tp, ref) + + _test_redirected_print(complex(np.inf, 1), tp, '(inf+1j)') + _test_redirected_print(complex(-np.inf, 1), tp, '(-inf+1j)') + _test_redirected_print(complex(-np.nan, 1), tp, '(nan+1j)') + def test_float_type_print(): """Check formatting when using print """ for t in [np.float32, np.double, np.longdouble] : yield check_float_type_print, t - at dec.knownfailureif(True, "formatting tests are known to fail") +#@dec.knownfailureif(True, "formatting tests are known to fail") def test_complex_type_print(): """Check formatting when using print """ for t in [np.complex64, np.cdouble, np.clongdouble] : yield check_complex_type_print, t -# Locale tests: scalar types formatting should be independant of the locale -def has_french_locale(): - curloc = locale.getlocale(locale.LC_NUMERIC) - try: - try: - if not sys.platform == 'win32': - locale.setlocale(locale.LC_NUMERIC, 'fr_FR') - else: - locale.setlocale(locale.LC_NUMERIC, 'FRENCH') - - st = True - except: - st = False - finally: - locale.setlocale(locale.LC_NUMERIC, locale=curloc) - - return st - -def _test_locale_independance(tp): +# Locale tests: scalar types formatting should be independent of the locale +def in_foreign_locale(func): # XXX: How to query locale on a given system ? # French is one language where the decimal is ',' not '.', and should be # relatively common on many systems - curloc = locale.getlocale(locale.LC_NUMERIC) - try: - if not sys.platform == 'win32': - locale.setlocale(locale.LC_NUMERIC, 'fr_FR') - else: - locale.setlocale(locale.LC_NUMERIC, 'FRENCH') + def wrapper(*args, **kwargs): + curloc = locale.getlocale(locale.LC_NUMERIC) + try: + try: + if not sys.platform == 'win32': + locale.setlocale(locale.LC_NUMERIC, 'fr_FR') + else: + locale.setlocale(locale.LC_NUMERIC, 'FRENCH') + except locale.Error: + raise nose.SkipTest("Skipping locale test, because " + "French locale not found") + return func(*args, **kwargs) + finally: + locale.setlocale(locale.LC_NUMERIC, locale=curloc) + return nose.tools.make_decorator(func)(wrapper) - assert_equal(str(tp(1.2)), str(float(1.2)), - err_msg='Failed locale test for type %s' % tp) - finally: - locale.setlocale(locale.LC_NUMERIC, locale=curloc) - - at dec.knownfailureif(True, "formatting tests are known to fail") - at np.testing.dec.skipif(not has_french_locale(), - "Skipping locale test, French locale not found") +#@dec.knownfailureif(True, "formatting tests are known to fail") + at in_foreign_locale def test_locale_single(): - return _test_locale_independance(np.float32) + assert_equal(str(np.float32(1.2)), str(float(1.2))) - at dec.knownfailureif(True, "formatting tests are known to fail") - at np.testing.dec.skipif(not has_french_locale(), - "Skipping locale test, French locale not found") +#@dec.knownfailureif(True, "formatting tests are known to fail") + at in_foreign_locale def test_locale_double(): - return _test_locale_independance(np.double) + assert_equal(str(np.double(1.2)), str(float(1.2))) - at dec.knownfailureif(True, "formatting tests are known to fail") - at np.testing.dec.skipif(not has_french_locale(), - "Skipping locale test, French locale not found") +#@dec.knownfailureif(True, "formatting tests are known to fail") + at in_foreign_locale def test_locale_longdouble(): - return _test_locale_independance(np.longdouble) + assert_equal(str(np.longdouble(1.2)), str(float(1.2))) if __name__ == "__main__": run_module_suite() From numpy-svn at scipy.org Sat Feb 14 10:02:43 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sat, 14 Feb 2009 09:02:43 -0600 (CST) Subject: [Numpy-svn] r6361 - trunk/numpy/core/tests Message-ID: <20090214150243.21342C7C011@scipy.org> Author: cdavid Date: 2009-02-14 09:02:39 -0600 (Sat, 14 Feb 2009) New Revision: 6361 Modified: trunk/numpy/core/tests/test_multiarray.py Log: Fix typo in multiarray tests. Modified: trunk/numpy/core/tests/test_multiarray.py =================================================================== --- trunk/numpy/core/tests/test_multiarray.py 2009-02-14 14:54:26 UTC (rev 6360) +++ trunk/numpy/core/tests/test_multiarray.py 2009-02-14 15:02:39 UTC (rev 6361) @@ -805,7 +805,7 @@ y = np.fromfile(f, dtype=self.dtype) f.close() assert_array_equal(y, self.x.flat) - os.unlink(filename) + os.unlink(self.filename) def test_roundtrip_filename(self): self.x.tofile(self.filename) @@ -902,7 +902,7 @@ s = f.read() f.close() assert_equal(s, '1.51,2.0,3.51,4.0') - os.unlink(filename) + os.unlink(self.filename) def test_tofile_format(self): x = np.array([1.51, 2, 3.51, 4], dtype=float) From numpy-svn at scipy.org Sat Feb 14 10:03:27 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sat, 14 Feb 2009 09:03:27 -0600 (CST) Subject: [Numpy-svn] r6362 - trunk/numpy/core/tests Message-ID: <20090214150327.1F1CEC7C011@scipy.org> Author: cdavid Date: 2009-02-14 09:03:22 -0600 (Sat, 14 Feb 2009) New Revision: 6362 Modified: trunk/numpy/core/tests/test_multiarray.py Log: Remove leftover in TestIO. Modified: trunk/numpy/core/tests/test_multiarray.py =================================================================== --- trunk/numpy/core/tests/test_multiarray.py 2009-02-14 15:02:39 UTC (rev 6361) +++ trunk/numpy/core/tests/test_multiarray.py 2009-02-14 15:03:22 UTC (rev 6362) @@ -794,7 +794,7 @@ def tearDown(self): if os.path.isfile(self.filename): os.unlink(self.filename) - tmp_file.close() + #tmp_file.close() def test_roundtrip_file(self): f = open(self.filename, 'wb') From numpy-svn at scipy.org Sat Feb 14 12:04:05 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sat, 14 Feb 2009 11:04:05 -0600 (CST) Subject: [Numpy-svn] r6363 - trunk/numpy/core/src Message-ID: <20090214170405.02665C7C011@scipy.org> Author: cdavid Date: 2009-02-14 11:03:51 -0600 (Sat, 14 Feb 2009) New Revision: 6363 Modified: trunk/numpy/core/src/multiarraymodule.c Log: Include C99 math compatbility layer in multiarray - isnan and co needed by numpyos.c Modified: trunk/numpy/core/src/multiarraymodule.c =================================================================== --- trunk/numpy/core/src/multiarraymodule.c 2009-02-14 15:03:22 UTC (rev 6362) +++ trunk/numpy/core/src/multiarraymodule.c 2009-02-14 17:03:51 UTC (rev 6363) @@ -81,6 +81,10 @@ return NULL; } +/* XXX: We include c99 compat math module here because it is needed for + * numpyos.c (included by arrayobject). This is bad - we should separate + * declaration/implementation and share this in a lib. */ +#include "umath_funcs_c99.inc" /* Including this file is the only way I know how to declare functions static in each file, and store the pointers from functions in both From numpy-svn at scipy.org Sat Feb 14 17:10:00 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sat, 14 Feb 2009 16:10:00 -0600 (CST) Subject: [Numpy-svn] r6364 - in trunk/numpy: . lib lib/src Message-ID: <20090214221000.C1DB5C7C023@scipy.org> Author: ptvirtan Date: 2009-02-14 16:09:26 -0600 (Sat, 14 Feb 2009) New Revision: 6364 Modified: trunk/numpy/add_newdocs.py trunk/numpy/lib/function_base.py trunk/numpy/lib/index_tricks.py trunk/numpy/lib/src/_compiled_base.c Log: More add_newdocs entries, and make add_newdoc capable of adding docs also to normal Python objects. Modified: trunk/numpy/add_newdocs.py =================================================================== --- trunk/numpy/add_newdocs.py 2009-02-14 17:03:51 UTC (rev 6363) +++ trunk/numpy/add_newdocs.py 2009-02-14 22:09:26 UTC (rev 6364) @@ -8,140 +8,6 @@ from lib import add_newdoc -add_newdoc('numpy.core', 'dtype', -"""Create a data type. - -A numpy array is homogeneous, and contains elements described by a -dtype. A dtype can be constructed from different combinations of -fundamental numeric types, as illustrated below. - -Examples --------- - -Using array-scalar type: ->>> np.dtype(np.int16) -dtype('int16') - -Record, one field name 'f1', containing int16: ->>> np.dtype([('f1', np.int16)]) -dtype([('f1', '>> np.dtype([('f1', [('f1', np.int16)])]) -dtype([('f1', [('f1', '>> np.dtype([('f1', np.uint), ('f2', np.int32)]) -dtype([('f1', '>> np.dtype([('a','f8'),('b','S10')]) -dtype([('a', '>> np.dtype("i4, (2,3)f8") -dtype([('f0', '>> np.dtype([('hello',(np.int,3)),('world',np.void,10)]) -dtype([('hello', '>> np.dtype((np.int16, {'x':(np.int8,0), 'y':(np.int8,1)})) -dtype(('>> np.dtype({'names':['gender','age'], 'formats':['S1',np.uint8]}) -dtype([('gender', '|S1'), ('age', '|u1')]) - -Offsets in bytes, here 0 and 25: ->>> np.dtype({'surname':('S25',0),'age':(np.uint8,25)}) -dtype([('surname', '|S25'), ('age', '|u1')]) - -""") - -add_newdoc('numpy.core', 'dtype', - """ - dtype(obj, align=False, copy=False) - - Create a data type object. - - A numpy array is homogeneous, and contains elements described by a - dtype object. A dtype object can be constructed from different - combinations of fundamental numeric types. - - Parameters - ---------- - obj - Object to be converted to a data type object. - align : bool, optional - Add padding to the fields to match what a C compiler would output - for a similar C-struct. Can be ``True`` only if `obj` is a dictionary - or a comma-separated string. - copy : bool, optional - Make a new copy of the data-type object. If ``False``, the result - may just be a reference to a built-in data-type object. - - Examples - -------- - Using array-scalar type: - - >>> np.dtype(np.int16) - dtype('int16') - - Record, one field name 'f1', containing int16: - - >>> np.dtype([('f1', np.int16)]) - dtype([('f1', '>> np.dtype([('f1', [('f1', np.int16)])]) - dtype([('f1', [('f1', '>> np.dtype([('f1', np.uint), ('f2', np.int32)]) - dtype([('f1', '>> np.dtype([('a','f8'),('b','S10')]) - dtype([('a', '>> np.dtype("i4, (2,3)f8") - dtype([('f0', '>> np.dtype([('hello',(np.int,3)),('world',np.void,10)]) - dtype([('hello', '>> np.dtype((np.int16, {'x':(np.int8,0), 'y':(np.int8,1)})) - dtype(('>> np.dtype({'names':['gender','age'], 'formats':['S1',np.uint8]}) - dtype([('gender', '|S1'), ('age', '|u1')]) - - Offsets in bytes, here 0 and 25: - - >>> np.dtype({'surname':('S25',0),'age':(np.uint8,25)}) - dtype([('surname', '|S25'), ('age', '|u1')]) - - """) - ############################################################################### # # flatiter @@ -150,7 +16,12 @@ # ############################################################################### -# attributes +add_newdoc('numpy.core', 'flatiter', + """ + """) + +# flatiter attributes + add_newdoc('numpy.core', 'flatiter', ('base', """documentation needed @@ -170,9 +41,8 @@ """)) +# flatiter functions - -# functions add_newdoc('numpy.core', 'flatiter', ('__array__', """__array__(type=None) Get array from iterator @@ -191,37 +61,37 @@ # ############################################################################### +add_newdoc('numpy.core', 'broadcast', + """ + """) + # attributes + add_newdoc('numpy.core', 'broadcast', ('index', """current index in broadcasted result """)) - add_newdoc('numpy.core', 'broadcast', ('iters', """tuple of individual iterators """)) - add_newdoc('numpy.core', 'broadcast', ('nd', """number of dimensions of broadcasted result """)) - add_newdoc('numpy.core', 'broadcast', ('numiter', """number of iterators """)) - add_newdoc('numpy.core', 'broadcast', ('shape', """shape of broadcasted result """)) - add_newdoc('numpy.core', 'broadcast', ('size', """total size of broadcasted result @@ -2581,6 +2451,25 @@ """)) + +############################################################################## +# +# umath functions +# +############################################################################## + +add_newdoc('numpy.core.umath', 'frexp', + """ + """) + +add_newdoc('numpy.core.umath', 'frompyfunc', + """ + """) + +add_newdoc('numpy.core.umath', 'ldexp', + """ + """) + add_newdoc('numpy.core.umath','geterrobj', """geterrobj() @@ -2610,6 +2499,64 @@ """) + +############################################################################## +# +# lib._compile_base functions +# +############################################################################## + +add_newdoc('numpy.lib._compile_base', 'digitize', + """ + digitize(x,bins) + + Return the index of the bin to which each value of x belongs. + + Each index i returned is such that bins[i-1] <= x < bins[i] if + bins is monotonically increasing, or bins [i-1] > x >= bins[i] if + bins is monotonically decreasing. + + Beyond the bounds of the bins 0 or len(bins) is returned as appropriate. + """) + +add_newdoc('numpy.lib._compile_base', 'bincount', + """ + bincount(x,weights=None) + + Return the number of occurrences of each value in x. + + x must be a list of non-negative integers. The output, b[i], + represents the number of times that i is found in x. If weights + is specified, every occurrence of i at a position p contributes + weights[p] instead of 1. + + See also: histogram, digitize, unique. + """) + +add_newdoc('numpy.lib._compile_base', 'add_docstring', + """ + docstring(obj, docstring) + + Add a docstring to a built-in obj if possible. + If the obj already has a docstring raise a RuntimeError + If this routine does not know how to add a docstring to the object + raise a TypeError + """) + + +############################################################################## +# +# Documentation for ufunc attributes and methods +# +############################################################################## + + +############################################################################## +# +# ufunc object +# +############################################################################## + add_newdoc('numpy.core', 'ufunc', """ Functions that operate element by element on whole arrays. @@ -2662,6 +2609,12 @@ """) +############################################################################## +# +# ufunc methods +# +############################################################################## + add_newdoc('numpy.core', 'ufunc', ('reduce', """ reduce(array, axis=0, dtype=None, out=None) @@ -2842,8 +2795,218 @@ """)) -add_newdoc('numpy.core', 'dtype', ('newbyteorder', + +############################################################################## +# +# Documentation for dtype attributes and methods +# +############################################################################## + +############################################################################## +# +# dtype object +# +############################################################################## + +add_newdoc('numpy.core.multiarray', 'dtype', + """ + dtype(obj, align=False, copy=False) + + Create a data type object. + + A numpy array is homogeneous, and contains elements described by a + dtype object. A dtype object can be constructed from different + combinations of fundamental numeric types. + + Parameters + ---------- + obj + Object to be converted to a data type object. + align : bool, optional + Add padding to the fields to match what a C compiler would output + for a similar C-struct. Can be ``True`` only if `obj` is a dictionary + or a comma-separated string. + copy : bool, optional + Make a new copy of the data-type object. If ``False``, the result + may just be a reference to a built-in data-type object. + + Examples + -------- + Using array-scalar type: + + >>> np.dtype(np.int16) + dtype('int16') + + Record, one field name 'f1', containing int16: + + >>> np.dtype([('f1', np.int16)]) + dtype([('f1', '>> np.dtype([('f1', [('f1', np.int16)])]) + dtype([('f1', [('f1', '>> np.dtype([('f1', np.uint), ('f2', np.int32)]) + dtype([('f1', '>> np.dtype([('a','f8'),('b','S10')]) + dtype([('a', '>> np.dtype("i4, (2,3)f8") + dtype([('f0', '>> np.dtype([('hello',(np.int,3)),('world',np.void,10)]) + dtype([('hello', '>> np.dtype((np.int16, {'x':(np.int8,0), 'y':(np.int8,1)})) + dtype(('>> np.dtype({'names':['gender','age'], 'formats':['S1',np.uint8]}) + dtype([('gender', '|S1'), ('age', '|u1')]) + + Offsets in bytes, here 0 and 25: + + >>> np.dtype({'surname':('S25',0),'age':(np.uint8,25)}) + dtype([('surname', '|S25'), ('age', '|u1')]) + + """) + +############################################################################## +# +# dtype attributes +# +############################################################################## + +add_newdoc('numpy.core.multiarray', 'dtype', ('alignment', + """ + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('byteorder', ''' + dt.byteorder + + String giving byteorder of dtype + + One of: + * '=' - native byteorder + * '<' - little endian + * '>' - big endian + * '|' - endian not relevant + + Examples + -------- + >>> dt = np.dtype('i2') + >>> dt.byteorder + '=' + >>> # endian is not relevant for 8 bit numbers + >>> np.dtype('i1').byteorder + '|' + >>> # or ASCII strings + >>> np.dtype('S2').byteorder + '|' + >>> # Even if specific code is given, and it is native + >>> # '=' is the byteorder + >>> import sys + >>> sys_is_le = sys.byteorder == 'little' + >>> native_code = sys_is_le and '<' or '>' + >>> swapped_code = sys_is_le and '>' or '<' + >>> dt = np.dtype(native_code + 'i2') + >>> dt.byteorder + '=' + >>> # Swapped code shows up as itself + >>> dt = np.dtype(swapped_code + 'i2') + >>> dt.byteorder == swapped_code + True + ''')) + +add_newdoc('numpy.core.multiarray', 'dtype', ('char', + """ + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('descr', + """ + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('fields', + """ + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('flags', + """ + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('hasobject', + """ + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('isbuiltin', + """ + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('isnative', + """ + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('itemsize', + """ + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('kind', + """ + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('name', + """ + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('names', + """ + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('num', + """ + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('shape', + """ + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('str', + """ + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('subdtype', + """ + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('type', + """ + """)) + +############################################################################## +# +# dtype methods +# +############################################################################## + +add_newdoc('numpy.core.multiarray', 'dtype', ('newbyteorder', + ''' newbyteorder(new_order='S') Return a new dtype with a different byte order. @@ -2901,40 +3064,410 @@ True ''')) -add_newdoc('numpy.core', 'dtype', ('byteorder', - ''' - dt.byteorder - String giving byteorder of dtype +############################################################################## +# +# nd_grid instances +# +############################################################################## - One of: - * '=' - native byteorder - * '<' - little endian - * '>' - big endian - * '|' - endian not relevant +add_newdoc('numpy.lib.index_tricks', 'mgrid', + """ + Construct a multi-dimensional filled "meshgrid". + Returns a mesh-grid when indexed. The dimension and number of the + output arrays are equal to the number of indexing dimensions. If + the step length is not a complex number, then the stop is not + inclusive. + + However, if the step length is a **complex number** (e.g. 5j), + then the integer part of its magnitude is interpreted as + specifying the number of points to create between the start and + stop values, where the stop value **is inclusive**. + + See also + -------- + ogrid + Examples -------- - >>> dt = np.dtype('i2') - >>> dt.byteorder - '=' - >>> # endian is not relevant for 8 bit numbers - >>> np.dtype('i1').byteorder - '|' - >>> # or ASCII strings - >>> np.dtype('S2').byteorder - '|' - >>> # Even if specific code is given, and it is native - >>> # '=' is the byteorder - >>> import sys - >>> sys_is_le = sys.byteorder == 'little' - >>> native_code = sys_is_le and '<' or '>' - >>> swapped_code = sys_is_le and '>' or '<' - >>> dt = np.dtype(native_code + 'i2') - >>> dt.byteorder - '=' - >>> # Swapped code shows up as itself - >>> dt = np.dtype(swapped_code + 'i2') - >>> dt.byteorder == swapped_code - True - ''')) + >>> np.mgrid[0:5,0:5] + array([[[0, 0, 0, 0, 0], + [1, 1, 1, 1, 1], + [2, 2, 2, 2, 2], + [3, 3, 3, 3, 3], + [4, 4, 4, 4, 4]], + + [[0, 1, 2, 3, 4], + [0, 1, 2, 3, 4], + [0, 1, 2, 3, 4], + [0, 1, 2, 3, 4], + [0, 1, 2, 3, 4]]]) + >>> np.mgrid[-1:1:5j] + array([-1. , -0.5, 0. , 0.5, 1. ]) + """) + +add_newdoc('numpy.lib.index_tricks', 'ogrid', + """ + Construct a multi-dimensional open "meshgrid". + + Returns an 'open' mesh-grid when indexed. The dimension and + number of the output arrays are equal to the number of indexing + dimensions. If the step length is not a complex number, then the + stop is not inclusive. + + The returned mesh-grid is open (or not fleshed out), so that only + one-dimension of each returned argument is greater than 1 + + If the step length is a **complex number** (e.g. 5j), then the + integer part of its magnitude is interpreted as specifying the + number of points to create between the start and stop values, + where the stop value **is inclusive**. + + See also + -------- + mgrid + + Examples + -------- + >>> np.ogrid[0:5,0:5] + [array([[0], + [1], + [2], + [3], + [4]]), array([[0, 1, 2, 3, 4]])] + """) + + +############################################################################## +# +# Documentation for `generic` attributes and methods +# +############################################################################## + +add_newdoc('numpy.core.numerictypes', 'generic', + """ + """) + +# Attributes + +add_newdoc('numpy.core.numerictypes', 'generic', ('T', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('base', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('data', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('dtype', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('flags', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('flat', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('imag', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('itemsize', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('nbytes', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('ndim', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('real', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('shape', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('size', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('strides', + """ + """)) + +# Methods + +add_newdoc('numpy.core.numerictypes', 'generic', ('all', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('any', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('argmax', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('argmin', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('argsort', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('astype', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('byteswap', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('choose', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('clip', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('compress', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('conjugate', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('copy', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('cumprod', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('cumsum', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('diagonal', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('dump', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('dumps', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('fill', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('flatten', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('getfield', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('item', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('itemset', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('max', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('mean', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('min', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('newbyteorder', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('nonzero', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('prod', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('ptp', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('put', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('ravel', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('repeat', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('reshape', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('resize', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('round', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('searchsorted', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('setfield', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('setflags', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('sort', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('squeeze', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('std', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('sum', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('swapaxes', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('take', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('tofile', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('tolist', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('tostring', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('trace', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('transpose', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('var', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('view', + """ + """)) + + +############################################################################## +# +# Documentation for other scalar classes +# +############################################################################## + +add_newdoc('numpy.core.numerictypes', 'bool_', + """ + """) + +add_newdoc('numpy.core.numerictypes', 'complex64', + """ + """) + +add_newdoc('numpy.core.numerictypes', 'complex128', + """ + """) + +add_newdoc('numpy.core.numerictypes', 'complex256', + """ + """) + +add_newdoc('numpy.core.numerictypes', 'float32', + """ + """) + +add_newdoc('numpy.core.numerictypes', 'float64', + """ + """) + +add_newdoc('numpy.core.numerictypes', 'float96', + """ + """) + +add_newdoc('numpy.core.numerictypes', 'float128', + """ + """) + +add_newdoc('numpy.core.numerictypes', 'int8', + """ + """) + +add_newdoc('numpy.core.numerictypes', 'int16', + """ + """) + +add_newdoc('numpy.core.numerictypes', 'int32', + """ + """) + +add_newdoc('numpy.core.numerictypes', 'int64', + """ + """) + +add_newdoc('numpy.core.numerictypes', 'object_', + """ + """) Modified: trunk/numpy/lib/function_base.py =================================================================== --- trunk/numpy/lib/function_base.py 2009-02-14 17:03:51 UTC (rev 6363) +++ trunk/numpy/lib/function_base.py 2009-02-14 22:09:26 UTC (rev 6364) @@ -1073,53 +1073,6 @@ else: return a[slice1]-a[slice2] -try: - add_docstring(digitize, -r"""digitize(x,bins) - -Return the index of the bin to which each value of x belongs. - -Each index i returned is such that bins[i-1] <= x < bins[i] if -bins is monotonically increasing, or bins [i-1] > x >= bins[i] if -bins is monotonically decreasing. - -Beyond the bounds of the bins 0 or len(bins) is returned as appropriate. - -""") -except RuntimeError: - pass - -try: - add_docstring(bincount, -r"""bincount(x,weights=None) - -Return the number of occurrences of each value in x. - -x must be a list of non-negative integers. The output, b[i], -represents the number of times that i is found in x. If weights -is specified, every occurrence of i at a position p contributes -weights[p] instead of 1. - -See also: histogram, digitize, unique. - -""") -except RuntimeError: - pass - -try: - add_docstring(add_docstring, -r"""docstring(obj, docstring) - -Add a docstring to a built-in obj if possible. -If the obj already has a docstring raise a RuntimeError -If this routine does not know how to add a docstring to the object -raise a TypeError - -""") -except RuntimeError: - pass - - def interp(x, xp, fp, left=None, right=None): """ One-dimensional linear interpolation. Modified: trunk/numpy/lib/index_tricks.py =================================================================== --- trunk/numpy/lib/index_tricks.py 2009-02-14 17:03:51 UTC (rev 6363) +++ trunk/numpy/lib/index_tricks.py 2009-02-14 22:09:26 UTC (rev 6364) @@ -212,6 +212,8 @@ mgrid = nd_grid(sparse=False) ogrid = nd_grid(sparse=True) +mgrid.__doc__ = None # set in numpy.add_newdocs +ogrid.__doc__ = None # set in numpy.add_newdocs class AxisConcatenator(object): """Translates slice objects to concatenation along an axis. Modified: trunk/numpy/lib/src/_compiled_base.c =================================================================== --- trunk/numpy/lib/src/_compiled_base.c 2009-02-14 17:03:51 UTC (rev 6363) +++ trunk/numpy/lib/src/_compiled_base.c 2009-02-14 22:09:26 UTC (rev 6364) @@ -494,35 +494,46 @@ #define _TESTDOC1(typebase) (obj->ob_type == &Py##typebase##_Type) #define _TESTDOC2(typebase) (obj->ob_type == Py##typebase##_TypePtr) -#define _ADDDOC(typebase, doc, name) { \ +#define _ADDDOC(typebase, doc, name) do { \ Py##typebase##Object *new = (Py##typebase##Object *)obj; \ if (!(doc)) { \ doc = docstr; \ } \ else { \ - PyErr_Format(PyExc_RuntimeError, \ - "%s method %s",name, msg); \ + PyErr_Format(PyExc_RuntimeError, "%s method %s", name, msg); \ return NULL; \ } \ + } while (0) + + if (_TESTDOC1(CFunction)) + _ADDDOC(CFunction, new->m_ml->ml_doc, new->m_ml->ml_name); + else if (_TESTDOC1(Type)) + _ADDDOC(Type, new->tp_doc, new->tp_name); + else if (_TESTDOC2(MemberDescr)) + _ADDDOC(MemberDescr, new->d_member->doc, new->d_member->name); + else if (_TESTDOC2(GetSetDescr)) + _ADDDOC(GetSetDescr, new->d_getset->doc, new->d_getset->name); + else if (_TESTDOC2(MethodDescr)) + _ADDDOC(MethodDescr, new->d_method->ml_doc, new->d_method->ml_name); + else { + PyObject *doc_attr; + + doc_attr = PyObject_GetAttrString(obj, "__doc__"); + if (doc_attr != NULL && doc_attr != Py_None) { + PyErr_Format(PyExc_RuntimeError, "object %s", msg); + return NULL; + } + Py_XDECREF(doc_attr); + + if (PyObject_SetAttrString(obj, "__doc__", str) < 0) { + PyErr_SetString(PyExc_TypeError, + "Cannot set a docstring for that object"); + return NULL; + } + Py_INCREF(Py_None); + return Py_None; } - if _TESTDOC1(CFunction) - _ADDDOC(CFunction, new->m_ml->ml_doc, new->m_ml->ml_name) - else if _TESTDOC1(Type) - _ADDDOC(Type, new->tp_doc, new->tp_name) - else if _TESTDOC2(MemberDescr) - _ADDDOC(MemberDescr, new->d_member->doc, new->d_member->name) - else if _TESTDOC2(GetSetDescr) - _ADDDOC(GetSetDescr, new->d_getset->doc, new->d_getset->name) - else if _TESTDOC2(MethodDescr) - _ADDDOC(MethodDescr, new->d_method->ml_doc, - new->d_method->ml_name) - else { - PyErr_SetString(PyExc_TypeError, - "Cannot set a docstring for that object"); - return NULL; - } - #undef _TESTDOC1 #undef _TESTDOC2 #undef _ADDDOC From numpy-svn at scipy.org Sat Feb 14 17:11:00 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sat, 14 Feb 2009 16:11:00 -0600 (CST) Subject: [Numpy-svn] r6365 - in trunk/numpy: . lib/src Message-ID: <20090214221100.779BDC7C03A@scipy.org> Author: ptvirtan Date: 2009-02-14 16:10:24 -0600 (Sat, 14 Feb 2009) New Revision: 6365 Modified: trunk/numpy/add_newdocs.py trunk/numpy/lib/src/_compiled_base.c Log: Move (un)packbits docstrings to add_newdocs.py. Fix typos. Modified: trunk/numpy/add_newdocs.py =================================================================== --- trunk/numpy/add_newdocs.py 2009-02-14 22:09:26 UTC (rev 6364) +++ trunk/numpy/add_newdocs.py 2009-02-14 22:10:24 UTC (rev 6365) @@ -2502,11 +2502,11 @@ ############################################################################## # -# lib._compile_base functions +# lib._compiled_base functions # ############################################################################## -add_newdoc('numpy.lib._compile_base', 'digitize', +add_newdoc('numpy.lib._compiled_base', 'digitize', """ digitize(x,bins) @@ -2519,7 +2519,7 @@ Beyond the bounds of the bins 0 or len(bins) is returned as appropriate. """) -add_newdoc('numpy.lib._compile_base', 'bincount', +add_newdoc('numpy.lib._compiled_base', 'bincount', """ bincount(x,weights=None) @@ -2533,7 +2533,7 @@ See also: histogram, digitize, unique. """) -add_newdoc('numpy.lib._compile_base', 'add_docstring', +add_newdoc('numpy.lib._compiled_base', 'add_docstring', """ docstring(obj, docstring) @@ -2543,7 +2543,45 @@ raise a TypeError """) +add_newdoc('numpy.lib._compiled_base', 'packbits', + """ + out = numpy.packbits(myarray, axis=None) + myarray : an integer type array whose elements should be packed to bits + + This routine packs the elements of a binary-valued dataset into a + NumPy array of type uint8 ('B') whose bits correspond to + the logical (0 or nonzero) value of the input elements. + The dimension over-which bit-packing is done is given by axis. + The shape of the output has the same number of dimensions as the input + (unless axis is None, in which case the output is 1-d). + + Example: + >>> a = array([[[1,0,1], + ... [0,1,0]], + ... [[1,1,0], + ... [0,0,1]]]) + >>> b = numpy.packbits(a,axis=-1) + >>> b + array([[[160],[64]],[[192],[32]]], dtype=uint8) + + Note that 160 = 128 + 32 + 192 = 128 + 64 + """) + +add_newdoc('numpy.lib._compiled_base', 'unpackbits', + """ + out = numpy.unpackbits(myarray, axis=None) + + myarray - array of uint8 type where each element represents a bit-field + that should be unpacked into a boolean output array + + The shape of the output array is either 1-d (if axis is None) or + the same shape as the input array with unpacking done along the + axis specified. + """) + + ############################################################################## # # Documentation for ufunc attributes and methods Modified: trunk/numpy/lib/src/_compiled_base.c =================================================================== --- trunk/numpy/lib/src/_compiled_base.c 2009-02-14 22:09:26 UTC (rev 6364) +++ trunk/numpy/lib/src/_compiled_base.c 2009-02-14 22:10:24 UTC (rev 6365) @@ -544,35 +544,6 @@ } -static char packbits_doc[] = - "out = numpy.packbits(myarray, axis=None)\n\n" - " myarray : an integer type array whose elements should be packed to bits\n\n" - " This routine packs the elements of a binary-valued dataset into a\n" - " NumPy array of type uint8 ('B') whose bits correspond to\n" - " the logical (0 or nonzero) value of the input elements.\n" - " The dimension over-which bit-packing is done is given by axis.\n" - " The shape of the output has the same number of dimensions as the input\n" - " (unless axis is None, in which case the output is 1-d).\n" - "\n" - " Example:\n" - " >>> a = array([[[1,0,1],\n" - " ... [0,1,0]],\n" - " ... [[1,1,0],\n" - " ... [0,0,1]]])\n" - " >>> b = numpy.packbits(a,axis=-1)\n" - " >>> b\n" - " array([[[160],[64]],[[192],[32]]], dtype=uint8)\n\n" - " Note that 160 = 128 + 32\n" - " 192 = 128 + 64\n"; - -static char unpackbits_doc[] = - "out = numpy.unpackbits(myarray, axis=None)\n\n" - " myarray - array of uint8 type where each element represents a bit-field\n" - " that should be unpacked into a boolean output array\n\n" - " The shape of the output array is either 1-d (if axis is None) or\n" - " the same shape as the input array with unpacking done along the\n" - " axis specified."; - /* PACKBITS This function packs binary (0 or 1) 1-bit per pixel arrays @@ -820,9 +791,9 @@ {"add_docstring", (PyCFunction)arr_add_docstring, METH_VARARGS, NULL}, {"packbits", (PyCFunction)io_pack, METH_VARARGS | METH_KEYWORDS, - packbits_doc}, + NULL}, {"unpackbits", (PyCFunction)io_unpack, METH_VARARGS | METH_KEYWORDS, - unpackbits_doc}, + NULL}, {NULL, NULL} /* sentinel */ }; From numpy-svn at scipy.org Sat Feb 14 17:12:15 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sat, 14 Feb 2009 16:12:15 -0600 (CST) Subject: [Numpy-svn] r6366 - trunk/numpy/doc Message-ID: <20090214221215.27592C7C023@scipy.org> Author: ptvirtan Date: 2009-02-14 16:11:19 -0600 (Sat, 14 Feb 2009) New Revision: 6366 Added: trunk/numpy/doc/constants.py Log: Document constants in numpy.doc.constants Added: trunk/numpy/doc/constants.py =================================================================== --- trunk/numpy/doc/constants.py 2009-02-14 22:10:24 UTC (rev 6365) +++ trunk/numpy/doc/constants.py 2009-02-14 22:11:19 UTC (rev 6366) @@ -0,0 +1,80 @@ +""" +========= +Constants +========= + +Numpy includes several constants: + +%(constant_list)s +""" +import textwrap + +# Maintain same format as in numpy.add_newdocs +constants = [] +def add_newdoc(module, name, doc): + constants.append((name, doc)) + +add_newdoc('numpy', 'Inf', + """ + """) + +add_newdoc('numpy', 'Infinity', + """ + """) + +add_newdoc('numpy', 'NAN', + """ + """) + +add_newdoc('numpy', 'NINF', + """ + """) + +add_newdoc('numpy', 'NZERO', + """ + """) + +add_newdoc('numpy', 'NaN', + """ + """) + +add_newdoc('numpy', 'PINF', + """ + """) + +add_newdoc('numpy', 'PZERO', + """ + """) + +add_newdoc('numpy', 'e', + """ + """) + +add_newdoc('numpy', 'inf', + """ + """) + +add_newdoc('numpy', 'infty', + """ + """) + +add_newdoc('numpy', 'nan', + """ + """) + +add_newdoc('numpy', 'newaxis', + """ + """) + +if __doc__: + constants_str = [] + constants.sort() + for name, doc in constants: + constants_str.append(""".. const:: %s\n %s""" % ( + name, textwrap.dedent(doc).replace("\n", "\n "))) + constants_str = "\n".join(constants_str) + + __doc__ = __doc__ % dict(constant_list=constants_str) + del constants_str, name, doc + +del constants, add_newdoc From numpy-svn at scipy.org Sat Feb 14 17:38:56 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sat, 14 Feb 2009 16:38:56 -0600 (CST) Subject: [Numpy-svn] r6367 - trunk/numpy/lib Message-ID: <20090214223856.F23B5C7C023@scipy.org> Author: ptvirtan Date: 2009-02-14 16:38:32 -0600 (Sat, 14 Feb 2009) New Revision: 6367 Modified: trunk/numpy/lib/__init__.py trunk/numpy/lib/info.py Log: Move numpy.lib __doc__ back to info.py; was moved to __init__.py by mistake. Modified: trunk/numpy/lib/__init__.py =================================================================== --- trunk/numpy/lib/__init__.py 2009-02-14 22:11:19 UTC (rev 6366) +++ trunk/numpy/lib/__init__.py 2009-02-14 22:38:32 UTC (rev 6367) @@ -1,151 +1,3 @@ -""" -Basic functions used by several sub-packages and -useful to have in the main name-space. - -Type Handling -------------- -================ =================== -iscomplexobj Test for complex object, scalar result -isrealobj Test for real object, scalar result -iscomplex Test for complex elements, array result -isreal Test for real elements, array result -imag Imaginary part -real Real part -real_if_close Turns complex number with tiny imaginary part to real -isneginf Tests for negative infinity, array result -isposinf Tests for positive infinity, array result -isnan Tests for nans, array result -isinf Tests for infinity, array result -isfinite Tests for finite numbers, array result -isscalar True if argument is a scalar -nan_to_num Replaces NaN's with 0 and infinities with large numbers -cast Dictionary of functions to force cast to each type -common_type Determine the minimum common type code for a group - of arrays -mintypecode Return minimal allowed common typecode. -================ =================== - -Index Tricks ------------- -================ =================== -mgrid Method which allows easy construction of N-d - 'mesh-grids' -``r_`` Append and construct arrays: turns slice objects into - ranges and concatenates them, for 2d arrays appends rows. -index_exp Konrad Hinsen's index_expression class instance which - can be useful for building complicated slicing syntax. -================ =================== - -Useful Functions ----------------- -================ =================== -select Extension of where to multiple conditions and choices -extract Extract 1d array from flattened array according to mask -insert Insert 1d array of values into Nd array according to mask -linspace Evenly spaced samples in linear space -logspace Evenly spaced samples in logarithmic space -fix Round x to nearest integer towards zero -mod Modulo mod(x,y) = x % y except keeps sign of y -amax Array maximum along axis -amin Array minimum along axis -ptp Array max-min along axis -cumsum Cumulative sum along axis -prod Product of elements along axis -cumprod Cumluative product along axis -diff Discrete differences along axis -angle Returns angle of complex argument -unwrap Unwrap phase along given axis (1-d algorithm) -sort_complex Sort a complex-array (based on real, then imaginary) -trim_zeros Trim the leading and trailing zeros from 1D array. -vectorize A class that wraps a Python function taking scalar - arguments into a generalized function which can handle - arrays of arguments using the broadcast rules of - numerix Python. -================ =================== - -Shape Manipulation ------------------- -================ =================== -squeeze Return a with length-one dimensions removed. -atleast_1d Force arrays to be > 1D -atleast_2d Force arrays to be > 2D -atleast_3d Force arrays to be > 3D -vstack Stack arrays vertically (row on row) -hstack Stack arrays horizontally (column on column) -column_stack Stack 1D arrays as columns into 2D array -dstack Stack arrays depthwise (along third dimension) -split Divide array into a list of sub-arrays -hsplit Split into columns -vsplit Split into rows -dsplit Split along third dimension -================ =================== - -Matrix (2D Array) Manipulations -------------------------------- -================ =================== -fliplr 2D array with columns flipped -flipud 2D array with rows flipped -rot90 Rotate a 2D array a multiple of 90 degrees -eye Return a 2D array with ones down a given diagonal -diag Construct a 2D array from a vector, or return a given - diagonal from a 2D array. -mat Construct a Matrix -bmat Build a Matrix from blocks -================ =================== - -Polynomials ------------ -================ =================== -poly1d A one-dimensional polynomial class -poly Return polynomial coefficients from roots -roots Find roots of polynomial given coefficients -polyint Integrate polynomial -polyder Differentiate polynomial -polyadd Add polynomials -polysub Substract polynomials -polymul Multiply polynomials -polydiv Divide polynomials -polyval Evaluate polynomial at given argument -================ =================== - -Import Tricks -------------- -================ =================== -ppimport Postpone module import until trying to use it -ppimport_attr Postpone module import until trying to use its attribute -ppresolve Import postponed module and return it. -================ =================== - -Machine Arithmetics -------------------- -================ =================== -machar_single Single precision floating point arithmetic parameters -machar_double Double precision floating point arithmetic parameters -================ =================== - -Threading Tricks ----------------- -================ =================== -ParallelExec Execute commands in parallel thread. -================ =================== - -1D Array Set Operations ------------------------ -Set operations for 1D numeric arrays based on sort() function. - -================ =================== -ediff1d Array difference (auxiliary function). -unique1d Unique elements of 1D array. -intersect1d Intersection of 1D arrays with unique elements. -intersect1d_nu Intersection of 1D arrays with any elements. -setxor1d Set exclusive-or of 1D arrays with unique elements. -setmember1d Return an array of shape of ar1 containing 1 where - the elements of ar1 are in ar2 and 0 otherwise. -union1d Union of 1D arrays with unique elements. -setdiff1d Set difference of 1D arrays with unique elements. -================ =================== - -""" from info import __doc__ from numpy.version import version as __version__ Modified: trunk/numpy/lib/info.py =================================================================== --- trunk/numpy/lib/info.py 2009-02-14 22:11:19 UTC (rev 6366) +++ trunk/numpy/lib/info.py 2009-02-14 22:38:32 UTC (rev 6367) @@ -1,121 +1,149 @@ -__doc_title__ = """Basic functions used by several sub-packages and -useful to have in the main name-space.""" -__doc__ = __doc_title__ + """ +""" +Basic functions used by several sub-packages and +useful to have in the main name-space. -Type handling -============== -iscomplexobj -- Test for complex object, scalar result -isrealobj -- Test for real object, scalar result -iscomplex -- Test for complex elements, array result -isreal -- Test for real elements, array result -imag -- Imaginary part -real -- Real part -real_if_close -- Turns complex number with tiny imaginary part to real -isneginf -- Tests for negative infinity ---| -isposinf -- Tests for positive infinity | -isnan -- Tests for nans |---- array results -isinf -- Tests for infinity | -isfinite -- Tests for finite numbers ---| -isscalar -- True if argument is a scalar -nan_to_num -- Replaces NaN's with 0 and infinities with large numbers -cast -- Dictionary of functions to force cast to each type -common_type -- Determine the 'minimum common type code' for a group - of arrays -mintypecode -- Return minimal allowed common typecode. +Type Handling +------------- +================ =================== +iscomplexobj Test for complex object, scalar result +isrealobj Test for real object, scalar result +iscomplex Test for complex elements, array result +isreal Test for real elements, array result +imag Imaginary part +real Real part +real_if_close Turns complex number with tiny imaginary part to real +isneginf Tests for negative infinity, array result +isposinf Tests for positive infinity, array result +isnan Tests for nans, array result +isinf Tests for infinity, array result +isfinite Tests for finite numbers, array result +isscalar True if argument is a scalar +nan_to_num Replaces NaN's with 0 and infinities with large numbers +cast Dictionary of functions to force cast to each type +common_type Determine the minimum common type code for a group + of arrays +mintypecode Return minimal allowed common typecode. +================ =================== -Index tricks -================== -mgrid -- Method which allows easy construction of N-d 'mesh-grids' -r_ -- Append and construct arrays: turns slice objects into - ranges and concatenates them, for 2d arrays appends - rows. -index_exp -- Konrad Hinsen's index_expression class instance which - can be useful for building complicated slicing syntax. +Index Tricks +------------ +================ =================== +mgrid Method which allows easy construction of N-d + 'mesh-grids' +``r_`` Append and construct arrays: turns slice objects into + ranges and concatenates them, for 2d arrays appends rows. +index_exp Konrad Hinsen's index_expression class instance which + can be useful for building complicated slicing syntax. +================ =================== -Useful functions -================== -select -- Extension of where to multiple conditions and choices -extract -- Extract 1d array from flattened array according to mask -insert -- Insert 1d array of values into Nd array according to mask -linspace -- Evenly spaced samples in linear space -logspace -- Evenly spaced samples in logarithmic space -fix -- Round x to nearest integer towards zero -mod -- Modulo mod(x,y) = x % y except keeps sign of y -amax -- Array maximum along axis -amin -- Array minimum along axis -ptp -- Array max-min along axis -cumsum -- Cumulative sum along axis -prod -- Product of elements along axis -cumprod -- Cumluative product along axis -diff -- Discrete differences along axis -angle -- Returns angle of complex argument -unwrap -- Unwrap phase along given axis (1-d algorithm) -sort_complex -- Sort a complex-array (based on real, then imaginary) -trim_zeros -- trim the leading and trailing zeros from 1D array. +Useful Functions +---------------- +================ =================== +select Extension of where to multiple conditions and choices +extract Extract 1d array from flattened array according to mask +insert Insert 1d array of values into Nd array according to mask +linspace Evenly spaced samples in linear space +logspace Evenly spaced samples in logarithmic space +fix Round x to nearest integer towards zero +mod Modulo mod(x,y) = x % y except keeps sign of y +amax Array maximum along axis +amin Array minimum along axis +ptp Array max-min along axis +cumsum Cumulative sum along axis +prod Product of elements along axis +cumprod Cumluative product along axis +diff Discrete differences along axis +angle Returns angle of complex argument +unwrap Unwrap phase along given axis (1-d algorithm) +sort_complex Sort a complex-array (based on real, then imaginary) +trim_zeros Trim the leading and trailing zeros from 1D array. +vectorize A class that wraps a Python function taking scalar + arguments into a generalized function which can handle + arrays of arguments using the broadcast rules of + numerix Python. +================ =================== -vectorize -- a class that wraps a Python function taking scalar - arguments into a generalized function which - can handle arrays of arguments using the broadcast - rules of numerix Python. +Shape Manipulation +------------------ +================ =================== +squeeze Return a with length-one dimensions removed. +atleast_1d Force arrays to be > 1D +atleast_2d Force arrays to be > 2D +atleast_3d Force arrays to be > 3D +vstack Stack arrays vertically (row on row) +hstack Stack arrays horizontally (column on column) +column_stack Stack 1D arrays as columns into 2D array +dstack Stack arrays depthwise (along third dimension) +split Divide array into a list of sub-arrays +hsplit Split into columns +vsplit Split into rows +dsplit Split along third dimension +================ =================== -Shape manipulation -=================== -squeeze -- Return a with length-one dimensions removed. -atleast_1d -- Force arrays to be > 1D -atleast_2d -- Force arrays to be > 2D -atleast_3d -- Force arrays to be > 3D -vstack -- Stack arrays vertically (row on row) -hstack -- Stack arrays horizontally (column on column) -column_stack -- Stack 1D arrays as columns into 2D array -dstack -- Stack arrays depthwise (along third dimension) -split -- Divide array into a list of sub-arrays -hsplit -- Split into columns -vsplit -- Split into rows -dsplit -- Split along third dimension +Matrix (2D Array) Manipulations +------------------------------- +================ =================== +fliplr 2D array with columns flipped +flipud 2D array with rows flipped +rot90 Rotate a 2D array a multiple of 90 degrees +eye Return a 2D array with ones down a given diagonal +diag Construct a 2D array from a vector, or return a given + diagonal from a 2D array. +mat Construct a Matrix +bmat Build a Matrix from blocks +================ =================== -Matrix (2d array) manipluations -=============================== -fliplr -- 2D array with columns flipped -flipud -- 2D array with rows flipped -rot90 -- Rotate a 2D array a multiple of 90 degrees -eye -- Return a 2D array with ones down a given diagonal -diag -- Construct a 2D array from a vector, or return a given - diagonal from a 2D array. -mat -- Construct a Matrix -bmat -- Build a Matrix from blocks - Polynomials -============ -poly1d -- A one-dimensional polynomial class +----------- +================ =================== +poly1d A one-dimensional polynomial class +poly Return polynomial coefficients from roots +roots Find roots of polynomial given coefficients +polyint Integrate polynomial +polyder Differentiate polynomial +polyadd Add polynomials +polysub Substract polynomials +polymul Multiply polynomials +polydiv Divide polynomials +polyval Evaluate polynomial at given argument +================ =================== -poly -- Return polynomial coefficients from roots -roots -- Find roots of polynomial given coefficients -polyint -- Integrate polynomial -polyder -- Differentiate polynomial -polyadd -- Add polynomials -polysub -- Substract polynomials -polymul -- Multiply polynomials -polydiv -- Divide polynomials -polyval -- Evaluate polynomial at given argument +Import Tricks +------------- +================ =================== +ppimport Postpone module import until trying to use it +ppimport_attr Postpone module import until trying to use its attribute +ppresolve Import postponed module and return it. +================ =================== -Machine arithmetic -================== -finfo -- Parameters of system floating point arithmetic -iinfo -- Parameters of system integer arithmetic +Machine Arithmetics +------------------- +================ =================== +machar_single Single precision floating point arithmetic parameters +machar_double Double precision floating point arithmetic parameters +================ =================== -1D array set operations -======================= +Threading Tricks +---------------- +================ =================== +ParallelExec Execute commands in parallel thread. +================ =================== + +1D Array Set Operations +----------------------- Set operations for 1D numeric arrays based on sort() function. -ediff1d -- Array difference (auxiliary function). -unique1d -- Unique elements of 1D array. -intersect1d -- Intersection of 1D arrays with unique elements. -intersect1d_nu -- Intersection of 1D arrays with any elements. -setxor1d -- Set exclusive-or of 1D arrays with unique elements. -setmember1d -- Return an array of shape of ar1 containing 1 where - the elements of ar1 are in ar2 and 0 otherwise. -union1d -- Union of 1D arrays with unique elements. -setdiff1d -- Set difference of 1D arrays with unique elements. +================ =================== +ediff1d Array difference (auxiliary function). +unique1d Unique elements of 1D array. +intersect1d Intersection of 1D arrays with unique elements. +intersect1d_nu Intersection of 1D arrays with any elements. +setxor1d Set exclusive-or of 1D arrays with unique elements. +setmember1d Return an array of shape of ar1 containing 1 where + the elements of ar1 are in ar2 and 0 otherwise. +union1d Union of 1D arrays with unique elements. +setdiff1d Set difference of 1D arrays with unique elements. +================ =================== """ From numpy-svn at scipy.org Sat Feb 14 17:42:32 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sat, 14 Feb 2009 16:42:32 -0600 (CST) Subject: [Numpy-svn] r6368 - in trunk/numpy/lib: . tests Message-ID: <20090214224232.32F50C7C036@scipy.org> Author: pierregm Date: 2009-02-14 16:42:29 -0600 (Sat, 14 Feb 2009) New Revision: 6368 Modified: trunk/numpy/lib/io.py trunk/numpy/lib/tests/test__iotools.py trunk/numpy/lib/tests/test_io.py Log: * genfromtxt : fixed case when using explicit converters and explicit dtype. Modified: trunk/numpy/lib/io.py =================================================================== --- trunk/numpy/lib/io.py 2009-02-14 22:38:32 UTC (rev 6367) +++ trunk/numpy/lib/io.py 2009-02-14 22:42:29 UTC (rev 6368) @@ -837,6 +837,7 @@ missing_values = [_.missing_values for _ in converters] # Update the converters to use the user-defined ones + uc_update = [] for (i, conv) in user_converters.iteritems(): # If the converter is specified by column names, use the index instead if _is_string_like(i): @@ -850,6 +851,9 @@ converters[i].update(conv, default=None, missing_values=missing_values[i], locked=True) + uc_update.append((i, conv)) + # Make sure we have the corrected keys in user_converters... + user_converters.update(uc_update) # Reset the names to match the usecols if (not first_line) and usecols: @@ -960,8 +964,14 @@ descr.append(('', ttype)) else: descr.append(('', dtype)) + # So we changed the dtype ? if not ishomogeneous: - dtype = np.dtype(descr) + # We have more than one field + if len(descr) > 1: + dtype = np.dtype(descr) + # We have only one field: drop the name if not needed. + else: + dtype = np.dtype(ttype) # output = np.array(data, dtype) if usemask: Modified: trunk/numpy/lib/tests/test__iotools.py =================================================================== --- trunk/numpy/lib/tests/test__iotools.py 2009-02-14 22:38:32 UTC (rev 6367) +++ trunk/numpy/lib/tests/test__iotools.py 2009-02-14 22:42:29 UTC (rev 6368) @@ -142,6 +142,14 @@ assert_equal(test, date(2009, 01, 01)) test = convert('') assert_equal(test, date(2000, 01, 01)) + # + def test_string_to_object(self): + "Make sure that string-to-object functions are properly recognized" + from datetime import date + import time + conv = StringConverter(lambda s: date(*(time.strptime(s)[:3]))) + assert_equal(conv._mapper[-2][0](0), 0j) + assert(hasattr(conv, 'default')) #------------------------------------------------------------------------------- Modified: trunk/numpy/lib/tests/test_io.py =================================================================== --- trunk/numpy/lib/tests/test_io.py 2009-02-14 22:38:32 UTC (rev 6367) +++ trunk/numpy/lib/tests/test_io.py 2009-02-14 22:42:29 UTC (rev 6368) @@ -573,7 +573,7 @@ def test_dtype_with_object(self): - "Test using an explicit dtype qith an object" + "Test using an explicit dtype with an object" from datetime import date import time data = """ @@ -598,9 +598,18 @@ else: errmsg = "Nested dtype involving objects should be supported." raise AssertionError(errmsg) - + def test_userconverters_with_explicit_dtype(self): + "Test user_converters w/ explicit (standard) dtype" + data = StringIO.StringIO('skip,skip,2001-01-01,1.0,skip') + test = np.genfromtxt(data, delimiter=",", names=None, dtype=float, + usecols=(2, 3), converters={2: str}) + control = np.array([('2001-01-01', 1.)], + dtype=[('', '|S10'), ('', float)]) + assert_equal(test, control) + + def test_spacedelimiter(self): "Test space delimiter" data = StringIO.StringIO("1 2 3 4 5\n6 7 8 9 10") From numpy-svn at scipy.org Sun Feb 15 07:06:40 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sun, 15 Feb 2009 06:06:40 -0600 (CST) Subject: [Numpy-svn] r6369 - in branches/dynamic_cpu_configuration: . doc/source/reference doc/sphinxext numpy numpy/core numpy/core/code_generators numpy/core/src numpy/core/tests numpy/distutils numpy/distutils/command numpy/distutils/fcompiler numpy/doc numpy/f2py numpy/lib numpy/lib/src numpy/lib/tests numpy/linalg numpy/linalg/tests numpy/ma numpy/ma/tests numpy/numarray numpy/oldnumeric numpy/testing numpy/testing/tests Message-ID: <20090215120640.7114CC7C017@scipy.org> Author: cdavid Date: 2009-02-15 06:03:15 -0600 (Sun, 15 Feb 2009) New Revision: 6369 Added: branches/dynamic_cpu_configuration/doc/source/reference/maskedarray.baseclass.rst branches/dynamic_cpu_configuration/doc/source/reference/maskedarray.generic.rst branches/dynamic_cpu_configuration/doc/source/reference/maskedarray.rst branches/dynamic_cpu_configuration/numpy/core/src/numpyos.c branches/dynamic_cpu_configuration/numpy/doc/constants.py branches/dynamic_cpu_configuration/numpy/lib/_iotools.py branches/dynamic_cpu_configuration/numpy/lib/recfunctions.py branches/dynamic_cpu_configuration/numpy/lib/tests/test__iotools.py branches/dynamic_cpu_configuration/numpy/lib/tests/test_recfunctions.py branches/dynamic_cpu_configuration/numpy/testing/tests/test_decorators.py Removed: branches/dynamic_cpu_configuration/numpy/testing/parametric.py Modified: branches/dynamic_cpu_configuration/ branches/dynamic_cpu_configuration/LICENSE.txt branches/dynamic_cpu_configuration/MANIFEST.in branches/dynamic_cpu_configuration/THANKS.txt branches/dynamic_cpu_configuration/doc/source/reference/arrays.classes.rst branches/dynamic_cpu_configuration/doc/source/reference/arrays.ndarray.rst branches/dynamic_cpu_configuration/doc/source/reference/arrays.rst branches/dynamic_cpu_configuration/doc/sphinxext/docscrape.py branches/dynamic_cpu_configuration/doc/sphinxext/docscrape_sphinx.py branches/dynamic_cpu_configuration/doc/sphinxext/numpydoc.py branches/dynamic_cpu_configuration/numpy/add_newdocs.py branches/dynamic_cpu_configuration/numpy/core/SConscript branches/dynamic_cpu_configuration/numpy/core/_internal.py branches/dynamic_cpu_configuration/numpy/core/code_generators/generate_numpy_api.py branches/dynamic_cpu_configuration/numpy/core/setup.py branches/dynamic_cpu_configuration/numpy/core/src/arrayobject.c branches/dynamic_cpu_configuration/numpy/core/src/arraytypes.inc.src branches/dynamic_cpu_configuration/numpy/core/src/multiarraymodule.c branches/dynamic_cpu_configuration/numpy/core/src/scalarmathmodule.c.src branches/dynamic_cpu_configuration/numpy/core/src/scalartypes.inc.src branches/dynamic_cpu_configuration/numpy/core/tests/test_memmap.py branches/dynamic_cpu_configuration/numpy/core/tests/test_multiarray.py branches/dynamic_cpu_configuration/numpy/core/tests/test_numerictypes.py branches/dynamic_cpu_configuration/numpy/core/tests/test_print.py branches/dynamic_cpu_configuration/numpy/core/tests/test_regression.py branches/dynamic_cpu_configuration/numpy/core/tests/test_unicode.py branches/dynamic_cpu_configuration/numpy/ctypeslib.py branches/dynamic_cpu_configuration/numpy/distutils/command/config.py branches/dynamic_cpu_configuration/numpy/distutils/command/scons.py branches/dynamic_cpu_configuration/numpy/distutils/fcompiler/compaq.py branches/dynamic_cpu_configuration/numpy/distutils/fcompiler/gnu.py branches/dynamic_cpu_configuration/numpy/distutils/lib2def.py branches/dynamic_cpu_configuration/numpy/distutils/mingw32ccompiler.py branches/dynamic_cpu_configuration/numpy/distutils/misc_util.py branches/dynamic_cpu_configuration/numpy/distutils/system_info.py branches/dynamic_cpu_configuration/numpy/f2py/cfuncs.py branches/dynamic_cpu_configuration/numpy/f2py/f2py.1 branches/dynamic_cpu_configuration/numpy/f2py/f2py2e.py branches/dynamic_cpu_configuration/numpy/f2py/rules.py branches/dynamic_cpu_configuration/numpy/lib/__init__.py branches/dynamic_cpu_configuration/numpy/lib/arraysetops.py branches/dynamic_cpu_configuration/numpy/lib/function_base.py branches/dynamic_cpu_configuration/numpy/lib/getlimits.py branches/dynamic_cpu_configuration/numpy/lib/index_tricks.py branches/dynamic_cpu_configuration/numpy/lib/info.py branches/dynamic_cpu_configuration/numpy/lib/io.py branches/dynamic_cpu_configuration/numpy/lib/src/_compiled_base.c branches/dynamic_cpu_configuration/numpy/lib/tests/test_function_base.py branches/dynamic_cpu_configuration/numpy/lib/tests/test_getlimits.py branches/dynamic_cpu_configuration/numpy/lib/tests/test_io.py branches/dynamic_cpu_configuration/numpy/lib/utils.py branches/dynamic_cpu_configuration/numpy/linalg/linalg.py branches/dynamic_cpu_configuration/numpy/linalg/tests/test_linalg.py branches/dynamic_cpu_configuration/numpy/ma/core.py branches/dynamic_cpu_configuration/numpy/ma/extras.py branches/dynamic_cpu_configuration/numpy/ma/mrecords.py branches/dynamic_cpu_configuration/numpy/ma/tests/test_core.py branches/dynamic_cpu_configuration/numpy/ma/tests/test_extras.py branches/dynamic_cpu_configuration/numpy/ma/tests/test_mrecords.py branches/dynamic_cpu_configuration/numpy/ma/tests/test_subclassing.py branches/dynamic_cpu_configuration/numpy/ma/testutils.py branches/dynamic_cpu_configuration/numpy/numarray/util.py branches/dynamic_cpu_configuration/numpy/oldnumeric/arrayfns.py branches/dynamic_cpu_configuration/numpy/oldnumeric/mlab.py branches/dynamic_cpu_configuration/numpy/oldnumeric/rng.py branches/dynamic_cpu_configuration/numpy/testing/__init__.py branches/dynamic_cpu_configuration/numpy/testing/decorators.py branches/dynamic_cpu_configuration/numpy/testing/noseclasses.py branches/dynamic_cpu_configuration/numpy/testing/nosetester.py branches/dynamic_cpu_configuration/numpy/testing/numpytest.py branches/dynamic_cpu_configuration/setup.py Log: Merged revisions 6191-6221,6235-6238,6240-6241,6244,6250-6251,6253,6256,6258,6260-6261,6263,6265-6266,6268,6271,6283-6286,6291-6316,6320-6352,6354,6356,6358-6368 via svnmerge from http://svn.scipy.org/svn/numpy/trunk ................ r6191 | cdavid | 2008-12-23 13:10:59 +0900 (Tue, 23 Dec 2008) | 1 line Fix typos in the comments for manifest. ................ r6192 | cdavid | 2008-12-23 13:11:12 +0900 (Tue, 23 Dec 2008) | 1 line Use msvcrt values if available for manifest generation: only there starting from python 2.6.1. ................ r6193 | pearu | 2008-12-23 18:02:15 +0900 (Tue, 23 Dec 2008) | 1 line Fix issue 964: f2py python 2.6, 2.6.1 support. ................ r6194 | pierregm | 2008-12-24 08:43:43 +0900 (Wed, 24 Dec 2008) | 12 lines testutils: * assert_equal : use assert_equal_array on records * assert_array_compare : prevent the common mask to be back-propagated to the initial input arrays. * assert_equal_array : use operator.__eq__ instead of ma.equal * assert_equal_less: use operator.__less__ instead of ma.less core: * Fixed _check_fill_value for nested flexible types * Add a ndtype option to _make_mask_descr * Fixed mask_or for nested flexible types * Fixed the printing of masked arrays w/ flexible types. ................ r6195 | cdavid | 2008-12-26 21:16:45 +0900 (Fri, 26 Dec 2008) | 1 line Update to handle numscons 0.10.0 and above. ................ r6196 | cdavid | 2008-12-26 21:36:19 +0900 (Fri, 26 Dec 2008) | 1 line Do not import msvcrt globally in mingw32compiler module, since the module is imported on all platforms. ................ r6197 | cdavid | 2008-12-26 23:39:55 +0900 (Fri, 26 Dec 2008) | 1 line Do not test for functions already tested by python configure script. ................ r6198 | cdavid | 2008-12-27 14:56:58 +0900 (Sat, 27 Dec 2008) | 1 line BUG: Add a runtime check about endianness, to detect bug 4728 in python on Mac OS X. ................ r6199 | cdavid | 2008-12-27 19:06:25 +0900 (Sat, 27 Dec 2008) | 1 line Fix some typo/syntax errors when converting dict access to a function in manifest generation. ................ r6200 | cdavid | 2008-12-27 19:15:30 +0900 (Sat, 27 Dec 2008) | 1 line BUG (#970): fix a python 2.6 bug in distutils which caused an unhelpful Error:None message when trying to build with no VS installed and without the -c mingw32 option. ................ r6201 | cdavid | 2008-12-27 19:30:49 +0900 (Sat, 27 Dec 2008) | 1 line Improve the error message when initializing compiler failed. ................ r6202 | cdavid | 2008-12-27 19:32:05 +0900 (Sat, 27 Dec 2008) | 1 line Try to initialize the msvc compiler before the general code to detect the error early. ................ r6203 | cdavid | 2008-12-27 19:43:41 +0900 (Sat, 27 Dec 2008) | 1 line BUG (#970): this commit should fix the actual bug, which albeeit linked to commir r6200, was caused in anoter code path. ................ r6204 | cdavid | 2008-12-27 19:57:05 +0900 (Sat, 27 Dec 2008) | 1 line Fix manifest generation. ................ r6205 | cdavid | 2008-12-27 20:46:08 +0900 (Sat, 27 Dec 2008) | 1 line BUG (#827): close temp file before reopning them on windows, and make sure they are not automatically deleted on close either (2.6and higher specific). ................ r6206 | cdavid | 2008-12-27 21:18:47 +0900 (Sat, 27 Dec 2008) | 1 line Do not define the union for runtime endianness detection if we don't check endianness. ................ r6207 | cdavid | 2008-12-27 22:48:52 +0900 (Sat, 27 Dec 2008) | 1 line Start working on formatting failure on 2.6: copy how python does complex formatting. ................ r6208 | cdavid | 2008-12-27 23:44:11 +0900 (Sat, 27 Dec 2008) | 1 line Fix formatting for purely imaginary complex numbers. ................ r6209 | cdavid | 2008-12-27 23:53:15 +0900 (Sat, 27 Dec 2008) | 1 line More work on formatting float. ................ r6210 | cdavid | 2008-12-27 23:59:41 +0900 (Sat, 27 Dec 2008) | 1 line Finish formatting fixes for float scalar arrays. ................ r6211 | cdavid | 2008-12-28 00:12:20 +0900 (Sun, 28 Dec 2008) | 1 line Include umath_funcs_c99 in multiarray so that we can use isinf and co macros. ................ r6212 | cdavid | 2008-12-28 01:15:04 +0900 (Sun, 28 Dec 2008) | 1 line Include config.h before our C99 math compat layer. ................ r6213 | cdavid | 2008-12-28 01:15:41 +0900 (Sun, 28 Dec 2008) | 1 line Fix formatting. ................ r6214 | cdavid | 2008-12-28 01:16:18 +0900 (Sun, 28 Dec 2008) | 1 line Do not define FMTR and FMTI macros, as those are already defined on some platforms. ................ r6215 | cdavid | 2008-12-28 01:16:52 +0900 (Sun, 28 Dec 2008) | 1 line More formatting fixes. ................ r6216 | cdavid | 2008-12-28 01:17:27 +0900 (Sun, 28 Dec 2008) | 1 line Remove undef of removed macro. ................ r6217 | cdavid | 2008-12-28 01:33:40 +0900 (Sun, 28 Dec 2008) | 1 line Do not use PyOS_ascii_formatd, as it does not handle long double correctly. ................ r6218 | cdavid | 2008-12-28 02:19:40 +0900 (Sun, 28 Dec 2008) | 1 line Try ugly hack to circumvent long double brokenness with mingw. ................ r6219 | cdavid | 2008-12-28 02:25:50 +0900 (Sun, 28 Dec 2008) | 1 line Use ugly hack for mingw long double pb with complex format function as well. ................ r6220 | cdavid | 2008-12-28 12:18:20 +0900 (Sun, 28 Dec 2008) | 1 line Revert formatting changes: ascii_formatd only works for double, so we can't use it as it is for our formatting needs. ................ r6221 | cdavid | 2008-12-28 15:44:06 +0900 (Sun, 28 Dec 2008) | 1 line Do not add doc sources through add_data_dir: it will put the docs alongside numpy, as a separate package, which is not what we want. Use the manifest instead, since that's the only way I know of to include something in sdist-generated tarballs. ................ r6235 | cdavid | 2008-12-29 16:57:52 +0900 (Mon, 29 Dec 2008) | 13 lines Merged revisions 6233-6234 via svnmerge from http://svn.scipy.org/svn/numpy/branches/fix_float_format ........ r6233 | cdavid | 2008-12-29 12:49:09 +0900 (Mon, 29 Dec 2008) | 1 line Use parametric tests for format tests so that it is clearer which type is failing. ........ r6234 | cdavid | 2008-12-29 12:49:27 +0900 (Mon, 29 Dec 2008) | 1 line Fix formatting tests: cfloat and cdouble as well as np.float and np.double are the same; make sure we test 4 bytes float. ........ ................ r6236 | cdavid | 2008-12-29 17:02:15 +0900 (Mon, 29 Dec 2008) | 1 line Add nan/inf tests for formatting. ................ r6237 | cdavid | 2008-12-29 17:26:04 +0900 (Mon, 29 Dec 2008) | 1 line Add test for real float types locale independance. ................ r6238 | cdavid | 2008-12-29 17:35:06 +0900 (Mon, 29 Dec 2008) | 1 line Clearer error messages for formatting failures. ................ r6240 | cdavid | 2008-12-30 12:48:11 +0900 (Tue, 30 Dec 2008) | 1 line Add tests for print of float types. ................ r6241 | cdavid | 2008-12-30 12:56:54 +0900 (Tue, 30 Dec 2008) | 1 line Add print tests for complex types. ................ r6244 | cdavid | 2008-12-30 13:20:48 +0900 (Tue, 30 Dec 2008) | 1 line Fix test for print: forgot to make sure the value is a float before comparing it. ................ r6250 | cdavid | 2008-12-30 14:02:28 +0900 (Tue, 30 Dec 2008) | 17 lines Merged revisions 6247-6249 via svnmerge from http://svn.scipy.org/svn/numpy/branches/fix_float_format ........ r6247 | cdavid | 2008-12-30 13:41:37 +0900 (Tue, 30 Dec 2008) | 1 line Handle 1e10 specially, as it is the limit where exp notation is shorter than decimal for single precision, but not for double (python native one). ........ r6248 | cdavid | 2008-12-30 13:47:38 +0900 (Tue, 30 Dec 2008) | 1 line Refactor a bit redirected output print test. ........ r6249 | cdavid | 2008-12-30 13:49:31 +0900 (Tue, 30 Dec 2008) | 1 line Fix test for single precision print. ........ ................ r6251 | cdavid | 2008-12-30 14:12:50 +0900 (Tue, 30 Dec 2008) | 1 line Use np.inf instead of float('inf'), as the later does not work on windows for python < 2.6. ................ r6253 | cdavid | 2008-12-30 14:15:09 +0900 (Tue, 30 Dec 2008) | 1 line Fix typo in test. ................ r6256 | cdavid | 2008-12-30 14:34:22 +0900 (Tue, 30 Dec 2008) | 1 line Special case float tests on windows: python 2.5 and below have >=3 digits in the exp. ................ r6258 | cdavid | 2008-12-30 14:42:03 +0900 (Tue, 30 Dec 2008) | 1 line Hardcode reference for inf/nan-involved values. ................ r6260 | cdavid | 2008-12-30 14:50:18 +0900 (Tue, 30 Dec 2008) | 1 line Fix more formatting tests on win32. ................ r6261 | cdavid | 2008-12-30 14:52:16 +0900 (Tue, 30 Dec 2008) | 1 line Fix some more redirected output print tests. ................ r6263 | cdavid | 2008-12-30 15:01:31 +0900 (Tue, 30 Dec 2008) | 1 line More fixes for print tests. ................ r6265 | cdavid | 2008-12-30 15:03:56 +0900 (Tue, 30 Dec 2008) | 1 line Fix typo. ................ r6266 | cdavid | 2008-12-30 15:08:06 +0900 (Tue, 30 Dec 2008) | 1 line Fix typo. ................ r6268 | cdavid | 2008-12-30 15:12:26 +0900 (Tue, 30 Dec 2008) | 1 line complex scalar arrays cannot be created from real/imag args: wrap init values in a complex. ................ r6271 | cdavid | 2008-12-30 15:32:03 +0900 (Tue, 30 Dec 2008) | 1 line Do not use dict for reference: hashing on scalar arrays does not work as I expected. ................ r6283 | ptvirtan | 2008-12-31 10:14:47 +0900 (Wed, 31 Dec 2008) | 1 line Fix #951: make tests to clean temp files properly ................ r6284 | jarrod.millman | 2009-01-01 08:25:03 +0900 (Thu, 01 Jan 2009) | 2 lines ran reindent ................ r6285 | alan.mcintyre | 2009-01-01 08:46:34 +0900 (Thu, 01 Jan 2009) | 15 lines Remove the following deprecated items from numpy.testing: - ParametricTestCase - The following arguments from numpy.testing.Tester.test(): level, verbosity, all, sys_argv, testcase_pattern - Path manipulation functions: set_package_path, set_local_path, restore_path - NumpyTestCase, NumpyTest Also separated testing parameter setup from NoseTester.test into NoseTester.prepare_test_args for use in a utility script for valgrind testing (see NumPy ticket #784). ................ r6286 | jarrod.millman | 2009-01-01 16:56:53 +0900 (Thu, 01 Jan 2009) | 2 lines add default include dir for Fedora/Red Hat (see SciPy ticket 817) ................ r6291 | cdavid | 2009-01-04 19:57:39 +0900 (Sun, 04 Jan 2009) | 1 line Do not import md5 on python >= 2.6; use hashlib instead. ................ r6292 | cdavid | 2009-01-04 20:08:16 +0900 (Sun, 04 Jan 2009) | 1 line Do not use popen* but subprocess.Popen instead. ................ r6293 | cdavid | 2009-01-04 21:03:29 +0900 (Sun, 04 Jan 2009) | 1 line Revert md5 change: hashlib.md5 is not a drop-in replacement for md5. ................ r6294 | pierregm | 2009-01-05 05:16:00 +0900 (Mon, 05 Jan 2009) | 2 lines * adapted default_fill_value for flexible datatype * fixed max/minimum_fill_value for flexible datatype ................ r6295 | stefan | 2009-01-06 06:51:18 +0900 (Tue, 06 Jan 2009) | 1 line Credit more developers. ................ r6296 | pierregm | 2009-01-06 07:52:21 +0900 (Tue, 06 Jan 2009) | 1 line *moved the printing templates out of MaskedArray.__repr__ ................ r6297 | stefan | 2009-01-06 19:09:00 +0900 (Tue, 06 Jan 2009) | 1 line Use new-style classes with multiple-inheritance to address bug in IronPython. ................ r6298 | pierregm | 2009-01-07 05:35:37 +0900 (Wed, 07 Jan 2009) | 1 line * Bugfix #961 ................ r6299 | pierregm | 2009-01-08 03:14:12 +0900 (Thu, 08 Jan 2009) | 1 line * Fixed iadd/isub/imul when the base array has no mask but the other array does ................ r6300 | pierregm | 2009-01-08 07:34:51 +0900 (Thu, 08 Jan 2009) | 3 lines * Renamed `torecords` to `toflex`, keeping `torecords` as an alias * Introduced `fromflex`, to reconstruct a masked_array from the output of `toflex` (can?\226?\128?\153t `use fromrecords` as it would clash with `numpy.ma.mrecords.fromrecords`) * Fixed a bug in MaskedBinaryOperation (#979) (wrong array broadcasting) ................ r6301 | cdavid | 2009-01-08 18:19:00 +0900 (Thu, 08 Jan 2009) | 1 line Avoid putting things into stderr when errors occurs in f2py wrappers; put all the info in the python error string instead. ................ r6302 | cdavid | 2009-01-09 00:11:32 +0900 (Fri, 09 Jan 2009) | 1 line Fix python 2.4 issue. ................ r6303 | chanley | 2009-01-09 01:30:01 +0900 (Fri, 09 Jan 2009) | 1 line Fix test_print.py function _test_locale_independance() since str(1.2) does not use the LC_NUMERIC locale to convert numbers. Fix from Mark Sienkiewicz. ................ r6304 | cdavid | 2009-01-09 04:22:21 +0900 (Fri, 09 Jan 2009) | 1 line Revert buggy test fix for locale independecce. ................ r6305 | pierregm | 2009-01-09 05:02:29 +0900 (Fri, 09 Jan 2009) | 2 lines * Add __eq__ and __ne__ for support of flexible arrays. * Fixed .filled for nested structures ................ r6306 | pierregm | 2009-01-09 06:51:04 +0900 (Fri, 09 Jan 2009) | 1 line * Remove a debugging print statement. ................ r6307 | jarrod.millman | 2009-01-09 11:14:35 +0900 (Fri, 09 Jan 2009) | 2 lines Updated license file ................ r6308 | cdavid | 2009-01-09 14:26:58 +0900 (Fri, 09 Jan 2009) | 1 line Tag formatting unit tests as known failures. ................ r6309 | jarrod.millman | 2009-01-09 17:59:29 +0900 (Fri, 09 Jan 2009) | 2 lines should be more reliable way to determine what bit platform ................ r6310 | jarrod.millman | 2009-01-09 18:14:17 +0900 (Fri, 09 Jan 2009) | 2 lines better default library paths for 64bit arch ................ r6311 | jarrod.millman | 2009-01-09 18:57:15 +0900 (Fri, 09 Jan 2009) | 2 lines simplification suggested by stefan ................ r6312 | jarrod.millman | 2009-01-09 19:02:09 +0900 (Fri, 09 Jan 2009) | 2 lines switch the order [lib,lib64] --> [lib64,lib] ................ r6313 | jarrod.millman | 2009-01-09 19:18:29 +0900 (Fri, 09 Jan 2009) | 2 lines removed unneeded import ................ r6314 | jarrod.millman | 2009-01-10 04:37:16 +0900 (Sat, 10 Jan 2009) | 2 lines can't use append an int to a string ................ r6315 | pierregm | 2009-01-10 05:18:12 +0900 (Sat, 10 Jan 2009) | 2 lines * Added flatten_structured_arrays * Fixed _get_recordarray for nested structures ................ r6316 | pierregm | 2009-01-10 10:53:05 +0900 (Sat, 10 Jan 2009) | 1 line * Add flatten_structured_array to the namespace ................ r6320 | pierregm | 2009-01-14 06:01:58 +0900 (Wed, 14 Jan 2009) | 9 lines numpy.ma.core: * introduced baseclass, sharedmask and hardmask as readonly properties of MaskedArray * docstrings update numpy.ma.extras: * docstring updates docs/reference * introduced maskedarray, maskedarray.baseclass, maskedarray.generic ................ r6321 | stefan | 2009-01-14 16:14:27 +0900 (Wed, 14 Jan 2009) | 2 lines Docstring: remove old floating point arithmetic, parallel execution and postponed import references. ................ r6322 | stefan | 2009-01-14 16:55:16 +0900 (Wed, 14 Jan 2009) | 1 line Fix printing of limits. ................ r6323 | stefan | 2009-01-14 16:56:10 +0900 (Wed, 14 Jan 2009) | 1 line Fix finfo to work on all instances, not just NumPy scalars. ................ r6324 | pierregm | 2009-01-17 09:15:15 +0900 (Sat, 17 Jan 2009) | 1 line * fixed _arraymethod.__call__ for structured arrays ................ r6325 | ptvirtan | 2009-01-18 06:24:13 +0900 (Sun, 18 Jan 2009) | 3 lines Make `trapz` accept 1-D `x` parameter for n-d `y`, even if axis != -1. Additional tests included. ................ r6326 | pierregm | 2009-01-19 17:53:53 +0900 (Mon, 19 Jan 2009) | 3 lines * renamed FlatIter to MaskedIterator * added __getitem__ to MaskedIterator ................ r6327 | pierregm | 2009-01-19 18:01:24 +0900 (Mon, 19 Jan 2009) | 2 lines * replace np.asarray by np.asanyarray in unique1d ................ r6328 | pierregm | 2009-01-19 18:04:20 +0900 (Mon, 19 Jan 2009) | 2 lines * add intersect1d, intersect1d_nu, setdiff1d, setmember1d, setxor1d, unique1d, union1d * use np.atleast1d instead of ma.atleast1d ................ r6329 | pierregm | 2009-01-20 06:22:52 +0900 (Tue, 20 Jan 2009) | 3 lines * lib : introduced _iotools * lib.io : introduced genfromtxt, ndfromtxt, mafromtxt, recfromtxt, recfromcsv. ................ r6330 | pierregm | 2009-01-22 14:37:36 +0900 (Thu, 22 Jan 2009) | 1 line * genfromtxt : if names is True, accept a line starting with a comment character as header. ................ r6331 | pierregm | 2009-01-22 14:40:25 +0900 (Thu, 22 Jan 2009) | 1 line * added recfunctions, a collection of utilities to manipulate structured arrays. ................ r6332 | pierregm | 2009-01-23 03:21:32 +0900 (Fri, 23 Jan 2009) | 2 lines * fixed a machine-dependent issue on default int ('>> np.dtype(np.int16) -dtype('int16') - -Record, one field name 'f1', containing int16: ->>> np.dtype([('f1', np.int16)]) -dtype([('f1', '>> np.dtype([('f1', [('f1', np.int16)])]) -dtype([('f1', [('f1', '>> np.dtype([('f1', np.uint), ('f2', np.int32)]) -dtype([('f1', '>> np.dtype([('a','f8'),('b','S10')]) -dtype([('a', '>> np.dtype("i4, (2,3)f8") -dtype([('f0', '>> np.dtype([('hello',(np.int,3)),('world',np.void,10)]) -dtype([('hello', '>> np.dtype((np.int16, {'x':(np.int8,0), 'y':(np.int8,1)})) -dtype(('>> np.dtype({'names':['gender','age'], 'formats':['S1',np.uint8]}) -dtype([('gender', '|S1'), ('age', '|u1')]) - -Offsets in bytes, here 0 and 25: ->>> np.dtype({'surname':('S25',0),'age':(np.uint8,25)}) -dtype([('surname', '|S25'), ('age', '|u1')]) - -""") - -add_newdoc('numpy.core', 'dtype', - """ - dtype(obj, align=False, copy=False) - - Create a data type object. - - A numpy array is homogeneous, and contains elements described by a - dtype object. A dtype object can be constructed from different - combinations of fundamental numeric types. - - Parameters - ---------- - obj - Object to be converted to a data type object. - align : bool, optional - Add padding to the fields to match what a C compiler would output - for a similar C-struct. Can be ``True`` only if `obj` is a dictionary - or a comma-separated string. - copy : bool, optional - Make a new copy of the data-type object. If ``False``, the result - may just be a reference to a built-in data-type object. - - Examples - -------- - Using array-scalar type: - - >>> np.dtype(np.int16) - dtype('int16') - - Record, one field name 'f1', containing int16: - - >>> np.dtype([('f1', np.int16)]) - dtype([('f1', '>> np.dtype([('f1', [('f1', np.int16)])]) - dtype([('f1', [('f1', '>> np.dtype([('f1', np.uint), ('f2', np.int32)]) - dtype([('f1', '>> np.dtype([('a','f8'),('b','S10')]) - dtype([('a', '>> np.dtype("i4, (2,3)f8") - dtype([('f0', '>> np.dtype([('hello',(np.int,3)),('world',np.void,10)]) - dtype([('hello', '>> np.dtype((np.int16, {'x':(np.int8,0), 'y':(np.int8,1)})) - dtype(('>> np.dtype({'names':['gender','age'], 'formats':['S1',np.uint8]}) - dtype([('gender', '|S1'), ('age', '|u1')]) - - Offsets in bytes, here 0 and 25: - - >>> np.dtype({'surname':('S25',0),'age':(np.uint8,25)}) - dtype([('surname', '|S25'), ('age', '|u1')]) - - """) - ############################################################################### # # flatiter @@ -150,7 +16,12 @@ # ############################################################################### -# attributes +add_newdoc('numpy.core', 'flatiter', + """ + """) + +# flatiter attributes + add_newdoc('numpy.core', 'flatiter', ('base', """documentation needed @@ -170,9 +41,8 @@ """)) +# flatiter functions - -# functions add_newdoc('numpy.core', 'flatiter', ('__array__', """__array__(type=None) Get array from iterator @@ -191,37 +61,37 @@ # ############################################################################### +add_newdoc('numpy.core', 'broadcast', + """ + """) + # attributes + add_newdoc('numpy.core', 'broadcast', ('index', """current index in broadcasted result """)) - add_newdoc('numpy.core', 'broadcast', ('iters', """tuple of individual iterators """)) - add_newdoc('numpy.core', 'broadcast', ('nd', """number of dimensions of broadcasted result """)) - add_newdoc('numpy.core', 'broadcast', ('numiter', """number of iterators """)) - add_newdoc('numpy.core', 'broadcast', ('shape', """shape of broadcasted result """)) - add_newdoc('numpy.core', 'broadcast', ('size', """total size of broadcasted result @@ -1997,6 +1867,32 @@ Equivalent to a.view(a.dtype.newbytorder(byteorder)) + Return array with dtype changed to interpret array data as + specified byte order. + + Changes are also made in all fields and sub-arrays of the array + data type. + + Parameters + ---------- + new_order : string, optional + Byte order to force; a value from the byte order + specifications below. The default value ('S') results in + swapping the current byte order. + `new_order` codes can be any of: + * {'<', 'L'} - little endian + * {'>', 'B'} - big endian + * {'=', 'N'} - native order + * 'S' - swap dtype from current to opposite endian + * {'|', 'I'} - ignore (no change to byte order) + The code does a case-insensitive check on the first letter of + `new_order` for these alternatives. For example, any of '>' + or 'B' or 'b' or 'brian' are valid to specify big-endian. + + Returns + ------- + new_arr : array + array with the given change to the dtype byte order. """)) @@ -2555,6 +2451,25 @@ """)) + +############################################################################## +# +# umath functions +# +############################################################################## + +add_newdoc('numpy.core.umath', 'frexp', + """ + """) + +add_newdoc('numpy.core.umath', 'frompyfunc', + """ + """) + +add_newdoc('numpy.core.umath', 'ldexp', + """ + """) + add_newdoc('numpy.core.umath','geterrobj', """geterrobj() @@ -2584,6 +2499,102 @@ """) + +############################################################################## +# +# lib._compiled_base functions +# +############################################################################## + +add_newdoc('numpy.lib._compiled_base', 'digitize', + """ + digitize(x,bins) + + Return the index of the bin to which each value of x belongs. + + Each index i returned is such that bins[i-1] <= x < bins[i] if + bins is monotonically increasing, or bins [i-1] > x >= bins[i] if + bins is monotonically decreasing. + + Beyond the bounds of the bins 0 or len(bins) is returned as appropriate. + """) + +add_newdoc('numpy.lib._compiled_base', 'bincount', + """ + bincount(x,weights=None) + + Return the number of occurrences of each value in x. + + x must be a list of non-negative integers. The output, b[i], + represents the number of times that i is found in x. If weights + is specified, every occurrence of i at a position p contributes + weights[p] instead of 1. + + See also: histogram, digitize, unique. + """) + +add_newdoc('numpy.lib._compiled_base', 'add_docstring', + """ + docstring(obj, docstring) + + Add a docstring to a built-in obj if possible. + If the obj already has a docstring raise a RuntimeError + If this routine does not know how to add a docstring to the object + raise a TypeError + """) + +add_newdoc('numpy.lib._compiled_base', 'packbits', + """ + out = numpy.packbits(myarray, axis=None) + + myarray : an integer type array whose elements should be packed to bits + + This routine packs the elements of a binary-valued dataset into a + NumPy array of type uint8 ('B') whose bits correspond to + the logical (0 or nonzero) value of the input elements. + The dimension over-which bit-packing is done is given by axis. + The shape of the output has the same number of dimensions as the input + (unless axis is None, in which case the output is 1-d). + + Example: + >>> a = array([[[1,0,1], + ... [0,1,0]], + ... [[1,1,0], + ... [0,0,1]]]) + >>> b = numpy.packbits(a,axis=-1) + >>> b + array([[[160],[64]],[[192],[32]]], dtype=uint8) + + Note that 160 = 128 + 32 + 192 = 128 + 64 + """) + +add_newdoc('numpy.lib._compiled_base', 'unpackbits', + """ + out = numpy.unpackbits(myarray, axis=None) + + myarray - array of uint8 type where each element represents a bit-field + that should be unpacked into a boolean output array + + The shape of the output array is either 1-d (if axis is None) or + the same shape as the input array with unpacking done along the + axis specified. + """) + + +############################################################################## +# +# Documentation for ufunc attributes and methods +# +############################################################################## + + +############################################################################## +# +# ufunc object +# +############################################################################## + add_newdoc('numpy.core', 'ufunc', """ Functions that operate element by element on whole arrays. @@ -2636,6 +2647,12 @@ """) +############################################################################## +# +# ufunc methods +# +############################################################################## + add_newdoc('numpy.core', 'ufunc', ('reduce', """ reduce(array, axis=0, dtype=None, out=None) @@ -2815,3 +2832,680 @@ [12, 15, 18]]) """)) + + +############################################################################## +# +# Documentation for dtype attributes and methods +# +############################################################################## + +############################################################################## +# +# dtype object +# +############################################################################## + +add_newdoc('numpy.core.multiarray', 'dtype', + """ + dtype(obj, align=False, copy=False) + + Create a data type object. + + A numpy array is homogeneous, and contains elements described by a + dtype object. A dtype object can be constructed from different + combinations of fundamental numeric types. + + Parameters + ---------- + obj + Object to be converted to a data type object. + align : bool, optional + Add padding to the fields to match what a C compiler would output + for a similar C-struct. Can be ``True`` only if `obj` is a dictionary + or a comma-separated string. + copy : bool, optional + Make a new copy of the data-type object. If ``False``, the result + may just be a reference to a built-in data-type object. + + Examples + -------- + Using array-scalar type: + + >>> np.dtype(np.int16) + dtype('int16') + + Record, one field name 'f1', containing int16: + + >>> np.dtype([('f1', np.int16)]) + dtype([('f1', '>> np.dtype([('f1', [('f1', np.int16)])]) + dtype([('f1', [('f1', '>> np.dtype([('f1', np.uint), ('f2', np.int32)]) + dtype([('f1', '>> np.dtype([('a','f8'),('b','S10')]) + dtype([('a', '>> np.dtype("i4, (2,3)f8") + dtype([('f0', '>> np.dtype([('hello',(np.int,3)),('world',np.void,10)]) + dtype([('hello', '>> np.dtype((np.int16, {'x':(np.int8,0), 'y':(np.int8,1)})) + dtype(('>> np.dtype({'names':['gender','age'], 'formats':['S1',np.uint8]}) + dtype([('gender', '|S1'), ('age', '|u1')]) + + Offsets in bytes, here 0 and 25: + + >>> np.dtype({'surname':('S25',0),'age':(np.uint8,25)}) + dtype([('surname', '|S25'), ('age', '|u1')]) + + """) + +############################################################################## +# +# dtype attributes +# +############################################################################## + +add_newdoc('numpy.core.multiarray', 'dtype', ('alignment', + """ + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('byteorder', + ''' + dt.byteorder + + String giving byteorder of dtype + + One of: + * '=' - native byteorder + * '<' - little endian + * '>' - big endian + * '|' - endian not relevant + + Examples + -------- + >>> dt = np.dtype('i2') + >>> dt.byteorder + '=' + >>> # endian is not relevant for 8 bit numbers + >>> np.dtype('i1').byteorder + '|' + >>> # or ASCII strings + >>> np.dtype('S2').byteorder + '|' + >>> # Even if specific code is given, and it is native + >>> # '=' is the byteorder + >>> import sys + >>> sys_is_le = sys.byteorder == 'little' + >>> native_code = sys_is_le and '<' or '>' + >>> swapped_code = sys_is_le and '>' or '<' + >>> dt = np.dtype(native_code + 'i2') + >>> dt.byteorder + '=' + >>> # Swapped code shows up as itself + >>> dt = np.dtype(swapped_code + 'i2') + >>> dt.byteorder == swapped_code + True + ''')) + +add_newdoc('numpy.core.multiarray', 'dtype', ('char', + """ + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('descr', + """ + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('fields', + """ + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('flags', + """ + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('hasobject', + """ + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('isbuiltin', + """ + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('isnative', + """ + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('itemsize', + """ + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('kind', + """ + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('name', + """ + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('names', + """ + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('num', + """ + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('shape', + """ + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('str', + """ + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('subdtype', + """ + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('type', + """ + """)) + +############################################################################## +# +# dtype methods +# +############################################################################## + +add_newdoc('numpy.core.multiarray', 'dtype', ('newbyteorder', + ''' + newbyteorder(new_order='S') + + Return a new dtype with a different byte order. + + Changes are also made in all fields and sub-arrays of the data type. + + Parameters + ---------- + new_order : string, optional + Byte order to force; a value from the byte order + specifications below. The default value ('S') results in + swapping the current byte order. + `new_order` codes can be any of: + * {'<', 'L'} - little endian + * {'>', 'B'} - big endian + * {'=', 'N'} - native order + * 'S' - swap dtype from current to opposite endian + * {'|', 'I'} - ignore (no change to byte order) + The code does a case-insensitive check on the first letter of + `new_order` for these alternatives. For example, any of '>' + or 'B' or 'b' or 'brian' are valid to specify big-endian. + + Returns + ------- + new_dtype : dtype + New dtype object with the given change to the byte order. + + Examples + -------- + >>> import sys + >>> sys_is_le = sys.byteorder == 'little' + >>> native_code = sys_is_le and '<' or '>' + >>> swapped_code = sys_is_le and '>' or '<' + >>> native_dt = np.dtype(native_code+'i2') + >>> swapped_dt = np.dtype(swapped_code+'i2') + >>> native_dt.newbyteorder('S') == swapped_dt + True + >>> native_dt.newbyteorder() == swapped_dt + True + >>> native_dt == swapped_dt.newbyteorder('S') + True + >>> native_dt == swapped_dt.newbyteorder('=') + True + >>> native_dt == swapped_dt.newbyteorder('N') + True + >>> native_dt == native_dt.newbyteorder('|') + True + >>> np.dtype('>> np.dtype('>> np.dtype('>i2') == native_dt.newbyteorder('>') + True + >>> np.dtype('>i2') == native_dt.newbyteorder('B') + True + ''')) + + +############################################################################## +# +# nd_grid instances +# +############################################################################## + +add_newdoc('numpy.lib.index_tricks', 'mgrid', + """ + Construct a multi-dimensional filled "meshgrid". + + Returns a mesh-grid when indexed. The dimension and number of the + output arrays are equal to the number of indexing dimensions. If + the step length is not a complex number, then the stop is not + inclusive. + + However, if the step length is a **complex number** (e.g. 5j), + then the integer part of its magnitude is interpreted as + specifying the number of points to create between the start and + stop values, where the stop value **is inclusive**. + + See also + -------- + ogrid + + Examples + -------- + >>> np.mgrid[0:5,0:5] + array([[[0, 0, 0, 0, 0], + [1, 1, 1, 1, 1], + [2, 2, 2, 2, 2], + [3, 3, 3, 3, 3], + [4, 4, 4, 4, 4]], + + [[0, 1, 2, 3, 4], + [0, 1, 2, 3, 4], + [0, 1, 2, 3, 4], + [0, 1, 2, 3, 4], + [0, 1, 2, 3, 4]]]) + >>> np.mgrid[-1:1:5j] + array([-1. , -0.5, 0. , 0.5, 1. ]) + """) + +add_newdoc('numpy.lib.index_tricks', 'ogrid', + """ + Construct a multi-dimensional open "meshgrid". + + Returns an 'open' mesh-grid when indexed. The dimension and + number of the output arrays are equal to the number of indexing + dimensions. If the step length is not a complex number, then the + stop is not inclusive. + + The returned mesh-grid is open (or not fleshed out), so that only + one-dimension of each returned argument is greater than 1 + + If the step length is a **complex number** (e.g. 5j), then the + integer part of its magnitude is interpreted as specifying the + number of points to create between the start and stop values, + where the stop value **is inclusive**. + + See also + -------- + mgrid + + Examples + -------- + >>> np.ogrid[0:5,0:5] + [array([[0], + [1], + [2], + [3], + [4]]), array([[0, 1, 2, 3, 4]])] + """) + + +############################################################################## +# +# Documentation for `generic` attributes and methods +# +############################################################################## + +add_newdoc('numpy.core.numerictypes', 'generic', + """ + """) + +# Attributes + +add_newdoc('numpy.core.numerictypes', 'generic', ('T', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('base', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('data', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('dtype', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('flags', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('flat', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('imag', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('itemsize', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('nbytes', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('ndim', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('real', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('shape', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('size', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('strides', + """ + """)) + +# Methods + +add_newdoc('numpy.core.numerictypes', 'generic', ('all', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('any', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('argmax', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('argmin', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('argsort', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('astype', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('byteswap', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('choose', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('clip', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('compress', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('conjugate', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('copy', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('cumprod', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('cumsum', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('diagonal', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('dump', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('dumps', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('fill', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('flatten', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('getfield', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('item', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('itemset', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('max', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('mean', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('min', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('newbyteorder', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('nonzero', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('prod', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('ptp', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('put', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('ravel', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('repeat', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('reshape', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('resize', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('round', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('searchsorted', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('setfield', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('setflags', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('sort', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('squeeze', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('std', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('sum', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('swapaxes', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('take', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('tofile', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('tolist', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('tostring', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('trace', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('transpose', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('var', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('view', + """ + """)) + + +############################################################################## +# +# Documentation for other scalar classes +# +############################################################################## + +add_newdoc('numpy.core.numerictypes', 'bool_', + """ + """) + +add_newdoc('numpy.core.numerictypes', 'complex64', + """ + """) + +add_newdoc('numpy.core.numerictypes', 'complex128', + """ + """) + +add_newdoc('numpy.core.numerictypes', 'complex256', + """ + """) + +add_newdoc('numpy.core.numerictypes', 'float32', + """ + """) + +add_newdoc('numpy.core.numerictypes', 'float64', + """ + """) + +add_newdoc('numpy.core.numerictypes', 'float96', + """ + """) + +add_newdoc('numpy.core.numerictypes', 'float128', + """ + """) + +add_newdoc('numpy.core.numerictypes', 'int8', + """ + """) + +add_newdoc('numpy.core.numerictypes', 'int16', + """ + """) + +add_newdoc('numpy.core.numerictypes', 'int32', + """ + """) + +add_newdoc('numpy.core.numerictypes', 'int64', + """ + """) + +add_newdoc('numpy.core.numerictypes', 'object_', + """ + """) Modified: branches/dynamic_cpu_configuration/numpy/core/SConscript =================================================================== --- branches/dynamic_cpu_configuration/numpy/core/SConscript 2009-02-14 22:42:29 UTC (rev 6368) +++ branches/dynamic_cpu_configuration/numpy/core/SConscript 2009-02-15 12:03:15 UTC (rev 6369) @@ -211,6 +211,10 @@ config.Define('DISTUTILS_USE_SDK', distutils_use_sdk, "define to 1 to disable SMP support ") + if a == "Intel": + config.Define('FORCE_NO_LONG_DOUBLE_FORMATTING', 1, + "define to 1 to force long double format string to the" \ + " same as double (Lg -> g)") #-------------- # Checking Blas #-------------- Modified: branches/dynamic_cpu_configuration/numpy/core/_internal.py =================================================================== --- branches/dynamic_cpu_configuration/numpy/core/_internal.py 2009-02-14 22:42:29 UTC (rev 6368) +++ branches/dynamic_cpu_configuration/numpy/core/_internal.py 2009-02-15 12:03:15 UTC (rev 6369) @@ -292,3 +292,22 @@ raise ValueError, "unknown field name: %s" % (name,) return tuple(list(order) + nameslist) raise ValueError, "unsupported order value: %s" % (order,) + +# Given an array with fields and a sequence of field names +# construct a new array with just those fields copied over +def _index_fields(ary, fields): + from multiarray import empty, dtype + dt = ary.dtype + new_dtype = [(name, dt[name]) for name in dt.names if name in fields] + if ary.flags.f_contiguous: + order = 'F' + else: + order = 'C' + + newarray = empty(ary.shape, dtype=new_dtype, order=order) + + for name in fields: + newarray[name] = ary[name] + + return newarray + Modified: branches/dynamic_cpu_configuration/numpy/core/code_generators/generate_numpy_api.py =================================================================== --- branches/dynamic_cpu_configuration/numpy/core/code_generators/generate_numpy_api.py 2009-02-14 22:42:29 UTC (rev 6368) +++ branches/dynamic_cpu_configuration/numpy/core/code_generators/generate_numpy_api.py 2009-02-15 12:03:15 UTC (rev 6369) @@ -65,6 +65,13 @@ static int _import_array(void) { +#ifdef WORDS_BIGENDIAN + union { + long i; + char c[sizeof(long)]; + } bint = {1}; +#endif + PyObject *numpy = PyImport_ImportModule("numpy.core.multiarray"); PyObject *c_api = NULL; if (numpy == NULL) return -1; @@ -83,6 +90,17 @@ (int) NPY_VERSION, (int) PyArray_GetNDArrayCVersion()); return -1; } + +#ifdef WORDS_BIGENDIAN + if (bint.c[0] == 1) { + PyErr_Format(PyExc_RuntimeError, "module compiled against "\ + "python headers configured as big endian, but little endian arch "\ + "detected: this is a python 2.6.* bug (see bug 4728 in python bug "\ + "tracker )"); + return -1; + } +#endif + return 0; } Modified: branches/dynamic_cpu_configuration/numpy/core/setup.py =================================================================== --- branches/dynamic_cpu_configuration/numpy/core/setup.py 2009-02-14 22:42:29 UTC (rev 6368) +++ branches/dynamic_cpu_configuration/numpy/core/setup.py 2009-02-15 12:03:15 UTC (rev 6369) @@ -67,8 +67,8 @@ # Mandatory functions: if not found, fail the build mandatory_funcs = ["sin", "cos", "tan", "sinh", "cosh", "tanh", "fabs", - "floor", "ceil", "sqrt", "log10", "log", "exp", "asin", - "acos", "atan", "fmod", 'modf', 'frexp', 'ldexp'] + "floor", "ceil", "sqrt", "log10", "log", "exp", "asin", + "acos", "atan", "fmod", 'modf', 'frexp', 'ldexp'] if not check_funcs_once(mandatory_funcs): raise SystemError("One of the required function to build numpy is not" @@ -81,6 +81,14 @@ optional_stdfuncs = ["expm1", "log1p", "acosh", "asinh", "atanh", "rint", "trunc", "exp2", "log2"] + # XXX: hack to circumvent cpp pollution from python: python put its + # config.h in the public namespace, so we have a clash for the common + # functions we test. We remove every function tested by python's autoconf, + # hoping their own test are correct + if sys.version_info[0] == 2 and sys.version_info[1] >= 6: + for f in ["expm1", "log1p", "acosh", "atanh", "asinh"]: + optional_stdfuncs.remove(f) + check_funcs(optional_stdfuncs) # C99 functions: float and long double versions @@ -179,6 +187,14 @@ headers=['stdlib.h']): moredefs.append(('PyOS_ascii_strtod', 'strtod')) + if sys.platform == "win32": + from numpy.distutils.misc_util import get_build_architecture + # On win32, force long double format string to be 'g', not + # 'Lg', since the MS runtime does not support long double whose + # size is > sizeof(double) + if get_build_architecture()=="Intel": + moredefs.append('FORCE_NO_LONG_DOUBLE_FORMATTING') + target_f = open(target,'a') for d in moredefs: if isinstance(d,str): @@ -322,6 +338,7 @@ deps = [join('src','arrayobject.c'), join('src','arraymethods.c'), join('src','scalartypes.inc.src'), + join('src','numpyos.c'), join('src','arraytypes.inc.src'), join('src','_signbit.c'), join('src','ucsnarrow.c'), Modified: branches/dynamic_cpu_configuration/numpy/core/src/arrayobject.c =================================================================== --- branches/dynamic_cpu_configuration/numpy/core/src/arrayobject.c 2009-02-14 22:42:29 UTC (rev 6368) +++ branches/dynamic_cpu_configuration/numpy/core/src/arrayobject.c 2009-02-15 12:03:15 UTC (rev 6369) @@ -2827,10 +2827,10 @@ int nd, fancy; PyArrayObject *other; PyArrayMapIterObject *mit; + PyObject *obj; if (PyString_Check(op) || PyUnicode_Check(op)) { if (self->descr->names) { - PyObject *obj; obj = PyDict_GetItem(self->descr->fields, op); if (obj != NULL) { PyArray_Descr *descr; @@ -2852,6 +2852,34 @@ return NULL; } + /* Check for multiple field access + */ + if (self->descr->names && PySequence_Check(op) && !PyTuple_Check(op)) { + int seqlen, i; + seqlen = PySequence_Size(op); + for (i=0; i 0) && (i == seqlen)); + if (fancy) { + PyObject *_numpy_internal; + _numpy_internal = PyImport_ImportModule("numpy.core._internal"); + if (_numpy_internal == NULL) return NULL; + obj = PyObject_CallMethod(_numpy_internal, "_index_fields", + "OO", self, op); + Py_DECREF(_numpy_internal); + return obj; + } + } + if (op == Py_Ellipsis) { Py_INCREF(self); return (PyObject *)self; Modified: branches/dynamic_cpu_configuration/numpy/core/src/arraytypes.inc.src =================================================================== --- branches/dynamic_cpu_configuration/numpy/core/src/arraytypes.inc.src 2009-02-14 22:42:29 UTC (rev 6368) +++ branches/dynamic_cpu_configuration/numpy/core/src/arraytypes.inc.src 2009-02-15 12:03:15 UTC (rev 6369) @@ -2,41 +2,17 @@ #include "config.h" static double -_getNAN(void) { -#ifdef NAN - return NAN; -#else - static double nan=0; - - if (nan == 0) { - double mul = 1e100; - double tmp = 0.0; - double pinf=0; - pinf = mul; - for (;;) { - pinf *= mul; - if (pinf == tmp) break; - tmp = pinf; - } - nan = pinf / pinf; - } - return nan; -#endif -} - - -static double MyPyFloat_AsDouble(PyObject *obj) { double ret = 0; PyObject *num; if (obj == Py_None) { - return _getNAN(); + return NumPyOS_NAN; } num = PyNumber_Float(obj); if (num == NULL) { - return _getNAN(); + return NumPyOS_NAN; } ret = PyFloat_AsDouble(num); Py_DECREF(num); @@ -192,7 +168,7 @@ op2 = op; Py_INCREF(op); } if (op2 == Py_None) { - oop.real = oop.imag = _getNAN(); + oop.real = oop.imag = NumPyOS_NAN; } else { oop = PyComplex_AsCComplex (op2); @@ -897,17 +873,30 @@ */ /**begin repeat - -#fname=SHORT,USHORT,INT,UINT,LONG,ULONG,LONGLONG,ULONGLONG,FLOAT,DOUBLE,LONGDOUBLE# -#type=short,ushort,int,uint,long,ulong,longlong,ulonglong,float,double,longdouble# -#format="hd","hu","d","u","ld","lu",LONGLONG_FMT,ULONGLONG_FMT,"f","lf","Lf"# +#fname=SHORT,USHORT,INT,UINT,LONG,ULONG,LONGLONG,ULONGLONG# +#type=short,ushort,int,uint,long,ulong,longlong,ulonglong# +#format="hd","hu","d","u","ld","lu",LONGLONG_FMT,ULONGLONG_FMT# */ static int @fname at _scan (FILE *fp, @type@ *ip, void *NPY_UNUSED(ignore), PyArray_Descr *NPY_UNUSED(ignored)) { return fscanf(fp, "%"@format@, ip); } +/**end repeat**/ +/**begin repeat +#fname=FLOAT,DOUBLE,LONGDOUBLE# +#type=float,double,longdouble# +*/ +static int + at fname@_scan (FILE *fp, @type@ *ip, void *NPY_UNUSED(ignore), PyArray_Descr *NPY_UNUSED(ignored)) +{ + double result; + int ret; + ret = NumPyOS_ascii_ftolf(fp, &result); + *ip = (@type@) result; + return ret; +} /**end repeat**/ /**begin repeat @@ -966,19 +955,15 @@ #fname=FLOAT,DOUBLE,LONGDOUBLE# #type=float,double,longdouble# */ -#if (PY_VERSION_HEX >= 0x02040000) || defined(PyOS_ascii_strtod) static int @fname at _fromstr(char *str, @type@ *ip, char **endptr, PyArray_Descr *NPY_UNUSED(ignore)) { double result; - result = PyOS_ascii_strtod(str, endptr); + result = NumPyOS_ascii_strtod(str, endptr); *ip = (@type@) result; return 0; } -#else -#define @fname at _fromstr NULL -#endif /**end repeat**/ Modified: branches/dynamic_cpu_configuration/numpy/core/src/multiarraymodule.c =================================================================== --- branches/dynamic_cpu_configuration/numpy/core/src/multiarraymodule.c 2009-02-14 22:42:29 UTC (rev 6368) +++ branches/dynamic_cpu_configuration/numpy/core/src/multiarraymodule.c 2009-02-15 12:03:15 UTC (rev 6369) @@ -81,6 +81,10 @@ return NULL; } +/* XXX: We include c99 compat math module here because it is needed for + * numpyos.c (included by arrayobject). This is bad - we should separate + * declaration/implementation and share this in a lib. */ +#include "umath_funcs_c99.inc" /* Including this file is the only way I know how to declare functions static in each file, and store the pointers from functions in both @@ -7705,6 +7709,9 @@ PyObject *m, *d, *s; PyObject *c_api; + /* Initialize constants etc. */ + NumPyOS_init(); + /* Create the module and add the functions */ m = Py_InitModule("multiarray", array_module_methods); if (!m) goto err; Copied: branches/dynamic_cpu_configuration/numpy/core/src/numpyos.c (from rev 6368, trunk/numpy/core/src/numpyos.c) Modified: branches/dynamic_cpu_configuration/numpy/core/src/scalarmathmodule.c.src =================================================================== --- branches/dynamic_cpu_configuration/numpy/core/src/scalarmathmodule.c.src 2009-02-14 22:42:29 UTC (rev 6368) +++ branches/dynamic_cpu_configuration/numpy/core/src/scalarmathmodule.c.src 2009-02-15 12:03:15 UTC (rev 6369) @@ -636,8 +636,11 @@ &errobj) < 0) return NULL; first = 1; - if (PyUFunc_handlefperr(errmask, errobj, retstatus, &first)) + if (PyUFunc_handlefperr(errmask, errobj, retstatus, &first)) { + Py_XDECREF(errobj); return NULL; + } + Py_XDECREF(errobj); } #endif @@ -736,8 +739,11 @@ &errobj) < 0) return NULL; first = 1; - if (PyUFunc_handlefperr(errmask, errobj, retstatus, &first)) + if (PyUFunc_handlefperr(errmask, errobj, retstatus, &first)) { + Py_XDECREF(errobj); return NULL; + } + Py_XDECREF(errobj); } #if @isint@ Modified: branches/dynamic_cpu_configuration/numpy/core/src/scalartypes.inc.src =================================================================== --- branches/dynamic_cpu_configuration/numpy/core/src/scalartypes.inc.src 2009-02-14 22:42:29 UTC (rev 6368) +++ branches/dynamic_cpu_configuration/numpy/core/src/scalartypes.inc.src 2009-02-15 12:03:15 UTC (rev 6369) @@ -5,6 +5,9 @@ #endif #include "numpy/arrayscalars.h" +#include "config.h" +#include "numpyos.c" + static PyBoolScalarObject _PyArrayScalar_BoolValues[2] = { {PyObject_HEAD_INIT(&PyBoolArrType_Type) 0}, {PyObject_HEAD_INIT(&PyBoolArrType_Type) 1}, @@ -604,23 +607,36 @@ return ret; } +#ifdef FORCE_NO_LONG_DOUBLE_FORMATTING +#undef NPY_LONGDOUBLE_FMT +#define NPY_LONGDOUBLE_FMT NPY_DOUBLE_FMT +#endif + /**begin repeat * #name=float, double, longdouble# * #NAME=FLOAT, DOUBLE, LONGDOUBLE# + * #type=f, d, l# */ -#define FMT "%.*" NPY_ at NAME@_FMT -#define CFMT1 "%.*" NPY_ at NAME@_FMT "j" -#define CFMT2 "(%.*" NPY_ at NAME@_FMT "%+.*" NPY_ at NAME@_FMT "j)" +#define _FMT1 "%%.%i" NPY_ at NAME@_FMT +#define _FMT2 "%%+.%i" NPY_ at NAME@_FMT static void format_ at name@(char *buf, size_t buflen, @name@ val, unsigned int prec) { - int cnt, i; + /* XXX: Find a correct size here for format string */ + char format[64], *res; + int i, cnt; - cnt = PyOS_snprintf(buf, buflen, FMT, prec, val); + PyOS_snprintf(format, sizeof(format), _FMT1, prec); + res = NumPyOS_ascii_format at type@(buf, buflen, format, val, 0); + if (res == NULL) { + fprintf(stderr, "Error while formatting\n"); + return; + } /* If nothing but digits after sign, append ".0" */ + cnt = strlen(buf); for (i = (val < 0) ? 1 : 0; i < cnt; ++i) { if (!isdigit(Py_CHARMASK(buf[i]))) { break; @@ -634,17 +650,39 @@ static void format_c at name@(char *buf, size_t buflen, c at name@ val, unsigned int prec) { + /* XXX: Find a correct size here for format string */ + char format[64]; + char *res; if (val.real == 0.0) { - PyOS_snprintf(buf, buflen, CFMT1, prec, val.imag); + PyOS_snprintf(format, sizeof(format), _FMT1, prec); + res = NumPyOS_ascii_format at type@(buf, buflen-1, format, val.imag, 0); + if (res == NULL) { + fprintf(stderr, "Error while formatting\n"); + return; + } + strncat(buf, "j", 1); } else { - PyOS_snprintf(buf, buflen, CFMT2, prec, val.real, prec, val.imag); + char re[64], im[64]; + PyOS_snprintf(format, sizeof(format), _FMT1, prec); + res = NumPyOS_ascii_format at type@(re, sizeof(re), format, val.real, 0); + if (res == NULL) { + fprintf(stderr, "Error while formatting\n"); + return; + } + + PyOS_snprintf(format, sizeof(format), _FMT2, prec); + res = NumPyOS_ascii_format at type@(im, sizeof(im), format, val.imag, 0); + if (res == NULL) { + fprintf(stderr, "Error while formatting\n"); + return; + } + PyOS_snprintf(buf, buflen, "(%s%sj)", re, im); } } -#undef FMT -#undef CFMT1 -#undef CFMT2 +#undef _FMT1 +#undef _FMT2 /**end repeat**/ @@ -736,7 +774,47 @@ /**end repeat1**/ /**end repeat**/ +/* + * float type print (control print a, where a is a float type instance) + */ +/**begin repeat + * #name=float, double, longdouble# + * #Name=Float, Double, LongDouble# + * #NAME=FLOAT, DOUBLE, LONGDOUBLE# + */ +static int + at name@type_print(PyObject *v, FILE *fp, int flags) +{ + char buf[100]; + @name@ val = ((Py at Name@ScalarObject *)v)->obval; + + format_ at name@(buf, sizeof(buf), val, + (flags & Py_PRINT_RAW) ? @NAME at PREC_STR : @NAME at PREC_REPR); + Py_BEGIN_ALLOW_THREADS + fputs(buf, fp); + Py_END_ALLOW_THREADS + return 0; +} + +static int +c at name@type_print(PyObject *v, FILE *fp, int flags) +{ + /* Size of buf: twice sizeof(real) + 2 (for the parenthesis) */ + char buf[202]; + c at name@ val = ((PyC at Name@ScalarObject *)v)->obval; + + format_c at name@(buf, sizeof(buf), val, + (flags & Py_PRINT_RAW) ? @NAME at PREC_STR : @NAME at PREC_REPR); + Py_BEGIN_ALLOW_THREADS + fputs(buf, fp); + Py_END_ALLOW_THREADS + return 0; +} + +/**end repeat**/ + + /* * Could improve this with a PyLong_FromLongDouble(longdouble ldval) * but this would need some more work... @@ -2254,7 +2332,9 @@ 0, /* nb_inplace_floor_divide */ 0, /* nb_inplace_true_divide */ /* Added in release 2.5 */ +#if PY_VERSION_HEX >= 0x02050000 0, /* nb_index */ +#endif }; static PyObject * @@ -3075,6 +3155,14 @@ PyCDoubleArrType_Type.tp_ at name@ = cdoubletype_ at name@; /**end repeat**/ + PyFloatArrType_Type.tp_print = floattype_print; + PyDoubleArrType_Type.tp_print = doubletype_print; + PyLongDoubleArrType_Type.tp_print = longdoubletype_print; + + PyCFloatArrType_Type.tp_print = cfloattype_print; + PyCDoubleArrType_Type.tp_print = cdoubletype_print; + PyCLongDoubleArrType_Type.tp_print = clongdoubletype_print; + /* These need to be coded specially because getitem does not return a normal Python type */ Modified: branches/dynamic_cpu_configuration/numpy/core/tests/test_memmap.py =================================================================== --- branches/dynamic_cpu_configuration/numpy/core/tests/test_memmap.py 2009-02-14 22:42:29 UTC (rev 6368) +++ branches/dynamic_cpu_configuration/numpy/core/tests/test_memmap.py 2009-02-15 12:03:15 UTC (rev 6369) @@ -14,6 +14,9 @@ self.data = arange(12, dtype=self.dtype) self.data.resize(self.shape) + def tearDown(self): + self.tmpfp.close() + def test_roundtrip(self): # Write data to file fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+', Modified: branches/dynamic_cpu_configuration/numpy/core/tests/test_multiarray.py =================================================================== --- branches/dynamic_cpu_configuration/numpy/core/tests/test_multiarray.py 2009-02-14 22:42:29 UTC (rev 6368) +++ branches/dynamic_cpu_configuration/numpy/core/tests/test_multiarray.py 2009-02-15 12:03:15 UTC (rev 6369) @@ -1,9 +1,12 @@ import tempfile import sys +import os import numpy as np from numpy.testing import * from numpy.core import * +from test_print import in_foreign_locale + class TestFlags(TestCase): def setUp(self): self.a = arange(10) @@ -113,41 +116,6 @@ d2 = dtype('f8') assert_equal(d2, dtype(float64)) - -class TestFromstring(TestCase): - def test_binary(self): - a = fromstring('\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@',dtype=' 4: + assert_equal(str(tp(1e10)), str(float('1e10')), + err_msg='Failed str formatting for type %s' % tp) + else: + if sys.platform == 'win32' and sys.version_info[0] <= 2 and \ + sys.version_info[1] <= 5: + ref = '1e+010' + else: + ref = '1e+10' + assert_equal(str(tp(1e10)), ref, + err_msg='Failed str formatting for type %s' % tp) - This is only for the str function, and only for simple types. - The precision of np.float and np.longdouble aren't the same as the - python float precision. +#@dec.knownfailureif(True, "formatting tests are known to fail") +def test_float_types(): + """ Check formatting. - """ - for t in [np.cfloat, np.cdouble, np.clongdouble] : - for x in [0, 1,-1, 1e10, 1e20] : - assert_equal(str(t(x)), str(complex(x))) - assert_equal(str(t(x*1j)), str(complex(x*1j))) - assert_equal(str(t(x + x*1j)), str(complex(x + x*1j))) + This is only for the str function, and only for simple types. + The precision of np.float and np.longdouble aren't the same as the + python float precision. + """ + for t in [np.float32, np.double, np.longdouble] : + yield check_float_type, t +def check_nan_inf_float(tp): + for x in [np.inf, -np.inf, np.nan]: + assert_equal(str(tp(x)), _REF[x], + err_msg='Failed str formatting for type %s' % tp) + +#@dec.knownfailureif(True, "formatting tests are known to fail") +def test_nan_inf_float(): + """ Check formatting of nan & inf. + + This is only for the str function, and only for simple types. + The precision of np.float and np.longdouble aren't the same as the + python float precision. + + """ + for t in [np.float32, np.double, np.longdouble] : + yield check_nan_inf_float, t + +def check_complex_type(tp): + for x in [0, 1,-1, 1e20] : + assert_equal(str(tp(x)), str(complex(x)), + err_msg='Failed str formatting for type %s' % tp) + assert_equal(str(tp(x*1j)), str(complex(x*1j)), + err_msg='Failed str formatting for type %s' % tp) + assert_equal(str(tp(x + x*1j)), str(complex(x + x*1j)), + err_msg='Failed str formatting for type %s' % tp) + + if tp(1e10).itemsize > 8: + assert_equal(str(tp(1e10)), str(complex(1e10)), + err_msg='Failed str formatting for type %s' % tp) + else: + if sys.platform == 'win32' and sys.version_info[0] <= 2 and \ + sys.version_info[1] <= 5: + ref = '(1e+010+0j)' + else: + ref = '(1e+10+0j)' + assert_equal(str(tp(1e10)), ref, + err_msg='Failed str formatting for type %s' % tp) + +#@dec.knownfailureif(True, "formatting tests are known to fail") +def test_complex_types(): + """Check formatting of complex types. + + This is only for the str function, and only for simple types. + The precision of np.float and np.longdouble aren't the same as the + python float precision. + + """ + for t in [np.complex64, np.cdouble, np.clongdouble] : + yield check_complex_type, t + +# print tests +def _test_redirected_print(x, tp, ref=None): + file = StringIO() + file_tp = StringIO() + stdout = sys.stdout + try: + sys.stdout = file_tp + print tp(x) + sys.stdout = file + if ref: + print ref + else: + print x + finally: + sys.stdout = stdout + + assert_equal(file.getvalue(), file_tp.getvalue(), + err_msg='print failed for type%s' % tp) + +def check_float_type_print(tp): + for x in [0, 1,-1, 1e20]: + _test_redirected_print(float(x), tp) + + for x in [np.inf, -np.inf, np.nan]: + _test_redirected_print(float(x), tp, _REF[x]) + + if tp(1e10).itemsize > 4: + _test_redirected_print(float(1e10), tp) + else: + if sys.platform == 'win32' and sys.version_info[0] <= 2 and \ + sys.version_info[1] <= 5: + ref = '1e+010' + else: + ref = '1e+10' + _test_redirected_print(float(1e10), tp, ref) + +#@dec.knownfailureif(True, "formatting tests are known to fail") +def check_complex_type_print(tp): + # We do not create complex with inf/nan directly because the feature is + # missing in python < 2.6 + for x in [0, 1, -1, 1e20]: + _test_redirected_print(complex(x), tp) + + if tp(1e10).itemsize > 8: + _test_redirected_print(complex(1e10), tp) + else: + if sys.platform == 'win32' and sys.version_info[0] <= 2 and \ + sys.version_info[1] <= 5: + ref = '(1e+010+0j)' + else: + ref = '(1e+10+0j)' + _test_redirected_print(complex(1e10), tp, ref) + + _test_redirected_print(complex(np.inf, 1), tp, '(inf+1j)') + _test_redirected_print(complex(-np.inf, 1), tp, '(-inf+1j)') + _test_redirected_print(complex(-np.nan, 1), tp, '(nan+1j)') + +def test_float_type_print(): + """Check formatting when using print """ + for t in [np.float32, np.double, np.longdouble] : + yield check_float_type_print, t + +#@dec.knownfailureif(True, "formatting tests are known to fail") +def test_complex_type_print(): + """Check formatting when using print """ + for t in [np.complex64, np.cdouble, np.clongdouble] : + yield check_complex_type_print, t + +# Locale tests: scalar types formatting should be independent of the locale +def in_foreign_locale(func): + # XXX: How to query locale on a given system ? + + # French is one language where the decimal is ',' not '.', and should be + # relatively common on many systems + def wrapper(*args, **kwargs): + curloc = locale.getlocale(locale.LC_NUMERIC) + try: + try: + if not sys.platform == 'win32': + locale.setlocale(locale.LC_NUMERIC, 'fr_FR') + else: + locale.setlocale(locale.LC_NUMERIC, 'FRENCH') + except locale.Error: + raise nose.SkipTest("Skipping locale test, because " + "French locale not found") + return func(*args, **kwargs) + finally: + locale.setlocale(locale.LC_NUMERIC, locale=curloc) + return nose.tools.make_decorator(func)(wrapper) + +#@dec.knownfailureif(True, "formatting tests are known to fail") + at in_foreign_locale +def test_locale_single(): + assert_equal(str(np.float32(1.2)), str(float(1.2))) + +#@dec.knownfailureif(True, "formatting tests are known to fail") + at in_foreign_locale +def test_locale_double(): + assert_equal(str(np.double(1.2)), str(float(1.2))) + +#@dec.knownfailureif(True, "formatting tests are known to fail") + at in_foreign_locale +def test_locale_longdouble(): + assert_equal(str(np.longdouble(1.2)), str(float(1.2))) + if __name__ == "__main__": run_module_suite() Modified: branches/dynamic_cpu_configuration/numpy/core/tests/test_regression.py =================================================================== --- branches/dynamic_cpu_configuration/numpy/core/tests/test_regression.py 2009-02-14 22:42:29 UTC (rev 6368) +++ branches/dynamic_cpu_configuration/numpy/core/tests/test_regression.py 2009-02-15 12:03:15 UTC (rev 6369) @@ -1,7 +1,7 @@ - from StringIO import StringIO import pickle import sys +import gc from os import path from numpy.testing import * import numpy as np @@ -1208,5 +1208,17 @@ a = np.array(1) self.failUnlessRaises(ValueError, lambda x: x.choose([]), a) + def test_errobj_reference_leak(self, level=rlevel): + """Ticket #955""" + z = int(0) + p = np.int32(-1) + + gc.collect() + n_before = len(gc.get_objects()) + z**p # this shouldn't leak a reference to errobj + gc.collect() + n_after = len(gc.get_objects()) + assert n_before >= n_after, (n_before, n_after) + if __name__ == "__main__": run_module_suite() Modified: branches/dynamic_cpu_configuration/numpy/core/tests/test_unicode.py =================================================================== --- branches/dynamic_cpu_configuration/numpy/core/tests/test_unicode.py 2009-02-14 22:42:29 UTC (rev 6368) +++ branches/dynamic_cpu_configuration/numpy/core/tests/test_unicode.py 2009-02-15 12:03:15 UTC (rev 6369) @@ -17,7 +17,7 @@ # Creation tests ############################################################ -class create_zeros: +class create_zeros(object): """Check the creation of zero-valued arrays""" def content_check(self, ua, ua_scalar, nbytes): @@ -69,7 +69,7 @@ ulen = 1009 -class create_values: +class create_values(object): """Check the creation of unicode arrays with values""" def content_check(self, ua, ua_scalar, nbytes): @@ -154,7 +154,7 @@ # Assignment tests ############################################################ -class assign_values: +class assign_values(object): """Check the assignment of unicode arrays with values""" def content_check(self, ua, ua_scalar, nbytes): Modified: branches/dynamic_cpu_configuration/numpy/ctypeslib.py =================================================================== --- branches/dynamic_cpu_configuration/numpy/ctypeslib.py 2009-02-14 22:42:29 UTC (rev 6368) +++ branches/dynamic_cpu_configuration/numpy/ctypeslib.py 2009-02-15 12:03:15 UTC (rev 6369) @@ -353,8 +353,3 @@ result = tp.from_address(addr) result.__keep = ai return result - - -def test(level=1, verbosity=1): - from numpy.testing import NumpyTest - return NumpyTest().test(level, verbosity) Modified: branches/dynamic_cpu_configuration/numpy/distutils/command/config.py =================================================================== --- branches/dynamic_cpu_configuration/numpy/distutils/command/config.py 2009-02-14 22:42:29 UTC (rev 6368) +++ branches/dynamic_cpu_configuration/numpy/distutils/command/config.py 2009-02-15 12:03:15 UTC (rev 6369) @@ -5,11 +5,13 @@ import os, signal import warnings +import sys from distutils.command.config import config as old_config from distutils.command.config import LANG_EXT from distutils import log from distutils.file_util import copy_file +import distutils from numpy.distutils.exec_command import exec_command from numpy.distutils.mingw32ccompiler import generate_manifest @@ -39,6 +41,30 @@ def _check_compiler (self): old_config._check_compiler(self) from numpy.distutils.fcompiler import FCompiler, new_fcompiler + + if sys.platform == 'win32' and self.compiler.compiler_type == 'msvc': + # XXX: hack to circumvent a python 2.6 bug with msvc9compiler: + # initialize call query_vcvarsall, which throws an IOError, and + # causes an error along the way without much information. We try to + # catch it here, hoping it is early enough, and print an helpful + # message instead of Error: None. + if not self.compiler.initialized: + try: + self.compiler.initialize() + except IOError, e: + msg = """\ +Could not initialize compiler instance: do you have Visual Studio +installed ? If you are trying to build with mingw, please use python setup.py +build -c mingw32 instead ). If you have Visual Studio installed, check it is +correctly installed, and the right version (VS 2008 for python 2.6, VS 2003 for +2.5, etc...). Original exception was: %s, and the Compiler +class was %s +============================================================================""" \ + % (e, self.compiler.__class__.__name__) + print """\ +============================================================================""" + raise distutils.errors.DistutilsPlatformError(msg) + if not isinstance(self.fcompiler, FCompiler): self.fcompiler = new_fcompiler(compiler=self.fcompiler, dry_run=self.dry_run, force=1, Modified: branches/dynamic_cpu_configuration/numpy/distutils/command/scons.py =================================================================== --- branches/dynamic_cpu_configuration/numpy/distutils/command/scons.py 2009-02-14 22:42:29 UTC (rev 6368) +++ branches/dynamic_cpu_configuration/numpy/distutils/command/scons.py 2009-02-15 12:03:15 UTC (rev 6369) @@ -361,9 +361,13 @@ try: minver = "0.9.3" - from numscons import get_version - if get_version() < minver: - raise ValueError() + try: + # version_info was added in 0.10.0 + from numscons import version_info + except ImportError: + from numscons import get_version + if get_version() < minver: + raise ValueError() except ImportError: raise RuntimeError("You need numscons >= %s to build numpy "\ "with numscons (imported numscons path " \ Modified: branches/dynamic_cpu_configuration/numpy/distutils/fcompiler/compaq.py =================================================================== --- branches/dynamic_cpu_configuration/numpy/distutils/fcompiler/compaq.py 2009-02-14 22:42:29 UTC (rev 6368) +++ branches/dynamic_cpu_configuration/numpy/distutils/fcompiler/compaq.py 2009-02-15 12:03:15 UTC (rev 6369) @@ -85,6 +85,10 @@ print 'Ignoring "%s" (I think it is msvccompiler.py bug)' % (msg) else: raise + except IOError, e: + if not "vcvarsall.bat" in str(e): + print "Unexpected IOError in", __file__ + raise e executables = { 'version_cmd' : ['', "/what"], Modified: branches/dynamic_cpu_configuration/numpy/distutils/fcompiler/gnu.py =================================================================== --- branches/dynamic_cpu_configuration/numpy/distutils/fcompiler/gnu.py 2009-02-14 22:42:29 UTC (rev 6368) +++ branches/dynamic_cpu_configuration/numpy/distutils/fcompiler/gnu.py 2009-02-15 12:03:15 UTC (rev 6369) @@ -87,21 +87,29 @@ def get_flags_linker_so(self): opt = self.linker_so[1:] if sys.platform=='darwin': - # MACOSX_DEPLOYMENT_TARGET must be at least 10.3. This is - # a reasonable default value even when building on 10.4 when using - # the official Python distribution and those derived from it (when - # not broken). target = os.environ.get('MACOSX_DEPLOYMENT_TARGET', None) - if target is None or target == '': - target = '10.3' - major, minor = target.split('.') - if int(minor) < 3: - minor = '3' - warnings.warn('Environment variable ' - 'MACOSX_DEPLOYMENT_TARGET reset to %s.%s' % (major, minor)) - os.environ['MACOSX_DEPLOYMENT_TARGET'] = '%s.%s' % (major, - minor) - + # If MACOSX_DEPLOYMENT_TARGET is set, we simply trust the value + # and leave it alone. But, distutils will complain if the + # environment's value is different from the one in the Python + # Makefile used to build Python. We let disutils handle this + # error checking. + if not target: + # If MACOSX_DEPLOYMENT_TARGET is not set in the environment, + # we try to get it first from the Python Makefile and then we + # fall back to setting it to 10.3 to maximize the set of + # versions we can work with. This is a reasonable default + # even when using the official Python dist and those derived + # from it. + import distutils.sysconfig as sc + g = {} + filename = sc.get_makefile_filename() + sc.parse_makefile(filename, g) + target = g.get('MACOSX_DEPLOYMENT_TARGET', '10.3') + os.environ['MACOSX_DEPLOYMENT_TARGET'] = target + if target == '10.3': + s = 'Env. variable MACOSX_DEPLOYMENT_TARGET set to 10.3' + warnings.warn(s) + opt.extend(['-undefined', 'dynamic_lookup', '-bundle']) else: opt.append("-shared") @@ -272,30 +280,30 @@ def get_library_dirs(self): opt = GnuFCompiler.get_library_dirs(self) - if sys.platform == 'win32': - c_compiler = self.c_compiler - if c_compiler and c_compiler.compiler_type == "msvc": - target = self.get_target() - if target: + if sys.platform == 'win32': + c_compiler = self.c_compiler + if c_compiler and c_compiler.compiler_type == "msvc": + target = self.get_target() + if target: d = os.path.normpath(self.get_libgcc_dir()) - root = os.path.join(d, os.pardir, os.pardir, os.pardir, os.pardir) - mingwdir = os.path.normpath(os.path.join(root, target, "lib")) - full = os.path.join(mingwdir, "libmingwex.a") - if os.path.exists(full): - opt.append(mingwdir) - return opt + root = os.path.join(d, os.pardir, os.pardir, os.pardir, os.pardir) + mingwdir = os.path.normpath(os.path.join(root, target, "lib")) + full = os.path.join(mingwdir, "libmingwex.a") + if os.path.exists(full): + opt.append(mingwdir) + return opt def get_libraries(self): opt = GnuFCompiler.get_libraries(self) if sys.platform == 'darwin': opt.remove('cc_dynamic') - if sys.platform == 'win32': - c_compiler = self.c_compiler - if c_compiler and c_compiler.compiler_type == "msvc": - if "gcc" in opt: - i = opt.index("gcc") - opt.insert(i+1, "mingwex") - opt.insert(i+1, "mingw32") + if sys.platform == 'win32': + c_compiler = self.c_compiler + if c_compiler and c_compiler.compiler_type == "msvc": + if "gcc" in opt: + i = opt.index("gcc") + opt.insert(i+1, "mingwex") + opt.insert(i+1, "mingw32") return opt def get_target(self): @@ -303,9 +311,9 @@ ['-v'], use_tee=0) if not status: - m = TARGET_R.search(output) - if m: - return m.group(1) + m = TARGET_R.search(output) + if m: + return m.group(1) return "" if __name__ == '__main__': Modified: branches/dynamic_cpu_configuration/numpy/distutils/lib2def.py =================================================================== --- branches/dynamic_cpu_configuration/numpy/distutils/lib2def.py 2009-02-14 22:42:29 UTC (rev 6368) +++ branches/dynamic_cpu_configuration/numpy/distutils/lib2def.py 2009-02-15 12:03:15 UTC (rev 6369) @@ -1,6 +1,7 @@ import re import sys import os +import subprocess __doc__ = """This module generates a DEF file from the symbols in an MSVC-compiled DLL import library. It correctly discriminates between @@ -59,13 +60,13 @@ deffile = None return libfile, deffile -def getnm(nm_cmd = 'nm -Cs python%s.lib' % py_ver): +def getnm(nm_cmd = ['nm', '-Cs', 'python%s.lib' % py_ver]): """Returns the output of nm_cmd via a pipe. nm_output = getnam(nm_cmd = 'nm -Cs py_lib')""" - f = os.popen(nm_cmd) - nm_output = f.read() - f.close() + f = subprocess.Popen(nm_cmd, shell=True, stdout=subprocess.PIPE) + nm_output = f.stdout.read() + f.stdout.close() return nm_output def parse_nm(nm_output): @@ -107,7 +108,7 @@ deffile = sys.stdout else: deffile = open(deffile, 'w') - nm_cmd = '%s %s' % (DEFAULT_NM, libfile) + nm_cmd = [str(DEFAULT_NM), str(libfile)] nm_output = getnm(nm_cmd) dlist, flist = parse_nm(nm_output) output_def(dlist, flist, DEF_HEADER, deffile) Modified: branches/dynamic_cpu_configuration/numpy/distutils/mingw32ccompiler.py =================================================================== --- branches/dynamic_cpu_configuration/numpy/distutils/mingw32ccompiler.py 2009-02-14 22:42:29 UTC (rev 6368) +++ branches/dynamic_cpu_configuration/numpy/distutils/mingw32ccompiler.py 2009-02-15 12:03:15 UTC (rev 6369) @@ -9,6 +9,7 @@ """ import os +import subprocess import sys import log @@ -50,9 +51,10 @@ # get_versions methods regex if self.gcc_version is None: import re - out = os.popen('gcc -dumpversion','r') - out_string = out.read() - out.close() + p = subprocess.Popen(['gcc', '-dumpversion'], shell=True, + stdout=subprocess.PIPE) + out_string = p.stdout.read() + p.stdout.close() result = re.search('(\d+\.\d+)',out_string) if result: self.gcc_version = StrictVersion(result.group(1)) @@ -227,20 +229,36 @@ # raise DistutilsPlatformError, msg return +#===================================== +# Dealing with Visual Studio MANIFESTS +#===================================== + # Functions to deal with visual studio manifests. Manifest are a mechanism to # enforce strong DLL versioning on windows, and has nothing to do with # distutils MANIFEST. manifests are XML files with version info, and used by -# the OS loader; they are necessary when linking against a DLL no in the system -# path; in particular, python 2.6 is built against the MS runtime 9 (the one -# from VS 2008), which is not available on most windows systems; python 2.6 -# installer does install it in the Win SxS (Side by side) directory, but this -# requires the manifest too. This is a big mess, thanks MS for a wonderful -# system. +# the OS loader; they are necessary when linking against a DLL not in the +# system path; in particular, official python 2.6 binary is built against the +# MS runtime 9 (the one from VS 2008), which is not available on most windows +# systems; python 2.6 installer does install it in the Win SxS (Side by side) +# directory, but this requires the manifest for this to work. This is a big +# mess, thanks MS for a wonderful system. -# XXX: ideally, we should use exactly the same version as used by python, but I -# have no idea how to obtain the exact version from python. We could use the -# strings utility on python.exe, maybe ? -_MSVCRVER_TO_FULLVER = {'90': "9.0.21022.8"} +# XXX: ideally, we should use exactly the same version as used by python. I +# submitted a patch to get this version, but it was only included for python +# 2.6.1 and above. So for versions below, we use a "best guess". +_MSVCRVER_TO_FULLVER = {} +if sys.platform == 'win32': + try: + import msvcrt + if hasattr(msvcrt, "CRT_ASSEMBLY_VERSION"): + _MSVCRVER_TO_FULLVER['90'] = msvcrt.CRT_ASSEMBLY_VERSION + else: + _MSVCRVER_TO_FULLVER['90'] = "9.0.21022.8" + except ImportError: + # If we are here, means python was not built with MSVC. Not sure what to do + # in that case: manifest building will fail, but it should not be used in + # that case anyway + log.warn('Cannot import msvcrt: using manifest will not be possible') def msvc_manifest_xml(maj, min): """Given a major and minor version of the MSVCR, returns the @@ -311,15 +329,15 @@ def configtest_name(config): base = os.path.basename(config._gen_temp_sourcefile("yo", [], "c")) return os.path.splitext(base)[0] - + def manifest_name(config): - # Get configest name (including suffix) + # Get configest name (including suffix) root = configtest_name(config) exext = config.compiler.exe_extension return root + exext + ".manifest" def rc_name(config): - # Get configest name (including suffix) + # Get configest name (including suffix) root = configtest_name(config) return root + ".rc" Modified: branches/dynamic_cpu_configuration/numpy/distutils/misc_util.py =================================================================== --- branches/dynamic_cpu_configuration/numpy/distutils/misc_util.py 2009-02-14 22:42:29 UTC (rev 6368) +++ branches/dynamic_cpu_configuration/numpy/distutils/misc_util.py 2009-02-15 12:03:15 UTC (rev 6369) @@ -6,6 +6,7 @@ import glob import atexit import tempfile +import subprocess try: set @@ -1340,7 +1341,10 @@ revision = None m = None try: - sin, sout = os.popen4('svnversion') + p = subprocess.Popen(['svnversion'], shell=True, + stdout=subprocess.PIPE, stderr=STDOUT, + close_fds=True) + sout = p.stdout m = re.match(r'(?P\d+)', sout.read()) except: pass Modified: branches/dynamic_cpu_configuration/numpy/distutils/system_info.py =================================================================== --- branches/dynamic_cpu_configuration/numpy/distutils/system_info.py 2009-02-14 22:42:29 UTC (rev 6368) +++ branches/dynamic_cpu_configuration/numpy/distutils/system_info.py 2009-02-15 12:03:15 UTC (rev 6369) @@ -128,6 +128,50 @@ from numpy.distutils.misc_util import is_sequence, is_string from numpy.distutils.command.config import config as cmd_config +# Determine number of bits +import platform +_bits = {'32bit':32,'64bit':64} +platform_bits = _bits[platform.architecture()[0]] + +def libpaths(paths,bits): + """Return a list of library paths valid on 32 or 64 bit systems. + + Inputs: + paths : sequence + A sequence of strings (typically paths) + bits : int + An integer, the only valid values are 32 or 64. A ValueError exception + is raised otherwise. + + Examples: + + Consider a list of directories + >>> paths = ['/usr/X11R6/lib','/usr/X11/lib','/usr/lib'] + + For a 32-bit platform, this is already valid: + >>> libpaths(paths,32) + ['/usr/X11R6/lib', '/usr/X11/lib', '/usr/lib'] + + On 64 bits, we prepend the '64' postfix + >>> libpaths(paths,64) + ['/usr/X11R6/lib64', '/usr/X11R6/lib', '/usr/X11/lib64', '/usr/X11/lib', + '/usr/lib64', '/usr/lib'] + """ + if bits not in (32, 64): + raise ValueError("Invalid bit size in libpaths: 32 or 64 only") + + # Handle 32bit case + if bits==32: + return paths + + # Handle 64bit case + out = [] + for p in paths: + out.extend([p+'64', p]) + + return out + + if sys.platform == 'win32': default_lib_dirs = ['C:\\', os.path.join(distutils.sysconfig.EXEC_PREFIX, @@ -137,24 +181,16 @@ default_x11_lib_dirs = [] default_x11_include_dirs = [] else: - default_lib_dirs = ['/usr/local/lib', '/opt/lib', '/usr/lib', - '/opt/local/lib', '/sw/lib'] + default_lib_dirs = libpaths(['/usr/local/lib','/opt/lib','/usr/lib', + '/opt/local/lib','/sw/lib'], platform_bits) default_include_dirs = ['/usr/local/include', '/opt/include', '/usr/include', - '/opt/local/include', '/sw/include'] + '/opt/local/include', '/sw/include', + '/usr/include/suitesparse'] default_src_dirs = ['.','/usr/local/src', '/opt/src','/sw/src'] - try: - platform = os.uname() - bit64 = platform[-1].endswith('64') - except: - bit64 = False - - if bit64: - default_x11_lib_dirs = ['/usr/lib64'] - else: - default_x11_lib_dirs = ['/usr/X11R6/lib','/usr/X11/lib','/usr/lib'] - + default_x11_lib_dirs = libpaths(['/usr/X11R6/lib','/usr/X11/lib', + '/usr/lib'], platform_bits) default_x11_include_dirs = ['/usr/X11R6/include','/usr/X11/include', '/usr/include'] @@ -364,14 +400,16 @@ self.files.extend(get_standard_file('.numpy-site.cfg')) self.files.extend(get_standard_file('site.cfg')) self.parse_config_files() - self.search_static_first = self.cp.getboolean(self.section, - 'search_static_first') + if self.section is not None: + self.search_static_first = self.cp.getboolean(self.section, + 'search_static_first') assert isinstance(self.search_static_first, int) def parse_config_files(self): self.cp.read(self.files) if not self.cp.has_section(self.section): - self.cp.add_section(self.section) + if self.section is not None: + self.cp.add_section(self.section) def calc_libraries_info(self): libs = self.get_libraries() Copied: branches/dynamic_cpu_configuration/numpy/doc/constants.py (from rev 6368, trunk/numpy/doc/constants.py) Modified: branches/dynamic_cpu_configuration/numpy/f2py/cfuncs.py =================================================================== --- branches/dynamic_cpu_configuration/numpy/f2py/cfuncs.py 2009-02-14 22:42:29 UTC (rev 6368) +++ branches/dynamic_cpu_configuration/numpy/f2py/cfuncs.py 2009-02-15 12:03:15 UTC (rev 6369) @@ -472,15 +472,17 @@ cppmacros['CHECKSTRING']="""\ #define CHECKSTRING(check,tcheck,name,show,var)\\ \tif (!(check)) {\\ -\t\tPyErr_SetString(#modulename#_error,\"(\"tcheck\") failed for \"name);\\ -\t\tfprintf(stderr,show\"\\n\",slen(var),var);\\ +\t\tchar errstring[256];\\ +\t\tsprintf(errstring, \"%s: \"show, \"(\"tcheck\") failed for \"name, slen(var), var);\\ +\t\tPyErr_SetString(#modulename#_error, errstring);\\ \t\t/*goto capi_fail;*/\\ \t} else """ cppmacros['CHECKSCALAR']="""\ #define CHECKSCALAR(check,tcheck,name,show,var)\\ \tif (!(check)) {\\ -\t\tPyErr_SetString(#modulename#_error,\"(\"tcheck\") failed for \"name);\\ -\t\tfprintf(stderr,show\"\\n\",var);\\ +\t\tchar errstring[256];\\ +\t\tsprintf(errstring, \"%s: \"show, \"(\"tcheck\") failed for \"name, var);\\ +\t\tPyErr_SetString(#modulename#_error,errstring);\\ \t\t/*goto capi_fail;*/\\ \t} else """ ## cppmacros['CHECKDIMS']="""\ Modified: branches/dynamic_cpu_configuration/numpy/f2py/f2py.1 =================================================================== --- branches/dynamic_cpu_configuration/numpy/f2py/f2py.1 2009-02-14 22:42:29 UTC (rev 6368) +++ branches/dynamic_cpu_configuration/numpy/f2py/f2py.1 2009-02-15 12:03:15 UTC (rev 6369) @@ -20,7 +20,7 @@ This program generates a Python C/API file (module.c) that contains wrappers for given Fortran or C functions so that they can be called from Python. -With the -c option the corresponding +With the \-c option the corresponding extension modules are built. .SH OPTIONS .TP @@ -49,8 +49,8 @@ \'untitled\'. .TP .B \-\-[no\-]lower -Do [not] lower the cases in . By default, --lower is -assumed with -h key, and --no-lower without -h key. +Do [not] lower the cases in . By default, \-\-lower is +assumed with \-h key, and \-\-no\-lower without \-h key. .TP .B \-\-build\-dir All f2py generated files are created in . Default is tempfile.mktemp(). @@ -59,14 +59,14 @@ Overwrite existing signature file. .TP .B \-\-[no\-]latex\-doc -Create (or not) module.tex. Default is --no-latex-doc. +Create (or not) module.tex. Default is \-\-no\-latex\-doc. .TP .B \-\-short\-latex Create 'incomplete' LaTeX document (without commands \\documentclass, \\tableofcontents, and \\begin{document}, \\end{document}). .TP .B \-\-[no\-]rest\-doc -Create (or not) module.rst. Default is --no-rest-doc. +Create (or not) module.rst. Default is \-\-no\-rest\-doc. .TP .B \-\-debug\-capi Create C/API code that reports the state of the wrappers during @@ -81,12 +81,12 @@ .TP .B \-\-[no\-]wrap\-functions Create Fortran subroutine wrappers to Fortran 77 -functions. --wrap-functions is default because it ensures maximum +functions. \-\-wrap\-functions is default because it ensures maximum portability/compiler independence. .TP .B \-\-help\-link [..] List system resources found by system_info.py. [..] may contain -a list of resources names. See also --link- switch below. +a list of resources names. See also \-\-link\- switch below. .TP .B \-\-quiet Run quietly. @@ -100,7 +100,7 @@ .B \-\-include_paths path1:path2:... Search include files (that f2py will scan) from the given directories. .SH "CONFIG_FC OPTIONS" -The following options are effective only when -c switch is used. +The following options are effective only when \-c switch is used. .TP .B \-\-help-compiler List available Fortran compilers [DEPRECIATED]. @@ -147,13 +147,13 @@ .B \-\-debug Compile with debugging information. .SH "EXTRA OPTIONS" -The following options are effective only when -c switch is used. +The following options are effective only when \-c switch is used. .TP .B \-\-link- Link extension module with as defined by numpy_distutils/system_info.py. E.g. to link with optimized LAPACK libraries (vecLib on MacOSX, ATLAS elsewhere), use ---link-lapack_opt. See also --help-link switch. +\-\-link\-lapack_opt. See also \-\-help\-link switch. .TP .B -L/path/to/lib/ -l Modified: branches/dynamic_cpu_configuration/numpy/f2py/f2py2e.py =================================================================== --- branches/dynamic_cpu_configuration/numpy/f2py/f2py2e.py 2009-02-14 22:42:29 UTC (rev 6368) +++ branches/dynamic_cpu_configuration/numpy/f2py/f2py2e.py 2009-02-15 12:03:15 UTC (rev 6369) @@ -543,7 +543,7 @@ setup(ext_modules = [ext]) if remove_build_dir and os.path.exists(build_dir): - import shutil + import shutil outmess('Removing build directory %s\n'%(build_dir)) shutil.rmtree(build_dir) Modified: branches/dynamic_cpu_configuration/numpy/f2py/rules.py =================================================================== --- branches/dynamic_cpu_configuration/numpy/f2py/rules.py 2009-02-14 22:42:29 UTC (rev 6368) +++ branches/dynamic_cpu_configuration/numpy/f2py/rules.py 2009-02-15 12:03:15 UTC (rev 6369) @@ -245,7 +245,7 @@ f2py_start_clock(); #endif \tif (!PyArg_ParseTupleAndKeywords(capi_args,capi_keywds,\\ -\t\t\"#argformat#|#keyformat##xaformat#:#pyname#\",\\ +\t\t\"#argformat##keyformat##xaformat#:#pyname#\",\\ \t\tcapi_kwlist#args_capi##keys_capi##keys_xa#))\n\t\treturn NULL; #frompyobj# /*end of frompyobj*/ @@ -1355,6 +1355,16 @@ rd['latexdocstrsigns']=rd['latexdocstrsigns']+rd[k][0:1]+\ ['\\begin{description}']+rd[k][1:]+\ ['\\end{description}'] + + # Workaround for Python 2.6, 2.6.1 bug: http://bugs.python.org/issue4720 + if rd['keyformat'] or rd['xaformat']: + argformat = rd['argformat'] + if isinstance(argformat, list): + argformat.append('|') + else: + assert isinstance(argformat, str),repr((argformat, type(argformat))) + rd['argformat'] += '|' + ar=applyrules(routine_rules,rd) if ismoduleroutine(rout): outmess('\t\t\t %s\n'%(ar['docshort'])) Modified: branches/dynamic_cpu_configuration/numpy/lib/__init__.py =================================================================== --- branches/dynamic_cpu_configuration/numpy/lib/__init__.py 2009-02-14 22:42:29 UTC (rev 6368) +++ branches/dynamic_cpu_configuration/numpy/lib/__init__.py 2009-02-15 12:03:15 UTC (rev 6369) @@ -1,151 +1,3 @@ -""" -Basic functions used by several sub-packages and -useful to have in the main name-space. - -Type Handling -------------- -================ =================== -iscomplexobj Test for complex object, scalar result -isrealobj Test for real object, scalar result -iscomplex Test for complex elements, array result -isreal Test for real elements, array result -imag Imaginary part -real Real part -real_if_close Turns complex number with tiny imaginary part to real -isneginf Tests for negative infinity, array result -isposinf Tests for positive infinity, array result -isnan Tests for nans, array result -isinf Tests for infinity, array result -isfinite Tests for finite numbers, array result -isscalar True if argument is a scalar -nan_to_num Replaces NaN's with 0 and infinities with large numbers -cast Dictionary of functions to force cast to each type -common_type Determine the minimum common type code for a group - of arrays -mintypecode Return minimal allowed common typecode. -================ =================== - -Index Tricks ------------- -================ =================== -mgrid Method which allows easy construction of N-d - 'mesh-grids' -``r_`` Append and construct arrays: turns slice objects into - ranges and concatenates them, for 2d arrays appends rows. -index_exp Konrad Hinsen's index_expression class instance which - can be useful for building complicated slicing syntax. -================ =================== - -Useful Functions ----------------- -================ =================== -select Extension of where to multiple conditions and choices -extract Extract 1d array from flattened array according to mask -insert Insert 1d array of values into Nd array according to mask -linspace Evenly spaced samples in linear space -logspace Evenly spaced samples in logarithmic space -fix Round x to nearest integer towards zero -mod Modulo mod(x,y) = x % y except keeps sign of y -amax Array maximum along axis -amin Array minimum along axis -ptp Array max-min along axis -cumsum Cumulative sum along axis -prod Product of elements along axis -cumprod Cumluative product along axis -diff Discrete differences along axis -angle Returns angle of complex argument -unwrap Unwrap phase along given axis (1-d algorithm) -sort_complex Sort a complex-array (based on real, then imaginary) -trim_zeros Trim the leading and trailing zeros from 1D array. -vectorize A class that wraps a Python function taking scalar - arguments into a generalized function which can handle - arrays of arguments using the broadcast rules of - numerix Python. -================ =================== - -Shape Manipulation ------------------- -================ =================== -squeeze Return a with length-one dimensions removed. -atleast_1d Force arrays to be > 1D -atleast_2d Force arrays to be > 2D -atleast_3d Force arrays to be > 3D -vstack Stack arrays vertically (row on row) -hstack Stack arrays horizontally (column on column) -column_stack Stack 1D arrays as columns into 2D array -dstack Stack arrays depthwise (along third dimension) -split Divide array into a list of sub-arrays -hsplit Split into columns -vsplit Split into rows -dsplit Split along third dimension -================ =================== - -Matrix (2D Array) Manipulations -------------------------------- -================ =================== -fliplr 2D array with columns flipped -flipud 2D array with rows flipped -rot90 Rotate a 2D array a multiple of 90 degrees -eye Return a 2D array with ones down a given diagonal -diag Construct a 2D array from a vector, or return a given - diagonal from a 2D array. -mat Construct a Matrix -bmat Build a Matrix from blocks -================ =================== - -Polynomials ------------ -================ =================== -poly1d A one-dimensional polynomial class -poly Return polynomial coefficients from roots -roots Find roots of polynomial given coefficients -polyint Integrate polynomial -polyder Differentiate polynomial -polyadd Add polynomials -polysub Substract polynomials -polymul Multiply polynomials -polydiv Divide polynomials -polyval Evaluate polynomial at given argument -================ =================== - -Import Tricks -------------- -================ =================== -ppimport Postpone module import until trying to use it -ppimport_attr Postpone module import until trying to use its attribute -ppresolve Import postponed module and return it. -================ =================== - -Machine Arithmetics -------------------- -================ =================== -machar_single Single precision floating point arithmetic parameters -machar_double Double precision floating point arithmetic parameters -================ =================== - -Threading Tricks ----------------- -================ =================== -ParallelExec Execute commands in parallel thread. -================ =================== - -1D Array Set Operations ------------------------ -Set operations for 1D numeric arrays based on sort() function. - -================ =================== -ediff1d Array difference (auxiliary function). -unique1d Unique elements of 1D array. -intersect1d Intersection of 1D arrays with unique elements. -intersect1d_nu Intersection of 1D arrays with any elements. -setxor1d Set exclusive-or of 1D arrays with unique elements. -setmember1d Return an array of shape of ar1 containing 1 where - the elements of ar1 are in ar2 and 0 otherwise. -union1d Union of 1D arrays with unique elements. -setdiff1d Set difference of 1D arrays with unique elements. -================ =================== - -""" from info import __doc__ from numpy.version import version as __version__ Copied: branches/dynamic_cpu_configuration/numpy/lib/_iotools.py (from rev 6368, trunk/numpy/lib/_iotools.py) Modified: branches/dynamic_cpu_configuration/numpy/lib/arraysetops.py =================================================================== --- branches/dynamic_cpu_configuration/numpy/lib/arraysetops.py 2009-02-14 22:42:29 UTC (rev 6368) +++ branches/dynamic_cpu_configuration/numpy/lib/arraysetops.py 2009-02-15 12:03:15 UTC (rev 6369) @@ -52,13 +52,19 @@ If provided, this number will be taked onto the beginning of the returned differences. + Notes + ----- + When applied to masked arrays, this function drops the mask information + if the `to_begin` and/or `to_end` parameters are used + + Returns ------- ed : array The differences. Loosely, this will be (ary[1:] - ary[:-1]). """ - ary = np.asarray(ary).flat + ary = np.asanyarray(ary).flat ed = ary[1:] - ary[:-1] arrays = [ed] if to_begin is not None: @@ -132,7 +138,7 @@ "the output was (indices, unique_arr), but " "has now been reversed to be more consistent.") - ar = np.asarray(ar1).flatten() + ar = np.asanyarray(ar1).flatten() if ar.size == 0: if return_inverse and return_index: return ar, np.empty(0, np.bool), np.empty(0, np.bool) Modified: branches/dynamic_cpu_configuration/numpy/lib/function_base.py =================================================================== --- branches/dynamic_cpu_configuration/numpy/lib/function_base.py 2009-02-14 22:42:29 UTC (rev 6368) +++ branches/dynamic_cpu_configuration/numpy/lib/function_base.py 2009-02-15 12:03:15 UTC (rev 6369) @@ -228,10 +228,10 @@ * None : the new behaviour is used, no warning is printed. * True : the new behaviour is used and a warning is raised about the future removal of the `new` keyword. - * False : the old behaviour is used and a DeprecationWarning + * False : the old behaviour is used and a DeprecationWarning is raised. - As of NumPy 1.3, this keyword should not be used explicitly since it - will disappear in NumPy 1.4. + As of NumPy 1.3, this keyword should not be used explicitly since it + will disappear in NumPy 1.4. Returns ------- @@ -267,9 +267,9 @@ # Old behavior if new == False: warnings.warn(""" - The histogram semantics being used is now deprecated and - will disappear in NumPy 1.4. Please update your code to - use the default semantics. + The histogram semantics being used is now deprecated and + will disappear in NumPy 1.4. Please update your code to + use the default semantics. """, DeprecationWarning) a = asarray(a).ravel() @@ -320,8 +320,8 @@ elif new in [True, None]: if new is True: warnings.warn(""" - The new semantics of histogram is now the default and the `new` - keyword will be removed in NumPy 1.4. + The new semantics of histogram is now the default and the `new` + keyword will be removed in NumPy 1.4. """, Warning) a = asarray(a) if weights is not None: @@ -1073,53 +1073,6 @@ else: return a[slice1]-a[slice2] -try: - add_docstring(digitize, -r"""digitize(x,bins) - -Return the index of the bin to which each value of x belongs. - -Each index i returned is such that bins[i-1] <= x < bins[i] if -bins is monotonically increasing, or bins [i-1] > x >= bins[i] if -bins is monotonically decreasing. - -Beyond the bounds of the bins 0 or len(bins) is returned as appropriate. - -""") -except RuntimeError: - pass - -try: - add_docstring(bincount, -r"""bincount(x,weights=None) - -Return the number of occurrences of each value in x. - -x must be a list of non-negative integers. The output, b[i], -represents the number of times that i is found in x. If weights -is specified, every occurrence of i at a position p contributes -weights[p] instead of 1. - -See also: histogram, digitize, unique. - -""") -except RuntimeError: - pass - -try: - add_docstring(add_docstring, -r"""docstring(obj, docstring) - -Add a docstring to a built-in obj if possible. -If the obj already has a docstring raise a RuntimeError -If this routine does not know how to add a docstring to the object -raise a TypeError - -""") -except RuntimeError: - pass - - def interp(x, xp, fp, left=None, right=None): """ One-dimensional linear interpolation. @@ -2818,9 +2771,9 @@ y : array_like Input array to integrate. x : array_like, optional - If `x` is None, then spacing between all `y` elements is 1. + If `x` is None, then spacing between all `y` elements is `dx`. dx : scalar, optional - If `x` is None, spacing given by `dx` is assumed. + If `x` is None, spacing given by `dx` is assumed. Default is 1. axis : int, optional Specify the axis. @@ -2836,7 +2789,15 @@ if x is None: d = dx else: - d = diff(x,axis=axis) + x = asarray(x) + if x.ndim == 1: + d = diff(x) + # reshape to correct shape + shape = [1]*y.ndim + shape[axis] = d.shape[0] + d = d.reshape(shape) + else: + d = diff(x, axis=axis) nd = len(y.shape) slice1 = [slice(None)]*nd slice2 = [slice(None)]*nd Modified: branches/dynamic_cpu_configuration/numpy/lib/getlimits.py =================================================================== --- branches/dynamic_cpu_configuration/numpy/lib/getlimits.py 2009-02-14 22:42:29 UTC (rev 6368) +++ branches/dynamic_cpu_configuration/numpy/lib/getlimits.py 2009-02-15 12:03:15 UTC (rev 6369) @@ -88,6 +88,12 @@ _finfo_cache = {} def __new__(cls, dtype): + try: + dtype = np.dtype(dtype) + except TypeError: + # In case a float instance was given + dtype = np.dtype(type(dtype)) + obj = cls._finfo_cache.get(dtype,None) if obj is not None: return obj @@ -115,7 +121,7 @@ return obj def _init(self, dtype): - self.dtype = dtype + self.dtype = np.dtype(dtype) if dtype is ntypes.double: itype = ntypes.int64 fmt = '%24.16e' @@ -149,23 +155,23 @@ self.nexp = machar.iexp self.nmant = machar.it self.machar = machar - self._str_tiny = machar._str_xmin - self._str_max = machar._str_xmax - self._str_epsneg = machar._str_epsneg - self._str_eps = machar._str_eps - self._str_resolution = machar._str_resolution + self._str_tiny = machar._str_xmin.strip() + self._str_max = machar._str_xmax.strip() + self._str_epsneg = machar._str_epsneg.strip() + self._str_eps = machar._str_eps.strip() + self._str_resolution = machar._str_resolution.strip() return self def __str__(self): return '''\ Machine parameters for %(dtype)s --------------------------------------------------------------------- -precision=%(precision)3s resolution=%(_str_resolution)s -machep=%(machep)6s eps= %(_str_eps)s -negep =%(negep)6s epsneg= %(_str_epsneg)s -minexp=%(minexp)6s tiny= %(_str_tiny)s -maxexp=%(maxexp)6s max= %(_str_max)s -nexp =%(nexp)6s min= -max +precision=%(precision)3s resolution= %(_str_resolution)s +machep=%(machep)6s eps= %(_str_eps)s +negep =%(negep)6s epsneg= %(_str_epsneg)s +minexp=%(minexp)6s tiny= %(_str_tiny)s +maxexp=%(maxexp)6s max= %(_str_max)s +nexp =%(nexp)6s min= -max --------------------------------------------------------------------- ''' % self.__dict__ @@ -220,8 +226,11 @@ _min_vals = {} _max_vals = {} - def __init__(self, type): - self.dtype = np.dtype(type) + def __init__(self, int_type): + try: + self.dtype = np.dtype(int_type) + except TypeError: + self.dtype = np.dtype(type(int_type)) self.kind = self.dtype.kind self.bits = self.dtype.itemsize * 8 self.key = "%s%d" % (self.kind, self.bits) @@ -256,6 +265,17 @@ max = property(max) + def __str__(self): + """String representation.""" + return '''\ +Machine parameters for %(dtype)s +--------------------------------------------------------------------- +min = %(min)s +max = %(max)s +--------------------------------------------------------------------- +''' % {'dtype': self.dtype, 'min': self.min, 'max': self.max} + + if __name__ == '__main__': f = finfo(ntypes.single) print 'single epsilon:',f.eps Modified: branches/dynamic_cpu_configuration/numpy/lib/index_tricks.py =================================================================== --- branches/dynamic_cpu_configuration/numpy/lib/index_tricks.py 2009-02-14 22:42:29 UTC (rev 6368) +++ branches/dynamic_cpu_configuration/numpy/lib/index_tricks.py 2009-02-15 12:03:15 UTC (rev 6369) @@ -212,6 +212,8 @@ mgrid = nd_grid(sparse=False) ogrid = nd_grid(sparse=True) +mgrid.__doc__ = None # set in numpy.add_newdocs +ogrid.__doc__ = None # set in numpy.add_newdocs class AxisConcatenator(object): """Translates slice objects to concatenation along an axis. Modified: branches/dynamic_cpu_configuration/numpy/lib/info.py =================================================================== --- branches/dynamic_cpu_configuration/numpy/lib/info.py 2009-02-14 22:42:29 UTC (rev 6368) +++ branches/dynamic_cpu_configuration/numpy/lib/info.py 2009-02-15 12:03:15 UTC (rev 6369) @@ -1,134 +1,149 @@ -__doc_title__ = """Basic functions used by several sub-packages and -useful to have in the main name-space.""" -__doc__ = __doc_title__ + """ +""" +Basic functions used by several sub-packages and +useful to have in the main name-space. -Type handling -============== -iscomplexobj -- Test for complex object, scalar result -isrealobj -- Test for real object, scalar result -iscomplex -- Test for complex elements, array result -isreal -- Test for real elements, array result -imag -- Imaginary part -real -- Real part -real_if_close -- Turns complex number with tiny imaginary part to real -isneginf -- Tests for negative infinity ---| -isposinf -- Tests for positive infinity | -isnan -- Tests for nans |---- array results -isinf -- Tests for infinity | -isfinite -- Tests for finite numbers ---| -isscalar -- True if argument is a scalar -nan_to_num -- Replaces NaN's with 0 and infinities with large numbers -cast -- Dictionary of functions to force cast to each type -common_type -- Determine the 'minimum common type code' for a group - of arrays -mintypecode -- Return minimal allowed common typecode. +Type Handling +------------- +================ =================== +iscomplexobj Test for complex object, scalar result +isrealobj Test for real object, scalar result +iscomplex Test for complex elements, array result +isreal Test for real elements, array result +imag Imaginary part +real Real part +real_if_close Turns complex number with tiny imaginary part to real +isneginf Tests for negative infinity, array result +isposinf Tests for positive infinity, array result +isnan Tests for nans, array result +isinf Tests for infinity, array result +isfinite Tests for finite numbers, array result +isscalar True if argument is a scalar +nan_to_num Replaces NaN's with 0 and infinities with large numbers +cast Dictionary of functions to force cast to each type +common_type Determine the minimum common type code for a group + of arrays +mintypecode Return minimal allowed common typecode. +================ =================== -Index tricks -================== -mgrid -- Method which allows easy construction of N-d 'mesh-grids' -r_ -- Append and construct arrays: turns slice objects into - ranges and concatenates them, for 2d arrays appends - rows. -index_exp -- Konrad Hinsen's index_expression class instance which - can be useful for building complicated slicing syntax. +Index Tricks +------------ +================ =================== +mgrid Method which allows easy construction of N-d + 'mesh-grids' +``r_`` Append and construct arrays: turns slice objects into + ranges and concatenates them, for 2d arrays appends rows. +index_exp Konrad Hinsen's index_expression class instance which + can be useful for building complicated slicing syntax. +================ =================== -Useful functions -================== -select -- Extension of where to multiple conditions and choices -extract -- Extract 1d array from flattened array according to mask -insert -- Insert 1d array of values into Nd array according to mask -linspace -- Evenly spaced samples in linear space -logspace -- Evenly spaced samples in logarithmic space -fix -- Round x to nearest integer towards zero -mod -- Modulo mod(x,y) = x % y except keeps sign of y -amax -- Array maximum along axis -amin -- Array minimum along axis -ptp -- Array max-min along axis -cumsum -- Cumulative sum along axis -prod -- Product of elements along axis -cumprod -- Cumluative product along axis -diff -- Discrete differences along axis -angle -- Returns angle of complex argument -unwrap -- Unwrap phase along given axis (1-d algorithm) -sort_complex -- Sort a complex-array (based on real, then imaginary) -trim_zeros -- trim the leading and trailing zeros from 1D array. +Useful Functions +---------------- +================ =================== +select Extension of where to multiple conditions and choices +extract Extract 1d array from flattened array according to mask +insert Insert 1d array of values into Nd array according to mask +linspace Evenly spaced samples in linear space +logspace Evenly spaced samples in logarithmic space +fix Round x to nearest integer towards zero +mod Modulo mod(x,y) = x % y except keeps sign of y +amax Array maximum along axis +amin Array minimum along axis +ptp Array max-min along axis +cumsum Cumulative sum along axis +prod Product of elements along axis +cumprod Cumluative product along axis +diff Discrete differences along axis +angle Returns angle of complex argument +unwrap Unwrap phase along given axis (1-d algorithm) +sort_complex Sort a complex-array (based on real, then imaginary) +trim_zeros Trim the leading and trailing zeros from 1D array. +vectorize A class that wraps a Python function taking scalar + arguments into a generalized function which can handle + arrays of arguments using the broadcast rules of + numerix Python. +================ =================== -vectorize -- a class that wraps a Python function taking scalar - arguments into a generalized function which - can handle arrays of arguments using the broadcast - rules of numerix Python. +Shape Manipulation +------------------ +================ =================== +squeeze Return a with length-one dimensions removed. +atleast_1d Force arrays to be > 1D +atleast_2d Force arrays to be > 2D +atleast_3d Force arrays to be > 3D +vstack Stack arrays vertically (row on row) +hstack Stack arrays horizontally (column on column) +column_stack Stack 1D arrays as columns into 2D array +dstack Stack arrays depthwise (along third dimension) +split Divide array into a list of sub-arrays +hsplit Split into columns +vsplit Split into rows +dsplit Split along third dimension +================ =================== -Shape manipulation -=================== -squeeze -- Return a with length-one dimensions removed. -atleast_1d -- Force arrays to be > 1D -atleast_2d -- Force arrays to be > 2D -atleast_3d -- Force arrays to be > 3D -vstack -- Stack arrays vertically (row on row) -hstack -- Stack arrays horizontally (column on column) -column_stack -- Stack 1D arrays as columns into 2D array -dstack -- Stack arrays depthwise (along third dimension) -split -- Divide array into a list of sub-arrays -hsplit -- Split into columns -vsplit -- Split into rows -dsplit -- Split along third dimension +Matrix (2D Array) Manipulations +------------------------------- +================ =================== +fliplr 2D array with columns flipped +flipud 2D array with rows flipped +rot90 Rotate a 2D array a multiple of 90 degrees +eye Return a 2D array with ones down a given diagonal +diag Construct a 2D array from a vector, or return a given + diagonal from a 2D array. +mat Construct a Matrix +bmat Build a Matrix from blocks +================ =================== -Matrix (2d array) manipluations -=============================== -fliplr -- 2D array with columns flipped -flipud -- 2D array with rows flipped -rot90 -- Rotate a 2D array a multiple of 90 degrees -eye -- Return a 2D array with ones down a given diagonal -diag -- Construct a 2D array from a vector, or return a given - diagonal from a 2D array. -mat -- Construct a Matrix -bmat -- Build a Matrix from blocks - Polynomials -============ -poly1d -- A one-dimensional polynomial class +----------- +================ =================== +poly1d A one-dimensional polynomial class +poly Return polynomial coefficients from roots +roots Find roots of polynomial given coefficients +polyint Integrate polynomial +polyder Differentiate polynomial +polyadd Add polynomials +polysub Substract polynomials +polymul Multiply polynomials +polydiv Divide polynomials +polyval Evaluate polynomial at given argument +================ =================== -poly -- Return polynomial coefficients from roots -roots -- Find roots of polynomial given coefficients -polyint -- Integrate polynomial -polyder -- Differentiate polynomial -polyadd -- Add polynomials -polysub -- Substract polynomials -polymul -- Multiply polynomials -polydiv -- Divide polynomials -polyval -- Evaluate polynomial at given argument +Import Tricks +------------- +================ =================== +ppimport Postpone module import until trying to use it +ppimport_attr Postpone module import until trying to use its attribute +ppresolve Import postponed module and return it. +================ =================== -Import tricks -============= -ppimport -- Postpone module import until trying to use it -ppimport_attr -- Postpone module import until trying to use its - attribute -ppresolve -- Import postponed module and return it. +Machine Arithmetics +------------------- +================ =================== +machar_single Single precision floating point arithmetic parameters +machar_double Double precision floating point arithmetic parameters +================ =================== -Machine arithmetics -=================== -machar_single -- MachAr instance storing the parameters of system - single precision floating point arithmetics -machar_double -- MachAr instance storing the parameters of system - double precision floating point arithmetics +Threading Tricks +---------------- +================ =================== +ParallelExec Execute commands in parallel thread. +================ =================== -Threading tricks -================ -ParallelExec -- Execute commands in parallel thread. - -1D array set operations -======================= +1D Array Set Operations +----------------------- Set operations for 1D numeric arrays based on sort() function. -ediff1d -- Array difference (auxiliary function). -unique1d -- Unique elements of 1D array. -intersect1d -- Intersection of 1D arrays with unique elements. -intersect1d_nu -- Intersection of 1D arrays with any elements. -setxor1d -- Set exclusive-or of 1D arrays with unique elements. -setmember1d -- Return an array of shape of ar1 containing 1 where - the elements of ar1 are in ar2 and 0 otherwise. -union1d -- Union of 1D arrays with unique elements. -setdiff1d -- Set difference of 1D arrays with unique elements. +================ =================== +ediff1d Array difference (auxiliary function). +unique1d Unique elements of 1D array. +intersect1d Intersection of 1D arrays with unique elements. +intersect1d_nu Intersection of 1D arrays with any elements. +setxor1d Set exclusive-or of 1D arrays with unique elements. +setmember1d Return an array of shape of ar1 containing 1 where + the elements of ar1 are in ar2 and 0 otherwise. +union1d Union of 1D arrays with unique elements. +setdiff1d Set difference of 1D arrays with unique elements. +================ =================== """ Modified: branches/dynamic_cpu_configuration/numpy/lib/io.py =================================================================== --- branches/dynamic_cpu_configuration/numpy/lib/io.py 2009-02-14 22:42:29 UTC (rev 6368) +++ branches/dynamic_cpu_configuration/numpy/lib/io.py 2009-02-15 12:03:15 UTC (rev 6369) @@ -1,4 +1,5 @@ __all__ = ['savetxt', 'loadtxt', + 'genfromtxt', 'ndfromtxt', 'mafromtxt', 'recfromtxt', 'recfromcsv', 'load', 'loads', 'save', 'savez', 'packbits', 'unpackbits', @@ -15,7 +16,11 @@ from _datasource import DataSource from _compiled_base import packbits, unpackbits +from _iotools import LineSplitter, NameValidator, StringConverter, \ + _is_string_like, has_nested_fields, flatten_dtype + _file = file +_string_like = _is_string_like class BagObj(object): """A simple class that converts attribute lookups to @@ -264,10 +269,6 @@ return str -def _string_like(obj): - try: obj + '' - except (TypeError, ValueError): return 0 - return 1 def loadtxt(fname, dtype=float, comments='#', delimiter=None, converters=None, skiprows=0, usecols=None, unpack=False): @@ -342,7 +343,7 @@ if usecols is not None: usecols = list(usecols) - if _string_like(fname): + if _is_string_like(fname): if fname.endswith('.gz'): import gzip fh = gzip.open(fname) @@ -520,7 +521,7 @@ """ - if _string_like(fname): + if _is_string_like(fname): if fname.endswith('.gz'): import gzip fh = gzip.open(fname,'wb') @@ -603,8 +604,508 @@ seq = regexp.findall(file.read()) if seq and not isinstance(seq[0], tuple): - # make sure np.array doesn't interpret strings as binary data - # by always producing a list of tuples - seq = [(x,) for x in seq] - output = np.array(seq, dtype=dtype) + # Only one group is in the regexp. + # Create the new array as a single data-type and then + # re-interpret as a single-field structured array. + newdtype = np.dtype(dtype[dtype.names[0]]) + output = np.array(seq, dtype=newdtype) + output.dtype = dtype + else: + output = np.array(seq, dtype=dtype) + return output + + + + +#####-------------------------------------------------------------------------- +#---- --- ASCII functions --- +#####-------------------------------------------------------------------------- + + + +def genfromtxt(fname, dtype=float, comments='#', delimiter=None, skiprows=0, + converters=None, missing='', missing_values=None, usecols=None, + names=None, excludelist=None, deletechars=None, + case_sensitive=True, unpack=None, usemask=False, loose=True): + """ + Load data from a text file. + + Each line past the first `skiprows` ones is split at the `delimiter` + character, and characters following the `comments` character are discarded. + + + + Parameters + ---------- + fname : file or string + File or filename to read. If the filename extension is `.gz` or `.bz2`, + the file is first decompressed. + dtype : data-type + Data type of the resulting array. If this is a flexible data-type, + the resulting array will be 1-dimensional, and each row will be + interpreted as an element of the array. In this case, the number + of columns used must match the number of fields in the data-type, + and the names of each field will be set by the corresponding name + of the dtype. + If None, the dtypes will be determined by the contents of each + column, individually. + comments : {string}, optional + The character used to indicate the start of a comment. + All the characters occurring on a line after a comment are discarded + delimiter : {string}, optional + The string used to separate values. By default, any consecutive + whitespace act as delimiter. + skiprows : {int}, optional + Numbers of lines to skip at the beginning of the file. + converters : {None, dictionary}, optional + A dictionary mapping column number to a function that will convert + values in the column to a number. Converters can also be used to + provide a default value for missing data: + ``converters = {3: lambda s: float(s or 0)}``. + missing : {string}, optional + A string representing a missing value, irrespective of the column where + it appears (e.g., `'missing'` or `'unused'`). + missing_values : {None, dictionary}, optional + A dictionary mapping a column number to a string indicating whether the + corresponding field should be masked. + usecols : {None, sequence}, optional + Which columns to read, with 0 being the first. For example, + ``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns. + names : {None, True, string, sequence}, optional + If `names` is True, the field names are read from the first valid line + after the first `skiprows` lines. + If `names` is a sequence or a single-string of comma-separated names, + the names will be used to define the field names in a flexible dtype. + If `names` is None, the names of the dtype fields will be used, if any. + excludelist : {sequence}, optional + A list of names to exclude. This list is appended to the default list + ['return','file','print']. Excluded names are appended an underscore: + for example, `file` would become `file_`. + deletechars : {string}, optional + A string combining invalid characters that must be deleted from the names. + case_sensitive : {True, False, 'upper', 'lower'}, optional + If True, field names are case_sensitive. + If False or 'upper', field names are converted to upper case. + If 'lower', field names are converted to lower case. + unpack : {bool}, optional + If True, the returned array is transposed, so that arguments may be + unpacked using ``x, y, z = loadtxt(...)`` + usemask : {bool}, optional + If True, returns a masked array. + If False, return a regular standard array. + + Returns + ------- + out : MaskedArray + Data read from the text file. + + Notes + -------- + * When spaces are used as delimiters, or when no delimiter has been given + as input, there should not be any missing data between two fields. + * When the variable are named (either by a flexible dtype or with `names`, + there must not be any header in the file (else a :exc:ValueError exception + is raised). + + Warnings + -------- + * Individual values are not stripped of spaces by default. + When using a custom converter, make sure the function does remove spaces. + + See Also + -------- + numpy.loadtxt : equivalent function when no data is missing. + + """ + # + if usemask: + from numpy.ma import MaskedArray, make_mask_descr + # Check the input dictionary of converters + user_converters = converters or {} + if not isinstance(user_converters, dict): + errmsg = "The input argument 'converter' should be a valid dictionary "\ + "(got '%s' instead)" + raise TypeError(errmsg % type(user_converters)) + # Check the input dictionary of missing values + user_missing_values = missing_values or {} + if not isinstance(user_missing_values, dict): + errmsg = "The input argument 'missing_values' should be a valid "\ + "dictionary (got '%s' instead)" + raise TypeError(errmsg % type(missing_values)) + defmissing = [_.strip() for _ in missing.split(',')] + [''] + + # Initialize the filehandle, the LineSplitter and the NameValidator +# fhd = _to_filehandle(fname) + if isinstance(fname, basestring): + fhd = np.lib._datasource.open(fname) + elif not hasattr(fname, 'read'): + raise TypeError("The input should be a string or a filehandle. "\ + "(got %s instead)" % type(fname)) + else: + fhd = fname + split_line = LineSplitter(delimiter=delimiter, comments=comments, + autostrip=False)._handyman + validate_names = NameValidator(excludelist=excludelist, + deletechars=deletechars, + case_sensitive=case_sensitive) + + # Get the first valid lines after the first skiprows ones + for i in xrange(skiprows): + fhd.readline() + first_values = None + while not first_values: + first_line = fhd.readline() + if first_line == '': + raise IOError('End-of-file reached before encountering data.') + if names is True: + first_values = first_line.strip().split(delimiter) + else: + first_values = split_line(first_line) + if names is True: + fval = first_values[0].strip() + if fval in comments: + del first_values[0] + + # Check the columns to use + if usecols is not None: + usecols = list(usecols) + nbcols = len(usecols or first_values) + + # Check the names and overwrite the dtype.names if needed + if dtype is not None: + dtype = np.dtype(dtype) + dtypenames = getattr(dtype, 'names', None) + if names is True: + names = validate_names([_.strip() for _ in first_values]) + first_line ='' + elif _is_string_like(names): + names = validate_names([_.strip() for _ in names.split(',')]) + elif names: + names = validate_names(names) + elif dtypenames: + dtype.names = validate_names(dtypenames) + if names and dtypenames: + dtype.names = names + + # If usecols is a list of names, convert to a list of indices + if usecols: + for (i, current) in enumerate(usecols): + if _is_string_like(current): + usecols[i] = names.index(current) + + # If user_missing_values has names as keys, transform them to indices + missing_values = {} + for (key, val) in user_missing_values.iteritems(): + # If val is a list, flatten it. In any case, add missing &'' to the list + if isinstance(val, (list, tuple)): + val = [str(_) for _ in val] + else: + val = [str(val),] + val.extend(defmissing) + if _is_string_like(key): + try: + missing_values[names.index(key)] = val + except ValueError: + pass + else: + missing_values[key] = val + + + # Initialize the default converters + if dtype is None: + # Note: we can't use a [...]*nbcols, as we would have 3 times the same + # ... converter, instead of 3 different converters. + converters = [StringConverter(None, + missing_values=missing_values.get(_, defmissing)) + for _ in range(nbcols)] + else: + flatdtypes = flatten_dtype(dtype) + # Initialize the converters + if len(flatdtypes) > 1: + # Flexible type : get a converter from each dtype + converters = [StringConverter(dt, + missing_values=missing_values.get(i, defmissing), + locked=True) + for (i, dt) in enumerate(flatdtypes)] + else: + # Set to a default converter (but w/ different missing values) + converters = [StringConverter(dtype, + missing_values=missing_values.get(_, defmissing), + locked=True) + for _ in range(nbcols)] + missing_values = [_.missing_values for _ in converters] + + # Update the converters to use the user-defined ones + uc_update = [] + for (i, conv) in user_converters.iteritems(): + # If the converter is specified by column names, use the index instead + if _is_string_like(i): + i = names.index(i) + if usecols: + try: + i = usecols.index(i) + except ValueError: + # Unused converter specified + continue + converters[i].update(conv, default=None, + missing_values=missing_values[i], + locked=True) + uc_update.append((i, conv)) + # Make sure we have the corrected keys in user_converters... + user_converters.update(uc_update) + + # Reset the names to match the usecols + if (not first_line) and usecols: + names = [names[_] for _ in usecols] + + rows = [] + append_to_rows = rows.append + if usemask: + masks = [] + append_to_masks = masks.append + # Parse each line + for line in itertools.chain([first_line,], fhd): + values = split_line(line) + # Skip an empty line + if len(values) == 0: + continue + # Select only the columns we need + if usecols: + values = [values[_] for _ in usecols] + # Check whether we need to update the converter + if dtype is None: + for (converter, item) in zip(converters, values): + converter.upgrade(item) + # Store the values + append_to_rows(tuple(values)) + if usemask: + append_to_masks(tuple([val.strip() in mss + for (val, mss) in zip(values, + missing_values)])) + + # Convert each value according to the converter: + # We want to modify the list in place to avoid creating a new one... + if loose: + conversionfuncs = [conv._loose_call for conv in converters] + else: + conversionfuncs = [conv._strict_call for conv in converters] + for (i, vals) in enumerate(rows): + rows[i] = tuple([convert(val) + for (convert, val) in zip(conversionfuncs, vals)]) + + # Reset the dtype + data = rows + if dtype is None: + # Get the dtypes from the types of the converters + coldtypes = [conv.type for conv in converters] + # Find the columns with strings... + strcolidx = [i for (i, v) in enumerate(coldtypes) + if v in (type('S'), np.string_)] + # ... and take the largest number of chars. + for i in strcolidx: + coldtypes[i] = "|S%i" % max(len(row[i]) for row in data) + # + if names is None: + # If the dtype is uniform, don't define names, else use '' + base = set([c.type for c in converters if c._checked]) + + if len(base) == 1: + (ddtype, mdtype) = (list(base)[0], np.bool) + else: + ddtype = [('', dt) for dt in coldtypes] + mdtype = [('', np.bool) for dt in coldtypes] + else: + ddtype = zip(names, coldtypes) + mdtype = zip(names, [np.bool] * len(coldtypes)) + output = np.array(data, dtype=ddtype) + if usemask: + outputmask = np.array(masks, dtype=mdtype) + else: + # Overwrite the initial dtype names if needed + if names and dtype.names: + dtype.names = names + flatdtypes = flatten_dtype(dtype) + # Case 1. We have a structured type + if len(flatdtypes) > 1: + # Nested dtype, eg [('a', int), ('b', [('b0', int), ('b1', 'f4')])] + # First, create the array using a flattened dtype: + # [('a', int), ('b1', int), ('b2', float)] + # Then, view the array using the specified dtype. + if has_nested_fields(dtype): + if 'O' in (_.char for _ in flatdtypes): + errmsg = "Nested fields involving objects "\ + "are not supported..." + raise NotImplementedError(errmsg) + rows = np.array(data, dtype=[('', t) for t in flatdtypes]) + output = rows.view(dtype) + else: + output = np.array(data, dtype=dtype) + # Now, process the rowmasks the same way + if usemask: + rowmasks = np.array(masks, + dtype=np.dtype([('', np.bool) + for t in flatdtypes])) + # Construct the new dtype + mdtype = make_mask_descr(dtype) + outputmask = rowmasks.view(mdtype) + # Case #2. We have a basic dtype + else: + # We used some user-defined converters + if user_converters: + ishomogeneous = True + descr = [] + for (i, ttype) in enumerate([conv.type for conv in converters]): + # Keep the dtype of the current converter + if i in user_converters: + ishomogeneous &= (ttype == dtype.type) + if ttype == np.string_: + ttype = "|S%i" % max(len(row[i]) for row in data) + descr.append(('', ttype)) + else: + descr.append(('', dtype)) + # So we changed the dtype ? + if not ishomogeneous: + # We have more than one field + if len(descr) > 1: + dtype = np.dtype(descr) + # We have only one field: drop the name if not needed. + else: + dtype = np.dtype(ttype) + # + output = np.array(data, dtype) + if usemask: + if dtype.names: + mdtype = [(_, np.bool) for _ in dtype.names] + else: + mdtype = np.bool + outputmask = np.array(masks, dtype=mdtype) + # Try to take care of the missing data we missed + if usemask and output.dtype.names: + for (name, conv) in zip(names or (), converters): + missing_values = [conv(_) for _ in conv.missing_values if _ != ''] + for mval in missing_values: + outputmask[name] |= (output[name] == mval) + # Construct the final array + if usemask: + output = output.view(MaskedArray) + output._mask = outputmask + if unpack: + return output.squeeze().T + return output.squeeze() + + + +def ndfromtxt(fname, dtype=float, comments='#', delimiter=None, skiprows=0, + converters=None, missing='', missing_values=None, + usecols=None, unpack=None, names=None, + excludelist=None, deletechars=None, case_sensitive=True,): + """ + Load ASCII data stored in fname and returns a ndarray. + + Complete description of all the optional input parameters is available in + the docstring of the `genfromtxt` function. + + See Also + -------- + numpy.genfromtxt : generic function. + + """ + kwargs = dict(dtype=dtype, comments=comments, delimiter=delimiter, + skiprows=skiprows, converters=converters, + missing=missing, missing_values=missing_values, + usecols=usecols, unpack=unpack, names=names, + excludelist=excludelist, deletechars=deletechars, + case_sensitive=case_sensitive, usemask=False) + return genfromtxt(fname, **kwargs) + +def mafromtxt(fname, dtype=float, comments='#', delimiter=None, skiprows=0, + converters=None, missing='', missing_values=None, + usecols=None, unpack=None, names=None, + excludelist=None, deletechars=None, case_sensitive=True,): + """ + Load ASCII data stored in fname and returns a MaskedArray. + + Complete description of all the optional input parameters is available in + the docstring of the `genfromtxt` function. + + See Also + -------- + numpy.genfromtxt : generic function. + """ + kwargs = dict(dtype=dtype, comments=comments, delimiter=delimiter, + skiprows=skiprows, converters=converters, + missing=missing, missing_values=missing_values, + usecols=usecols, unpack=unpack, names=names, + excludelist=excludelist, deletechars=deletechars, + case_sensitive=case_sensitive, + usemask=True) + return genfromtxt(fname, **kwargs) + + +def recfromtxt(fname, dtype=None, comments='#', delimiter=None, skiprows=0, + converters=None, missing='', missing_values=None, + usecols=None, unpack=None, names=None, + excludelist=None, deletechars=None, case_sensitive=True, + usemask=False): + """ + Load ASCII data stored in fname and returns a standard recarray (if + `usemask=False`) or a MaskedRecords (if `usemask=True`). + + Complete description of all the optional input parameters is available in + the docstring of the `genfromtxt` function. + + See Also + -------- + numpy.genfromtxt : generic function + + Warnings + -------- + * by default, `dtype=None`, which means that the dtype of the output array + will be determined from the data. + """ + kwargs = dict(dtype=dtype, comments=comments, delimiter=delimiter, + skiprows=skiprows, converters=converters, + missing=missing, missing_values=missing_values, + usecols=usecols, unpack=unpack, names=names, + excludelist=excludelist, deletechars=deletechars, + case_sensitive=case_sensitive, usemask=usemask) + output = genfromtxt(fname, **kwargs) + if usemask: + from numpy.ma.mrecords import MaskedRecords + output = output.view(MaskedRecords) + else: + output = output.view(np.recarray) + return output + + +def recfromcsv(fname, dtype=None, comments='#', skiprows=0, + converters=None, missing='', missing_values=None, + usecols=None, unpack=None, names=True, + excludelist=None, deletechars=None, case_sensitive='lower', + usemask=False): + """ + Load ASCII data stored in comma-separated file and returns a recarray (if + `usemask=False`) or a MaskedRecords (if `usemask=True`). + + Complete description of all the optional input parameters is available in + the docstring of the `genfromtxt` function. + + See Also + -------- + numpy.genfromtxt : generic function + """ + kwargs = dict(dtype=dtype, comments=comments, delimiter=",", + skiprows=skiprows, converters=converters, + missing=missing, missing_values=missing_values, + usecols=usecols, unpack=unpack, names=names, + excludelist=excludelist, deletechars=deletechars, + case_sensitive=case_sensitive, usemask=usemask) + output = genfromtxt(fname, **kwargs) + if usemask: + from numpy.ma.mrecords import MaskedRecords + output = output.view(MaskedRecords) + else: + output = output.view(np.recarray) + return output + Copied: branches/dynamic_cpu_configuration/numpy/lib/recfunctions.py (from rev 6368, trunk/numpy/lib/recfunctions.py) Modified: branches/dynamic_cpu_configuration/numpy/lib/src/_compiled_base.c =================================================================== --- branches/dynamic_cpu_configuration/numpy/lib/src/_compiled_base.c 2009-02-14 22:42:29 UTC (rev 6368) +++ branches/dynamic_cpu_configuration/numpy/lib/src/_compiled_base.c 2009-02-15 12:03:15 UTC (rev 6369) @@ -494,35 +494,46 @@ #define _TESTDOC1(typebase) (obj->ob_type == &Py##typebase##_Type) #define _TESTDOC2(typebase) (obj->ob_type == Py##typebase##_TypePtr) -#define _ADDDOC(typebase, doc, name) { \ +#define _ADDDOC(typebase, doc, name) do { \ Py##typebase##Object *new = (Py##typebase##Object *)obj; \ if (!(doc)) { \ doc = docstr; \ } \ else { \ - PyErr_Format(PyExc_RuntimeError, \ - "%s method %s",name, msg); \ + PyErr_Format(PyExc_RuntimeError, "%s method %s", name, msg); \ return NULL; \ } \ - } + } while (0) - if _TESTDOC1(CFunction) - _ADDDOC(CFunction, new->m_ml->ml_doc, new->m_ml->ml_name) - else if _TESTDOC1(Type) - _ADDDOC(Type, new->tp_doc, new->tp_name) - else if _TESTDOC2(MemberDescr) - _ADDDOC(MemberDescr, new->d_member->doc, new->d_member->name) - else if _TESTDOC2(GetSetDescr) - _ADDDOC(GetSetDescr, new->d_getset->doc, new->d_getset->name) - else if _TESTDOC2(MethodDescr) - _ADDDOC(MethodDescr, new->d_method->ml_doc, - new->d_method->ml_name) - else { - PyErr_SetString(PyExc_TypeError, - "Cannot set a docstring for that object"); - return NULL; - } + if (_TESTDOC1(CFunction)) + _ADDDOC(CFunction, new->m_ml->ml_doc, new->m_ml->ml_name); + else if (_TESTDOC1(Type)) + _ADDDOC(Type, new->tp_doc, new->tp_name); + else if (_TESTDOC2(MemberDescr)) + _ADDDOC(MemberDescr, new->d_member->doc, new->d_member->name); + else if (_TESTDOC2(GetSetDescr)) + _ADDDOC(GetSetDescr, new->d_getset->doc, new->d_getset->name); + else if (_TESTDOC2(MethodDescr)) + _ADDDOC(MethodDescr, new->d_method->ml_doc, new->d_method->ml_name); + else { + PyObject *doc_attr; + + doc_attr = PyObject_GetAttrString(obj, "__doc__"); + if (doc_attr != NULL && doc_attr != Py_None) { + PyErr_Format(PyExc_RuntimeError, "object %s", msg); + return NULL; + } + Py_XDECREF(doc_attr); + if (PyObject_SetAttrString(obj, "__doc__", str) < 0) { + PyErr_SetString(PyExc_TypeError, + "Cannot set a docstring for that object"); + return NULL; + } + Py_INCREF(Py_None); + return Py_None; + } + #undef _TESTDOC1 #undef _TESTDOC2 #undef _ADDDOC @@ -533,35 +544,6 @@ } -static char packbits_doc[] = - "out = numpy.packbits(myarray, axis=None)\n\n" - " myarray : an integer type array whose elements should be packed to bits\n\n" - " This routine packs the elements of a binary-valued dataset into a\n" - " NumPy array of type uint8 ('B') whose bits correspond to\n" - " the logical (0 or nonzero) value of the input elements.\n" - " The dimension over-which bit-packing is done is given by axis.\n" - " The shape of the output has the same number of dimensions as the input\n" - " (unless axis is None, in which case the output is 1-d).\n" - "\n" - " Example:\n" - " >>> a = array([[[1,0,1],\n" - " ... [0,1,0]],\n" - " ... [[1,1,0],\n" - " ... [0,0,1]]])\n" - " >>> b = numpy.packbits(a,axis=-1)\n" - " >>> b\n" - " array([[[160],[64]],[[192],[32]]], dtype=uint8)\n\n" - " Note that 160 = 128 + 32\n" - " 192 = 128 + 64\n"; - -static char unpackbits_doc[] = - "out = numpy.unpackbits(myarray, axis=None)\n\n" - " myarray - array of uint8 type where each element represents a bit-field\n" - " that should be unpacked into a boolean output array\n\n" - " The shape of the output array is either 1-d (if axis is None) or\n" - " the same shape as the input array with unpacking done along the\n" - " axis specified."; - /* PACKBITS This function packs binary (0 or 1) 1-bit per pixel arrays @@ -809,9 +791,9 @@ {"add_docstring", (PyCFunction)arr_add_docstring, METH_VARARGS, NULL}, {"packbits", (PyCFunction)io_pack, METH_VARARGS | METH_KEYWORDS, - packbits_doc}, + NULL}, {"unpackbits", (PyCFunction)io_unpack, METH_VARARGS | METH_KEYWORDS, - unpackbits_doc}, + NULL}, {NULL, NULL} /* sentinel */ }; Copied: branches/dynamic_cpu_configuration/numpy/lib/tests/test__iotools.py (from rev 6368, trunk/numpy/lib/tests/test__iotools.py) Modified: branches/dynamic_cpu_configuration/numpy/lib/tests/test_function_base.py =================================================================== --- branches/dynamic_cpu_configuration/numpy/lib/tests/test_function_base.py 2009-02-14 22:42:29 UTC (rev 6368) +++ branches/dynamic_cpu_configuration/numpy/lib/tests/test_function_base.py 2009-02-15 12:03:15 UTC (rev 6369) @@ -430,6 +430,44 @@ #check integral of normal equals 1 assert_almost_equal(sum(r,axis=0),1,7) + def test_ndim(self): + x = linspace(0, 1, 3) + y = linspace(0, 2, 8) + z = linspace(0, 3, 13) + + wx = ones_like(x) * (x[1]-x[0]) + wx[0] /= 2 + wx[-1] /= 2 + wy = ones_like(y) * (y[1]-y[0]) + wy[0] /= 2 + wy[-1] /= 2 + wz = ones_like(z) * (z[1]-z[0]) + wz[0] /= 2 + wz[-1] /= 2 + + q = x[:,None,None] + y[None,:,None] + z[None,None,:] + + qx = (q*wx[:,None,None]).sum(axis=0) + qy = (q*wy[None,:,None]).sum(axis=1) + qz = (q*wz[None,None,:]).sum(axis=2) + + # n-d `x` + r = trapz(q, x=x[:,None,None], axis=0) + assert_almost_equal(r, qx) + r = trapz(q, x=y[None,:,None], axis=1) + assert_almost_equal(r, qy) + r = trapz(q, x=z[None,None,:], axis=2) + assert_almost_equal(r, qz) + + # 1-d `x` + r = trapz(q, x=x, axis=0) + assert_almost_equal(r, qx) + r = trapz(q, x=y, axis=1) + assert_almost_equal(r, qy) + r = trapz(q, x=z, axis=2) + assert_almost_equal(r, qz) + + class TestSinc(TestCase): def test_simple(self): assert(sinc(0)==1) Modified: branches/dynamic_cpu_configuration/numpy/lib/tests/test_getlimits.py =================================================================== --- branches/dynamic_cpu_configuration/numpy/lib/tests/test_getlimits.py 2009-02-14 22:42:29 UTC (rev 6368) +++ branches/dynamic_cpu_configuration/numpy/lib/tests/test_getlimits.py 2009-02-15 12:03:15 UTC (rev 6369) @@ -51,5 +51,9 @@ assert_equal(iinfo(T).max, T(-1)) +def test_instances(): + iinfo(10) + finfo(3.0) + if __name__ == "__main__": run_module_suite() Modified: branches/dynamic_cpu_configuration/numpy/lib/tests/test_io.py =================================================================== --- branches/dynamic_cpu_configuration/numpy/lib/tests/test_io.py 2009-02-14 22:42:29 UTC (rev 6368) +++ branches/dynamic_cpu_configuration/numpy/lib/tests/test_io.py 2009-02-15 12:03:15 UTC (rev 6369) @@ -1,10 +1,17 @@ -from numpy.testing import * + import numpy as np +import numpy.ma as ma +from numpy.ma.testutils import * + import StringIO from tempfile import NamedTemporaryFile +import sys -class RoundtripTest: + +MAJVER, MINVER = sys.version_info[:2] + +class RoundtripTest(object): def roundtrip(self, save_func, *args, **kwargs): """ save_func : callable @@ -25,7 +32,14 @@ file_on_disk = kwargs.get('file_on_disk', False) if file_on_disk: - target_file = NamedTemporaryFile() + # Do not delete the file on windows, because we can't + # reopen an already opened file on that platform, so we + # need to close the file and reopen it, implying no + # automatic deletion. + if sys.platform == 'win32' and MAJVER >= 2 and MINVER >= 6: + target_file = NamedTemporaryFile(delete=False) + else: + target_file = NamedTemporaryFile() load_file = target_file.name else: target_file = StringIO.StringIO() @@ -37,6 +51,9 @@ target_file.flush() target_file.seek(0) + if sys.platform == 'win32' and not isinstance(target_file, StringIO.StringIO): + target_file.close() + arr_reloaded = np.load(load_file, **load_kwds) self.arr = arr @@ -319,7 +336,6 @@ assert_array_equal(x, a) def test_record_2(self): - return # pass this test until #736 is resolved c = StringIO.StringIO() c.write('1312 foo\n1534 bar\n4444 qux') c.seek(0) @@ -341,5 +357,448 @@ assert_array_equal(x, a) +#####-------------------------------------------------------------------------- + + +class TestFromTxt(TestCase): + # + def test_record(self): + "Test w/ explicit dtype" + data = StringIO.StringIO('1 2\n3 4') +# data.seek(0) + test = np.ndfromtxt(data, dtype=[('x', np.int32), ('y', np.int32)]) + control = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')]) + assert_equal(test, control) + # + data = StringIO.StringIO('M 64.0 75.0\nF 25.0 60.0') +# data.seek(0) + descriptor = {'names': ('gender','age','weight'), + 'formats': ('S1', 'i4', 'f4')} + control = np.array([('M', 64.0, 75.0), ('F', 25.0, 60.0)], + dtype=descriptor) + test = np.ndfromtxt(data, dtype=descriptor) + assert_equal(test, control) + + def test_array(self): + "Test outputing a standard ndarray" + data = StringIO.StringIO('1 2\n3 4') + control = np.array([[1,2],[3,4]], dtype=int) + test = np.ndfromtxt(data, dtype=int) + assert_array_equal(test, control) + # + data.seek(0) + control = np.array([[1,2],[3,4]], dtype=float) + test = np.loadtxt(data, dtype=float) + assert_array_equal(test, control) + + def test_1D(self): + "Test squeezing to 1D" + control = np.array([1, 2, 3, 4], int) + # + data = StringIO.StringIO('1\n2\n3\n4\n') + test = np.ndfromtxt(data, dtype=int) + assert_array_equal(test, control) + # + data = StringIO.StringIO('1,2,3,4\n') + test = np.ndfromtxt(data, dtype=int, delimiter=',') + assert_array_equal(test, control) + + def test_comments(self): + "Test the stripping of comments" + control = np.array([1, 2, 3, 5], int) + # Comment on its own line + data = StringIO.StringIO('# comment\n1,2,3,5\n') + test = np.ndfromtxt(data, dtype=int, delimiter=',', comments='#') + assert_equal(test, control) + # Comment at the end of a line + data = StringIO.StringIO('1,2,3,5# comment\n') + test = np.ndfromtxt(data, dtype=int, delimiter=',', comments='#') + assert_equal(test, control) + + def test_skiprows(self): + "Test row skipping" + control = np.array([1, 2, 3, 5], int) + # + data = StringIO.StringIO('comment\n1,2,3,5\n') + test = np.ndfromtxt(data, dtype=int, delimiter=',', skiprows=1) + assert_equal(test, control) + # + data = StringIO.StringIO('# comment\n1,2,3,5\n') + test = np.loadtxt(data, dtype=int, delimiter=',', skiprows=1) + assert_equal(test, control) + + def test_header(self): + "Test retrieving a header" + data = StringIO.StringIO('gender age weight\nM 64.0 75.0\nF 25.0 60.0') + test = np.ndfromtxt(data, dtype=None, names=True) + control = {'gender': np.array(['M', 'F']), + 'age': np.array([64.0, 25.0]), + 'weight': np.array([75.0, 60.0])} + assert_equal(test['gender'], control['gender']) + assert_equal(test['age'], control['age']) + assert_equal(test['weight'], control['weight']) + + def test_auto_dtype(self): + "Test the automatic definition of the output dtype" + data = StringIO.StringIO('A 64 75.0 3+4j True\nBCD 25 60.0 5+6j False') + test = np.ndfromtxt(data, dtype=None) + control = [np.array(['A', 'BCD']), + np.array([64, 25]), + np.array([75.0, 60.0]), + np.array([3+4j, 5+6j]), + np.array([True, False]),] + assert_equal(test.dtype.names, ['f0','f1','f2','f3','f4']) + for (i, ctrl) in enumerate(control): + assert_equal(test['f%i' % i], ctrl) + + + def test_auto_dtype_uniform(self): + "Tests whether the output dtype can be uniformized" + data = StringIO.StringIO('1 2 3 4\n5 6 7 8\n') + test = np.ndfromtxt(data, dtype=None) + control = np.array([[1,2,3,4],[5,6,7,8]]) + assert_equal(test, control) + + + def test_fancy_dtype(self): + "Check that a nested dtype isn't MIA" + data = StringIO.StringIO('1,2,3.0\n4,5,6.0\n') + fancydtype = np.dtype([('x', int), ('y', [('t', int), ('s', float)])]) + test = np.ndfromtxt(data, dtype=fancydtype, delimiter=',') + control = np.array([(1,(2,3.0)),(4,(5,6.0))], dtype=fancydtype) + assert_equal(test, control) + + + def test_names_overwrite(self): + "Test overwriting the names of the dtype" + descriptor = {'names': ('g','a','w'), + 'formats': ('S1', 'i4', 'f4')} + data = StringIO.StringIO('M 64.0 75.0\nF 25.0 60.0') + names = ('gender','age','weight') + test = np.ndfromtxt(data, dtype=descriptor, names=names) + descriptor['names'] = names + control = np.array([('M', 64.0, 75.0), + ('F', 25.0, 60.0)], dtype=descriptor) + assert_equal(test, control) + + + def test_commented_header(self): + "Check that names can be retrieved even if the line is commented out." + data = StringIO.StringIO(""" +#gender age weight +M 21 72.100000 +F 35 58.330000 +M 33 21.99 + """) + # The # is part of the first name and should be deleted automatically. + test = np.genfromtxt(data, names=True, dtype=None) + ctrl = np.array([('M', 21, 72.1), ('F', 35, 58.33), ('M', 33, 21.99)], + dtype=[('gender','|S1'), ('age', int), ('weight', float)]) + assert_equal(test, ctrl) + # Ditto, but we should get rid of the first element + data = StringIO.StringIO(""" +# gender age weight +M 21 72.100000 +F 35 58.330000 +M 33 21.99 + """) + test = np.genfromtxt(data, names=True, dtype=None) + assert_equal(test, ctrl) + + + def test_autonames_and_usecols(self): + "Tests names and usecols" + data = StringIO.StringIO('A B C D\n aaaa 121 45 9.1') + test = np.ndfromtxt(data, usecols=('A', 'C', 'D'), + names=True, dtype=None) + control = np.array(('aaaa', 45, 9.1), + dtype=[('A', '|S4'), ('C', int), ('D', float)]) + assert_equal(test, control) + + + def test_converters_with_usecols(self): + "Test the combination user-defined converters and usecol" + data = StringIO.StringIO('1,2,3,,5\n6,7,8,9,10\n') + test = np.ndfromtxt(data, dtype=int, delimiter=',', + converters={3:lambda s: int(s or -999)}, + usecols=(1, 3, )) + control = np.array([[2, -999], [7, 9]], int) + assert_equal(test, control) + + def test_converters_with_usecols_and_names(self): + "Tests names and usecols" + data = StringIO.StringIO('A B C D\n aaaa 121 45 9.1') + test = np.ndfromtxt(data, usecols=('A', 'C', 'D'), names=True, + dtype=None, converters={'C':lambda s: 2 * int(s)}) + control = np.array(('aaaa', 90, 9.1), + dtype=[('A', '|S4'), ('C', int), ('D', float)]) + assert_equal(test, control) + + def test_converters_cornercases(self): + "Test the conversion to datetime." + from datetime import datetime + converter = {'date':lambda s: datetime.strptime(s,'%Y-%m-%d %H:%M:%SZ')} + data = StringIO.StringIO('2009-02-03 12:00:00Z, 72214.0') + test = np.ndfromtxt(data, delimiter=',', dtype=None, + names=['date','stid'], converters=converter) + control = np.array((datetime(2009,02,03,12,0), 72214.), + dtype=[('date', np.object_), ('stid', float)]) + assert_equal(test, control) + + + def test_unused_converter(self): + "Test whether unused converters are forgotten" + data = StringIO.StringIO("1 21\n 3 42\n") + test = np.ndfromtxt(data, usecols=(1,), + converters={0: lambda s: int(s, 16)}) + assert_equal(test, [21, 42]) + # + data.seek(0) + test = np.ndfromtxt(data, usecols=(1,), + converters={1: lambda s: int(s, 16)}) + assert_equal(test, [33, 66]) + + + def test_dtype_with_converters(self): + dstr = "2009; 23; 46" + test = np.ndfromtxt(StringIO.StringIO(dstr,), + delimiter=";", dtype=float, converters={0:str}) + control = np.array([('2009', 23., 46)], + dtype=[('f0','|S4'), ('f1', float), ('f2', float)]) + assert_equal(test, control) + test = np.ndfromtxt(StringIO.StringIO(dstr,), + delimiter=";", dtype=float, converters={0:float}) + control = np.array([2009., 23., 46],) + assert_equal(test, control) + + + def test_dtype_with_object(self): + "Test using an explicit dtype with an object" + from datetime import date + import time + data = """ + 1; 2001-01-01 + 2; 2002-01-31 + """ + ndtype = [('idx', int), ('code', np.object)] + func = lambda s: date(*(time.strptime(s.strip(), "%Y-%m-%d")[:3])) + converters = {1: func} + test = np.genfromtxt(StringIO.StringIO(data), delimiter=";", dtype=ndtype, + converters=converters) + control = np.array([(1, date(2001,1,1)), (2, date(2002,1,31))], + dtype=ndtype) + assert_equal(test, control) + # + ndtype = [('nest', [('idx', int), ('code', np.object)])] + try: + test = np.genfromtxt(StringIO.StringIO(data), delimiter=";", + dtype=ndtype, converters=converters) + except NotImplementedError: + pass + else: + errmsg = "Nested dtype involving objects should be supported." + raise AssertionError(errmsg) + + + def test_userconverters_with_explicit_dtype(self): + "Test user_converters w/ explicit (standard) dtype" + data = StringIO.StringIO('skip,skip,2001-01-01,1.0,skip') + test = np.genfromtxt(data, delimiter=",", names=None, dtype=float, + usecols=(2, 3), converters={2: str}) + control = np.array([('2001-01-01', 1.)], + dtype=[('', '|S10'), ('', float)]) + assert_equal(test, control) + + + def test_spacedelimiter(self): + "Test space delimiter" + data = StringIO.StringIO("1 2 3 4 5\n6 7 8 9 10") + test = np.ndfromtxt(data) + control = np.array([[ 1., 2., 3., 4., 5.], + [ 6., 7., 8., 9.,10.]]) + assert_equal(test, control) + + + def test_missing(self): + data = StringIO.StringIO('1,2,3,,5\n') + test = np.ndfromtxt(data, dtype=int, delimiter=',', \ + converters={3:lambda s: int(s or -999)}) + control = np.array([1, 2, 3, -999, 5], int) + assert_equal(test, control) + + + def test_usecols(self): + "Test the selection of columns" + # Select 1 column + control = np.array( [[1, 2], [3, 4]], float) + data = StringIO.StringIO() + np.savetxt(data, control) + data.seek(0) + test = np.ndfromtxt(data, dtype=float, usecols=(1,)) + assert_equal(test, control[:, 1]) + # + control = np.array( [[1, 2, 3], [3, 4, 5]], float) + data = StringIO.StringIO() + np.savetxt(data, control) + data.seek(0) + test = np.ndfromtxt(data, dtype=float, usecols=(1, 2)) + assert_equal(test, control[:, 1:]) + # Testing with arrays instead of tuples. + data.seek(0) + test = np.ndfromtxt(data, dtype=float, usecols=np.array([1, 2])) + assert_equal(test, control[:, 1:]) + # Checking with dtypes defined converters. + data = StringIO.StringIO("""JOE 70.1 25.3\nBOB 60.5 27.9""") + names = ['stid', 'temp'] + dtypes = ['S4', 'f8'] + test = np.ndfromtxt(data, usecols=(0, 2), dtype=zip(names, dtypes)) + assert_equal(test['stid'], ["JOE", "BOB"]) + assert_equal(test['temp'], [25.3, 27.9]) + + + def test_empty_file(self): + "Test that an empty file raises the proper exception" + data = StringIO.StringIO() + assert_raises(IOError, np.ndfromtxt, data) + + + def test_fancy_dtype_alt(self): + "Check that a nested dtype isn't MIA" + data = StringIO.StringIO('1,2,3.0\n4,5,6.0\n') + fancydtype = np.dtype([('x', int), ('y', [('t', int), ('s', float)])]) + test = np.mafromtxt(data, dtype=fancydtype, delimiter=',') + control = ma.array([(1,(2,3.0)),(4,(5,6.0))], dtype=fancydtype) + assert_equal(test, control) + + + def test_withmissing(self): + data = StringIO.StringIO('A,B\n0,1\n2,N/A') + test = np.mafromtxt(data, dtype=None, delimiter=',', missing='N/A', + names=True) + control = ma.array([(0, 1), (2, -1)], + mask=[(False, False), (False, True)], + dtype=[('A', np.int), ('B', np.int)]) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + # + data.seek(0) + test = np.mafromtxt(data, delimiter=',', missing='N/A', names=True) + control = ma.array([(0, 1), (2, -1)], + mask=[[False, False], [False, True]],) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + + + def test_user_missing_values(self): + datastr ="A, B, C\n0, 0., 0j\n1, N/A, 1j\n-9, 2.2, N/A\n3, -99, 3j" + data = StringIO.StringIO(datastr) + basekwargs = dict(dtype=None, delimiter=',', names=True, missing='N/A') + mdtype = [('A', int), ('B', float), ('C', complex)] + # + test = np.mafromtxt(data, **basekwargs) + control = ma.array([( 0, 0.0, 0j), (1, -999, 1j), + ( -9, 2.2, -999j), (3, -99, 3j)], + mask=[(0, 0, 0), (0, 1, 0), (0, 0, 1), (0, 0, 0)], + dtype=mdtype) + assert_equal(test, control) + # + data.seek(0) + test = np.mafromtxt(data, + missing_values={0:-9, 1:-99, 2:-999j}, **basekwargs) + control = ma.array([( 0, 0.0, 0j), (1, -999, 1j), + ( -9, 2.2, -999j), (3, -99, 3j)], + mask=[(0, 0, 0), (0, 1, 0), (1, 0, 1), (0, 1, 0)], + dtype=mdtype) + assert_equal(test, control) + # + data.seek(0) + test = np.mafromtxt(data, + missing_values={0:-9, 'B':-99, 'C':-999j}, + **basekwargs) + control = ma.array([( 0, 0.0, 0j), (1, -999, 1j), + ( -9, 2.2, -999j), (3, -99, 3j)], + mask=[(0, 0, 0), (0, 1, 0), (1, 0, 1), (0, 1, 0)], + dtype=mdtype) + assert_equal(test, control) + + + def test_withmissing_float(self): + data = StringIO.StringIO('A,B\n0,1.5\n2,-999.00') + test = np.mafromtxt(data, dtype=None, delimiter=',', missing='-999.0', + names=True,) + control = ma.array([(0, 1.5), (2, -1.)], + mask=[(False, False), (False, True)], + dtype=[('A', np.int), ('B', np.float)]) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + + + def test_with_masked_column_uniform(self): + "Test masked column" + data = StringIO.StringIO('1 2 3\n4 5 6\n') + test = np.genfromtxt(data, missing='2,5', dtype=None, usemask=True) + control = ma.array([[1, 2, 3], [4, 5, 6]], mask=[[0, 1, 0],[0, 1, 0]]) + assert_equal(test, control) + + def test_with_masked_column_various(self): + "Test masked column" + data = StringIO.StringIO('True 2 3\nFalse 5 6\n') + test = np.genfromtxt(data, missing='2,5', dtype=None, usemask=True) + control = ma.array([(1, 2, 3), (0, 5, 6)], + mask=[(0, 1, 0),(0, 1, 0)], + dtype=[('f0', bool), ('f1', bool), ('f2', int)]) + assert_equal(test, control) + + + def test_recfromtxt(self): + # + data = StringIO.StringIO('A,B\n0,1\n2,3') + test = np.recfromtxt(data, delimiter=',', missing='N/A', names=True) + control = np.array([(0, 1), (2, 3)], + dtype=[('A', np.int), ('B', np.int)]) + self.failUnless(isinstance(test, np.recarray)) + assert_equal(test, control) + # + data = StringIO.StringIO('A,B\n0,1\n2,N/A') + test = np.recfromtxt(data, dtype=None, delimiter=',', missing='N/A', + names=True, usemask=True) + control = ma.array([(0, 1), (2, -1)], + mask=[(False, False), (False, True)], + dtype=[('A', np.int), ('B', np.int)]) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + assert_equal(test.A, [0, 2]) + + + def test_recfromcsv(self): + # + data = StringIO.StringIO('A,B\n0,1\n2,3') + test = np.recfromcsv(data, missing='N/A', + names=True, case_sensitive=True) + control = np.array([(0, 1), (2, 3)], + dtype=[('A', np.int), ('B', np.int)]) + self.failUnless(isinstance(test, np.recarray)) + assert_equal(test, control) + # + data = StringIO.StringIO('A,B\n0,1\n2,N/A') + test = np.recfromcsv(data, dtype=None, missing='N/A', + names=True, case_sensitive=True, usemask=True) + control = ma.array([(0, 1), (2, -1)], + mask=[(False, False), (False, True)], + dtype=[('A', np.int), ('B', np.int)]) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + assert_equal(test.A, [0, 2]) + # + data = StringIO.StringIO('A,B\n0,1\n2,3') + test = np.recfromcsv(data, missing='N/A',) + control = np.array([(0, 1), (2, 3)], + dtype=[('a', np.int), ('b', np.int)]) + self.failUnless(isinstance(test, np.recarray)) + assert_equal(test, control) + + + + if __name__ == "__main__": run_module_suite() Copied: branches/dynamic_cpu_configuration/numpy/lib/tests/test_recfunctions.py (from rev 6368, trunk/numpy/lib/tests/test_recfunctions.py) Modified: branches/dynamic_cpu_configuration/numpy/lib/utils.py =================================================================== --- branches/dynamic_cpu_configuration/numpy/lib/utils.py 2009-02-14 22:42:29 UTC (rev 6368) +++ branches/dynamic_cpu_configuration/numpy/lib/utils.py 2009-02-15 12:03:15 UTC (rev 6369) @@ -699,11 +699,11 @@ # import sub-packages if import_modules and hasattr(item, '__path__'): - for pth in item.__path__: - for mod_path in os.listdir(pth): - init_py = os.path.join(pth, mod_path, '__init__.py') + for pth in item.__path__: + for mod_path in os.listdir(pth): + init_py = os.path.join(pth, mod_path, '__init__.py') if not os.path.isfile(init_py): - continue + continue if _all is not None and mod_path not in _all: continue try: Modified: branches/dynamic_cpu_configuration/numpy/linalg/linalg.py =================================================================== --- branches/dynamic_cpu_configuration/numpy/linalg/linalg.py 2009-02-14 22:42:29 UTC (rev 6368) +++ branches/dynamic_cpu_configuration/numpy/linalg/linalg.py 2009-02-15 12:03:15 UTC (rev 6369) @@ -9,7 +9,7 @@ zgetrf, dpotrf, zpotrf, dgeqrf, zgeqrf, zungqr, dorgqr. """ -__all__ = ['matrix_power', 'solve', 'tensorsolve', 'tensorinv', 'inv', +__all__ = ['matrix_power', 'solve', 'tensorsolve', 'tensorinv', 'inv', 'cholesky', 'eigvals', 'eigvalsh', 'pinv', 'det', 'svd', 'eig', 'eigh','lstsq', 'norm', 'qr', 'cond', 'LinAlgError'] Modified: branches/dynamic_cpu_configuration/numpy/linalg/tests/test_linalg.py =================================================================== --- branches/dynamic_cpu_configuration/numpy/linalg/tests/test_linalg.py 2009-02-14 22:42:29 UTC (rev 6368) +++ branches/dynamic_cpu_configuration/numpy/linalg/tests/test_linalg.py 2009-02-15 12:03:15 UTC (rev 6369) @@ -202,7 +202,7 @@ assert_equal(matrix_power(A,2),A) -class HermitianTestCase: +class HermitianTestCase(object): def test_single(self): a = array([[1.,2.], [2.,1.]], dtype=single) self.do(a) Modified: branches/dynamic_cpu_configuration/numpy/ma/core.py =================================================================== --- branches/dynamic_cpu_configuration/numpy/ma/core.py 2009-02-14 22:42:29 UTC (rev 6368) +++ branches/dynamic_cpu_configuration/numpy/ma/core.py 2009-02-15 12:03:15 UTC (rev 6369) @@ -1,20 +1,24 @@ # pylint: disable-msg=E1002 -"""MA: a facility for dealing with missing observations -MA is generally used as a numpy.array look-alike. -by Paul F. Dubois. +""" +numpy.ma : a package to handle missing or invalid values. +This package was initially written for numarray by Paul F. Dubois +at Lawrence Livermore National Laboratory. +In 2006, the package was completely rewritten by Pierre Gerard-Marchant +(University of Georgia) to make the MaskedArray class a subclass of ndarray, +and to improve support of structured arrays. + + Copyright 1999, 2000, 2001 Regents of the University of California. Released for unlimited redistribution. -Adapted for numpy_core 2005 by Travis Oliphant and -(mainly) Paul Dubois. +* Adapted for numpy_core 2005 by Travis Oliphant and (mainly) Paul Dubois. +* Subclassing of the base ndarray 2006 by Pierre Gerard-Marchant + (pgmdevlist_AT_gmail_DOT_com) +* Improvements suggested by Reggie Dugard (reggie_AT_merfinllc_DOT_com) -Subclassing of the base ndarray 2006 by Pierre Gerard-Marchant. -pgmdevlist_AT_gmail_DOT_com -Improvements suggested by Reggie Dugard (reggie_AT_merfinllc_DOT_com) +.. moduleauthor:: Pierre Gerard-Marchant -:author: Pierre Gerard-Marchant - """ __author__ = "Pierre GF Gerard-Marchant" __docformat__ = "restructuredtext en" @@ -33,7 +37,8 @@ 'default_fill_value', 'diag', 'diagonal', 'divide', 'dump', 'dumps', 'empty', 'empty_like', 'equal', 'exp', 'expand_dims', 'fabs', 'flatten_mask', 'fmod', 'filled', 'floor', 'floor_divide', - 'fix_invalid', 'frombuffer', 'fromfunction', + 'fix_invalid', 'flatten_structured_array', 'frombuffer', 'fromflex', + 'fromfunction', 'getdata','getmask', 'getmaskarray', 'greater', 'greater_equal', 'harden_mask', 'hypot', 'identity', 'ids', 'indices', 'inner', 'innerproduct', @@ -54,7 +59,7 @@ 'rank', 'ravel', 'remainder', 'repeat', 'reshape', 'resize', 'right_shift', 'round_', 'round', 'set_fill_value', 'shape', 'sin', 'sinh', 'size', 'sometrue', - 'sort', 'soften_mask', 'sqrt', 'squeeze', 'std', 'subtract', 'sum', + 'sort', 'soften_mask', 'sqrt', 'squeeze', 'std', 'subtract', 'sum', 'swapaxes', 'take', 'tan', 'tanh', 'trace', 'transpose', 'true_divide', 'var', 'where', @@ -152,7 +157,7 @@ """ if hasattr(obj,'dtype'): - defval = default_filler[obj.dtype.kind] + defval = _check_fill_value(None, obj.dtype) elif isinstance(obj, np.dtype): if obj.subdtype: defval = default_filler[obj.subdtype[0].kind] @@ -170,6 +175,18 @@ defval = default_filler['O'] return defval + +def _recursive_extremum_fill_value(ndtype, extremum): + names = ndtype.names + if names: + deflist = [] + for name in names: + fval = _recursive_extremum_fill_value(ndtype[name], extremum) + deflist.append(fval) + return tuple(deflist) + return extremum[ndtype] + + def minimum_fill_value(obj): """ Calculate the default fill value suitable for taking the minimum of ``obj``. @@ -177,11 +194,7 @@ """ errmsg = "Unsuitable type for calculating minimum." if hasattr(obj, 'dtype'): - objtype = obj.dtype - filler = min_filler[objtype] - if filler is None: - raise TypeError(errmsg) - return filler + return _recursive_extremum_fill_value(obj.dtype, min_filler) elif isinstance(obj, float): return min_filler[ntypes.typeDict['float_']] elif isinstance(obj, int): @@ -193,6 +206,7 @@ else: raise TypeError(errmsg) + def maximum_fill_value(obj): """ Calculate the default fill value suitable for taking the maximum of ``obj``. @@ -200,11 +214,7 @@ """ errmsg = "Unsuitable type for calculating maximum." if hasattr(obj, 'dtype'): - objtype = obj.dtype - filler = max_filler[objtype] - if filler is None: - raise TypeError(errmsg) - return filler + return _recursive_extremum_fill_value(obj.dtype, max_filler) elif isinstance(obj, float): return max_filler[ntypes.typeDict['float_']] elif isinstance(obj, int): @@ -217,6 +227,28 @@ raise TypeError(errmsg) +def _recursive_set_default_fill_value(dtypedescr): + deflist = [] + for currentdescr in dtypedescr: + currenttype = currentdescr[1] + if isinstance(currenttype, list): + deflist.append(tuple(_recursive_set_default_fill_value(currenttype))) + else: + deflist.append(default_fill_value(np.dtype(currenttype))) + return tuple(deflist) + +def _recursive_set_fill_value(fillvalue, dtypedescr): + fillvalue = np.resize(fillvalue, len(dtypedescr)) + output_value = [] + for (fval, descr) in zip(fillvalue, dtypedescr): + cdtype = descr[1] + if isinstance(cdtype, list): + output_value.append(tuple(_recursive_set_fill_value(fval, cdtype))) + else: + output_value.append(np.array(fval, dtype=cdtype).item()) + return tuple(output_value) + + def _check_fill_value(fill_value, ndtype): """ Private function validating the given `fill_value` for the given dtype. @@ -233,10 +265,9 @@ fields = ndtype.fields if fill_value is None: if fields: - fdtype = [(_[0], _[1]) for _ in ndtype.descr] - fill_value = np.array(tuple([default_fill_value(fields[n][0]) - for n in ndtype.names]), - dtype=fdtype) + descr = ndtype.descr + fill_value = np.array(_recursive_set_default_fill_value(descr), + dtype=ndtype,) else: fill_value = default_fill_value(ndtype) elif fields: @@ -248,10 +279,9 @@ err_msg = "Unable to transform %s to dtype %s" raise ValueError(err_msg % (fill_value, fdtype)) else: - fval = np.resize(fill_value, len(ndtype.descr)) - fill_value = [np.asarray(f).astype(desc[1]).item() - for (f, desc) in zip(fval, ndtype.descr)] - fill_value = np.array(tuple(fill_value), copy=False, dtype=fdtype) + descr = ndtype.descr + fill_value = np.array(_recursive_set_fill_value(fill_value, descr), + dtype=ndtype) else: if isinstance(fill_value, basestring) and (ndtype.char not in 'SV'): fill_value = default_fill_value(ndtype) @@ -315,7 +345,7 @@ def filled(a, fill_value = None): """ Return `a` as an array where masked data have been replaced by `value`. - + If `a` is not a MaskedArray, `a` itself is returned. If `a` is a MaskedArray and `fill_value` is None, `fill_value` is set to `a.fill_value`. @@ -367,7 +397,7 @@ return rcls #####-------------------------------------------------------------------------- -def get_data(a, subok=True): +def getdata(a, subok=True): """ Return the `_data` part of `a` if `a` is a MaskedArray, or `a` itself. @@ -384,8 +414,8 @@ if not subok: return data.view(ndarray) return data +get_data = getdata -getdata = get_data def fix_invalid(a, mask=nomask, copy=True, fill_value=None): """ @@ -586,7 +616,7 @@ def __call__ (self, a, b, *args, **kwargs): "Execute the call behavior." - m = mask_or(getmask(a), getmask(b)) + m = mask_or(getmask(a), getmask(b), shrink=False) (da, db) = (getdata(a), getdata(b)) # Easy case: there's no mask... if m is nomask: @@ -597,8 +627,12 @@ # Transforms to a (subclass of) MaskedArray if we don't have a scalar if result.shape: result = result.view(get_masked_subclass(a, b)) - result._mask = make_mask_none(result.shape) - result._mask.flat = m + # If we have a mask, make sure it's broadcasted properly + if m.any(): + result._mask = mask_or(getmaskarray(a), getmaskarray(b)) + # If some initial masks where not shrunk, don't shrink the result + elif m.shape: + result._mask = make_mask_none(result.shape, result.dtype) if isinstance(a, MaskedArray): result._update_from(a) if isinstance(b, MaskedArray): @@ -607,7 +641,7 @@ elif m: return masked return result -# +# # result = self.f(d1, d2, *args, **kwargs).view(get_masked_subclass(a, b)) # if len(result.shape): # if m is not nomask: @@ -724,18 +758,19 @@ def __call__(self, a, b, *args, **kwargs): "Execute the call behavior." ma = getmask(a) - mb = getmask(b) + mb = getmaskarray(b) da = getdata(a) db = getdata(b) t = narray(self.domain(da, db), copy=False) if t.any(None): - mb = mask_or(mb, t) + mb = mask_or(mb, t, shrink=False) # The following line controls the domain filling if t.size == db.size: db = np.where(t, self.filly, db) else: db = np.where(np.resize(t, db.shape), self.filly, db) - m = mask_or(ma, mb) + # Shrink m if a.mask was nomask, otherwise don't. + m = mask_or(ma, mb, shrink=(getattr(a, '_mask', nomask) is nomask)) if (not m.ndim) and m: return masked elif (m is nomask): @@ -744,7 +779,12 @@ result = np.where(m, da, self.f(da, db, *args, **kwargs)) if result.shape: result = result.view(get_masked_subclass(a, b)) - result._mask = m + # If we have a mask, make sure it's broadcasted properly + if m.any(): + result._mask = mask_or(getmaskarray(a), mb) + # If some initial masks where not shrunk, don't shrink the result + elif m.shape: + result._mask = make_mask_none(result.shape, result.dtype) if isinstance(a, MaskedArray): result._update_from(a) if isinstance(b, MaskedArray): @@ -832,36 +872,37 @@ #---- --- Mask creation functions --- #####-------------------------------------------------------------------------- +def _recursive_make_descr(datatype, newtype=bool_): + "Private function allowing recursion in make_descr." + # Do we have some name fields ? + if datatype.names: + descr = [] + for name in datatype.names: + field = datatype.fields[name] + if len(field) == 3: + # Prepend the title to the name + name = (field[-1], name) + descr.append((name, _recursive_make_descr(field[0], newtype))) + return descr + # Is this some kind of composite a la (np.float,2) + elif datatype.subdtype: + mdescr = list(datatype.subdtype) + mdescr[0] = newtype + return tuple(mdescr) + else: + return newtype + def make_mask_descr(ndtype): """Constructs a dtype description list from a given dtype. Each field is set to a bool. """ - def _make_descr(datatype): - "Private function allowing recursion." - # Do we have some name fields ? - if datatype.names: - descr = [] - for name in datatype.names: - field = datatype.fields[name] - if len(field) == 3: - # Prepend the title to the name - name = (field[-1], name) - descr.append((name, _make_descr(field[0]))) - return descr - # Is this some kind of composite a la (np.float,2) - elif datatype.subdtype: - mdescr = list(datatype.subdtype) - mdescr[0] = np.dtype(bool) - return tuple(mdescr) - else: - return np.bool # Make sure we do have a dtype if not isinstance(ndtype, np.dtype): ndtype = np.dtype(ndtype) - return np.dtype(_make_descr(ndtype)) + return np.dtype(_recursive_make_descr(ndtype, np.bool)) -def get_mask(a): +def getmask(a): """Return the mask of a, if any, or nomask. To get a full array of booleans of the same shape as a, use @@ -869,7 +910,7 @@ """ return getattr(a, '_mask', nomask) -getmask = get_mask +get_mask = getmask def getmaskarray(arr): """Return the mask of arr, if any, or a boolean array of the shape @@ -988,7 +1029,17 @@ ValueError If m1 and m2 have different flexible dtypes. - """ + """ + def _recursive_mask_or(m1, m2, newmask): + names = m1.dtype.names + for name in names: + current1 = m1[name] + if current1.dtype.names: + _recursive_mask_or(current1, m2[name], newmask[name]) + else: + umath.logical_or(current1, m2[name], newmask[name]) + return + # if (m1 is nomask) or (m1 is False): dtype = getattr(m2, 'dtype', MaskType) return make_mask(m2, copy=copy, shrink=shrink, dtype=dtype) @@ -1002,8 +1053,7 @@ raise ValueError("Incompatible dtypes '%s'<>'%s'" % (dtype1, dtype2)) if dtype1.names: newmask = np.empty_like(m1) - for n in dtype1.names: - newmask[n] = umath.logical_or(m1[n], m2[n]) + _recursive_mask_or(m1, m2, newmask) return newmask return make_mask(umath.logical_or(m1, m2), copy=copy, shrink=shrink) @@ -1012,7 +1062,7 @@ """ Returns a completely flattened version of the mask, where nested fields are collapsed. - + Parameters ---------- mask : array_like @@ -1035,7 +1085,7 @@ >>> mask = np.array([(0, (0, 0)), (0, (0, 1))], dtype=mdtype) >>> flatten_mask(mask) array([False, False, False, False, False, True], dtype=bool) - + """ # def _flatmask(mask): @@ -1069,7 +1119,7 @@ def masked_where(condition, a, copy=True): """ - Return ``a`` as an array masked where ``condition`` is True. + Return ``a`` as an array masked where ``condition`` is ``True``. Masked values of ``a`` or ``condition`` are kept. Parameters @@ -1099,35 +1149,45 @@ result._mask = cond return result + def masked_greater(x, value, copy=True): """ - Return the array `x` masked where (x > value). + Return the array `x` masked where ``(x > value)``. Any value of mask already masked is kept masked. """ return masked_where(greater(x, value), x, copy=copy) + def masked_greater_equal(x, value, copy=True): - "Shortcut to masked_where, with condition = (x >= value)." + "Shortcut to masked_where, with condition ``(x >= value)``." return masked_where(greater_equal(x, value), x, copy=copy) + def masked_less(x, value, copy=True): - "Shortcut to masked_where, with condition = (x < value)." + "Shortcut to masked_where, with condition ``(x < value)``." return masked_where(less(x, value), x, copy=copy) + def masked_less_equal(x, value, copy=True): - "Shortcut to masked_where, with condition = (x <= value)." + "Shortcut to masked_where, with condition ``(x <= value)``." return masked_where(less_equal(x, value), x, copy=copy) + def masked_not_equal(x, value, copy=True): - "Shortcut to masked_where, with condition = (x != value)." + "Shortcut to masked_where, with condition ``(x != value)``." return masked_where(not_equal(x, value), x, copy=copy) + def masked_equal(x, value, copy=True): """ - Shortcut to masked_where, with condition = (x == value). For - floating point, consider ``masked_values(x, value)`` instead. + Shortcut to masked_where, with condition ``(x == value)``. + See Also + -------- + masked_where : base function + masked_values : equivalent function for floats. + """ # An alternative implementation relies on filling first: probably not needed. # d = filled(x, 0) @@ -1136,6 +1196,7 @@ # return array(d, mask=m, copy=copy) return masked_where(equal(x, value), x, copy=copy) + def masked_inside(x, v1, v2, copy=True): """ Shortcut to masked_where, where ``condition`` is True for x inside @@ -1153,6 +1214,7 @@ condition = (xf >= v1) & (xf <= v2) return masked_where(condition, x, copy=copy) + def masked_outside(x, v1, v2, copy=True): """ Shortcut to ``masked_where``, where ``condition`` is True for x outside @@ -1170,7 +1232,7 @@ condition = (xf < v1) | (xf > v2) return masked_where(condition, x, copy=copy) -# + def masked_object(x, value, copy=True, shrink=True): """ Mask the array `x` where the data are exactly equal to value. @@ -1199,6 +1261,7 @@ mask = mask_or(mask, make_mask(condition, shrink=shrink)) return masked_array(x, mask=mask, copy=copy, fill_value=value) + def masked_values(x, value, rtol=1.e-5, atol=1.e-8, copy=True, shrink=True): """ Mask the array x where the data are approximately equal in @@ -1236,6 +1299,7 @@ mask = mask_or(mask, make_mask(condition, shrink=shrink)) return masked_array(xnew, mask=mask, copy=copy, fill_value=value) + def masked_invalid(a, copy=True): """ Mask the array for invalid values (NaNs or infs). @@ -1257,6 +1321,7 @@ #####-------------------------------------------------------------------------- #---- --- Printing options --- #####-------------------------------------------------------------------------- + class _MaskedPrintOption: """ Handle the string used to represent missing data in a masked array. @@ -1291,10 +1356,65 @@ #if you single index into a masked location you get this object. masked_print_option = _MaskedPrintOption('--') + +def _recursive_printoption(result, mask, printopt): + """ + Puts printoptions in result where mask is True. + Private function allowing for recursion + """ + names = result.dtype.names + for name in names: + (curdata, curmask) = (result[name], mask[name]) + if curdata.dtype.names: + _recursive_printoption(curdata, curmask, printopt) + else: + np.putmask(curdata, curmask, printopt) + return + +_print_templates = dict(long = """\ +masked_%(name)s(data = + %(data)s, + %(nlen)s mask = + %(mask)s, + %(nlen)s fill_value = %(fill)s) +""", + short = """\ +masked_%(name)s(data = %(data)s, + %(nlen)s mask = %(mask)s, +%(nlen)s fill_value = %(fill)s) +""", + long_flx = """\ +masked_%(name)s(data = + %(data)s, + %(nlen)s mask = + %(mask)s, +%(nlen)s fill_value = %(fill)s, + %(nlen)s dtype = %(dtype)s) +""", + short_flx = """\ +masked_%(name)s(data = %(data)s, +%(nlen)s mask = %(mask)s, +%(nlen)s fill_value = %(fill)s, +%(nlen)s dtype = %(dtype)s) +""") + #####-------------------------------------------------------------------------- #---- --- MaskedArray class --- #####-------------------------------------------------------------------------- +def _recursive_filled(a, mask, fill_value): + """ + Recursively fill `a` with `fill_value`. + Private function + """ + names = a.dtype.names + for name in names: + current = a[name] + if current.dtype.names: + _recursive_filled(current, mask[name], fill_value[name]) + else: + np.putmask(current, mask[name], fill_value[name]) + #............................................................................... class _arraymethod(object): """ @@ -1349,17 +1469,17 @@ elif mask is not nomask: result.__setmask__(getattr(mask, methodname)(*args, **params)) else: - if mask.ndim and mask.all(): + if mask.ndim and (not mask.dtype.names and mask.all()): return masked return result #.......................................................... -class FlatIter(object): +class MaskedIterator(object): "Define an interator." def __init__(self, ma): self.ma = ma - self.ma_iter = np.asarray(ma).flat - + self.dataiter = ma._data.flat + # if ma._mask is nomask: self.maskiter = None else: @@ -1368,19 +1488,79 @@ def __iter__(self): return self + def __getitem__(self, indx): + result = self.dataiter.__getitem__(indx).view(type(self.ma)) + if self.maskiter is not None: + _mask = self.maskiter.__getitem__(indx) + _mask.shape = result.shape + result._mask = _mask + return result + ### This won't work is ravel makes a copy def __setitem__(self, index, value): - a = self.ma.ravel() - a[index] = value + self.dataiter[index] = getdata(value) + if self.maskiter is not None: + self.maskiter[index] = getmaskarray(value) +# self.ma1d[index] = value def next(self): "Returns the next element of the iterator." - d = self.ma_iter.next() + d = self.dataiter.next() if self.maskiter is not None and self.maskiter.next(): d = masked return d +def flatten_structured_array(a): + """ + Flatten a strutured array. + + The datatype of the output is the largest datatype of the (nested) fields. + + Returns + ------- + output : var + Flatten MaskedArray if the input is a MaskedArray, + standard ndarray otherwise. + + Examples + -------- + >>> ndtype = [('a', int), ('b', float)] + >>> a = np.array([(1, 1), (2, 2)], dtype=ndtype) + >>> flatten_structured_array(a) + array([[1., 1.], + [2., 2.]]) + + """ + # + def flatten_sequence(iterable): + """Flattens a compound of nested iterables.""" + for elm in iter(iterable): + if hasattr(elm,'__iter__'): + for f in flatten_sequence(elm): + yield f + else: + yield elm + # + a = np.asanyarray(a) + inishape = a.shape + a = a.ravel() + if isinstance(a, MaskedArray): + out = np.array([tuple(flatten_sequence(d.item())) for d in a._data]) + out = out.view(MaskedArray) + out._mask = np.array([tuple(flatten_sequence(d.item())) + for d in getmaskarray(a)]) + else: + out = np.array([tuple(flatten_sequence(d.item())) for d in a]) + if len(inishape) > 1: + newshape = list(out.shape) + newshape[0] = inishape + out.shape = tuple(flatten_sequence(newshape)) + return out + + + + class MaskedArray(ndarray): """ Arrays with possibly masked values. Masked values of True @@ -1394,32 +1574,32 @@ ---------- data : {var} Input data. - mask : {nomask, sequence} + mask : {nomask, sequence}, optional Mask. Must be convertible to an array of booleans with the same shape as data: True indicates a masked (eg., invalid) data. - dtype : dtype - Data type of the output. If None, the type of the data - argument is used. If dtype is not None and different from - data.dtype, a copy is performed. - copy : bool - Whether to copy the input data (True), or to use a - reference instead. Note: data are NOT copied by default. - subok : {True, boolean} + dtype : {dtype}, optional + Data type of the output. + If dtype is None, the type of the data argument (`data.dtype`) is used. + If dtype is not None and different from `data.dtype`, a copy is performed. + copy : {False, True}, optional + Whether to copy the input data (True), or to use a reference instead. + Note: data are NOT copied by default. + subok : {True, False}, optional Whether to return a subclass of MaskedArray (if possible) or a plain MaskedArray. - ndmin : {0, int} + ndmin : {0, int}, optional Minimum number of dimensions - fill_value : {var} - Value used to fill in the masked values when necessary. If - None, a default based on the datatype is used. - keep_mask : {True, boolean} + fill_value : {var}, optional + Value used to fill in the masked values when necessary. + If None, a default based on the datatype is used. + keep_mask : {True, boolean}, optional Whether to combine mask with the mask of the input data, if any (True), or to use only mask for the output (False). - hard_mask : {False, boolean} - Whether to use a hard mask or not. With a hard mask, - masked values cannot be unmasked. - shrink : {True, boolean} + hard_mask : {False, boolean}, optional + Whether to use a hard mask or not. + With a hard mask, masked values cannot be unmasked. + shrink : {True, boolean}, optional Whether to force compression of an empty mask. """ @@ -1433,10 +1613,12 @@ subok=True, ndmin=0, fill_value=None, keep_mask=True, hard_mask=None, flag=None, shrink=True, **options): - """Create a new masked array from scratch. + """ + Create a new masked array from scratch. - Note: you can also create an array with the .view(MaskedArray) - method. + Notes + ----- + A masked array can also be created by taking a .view(MaskedArray). """ if flag is not None: @@ -1600,7 +1782,8 @@ return #.................................. def __array_wrap__(self, obj, context=None): - """Special hook for ufuncs. + """ + Special hook for ufuncs. Wraps the numpy array and sets the mask according to context. """ result = obj.view(type(self)) @@ -1613,10 +1796,11 @@ # Get the domain mask................ domain = ufunc_domain.get(func, None) if domain is not None: + # Take the domain, and make sure it's a ndarray if len(args) > 2: - d = reduce(domain, args) + d = filled(reduce(domain, args), True) else: - d = domain(*args) + d = filled(domain(*args), True) # Fill the result where the domain is wrong try: # Binary domain: take the last value @@ -1634,7 +1818,8 @@ if d is not nomask: m = d else: - m |= d + # Don't modify inplace, we risk back-propagation + m = (m | d) # Make sure the mask has the proper size if result.shape == () and m: return masked @@ -1666,7 +1851,7 @@ if dtype is None: dtype = output.dtype mdtype = make_mask_descr(dtype) - + output._mask = self._mask.view(mdtype, ndarray) output._mask.shape = output.shape # Make sure to reset the _fill_value if needed @@ -1833,7 +2018,8 @@ ndarray.__setitem__(_data, indx, dindx) _mask[indx] = mindx return - #............................................ + + def __getslice__(self, i, j): """x.__getslice__(i, j) <==> x[i:j] @@ -1842,7 +2028,8 @@ """ return self.__getitem__(slice(i, j)) - #........................ + + def __setslice__(self, i, j, value): """x.__setslice__(i, j, value) <==> x[i:j]=value @@ -1851,7 +2038,8 @@ """ self.__setitem__(slice(i, j), value) - #............................................ + + def __setmask__(self, mask, copy=False): """Set the mask. @@ -1917,33 +2105,28 @@ # return self._mask.reshape(self.shape) return self._mask mask = property(fget=_get_mask, fset=__setmask__, doc="Mask") - # - def _getrecordmask(self): - """Return the mask of the records. + + + def _get_recordmask(self): + """ + Return the mask of the records. A record is masked when all the fields are masked. """ _mask = ndarray.__getattribute__(self, '_mask').view(ndarray) if _mask.dtype.names is None: return _mask - if _mask.size > 1: - axis = 1 - else: - axis = None - # - try: - return _mask.view((bool_, len(self.dtype))).all(axis) - except ValueError: - return np.all([[f[n].all() for n in _mask.dtype.names] - for f in _mask], axis=axis) + return np.all(flatten_structured_array(_mask), axis=-1) - def _setrecordmask(self): + + def _set_recordmask(self): """Return the mask of the records. A record is masked when all the fields are masked. """ raise NotImplementedError("Coming soon: setting the mask per records!") - recordmask = property(fget=_getrecordmask) + recordmask = property(fget=_get_recordmask) + #............................................ def harden_mask(self): """Force the mask to hard. @@ -1957,6 +2140,10 @@ """ self._hardmask = False + hardmask = property(fget=lambda self: self._hardmask, + doc="Hardness of the mask") + + def unshare_mask(self): """Copy the mask and set the sharedmask flag to False. @@ -1965,6 +2152,9 @@ self._mask = self._mask.copy() self._sharedmask = False + sharedmask = property(fget=lambda self: self._sharedmask, + doc="Share status of the mask (read-only).") + def shrink_mask(self): """Reduce a mask to nomask when possible. @@ -1974,6 +2164,10 @@ self._mask = nomask #............................................ + + baseclass = property(fget= lambda self:self._baseclass, + doc="Class of the underlying data (read-only).") + def _get_data(self): """Return the current data, as a view of the original underlying data. @@ -1996,7 +2190,7 @@ """Return a flat iterator. """ - return FlatIter(self) + return MaskedIterator(self) # def _set_flat (self, value): """Set a flattened version of self to value. @@ -2027,24 +2221,25 @@ fill_value = property(fget=get_fill_value, fset=set_fill_value, doc="Filling value.") + def filled(self, fill_value=None): - """Return a copy of self._data, where masked values are filled - with fill_value. + """ + Return a copy of self, where masked values are filled with `fill_value`. - If fill_value is None, self.fill_value is used instead. + If `fill_value` is None, `self.fill_value` is used instead. - Notes - ----- - + Subclassing is preserved - + The result is NOT a MaskedArray ! + Notes + ----- + + Subclassing is preserved + + The result is NOT a MaskedArray ! - Examples - -------- - >>> x = np.ma.array([1,2,3,4,5], mask=[0,0,1,0,1], fill_value=-999) - >>> x.filled() - array([1,2,-999,4,-999]) - >>> type(x.filled()) - + Examples + -------- + >>> x = np.ma.array([1,2,3,4,5], mask=[0,0,1,0,1], fill_value=-999) + >>> x.filled() + array([1,2,-999,4,-999]) + >>> type(x.filled()) + """ m = self._mask @@ -2061,9 +2256,7 @@ # if m.dtype.names: result = self._data.copy() - for n in result.dtype.names: - field = result[n] - np.putmask(field, self._mask[n], fill_value[n]) + _recursive_filled(result, self._mask, fill_value) elif not m.any(): return self._data else: @@ -2184,13 +2377,9 @@ res = self._data.astype("|O8") res[m] = f else: - rdtype = [list(_) for _ in self.dtype.descr] - for r in rdtype: - r[1] = '|O8' - rdtype = [tuple(_) for _ in rdtype] + rdtype = _recursive_make_descr(self.dtype, "|O8") res = self._data.astype(rdtype) - for field in names: - np.putmask(res[field], m[field], f) + _recursive_printoption(res, m, f) else: res = self.filled(self.fill_value) return str(res) @@ -2199,44 +2388,71 @@ """Literal string representation. """ - with_mask = """\ -masked_%(name)s(data = - %(data)s, - mask = - %(mask)s, - fill_value=%(fill)s) -""" - with_mask1 = """\ -masked_%(name)s(data = %(data)s, - mask = %(mask)s, - fill_value=%(fill)s) -""" - with_mask_flx = """\ -masked_%(name)s(data = - %(data)s, - mask = - %(mask)s, - fill_value=%(fill)s, - dtype=%(dtype)s) -""" - with_mask1_flx = """\ -masked_%(name)s(data = %(data)s, - mask = %(mask)s, - fill_value=%(fill)s - dtype=%(dtype)s) -""" n = len(self.shape) name = repr(self._data).split('(')[0] - parameters = dict(name=name, data=str(self), mask=str(self._mask), + parameters = dict(name=name, nlen=" "*len(name), + data=str(self), mask=str(self._mask), fill=str(self.fill_value), dtype=str(self.dtype)) if self.dtype.names: if n <= 1: - return with_mask1_flx % parameters - return with_mask_flx % parameters + return _print_templates['short_flx'] % parameters + return _print_templates['long_flx'] % parameters elif n <= 1: - return with_mask1 % parameters - return with_mask % parameters + return _print_templates['short'] % parameters + return _print_templates['long'] % parameters #............................................ + def __eq__(self, other): + "Check whether other equals self elementwise" + omask = getattr(other, '_mask', nomask) + if omask is nomask: + check = ndarray.__eq__(self.filled(0), other).view(type(self)) + check._mask = self._mask + else: + odata = filled(other, 0) + check = ndarray.__eq__(self.filled(0), odata).view(type(self)) + if self._mask is nomask: + check._mask = omask + else: + mask = mask_or(self._mask, omask) + if mask.dtype.names: + if mask.size > 1: + axis = 1 + else: + axis = None + try: + mask = mask.view((bool_, len(self.dtype))).all(axis) + except ValueError: + mask = np.all([[f[n].all() for n in mask.dtype.names] + for f in mask], axis=axis) + check._mask = mask + return check + # + def __ne__(self, other): + "Check whether other doesn't equal self elementwise" + omask = getattr(other, '_mask', nomask) + if omask is nomask: + check = ndarray.__ne__(self.filled(0), other).view(type(self)) + check._mask = self._mask + else: + odata = filled(other, 0) + check = ndarray.__ne__(self.filled(0), odata).view(type(self)) + if self._mask is nomask: + check._mask = omask + else: + mask = mask_or(self._mask, omask) + if mask.dtype.names: + if mask.size > 1: + axis = 1 + else: + axis = None + try: + mask = mask.view((bool_, len(self.dtype))).all(axis) + except ValueError: + mask = np.all([[f[n].all() for n in mask.dtype.names] + for f in mask], axis=axis) + check._mask = mask + return check + # def __add__(self, other): "Add other to self, and return a new masked array." return add(self, other) @@ -2259,7 +2475,7 @@ # def __rmul__(self, other): "Multiply other by self, and return a new masked array." - return multiply(other, self) + return multiply(self, other) # def __div__(self, other): "Divide other into self, and return a new masked array." @@ -2281,7 +2497,9 @@ "Add other to self in-place." m = getmask(other) if self._mask is nomask: - self._mask = m + if m is not nomask and m.any(): + self._mask = make_mask_none(self.shape, self.dtype) + self._mask += m else: if m is not nomask: self._mask += m @@ -2292,7 +2510,9 @@ "Subtract other from self in-place." m = getmask(other) if self._mask is nomask: - self._mask = m + if m is not nomask and m.any(): + self._mask = make_mask_none(self.shape, self.dtype) + self._mask += m elif m is not nomask: self._mask += m ndarray.__isub__(self._data, np.where(self._mask, 0, getdata(other))) @@ -2302,7 +2522,9 @@ "Multiply self by other in-place." m = getmask(other) if self._mask is nomask: - self._mask = m + if m is not nomask and m.any(): + self._mask = make_mask_none(self.shape, self.dtype) + self._mask += m elif m is not nomask: self._mask += m ndarray.__imul__(self._data, np.where(self._mask, 1, getdata(other))) @@ -2324,7 +2546,7 @@ return self #... def __ipow__(self, other): - "Raise self to the power other, in place" + "Raise self to the power other, in place." other_data = getdata(other) other_mask = getmask(other) ndarray.__ipow__(self._data, np.where(self._mask, 1, other_data)) @@ -2494,25 +2716,24 @@ return result # def resize(self, newshape, refcheck=True, order=False): - """Attempt to modify the size and the shape of the array in place. + """ + Change shape and size of array in-place. - The array must own its own memory and not be referenced by - other arrays. - - Returns - ------- - None. - """ - try: - self._data.resize(newshape, refcheck, order) - if self.mask is not nomask: - self._mask.resize(newshape, refcheck, order) - except ValueError: - raise ValueError("Cannot resize an array that has been referenced " - "or is referencing another array in this way.\n" - "Use the resize function.") - return None + # Note : the 'order' keyword looks broken, let's just drop it +# try: +# ndarray.resize(self, newshape, refcheck=refcheck) +# if self.mask is not nomask: +# self._mask.resize(newshape, refcheck=refcheck) +# except ValueError: +# raise ValueError("Cannot resize an array that has been referenced " +# "or is referencing another array in this way.\n" +# "Use the numpy.ma.resize function.") +# return None + errmsg = "A masked array does not own its data "\ + "and therefore cannot be resized.\n" \ + "Use the numpy.ma.resize function instead." + raise ValueError(errmsg) # def put(self, indices, values, mode='raise'): """ @@ -3103,7 +3324,7 @@ index_array : ndarray, int Array of indices that sort `a` along the specified axis. In other words, ``a[index_array]`` yields a sorted `a`. - + See Also -------- sort : Describes sorting algorithms used. @@ -3430,7 +3651,7 @@ outmask = out._mask = make_mask_none(out.shape) outmask.flat = newmask else: - + if out.dtype.kind in 'biu': errmsg = "Masked data information would be lost in one or more"\ " location." @@ -3547,9 +3768,10 @@ def tofile(self, fid, sep="", format="%s"): raise NotImplementedError("Not implemented yet, sorry...") - def torecords(self): + def toflex(self): """ Transforms a MaskedArray into a flexible-type array with two fields: + * the ``_data`` field stores the ``_data`` part of the array; * the ``_mask`` field stores the ``_mask`` part of the array; @@ -3592,6 +3814,7 @@ record['_data'] = self._data record['_mask'] = self._mask return record + torecords = toflex #-------------------------------------------- # Pickling def __getstate__(self): @@ -3651,7 +3874,7 @@ """ _data = ndarray.__new__(baseclass, baseshape, basetype) - _mask = ndarray.__new__(ndarray, baseshape, 'b1') + _mask = ndarray.__new__(ndarray, baseshape, make_mask_descr(basetype)) return subtype.__new__(subtype, _data, mask=_mask, dtype=basetype,) @@ -3975,12 +4198,12 @@ def compressed(x): """ Return a 1-D array of all the non-masked data. - + See Also -------- MaskedArray.compressed equivalent method - + """ if getmask(x) is nomask: return np.asanyarray(x) @@ -4348,8 +4571,8 @@ Returns the inner product of a and b for arrays of floating point types. Like the generic NumPy equivalent the product sum is over the last dimension - of a and b. - + of a and b. + Notes ----- The first argument is not conjugated. @@ -4384,7 +4607,8 @@ outerproduct = outer def allequal (a, b, fill_value=True): - """Return True if all entries of a and b are equal, using + """ + Return True if all entries of a and b are equal, using fill_value as a truth value where either or both are masked. """ @@ -4419,7 +4643,7 @@ fill_value : boolean, optional Whether masked values in a or b are considered equal (True) or not (False). - + rtol : Relative tolerance The relative difference is equal to `rtol` * `b`. atol : Absolute tolerance @@ -4442,7 +4666,7 @@ True. absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`)) - + Return True if all elements of a and b are equal subject to given tolerances. @@ -4475,10 +4699,10 @@ return np.all(d) #.............................................................................. -def asarray(a, dtype=None): +def asarray(a, dtype=None, order=None): """ - Convert the input to a masked array. - + Convert the input `a` to a masked array of the given datatype. + Parameters ---------- a : array_like @@ -4490,29 +4714,40 @@ order : {'C', 'F'}, optional Whether to use row-major ('C') or column-major ('FORTRAN') memory representation. Defaults to 'C'. - + Returns ------- out : ndarray MaskedArray interpretation of `a`. No copy is performed if the input - is already an ndarray. If `a` is a subclass of ndarray, a base - class ndarray is returned. - Return a as a MaskedArray object of the given dtype. - If dtype is not given or None, is is set to the dtype of a. - No copy is performed if a is already an array. - Subclasses are converted to the base class MaskedArray. + is already an ndarray. If `a` is a subclass of MaskedArray, a base + class MaskedArray is returned. """ return masked_array(a, dtype=dtype, copy=False, keep_mask=True, subok=False) def asanyarray(a, dtype=None): - """asanyarray(data, dtype) = array(data, dtype, copy=0, subok=1) + """ + Convert the input `a` to a masked array of the given datatype. + If `a` is a subclass of MaskedArray, its class is conserved. - Return a as an masked array. - If dtype is not given or None, is is set to the dtype of a. - No copy is performed if a is already an array. - Subclasses are conserved. + Parameters + ---------- + a : array_like + Input data, in any form that can be converted to an array. This + includes lists, lists of tuples, tuples, tuples of tuples, tuples + of lists and ndarrays. + dtype : data-type, optional + By default, the data-type is inferred from the input data. + order : {'C', 'F'}, optional + Whether to use row-major ('C') or column-major ('FORTRAN') memory + representation. Defaults to 'C'. + Returns + ------- + out : ndarray + MaskedArray interpretation of `a`. No copy is performed if the input + is already an ndarray. + """ return masked_array(a, dtype=dtype, copy=False, keep_mask=True, subok=True) @@ -4557,6 +4792,15 @@ raise NotImplementedError("Not yet implemented. Sorry") +def fromflex(fxarray): + """ + Rebuilds a masked_array from a flexible-type array output by the '.torecord' + array + """ + return masked_array(fxarray['_data'], mask=fxarray['_mask']) + + + class _convert2ma: """Convert functions from numpy to numpy.ma. Modified: branches/dynamic_cpu_configuration/numpy/ma/extras.py =================================================================== --- branches/dynamic_cpu_configuration/numpy/ma/extras.py 2009-02-14 22:42:29 UTC (rev 6368) +++ branches/dynamic_cpu_configuration/numpy/ma/extras.py 2009-02-15 12:03:15 UTC (rev 6369) @@ -19,11 +19,14 @@ 'ediff1d', 'flatnotmasked_contiguous', 'flatnotmasked_edges', 'hsplit', 'hstack', + 'intersect1d', 'intersect1d_nu', 'mask_cols', 'mask_rowcols', 'mask_rows', 'masked_all', 'masked_all_like', 'median', 'mr_', 'notmasked_contiguous', 'notmasked_edges', 'polyfit', 'row_stack', + 'setdiff1d', 'setmember1d', 'setxor1d', + 'unique1d', 'union1d', 'vander', 'vstack', ] @@ -45,22 +48,19 @@ #............................................................................... def issequence(seq): """Is seq a sequence (ndarray, list or tuple)?""" - if isinstance(seq, ndarray): + if isinstance(seq, (ndarray, tuple, list)): return True - elif isinstance(seq, tuple): - return True - elif isinstance(seq, list): - return True return False def count_masked(arr, axis=None): - """Count the number of masked elements along the given axis. + """ + Count the number of masked elements along the given axis. Parameters ---------- - axis : int, optional - Axis along which to count. - If None (default), a flattened version of the array is used. + axis : int, optional + Axis along which to count. + If None (default), a flattened version of the array is used. """ m = getmaskarray(arr) @@ -136,9 +136,12 @@ res.append(masked_array(_d, mask=_m)) return res -atleast_1d = _fromnxfunction('atleast_1d') -atleast_2d = _fromnxfunction('atleast_2d') -atleast_3d = _fromnxfunction('atleast_3d') +#atleast_1d = _fromnxfunction('atleast_1d') +#atleast_2d = _fromnxfunction('atleast_2d') +#atleast_3d = _fromnxfunction('atleast_3d') +atleast_1d = np.atleast_1d +atleast_2d = np.atleast_2d +atleast_3d = np.atleast_3d vstack = row_stack = _fromnxfunction('vstack') hstack = _fromnxfunction('hstack') @@ -252,7 +255,8 @@ def average(a, axis=None, weights=None, returned=False): - """Average the array over the given axis. + """ + Average the array over the given axis. Parameters ---------- @@ -440,10 +444,10 @@ #.............................................................................. def compress_rowcols(x, axis=None): """ - Suppress the rows and/or columns of a 2D array that contains + Suppress the rows and/or columns of a 2D array that contain masked values. - The suppression behavior is selected with the `axis`parameter. + The suppression behavior is selected with the `axis` parameter. - If axis is None, rows and columns are suppressed. - If axis is 0, only rows are suppressed. @@ -482,13 +486,15 @@ return x._data[idxr][:,idxc] def compress_rows(a): - """Suppress whole rows of a 2D array that contain masked values. + """ + Suppress whole rows of a 2D array that contain masked values. """ return compress_rowcols(a, 0) def compress_cols(a): - """Suppress whole columnss of a 2D array that contain masked values. + """ + Suppress whole columns of a 2D array that contain masked values. """ return compress_rowcols(a, 1) @@ -530,30 +536,35 @@ return a def mask_rows(a, axis=None): - """Mask whole rows of a 2D array that contain masked values. + """ + Mask whole rows of a 2D array that contain masked values. Parameters ---------- - axis : int, optional - Axis along which to perform the operation. - If None, applies to a flattened version of the array. + axis : int, optional + Axis along which to perform the operation. + If None, applies to a flattened version of the array. + """ return mask_rowcols(a, 0) def mask_cols(a, axis=None): - """Mask whole columns of a 2D array that contain masked values. + """ + Mask whole columns of a 2D array that contain masked values. Parameters ---------- - axis : int, optional - Axis along which to perform the operation. - If None, applies to a flattened version of the array. + axis : int, optional + Axis along which to perform the operation. + If None, applies to a flattened version of the array. + """ return mask_rowcols(a, 1) def dot(a,b, strict=False): - """Return the dot product of two 2D masked arrays a and b. + """ + Return the dot product of two 2D masked arrays a and b. Like the generic numpy equivalent, the product sum is over the last dimension of a and the second-to-last dimension of b. If strict is True, @@ -582,74 +593,213 @@ m = ~np.dot(am, bm) return masked_array(d, mask=m) -#............................................................................... -def ediff1d(array, to_end=None, to_begin=None): - """Return the differences between consecutive elements of an - array, possibly with prefixed and/or appended values. +#####-------------------------------------------------------------------------- +#---- --- arraysetops --- +#####-------------------------------------------------------------------------- - Parameters - ---------- - array : {array} - Input array, will be flattened before the difference is taken. - to_end : {number}, optional - If provided, this number will be tacked onto the end of the returned - differences. - to_begin : {number}, optional - If provided, this number will be taked onto the beginning of the - returned differences. +def ediff1d(arr, to_end=None, to_begin=None): + """ + Computes the differences between consecutive elements of an array. + This function is the equivalent of `numpy.ediff1d` that takes masked + values into account. + + See Also + -------- + numpy.eddif1d : equivalent function for ndarrays. + Returns ------- - ed : {array} - The differences. Loosely, this will be (ary[1:] - ary[:-1]). - + output : MaskedArray + """ - a = masked_array(array, copy=True) - if a.ndim > 1: - a.reshape((a.size,)) - (d, m, n) = (a._data, a._mask, a.size-1) - dd = d[1:]-d[:-1] - if m is nomask: - dm = nomask - else: - dm = m[1:]-m[:-1] + arr = ma.asanyarray(arr).flat + ed = arr[1:] - arr[:-1] + arrays = [ed] # + if to_begin is not None: + arrays.insert(0, to_begin) if to_end is not None: - to_end = asarray(to_end) - nend = to_end.size - if to_begin is not None: - to_begin = asarray(to_begin) - nbegin = to_begin.size - r_data = np.empty((n+nend+nbegin,), dtype=a.dtype) - r_mask = np.zeros((n+nend+nbegin,), dtype=bool) - r_data[:nbegin] = to_begin._data - r_mask[:nbegin] = to_begin._mask - r_data[nbegin:-nend] = dd - r_mask[nbegin:-nend] = dm - else: - r_data = np.empty((n+nend,), dtype=a.dtype) - r_mask = np.zeros((n+nend,), dtype=bool) - r_data[:-nend] = dd - r_mask[:-nend] = dm - r_data[-nend:] = to_end._data - r_mask[-nend:] = to_end._mask + arrays.append(to_end) # - elif to_begin is not None: - to_begin = asarray(to_begin) - nbegin = to_begin.size - r_data = np.empty((n+nbegin,), dtype=a.dtype) - r_mask = np.zeros((n+nbegin,), dtype=bool) - r_data[:nbegin] = to_begin._data - r_mask[:nbegin] = to_begin._mask - r_data[nbegin:] = dd - r_mask[nbegin:] = dm + if len(arrays) != 1: + # We'll save ourselves a copy of a potentially large array in the common + # case where neither to_begin or to_end was given. + ed = hstack(arrays) # + return ed + + +def unique1d(ar1, return_index=False, return_inverse=False): + """ + Finds the unique elements of an array. + + Masked values are considered the same element (masked). + + The output array is always a MaskedArray. + + See Also + -------- + np.unique1d : equivalent function for ndarrays. + """ + output = np.unique1d(ar1, + return_index=return_index, + return_inverse=return_inverse) + if isinstance(output, tuple): + output = list(output) + output[0] = output[0].view(MaskedArray) + output = tuple(output) else: - r_data = dd - r_mask = dm - return masked_array(r_data, mask=r_mask) + output = output.view(MaskedArray) + return output +def intersect1d(ar1, ar2): + """ + Returns the repeated or unique elements belonging to the two arrays. + + Masked values are assumed equals one to the other. + The output is always a masked array + + See Also + -------- + numpy.intersect1d : equivalent function for ndarrays. + + Examples + -------- + >>> x = array([1, 3, 3, 3], mask=[0, 0, 0, 1]) + >>> y = array([3, 1, 1, 1], mask=[0, 0, 0, 1]) + >>> intersect1d(x, y) + masked_array(data = [1 1 3 3 --], + mask = [False False False False True], + fill_value = 999999) + """ + aux = ma.concatenate((ar1,ar2)) + aux.sort() + return aux[aux[1:] == aux[:-1]] + + + +def intersect1d_nu(ar1, ar2): + """ + Returns the unique elements common to both arrays. + + Masked values are considered equal one to the other. + The output is always a masked array. + + See Also + -------- + intersect1d : Returns repeated or unique common elements. + numpy.intersect1d_nu : equivalent function for ndarrays. + + Examples + -------- + >>> x = array([1, 3, 3, 3], mask=[0, 0, 0, 1]) + >>> y = array([3, 1, 1, 1], mask=[0, 0, 0, 1]) + >>> intersect1d_nu(x, y) + masked_array(data = [1 3 --], + mask = [False False True], + fill_value = 999999) + + """ + # Might be faster than unique1d( intersect1d( ar1, ar2 ) )? + aux = ma.concatenate((unique1d(ar1), unique1d(ar2))) + aux.sort() + return aux[aux[1:] == aux[:-1]] + + + +def setxor1d(ar1, ar2): + """ + Set exclusive-or of 1D arrays with unique elements. + + See Also + -------- + numpy.setxor1d : equivalent function for ndarrays + + """ + aux = ma.concatenate((ar1, ar2)) + if aux.size == 0: + return aux + aux.sort() + auxf = aux.filled() +# flag = ediff1d( aux, to_end = 1, to_begin = 1 ) == 0 + flag = ma.concatenate(([True], (auxf[1:] != auxf[:-1]), [True])) +# flag2 = ediff1d( flag ) == 0 + flag2 = (flag[1:] == flag[:-1]) + return aux[flag2] + + +def setmember1d(ar1, ar2): + """ + Return a boolean array set True where first element is in second array. + + See Also + -------- + numpy.setmember1d : equivalent function for ndarrays. + + """ + ar1 = ma.asanyarray(ar1) + ar2 = ma.asanyarray( ar2 ) + ar = ma.concatenate((ar1, ar2 )) + b1 = ma.zeros(ar1.shape, dtype = np.int8) + b2 = ma.ones(ar2.shape, dtype = np.int8) + tt = ma.concatenate((b1, b2)) + + # We need this to be a stable sort, so always use 'mergesort' here. The + # values from the first array should always come before the values from the + # second array. + perm = ar.argsort(kind='mergesort') + aux = ar[perm] + aux2 = tt[perm] +# flag = ediff1d( aux, 1 ) == 0 + flag = ma.concatenate((aux[1:] == aux[:-1], [False])) + ii = ma.where( flag * aux2 )[0] + aux = perm[ii+1] + perm[ii+1] = perm[ii] + perm[ii] = aux + # + indx = perm.argsort(kind='mergesort')[:len( ar1 )] + # + return flag[indx] + + +def union1d(ar1, ar2): + """ + Union of 1D arrays with unique elements. + + See also + -------- + numpy.union1d : equivalent function for ndarrays. + + """ + return unique1d(ma.concatenate((ar1, ar2))) + + +def setdiff1d(ar1, ar2): + """ + Set difference of 1D arrays with unique elements. + + See Also + -------- + numpy.setdiff1d : equivalent function for ndarrays + + """ + aux = setmember1d(ar1,ar2) + if aux.size == 0: + return aux + else: + return ma.asarray(ar1)[aux == 0] + + + +#####-------------------------------------------------------------------------- +#---- --- Covariance --- +#####-------------------------------------------------------------------------- + + + + def _covhelper(x, y=None, rowvar=True, allow_masked=True): """ Private function for the computation of covariance and correlation @@ -747,7 +897,8 @@ def corrcoef(x, y=None, rowvar=True, bias=False, allow_masked=True): - """The correlation coefficients formed from the array x, where the + """ + The correlation coefficients formed from the array x, where the rows are the observations, and the columns are variables. corrcoef(x,y) where x and y are 1d arrays is the same as @@ -818,7 +969,8 @@ #####-------------------------------------------------------------------------- class MAxisConcatenator(AxisConcatenator): - """Translate slice objects to concatenation along an axis. + """ + Translate slice objects to concatenation along an axis. """ @@ -877,11 +1029,13 @@ return self._retval(res) class mr_class(MAxisConcatenator): - """Translate slice objects to concatenation along the first axis. + """ + Translate slice objects to concatenation along the first axis. - For example: - >>> np.ma.mr_[np.ma.array([1,2,3]), 0, 0, np.ma.array([4,5,6])] - array([1, 2, 3, 0, 0, 4, 5, 6]) + Examples + -------- + >>> np.ma.mr_[np.ma.array([1,2,3]), 0, 0, np.ma.array([4,5,6])] + array([1, 2, 3, 0, 0, 4, 5, 6]) """ def __init__(self): @@ -894,7 +1048,8 @@ #####-------------------------------------------------------------------------- def flatnotmasked_edges(a): - """Find the indices of the first and last not masked values in a + """ + Find the indices of the first and last not masked values in a 1D masked array. If all values are masked, returns None. """ @@ -907,8 +1062,10 @@ else: return None + def notmasked_edges(a, axis=None): - """Find the indices of the first and last not masked values along + """ + Find the indices of the first and last not masked values along the given axis in a masked array. If all values are masked, return None. Otherwise, return a list @@ -917,9 +1074,10 @@ Parameters ---------- - axis : int, optional - Axis along which to perform the operation. - If None, applies to a flattened version of the array. + axis : int, optional + Axis along which to perform the operation. + If None, applies to a flattened version of the array. + """ a = asarray(a) if axis is None or a.ndim == 1: @@ -929,8 +1087,10 @@ return [tuple([idx[i].min(axis).compressed() for i in range(a.ndim)]), tuple([idx[i].max(axis).compressed() for i in range(a.ndim)]),] + def flatnotmasked_contiguous(a): - """Find contiguous unmasked data in a flattened masked array. + """ + Find contiguous unmasked data in a flattened masked array. Return a sorted sequence of slices (start index, end index). @@ -950,22 +1110,22 @@ return result def notmasked_contiguous(a, axis=None): - """Find contiguous unmasked data in a masked array along the given - axis. + """ + Find contiguous unmasked data in a masked array along the given axis. Parameters ---------- - axis : int, optional - Axis along which to perform the operation. - If None, applies to a flattened version of the array. + axis : int, optional + Axis along which to perform the operation. + If None, applies to a flattened version of the array. Returns ------- - A sorted sequence of slices (start index, end index). + A sorted sequence of slices (start index, end index). Notes ----- - Only accepts 2D arrays at most. + Only accepts 2D arrays at most. """ a = asarray(a) Modified: branches/dynamic_cpu_configuration/numpy/ma/mrecords.py =================================================================== --- branches/dynamic_cpu_configuration/numpy/ma/mrecords.py 2009-02-14 22:42:29 UTC (rev 6368) +++ branches/dynamic_cpu_configuration/numpy/ma/mrecords.py 2009-02-15 12:03:15 UTC (rev 6369) @@ -357,7 +357,7 @@ dtype = None else: output = ndarray.view(self, dtype) - # OK, there's the change + # OK, there's the change except TypeError: dtype = np.dtype(dtype) # we need to revert to MaskedArray, but keeping the possibility Modified: branches/dynamic_cpu_configuration/numpy/ma/tests/test_core.py =================================================================== --- branches/dynamic_cpu_configuration/numpy/ma/tests/test_core.py 2009-02-14 22:42:29 UTC (rev 6368) +++ branches/dynamic_cpu_configuration/numpy/ma/tests/test_core.py 2009-02-15 12:03:15 UTC (rev 6369) @@ -474,6 +474,20 @@ np.array([(1, '1', 1.)], dtype=flexi.dtype)) + def test_filled_w_nested_dtype(self): + "Test filled w/ nested dtype" + ndtype = [('A', int), ('B', [('BA', int), ('BB', int)])] + a = array([(1, (1, 1)), (2, (2, 2))], + mask=[(0, (1, 0)), (0, (0, 1))], dtype=ndtype) + test = a.filled(0) + control = np.array([(1, (0, 1)), (2, (2, 0))], dtype=ndtype) + assert_equal(test, control) + # + test = a['B'].filled(0) + control = np.array([(0, 1), (2, 0)], dtype=a['B'].dtype) + assert_equal(test, control) + + def test_optinfo_propagation(self): "Checks that _optinfo dictionary isn't back-propagated" x = array([1,2,3,], dtype=float) @@ -483,6 +497,55 @@ y._optinfo['info'] = '!!!' assert_equal(x._optinfo['info'], '???') + + def test_fancy_printoptions(self): + "Test printing a masked array w/ fancy dtype." + fancydtype = np.dtype([('x', int), ('y', [('t', int), ('s', float)])]) + test = array([(1, (2, 3.0)), (4, (5, 6.0))], + mask=[(1, (0, 1)), (0, (1, 0))], + dtype=fancydtype) + control = "[(--, (2, --)) (4, (--, 6.0))]" + assert_equal(str(test), control) + + + def test_flatten_structured_array(self): + "Test flatten_structured_array on arrays" + # On ndarray + ndtype = [('a', int), ('b', float)] + a = np.array([(1, 1), (2, 2)], dtype=ndtype) + test = flatten_structured_array(a) + control = np.array([[1., 1.], [2., 2.]], dtype=np.float) + assert_equal(test, control) + assert_equal(test.dtype, control.dtype) + # On masked_array + a = array([(1, 1), (2, 2)], mask=[(0, 1), (1, 0)], dtype=ndtype) + test = flatten_structured_array(a) + control = array([[1., 1.], [2., 2.]], + mask=[[0, 1], [1, 0]], dtype=np.float) + assert_equal(test, control) + assert_equal(test.dtype, control.dtype) + assert_equal(test.mask, control.mask) + # On masked array with nested structure + ndtype = [('a', int), ('b', [('ba', int), ('bb', float)])] + a = array([(1, (1, 1.1)), (2, (2, 2.2))], + mask=[(0, (1, 0)), (1, (0, 1))], dtype=ndtype) + test = flatten_structured_array(a) + control = array([[1., 1., 1.1], [2., 2., 2.2]], + mask=[[0, 1, 0], [1, 0, 1]], dtype=np.float) + assert_equal(test, control) + assert_equal(test.dtype, control.dtype) + assert_equal(test.mask, control.mask) + # Keeping the initial shape + ndtype = [('a', int), ('b', float)] + a = np.array([[(1, 1),], [(2, 2),]], dtype=ndtype) + test = flatten_structured_array(a) + control = np.array([[[1., 1.],], [[2., 2.],]], dtype=np.float) + assert_equal(test, control) + assert_equal(test.dtype, control.dtype) + + + + #------------------------------------------------------------------------------ class TestMaskedArrayArithmetic(TestCase): @@ -539,6 +602,7 @@ assert_equal(np.multiply(x,y), multiply(xm, ym)) assert_equal(np.divide(x,y), divide(xm, ym)) + def test_divide_on_different_shapes(self): x = arange(6, dtype=float) x.shape = (2,3) @@ -557,6 +621,7 @@ assert_equal(z, [[-1.,-1.,-1.], [3.,4.,5.]]) assert_equal(z.mask, [[1,1,1],[0,0,0]]) + def test_mixed_arithmetic(self): "Tests mixed arithmetics." na = np.array([1]) @@ -571,6 +636,7 @@ assert_equal(getmaskarray(a/2), [0,0,0]) assert_equal(getmaskarray(2/a), [1,0,1]) + def test_masked_singleton_arithmetic(self): "Tests some scalar arithmetics on MaskedArrays." # Masked singleton should remain masked no matter what @@ -581,6 +647,7 @@ self.failUnless(maximum(xm, xm).mask) self.failUnless(minimum(xm, xm).mask) + def test_arithmetic_with_masked_singleton(self): "Checks that there's no collapsing to masked" x = masked_array([1,2]) @@ -593,6 +660,7 @@ assert_equal(y.shape, x.shape) assert_equal(y._mask, [True, True]) + def test_arithmetic_with_masked_singleton_on_1d_singleton(self): "Check that we're not losing the shape of a singleton" x = masked_array([1, ]) @@ -600,6 +668,7 @@ assert_equal(y.shape, x.shape) assert_equal(y.mask, [True, ]) + def test_scalar_arithmetic(self): x = array(0, mask=0) assert_equal(x.filled().ctypes.data, x.ctypes.data) @@ -608,6 +677,7 @@ assert_equal(xm.shape,(2,)) assert_equal(xm.mask,[1,1]) + def test_basic_ufuncs (self): "Test various functions such as sin, cos." (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d @@ -649,6 +719,7 @@ assert getmask(count(ott,0)) is nomask assert_equal([1,2],count(ott,0)) + def test_minmax_func (self): "Tests minimum and maximum." (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d @@ -672,6 +743,7 @@ x[-1,-1] = masked assert_equal(maximum(x), 2) + def test_minimummaximum_func(self): a = np.ones((2,2)) aminimum = minimum(a,a) @@ -690,6 +762,7 @@ self.failUnless(isinstance(amaximum, MaskedArray)) assert_equal(amaximum, np.maximum.outer(a,a)) + def test_minmax_funcs_with_output(self): "Tests the min/max functions with explicit outputs" mask = np.random.rand(12).round() @@ -735,7 +808,8 @@ self.failUnless(x.min() is masked) self.failUnless(x.max() is masked) self.failUnless(x.ptp() is masked) - #........................ + + def test_addsumprod (self): "Tests add, sum, product." (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d @@ -757,6 +831,98 @@ assert_equal(np.sum(x,1), sum(x,1)) assert_equal(np.product(x,1), product(x,1)) + + def test_binops_d2D(self): + "Test binary operations on 2D data" + a = array([[1.], [2.], [3.]], mask=[[False], [True], [True]]) + b = array([[2., 3.], [4., 5.], [6., 7.]]) + # + test = a * b + control = array([[2., 3.], [2., 2.], [3., 3.]], + mask=[[0, 0], [1, 1], [1, 1]]) + assert_equal(test, control) + assert_equal(test.data, control.data) + assert_equal(test.mask, control.mask) + # + test = b * a + control = array([[2., 3.], [4., 5.], [6., 7.]], + mask=[[0, 0], [1, 1], [1, 1]]) + assert_equal(test, control) + assert_equal(test.data, control.data) + assert_equal(test.mask, control.mask) + # + a = array([[1.], [2.], [3.]]) + b = array([[2., 3.], [4., 5.], [6., 7.]], + mask=[[0, 0], [0, 0], [0, 1]]) + test = a * b + control = array([[2, 3], [8, 10], [18, 3]], + mask=[[0, 0], [0, 0], [0, 1]]) + assert_equal(test, control) + assert_equal(test.data, control.data) + assert_equal(test.mask, control.mask) + # + test = b * a + control = array([[2, 3], [8, 10], [18, 7]], + mask=[[0, 0], [0, 0], [0, 1]]) + assert_equal(test, control) + assert_equal(test.data, control.data) + assert_equal(test.mask, control.mask) + + + def test_domained_binops_d2D(self): + "Test domained binary operations on 2D data" + a = array([[1.], [2.], [3.]], mask=[[False], [True], [True]]) + b = array([[2., 3.], [4., 5.], [6., 7.]]) + # + test = a / b + control = array([[1./2., 1./3.], [2., 2.], [3., 3.]], + mask=[[0, 0], [1, 1], [1, 1]]) + assert_equal(test, control) + assert_equal(test.data, control.data) + assert_equal(test.mask, control.mask) + # + test = b / a + control = array([[2./1., 3./1.], [4., 5.], [6., 7.]], + mask=[[0, 0], [1, 1], [1, 1]]) + assert_equal(test, control) + assert_equal(test.data, control.data) + assert_equal(test.mask, control.mask) + # + a = array([[1.], [2.], [3.]]) + b = array([[2., 3.], [4., 5.], [6., 7.]], + mask=[[0, 0], [0, 0], [0, 1]]) + test = a / b + control = array([[1./2, 1./3], [2./4, 2./5], [3./6, 3]], + mask=[[0, 0], [0, 0], [0, 1]]) + assert_equal(test, control) + assert_equal(test.data, control.data) + assert_equal(test.mask, control.mask) + # + test = b / a + control = array([[2/1., 3/1.], [4/2., 5/2.], [6/3., 7]], + mask=[[0, 0], [0, 0], [0, 1]]) + assert_equal(test, control) + assert_equal(test.data, control.data) + assert_equal(test.mask, control.mask) + + + def test_noshrinking(self): + "Check that we don't shrink a mask when not wanted" + # Binary operations + a = masked_array([1,2,3], mask=[False,False,False], shrink=False) + b = a + 1 + assert_equal(b.mask, [0, 0, 0]) + # In place binary operation + a += 1 + assert_equal(a.mask, [0, 0, 0]) + # Domained binary operation + b = a / 1. + assert_equal(b.mask, [0, 0, 0]) + # In place binary operation + a /= 1. + assert_equal(a.mask, [0, 0, 0]) + + def test_mod(self): "Tests mod" (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d @@ -767,7 +933,6 @@ test = mod(xm, ym) assert_equal(test, np.mod(xm, ym)) assert_equal(test.mask, mask_or(mask_or(xm.mask, ym.mask), (ym == 0))) - def test_TakeTransposeInnerOuter(self): @@ -826,6 +991,56 @@ self.failUnless(output[0] is masked) + def test_eq_on_structured(self): + "Test the equality of structured arrays" + ndtype = [('A', int), ('B', int)] + a = array([(1, 1), (2, 2)], mask=[(0, 1), (0, 0)], dtype=ndtype) + test = (a == a) + assert_equal(test, [True, True]) + assert_equal(test.mask, [False, False]) + b = array([(1, 1), (2, 2)], mask=[(1, 0), (0, 0)], dtype=ndtype) + test = (a == b) + assert_equal(test, [False, True]) + assert_equal(test.mask, [True, False]) + b = array([(1, 1), (2, 2)], mask=[(0, 1), (1, 0)], dtype=ndtype) + test = (a == b) + assert_equal(test, [True, False]) + assert_equal(test.mask, [False, False]) + + + def test_ne_on_structured(self): + "Test the equality of structured arrays" + ndtype = [('A', int), ('B', int)] + a = array([(1, 1), (2, 2)], mask=[(0, 1), (0, 0)], dtype=ndtype) + test = (a != a) + assert_equal(test, [False, False]) + assert_equal(test.mask, [False, False]) + b = array([(1, 1), (2, 2)], mask=[(1, 0), (0, 0)], dtype=ndtype) + test = (a != b) + assert_equal(test, [True, False]) + assert_equal(test.mask, [True, False]) + b = array([(1, 1), (2, 2)], mask=[(0, 1), (1, 0)], dtype=ndtype) + test = (a != b) + assert_equal(test, [False, True]) + assert_equal(test.mask, [False, False]) + + + def test_numpyarithmetics(self): + "Check that the mask is not back-propagated when using numpy functions" + a = masked_array([-1, 0, 1, 2, 3], mask=[0, 0, 0, 0, 1]) + control = masked_array([np.nan, np.nan, 0, np.log(2), -1], + mask=[1, 1, 0, 0, 1]) + # + test = log(a) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + assert_equal(a.mask, [0, 0, 0, 0, 1]) + # + test = np.log(a) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + assert_equal(a.mask, [0, 0, 0, 0, 1]) + #------------------------------------------------------------------------------ class TestMaskedArrayAttributes(TestCase): @@ -923,6 +1138,17 @@ a[1] = 1 assert_equal(a._mask, zeros(10)) + def test_flat(self): + "Test flat on masked_matrices" + test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1]) + test.flat = masked_array([3, 2, 1], mask=[1, 0, 0]) + control = masked_array(np.matrix([[3, 2, 1]]), mask=[1, 0, 0]) + assert_equal(test, control) + # + test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1]) + testflat = test.flat + testflat[:] = testflat[[2, 1, 0]] + assert_equal(test, control) #------------------------------------------------------------------------------ @@ -1049,21 +1275,44 @@ # The shape shouldn't matter ndtype = [('f0', float, (2, 2))] control = np.array((default_fill_value(0.),), - dtype=[('f0',float)]) + dtype=[('f0',float)]).astype(ndtype) assert_equal(_check_fill_value(None, ndtype), control) - control = np.array((0,), dtype=[('f0',float)]) + control = np.array((0,), dtype=[('f0',float)]).astype(ndtype) assert_equal(_check_fill_value(0, ndtype), control) # ndtype = np.dtype("int, (2,3)float, float") control = np.array((default_fill_value(0), default_fill_value(0.), default_fill_value(0.),), - dtype="int, float, float") + dtype="int, float, float").astype(ndtype) test = _check_fill_value(None, ndtype) assert_equal(test, control) - control = np.array((0,0,0), dtype="int, float, float") + control = np.array((0,0,0), dtype="int, float, float").astype(ndtype) assert_equal(_check_fill_value(0, ndtype), control) + + def test_extremum_fill_value(self): + "Tests extremum fill values for flexible type." + a = array([(1, (2, 3)), (4, (5, 6))], + dtype=[('A', int), ('B', [('BA', int), ('BB', int)])]) + test = a.fill_value + assert_equal(test['A'], default_fill_value(a['A'])) + assert_equal(test['B']['BA'], default_fill_value(a['B']['BA'])) + assert_equal(test['B']['BB'], default_fill_value(a['B']['BB'])) + # + test = minimum_fill_value(a) + assert_equal(test[0], minimum_fill_value(a['A'])) + assert_equal(test[1][0], minimum_fill_value(a['B']['BA'])) + assert_equal(test[1][1], minimum_fill_value(a['B']['BB'])) + assert_equal(test[1], minimum_fill_value(a['B'])) + # + test = maximum_fill_value(a) + assert_equal(test[0], maximum_fill_value(a['A'])) + assert_equal(test[1][0], maximum_fill_value(a['B']['BA'])) + assert_equal(test[1][1], maximum_fill_value(a['B']['BB'])) + assert_equal(test[1], maximum_fill_value(a['B'])) + + #------------------------------------------------------------------------------ class TestUfuncs(TestCase): @@ -1125,7 +1374,17 @@ self.failUnless(amask.max(1)[0].mask) self.failUnless(amask.min(1)[0].mask) + def test_ndarray_mask(self): + "Check that the mask of the result is a ndarray (not a MaskedArray...)" + a = masked_array([-1, 0, 1, 2, 3], mask=[0, 0, 0, 0, 1]) + test = np.sqrt(a) + control = masked_array([-1, 0, 1, np.sqrt(2), -1], + mask=[1, 0, 0, 0, 1]) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + self.failUnless(not isinstance(test.mask, MaskedArray)) + #------------------------------------------------------------------------------ class TestMaskedArrayInPlaceArithmetics(TestCase): @@ -1367,6 +1626,51 @@ assert_equal(x.data, [1., 2.**2.5, 3]) assert_equal(x.mask, [0, 0, 1]) + + def test_datafriendly_add_arrays(self): + a = array([[1, 1], [3, 3]]) + b = array([1, 1], mask=[0, 0]) + a += b + assert_equal(a, [[2, 2], [4, 4]]) + if a.mask is not nomask: + assert_equal(a.mask, [[0, 0], [0, 0]]) + # + a = array([[1, 1], [3, 3]]) + b = array([1, 1], mask=[0, 1]) + a += b + assert_equal(a, [[2, 2], [4, 4]]) + assert_equal(a.mask, [[0, 1], [0, 1]]) + + + def test_datafriendly_sub_arrays(self): + a = array([[1, 1], [3, 3]]) + b = array([1, 1], mask=[0, 0]) + a -= b + assert_equal(a, [[0, 0], [2, 2]]) + if a.mask is not nomask: + assert_equal(a.mask, [[0, 0], [0, 0]]) + # + a = array([[1, 1], [3, 3]]) + b = array([1, 1], mask=[0, 1]) + a -= b + assert_equal(a, [[0, 0], [2, 2]]) + assert_equal(a.mask, [[0, 1], [0, 1]]) + + + def test_datafriendly_mul_arrays(self): + a = array([[1, 1], [3, 3]]) + b = array([1, 1], mask=[0, 0]) + a *= b + assert_equal(a, [[1, 1], [3, 3]]) + if a.mask is not nomask: + assert_equal(a.mask, [[0, 0], [0, 0]]) + # + a = array([[1, 1], [3, 3]]) + b = array([1, 1], mask=[0, 1]) + a *= b + assert_equal(a, [[1, 1], [3, 3]]) + assert_equal(a.mask, [[0, 1], [0, 1]]) + #------------------------------------------------------------------------------ class TestMaskedArrayMethods(TestCase): @@ -1442,8 +1746,8 @@ a *= 1e-8 a[0] = 0 self.failUnless(allclose(a, 0, masked_equal=True)) - + def test_allany(self): """Checks the any/all methods/functions.""" x = np.array([[ 0.13, 0.26, 0.90], @@ -1810,6 +2114,28 @@ assert_equal(am, an) + def test_sort_flexible(self): + "Test sort on flexible dtype." + a = array([(3, 3), (3, 2), (2, 2), (2, 1), (1, 0), (1, 1), (1, 2)], + mask=[(0, 0), (0, 1), (0, 0), (0, 0), (1, 0), (0, 0), (0, 0)], + dtype=[('A', int), ('B', int)]) + # + test = sort(a) + b = array([(1, 1), (1, 2), (2, 1), (2, 2), (3, 3), (3, 2), (1, 0)], + mask=[(0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 1), (1, 0)], + dtype=[('A', int), ('B', int)]) + assert_equal(test, b) + assert_equal(test.mask, b.mask) + # + test = sort(a, endwith=False) + b = array([(1, 0), (1, 1), (1, 2), (2, 1), (2, 2), (3, 2), (3, 3),], + mask=[(1, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 1), (0, 0),], + dtype=[('A', int), ('B', int)]) + assert_equal(test, b) + assert_equal(test.mask, b.mask) + # + + def test_squeeze(self): "Check squeeze" data = masked_array([[1,2,3]]) @@ -1883,15 +2209,15 @@ assert_equal(x.tolist(), [(1,1.1,'one'),(2,2.2,'two'),(None,None,None)]) - def test_torecords(self): + def test_toflex(self): "Test the conversion to records" data = arange(10) - record = data.torecords() + record = data.toflex() assert_equal(record['_data'], data._data) assert_equal(record['_mask'], data._mask) # data[[0,1,2,-1]] = masked - record = data.torecords() + record = data.toflex() assert_equal(record['_data'], data._data) assert_equal(record['_mask'], data._mask) # @@ -1901,7 +2227,7 @@ np.random.rand(10))], dtype=ndtype) data[[0,1,2,-1]] = masked - record = data.torecords() + record = data.toflex() assert_equal(record['_data'], data._data) assert_equal(record['_mask'], data._mask) # @@ -1911,10 +2237,29 @@ np.random.rand(10))], dtype=ndtype) data[[0,1,2,-1]] = masked - record = data.torecords() - assert_equal(record['_data'], data._data) - assert_equal(record['_mask'], data._mask) + record = data.toflex() + assert_equal_records(record['_data'], data._data) + assert_equal_records(record['_mask'], data._mask) + + def test_fromflex(self): + "Test the reconstruction of a masked_array from a record" + a = array([1, 2, 3]) + test = fromflex(a.toflex()) + assert_equal(test, a) + assert_equal(test.mask, a.mask) + # + a = array([1, 2, 3], mask=[0, 0, 1]) + test = fromflex(a.toflex()) + assert_equal(test, a) + assert_equal(test.mask, a.mask) + # + a = array([(1, 1.), (2, 2.), (3, 3.)], mask=[(1, 0), (0, 0), (0, 1)], + dtype=[('A', int), ('B', float)]) + test = fromflex(a.toflex()) + assert_equal(test, a) + assert_equal(test.data, a.data) + #------------------------------------------------------------------------------ @@ -2078,7 +2423,7 @@ assert_equal(out, [0, 4, 8]) assert_equal(out.mask, [0, 1, 0]) out = diag(out) - control = array([[0, 0, 0], [0, 4, 0], [0, 0, 8]], + control = array([[0, 0, 0], [0, 4, 0], [0, 0, 8]], mask = [[0, 0, 0], [0, 1, 0], [0, 0, 0]]) assert_equal(out, control) @@ -2531,6 +2876,12 @@ test = mask_or(mask, other) except ValueError: pass + # Using nested arrays + dtype = [('a', np.bool), ('b', [('ba', np.bool), ('bb', np.bool)])] + amask = np.array([(0, (1, 0)), (0, (1, 0))], dtype=dtype) + bmask = np.array([(1, (0, 1)), (0, (0, 0))], dtype=dtype) + cntrl = np.array([(1, (1, 1)), (0, (1, 0))], dtype=dtype) + assert_equal(mask_or(amask, bmask), cntrl) def test_flatten_mask(self): @@ -2543,7 +2894,7 @@ test = flatten_mask(mask) control = np.array([0, 0, 0, 1], dtype=bool) assert_equal(test, control) - + mdtype = [('a', bool), ('b', [('ba', bool), ('bb', bool)])] data = [(0, (0, 0)), (0, (0, 1))] mask = np.array(data, dtype=mdtype) @@ -2691,7 +3042,7 @@ self.failUnless(isinstance(test, MaskedArray)) assert_equal(test._data, a._data) assert_equal(test._mask, a._mask) - + # def test_view_to_type(self): (data, a, controlmask) = self.data @@ -2727,7 +3078,7 @@ assert_equal(test.dtype.names, ('A', 'B')) assert_equal(test['A'], a['a'][-1]) assert_equal(test['B'], a['b'][-1]) - + # def test_view_to_subdtype(self): (data, a, controlmask) = self.data Modified: branches/dynamic_cpu_configuration/numpy/ma/tests/test_extras.py =================================================================== --- branches/dynamic_cpu_configuration/numpy/ma/tests/test_extras.py 2009-02-14 22:42:29 UTC (rev 6368) +++ branches/dynamic_cpu_configuration/numpy/ma/tests/test_extras.py 2009-02-15 12:03:15 UTC (rev 6369) @@ -22,7 +22,7 @@ # def test_masked_all(self): "Tests masked_all" - # Standard dtype + # Standard dtype test = masked_all((2,), dtype=float) control = array([1, 1], mask=[1, 1], dtype=float) assert_equal(test, control) @@ -53,7 +53,7 @@ def test_masked_all_like(self): "Tests masked_all" - # Standard dtype + # Standard dtype base = array([1, 2], dtype=float) test = masked_all_like(base) control = array([1, 1], mask=[1, 1], dtype=float) @@ -338,40 +338,8 @@ c = dot(b,a,False) assert_equal(c, np.dot(b.filled(0),a.filled(0))) - def test_ediff1d(self): - "Tests mediff1d" - x = masked_array(np.arange(5), mask=[1,0,0,0,1]) - difx_d = (x._data[1:]-x._data[:-1]) - difx_m = (x._mask[1:]-x._mask[:-1]) - dx = ediff1d(x) - assert_equal(dx._data, difx_d) - assert_equal(dx._mask, difx_m) - # - dx = ediff1d(x, to_begin=masked) - assert_equal(dx._data, np.r_[0,difx_d]) - assert_equal(dx._mask, np.r_[1,difx_m]) - dx = ediff1d(x, to_begin=[1,2,3]) - assert_equal(dx._data, np.r_[[1,2,3],difx_d]) - assert_equal(dx._mask, np.r_[[0,0,0],difx_m]) - # - dx = ediff1d(x, to_end=masked) - assert_equal(dx._data, np.r_[difx_d,0]) - assert_equal(dx._mask, np.r_[difx_m,1]) - dx = ediff1d(x, to_end=[1,2,3]) - assert_equal(dx._data, np.r_[difx_d,[1,2,3]]) - assert_equal(dx._mask, np.r_[difx_m,[0,0,0]]) - # - dx = ediff1d(x, to_end=masked, to_begin=masked) - assert_equal(dx._data, np.r_[0,difx_d,0]) - assert_equal(dx._mask, np.r_[1,difx_m,1]) - dx = ediff1d(x, to_end=[1,2,3], to_begin=masked) - assert_equal(dx._data, np.r_[0,difx_d,[1,2,3]]) - assert_equal(dx._mask, np.r_[1,difx_m,[0,0,0]]) - # - dx = ediff1d(x._data, to_end=masked, to_begin=masked) - assert_equal(dx._data, np.r_[0,difx_d,0]) - assert_equal(dx._mask, np.r_[1,0,0,0,0,1]) + class TestApplyAlongAxis(TestCase): # "Tests 2D functions" @@ -383,6 +351,7 @@ assert_equal(xa,[[1,4],[7,10]]) + class TestMedian(TestCase): # def test_2d(self): @@ -422,11 +391,12 @@ assert_equal(median(x,0), [[12,10],[8,9],[16,17]]) + class TestCov(TestCase): - # + def setUp(self): self.data = array(np.random.rand(12)) - # + def test_1d_wo_missing(self): "Test cov on 1D variable w/o missing values" x = self.data @@ -434,7 +404,7 @@ assert_almost_equal(np.cov(x, rowvar=False), cov(x, rowvar=False)) assert_almost_equal(np.cov(x, rowvar=False, bias=True), cov(x, rowvar=False, bias=True)) - # + def test_2d_wo_missing(self): "Test cov on 1 2D variable w/o missing values" x = self.data.reshape(3,4) @@ -442,7 +412,7 @@ assert_almost_equal(np.cov(x, rowvar=False), cov(x, rowvar=False)) assert_almost_equal(np.cov(x, rowvar=False, bias=True), cov(x, rowvar=False, bias=True)) - # + def test_1d_w_missing(self): "Test cov 1 1D variable w/missing values" x = self.data @@ -466,7 +436,7 @@ cov(x, x[::-1], rowvar=False)) assert_almost_equal(np.cov(nx, nx[::-1], rowvar=False, bias=True), cov(x, x[::-1], rowvar=False, bias=True)) - # + def test_2d_w_missing(self): "Test cov on 2D variable w/ missing value" x = self.data @@ -486,11 +456,12 @@ np.cov(xf, rowvar=False, bias=True) * x.shape[0]/frac) + class TestCorrcoef(TestCase): - # + def setUp(self): self.data = array(np.random.rand(12)) - # + def test_1d_wo_missing(self): "Test cov on 1D variable w/o missing values" x = self.data @@ -499,7 +470,7 @@ corrcoef(x, rowvar=False)) assert_almost_equal(np.corrcoef(x, rowvar=False, bias=True), corrcoef(x, rowvar=False, bias=True)) - # + def test_2d_wo_missing(self): "Test corrcoef on 1 2D variable w/o missing values" x = self.data.reshape(3,4) @@ -508,7 +479,7 @@ corrcoef(x, rowvar=False)) assert_almost_equal(np.corrcoef(x, rowvar=False, bias=True), corrcoef(x, rowvar=False, bias=True)) - # + def test_1d_w_missing(self): "Test corrcoef 1 1D variable w/missing values" x = self.data @@ -532,7 +503,7 @@ corrcoef(x, x[::-1], rowvar=False)) assert_almost_equal(np.corrcoef(nx, nx[::-1], rowvar=False, bias=True), corrcoef(x, x[::-1], rowvar=False, bias=True)) - # + def test_2d_w_missing(self): "Test corrcoef on 2D variable w/ missing value" x = self.data @@ -575,6 +546,213 @@ assert_almost_equal(a, a_) + +class TestArraySetOps(TestCase): + # + def test_unique1d_onlist(self): + "Test unique1d on list" + data = [1, 1, 1, 2, 2, 3] + test = unique1d(data, return_index=True, return_inverse=True) + self.failUnless(isinstance(test[0], MaskedArray)) + assert_equal(test[0], masked_array([1, 2, 3], mask=[0, 0, 0])) + assert_equal(test[1], [0, 3, 5]) + assert_equal(test[2], [0, 0, 0, 1, 1, 2]) + + def test_unique1d_onmaskedarray(self): + "Test unique1d on masked data w/use_mask=True" + data = masked_array([1, 1, 1, 2, 2, 3], mask=[0, 0, 1, 0, 1, 0]) + test = unique1d(data, return_index=True, return_inverse=True) + assert_equal(test[0], masked_array([1, 2, 3, -1], mask=[0, 0, 0, 1])) + assert_equal(test[1], [0, 3, 5, 2]) + assert_equal(test[2], [0, 0, 3, 1, 3, 2]) + # + data.fill_value = 3 + data = masked_array([1, 1, 1, 2, 2, 3], + mask=[0, 0, 1, 0, 1, 0], fill_value=3) + test = unique1d(data, return_index=True, return_inverse=True) + assert_equal(test[0], masked_array([1, 2, 3, -1], mask=[0, 0, 0, 1])) + assert_equal(test[1], [0, 3, 5, 2]) + assert_equal(test[2], [0, 0, 3, 1, 3, 2]) + + def test_unique1d_allmasked(self): + "Test all masked" + data = masked_array([1, 1, 1], mask=True) + test = unique1d(data, return_index=True, return_inverse=True) + assert_equal(test[0], masked_array([1,], mask=[True])) + assert_equal(test[1], [0]) + assert_equal(test[2], [0, 0, 0]) + # + "Test masked" + data = masked + test = unique1d(data, return_index=True, return_inverse=True) + assert_equal(test[0], masked_array(masked)) + assert_equal(test[1], [0]) + assert_equal(test[2], [0]) + + def test_ediff1d(self): + "Tests mediff1d" + x = masked_array(np.arange(5), mask=[1,0,0,0,1]) + control = array([1, 1, 1, 4], mask=[1, 0, 0, 1]) + test = ediff1d(x) + assert_equal(test, control) + assert_equal(test.data, control.data) + assert_equal(test.mask, control.mask) + # + def test_ediff1d_tobegin(self): + "Test ediff1d w/ to_begin" + x = masked_array(np.arange(5), mask=[1,0,0,0,1]) + test = ediff1d(x, to_begin=masked) + control = array([0, 1, 1, 1, 4], mask=[1, 1, 0, 0, 1]) + assert_equal(test, control) + assert_equal(test.data, control.data) + assert_equal(test.mask, control.mask) + # + test = ediff1d(x, to_begin=[1,2,3]) + control = array([1, 2, 3, 1, 1, 1, 4], mask=[0, 0, 0, 1, 0, 0, 1]) + assert_equal(test, control) + assert_equal(test.data, control.data) + assert_equal(test.mask, control.mask) + # + def test_ediff1d_toend(self): + "Test ediff1d w/ to_end" + x = masked_array(np.arange(5), mask=[1,0,0,0,1]) + test = ediff1d(x, to_end=masked) + control = array([1, 1, 1, 4, 0], mask=[1, 0, 0, 1, 1]) + assert_equal(test, control) + assert_equal(test.data, control.data) + assert_equal(test.mask, control.mask) + # + test = ediff1d(x, to_end=[1,2,3]) + control = array([1, 1, 1, 4, 1, 2, 3], mask=[1, 0, 0, 1, 0, 0, 0]) + assert_equal(test, control) + assert_equal(test.data, control.data) + assert_equal(test.mask, control.mask) + # + def test_ediff1d_tobegin_toend(self): + "Test ediff1d w/ to_begin and to_end" + x = masked_array(np.arange(5), mask=[1,0,0,0,1]) + test = ediff1d(x, to_end=masked, to_begin=masked) + control = array([0, 1, 1, 1, 4, 0], mask=[1, 1, 0, 0, 1, 1]) + assert_equal(test, control) + assert_equal(test.data, control.data) + assert_equal(test.mask, control.mask) + # + test = ediff1d(x, to_end=[1,2,3], to_begin=masked) + control = array([0, 1, 1, 1, 4, 1, 2, 3], mask=[1, 1, 0, 0, 1, 0, 0, 0]) + assert_equal(test, control) + assert_equal(test.data, control.data) + assert_equal(test.mask, control.mask) + # + def test_ediff1d_ndarray(self): + "Test ediff1d w/ a ndarray" + x = np.arange(5) + test = ediff1d(x) + control = array([1, 1, 1, 1], mask=[0, 0, 0, 0]) + assert_equal(test, control) + self.failUnless(isinstance(test, MaskedArray)) + assert_equal(test.data, control.data) + assert_equal(test.mask, control.mask) + # + test = ediff1d(x, to_end=masked, to_begin=masked) + control = array([0, 1, 1, 1, 1, 0], mask=[1, 0, 0, 0, 0, 1]) + self.failUnless(isinstance(test, MaskedArray)) + assert_equal(test.data, control.data) + assert_equal(test.mask, control.mask) + + + def test_intersect1d(self): + "Test intersect1d" + x = array([1, 3, 3, 3], mask=[0, 0, 0, 1]) + y = array([3, 1, 1, 1], mask=[0, 0, 0, 1]) + test = intersect1d(x, y) + control = array([1, 1, 3, 3, -1], mask=[0, 0, 0, 0, 1]) + assert_equal(test, control) + + + def test_intersect1d_nu(self): + "Test intersect1d_nu" + x = array([1, 3, 3, 3], mask=[0, 0, 0, 1]) + y = array([3, 1, 1, 1], mask=[0, 0, 0, 1]) + test = intersect1d_nu(x, y) + control = array([1, 3, -1], mask=[0, 0, 1]) + assert_equal(test, control) + + + def test_setxor1d(self): + "Test setxor1d" + a = array([1, 2, 5, 7, -1], mask=[0, 0, 0, 0, 1]) + b = array([1, 2, 3, 4, 5, -1], mask=[0, 0, 0, 0, 0, -1]) + test = setxor1d(a, b) + assert_equal(test, array([3, 4, 7])) + # + a = array([1, 2, 5, 7, -1], mask=[0, 0, 0, 0, 1]) + b = [1, 2, 3, 4, 5] + test = setxor1d(a, b) + assert_equal(test, array([3, 4, 7, -1], mask=[0, 0, 0, 1])) + # + a = array( [1, 2, 3] ) + b = array( [6, 5, 4] ) + test = setxor1d(a, b) + assert(isinstance(test, MaskedArray)) + assert_equal(test, [1, 2, 3, 4, 5, 6]) + # + a = array([1, 8, 2, 3], mask=[0, 1, 0, 0]) + b = array([6, 5, 4, 8], mask=[0, 0, 0, 1]) + test = setxor1d(a, b) + assert(isinstance(test, MaskedArray)) + assert_equal(test, [1, 2, 3, 4, 5, 6]) + # + assert_array_equal([], setxor1d([],[])) + + + def test_setmember1d( self ): + "Test setmember1d" + a = array([1, 2, 5, 7, -1], mask=[0, 0, 0, 0, 1]) + b = array([1, 2, 3, 4, 5, -1], mask=[0, 0, 0, 0, 0, -1]) + test = setmember1d(a, b) + assert_equal(test, [True, True, True, False, True]) + # + assert_array_equal([], setmember1d([],[])) + + + def test_union1d( self ): + "Test union1d" + a = array([1, 2, 5, 7, -1], mask=[0, 0, 0, 0, 1]) + b = array([1, 2, 3, 4, 5, -1], mask=[0, 0, 0, 0, 0, -1]) + test = union1d(a, b) + control = array([1, 2, 3, 4, 5, 7, -1], mask=[0, 0, 0, 0, 0, 0, 1]) + assert_equal(test, control) + # + assert_array_equal([], setmember1d([],[])) + + + def test_setdiff1d( self ): + "Test setdiff1d" + a = array([6, 5, 4, 7, 1, 2, 1], mask=[0, 0, 0, 0, 0, 0, 1]) + b = array([2, 4, 3, 3, 2, 1, 5]) + test = setdiff1d(a, b) + assert_equal(test, array([6, 7, -1], mask=[0, 0, 1])) + # + a = arange(10) + b = arange(8) + assert_equal(setdiff1d(a, b), array([8, 9])) + + + def test_setdiff1d_char_array(self): + "Test setdiff1d_charray" + a = np.array(['a','b','c']) + b = np.array(['a','b','s']) + assert_array_equal(setdiff1d(a,b), np.array(['c'])) + + + + +class TestShapeBase(TestCase): + # + def test_atleast1d(self): + pass + + ############################################################################### #------------------------------------------------------------------------------ if __name__ == "__main__": Modified: branches/dynamic_cpu_configuration/numpy/ma/tests/test_mrecords.py =================================================================== --- branches/dynamic_cpu_configuration/numpy/ma/tests/test_mrecords.py 2009-02-14 22:42:29 UTC (rev 6368) +++ branches/dynamic_cpu_configuration/numpy/ma/tests/test_mrecords.py 2009-02-15 12:03:15 UTC (rev 6369) @@ -334,8 +334,8 @@ mult[0] = masked mult[1] = (1, 1, 1) mult.filled(0) - assert_equal(mult.filled(0), - np.array([(0,0,0),(1,1,1)], dtype=mult.dtype)) + assert_equal_records(mult.filled(0), + np.array([(0,0,0),(1,1,1)], dtype=mult.dtype)) class TestView(TestCase): Modified: branches/dynamic_cpu_configuration/numpy/ma/tests/test_subclassing.py =================================================================== --- branches/dynamic_cpu_configuration/numpy/ma/tests/test_subclassing.py 2009-02-14 22:42:29 UTC (rev 6368) +++ branches/dynamic_cpu_configuration/numpy/ma/tests/test_subclassing.py 2009-02-15 12:03:15 UTC (rev 6369) @@ -153,5 +153,3 @@ ################################################################################ if __name__ == '__main__': run_module_suite() - - Modified: branches/dynamic_cpu_configuration/numpy/ma/testutils.py =================================================================== --- branches/dynamic_cpu_configuration/numpy/ma/testutils.py 2009-02-14 22:42:29 UTC (rev 6368) +++ branches/dynamic_cpu_configuration/numpy/ma/testutils.py 2009-02-15 12:03:15 UTC (rev 6369) @@ -110,14 +110,14 @@ return _assert_equal_on_sequences(actual.tolist(), desired.tolist(), err_msg='') - elif actual_dtype.char in "OV" and desired_dtype.char in "OV": - if (actual_dtype != desired_dtype) and actual_dtype: - msg = build_err_msg([actual_dtype, desired_dtype], - err_msg, header='', names=('actual', 'desired')) - raise ValueError(msg) - return _assert_equal_on_sequences(actual.tolist(), - desired.tolist(), - err_msg='') +# elif actual_dtype.char in "OV" and desired_dtype.char in "OV": +# if (actual_dtype != desired_dtype) and actual_dtype: +# msg = build_err_msg([actual_dtype, desired_dtype], +# err_msg, header='', names=('actual', 'desired')) +# raise ValueError(msg) +# return _assert_equal_on_sequences(actual.tolist(), +# desired.tolist(), +# err_msg='') return assert_array_equal(actual, desired, err_msg) @@ -171,15 +171,14 @@ # yf = filled(y) # Allocate a common mask and refill m = mask_or(getmask(x), getmask(y)) - x = masked_array(x, copy=False, mask=m, subok=False) - y = masked_array(y, copy=False, mask=m, subok=False) + x = masked_array(x, copy=False, mask=m, keep_mask=False, subok=False) + y = masked_array(y, copy=False, mask=m, keep_mask=False, subok=False) if ((x is masked) and not (y is masked)) or \ ((y is masked) and not (x is masked)): msg = build_err_msg([x, y], err_msg=err_msg, verbose=verbose, header=header, names=('x', 'y')) raise ValueError(msg) # OK, now run the basic tests on filled versions - comparison = getattr(np, comparison.__name__, lambda x,y: True) return utils.assert_array_compare(comparison, x.filled(fill_value), y.filled(fill_value), @@ -189,7 +188,8 @@ def assert_array_equal(x, y, err_msg='', verbose=True): """Checks the elementwise equality of two masked arrays.""" - assert_array_compare(equal, x, y, err_msg=err_msg, verbose=verbose, + assert_array_compare(operator.__eq__, x, y, + err_msg=err_msg, verbose=verbose, header='Arrays are not equal') @@ -223,7 +223,8 @@ def assert_array_less(x, y, err_msg='', verbose=True): "Checks that x is smaller than y elementwise." - assert_array_compare(less, x, y, err_msg=err_msg, verbose=verbose, + assert_array_compare(operator.__lt__, x, y, + err_msg=err_msg, verbose=verbose, header='Arrays are not less-ordered') Modified: branches/dynamic_cpu_configuration/numpy/numarray/util.py =================================================================== --- branches/dynamic_cpu_configuration/numpy/numarray/util.py 2009-02-14 22:42:29 UTC (rev 6368) +++ branches/dynamic_cpu_configuration/numpy/numarray/util.py 2009-02-15 12:03:15 UTC (rev 6369) @@ -1,7 +1,7 @@ import os import numpy -__all__ = ['MathDomainError', 'UnderflowError', 'NumOverflowError', +__all__ = ['MathDomainError', 'UnderflowError', 'NumOverflowError', 'handleError', 'get_numarray_include_dirs'] class MathDomainError(ArithmeticError): pass Modified: branches/dynamic_cpu_configuration/numpy/oldnumeric/arrayfns.py =================================================================== --- branches/dynamic_cpu_configuration/numpy/oldnumeric/arrayfns.py 2009-02-14 22:42:29 UTC (rev 6368) +++ branches/dynamic_cpu_configuration/numpy/oldnumeric/arrayfns.py 2009-02-15 12:03:15 UTC (rev 6369) @@ -1,8 +1,8 @@ """Backward compatible with arrayfns from Numeric """ -__all__ = ['array_set', 'construct3', 'digitize', 'error', 'find_mask', - 'histogram', 'index_sort', 'interp', 'nz', 'reverse', 'span', +__all__ = ['array_set', 'construct3', 'digitize', 'error', 'find_mask', + 'histogram', 'index_sort', 'interp', 'nz', 'reverse', 'span', 'to_corners', 'zmin_zmax'] import numpy as np Modified: branches/dynamic_cpu_configuration/numpy/oldnumeric/mlab.py =================================================================== --- branches/dynamic_cpu_configuration/numpy/oldnumeric/mlab.py 2009-02-14 22:42:29 UTC (rev 6368) +++ branches/dynamic_cpu_configuration/numpy/oldnumeric/mlab.py 2009-02-15 12:03:15 UTC (rev 6369) @@ -1,7 +1,7 @@ # This module is for compatibility only. All functions are defined elsewhere. __all__ = ['rand', 'tril', 'trapz', 'hanning', 'rot90', 'triu', 'diff', 'angle', - 'roots', 'ptp', 'kaiser', 'randn', 'cumprod', 'diag', 'msort', + 'roots', 'ptp', 'kaiser', 'randn', 'cumprod', 'diag', 'msort', 'LinearAlgebra', 'RandomArray', 'prod', 'std', 'hamming', 'flipud', 'max', 'blackman', 'corrcoef', 'bartlett', 'eye', 'squeeze', 'sinc', 'tri', 'cov', 'svd', 'min', 'median', 'fliplr', 'eig', 'mean'] Modified: branches/dynamic_cpu_configuration/numpy/oldnumeric/rng.py =================================================================== --- branches/dynamic_cpu_configuration/numpy/oldnumeric/rng.py 2009-02-14 22:42:29 UTC (rev 6368) +++ branches/dynamic_cpu_configuration/numpy/oldnumeric/rng.py 2009-02-15 12:03:15 UTC (rev 6369) @@ -5,7 +5,7 @@ __all__ = ['CreateGenerator','ExponentialDistribution','LogNormalDistribution', - 'NormalDistribution', 'UniformDistribution', 'error', 'ranf', + 'NormalDistribution', 'UniformDistribution', 'error', 'ranf', 'default_distribution', 'random_sample', 'standard_generator'] import numpy.random.mtrand as mt Modified: branches/dynamic_cpu_configuration/numpy/testing/__init__.py =================================================================== --- branches/dynamic_cpu_configuration/numpy/testing/__init__.py 2009-02-14 22:42:29 UTC (rev 6368) +++ branches/dynamic_cpu_configuration/numpy/testing/__init__.py 2009-02-15 12:03:15 UTC (rev 6369) @@ -5,12 +5,10 @@ away. """ -#import unittest from unittest import TestCase import decorators as dec from utils import * -from parametric import ParametricTestCase from numpytest import * from nosetester import NoseTester as Tester from nosetester import run_module_suite Modified: branches/dynamic_cpu_configuration/numpy/testing/decorators.py =================================================================== --- branches/dynamic_cpu_configuration/numpy/testing/decorators.py 2009-02-14 22:42:29 UTC (rev 6368) +++ branches/dynamic_cpu_configuration/numpy/testing/decorators.py 2009-02-15 12:03:15 UTC (rev 6369) @@ -51,8 +51,11 @@ Parameters --------- - skip_condition : bool - Flag to determine whether to skip test (True) or not (False) + skip_condition : bool or callable. + Flag to determine whether to skip test. If the condition is a + callable, it is used at runtime to dynamically make the decision. This + is useful for tests that may require costly imports, to delay the cost + until the test suite is actually executed. msg : string Message to give on raising a SkipTest exception @@ -69,28 +72,66 @@ decorator with the nose.tools.make_decorator function in order to transmit function name, and various other metadata. ''' - if msg is None: - msg = 'Test skipped due to test condition' + def skip_decorator(f): # Local import to avoid a hard nose dependency and only incur the # import time overhead at actual test-time. import nose - def skipper(*args, **kwargs): - if skip_condition: - raise nose.SkipTest, msg + + # Allow for both boolean or callable skip conditions. + if callable(skip_condition): + skip_val = lambda : skip_condition() + else: + skip_val = lambda : skip_condition + + def get_msg(func,msg=None): + """Skip message with information about function being skipped.""" + if msg is None: + out = 'Test skipped due to test condition' + else: + out = '\n'+msg + + return "Skipping test: %s%s" % (func.__name__,out) + + # We need to define *two* skippers because Python doesn't allow both + # return with value and yield inside the same function. + def skipper_func(*args, **kwargs): + """Skipper for normal test functions.""" + if skip_val(): + raise nose.SkipTest(get_msg(f,msg)) else: return f(*args, **kwargs) + + def skipper_gen(*args, **kwargs): + """Skipper for test generators.""" + if skip_val(): + raise nose.SkipTest(get_msg(f,msg)) + else: + for x in f(*args, **kwargs): + yield x + + # Choose the right skipper to use when building the actual decorator. + if nose.util.isgenerator(f): + skipper = skipper_gen + else: + skipper = skipper_func + return nose.tools.make_decorator(f)(skipper) + return skip_decorator -def knownfailureif(skip_condition, msg=None): - ''' Make function raise KnownFailureTest exception if skip_condition is true +def knownfailureif(fail_condition, msg=None): + ''' Make function raise KnownFailureTest exception if fail_condition is true + Parameters --------- - skip_condition : bool - Flag to determine whether to mark test as known failure (True) - or not (False) + fail_condition : bool or callable. + Flag to determine whether to mark test as known failure (True) + or not (False). If the condition is a callable, it is used at + runtime to dynamically make the decision. This is useful for + tests that may require costly imports, to delay the cost + until the test suite is actually executed. msg : string Message to give on raising a KnownFailureTest exception @@ -109,15 +150,23 @@ ''' if msg is None: msg = 'Test skipped due to known failure' - def skip_decorator(f): + + # Allow for both boolean or callable known failure conditions. + if callable(fail_condition): + fail_val = lambda : fail_condition() + else: + fail_val = lambda : fail_condition + + def knownfail_decorator(f): # Local import to avoid a hard nose dependency and only incur the # import time overhead at actual test-time. import nose from noseclasses import KnownFailureTest - def skipper(*args, **kwargs): - if skip_condition: + def knownfailer(*args, **kwargs): + if fail_val(): raise KnownFailureTest, msg else: return f(*args, **kwargs) - return nose.tools.make_decorator(f)(skipper) - return skip_decorator + return nose.tools.make_decorator(f)(knownfailer) + + return knownfail_decorator Modified: branches/dynamic_cpu_configuration/numpy/testing/noseclasses.py =================================================================== --- branches/dynamic_cpu_configuration/numpy/testing/noseclasses.py 2009-02-14 22:42:29 UTC (rev 6368) +++ branches/dynamic_cpu_configuration/numpy/testing/noseclasses.py 2009-02-15 12:03:15 UTC (rev 6369) @@ -1,4 +1,6 @@ -# These classes implement a doctest runner plugin for nose. +# These classes implement a doctest runner plugin for nose, a "known failure" +# error class, and a customized TestProgram for NumPy. + # Because this module imports nose directly, it should not # be used except by nosetester.py to avoid a general NumPy # dependency on nose. @@ -6,6 +8,7 @@ import os import doctest +import nose from nose.plugins import doctests as npd from nose.plugins.errorclass import ErrorClass, ErrorClassPlugin from nose.plugins.base import Plugin @@ -251,7 +254,7 @@ class KnownFailure(ErrorClassPlugin): - '''Plugin that installs a KNOWNFAIL error class for the + '''Plugin that installs a KNOWNFAIL error class for the KnownFailureClass exception. When KnownFailureTest is raised, the exception will be logged in the knownfail attribute of the result, 'K' or 'KNOWNFAIL' (verbose) will be output, and the @@ -275,3 +278,25 @@ disable = getattr(options, 'noKnownFail', False) if disable: self.enabled = False + + + +# Because nose currently discards the test result object, but we need +# to return it to the user, override TestProgram.runTests to retain +# the result +class NumpyTestProgram(nose.core.TestProgram): + def runTests(self): + """Run Tests. Returns true on success, false on failure, and + sets self.success to the same value. + """ + if self.testRunner is None: + self.testRunner = nose.core.TextTestRunner(stream=self.config.stream, + verbosity=self.config.verbosity, + config=self.config) + plug_runner = self.config.plugins.prepareTestRunner(self.testRunner) + if plug_runner is not None: + self.testRunner = plug_runner + + self.result = self.testRunner.run(self.test) + self.success = self.result.wasSuccessful() + return self.success Modified: branches/dynamic_cpu_configuration/numpy/testing/nosetester.py =================================================================== --- branches/dynamic_cpu_configuration/numpy/testing/nosetester.py 2009-02-14 22:42:29 UTC (rev 6368) +++ branches/dynamic_cpu_configuration/numpy/testing/nosetester.py 2009-02-15 12:03:15 UTC (rev 6369) @@ -5,7 +5,6 @@ ''' import os import sys -import warnings def get_package_name(filepath): # find the package name given a path name that's part of the package @@ -28,7 +27,6 @@ pkg_name.reverse() return '.'.join(pkg_name) - def import_nose(): """ Import nose only when needed. """ @@ -166,8 +164,8 @@ print "nose version %d.%d.%d" % nose.__versioninfo__ - def test(self, label='fast', verbose=1, extra_argv=None, doctests=False, - coverage=False, **kwargs): + def prepare_test_args(self, label='fast', verbose=1, extra_argv=None, + doctests=False, coverage=False): ''' Run tests for module using nose %(test_header)s @@ -179,39 +177,6 @@ http://nedbatchelder.com/code/modules/coverage.html) ''' - old_args = set(['level', 'verbosity', 'all', 'sys_argv', - 'testcase_pattern']) - unexpected_args = set(kwargs.keys()) - old_args - if len(unexpected_args) > 0: - ua = ', '.join(unexpected_args) - raise TypeError("test() got unexpected arguments: %s" % ua) - - # issue a deprecation warning if any of the pre-1.2 arguments to - # test are given - if old_args.intersection(kwargs.keys()): - warnings.warn("This method's signature will change in the next " \ - "release; the level, verbosity, all, sys_argv, " \ - "and testcase_pattern keyword arguments will be " \ - "removed. Please update your code.", - DeprecationWarning, stacklevel=2) - - # Use old arguments if given (where it makes sense) - # For the moment, level and sys_argv are ignored - - # replace verbose with verbosity - if kwargs.get('verbosity') is not None: - verbose = kwargs.get('verbosity') - # cap verbosity at 3 because nose becomes *very* verbose beyond that - verbose = min(verbose, 3) - - import utils - utils.verbose = verbose - - # if all evaluates as True, omit attribute filter and run doctests - if kwargs.get('all'): - label = '' - doctests = True - # if doctests is in the extra args, remove it and set the doctest # flag so the NumPy doctester is used instead if extra_argv and '--with-doctest' in extra_argv: @@ -221,9 +186,6 @@ argv = self._test_argv(label, verbose, extra_argv) if doctests: argv += ['--with-numpydoctest'] - print "Running unit tests and doctests for %s" % self.package_name - else: - print "Running unit tests for %s" % self.package_name if coverage: argv+=['--cover-package=%s' % self.package_name, '--with-coverage', @@ -237,33 +199,8 @@ argv += ['--exclude','swig_ext'] argv += ['--exclude','array_from_pyobj'] - self._show_system_info() - nose = import_nose() - # Because nose currently discards the test result object, but we need - # to return it to the user, override TestProgram.runTests to retain - # the result - class NumpyTestProgram(nose.core.TestProgram): - def runTests(self): - """Run Tests. Returns true on success, false on failure, and - sets self.success to the same value. - """ - if self.testRunner is None: - self.testRunner = nose.core.TextTestRunner(stream=self.config.stream, - verbosity=self.config.verbosity, - config=self.config) - plug_runner = self.config.plugins.prepareTestRunner(self.testRunner) - if plug_runner is not None: - self.testRunner = plug_runner - self.result = self.testRunner.run(self.test) - self.success = self.result.wasSuccessful() - return self.success - - # reset doctest state on every run - import doctest - doctest.master = None - # construct list of plugins, omitting the existing doctest plugin import nose.plugins.builtin from noseclasses import NumpyDoctest, KnownFailure @@ -271,10 +208,46 @@ for p in nose.plugins.builtin.plugins: plug = p() if plug.name == 'doctest': + # skip the builtin doctest plugin continue plugins.append(plug) + return argv, plugins + + def test(self, label='fast', verbose=1, extra_argv=None, doctests=False, + coverage=False): + ''' Run tests for module using nose + + %(test_header)s + doctests : boolean + If True, run doctests in module, default False + coverage : boolean + If True, report coverage of NumPy code, default False + (Requires the coverage module: + http://nedbatchelder.com/code/modules/coverage.html) + ''' + + # cap verbosity at 3 because nose becomes *very* verbose beyond that + verbose = min(verbose, 3) + + import utils + utils.verbose = verbose + + if doctests: + print "Running unit tests and doctests for %s" % self.package_name + else: + print "Running unit tests for %s" % self.package_name + + self._show_system_info() + + # reset doctest state on every run + import doctest + doctest.master = None + + argv, plugins = self.prepare_test_args(label, verbose, extra_argv, + doctests, coverage) + from noseclasses import NumpyTestProgram t = NumpyTestProgram(argv=argv, exit=False, plugins=plugins) return t.result @@ -286,9 +259,10 @@ print "Running benchmarks for %s" % self.package_name self._show_system_info() - nose = import_nose() argv = self._test_argv(label, verbose, extra_argv) argv += ['--match', r'(?:^|[\\b_\\.%s-])[Bb]ench' % os.sep] + + nose = import_nose() return nose.run(argv=argv) # generate method docstrings Modified: branches/dynamic_cpu_configuration/numpy/testing/numpytest.py =================================================================== --- branches/dynamic_cpu_configuration/numpy/testing/numpytest.py 2009-02-14 22:42:29 UTC (rev 6368) +++ branches/dynamic_cpu_configuration/numpy/testing/numpytest.py 2009-02-15 12:03:15 UTC (rev 6369) @@ -1,91 +1,16 @@ import os -import re import sys -import imp -import types -import unittest import traceback -import warnings -__all__ = ['set_package_path', 'set_local_path', 'restore_path', - 'IgnoreException', 'NumpyTestCase', 'NumpyTest', 'importall',] +__all__ = ['IgnoreException', 'importall',] DEBUG=0 -from numpy.testing.utils import jiffies get_frame = sys._getframe class IgnoreException(Exception): "Ignoring this exception due to disabled feature" -def set_package_path(level=1): - """ Prepend package directory to sys.path. - - set_package_path should be called from a test_file.py that - satisfies the following tree structure: - - //test_file.py - - Then the first existing path name from the following list - - /build/lib.- - /.. - - is prepended to sys.path. - The caller is responsible for removing this path by using - - restore_path() - """ - warnings.warn("set_package_path will be removed in NumPy 1.3; please " - "update your code", DeprecationWarning, stacklevel=2) - - from distutils.util import get_platform - f = get_frame(level) - if f.f_locals['__name__']=='__main__': - testfile = sys.argv[0] - else: - testfile = f.f_locals['__file__'] - d = os.path.dirname(os.path.dirname(os.path.abspath(testfile))) - d1 = os.path.join(d,'build','lib.%s-%s'%(get_platform(),sys.version[:3])) - if not os.path.isdir(d1): - d1 = os.path.dirname(d) - if DEBUG: - print 'Inserting %r to sys.path for test_file %r' % (d1, testfile) - sys.path.insert(0,d1) - return - - -def set_local_path(reldir='', level=1): - """ Prepend local directory to sys.path. - - The caller is responsible for removing this path by using - - restore_path() - """ - warnings.warn("set_local_path will be removed in NumPy 1.3; please " - "update your code", DeprecationWarning, stacklevel=2) - - f = get_frame(level) - if f.f_locals['__name__']=='__main__': - testfile = sys.argv[0] - else: - testfile = f.f_locals['__file__'] - local_path = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(testfile)),reldir)) - if DEBUG: - print 'Inserting %r to sys.path' % (local_path) - sys.path.insert(0,local_path) - return - -def restore_path(): - warnings.warn("restore_path will be removed in NumPy 1.3; please " - "update your code", DeprecationWarning, stacklevel=2) - - if DEBUG: - print 'Removing %r from sys.path' % (sys.path[0]) - del sys.path[0] - return - - def output_exception(printstream = sys.stdout): try: type, value, tb = sys.exc_info() @@ -99,576 +24,6 @@ type = value = tb = None # clean up return - -class _dummy_stream: - def __init__(self,stream): - self.data = [] - self.stream = stream - def write(self,message): - if not self.data and not message.startswith('E'): - self.stream.write(message) - self.stream.flush() - message = '' - self.data.append(message) - def writeln(self,message): - self.write(message+'\n') - def flush(self): - self.stream.flush() - - -class NumpyTestCase (unittest.TestCase): - def __init__(self, *args, **kwds): - warnings.warn("NumpyTestCase will be removed in the next release; please update your code to use nose or unittest", - DeprecationWarning, stacklevel=2) - unittest.TestCase.__init__(self, *args, **kwds) - - def measure(self,code_str,times=1): - """ Return elapsed time for executing code_str in the - namespace of the caller for given times. - """ - frame = get_frame(1) - locs,globs = frame.f_locals,frame.f_globals - code = compile(code_str, - 'NumpyTestCase runner for '+self.__class__.__name__, - 'exec') - i = 0 - elapsed = jiffies() - while i>sys.stderr,yellow_text('Warning: %s' % (message)) - sys.stderr.flush() - def info(self, message): - print>>sys.stdout, message - sys.stdout.flush() - - def rundocs(self, filename=None): - """ Run doc string tests found in filename. - """ - import doctest - if filename is None: - f = get_frame(1) - filename = f.f_globals['__file__'] - name = os.path.splitext(os.path.basename(filename))[0] - path = [os.path.dirname(filename)] - file, pathname, description = imp.find_module(name, path) - try: - m = imp.load_module(name, file, pathname, description) - finally: - file.close() - if sys.version[:3]<'2.4': - doctest.testmod(m, verbose=False) - else: - tests = doctest.DocTestFinder().find(m) - runner = doctest.DocTestRunner(verbose=False) - for test in tests: - runner.run(test) - return - - -def _get_all_method_names(cls): - names = dir(cls) - if sys.version[:3]<='2.1': - for b in cls.__bases__: - for n in dir(b)+_get_all_method_names(b): - if n not in names: - names.append(n) - return names - - -# for debug build--check for memory leaks during the test. -class _NumPyTextTestResult(unittest._TextTestResult): - def startTest(self, test): - unittest._TextTestResult.startTest(self, test) - if self.showAll: - N = len(sys.getobjects(0)) - self._totnumobj = N - self._totrefcnt = sys.gettotalrefcount() - return - - def stopTest(self, test): - if self.showAll: - N = len(sys.getobjects(0)) - self.stream.write("objects: %d ===> %d; " % (self._totnumobj, N)) - self.stream.write("refcnts: %d ===> %d\n" % (self._totrefcnt, - sys.gettotalrefcount())) - return - -class NumPyTextTestRunner(unittest.TextTestRunner): - def _makeResult(self): - return _NumPyTextTestResult(self.stream, self.descriptions, self.verbosity) - - -class NumpyTest: - """ Numpy tests site manager. - - Usage: NumpyTest().test(level=1,verbosity=1) - - is package name or its module object. - - Package is supposed to contain a directory tests/ with test_*.py - files where * refers to the names of submodules. See .rename() - method to redefine name mapping between test_*.py files and names of - submodules. Pattern test_*.py can be overwritten by redefining - .get_testfile() method. - - test_*.py files are supposed to define a classes, derived from - NumpyTestCase or unittest.TestCase, with methods having names - starting with test or bench or check. The names of TestCase classes - must have a prefix test. This can be overwritten by redefining - .check_testcase_name() method. - - And that is it! No need to implement test or test_suite functions - in each .py file. - - Old-style test_suite(level=1) hooks are also supported. - """ - _check_testcase_name = re.compile(r'test.*|Test.*').match - def check_testcase_name(self, name): - """ Return True if name matches TestCase class. - """ - return not not self._check_testcase_name(name) - - testfile_patterns = ['test_%(modulename)s.py'] - def get_testfile(self, module, verbosity = 0): - """ Return path to module test file. - """ - mstr = self._module_str - short_module_name = self._get_short_module_name(module) - d = os.path.split(module.__file__)[0] - test_dir = os.path.join(d,'tests') - local_test_dir = os.path.join(os.getcwd(),'tests') - if os.path.basename(os.path.dirname(local_test_dir)) \ - == os.path.basename(os.path.dirname(test_dir)): - test_dir = local_test_dir - for pat in self.testfile_patterns: - fn = os.path.join(test_dir, pat % {'modulename':short_module_name}) - if os.path.isfile(fn): - return fn - if verbosity>1: - self.warn('No test file found in %s for module %s' \ - % (test_dir, mstr(module))) - return - - def __init__(self, package=None): - warnings.warn("NumpyTest will be removed in the next release; please update your code to use nose or unittest", - DeprecationWarning, stacklevel=2) - if package is None: - from numpy.distutils.misc_util import get_frame - f = get_frame(1) - package = f.f_locals.get('__name__',f.f_globals.get('__name__',None)) - assert package is not None - self.package = package - self._rename_map = {} - - def rename(self, **kws): - """Apply renaming submodule test file test_.py to - test_.py. - - Usage: self.rename(name='newname') before calling the - self.test() method. - - If 'newname' is None, then no tests will be executed for a given - module. - """ - for k,v in kws.items(): - self._rename_map[k] = v - return - - def _module_str(self, module): - filename = module.__file__[-30:] - if filename!=module.__file__: - filename = '...'+filename - return '' % (module.__name__, filename) - - def _get_method_names(self,clsobj,level): - names = [] - for mthname in _get_all_method_names(clsobj): - if mthname[:5] not in ['bench','check'] \ - and mthname[:4] not in ['test']: - continue - mth = getattr(clsobj, mthname) - if type(mth) is not types.MethodType: - continue - d = mth.im_func.func_defaults - if d is not None: - mthlevel = d[0] - else: - mthlevel = 1 - if level>=mthlevel: - if mthname not in names: - names.append(mthname) - for base in clsobj.__bases__: - for n in self._get_method_names(base,level): - if n not in names: - names.append(n) - return names - - def _get_short_module_name(self, module): - d,f = os.path.split(module.__file__) - short_module_name = os.path.splitext(os.path.basename(f))[0] - if short_module_name=='__init__': - short_module_name = module.__name__.split('.')[-1] - short_module_name = self._rename_map.get(short_module_name,short_module_name) - return short_module_name - - def _get_module_tests(self, module, level, verbosity): - mstr = self._module_str - - short_module_name = self._get_short_module_name(module) - if short_module_name is None: - return [] - - test_file = self.get_testfile(module, verbosity) - - if test_file is None: - return [] - - if not os.path.isfile(test_file): - if short_module_name[:5]=='info_' \ - and short_module_name[5:]==module.__name__.split('.')[-2]: - return [] - if short_module_name in ['__cvs_version__','__svn_version__']: - return [] - if short_module_name[-8:]=='_version' \ - and short_module_name[:-8]==module.__name__.split('.')[-2]: - return [] - if verbosity>1: - self.warn(test_file) - self.warn(' !! No test file %r found for %s' \ - % (os.path.basename(test_file), mstr(module))) - return [] - - if test_file in self.test_files: - return [] - - parent_module_name = '.'.join(module.__name__.split('.')[:-1]) - test_module_name,ext = os.path.splitext(os.path.basename(test_file)) - test_dir_module = parent_module_name+'.tests' - test_module_name = test_dir_module+'.'+test_module_name - - if test_dir_module not in sys.modules: - sys.modules[test_dir_module] = imp.new_module(test_dir_module) - - old_sys_path = sys.path[:] - try: - f = open(test_file,'r') - test_module = imp.load_module(test_module_name, f, - test_file, ('.py', 'r', 1)) - f.close() - except: - sys.path[:] = old_sys_path - self.warn('FAILURE importing tests for %s' % (mstr(module))) - output_exception(sys.stderr) - return [] - sys.path[:] = old_sys_path - - self.test_files.append(test_file) - - return self._get_suite_list(test_module, level, module.__name__) - - def _get_suite_list(self, test_module, level, module_name='__main__', - verbosity=1): - suite_list = [] - if hasattr(test_module, 'test_suite'): - suite_list.extend(test_module.test_suite(level)._tests) - for name in dir(test_module): - obj = getattr(test_module, name) - if type(obj) is not type(unittest.TestCase) \ - or not issubclass(obj, unittest.TestCase) \ - or not self.check_testcase_name(obj.__name__): - continue - for mthname in self._get_method_names(obj,level): - suite = obj(mthname) - if getattr(suite,'isrunnable',lambda mthname:1)(mthname): - suite_list.append(suite) - matched_suite_list = [suite for suite in suite_list \ - if self.testcase_match(suite.id()\ - .replace('__main__.',''))] - if verbosity>=0: - self.info(' Found %s/%s tests for %s' \ - % (len(matched_suite_list), len(suite_list), module_name)) - return matched_suite_list - - def _test_suite_from_modules(self, this_package, level, verbosity): - package_name = this_package.__name__ - modules = [] - for name, module in sys.modules.items(): - if not name.startswith(package_name) or module is None: - continue - if not hasattr(module,'__file__'): - continue - if os.path.basename(os.path.dirname(module.__file__))=='tests': - continue - modules.append((name, module)) - - modules.sort() - modules = [m[1] for m in modules] - - self.test_files = [] - suites = [] - for module in modules: - suites.extend(self._get_module_tests(module, abs(level), verbosity)) - - suites.extend(self._get_suite_list(sys.modules[package_name], - abs(level), verbosity=verbosity)) - return unittest.TestSuite(suites) - - def _test_suite_from_all_tests(self, this_package, level, verbosity): - importall(this_package) - package_name = this_package.__name__ - - # Find all tests/ directories under the package - test_dirs_names = {} - for name, module in sys.modules.items(): - if not name.startswith(package_name) or module is None: - continue - if not hasattr(module, '__file__'): - continue - d = os.path.dirname(module.__file__) - if os.path.basename(d)=='tests': - continue - d = os.path.join(d, 'tests') - if not os.path.isdir(d): - continue - if d in test_dirs_names: - continue - test_dir_module = '.'.join(name.split('.')[:-1]+['tests']) - test_dirs_names[d] = test_dir_module - - test_dirs = test_dirs_names.keys() - test_dirs.sort() - - # For each file in each tests/ directory with a test case in it, - # import the file, and add the test cases to our list - suite_list = [] - testcase_match = re.compile(r'\s*class\s+\w+\s*\(.*TestCase').match - for test_dir in test_dirs: - test_dir_module = test_dirs_names[test_dir] - - if test_dir_module not in sys.modules: - sys.modules[test_dir_module] = imp.new_module(test_dir_module) - - for fn in os.listdir(test_dir): - base, ext = os.path.splitext(fn) - if ext != '.py': - continue - f = os.path.join(test_dir, fn) - - # check that file contains TestCase class definitions: - fid = open(f, 'r') - skip = True - for line in fid: - if testcase_match(line): - skip = False - break - fid.close() - if skip: - continue - - # import the test file - n = test_dir_module + '.' + base - # in case test files import local modules - sys.path.insert(0, test_dir) - fo = None - try: - try: - fo = open(f) - test_module = imp.load_module(n, fo, f, - ('.py', 'U', 1)) - except Exception, msg: - print 'Failed importing %s: %s' % (f,msg) - continue - finally: - if fo: - fo.close() - del sys.path[0] - - suites = self._get_suite_list(test_module, level, - module_name=n, - verbosity=verbosity) - suite_list.extend(suites) - - all_tests = unittest.TestSuite(suite_list) - return all_tests - - def test(self, level=1, verbosity=1, all=True, sys_argv=[], - testcase_pattern='.*'): - """Run Numpy module test suite with level and verbosity. - - level: - None --- do nothing, return None - < 0 --- scan for tests of level=abs(level), - don't run them, return TestSuite-list - > 0 --- scan for tests of level, run them, - return TestRunner - > 10 --- run all tests (same as specifying all=True). - (backward compatibility). - - verbosity: - >= 0 --- show information messages - > 1 --- show warnings on missing tests - - all: - True --- run all test files (like self.testall()) - False (default) --- only run test files associated with a module - - sys_argv --- replacement of sys.argv[1:] during running - tests. - - testcase_pattern --- run only tests that match given pattern. - - It is assumed (when all=False) that package tests suite follows - the following convention: for each package module, there exists - file /tests/test_.py that defines - TestCase classes (with names having prefix 'test_') with methods - (with names having prefixes 'check_' or 'bench_'); each of these - methods are called when running unit tests. - """ - if level is None: # Do nothing. - return - - if isinstance(self.package, str): - exec 'import %s as this_package' % (self.package) - else: - this_package = self.package - - self.testcase_match = re.compile(testcase_pattern).match - - if all: - all_tests = self._test_suite_from_all_tests(this_package, - level, verbosity) - else: - all_tests = self._test_suite_from_modules(this_package, - level, verbosity) - - if level < 0: - return all_tests - - runner = unittest.TextTestRunner(verbosity=verbosity) - old_sys_argv = sys.argv[1:] - sys.argv[1:] = sys_argv - # Use the builtin displayhook. If the tests are being run - # under IPython (for instance), any doctest test suites will - # fail otherwise. - old_displayhook = sys.displayhook - sys.displayhook = sys.__displayhook__ - try: - r = runner.run(all_tests) - finally: - sys.displayhook = old_displayhook - sys.argv[1:] = old_sys_argv - return r - - def testall(self, level=1,verbosity=1): - """ Run Numpy module test suite with level and verbosity. - - level: - None --- do nothing, return None - < 0 --- scan for tests of level=abs(level), - don't run them, return TestSuite-list - > 0 --- scan for tests of level, run them, - return TestRunner - - verbosity: - >= 0 --- show information messages - > 1 --- show warnings on missing tests - - Different from .test(..) method, this method looks for - TestCase classes from all files in /tests/ - directory and no assumptions are made for naming the - TestCase classes or their methods. - """ - return self.test(level=level, verbosity=verbosity, all=True) - - def run(self): - """ Run Numpy module test suite with level and verbosity - taken from sys.argv. Requires optparse module. - """ - - # delayed import of shlex to reduce startup time - import shlex - - try: - from optparse import OptionParser - except ImportError: - self.warn('Failed to import optparse module, ignoring.') - return self.test() - usage = r'usage: %prog [-v ] [-l ]'\ - r' [-s ""]'\ - r' [-t ""]' - parser = OptionParser(usage) - parser.add_option("-v", "--verbosity", - action="store", - dest="verbosity", - default=1, - type='int') - parser.add_option("-l", "--level", - action="store", - dest="level", - default=1, - type='int') - parser.add_option("-s", "--sys-argv", - action="store", - dest="sys_argv", - default='', - type='string') - parser.add_option("-t", "--testcase-pattern", - action="store", - dest="testcase_pattern", - default=r'.*', - type='string') - (options, args) = parser.parse_args() - return self.test(options.level,options.verbosity, - sys_argv=shlex.split(options.sys_argv or ''), - testcase_pattern=options.testcase_pattern) - - def warn(self, message): - from numpy.distutils.misc_util import yellow_text - print>>sys.stderr,yellow_text('Warning: %s' % (message)) - sys.stderr.flush() - def info(self, message): - print>>sys.stdout, message - sys.stdout.flush() - def importall(package): """ Try recursively to import all subpackages under package. Deleted: branches/dynamic_cpu_configuration/numpy/testing/parametric.py =================================================================== --- branches/dynamic_cpu_configuration/numpy/testing/parametric.py 2009-02-14 22:42:29 UTC (rev 6368) +++ branches/dynamic_cpu_configuration/numpy/testing/parametric.py 2009-02-15 12:03:15 UTC (rev 6369) @@ -1,311 +0,0 @@ -"""Support for parametric tests in unittest. - -:Author: Fernando Perez - -Purpose -======= - -Briefly, the main class in this module allows you to easily and cleanly -(without the gross name-mangling hacks that are normally needed) to write -unittest TestCase classes that have parametrized tests. That is, tests which -consist of multiple sub-tests that scan for example a parameter range, but -where you want each sub-test to: - -* count as a separate test in the statistics. - -* be run even if others in the group error out or fail. - - -The class offers a simple name-based convention to create such tests (see -simple example at the end), in one of two ways: - -* Each sub-test in a group can be run fully independently, with the - setUp/tearDown methods being called each time. - -* The whole group can be run with setUp/tearDown being called only once for the - group. This lets you conveniently reuse state that may be very expensive to - compute for multiple tests. Be careful not to corrupt it!!! - - -Caveats -======= - -This code relies on implementation details of the unittest module (some key -methods are heavily modified versions of those, after copying them in). So it -may well break either if you make sophisticated use of the unittest APIs, or if -unittest itself changes in the future. I have only tested this with Python -2.5. - -""" -__docformat__ = "restructuredtext en" - -import unittest -import warnings - -class _ParametricTestCase(unittest.TestCase): - """TestCase subclass with support for parametric tests. - - Subclasses of this class can implement test methods that return a list of - tests and arguments to call those with, to do parametric testing (often - also called 'data driven' testing.""" - - #: Prefix for tests with independent state. These methods will be run with - #: a separate setUp/tearDown call for each test in the group. - _indepParTestPrefix = 'testip' - - #: Prefix for tests with shared state. These methods will be run with - #: a single setUp/tearDown call for the whole group. This is useful when - #: writing a group of tests for which the setup is expensive and one wants - #: to actually share that state. Use with care (especially be careful not - #: to mutate the state you are using, which will alter later tests). - _shareParTestPrefix = 'testsp' - - def __init__(self, methodName = 'runTest'): - warnings.warn("ParametricTestCase will be removed in the next NumPy " - "release", DeprecationWarning) - unittest.TestCase.__init__(self, methodName) - - def exec_test(self,test,args,result): - """Execute a single test. Returns a success boolean""" - - ok = False - try: - test(*args) - ok = True - except self.failureException: - result.addFailure(self, self._exc_info()) - except KeyboardInterrupt: - raise - except: - result.addError(self, self._exc_info()) - - return ok - - def set_testMethodDoc(self,doc): - self._testMethodDoc = doc - self._TestCase__testMethodDoc = doc - - def get_testMethodDoc(self): - return self._testMethodDoc - - testMethodDoc = property(fset=set_testMethodDoc, fget=get_testMethodDoc) - - def get_testMethodName(self): - try: - return getattr(self,"_testMethodName") - except: - return getattr(self,"_TestCase__testMethodName") - - testMethodName = property(fget=get_testMethodName) - - def run_test(self, testInfo,result): - """Run one test with arguments""" - - test,args = testInfo[0],testInfo[1:] - - # Reset the doc attribute to be the docstring of this particular test, - # so that in error messages it prints the actual test's docstring and - # not that of the test factory. - self.testMethodDoc = test.__doc__ - result.startTest(self) - try: - try: - self.setUp() - except KeyboardInterrupt: - raise - except: - result.addError(self, self._exc_info()) - return - - ok = self.exec_test(test,args,result) - - try: - self.tearDown() - except KeyboardInterrupt: - raise - except: - result.addError(self, self._exc_info()) - ok = False - if ok: result.addSuccess(self) - finally: - result.stopTest(self) - - def run_tests(self, tests,result): - """Run many tests with a common setUp/tearDown. - - The entire set of tests is run with a single setUp/tearDown call.""" - - try: - self.setUp() - except KeyboardInterrupt: - raise - except: - result.testsRun += 1 - result.addError(self, self._exc_info()) - return - - saved_doc = self.testMethodDoc - - try: - # Run all the tests specified - for testInfo in tests: - test,args = testInfo[0],testInfo[1:] - - # Set the doc argument for this test. Note that even if we do - # this, the fail/error tracebacks still print the docstring for - # the parent factory, because they only generate the message at - # the end of the run, AFTER we've restored it. There is no way - # to tell the unittest system (without overriding a lot of - # stuff) to extract this information right away, the logic is - # hardcoded to pull it later, since unittest assumes it doesn't - # change. - self.testMethodDoc = test.__doc__ - result.startTest(self) - ok = self.exec_test(test,args,result) - if ok: result.addSuccess(self) - - finally: - # Restore docstring info and run tearDown once only. - self.testMethodDoc = saved_doc - try: - self.tearDown() - except KeyboardInterrupt: - raise - except: - result.addError(self, self._exc_info()) - - def run(self, result=None): - """Test runner.""" - - #print - #print '*** run for method:',self._testMethodName # dbg - #print '*** doc:',self._testMethodDoc # dbg - - if result is None: result = self.defaultTestResult() - - # Independent tests: each gets its own setup/teardown - if self.testMethodName.startswith(self._indepParTestPrefix): - for t in getattr(self,self.testMethodName)(): - self.run_test(t,result) - # Shared-state test: single setup/teardown for all - elif self.testMethodName.startswith(self._shareParTestPrefix): - tests = getattr(self,self.testMethodName,'runTest')() - self.run_tests(tests,result) - # Normal unittest Test methods - else: - unittest.TestCase.run(self,result) - -# The underscore was added to the class name to keep nose from trying -# to run the test class (nose ignores class names that begin with an -# underscore by default). -ParametricTestCase = _ParametricTestCase - -############################################################################# -# Quick and dirty interactive example/test -if __name__ == '__main__': - - class ExampleTestCase(ParametricTestCase): - - #------------------------------------------------------------------- - # An instrumented setUp method so we can see when it gets called and - # how many times per instance - counter = 0 - - def setUp(self): - self.counter += 1 - print 'setUp count: %2s for: %s' % (self.counter, - self.testMethodDoc) - - #------------------------------------------------------------------- - # A standard test method, just like in the unittest docs. - def test_foo(self): - """Normal test for feature foo.""" - pass - - #------------------------------------------------------------------- - # Testing methods that need parameters. These can NOT be named test*, - # since they would be picked up by unittest and called without - # arguments. Instead, call them anything else (I use tst*) and then - # load them via the factories below. - def tstX(self,i): - "Test feature X with parameters." - print 'tstX, i=',i - if i==1 or i==3: - # Test fails - self.fail('i is bad, bad: %s' % i) - - def tstY(self,i): - "Test feature Y with parameters." - print 'tstY, i=',i - if i==1: - # Force an error - 1/0 - - def tstXX(self,i,j): - "Test feature XX with parameters." - print 'tstXX, i=',i,'j=',j - if i==1: - # Test fails - self.fail('i is bad, bad: %s' % i) - - def tstYY(self,i): - "Test feature YY with parameters." - print 'tstYY, i=',i - if i==2: - # Force an error - 1/0 - - def tstZZ(self): - """Test feature ZZ without parameters, needs multiple runs. - - This could be a random test that you want to run multiple times.""" - pass - - #------------------------------------------------------------------- - # Parametric test factories that create the test groups to call the - # above tst* methods with their required arguments. - def testip(self): - """Independent parametric test factory. - - A separate setUp() call is made for each test returned by this - method. - - You must return an iterable (list or generator is fine) containing - tuples with the actual method to be called as the first argument, - and the arguments for that call later.""" - return [(self.tstX,i) for i in range(5)] - - def testip2(self): - """Another independent parametric test factory""" - return [(self.tstY,i) for i in range(5)] - - def testip3(self): - """Test factory combining different subtests. - - This one shows how to assemble calls to different tests.""" - return [(self.tstX,3),(self.tstX,9),(self.tstXX,4,10), - (self.tstZZ,),(self.tstZZ,)] - - def testsp(self): - """Shared parametric test factory - - A single setUp() call is made for all the tests returned by this - method. - """ - return [(self.tstXX,i,i+1) for i in range(5)] - - def testsp2(self): - """Another shared parametric test factory""" - return [(self.tstYY,i) for i in range(5)] - - def testsp3(self): - """Another shared parametric test factory. - - This one simply calls the same test multiple times, without any - arguments. Note that you must still return tuples, even if there - are no arguments.""" - return [(self.tstZZ,) for i in range(10)] - - - # This test class runs normally under unittest's default runner - unittest.main() Copied: branches/dynamic_cpu_configuration/numpy/testing/tests/test_decorators.py (from rev 6368, trunk/numpy/testing/tests/test_decorators.py) Modified: branches/dynamic_cpu_configuration/setup.py =================================================================== --- branches/dynamic_cpu_configuration/setup.py 2009-02-14 22:42:29 UTC (rev 6368) +++ branches/dynamic_cpu_configuration/setup.py 2009-02-15 12:03:15 UTC (rev 6369) @@ -44,14 +44,6 @@ # a lot more robust than what was previously being used. __builtin__.__NUMPY_SETUP__ = True -def setup_doc_files(configuration): - # Add doc sources - configuration.add_data_dir("doc/release") - configuration.add_data_dir("doc/source") - configuration.add_data_dir("doc/sphinxext") - configuration.add_data_files(("doc/Makefile"), ("doc/postprocess.py")) - - def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration @@ -69,8 +61,6 @@ config.get_version('numpy/version.py') # sets config.version - setup_doc_files(config) - return config def setup_package(): From numpy-svn at scipy.org Sun Feb 15 10:44:58 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sun, 15 Feb 2009 09:44:58 -0600 (CST) Subject: [Numpy-svn] r6370 - trunk/doc/sphinxext Message-ID: <20090215154458.20F7FC7C011@scipy.org> Author: ptvirtan Date: 2009-02-15 09:44:47 -0600 (Sun, 15 Feb 2009) New Revision: 6370 Modified: trunk/doc/sphinxext/plot_directive.py Log: sphinxext: clean up plot directive, and merge some features from matplotlib Modified: trunk/doc/sphinxext/plot_directive.py =================================================================== --- trunk/doc/sphinxext/plot_directive.py 2009-02-15 12:03:15 UTC (rev 6369) +++ trunk/doc/sphinxext/plot_directive.py 2009-02-15 15:44:47 UTC (rev 6370) @@ -57,6 +57,9 @@ plot_include_source Default value for the include-source option + plot_formats + The set of files to generate. Default: ['png', 'pdf', 'hires.png'], + ie. everything. TODO ---- @@ -75,22 +78,27 @@ setup.app = app setup.config = app.config setup.confdir = app.confdir - - app.add_config_value('plot_output_dir', '_static', True) + + static_path = '_static' + if hasattr(app.config, 'html_static_path') and app.config.html_static_path: + static_path = app.config.html_static_path[0] + + app.add_config_value('plot_output_dir', static_path, True) app.add_config_value('plot_pre_code', '', True) app.add_config_value('plot_rcparams', sane_rcparameters, True) app.add_config_value('plot_include_source', False, True) + app.add_config_value('plot_formats', ['png', 'hires.png', 'pdf'], True) app.add_directive('plot', plot_directive, True, (0, 1, False), **plot_directive_options) sane_rcparameters = { - 'font.size': 8, - 'axes.titlesize': 8, - 'axes.labelsize': 8, - 'xtick.labelsize': 8, - 'ytick.labelsize': 8, - 'legend.fontsize': 8, + 'font.size': 9, + 'axes.titlesize': 9, + 'axes.labelsize': 9, + 'xtick.labelsize': 9, + 'ytick.labelsize': 9, + 'legend.fontsize': 9, 'figure.figsize': (4, 3), } @@ -134,10 +142,16 @@ # Change the working directory to the directory of the example, so # it can get at its data files, if any. pwd = os.getcwd() + old_sys_path = list(sys.path) if code_path is not None: - os.chdir(os.path.dirname(code_path)) + dirname = os.path.abspath(os.path.dirname(code_path)) + os.chdir(dirname) + sys.path.insert(0, dirname) + + # Redirect stdout stdout = sys.stdout sys.stdout = cStringIO.StringIO() + try: code = unescape_doctest(code) ns = {} @@ -145,9 +159,11 @@ exec code in ns finally: os.chdir(pwd) + sys.path[:] = old_sys_path sys.stdout = stdout return ns + #------------------------------------------------------------------------------ # Generating figures #------------------------------------------------------------------------------ @@ -160,16 +176,19 @@ return (not os.path.exists(derived) or os.stat(derived).st_mtime < os.stat(original).st_mtime) + def makefig(code, code_path, output_dir, output_base, config): """ run a pyplot script and save the low and high res PNGs and a PDF in _static """ - formats = [('png', 100), - ('hires.png', 200), - ('pdf', 50), - ] + included_formats = config.plot_formats + if type(included_formats) is str: + included_formats = eval(included_formats) + + formats = [x for x in [('png', 80), ('hires.png', 200), ('pdf', 50)] + if x[0] in config.plot_formats] all_exists = True @@ -181,26 +200,25 @@ break if all_exists: - return 1 + return [output_base] - # Then look for multi-figure output files, assuming - # if we have some we have all... - i = 0 - while True: - all_exists = True + # Then look for multi-figure output files + image_names = [] + for i in xrange(1000): + image_names.append('%s_%02d' % (output_base, i)) for format, dpi in formats: output_path = os.path.join(output_dir, - '%s_%02d.%s' % (output_base, i, format)) + '%s.%s' % (image_names[-1], format)) if out_of_date(code_path, output_path): all_exists = False break - if all_exists: - i += 1 - else: + if not all_exists: + # assume that if we have one, we have them all + all_exists = (i > 0) break - if i != 0: - return i + if all_exists: + return image_names # We didn't find the files, so build them print "-- Plotting figures %s" % output_base @@ -212,31 +230,24 @@ matplotlib.rcdefaults() matplotlib.rcParams.update(config.plot_rcparams) - try: - run_code(code, code_path) - except: - raise - s = cbook.exception_to_str("Exception running plot %s" % code_path) - warnings.warn(s) - return 0 + # Run code + run_code(code, code_path) + # Collect images + image_names = [] + fig_managers = _pylab_helpers.Gcf.get_all_fig_managers() for i, figman in enumerate(fig_managers): + if len(fig_managers) == 1: + name = output_base + else: + name = "%s_%02d" % (output_base, i) + image_names.append(name) for format, dpi in formats: - if len(fig_managers) == 1: - name = output_base - else: - name = "%s_%02d" % (output_base, i) path = os.path.join(output_dir, '%s.%s' % (name, format)) - try: - figman.canvas.figure.savefig(path, dpi=dpi) - except: - s = cbook.exception_to_str("Exception running plot %s" - % code_path) - warnings.warn(s) - return 0 + figman.canvas.figure.savefig(path, dpi=dpi) - return len(fig_managers) + return image_names #------------------------------------------------------------------------------ # Generating output @@ -303,7 +314,7 @@ document.attributes['_plot_counter'] = counter output_base = '%d-%s' % (counter, os.path.basename(file_name)) - rel_name = relative_path(file_name, setup.confdir) + rel_name = relpath(file_name, setup.confdir) base, ext = os.path.splitext(output_base) if ext in ('.py', '.rst', '.txt'): @@ -334,13 +345,19 @@ f.write(unescape_doctest(code)) f.close() - source_link = relative_path(target_name, rst_dir) + source_link = relpath(target_name, rst_dir) # determine relative reference - link_dir = relative_path(output_dir, rst_dir) + link_dir = relpath(output_dir, rst_dir) # make figures - num_figs = makefig(code, file_name, output_dir, output_base, config) + try: + image_names = makefig(code, file_name, output_dir, output_base, config) + except RuntimeError, err: + reporter = state.memo.reporter + sm = reporter.system_message(3, "Exception occurred rendering plot", + line=lineno) + return [sm] # generate output if options['include-source']: @@ -353,20 +370,6 @@ else: source_code = "" - if num_figs > 0: - image_names = [] - for i in range(num_figs): - if num_figs == 1: - image_names.append(output_base) - else: - image_names.append("%s_%02d" % (output_base, i)) - else: - reporter = state.memo.reporter - sm = reporter.system_message(3, "Exception occurred rendering plot", - line=lineno) - return [sm] - - opts = [':%s: %s' % (key, val) for key, val in options.items() if key in ('alt', 'height', 'width', 'scale', 'align', 'class')] @@ -381,24 +384,49 @@ if len(lines): state_machine.insert_input( lines, state_machine.input_lines.source(0)) + return [] -def relative_path(target, base): - target = os.path.abspath(os.path.normpath(target)) - base = os.path.abspath(os.path.normpath(base)) +if hasattr(os.path, 'relpath'): + relpath = os.path.relpath +else: + def relpath(target, base=os.curdir): + """ + Return a relative path to the target from either the current + dir or an optional base dir. Base can be a directory + specified either as absolute or relative to current dir. + """ - target_parts = target.split(os.path.sep) - base_parts = base.split(os.path.sep) - rel_parts = 0 + if not os.path.exists(target): + raise OSError, 'Target does not exist: '+target - while target_parts and base_parts and target_parts[0] == base_parts[0]: - target_parts.pop(0) - base_parts.pop(0) + if not os.path.isdir(base): + raise OSError, 'Base is not a directory or does not exist: '+base - rel_parts += len(base_parts) - return os.path.sep.join([os.path.pardir] * rel_parts + target_parts) + base_list = (os.path.abspath(base)).split(os.sep) + target_list = (os.path.abspath(target)).split(os.sep) + # On the windows platform the target may be on a completely + # different drive from the base. + if os.name in ['nt','dos','os2'] and base_list[0] <> target_list[0]: + raise OSError, 'Target is on a different drive to base. Target: '+target_list[0].upper()+', base: '+base_list[0].upper() + + # Starting from the filepath root, work out how much of the + # filepath is shared by base and target. + for i in range(min(len(base_list), len(target_list))): + if base_list[i] <> target_list[i]: break + else: + # If we broke out of the loop, i is pointing to the first + # differing path elements. If we didn't break out of the + # loop, i is pointing to identical path elements. + # Increment i so that in all cases it points to the first + # differing path elements. + i+=1 + + rel_list = [os.pardir] * (len(base_list)-i) + target_list[i:] + return os.path.join(*rel_list) + #------------------------------------------------------------------------------ # plot:: directive registration etc. #------------------------------------------------------------------------------ @@ -412,21 +440,11 @@ from docutils.parsers.rst.directives.images import Image align = Image.align -try: - from docutils.parsers.rst import Directive -except ImportError: - from docutils.parsers.rst.directives import _directives +def plot_directive(name, arguments, options, content, lineno, + content_offset, block_text, state, state_machine): + return run(arguments, content, options, state_machine, state, lineno) - def plot_directive(name, arguments, options, content, lineno, - content_offset, block_text, state, state_machine): - return run(arguments, content, options, state_machine, state, lineno) - plot_directive.__doc__ = __doc__ -else: - class plot_directive(Directive): - def run(self): - return run(self.arguments, self.content, self.options, - self.state_machine, self.state, self.lineno) - plot_directive.__doc__ = __doc__ +plot_directive.__doc__ = __doc__ def _option_boolean(arg): if not arg or not arg.strip(): From numpy-svn at scipy.org Tue Feb 17 16:19:13 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Tue, 17 Feb 2009 15:19:13 -0600 (CST) Subject: [Numpy-svn] r6371 - trunk/numpy/core/src Message-ID: <20090217211913.816B7C7C083@scipy.org> Author: charris Date: 2009-02-17 15:19:10 -0600 (Tue, 17 Feb 2009) New Revision: 6371 Modified: trunk/numpy/core/src/arraymethods.c Log: Coding style cleanups. Modified: trunk/numpy/core/src/arraymethods.c =================================================================== --- trunk/numpy/core/src/arraymethods.c 2009-02-15 15:44:47 UTC (rev 6370) +++ trunk/numpy/core/src/arraymethods.c 2009-02-17 21:19:10 UTC (rev 6371) @@ -4,10 +4,10 @@ static PyObject * array_take(PyArrayObject *self, PyObject *args, PyObject *kwds) { - int dimension=MAX_DIMS; + int dimension = MAX_DIMS; PyObject *indices; - PyArrayObject *out=NULL; - NPY_CLIPMODE mode=NPY_RAISE; + PyArrayObject *out = NULL; + NPY_CLIPMODE mode = NPY_RAISE; static char *kwlist[] = {"indices", "axis", "out", "mode", NULL}; if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|O&O&O&", kwlist, @@ -26,9 +26,12 @@ array_fill(PyArrayObject *self, PyObject *args) { PyObject *obj; - if (!PyArg_ParseTuple(args, "O", &obj)) + if (!PyArg_ParseTuple(args, "O", &obj)) { return NULL; - if (PyArray_FillWithScalar(self, obj) < 0) return NULL; + } + if (PyArray_FillWithScalar(self, obj) < 0) { + return NULL; + } Py_INCREF(Py_None); return Py_None; } @@ -37,7 +40,7 @@ array_put(PyArrayObject *self, PyObject *args, PyObject *kwds) { PyObject *indices, *values; - NPY_CLIPMODE mode=NPY_RAISE; + NPY_CLIPMODE mode = NPY_RAISE; static char *kwlist[] = {"indices", "values", "mode", NULL}; if (!PyArg_ParseTupleAndKeywords(args, kwds, "OO|O&", kwlist, @@ -53,7 +56,7 @@ { PyArray_Dims newshape; PyObject *ret; - PyArray_ORDER order=PyArray_CORDER; + PyArray_ORDER order = PyArray_CORDER; int n; if (kwds != NULL) { @@ -64,16 +67,20 @@ "invalid keyword argument"); return NULL; } - if ((PyArray_OrderConverter(ref, &order) == PY_FAIL)) + if ((PyArray_OrderConverter(ref, &order) == PY_FAIL)) { return NULL; + } } n = PyTuple_Size(args); if (n <= 1) { - if (PyTuple_GET_ITEM(args, 0) == Py_None) + if (PyTuple_GET_ITEM(args, 0) == Py_None) { return PyArray_View(self, NULL, NULL); + } if (!PyArg_ParseTuple(args, "O&", PyArray_IntpConverter, - &newshape)) return NULL; + &newshape)) { + return NULL; + } } else { if (!PyArray_IntpConverter(args, &newshape)) { @@ -96,16 +103,18 @@ static PyObject * array_squeeze(PyArrayObject *self, PyObject *args) { - if (!PyArg_ParseTuple(args, "")) return NULL; + if (!PyArg_ParseTuple(args, "")) { + return NULL; + } return PyArray_Squeeze(self); } static PyObject * array_view(PyArrayObject *self, PyObject *args, PyObject *kwds) { - PyObject *out_dtype=NULL; - PyObject *out_type=NULL; - PyArray_Descr *dtype=NULL; + PyObject *out_dtype = NULL; + PyObject *out_type = NULL; + PyArray_Descr *dtype = NULL; static char *kwlist[] = {"dtype", "type", NULL}; if (!PyArg_ParseTupleAndKeywords(args, kwds, "|OO", kwlist, @@ -151,8 +160,8 @@ static PyObject * array_argmax(PyArrayObject *self, PyObject *args, PyObject *kwds) { - int axis=MAX_DIMS; - PyArrayObject *out=NULL; + int axis = MAX_DIMS; + PyArrayObject *out = NULL; static char *kwlist[] = {"axis", "out", NULL}; if (!PyArg_ParseTupleAndKeywords(args, kwds, "|O&O&", kwlist, @@ -168,8 +177,8 @@ static PyObject * array_argmin(PyArrayObject *self, PyObject *args, PyObject *kwds) { - int axis=MAX_DIMS; - PyArrayObject *out=NULL; + int axis = MAX_DIMS; + PyArrayObject *out = NULL; static char *kwlist[] = {"axis", "out", NULL}; if (!PyArg_ParseTupleAndKeywords(args, kwds, "|O&O&", kwlist, @@ -185,8 +194,8 @@ static PyObject * array_max(PyArrayObject *self, PyObject *args, PyObject *kwds) { - int axis=MAX_DIMS; - PyArrayObject *out=NULL; + int axis = MAX_DIMS; + PyArrayObject *out = NULL; static char *kwlist[] = {"axis", "out", NULL}; if (!PyArg_ParseTupleAndKeywords(args, kwds, "|O&O&", kwlist, @@ -202,8 +211,8 @@ static PyObject * array_ptp(PyArrayObject *self, PyObject *args, PyObject *kwds) { - int axis=MAX_DIMS; - PyArrayObject *out=NULL; + int axis = MAX_DIMS; + PyArrayObject *out = NULL; static char *kwlist[] = {"axis", "out", NULL}; if (!PyArg_ParseTupleAndKeywords(args, kwds, "|O&O&", kwlist, @@ -220,8 +229,8 @@ static PyObject * array_min(PyArrayObject *self, PyObject *args, PyObject *kwds) { - int axis=MAX_DIMS; - PyArrayObject *out=NULL; + int axis = MAX_DIMS; + PyArrayObject *out = NULL; static char *kwlist[] = {"axis", "out", NULL}; if (!PyArg_ParseTupleAndKeywords(args, kwds, "|O&O&", kwlist, @@ -239,8 +248,9 @@ { int axis1, axis2; - if (!PyArg_ParseTuple(args, "ii", &axis1, &axis2)) return NULL; - + if (!PyArg_ParseTuple(args, "ii", &axis1, &axis2)) { + return NULL; + } return PyArray_SwapAxes(self, axis1, axis2); } @@ -252,7 +262,7 @@ static PyObject * PyArray_GetField(PyArrayObject *self, PyArray_Descr *typed, int offset) { - PyObject *ret=NULL; + PyObject *ret = NULL; if (offset < 0 || (offset + typed->elsize) > self->descr->elsize) { PyErr_Format(PyExc_ValueError, @@ -268,7 +278,9 @@ self->strides, self->data + offset, self->flags, (PyObject *)self); - if (ret == NULL) return NULL; + if (ret == NULL) { + return NULL; + } Py_INCREF(self); ((PyArrayObject *)ret)->base = (PyObject *)self; @@ -280,7 +292,7 @@ array_getfield(PyArrayObject *self, PyObject *args, PyObject *kwds) { - PyArray_Descr *dtype=NULL; + PyArray_Descr *dtype = NULL; int offset = 0; static char *kwlist[] = {"dtype", "offset", 0}; @@ -302,7 +314,7 @@ PyArray_SetField(PyArrayObject *self, PyArray_Descr *dtype, int offset, PyObject *val) { - PyObject *ret=NULL; + PyObject *ret = NULL; int retval = 0; if (offset < 0 || (offset + dtype->elsize) > self->descr->elsize) { @@ -317,7 +329,9 @@ dtype, self->nd, self->dimensions, self->strides, self->data + offset, self->flags, (PyObject *)self); - if (ret == NULL) return -1; + if (ret == NULL) { + return -1; + } Py_INCREF(self); ((PyArrayObject *)ret)->base = (PyObject *)self; @@ -330,7 +344,7 @@ static PyObject * array_setfield(PyArrayObject *self, PyObject *args, PyObject *kwds) { - PyArray_Descr *dtype=NULL; + PyArray_Descr *dtype = NULL; int offset = 0; PyObject *value; static char *kwlist[] = {"value", "dtype", "offset", 0}; @@ -342,8 +356,9 @@ return NULL; } - if (PyArray_SetField(self, dtype, offset, value) < 0) + if (PyArray_SetField(self, dtype, offset, value) < 0) { return NULL; + } Py_INCREF(Py_None); return Py_None; } @@ -391,8 +406,9 @@ } else { PyObject *new; - if ((ret = (PyArrayObject *)PyArray_NewCopy(self,-1)) == NULL) + if ((ret = (PyArrayObject *)PyArray_NewCopy(self,-1)) == NULL) { return NULL; + } new = PyArray_Byteswap(ret, TRUE); Py_DECREF(new); return (PyObject *)ret; @@ -403,18 +419,20 @@ static PyObject * array_byteswap(PyArrayObject *self, PyObject *args) { - Bool inplace=FALSE; + Bool inplace = FALSE; - if (!PyArg_ParseTuple(args, "|O&", PyArray_BoolConverter, &inplace)) + if (!PyArg_ParseTuple(args, "|O&", PyArray_BoolConverter, &inplace)) { return NULL; - + } return PyArray_Byteswap(self, inplace); } static PyObject * array_tolist(PyArrayObject *self, PyObject *args) { - if (!PyArg_ParseTuple(args, "")) return NULL; + if (!PyArg_ParseTuple(args, "")) { + return NULL; + } return PyArray_ToList(self); } @@ -422,12 +440,14 @@ static PyObject * array_tostring(PyArrayObject *self, PyObject *args, PyObject *kwds) { - NPY_ORDER order=NPY_CORDER; + NPY_ORDER order = NPY_CORDER; static char *kwlist[] = {"order", NULL}; if (!PyArg_ParseTupleAndKeywords(args, kwds, "|O&", kwlist, PyArray_OrderConverter, - &order)) return NULL; + &order)) { + return NULL; + } return PyArray_ToString(self, order); } @@ -441,17 +461,21 @@ int ret; PyObject *file; FILE *fd; - char *sep=""; - char *format=""; + char *sep = ""; + char *format = ""; static char *kwlist[] = {"file", "sep", "format", NULL}; if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|ss", kwlist, - &file, &sep, &format)) return NULL; + &file, &sep, &format)) { + return NULL; + } if (PyString_Check(file) || PyUnicode_Check(file)) { file = PyObject_CallFunction((PyObject *)&PyFile_Type, "Os", file, "wb"); - if (file==NULL) return NULL; + if (file == NULL) { + return NULL; + } } else { Py_INCREF(file); @@ -465,7 +489,9 @@ } ret = PyArray_ToFile(self, fd, sep, format); Py_DECREF(file); - if (ret < 0) return NULL; + if (ret < 0) { + return NULL; + } Py_INCREF(Py_None); return Py_None; } @@ -476,7 +502,7 @@ int n, nd; n = PyTuple_GET_SIZE(args); - if (n==1) { + if (n == 1) { PyObject *obj; obj = PyTuple_GET_ITEM(args, 0); if (PyTuple_Check(obj)) { @@ -485,7 +511,7 @@ } } - if (n==0) { + if (n == 0) { if (self->nd == 0 || PyArray_SIZE(self) == 1) return self->descr->f->getitem(self->data, self); else { @@ -495,13 +521,13 @@ return NULL; } } - else if (n != self->nd && (n > 1 || self->nd==0)) { + else if (n != self->nd && (n > 1 || self->nd == 0)) { PyErr_SetString(PyExc_ValueError, "incorrect number of indices for " \ "array"); return NULL; } - else if (n==1) { /* allows for flat getting as well as 1-d case */ + else if (n == 1) { /* allows for flat getting as well as 1-d case */ intp value, loc, index, factor; intp factors[MAX_DIMS]; value = PyArray_PyIntAsIntp(PyTuple_GET_ITEM(args, 0)); @@ -528,7 +554,7 @@ factor *= self->dimensions[nd]; } loc = 0; - for (nd=0; nd < self->nd; nd++) { + for (nd = 0; nd < self->nd; nd++) { index = value / factors[nd]; value = value % factors[nd]; loc += self->strides[nd]*index; @@ -541,11 +567,14 @@ else { intp loc, index[MAX_DIMS]; nd = PyArray_IntpFromSequence(args, index, MAX_DIMS); - if (nd < n) return NULL; + if (nd < n) { + return NULL; + } loc = 0; while (nd--) { - if (index[nd] < 0) + if (index[nd] < 0) { index[nd] += self->dimensions[nd]; + } if (index[nd] < 0 || index[nd] >= self->dimensions[nd]) { PyErr_SetString(PyExc_ValueError, @@ -563,7 +592,7 @@ int n, nd; int ret = -1; PyObject *obj; - n = PyTuple_GET_SIZE(args)-1; + n = PyTuple_GET_SIZE(args) - 1; if (n < 0) { PyErr_SetString(PyExc_ValueError, @@ -571,7 +600,7 @@ return NULL; } obj = PyTuple_GET_ITEM(args, n); - if (n==0) { + if (n == 0) { if (self->nd == 0 || PyArray_SIZE(self) == 1) { ret = self->descr->f->setitem(obj, self->data, self); } @@ -582,13 +611,13 @@ return NULL; } } - else if (n != self->nd && (n > 1 || self->nd==0)) { + else if (n != self->nd && (n > 1 || self->nd == 0)) { PyErr_SetString(PyExc_ValueError, "incorrect number of indices for " \ "array"); return NULL; } - else if (n==1) { /* allows for flat setting as well as 1-d case */ + else if (n == 1) { /* allows for flat setting as well as 1-d case */ intp value, loc, index, factor; intp factors[MAX_DIMS]; PyObject *indobj; @@ -602,7 +631,7 @@ nn = PyTuple_GET_SIZE(indobj); newargs = PyTuple_New(nn+1); Py_INCREF(obj); - for (i=0; idimensions[nd]; } loc = 0; - for (nd=0; nd < self->nd; nd++) { + for (nd = 0; nd < self->nd; nd++) { index = value / factors[nd]; value = value % factors[nd]; loc += self->strides[nd]*index; @@ -650,11 +679,14 @@ tupargs = PyTuple_GetSlice(args, 0, n); nd = PyArray_IntpFromSequence(tupargs, index, MAX_DIMS); Py_DECREF(tupargs); - if (nd < n) return NULL; + if (nd < n) { + return NULL; + } loc = 0; while (nd--) { - if (index[nd] < 0) + if (index[nd] < 0) { index[nd] += self->dimensions[nd]; + } if (index[nd] < 0 || index[nd] >= self->dimensions[nd]) { PyErr_SetString(PyExc_ValueError, @@ -667,7 +699,9 @@ } finish: - if (ret < 0) return NULL; + if (ret < 0) { + return NULL; + } Py_INCREF(Py_None); return Py_None; } @@ -676,7 +710,7 @@ static PyObject * array_cast(PyArrayObject *self, PyObject *args) { - PyArray_Descr *descr=NULL; + PyArray_Descr *descr = NULL; PyObject *obj; if (!PyArg_ParseTuple(args, "O&", PyArray_DescrConverter, @@ -729,7 +763,9 @@ PyArray_DIMS(arr), PyArray_STRIDES(arr), PyArray_DATA(arr), PyArray_FLAGS(arr), (PyObject *)self); - if (ret == NULL) return NULL; + if (ret == NULL) { + return NULL; + } Py_INCREF(arr); PyArray_BASE(ret) = arr; return ret; @@ -739,7 +775,7 @@ static PyObject * array_getarray(PyArrayObject *self, PyObject *args) { - PyArray_Descr *newtype=NULL; + PyArray_Descr *newtype = NULL; PyObject *ret; if (!PyArg_ParseTuple(args, "|O&", PyArray_DescrConverter, @@ -765,7 +801,9 @@ PyArray_STRIDES(self), PyArray_DATA(self), PyArray_FLAGS(self), NULL); - if (new == NULL) return NULL; + if (new == NULL) { + return NULL; + } Py_INCREF(self); PyArray_BASE(new) = (PyObject *)self; self = (PyArrayObject *)new; @@ -774,7 +812,7 @@ Py_INCREF(self); } - if ((newtype == NULL) || \ + if ((newtype == NULL) || PyArray_EquivTypes(self->descr, newtype)) { return (PyObject *)self; } @@ -791,7 +829,9 @@ { PyArray_ORDER fortran=PyArray_CORDER; if (!PyArg_ParseTuple(args, "|O&", PyArray_OrderConverter, - &fortran)) return NULL; + &fortran)) { + return NULL; + } return PyArray_NewCopy(self, fortran); } @@ -804,7 +844,7 @@ PyObject *ret; int n; int refcheck = 1; - PyArray_ORDER fortran=PyArray_ANYORDER; + PyArray_ORDER fortran = PyArray_ANYORDER; if (kwds != NULL) { PyObject *ref; @@ -817,8 +857,9 @@ } ref = PyDict_GetItemString(kwds, "order"); if (ref != NULL || - (PyArray_OrderConverter(ref, &fortran) == PY_FAIL)) + (PyArray_OrderConverter(ref, &fortran) == PY_FAIL)) { return NULL; + } } n = PyTuple_Size(args); if (n <= 1) { @@ -827,7 +868,9 @@ return Py_None; } if (!PyArg_ParseTuple(args, "O&", PyArray_IntpConverter, - &newshape)) return NULL; + &newshape)) { + return NULL; + } } else { if (!PyArray_IntpConverter(args, &newshape)) { @@ -840,7 +883,9 @@ } ret = PyArray_Resize(self, &newshape, refcheck, fortran); PyDimMem_FREE(newshape.ptr); - if (ret == NULL) return NULL; + if (ret == NULL) { + return NULL; + } Py_DECREF(ret); Py_INCREF(Py_None); return Py_None; @@ -849,13 +894,14 @@ static PyObject * array_repeat(PyArrayObject *self, PyObject *args, PyObject *kwds) { PyObject *repeats; - int axis=MAX_DIMS; + int axis = MAX_DIMS; static char *kwlist[] = {"repeats", "axis", NULL}; if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|O&", kwlist, &repeats, PyArray_AxisConverter, - &axis)) return NULL; - + &axis)) { + return NULL; + } return _ARET(PyArray_Repeat(self, repeats, axis)); } @@ -864,26 +910,27 @@ { PyObject *choices; int n; - PyArrayObject *out=NULL; - NPY_CLIPMODE clipmode=NPY_RAISE; + PyArrayObject *out = NULL; + NPY_CLIPMODE clipmode = NPY_RAISE; n = PyTuple_Size(args); if (n <= 1) { - if (!PyArg_ParseTuple(args, "O", &choices)) + if (!PyArg_ParseTuple(args, "O", &choices)) { return NULL; + } } else { choices = args; } if (kwds && PyDict_Check(kwds)) { - if (PyArray_OutputConverter(PyDict_GetItemString(kwds, - "out"), - &out) == PY_FAIL) + if (PyArray_OutputConverter(PyDict_GetItemString(kwds, "out"), + &out) == PY_FAIL) { return NULL; - if (PyArray_ClipmodeConverter(PyDict_GetItemString(kwds, - "mode"), - &clipmode) == PY_FAIL) + } + if (PyArray_ClipmodeConverter(PyDict_GetItemString(kwds, "mode"), + &clipmode) == PY_FAIL) { return NULL; + } } return _ARET(PyArray_Choose(self, choices, out, clipmode)); @@ -894,18 +941,20 @@ { int axis=-1; int val; - PyArray_SORTKIND which=PyArray_QUICKSORT; - PyObject *order=NULL; - PyArray_Descr *saved=NULL; + PyArray_SORTKIND which = PyArray_QUICKSORT; + PyObject *order = NULL; + PyArray_Descr *saved = NULL; PyArray_Descr *newd; static char *kwlist[] = {"axis", "kind", "order", NULL}; if (!PyArg_ParseTupleAndKeywords(args, kwds, "|iO&O", kwlist, &axis, PyArray_SortkindConverter, &which, - &order)) + &order)) { return NULL; - - if (order == Py_None) order = NULL; + } + if (order == Py_None) { + order = NULL; + } if (order != NULL) { PyObject *new_name; PyObject *_numpy_internal; @@ -916,11 +965,15 @@ return NULL; } _numpy_internal = PyImport_ImportModule("numpy.core._internal"); - if (_numpy_internal == NULL) return NULL; + if (_numpy_internal == NULL) { + return NULL; + } new_name = PyObject_CallMethod(_numpy_internal, "_newnames", "OO", saved, order); Py_DECREF(_numpy_internal); - if (new_name == NULL) return NULL; + if (new_name == NULL) { + return NULL; + } newd = PyArray_DescrNew(saved); newd->names = new_name; self->descr = newd; @@ -931,7 +984,9 @@ Py_XDECREF(self->descr); self->descr = saved; } - if (val < 0) return NULL; + if (val < 0) { + return NULL; + } Py_INCREF(Py_None); return Py_None; } @@ -939,19 +994,21 @@ static PyObject * array_argsort(PyArrayObject *self, PyObject *args, PyObject *kwds) { - int axis=-1; - PyArray_SORTKIND which=PyArray_QUICKSORT; - PyObject *order=NULL, *res; + int axis = -1; + PyArray_SORTKIND which = PyArray_QUICKSORT; + PyObject *order = NULL, *res; PyArray_Descr *newd, *saved=NULL; static char *kwlist[] = {"axis", "kind", "order", NULL}; if (!PyArg_ParseTupleAndKeywords(args, kwds, "|O&O&O", kwlist, PyArray_AxisConverter, &axis, PyArray_SortkindConverter, &which, - &order)) + &order)) { return NULL; - - if (order == Py_None) order = NULL; + } + if (order == Py_None) { + order = NULL; + } if (order != NULL) { PyObject *new_name; PyObject *_numpy_internal; @@ -962,11 +1019,15 @@ return NULL; } _numpy_internal = PyImport_ImportModule("numpy.core._internal"); - if (_numpy_internal == NULL) return NULL; + if (_numpy_internal == NULL) { + return NULL; + } new_name = PyObject_CallMethod(_numpy_internal, "_newnames", "OO", saved, order); Py_DECREF(_numpy_internal); - if (new_name == NULL) return NULL; + if (new_name == NULL) { + return NULL; + } newd = PyArray_DescrNew(saved); newd->names = new_name; self->descr = newd; @@ -989,9 +1050,9 @@ if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|O&:searchsorted", kwlist, &keys, - PyArray_SearchsideConverter, &side)) + PyArray_SearchsideConverter, &side)) { return NULL; - + } return _ARET(PyArray_SearchSorted(self, keys, side)); } @@ -999,16 +1060,22 @@ _deepcopy_call(char *iptr, char *optr, PyArray_Descr *dtype, PyObject *deepcopy, PyObject *visit) { - if (!PyDataType_REFCHK(dtype)) return; + if (!PyDataType_REFCHK(dtype)) { + return; + } else if (PyDescr_HASFIELDS(dtype)) { - PyObject *key, *value, *title=NULL; + PyObject *key, *value, *title = NULL; PyArray_Descr *new; int offset; - Py_ssize_t pos=0; + Py_ssize_t pos = 0; while (PyDict_Next(dtype->fields, &pos, &key, &value)) { - if NPY_TITLE_KEY(key, value) continue; + if NPY_TITLE_KEY(key, value) { + continue; + } if (!PyArg_ParseTuple(value, "Oi|O", &new, &offset, - &title)) return; + &title)) { + return; + } _deepcopy_call(iptr + offset, optr + offset, new, deepcopy, visit); } @@ -1020,8 +1087,7 @@ otemp = (PyObject **)optr; Py_XINCREF(*itemp); /* call deepcopy on this argument */ - res = PyObject_CallFunctionObjArgs(deepcopy, - *itemp, visit, NULL); + res = PyObject_CallFunctionObjArgs(deepcopy, *itemp, visit, NULL); Py_XDECREF(*itemp); Py_XDECREF(*otemp); *otemp = res; @@ -1038,20 +1104,28 @@ PyArrayIterObject *it; PyObject *copy, *ret, *deepcopy; - if (!PyArg_ParseTuple(args, "O", &visit)) return NULL; + if (!PyArg_ParseTuple(args, "O", &visit)) { + return NULL; + } ret = PyArray_Copy(self); if (PyDataType_REFCHK(self->descr)) { copy = PyImport_ImportModule("copy"); - if (copy == NULL) return NULL; + if (copy == NULL) { + return NULL; + } deepcopy = PyObject_GetAttrString(copy, "deepcopy"); Py_DECREF(copy); - if (deepcopy == NULL) return NULL; + if (deepcopy == NULL) { + return NULL; + } it = (PyArrayIterObject *)PyArray_IterNew((PyObject *)self); - if (it == NULL) {Py_DECREF(deepcopy); return NULL;} + if (it == NULL) { + Py_DECREF(deepcopy); + return NULL; + } optr = PyArray_DATA(ret); while(it->index < it->size) { - _deepcopy_call(it->dataptr, optr, self->descr, - deepcopy, visit); + _deepcopy_call(it->dataptr, optr, self->descr, deepcopy, visit); optr += self->descr->elsize; PyArray_ITER_NEXT(it); } @@ -1066,15 +1140,20 @@ _getlist_pkl(PyArrayObject *self) { PyObject *theobject; - PyArrayIterObject *iter=NULL; + PyArrayIterObject *iter = NULL; PyObject *list; PyArray_GetItemFunc *getitem; getitem = self->descr->f->getitem; iter = (PyArrayIterObject *)PyArray_IterNew((PyObject *)self); - if (iter == NULL) return NULL; + if (iter == NULL) { + return NULL; + } list = PyList_New(iter->size); - if (list == NULL) {Py_DECREF(iter); return NULL;} + if (list == NULL) { + Py_DECREF(iter); + return NULL; + } while (iter->index < iter->size) { theobject = getitem(iter->dataptr, self); PyList_SET_ITEM(list, (int) iter->index, theobject); @@ -1088,12 +1167,14 @@ _setlist_pkl(PyArrayObject *self, PyObject *list) { PyObject *theobject; - PyArrayIterObject *iter=NULL; + PyArrayIterObject *iter = NULL; PyArray_SetItemFunc *setitem; setitem = self->descr->f->setitem; iter = (PyArrayIterObject *)PyArray_IterNew((PyObject *)self); - if (iter == NULL) return -1; + if (iter == NULL) { + return -1; + } while(iter->index < iter->size) { theobject = PyList_GET_ITEM(list, (int) iter->index); setitem(theobject, iter->dataptr, self); @@ -1111,8 +1192,8 @@ change the format. Be sure to handle the old versions in array_setstate. */ const int version = 1; - PyObject *ret=NULL, *state=NULL, *obj=NULL, *mod=NULL; - PyObject *mybool, *thestr=NULL; + PyObject *ret = NULL, *state = NULL, *obj = NULL, *mod = NULL; + PyObject *mybool, *thestr = NULL; PyArray_Descr *descr; /* Return a tuple of (callable object, arguments, object's state) */ @@ -1120,9 +1201,14 @@ it can use the string object as memory without a copy */ ret = PyTuple_New(3); - if (ret == NULL) return NULL; + if (ret == NULL) { + return NULL; + } mod = PyImport_ImportModule("numpy.core.multiarray"); - if (mod == NULL) {Py_DECREF(ret); return NULL;} + if (mod == NULL) { + Py_DECREF(ret); + return NULL; + } obj = PyObject_GetAttrString(mod, "_reconstruct"); Py_DECREF(mod); PyTuple_SET_ITEM(ret, 0, obj); @@ -1150,7 +1236,8 @@ state = PyTuple_New(5); if (state == NULL) { - Py_DECREF(ret); return NULL; + Py_DECREF(ret); + return NULL; } PyTuple_SET_ITEM(state, 0, PyInt_FromLong(version)); PyTuple_SET_ITEM(state, 1, PyObject_GetAttrString((PyObject *)self, @@ -1227,7 +1314,9 @@ self->descr = typecode; Py_INCREF(typecode); nd = PyArray_IntpFromSequence(shape, dimensions, MAX_DIMS); - if (nd < 0) return NULL; + if (nd < 0) { + return NULL; + } size = PyArray_MultiplyList(dimensions, nd); if (self->descr->elsize == 0) { PyErr_SetString(PyExc_ValueError, "Invalid data-type size."); @@ -1264,8 +1353,9 @@ } if ((self->flags & OWNDATA)) { - if (self->data != NULL) + if (self->data != NULL) { PyDataMem_FREE(self->data); + } self->flags &= ~OWNDATA; } Py_XDECREF(self->base); @@ -1312,10 +1402,12 @@ } else { self->descr = PyArray_DescrNew(typecode); - if (self->descr->byteorder == PyArray_BIG) + if (self->descr->byteorder == PyArray_BIG) { self->descr->byteorder = PyArray_LITTLE; - else if (self->descr->byteorder == PyArray_LITTLE) + } + else if (self->descr->byteorder == PyArray_LITTLE) { self->descr->byteorder = PyArray_BIG; + } } Py_DECREF(typecode); } @@ -1335,15 +1427,19 @@ if (self->data == NULL) { self->nd = 0; self->data = PyDataMem_NEW(self->descr->elsize); - if (self->dimensions) PyDimMem_FREE(self->dimensions); + if (self->dimensions) { + PyDimMem_FREE(self->dimensions); + } return PyErr_NoMemory(); } - if (PyDataType_FLAGCHK(self->descr, NPY_NEEDS_INIT)) + if (PyDataType_FLAGCHK(self->descr, NPY_NEEDS_INIT)) { memset(self->data, 0, PyArray_NBYTES(self)); + } self->flags |= OWNDATA; self->base = NULL; - if (_setlist_pkl(self, rawdata) < 0) + if (_setlist_pkl(self, rawdata) < 0) { return NULL; + } } PyArray_UpdateFlags(self, UPDATE_ALL); @@ -1356,24 +1452,32 @@ static int PyArray_Dump(PyObject *self, PyObject *file, int protocol) { - PyObject *cpick=NULL; + PyObject *cpick = NULL; PyObject *ret; - if (protocol < 0) protocol = 2; + if (protocol < 0) { + protocol = 2; + } cpick = PyImport_ImportModule("cPickle"); - if (cpick==NULL) return -1; - + if (cpick == NULL) { + return -1; + } if PyString_Check(file) { - file = PyFile_FromString(PyString_AS_STRING(file), "wb"); - if (file==NULL) return -1; + file = PyFile_FromString(PyString_AS_STRING(file), "wb"); + if (file == NULL) { + return -1; } - else Py_INCREF(file); - ret = PyObject_CallMethod(cpick, "dump", "OOi", self, - file, protocol); + } + else { + Py_INCREF(file); + } + ret = PyObject_CallMethod(cpick, "dump", "OOi", self, file, protocol); Py_XDECREF(ret); Py_DECREF(file); Py_DECREF(cpick); - if (PyErr_Occurred()) return -1; + if (PyErr_Occurred()) { + return -1; + } return 0; } @@ -1381,12 +1485,15 @@ static PyObject * PyArray_Dumps(PyObject *self, int protocol) { - PyObject *cpick=NULL; + PyObject *cpick = NULL; PyObject *ret; - if (protocol < 0) protocol = 2; - + if (protocol < 0) { + protocol = 2; + } cpick = PyImport_ImportModule("cPickle"); - if (cpick==NULL) return NULL; + if (cpick == NULL) { + return NULL; + } ret = PyObject_CallMethod(cpick, "dumps", "Oi", self, protocol); Py_DECREF(cpick); return ret; @@ -1396,13 +1503,16 @@ static PyObject * array_dump(PyArrayObject *self, PyObject *args) { - PyObject *file=NULL; + PyObject *file = NULL; int ret; - if (!PyArg_ParseTuple(args, "O", &file)) + if (!PyArg_ParseTuple(args, "O", &file)) { return NULL; + } ret = PyArray_Dump((PyObject *)self, file, 2); - if (ret < 0) return NULL; + if (ret < 0) { + return NULL; + } Py_INCREF(Py_None); return Py_None; } @@ -1411,8 +1521,9 @@ static PyObject * array_dumps(PyArrayObject *self, PyObject *args) { - if (!PyArg_ParseTuple(args, "")) + if (!PyArg_ParseTuple(args, "")) { return NULL; + } return PyArray_Dumps((PyObject *)self, 2); } @@ -1420,19 +1531,26 @@ static PyObject * array_transpose(PyArrayObject *self, PyObject *args) { - PyObject *shape=Py_None; + PyObject *shape = Py_None; int n; PyArray_Dims permute; PyObject *ret; n = PyTuple_Size(args); - if (n > 1) shape = args; - else if (n == 1) shape = PyTuple_GET_ITEM(args, 0); + if (n > 1) { + shape = args; + } + else if (n == 1) { + shape = PyTuple_GET_ITEM(args, 0); + } - if (shape == Py_None) + if (shape == Py_None) { ret = PyArray_Transpose(self, NULL); + } else { - if (!PyArray_IntpConverter(shape, &permute)) return NULL; + if (!PyArray_IntpConverter(shape, &permute)) { + return NULL; + } ret = PyArray_Transpose(self, &permute); PyDimMem_FREE(permute.ptr); } @@ -1447,9 +1565,9 @@ static int _get_type_num_double(PyArray_Descr *dtype1, PyArray_Descr *dtype2) { - if (dtype2 != NULL) + if (dtype2 != NULL) { return dtype2->type_num; - + } /* For integer or bool data-types */ if (dtype1->type_num < NPY_FLOAT) { return NPY_DOUBLE; @@ -1464,9 +1582,9 @@ static PyObject * array_mean(PyArrayObject *self, PyObject *args, PyObject *kwds) { - int axis=MAX_DIMS; - PyArray_Descr *dtype=NULL; - PyArrayObject *out=NULL; + int axis = MAX_DIMS; + PyArray_Descr *dtype = NULL; + PyArrayObject *out = NULL; int num; static char *kwlist[] = {"axis", "dtype", "out", NULL}; @@ -1488,9 +1606,9 @@ static PyObject * array_sum(PyArrayObject *self, PyObject *args, PyObject *kwds) { - int axis=MAX_DIMS; - PyArray_Descr *dtype=NULL; - PyArrayObject *out=NULL; + int axis = MAX_DIMS; + PyArray_Descr *dtype = NULL; + PyArrayObject *out = NULL; int rtype; static char *kwlist[] = {"axis", "dtype", "out", NULL}; @@ -1513,9 +1631,9 @@ static PyObject * array_cumsum(PyArrayObject *self, PyObject *args, PyObject *kwds) { - int axis=MAX_DIMS; - PyArray_Descr *dtype=NULL; - PyArrayObject *out=NULL; + int axis = MAX_DIMS; + PyArray_Descr *dtype = NULL; + PyArrayObject *out = NULL; int rtype; static char *kwlist[] = {"axis", "dtype", "out", NULL}; @@ -1537,9 +1655,9 @@ static PyObject * array_prod(PyArrayObject *self, PyObject *args, PyObject *kwds) { - int axis=MAX_DIMS; - PyArray_Descr *dtype=NULL; - PyArrayObject *out=NULL; + int axis = MAX_DIMS; + PyArray_Descr *dtype = NULL; + PyArrayObject *out = NULL; int rtype; static char *kwlist[] = {"axis", "dtype", "out", NULL}; @@ -1561,9 +1679,9 @@ static PyObject * array_cumprod(PyArrayObject *self, PyObject *args, PyObject *kwds) { - int axis=MAX_DIMS; - PyArray_Descr *dtype=NULL; - PyArrayObject *out=NULL; + int axis = MAX_DIMS; + PyArray_Descr *dtype = NULL; + PyArrayObject *out = NULL; int rtype; static char *kwlist[] = {"axis", "dtype", "out", NULL}; @@ -1586,8 +1704,8 @@ static PyObject * array_any(PyArrayObject *self, PyObject *args, PyObject *kwds) { - int axis=MAX_DIMS; - PyArrayObject *out=NULL; + int axis = MAX_DIMS; + PyArrayObject *out = NULL; static char *kwlist[] = {"axis", "out", NULL}; if (!PyArg_ParseTupleAndKeywords(args, kwds, "|O&O&", kwlist, @@ -1604,8 +1722,8 @@ static PyObject * array_all(PyArrayObject *self, PyObject *args, PyObject *kwds) { - int axis=MAX_DIMS; - PyArrayObject *out=NULL; + int axis = MAX_DIMS; + PyArrayObject *out = NULL; static char *kwlist[] = {"axis", "out", NULL}; if (!PyArg_ParseTupleAndKeywords(args, kwds, "|O&O&", kwlist, @@ -1625,9 +1743,9 @@ static PyObject * array_stddev(PyArrayObject *self, PyObject *args, PyObject *kwds) { - int axis=MAX_DIMS; - PyArray_Descr *dtype=NULL; - PyArrayObject *out=NULL; + int axis = MAX_DIMS; + PyArray_Descr *dtype = NULL; + PyArrayObject *out = NULL; int num; int ddof = 0; static char *kwlist[] = {"axis", "dtype", "out", "ddof", NULL}; @@ -1651,9 +1769,9 @@ static PyObject * array_variance(PyArrayObject *self, PyObject *args, PyObject *kwds) { - int axis=MAX_DIMS; - PyArray_Descr *dtype=NULL; - PyArrayObject *out=NULL; + int axis = MAX_DIMS; + PyArray_Descr *dtype = NULL; + PyArrayObject *out = NULL; int num; int ddof = 0; static char *kwlist[] = {"axis", "dtype", "out", "ddof", NULL}; @@ -1666,7 +1784,7 @@ &out, &ddof)) { Py_XDECREF(dtype); return NULL; - } + } num = _get_type_num_double(self->descr, dtype); Py_XDECREF(dtype); @@ -1677,17 +1795,18 @@ static PyObject * array_compress(PyArrayObject *self, PyObject *args, PyObject *kwds) { - int axis=MAX_DIMS; + int axis = MAX_DIMS; PyObject *condition; - PyArrayObject *out=NULL; + PyArrayObject *out = NULL; static char *kwlist[] = {"condition", "axis", "out", NULL}; if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|O&O&", kwlist, &condition, PyArray_AxisConverter, &axis, PyArray_OutputConverter, - &out)) return NULL; - + &out)) { + return NULL; + } return _ARET(PyArray_Compress(self, condition, axis, out)); } @@ -1695,8 +1814,9 @@ static PyObject * array_nonzero(PyArrayObject *self, PyObject *args) { - if (!PyArg_ParseTuple(args, "")) return NULL; - + if (!PyArg_ParseTuple(args, "")) { + return NULL; + } return PyArray_Nonzero(self); } @@ -1704,9 +1824,9 @@ static PyObject * array_trace(PyArrayObject *self, PyObject *args, PyObject *kwds) { - int axis1=0, axis2=1, offset=0; - PyArray_Descr *dtype=NULL; - PyArrayObject *out=NULL; + int axis1 = 0, axis2 = 1, offset = 0; + PyArray_Descr *dtype = NULL; + PyArrayObject *out = NULL; int rtype; static char *kwlist[] = {"offset", "axis1", "axis2", "dtype", "out", NULL}; @@ -1720,9 +1840,7 @@ rtype = _CHKTYPENUM(dtype); Py_XDECREF(dtype); - - return _ARET(PyArray_Trace(self, offset, axis1, axis2, - rtype, out)); + return _ARET(PyArray_Trace(self, offset, axis1, axis2, rtype, out)); } #undef _CHKTYPENUM @@ -1731,19 +1849,19 @@ static PyObject * array_clip(PyArrayObject *self, PyObject *args, PyObject *kwds) { - PyObject *min=NULL, *max=NULL; - PyArrayObject *out=NULL; + PyObject *min = NULL, *max = NULL; + PyArrayObject *out = NULL; static char *kwlist[] = {"min", "max", "out", NULL}; if (!PyArg_ParseTupleAndKeywords(args, kwds, "|OOO&", kwlist, &min, &max, PyArray_OutputConverter, - &out)) + &out)) { return NULL; - + } if (max == NULL && min == NULL) { PyErr_SetString(PyExc_ValueError, "One of max or min must be given."); - return NULL; + return NULL; } return _ARET(PyArray_Clip(self, min, max, out)); } @@ -1753,11 +1871,12 @@ array_conjugate(PyArrayObject *self, PyObject *args) { - PyArrayObject *out=NULL; + PyArrayObject *out = NULL; if (!PyArg_ParseTuple(args, "|O&", PyArray_OutputConverter, - &out)) return NULL; - + &out)) { + return NULL; + } return PyArray_Conjugate(self, out); } @@ -1765,13 +1884,13 @@ static PyObject * array_diagonal(PyArrayObject *self, PyObject *args, PyObject *kwds) { - int axis1=0, axis2=1, offset=0; + int axis1 = 0, axis2 = 1, offset = 0; static char *kwlist[] = {"offset", "axis1", "axis2", NULL}; if (!PyArg_ParseTupleAndKeywords(args, kwds, "|iii", kwlist, - &offset, &axis1, &axis2)) + &offset, &axis1, &axis2)) { return NULL; - + } return _ARET(PyArray_Diagonal(self, offset, axis1, axis2)); } @@ -1779,11 +1898,11 @@ static PyObject * array_flatten(PyArrayObject *self, PyObject *args) { - PyArray_ORDER fortran=PyArray_CORDER; + PyArray_ORDER fortran = PyArray_CORDER; - if (!PyArg_ParseTuple(args, "|O&", PyArray_OrderConverter, - &fortran)) return NULL; - + if (!PyArg_ParseTuple(args, "|O&", PyArray_OrderConverter, &fortran)) { + return NULL; + } return PyArray_Flatten(self, fortran); } @@ -1791,11 +1910,12 @@ static PyObject * array_ravel(PyArrayObject *self, PyObject *args) { - PyArray_ORDER fortran=PyArray_CORDER; + PyArray_ORDER fortran = PyArray_CORDER; if (!PyArg_ParseTuple(args, "|O&", PyArray_OrderConverter, - &fortran)) return NULL; - + &fortran)) { + return NULL; + } return PyArray_Ravel(self, fortran); } @@ -1804,14 +1924,14 @@ array_round(PyArrayObject *self, PyObject *args, PyObject *kwds) { int decimals = 0; - PyArrayObject *out=NULL; + PyArrayObject *out = NULL; static char *kwlist[] = {"decimals", "out", NULL}; if (!PyArg_ParseTupleAndKeywords(args, kwds, "|iO&", kwlist, &decimals, PyArray_OutputConverter, - &out)) + &out)) { return NULL; - + } return _ARET(PyArray_Round(self, decimals, out)); } @@ -1824,9 +1944,9 @@ array_setflags(PyArrayObject *self, PyObject *args, PyObject *kwds) { static char *kwlist[] = {"write", "align", "uic", NULL}; - PyObject *write=Py_None; - PyObject *align=Py_None; - PyObject *uic=Py_None; + PyObject *write = Py_None; + PyObject *align = Py_None; + PyObject *uic = Py_None; int flagback = self->flags; if (!PyArg_ParseTupleAndKeywords(args, kwds, "|OOO", kwlist, @@ -1834,8 +1954,12 @@ return NULL; if (align != Py_None) { - if (PyObject_Not(align)) self->flags &= ~ALIGNED; - else if (_IsAligned(self)) self->flags |= ALIGNED; + if (PyObject_Not(align)) { + self->flags &= ~ALIGNED; + } + else if (_IsAligned(self)) { + self->flags |= ALIGNED; + } else { PyErr_SetString(PyExc_ValueError, "cannot set aligned flag of mis-"\ @@ -1888,10 +2012,13 @@ PyArray_Descr *new; if (!PyArg_ParseTuple(args, "|O&", PyArray_ByteorderConverter, - &endian)) return NULL; - + &endian)) { + return NULL; + } new = PyArray_DescrNewByteorder(self->descr, endian); - if (!new) return NULL; + if (!new) { + return NULL; + } return PyArray_View(self, new, NULL); } From numpy-svn at scipy.org Wed Feb 18 00:07:08 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Tue, 17 Feb 2009 23:07:08 -0600 (CST) Subject: [Numpy-svn] r6372 - trunk/numpy/core/src Message-ID: <20090218050708.A257BC7C010@scipy.org> Author: cdavid Date: 2009-02-17 23:06:57 -0600 (Tue, 17 Feb 2009) New Revision: 6372 Modified: trunk/numpy/core/src/numpyos.c Log: Fix some const issues in NumPyOs_ascii_strtod. Modified: trunk/numpy/core/src/numpyos.c =================================================================== --- trunk/numpy/core/src/numpyos.c 2009-02-17 21:19:10 UTC (rev 6371) +++ trunk/numpy/core/src/numpyos.c 2009-02-18 05:06:57 UTC (rev 6372) @@ -416,7 +416,8 @@ size_t decimal_point_len = strlen(decimal_point); char buffer[FLOAT_FORMATBUFLEN+1]; - char *p; + const char *p; + char *q; size_t n; double result; @@ -444,14 +445,14 @@ while (NumPyOS_ascii_isalnum(*p) || *p == '_') ++p; if (*p == ')') ++p; } - if (endptr != NULL) *endptr = p; + if (endptr != NULL) *endptr = (char*)p; return NumPyOS_NAN; } else if (NumPyOS_ascii_strncasecmp(p, "inf", 3) == 0) { p += 3; if (NumPyOS_ascii_strncasecmp(p, "inity", 5) == 0) p += 5; - if (endptr != NULL) *endptr = p; + if (endptr != NULL) *endptr = (char*)p; return result*NumPyOS_PINF; } /* End of ##1 */ @@ -464,7 +465,7 @@ * where is the decimal point under the foreign locale. */ if (decimal_point[0] != '.' || decimal_point[1] != 0) { - p = (char *)s; + p = s; if (*p == '+' || *p == '-') ++p; while (*p >= '0' && *p <= '9') @@ -475,9 +476,9 @@ n = FLOAT_FORMATBUFLEN; memcpy(buffer, s, n); buffer[n] = '\0'; - result = PyOS_ascii_strtod(buffer, &p); + result = PyOS_ascii_strtod(buffer, &q); if (endptr != NULL) { - *endptr = s + (p - buffer); + *endptr = (char*)(s + (q - buffer)); } return result; } From numpy-svn at scipy.org Wed Feb 18 00:37:04 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Tue, 17 Feb 2009 23:37:04 -0600 (CST) Subject: [Numpy-svn] r6373 - trunk/numpy/core/src Message-ID: <20090218053704.E5C88C7C010@scipy.org> Author: charris Date: 2009-02-17 23:36:35 -0600 (Tue, 17 Feb 2009) New Revision: 6373 Modified: trunk/numpy/core/src/arrayobject.c Log: Coding style cleanups. Modified: trunk/numpy/core/src/arrayobject.c =================================================================== --- trunk/numpy/core/src/arrayobject.c 2009-02-18 05:06:57 UTC (rev 6372) +++ trunk/numpy/core/src/arrayobject.c 2009-02-18 05:36:35 UTC (rev 6373) @@ -29,13 +29,15 @@ PyArray_GetPriority(PyObject *obj, double default_) { PyObject *ret; - double priority=PyArray_PRIORITY; + double priority = PyArray_PRIORITY; if (PyArray_CheckExact(obj)) return priority; ret = PyObject_GetAttrString(obj, "__array_priority__"); - if (ret != NULL) priority = PyFloat_AsDouble(ret); + if (ret != NULL) { + priority = PyFloat_AsDouble(ret); + } if (PyErr_Occurred()) { PyErr_Clear(); priority = default_; @@ -79,7 +81,9 @@ int ret, storeflags; PyObject *obj; - if (_check_object_rec(arr->descr) < 0) return NULL; + if (_check_object_rec(arr->descr) < 0) { + return NULL; + } zeroval = PyDataMem_NEW(arr->descr->elsize); if (zeroval == NULL) { PyErr_SetNone(PyExc_MemoryError); @@ -165,13 +169,15 @@ Py_XINCREF(*temp); } else if (PyDescr_HASFIELDS(descr)) { - PyObject *key, *value, *title=NULL; + PyObject *key, *value, *title = NULL; PyArray_Descr *new; int offset; - Py_ssize_t pos=0; + Py_ssize_t pos = 0; while (PyDict_Next(descr->fields, &pos, &key, &value)) { - if NPY_TITLE_KEY(key, value) continue; + if NPY_TITLE_KEY(key, value) { + continue; + } if (!PyArg_ParseTuple(value, "Oi|O", &new, &offset, &title)) { return; @@ -199,13 +205,15 @@ Py_XDECREF(*temp); } else if PyDescr_HASFIELDS(descr) { - PyObject *key, *value, *title=NULL; + PyObject *key, *value, *title = NULL; PyArray_Descr *new; int offset; - Py_ssize_t pos=0; + Py_ssize_t pos = 0; while (PyDict_Next(descr->fields, &pos, &key, &value)) { - if NPY_TITLE_KEY(key, value) continue; + if NPY_TITLE_KEY(key, value) { + continue; + } if (!PyArg_ParseTuple(value, "Oi|O", &new, &offset, &title)) { return; @@ -250,12 +258,12 @@ data = (PyObject **)mp->data; n = PyArray_SIZE(mp); if (PyArray_ISALIGNED(mp)) { - for(i = 0; i < n; i++, data++) { + for (i = 0; i < n; i++, data++) { Py_XINCREF(*data); } } else { - for(i=0; idata; n = PyArray_SIZE(mp); if (PyArray_ISALIGNED(mp)) { - for(i = 0; i < n; i++, data++) Py_XDECREF(*data); + for (i = 0; i < n; i++, data++) Py_XDECREF(*data); } else { - for(i = 0; i < n; i++, data++) { + for (i = 0; i < n; i++, data++) { temp = data; Py_XDECREF(*temp); } @@ -358,7 +366,7 @@ case 2: _FAST_MOVE(Int16); case 16: - for(i=0; i 0; n--, a += stride-1) { + for (a = (char*)p; n > 0; n--, a += stride - 1) { b = a + 3; c = *a; *a++ = *b; *b-- = c; c = *a; *a = *b; *b = c; } break; case 8: - for(a = (char*)p ; n > 0; n--, a += stride-3) { + for (a = (char*)p; n > 0; n--, a += stride - 3) { b = a + 7; c = *a; *a++ = *b; *b-- = c; c = *a; *a++ = *b; *b-- = c; @@ -474,16 +482,16 @@ } break; case 2: - for(a = (char*)p ; n > 0; n--, a += stride) { + for (a = (char*)p; n > 0; n--, a += stride) { b = a + 1; c = *a; *a = *b; *b = c; } break; default: - m = size / 2; - for(a = (char *)p ; n > 0; n--, a += stride-m) { - b = a + (size-1); - for(j=0; j 0; n--, a += stride - m) { + b = a + (size - 1); + for (j = 0; j < m; j++) { c=*a; *a++ = *b; *b-- = c; } } @@ -508,10 +516,11 @@ char *d1 = (char *)dst; - if ((numitems == 1) || (itemsize == srcstrides)) + if ((numitems == 1) || (itemsize == srcstrides)) { memcpy(d1, s1, itemsize*numitems); + } else { - for(i = 0; i < numitems; i++) { + for (i = 0; i < numitems; i++) { memcpy(d1, s1, itemsize); d1 += itemsize; s1 += srcstrides; @@ -554,7 +563,6 @@ PyErr_SetString(PyExc_TypeError, msg); return -1; } - if (PyInt_Check(o)) { long_value = (longlong) PyInt_AS_LONG(o); goto finish; @@ -593,7 +601,7 @@ #if (PY_VERSION_HEX >= 0x02050000) if (PyIndex_Check(o)) { PyObject* value = PyNumber_Index(o); - if (value==NULL) { + if (value == NULL) { return -1; } long_value = (longlong) PyInt_AsSsize_t(value); @@ -655,7 +663,6 @@ PyErr_SetString(PyExc_TypeError, msg); return -1; } - if (PyInt_Check(o)) { long_value = (long) PyInt_AS_LONG(o); goto finish; @@ -665,7 +672,7 @@ } descr = &INT_Descr; - arr=NULL; + arr = NULL; if (PyArray_Check(o)) { if (PyArray_SIZE(o)!=1 || !PyArray_ISINTEGER(o)) { PyErr_SetString(PyExc_TypeError, msg); @@ -720,8 +727,7 @@ #if (SIZEOF_LONG > SIZEOF_INT) if ((long_value < INT_MIN) || (long_value > INT_MAX)) { - PyErr_SetString(PyExc_ValueError, - "integer won't fit into a C int"); + PyErr_SetString(PyExc_ValueError, "integer won't fit into a C int"); return -1; } #endif @@ -732,17 +738,19 @@ index2ptr(PyArrayObject *mp, intp i) { intp dim0; - if(mp->nd == 0) { - PyErr_SetString(PyExc_IndexError, - "0-d arrays can't be indexed"); + + if (mp->nd == 0) { + PyErr_SetString(PyExc_IndexError, "0-d arrays can't be indexed"); return NULL; } dim0 = mp->dimensions[0]; - if (i<0) i += dim0; - if (i==0 && dim0 > 0) + if (i < 0) { + i += dim0; + } + if (i == 0 && dim0 > 0) { return mp->data; - - if (i>0 && i < dim0) { + } + if (i > 0 && i < dim0) { return mp->data+i*mp->strides[0]; } PyErr_SetString(PyExc_IndexError,"index out of bounds"); @@ -766,11 +774,11 @@ static int _copy_from0d(PyArrayObject *dest, PyArrayObject *src, int usecopy, int swap) { - char *aligned=NULL; + char *aligned = NULL; char *sptr; int numcopies, nbytes; void (*myfunc)(char *, intp, char *, intp, intp, int); - int retval=-1; + int retval = -1; NPY_BEGIN_THREADS_DEF; numcopies = PyArray_SIZE(dest); @@ -807,10 +815,12 @@ intp dstride; dptr = dest->data; - if (dest->nd == 1) + if (dest->nd == 1) { dstride = dest->strides[0]; - else + } + else { dstride = nbytes; + } /* Refcount note: src and dest may have different sizes */ PyArray_INCREF(src); @@ -826,9 +836,10 @@ } else { PyArrayIterObject *dit; - int axis=-1; - dit = (PyArrayIterObject *)\ - PyArray_IterAllButAxis((PyObject *)dest, &axis); + int axis = -1; + + dit = (PyArrayIterObject *) + PyArray_IterAllButAxis((PyObject *)dest, &axis); if (dit == NULL) { goto finish; } @@ -837,12 +848,10 @@ PyArray_XDECREF(dest); NPY_BEGIN_THREADS; while(dit->index < dit->size) { - myfunc(dit->dataptr, PyArray_STRIDE(dest, axis), - sptr, 0, + myfunc(dit->dataptr, PyArray_STRIDE(dest, axis), sptr, 0, PyArray_DIM(dest, axis), nbytes); if (swap) { - _strided_byte_swap(dit->dataptr, - PyArray_STRIDE(dest, axis), + _strided_byte_swap(dit->dataptr, PyArray_STRIDE(dest, axis), PyArray_DIM(dest, axis), nbytes); } PyArray_ITER_NEXT(dit); @@ -928,8 +937,7 @@ PyArray_XDECREF((PyArrayObject *)dst); NPY_BEGIN_THREADS; while(it->index < it->size) { - myfunc(dptr, elsize, it->dataptr, - PyArray_STRIDE(src,axis), + myfunc(dptr, elsize, it->dataptr, PyArray_STRIDE(src,axis), PyArray_DIM(src,axis), elsize); dptr += nbytes; PyArray_ITER_NEXT(it); @@ -949,7 +957,7 @@ void (*myfunc)(char *, intp, char *, intp, intp, int), int swap) { - int maxaxis=-1, elsize; + int maxaxis = -1, elsize; intp maxdim; PyArrayIterObject *dit, *sit; NPY_BEGIN_THREADS_DEF; @@ -1323,7 +1331,7 @@ } if (!PyArray_ISNBO(descr->byteorder)) descr->byteorder = '='; - for(i = 0; i < nd; i++) { + for (i = 0; i < nd; i++) { newd[i] = (intp) d[i]; } ret = PyArray_NewFromDescr(&PyArray_Type, descr, @@ -1409,8 +1417,9 @@ int swap; type_num = descr->type_num; - if (type_num == PyArray_BOOL) + if (type_num == PyArray_BOOL) { PyArrayScalar_RETURN_BOOL_FROM_LONG(*(Bool*)data); + } else if (PyDataType_FLAGCHK(descr, NPY_USE_GETITEM)) { return descr->f->getitem(data, base); } @@ -1420,18 +1429,23 @@ swap = !PyArray_ISNBO(descr->byteorder); if PyTypeNum_ISSTRING(type_num) { /* Eliminate NULL bytes */ char *dptr = data; - dptr += itemsize-1; - while(itemsize && *dptr-- == 0) itemsize--; + + dptr += itemsize - 1; + while(itemsize && *dptr-- == 0) { + itemsize--; + } if (type_num == PyArray_UNICODE && itemsize) { /* make sure itemsize is a multiple of 4 */ /* so round up to nearest multiple */ itemsize = (((itemsize-1) >> 2) + 1) << 2; } } - if (type->tp_itemsize != 0) /* String type */ + if (type->tp_itemsize != 0) { /* String type */ obj = type->tp_alloc(type, itemsize); - else + } + else { obj = type->tp_alloc(type, 0); + } if (obj == NULL) { return NULL; } @@ -1449,7 +1463,7 @@ int length = itemsize >> 2; #ifndef Py_UNICODE_WIDE char *buffer; - int alloc=0; + int alloc = 0; length *= 2; #endif /* Need an extra slot and need to use @@ -1468,22 +1482,25 @@ uni->defenc = NULL; #ifdef Py_UNICODE_WIDE memcpy(destptr, data, itemsize); - if (swap) + if (swap) { byte_swap_vector(destptr, length, 4); + } #else /* need aligned data buffer */ if ((swap) || ((((intp)data) % descr->alignment) != 0)) { buffer = _pya_malloc(itemsize); - if (buffer == NULL) + if (buffer == NULL) { return PyErr_NoMemory(); + } alloc = 1; memcpy(buffer, data, itemsize); if (swap) { - byte_swap_vector(buffer, - itemsize >> 2, 4); + byte_swap_vector(buffer, itemsize >> 2, 4); } } - else buffer = data; + else { + buffer = data; + } /* Allocated enough for 2-characters per itemsize. Now convert from the data-buffer @@ -1491,7 +1508,9 @@ length = PyUCS2Buffer_FromUCS4(uni->str, (PyArray_UCS4 *)buffer, itemsize >> 2); - if (alloc) _pya_free(buffer); + if (alloc) { + _pya_free(buffer); + } /* Resize the unicode result */ if (MyPyUnicode_Resize(uni, length) < 0) { Py_DECREF(obj); @@ -1635,7 +1654,7 @@ copyswap = PyArray_DESCR(arr)->f->copyswap; - for(i = 0; i < n; i++) { + for (i = 0; i < n; i++) { copyswap(dstptr, srcptr, swap, arr); dstptr += dstride; srcptr += sstride; @@ -1657,12 +1676,12 @@ int i; PyArray_Descr *descr; - for(i=0; itypeobj->tp_name, str) == 0) + if (strcmp(descr->typeobj->tp_name, str) == 0) { return descr->type_num; + } } - return PyArray_NOTYPE; } @@ -1684,10 +1703,11 @@ PyArray_ArrFuncs *f; /* See if this type is already registered */ - for(i=0; itype_num; + } } typenum = PyArray_USERDEF + NPY_NUMUSERTYPES; descr->type_num = typenum; @@ -1733,6 +1753,7 @@ { PyObject *cobj, *key; int ret; + if (totype < PyArray_NTYPES) { descr->f->cast[totype] = castfunc; return 0; @@ -1743,12 +1764,19 @@ } if (descr->f->castdict == NULL) { descr->f->castdict = PyDict_New(); - if (descr->f->castdict == NULL) return -1; + if (descr->f->castdict == NULL) { + return -1; + } } key = PyInt_FromLong(totype); - if (PyErr_Occurred()) return -1; + if (PyErr_Occurred()) { + return -1; + } cobj = PyCObject_FromVoidPtr((void *)castfunc, NULL); - if (cobj == NULL) {Py_DECREF(key); return -1;} + if (cobj == NULL) { + Py_DECREF(key); + return -1; + } ret = PyDict_SetItem(descr->f->castdict, key, cobj); Py_DECREF(key); Py_DECREF(cobj); @@ -1758,13 +1786,15 @@ static int * _append_new(int *types, int insert) { - int n=0; + int n = 0; int *newtypes; - while (types[n] != PyArray_NOTYPE) n++; - newtypes = (int *)realloc(types, (n+2)*sizeof(int)); + while (types[n] != PyArray_NOTYPE) { + n++; + } + newtypes = (int *)realloc(types, (n + 2)*sizeof(int)); newtypes[n] = insert; - newtypes[n+1] = PyArray_NOTYPE; + newtypes[n + 1] = PyArray_NOTYPE; return newtypes; } @@ -1791,22 +1821,20 @@ /* register with cancastscalarkindto */ if (descr->f->cancastscalarkindto == NULL) { int i; - descr->f->cancastscalarkindto = \ - (int **)malloc(PyArray_NSCALARKINDS* \ - sizeof(int*)); - for(i=0; if->cancastscalarkindto = + (int **)malloc(PyArray_NSCALARKINDS* sizeof(int*)); + for (i = 0; i < PyArray_NSCALARKINDS; i++) { descr->f->cancastscalarkindto[i] = NULL; } } if (descr->f->cancastscalarkindto[scalar] == NULL) { - descr->f->cancastscalarkindto[scalar] = \ + descr->f->cancastscalarkindto[scalar] = (int *)malloc(1*sizeof(int)); - descr->f->cancastscalarkindto[scalar][0] = \ + descr->f->cancastscalarkindto[scalar][0] = PyArray_NOTYPE; } - descr->f->cancastscalarkindto[scalar] = \ - _append_new(descr->f->cancastscalarkindto[scalar], - totype); + descr->f->cancastscalarkindto[scalar] = + _append_new(descr->f->cancastscalarkindto[scalar], totype); } return 0; } @@ -1859,7 +1887,7 @@ it = (PyArrayIterObject *) PyArray_IterNew((PyObject *)self); NPY_BEGIN_THREADS; - while(it->index < it->size) { + while (it->index < it->size) { if (fwrite((const void *)it->dataptr, (size_t) self->descr->elsize, 1, fp) < 1) { @@ -1885,7 +1913,7 @@ it = (PyArrayIterObject *) PyArray_IterNew((PyObject *)self); n4 = (format ? strlen((const char *)format) : 0); - while(it->index < it->size) { + while (it->index < it->size) { obj = self->descr->f->getitem(it->dataptr, self); if (obj == NULL) { Py_DECREF(it); @@ -1977,7 +2005,7 @@ sz = self->dimensions[0]; lp = PyList_New(sz); - for(i = 0; i < sz; i++) { + for (i = 0; i < sz; i++) { v = (PyArrayObject *)array_big_item(self, i); if (PyArray_Check(v) && (v->nd >= self->nd)) { PyErr_SetString(PyExc_RuntimeError, @@ -2015,7 +2043,7 @@ */ numbytes = PyArray_NBYTES(self); - if ((PyArray_ISCONTIGUOUS(self) && (order == NPY_CORDER)) || \ + if ((PyArray_ISCONTIGUOUS(self) && (order == NPY_CORDER)) || (PyArray_ISFORTRAN(self) && (order == NPY_FORTRANORDER))) { ret = PyString_FromStringAndSize(self->data, (int) numbytes); } @@ -2024,7 +2052,9 @@ if (order == NPY_FORTRANORDER) { /* iterators are always in C-order */ new = PyArray_Transpose(self, NULL); - if (new == NULL) return NULL; + if (new == NULL) { + return NULL; + } } else { Py_INCREF(self); @@ -2032,13 +2062,18 @@ } it = (PyArrayIterObject *)PyArray_IterNew(new); Py_DECREF(new); - if (it==NULL) return NULL; + if (it == NULL) { + return NULL; + } ret = PyString_FromStringAndSize(NULL, (int) numbytes); - if (ret == NULL) {Py_DECREF(it); return NULL;} + if (ret == NULL) { + Py_DECREF(it); + return NULL; + } dptr = PyString_AS_STRING(ret); index = it->size; elsize = self->descr->elsize; - while(index--) { + while (index--) { memcpy(dptr, it->dataptr, elsize); dptr += elsize; PyArray_ITER_NEXT(it); @@ -2057,30 +2092,34 @@ static void array_dealloc(PyArrayObject *self) { - if (self->weakreflist != NULL) + if (self->weakreflist != NULL) { PyObject_ClearWeakRefs((PyObject *)self); - - if(self->base) { - /* UPDATEIFCOPY means that base points to an - array that should be updated with the contents - of this array upon destruction. - self->base->flags must have been WRITEABLE - (checked previously) and it was locked here - thus, unlock it. - */ + } + if (self->base) { + /* + * UPDATEIFCOPY means that base points to an + * array that should be updated with the contents + * of this array upon destruction. + * self->base->flags must have been WRITEABLE + * (checked previously) and it was locked here + * thus, unlock it. + */ if (self->flags & UPDATEIFCOPY) { ((PyArrayObject *)self->base)->flags |= WRITEABLE; Py_INCREF(self); /* hold on to self in next call */ - if (PyArray_CopyAnyInto((PyArrayObject *)self->base, - self) < 0) { + if (PyArray_CopyAnyInto((PyArrayObject *)self->base, self) < 0) { PyErr_Print(); PyErr_Clear(); } - /* Don't need to DECREF -- because we are deleting - self already... */ + /* + * Don't need to DECREF -- because we are deleting + *self already... + */ } - /* In any case base is pointing to something that we need - to DECREF -- either a view or a buffer object */ + /* + * In any case base is pointing to something that we need + * to DECREF -- either a view or a buffer object + */ Py_DECREF(self->base); } @@ -2089,16 +2128,16 @@ if (PyDataType_FLAGCHK(self->descr, NPY_ITEM_REFCOUNT)) { Py_INCREF(self); /*hold on to self */ PyArray_XDECREF(self); - /* Don't need to DECREF -- because we are deleting - self already... */ + /* + * Don't need to DECREF -- because we are deleting + * self already... + */ } PyDataMem_FREE(self->data); } PyDimMem_FREE(self->dimensions); - Py_DECREF(self->descr); - self->ob_type->tp_free((PyObject *)self); } @@ -2128,8 +2167,9 @@ "0-d arrays can't be indexed"); return NULL; } - if ((item = index2ptr(self, i)) == NULL) return NULL; - + if ((item = index2ptr(self, i)) == NULL) { + return NULL; + } Py_INCREF(self->descr); r = (PyArrayObject *)PyArray_NewFromDescr(self->ob_type, self->descr, @@ -2138,7 +2178,9 @@ self->strides+1, item, self->flags, (PyObject *)self); - if (r == NULL) return NULL; + if (r == NULL) { + return NULL; + } Py_INCREF(self); r->base = (PyObject *)self; PyArray_UpdateFlags(r, CONTIGUOUS | FORTRAN); @@ -2151,12 +2193,14 @@ { if (self->nd == 1) { char *item; - if ((item = index2ptr(self, i)) == NULL) return NULL; + if ((item = index2ptr(self, i)) == NULL) { + return NULL; + } return PyArray_Scalar(item, self->descr, (PyObject *)self); } else { - return PyArray_Return((PyArrayObject *)\ - array_big_item(self, (intp) i)); + return PyArray_Return( + (PyArrayObject *) array_big_item(self, (intp) i)); } } @@ -2185,15 +2229,20 @@ if (self->nd > 1) { - if((tmp = (PyArrayObject *)array_big_item(self, i)) == NULL) + if((tmp = (PyArrayObject *)array_big_item(self, i)) == NULL) { return -1; + } ret = PyArray_CopyObject(tmp, v); Py_DECREF(tmp); return ret; } - if ((item = index2ptr(self, i)) == NULL) return -1; - if (self->descr->f->setitem(v, item, self) == -1) return -1; + if ((item = index2ptr(self, i)) == NULL) { + return -1; + } + if (self->descr->f->setitem(v, item, self) == -1) { + return -1; + } return 0; } @@ -2239,8 +2288,11 @@ if (r->step == Py_None) { *step = 1; - } else { - if (!slice_coerce_index(r->step, step)) return -1; + } + else { + if (!slice_coerce_index(r->step, step)) { + return -1; + } if (*step == 0) { PyErr_SetString(PyExc_ValueError, "slice step cannot be zero"); @@ -2248,15 +2300,20 @@ } } /* defstart = *step < 0 ? length - 1 : 0; */ - defstop = *step < 0 ? -1 : length; - if (r->start == Py_None) { *start = *step < 0 ? length-1 : 0; - } else { - if (!slice_coerce_index(r->start, start)) return -1; - if (*start < 0) *start += length; - if (*start < 0) *start = (*step < 0) ? -1 : 0; + } + else { + if (!slice_coerce_index(r->start, start)) { + return -1; + } + if (*start < 0) { + *start += length; + } + if (*start < 0) { + *start = (*step < 0) ? -1 : 0; + } if (*start >= length) { *start = (*step < 0) ? length - 1 : length; } @@ -2264,19 +2321,30 @@ if (r->stop == Py_None) { *stop = defstop; - } else { - if (!slice_coerce_index(r->stop, stop)) return -1; - if (*stop < 0) *stop += length; - if (*stop < 0) *stop = -1; - if (*stop > length) *stop = length; } + else { + if (!slice_coerce_index(r->stop, stop)) { + return -1; + } + if (*stop < 0) { + *stop += length; + } + if (*stop < 0) { + *stop = -1; + } + if (*stop > length) { + *stop = length; + } + } - if ((*step < 0 && *stop >= *start) || \ + if ((*step < 0 && *stop >= *start) || (*step > 0 && *start >= *stop)) { *slicelength = 0; - } else if (*step < 0) { + } + else if (*step < 0) { *slicelength = (*stop - *start + 1) / (*step) + 1; - } else { + } + else { *slicelength = (*stop - *start - 1) / (*step) + 1; } @@ -2295,10 +2363,12 @@ if (op == Py_None) { *n_steps = PseudoIndex; index = 0; - } else if (op == Py_Ellipsis) { + } + else if (op == Py_Ellipsis) { *n_steps = RubberIndex; index = 0; - } else if (PySlice_Check(op)) { + } + else if (PySlice_Check(op)) { intp stop; if (slice_GetIndices((PySliceObject *)op, max, &index, &stop, step_size, n_steps) < 0) { @@ -2313,7 +2383,8 @@ *step_size = 1; index = 0; } - } else { + } + else { index = PyArray_PyIntAsIntp(op); if (error_converting(index)) { PyErr_SetString(PyExc_IndexError, @@ -2324,13 +2395,16 @@ } *n_steps = SingleIndex; *step_size = 0; - if (index < 0) index += max; + if (index < 0) { + index += max; + } if (index >= max || index < 0) { PyErr_SetString(PyExc_IndexError, "invalid index"); goto fail; } } return index; + fail: return -1; } @@ -2343,7 +2417,7 @@ int i, j, n; int nd_old, nd_new, n_add, n_pseudo; intp n_steps, start, offset, step_size; - PyObject *op1=NULL; + PyObject *op1 = NULL; int is_slice; if (PySlice_Check(op) || op == Py_Ellipsis || op == Py_None) { @@ -2367,7 +2441,7 @@ nd_old = nd_new = 0; offset = 0; - for(i=0; ind ? \ + nd_old < self->nd ? self->dimensions[nd_old] : 0); Py_DECREF(op1); - if (start == -1) break; - + if (start == -1) { + break; + } if (n_steps == PseudoIndex) { dimensions[nd_new] = 1; strides[nd_new] = 0; nd_new++; - } else { + } + else { if (n_steps == RubberIndex) { - for(j=i+1, n_pseudo=0; jnd-(n-i-n_pseudo-1+nd_old); @@ -2398,14 +2475,15 @@ "too many indices"); return -1; } - for(j=0; jdimensions[nd_old]; strides[nd_new] = \ self->strides[nd_old]; nd_new++; nd_old++; } - } else { + } + else { if (nd_old >= self->nd) { PyErr_SetString(PyExc_IndexError, "too many indices"); @@ -2422,12 +2500,15 @@ } } } - if (i < n) return -1; + if (i < n) { + return -1; + } n_add = self->nd-nd_old; - for(j=0; jdimensions[nd_old]; strides[nd_new] = self->strides[nd_old]; - nd_new++; nd_old++; + nd_new++; + nd_old++; } *offset_ptr = offset; return nd_new; @@ -2446,68 +2527,73 @@ permute.ptr = d; permute.len = mit->nd; - /* arr might not have the right number of dimensions - and need to be reshaped first by pre-pending ones */ + /* + * arr might not have the right number of dimensions + * and need to be reshaped first by pre-pending ones + */ arr = *ret; if (arr->nd != mit->nd) { - for(i=1; i<=arr->nd; i++) { + for (i = 1; i <= arr->nd; i++) { permute.ptr[mit->nd-i] = arr->dimensions[arr->nd-i]; } - for(i=0; ind-arr->nd; i++) { + for (i = 0; i < mit->nd-arr->nd; i++) { permute.ptr[i] = 1; } new = PyArray_Newshape(arr, &permute, PyArray_ANYORDER); Py_DECREF(arr); *ret = (PyArrayObject *)new; - if (new == NULL) return; + if (new == NULL) { + return; + } } - /* Setting and getting need to have different permutations. - On the get we are permuting the returned object, but on - setting we are permuting the object-to-be-set. - The set permutation is the inverse of the get permutation. - */ + /* + * Setting and getting need to have different permutations. + * On the get we are permuting the returned object, but on + * setting we are permuting the object-to-be-set. + * The set permutation is the inverse of the get permutation. + */ - /* For getting the array the tuple for transpose is - (n1,...,n1+n2-1,0,...,n1-1,n1+n2,...,n3-1) - n1 is the number of dimensions of - the broadcasted index array - n2 is the number of dimensions skipped at the - start - n3 is the number of dimensions of the - result - */ + /* + * For getting the array the tuple for transpose is + * (n1,...,n1+n2-1,0,...,n1-1,n1+n2,...,n3-1) + * n1 is the number of dimensions of the broadcast index array + * n2 is the number of dimensions skipped at the start + * n3 is the number of dimensions of the result + */ - /* For setting the array the tuple for transpose is - (n2,...,n1+n2-1,0,...,n2-1,n1+n2,...n3-1) - */ + /* + * For setting the array the tuple for transpose is + * (n2,...,n1+n2-1,0,...,n2-1,n1+n2,...n3-1) + */ n1 = mit->iters[0]->nd_m1 + 1; n2 = mit->iteraxes[0]; n3 = mit->nd; - bnd = (getmap ? n1 : n2); /* use n1 as the boundary if getting - but n2 if setting */ - + /* use n1 as the boundary if getting but n2 if setting */ + bnd = getmap ? n1 : n2; val = bnd; i = 0; - while(val < n1+n2) + while (val < n1 + n2) { permute.ptr[i++] = val++; + } val = 0; - while(val < bnd) + while (val < bnd) { permute.ptr[i++] = val++; - val = n1+n2; - while(val < n3) + } + val = n1 + n2; + while (val < n3) { permute.ptr[i++] = val++; - + } new = PyArray_Transpose(*ret, &permute); Py_DECREF(*ret); *ret = (PyArrayObject *)new; } -/* Prototypes for Mapping calls --- not part of the C-API - because only useful as part of a getitem call. -*/ - +/* + * Prototypes for Mapping calls --- not part of the C-API + * because only useful as part of a getitem call. + */ static void PyArray_MapIterReset(PyArrayMapIterObject *); static void PyArray_MapIterNext(PyArrayMapIterObject *); static void PyArray_MapIterBind(PyArrayMapIterObject *, PyArrayObject *); @@ -2524,28 +2610,33 @@ PyArray_CopySwapFunc *copyswap; /* Unbound map iterator --- Bind should have been called */ - if (mit->ait == NULL) return NULL; + if (mit->ait == NULL) { + return NULL; + } /* This relies on the map iterator object telling us the shape of the new array in nd and dimensions. */ temp = mit->ait->ao; Py_INCREF(temp->descr); - ret = (PyArrayObject *)\ + ret = (PyArrayObject *) PyArray_NewFromDescr(temp->ob_type, temp->descr, mit->nd, mit->dimensions, NULL, NULL, PyArray_ISFORTRAN(temp), (PyObject *)temp); - if (ret == NULL) return NULL; + if (ret == NULL) { + return NULL; + } - /* Now just iterate through the new array filling it in - with the next object from the original array as - defined by the mapping iterator */ + /* + * Now just iterate through the new array filling it in + * with the next object from the original array as + * defined by the mapping iterator + */ - if ((it = (PyArrayIterObject *)PyArray_IterNew((PyObject *)ret)) - == NULL) { + if ((it = (PyArrayIterObject *)PyArray_IterNew((PyObject *)ret)) == NULL) { Py_DECREF(ret); return NULL; } @@ -2572,7 +2663,7 @@ static int PyArray_SetMap(PyArrayMapIterObject *mit, PyObject *op) { - PyObject *arr=NULL; + PyObject *arr = NULL; PyArrayIterObject *it; int index; int swap; @@ -2580,17 +2671,21 @@ PyArray_Descr *descr; /* Unbound Map Iterator */ - if (mit->ait == NULL) return -1; - + if (mit->ait == NULL) { + return -1; + } descr = mit->ait->ao->descr; Py_INCREF(descr); arr = PyArray_FromAny(op, descr, 0, 0, FORCECAST, NULL); - if (arr == NULL) return -1; - + if (arr == NULL) { + return -1; + } if ((mit->subspace != NULL) && (mit->consec)) { if (mit->iteraxes[0] > 0) { /* then we need to swap */ _swap_axes(mit, (PyArrayObject **)&arr, 0); - if (arr == NULL) return -1; + if (arr == NULL) { + return -1; + } } } @@ -2604,7 +2699,7 @@ } index = mit->size; - swap = (PyArray_ISNOTSWAPPED(mit->ait->ao) != \ + swap = (PyArray_ISNOTSWAPPED(mit->ait->ao) != (PyArray_ISNOTSWAPPED(arr))); copyswap = PyArray_DESCR(arr)->f->copyswap; PyArray_MapIterReset(mit); @@ -2615,8 +2710,9 @@ PyArray_Item_INCREF(it->dataptr, PyArray_DESCR(arr)); memmove(mit->dataptr, it->dataptr, sizeof(PyObject *)); /* ignored unless VOID array with object's */ - if (swap) + if (swap) { copyswap(mit->dataptr, NULL, swap, arr); + } PyArray_MapIterNext(mit); PyArray_ITER_NEXT(it); } @@ -2626,8 +2722,9 @@ } while(index--) { memmove(mit->dataptr, it->dataptr, PyArray_ITEMSIZE(arr)); - if (swap) + if (swap) { copyswap(mit->dataptr, NULL, swap, arr); + } PyArray_MapIterNext(mit); PyArray_ITER_NEXT(it); } @@ -2644,12 +2741,17 @@ int newaxis_count = 0; argument_count = PyTuple_GET_SIZE(tuple); - - for(i = 0; i < argument_count; ++i) { + for (i = 0; i < argument_count; ++i) { PyObject *arg = PyTuple_GET_ITEM(tuple, i); - if (arg == Py_Ellipsis && !ellipsis_count) ellipsis_count++; - else if (arg == Py_None) newaxis_count++; - else break; + if (arg == Py_Ellipsis && !ellipsis_count) { + ellipsis_count++; + } + else if (arg == Py_None) { + newaxis_count++; + } + else { + break; + } } if (i < argument_count) { PyErr_SetString(PyExc_IndexError, @@ -2659,8 +2761,7 @@ return -1; } if (newaxis_count > MAX_DIMS) { - PyErr_SetString(PyExc_IndexError, - "too many dimensions"); + PyErr_SetString(PyExc_IndexError, "too many dimensions"); return -1; } return newaxis_count; @@ -2672,7 +2773,8 @@ PyArrayObject *other; intp dimensions[MAX_DIMS]; int i; - for(i = 0; i < newaxis_count; ++i) { + + for (i = 0; i < newaxis_count; ++i) { dimensions[i] = 1; } Py_INCREF(arr->descr); @@ -2706,13 +2808,16 @@ if (PyTuple_Check(args)) { n = PyTuple_GET_SIZE(args); - if (n >= MAX_DIMS) return SOBJ_TOOMANY; - for(i=0; i= MAX_DIMS) { + return SOBJ_TOOMANY; + } + for (i = 0; i < n; i++) { obj = PyTuple_GET_ITEM(args,i); if (PyArray_Check(obj)) { if (PyArray_ISINTEGER(obj) || - PyArray_ISBOOL(obj)) + PyArray_ISBOOL(obj)) { retval = SOBJ_ISFANCY; + } else { retval = SOBJ_BADARRAY; break; @@ -2725,62 +2830,69 @@ } else if (PyArray_Check(args)) { if ((PyArray_TYPE(args)==PyArray_BOOL) || - (PyArray_ISINTEGER(args))) + (PyArray_ISINTEGER(args))) { return SOBJ_ISFANCY; - else + } + else { return SOBJ_BADARRAY; + } } else if (PySequence_Check(args)) { - /* Sequences < MAX_DIMS with any slice objects - or newaxis, or Ellipsis is considered standard - as long as there are also no Arrays and or additional - sequences embedded. - */ + /* + * Sequences < MAX_DIMS with any slice objects + * or newaxis, or Ellipsis is considered standard + * as long as there are also no Arrays and or additional + * sequences embedded. + */ retval = SOBJ_ISFANCY; n = PySequence_Size(args); - if (n<0 || n>=MAX_DIMS) return SOBJ_ISFANCY; - for(i=0; i= MAX_DIMS) { + return SOBJ_ISFANCY; + } + for (i = 0; i < n; i++) { obj = PySequence_GetItem(args, i); - if (obj == NULL) return SOBJ_ISFANCY; + if (obj == NULL) { + return SOBJ_ISFANCY; + } if (PyArray_Check(obj)) { - if (PyArray_ISINTEGER(obj) || - PyArray_ISBOOL(obj)) + if (PyArray_ISINTEGER(obj) || PyArray_ISBOOL(obj)) { retval = SOBJ_LISTTUP; - else + } + else { retval = SOBJ_BADARRAY; + } } else if (PySequence_Check(obj)) { retval = SOBJ_LISTTUP; } else if (PySlice_Check(obj) || obj == Py_Ellipsis || - obj == Py_None) { + obj == Py_None) { retval = SOBJ_NOTFANCY; } Py_DECREF(obj); - if (retval > SOBJ_ISFANCY) return retval; + if (retval > SOBJ_ISFANCY) { + return retval; + } } } return retval; } -/* Called when treating array object like a mapping -- called first from - Python when using a[object] unless object is a standard slice object - (not an extended one). +/* + * Called when treating array object like a mapping -- called first from + * Python when using a[object] unless object is a standard slice object + * (not an extended one). + * + * There are two situations: + * + * 1 - the subscript is a standard view and a reference to the + * array can be returned + * + * 2 - the subscript uses Boolean masks or integer indexing and + * therefore a new array is created and returned. + */ -*/ - -/* There are two situations: - - 1 - the subscript is a standard view and a reference to the - array can be returned - - 2 - the subscript uses Boolean masks or integer indexing and - therefore a new array is created and returned. - -*/ - /* Always returns arrays */ - static PyObject *iter_subscript(PyArrayIterObject *, PyObject *); @@ -2800,24 +2912,22 @@ PyErr_Clear(); /* Standard (view-based) Indexing */ - if ((nd = parse_index(self, op, dimensions, strides, &offset)) - == -1) return NULL; - + if ((nd = parse_index(self, op, dimensions, strides, &offset)) == -1) { + return NULL; + } /* This will only work if new array will be a view */ Py_INCREF(self->descr); - if ((other = (PyArrayObject *) \ + if ((other = (PyArrayObject *) PyArray_NewFromDescr(self->ob_type, self->descr, nd, dimensions, strides, self->data+offset, self->flags, - (PyObject *)self)) == NULL) + (PyObject *)self)) == NULL) { return NULL; - + } other->base = (PyObject *)self; Py_INCREF(self); - PyArray_UpdateFlags(other, UPDATE_ALL); - return (PyObject *)other; } @@ -2837,11 +2947,9 @@ int offset; PyObject *title; - if (PyArg_ParseTuple(obj, "Oi|O", - &descr, &offset, &title)) { + if (PyArg_ParseTuple(obj, "Oi|O", &descr, &offset, &title)) { Py_INCREF(descr); - return PyArray_GetField(self, descr, - offset); + return PyArray_GetField(self, descr, offset); } } } @@ -2852,12 +2960,11 @@ return NULL; } - /* Check for multiple field access - */ + /* Check for multiple field access */ if (self->descr->names && PySequence_Check(op) && !PyTuple_Check(op)) { int seqlen, i; seqlen = PySequence_Size(op); - for (i=0; i 0) && (i == seqlen)); - if (fancy) { + if (fancy) { PyObject *_numpy_internal; _numpy_internal = PyImport_ImportModule("numpy.core._internal"); - if (_numpy_internal == NULL) return NULL; - obj = PyObject_CallMethod(_numpy_internal, "_index_fields", - "OO", self, op); + if (_numpy_internal == NULL) { + return NULL; + } + obj = PyObject_CallMethod(_numpy_internal, + "_index_fields", "OO", self, op); Py_DECREF(_numpy_internal); return obj; } @@ -2886,20 +2996,22 @@ } if (self->nd == 0) { - if (op == Py_None) + if (op == Py_None) { return add_new_axes_0d(self, 1); + } if (PyTuple_Check(op)) { if (0 == PyTuple_GET_SIZE(op)) { Py_INCREF(self); return (PyObject *)self; } - if ((nd = count_new_axes_0d(op)) == -1) + if ((nd = count_new_axes_0d(op)) == -1) { return NULL; + } return add_new_axes_0d(self, nd); } /* Allow Boolean mask selection also */ - if ((PyArray_Check(op) && (PyArray_DIMS(op)==0) && - PyArray_ISBOOL(op))) { + if ((PyArray_Check(op) && (PyArray_DIMS(op)==0) + && PyArray_ISBOOL(op))) { if (PyObject_IsTrue(op)) { Py_INCREF(self); return (PyObject *)self; @@ -2915,28 +3027,30 @@ NULL); } } - PyErr_SetString(PyExc_IndexError, - "0-d arrays can't be indexed."); + PyErr_SetString(PyExc_IndexError, "0-d arrays can't be indexed."); return NULL; } fancy = fancy_indexing_check(op); - if (fancy != SOBJ_NOTFANCY) { int oned; + oned = ((self->nd == 1) && !(PyTuple_Check(op) && PyTuple_GET_SIZE(op) > 1)); /* wrap arguments into a mapiter object */ - mit = (PyArrayMapIterObject *)\ - PyArray_MapIterNew(op, oned, fancy); - if (mit == NULL) return NULL; + mit = (PyArrayMapIterObject *) PyArray_MapIterNew(op, oned, fancy); + if (mit == NULL) { + return NULL; + } if (oned) { PyArrayIterObject *it; PyObject *rval; - it = (PyArrayIterObject *)\ - PyArray_IterNew((PyObject *)self); - if (it == NULL) {Py_DECREF(mit); return NULL;} + it = (PyArrayIterObject *) PyArray_IterNew((PyObject *)self); + if (it == NULL) { + Py_DECREF(mit); + return NULL; + } rval = iter_subscript(it, mit->indexobj); Py_DECREF(it); Py_DECREF(mit); @@ -2952,15 +3066,13 @@ } -/* Another assignment hacked by using CopyObject. */ - -/* This only works if subscript returns a standard view. */ - -/* Again there are two cases. In the first case, PyArray_CopyObject - can be used. In the second case, a new indexing function has to be - used. -*/ - +/* + * Another assignment hacked by using CopyObject. + * This only works if subscript returns a standard view. + * Again there are two cases. In the first case, PyArray_CopyObject + * can be used. In the second case, a new indexing function has to be + * used. + */ static int iter_ass_subscript(PyArrayIterObject *, PyObject *, PyObject *); static int @@ -2980,12 +3092,16 @@ if (PyArray_CheckExact(self)) { tmp = (PyArrayObject *)array_subscript_simple(self, index); - if (tmp == NULL) return -1; + if (tmp == NULL) { + return -1; + } } else { PyObject *tmp0; tmp0 = PyObject_GetItem((PyObject *)self, index); - if (tmp0 == NULL) return -1; + if (tmp0 == NULL) { + return -1; + } if (!PyArray_Check(tmp0)) { PyErr_SetString(PyExc_RuntimeError, "Getitem not returning array."); @@ -3018,10 +3134,14 @@ for(i=0; i 0) || - PyList_Check(obj)) return -1; + if ((PyArray_Check(obj) && PyArray_NDIM(obj) > 0) + || PyList_Check(obj)) { + return -1; + } temp = PyArray_PyIntAsIntp(obj); - if (error_converting(temp)) return -1; + if (error_converting(temp)) { + return -1; + } vals[i] = temp; } return 0; @@ -3051,26 +3171,27 @@ !PySequence_Check(index))) { intp value; value = PyArray_PyIntAsIntp(index); - if (PyErr_Occurred()) + if (PyErr_Occurred()) { PyErr_Clear(); - else + } + else { return array_ass_big_item(self, value, op); + } } if (PyString_Check(index) || PyUnicode_Check(index)) { if (self->descr->names) { PyObject *obj; + obj = PyDict_GetItem(self->descr->fields, index); if (obj != NULL) { PyArray_Descr *descr; int offset; PyObject *title; - if (PyArg_ParseTuple(obj, "Oi|O", - &descr, &offset, &title)) { + if (PyArg_ParseTuple(obj, "Oi|O", &descr, &offset, &title)) { Py_INCREF(descr); - return PyArray_SetField(self, descr, - offset, op); + return PyArray_SetField(self, descr, offset, op); } } } @@ -3082,17 +3203,19 @@ } if (self->nd == 0) { - /* Several different exceptions to the 0-d no-indexing rule - - 1) ellipses - 2) empty tuple - 3) Using newaxis (None) - 4) Boolean mask indexing - */ - if (index == Py_Ellipsis || index == Py_None || \ - (PyTuple_Check(index) && (0 == PyTuple_GET_SIZE(index) || \ - count_new_axes_0d(index) > 0))) + /* + * Several different exceptions to the 0-d no-indexing rule + * + * 1) ellipses + * 2) empty tuple + * 3) Using newaxis (None) + * 4) Boolean mask indexing + */ + if (index == Py_Ellipsis || index == Py_None || + (PyTuple_Check(index) && (0 == PyTuple_GET_SIZE(index) || + count_new_axes_0d(index) > 0))) { return self->descr->f->setitem(op, self->data, self); + } if (PyBool_Check(index) || PyArray_IsScalar(index, Bool) || (PyArray_Check(index) && (PyArray_DIMS(index)==0) && PyArray_ISBOOL(index))) { @@ -3103,8 +3226,7 @@ return 0; } } - PyErr_SetString(PyExc_IndexError, - "0-d arrays can't be indexed."); + PyErr_SetString(PyExc_IndexError, "0-d arrays can't be indexed."); return -1; } @@ -3114,8 +3236,11 @@ && (_tuple_of_integers(index, vals, self->nd) >= 0)) { int i; char *item; - for(i=0; ind; i++) { - if (vals[i] < 0) vals[i] += self->dimensions[i]; + + for (i = 0; i < self->nd; i++) { + if (vals[i] < 0) { + vals[i] += self->dimensions[i]; + } if ((vals[i] < 0) || (vals[i] >= self->dimensions[i])) { PyErr_Format(PyExc_IndexError, "index (%"INTP_FMT") out of range "\ @@ -3125,25 +3250,27 @@ } } item = PyArray_GetPtr(self, vals); - /* fprintf(stderr, "Here I am...\n");*/ return self->descr->f->setitem(op, item, self); } PyErr_Clear(); fancy = fancy_indexing_check(index); - if (fancy != SOBJ_NOTFANCY) { oned = ((self->nd == 1) && !(PyTuple_Check(index) && PyTuple_GET_SIZE(index) > 1)); - - mit = (PyArrayMapIterObject *) \ - PyArray_MapIterNew(index, oned, fancy); - if (mit == NULL) return -1; + mit = (PyArrayMapIterObject *) PyArray_MapIterNew(index, oned, fancy); + if (mit == NULL) { + return -1; + } if (oned) { PyArrayIterObject *it; int rval; + it = (PyArrayIterObject *)PyArray_IterNew((PyObject *)self); - if (it == NULL) {Py_DECREF(mit); return -1;} + if (it == NULL) { + Py_DECREF(mit); + return -1; + } rval = iter_ass_subscript(it, mit->indexobj, op); Py_DECREF(it); Py_DECREF(mit); @@ -3159,10 +3286,11 @@ } -/* There are places that require that array_subscript return a PyArrayObject - and not possibly a scalar. Thus, this is the function exposed to - Python so that 0-dim arrays are passed as scalars -*/ +/* + * There are places that require that array_subscript return a PyArrayObject + * and not possibly a scalar. Thus, this is the function exposed to + * Python so that 0-dim arrays are passed as scalars + */ static PyObject * @@ -3172,13 +3300,14 @@ PyArrayObject *mp; intp vals[MAX_DIMS]; - if (PyInt_Check(op) || PyArray_IsScalar(op, Integer) || \ + if (PyInt_Check(op) || PyArray_IsScalar(op, Integer) || PyLong_Check(op) || (PyIndex_Check(op) && !PySequence_Check(op))) { intp value; value = PyArray_PyIntAsIntp(op); - if (PyErr_Occurred()) + if (PyErr_Occurred()) { PyErr_Clear(); + } else { return array_item_nice(self, (Py_ssize_t) value); } @@ -3189,8 +3318,11 @@ && (_tuple_of_integers(op, vals, self->nd) >= 0)) { int i; char *item; - for(i=0; ind; i++) { - if (vals[i] < 0) vals[i] += self->dimensions[i]; + + for (i = 0; i < self->nd; i++) { + if (vals[i] < 0) { + vals[i] += self->dimensions[i]; + } if ((vals[i] < 0) || (vals[i] >= self->dimensions[i])) { PyErr_Format(PyExc_IndexError, "index (%"INTP_FMT") out of range "\ @@ -3205,27 +3337,29 @@ PyErr_Clear(); mp = (PyArrayObject *)array_subscript(self, op); + /* + * mp could be a scalar if op is not an Int, Scalar, Long or other Index + * object and still convertable to an integer (so that the code goes to + * array_subscript_simple). So, this cast is a bit dangerous.. + */ - /* mp could be a scalar if op is not an Int, Scalar, Long or other Index - object and still convertable to an integer (so that the code goes to - array_subscript_simple). So, this cast is a bit dangerous.. - */ + /* + * The following is just a copy of PyArray_Return with an + * additional logic in the nd == 0 case. + */ - /* The following is just a copy of PyArray_Return with an - additional logic in the nd == 0 case. - */ - - if (mp == NULL) return NULL; - + if (mp == NULL) { + return NULL; + } if (PyErr_Occurred()) { Py_XDECREF(mp); return NULL; } - if (PyArray_Check(mp) && mp->nd == 0) { Bool noellipses = TRUE; - if ((op == Py_Ellipsis) || PyString_Check(op) || PyUnicode_Check(op)) + if ((op == Py_Ellipsis) || PyString_Check(op) || PyUnicode_Check(op)) { noellipses = FALSE; + } else if (PyBool_Check(op) || PyArray_IsScalar(op, Bool) || (PyArray_Check(op) && (PyArray_DIMS(op)==0) && PyArray_ISBOOL(op))) { @@ -3234,12 +3368,14 @@ else if (PySequence_Check(op)) { int n, i; PyObject *temp; + n = PySequence_Size(op); - i=0; + i = 0; while (idata; return PyArray_NBYTES(self); @@ -3311,10 +3446,11 @@ static Py_ssize_t array_getwritebuf(PyArrayObject *self, Py_ssize_t segment, void **ptrptr) { - if (PyArray_CHKFLAGS(self, WRITEABLE)) + if (PyArray_CHKFLAGS(self, WRITEABLE)) { return array_getreadbuf(self, segment, (void **) ptrptr); + } else { - PyErr_SetString(PyExc_ValueError, "array cannot be "\ + PyErr_SetString(PyExc_ValueError, "array cannot be " "accessed as a writeable buffer"); return -1; } @@ -3328,14 +3464,14 @@ static PyBufferProcs array_as_buffer = { #if PY_VERSION_HEX >= 0x02050000 - (readbufferproc)array_getreadbuf, /*bf_getreadbuffer*/ - (writebufferproc)array_getwritebuf, /*bf_getwritebuffer*/ - (segcountproc)array_getsegcount, /*bf_getsegcount*/ - (charbufferproc)array_getcharbuf, /*bf_getcharbuffer*/ + (readbufferproc)array_getreadbuf, /*bf_getreadbuffer*/ + (writebufferproc)array_getwritebuf, /*bf_getwritebuffer*/ + (segcountproc)array_getsegcount, /*bf_getsegcount*/ + (charbufferproc)array_getcharbuf, /*bf_getcharbuffer*/ #else (getreadbufferproc)array_getreadbuf, /*bf_getreadbuffer*/ (getwritebufferproc)array_getwritebuf, /*bf_getwritebuffer*/ - (getsegcountproc)array_getsegcount, /*bf_getsegcount*/ + (getsegcountproc)array_getsegcount, /*bf_getsegcount*/ (getcharbufferproc)array_getcharbuf, /*bf_getcharbuffer*/ #endif }; @@ -3349,40 +3485,40 @@ typedef struct { - PyObject *add, - *subtract, - *multiply, - *divide, - *remainder, - *power, - *square, - *reciprocal, - *ones_like, - *sqrt, - *negative, - *absolute, - *invert, - *left_shift, - *right_shift, - *bitwise_and, - *bitwise_xor, - *bitwise_or, - *less, - *less_equal, - *equal, - *not_equal, - *greater, - *greater_equal, - *floor_divide, - *true_divide, - *logical_or, - *logical_and, - *floor, - *ceil, - *maximum, - *minimum, - *rint, - *conjugate; + PyObject *add; + PyObject *subtract; + PyObject *multiply; + PyObject *divide; + PyObject *remainder; + PyObject *power; + PyObject *square; + PyObject *reciprocal; + PyObject *ones_like; + PyObject *sqrt; + PyObject *negative; + PyObject *absolute; + PyObject *invert; + PyObject *left_shift; + PyObject *right_shift; + PyObject *bitwise_and; + PyObject *bitwise_xor; + PyObject *bitwise_or; + PyObject *less; + PyObject *less_equal; + PyObject *equal; + PyObject *not_equal; + PyObject *greater; + PyObject *greater_equal; + PyObject *floor_divide; + PyObject *true_divide; + PyObject *logical_or; + PyObject *logical_and; + PyObject *floor; + PyObject *ceil; + PyObject *maximum; + PyObject *minimum; + PyObject *rint; + PyObject *conjugate; } NumericOps; static NumericOps n_ops; /* NB: static objects initialized to zero */ @@ -3500,21 +3636,19 @@ static PyObject * _get_keywords(int rtype, PyArrayObject *out) { - PyObject *kwds=NULL; + PyObject *kwds = NULL; if (rtype != PyArray_NOTYPE || out != NULL) { kwds = PyDict_New(); if (rtype != PyArray_NOTYPE) { PyArray_Descr *descr; descr = PyArray_DescrFromType(rtype); if (descr) { - PyDict_SetItemString(kwds, "dtype", - (PyObject *)descr); + PyDict_SetItemString(kwds, "dtype", (PyObject *)descr); Py_DECREF(descr); } } if (out != NULL) { - PyDict_SetItemString(kwds, "out", - (PyObject *)out); + PyDict_SetItemString(kwds, "out", (PyObject *)out); } } return kwds; @@ -3524,7 +3658,7 @@ PyArray_GenericReduceFunction(PyArrayObject *m1, PyObject *op, int axis, int rtype, PyArrayObject *out) { - PyObject *args, *ret=NULL, *meth; + PyObject *args, *ret = NULL, *meth; PyObject *kwds; if (op == NULL) { Py_INCREF(Py_NotImplemented); @@ -3547,7 +3681,7 @@ PyArray_GenericAccumulateFunction(PyArrayObject *m1, PyObject *op, int axis, int rtype, PyArrayObject *out) { - PyObject *args, *ret=NULL, *meth; + PyObject *args, *ret = NULL, *meth; PyObject *kwds; if (op == NULL) { Py_INCREF(Py_NotImplemented); @@ -3668,8 +3802,9 @@ PyObject* value = PyNumber_Index(o2); Py_ssize_t val; if (value==NULL) { - if (PyErr_Occurred()) + if (PyErr_Occurred()) { PyErr_Clear(); + } return 0; } val = PyInt_AsSsize_t(value); @@ -3686,8 +3821,10 @@ /* optimize float array or complex array to a scalar power */ static PyObject * -fast_scalar_power(PyArrayObject *a1, PyObject *o2, int inplace) { +fast_scalar_power(PyArrayObject *a1, PyObject *o2, int inplace) +{ double exp; + if (PyArray_Check(a1) && array_power_is_scalar(o2, &exp)) { PyObject *fastop = NULL; if (PyArray_ISFLOAT(a1) || PyArray_ISCOMPLEX(a1)) { @@ -3703,33 +3840,37 @@ } else { return PyArray_Copy(a1); } - } else if (exp == -1.0) { + } + else if (exp == -1.0) { fastop = n_ops.reciprocal; - } else if (exp == 0.0) { + } + else if (exp == 0.0) { fastop = n_ops.ones_like; - } else if (exp == 0.5) { + } + else if (exp == 0.5) { fastop = n_ops.sqrt; - } else if (exp == 2.0) { + } + else if (exp == 2.0) { fastop = n_ops.square; - } else { + } + else { return NULL; } + if (inplace) { - return PyArray_GenericInplaceUnaryFunction(a1, - fastop); + return PyArray_GenericInplaceUnaryFunction(a1, fastop); } else { - return PyArray_GenericUnaryFunction(a1, - fastop); + return PyArray_GenericUnaryFunction(a1, fastop); } } else if (exp==2.0) { fastop = n_ops.multiply; if (inplace) { - return PyArray_GenericInplaceBinaryFunction \ + return PyArray_GenericInplaceBinaryFunction (a1, (PyObject *)a1, fastop); } else { - return PyArray_GenericBinaryFunction \ + return PyArray_GenericBinaryFunction (a1, (PyObject *)a1, fastop); } } @@ -3905,7 +4046,9 @@ Bool anyTRUE = FALSE; it = (PyArrayIterObject *)PyArray_IterNew((PyObject *)mp); - if (it==NULL) return anyTRUE; + if (it == NULL) { + return anyTRUE; + } index = it->size; while(index--) { if (mp->descr->f->nonzero(it->dataptr, mp)) { @@ -3922,6 +4065,7 @@ _array_nonzero(PyArrayObject *mp) { intp n; + n = PyArray_SIZE(mp); if (n == 1) { return mp->descr->f->nonzero(mp->data, mp); @@ -3946,7 +4090,9 @@ PyObject *divp, *modp, *result; divp = array_floor_divide(op1, op2); - if (divp == NULL) return NULL; + if (divp == NULL) { + return NULL; + } modp = array_remainder(op1, op2); if (modp == NULL) { Py_DECREF(divp); @@ -3969,7 +4115,9 @@ return NULL; } pv = v->descr->f->getitem(v->data, v); - if (pv == NULL) return NULL; + if (pv == NULL) { + return NULL; + } if (pv->ob_type->tp_as_number == 0) { PyErr_SetString(PyExc_TypeError, "cannot convert to an int; "\ "scalar object is not a number"); @@ -3998,7 +4146,9 @@ return NULL; } pv = v->descr->f->getitem(v->data, v); - if (pv == NULL) return NULL; + if (pv == NULL) { + return NULL; + } if (pv->ob_type->tp_as_number == 0) { PyErr_SetString(PyExc_TypeError, "cannot convert to a "\ "float; scalar object is not a number"); @@ -4094,8 +4244,7 @@ static PyObject * _array_copy_nice(PyArrayObject *self) { - return PyArray_Return((PyArrayObject *) \ - PyArray_Copy(self)); + return PyArray_Return((PyArrayObject *) PyArray_Copy(self)); } #if PY_VERSION_HEX >= 0x02050000 @@ -4137,8 +4286,10 @@ (unaryfunc)array_oct, /*nb_oct*/ (unaryfunc)array_hex, /*nb_hex*/ - /*This code adds augmented assignment functionality*/ - /*that was made available in Python 2.0*/ + /* + * This code adds augmented assignment functionality + * that was made available in Python 2.0 + */ (binaryfunc)array_inplace_add, /*inplace_add*/ (binaryfunc)array_inplace_subtract, /*inplace_subtract*/ (binaryfunc)array_inplace_multiply, /*inplace_multiply*/ @@ -4188,15 +4339,26 @@ } l=self->dimensions[0]; - if (ilow < 0) ilow = 0; - else if (ilow > l) ilow = l; - if (ihigh < ilow) ihigh = ilow; - else if (ihigh > l) ihigh = l; + if (ilow < 0) { + ilow = 0; + } + else if (ilow > l) { + ilow = l; + } + if (ihigh < ilow) { + ihigh = ilow; + } + else if (ihigh > l) { + ihigh = l; + } if (ihigh != ilow) { data = index2ptr(self, ilow); - if (data == NULL) return NULL; - } else { + if (data == NULL) { + return NULL; + } + } + else { data = self->data; } @@ -4208,7 +4370,9 @@ self->strides, data, self->flags, (PyObject *)self); self->dimensions[0] = l; - if (r == NULL) return NULL; + if (r == NULL) { + return NULL; + } r->base = (PyObject *)self; Py_INCREF(self); PyArray_UpdateFlags(r, UPDATE_ALL); @@ -4232,9 +4396,9 @@ "array is not writeable"); return -1; } - if ((tmp = (PyArrayObject *)array_slice(self, ilow, ihigh)) \ - == NULL) + if ((tmp = (PyArrayObject *)array_slice(self, ilow, ihigh)) == NULL) { return -1; + } ret = PyArray_CopyObject(tmp, v); Py_DECREF(tmp); @@ -4251,7 +4415,9 @@ res = PyArray_EnsureAnyArray(PyObject_RichCompare((PyObject *)self, el, Py_EQ)); - if (res == NULL) return -1; + if (res == NULL) { + return -1; + } ret = array_any_nonzero((PyArrayObject *)res); Py_DECREF(res); return ret; @@ -4296,11 +4462,12 @@ char *ostring; int i, N; -#define CHECK_MEMORY if (*n >= *max_n-16) { *max_n *= 2; \ - *string = (char *)_pya_realloc(*string, *max_n); } +#define CHECK_MEMORY do { if (*n >= *max_n-16) { \ + *max_n *= 2; \ + *string = (char *)_pya_realloc(*string, *max_n); \ + }} while (0) if (nd == 0) { - if ((op = descr->f->getitem(data, self)) == NULL) { return -1; } @@ -4312,33 +4479,33 @@ ostring = PyString_AsString(sp); N = PyString_Size(sp)*sizeof(char); *n += N; - CHECK_MEMORY - memmove(*string + (*n - N), ostring, N); + CHECK_MEMORY; + memmove(*string + (*n - N), ostring, N); Py_DECREF(sp); Py_DECREF(op); return 0; } else { - CHECK_MEMORY - (*string)[*n] = '['; + CHECK_MEMORY; + (*string)[*n] = '['; *n += 1; - for(i = 0; i < dimensions[0]; i++) { + for (i = 0; i < dimensions[0]; i++) { if (dump_data(string, n, max_n, data + (*strides)*i, nd - 1, dimensions + 1, strides + 1, self) < 0) { return -1; } - CHECK_MEMORY - if (i < dimensions[0] - 1) { - (*string)[*n] = ','; - (*string)[*n+1] = ' '; - *n += 2; - } + CHECK_MEMORY; + if (i < dimensions[0] - 1) { + (*string)[*n] = ','; + (*string)[*n+1] = ' '; + *n += 2; + } } - CHECK_MEMORY - (*string)[*n] = ']'; - *n += 1; + CHECK_MEMORY; + (*string)[*n] = ']'; + *n += 1; return 0; } @@ -4397,8 +4564,8 @@ static PyObject *PyArray_ReprFunction = NULL; /*NUMPY_API - Set the array print function to be a Python function. -*/ + * Set the array print function to be a Python function. + */ static void PyArray_SetStringFunction(PyObject *op, int repr) { @@ -4409,7 +4576,8 @@ Py_XINCREF(op); /* Remember new callback */ PyArray_ReprFunction = op; - } else { + } + else { /* Dispose of previous callback */ Py_XDECREF(PyArray_StrFunction); /* Add a reference to new callback */ @@ -4426,7 +4594,8 @@ if (PyArray_ReprFunction == NULL) { s = array_repr_builtin(self, 1); - } else { + } + else { arglist = Py_BuildValue("(O)", self); s = PyEval_CallObject(PyArray_ReprFunction, arglist); Py_DECREF(arglist); @@ -4441,7 +4610,8 @@ if (PyArray_StrFunction == NULL) { s = array_repr_builtin(self, 0); - } else { + } + else { arglist = Py_BuildValue("(O)", self); s = PyEval_CallObject(PyArray_StrFunction, arglist); Py_DECREF(arglist); @@ -4511,29 +4681,46 @@ memcpy(s2t, s2, size); } val = PyArray_CompareUCS4(s1t, s2t, MIN(len1,len2)); - if ((val != 0) || (len1 == len2)) goto finish; - if (len2 > len1) {sptr = s2t+len1; val = -1; diff=len2-len1;} - else {sptr = s1t+len2; val = 1; diff=len1-len2;} + if ((val != 0) || (len1 == len2)) { + goto finish; + } + if (len2 > len1) { + sptr = s2t+len1; + val = -1; + diff = len2-len1; + } + else { + sptr = s1t+len2; + val = 1; + diff=len1-len2; + } while (diff--) { - if (*sptr != 0) goto finish; + if (*sptr != 0) { + goto finish; + } sptr++; } val = 0; finish: - if (s1t != s1) free(s1t); - if (s2t != s2) free(s2t); + if (s1t != s1) { + free(s1t); + } + if (s2t != s2) { + free(s2t); + } return val; } -/* Compare s1 and s2 which are not necessarily NULL-terminated. - s1 is of length len1 - s2 is of length len2 - If they are NULL terminated, then stop comparison. -*/ +/* + * Compare s1 and s2 which are not necessarily NULL-terminated. + * s1 is of length len1 + * s2 is of length len2 + * If they are NULL terminated, then stop comparison. + */ static int _mystrncmp(char *s1, char *s2, int len1, int len2) { @@ -4542,11 +4729,23 @@ int diff; val = memcmp(s1, s2, MIN(len1, len2)); - if ((val != 0) || (len1 == len2)) return val; - if (len2 > len1) {sptr = s2+len1; val = -1; diff=len2-len1;} - else {sptr = s1+len2; val = 1; diff=len1-len2;} + if ((val != 0) || (len1 == len2)) { + return val; + } + if (len2 > len1) { + sptr = s2 + len1; + val = -1; + diff = len2 - len1; + } + else { + sptr = s1 + len2; + val = 1; + diff = len1 - len2; + } while (diff--) { - if (*sptr != 0) return val; + if (*sptr != 0) { + return val; + } sptr++; } return 0; /* Only happens if NULLs are everywhere */ @@ -4564,27 +4763,30 @@ static void _rstripw(char *s, int n) { int i; - for(i=n-1; i>=1; i--) /* Never strip to length 0. */ - { - int c = s[i]; - if (!c || isspace(c)) - s[i] = 0; - else - break; + for (i = n - 1; i >= 1; i--) { /* Never strip to length 0. */ + int c = s[i]; + + if (!c || isspace(c)) { + s[i] = 0; } + else { + break; + } + } } static void _unistripw(PyArray_UCS4 *s, int n) { int i; - for(i=n-1; i>=1; i--) /* Never strip to length 0. */ - { - PyArray_UCS4 c = s[i]; - if (!c || isspace(c)) - s[i] = 0; - else - break; + for (i = n - 1; i >= 1; i--) { /* Never strip to length 0. */ + PyArray_UCS4 c = s[i]; + if (!c || isspace(c)) { + s[i] = 0; } + else { + break; + } + } } @@ -4723,8 +4925,7 @@ _loop(>=) break; default: - PyErr_SetString(PyExc_RuntimeError, - "bad comparison operator"); + PyErr_SetString(PyExc_RuntimeError, "bad comparison operator"); return -1; } return 0; @@ -4746,7 +4947,7 @@ /* Cast arrays to a common type */ if (self->descr->type_num != other->descr->type_num) { PyObject *new; - if (self->descr->type_num == PyArray_STRING && \ + if (self->descr->type_num == PyArray_STRING && other->descr->type_num == PyArray_UNICODE) { Py_INCREF(other->descr); new = PyArray_FromAny((PyObject *)self, other->descr, @@ -4757,7 +4958,7 @@ Py_INCREF(other); self = (PyArrayObject *)new; } - else if (self->descr->type_num == PyArray_UNICODE && \ + else if (self->descr->type_num == PyArray_UNICODE && other->descr->type_num == PyArray_STRING) { Py_INCREF(self->descr); new = PyArray_FromAny((PyObject *)other, self->descr, @@ -4799,12 +5000,10 @@ } if (self->descr->type_num == PyArray_UNICODE) { - val = _compare_strings(result, mit, cmp_op, _myunincmp, - rstrip); + val = _compare_strings(result, mit, cmp_op, _myunincmp, rstrip); } else { - val = _compare_strings(result, mit, cmp_op, _mystrncmp, - rstrip); + val = _compare_strings(result, mit, cmp_op, _mystrncmp, rstrip); } if (val < 0) { @@ -4816,16 +5015,16 @@ return result; } -/* VOID-type arrays can only be compared equal and not-equal - in which case the fields are all compared by extracting the fields - and testing one at a time... - equality testing is performed using logical_ands on all the fields. - in-equality testing is performed using logical_ors on all the fields. - - VOID-type arrays without fields are compared for equality by comparing their - memory at each location directly (using string-code). -*/ - +/* + * VOID-type arrays can only be compared equal and not-equal + * in which case the fields are all compared by extracting the fields + * and testing one at a time... + * equality testing is performed using logical_ands on all the fields. + * in-equality testing is performed using logical_ors on all the fields. + * + * VOID-type arrays without fields are compared for equality by comparing their + * memory at each location directly (using string-code). + */ static PyObject *array_richcompare(PyArrayObject *, PyObject *, int); @@ -4838,21 +5037,23 @@ return NULL; } if (PyArray_HASFIELDS(self)) { - PyObject *res=NULL, *temp, *a, *b; + PyObject *res = NULL, *temp, *a, *b; PyObject *key, *value, *temp2; PyObject *op; - Py_ssize_t pos=0; + Py_ssize_t pos = 0; op = (cmp_op == Py_EQ ? n_ops.logical_and : n_ops.logical_or); while (PyDict_Next(self->descr->fields, &pos, &key, &value)) { - if NPY_TITLE_KEY(key, value) continue; + if NPY_TITLE_KEY(key, value) { + continue; + } a = PyArray_EnsureAnyArray(array_subscript(self, key)); - if (a==NULL) { + if (a == NULL) { Py_XDECREF(res); return NULL; } b = array_subscript(other, key); - if (b==NULL) { + if (b == NULL) { Py_XDECREF(res); Py_DECREF(a); return NULL; @@ -4883,8 +5084,10 @@ return res; } else { - /* compare as a string */ - /* assumes self and other have same descr->type */ + /* + * compare as a string. Assumes self and + * other have same descr->type + */ return _strings_richcompare(self, other, cmp_op, 0); } } @@ -4895,15 +5098,14 @@ PyObject *array_other, *result = NULL; int typenum; - switch (cmp_op) - { + switch (cmp_op) { case Py_LT: result = PyArray_GenericBinaryFunction(self, other, - n_ops.less); + n_ops.less); break; case Py_LE: result = PyArray_GenericBinaryFunction(self, other, - n_ops.less_equal); + n_ops.less_equal); break; case Py_EQ: if (other == Py_None) { @@ -4917,15 +5119,14 @@ typenum = PyArray_NOTYPE; } array_other = PyArray_FromObject(other, - typenum, 0, 0); - /* If not successful, then return False - This fixes code that used to - allow equality comparisons between arrays - and other objects which would give a result - of False - */ - if ((array_other == NULL) || \ - (array_other == Py_None)) { + typenum, 0, 0); + /* + * If not successful, then return False. This fixes code + * that used to allow equality comparisons between arrays + * and other objects which would give a result of False. + */ + if ((array_other == NULL) || + (array_other == Py_None)) { Py_XDECREF(array_other); PyErr_Clear(); Py_INCREF(Py_False); @@ -4937,16 +5138,17 @@ array_other = other; } result = PyArray_GenericBinaryFunction(self, - array_other, - n_ops.equal); + array_other, + n_ops.equal); if ((result == Py_NotImplemented) && - (self->descr->type_num == PyArray_VOID)) { + (self->descr->type_num == PyArray_VOID)) { int _res; - _res = PyObject_RichCompareBool \ - ((PyObject *)self->descr, - (PyObject *)\ - PyArray_DESCR(array_other), - Py_EQ); + + _res = PyObject_RichCompareBool + ((PyObject *)self->descr, + (PyObject *)\ + PyArray_DESCR(array_other), + Py_EQ); if (_res < 0) { Py_DECREF(result); Py_DECREF(array_other); @@ -4954,18 +5156,19 @@ } if (_res) { Py_DECREF(result); - result = _void_compare\ - (self, - (PyArrayObject *)array_other, - cmp_op); + result = _void_compare + (self, + (PyArrayObject *)array_other, + cmp_op); Py_DECREF(array_other); } return result; } - /* If the comparison results in NULL, then the - two array objects can not be compared together so - return zero - */ + /* + * If the comparison results in NULL, then the + * two array objects can not be compared together so + * return zero + */ Py_DECREF(array_other); if (result == NULL) { PyErr_Clear(); @@ -4984,14 +5187,13 @@ if (typenum != PyArray_OBJECT) { typenum = PyArray_NOTYPE; } - array_other = PyArray_FromObject(other, - typenum, 0, 0); - /* If not successful, then objects cannot be - compared and cannot be equal, therefore, - return True; - */ - if ((array_other == NULL) || \ - (array_other == Py_None)) { + array_other = PyArray_FromObject(other, typenum, 0, 0); + /* + * If not successful, then objects cannot be + * compared and cannot be equal, therefore, + * return True; + */ + if ((array_other == NULL) || (array_other == Py_None)) { Py_XDECREF(array_other); PyErr_Clear(); Py_INCREF(Py_True); @@ -5003,16 +5205,17 @@ array_other = other; } result = PyArray_GenericBinaryFunction(self, - array_other, - n_ops.not_equal); + array_other, + n_ops.not_equal); if ((result == Py_NotImplemented) && - (self->descr->type_num == PyArray_VOID)) { + (self->descr->type_num == PyArray_VOID)) { int _res; - _res = PyObject_RichCompareBool\ - ((PyObject *)self->descr, - (PyObject *)\ - PyArray_DESCR(array_other), - Py_EQ); + + _res = PyObject_RichCompareBool( + (PyObject *)self->descr, + (PyObject *) + PyArray_DESCR(array_other), + Py_EQ); if (_res < 0) { Py_DECREF(result); Py_DECREF(array_other); @@ -5020,10 +5223,10 @@ } if (_res) { Py_DECREF(result); - result = _void_compare\ - (self, - (PyArrayObject *)array_other, - cmp_op); + result = _void_compare( + self, + (PyArrayObject *)array_other, + cmp_op); Py_DECREF(array_other); } return result; @@ -5038,19 +5241,21 @@ break; case Py_GT: result = PyArray_GenericBinaryFunction(self, other, - n_ops.greater); + n_ops.greater); break; case Py_GE: result = PyArray_GenericBinaryFunction(self, other, - n_ops.greater_equal); + n_ops.greater_equal); break; default: result = Py_NotImplemented; Py_INCREF(result); - } + } if (result == Py_NotImplemented) { /* Try to handle string comparisons */ - if (self->descr->type_num == PyArray_OBJECT) return result; + if (self->descr->type_num == PyArray_OBJECT) { + return result; + } array_other = PyArray_FromObject(other,PyArray_NOTYPE, 0, 0); if (PyArray_ISSTRING(self) && PyArray_ISSTRING(array_other)) { Py_DECREF(result); @@ -5075,7 +5280,10 @@ if ((*axis >= MAX_DIMS) || (n==0)) { if (n != 1) { temp1 = PyArray_Ravel(arr,0); - if (temp1 == NULL) {*axis=0; return NULL;} + if (temp1 == NULL) { + *axis = 0; + return NULL; + } *axis = PyArray_NDIM(temp1)-1; } else { @@ -5083,7 +5291,9 @@ Py_INCREF(temp1); *axis = 0; } - if (!flags) return temp1; + if (!flags) { + return temp1; + } } else { temp1 = (PyObject *)arr; @@ -5093,13 +5303,17 @@ temp2 = PyArray_CheckFromAny((PyObject *)temp1, NULL, 0, 0, flags, NULL); Py_DECREF(temp1); - if (temp2 == NULL) return NULL; + if (temp2 == NULL) { + return NULL; + } } else { temp2 = (PyObject *)temp1; } n = PyArray_NDIM(temp2); - if (*axis < 0) *axis += n; + if (*axis < 0) { + *axis += n; + } if ((*axis < 0) || (*axis >= n)) { PyErr_Format(PyExc_ValueError, "axis(=%d) out of bounds", *axis); @@ -5122,8 +5336,11 @@ { int i; PyObject *intTuple = PyTuple_New(len); - if (!intTuple) goto fail; - for(i=0; i= SIZEOF_INTP - if (!(op = PyNumber_Int(seq))) return -1; + if (!(op = PyNumber_Int(seq))) { + return -1; + } #else - if (!(op = PyNumber_Long(seq))) return -1; + if (!(op = PyNumber_Long(seq))) { + return -1; + } #endif nd = 1; #if SIZEOF_LONG >= SIZEOF_INTP @@ -5167,17 +5391,22 @@ vals[0] = (intp ) PyLong_AsLongLong(op); #endif Py_DECREF(op); - } else { - for(i=0; i < MIN(nd,maxvals); i++) { + } + else { + for (i = 0; i < MIN(nd,maxvals); i++) { op = PySequence_GetItem(seq, i); - if (op == NULL) return -1; + if (op == NULL) { + return -1; + } #if SIZEOF_LONG >= SIZEOF_INTP vals[i]=(intp )PyInt_AsLong(op); #else vals[i]=(intp )PyLong_AsLongLong(op); #endif Py_DECREF(op); - if(PyErr_Occurred()) return -1; + if(PyErr_Occurred()) { + return -1; + } } } return nd; @@ -5185,10 +5414,12 @@ -/* Check whether the given array is stored contiguously (row-wise) in - memory. */ - -/* 0-strided arrays are not contiguous (even if dimension == 1) */ +/* + * Check whether the given array is stored contiguously + * (row-wise) in memory. + * + * 0-strided arrays are not contiguous (even if dimension == 1) + */ static int _IsContiguous(PyArrayObject *ap) { @@ -5196,15 +5427,22 @@ register intp dim; register int i; - if (ap->nd == 0) return 1; + if (ap->nd == 0) { + return 1; + } sd = ap->descr->elsize; - if (ap->nd == 1) return (ap->dimensions[0] == 1 || \ - sd == ap->strides[0]); - for(i = ap->nd-1; i >= 0; --i) { + if (ap->nd == 1) { + return ap->dimensions[0] == 1 || sd == ap->strides[0]; + } + for (i = ap->nd - 1; i >= 0; --i) { dim = ap->dimensions[i]; /* contiguous by definition */ - if (dim == 0) return 1; - if (ap->strides[i] != sd) return 0; + if (dim == 0) { + return 1; + } + if (ap->strides[i] != sd) { + return 0; + } sd *= dim; } return 1; @@ -5219,15 +5457,22 @@ register intp dim; register int i; - if (ap->nd == 0) return 1; + if (ap->nd == 0) { + return 1; + } sd = ap->descr->elsize; - if (ap->nd == 1) return (ap->dimensions[0] == 1 || \ - sd == ap->strides[0]); - for(i=0; i< ap->nd; ++i) { + if (ap->nd == 1) { + return ap->dimensions[0] == 1 || sd == ap->strides[0]; + } + for (i = 0; i < ap->nd; ++i) { dim = ap->dimensions[i]; /* fortran contiguous by definition */ - if (dim == 0) return 1; - if (ap->strides[i] != sd) return 0; + if (dim == 0) { + return 1; + } + if (ap->strides[i] != sd) { + return 0; + } sd *= dim; } return 1; @@ -5236,20 +5481,22 @@ static int _IsAligned(PyArrayObject *ap) { - int i, alignment, aligned=1; + int i, alignment, aligned = 1; intp ptr; int type = ap->descr->type_num; - if ((type == PyArray_STRING) || (type == PyArray_VOID)) + if ((type == PyArray_STRING) || (type == PyArray_VOID)) { return 1; - + } alignment = ap->descr->alignment; - if (alignment == 1) return 1; - + if (alignment == 1) { + return 1; + } ptr = (intp) ap->data; aligned = (ptr % alignment) == 0; - for(i=0; i nd; i++) + for (i = 0; i < ap->nd; i++) { aligned &= ((ap->strides[i] % alignment) == 0); + } return aligned != 0; } @@ -5261,31 +5508,37 @@ Py_ssize_t n; /* If we own our own data, then no-problem */ - if ((base == NULL) || (ap->flags & OWNDATA)) return TRUE; + if ((base == NULL) || (ap->flags & OWNDATA)) { + return TRUE; + } + /* + * Get to the final base object + * If it is a writeable array, then return TRUE + * If we can find an array object + * or a writeable buffer object as the final base object + * or a string object (for pickling support memory savings). + * - this last could be removed if a proper pickleable + * buffer was added to Python. + */ - /* Get to the final base object - If it is a writeable array, then return TRUE - If we can find an array object - or a writeable buffer object as the final base object - or a string object (for pickling support memory savings). - - this last could be removed if a proper pickleable - buffer was added to Python. - */ - while(PyArray_Check(base)) { - if (PyArray_CHKFLAGS(base, OWNDATA)) + if (PyArray_CHKFLAGS(base, OWNDATA)) { return (Bool) (PyArray_ISWRITEABLE(base)); + } base = PyArray_BASE(base); } - /* here so pickle support works seamlessly - and unpickled array can be set and reset writeable - -- could be abused -- */ - if PyString_Check(base) return TRUE; - - if (PyObject_AsWriteBuffer(base, &dummy, &n) < 0) + /* + * here so pickle support works seamlessly + * and unpickled array can be set and reset writeable + * -- could be abused -- + */ + if PyString_Check(base) { + return TRUE; + } + if (PyObject_AsWriteBuffer(base, &dummy, &n) < 0) { return FALSE; - + } return TRUE; } @@ -5295,20 +5548,21 @@ static int PyArray_ElementStrides(PyObject *arr) { - register int itemsize = PyArray_ITEMSIZE(arr); - register int i, N=PyArray_NDIM(arr); - register intp *strides = PyArray_STRIDES(arr); + int itemsize = PyArray_ITEMSIZE(arr); + int i, N = PyArray_NDIM(arr); + intp *strides = PyArray_STRIDES(arr); - for(i=0; iflags |= FORTRAN; - if (ret->nd > 1) ret->flags &= ~CONTIGUOUS; + if (ret->nd > 1) { + ret->flags &= ~CONTIGUOUS; + } } - else ret->flags &= ~FORTRAN; + else { + ret->flags &= ~FORTRAN; + } } if (flagmask & CONTIGUOUS) { if (_IsContiguous(ret)) { ret->flags |= CONTIGUOUS; - if (ret->nd > 1) ret->flags &= ~FORTRAN; + if (ret->nd > 1) { + ret->flags &= ~FORTRAN; + } } - else ret->flags &= ~CONTIGUOUS; + else { + ret->flags &= ~CONTIGUOUS; + } } if (flagmask & ALIGNED) { - if (_IsAligned(ret)) ret->flags |= ALIGNED; - else ret->flags &= ~ALIGNED; + if (_IsAligned(ret)) { + ret->flags |= ALIGNED; + } + else { + ret->flags &= ~ALIGNED; + } } - /* This is not checked by default WRITEABLE is not - part of UPDATE_ALL */ + /* + * This is not checked by default WRITEABLE is not + * part of UPDATE_ALL + */ if (flagmask & WRITEABLE) { - if (_IsWriteable(ret)) ret->flags |= WRITEABLE; - else ret->flags &= ~WRITEABLE; + if (_IsWriteable(ret)) { + ret->flags |= WRITEABLE; + } + else { + ret->flags &= ~WRITEABLE; + } } return; } From numpy-svn at scipy.org Wed Feb 18 11:56:50 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Wed, 18 Feb 2009 10:56:50 -0600 (CST) Subject: [Numpy-svn] r6374 - branches Message-ID: <20090218165650.6F843C7C024@scipy.org> Author: cdavid Date: 2009-02-18 10:56:39 -0600 (Wed, 18 Feb 2009) New Revision: 6374 Added: branches/coremath/ Log: Start branch for separate core math library Copied: branches/coremath (from rev 6373, trunk) From numpy-svn at scipy.org Wed Feb 18 12:20:56 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Wed, 18 Feb 2009 11:20:56 -0600 (CST) Subject: [Numpy-svn] r6375 - branches/coremath/numpy/core Message-ID: <20090218172056.548A1C7C010@scipy.org> Author: cdavid Date: 2009-02-18 11:20:50 -0600 (Wed, 18 Feb 2009) New Revision: 6375 Modified: branches/coremath/numpy/core/setup.py Log: Check Python.h through compilation instead of looking for it on the FS + do not add python include, since it is already included by distutils. Modified: branches/coremath/numpy/core/setup.py =================================================================== --- branches/coremath/numpy/core/setup.py 2009-02-18 16:56:39 UTC (rev 6374) +++ branches/coremath/numpy/core/setup.py 2009-02-18 17:20:50 UTC (rev 6375) @@ -137,16 +137,12 @@ if newer(__file__,target): config_cmd = config.get_config_cmd() log.info('Generating %s',target) + + # Check we have the python header (-dev* packages on Linux) + config_cmd.check_header('Python.h') + tc = generate_testcode(target) - from distutils import sysconfig - python_include = sysconfig.get_python_inc() - python_h = join(python_include, 'Python.h') - if not os.path.isfile(python_h): - raise SystemError,\ - "Non-existing %s. Perhaps you need to install"\ - " python-dev|python-devel." % (python_h) - result = config_cmd.try_run(tc,include_dirs=[python_include], - library_dirs = default_lib_dirs) + result = config_cmd.try_run(tc, library_dirs = default_lib_dirs) if not result: raise SystemError,"Failed to test configuration. "\ "See previous error messages for more information." From numpy-svn at scipy.org Wed Feb 18 12:21:17 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Wed, 18 Feb 2009 11:21:17 -0600 (CST) Subject: [Numpy-svn] r6376 - branches/coremath/numpy/core Message-ID: <20090218172117.34432C7C010@scipy.org> Author: cdavid Date: 2009-02-18 11:21:08 -0600 (Wed, 18 Feb 2009) New Revision: 6376 Modified: branches/coremath/numpy/core/setup.py Log: Forgot to raise an exception if Python.h cannot be found. Modified: branches/coremath/numpy/core/setup.py =================================================================== --- branches/coremath/numpy/core/setup.py 2009-02-18 17:20:50 UTC (rev 6375) +++ branches/coremath/numpy/core/setup.py 2009-02-18 17:21:08 UTC (rev 6376) @@ -139,7 +139,11 @@ log.info('Generating %s',target) # Check we have the python header (-dev* packages on Linux) - config_cmd.check_header('Python.h') + result = config_cmd.check_header('Python.h') + if not result: + raise SystemError( + "Cannot compiler 'Python.h'. Perhaps you need to "\ + "install python-dev|python-devel.") tc = generate_testcode(target) result = config_cmd.try_run(tc, library_dirs = default_lib_dirs) From numpy-svn at scipy.org Wed Feb 18 12:22:04 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Wed, 18 Feb 2009 11:22:04 -0600 (CST) Subject: [Numpy-svn] r6377 - branches/coremath/numpy/distutils/command Message-ID: <20090218172204.23D46C7C010@scipy.org> Author: cdavid Date: 2009-02-18 11:21:49 -0600 (Wed, 18 Feb 2009) New Revision: 6377 Modified: branches/coremath/numpy/distutils/command/config.py Log: Add check_type_sizeof function. Modified: branches/coremath/numpy/distutils/command/config.py =================================================================== --- branches/coremath/numpy/distutils/command/config.py 2009-02-18 17:21:08 UTC (rev 6376) +++ branches/coremath/numpy/distutils/command/config.py 2009-02-18 17:21:49 UTC (rev 6377) @@ -11,6 +11,7 @@ from distutils.command.config import LANG_EXT from distutils import log from distutils.file_util import copy_file +from distutils.ccompiler import CompileError, LinkError import distutils from numpy.distutils.exec_command import exec_command from numpy.distutils.mingw32ccompiler import generate_manifest @@ -158,6 +159,81 @@ return self.try_compile(body, headers, include_dirs) + def check_type_size(self, type_name, headers=None, include_dirs=None): + """Check size of a given type.""" + # XXX: should also implement the cross-compiling version (using binary + # search + array indexing, see AC_CHECK_SIZEOF). + self._check_compiler() + + # We declare the functions to avoid warnings with -Wstrict-prototypes + body = r""" +typedef %(type)s _dist_type_sizeof_; + +static long int longval (void) +{ + return (long int) (sizeof (_dist_type_sizeof_)); +} +static unsigned long int ulongval (void) +{ + return (long int) (sizeof (_dist_type_sizeof_)); +} + +#include +#include +int +main (void) +{ + + if (((long int) (sizeof (_dist_type_sizeof_))) < 0) { + long int i = longval (); + if (i != ((long int) (sizeof (_dist_type_sizeof_)))) + return 1; + printf("%%ld\n", i); + } else { + unsigned long int i = ulongval (); + if (i != ((long int) (sizeof (_dist_type_sizeof_)))) + return 1; + printf("%%lu\n", i); + } + + return 0; +} +""" % {'type': type_name} + + # XXX: this should be refactored (same code as get_output) + exitcode, output = 255, '' + size = None + try: + src, obj, exe = self._link(body, headers, include_dirs, + [], [], 'c') + exe = os.path.join('.', exe) + exitstatus, output = exec_command(exe, execute_in='.') + if hasattr(os, 'WEXITSTATUS'): + exitcode = os.WEXITSTATUS(exitstatus) + if os.WIFSIGNALED(exitstatus): + sig = os.WTERMSIG(exitstatus) + log.error('subprocess exited with signal %d' % (sig,)) + if sig == signal.SIGINT: + # control-C + raise KeyboardInterrupt + else: + exitcode = exitstatus + log.info("success!") + + try: + size = int(output) + except ValueError: + log.error("Unexpected output %s" % output) + log.info("failure") + except (CompileError, LinkError): + log.info("failure.") + + self._clean() + if size is not None: + return size + else: + return -1 + def check_func(self, func, headers=None, include_dirs=None, libraries=None, library_dirs=None, From numpy-svn at scipy.org Wed Feb 18 12:22:37 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Wed, 18 Feb 2009 11:22:37 -0600 (CST) Subject: [Numpy-svn] r6378 - branches/coremath/numpy/core Message-ID: <20090218172237.509B7C7C010@scipy.org> Author: cdavid Date: 2009-02-18 11:22:31 -0600 (Wed, 18 Feb 2009) New Revision: 6378 Modified: branches/coremath/numpy/core/setup.py Log: Be explicit about sizeof checks instead of using one big .C file. Modified: branches/coremath/numpy/core/setup.py =================================================================== --- branches/coremath/numpy/core/setup.py 2009-02-18 17:21:49 UTC (rev 6377) +++ branches/coremath/numpy/core/setup.py 2009-02-18 17:22:31 UTC (rev 6378) @@ -130,6 +130,7 @@ header_dir = 'include/numpy' # this is relative to config.path_in_package def generate_config_h(ext, build_dir): + sizeofs = {} target = join(build_dir,header_dir,'config.h') dir = os.path.dirname(target) if not os.path.exists(dir): @@ -145,14 +146,31 @@ "Cannot compiler 'Python.h'. Perhaps you need to "\ "install python-dev|python-devel.") - tc = generate_testcode(target) - result = config_cmd.try_run(tc, library_dirs = default_lib_dirs) - if not result: - raise SystemError,"Failed to test configuration. "\ - "See previous error messages for more information." + # Check basic types sizes + for type in ('short', 'int', 'long', 'float', 'double', 'long double'): + res = config_cmd.check_type_size(type) + if res >= 0: + sizeofs[type] = res + for type in ('Py_intptr_t',): + res = config_cmd.check_type_size(type, headers=["Python.h"]) + if res >= 0: + sizeofs[type] = res + + # We check declaration AND type because that's how distutils does it. + if config_cmd.check_decl('PY_LONG_LONG', headers=['Python.h']): + st = config_cmd.check_type_size('PY_LONG_LONG', headers=['Python.h']) + assert not st == 0 + sizeofs['PY_LONG_LONG'] = st + + if not config_cmd.check_decl('CHAR_BIT', headers=['Python.h']): + raise RuntimeError( + "Config wo CHAR_BIT is not supported"\ + ", please contact the maintainers") + moredefs = [] - # + + # Testing the C math library mathlibs = [] tc = testcode_mathlib() mathlibs_choices = [[],['m'],['cpml']] From numpy-svn at scipy.org Wed Feb 18 12:22:59 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Wed, 18 Feb 2009 11:22:59 -0600 (CST) Subject: [Numpy-svn] r6379 - branches/coremath/numpy/core Message-ID: <20090218172259.1ED5DC7C010@scipy.org> Author: cdavid Date: 2009-02-18 11:22:51 -0600 (Wed, 18 Feb 2009) New Revision: 6379 Modified: branches/coremath/numpy/core/setup.py Log: Add sizeofs defines to moredefs so that they end up in config.h. Modified: branches/coremath/numpy/core/setup.py =================================================================== --- branches/coremath/numpy/core/setup.py 2009-02-18 17:22:31 UTC (rev 6378) +++ branches/coremath/numpy/core/setup.py 2009-02-18 17:22:51 UTC (rev 6379) @@ -112,6 +112,9 @@ if st: moredefs.append(name_to_defsymb("decl_%s" % f)) +def sym2def(symbol): + define = symbol.replace(' ', '_') + return define.upper() def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration,dot_join @@ -130,13 +133,14 @@ header_dir = 'include/numpy' # this is relative to config.path_in_package def generate_config_h(ext, build_dir): - sizeofs = {} target = join(build_dir,header_dir,'config.h') dir = os.path.dirname(target) if not os.path.exists(dir): os.makedirs(dir) if newer(__file__,target): config_cmd = config.get_config_cmd() + moredefs = [] + log.info('Generating %s',target) # Check we have the python header (-dev* packages on Linux) @@ -150,26 +154,24 @@ for type in ('short', 'int', 'long', 'float', 'double', 'long double'): res = config_cmd.check_type_size(type) if res >= 0: - sizeofs[type] = res + moredefs.append(('SIZEOF_%s' % sym2def(type), '%d' % res)) for type in ('Py_intptr_t',): res = config_cmd.check_type_size(type, headers=["Python.h"]) if res >= 0: - sizeofs[type] = res + moredefs.append(('SIZEOF_%s' % sym2def(type), '%d' % res)) # We check declaration AND type because that's how distutils does it. if config_cmd.check_decl('PY_LONG_LONG', headers=['Python.h']): st = config_cmd.check_type_size('PY_LONG_LONG', headers=['Python.h']) assert not st == 0 - sizeofs['PY_LONG_LONG'] = st + moredefs.append(('SIZEOF_%s' % sym2def('PY_LONG_LONG'), '%d' % st)) if not config_cmd.check_decl('CHAR_BIT', headers=['Python.h']): raise RuntimeError( "Config wo CHAR_BIT is not supported"\ ", please contact the maintainers") - moredefs = [] - # Testing the C math library mathlibs = [] tc = testcode_mathlib() @@ -190,9 +192,11 @@ check_math_capabilities(config_cmd, moredefs, mathlibs) + # Signal test if is_npy_no_signal(): moredefs.append('__NPY_PRIVATE_NO_SIGNAL') + # Distutils hack on AMD64 on windows if sys.platform=='win32' or os.name=='nt': from numpy.distutils.misc_util import get_build_architecture a = get_build_architecture() From numpy-svn at scipy.org Wed Feb 18 12:23:22 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Wed, 18 Feb 2009 11:23:22 -0600 (CST) Subject: [Numpy-svn] r6380 - branches/coremath/numpy/core Message-ID: <20090218172322.7BAACC7C010@scipy.org> Author: cdavid Date: 2009-02-18 11:23:16 -0600 (Wed, 18 Feb 2009) New Revision: 6380 Modified: branches/coremath/numpy/core/setup.py Log: Put all sizeofs checks ending up in config.h in a separate function. Modified: branches/coremath/numpy/core/setup.py =================================================================== --- branches/coremath/numpy/core/setup.py 2009-02-18 17:22:51 UTC (rev 6379) +++ branches/coremath/numpy/core/setup.py 2009-02-18 17:23:16 UTC (rev 6380) @@ -112,6 +112,43 @@ if st: moredefs.append(name_to_defsymb("decl_%s" % f)) +def check_types(config, ext, build_dir): + private_defines = [] + public_defines = [] + + config_cmd = config.get_config_cmd() + + # Check we have the python header (-dev* packages on Linux) + result = config_cmd.check_header('Python.h') + if not result: + raise SystemError( + "Cannot compiler 'Python.h'. Perhaps you need to "\ + "install python-dev|python-devel.") + + # Check basic types sizes + for type in ('short', 'int', 'long', 'float', 'double', 'long double'): + res = config_cmd.check_type_size(type) + if res >= 0: + private_defines.append(('SIZEOF_%s' % sym2def(type), '%d' % res)) + + for type in ('Py_intptr_t',): + res = config_cmd.check_type_size(type, headers=["Python.h"]) + if res >= 0: + private_defines.append(('SIZEOF_%s' % sym2def(type), '%d' % res)) + + # We check declaration AND type because that's how distutils does it. + if config_cmd.check_decl('PY_LONG_LONG', headers=['Python.h']): + st = config_cmd.check_type_size('PY_LONG_LONG', headers=['Python.h']) + assert not st == 0 + private_defines.append(('SIZEOF_%s' % sym2def('PY_LONG_LONG'), '%d' % res)) + + if not config_cmd.check_decl('CHAR_BIT', headers=['Python.h']): + raise RuntimeError( + "Config wo CHAR_BIT is not supported"\ + ", please contact the maintainers") + + return private_defines, public_defines + def sym2def(symbol): define = symbol.replace(' ', '_') return define.upper() @@ -139,39 +176,10 @@ os.makedirs(dir) if newer(__file__,target): config_cmd = config.get_config_cmd() - moredefs = [] - log.info('Generating %s',target) - # Check we have the python header (-dev* packages on Linux) - result = config_cmd.check_header('Python.h') - if not result: - raise SystemError( - "Cannot compiler 'Python.h'. Perhaps you need to "\ - "install python-dev|python-devel.") + moredefs, ignored = check_types(config, ext, build_dir) - # Check basic types sizes - for type in ('short', 'int', 'long', 'float', 'double', 'long double'): - res = config_cmd.check_type_size(type) - if res >= 0: - moredefs.append(('SIZEOF_%s' % sym2def(type), '%d' % res)) - - for type in ('Py_intptr_t',): - res = config_cmd.check_type_size(type, headers=["Python.h"]) - if res >= 0: - moredefs.append(('SIZEOF_%s' % sym2def(type), '%d' % res)) - - # We check declaration AND type because that's how distutils does it. - if config_cmd.check_decl('PY_LONG_LONG', headers=['Python.h']): - st = config_cmd.check_type_size('PY_LONG_LONG', headers=['Python.h']) - assert not st == 0 - moredefs.append(('SIZEOF_%s' % sym2def('PY_LONG_LONG'), '%d' % st)) - - if not config_cmd.check_decl('CHAR_BIT', headers=['Python.h']): - raise RuntimeError( - "Config wo CHAR_BIT is not supported"\ - ", please contact the maintainers") - # Testing the C math library mathlibs = [] tc = testcode_mathlib() From numpy-svn at scipy.org Wed Feb 18 12:23:49 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Wed, 18 Feb 2009 11:23:49 -0600 (CST) Subject: [Numpy-svn] r6381 - branches/coremath/numpy/core Message-ID: <20090218172349.A1F27C7C010@scipy.org> Author: cdavid Date: 2009-02-18 11:23:42 -0600 (Wed, 18 Feb 2009) New Revision: 6381 Modified: branches/coremath/numpy/core/setup.py Log: Avoid using dir to avoid shadowing dir builtin. Modified: branches/coremath/numpy/core/setup.py =================================================================== --- branches/coremath/numpy/core/setup.py 2009-02-18 17:23:16 UTC (rev 6380) +++ branches/coremath/numpy/core/setup.py 2009-02-18 17:23:42 UTC (rev 6381) @@ -171,9 +171,9 @@ def generate_config_h(ext, build_dir): target = join(build_dir,header_dir,'config.h') - dir = os.path.dirname(target) - if not os.path.exists(dir): - os.makedirs(dir) + d = os.path.dirname(target) + if not os.path.exists(d): + os.makedirs(d) if newer(__file__,target): config_cmd = config.get_config_cmd() log.info('Generating %s',target) @@ -270,9 +270,9 @@ def generate_numpyconfig_h(ext, build_dir): """Depends on config.h: generate_config_h has to be called before !""" target = join(build_dir,header_dir,'numpyconfig.h') - dir = os.path.dirname(target) - if not os.path.exists(dir): - os.makedirs(dir) + d = os.path.dirname(target) + if not os.path.exists(d): + os.makedirs(d) if newer(__file__,target): config_cmd = config.get_config_cmd() log.info('Generating %s',target) From numpy-svn at scipy.org Wed Feb 18 12:24:12 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Wed, 18 Feb 2009 11:24:12 -0600 (CST) Subject: [Numpy-svn] r6382 - branches/coremath/numpy/core Message-ID: <20090218172412.66469C7C010@scipy.org> Author: cdavid Date: 2009-02-18 11:24:03 -0600 (Wed, 18 Feb 2009) New Revision: 6382 Modified: branches/coremath/numpy/core/setup.py Log: Put mathlib checks in separate function. Modified: branches/coremath/numpy/core/setup.py =================================================================== --- branches/coremath/numpy/core/setup.py 2009-02-18 17:23:42 UTC (rev 6381) +++ branches/coremath/numpy/core/setup.py 2009-02-18 17:24:03 UTC (rev 6382) @@ -153,6 +153,24 @@ define = symbol.replace(' ', '_') return define.upper() +def check_mathlib(config_cmd): + # Testing the C math library + mathlibs = [] + tc = testcode_mathlib() + mathlibs_choices = [[],['m'],['cpml']] + mathlib = os.environ.get('MATHLIB') + if mathlib: + mathlibs_choices.insert(0,mathlib.split(',')) + for libs in mathlibs_choices: + if config_cmd.try_run(tc,libraries=libs): + mathlibs = libs + break + else: + raise EnvironmentError("math library missing; rerun " + "setup.py after setting the " + "MATHLIB env variable") + return mathlibs + def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration,dot_join from numpy.distutils.system_info import get_info, default_lib_dirs @@ -180,22 +198,7 @@ moredefs, ignored = check_types(config, ext, build_dir) - # Testing the C math library - mathlibs = [] - tc = testcode_mathlib() - mathlibs_choices = [[],['m'],['cpml']] - mathlib = os.environ.get('MATHLIB') - if mathlib: - mathlibs_choices.insert(0,mathlib.split(',')) - for libs in mathlibs_choices: - if config_cmd.try_run(tc,libraries=libs): - mathlibs = libs - break - else: - raise EnvironmentError("math library missing; rerun " - "setup.py after setting the " - "MATHLIB env variable") - ext.libraries.extend(mathlibs) + mathlibs = check_mathlib(config_cmd) moredefs.append(('MATHLIB',','.join(mathlibs))) check_math_capabilities(config_cmd, moredefs, mathlibs) @@ -259,7 +262,11 @@ mathlibs.extend(value.split(',')) target_f.close() - ext.libraries.extend(mathlibs) + # Ugly: this can be called within a library and not an extension, + # in which case there is no libraries attributes (and none is + # needed). + if hasattr(ext, 'libraries'): + ext.libraries.extend(mathlibs) incl_dir = os.path.dirname(target) if incl_dir not in config.numpy_include_dirs: From numpy-svn at scipy.org Wed Feb 18 12:24:30 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Wed, 18 Feb 2009 11:24:30 -0600 (CST) Subject: [Numpy-svn] r6383 - branches/coremath/numpy/core Message-ID: <20090218172430.8DEAEC7C010@scipy.org> Author: cdavid Date: 2009-02-18 11:24:24 -0600 (Wed, 18 Feb 2009) New Revision: 6383 Modified: branches/coremath/numpy/core/setup.py Log: Remote unused code. Modified: branches/coremath/numpy/core/setup.py =================================================================== --- branches/coremath/numpy/core/setup.py 2009-02-18 17:24:03 UTC (rev 6382) +++ branches/coremath/numpy/core/setup.py 2009-02-18 17:24:24 UTC (rev 6383) @@ -285,20 +285,9 @@ log.info('Generating %s',target) testcode = generate_numpyconfig_code(target) - from distutils import sysconfig - python_include = sysconfig.get_python_inc() - python_h = join(python_include, 'Python.h') - if not os.path.isfile(python_h): - raise SystemError,\ - "Non-existing %s. Perhaps you need to install"\ - " python-dev|python-devel." % (python_h) - - config.numpy_include_dirs result = config_cmd.try_run(testcode, - include_dirs = [python_include] + \ - config.numpy_include_dirs, - library_dirs = default_lib_dirs) - + include_dirs=config.numpy_include_dirs, + library_dirs=default_lib_dirs) if not result: raise SystemError,"Failed to generate numpy configuration. "\ "See previous error messages for more information." From numpy-svn at scipy.org Wed Feb 18 12:24:51 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Wed, 18 Feb 2009 11:24:51 -0600 (CST) Subject: [Numpy-svn] r6384 - branches/coremath/numpy/core Message-ID: <20090218172451.DA3DCC7C010@scipy.org> Author: cdavid Date: 2009-02-18 11:24:44 -0600 (Wed, 18 Feb 2009) New Revision: 6384 Modified: branches/coremath/numpy/core/setup.py Log: Remove unused code. Modified: branches/coremath/numpy/core/setup.py =================================================================== --- branches/coremath/numpy/core/setup.py 2009-02-18 17:24:24 UTC (rev 6383) +++ branches/coremath/numpy/core/setup.py 2009-02-18 17:24:44 UTC (rev 6384) @@ -472,70 +472,6 @@ """ import sys -def generate_testcode(target): - if sys.platform == 'win32': - target = target.replace('\\','\\\\') - testcode = [r''' -#include -#include -#include - -int main(int argc, char **argv) -{ - - FILE *fp; - - fp = fopen("'''+target+'''","w"); - '''] - - c_size_test = r''' -#ifndef %(sz)s - fprintf(fp,"#define %(sz)s %%d\n", sizeof(%(type)s)); -#else - fprintf(fp,"/* #define %(sz)s %%d */\n", %(sz)s); -#endif -''' - for sz, t in [('SIZEOF_SHORT', 'short'), - ('SIZEOF_INT', 'int'), - ('SIZEOF_LONG', 'long'), - ('SIZEOF_FLOAT', 'float'), - ('SIZEOF_DOUBLE', 'double'), - ('SIZEOF_LONG_DOUBLE', 'long double'), - ('SIZEOF_PY_INTPTR_T', 'Py_intptr_t'), - ]: - testcode.append(c_size_test % {'sz' : sz, 'type' : t}) - - testcode.append('#ifdef PY_LONG_LONG') - testcode.append(c_size_test % {'sz' : 'SIZEOF_LONG_LONG', - 'type' : 'PY_LONG_LONG'}) - testcode.append(c_size_test % {'sz' : 'SIZEOF_PY_LONG_LONG', - 'type' : 'PY_LONG_LONG'}) - - - testcode.append(r''' -#else - fprintf(fp, "/* PY_LONG_LONG not defined */\n"); -#endif -#ifndef CHAR_BIT - { - unsigned char var = 2; - int i=0; - while (var >= 2) { - var = var << 1; - i++; - } - fprintf(fp,"#define CHAR_BIT %d\n", i+1); - } -#else - fprintf(fp, "/* #define CHAR_BIT %d */\n", CHAR_BIT); -#endif - fclose(fp); - return 0; -} -''') - testcode = '\n'.join(testcode) - return testcode - def generate_numpyconfig_code(target): """Return the source code as a string of the code to generate the numpyconfig header file.""" From numpy-svn at scipy.org Wed Feb 18 12:25:19 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Wed, 18 Feb 2009 11:25:19 -0600 (CST) Subject: [Numpy-svn] r6385 - branches/coremath/numpy/core Message-ID: <20090218172519.88572C7C010@scipy.org> Author: cdavid Date: 2009-02-18 11:25:12 -0600 (Wed, 18 Feb 2009) New Revision: 6385 Modified: branches/coremath/numpy/core/setup.py Log: Remove obsolete defines. Modified: branches/coremath/numpy/core/setup.py =================================================================== --- branches/coremath/numpy/core/setup.py 2009-02-18 17:24:44 UTC (rev 6384) +++ branches/coremath/numpy/core/setup.py 2009-02-18 17:25:12 UTC (rev 6385) @@ -235,16 +235,6 @@ else: target_f.write('#define %s %s\n' % (d[0],d[1])) - # Keep those for backward compatibility for now - target_f.write(""" -#ifdef HAVE_EXPL -#define HAVE_LONGDOUBLE_FUNCS -#endif - -#ifdef HAVE_EXPF -#define HAVE_FLOAT_FUNCS -#endif -""") target_f.close() print 'File:',target target_f = open(target) From numpy-svn at scipy.org Wed Feb 18 12:25:42 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Wed, 18 Feb 2009 11:25:42 -0600 (CST) Subject: [Numpy-svn] r6386 - branches/coremath/numpy/core Message-ID: <20090218172542.470EBC7C010@scipy.org> Author: cdavid Date: 2009-02-18 11:25:35 -0600 (Wed, 18 Feb 2009) New Revision: 6386 Modified: branches/coremath/numpy/core/setup.py Log: Remove dead codepath (we require python 2.4). Modified: branches/coremath/numpy/core/setup.py =================================================================== --- branches/coremath/numpy/core/setup.py 2009-02-18 17:25:12 UTC (rev 6385) +++ branches/coremath/numpy/core/setup.py 2009-02-18 17:25:35 UTC (rev 6386) @@ -215,11 +215,6 @@ if a == 'AMD64': moredefs.append('DISTUTILS_USE_SDK') - if sys.version[:3] < '2.4': - if config_cmd.check_func('strtod', decl=False, - headers=['stdlib.h']): - moredefs.append(('PyOS_ascii_strtod', 'strtod')) - if sys.platform == "win32": from numpy.distutils.misc_util import get_build_architecture # On win32, force long double format string to be 'g', not From numpy-svn at scipy.org Wed Feb 18 12:26:08 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Wed, 18 Feb 2009 11:26:08 -0600 (CST) Subject: [Numpy-svn] r6387 - branches/coremath/numpy/core Message-ID: <20090218172608.AF0C0C7C010@scipy.org> Author: cdavid Date: 2009-02-18 11:25:55 -0600 (Wed, 18 Feb 2009) New Revision: 6387 Modified: branches/coremath/numpy/core/setup.py Log: Put win32 checks in separate function. Modified: branches/coremath/numpy/core/setup.py =================================================================== --- branches/coremath/numpy/core/setup.py 2009-02-18 17:25:35 UTC (rev 6386) +++ branches/coremath/numpy/core/setup.py 2009-02-18 17:25:55 UTC (rev 6387) @@ -35,6 +35,22 @@ nosmp = 0 return nosmp == 1 +def win32_checks(deflist): + from numpy.distutils.misc_util import get_build_architecture + a = get_build_architecture() + + # Distutils hack on AMD64 on windows + print 'BUILD_ARCHITECTURE: %r, os.name=%r, sys.platform=%r' % \ + (a, os.name, sys.platform) + if a == 'AMD64': + deflist.append('DISTUTILS_USE_SDK') + + # On win32, force long double format string to be 'g', not + # 'Lg', since the MS runtime does not support long double whose + # size is > sizeof(double) + if a =="Intel": + deflist.append('FORCE_NO_LONG_DOUBLE_FORMATTING') + def check_math_capabilities(config, moredefs, mathlibs): def check_func(func_name): return config.check_func(func_name, libraries=mathlibs, @@ -196,33 +212,24 @@ config_cmd = config.get_config_cmd() log.info('Generating %s',target) + # Check sizeof moredefs, ignored = check_types(config, ext, build_dir) + # Check math library and C99 math funcs availability mathlibs = check_mathlib(config_cmd) moredefs.append(('MATHLIB',','.join(mathlibs))) check_math_capabilities(config_cmd, moredefs, mathlibs) - # Signal test + # Signal check if is_npy_no_signal(): moredefs.append('__NPY_PRIVATE_NO_SIGNAL') - # Distutils hack on AMD64 on windows + # Windows checks if sys.platform=='win32' or os.name=='nt': - from numpy.distutils.misc_util import get_build_architecture - a = get_build_architecture() - print 'BUILD_ARCHITECTURE: %r, os.name=%r, sys.platform=%r' % (a, os.name, sys.platform) - if a == 'AMD64': - moredefs.append('DISTUTILS_USE_SDK') + win32_checks(moredefs) - if sys.platform == "win32": - from numpy.distutils.misc_util import get_build_architecture - # On win32, force long double format string to be 'g', not - # 'Lg', since the MS runtime does not support long double whose - # size is > sizeof(double) - if get_build_architecture()=="Intel": - moredefs.append('FORCE_NO_LONG_DOUBLE_FORMATTING') - + # Generate the config.h file from moredefs target_f = open(target,'a') for d in moredefs: if isinstance(d,str): From numpy-svn at scipy.org Wed Feb 18 12:26:40 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Wed, 18 Feb 2009 11:26:40 -0600 (CST) Subject: [Numpy-svn] r6388 - branches/coremath/numpy/distutils/command Message-ID: <20090218172640.88CE1C7C010@scipy.org> Author: cdavid Date: 2009-02-18 11:26:30 -0600 (Wed, 18 Feb 2009) New Revision: 6388 Modified: branches/coremath/numpy/distutils/command/build_clib.py Log: Add numpy_include_dirs to C libraries as well. Modified: branches/coremath/numpy/distutils/command/build_clib.py =================================================================== --- branches/coremath/numpy/distutils/command/build_clib.py 2009-02-18 17:25:55 UTC (rev 6387) +++ branches/coremath/numpy/distutils/command/build_clib.py 2009-02-18 17:26:30 UTC (rev 6388) @@ -10,7 +10,8 @@ from numpy.distutils import log from distutils.dep_util import newer_group from numpy.distutils.misc_util import filter_sources, has_f_sources,\ - has_cxx_sources, all_strings, get_lib_source_files, is_sequence + has_cxx_sources, all_strings, get_lib_source_files, is_sequence, \ + get_numpy_include_dirs # Fix Python distutils bug sf #1718574: _l = old_build_clib.user_options @@ -162,8 +163,11 @@ macros = build_info.get('macros') include_dirs = build_info.get('include_dirs') + if include_dirs is None: + include_dirs = [] extra_postargs = build_info.get('extra_compiler_args') or [] + include_dirs.extend(get_numpy_include_dirs()) # where compiled F90 module files are: module_dirs = build_info.get('module_dirs') or [] module_build_dir = os.path.dirname(lib_file) From numpy-svn at scipy.org Wed Feb 18 12:27:39 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Wed, 18 Feb 2009 11:27:39 -0600 (CST) Subject: [Numpy-svn] r6389 - branches/coremath/numpy/core/include/numpy Message-ID: <20090218172739.D458AC7C010@scipy.org> Author: cdavid Date: 2009-02-18 11:27:26 -0600 (Wed, 18 Feb 2009) New Revision: 6389 Added: branches/coremath/numpy/core/include/numpy/npy_common.h Modified: branches/coremath/numpy/core/include/numpy/ndarrayobject.h Log: Put npy type base types declaration into separate header, to be usable by C libraries without pulling the whole ndarray stuff. Modified: branches/coremath/numpy/core/include/numpy/ndarrayobject.h =================================================================== --- branches/coremath/numpy/core/include/numpy/ndarrayobject.h 2009-02-18 17:26:30 UTC (rev 6388) +++ branches/coremath/numpy/core/include/numpy/ndarrayobject.h 2009-02-18 17:27:26 UTC (rev 6389) @@ -56,73 +56,8 @@ */ #define NPY_FEATURE_VERSION 0x00000001 -/* Some platforms don't define bool, long long, or long double. - Handle that here. -*/ +#include "npy_common.h" -#define NPY_BYTE_FMT "hhd" -#define NPY_UBYTE_FMT "hhu" -#define NPY_SHORT_FMT "hd" -#define NPY_USHORT_FMT "hu" -#define NPY_INT_FMT "d" -#define NPY_UINT_FMT "u" -#define NPY_LONG_FMT "ld" -#define NPY_ULONG_FMT "lu" -#define NPY_FLOAT_FMT "g" -#define NPY_DOUBLE_FMT "g" - -#ifdef PY_LONG_LONG -typedef PY_LONG_LONG npy_longlong; -typedef unsigned PY_LONG_LONG npy_ulonglong; -# ifdef _MSC_VER -# define NPY_LONGLONG_FMT "I64d" -# define NPY_ULONGLONG_FMT "I64u" -# define NPY_LONGLONG_SUFFIX(x) (x##i64) -# define NPY_ULONGLONG_SUFFIX(x) (x##Ui64) -# else - /* #define LONGLONG_FMT "lld" Another possible variant - #define ULONGLONG_FMT "llu" - - #define LONGLONG_FMT "qd" -- BSD perhaps? - #define ULONGLONG_FMT "qu" - */ -# define NPY_LONGLONG_FMT "Ld" -# define NPY_ULONGLONG_FMT "Lu" -# define NPY_LONGLONG_SUFFIX(x) (x##LL) -# define NPY_ULONGLONG_SUFFIX(x) (x##ULL) -# endif -#else -typedef long npy_longlong; -typedef unsigned long npy_ulonglong; -# define NPY_LONGLONG_SUFFIX(x) (x##L) -# define NPY_ULONGLONG_SUFFIX(x) (x##UL) -#endif - - -typedef unsigned char npy_bool; -#define NPY_FALSE 0 -#define NPY_TRUE 1 - - -#if NPY_SIZEOF_LONGDOUBLE == NPY_SIZEOF_DOUBLE - typedef double npy_longdouble; - #define NPY_LONGDOUBLE_FMT "g" -#else - typedef long double npy_longdouble; - #define NPY_LONGDOUBLE_FMT "Lg" -#endif - -#ifndef Py_USING_UNICODE -#error Must use Python with unicode enabled. -#endif - - -typedef signed char npy_byte; -typedef unsigned char npy_ubyte; -typedef unsigned short npy_ushort; -typedef unsigned int npy_uint; -typedef unsigned long npy_ulong; - /* These are for completeness */ typedef float npy_float; typedef double npy_double; Added: branches/coremath/numpy/core/include/numpy/npy_common.h =================================================================== --- branches/coremath/numpy/core/include/numpy/npy_common.h 2009-02-18 17:26:30 UTC (rev 6388) +++ branches/coremath/numpy/core/include/numpy/npy_common.h 2009-02-18 17:27:26 UTC (rev 6389) @@ -0,0 +1,74 @@ +#ifndef _NPY_COMMON_H_ +#define _NPY_COMMON_H_ + +/* This is auto-generated */ +#include "numpyconfig.h" + +/* Some platforms don't define bool, long long, or long double. + Handle that here. +*/ + +#define NPY_BYTE_FMT "hhd" +#define NPY_UBYTE_FMT "hhu" +#define NPY_SHORT_FMT "hd" +#define NPY_USHORT_FMT "hu" +#define NPY_INT_FMT "d" +#define NPY_UINT_FMT "u" +#define NPY_LONG_FMT "ld" +#define NPY_ULONG_FMT "lu" +#define NPY_FLOAT_FMT "g" +#define NPY_DOUBLE_FMT "g" + +#ifdef PY_LONG_LONG +typedef PY_LONG_LONG npy_longlong; +typedef unsigned PY_LONG_LONG npy_ulonglong; +# ifdef _MSC_VER +# define NPY_LONGLONG_FMT "I64d" +# define NPY_ULONGLONG_FMT "I64u" +# define NPY_LONGLONG_SUFFIX(x) (x##i64) +# define NPY_ULONGLONG_SUFFIX(x) (x##Ui64) +# else + /* #define LONGLONG_FMT "lld" Another possible variant + #define ULONGLONG_FMT "llu" + + #define LONGLONG_FMT "qd" -- BSD perhaps? + #define ULONGLONG_FMT "qu" + */ +# define NPY_LONGLONG_FMT "Ld" +# define NPY_ULONGLONG_FMT "Lu" +# define NPY_LONGLONG_SUFFIX(x) (x##LL) +# define NPY_ULONGLONG_SUFFIX(x) (x##ULL) +# endif +#else +typedef long npy_longlong; +typedef unsigned long npy_ulonglong; +# define NPY_LONGLONG_SUFFIX(x) (x##L) +# define NPY_ULONGLONG_SUFFIX(x) (x##UL) +#endif + + +typedef unsigned char npy_bool; +#define NPY_FALSE 0 +#define NPY_TRUE 1 + + +#if NPY_SIZEOF_LONGDOUBLE == NPY_SIZEOF_DOUBLE + typedef double npy_longdouble; + #define NPY_LONGDOUBLE_FMT "g" +#else + typedef long double npy_longdouble; + #define NPY_LONGDOUBLE_FMT "Lg" +#endif + +#ifndef Py_USING_UNICODE +#error Must use Python with unicode enabled. +#endif + + +typedef signed char npy_byte; +typedef unsigned char npy_ubyte; +typedef unsigned short npy_ushort; +typedef unsigned int npy_uint; +typedef unsigned long npy_ulong; + +#endif From numpy-svn at scipy.org Wed Feb 18 12:28:14 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Wed, 18 Feb 2009 11:28:14 -0600 (CST) Subject: [Numpy-svn] r6390 - branches/coremath/numpy/core Message-ID: <20090218172814.0579AC7C010@scipy.org> Author: cdavid Date: 2009-02-18 11:28:03 -0600 (Wed, 18 Feb 2009) New Revision: 6390 Modified: branches/coremath/numpy/core/setup.py Log: Check for isinf and co declarations for numpyconfig.h as well, this it will be needed for npy_math. Modified: branches/coremath/numpy/core/setup.py =================================================================== --- branches/coremath/numpy/core/setup.py 2009-02-18 17:27:26 UTC (rev 6389) +++ branches/coremath/numpy/core/setup.py 2009-02-18 17:28:03 UTC (rev 6390) @@ -286,6 +286,18 @@ moredefs = [] + # Normally, isnan and isinf are macro (C99), but some platforms + # only have func, or both func and macro version. Check for macro + # only, and define replacement ones if not found. + # Note: including Python.h is necessary because it modifies some + # math.h definitions + # XXX: we check those twice... should decouple tests from + # config.h/numpyconfig.h to avoid this + for f in ["isnan", "isinf", "signbit", "isfinite"]: + st = config_cmd.check_decl(f, headers = ["Python.h", "math.h"]) + if st: + moredefs.append('NPY_HAVE_DECL_%s' % f.upper()) + # Check wether we can use inttypes (C99) formats if config_cmd.check_decl('PRIdPTR', headers = ['inttypes.h']): moredefs.append(('NPY_USE_C99_FORMATS', 1)) From numpy-svn at scipy.org Wed Feb 18 12:28:39 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Wed, 18 Feb 2009 11:28:39 -0600 (CST) Subject: [Numpy-svn] r6391 - branches/coremath/numpy/core/src Message-ID: <20090218172839.7DC1AC7C010@scipy.org> Author: cdavid Date: 2009-02-18 11:28:30 -0600 (Wed, 18 Feb 2009) New Revision: 6391 Added: branches/coremath/numpy/core/src/npy_math.c.src branches/coremath/numpy/core/src/npy_math.h Log: Start working on core math library itself. Added: branches/coremath/numpy/core/src/npy_math.c.src =================================================================== --- branches/coremath/numpy/core/src/npy_math.c.src 2009-02-18 17:28:03 UTC (rev 6390) +++ branches/coremath/numpy/core/src/npy_math.c.src 2009-02-18 17:28:30 UTC (rev 6391) @@ -0,0 +1,304 @@ +/* + * vim:syntax=c + * A small module to implement missing C99 math capabilities required by numpy + * + * Please keep this independant of python ! Only basic types (npy_longdouble) + * can be used, otherwise, pure C, without any use of Python facilities + * + * How to add a function to this section + * ------------------------------------- + * + * Say you want to add `foo`, these are the steps and the reasons for them. + * + * 1) Add foo to the appropriate list in the configuration system. The + * lists can be found in numpy/core/setup.py lines 63-105. Read the + * comments that come with them, they are very helpful. + * + * 2) The configuration system will define a macro HAVE_FOO if your function + * can be linked from the math library. The result can depend on the + * optimization flags as well as the compiler, so can't be known ahead of + * time. If the function can't be linked, then either it is absent, defined + * as a macro, or is an intrinsic (hardware) function. + * + * i) Undefine any possible macros: + * + * #ifdef foo + * #undef foo + * #endif + * + * ii) Avoid as much as possible to declare any function here. Declaring + * functions is not portable: some platforms define some function inline + * with a non standard identifier, for example, or may put another + * idendifier which changes the calling convention of the function. If you + * really have to, ALWAYS declare it for the one platform you are dealing + * with: + * + * Not ok: + * double exp(double a); + * + * Ok: + * #ifdef SYMBOL_DEFINED_WEIRD_PLATFORM + * double exp(double); + * #endif + */ + +#include +#include + +#include "config.h" +#include "npy_math.h" + +/* + ***************************************************************************** + ** BASIC MATH FUNCTIONS ** + ***************************************************************************** + */ + +/* Original code by Konrad Hinsen. */ +#ifndef HAVE_EXPM1 +double expm1(double x) +{ + double u = exp(x); + if (u == 1.0) { + return x; + } else if (u-1.0 == -1.0) { + return -1; + } else { + return (u-1.0) * x/log(u); + } +} +#endif + +#ifndef HAVE_LOG1P +double log1p(double x) +{ + double u = 1. + x; + if (u == 1.0) { + return x; + } else { + return log(u) * x / (u - 1); + } +} +#endif + +#ifndef HAVE_HYPOT +double hypot(double x, double y) +{ + double yx; + + x = fabs(x); + y = fabs(y); + if (x < y) { + double temp = x; + x = y; + y = temp; + } + if (x == 0.) + return 0.; + else { + yx = y/x; + return x*sqrt(1.+yx*yx); + } +} +#endif + +#ifndef HAVE_ACOSH +double acosh(double x) +{ + return 2*log(sqrt((x+1.0)/2)+sqrt((x-1.0)/2)); +} +#endif + +#ifndef HAVE_ASINH +double asinh(double xx) +{ + double x, d; + int sign; + if (xx < 0.0) { + sign = -1; + x = -xx; + } + else { + sign = 1; + x = xx; + } + if (x > 1e8) { + d = x; + } else { + d = sqrt(x*x + 1); + } + return sign*log1p(x*(1.0 + x/(d+1))); +} +#endif + +#ifndef HAVE_ATANH +double atanh(double x) +{ + if (x > 0) { + return -0.5*log1p(-2.0*x/(1.0 + x)); + } + else { + return 0.5*log1p(2.0*x/(1.0 - x)); + } +} +#endif + +#ifndef HAVE_RINT +double rint(double x) +{ + double y, r; + + y = floor(x); + r = x - y; + + if (r > 0.5) goto rndup; + + /* Round to nearest even */ + if (r==0.5) { + r = y - 2.0*floor(0.5*y); + if (r==1.0) { + rndup: + y+=1.0; + } + } + return y; +} +#endif + +#ifndef HAVE_TRUNC +double trunc(double x) +{ + return x < 0 ? ceil(x) : floor(x); +} +#endif + +#ifndef HAVE_EXP2 +#define LOG2 0.69314718055994530943 +double exp2(double x) +{ + return exp(LOG2*x); +} +#undef LOG2 +#endif + +#ifndef HAVE_LOG2 +#define INVLOG2 1.4426950408889634074 +double log2(double x) +{ + return INVLOG2*log(x); +} +#undef INVLOG2 +#endif + +/* + ***************************************************************************** + ** IEEE 754 FPU HANDLING ** + ***************************************************************************** + */ +#if !defined(HAVE_DECL_ISNAN) + # define isnan(x) ((x) != (x)) +#endif + +/* VS 2003 with /Ox optimizes (x)-(x) to 0, which is not IEEE compliant. So we + * force (x) + (-x), which seems to work. */ +#if !defined(HAVE_DECL_ISFINITE) + # define isfinite(x) !isnan((x) + (-x)) +#endif + +#if !defined(HAVE_DECL_ISINF) +#define isinf(x) (!isfinite(x) && !isnan(x)) +#endif + +#if !defined(HAVE_DECL_SIGNBIT) + #include "_signbit.c" + # define signbit(x) \ + (sizeof (x) == sizeof (long double) ? signbit_ld (x) \ + : sizeof (x) == sizeof (double) ? signbit_d (x) \ + : signbit_f (x)) + +static int signbit_f (float x) +{ + return signbit_d((double)x); +} + +static int signbit_ld (long double x) +{ + return signbit_d((double)x); +} +#endif + +/* + * if C99 extensions not available then define dummy functions that use the + * double versions for + * + * sin, cos, tan + * sinh, cosh, tanh, + * fabs, floor, ceil, rint, trunc + * sqrt, log10, log, exp, expm1 + * asin, acos, atan, + * asinh, acosh, atanh + * + * hypot, atan2, pow, fmod, modf + * + * We assume the above are always available in their double versions. + * + * NOTE: some facilities may be available as macro only instead of functions. + * For simplicity, we define our own functions and undef the macros. We could + * instead test for the macro, but I am lazy to do that for now. + */ + +/**begin repeat + * #type = npy_longdouble, float# + * #TYPE = NPY_LONGDOUBLE, FLOAT# + * #c = l,f# + * #C = L,F# + */ + +/**begin repeat1 + * #kind = sin,cos,tan,sinh,cosh,tanh,fabs,floor,ceil,rint,trunc,sqrt,log10, + * log,exp,expm1,asin,acos,atan,asinh,acosh,atanh,log1p,exp2,log2# + * #KIND = SIN,COS,TAN,SINH,COSH,TANH,FABS,FLOOR,CEIL,RINT,TRUNC,SQRT,LOG10, + * LOG,EXP,EXPM1,ASIN,ACOS,ATAN,ASINH,ACOSH,ATANH,LOG1P,EXP2,LOG2# + */ + +#ifdef @kind@@c@ +#undef @kind@@c@ +#endif +#ifndef HAVE_ at KIND@@C@ + at type@ @kind@@c@(@type@ x) +{ + return (@type@) @kind@((double)x); +} +#endif + +/**end repeat1**/ + +/**begin repeat1 + * #kind = atan2,hypot,pow,fmod# + * #KIND = ATAN2,HYPOT,POW,FMOD# + */ +#ifdef @kind@@c@ +#undef @kind@@c@ +#endif +#ifndef HAVE_ at KIND@@C@ + at type@ @kind@@c@(@type@ x, @type@ y) +{ + return (@type@) @kind@((double)x, (double) y); +} +#endif +/**end repeat1**/ + +#ifdef modf at c@ +#undef modf at c@ +#endif +#ifndef HAVE_MODF at C@ + at type@ modf at c@(@type@ x, @type@ *iptr) +{ + double niptr; + double y = modf((double)x, &niptr); + *iptr = (@type@) niptr; + return (@type@) y; +} +#endif + +/**end repeat**/ Added: branches/coremath/numpy/core/src/npy_math.h =================================================================== --- branches/coremath/numpy/core/src/npy_math.h 2009-02-18 17:28:03 UTC (rev 6390) +++ branches/coremath/numpy/core/src/npy_math.h 2009-02-18 17:28:30 UTC (rev 6391) @@ -0,0 +1,121 @@ +#ifndef __NPY_MATH_C99_H_ +#define __NPY_MATH_C99_H_ + +#include +#include +/* + * C99 double math funcs + */ +double npy_expm1(double x); +double npy_log1p(double x); +double npy_hypot(double x, double y); +double npy_acosh(double x); +double npy_asinh(double xx); +double npy_atanh(double x); +double npy_rint(double x); +double npy_trunc(double x); +double npy_exp2(double x); +double npy_log2(double x); + +/* + * IEEE 754 fpu handling. Those are guaranteed to be macros + */ +#ifndef NPY_HAVE_DECL_ISNAN + #define npy_isnan(x) _npy_isnan((x)) +#else + #define npy_isnan(x) isnan((x)) +#endif + +#ifndef NPY_HAVE_DECL_ISFINITE + #define npy_isfinite(x) _npy_isfinite((x)) +#else + #define npy_isfinite(x) isfinite((x)) +#endif + +#ifndef NPY_HAVE_DECL_ISFINITE + #define npy_isinf(x) _npy_isinf((x)) +#else + #define npy_isinf(x) isinf((x)) +#endif + +#ifndef NPY_HAVE_DECL_SIGNBIT + #define npy_signbit(x) _npy_signbit((x)) +#else + #define npy_signbit(x) signbit((x)) +#endif + +/* + * float C99 math functions + */ + +float npy_sinf(float x); +float npy_cosf(float x); +float npy_tanf(float x); +float npy_sinhf(float x); +float npy_coshf(float x); +float npy_tanhf(float x); +float npy_fabsf(float x); +float npy_floorf(float x); +float npy_ceilf(float x); +float npy_rintf(float x); +float npy_truncf(float x); +float npy_sqrtf(float x); +float npy_log10f(float x); +float npy_logf(float x); +float npy_expf(float x); +float npy_expm1f(float x); +float npy_asinf(float x); +float npy_acosf(float x); +float npy_atanf(float x); +float npy_asinhf(float x); +float npy_acoshf(float x); +float npy_atanhf(float x); +float npy_log1pf(float x); +float npy_exp2f(float x); +float npy_log2f(float x); + +float npy_atan2f(float x, float y); +float npy_hypotf(float x, float y); +float npy_powf(float x, float y); +float npy_fmodf(float x, float y); + +float npy_modff(float x, float* y); + +/* + * float C99 math functions + */ + +npy_longdouble npy_sinl(npy_longdouble x); +npy_longdouble npy_cosl(npy_longdouble x); +npy_longdouble npy_tanl(npy_longdouble x); +npy_longdouble npy_sinhl(npy_longdouble x); +npy_longdouble npy_coshl(npy_longdouble x); +npy_longdouble npy_tanhl(npy_longdouble x); +npy_longdouble npy_fabsl(npy_longdouble x); +npy_longdouble npy_floorl(npy_longdouble x); +npy_longdouble npy_ceill(npy_longdouble x); +npy_longdouble npy_rintl(npy_longdouble x); +npy_longdouble npy_truncl(npy_longdouble x); +npy_longdouble npy_sqrtl(npy_longdouble x); +npy_longdouble npy_log10l(npy_longdouble x); +npy_longdouble npy_logl(npy_longdouble x); +npy_longdouble npy_expl(npy_longdouble x); +npy_longdouble npy_expm1l(npy_longdouble x); +npy_longdouble npy_asinl(npy_longdouble x); +npy_longdouble npy_acosl(npy_longdouble x); +npy_longdouble npy_atanl(npy_longdouble x); +npy_longdouble npy_asinhl(npy_longdouble x); +npy_longdouble npy_acoshl(npy_longdouble x); +npy_longdouble npy_atanhl(npy_longdouble x); +npy_longdouble npy_log1pl(npy_longdouble x); +npy_longdouble npy_exp2l(npy_longdouble x); +npy_longdouble npy_log2l(npy_longdouble x); + +npy_longdouble npy_atan2l(npy_longdouble x, npy_longdouble y); +npy_longdouble npy_hypotl(npy_longdouble x, npy_longdouble y); +npy_longdouble npy_powl(npy_longdouble x, npy_longdouble y); +npy_longdouble npy_fmodl(npy_longdouble x, npy_longdouble y); + +npy_longdouble npy_modfl(npy_longdouble x, npy_longdouble* y); + +#endif From numpy-svn at scipy.org Wed Feb 18 12:29:06 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Wed, 18 Feb 2009 11:29:06 -0600 (CST) Subject: [Numpy-svn] r6392 - branches/coremath/numpy/core Message-ID: <20090218172906.B3BB2C7C024@scipy.org> Author: cdavid Date: 2009-02-18 11:28:57 -0600 (Wed, 18 Feb 2009) New Revision: 6392 Modified: branches/coremath/numpy/core/setup.py Log: Build npymath lib. Modified: branches/coremath/numpy/core/setup.py =================================================================== --- branches/coremath/numpy/core/setup.py 2009-02-18 17:28:30 UTC (rev 6391) +++ branches/coremath/numpy/core/setup.py 2009-02-18 17:28:57 UTC (rev 6392) @@ -383,6 +383,23 @@ if sys.platform == 'cygwin': config.add_data_dir('include/numpy/fenv') + config.add_extension('_sort', + sources=[join('src','_sortmodule.c.src'), + generate_config_h, + generate_numpyconfig_h, + generate_numpy_api, + ], + ) + + # npymath needs the config.h and numpyconfig.h files to be generated, but + # build_clib cannot handle generate_config_h and generate_numpyconfig_h + # (don't ask). Because clib are generated before extensions, we have to + # explicitely add an extension which has generate_config_h and + # generate_numpyconfig_h as sources *before* adding npymath. + config.add_library('npymath', + sources=[join('src', 'npy_math.c.src')], + depends=[]) + config.add_extension('multiarray', sources = [join('src','multiarraymodule.c'), generate_config_h, @@ -414,14 +431,6 @@ ]+deps, ) - config.add_extension('_sort', - sources=[join('src','_sortmodule.c.src'), - generate_config_h, - generate_numpyconfig_h, - generate_numpy_api, - ], - ) - config.add_extension('scalarmath', sources=[join('src','scalarmathmodule.c.src'), generate_config_h, From numpy-svn at scipy.org Wed Feb 18 12:29:40 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Wed, 18 Feb 2009 11:29:40 -0600 (CST) Subject: [Numpy-svn] r6393 - branches/coremath/numpy/core Message-ID: <20090218172940.C055EC7C024@scipy.org> Author: cdavid Date: 2009-02-18 11:29:21 -0600 (Wed, 18 Feb 2009) New Revision: 6393 Modified: branches/coremath/numpy/core/setup.py Log: Typo. Modified: branches/coremath/numpy/core/setup.py =================================================================== --- branches/coremath/numpy/core/setup.py 2009-02-18 17:28:57 UTC (rev 6392) +++ branches/coremath/numpy/core/setup.py 2009-02-18 17:29:21 UTC (rev 6393) @@ -394,7 +394,7 @@ # npymath needs the config.h and numpyconfig.h files to be generated, but # build_clib cannot handle generate_config_h and generate_numpyconfig_h # (don't ask). Because clib are generated before extensions, we have to - # explicitely add an extension which has generate_config_h and + # explicitly add an extension which has generate_config_h and # generate_numpyconfig_h as sources *before* adding npymath. config.add_library('npymath', sources=[join('src', 'npy_math.c.src')], From numpy-svn at scipy.org Wed Feb 18 12:30:15 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Wed, 18 Feb 2009 11:30:15 -0600 (CST) Subject: [Numpy-svn] r6394 - in branches/coremath/numpy/core: include/numpy src Message-ID: <20090218173015.13786C7C024@scipy.org> Author: cdavid Date: 2009-02-18 11:30:03 -0600 (Wed, 18 Feb 2009) New Revision: 6394 Added: branches/coremath/numpy/core/include/numpy/npy_math.h Removed: branches/coremath/numpy/core/src/npy_math.h Modified: branches/coremath/numpy/core/src/npy_math.c.src Log: Move npy_math header to numpy/core include path. Copied: branches/coremath/numpy/core/include/numpy/npy_math.h (from rev 6393, branches/coremath/numpy/core/src/npy_math.h) =================================================================== --- branches/coremath/numpy/core/src/npy_math.h 2009-02-18 17:29:21 UTC (rev 6393) +++ branches/coremath/numpy/core/include/numpy/npy_math.h 2009-02-18 17:30:03 UTC (rev 6394) @@ -0,0 +1,121 @@ +#ifndef __NPY_MATH_C99_H_ +#define __NPY_MATH_C99_H_ + +#include +#include +/* + * C99 double math funcs + */ +double npy_expm1(double x); +double npy_log1p(double x); +double npy_hypot(double x, double y); +double npy_acosh(double x); +double npy_asinh(double xx); +double npy_atanh(double x); +double npy_rint(double x); +double npy_trunc(double x); +double npy_exp2(double x); +double npy_log2(double x); + +/* + * IEEE 754 fpu handling. Those are guaranteed to be macros + */ +#ifndef NPY_HAVE_DECL_ISNAN + #define npy_isnan(x) _npy_isnan((x)) +#else + #define npy_isnan(x) isnan((x)) +#endif + +#ifndef NPY_HAVE_DECL_ISFINITE + #define npy_isfinite(x) _npy_isfinite((x)) +#else + #define npy_isfinite(x) isfinite((x)) +#endif + +#ifndef NPY_HAVE_DECL_ISFINITE + #define npy_isinf(x) _npy_isinf((x)) +#else + #define npy_isinf(x) isinf((x)) +#endif + +#ifndef NPY_HAVE_DECL_SIGNBIT + #define npy_signbit(x) _npy_signbit((x)) +#else + #define npy_signbit(x) signbit((x)) +#endif + +/* + * float C99 math functions + */ + +float npy_sinf(float x); +float npy_cosf(float x); +float npy_tanf(float x); +float npy_sinhf(float x); +float npy_coshf(float x); +float npy_tanhf(float x); +float npy_fabsf(float x); +float npy_floorf(float x); +float npy_ceilf(float x); +float npy_rintf(float x); +float npy_truncf(float x); +float npy_sqrtf(float x); +float npy_log10f(float x); +float npy_logf(float x); +float npy_expf(float x); +float npy_expm1f(float x); +float npy_asinf(float x); +float npy_acosf(float x); +float npy_atanf(float x); +float npy_asinhf(float x); +float npy_acoshf(float x); +float npy_atanhf(float x); +float npy_log1pf(float x); +float npy_exp2f(float x); +float npy_log2f(float x); + +float npy_atan2f(float x, float y); +float npy_hypotf(float x, float y); +float npy_powf(float x, float y); +float npy_fmodf(float x, float y); + +float npy_modff(float x, float* y); + +/* + * float C99 math functions + */ + +npy_longdouble npy_sinl(npy_longdouble x); +npy_longdouble npy_cosl(npy_longdouble x); +npy_longdouble npy_tanl(npy_longdouble x); +npy_longdouble npy_sinhl(npy_longdouble x); +npy_longdouble npy_coshl(npy_longdouble x); +npy_longdouble npy_tanhl(npy_longdouble x); +npy_longdouble npy_fabsl(npy_longdouble x); +npy_longdouble npy_floorl(npy_longdouble x); +npy_longdouble npy_ceill(npy_longdouble x); +npy_longdouble npy_rintl(npy_longdouble x); +npy_longdouble npy_truncl(npy_longdouble x); +npy_longdouble npy_sqrtl(npy_longdouble x); +npy_longdouble npy_log10l(npy_longdouble x); +npy_longdouble npy_logl(npy_longdouble x); +npy_longdouble npy_expl(npy_longdouble x); +npy_longdouble npy_expm1l(npy_longdouble x); +npy_longdouble npy_asinl(npy_longdouble x); +npy_longdouble npy_acosl(npy_longdouble x); +npy_longdouble npy_atanl(npy_longdouble x); +npy_longdouble npy_asinhl(npy_longdouble x); +npy_longdouble npy_acoshl(npy_longdouble x); +npy_longdouble npy_atanhl(npy_longdouble x); +npy_longdouble npy_log1pl(npy_longdouble x); +npy_longdouble npy_exp2l(npy_longdouble x); +npy_longdouble npy_log2l(npy_longdouble x); + +npy_longdouble npy_atan2l(npy_longdouble x, npy_longdouble y); +npy_longdouble npy_hypotl(npy_longdouble x, npy_longdouble y); +npy_longdouble npy_powl(npy_longdouble x, npy_longdouble y); +npy_longdouble npy_fmodl(npy_longdouble x, npy_longdouble y); + +npy_longdouble npy_modfl(npy_longdouble x, npy_longdouble* y); + +#endif Modified: branches/coremath/numpy/core/src/npy_math.c.src =================================================================== --- branches/coremath/numpy/core/src/npy_math.c.src 2009-02-18 17:29:21 UTC (rev 6393) +++ branches/coremath/numpy/core/src/npy_math.c.src 2009-02-18 17:30:03 UTC (rev 6394) @@ -46,7 +46,7 @@ #include #include "config.h" -#include "npy_math.h" +#include "numpy/npy_math.h" /* ***************************************************************************** Deleted: branches/coremath/numpy/core/src/npy_math.h =================================================================== --- branches/coremath/numpy/core/src/npy_math.h 2009-02-18 17:29:21 UTC (rev 6393) +++ branches/coremath/numpy/core/src/npy_math.h 2009-02-18 17:30:03 UTC (rev 6394) @@ -1,121 +0,0 @@ -#ifndef __NPY_MATH_C99_H_ -#define __NPY_MATH_C99_H_ - -#include -#include -/* - * C99 double math funcs - */ -double npy_expm1(double x); -double npy_log1p(double x); -double npy_hypot(double x, double y); -double npy_acosh(double x); -double npy_asinh(double xx); -double npy_atanh(double x); -double npy_rint(double x); -double npy_trunc(double x); -double npy_exp2(double x); -double npy_log2(double x); - -/* - * IEEE 754 fpu handling. Those are guaranteed to be macros - */ -#ifndef NPY_HAVE_DECL_ISNAN - #define npy_isnan(x) _npy_isnan((x)) -#else - #define npy_isnan(x) isnan((x)) -#endif - -#ifndef NPY_HAVE_DECL_ISFINITE - #define npy_isfinite(x) _npy_isfinite((x)) -#else - #define npy_isfinite(x) isfinite((x)) -#endif - -#ifndef NPY_HAVE_DECL_ISFINITE - #define npy_isinf(x) _npy_isinf((x)) -#else - #define npy_isinf(x) isinf((x)) -#endif - -#ifndef NPY_HAVE_DECL_SIGNBIT - #define npy_signbit(x) _npy_signbit((x)) -#else - #define npy_signbit(x) signbit((x)) -#endif - -/* - * float C99 math functions - */ - -float npy_sinf(float x); -float npy_cosf(float x); -float npy_tanf(float x); -float npy_sinhf(float x); -float npy_coshf(float x); -float npy_tanhf(float x); -float npy_fabsf(float x); -float npy_floorf(float x); -float npy_ceilf(float x); -float npy_rintf(float x); -float npy_truncf(float x); -float npy_sqrtf(float x); -float npy_log10f(float x); -float npy_logf(float x); -float npy_expf(float x); -float npy_expm1f(float x); -float npy_asinf(float x); -float npy_acosf(float x); -float npy_atanf(float x); -float npy_asinhf(float x); -float npy_acoshf(float x); -float npy_atanhf(float x); -float npy_log1pf(float x); -float npy_exp2f(float x); -float npy_log2f(float x); - -float npy_atan2f(float x, float y); -float npy_hypotf(float x, float y); -float npy_powf(float x, float y); -float npy_fmodf(float x, float y); - -float npy_modff(float x, float* y); - -/* - * float C99 math functions - */ - -npy_longdouble npy_sinl(npy_longdouble x); -npy_longdouble npy_cosl(npy_longdouble x); -npy_longdouble npy_tanl(npy_longdouble x); -npy_longdouble npy_sinhl(npy_longdouble x); -npy_longdouble npy_coshl(npy_longdouble x); -npy_longdouble npy_tanhl(npy_longdouble x); -npy_longdouble npy_fabsl(npy_longdouble x); -npy_longdouble npy_floorl(npy_longdouble x); -npy_longdouble npy_ceill(npy_longdouble x); -npy_longdouble npy_rintl(npy_longdouble x); -npy_longdouble npy_truncl(npy_longdouble x); -npy_longdouble npy_sqrtl(npy_longdouble x); -npy_longdouble npy_log10l(npy_longdouble x); -npy_longdouble npy_logl(npy_longdouble x); -npy_longdouble npy_expl(npy_longdouble x); -npy_longdouble npy_expm1l(npy_longdouble x); -npy_longdouble npy_asinl(npy_longdouble x); -npy_longdouble npy_acosl(npy_longdouble x); -npy_longdouble npy_atanl(npy_longdouble x); -npy_longdouble npy_asinhl(npy_longdouble x); -npy_longdouble npy_acoshl(npy_longdouble x); -npy_longdouble npy_atanhl(npy_longdouble x); -npy_longdouble npy_log1pl(npy_longdouble x); -npy_longdouble npy_exp2l(npy_longdouble x); -npy_longdouble npy_log2l(npy_longdouble x); - -npy_longdouble npy_atan2l(npy_longdouble x, npy_longdouble y); -npy_longdouble npy_hypotl(npy_longdouble x, npy_longdouble y); -npy_longdouble npy_powl(npy_longdouble x, npy_longdouble y); -npy_longdouble npy_fmodl(npy_longdouble x, npy_longdouble y); - -npy_longdouble npy_modfl(npy_longdouble x, npy_longdouble* y); - -#endif From numpy-svn at scipy.org Wed Feb 18 12:31:07 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Wed, 18 Feb 2009 11:31:07 -0600 (CST) Subject: [Numpy-svn] r6395 - in branches/coremath/numpy/core: . src Message-ID: <20090218173107.9FC40C7C024@scipy.org> Author: cdavid Date: 2009-02-18 11:30:41 -0600 (Wed, 18 Feb 2009) New Revision: 6395 Removed: branches/coremath/numpy/core/src/umath_funcs_c99.inc.src Modified: branches/coremath/numpy/core/setup.py branches/coremath/numpy/core/src/multiarraymodule.c branches/coremath/numpy/core/src/numpyos.c branches/coremath/numpy/core/src/umathmodule.c.src Log: Remove umath_funcs_c99.inc.src. Modified: branches/coremath/numpy/core/setup.py =================================================================== --- branches/coremath/numpy/core/setup.py 2009-02-18 17:30:03 UTC (rev 6394) +++ branches/coremath/numpy/core/setup.py 2009-02-18 17:30:41 UTC (rev 6395) @@ -421,7 +421,6 @@ generate_ufunc_api, join('src','scalartypes.inc.src'), join('src','arraytypes.inc.src'), - join('src','umath_funcs_c99.inc.src'), join('src','umath_funcs.inc.src'), join('src','umath_loops.inc.src'), ], Modified: branches/coremath/numpy/core/src/multiarraymodule.c =================================================================== --- branches/coremath/numpy/core/src/multiarraymodule.c 2009-02-18 17:30:03 UTC (rev 6394) +++ branches/coremath/numpy/core/src/multiarraymodule.c 2009-02-18 17:30:41 UTC (rev 6395) @@ -81,11 +81,6 @@ return NULL; } -/* XXX: We include c99 compat math module here because it is needed for - * numpyos.c (included by arrayobject). This is bad - we should separate - * declaration/implementation and share this in a lib. */ -#include "umath_funcs_c99.inc" - /* Including this file is the only way I know how to declare functions static in each file, and store the pointers from functions in both arrayobject.c and multiarraymodule.c for the C-API Modified: branches/coremath/numpy/core/src/numpyos.c =================================================================== --- branches/coremath/numpy/core/src/numpyos.c 2009-02-18 17:30:03 UTC (rev 6394) +++ branches/coremath/numpy/core/src/numpyos.c 2009-02-18 17:30:41 UTC (rev 6395) @@ -1,6 +1,8 @@ #include #include +#include "numpy/npy_math.h" + /* From the C99 standard, section 7.19.6: The exponent always contains at least two digits, and only as many more digits as necessary to represent the exponent. @@ -249,21 +251,21 @@ const char *format, \ type val, int decimal) \ { \ - if (isfinite(val)) { \ + if (npy_isfinite(val)) { \ if(_check_ascii_format(format)) { \ return NULL; \ } \ PyOS_snprintf(buffer, buf_size, format, (print_type)val); \ return _fix_ascii_format(buffer, buf_size, decimal); \ } \ - else if (isnan(val)){ \ + else if (npy_isnan(val)){ \ if (buf_size < 4) { \ return NULL; \ } \ strcpy(buffer, "nan"); \ } \ else { \ - if (signbit(val)) { \ + if (npy_signbit(val)) { \ if (buf_size < 5) { \ return NULL; \ } \ Deleted: branches/coremath/numpy/core/src/umath_funcs_c99.inc.src =================================================================== --- branches/coremath/numpy/core/src/umath_funcs_c99.inc.src 2009-02-18 17:30:03 UTC (rev 6394) +++ branches/coremath/numpy/core/src/umath_funcs_c99.inc.src 2009-02-18 17:30:41 UTC (rev 6395) @@ -1,304 +0,0 @@ -/* - * vim:syntax=c - * A small module to implement missing C99 math capabilities required by numpy - * - * Please keep this independant of python ! - * - * How to add a function to this section - * ------------------------------------- - * - * Say you want to add `foo`, these are the steps and the reasons for them. - * - * 1) Add foo to the appropriate list in the configuration system. The - * lists can be found in numpy/core/setup.py lines 63-105. Read the - * comments that come with them, they are very helpful. - * - * 2) The configuration system will define a macro HAVE_FOO if your function - * can be linked from the math library. The result can depend on the - * optimization flags as well as the compiler, so can't be known ahead of - * time. If the function can't be linked, then either it is absent, defined - * as a macro, or is an intrinsic (hardware) function. - * - * i) Undefine any possible macros: - * - * #ifdef foo - * #undef foo - * #endif - * - * ii) Avoid as much as possible to declare any function here. Declaring - * functions is not portable: some platforms define some function inline - * with a non standard identifier, for example, or may put another - * idendifier which changes the calling convention of the function. If you - * really have to, ALWAYS declare it for the one platform you are dealing - * with: - * - * Not ok: - * double exp(double a); - * - * Ok: - * #ifdef SYMBOL_DEFINED_WEIRD_PLATFORM - * double exp(double); - * #endif - */ - -/* - ***************************************************************************** - ** DISTRO VOODOO ** - ***************************************************************************** - */ - - -/* - ***************************************************************************** - ** BASIC MATH FUNCTIONS ** - ***************************************************************************** - */ - -/* Original code by Konrad Hinsen. */ -#ifndef HAVE_EXPM1 -double expm1(double x) -{ - double u = exp(x); - if (u == 1.0) { - return x; - } else if (u-1.0 == -1.0) { - return -1; - } else { - return (u-1.0) * x/log(u); - } -} -#endif - -#ifndef HAVE_LOG1P -double log1p(double x) -{ - double u = 1. + x; - if (u == 1.0) { - return x; - } else { - return log(u) * x / (u - 1); - } -} -#endif - -#ifndef HAVE_HYPOT -double hypot(double x, double y) -{ - double yx; - - x = fabs(x); - y = fabs(y); - if (x < y) { - double temp = x; - x = y; - y = temp; - } - if (x == 0.) - return 0.; - else { - yx = y/x; - return x*sqrt(1.+yx*yx); - } -} -#endif - -#ifndef HAVE_ACOSH -double acosh(double x) -{ - return 2*log(sqrt((x+1.0)/2)+sqrt((x-1.0)/2)); -} -#endif - -#ifndef HAVE_ASINH -double asinh(double xx) -{ - double x, d; - int sign; - if (xx < 0.0) { - sign = -1; - x = -xx; - } - else { - sign = 1; - x = xx; - } - if (x > 1e8) { - d = x; - } else { - d = sqrt(x*x + 1); - } - return sign*log1p(x*(1.0 + x/(d+1))); -} -#endif - -#ifndef HAVE_ATANH -double atanh(double x) -{ - if (x > 0) { - return -0.5*log1p(-2.0*x/(1.0 + x)); - } - else { - return 0.5*log1p(2.0*x/(1.0 - x)); - } -} -#endif - -#ifndef HAVE_RINT -double rint(double x) -{ - double y, r; - - y = floor(x); - r = x - y; - - if (r > 0.5) goto rndup; - - /* Round to nearest even */ - if (r==0.5) { - r = y - 2.0*floor(0.5*y); - if (r==1.0) { - rndup: - y+=1.0; - } - } - return y; -} -#endif - -#ifndef HAVE_TRUNC -double trunc(double x) -{ - return x < 0 ? ceil(x) : floor(x); -} -#endif - -#ifndef HAVE_EXP2 -#define LOG2 0.69314718055994530943 -double exp2(double x) -{ - return exp(LOG2*x); -} -#undef LOG2 -#endif - -#ifndef HAVE_LOG2 -#define INVLOG2 1.4426950408889634074 -double log2(double x) -{ - return INVLOG2*log(x); -} -#undef INVLOG2 -#endif - -/* - ***************************************************************************** - ** IEEE 754 FPU HANDLING ** - ***************************************************************************** - */ -#if !defined(HAVE_DECL_ISNAN) - # define isnan(x) ((x) != (x)) -#endif - -/* VS 2003 with /Ox optimizes (x)-(x) to 0, which is not IEEE compliant. So we - * force (x) + (-x), which seems to work. */ -#if !defined(HAVE_DECL_ISFINITE) - # define isfinite(x) !isnan((x) + (-x)) -#endif - -#if !defined(HAVE_DECL_ISINF) -#define isinf(x) (!isfinite(x) && !isnan(x)) -#endif - -#if !defined(HAVE_DECL_SIGNBIT) - #include "_signbit.c" - # define signbit(x) \ - (sizeof (x) == sizeof (long double) ? signbit_ld (x) \ - : sizeof (x) == sizeof (double) ? signbit_d (x) \ - : signbit_f (x)) - -static int signbit_f (float x) -{ - return signbit_d((double)x); -} - -static int signbit_ld (long double x) -{ - return signbit_d((double)x); -} -#endif - -/* - * if C99 extensions not available then define dummy functions that use the - * double versions for - * - * sin, cos, tan - * sinh, cosh, tanh, - * fabs, floor, ceil, rint, trunc - * sqrt, log10, log, exp, expm1 - * asin, acos, atan, - * asinh, acosh, atanh - * - * hypot, atan2, pow, fmod, modf - * - * We assume the above are always available in their double versions. - * - * NOTE: some facilities may be available as macro only instead of functions. - * For simplicity, we define our own functions and undef the macros. We could - * instead test for the macro, but I am lazy to do that for now. - */ - -/**begin repeat - * #type = longdouble, float# - * #TYPE = LONGDOUBLE, FLOAT# - * #c = l,f# - * #C = L,F# - */ - -/**begin repeat1 - * #kind = sin,cos,tan,sinh,cosh,tanh,fabs,floor,ceil,rint,trunc,sqrt,log10, - * log,exp,expm1,asin,acos,atan,asinh,acosh,atanh,log1p,exp2,log2# - * #KIND = SIN,COS,TAN,SINH,COSH,TANH,FABS,FLOOR,CEIL,RINT,TRUNC,SQRT,LOG10, - * LOG,EXP,EXPM1,ASIN,ACOS,ATAN,ASINH,ACOSH,ATANH,LOG1P,EXP2,LOG2# - */ - -#ifdef @kind@@c@ -#undef @kind@@c@ -#endif -#ifndef HAVE_ at KIND@@C@ - at type@ @kind@@c@(@type@ x) -{ - return (@type@) @kind@((double)x); -} -#endif - -/**end repeat1**/ - -/**begin repeat1 - * #kind = atan2,hypot,pow,fmod# - * #KIND = ATAN2,HYPOT,POW,FMOD# - */ -#ifdef @kind@@c@ -#undef @kind@@c@ -#endif -#ifndef HAVE_ at KIND@@C@ - at type@ @kind@@c@(@type@ x, @type@ y) -{ - return (@type@) @kind@((double)x, (double) y); -} -#endif -/**end repeat1**/ - -#ifdef modf at c@ -#undef modf at c@ -#endif -#ifndef HAVE_MODF at C@ - at type@ modf at c@(@type@ x, @type@ *iptr) -{ - double niptr; - double y = modf((double)x, &niptr); - *iptr = (@type@) niptr; - return (@type@) y; -} -#endif - -/**end repeat**/ Modified: branches/coremath/numpy/core/src/umathmodule.c.src =================================================================== --- branches/coremath/numpy/core/src/umathmodule.c.src 2009-02-18 17:30:03 UTC (rev 6394) +++ branches/coremath/numpy/core/src/umathmodule.c.src 2009-02-18 17:30:41 UTC (rev 6395) @@ -23,18 +23,13 @@ #include "abstract.h" #include "config.h" -/* - * Looks like some versions of Python.h do naughty things, so math.h needs - * to come after. - */ -#include +#include "numpy/npy_math.h" /* ***************************************************************************** ** INCLUDE GENERATED CODE ** ***************************************************************************** */ -#include "umath_funcs_c99.inc" #include "umath_funcs.inc" #include "umath_loops.inc" #include "umath_ufunc_object.inc" From numpy-svn at scipy.org Wed Feb 18 12:31:44 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Wed, 18 Feb 2009 11:31:44 -0600 (CST) Subject: [Numpy-svn] r6396 - in branches/coremath/numpy/core: include/numpy src Message-ID: <20090218173144.02A15C7C024@scipy.org> Author: cdavid Date: 2009-02-18 11:31:27 -0600 (Wed, 18 Feb 2009) New Revision: 6396 Modified: branches/coremath/numpy/core/include/numpy/npy_math.h branches/coremath/numpy/core/src/_signbit.c branches/coremath/numpy/core/src/npy_math.c.src Log: Fix nan/inf/co macro when not available on the target platform in npy_math. Modified: branches/coremath/numpy/core/include/numpy/npy_math.h =================================================================== --- branches/coremath/numpy/core/include/numpy/npy_math.h 2009-02-18 17:30:41 UTC (rev 6395) +++ branches/coremath/numpy/core/include/numpy/npy_math.h 2009-02-18 17:31:27 UTC (rev 6396) @@ -21,25 +21,28 @@ * IEEE 754 fpu handling. Those are guaranteed to be macros */ #ifndef NPY_HAVE_DECL_ISNAN - #define npy_isnan(x) _npy_isnan((x)) + #define npy_isnan(x) ((x) != (x)) #else #define npy_isnan(x) isnan((x)) #endif #ifndef NPY_HAVE_DECL_ISFINITE - #define npy_isfinite(x) _npy_isfinite((x)) + #define npy_isfinite(x) !npy_isnan((x) + (-x)) #else #define npy_isfinite(x) isfinite((x)) #endif #ifndef NPY_HAVE_DECL_ISFINITE - #define npy_isinf(x) _npy_isinf((x)) + #define npy_isinf(x) (!npy_isfinite(x) && !npy_isnan(x)) #else #define npy_isinf(x) isinf((x)) #endif #ifndef NPY_HAVE_DECL_SIGNBIT - #define npy_signbit(x) _npy_signbit((x)) + #define npy_signbit(x) \ + (sizeof (x) == sizeof (long double) ? _npy_signbit_ld (x) \ + : sizeof (x) == sizeof (double) ? _npy_signbit_d (x) \ + : _npy_signbit_f (x)) #else #define npy_signbit(x) signbit((x)) #endif Modified: branches/coremath/numpy/core/src/_signbit.c =================================================================== --- branches/coremath/numpy/core/src/_signbit.c 2009-02-18 17:30:41 UTC (rev 6395) +++ branches/coremath/numpy/core/src/_signbit.c 2009-02-18 17:31:27 UTC (rev 6396) @@ -1,7 +1,7 @@ /* Adapted from cephes */ static int -signbit_d(double x) +_npy_signbit_d(double x) { union { Modified: branches/coremath/numpy/core/src/npy_math.c.src =================================================================== --- branches/coremath/numpy/core/src/npy_math.c.src 2009-02-18 17:30:41 UTC (rev 6395) +++ branches/coremath/numpy/core/src/npy_math.c.src 2009-02-18 17:31:27 UTC (rev 6396) @@ -195,35 +195,17 @@ ** IEEE 754 FPU HANDLING ** ***************************************************************************** */ -#if !defined(HAVE_DECL_ISNAN) - # define isnan(x) ((x) != (x)) -#endif - -/* VS 2003 with /Ox optimizes (x)-(x) to 0, which is not IEEE compliant. So we - * force (x) + (-x), which seems to work. */ -#if !defined(HAVE_DECL_ISFINITE) - # define isfinite(x) !isnan((x) + (-x)) -#endif - -#if !defined(HAVE_DECL_ISINF) -#define isinf(x) (!isfinite(x) && !isnan(x)) -#endif - #if !defined(HAVE_DECL_SIGNBIT) - #include "_signbit.c" - # define signbit(x) \ - (sizeof (x) == sizeof (long double) ? signbit_ld (x) \ - : sizeof (x) == sizeof (double) ? signbit_d (x) \ - : signbit_f (x)) +#include "_signbit.c" -static int signbit_f (float x) +static int _npy_signbit_f (float x) { - return signbit_d((double)x); + return npy_signbit_d((double)x); } -static int signbit_ld (long double x) +static int _npy_signbit_ld (long double x) { - return signbit_d((double)x); + return _npy_signbit_d((double)x); } #endif From numpy-svn at scipy.org Wed Feb 18 12:32:09 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Wed, 18 Feb 2009 11:32:09 -0600 (CST) Subject: [Numpy-svn] r6397 - in branches/coremath/numpy/core: . src Message-ID: <20090218173209.EB7C8C7C024@scipy.org> Author: cdavid Date: 2009-02-18 11:32:02 -0600 (Wed, 18 Feb 2009) New Revision: 6397 Modified: branches/coremath/numpy/core/setup.py branches/coremath/numpy/core/src/umath_funcs.inc.src Log: Use npymath instead of C math lib for umath_funcs (break numpy ATM). Modified: branches/coremath/numpy/core/setup.py =================================================================== --- branches/coremath/numpy/core/setup.py 2009-02-18 17:31:27 UTC (rev 6396) +++ branches/coremath/numpy/core/setup.py 2009-02-18 17:32:02 UTC (rev 6397) @@ -411,6 +411,7 @@ join('*.py') ], depends = deps, + libraries=['npymath'], ) config.add_extension('umath', @@ -428,6 +429,7 @@ generate_umath_py, join(codegen_dir,'generate_ufunc_api.py'), ]+deps, + libraries=['npymath'], ) config.add_extension('scalarmath', Modified: branches/coremath/numpy/core/src/umath_funcs.inc.src =================================================================== --- branches/coremath/numpy/core/src/umath_funcs.inc.src 2009-02-18 17:31:27 UTC (rev 6396) +++ branches/coremath/numpy/core/src/umath_funcs.inc.src 2009-02-18 17:32:02 UTC (rev 6397) @@ -62,20 +62,20 @@ if (u == 1) { return LOG2E*x; } else { - return log2 at c@(u) * x / (u - 1); + return npy_log2 at c@(u) * x / (u - 1); } } static @type@ exp2_1m at c@(@type@ x) { - @type@ u = exp at c@(x); + @type@ u = npy_exp at c@(x); if (u == 1.0) { return LOGE2*x; } else if (u - 1 == -1) { return -LOGE2; } else { - return (u - 1) * x/log2 at c@(u); + return (u - 1) * x/npy_log2 at c@(u); } } @@ -84,10 +84,10 @@ { const @type@ tmp = x - y; if (tmp > 0) { - return x + log1p at c@(exp at c@(-tmp)); + return x + npy_log1p at c@(exp at c@(-tmp)); } else { - return y + log1p at c@(exp at c@(tmp)); + return y + npy_log1p at c@(exp at c@(tmp)); } } @@ -96,10 +96,10 @@ { const @type@ tmp = x - y; if (tmp > 0) { - return x + log2_1p at c@(exp2 at c@(-tmp)); + return x + npy_log2_1p at c@(exp2 at c@(-tmp)); } else { - return y + log2_1p at c@(exp2 at c@(tmp)); + return y + npy_log2_1p at c@(exp2 at c@(tmp)); } } @@ -261,7 +261,7 @@ if (x->real == 0. && x->imag == 0.) *r = *x; else { - s = sqrt at c@((fabs at c@(x->real) + hypot at c@(x->real,x->imag))/2); + s = sqrt at c@((npy_fabs at c@(x->real) + npy_hypot at c@(x->real,x->imag))/2); d = x->imag/(2*s); if (x->real > 0) { r->real = s; @@ -289,36 +289,36 @@ static void nc_log at c@(c at typ@ *x, c at typ@ *r) { - @typ@ l = hypot at c@(x->real,x->imag); - r->imag = atan2 at c@(x->imag, x->real); - r->real = log at c@(l); + @typ@ l = npy_hypot at c@(x->real,x->imag); + r->imag = npy_atan2 at c@(x->imag, x->real); + r->real = npy_log at c@(l); return; } static void nc_log1p at c@(c at typ@ *x, c at typ@ *r) { - @typ@ l = hypot at c@(x->real + 1,x->imag); - r->imag = atan2 at c@(x->imag, x->real + 1); - r->real = log at c@(l); + @typ@ l = npy_hypot at c@(x->real + 1,x->imag); + r->imag = npy_atan2 at c@(x->imag, x->real + 1); + r->real = npy_log at c@(l); return; } static void nc_exp at c@(c at typ@ *x, c at typ@ *r) { - @typ@ a = exp at c@(x->real); - r->real = a*cos at c@(x->imag); - r->imag = a*sin at c@(x->imag); + @typ@ a = npy_exp at c@(x->real); + r->real = a*npy_cos at c@(x->imag); + r->imag = a*npy_sin at c@(x->imag); return; } static void nc_expm1 at c@(c at typ@ *x, c at typ@ *r) { - @typ@ a = exp at c@(x->real); - r->real = a*cos at c@(x->imag) - 1; - r->imag = a*sin at c@(x->imag); + @typ@ a = npy_exp at c@(x->real); + r->real = a*npy_cos at c@(x->imag) - 1; + r->imag = a*npy_sin at c@(x->imag); return; } @@ -483,8 +483,8 @@ nc_cos at c@(c at typ@ *x, c at typ@ *r) { @typ@ xr=x->real, xi=x->imag; - r->real = cos at c@(xr)*cosh at c@(xi); - r->imag = -sin at c@(xr)*sinh at c@(xi); + r->real = npy_cos at c@(xr)*npy_cosh at c@(xi); + r->imag = -npy_sin at c@(xr)*npy_sinh at c@(xi); return; } @@ -492,8 +492,8 @@ nc_cosh at c@(c at typ@ *x, c at typ@ *r) { @typ@ xr=x->real, xi=x->imag; - r->real = cos at c@(xi)*cosh at c@(xr); - r->imag = sin at c@(xi)*sinh at c@(xr); + r->real = npy_cos at c@(xi)*npy_cosh at c@(xr); + r->imag = npy_sin at c@(xi)*npy_sinh at c@(xr); return; } @@ -510,8 +510,8 @@ nc_sin at c@(c at typ@ *x, c at typ@ *r) { @typ@ xr=x->real, xi=x->imag; - r->real = sin at c@(xr)*cosh at c@(xi); - r->imag = cos at c@(xr)*sinh at c@(xi); + r->real = npy_sin at c@(xr)*npy_cosh at c@(xi); + r->imag = npy_cos at c@(xr)*npy_sinh at c@(xi); return; } @@ -519,8 +519,8 @@ nc_sinh at c@(c at typ@ *x, c at typ@ *r) { @typ@ xr=x->real, xi=x->imag; - r->real = cos at c@(xi)*sinh at c@(xr); - r->imag = sin at c@(xi)*cosh at c@(xr); + r->real = npy_cos at c@(xi)*npy_sinh at c@(xr); + r->imag = npy_sin at c@(xi)*npy_cosh at c@(xr); return; } @@ -531,10 +531,10 @@ @typ@ rs,is,rc,ic; @typ@ d; @typ@ xr=x->real, xi=x->imag; - sr = sin at c@(xr); - cr = cos at c@(xr); - shi = sinh at c@(xi); - chi = cosh at c@(xi); + sr = npy_sin at c@(xr); + cr = npy_cos at c@(xr); + shi = npy_sinh at c@(xi); + chi = npy_cosh at c@(xi); rs = sr*chi; is = cr*shi; rc = cr*chi; @@ -552,10 +552,10 @@ @typ@ rs,is,rc,ic; @typ@ d; @typ@ xr=x->real, xi=x->imag; - si = sin at c@(xi); - ci = cos at c@(xi); - shr = sinh at c@(xr); - chr = cosh at c@(xr); + si = npy_sin at c@(xi); + ci = npy_cos at c@(xi); + shr = npy_sinh at c@(xr); + chr = npy_cosh at c@(xr); rs = ci*shr; is = si*chr; rc = ci*chr; From numpy-svn at scipy.org Wed Feb 18 15:54:56 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Wed, 18 Feb 2009 14:54:56 -0600 (CST) Subject: [Numpy-svn] r6398 - trunk/numpy/core/src Message-ID: <20090218205456.0452FC7C049@scipy.org> Author: charris Date: 2009-02-18 14:54:52 -0600 (Wed, 18 Feb 2009) New Revision: 6398 Modified: trunk/numpy/core/src/arrayobject.c Log: Coding style cleanups. Modified: trunk/numpy/core/src/arrayobject.c =================================================================== --- trunk/numpy/core/src/arrayobject.c 2009-02-18 17:32:02 UTC (rev 6397) +++ trunk/numpy/core/src/arrayobject.c 2009-02-18 20:54:52 UTC (rev 6398) @@ -5612,22 +5612,23 @@ return; } -/* This routine checks to see if newstrides (of length nd) will not - ever be able to walk outside of the memory implied numbytes and offset. +/* + * This routine checks to see if newstrides (of length nd) will not + * ever be able to walk outside of the memory implied numbytes and offset. + * + * The available memory is assumed to start at -offset and proceed + * to numbytes-offset. The strides are checked to ensure + * that accessing memory using striding will not try to reach beyond + * this memory for any of the axes. + * + * If numbytes is 0 it will be calculated using the dimensions and + * element-size. + * + * This function checks for walking beyond the beginning and right-end + * of the buffer and therefore works for any integer stride (positive + * or negative). + */ - The available memory is assumed to start at -offset and proceed - to numbytes-offset. The strides are checked to ensure - that accessing memory using striding will not try to reach beyond - this memory for any of the axes. - - If numbytes is 0 it will be calculated using the dimensions and - element-size. - - This function checks for walking beyond the beginning and right-end - of the buffer and therefore works for any integer stride (positive - or negative). -*/ - /*NUMPY_API*/ static Bool PyArray_CheckStrides(int elsize, int nd, intp numbytes, intp offset, @@ -5638,37 +5639,38 @@ intp begin; intp end; - if (numbytes == 0) + if (numbytes == 0) { numbytes = PyArray_MultiplyList(dims, nd) * elsize; - + } begin = -offset; end = numbytes - offset - elsize; - for(i=0; i end)) + for (i = 0; i < nd; i++) { + byte_begin = newstrides[i]*(dims[i] - 1); + if ((byte_begin < begin) || (byte_begin > end)) { return FALSE; + } } return TRUE; - } -/* This is the main array creation routine. */ +/* + * This is the main array creation routine. + * + * Flags argument has multiple related meanings + * depending on data and strides: + * + * If data is given, then flags is flags associated with data. + * If strides is not given, then a contiguous strides array will be created + * and the CONTIGUOUS bit will be set. If the flags argument + * has the FORTRAN bit set, then a FORTRAN-style strides array will be + * created (and of course the FORTRAN flag bit will be set). + * + * If data is not given but created here, then flags will be DEFAULT + * and a non-zero flags argument can be used to indicate a FORTRAN style + * array is desired. + */ -/* Flags argument has multiple related meanings - depending on data and strides: - - If data is given, then flags is flags associated with data. - If strides is not given, then a contiguous strides array will be created - and the CONTIGUOUS bit will be set. If the flags argument - has the FORTRAN bit set, then a FORTRAN-style strides array will be - created (and of course the FORTRAN flag bit will be set). - - If data is not given but created here, then flags will be DEFAULT - and a non-zero flags argument can be used to indicate a FORTRAN style - array is desired. -*/ - static size_t _array_fill_strides(intp *strides, intp *dims, int nd, size_t itemsize, int inflag, int *objflags) @@ -5676,29 +5678,37 @@ int i; /* Only make Fortran strides if not contiguous as well */ if ((inflag & FORTRAN) && !(inflag & CONTIGUOUS)) { - for(i=0; i 1) *objflags &= ~CONTIGUOUS; - else *objflags |= CONTIGUOUS; + if (nd > 1) { + *objflags &= ~CONTIGUOUS; + } + else { + *objflags |= CONTIGUOUS; + } } else { - for(i=nd-1;i>=0;i--) { + for (i = nd - 1; i >= 0; i--) { strides[i] = itemsize; itemsize *= dims[i] ? dims[i] : 1; } *objflags |= CONTIGUOUS; - if (nd > 1) *objflags &= ~FORTRAN; - else *objflags |= FORTRAN; + if (nd > 1) { + *objflags &= ~FORTRAN; + } + else { + *objflags |= FORTRAN; + } } return itemsize; } /*NUMPY_API - Generic new array creation routine. -*/ + * Generic new array creation routine. + */ static PyObject * PyArray_New(PyTypeObject *subtype, int nd, intp *dims, int type_num, intp *strides, void *data, int itemsize, int flags, @@ -5708,7 +5718,9 @@ PyObject *new; descr = PyArray_DescrFromType(type_num); - if (descr == NULL) return NULL; + if (descr == NULL) { + return NULL; + } if (descr->elsize == 0) { if (itemsize < 1) { PyErr_SetString(PyExc_ValueError, @@ -5724,14 +5736,16 @@ return new; } -/* Change a sub-array field to the base descriptor */ -/* and update the dimensions and strides - appropriately. Dimensions and strides are added - to the end unless we have a FORTRAN array - and then they are added to the beginning - - Strides are only added if given (because data is given). -*/ +/* + * Change a sub-array field to the base descriptor + * + * and update the dimensions and strides + * appropriately. Dimensions and strides are added + * to the end unless we have a FORTRAN array + * and then they are added to the beginning + * + * Strides are only added if given (because data is given). + */ static int _update_descr_and_dimensions(PyArray_Descr **des, intp *newdims, intp *newstrides, int oldnd, int isfortran) @@ -5758,16 +5772,17 @@ newnd = oldnd + numnew; - if (newnd > MAX_DIMS) goto finish; + if (newnd > MAX_DIMS) { + goto finish; + } if (isfortran) { memmove(newdims+numnew, newdims, oldnd*sizeof(intp)); mydim = newdims; } - if (tuple) { - for(i=0; isubarray->shape, i)); + for (i = 0; i < numnew; i++) { + mydim[i] = (intp) PyInt_AsLong( + PyTuple_GET_ITEM(old->subarray->shape, i)); } } else { @@ -5777,15 +5792,15 @@ if (newstrides) { intp tempsize; intp *mystrides; + mystrides = newstrides + oldnd; if (isfortran) { - memmove(newstrides+numnew, newstrides, - oldnd*sizeof(intp)); + memmove(newstrides+numnew, newstrides, oldnd*sizeof(intp)); mystrides = newstrides; } /* Make new strides -- alwasy C-contiguous */ tempsize = (*des)->elsize; - for(i=numnew-1; i>=0; i--) { + for (i = numnew - 1; i >= 0; i--) { mystrides[i] = tempsize; tempsize *= mydim[i] ? mydim[i] : 1; } @@ -5798,10 +5813,11 @@ } -/* steals a reference to descr (even on failure) */ /*NUMPY_API - Generic new array creation routine. -*/ + * Generic new array creation routine. + * + * steals a reference to descr (even on failure) + */ static PyObject * PyArray_NewFromDescr(PyTypeObject *subtype, PyArray_Descr *descr, int nd, intp *dims, intp *strides, void *data, @@ -5816,9 +5832,9 @@ if (descr->subarray) { PyObject *ret; intp newdims[2*MAX_DIMS]; - intp *newstrides=NULL; - int isfortran=0; - isfortran = (data && (flags & FORTRAN) && !(flags & CONTIGUOUS)) || \ + intp *newstrides = NULL; + int isfortran = 0; + isfortran = (data && (flags & FORTRAN) && !(flags & CONTIGUOUS)) || (!data && flags); memcpy(newdims, dims, nd*sizeof(intp)); if (strides) { @@ -5832,7 +5848,6 @@ data, flags, obj); return ret; } - if (nd < 0) { PyErr_SetString(PyExc_ValueError, "number of dimensions must be >=0"); @@ -5856,13 +5871,19 @@ return NULL; } PyArray_DESCR_REPLACE(descr); - if (descr->type_num == NPY_STRING) descr->elsize = 1; - else descr->elsize = sizeof(PyArray_UCS4); + if (descr->type_num == NPY_STRING) { + descr->elsize = 1; + } + else { + descr->elsize = sizeof(PyArray_UCS4); + } sd = (size_t) descr->elsize; } largest = MAX_INTP / sd; - for(i=0;iflags = DEFAULT; if (flags) { self->flags |= FORTRAN; - if (nd > 1) self->flags &= ~CONTIGUOUS; + if (nd > 1) { + self->flags &= ~CONTIGUOUS; + } flags = FORTRAN; } } - else self->flags = (flags & ~UPDATEIFCOPY); - + else { + self->flags = (flags & ~UPDATEIFCOPY); + } self->descr = descr; self->base = (PyObject *)NULL; self->weakreflist = (PyObject *)NULL; @@ -5913,84 +5937,102 @@ sd = _array_fill_strides(self->strides, dims, nd, sd, flags, &(self->flags)); } - else { /* we allow strides even when we create - the memory, but be careful with this... - */ + else { + /* + * we allow strides even when we create + * the memory, but be careful with this... + */ memcpy(self->strides, strides, sizeof(intp)*nd); sd *= size; } } - else { self->dimensions = self->strides = NULL; } + else { + self->dimensions = self->strides = NULL; + } if (data == NULL) { + /* + * Allocate something even for zero-space arrays + * e.g. shape=(0,) -- otherwise buffer exposure + * (a.data) doesn't work as it should. + */ - /* Allocate something even for zero-space arrays - e.g. shape=(0,) -- otherwise buffer exposure - (a.data) doesn't work as it should. */ - - if (sd==0) sd = descr->elsize; - - if ((data = PyDataMem_NEW(sd))==NULL) { + if (sd == 0) { + sd = descr->elsize; + } + if ((data = PyDataMem_NEW(sd)) == NULL) { PyErr_NoMemory(); goto fail; } self->flags |= OWNDATA; - /* It is bad to have unitialized OBJECT pointers */ - /* which could also be sub-fields of a VOID array */ + /* + * It is bad to have unitialized OBJECT pointers + * which could also be sub-fields of a VOID array + */ if (PyDataType_FLAGCHK(descr, NPY_NEEDS_INIT)) { memset(data, 0, sd); } } else { - self->flags &= ~OWNDATA; /* If data is passed in, - this object won't own it - by default. - Caller must arrange for - this to be reset if truly - desired */ + /* + * If data is passed in, this object won't own it by default. + * Caller must arrange for this to be reset if truly desired + */ + self->flags &= ~OWNDATA; } self->data = data; - /* call the __array_finalize__ - method if a subtype. - If obj is NULL, then call method with Py_None - */ + /* + * call the __array_finalize__ + * method if a subtype. + * If obj is NULL, then call method with Py_None + */ if ((subtype != &PyArray_Type)) { PyObject *res, *func, *args; - static PyObject *str=NULL; + static PyObject *str = NULL; if (str == NULL) { str = PyString_InternFromString("__array_finalize__"); } func = PyObject_GetAttr((PyObject *)self, str); if (func && func != Py_None) { - if (strides != NULL) { /* did not allocate own data - or funny strides */ - /* update flags before finalize function */ + if (strides != NULL) { + /* + * did not allocate own data or funny strides + * update flags before finalize function + */ PyArray_UpdateFlags(self, UPDATE_ALL); } - if PyCObject_Check(func) { /* A C-function is stored here */ - PyArray_FinalizeFunc *cfunc; - cfunc = PyCObject_AsVoidPtr(func); - Py_DECREF(func); - if (cfunc(self, obj) < 0) goto fail; + if PyCObject_Check(func) { + /* A C-function is stored here */ + PyArray_FinalizeFunc *cfunc; + cfunc = PyCObject_AsVoidPtr(func); + Py_DECREF(func); + if (cfunc(self, obj) < 0) { + goto fail; } + } else { args = PyTuple_New(1); - if (obj == NULL) obj=Py_None; + if (obj == NULL) { + obj=Py_None; + } Py_INCREF(obj); PyTuple_SET_ITEM(args, 0, obj); res = PyObject_Call(func, args, NULL); Py_DECREF(args); Py_DECREF(func); - if (res == NULL) goto fail; - else Py_DECREF(res); + if (res == NULL) { + goto fail; + } + else { + Py_DECREF(res); + } } } else Py_XDECREF(func); } - return (PyObject *)self; fail: @@ -6005,14 +6047,17 @@ memset(optr, 0, dtype->elsize); } else if (PyDescr_HASFIELDS(dtype)) { - PyObject *key, *value, *title=NULL; + PyObject *key, *value, *title = NULL; PyArray_Descr *new; int offset; - Py_ssize_t pos=0; + Py_ssize_t pos = 0; while (PyDict_Next(dtype->fields, &pos, &key, &value)) { - if NPY_TITLE_KEY(key, value) continue; - if (!PyArg_ParseTuple(value, "Oi|O", &new, &offset, - &title)) return; + if NPY_TITLE_KEY(key, value) { + continue; + } + if (!PyArg_ParseTuple(value, "Oi|O", &new, &offset, &title)) { + return; + } _putzero(optr + offset, zero, new); } } @@ -6027,13 +6072,11 @@ /*NUMPY_API - Resize (reallocate data). Only works if nothing else is referencing - this array and it is contiguous. - If refcheck is 0, then the reference count is not checked - and assumed to be 1. - You still must own this data and have no weak-references and no base - object. -*/ + * Resize (reallocate data). Only works if nothing else is referencing this + * array and it is contiguous. If refcheck is 0, then the reference count is + * not checked and assumed to be 1. You still must own this data and have no + * weak-references and no base object. + */ static PyObject * PyArray_Resize(PyArrayObject *self, PyArray_Dims *newshape, int refcheck, NPY_ORDER fortran) @@ -6054,9 +6097,9 @@ return NULL; } - if (fortran == PyArray_ANYORDER) + if (fortran == PyArray_ANYORDER) { fortran = PyArray_CORDER; - + } if (self->descr->elsize == 0) { PyErr_SetString(PyExc_ValueError, "Bad data-type size."); return NULL; @@ -6064,7 +6107,9 @@ newsize = 1; largest = MAX_INTP / self->descr->elsize; for(k=0; k 2) || (self->base != NULL) || \ + if (refcheck) { + refcnt = REFCOUNT(self); + } + else { + refcnt = 1; + } + if ((refcnt > 2) || (self->base != NULL) || (self->weakreflist != NULL)) { PyErr_SetString(PyExc_ValueError, "cannot resize an array that has "\ @@ -6097,8 +6146,12 @@ return NULL; } - if (newsize == 0) sd = self->descr->elsize; - else sd = newsize * self->descr->elsize; + if (newsize == 0) { + sd = self->descr->elsize; + } + else { + sd = newsize*self->descr->elsize; + } /* Reallocate space if needed */ new_data = PyDataMem_RENEW(self->data, sd); if (new_data == NULL) { @@ -6117,21 +6170,20 @@ char *optr; optr = self->data + oldsize*elsize; n = newsize - oldsize; - for(k=0; kdescr); optr += elsize; } Py_DECREF(zero); } else{ - memset(self->data+oldsize*elsize, 0, - (newsize-oldsize)*elsize); + memset(self->data+oldsize*elsize, 0, (newsize-oldsize)*elsize); } } - if (self->nd != new_nd) { /* Different number of dimensions. */ + if (self->nd != new_nd) { + /* Different number of dimensions. */ self->nd = new_nd; - /* Need new dimensions and strides arrays */ dimptr = PyDimMem_RENEW(self->dimensions, 2*new_nd); if (dimptr == NULL) { @@ -6148,42 +6200,44 @@ sd = (size_t) self->descr->elsize; sd = (size_t) _array_fill_strides(new_strides, new_dimensions, new_nd, sd, self->flags, &(self->flags)); - memmove(self->dimensions, new_dimensions, new_nd*sizeof(intp)); memmove(self->strides, new_strides, new_nd*sizeof(intp)); - Py_INCREF(Py_None); return Py_None; - } static void _fillobject(char *optr, PyObject *obj, PyArray_Descr *dtype) { if (!PyDataType_FLAGCHK(dtype, NPY_ITEM_REFCOUNT)) { - if ((obj == Py_None) || - (PyInt_Check(obj) && PyInt_AsLong(obj)==0)) + if ((obj == Py_None) || (PyInt_Check(obj) && PyInt_AsLong(obj)==0)) { return; + } else { PyObject *arr; Py_INCREF(dtype); arr = PyArray_NewFromDescr(&PyArray_Type, dtype, 0, NULL, NULL, NULL, 0, NULL); - if (arr!=NULL) + if (arr!=NULL) { dtype->f->setitem(obj, optr, arr); + } Py_XDECREF(arr); } } else if (PyDescr_HASFIELDS(dtype)) { - PyObject *key, *value, *title=NULL; + PyObject *key, *value, *title = NULL; PyArray_Descr *new; int offset; - Py_ssize_t pos=0; + Py_ssize_t pos = 0; + while (PyDict_Next(dtype->fields, &pos, &key, &value)) { - if NPY_TITLE_KEY(key, value) continue; - if (!PyArg_ParseTuple(value, "Oi|O", &new, &offset, - &title)) return; + if NPY_TITLE_KEY(key, value) { + continue; + } + if (!PyArg_ParseTuple(value, "Oi|O", &new, &offset, &title)) { + return; + } _fillobject(optr + offset, obj, new); } } @@ -6196,8 +6250,9 @@ } } -/* Assumes contiguous */ -/*NUMPY_API*/ +/*NUMPY_API + * Assumes contiguous + */ static void PyArray_FillObjectArray(PyArrayObject *arr, PyObject *obj) { @@ -6208,12 +6263,12 @@ optr = (PyObject **)(arr->data); n = PyArray_SIZE(arr); if (obj == NULL) { - for(i=0; idata; - for(i=0; idescr); optr += arr->descr->elsize; } @@ -6250,7 +6305,9 @@ descr = PyArray_DESCR(arr); Py_INCREF(descr); newarr = PyArray_FromAny(obj, descr, 0,0, ALIGNED, NULL); - if (newarr == NULL) return -1; + if (newarr == NULL) { + return -1; + } fromptr = PyArray_DATA(newarr); swap = (PyArray_ISNOTSWAPPED(arr) != PyArray_ISNOTSWAPPED(newarr)); } @@ -6280,7 +6337,7 @@ Py_XDECREF(newarr); return -1; } - while(size--) { + while (size--) { copyswap(iter->dataptr, fromptr, swap, arr); PyArray_ITER_NEXT(iter); } @@ -6307,14 +6364,11 @@ PyArrayObject *ret; buffer.ptr = NULL; - /* Usually called with shape and type - but can also be called with buffer, strides, and swapped info - */ - - /* For now, let's just use this to create an empty, contiguous - array of a specific type and shape. - */ - + /* + * Usually called with shape and type but can also be called with buffer, + * strides, and swapped info For now, let's just use this to create an + * empty, contiguous array of a specific type and shape. + */ if (!PyArg_ParseTupleAndKeywords(args, kwds, "O&|O&O&LO&O&", kwlist, PyArray_IntpConverter, &dims, @@ -6326,16 +6380,17 @@ &PyArray_IntpConverter, &strides, &PyArray_OrderConverter, - &order)) + &order)) { goto fail; - - if (order == PyArray_FORTRANORDER) fortran = 1; - - if (descr == NULL) + } + if (order == PyArray_FORTRANORDER) { + fortran = 1; + } + if (descr == NULL) { descr = PyArray_DescrFromType(PyArray_DEFAULT); + } itemsize = descr->elsize; - if (itemsize == 0) { PyErr_SetString(PyExc_ValueError, "data-type with unspecified variable length"); @@ -6373,27 +6428,31 @@ } if (buffer.ptr == NULL) { - ret = (PyArrayObject *) \ + ret = (PyArrayObject *) PyArray_NewFromDescr(subtype, descr, (int)dims.len, dims.ptr, strides.ptr, NULL, fortran, NULL); - if (ret == NULL) {descr=NULL;goto fail;} + if (ret == NULL) { + descr = NULL; + goto fail; + } if (PyDataType_FLAGCHK(descr, NPY_ITEM_HASOBJECT)) { /* place Py_None in object positions */ PyArray_FillObjectArray(ret, Py_None); if (PyErr_Occurred()) { - descr=NULL; + descr = NULL; goto fail; } } } - else { /* buffer given -- use it */ + else { + /* buffer given -- use it */ if (dims.len == 1 && dims.ptr[0] == -1) { dims.ptr[0] = (buffer.len-(intp)offset) / itemsize; } - else if ((strides.ptr == NULL) && \ - (buffer.len < ((intp)itemsize)* \ + else if ((strides.ptr == NULL) && + (buffer.len < ((intp)itemsize)* PyArray_MultiplyList(dims.ptr, dims.len))) { PyErr_SetString(PyExc_TypeError, "buffer is too small for " \ @@ -6401,27 +6460,38 @@ goto fail; } /* get writeable and aligned */ - if (fortran) buffer.flags |= FORTRAN; + if (fortran) { + buffer.flags |= FORTRAN; + } ret = (PyArrayObject *)\ PyArray_NewFromDescr(subtype, descr, dims.len, dims.ptr, strides.ptr, offset + (char *)buffer.ptr, buffer.flags, NULL); - if (ret == NULL) {descr=NULL; goto fail;} + if (ret == NULL) { + descr = NULL; + goto fail; + } PyArray_UpdateFlags(ret, UPDATE_ALL); ret->base = buffer.base; Py_INCREF(buffer.base); } PyDimMem_FREE(dims.ptr); - if (strides.ptr) PyDimMem_FREE(strides.ptr); + if (strides.ptr) { + PyDimMem_FREE(strides.ptr); + } return (PyObject *)ret; fail: Py_XDECREF(descr); - if (dims.ptr) PyDimMem_FREE(dims.ptr); - if (strides.ptr) PyDimMem_FREE(strides.ptr); + if (dims.ptr) { + PyDimMem_FREE(dims.ptr); + } + if (strides.ptr) { + PyDimMem_FREE(strides.ptr); + } return NULL; } @@ -6467,7 +6537,9 @@ /* Assumes C-order */ ret = PyArray_Reshape(self, val); - if (ret == NULL) return -1; + if (ret == NULL) { + return -1; + } if (PyArray_DATA(ret) != PyArray_DATA(self)) { Py_DECREF(ret); PyErr_SetString(PyExc_AttributeError, @@ -6480,7 +6552,8 @@ PyDimMem_FREE(self->dimensions); nd = PyArray_NDIM(ret); self->nd = nd; - if (nd > 0) { /* create new dimensions and strides */ + if (nd > 0) { + /* create new dimensions and strides */ self->dimensions = PyDimMem_NEW(2*nd); if (self->dimensions == NULL) { Py_DECREF(ret); @@ -6488,12 +6561,13 @@ return -1; } self->strides = self->dimensions + nd; - memcpy(self->dimensions, PyArray_DIMS(ret), - nd*sizeof(intp)); - memcpy(self->strides, PyArray_STRIDES(ret), - nd*sizeof(intp)); + memcpy(self->dimensions, PyArray_DIMS(ret), nd*sizeof(intp)); + memcpy(self->strides, PyArray_STRIDES(ret), nd*sizeof(intp)); } - else {self->dimensions=NULL; self->strides=NULL;} + else { + self->dimensions = NULL; + self->strides = NULL; + } Py_DECREF(ret); PyArray_UpdateFlags(self, CONTIGUOUS | FORTRAN); return 0; @@ -6511,12 +6585,12 @@ { PyArray_Dims newstrides = {NULL, 0}; PyArrayObject *new; - intp numbytes=0; - intp offset=0; + intp numbytes = 0; + intp offset = 0; Py_ssize_t buf_len; char *buf; - if (!PyArray_IntpConverter(obj, &newstrides) || \ + if (!PyArray_IntpConverter(obj, &newstrides) || newstrides.ptr == NULL) { PyErr_SetString(PyExc_TypeError, "invalid strides"); return -1; @@ -6530,9 +6604,10 @@ while(new->base && PyArray_Check(new->base)) { new = (PyArrayObject *)(new->base); } - /* Get the available memory through the buffer - interface on new->base or if that fails - from the current new */ + /* + * Get the available memory through the buffer interface on + * new->base or if that fails from the current new + */ if (new->base && PyObject_AsReadBuffer(new->base, (const void **)&buf, &buf_len) >= 0) { @@ -6568,10 +6643,12 @@ static PyObject * array_priority_get(PyArrayObject *self) { - if (PyArray_CheckExact(self)) + if (PyArray_CheckExact(self)) { return PyFloat_FromDouble(PyArray_PRIORITY); - else + } + else { return PyFloat_FromDouble(PyArray_SUBTYPE_PRIORITY); + } } static PyObject *arraydescr_protocol_typestr_get(PyArray_Descr *); @@ -6598,16 +6675,23 @@ PyObject *dobj; res = arraydescr_protocol_descr_get(self->descr); - if (res) return res; + if (res) { + return res; + } PyErr_Clear(); /* get default */ dobj = PyTuple_New(2); - if (dobj == NULL) return NULL; + if (dobj == NULL) { + return NULL; + } PyTuple_SET_ITEM(dobj, 0, PyString_FromString("")); PyTuple_SET_ITEM(dobj, 1, array_typestr_get(self)); res = PyList_New(1); - if (res == NULL) {Py_DECREF(dobj); return NULL;} + if (res == NULL) { + Py_DECREF(dobj); + return NULL; + } PyList_SET_ITEM(res, 0, dobj); return res; } @@ -6616,9 +6700,9 @@ array_protocol_strides_get(PyArrayObject *self) { if PyArray_ISCONTIGUOUS(self) { - Py_INCREF(Py_None); - return Py_None; - } + Py_INCREF(Py_None); + return Py_None; + } return PyArray_IntTupleFromIntp(self->nd, self->strides); } @@ -6639,9 +6723,10 @@ PyObject *_numpy_internal; PyObject *ret; _numpy_internal = PyImport_ImportModule("numpy.core._internal"); - if (_numpy_internal == NULL) return NULL; - ret = PyObject_CallMethod(_numpy_internal, "_ctypes", - "ON", self, + if (_numpy_internal == NULL) { + return NULL; + } + ret = PyObject_CallMethod(_numpy_internal, "_ctypes", "ON", self, PyLong_FromVoidPtr(self->data)); Py_DECREF(_numpy_internal); return ret; @@ -6652,8 +6737,11 @@ { PyObject *dict; PyObject *obj; + dict = PyDict_New(); - if (dict == NULL) return NULL; + if (dict == NULL) { + return NULL; + } /* dataptr */ obj = array_dataptr_get(self); @@ -6693,11 +6781,12 @@ return NULL; } nbytes = PyArray_NBYTES(self); - if PyArray_ISWRITEABLE(self) - return PyBuffer_FromReadWriteObject((PyObject *)self, 0, - (int) nbytes); - else + if PyArray_ISWRITEABLE(self) { + return PyBuffer_FromReadWriteObject((PyObject *)self, 0, (int) nbytes); + } + else { return PyBuffer_FromObject((PyObject *)self, 0, (int) nbytes); + } } static int @@ -6709,8 +6798,7 @@ if (PyObject_AsWriteBuffer(op, &buf, &buf_len) < 0) { writeable = 0; - if (PyObject_AsReadBuffer(op, (const void **)&buf, - &buf_len) < 0) { + if (PyObject_AsReadBuffer(op, (const void **)&buf, &buf_len) < 0) { PyErr_SetString(PyExc_AttributeError, "object does not have single-segment " \ "buffer interface"); @@ -6723,8 +6811,7 @@ return -1; } if (PyArray_NBYTES(self) > buf_len) { - PyErr_SetString(PyExc_AttributeError, - "not enough data for array"); + PyErr_SetString(PyExc_AttributeError, "not enough data for array"); return -1; } if (self->flags & OWNDATA) { @@ -6742,8 +6829,9 @@ self->base = op; self->data = buf; self->flags = CARRAY; - if (!writeable) + if (!writeable) { self->flags &= ~WRITEABLE; + } return 0; } @@ -6761,10 +6849,12 @@ #if SIZEOF_INTP <= SIZEOF_LONG return PyInt_FromLong((long) size); #else - if (size > MAX_LONG || size < MIN_LONG) + if (size > MAX_LONG || size < MIN_LONG) { return PyLong_FromLongLong(size); - else + } + else { return PyInt_FromLong((long) size); + } #endif } @@ -6775,28 +6865,29 @@ #if SIZEOF_INTP <= SIZEOF_LONG return PyInt_FromLong((long) nbytes); #else - if (nbytes > MAX_LONG || nbytes < MIN_LONG) + if (nbytes > MAX_LONG || nbytes < MIN_LONG) { return PyLong_FromLongLong(nbytes); - else + } + else { return PyInt_FromLong((long) nbytes); + } #endif } -/* If the type is changed. - Also needing change: strides, itemsize +/* + * If the type is changed. + * Also needing change: strides, itemsize + * + * Either itemsize is exactly the same or the array is single-segment + * (contiguous or fortran) with compatibile dimensions The shape and strides + * will be adjusted in that case as well. + */ - Either itemsize is exactly the same - or the array is single-segment (contiguous or fortran) with - compatibile dimensions - - The shape and strides will be adjusted in that case as well. -*/ - static int array_descr_set(PyArrayObject *self, PyObject *arg) { - PyArray_Descr *newtype=NULL; + PyArray_Descr *newtype = NULL; intp newdim; int index; char *msg = "new type not compatible with array."; @@ -6825,51 +6916,61 @@ } - if ((newtype->elsize != self->descr->elsize) && \ - (self->nd == 0 || !PyArray_ISONESEGMENT(self) || \ - newtype->subarray)) goto fail; - - if (PyArray_ISCONTIGUOUS(self)) index = self->nd - 1; - else index = 0; - + if ((newtype->elsize != self->descr->elsize) && + (self->nd == 0 || !PyArray_ISONESEGMENT(self) || + newtype->subarray)) { + goto fail; + } + if (PyArray_ISCONTIGUOUS(self)) { + index = self->nd - 1; + } + else { + index = 0; + } if (newtype->elsize < self->descr->elsize) { - /* if it is compatible increase the size of the - dimension at end (or at the front for FORTRAN) - */ - if (self->descr->elsize % newtype->elsize != 0) + /* + * if it is compatible increase the size of the + * dimension at end (or at the front for FORTRAN) + */ + if (self->descr->elsize % newtype->elsize != 0) { goto fail; + } newdim = self->descr->elsize / newtype->elsize; self->dimensions[index] *= newdim; self->strides[index] = newtype->elsize; } - else if (newtype->elsize > self->descr->elsize) { - - /* Determine if last (or first if FORTRAN) dimension - is compatible */ - + /* + * Determine if last (or first if FORTRAN) dimension + * is compatible + */ newdim = self->dimensions[index] * self->descr->elsize; - if ((newdim % newtype->elsize) != 0) goto fail; - + if ((newdim % newtype->elsize) != 0) { + goto fail; + } self->dimensions[index] = newdim / newtype->elsize; self->strides[index] = newtype->elsize; } /* fall through -- adjust type*/ - Py_DECREF(self->descr); if (newtype->subarray) { - /* create new array object from data and update - dimensions, strides and descr from it */ + /* + * create new array object from data and update + * dimensions, strides and descr from it + */ PyArrayObject *temp; - - /* We would decref newtype here --- temp will - steal a reference to it */ - temp = (PyArrayObject *) \ + /* + * We would decref newtype here. + * temp will steal a reference to it + */ + temp = (PyArrayObject *) PyArray_NewFromDescr(&PyArray_Type, newtype, self->nd, self->dimensions, self->strides, self->data, self->flags, NULL); - if (temp == NULL) return -1; + if (temp == NULL) { + return -1; + } PyDimMem_FREE(self->dimensions); self->dimensions = temp->dimensions; self->nd = temp->nd; @@ -6884,7 +6985,6 @@ self->descr = newtype; PyArray_UpdateFlags(self, UPDATE_ALL); - return 0; fail: @@ -6899,7 +6999,9 @@ PyArrayInterface *inter; inter = (PyArrayInterface *)_pya_malloc(sizeof(PyArrayInterface)); - if (inter==NULL) return PyErr_NoMemory(); + if (inter==NULL) { + return PyErr_NoMemory(); + } inter->two = 2; inter->nd = self->nd; inter->typekind = self->descr->kind; @@ -6908,9 +7010,10 @@ /* reset unused flags */ inter->flags &= ~(UPDATEIFCOPY | OWNDATA); if (PyArray_ISNOTSWAPPED(self)) inter->flags |= NOTSWAPPED; - /* Copy shape and strides over since these can be reset - when the array is "reshaped". - */ + /* + * Copy shape and strides over since these can be reset + *when the array is "reshaped". + */ if (self->nd > 0) { inter->shape = (intp *)_pya_malloc(2*sizeof(intp)*self->nd); if (inter->shape == NULL) { @@ -6928,10 +7031,16 @@ inter->data = self->data; if (self->descr->names) { inter->descr = arraydescr_protocol_descr_get(self->descr); - if (inter->descr == NULL) PyErr_Clear(); - else inter->flags &= ARR_HAS_DESCR; + if (inter->descr == NULL) { + PyErr_Clear(); + } + else { + inter->flags &= ARR_HAS_DESCR; + } } - else inter->descr = NULL; + else { + inter->descr = NULL; + } Py_INCREF(self); return PyCObject_FromVoidPtrAndDesc(inter, self, gentype_struct_free); } @@ -6958,7 +7067,7 @@ PyArray_FillObjectArray(ret, zero); Py_DECREF(zero); if (PyErr_Occurred()) { - Py_DECREF(ret); + Py_DECREF(ret); return -1; } } @@ -6966,14 +7075,14 @@ intp n = PyArray_NBYTES(ret); memset(ret->data, 0, n); } - return 0; + return 0; } -/* Create a view of a complex array with an equivalent data-type - except it is real instead of complex. -*/ - +/* + * Create a view of a complex array with an equivalent data-type + * except it is real instead of complex. + */ static PyArrayObject * _get_part(PyArrayObject *self, int imag) { @@ -6992,7 +7101,7 @@ Py_DECREF(type); type = new; } - ret = (PyArrayObject *) \ + ret = (PyArrayObject *) PyArray_NewFromDescr(self->ob_type, type, self->nd, @@ -7000,7 +7109,9 @@ self->strides, self->data + offset, self->flags, (PyObject *)self); - if (ret == NULL) return NULL; + if (ret == NULL) { + return NULL; + } ret->flags &= ~CONTIGUOUS; ret->flags &= ~FORTRAN; Py_INCREF(self); @@ -7033,14 +7144,19 @@ if (PyArray_ISCOMPLEX(self)) { ret = _get_part(self, 0); - if (ret == NULL) return -1; + if (ret == NULL) { + return -1; + } } else { Py_INCREF(self); ret = self; } new = (PyArrayObject *)PyArray_FromAny(val, NULL, 0, 0, 0, NULL); - if (new == NULL) {Py_DECREF(ret); return -1;} + if (new == NULL) { + Py_DECREF(ret); + return -1; + } rint = PyArray_MoveInto(ret, new); Py_DECREF(ret); Py_DECREF(new); @@ -7059,15 +7175,17 @@ Py_INCREF(self->descr); ret = (PyArrayObject *)PyArray_NewFromDescr(self->ob_type, self->descr, - self->nd, + self->nd, self->dimensions, NULL, NULL, PyArray_ISFORTRAN(self), (PyObject *)self); - if (ret == NULL) return NULL; - - if (_zerofill(ret) < 0) return NULL; - + if (ret == NULL) { + return NULL; + } + if (_zerofill(ret) < 0) { + return NULL; + } ret->flags &= ~WRITEABLE; } return (PyObject *) ret; @@ -7082,9 +7200,14 @@ int rint; ret = _get_part(self, 1); - if (ret == NULL) return -1; + if (ret == NULL) { + return -1; + } new = (PyArrayObject *)PyArray_FromAny(val, NULL, 0, 0, 0, NULL); - if (new == NULL) {Py_DECREF(ret); return -1;} + if (new == NULL) { + Py_DECREF(ret); + return -1; + } rint = PyArray_MoveInto(ret, new); Py_DECREF(ret); Py_DECREF(new); @@ -7106,9 +7229,9 @@ static int array_flat_set(PyArrayObject *self, PyObject *val) { - PyObject *arr=NULL; + PyObject *arr = NULL; int retval = -1; - PyArrayIterObject *selfit=NULL, *arrit=NULL; + PyArrayIterObject *selfit = NULL, *arrit = NULL; PyArray_Descr *typecode; int swap; PyArray_CopySwapFunc *copyswap; @@ -7117,28 +7240,36 @@ Py_INCREF(typecode); arr = PyArray_FromAny(val, typecode, 0, 0, FORCECAST | FORTRAN_IF(self), NULL); - if (arr == NULL) return -1; + if (arr == NULL) { + return -1; + } arrit = (PyArrayIterObject *)PyArray_IterNew(arr); - if (arrit == NULL) goto exit; + if (arrit == NULL) { + goto exit; + } selfit = (PyArrayIterObject *)PyArray_IterNew((PyObject *)self); - if (selfit == NULL) goto exit; - - if (arrit->size == 0) {retval = 0; goto exit;} - + if (selfit == NULL) { + goto exit; + } + if (arrit->size == 0) { + retval = 0; + goto exit; + } swap = PyArray_ISNOTSWAPPED(self) != PyArray_ISNOTSWAPPED(arr); copyswap = self->descr->f->copyswap; if (PyDataType_REFCHK(self->descr)) { - while(selfit->index < selfit->size) { + while (selfit->index < selfit->size) { PyArray_Item_XDECREF(selfit->dataptr, self->descr); PyArray_Item_INCREF(arrit->dataptr, PyArray_DESCR(arr)); - memmove(selfit->dataptr, arrit->dataptr, - sizeof(PyObject **)); - if (swap) + memmove(selfit->dataptr, arrit->dataptr, sizeof(PyObject **)); + if (swap) { copyswap(selfit->dataptr, NULL, swap, self); + } PyArray_ITER_NEXT(selfit); PyArray_ITER_NEXT(arrit); - if (arrit->index == arrit->size) + if (arrit->index == arrit->size) { PyArray_ITER_RESET(arrit); + } } retval = 0; goto exit; @@ -7146,14 +7277,17 @@ while(selfit->index < selfit->size) { memmove(selfit->dataptr, arrit->dataptr, self->descr->elsize); - if (swap) + if (swap) { copyswap(selfit->dataptr, NULL, swap, self); + } PyArray_ITER_NEXT(selfit); PyArray_ITER_NEXT(arrit); - if (arrit->index == arrit->size) + if (arrit->index == arrit->size) { PyArray_ITER_RESET(arrit); + } } retval = 0; + exit: Py_XDECREF(selfit); Py_XDECREF(arrit); @@ -7420,13 +7554,12 @@ } n = PyObject_Length(s); - if ((nd == 0) || PyString_Check(s) || PyUnicode_Check(s) || PyBuffer_Check(s)) { *itemsize = MAX(*itemsize, n); return 0; } - for(i = 0; i < n; i++) { + for (i = 0; i < n; i++) { if ((e = PySequence_GetItem(s,i))==NULL) { return -1; } @@ -7456,8 +7589,7 @@ } return 0; } - - n=PyObject_Length(s); + n = PyObject_Length(s); *d = n; if (*d < 0) { return -1; @@ -7507,10 +7639,11 @@ } - if (chktype->type_num > mintype->type_num) + if (chktype->type_num > mintype->type_num) { outtype_num = chktype->type_num; + } else { - if (PyDataType_ISOBJECT(chktype) && \ + if (PyDataType_ISOBJECT(chktype) && PyDataType_ISSTRING(mintype)) { return PyArray_DescrFromType(NPY_OBJECT); } @@ -7520,10 +7653,11 @@ } save_num = outtype_num; - while(outtype_num < PyArray_NTYPES && + while (outtype_num < PyArray_NTYPES && !(PyArray_CanCastSafely(chktype->type_num, outtype_num) - && PyArray_CanCastSafely(mintype->type_num, outtype_num))) + && PyArray_CanCastSafely(mintype->type_num, outtype_num))) { outtype_num++; + } if (outtype_num == PyArray_NTYPES) { outtype = PyArray_DescrFromType(save_num); } @@ -7532,11 +7666,13 @@ } if (PyTypeNum_ISEXTENDED(outtype->type_num)) { int testsize = outtype->elsize; - register int chksize, minsize; + int chksize, minsize; chksize = chktype->elsize; minsize = mintype->elsize; - /* Handle string->unicode case separately - because string itemsize is 4* as large */ + /* + * Handle string->unicode case separately + * because string itemsize is 4* as large + */ if (outtype->type_num == PyArray_UNICODE && mintype->type_num == PyArray_STRING) { testsize = MAX(chksize, 4*minsize); @@ -7569,7 +7705,8 @@ /* bools are a subclass of int */ if (PyBool_Check(op)) { return PyArray_DescrFromType(PyArray_BOOL); - } else { + } + else { return PyArray_DescrFromType(PyArray_LONG); } } @@ -7607,39 +7744,42 @@ } -/* op is an object to be converted to an ndarray. - - minitype is the minimum type-descriptor needed. - - max is the maximum number of dimensions -- used for recursive call - to avoid infinite recursion... - -*/ - +/* + * op is an object to be converted to an ndarray. + * + * minitype is the minimum type-descriptor needed. + * + * max is the maximum number of dimensions -- used for recursive call + * to avoid infinite recursion... + */ static PyArray_Descr * _array_find_type(PyObject *op, PyArray_Descr *minitype, int max) { int l; PyObject *ip; - PyArray_Descr *chktype=NULL; + PyArray_Descr *chktype = NULL; PyArray_Descr *outtype; - /* These need to come first because if op already carries - a descr structure, then we want it to be the result if minitype - is NULL. - */ - + /* + * These need to come first because if op already carries + * a descr structure, then we want it to be the result if minitype + * is NULL. + */ if (PyArray_Check(op)) { chktype = PyArray_DESCR(op); Py_INCREF(chktype); - if (minitype == NULL) return chktype; + if (minitype == NULL) { + return chktype; + } Py_INCREF(minitype); goto finish; } if (PyArray_IsScalar(op, Generic)) { chktype = PyArray_DescrFromScalar(op); - if (minitype == NULL) return chktype; + if (minitype == NULL) { + return chktype; + } Py_INCREF(minitype); goto finish; } @@ -7647,10 +7787,12 @@ if (minitype == NULL) { minitype = PyArray_DescrFromType(PyArray_BOOL); } - else Py_INCREF(minitype); - - if (max < 0) goto deflt; - + else { + Py_INCREF(minitype); + } + if (max < 0) { + goto deflt; + } chktype = _array_find_python_scalar_type(op); if (chktype) { goto finish; @@ -7661,15 +7803,17 @@ PyObject *new; new = PyDict_GetItemString(ip, "typestr"); if (new && PyString_Check(new)) { - chktype =_array_typedescr_fromstr \ - (PyString_AS_STRING(new)); + chktype =_array_typedescr_fromstr(PyString_AS_STRING(new)); } } Py_DECREF(ip); - if (chktype) goto finish; + if (chktype) { + goto finish; + } } - else PyErr_Clear(); - + else { + PyErr_Clear(); + } if ((ip=PyObject_GetAttrString(op, "__array_struct__")) != NULL) { PyArrayInterface *inter; char buf[40]; @@ -7682,9 +7826,13 @@ } } Py_DECREF(ip); - if (chktype) goto finish; + if (chktype) { + goto finish; + } } - else PyErr_Clear(); + else { + PyErr_Clear(); + } if (PyString_Check(op)) { chktype = PyArray_DescrNewFromType(PyArray_STRING); @@ -7720,10 +7868,10 @@ if (PyErr_Occurred()) PyErr_Clear(); } - if (PyInstance_Check(op)) goto deflt; - + if (PyInstance_Check(op)) { + goto deflt; + } if (PySequence_Check(op)) { - l = PyObject_Length(op); if (l < 0 && PyErr_Occurred()) { PyErr_Clear(); @@ -7757,13 +7905,14 @@ chktype = _use_default_type(op); finish: - outtype = _array_small_type(chktype, minitype); Py_DECREF(chktype); Py_DECREF(minitype); - /* VOID Arrays should not occur by "default" - unless input was already a VOID */ - if (outtype->type_num == PyArray_VOID && \ + /* + * VOID Arrays should not occur by "default" + * unless input was already a VOID + */ + if (outtype->type_num == PyArray_VOID && minitype->type_num != PyArray_VOID) { Py_DECREF(outtype); return PyArray_DescrFromType(PyArray_OBJECT); @@ -7778,15 +7927,15 @@ Py_ssize_t i, slen; int res = 0; - /* This code is to ensure that the sequence access below will - return a lower-dimensional sequence. + /* + * This code is to ensure that the sequence access below will + * return a lower-dimensional sequence. */ if (PyArray_Check(s) && !(PyArray_CheckExact(s))) { - /* FIXME: This could probably copy the entire subarray - at once here using a faster algorithm. - Right now, just make sure a base-class array - is used so that the dimensionality reduction assumption - is correct. + /* + * FIXME: This could probably copy the entire subarray at once here using + * a faster algorithm. Right now, just make sure a base-class array is + * used so that the dimensionality reduction assumption is correct. */ s = PyArray_EnsureArray(s); } @@ -7798,14 +7947,13 @@ } slen = PySequence_Length(s); - if (slen != a->dimensions[dim]) { PyErr_Format(PyExc_ValueError, "setArrayFromSequence: sequence/array shape mismatch."); return -1; } - for(i=0; ind - dim) > 1) { res = setArrayFromSequence(a, o, dim+1, offset); @@ -7814,7 +7962,9 @@ res = a->descr->f->setitem(o, (a->data + offset), a); } Py_DECREF(o); - if (res < 0) return res; + if (res < 0) { + return res; + } offset += a->strides[dim]; } return 0; @@ -7834,12 +7984,13 @@ "assignment to 0-d array"); return -1; } - return setArrayFromSequence(self, v, 0, 0); } -/* "Array Scalars don't call this code" */ -/* steals reference to typecode -- no NULL*/ +/* + * "Array Scalars don't call this code" + * steals reference to typecode -- no NULL + */ static PyObject * Array_FromPyScalar(PyObject *op, PyArray_Descr *typecode) { @@ -7852,7 +8003,6 @@ if (itemsize == 0 && PyTypeNum_ISEXTENDED(type)) { itemsize = PyObject_Length(op); - if (type == PyArray_UNICODE) { itemsize *= 4; } @@ -7879,21 +8029,21 @@ if (PyErr_Occurred()) { Py_DECREF(ret); return NULL; - } + } else { return (PyObject *)ret; } } -/* If s is not a list, return 0 - Otherwise: - - run object_depth_and_dimension on all the elements - and make sure the returned shape and size - is the same for each element - -*/ +/* + * If s is not a list, return 0 + * Otherwise: + * + * run object_depth_and_dimension on all the elements + * and make sure the returned shape and size is the + * same for each element + */ static int object_depth_and_dimension(PyObject *s, int max, intp *dims) { @@ -7931,7 +8081,7 @@ } nd = object_depth_and_dimension(obj, max - 1, newdims); - for(i = 1; i < size; i++) { + for (i = 1; i < size; i++) { if (islist) { obj = PyList_GET_ITEM(s, i); } @@ -7947,7 +8097,7 @@ } } - for(i = 1; i <= nd; i++) { + for (i = 1; i <= nd; i++) { dims[i] = newdims[i-1]; } dims[0] = size; @@ -7970,12 +8120,10 @@ if (nd == 0) { return Array_FromPyScalar(s, typecode); } - r = (PyArrayObject*)PyArray_NewFromDescr(&PyArray_Type, typecode, nd, d, NULL, NULL, fortran, NULL); - if (!r) { return NULL; } @@ -7986,12 +8134,12 @@ return (PyObject*)r; } -/* +/* * isobject means that we are constructing an * object array on-purpose with a nested list. * Only a list is interpreted as a sequence with these rules + * steals reference to typecode */ -/* steals reference to typecode */ static PyObject * Array_FromSequence(PyObject *s, PyArray_Descr *typecode, int fortran, int min_depth, int max_depth) @@ -8007,11 +8155,9 @@ int itemsize = typecode->elsize; check_it = (typecode->type != PyArray_CHARLTR); - stop_at_string = (type != PyArray_STRING) || (typecode->type == PyArray_STRINGLTR); - - stop_at_tuple = (type == PyArray_VOID && (typecode->names \ + stop_at_tuple = (type == PyArray_VOID && (typecode->names || typecode->subarray)); nd = discover_depth(s, MAX_DIMS + 1, stop_at_string, stop_at_tuple); @@ -8076,8 +8222,8 @@ /*NUMPY_API - Is the typenum valid? -*/ + * Is the typenum valid? + */ static int PyArray_ValidType(int type) { From numpy-svn at scipy.org Wed Feb 18 19:45:18 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Wed, 18 Feb 2009 18:45:18 -0600 (CST) Subject: [Numpy-svn] r6399 - trunk/numpy/core/src Message-ID: <20090219004518.A480DC7C019@scipy.org> Author: charris Date: 2009-02-18 18:45:14 -0600 (Wed, 18 Feb 2009) New Revision: 6399 Modified: trunk/numpy/core/src/arrayobject.c Log: Coding style cleanups. Modified: trunk/numpy/core/src/arrayobject.c =================================================================== --- trunk/numpy/core/src/arrayobject.c 2009-02-18 20:54:52 UTC (rev 6398) +++ trunk/numpy/core/src/arrayobject.c 2009-02-19 00:45:14 UTC (rev 6399) @@ -8238,11 +8238,11 @@ return res; } -/* For backward compatibility */ - -/* steals reference to at --- cannot be NULL*/ /*NUMPY_API - *Cast an array using typecode structure. + * For backward compatibility + * + * Cast an array using typecode structure. + * steals reference to at --- cannot be NULL */ static PyObject * PyArray_CastToType(PyArrayObject *mp, PyArray_Descr *at, int fortran) @@ -8253,12 +8253,11 @@ mpd = mp->descr; - if (((mpd == at) || ((mpd->type_num == at->type_num) && \ - PyArray_EquivByteorders(mpd->byteorder,\ - at->byteorder) && \ - ((mpd->elsize == at->elsize) || \ - (at->elsize==0)))) && \ - PyArray_ISBEHAVED_RO(mp)) { + if (((mpd == at) || + ((mpd->type_num == at->type_num) && + PyArray_EquivByteorders(mpd->byteorder, at->byteorder) && + ((mpd->elsize == at->elsize) || (at->elsize==0)))) && + PyArray_ISBEHAVED_RO(mp)) { Py_DECREF(at); Py_INCREF(mp); return (PyObject *)mp; @@ -8269,7 +8268,7 @@ if (at == NULL) { return NULL; } - if (mpd->type_num == PyArray_STRING && + if (mpd->type_num == PyArray_STRING && at->type_num == PyArray_UNICODE) { at->elsize = mpd->elsize << 2; } @@ -8303,14 +8302,15 @@ } /*NUMPY_API - Get a cast function to cast from the input descriptor to the - output type_number (must be a registered data-type). - Returns NULL if un-successful. -*/ + * Get a cast function to cast from the input descriptor to the + * output type_number (must be a registered data-type). + * Returns NULL if un-successful. + */ static PyArray_VectorUnaryFunc * PyArray_GetCastFunc(PyArray_Descr *descr, int type_num) { - PyArray_VectorUnaryFunc *castfunc=NULL; + PyArray_VectorUnaryFunc *castfunc = NULL; + if (type_num < PyArray_NTYPES) { castfunc = descr->f->cast[type_num]; } @@ -8335,19 +8335,19 @@ return castfunc; } - PyErr_SetString(PyExc_ValueError, - "No cast function available."); + PyErr_SetString(PyExc_ValueError, "No cast function available."); return NULL; } -/* Reference counts: - copyswapn is used which increases and decreases reference counts for OBJECT arrays. - All that needs to happen is for any reference counts in the buffers to be - decreased when completely finished with the buffers. - - buffers[0] is the destination - buffers[1] is the source -*/ +/* + * Reference counts: + * copyswapn is used which increases and decreases reference counts for OBJECT arrays. + * All that needs to happen is for any reference counts in the buffers to be + * decreased when completely finished with the buffers. + * + * buffers[0] is the destination + * buffers[1] is the source + */ static void _strided_buffered_cast(char *dptr, intp dstride, int delsize, int dswap, PyArray_CopySwapNFunc *dcopyfunc, @@ -8359,10 +8359,11 @@ { int i; if (N <= bufsize) { - /* 1. copy input to buffer and swap - 2. cast input to output - 3. swap output if necessary and copy from output buffer - */ + /* + * 1. copy input to buffer and swap + * 2. cast input to output + * 3. swap output if necessary and copy from output buffer + */ scopyfunc(buffers[1], selsize, sptr, sstride, N, sswap, src); castfunc(buffers[1], buffers[0], N, src, dest); dcopyfunc(dptr, dstride, buffers[0], delsize, N, dswap, dest); @@ -8371,9 +8372,9 @@ /* otherwise we need to divide up into bufsize pieces */ i = 0; - while(N > 0) { - int newN; - newN = MIN(N, bufsize); + while (N > 0) { + int newN = MIN(N, bufsize); + _strided_buffered_cast(dptr+i*dstride, dstride, delsize, dswap, dcopyfunc, sptr+i*sstride, sstride, selsize, @@ -8453,7 +8454,7 @@ } #endif - while(multi->index < multi->size) { + while (multi->index < multi->size) { _strided_buffered_cast(multi->iters[0]->dataptr, ostrides, delsize, oswap, ocopyfunc, @@ -8472,13 +8473,13 @@ Py_DECREF(multi); if (PyDataType_REFCHK(in->descr)) { obptr = buffers[1]; - for(i = 0; i < N; i++, obptr+=selsize) { + for (i = 0; i < N; i++, obptr+=selsize) { PyArray_Item_XDECREF(obptr, out->descr); } } if (PyDataType_REFCHK(out->descr)) { obptr = buffers[0]; - for(i = 0; i < N; i++, obptr+=delsize) { + for (i = 0; i < N; i++, obptr+=delsize) { PyArray_Item_XDECREF(obptr, out->descr); } } @@ -8508,7 +8509,7 @@ { int simple; int same; - PyArray_VectorUnaryFunc *castfunc=NULL; + PyArray_VectorUnaryFunc *castfunc = NULL; int mpsize = PyArray_SIZE(mp); int iswap, oswap; NPY_BEGIN_THREADS_DEF; @@ -8517,8 +8518,7 @@ return 0; } if (!PyArray_ISWRITEABLE(out)) { - PyErr_SetString(PyExc_ValueError, - "output array is not writeable"); + PyErr_SetString(PyExc_ValueError, "output array is not writeable"); return -1; } @@ -8573,13 +8573,13 @@ { char *inbuffer, *bptr, *optr; char *outbuffer=NULL; - PyArrayIterObject *it_in=NULL, *it_out=NULL; + PyArrayIterObject *it_in = NULL, *it_out = NULL; register intp i, index; intp ncopies = PyArray_SIZE(out) / PyArray_SIZE(in); int elsize=in->descr->elsize; int nels = PyArray_BUFSIZE; int el; - int inswap, outswap=0; + int inswap, outswap = 0; int obuf=!PyArray_ISCARRAY(out); int oelsize = out->descr->elsize; PyArray_CopySwapFunc *in_csn; @@ -8598,45 +8598,50 @@ inswap = !(PyArray_ISFLEXIBLE(in) || PyArray_ISNOTSWAPPED(in)); inbuffer = PyDataMem_NEW(PyArray_BUFSIZE*elsize); - if (inbuffer == NULL) return -1; - if (PyArray_ISOBJECT(in)) + if (inbuffer == NULL) { + return -1; + } + if (PyArray_ISOBJECT(in)) { memset(inbuffer, 0, PyArray_BUFSIZE*elsize); + } it_in = (PyArrayIterObject *)PyArray_IterNew((PyObject *)in); - if (it_in == NULL) goto exit; - + if (it_in == NULL) { + goto exit; + } if (obuf) { - outswap = !(PyArray_ISFLEXIBLE(out) || \ + outswap = !(PyArray_ISFLEXIBLE(out) || PyArray_ISNOTSWAPPED(out)); outbuffer = PyDataMem_NEW(PyArray_BUFSIZE*oelsize); - if (outbuffer == NULL) goto exit; - if (PyArray_ISOBJECT(out)) + if (outbuffer == NULL) { + goto exit; + } + if (PyArray_ISOBJECT(out)) { memset(outbuffer, 0, PyArray_BUFSIZE*oelsize); - + } it_out = (PyArrayIterObject *)PyArray_IterNew((PyObject *)out); - if (it_out == NULL) goto exit; - + if (it_out == NULL) { + goto exit; + } nels = MIN(nels, PyArray_BUFSIZE); } optr = (obuf) ? outbuffer: out->data; bptr = inbuffer; el = 0; - while(ncopies--) { + while (ncopies--) { index = it_in->size; PyArray_ITER_RESET(it_in); - while(index--) { + while (index--) { in_csn(bptr, it_in->dataptr, inswap, in); bptr += elsize; PyArray_ITER_NEXT(it_in); el += 1; if ((el == nels) || (index == 0)) { /* buffer filled, do cast */ - castfunc(inbuffer, optr, el, in, out); - if (obuf) { /* Copy from outbuffer to array */ - for(i=0; idataptr, optr, outswap, out); @@ -8654,6 +8659,7 @@ } } retval = 0; + exit: Py_XDECREF(it_in); PyDataMem_FREE(inbuffer); @@ -8665,20 +8671,21 @@ } /*NUMPY_API - Cast to an already created array. Arrays don't have to be "broadcastable" - Only requirement is they have the same number of elements. -*/ + * Cast to an already created array. Arrays don't have to be "broadcastable" + * Only requirement is they have the same number of elements. + */ static int PyArray_CastAnyTo(PyArrayObject *out, PyArrayObject *mp) { int simple; - PyArray_VectorUnaryFunc *castfunc=NULL; + PyArray_VectorUnaryFunc *castfunc = NULL; int mpsize = PyArray_SIZE(mp); - if (mpsize == 0) return 0; + if (mpsize == 0) { + return 0; + } if (!PyArray_ISWRITEABLE(out)) { - PyErr_SetString(PyExc_ValueError, - "output array is not writeable"); + PyErr_SetString(PyExc_ValueError, "output array is not writeable"); return -1; } @@ -8690,36 +8697,34 @@ } castfunc = PyArray_GetCastFunc(mp->descr, out->descr->type_num); - if (castfunc == NULL) return -1; - - + if (castfunc == NULL) { + return -1; + } simple = ((PyArray_ISCARRAY_RO(mp) && PyArray_ISCARRAY(out)) || (PyArray_ISFARRAY_RO(mp) && PyArray_ISFARRAY(out))); - if (simple) { castfunc(mp->data, out->data, mpsize, mp, out); return 0; } - if (PyArray_SAMESHAPE(out, mp)) { int iswap, oswap; iswap = PyArray_ISBYTESWAPPED(mp) && !PyArray_ISFLEXIBLE(mp); oswap = PyArray_ISBYTESWAPPED(out) && !PyArray_ISFLEXIBLE(out); return _broadcast_cast(out, mp, castfunc, iswap, oswap); } - return _bufferedcast(out, mp, castfunc); } -/* steals reference to newtype --- acc. NULL */ -/*NUMPY_API*/ +/*NUMPY_API + * steals reference to newtype --- acc. NULL + */ static PyObject * PyArray_FromArray(PyArrayObject *arr, PyArray_Descr *newtype, int flags) { - PyArrayObject *ret=NULL; + PyArrayObject *ret = NULL; int itemsize; int copy = 0; int arrflags; @@ -8728,9 +8733,7 @@ PyTypeObject *subtype; oldtype = PyArray_DESCR(arr); - subtype = arr->ob_type; - if (newtype == NULL) { newtype = oldtype; Py_INCREF(oldtype); } @@ -8744,10 +8747,11 @@ itemsize = newtype->elsize; } - /* Can't cast unless ndim-0 array, FORCECAST is specified - or the cast is safe. - */ - if (!(flags & FORCECAST) && !PyArray_NDIM(arr)==0 && + /* + * Can't cast unless ndim-0 array, FORCECAST is specified + * or the cast is safe. + */ + if (!(flags & FORCECAST) && !PyArray_NDIM(arr) == 0 && !PyArray_CanCastTo(oldtype, newtype)) { Py_DECREF(newtype); PyErr_SetString(PyExc_TypeError, @@ -8759,16 +8763,15 @@ /* Don't copy if sizes are compatible */ if ((flags & ENSURECOPY) || PyArray_EquivTypes(oldtype, newtype)) { arrflags = arr->flags; - - copy = (flags & ENSURECOPY) || \ - ((flags & CONTIGUOUS) && (!(arrflags & CONTIGUOUS))) \ - || ((flags & ALIGNED) && (!(arrflags & ALIGNED))) \ - || (arr->nd > 1 && \ - ((flags & FORTRAN) && (!(arrflags & FORTRAN)))) \ + copy = (flags & ENSURECOPY) || + ((flags & CONTIGUOUS) && (!(arrflags & CONTIGUOUS))) + || ((flags & ALIGNED) && (!(arrflags & ALIGNED))) + || (arr->nd > 1 && + ((flags & FORTRAN) && (!(arrflags & FORTRAN)))) || ((flags & WRITEABLE) && (!(arrflags & WRITEABLE))); if (copy) { - if ((flags & UPDATEIFCOPY) && \ + if ((flags & UPDATEIFCOPY) && (!PyArray_ISWRITEABLE(arr))) { Py_DECREF(newtype); PyErr_SetString(PyExc_ValueError, msg); @@ -8777,7 +8780,7 @@ if ((flags & ENSUREARRAY)) { subtype = &PyArray_Type; } - ret = (PyArrayObject *) \ + ret = (PyArrayObject *) PyArray_NewFromDescr(subtype, newtype, arr->nd, arr->dimensions, @@ -8798,14 +8801,16 @@ Py_INCREF(arr); } } - /* If no copy then just increase the reference - count and return the input */ + /* + * If no copy then just increase the reference + * count and return the input + */ else { Py_DECREF(newtype); if ((flags & ENSUREARRAY) && !PyArray_CheckExact(arr)) { Py_INCREF(arr->descr); - ret = (PyArrayObject *) \ + ret = (PyArrayObject *) PyArray_NewFromDescr(&PyArray_Type, arr->descr, arr->nd, @@ -8825,10 +8830,12 @@ } } - /* The desired output type is different than the input - array type and copy was not specified */ + /* + * The desired output type is different than the input + * array type and copy was not specified + */ else { - if ((flags & UPDATEIFCOPY) && \ + if ((flags & UPDATEIFCOPY) && (!PyArray_ISWRITEABLE(arr))) { Py_DECREF(newtype); PyErr_SetString(PyExc_ValueError, msg); @@ -8837,7 +8844,7 @@ if ((flags & ENSUREARRAY)) { subtype = &PyArray_Type; } - ret = (PyArrayObject *) \ + ret = (PyArrayObject *) PyArray_NewFromDescr(subtype, newtype, arr->nd, arr->dimensions, NULL, NULL, @@ -8958,16 +8965,22 @@ #undef _MY_FAIL descr = PyArray_DescrFromType(type_num); - if (descr == NULL) return NULL; + if (descr == NULL) { + return NULL; + } swap = !PyArray_ISNBO(swapchar); if (descr->elsize == 0 || swap) { /* Need to make a new PyArray_Descr */ PyArray_DESCR_REPLACE(descr); - if (descr==NULL) return NULL; - if (descr->elsize == 0) + if (descr==NULL) { + return NULL; + } + if (descr->elsize == 0) { descr->elsize = size; - if (swap) + } + if (swap) { descr->byteorder = swapchar; + } } return descr; } @@ -8976,7 +8989,7 @@ static PyObject * PyArray_FromStructInterface(PyObject *input) { - PyArray_Descr *thetype=NULL; + PyArray_Descr *thetype = NULL; char buf[40]; PyArrayInterface *inter; PyObject *attr, *r; @@ -8987,9 +9000,13 @@ PyErr_Clear(); return Py_NotImplemented; } - if (!PyCObject_Check(attr)) goto fail; + if (!PyCObject_Check(attr)) { + goto fail; + } inter = PyCObject_AsVoidPtr(attr); - if (inter->two != 2) goto fail; + if (inter->two != 2) { + goto fail; + } if ((inter->flags & NOTSWAPPED) != NOTSWAPPED) { endian = PyArray_OPPBYTE; inter->flags &= ~NOTSWAPPED; @@ -9033,10 +9050,10 @@ static PyObject * PyArray_FromInterface(PyObject *input) { - PyObject *attr=NULL, *item=NULL; - PyObject *tstr=NULL, *shape=NULL; - PyObject *inter=NULL; - PyObject *base=NULL; + PyObject *attr = NULL, *item = NULL; + PyObject *tstr = NULL, *shape = NULL; + PyObject *inter = NULL; + PyObject *base = NULL; PyArrayObject *ret; PyArray_Descr *type=NULL; char *data; @@ -9051,26 +9068,42 @@ /* Get the strides */ inter = PyObject_GetAttrString(input, "__array_interface__"); - if (inter == NULL) {PyErr_Clear(); return Py_NotImplemented;} - if (!PyDict_Check(inter)) {Py_DECREF(inter); return Py_NotImplemented;} - + if (inter == NULL) { + PyErr_Clear(); + return Py_NotImplemented; + } + if (!PyDict_Check(inter)) { + Py_DECREF(inter); + return Py_NotImplemented; + } shape = PyDict_GetItemString(inter, "shape"); - if (shape == NULL) {Py_DECREF(inter); return Py_NotImplemented;} + if (shape == NULL) { + Py_DECREF(inter); + return Py_NotImplemented; + } tstr = PyDict_GetItemString(inter, "typestr"); - if (tstr == NULL) {Py_DECREF(inter); return Py_NotImplemented;} + if (tstr == NULL) { + Py_DECREF(inter); + return Py_NotImplemented; + } attr = PyDict_GetItemString(inter, "data"); base = input; if ((attr == NULL) || (attr==Py_None) || (!PyTuple_Check(attr))) { - if (attr && (attr != Py_None)) item=attr; - else item=input; - res = PyObject_AsWriteBuffer(item, (void **)&data, - &buffer_len); + if (attr && (attr != Py_None)) { + item = attr; + } + else { + item = input; + } + res = PyObject_AsWriteBuffer(item, (void **)&data, &buffer_len); if (res < 0) { PyErr_Clear(); - res = PyObject_AsReadBuffer(item, (const void **)&data, - &buffer_len); - if (res < 0) goto fail; + res = PyObject_AsReadBuffer( + item, (const void **)&data, &buffer_len); + if (res < 0) { + goto fail; + } dataflags &= ~WRITEABLE; } attr = PyDict_GetItemString(inter, "offset"); @@ -9125,7 +9158,9 @@ goto fail; } type = _array_typedescr_fromstr(PyString_AS_STRING(attr)); - if (type==NULL) goto fail; + if (type == NULL) { + goto fail; + } attr = shape; if (!PyTuple_Check(attr)) { PyErr_SetString(PyExc_TypeError, "shape must be a tuple"); @@ -9133,17 +9168,21 @@ goto fail; } n = PyTuple_GET_SIZE(attr); - for(i=0; ibase = base; @@ -9162,12 +9201,16 @@ Py_DECREF(ret); return NULL; } - for(i=0; istrides, strides, n*sizeof(intp)); } else PyErr_Clear(); @@ -9188,35 +9231,38 @@ PyObject *array_meth; array_meth = PyObject_GetAttrString(op, "__array__"); - if (array_meth == NULL) {PyErr_Clear(); return Py_NotImplemented;} + if (array_meth == NULL) { + PyErr_Clear(); + return Py_NotImplemented; + } if (context == NULL) { - if (typecode == NULL) new = PyObject_CallFunction(array_meth, - NULL); - else new = PyObject_CallFunction(array_meth, "O", typecode); + if (typecode == NULL) { + new = PyObject_CallFunction(array_meth, NULL); + } + else { + new = PyObject_CallFunction(array_meth, "O", typecode); + } } else { if (typecode == NULL) { - new = PyObject_CallFunction(array_meth, "OO", Py_None, - context); - if (new == NULL && \ - PyErr_ExceptionMatches(PyExc_TypeError)) { + new = PyObject_CallFunction(array_meth, "OO", Py_None, context); + if (new == NULL && PyErr_ExceptionMatches(PyExc_TypeError)) { PyErr_Clear(); new = PyObject_CallFunction(array_meth, ""); } } else { - new = PyObject_CallFunction(array_meth, "OO", - typecode, context); - if (new == NULL && \ - PyErr_ExceptionMatches(PyExc_TypeError)) { + new = PyObject_CallFunction(array_meth, "OO", typecode, context); + if (new == NULL && PyErr_ExceptionMatches(PyExc_TypeError)) { PyErr_Clear(); - new = PyObject_CallFunction(array_meth, "O", - typecode); + new = PyObject_CallFunction(array_meth, "O", typecode); } } } Py_DECREF(array_meth); - if (new == NULL) return NULL; + if (new == NULL) { + return NULL; + } if (!PyArray_Check(new)) { PyErr_SetString(PyExc_ValueError, "object __array__ method not " \ @@ -9227,23 +9273,27 @@ return new; } -/* Does not check for ENSURECOPY and NOTSWAPPED in flags */ -/* Steals a reference to newtype --- which can be NULL */ -/*NUMPY_API*/ +/*NUMPY_API + * Does not check for ENSURECOPY and NOTSWAPPED in flags + * Steals a reference to newtype --- which can be NULL + */ static PyObject * PyArray_FromAny(PyObject *op, PyArray_Descr *newtype, int min_depth, int max_depth, int flags, PyObject *context) { - /* This is the main code to make a NumPy array from a Python - Object. It is called from lot's of different places which - is why there are so many checks. The comments try to - explain some of the checks. */ - - PyObject *r=NULL; + /* + * This is the main code to make a NumPy array from a Python + * Object. It is called from lot's of different places which + * is why there are so many checks. The comments try to + * explain some of the checks. + */ + PyObject *r = NULL; int seq = FALSE; - /* Is input object already an array? */ - /* This is where the flags are used */ + /* + * Is input object already an array? + * This is where the flags are used + */ if (PyArray_Check(op)) { r = PyArray_FromArray((PyArrayObject *)op, newtype, flags); } @@ -9267,8 +9317,7 @@ return NULL; } if (newtype != NULL || flags != 0) { - new = PyArray_FromArray((PyArrayObject *)r, newtype, - flags); + new = PyArray_FromArray((PyArrayObject *)r, newtype, flags); Py_DECREF(r); r = new; } @@ -9304,7 +9353,7 @@ PyErr_Clear(); if (isobject) { Py_INCREF(newtype); - r = ObjectArray_FromNestedList \ + r = ObjectArray_FromNestedList (op, newtype, flags & FORTRAN); seq = TRUE; Py_DECREF(newtype); @@ -9326,7 +9375,6 @@ } /* Be sure we succeed here */ - if(!PyArray_Check(r)) { PyErr_SetString(PyExc_RuntimeError, "internal error: PyArray_FromAny "\ @@ -9356,8 +9404,9 @@ return NULL; } -/* new reference -- accepts NULL for mintype*/ -/*NUMPY_API*/ +/*NUMPY_API +* new reference -- accepts NULL for mintype +*/ static PyArray_Descr * PyArray_DescrFromObject(PyObject *op, PyArray_Descr *mintype) { @@ -9365,9 +9414,8 @@ } /*NUMPY_API - Return the typecode of the array a Python object would be converted - to -*/ + * Return the typecode of the array a Python object would be converted to + */ static int PyArray_ObjectType(PyObject *op, int minimum_type) { @@ -9376,7 +9424,9 @@ int ret; intype = PyArray_DescrFromType(minimum_type); - if (intype == NULL) PyErr_Clear(); + if (intype == NULL) { + PyErr_Clear(); + } outtype = _array_find_type(op, intype, MAX_DIMS); ret = outtype->type_num; Py_DECREF(outtype); @@ -9385,56 +9435,57 @@ } -/* flags is any of - CONTIGUOUS, - FORTRAN, - ALIGNED, - WRITEABLE, - NOTSWAPPED, - ENSURECOPY, - UPDATEIFCOPY, - FORCECAST, - ENSUREARRAY, - ELEMENTSTRIDES +/* + * flags is any of + * CONTIGUOUS, + * FORTRAN, + * ALIGNED, + * WRITEABLE, + * NOTSWAPPED, + * ENSURECOPY, + * UPDATEIFCOPY, + * FORCECAST, + * ENSUREARRAY, + * ELEMENTSTRIDES + * + * or'd (|) together + * + * Any of these flags present means that the returned array should + * guarantee that aspect of the array. Otherwise the returned array + * won't guarantee it -- it will depend on the object as to whether or + * not it has such features. + * + * Note that ENSURECOPY is enough + * to guarantee CONTIGUOUS, ALIGNED and WRITEABLE + * and therefore it is redundant to include those as well. + * + * BEHAVED == ALIGNED | WRITEABLE + * CARRAY = CONTIGUOUS | BEHAVED + * FARRAY = FORTRAN | BEHAVED + * + * FORTRAN can be set in the FLAGS to request a FORTRAN array. + * Fortran arrays are always behaved (aligned, + * notswapped, and writeable) and not (C) CONTIGUOUS (if > 1d). + * + * UPDATEIFCOPY flag sets this flag in the returned array if a copy is + * made and the base argument points to the (possibly) misbehaved array. + * When the new array is deallocated, the original array held in base + * is updated with the contents of the new array. + * + * FORCECAST will cause a cast to occur regardless of whether or not + * it is safe. + */ - or'd (|) together - - Any of these flags present means that the returned array should - guarantee that aspect of the array. Otherwise the returned array - won't guarantee it -- it will depend on the object as to whether or - not it has such features. - - Note that ENSURECOPY is enough - to guarantee CONTIGUOUS, ALIGNED and WRITEABLE - and therefore it is redundant to include those as well. - - BEHAVED == ALIGNED | WRITEABLE - CARRAY = CONTIGUOUS | BEHAVED - FARRAY = FORTRAN | BEHAVED - - FORTRAN can be set in the FLAGS to request a FORTRAN array. - Fortran arrays are always behaved (aligned, - notswapped, and writeable) and not (C) CONTIGUOUS (if > 1d). - - UPDATEIFCOPY flag sets this flag in the returned array if a copy is - made and the base argument points to the (possibly) misbehaved array. - When the new array is deallocated, the original array held in base - is updated with the contents of the new array. - - FORCECAST will cause a cast to occur regardless of whether or not - it is safe. -*/ - - -/* steals a reference to descr -- accepts NULL */ -/*NUMPY_API*/ +/*NUMPY_API + * steals a reference to descr -- accepts NULL + */ static PyObject * PyArray_CheckFromAny(PyObject *op, PyArray_Descr *descr, int min_depth, int max_depth, int requires, PyObject *context) { PyObject *obj; if (requires & NOTSWAPPED) { - if (!descr && PyArray_Check(op) && \ + if (!descr && PyArray_Check(op) && !PyArray_ISNBO(PyArray_DESCR(op)->byteorder)) { descr = PyArray_DescrNew(PyArray_DESCR(op)); } @@ -9446,9 +9497,10 @@ } } - obj = PyArray_FromAny(op, descr, min_depth, max_depth, - requires, context); - if (obj == NULL) return NULL; + obj = PyArray_FromAny(op, descr, min_depth, max_depth, requires, context); + if (obj == NULL) { + return NULL; + } if ((requires & ELEMENTSTRIDES) && !PyArray_ElementStrides(obj)) { PyObject *new; @@ -9459,25 +9511,25 @@ return obj; } -/* This is a quick wrapper around PyArray_FromAny(op, NULL, 0, 0, - ENSUREARRAY) */ -/* that special cases Arrays and PyArray_Scalars up front */ -/* It *steals a reference* to the object */ -/* It also guarantees that the result is PyArray_Type */ - -/* Because it decrefs op if any conversion needs to take place - so it can be used like PyArray_EnsureArray(some_function(...)) */ - -/*NUMPY_API*/ +/*NUMPY_API + * This is a quick wrapper around PyArray_FromAny(op, NULL, 0, 0, ENSUREARRAY) + * that special cases Arrays and PyArray_Scalars up front + * It *steals a reference* to the object + * It also guarantees that the result is PyArray_Type + * Because it decrefs op if any conversion needs to take place + * so it can be used like PyArray_EnsureArray(some_function(...)) + */ static PyObject * PyArray_EnsureArray(PyObject *op) { PyObject *new; - if (op == NULL) return NULL; - - if (PyArray_CheckExact(op)) return op; - + if (op == NULL) { + return NULL; + } + if (PyArray_CheckExact(op)) { + return op; + } if (PyArray_Check(op)) { new = PyArray_View((PyArrayObject *)op, NULL, &PyArray_Type); Py_DECREF(op); @@ -9497,25 +9549,36 @@ static PyObject * PyArray_EnsureAnyArray(PyObject *op) { - if (op && PyArray_Check(op)) return op; + if (op && PyArray_Check(op)) { + return op; + } return PyArray_EnsureArray(op); } /*NUMPY_API - Check the type coercion rules. -*/ + *Check the type coercion rules. + */ static int PyArray_CanCastSafely(int fromtype, int totype) { PyArray_Descr *from, *to; register int felsize, telsize; - if (fromtype == totype) return 1; - if (fromtype == PyArray_BOOL) return 1; - if (totype == PyArray_BOOL) return 0; - if (totype == PyArray_OBJECT || totype == PyArray_VOID) return 1; - if (fromtype == PyArray_OBJECT || fromtype == PyArray_VOID) return 0; - + if (fromtype == totype) { + return 1; + } + if (fromtype == PyArray_BOOL) { + return 1; + } + if (totype == PyArray_BOOL) { + return 0; + } + if (totype == PyArray_OBJECT || totype == PyArray_VOID) { + return 1; + } + if (fromtype == PyArray_OBJECT || fromtype == PyArray_VOID) { + return 0; + } from = PyArray_DescrFromType(fromtype); /* * cancastto is a PyArray_NOTYPE terminated C-int-array of types that @@ -9525,11 +9588,14 @@ int *curtype; curtype = from->f->cancastto; while (*curtype != PyArray_NOTYPE) { - if (*curtype++ == totype) return 1; + if (*curtype++ == totype) { + return 1; + } } } - if (PyTypeNum_ISUSERDEF(totype)) return 0; - + if (PyTypeNum_ISUSERDEF(totype)) { + return 0; + } to = PyArray_DescrFromType(totype); telsize = to->elsize; felsize = from->elsize; @@ -9618,14 +9684,14 @@ Bool ret; ret = (Bool) PyArray_CanCastSafely(fromtype, totype); - if (ret) { /* Check String and Unicode more closely */ + if (ret) { + /* Check String and Unicode more closely */ if (fromtype == PyArray_STRING) { if (totype == PyArray_STRING) { ret = (from->elsize <= to->elsize); } else if (totype == PyArray_UNICODE) { - ret = (from->elsize << 2 \ - <= to->elsize); + ret = (from->elsize << 2 <= to->elsize); } } else if (fromtype == PyArray_UNICODE) { @@ -9633,17 +9699,18 @@ ret = (from->elsize <= to->elsize); } } - /* TODO: If totype is STRING or unicode - see if the length is long enough to hold the - stringified value of the object. - */ + /* + * TODO: If totype is STRING or unicode + * see if the length is long enough to hold the + * stringified value of the object. + */ } return ret; } /*NUMPY_API - See if array scalars can be cast. -*/ + * See if array scalars can be cast. + */ static Bool PyArray_CanCastScalar(PyTypeObject *from, PyTypeObject *to) { @@ -9652,8 +9719,9 @@ fromtype = _typenum_fromtypeobj((PyObject *)from, 0); totype = _typenum_fromtypeobj((PyObject *)to, 0); - if (fromtype == PyArray_NOTYPE || totype == PyArray_NOTYPE) + if (fromtype == PyArray_NOTYPE || totype == PyArray_NOTYPE) { return FALSE; + } return (Bool) PyArray_CanCastSafely(fromtype, totype); } @@ -9680,26 +9748,29 @@ it = (PyArrayIterObject *)_pya_malloc(sizeof(PyArrayIterObject)); PyObject_Init((PyObject *)it, &PyArrayIter_Type); /* it = PyObject_New(PyArrayIterObject, &PyArrayIter_Type);*/ - if (it == NULL) + if (it == NULL) { return NULL; - + } nd = ao->nd; PyArray_UpdateFlags(ao, CONTIGUOUS); - if PyArray_ISCONTIGUOUS(ao) it->contiguous = 1; - else it->contiguous = 0; + if (PyArray_ISCONTIGUOUS(ao)) { + it->contiguous = 1; + } + else { + it->contiguous = 0; + } Py_INCREF(ao); it->ao = ao; it->size = PyArray_SIZE(ao); it->nd_m1 = nd - 1; it->factors[nd-1] = 1; - for(i=0; i < nd; i++) { + for (i = 0; i < nd; i++) { it->dims_m1[i] = ao->dimensions[i] - 1; it->strides[i] = ao->strides[i]; - it->backstrides[i] = it->strides[i] * \ - it->dims_m1[i]; - if (i > 0) - it->factors[nd-i-1] = it->factors[nd-i] * \ - ao->dimensions[nd-i]; + it->backstrides[i] = it->strides[i] * it->dims_m1[i]; + if (i > 0) { + it->factors[nd-i-1] = it->factors[nd-i] * ao->dimensions[nd-i]; + } } PyArray_ITER_RESET(it); @@ -9707,8 +9778,8 @@ } /*NUMPY_API - Get Iterator broadcast to a particular shape -*/ + *Get Iterator broadcast to a particular shape + */ static PyObject * PyArray_BroadcastToShape(PyObject *obj, intp *dims, int nd) { @@ -9716,51 +9787,57 @@ int i, diff, j, compat, k; PyArrayObject *ao = (PyArrayObject *)obj; - if (ao->nd > nd) goto err; + if (ao->nd > nd) { + goto err; + } compat = 1; diff = j = nd - ao->nd; - for(i=0; ind; i++, j++) { - if (ao->dimensions[i] == 1) continue; + for (i = 0; i < ao->nd; i++, j++) { + if (ao->dimensions[i] == 1) { + continue; + } if (ao->dimensions[i] != dims[j]) { compat = 0; break; } } - if (!compat) goto err; - + if (!compat) { + goto err; + } it = (PyArrayIterObject *)_pya_malloc(sizeof(PyArrayIterObject)); PyObject_Init((PyObject *)it, &PyArrayIter_Type); - if (it == NULL) + if (it == NULL) { return NULL; - + } PyArray_UpdateFlags(ao, CONTIGUOUS); - if PyArray_ISCONTIGUOUS(ao) it->contiguous = 1; - else it->contiguous = 0; + if (PyArray_ISCONTIGUOUS(ao)) { + it->contiguous = 1; + } + else { + it->contiguous = 0; + } Py_INCREF(ao); it->ao = ao; it->size = PyArray_MultiplyList(dims, nd); it->nd_m1 = nd - 1; it->factors[nd-1] = 1; - for(i=0; i < nd; i++) { + for (i = 0; i < nd; i++) { it->dims_m1[i] = dims[i] - 1; k = i - diff; - if ((k < 0) || - ao->dimensions[k] != dims[i]) { + if ((k < 0) || ao->dimensions[k] != dims[i]) { it->contiguous = 0; it->strides[i] = 0; } else { it->strides[i] = ao->strides[k]; } - it->backstrides[i] = it->strides[i] * \ - it->dims_m1[i]; - if (i > 0) - it->factors[nd-i-1] = it->factors[nd-i] * \ - dims[nd-i]; + it->backstrides[i] = it->strides[i] * it->dims_m1[i]; + if (i > 0) { + it->factors[nd-i-1] = it->factors[nd-i] * dims[nd-i]; + } } PyArray_ITER_RESET(it); - return (PyObject *)it; err: @@ -9774,29 +9851,31 @@ /*NUMPY_API - Get Iterator that iterates over all but one axis (don't use this with - PyArray_ITER_GOTO1D). The axis will be over-written if negative - with the axis having the smallest stride. -*/ + * Get Iterator that iterates over all but one axis (don't use this with + * PyArray_ITER_GOTO1D). The axis will be over-written if negative + * with the axis having the smallest stride. + */ static PyObject * PyArray_IterAllButAxis(PyObject *obj, int *inaxis) { PyArrayIterObject *it; int axis; it = (PyArrayIterObject *)PyArray_IterNew(obj); - if (it == NULL) return NULL; - - if (PyArray_NDIM(obj)==0) + if (it == NULL) { + return NULL; + } + if (PyArray_NDIM(obj)==0) { return (PyObject *)it; + } if (*inaxis < 0) { - int i, minaxis=0; - intp minstride=0; + int i, minaxis = 0; + intp minstride = 0; i = 0; - while (minstride==0 && i 0 && PyArray_STRIDE(obj, i) < minstride) { minaxis = i; @@ -9814,21 +9893,21 @@ it->dims_m1[axis] = 0; it->backstrides[axis] = 0; - /* (won't fix factors so don't use - PyArray_ITER_GOTO1D with this iterator) */ + /* + * (won't fix factors so don't use + * PyArray_ITER_GOTO1D with this iterator) + */ return (PyObject *)it; } - -/* don't use with PyArray_ITER_GOTO1D because factors are not - adjusted */ - /*NUMPY_API - Adjusts previously broadcasted iterators so that the axis with - the smallest sum of iterator strides is not iterated over. - Returns dimension which is smallest in the range [0,multi->nd). - A -1 is returned if multi->nd == 0. -*/ + * Adjusts previously broadcasted iterators so that the axis with + * the smallest sum of iterator strides is not iterated over. + * Returns dimension which is smallest in the range [0,multi->nd). + * A -1 is returned if multi->nd == 0. + * + * don't use with PyArray_ITER_GOTO1D because factors are not adjusted + */ static int PyArray_RemoveSmallest(PyArrayMultiIterObject *multi) { @@ -9838,34 +9917,33 @@ intp smallest; intp sumstrides[NPY_MAXDIMS]; - if (multi->nd == 0) return -1; - - - for(i=0; ind; i++) { + if (multi->nd == 0) { + return -1; + } + for (i = 0; i < multi->nd; i++) { sumstrides[i] = 0; - for(j=0; jnumiter; j++) { + for (j = 0; j < multi->numiter; j++) { sumstrides[i] += multi->iters[j]->strides[i]; } } - axis=0; + axis = 0; smallest = sumstrides[0]; /* Find longest dimension */ - for(i=1; ind; i++) { + for (i = 1; i < multi->nd; i++) { if (sumstrides[i] < smallest) { axis = i; smallest = sumstrides[i]; } } - - for(i=0; inumiter; i++) { + for(i = 0; i < multi->numiter; i++) { it = multi->iters[i]; it->contiguous = 0; - if (it->size != 0) + if (it->size != 0) { it->size /= (it->dims_m1[axis]+1); + } it->dims_m1[axis] = 0; it->backstrides[axis] = 0; } - multi->size = multi->iters[0]->size; return axis; } @@ -9903,7 +9981,7 @@ iter_subscript_Bool(PyArrayIterObject *self, PyArrayObject *ind) { int index, strides, itemsize; - intp count=0; + intp count = 0; char *dptr, *optr; PyObject *r; int swap; @@ -9925,9 +10003,10 @@ strides = ind->strides[0]; dptr = ind->data; /* Get size of return array */ - while(index--) { - if (*((Bool *)dptr) != 0) + while (index--) { + if (*((Bool *)dptr) != 0) { count++; + } dptr += strides; } itemsize = self->ao->descr->elsize; @@ -9936,17 +10015,17 @@ self->ao->descr, 1, &count, NULL, NULL, 0, (PyObject *)self->ao); - if (r==NULL) return NULL; - + if (r == NULL) { + return NULL; + } /* Set up loop */ optr = PyArray_DATA(r); index = ind->dimensions[0]; dptr = ind->data; - copyswap = self->ao->descr->f->copyswap; /* Loop over Boolean array */ swap = (PyArray_ISNOTSWAPPED(self->ao) != PyArray_ISNOTSWAPPED(r)); - while(index--) { + while (index--) { if (*((Bool *)dptr) != 0) { copyswap(optr, self->dataptr, swap, self->ao); optr += itemsize; @@ -9973,7 +10052,9 @@ itemsize = self->ao->descr->elsize; if (ind->nd == 0) { num = *((intp *)ind->data); - if (num < 0) num += self->size; + if (num < 0) { + num += self->size; + } if (num < 0 || num >= self->size) { PyErr_Format(PyExc_IndexError, "index %d out of bounds" \ @@ -9994,17 +10075,23 @@ ind->nd, ind->dimensions, NULL, NULL, 0, (PyObject *)self->ao); - if (r==NULL) return NULL; - + if (r == NULL) { + return NULL; + } optr = PyArray_DATA(r); ind_it = (PyArrayIterObject *)PyArray_IterNew((PyObject *)ind); - if (ind_it == NULL) {Py_DECREF(r); return NULL;} + if (ind_it == NULL) { + Py_DECREF(r); + return NULL; + } index = ind_it->size; copyswap = PyArray_DESCR(r)->f->copyswap; swap = (PyArray_ISNOTSWAPPED(r) != PyArray_ISNOTSWAPPED(self->ao)); - while(index--) { + while (index--) { num = *((intp *)(ind_it->dataptr)); - if (num < 0) num += self->size; + if (num < 0) { + num += self->size; + } if (num < 0 || num >= self->size) { PyErr_Format(PyExc_IndexError, "index %d out of bounds" \ @@ -10029,7 +10116,7 @@ static PyObject * iter_subscript(PyArrayIterObject *self, PyObject *ind) { - PyArray_Descr *indtype=NULL; + PyArray_Descr *indtype = NULL; intp start, step_size; intp n_steps; PyObject *r; @@ -10047,7 +10134,9 @@ if (PyTuple_Check(ind)) { int len; len = PyTuple_GET_SIZE(ind); - if (len > 1) goto fail; + if (len > 1) { + goto fail; + } if (len == 0) { Py_INCREF(self->ao); return (PyObject *)self->ao; @@ -10055,12 +10144,11 @@ ind = PyTuple_GET_ITEM(ind, 0); } - /* Tuples >1d not accepted --- i.e. no newaxis */ - /* Could implement this with adjusted strides - and dimensions in iterator */ - - /* Check for Boolean -- this is first becasue - Bool is a subclass of Int */ + /* + * Tuples >1d not accepted --- i.e. no newaxis + * Could implement this with adjusted strides and dimensions in iterator + * Check for Boolean -- this is first becasue Bool is a subclass of Int + */ PyArray_ITER_RESET(self); if (PyBool_Check(ind)) { @@ -10080,12 +10168,12 @@ } /* Check for Integer or Slice */ - if (PyLong_Check(ind) || PyInt_Check(ind) || PySlice_Check(ind)) { start = parse_subindex(ind, &step_size, &n_steps, self->size); - if (start == -1) + if (start == -1) { goto fail; + } if (n_steps == RubberIndex || n_steps == PseudoIndex) { PyErr_SetString(PyExc_IndexError, "cannot use Ellipsis or newaxes here"); @@ -10104,10 +10192,12 @@ 1, &n_steps, NULL, NULL, 0, (PyObject *)self->ao); - if (r==NULL) goto fail; + if (r == NULL) { + goto fail; + } dptr = PyArray_DATA(r); copyswap = PyArray_DESCR(r)->f->copyswap; - while(n_steps--) { + while (n_steps--) { copyswap(dptr, self->dataptr, 0, r); start += step_size; PyArray_ITER_GOTO1D(self, start) @@ -10118,12 +10208,13 @@ } /* convert to INTP array if Integer array scalar or List */ - indtype = PyArray_DescrFromType(PyArray_INTP); if (PyArray_IsScalar(ind, Integer) || PyList_Check(ind)) { Py_INCREF(indtype); obj = PyArray_FromAny(ind, indtype, 0, 0, FORCECAST, NULL); - if (obj == NULL) goto fail; + if (obj == NULL) { + goto fail; + } } else { Py_INCREF(ind); @@ -10141,7 +10232,9 @@ PyObject *new; new = PyArray_FromAny(obj, indtype, 0, 0, FORCECAST | ALIGNED, NULL); - if (new==NULL) goto fail; + if (new == NULL) { + goto fail; + } Py_DECREF(obj); obj = new; r = iter_subscript_int(self, (PyArrayObject *)obj); @@ -10152,12 +10245,15 @@ Py_DECREF(obj); return r; } - else Py_DECREF(indtype); + else { + Py_DECREF(indtype); + } fail: - if (!PyErr_Occurred()) + if (!PyErr_Occurred()) { PyErr_SetString(PyExc_IndexError, "unsupported iterator index"); + } Py_XDECREF(indtype); Py_XDECREF(obj); return NULL; @@ -10191,12 +10287,13 @@ PyArray_ITER_RESET(self); /* Loop over Boolean array */ copyswap = self->ao->descr->f->copyswap; - while(index--) { + while (index--) { if (*((Bool *)dptr) != 0) { copyswap(self->dataptr, val->dataptr, swap, self->ao); PyArray_ITER_NEXT(val); - if (val->index==val->size) + if (val->index == val->size) { PyArray_ITER_RESET(val); + } } dptr += strides; PyArray_ITER_NEXT(self); @@ -10224,11 +10321,15 @@ return 0; } ind_it = (PyArrayIterObject *)PyArray_IterNew((PyObject *)ind); - if (ind_it == NULL) return -1; + if (ind_it == NULL) { + return -1; + } index = ind_it->size; - while(index--) { + while (index--) { num = *((intp *)(ind_it->dataptr)); - if (num < 0) num += self->size; + if (num < 0) { + num += self->size; + } if ((num < 0) || (num >= self->size)) { PyErr_Format(PyExc_IndexError, "index %d out of bounds" \ @@ -10241,8 +10342,9 @@ copyswap(self->dataptr, val->dataptr, swap, self->ao); PyArray_ITER_NEXT(ind_it); PyArray_ITER_NEXT(val); - if (val->index == val->size) + if (val->index == val->size) { PyArray_ITER_RESET(val); + } } Py_DECREF(ind_it); return 0; @@ -10251,14 +10353,14 @@ static int iter_ass_subscript(PyArrayIterObject *self, PyObject *ind, PyObject *val) { - PyObject *arrval=NULL; - PyArrayIterObject *val_it=NULL; + PyObject *arrval = NULL; + PyArrayIterObject *val_it = NULL; PyArray_Descr *type; - PyArray_Descr *indtype=NULL; - int swap, retval=-1; + PyArray_Descr *indtype = NULL; + int swap, retval = -1; intp start, step_size; intp n_steps; - PyObject *obj=NULL; + PyObject *obj = NULL; PyArray_CopySwapFunc *copyswap; @@ -10272,15 +10374,18 @@ if (PyTuple_Check(ind)) { int len; len = PyTuple_GET_SIZE(ind); - if (len > 1) goto finish; + if (len > 1) { + goto finish; + } ind = PyTuple_GET_ITEM(ind, 0); } type = self->ao->descr; - /* Check for Boolean -- this is first becasue - Bool is a subclass of Int */ - + /* + * Check for Boolean -- this is first becasue + * Bool is a subclass of Int + */ if (PyBool_Check(ind)) { retval = 0; if (PyObject_IsTrue(ind)) { @@ -10289,9 +10394,13 @@ goto finish; } - if (PySequence_Check(ind) || PySlice_Check(ind)) goto skip; + if (PySequence_Check(ind) || PySlice_Check(ind)) { + goto skip; + } start = PyArray_PyIntAsIntp(ind); - if (start==-1 && PyErr_Occurred()) PyErr_Clear(); + if (start==-1 && PyErr_Occurred()) { + PyErr_Clear(); + } else { if (start < -self->size || start >= self->size) { PyErr_Format(PyExc_ValueError, @@ -10313,41 +10422,48 @@ skip: Py_INCREF(type); arrval = PyArray_FromAny(val, type, 0, 0, 0, NULL); - if (arrval==NULL) return -1; + if (arrval == NULL) { + return -1; + } val_it = (PyArrayIterObject *)PyArray_IterNew(arrval); - if (val_it==NULL) goto finish; - if (val_it->size == 0) {retval = 0; goto finish;} + if (val_it == NULL) { + goto finish; + } + if (val_it->size == 0) { + retval = 0; + goto finish; + } copyswap = PyArray_DESCR(arrval)->f->copyswap; swap = (PyArray_ISNOTSWAPPED(self->ao)!=PyArray_ISNOTSWAPPED(arrval)); /* Check Slice */ - if (PySlice_Check(ind)) { - start = parse_subindex(ind, &step_size, &n_steps, - self->size); - if (start == -1) goto finish; + start = parse_subindex(ind, &step_size, &n_steps, self->size); + if (start == -1) { + goto finish; + } if (n_steps == RubberIndex || n_steps == PseudoIndex) { PyErr_SetString(PyExc_IndexError, "cannot use Ellipsis or newaxes here"); goto finish; } PyArray_ITER_GOTO1D(self, start); - if (n_steps == SingleIndex) { /* Integer */ - copyswap(self->dataptr, PyArray_DATA(arrval), - swap, arrval); + if (n_steps == SingleIndex) { + /* Integer */ + copyswap(self->dataptr, PyArray_DATA(arrval), swap, arrval); PyArray_ITER_RESET(self); - retval=0; + retval = 0; goto finish; } - while(n_steps--) { - copyswap(self->dataptr, val_it->dataptr, - swap, arrval); + while (n_steps--) { + copyswap(self->dataptr, val_it->dataptr, swap, arrval); start += step_size; - PyArray_ITER_GOTO1D(self, start) - PyArray_ITER_NEXT(val_it); - if (val_it->index == val_it->size) + PyArray_ITER_GOTO1D(self, start); + PyArray_ITER_NEXT(val_it); + if (val_it->index == val_it->size) { PyArray_ITER_RESET(val_it); + } } PyArray_ITER_RESET(self); retval = 0; @@ -10355,7 +10471,6 @@ } /* convert to INTP array if Integer array scalar or List */ - indtype = PyArray_DescrFromType(PyArray_INTP); if (PyList_Check(ind)) { Py_INCREF(indtype); @@ -10370,8 +10485,9 @@ /* Check for Boolean object */ if (PyArray_TYPE(obj)==PyArray_BOOL) { if (iter_ass_sub_Bool(self, (PyArrayObject *)obj, - val_it, swap) < 0) + val_it, swap) < 0) { goto finish; + } retval=0; } /* Check for integer array */ @@ -10382,18 +10498,22 @@ FORCECAST | BEHAVED_NS, NULL); Py_DECREF(obj); obj = new; - if (new==NULL) goto finish; + if (new == NULL) { + goto finish; + } if (iter_ass_sub_int(self, (PyArrayObject *)obj, - val_it, swap) < 0) + val_it, swap) < 0) { goto finish; + } retval=0; } } finish: - if (!PyErr_Occurred() && retval < 0) + if (!PyErr_Occurred() && retval < 0) { PyErr_SetString(PyExc_IndexError, "unsupported iterator index"); + } Py_XDECREF(indtype); Py_XDECREF(obj); Py_XDECREF(val_it); From numpy-svn at scipy.org Wed Feb 18 23:58:30 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Wed, 18 Feb 2009 22:58:30 -0600 (CST) Subject: [Numpy-svn] r6400 - trunk/numpy/core/src Message-ID: <20090219045830.083B8C7C019@scipy.org> Author: charris Date: 2009-02-18 22:58:23 -0600 (Wed, 18 Feb 2009) New Revision: 6400 Modified: trunk/numpy/core/src/arrayobject.c Log: Coding style cleanups. Modified: trunk/numpy/core/src/arrayobject.c =================================================================== --- trunk/numpy/core/src/arrayobject.c 2009-02-19 00:45:14 UTC (rev 6399) +++ trunk/numpy/core/src/arrayobject.c 2009-02-19 04:58:23 UTC (rev 6400) @@ -8882,88 +8882,120 @@ swapchar = str[0]; str += 1; -#define _MY_FAIL { \ - PyErr_SetString(PyExc_ValueError, msg); \ - return NULL; \ - } - typechar = str[0]; size = atoi(str + 1); switch (typechar) { - case 'b': - if (size == sizeof(Bool)) - type_num = PyArray_BOOL; - else _MY_FAIL - break; - case 'u': - if (size == sizeof(uintp)) - type_num = PyArray_UINTP; - else if (size == sizeof(char)) - type_num = PyArray_UBYTE; - else if (size == sizeof(short)) - type_num = PyArray_USHORT; - else if (size == sizeof(ulong)) - type_num = PyArray_ULONG; - else if (size == sizeof(int)) - type_num = PyArray_UINT; - else if (size == sizeof(ulonglong)) - type_num = PyArray_ULONGLONG; - else _MY_FAIL - break; - case 'i': - if (size == sizeof(intp)) - type_num = PyArray_INTP; - else if (size == sizeof(char)) - type_num = PyArray_BYTE; - else if (size == sizeof(short)) - type_num = PyArray_SHORT; - else if (size == sizeof(long)) - type_num = PyArray_LONG; - else if (size == sizeof(int)) - type_num = PyArray_INT; - else if (size == sizeof(longlong)) - type_num = PyArray_LONGLONG; - else _MY_FAIL - break; - case 'f': - if (size == sizeof(float)) - type_num = PyArray_FLOAT; - else if (size == sizeof(double)) - type_num = PyArray_DOUBLE; - else if (size == sizeof(longdouble)) - type_num = PyArray_LONGDOUBLE; - else _MY_FAIL - break; - case 'c': - if (size == sizeof(float)*2) - type_num = PyArray_CFLOAT; - else if (size == sizeof(double)*2) - type_num = PyArray_CDOUBLE; - else if (size == sizeof(longdouble)*2) - type_num = PyArray_CLONGDOUBLE; - else _MY_FAIL - break; - case 'O': - if (size == sizeof(PyObject *)) - type_num = PyArray_OBJECT; - else _MY_FAIL - break; - case PyArray_STRINGLTR: - type_num = PyArray_STRING; - break; - case PyArray_UNICODELTR: - type_num = PyArray_UNICODE; - size <<= 2; - break; - case 'V': - type_num = PyArray_VOID; - break; - default: - _MY_FAIL + case 'b': + if (size == sizeof(Bool)) { + type_num = PyArray_BOOL; } + else { + PyErr_SetString(PyExc_ValueError, msg); + return NULL; + } + break; + case 'u': + if (size == sizeof(uintp)) { + type_num = PyArray_UINTP; + } + else if (size == sizeof(char)) { + type_num = PyArray_UBYTE; + } + else if (size == sizeof(short)) { + type_num = PyArray_USHORT; + } + else if (size == sizeof(ulong)) { + type_num = PyArray_ULONG; + } + else if (size == sizeof(int)) { + type_num = PyArray_UINT; + } + else if (size == sizeof(ulonglong)) { + type_num = PyArray_ULONGLONG; + } + else { + PyErr_SetString(PyExc_ValueError, msg); + return NULL; + } + break; + case 'i': + if (size == sizeof(intp)) { + type_num = PyArray_INTP; + } + else if (size == sizeof(char)) { + type_num = PyArray_BYTE; + } + else if (size == sizeof(short)) { + type_num = PyArray_SHORT; + } + else if (size == sizeof(long)) { + type_num = PyArray_LONG; + } + else if (size == sizeof(int)) { + type_num = PyArray_INT; + } + else if (size == sizeof(longlong)) { + type_num = PyArray_LONGLONG; + } + else { + PyErr_SetString(PyExc_ValueError, msg); + return NULL; + } + break; + case 'f': + if (size == sizeof(float)) { + type_num = PyArray_FLOAT; + } + else if (size == sizeof(double)) { + type_num = PyArray_DOUBLE; + } + else if (size == sizeof(longdouble)) { + type_num = PyArray_LONGDOUBLE; + } + else { + PyErr_SetString(PyExc_ValueError, msg); + return NULL; + } + break; + case 'c': + if (size == sizeof(float)*2) { + type_num = PyArray_CFLOAT; + } + else if (size == sizeof(double)*2) { + type_num = PyArray_CDOUBLE; + } + else if (size == sizeof(longdouble)*2) { + type_num = PyArray_CLONGDOUBLE; + } + else { + PyErr_SetString(PyExc_ValueError, msg); + return NULL; + } + break; + case 'O': + if (size == sizeof(PyObject *)) { + type_num = PyArray_OBJECT; + } + else { + PyErr_SetString(PyExc_ValueError, msg); + return NULL; + } + break; + case PyArray_STRINGLTR: + type_num = PyArray_STRING; + break; + case PyArray_UNICODELTR: + type_num = PyArray_UNICODE; + size <<= 2; + break; + case 'V': + type_num = PyArray_VOID; + break; + default: + PyErr_SetString(PyExc_ValueError, msg); + return NULL; + } -#undef _MY_FAIL - descr = PyArray_DescrFromType(type_num); if (descr == NULL) { return NULL; @@ -9603,79 +9635,94 @@ Py_DECREF(to); switch(fromtype) { - case PyArray_BYTE: - case PyArray_SHORT: - case PyArray_INT: - case PyArray_LONG: - case PyArray_LONGLONG: - if (PyTypeNum_ISINTEGER(totype)) { - if (PyTypeNum_ISUNSIGNED(totype)) { - return 0; + case PyArray_BYTE: + case PyArray_SHORT: + case PyArray_INT: + case PyArray_LONG: + case PyArray_LONGLONG: + if (PyTypeNum_ISINTEGER(totype)) { + if (PyTypeNum_ISUNSIGNED(totype)) { + return 0; + } + else { + return telsize >= felsize; + } } + else if (PyTypeNum_ISFLOAT(totype)) { + if (felsize < 8) { + return telsize > felsize; + } + else { + return telsize >= felsize; + } + } + else if (PyTypeNum_ISCOMPLEX(totype)) { + if (felsize < 8) { + return (telsize >> 1) > felsize; + } + else { + return (telsize >> 1) >= felsize; + } + } else { - return (telsize >= felsize); + return totype > fromtype; } - } - else if (PyTypeNum_ISFLOAT(totype)) { - if (felsize < 8) - return (telsize > felsize); - else - return (telsize >= felsize); - } - else if (PyTypeNum_ISCOMPLEX(totype)) { - if (felsize < 8) - return ((telsize >> 1) > felsize); - else - return ((telsize >> 1) >= felsize); - } - else return totype > fromtype; - case PyArray_UBYTE: - case PyArray_USHORT: - case PyArray_UINT: - case PyArray_ULONG: - case PyArray_ULONGLONG: - if (PyTypeNum_ISINTEGER(totype)) { - if (PyTypeNum_ISSIGNED(totype)) { - return (telsize > felsize); + case PyArray_UBYTE: + case PyArray_USHORT: + case PyArray_UINT: + case PyArray_ULONG: + case PyArray_ULONGLONG: + if (PyTypeNum_ISINTEGER(totype)) { + if (PyTypeNum_ISSIGNED(totype)) { + return telsize > felsize; + } + else { + return telsize >= felsize; + } } + else if (PyTypeNum_ISFLOAT(totype)) { + if (felsize < 8) { + return telsize > felsize; + } + else { + return telsize >= felsize; + } + } + else if (PyTypeNum_ISCOMPLEX(totype)) { + if (felsize < 8) { + return (telsize >> 1) > felsize; + } + else { + return (telsize >> 1) >= felsize; + } + } else { - return (telsize >= felsize); + return totype > fromtype; } - } - else if (PyTypeNum_ISFLOAT(totype)) { - if (felsize < 8) - return (telsize > felsize); - else - return (telsize >= felsize); - } - else if (PyTypeNum_ISCOMPLEX(totype)) { - if (felsize < 8) - return ((telsize >> 1) > felsize); - else - return ((telsize >> 1) >= felsize); - } - else return totype > fromtype; - case PyArray_FLOAT: - case PyArray_DOUBLE: - case PyArray_LONGDOUBLE: - if (PyTypeNum_ISCOMPLEX(totype)) - return ((telsize >> 1) >= felsize); - else - return (totype > fromtype); - case PyArray_CFLOAT: - case PyArray_CDOUBLE: - case PyArray_CLONGDOUBLE: - return (totype > fromtype); - case PyArray_STRING: - case PyArray_UNICODE: - return (totype > fromtype); - default: - return 0; + case PyArray_FLOAT: + case PyArray_DOUBLE: + case PyArray_LONGDOUBLE: + if (PyTypeNum_ISCOMPLEX(totype)) { + return (telsize >> 1) >= felsize; + } + else { + return totype > fromtype; + } + case PyArray_CFLOAT: + case PyArray_CDOUBLE: + case PyArray_CLONGDOUBLE: + return totype > fromtype; + case PyArray_STRING: + case PyArray_UNICODE: + return totype > fromtype; + default: + return 0; } } -/* leaves reference count alone --- cannot be NULL*/ -/*NUMPY_API*/ +/*NUMPY_API + * leaves reference count alone --- cannot be NULL + */ static Bool PyArray_CanCastTo(PyArray_Descr *from, PyArray_Descr *to) { @@ -9731,8 +9778,8 @@ /* and Python's array iterator ***/ /*NUMPY_API - Get Iterator. -*/ + * Get Iterator. + */ static PyObject * PyArray_IterNew(PyObject *obj) { @@ -9778,7 +9825,7 @@ } /*NUMPY_API - *Get Iterator broadcast to a particular shape + * Get Iterator broadcast to a particular shape */ static PyObject * PyArray_BroadcastToShape(PyObject *obj, intp *dims, int nd) From numpy-svn at scipy.org Thu Feb 19 03:50:19 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Thu, 19 Feb 2009 02:50:19 -0600 (CST) Subject: [Numpy-svn] r6401 - branches/coremath/numpy/core/src Message-ID: <20090219085019.0844EC7C019@scipy.org> Author: cdavid Date: 2009-02-19 02:50:13 -0600 (Thu, 19 Feb 2009) New Revision: 6401 Modified: branches/coremath/numpy/core/src/umath_funcs.inc.src Log: Revert back changes using npymath in umath, too much breakage. Modified: branches/coremath/numpy/core/src/umath_funcs.inc.src =================================================================== --- branches/coremath/numpy/core/src/umath_funcs.inc.src 2009-02-19 04:58:23 UTC (rev 6400) +++ branches/coremath/numpy/core/src/umath_funcs.inc.src 2009-02-19 08:50:13 UTC (rev 6401) @@ -62,20 +62,20 @@ if (u == 1) { return LOG2E*x; } else { - return npy_log2 at c@(u) * x / (u - 1); + return log2 at c@(u) * x / (u - 1); } } static @type@ exp2_1m at c@(@type@ x) { - @type@ u = npy_exp at c@(x); + @type@ u = exp at c@(x); if (u == 1.0) { return LOGE2*x; } else if (u - 1 == -1) { return -LOGE2; } else { - return (u - 1) * x/npy_log2 at c@(u); + return (u - 1) * x/log2 at c@(u); } } @@ -84,10 +84,10 @@ { const @type@ tmp = x - y; if (tmp > 0) { - return x + npy_log1p at c@(exp at c@(-tmp)); + return x + log1p at c@(exp at c@(-tmp)); } else { - return y + npy_log1p at c@(exp at c@(tmp)); + return y + log1p at c@(exp at c@(tmp)); } } @@ -96,10 +96,10 @@ { const @type@ tmp = x - y; if (tmp > 0) { - return x + npy_log2_1p at c@(exp2 at c@(-tmp)); + return x + log2_1p at c@(exp2 at c@(-tmp)); } else { - return y + npy_log2_1p at c@(exp2 at c@(tmp)); + return y + log2_1p at c@(exp2 at c@(tmp)); } } @@ -261,7 +261,7 @@ if (x->real == 0. && x->imag == 0.) *r = *x; else { - s = sqrt at c@((npy_fabs at c@(x->real) + npy_hypot at c@(x->real,x->imag))/2); + s = sqrt at c@((fabs at c@(x->real) + hypot at c@(x->real,x->imag))/2); d = x->imag/(2*s); if (x->real > 0) { r->real = s; @@ -289,36 +289,36 @@ static void nc_log at c@(c at typ@ *x, c at typ@ *r) { - @typ@ l = npy_hypot at c@(x->real,x->imag); - r->imag = npy_atan2 at c@(x->imag, x->real); - r->real = npy_log at c@(l); + @typ@ l = hypot at c@(x->real,x->imag); + r->imag = atan2 at c@(x->imag, x->real); + r->real = log at c@(l); return; } static void nc_log1p at c@(c at typ@ *x, c at typ@ *r) { - @typ@ l = npy_hypot at c@(x->real + 1,x->imag); - r->imag = npy_atan2 at c@(x->imag, x->real + 1); - r->real = npy_log at c@(l); + @typ@ l = hypot at c@(x->real + 1,x->imag); + r->imag = atan2 at c@(x->imag, x->real + 1); + r->real = log at c@(l); return; } static void nc_exp at c@(c at typ@ *x, c at typ@ *r) { - @typ@ a = npy_exp at c@(x->real); - r->real = a*npy_cos at c@(x->imag); - r->imag = a*npy_sin at c@(x->imag); + @typ@ a = exp at c@(x->real); + r->real = a*cos at c@(x->imag); + r->imag = a*sin at c@(x->imag); return; } static void nc_expm1 at c@(c at typ@ *x, c at typ@ *r) { - @typ@ a = npy_exp at c@(x->real); - r->real = a*npy_cos at c@(x->imag) - 1; - r->imag = a*npy_sin at c@(x->imag); + @typ@ a = exp at c@(x->real); + r->real = a*cos at c@(x->imag) - 1; + r->imag = a*sin at c@(x->imag); return; } @@ -483,8 +483,8 @@ nc_cos at c@(c at typ@ *x, c at typ@ *r) { @typ@ xr=x->real, xi=x->imag; - r->real = npy_cos at c@(xr)*npy_cosh at c@(xi); - r->imag = -npy_sin at c@(xr)*npy_sinh at c@(xi); + r->real = cos at c@(xr)*cosh at c@(xi); + r->imag = -sin at c@(xr)*sinh at c@(xi); return; } @@ -492,8 +492,8 @@ nc_cosh at c@(c at typ@ *x, c at typ@ *r) { @typ@ xr=x->real, xi=x->imag; - r->real = npy_cos at c@(xi)*npy_cosh at c@(xr); - r->imag = npy_sin at c@(xi)*npy_sinh at c@(xr); + r->real = cos at c@(xi)*cosh at c@(xr); + r->imag = sin at c@(xi)*sinh at c@(xr); return; } @@ -510,8 +510,8 @@ nc_sin at c@(c at typ@ *x, c at typ@ *r) { @typ@ xr=x->real, xi=x->imag; - r->real = npy_sin at c@(xr)*npy_cosh at c@(xi); - r->imag = npy_cos at c@(xr)*npy_sinh at c@(xi); + r->real = sin at c@(xr)*cosh at c@(xi); + r->imag = cos at c@(xr)*sinh at c@(xi); return; } @@ -519,8 +519,8 @@ nc_sinh at c@(c at typ@ *x, c at typ@ *r) { @typ@ xr=x->real, xi=x->imag; - r->real = npy_cos at c@(xi)*npy_sinh at c@(xr); - r->imag = npy_sin at c@(xi)*npy_cosh at c@(xr); + r->real = cos at c@(xi)*sinh at c@(xr); + r->imag = sin at c@(xi)*cosh at c@(xr); return; } @@ -531,10 +531,10 @@ @typ@ rs,is,rc,ic; @typ@ d; @typ@ xr=x->real, xi=x->imag; - sr = npy_sin at c@(xr); - cr = npy_cos at c@(xr); - shi = npy_sinh at c@(xi); - chi = npy_cosh at c@(xi); + sr = sin at c@(xr); + cr = cos at c@(xr); + shi = sinh at c@(xi); + chi = cosh at c@(xi); rs = sr*chi; is = cr*shi; rc = cr*chi; @@ -552,10 +552,10 @@ @typ@ rs,is,rc,ic; @typ@ d; @typ@ xr=x->real, xi=x->imag; - si = npy_sin at c@(xi); - ci = npy_cos at c@(xi); - shr = npy_sinh at c@(xr); - chr = npy_cosh at c@(xr); + si = sin at c@(xi); + ci = cos at c@(xi); + shr = sinh at c@(xr); + chr = cosh at c@(xr); rs = ci*shr; is = si*chr; rc = ci*chr; From numpy-svn at scipy.org Thu Feb 19 03:50:42 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Thu, 19 Feb 2009 02:50:42 -0600 (CST) Subject: [Numpy-svn] r6402 - branches/coremath/numpy/core/src Message-ID: <20090219085042.47CE8C7C019@scipy.org> Author: cdavid Date: 2009-02-19 02:50:33 -0600 (Thu, 19 Feb 2009) New Revision: 6402 Modified: branches/coremath/numpy/core/src/npy_math.c.src Log: Decorate all the math functions, make internal replacements static. Modified: branches/coremath/numpy/core/src/npy_math.c.src =================================================================== --- branches/coremath/numpy/core/src/npy_math.c.src 2009-02-19 08:50:13 UTC (rev 6401) +++ branches/coremath/numpy/core/src/npy_math.c.src 2009-02-19 08:50:33 UTC (rev 6402) @@ -56,7 +56,7 @@ /* Original code by Konrad Hinsen. */ #ifndef HAVE_EXPM1 -double expm1(double x) +static double expm1(double x) { double u = exp(x); if (u == 1.0) { @@ -70,7 +70,7 @@ #endif #ifndef HAVE_LOG1P -double log1p(double x) +static double log1p(double x) { double u = 1. + x; if (u == 1.0) { @@ -82,7 +82,7 @@ #endif #ifndef HAVE_HYPOT -double hypot(double x, double y) +static double hypot(double x, double y) { double yx; @@ -103,14 +103,14 @@ #endif #ifndef HAVE_ACOSH -double acosh(double x) +static double acosh(double x) { return 2*log(sqrt((x+1.0)/2)+sqrt((x-1.0)/2)); } #endif #ifndef HAVE_ASINH -double asinh(double xx) +static double asinh(double xx) { double x, d; int sign; @@ -132,7 +132,7 @@ #endif #ifndef HAVE_ATANH -double atanh(double x) +static double atanh(double x) { if (x > 0) { return -0.5*log1p(-2.0*x/(1.0 + x)); @@ -144,7 +144,7 @@ #endif #ifndef HAVE_RINT -double rint(double x) +static double rint(double x) { double y, r; @@ -166,7 +166,7 @@ #endif #ifndef HAVE_TRUNC -double trunc(double x) +static double trunc(double x) { return x < 0 ? ceil(x) : floor(x); } @@ -174,7 +174,7 @@ #ifndef HAVE_EXP2 #define LOG2 0.69314718055994530943 -double exp2(double x) +static double exp2(double x) { return exp(LOG2*x); } @@ -183,7 +183,7 @@ #ifndef HAVE_LOG2 #define INVLOG2 1.4426950408889634074 -double log2(double x) +static double log2(double x) { return INVLOG2*log(x); } @@ -247,7 +247,7 @@ #undef @kind@@c@ #endif #ifndef HAVE_ at KIND@@C@ - at type@ @kind@@c@(@type@ x) +static @type@ @kind@@c@(@type@ x) { return (@type@) @kind@((double)x); } @@ -263,7 +263,7 @@ #undef @kind@@c@ #endif #ifndef HAVE_ at KIND@@C@ - at type@ @kind@@c@(@type@ x, @type@ y) +static @type@ @kind@@c@(@type@ x, @type@ y) { return (@type@) @kind@((double)x, (double) y); } @@ -274,7 +274,7 @@ #undef modf at c@ #endif #ifndef HAVE_MODF at C@ - at type@ modf at c@(@type@ x, @type@ *iptr) +static @type@ modf at c@(@type@ x, @type@ *iptr) { double niptr; double y = modf((double)x, &niptr); @@ -284,3 +284,39 @@ #endif /**end repeat**/ + +/* + * Decorate all the functions: those are the public ones + */ + +/**begin repeat + * #type = npy_longdouble,double,float# + * #c = l,,f# + */ +/**begin repeat1 + * #kind = sin,cos,tan,sinh,cosh,tanh,fabs,floor,ceil,rint,trunc,sqrt,log10, + * log,exp,expm1,asin,acos,atan,asinh,acosh,atanh,log1p,exp2,log2# + */ + + at type@ npy_ at kind@@c@(@type@ x) +{ + return @kind@@c@(x); +} + +/**end repeat1**/ + +/**begin repeat1 + * #kind = atan2,hypot,pow,fmod# + */ + at type@ npy_ at kind@@c@(@type@ x, @type@ y) +{ + return @kind@@c@(x, y); +} +/**end repeat1**/ + + at type@ npy_modf at c@(@type@ x, @type@ *iptr) +{ + return modf at c@(x, iptr); +} + +/**end repeat**/ From numpy-svn at scipy.org Thu Feb 19 03:51:05 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Thu, 19 Feb 2009 02:51:05 -0600 (CST) Subject: [Numpy-svn] r6403 - branches/coremath/numpy/core/include/numpy Message-ID: <20090219085105.1CF77C7C019@scipy.org> Author: cdavid Date: 2009-02-19 02:50:58 -0600 (Thu, 19 Feb 2009) New Revision: 6403 Modified: branches/coremath/numpy/core/include/numpy/npy_math.h Log: Add a couple of basic functions to npymath. Modified: branches/coremath/numpy/core/include/numpy/npy_math.h =================================================================== --- branches/coremath/numpy/core/include/numpy/npy_math.h 2009-02-19 08:50:33 UTC (rev 6402) +++ branches/coremath/numpy/core/include/numpy/npy_math.h 2009-02-19 08:50:58 UTC (rev 6403) @@ -3,9 +3,20 @@ #include #include + /* * C99 double math funcs */ +double npy_sin(double x); +double npy_cos(double x); +double npy_tan(double x); +double npy_sinh(double x); +double npy_cosh(double x); +double npy_tanh(double x); + +double npy_exp(double x); +double npy_log(double x); + double npy_expm1(double x); double npy_log1p(double x); double npy_hypot(double x, double y); From numpy-svn at scipy.org Thu Feb 19 03:51:29 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Thu, 19 Feb 2009 02:51:29 -0600 (CST) Subject: [Numpy-svn] r6404 - branches/coremath/numpy/core/src Message-ID: <20090219085129.BB20FC7C019@scipy.org> Author: cdavid Date: 2009-02-19 02:51:20 -0600 (Thu, 19 Feb 2009) New Revision: 6404 Modified: branches/coremath/numpy/core/src/umath_funcs.inc.src Log: Start using npymath functions in umath; all test pass on this one on Linux. Modified: branches/coremath/numpy/core/src/umath_funcs.inc.src =================================================================== --- branches/coremath/numpy/core/src/umath_funcs.inc.src 2009-02-19 08:50:58 UTC (rev 6403) +++ branches/coremath/numpy/core/src/umath_funcs.inc.src 2009-02-19 08:51:20 UTC (rev 6404) @@ -62,7 +62,7 @@ if (u == 1) { return LOG2E*x; } else { - return log2 at c@(u) * x / (u - 1); + return npy_log2 at c@(u) * x / (u - 1); } } @@ -75,7 +75,7 @@ } else if (u - 1 == -1) { return -LOGE2; } else { - return (u - 1) * x/log2 at c@(u); + return (u - 1) * x/npy_log2 at c@(u); } } @@ -84,10 +84,10 @@ { const @type@ tmp = x - y; if (tmp > 0) { - return x + log1p at c@(exp at c@(-tmp)); + return x + npy_log1p at c@(npy_exp at c@(-tmp)); } else { - return y + log1p at c@(exp at c@(tmp)); + return y + npy_log1p at c@(npy_exp at c@(tmp)); } } @@ -96,10 +96,10 @@ { const @type@ tmp = x - y; if (tmp > 0) { - return x + log2_1p at c@(exp2 at c@(-tmp)); + return x + log2_1p at c@(npy_exp2 at c@(-tmp)); } else { - return y + log2_1p at c@(exp2 at c@(tmp)); + return y + log2_1p at c@(npy_exp2 at c@(tmp)); } } From numpy-svn at scipy.org Thu Feb 19 04:17:22 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Thu, 19 Feb 2009 03:17:22 -0600 (CST) Subject: [Numpy-svn] r6405 - branches/coremath/numpy/core/include/numpy Message-ID: <20090219091722.D6EDBC7C071@scipy.org> Author: cdavid Date: 2009-02-19 03:17:15 -0600 (Thu, 19 Feb 2009) New Revision: 6405 Modified: branches/coremath/numpy/core/include/numpy/npy_math.h Log: Add sqrt and fabs in npymath. Modified: branches/coremath/numpy/core/include/numpy/npy_math.h =================================================================== --- branches/coremath/numpy/core/include/numpy/npy_math.h 2009-02-19 08:51:20 UTC (rev 6404) +++ branches/coremath/numpy/core/include/numpy/npy_math.h 2009-02-19 09:17:15 UTC (rev 6405) @@ -16,6 +16,8 @@ double npy_exp(double x); double npy_log(double x); +double npy_sqrt(double x); +double npy_fabs(double x); double npy_expm1(double x); double npy_log1p(double x); From numpy-svn at scipy.org Thu Feb 19 04:18:00 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Thu, 19 Feb 2009 03:18:00 -0600 (CST) Subject: [Numpy-svn] r6406 - branches/coremath/numpy/core/include/numpy Message-ID: <20090219091800.8610CC7C071@scipy.org> Author: cdavid Date: 2009-02-19 03:17:45 -0600 (Thu, 19 Feb 2009) New Revision: 6406 Modified: branches/coremath/numpy/core/include/numpy/npy_math.h Log: Add atan2 in npymath. Modified: branches/coremath/numpy/core/include/numpy/npy_math.h =================================================================== --- branches/coremath/numpy/core/include/numpy/npy_math.h 2009-02-19 09:17:15 UTC (rev 6405) +++ branches/coremath/numpy/core/include/numpy/npy_math.h 2009-02-19 09:17:45 UTC (rev 6406) @@ -30,6 +30,7 @@ double npy_exp2(double x); double npy_log2(double x); +double npy_atan2(double x, double y); /* * IEEE 754 fpu handling. Those are guaranteed to be macros */ From numpy-svn at scipy.org Thu Feb 19 04:18:28 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Thu, 19 Feb 2009 03:18:28 -0600 (CST) Subject: [Numpy-svn] r6407 - branches/coremath/numpy/core/src Message-ID: <20090219091828.C9A43C7C071@scipy.org> Author: cdavid Date: 2009-02-19 03:18:21 -0600 (Thu, 19 Feb 2009) New Revision: 6407 Modified: branches/coremath/numpy/core/src/umath_funcs.inc.src Log: npymath convertion in umath_funcs done. Modified: branches/coremath/numpy/core/src/umath_funcs.inc.src =================================================================== --- branches/coremath/numpy/core/src/umath_funcs.inc.src 2009-02-19 09:17:45 UTC (rev 6406) +++ branches/coremath/numpy/core/src/umath_funcs.inc.src 2009-02-19 09:18:21 UTC (rev 6407) @@ -261,7 +261,7 @@ if (x->real == 0. && x->imag == 0.) *r = *x; else { - s = sqrt at c@((fabs at c@(x->real) + hypot at c@(x->real,x->imag))/2); + s = npy_sqrt at c@((npy_fabs at c@(x->real) + npy_hypot at c@(x->real,x->imag))/2); d = x->imag/(2*s); if (x->real > 0) { r->real = s; @@ -282,43 +282,43 @@ static void nc_rint at c@(c at typ@ *x, c at typ@ *r) { - r->real = rint at c@(x->real); - r->imag = rint at c@(x->imag); + r->real = npy_rint at c@(x->real); + r->imag = npy_rint at c@(x->imag); } static void nc_log at c@(c at typ@ *x, c at typ@ *r) { - @typ@ l = hypot at c@(x->real,x->imag); - r->imag = atan2 at c@(x->imag, x->real); - r->real = log at c@(l); + @typ@ l = npy_hypot at c@(x->real,x->imag); + r->imag = npy_atan2 at c@(x->imag, x->real); + r->real = npy_log at c@(l); return; } static void nc_log1p at c@(c at typ@ *x, c at typ@ *r) { - @typ@ l = hypot at c@(x->real + 1,x->imag); - r->imag = atan2 at c@(x->imag, x->real + 1); - r->real = log at c@(l); + @typ@ l = npy_hypot at c@(x->real + 1,x->imag); + r->imag = npy_atan2 at c@(x->imag, x->real + 1); + r->real = npy_log at c@(l); return; } static void nc_exp at c@(c at typ@ *x, c at typ@ *r) { - @typ@ a = exp at c@(x->real); - r->real = a*cos at c@(x->imag); - r->imag = a*sin at c@(x->imag); + @typ@ a = npy_exp at c@(x->real); + r->real = a*npy_cos at c@(x->imag); + r->imag = a*npy_sin at c@(x->imag); return; } static void nc_expm1 at c@(c at typ@ *x, c at typ@ *r) { - @typ@ a = exp at c@(x->real); - r->real = a*cos at c@(x->imag) - 1; - r->imag = a*sin at c@(x->imag); + @typ@ a = npy_exp at c@(x->real); + r->real = a*npy_cos at c@(x->imag) - 1; + r->imag = a*npy_sin at c@(x->imag); return; } @@ -483,8 +483,8 @@ nc_cos at c@(c at typ@ *x, c at typ@ *r) { @typ@ xr=x->real, xi=x->imag; - r->real = cos at c@(xr)*cosh at c@(xi); - r->imag = -sin at c@(xr)*sinh at c@(xi); + r->real = npy_cos at c@(xr)*npy_cosh at c@(xi); + r->imag = -npy_sin at c@(xr)*npy_sinh at c@(xi); return; } @@ -492,8 +492,8 @@ nc_cosh at c@(c at typ@ *x, c at typ@ *r) { @typ@ xr=x->real, xi=x->imag; - r->real = cos at c@(xi)*cosh at c@(xr); - r->imag = sin at c@(xi)*sinh at c@(xr); + r->real = npy_cos at c@(xi)*npy_cosh at c@(xr); + r->imag = npy_sin at c@(xi)*npy_sinh at c@(xr); return; } @@ -510,8 +510,8 @@ nc_sin at c@(c at typ@ *x, c at typ@ *r) { @typ@ xr=x->real, xi=x->imag; - r->real = sin at c@(xr)*cosh at c@(xi); - r->imag = cos at c@(xr)*sinh at c@(xi); + r->real = npy_sin at c@(xr)*npy_cosh at c@(xi); + r->imag = npy_cos at c@(xr)*npy_sinh at c@(xi); return; } @@ -519,8 +519,8 @@ nc_sinh at c@(c at typ@ *x, c at typ@ *r) { @typ@ xr=x->real, xi=x->imag; - r->real = cos at c@(xi)*sinh at c@(xr); - r->imag = sin at c@(xi)*cosh at c@(xr); + r->real = npy_cos at c@(xi)*npy_sinh at c@(xr); + r->imag = npy_sin at c@(xi)*npy_cosh at c@(xr); return; } @@ -531,10 +531,10 @@ @typ@ rs,is,rc,ic; @typ@ d; @typ@ xr=x->real, xi=x->imag; - sr = sin at c@(xr); - cr = cos at c@(xr); - shi = sinh at c@(xi); - chi = cosh at c@(xi); + sr = npy_sin at c@(xr); + cr = npy_cos at c@(xr); + shi = npy_sinh at c@(xi); + chi = npy_cosh at c@(xi); rs = sr*chi; is = cr*shi; rc = cr*chi; @@ -552,10 +552,10 @@ @typ@ rs,is,rc,ic; @typ@ d; @typ@ xr=x->real, xi=x->imag; - si = sin at c@(xi); - ci = cos at c@(xi); - shr = sinh at c@(xr); - chr = cosh at c@(xr); + si = npy_sin at c@(xi); + ci = npy_cos at c@(xi); + shr = npy_sinh at c@(xr); + chr = npy_cosh at c@(xr); rs = ci*shr; is = si*chr; rc = ci*chr; From numpy-svn at scipy.org Thu Feb 19 04:33:45 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Thu, 19 Feb 2009 03:33:45 -0600 (CST) Subject: [Numpy-svn] r6408 - branches/coremath/numpy/distutils/command Message-ID: <20090219093345.ECFC2C7C066@scipy.org> Author: cdavid Date: 2009-02-19 03:33:41 -0600 (Thu, 19 Feb 2009) New Revision: 6408 Modified: branches/coremath/numpy/distutils/command/config.py Log: Add our own check_header, since distutils one is broken. Modified: branches/coremath/numpy/distutils/command/config.py =================================================================== --- branches/coremath/numpy/distutils/command/config.py 2009-02-19 09:18:21 UTC (rev 6407) +++ branches/coremath/numpy/distutils/command/config.py 2009-02-19 09:33:41 UTC (rev 6408) @@ -144,6 +144,12 @@ (body, headers, include_dirs, libraries, library_dirs, lang)) + def check_header(self, header, include_dirs=None, library_dirs=None, lang='c'): + self._check_compiler() + return self.try_compile( + "/* we need a dummy line to make distutils happy */", + [header], include_dirs) + def check_decl(self, symbol, headers=None, include_dirs=None): self._check_compiler() From numpy-svn at scipy.org Thu Feb 19 04:36:15 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Thu, 19 Feb 2009 03:36:15 -0600 (CST) Subject: [Numpy-svn] r6409 - branches/coremath/numpy/core Message-ID: <20090219093615.B0FBEC7C066@scipy.org> Author: cdavid Date: 2009-02-19 03:36:09 -0600 (Thu, 19 Feb 2009) New Revision: 6409 Modified: branches/coremath/numpy/core/setup.py Log: Do not silently fail to get sizeof basic types. Modified: branches/coremath/numpy/core/setup.py =================================================================== --- branches/coremath/numpy/core/setup.py 2009-02-19 09:33:41 UTC (rev 6408) +++ branches/coremath/numpy/core/setup.py 2009-02-19 09:36:09 UTC (rev 6409) @@ -146,11 +146,15 @@ res = config_cmd.check_type_size(type) if res >= 0: private_defines.append(('SIZEOF_%s' % sym2def(type), '%d' % res)) + else: + raise SystemError("Checking sizeof (%s) failed !" % type) for type in ('Py_intptr_t',): res = config_cmd.check_type_size(type, headers=["Python.h"]) if res >= 0: private_defines.append(('SIZEOF_%s' % sym2def(type), '%d' % res)) + else: + raise SystemError("Checking sizeof (%s) failed !" % type) # We check declaration AND type because that's how distutils does it. if config_cmd.check_decl('PY_LONG_LONG', headers=['Python.h']): From numpy-svn at scipy.org Thu Feb 19 05:05:35 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Thu, 19 Feb 2009 04:05:35 -0600 (CST) Subject: [Numpy-svn] r6410 - trunk/numpy/lib/tests Message-ID: <20090219100535.AF1B8C7C066@scipy.org> Author: cdavid Date: 2009-02-19 04:05:28 -0600 (Thu, 19 Feb 2009) New Revision: 6410 Modified: trunk/numpy/lib/tests/test_io.py trunk/numpy/lib/tests/test_recfunctions.py Log: Tag known failure on win32. Modified: trunk/numpy/lib/tests/test_io.py =================================================================== --- trunk/numpy/lib/tests/test_io.py 2009-02-19 09:36:09 UTC (rev 6409) +++ trunk/numpy/lib/tests/test_io.py 2009-02-19 10:05:28 UTC (rev 6410) @@ -76,6 +76,7 @@ a = np.array([1, 2, 3, 4], int) self.roundtrip(a) + @np.testing.dec.knownfailureif(sys.platform=='win32', "Fail on Win32") def test_mmap(self): a = np.array([[1, 2.5], [4, 7.3]]) self.roundtrip(a, file_on_disk=True, load_kwds={'mmap_mode': 'r'}) @@ -112,6 +113,7 @@ class TestSaveTxt(TestCase): + @np.testing.dec.knownfailureif(sys.platform=='win32', "Fail on Win32") def test_array(self): a =np.array([[1, 2], [3, 4]], float) c = StringIO.StringIO() Modified: trunk/numpy/lib/tests/test_recfunctions.py =================================================================== --- trunk/numpy/lib/tests/test_recfunctions.py 2009-02-19 09:36:09 UTC (rev 6409) +++ trunk/numpy/lib/tests/test_recfunctions.py 2009-02-19 10:05:28 UTC (rev 6410) @@ -1,3 +1,4 @@ +import sys import numpy as np import numpy.ma as ma @@ -137,6 +138,7 @@ assert_equal(test, control) + @np.testing.dec.knownfailureif(sys.platform=='win32', "Fail on Win32") def test_find_duplicates(self): "Test find_duplicates" a = ma.array([(2, (2., 'B')), (1, (2., 'B')), (2, (2., 'B')), @@ -171,6 +173,7 @@ assert_equal(test[0], a[control]) + @np.testing.dec.knownfailureif(sys.platform=='win32', "Fail on Win32") def test_find_duplicates_ignoremask(self): "Test the ignoremask option of find_duplicates" ndtype = [('a', int)] From numpy-svn at scipy.org Thu Feb 19 05:25:23 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Thu, 19 Feb 2009 04:25:23 -0600 (CST) Subject: [Numpy-svn] r6411 - in branches/coremath/numpy: distutils/command lib/tests Message-ID: <20090219102523.7D9D4C84119@scipy.org> Author: cdavid Date: 2009-02-19 04:24:58 -0600 (Thu, 19 Feb 2009) New Revision: 6411 Modified: branches/coremath/numpy/distutils/command/config.py branches/coremath/numpy/lib/tests/test_io.py branches/coremath/numpy/lib/tests/test_recfunctions.py Log: Merge r6410. Modified: branches/coremath/numpy/distutils/command/config.py =================================================================== --- branches/coremath/numpy/distutils/command/config.py 2009-02-19 10:05:28 UTC (rev 6410) +++ branches/coremath/numpy/distutils/command/config.py 2009-02-19 10:24:58 UTC (rev 6411) @@ -212,7 +212,7 @@ try: src, obj, exe = self._link(body, headers, include_dirs, [], [], 'c') - exe = os.path.join('.', exe) + #exe = os.path.join('.', exe) exitstatus, output = exec_command(exe, execute_in='.') if hasattr(os, 'WEXITSTATUS'): exitcode = os.WEXITSTATUS(exitstatus) Modified: branches/coremath/numpy/lib/tests/test_io.py =================================================================== --- branches/coremath/numpy/lib/tests/test_io.py 2009-02-19 10:05:28 UTC (rev 6410) +++ branches/coremath/numpy/lib/tests/test_io.py 2009-02-19 10:24:58 UTC (rev 6411) @@ -76,6 +76,7 @@ a = np.array([1, 2, 3, 4], int) self.roundtrip(a) + @np.testing.dec.knownfailureif(sys.platform=='win32', "Fail on Win32") def test_mmap(self): a = np.array([[1, 2.5], [4, 7.3]]) self.roundtrip(a, file_on_disk=True, load_kwds={'mmap_mode': 'r'}) @@ -112,6 +113,7 @@ class TestSaveTxt(TestCase): + @np.testing.dec.knownfailureif(sys.platform=='win32', "Fail on Win32") def test_array(self): a =np.array([[1, 2], [3, 4]], float) c = StringIO.StringIO() Modified: branches/coremath/numpy/lib/tests/test_recfunctions.py =================================================================== --- branches/coremath/numpy/lib/tests/test_recfunctions.py 2009-02-19 10:05:28 UTC (rev 6410) +++ branches/coremath/numpy/lib/tests/test_recfunctions.py 2009-02-19 10:24:58 UTC (rev 6411) @@ -1,3 +1,4 @@ +import sys import numpy as np import numpy.ma as ma @@ -137,6 +138,7 @@ assert_equal(test, control) + @np.testing.dec.knownfailureif(sys.platform=='win32', "Fail on Win32") def test_find_duplicates(self): "Test find_duplicates" a = ma.array([(2, (2., 'B')), (1, (2., 'B')), (2, (2., 'B')), @@ -171,6 +173,7 @@ assert_equal(test[0], a[control]) + @np.testing.dec.knownfailureif(sys.platform=='win32', "Fail on Win32") def test_find_duplicates_ignoremask(self): "Test the ignoremask option of find_duplicates" ndtype = [('a', int)] From numpy-svn at scipy.org Thu Feb 19 05:49:39 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Thu, 19 Feb 2009 04:49:39 -0600 (CST) Subject: [Numpy-svn] r6412 - branches/coremath/numpy/distutils/command Message-ID: <20090219104939.7B65DC7C066@scipy.org> Author: cdavid Date: 2009-02-19 04:49:34 -0600 (Thu, 19 Feb 2009) New Revision: 6412 Modified: branches/coremath/numpy/distutils/command/config.py Log: Support library_dirs in check_type. Modified: branches/coremath/numpy/distutils/command/config.py =================================================================== --- branches/coremath/numpy/distutils/command/config.py 2009-02-19 10:24:58 UTC (rev 6411) +++ branches/coremath/numpy/distutils/command/config.py 2009-02-19 10:49:34 UTC (rev 6412) @@ -165,7 +165,7 @@ return self.try_compile(body, headers, include_dirs) - def check_type_size(self, type_name, headers=None, include_dirs=None): + def check_type_size(self, type_name, headers=None, include_dirs=None, library_dirs=None): """Check size of a given type.""" # XXX: should also implement the cross-compiling version (using binary # search + array indexing, see AC_CHECK_SIZEOF). @@ -211,7 +211,7 @@ size = None try: src, obj, exe = self._link(body, headers, include_dirs, - [], [], 'c') + [], library_dirs, 'c') #exe = os.path.join('.', exe) exitstatus, output = exec_command(exe, execute_in='.') if hasattr(os, 'WEXITSTATUS'): From numpy-svn at scipy.org Thu Feb 19 05:50:31 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Thu, 19 Feb 2009 04:50:31 -0600 (CST) Subject: [Numpy-svn] r6413 - branches/coremath/numpy/core Message-ID: <20090219105031.26BA2C7C066@scipy.org> Author: cdavid Date: 2009-02-19 04:50:16 -0600 (Thu, 19 Feb 2009) New Revision: 6413 Modified: branches/coremath/numpy/core/setup.py Log: Add python library path for python types - needed with MS compilers. Modified: branches/coremath/numpy/core/setup.py =================================================================== --- branches/coremath/numpy/core/setup.py 2009-02-19 10:49:34 UTC (rev 6412) +++ branches/coremath/numpy/core/setup.py 2009-02-19 10:50:16 UTC (rev 6413) @@ -4,7 +4,15 @@ from os.path import join from numpy.distutils import log from distutils.dep_util import newer +from distutils.sysconfig import get_config_var +def pythonlib_dir(): + """return path where libpython* is.""" + if sys.platform == 'win32': + return os.path.join(sys.prefix, "libs") + else: + return get_config_var('LIBDIR') + def is_npy_no_signal(): """Return True if the NPY_NO_SIGNAL symbol must be defined in configuration header.""" @@ -150,7 +158,8 @@ raise SystemError("Checking sizeof (%s) failed !" % type) for type in ('Py_intptr_t',): - res = config_cmd.check_type_size(type, headers=["Python.h"]) + res = config_cmd.check_type_size(type, headers=["Python.h"], + library_dirs=[pythonlib_dir()]) if res >= 0: private_defines.append(('SIZEOF_%s' % sym2def(type), '%d' % res)) else: @@ -158,7 +167,7 @@ # We check declaration AND type because that's how distutils does it. if config_cmd.check_decl('PY_LONG_LONG', headers=['Python.h']): - st = config_cmd.check_type_size('PY_LONG_LONG', headers=['Python.h']) + st = config_cmd.check_type_size('PY_LONG_LONG', headers=['Python.h'], library_dirs=[pythonlib_dir()]) assert not st == 0 private_defines.append(('SIZEOF_%s' % sym2def('PY_LONG_LONG'), '%d' % res)) From numpy-svn at scipy.org Thu Feb 19 05:55:38 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Thu, 19 Feb 2009 04:55:38 -0600 (CST) Subject: [Numpy-svn] r6414 - branches/coremath/numpy/core/src Message-ID: <20090219105538.65F21C7C066@scipy.org> Author: cdavid Date: 2009-02-19 04:55:31 -0600 (Thu, 19 Feb 2009) New Revision: 6414 Modified: branches/coremath/numpy/core/src/_signbit.c branches/coremath/numpy/core/src/npy_math.c.src Log: fix syntax issues for signbit internal replacement. Modified: branches/coremath/numpy/core/src/_signbit.c =================================================================== --- branches/coremath/numpy/core/src/_signbit.c 2009-02-19 10:50:16 UTC (rev 6413) +++ branches/coremath/numpy/core/src/_signbit.c 2009-02-19 10:55:31 UTC (rev 6414) @@ -1,6 +1,6 @@ /* Adapted from cephes */ -static int +int _npy_signbit_d(double x) { union Modified: branches/coremath/numpy/core/src/npy_math.c.src =================================================================== --- branches/coremath/numpy/core/src/npy_math.c.src 2009-02-19 10:50:16 UTC (rev 6413) +++ branches/coremath/numpy/core/src/npy_math.c.src 2009-02-19 10:55:31 UTC (rev 6414) @@ -198,12 +198,12 @@ #if !defined(HAVE_DECL_SIGNBIT) #include "_signbit.c" -static int _npy_signbit_f (float x) +int _npy_signbit_f (float x) { - return npy_signbit_d((double)x); + return _npy_signbit_d((double)x); } -static int _npy_signbit_ld (long double x) +int _npy_signbit_ld (long double x) { return _npy_signbit_d((double)x); } From numpy-svn at scipy.org Thu Feb 19 06:43:30 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Thu, 19 Feb 2009 05:43:30 -0600 (CST) Subject: [Numpy-svn] r6415 - branches/coremath/numpy/core/include/numpy Message-ID: <20090219114330.E1B6EC7C020@scipy.org> Author: cdavid Date: 2009-02-19 05:42:23 -0600 (Thu, 19 Feb 2009) New Revision: 6415 Modified: branches/coremath/numpy/core/include/numpy/npy_math.h Log: Add more basic math funcs in npymath lib. Modified: branches/coremath/numpy/core/include/numpy/npy_math.h =================================================================== --- branches/coremath/numpy/core/include/numpy/npy_math.h 2009-02-19 10:55:31 UTC (rev 6414) +++ branches/coremath/numpy/core/include/numpy/npy_math.h 2009-02-19 11:42:23 UTC (rev 6415) @@ -14,10 +14,24 @@ double npy_cosh(double x); double npy_tanh(double x); +double npy_asin(double x); +double npy_acos(double x); +double npy_atan(double x); +double npy_aexp(double x); +double npy_alog(double x); +double npy_asqrt(double x); +double npy_afabs(double x); + +double npy_log(double x); +double npy_log10(double x); double npy_exp(double x); -double npy_log(double x); +double npy_pow(double x); double npy_sqrt(double x); + double npy_fabs(double x); +double npy_ceil(double x); +double npy_fmod(double x, double y); +double npy_floor(double x); double npy_expm1(double x); double npy_log1p(double x); From numpy-svn at scipy.org Thu Feb 19 06:45:15 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Thu, 19 Feb 2009 05:45:15 -0600 (CST) Subject: [Numpy-svn] r6416 - branches/coremath/numpy/core/include/numpy Message-ID: <20090219114515.C40E9C7C020@scipy.org> Author: cdavid Date: 2009-02-19 05:44:22 -0600 (Thu, 19 Feb 2009) New Revision: 6416 Modified: branches/coremath/numpy/core/include/numpy/npy_math.h Log: Add a few more non standard functions to npy math. Modified: branches/coremath/numpy/core/include/numpy/npy_math.h =================================================================== --- branches/coremath/numpy/core/include/numpy/npy_math.h 2009-02-19 11:42:23 UTC (rev 6415) +++ branches/coremath/numpy/core/include/numpy/npy_math.h 2009-02-19 11:44:22 UTC (rev 6416) @@ -25,7 +25,6 @@ double npy_log(double x); double npy_log10(double x); double npy_exp(double x); -double npy_pow(double x); double npy_sqrt(double x); double npy_fabs(double x); @@ -45,6 +44,7 @@ double npy_log2(double x); double npy_atan2(double x, double y); +double npy_pow(double x, double y); /* * IEEE 754 fpu handling. Those are guaranteed to be macros */ @@ -149,4 +149,30 @@ npy_longdouble npy_modfl(npy_longdouble x, npy_longdouble* y); +/* + * Non standard functions + */ +double npy_deg2rad(double x); +double npy_rad2deg(double x); +double npy_logaddexp(double x, double y); +double npy_logaddexp2(double x, double y); + +float npy_deg2radf(float x); +float npy_rad2degf(float x); +float npy_logaddexpf(float x, float y); +float npy_logaddexp2f(float x, float y); + +npy_longdouble npy_deg2radl(npy_longdouble x); +npy_longdouble npy_rad2degl(npy_longdouble x); +npy_longdouble npy_logaddexpl(npy_longdouble x, npy_longdouble y); +npy_longdouble npy_logaddexp2l(npy_longdouble x, npy_longdouble y); + +#define npy_degrees npy_rad2deg +#define npy_degreesf npy_rad2degf +#define npy_degreesl npy_rad2degl + +#define npy_radians npy_deg2rad +#define npy_radiansf npy_deg2radf +#define npy_radiansl npy_deg2radl + #endif From numpy-svn at scipy.org Thu Feb 19 06:45:55 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Thu, 19 Feb 2009 05:45:55 -0600 (CST) Subject: [Numpy-svn] r6417 - branches/coremath/numpy/core/src Message-ID: <20090219114555.2F4D8C7C020@scipy.org> Author: cdavid Date: 2009-02-19 05:45:30 -0600 (Thu, 19 Feb 2009) New Revision: 6417 Modified: branches/coremath/numpy/core/src/npy_math.c.src branches/coremath/numpy/core/src/umath_funcs.inc.src Log: Move non standard, real math function from umath_funcs into npymath. Modified: branches/coremath/numpy/core/src/npy_math.c.src =================================================================== --- branches/coremath/numpy/core/src/npy_math.c.src 2009-02-19 11:44:22 UTC (rev 6416) +++ branches/coremath/numpy/core/src/npy_math.c.src 2009-02-19 11:45:30 UTC (rev 6417) @@ -285,7 +285,107 @@ /**end repeat**/ +/* + * Useful constants in three precisions: + * XXX: those should really be in the header + */ + +/**begin repeat + * #c = f, ,l# + * #C = F, ,L# + */ +#define NPY_E at c@ 2.7182818284590452353602874713526625 at C@ /* e */ +#define NPY_LOG2E at c@ 1.4426950408889634073599246810018921 at C@ /* log_2 e */ +#define NPY_LOG10E at c@ 0.4342944819032518276511289189166051 at C@ /* log_10 e */ +#define NPY_LOGE2 at c@ 0.6931471805599453094172321214581766 at C@ /* log_e 2 */ +#define NPY_LOGE10 at c@ 2.3025850929940456840179914546843642 at C@ /* log_e 10 */ +#define NPY_PI at c@ 3.1415926535897932384626433832795029 at C@ /* pi */ +#define NPY_PI_2 at c@ 1.5707963267948966192313216916397514 at C@ /* pi/2 */ +#define NPY_PI_4 at c@ 0.7853981633974483096156608458198757 at C@ /* pi/4 */ +#define NPY_1_PI at c@ 0.3183098861837906715377675267450287 at C@ /* 1/pi */ +#define NPY_2_PI at c@ 0.6366197723675813430755350534900574 at C@ /* 2/pi */ +/**end repeat**/ + /* + * Non standard functions + */ + +/**begin repeat + * #type = float, double, npy_longdouble# + * #c = f, ,l# + * #C = F, ,L# + */ + +#define LOGE2 NPY_LOGE2 at c@ +#define LOG2E NPY_LOG2E at c@ +#define RAD2DEG (180.0 at c@/NPY_PI at c@) +#define DEG2RAD (NPY_PI at c@/180.0 at c@) + +static @type@ rad2deg at c@(@type@ x) +{ + return x*RAD2DEG; +} + +static @type@ deg2rad at c@(@type@ x) +{ + return x*DEG2RAD; +} + +static @type@ log2_1p at c@(@type@ x) +{ + @type@ u = 1 + x; + if (u == 1) { + return LOG2E*x; + } else { + return npy_log2 at c@(u) * x / (u - 1); + } +} + +static @type@ exp2_1m at c@(@type@ x) +{ + @type@ u = exp at c@(x); + if (u == 1.0) { + return LOGE2*x; + } else if (u - 1 == -1) { + return -LOGE2; + } else { + return (u - 1) * x/npy_log2 at c@(u); + } +} + +static @type@ logaddexp at c@(@type@ x, @type@ y) +{ + const @type@ tmp = x - y; + if (tmp > 0) { + return x + npy_log1p at c@(npy_exp at c@(-tmp)); + } + else { + return y + npy_log1p at c@(npy_exp at c@(tmp)); + } +} + +static @type@ logaddexp2 at c@(@type@ x, @type@ y) +{ + const @type@ tmp = x - y; + if (tmp > 0) { + return x + log2_1p at c@(npy_exp2 at c@(-tmp)); + } + else { + return y + log2_1p at c@(npy_exp2 at c@(tmp)); + } +} + +#define degrees at c@ rad2deg at c@ +#define radians at c@ deg2rad at c@ + +#undef LOGE2 +#undef LOG2E +#undef RAD2DEG +#undef DEG2RAD + +/**end repeat**/ + +/* * Decorate all the functions: those are the public ones */ @@ -295,7 +395,8 @@ */ /**begin repeat1 * #kind = sin,cos,tan,sinh,cosh,tanh,fabs,floor,ceil,rint,trunc,sqrt,log10, - * log,exp,expm1,asin,acos,atan,asinh,acosh,atanh,log1p,exp2,log2# + * log,exp,expm1,asin,acos,atan,asinh,acosh,atanh,log1p,exp2,log2, + * rad2deg,deg2rad,exp2_1m# */ @type@ npy_ at kind@@c@(@type@ x) @@ -306,7 +407,7 @@ /**end repeat1**/ /**begin repeat1 - * #kind = atan2,hypot,pow,fmod# + * #kind = atan2,hypot,pow,fmod,logaddexp,logaddexp2# */ @type@ npy_ at kind@@c@(@type@ x, @type@ y) { Modified: branches/coremath/numpy/core/src/umath_funcs.inc.src =================================================================== --- branches/coremath/numpy/core/src/umath_funcs.inc.src 2009-02-19 11:44:22 UTC (rev 6416) +++ branches/coremath/numpy/core/src/umath_funcs.inc.src 2009-02-19 11:45:30 UTC (rev 6417) @@ -29,91 +29,6 @@ /**end repeat**/ /* - ****************************************************************************** - ** FLOAT FUNCTIONS ** - ****************************************************************************** - */ - -/**begin repeat - * #type = float, double, longdouble# - * #c = f, ,l# - * #C = F, ,L# - */ - -#define LOGE2 NPY_LOGE2 at c@ -#define LOG2E NPY_LOG2E at c@ -#define RAD2DEG (180.0 at c@/NPY_PI at c@) -#define DEG2RAD (NPY_PI at c@/180.0 at c@) - -static @type@ -rad2deg at c@(@type@ x) { - return x*RAD2DEG; -} - -static @type@ -deg2rad at c@(@type@ x) { - return x*DEG2RAD; -} - -static @type@ -log2_1p at c@(@type@ x) -{ - @type@ u = 1 + x; - if (u == 1) { - return LOG2E*x; - } else { - return npy_log2 at c@(u) * x / (u - 1); - } -} - -static @type@ -exp2_1m at c@(@type@ x) -{ - @type@ u = exp at c@(x); - if (u == 1.0) { - return LOGE2*x; - } else if (u - 1 == -1) { - return -LOGE2; - } else { - return (u - 1) * x/npy_log2 at c@(u); - } -} - -static @type@ -logaddexp at c@(@type@ x, @type@ y) -{ - const @type@ tmp = x - y; - if (tmp > 0) { - return x + npy_log1p at c@(npy_exp at c@(-tmp)); - } - else { - return y + npy_log1p at c@(npy_exp at c@(tmp)); - } -} - -static @type@ -logaddexp2 at c@(@type@ x, @type@ y) -{ - const @type@ tmp = x - y; - if (tmp > 0) { - return x + log2_1p at c@(npy_exp2 at c@(-tmp)); - } - else { - return y + log2_1p at c@(npy_exp2 at c@(tmp)); - } -} - -#define degrees at c@ rad2deg at c@ -#define radians at c@ deg2rad at c@ - -#undef LOGE2 -#undef LOG2E -#undef RAD2DEG -#undef DEG2RAD - -/**end repeat**/ - -/* ***************************************************************************** ** PYTHON OBJECT FUNCTIONS ** ***************************************************************************** From numpy-svn at scipy.org Thu Feb 19 06:47:03 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Thu, 19 Feb 2009 05:47:03 -0600 (CST) Subject: [Numpy-svn] r6418 - branches/coremath/numpy/core/code_generators Message-ID: <20090219114703.11AD5C7C020@scipy.org> Author: cdavid Date: 2009-02-19 05:46:09 -0600 (Thu, 19 Feb 2009) New Revision: 6418 Modified: branches/coremath/numpy/core/code_generators/generate_umath.py Log: Fix umath generator to use npymath funcs instead of straight libc ones. Modified: branches/coremath/numpy/core/code_generators/generate_umath.py =================================================================== --- branches/coremath/numpy/core/code_generators/generate_umath.py 2009-02-19 11:45:30 UTC (rev 6417) +++ branches/coremath/numpy/core/code_generators/generate_umath.py 2009-02-19 11:46:09 UTC (rev 6418) @@ -37,7 +37,7 @@ self.out = self.type * nout assert len(self.out) == nout -_fdata_map = dict(f='%sf', d='%s', g='%sl', +_fdata_map = dict(f='npy_%sf', d='npy_%s', g='npy_%sl', F='nc_%sf', D='nc_%s', G='nc_%sl') def build_func_data(types, f): func_data = [] From numpy-svn at scipy.org Thu Feb 19 09:41:36 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Thu, 19 Feb 2009 08:41:36 -0600 (CST) Subject: [Numpy-svn] r6419 - branches/coremath/numpy/core/include/numpy Message-ID: <20090219144136.DB95AC7C015@scipy.org> Author: cdavid Date: 2009-02-19 08:40:16 -0600 (Thu, 19 Feb 2009) New Revision: 6419 Modified: branches/coremath/numpy/core/include/numpy/npy_math.h Log: Fix npy_signbit when signbit is not available: I forgot to declare the implementation functions... stupidest thing ever. Modified: branches/coremath/numpy/core/include/numpy/npy_math.h =================================================================== --- branches/coremath/numpy/core/include/numpy/npy_math.h 2009-02-19 11:46:09 UTC (rev 6418) +++ branches/coremath/numpy/core/include/numpy/npy_math.h 2009-02-19 14:40:16 UTC (rev 6419) @@ -60,13 +60,16 @@ #define npy_isfinite(x) isfinite((x)) #endif -#ifndef NPY_HAVE_DECL_ISFINITE +#ifndef NPY_HAVE_DECL_ISINF #define npy_isinf(x) (!npy_isfinite(x) && !npy_isnan(x)) #else #define npy_isinf(x) isinf((x)) #endif #ifndef NPY_HAVE_DECL_SIGNBIT + int _npy_signbit_f(float x); + int _npy_signbit(double x); + int _npy_signbit_ld(npy_longdouble x); #define npy_signbit(x) \ (sizeof (x) == sizeof (long double) ? _npy_signbit_ld (x) \ : sizeof (x) == sizeof (double) ? _npy_signbit_d (x) \ From numpy-svn at scipy.org Thu Feb 19 09:46:21 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Thu, 19 Feb 2009 08:46:21 -0600 (CST) Subject: [Numpy-svn] r6420 - branches/coremath/numpy/core/src Message-ID: <20090219144621.8CC4DC7C015@scipy.org> Author: cdavid Date: 2009-02-19 08:46:01 -0600 (Thu, 19 Feb 2009) New Revision: 6420 Modified: branches/coremath/numpy/core/src/umath_loops.inc.src Log: Use npymath for isnan and co. Modified: branches/coremath/numpy/core/src/umath_loops.inc.src =================================================================== --- branches/coremath/numpy/core/src/umath_loops.inc.src 2009-02-19 14:40:16 UTC (rev 6419) +++ branches/coremath/numpy/core/src/umath_loops.inc.src 2009-02-19 14:46:01 UTC (rev 6420) @@ -860,7 +860,7 @@ /**begin repeat1 * #kind = isnan, isinf, isfinite, signbit# - * #func = isnan, isinf, isfinite, signbit# + * #func = npy_isnan, npy_isinf, npy_isfinite, npy_signbit# **/ static void @TYPE at _@kind@(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) @@ -883,7 +883,7 @@ BINARY_LOOP { const @type@ in1 = *(@type@ *)ip1; const @type@ in2 = *(@type@ *)ip2; - *((@type@ *)op1) = (in1 @OP@ in2 || isnan(in1)) ? in1 : in2; + *((@type@ *)op1) = (in1 @OP@ in2 || npy_isnan(in1)) ? in1 : in2; } } /**end repeat1**/ @@ -899,7 +899,7 @@ BINARY_LOOP { const @type@ in1 = *(@type@ *)ip1; const @type@ in2 = *(@type@ *)ip2; - *((@type@ *)op1) = (in1 @OP@ in2 || isnan(in2)) ? in1 : in2; + *((@type@ *)op1) = (in1 @OP@ in2 || npy_isnan(in2)) ? in1 : in2; } } /**end repeat1**/ @@ -1173,7 +1173,7 @@ /**begin repeat1 * #kind = isnan, isinf, isfinite# - * #func = isnan, isinf, isfinite# + * #func = npy_isnan, npy_isinf, npy_isfinite# * #OP = ||, ||, &&# **/ static void @@ -1272,7 +1272,7 @@ const @type@ in1i = ((@type@ *)ip1)[1]; const @type@ in2r = ((@type@ *)ip2)[0]; const @type@ in2i = ((@type@ *)ip2)[1]; - if (@OP@(in1r, in1i, in2r, in2i) || isnan(in1r) || isnan(in1i)) { + if (@OP@(in1r, in1i, in2r, in2i) || npy_isnan(in1r) || npy_isnan(in1i)) { ((@type@ *)op1)[0] = in1r; ((@type@ *)op1)[1] = in1i; } @@ -1296,7 +1296,7 @@ const @type@ in1i = ((@type@ *)ip1)[1]; const @type@ in2r = ((@type@ *)ip2)[0]; const @type@ in2i = ((@type@ *)ip2)[1]; - if (@OP@(in1r, in1i, in2r, in2i) || isnan(in2r) || isnan(in2i)) { + if (@OP@(in1r, in1i, in2r, in2i) || npy_isnan(in2r) || npy_isnan(in2i)) { ((@type@ *)op1)[0] = in1r; ((@type@ *)op1)[1] = in1i; } From numpy-svn at scipy.org Thu Feb 19 14:28:32 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Thu, 19 Feb 2009 13:28:32 -0600 (CST) Subject: [Numpy-svn] r6421 - trunk/numpy/lib/tests Message-ID: <20090219192832.D95B8C7C01E@scipy.org> Author: stefan Date: 2009-02-19 13:28:08 -0600 (Thu, 19 Feb 2009) New Revision: 6421 Modified: trunk/numpy/lib/tests/test_io.py Log: Fix tests using strptime to be Python 2.4 compatible. Modified: trunk/numpy/lib/tests/test_io.py =================================================================== --- trunk/numpy/lib/tests/test_io.py 2009-02-19 14:46:01 UTC (rev 6420) +++ trunk/numpy/lib/tests/test_io.py 2009-02-19 19:28:08 UTC (rev 6421) @@ -6,11 +6,19 @@ import StringIO from tempfile import NamedTemporaryFile -import sys +import sys, time +from datetime import datetime MAJVER, MINVER = sys.version_info[:2] +def strptime(s, fmt=None): + """This function is available in the datetime module only + from Python >= 2.5. + + """ + return datetime(*time.strptime(s, fmt)[:3]) + class RoundtripTest(object): def roundtrip(self, save_func, *args, **kwargs): """ @@ -538,12 +546,11 @@ def test_converters_cornercases(self): "Test the conversion to datetime." - from datetime import datetime - converter = {'date':lambda s: datetime.strptime(s,'%Y-%m-%d %H:%M:%SZ')} + converter = {'date': lambda s: strptime(s, '%Y-%m-%d %H:%M:%SZ')} data = StringIO.StringIO('2009-02-03 12:00:00Z, 72214.0') test = np.ndfromtxt(data, delimiter=',', dtype=None, names=['date','stid'], converters=converter) - control = np.array((datetime(2009,02,03,12,0), 72214.), + control = np.array((datetime(2009,02,03), 72214.), dtype=[('date', np.object_), ('stid', float)]) assert_equal(test, control) @@ -583,11 +590,11 @@ 2; 2002-01-31 """ ndtype = [('idx', int), ('code', np.object)] - func = lambda s: date(*(time.strptime(s.strip(), "%Y-%m-%d")[:3])) + func = lambda s: strptime(s.strip(), "%Y-%m-%d") converters = {1: func} test = np.genfromtxt(StringIO.StringIO(data), delimiter=";", dtype=ndtype, converters=converters) - control = np.array([(1, date(2001,1,1)), (2, date(2002,1,31))], + control = np.array([(1, datetime(2001,1,1)), (2, datetime(2002,1,31))], dtype=ndtype) assert_equal(test, control) # From numpy-svn at scipy.org Thu Feb 19 18:25:05 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Thu, 19 Feb 2009 17:25:05 -0600 (CST) Subject: [Numpy-svn] r6422 - trunk/numpy/core/src Message-ID: <20090219232505.619BBC7C019@scipy.org> Author: charris Date: 2009-02-19 17:25:01 -0600 (Thu, 19 Feb 2009) New Revision: 6422 Modified: trunk/numpy/core/src/scalartypes.inc.src Log: Coding style cleanups. Modified: trunk/numpy/core/src/scalartypes.inc.src =================================================================== --- trunk/numpy/core/src/scalartypes.inc.src 2009-02-19 19:28:08 UTC (rev 6421) +++ trunk/numpy/core/src/scalartypes.inc.src 2009-02-19 23:25:01 UTC (rev 6422) @@ -13,72 +13,74 @@ {PyObject_HEAD_INIT(&PyBoolArrType_Type) 1}, }; -/* Inheritance established later when tp_bases is set (or tp_base for - single inheritance) */ +/* + * Inheritance is established later when tp_bases is set (or tp_base for + * single inheritance) + */ /**begin repeat - -#name=number, integer, signedinteger, unsignedinteger, inexact, floating, complexfloating, flexible, character# -#NAME=Number, Integer, SignedInteger, UnsignedInteger, Inexact, Floating, ComplexFloating, Flexible, Character# -*/ - + * #name = number, integer, signedinteger, unsignedinteger, inexact, + * floating, complexfloating, flexible, character# + * #NAME = Number, Integer, SignedInteger, UnsignedInteger, Inexact, + * Floating, ComplexFloating, Flexible, Character# + */ static PyTypeObject Py at NAME@ArrType_Type = { PyObject_HEAD_INIT(NULL) - 0, /*ob_size*/ - "numpy. at name@", /*tp_name*/ - sizeof(PyObject), /*tp_basicsize*/ - 0, /* tp_itemsize */ + 0, /* ob_size*/ + "numpy. at name@", /* tp_name*/ + sizeof(PyObject), /* tp_basicsize*/ + 0, /* tp_itemsize */ /* methods */ - 0, /* tp_dealloc */ - 0, /* tp_print */ - 0, /* tp_getattr */ - 0, /* tp_setattr */ - 0, /* tp_compare */ - 0, /* tp_repr */ - 0, /* tp_as_number */ - 0, /* tp_as_sequence */ - 0, /* tp_as_mapping */ - 0, /* tp_hash */ - 0, /* tp_call */ - 0, /* tp_str */ - 0, /* tp_getattro */ - 0, /* tp_setattro */ - 0, /* tp_as_buffer */ - 0, /* tp_flags */ - 0, /* tp_doc */ - 0, /* tp_traverse */ - 0, /* tp_clear */ - 0, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ - 0, /* tp_iter */ - 0, /* tp_iternext */ - 0, /* tp_methods */ - 0, /* tp_members */ - 0, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - 0, /* tp_init */ - 0, /* tp_alloc */ - 0, /* tp_new */ - 0, /* tp_free */ - 0, /* tp_is_gc */ - 0, /* tp_bases */ - 0, /* tp_mro */ - 0, /* tp_cache */ - 0, /* tp_subclasses */ - 0, /* tp_weaklist */ - 0, /* tp_del */ + 0, /* tp_dealloc */ + 0, /* tp_print */ + 0, /* tp_getattr */ + 0, /* tp_setattr */ + 0, /* tp_compare */ + 0, /* tp_repr */ + 0, /* tp_as_number */ + 0, /* tp_as_sequence */ + 0, /* tp_as_mapping */ + 0, /* tp_hash */ + 0, /* tp_call */ + 0, /* tp_str */ + 0, /* tp_getattro */ + 0, /* tp_setattro */ + 0, /* tp_as_buffer */ + 0, /* tp_flags */ + 0, /* tp_doc */ + 0, /* tp_traverse */ + 0, /* tp_clear */ + 0, /* tp_richcompare */ + 0, /* tp_weaklistoffset */ + 0, /* tp_iter */ + 0, /* tp_iternext */ + 0, /* tp_methods */ + 0, /* tp_members */ + 0, /* tp_getset */ + 0, /* tp_base */ + 0, /* tp_dict */ + 0, /* tp_descr_get */ + 0, /* tp_descr_set */ + 0, /* tp_dictoffset */ + 0, /* tp_init */ + 0, /* tp_alloc */ + 0, /* tp_new */ + 0, /* tp_free */ + 0, /* tp_is_gc */ + 0, /* tp_bases */ + 0, /* tp_mro */ + 0, /* tp_cache */ + 0, /* tp_subclasses */ + 0, /* tp_weaklist */ + 0, /* tp_del */ #ifdef COUNT_ALLOCS - /* these must be last and never explicitly initialized */ - 0, /* tp_allocs */ - 0, /* tp_frees */ - 0, /* tp_maxalloc */ - 0, /* tp_prev */ - 0, /* *tp_next */ + /* these must be last and never explicitly initialized */ + 0, /* tp_allocs */ + 0, /* tp_frees */ + 0, /* tp_maxalloc */ + 0, /* tp_prev */ + 0, /* *tp_next */ #endif }; /**end repeat**/ @@ -118,13 +120,18 @@ CASE(CLONGDOUBLE, CLongDouble); CASE(OBJECT, Object); #undef CASE - case NPY_STRING: return (void *)PyString_AS_STRING(scalar); - case NPY_UNICODE: return (void *)PyUnicode_AS_DATA(scalar); - case NPY_VOID: return ((PyVoidScalarObject *)scalar)->obval; + case NPY_STRING: + return (void *)PyString_AS_STRING(scalar); + case NPY_UNICODE: + return (void *)PyUnicode_AS_DATA(scalar); + case NPY_VOID: + return ((PyVoidScalarObject *)scalar)->obval; } - /* Must be a user-defined type --- check to see which - scalar it inherits from. */ + /* + * Must be a user-defined type --- check to see which + * scalar it inherits from. + */ #define _CHK(cls) (PyObject_IsInstance(scalar, \ (PyObject *)&Py##cls##ArrType_Type)) @@ -140,7 +147,8 @@ _IFCASE(Long); _IFCASE(LongLong); } - else { /* Unsigned Integer */ + else { + /* Unsigned Integer */ _IFCASE(UByte); _IFCASE(UShort); _IFCASE(UInt); @@ -148,49 +156,64 @@ _IFCASE(ULongLong); } } - else { /* Inexact */ + else { + /* Inexact */ if _CHK(Floating) { _IFCASE(Float); _IFCASE(Double); _IFCASE(LongDouble); } - else { /*ComplexFloating */ + else { + /*ComplexFloating */ _IFCASE(CFloat); _IFCASE(CDouble); _IFCASE(CLongDouble); } } } - else if _CHK(Bool) return _OBJ(Bool); - else if _CHK(Flexible) { - if _CHK(String) return (void *)PyString_AS_STRING(scalar); - if _CHK(Unicode) return (void *)PyUnicode_AS_DATA(scalar); - if _CHK(Void) return ((PyVoidScalarObject *)scalar)->obval; + else if (_CHK(Bool)) { + return _OBJ(Bool); } - else _IFCASE(Object); + else if (_CHK(Flexible)) { + if (_CHK(String)) { + return (void *)PyString_AS_STRING(scalar); + } + if (_CHK(Unicode)) { + return (void *)PyUnicode_AS_DATA(scalar); + } + if (_CHK(Void)) { + return ((PyVoidScalarObject *)scalar)->obval; + } + } + else { + _IFCASE(Object); + } - /* Use the alignment flag to figure out where the data begins - after a PyObject_HEAD + /* + * Use the alignment flag to figure out where the data begins + * after a PyObject_HEAD */ memloc = (intp)scalar; memloc += sizeof(PyObject); - /* now round-up to the nearest alignment value - */ + /* now round-up to the nearest alignment value */ align = descr->alignment; - if (align > 1) memloc = ((memloc + align - 1)/align)*align; + if (align > 1) { + memloc = ((memloc + align - 1)/align)*align; + } return (void *)memloc; #undef _IFCASE #undef _OBJ #undef _CHK } -/* no error checking is performed -- ctypeptr must be same type as scalar */ -/* in case of flexible type, the data is not copied - into ctypeptr which is expected to be a pointer to pointer */ /*NUMPY_API - Convert to c-type -*/ + * Convert to c-type + * + * no error checking is performed -- ctypeptr must be same type as scalar + * in case of flexible type, the data is not copied + * into ctypeptr which is expected to be a pointer to pointer + */ static void PyArray_ScalarAsCtype(PyObject *scalar, void *ctypeptr) { @@ -202,24 +225,23 @@ if (PyTypeNum_ISEXTENDED(typecode->type_num)) { void **ct = (void **)ctypeptr; *ct = newptr; - } else { + } + else { memcpy(ctypeptr, newptr, typecode->elsize); } Py_DECREF(typecode); return; } -/* The output buffer must be large-enough to receive the value */ -/* Even for flexible types which is different from ScalarAsCtype - where only a reference for flexible types is returned -*/ - -/* This may not work right on narrow builds for NumPy unicode scalars. +/*NUMPY_API + * Cast Scalar to c-type + * + * The output buffer must be large-enough to receive the value + * Even for flexible types which is different from ScalarAsCtype + * where only a reference for flexible types is returned + * + * This may not work right on narrow builds for NumPy unicode scalars. */ - -/*NUMPY_API - Cast Scalar to c-type -*/ static int PyArray_CastScalarToCtype(PyObject *scalar, void *ctypeptr, PyArray_Descr *outcode) @@ -229,7 +251,9 @@ descr = PyArray_DescrFromScalar(scalar); castfunc = PyArray_GetCastFunc(descr, outcode->type_num); - if (castfunc == NULL) return -1; + if (castfunc == NULL) { + return -1; + } if (PyTypeNum_ISEXTENDED(descr->type_num) || PyTypeNum_ISEXTENDED(outcode->type_num)) { PyArrayObject *ain, *aout; @@ -245,7 +269,10 @@ 0, NULL, NULL, ctypeptr, CARRAY, NULL); - if (aout == NULL) {Py_DECREF(ain); return -1;} + if (aout == NULL) { + Py_DECREF(ain); + return -1; + } castfunc(ain->data, aout->data, 1, ain, aout); Py_DECREF(ain); Py_DECREF(aout); @@ -258,8 +285,8 @@ } /*NUMPY_API - Cast Scalar to c-type -*/ + * Cast Scalar to c-type + */ static int PyArray_CastScalarDirect(PyObject *scalar, PyArray_Descr *indescr, void *ctypeptr, int outtype) @@ -267,22 +294,24 @@ PyArray_VectorUnaryFunc* castfunc; void *ptr; castfunc = PyArray_GetCastFunc(indescr, outtype); - if (castfunc == NULL) return -1; + if (castfunc == NULL) { + return -1; + } ptr = scalar_value(scalar, indescr); castfunc(ptr, ctypeptr, 1, NULL, NULL); return 0; } -/* 0-dim array from array-scalar object */ -/* always contains a copy of the data - unless outcode is NULL, it is of void type and the referrer does - not own it either. -*/ - -/* steals reference to outcode */ /*NUMPY_API - Get 0-dim array from scalar -*/ + * Get 0-dim array from scalar + * + * 0-dim array from array-scalar object + * always contains a copy of the data + * unless outcode is NULL, it is of void type and the referrer does + * not own it either. + * + * steals reference to outcode + */ static PyObject * PyArray_FromScalar(PyObject *scalar, PyArray_Descr *outcode) { @@ -310,8 +339,10 @@ typecode, 0, NULL, NULL, NULL, 0, NULL); - if (r==NULL) {Py_XDECREF(outcode); return NULL;} - + if (r==NULL) { + Py_XDECREF(outcode); + return NULL; + } if (PyDataType_FLAGCHK(typecode, NPY_USE_SETITEM)) { if (typecode->f->setitem(scalar, PyArray_DATA(r), r) < 0) { Py_XDECREF(outcode); Py_DECREF(r); @@ -328,7 +359,8 @@ (PyArray_UCS4 *)PyArray_DATA(r), PyUnicode_GET_SIZE(scalar), PyArray_ITEMSIZE(r) >> 2); - } else + } + else #endif { memcpy(PyArray_DATA(r), memptr, PyArray_ITEMSIZE(r)); @@ -338,8 +370,9 @@ } finish: - if (outcode == NULL) return r; - + if (outcode == NULL) { + return r; + } if (outcode->type_num == typecode->type_num) { if (!PyTypeNum_ISEXTENDED(typecode->type_num) || (outcode->elsize == typecode->elsize)) @@ -353,10 +386,10 @@ } /*NUMPY_API - Get an Array Scalar From a Python Object - Returns NULL if unsuccessful but error is only - set if another error occurred. Currently only Numeric-like - object supported. + * Get an Array Scalar From a Python Object + * + * Returns NULL if unsuccessful but error is only set if another error occurred. + * Currently only Numeric-like object supported. */ static PyObject * PyArray_ScalarFromObject(PyObject *object) @@ -367,17 +400,23 @@ } if (PyInt_Check(object)) { ret = PyArrayScalar_New(Long); - if (ret == NULL) return NULL; + if (ret == NULL) { + return NULL; + } PyArrayScalar_VAL(ret, Long) = PyInt_AS_LONG(object); } else if (PyFloat_Check(object)) { ret = PyArrayScalar_New(Double); - if (ret == NULL) return NULL; + if (ret == NULL) { + return NULL; + } PyArrayScalar_VAL(ret, Double) = PyFloat_AS_DOUBLE(object); } else if (PyComplex_Check(object)) { ret = PyArrayScalar_New(CDouble); - if (ret == NULL) return NULL; + if (ret == NULL) { + return NULL; + } PyArrayScalar_VAL(ret, CDouble).real = ((PyComplexObject *)object)->cval.real; PyArrayScalar_VAL(ret, CDouble).imag = @@ -391,7 +430,9 @@ return NULL; } ret = PyArrayScalar_New(LongLong); - if (ret == NULL) return NULL; + if (ret == NULL) { + return NULL; + } PyArrayScalar_VAL(ret, LongLong) = val; } else if (PyBool_Check(object)) { @@ -410,14 +451,16 @@ gentype_alloc(PyTypeObject *type, Py_ssize_t nitems) { PyObject *obj; - const size_t size = _PyObject_VAR_SIZE(type, nitems+1); + const size_t size = _PyObject_VAR_SIZE(type, nitems + 1); obj = (PyObject *)_pya_malloc(size); memset(obj, 0, size); - if (type->tp_itemsize == 0) + if (type->tp_itemsize == 0) { PyObject_INIT(obj, type); - else + } + else { (void) PyObject_INIT_VAR((PyVarObject *)obj, type, nitems); + } return obj; } @@ -436,8 +479,7 @@ if (!PyArray_IsScalar(m1,Generic)) { if (PyArray_Check(m1)) { - ret = m1->ob_type->tp_as_number->nb_power(m1,m2, - Py_None); + ret = m1->ob_type->tp_as_number->nb_power(m1,m2, Py_None); } else { if (!PyArray_IsScalar(m2,Generic)) { @@ -445,17 +487,17 @@ return NULL; } arr = PyArray_FromScalar(m2, NULL); - if (arr == NULL) return NULL; - ret = arr->ob_type->tp_as_number->nb_power(m1, arr, - Py_None); + if (arr == NULL) { + return NULL; + } + ret = arr->ob_type->tp_as_number->nb_power(m1, arr, Py_None); Py_DECREF(arr); } return ret; } if (!PyArray_IsScalar(m2, Generic)) { if (PyArray_Check(m2)) { - ret = m2->ob_type->tp_as_number->nb_power(m1,m2, - Py_None); + ret = m2->ob_type->tp_as_number->nb_power(m1,m2, Py_None); } else { if (!PyArray_IsScalar(m1, Generic)) { @@ -463,18 +505,21 @@ return NULL; } arr = PyArray_FromScalar(m1, NULL); - if (arr == NULL) return NULL; - ret = arr->ob_type->tp_as_number->nb_power(arr, m2, - Py_None); + if (arr == NULL) { + return NULL; + } + ret = arr->ob_type->tp_as_number->nb_power(arr, m2, Py_None); Py_DECREF(arr); } return ret; } - arr=arg2=NULL; + arr = arg2 = NULL; arr = PyArray_FromScalar(m1, NULL); arg2 = PyArray_FromScalar(m2, NULL); if (arr == NULL || arg2 == NULL) { - Py_XDECREF(arr); Py_XDECREF(arg2); return NULL; + Py_XDECREF(arr); + Py_XDECREF(arg2); + return NULL; } ret = arr->ob_type->tp_as_number->nb_power(arr, arg2, Py_None); Py_DECREF(arr); @@ -489,26 +534,35 @@ PyObject *arr, *meth, *ret; arr = PyArray_FromScalar(self, NULL); - if (arr == NULL) return NULL; + if (arr == NULL) { + return NULL; + } meth = PyObject_GetAttrString(arr, str); - if (meth == NULL) {Py_DECREF(arr); return NULL;} - if (kwds == NULL) + if (meth == NULL) { + Py_DECREF(arr); + return NULL; + } + if (kwds == NULL) { ret = PyObject_CallObject(meth, args); - else + } + else { ret = PyObject_Call(meth, args, kwds); + } Py_DECREF(meth); Py_DECREF(arr); - if (ret && PyArray_Check(ret)) + if (ret && PyArray_Check(ret)) { return PyArray_Return((PyArrayObject *)ret); - else + } + else { return ret; + } } /**begin repeat * - * #name=add, subtract, divide, remainder, divmod, lshift, rshift, and, xor, or, floor_divide, true_divide# + * #name = add, subtract, divide, remainder, divmod, lshift, rshift, + * and, xor, or, floor_divide, true_divide# */ - static PyObject * gentype_ at name@(PyObject *m1, PyObject *m2) { @@ -521,28 +575,30 @@ static PyObject * gentype_multiply(PyObject *m1, PyObject *m2) { - PyObject *ret=NULL; + PyObject *ret = NULL; long repeat; if (!PyArray_IsScalar(m1, Generic) && ((m1->ob_type->tp_as_number == NULL) || (m1->ob_type->tp_as_number->nb_multiply == NULL))) { - /* Try to convert m2 to an int and try sequence - repeat */ + /* Try to convert m2 to an int and try sequence repeat */ repeat = PyInt_AsLong(m2); - if (repeat == -1 && PyErr_Occurred()) return NULL; + if (repeat == -1 && PyErr_Occurred()) { + return NULL; + } ret = PySequence_Repeat(m1, (int) repeat); } else if (!PyArray_IsScalar(m2, Generic) && ((m2->ob_type->tp_as_number == NULL) || (m2->ob_type->tp_as_number->nb_multiply == NULL))) { - /* Try to convert m1 to an int and try sequence - repeat */ + /* Try to convert m1 to an int and try sequence repeat */ repeat = PyInt_AsLong(m1); - if (repeat == -1 && PyErr_Occurred()) return NULL; + if (repeat == -1 && PyErr_Occurred()) { + return NULL; + } ret = PySequence_Repeat(m2, (int) repeat); } - if (ret==NULL) { + if (ret == NULL) { PyErr_Clear(); /* no effect if not set */ ret = PyArray_Type.tp_as_number->nb_multiply(m1, m2); } @@ -550,17 +606,18 @@ } /**begin repeat - -#name=positive, negative, absolute, invert, int, long, float, oct, hex# -*/ - + * + * #name=positive, negative, absolute, invert, int, long, float, oct, hex# + */ static PyObject * gentype_ at name@(PyObject *m1) { PyObject *arr, *ret; arr = PyArray_FromScalar(m1, NULL); - if (arr == NULL) return NULL; + if (arr == NULL) { + return NULL; + } ret = arr->ob_type->tp_as_number->nb_ at name@(arr); Py_DECREF(arr); return ret; @@ -574,7 +631,9 @@ int ret; arr = PyArray_FromScalar(m1, NULL); - if (arr == NULL) return -1; + if (arr == NULL) { + return -1; + } ret = arr->ob_type->tp_as_number->nb_nonzero(arr); Py_DECREF(arr); return ret; @@ -587,7 +646,9 @@ PyObject *ret; arr = (PyArrayObject *)PyArray_FromScalar(self, NULL); - if (arr==NULL) return NULL; + if (arr == NULL) { + return NULL; + } ret = PyObject_Str((PyObject *)arr); Py_DECREF(arr); return ret; @@ -601,7 +662,9 @@ PyObject *ret; arr = (PyArrayObject *)PyArray_FromScalar(self, NULL); - if (arr==NULL) return NULL; + if (arr == NULL) { + return NULL; + } ret = PyObject_Str((PyObject *)arr); Py_DECREF(arr); return ret; @@ -613,9 +676,9 @@ #endif /**begin repeat - * #name=float, double, longdouble# - * #NAME=FLOAT, DOUBLE, LONGDOUBLE# - * #type=f, d, l# + * #name = float, double, longdouble# + * #NAME = FLOAT, DOUBLE, LONGDOUBLE# + * #type = f, d, l# */ #define _FMT1 "%%.%i" NPY_ at NAME@_FMT @@ -632,7 +695,7 @@ res = NumPyOS_ascii_format at type@(buf, buflen, format, val, 0); if (res == NULL) { fprintf(stderr, "Error while formatting\n"); - return; + return; } /* If nothing but digits after sign, append ".0" */ @@ -656,28 +719,28 @@ if (val.real == 0.0) { PyOS_snprintf(format, sizeof(format), _FMT1, prec); res = NumPyOS_ascii_format at type@(buf, buflen-1, format, val.imag, 0); - if (res == NULL) { + if (res == NULL) { fprintf(stderr, "Error while formatting\n"); - return; - } - strncat(buf, "j", 1); + return; + } + strncat(buf, "j", 1); } else { - char re[64], im[64]; - PyOS_snprintf(format, sizeof(format), _FMT1, prec); + char re[64], im[64]; + PyOS_snprintf(format, sizeof(format), _FMT1, prec); res = NumPyOS_ascii_format at type@(re, sizeof(re), format, val.real, 0); - if (res == NULL) { + if (res == NULL) { fprintf(stderr, "Error while formatting\n"); - return; - } + return; + } - PyOS_snprintf(format, sizeof(format), _FMT2, prec); + PyOS_snprintf(format, sizeof(format), _FMT2, prec); res = NumPyOS_ascii_format at type@(im, sizeof(im), format, val.imag, 0); - if (res == NULL) { + if (res == NULL) { fprintf(stderr, "Error while formatting\n"); - return; - } - PyOS_snprintf(buf, buflen, "(%s%sj)", re, im); + return; + } + PyOS_snprintf(buf, buflen, "(%s%sj)", re, im); } } @@ -686,19 +749,20 @@ /**end repeat**/ -/* over-ride repr and str of array-scalar strings and unicode to - remove NULL bytes and then call the corresponding functions - of string and unicode. +/* + * over-ride repr and str of array-scalar strings and unicode to + * remove NULL bytes and then call the corresponding functions + * of string and unicode. */ /**begin repeat -#name=string*2,unicode*2# -#form=(repr,str)*2# -#Name=String*2,Unicode*2# -#NAME=STRING*2,UNICODE*2# -#extra=AndSize*2,,# -#type=char*2, Py_UNICODE*2# -*/ + * #name = string*2,unicode*2# + * #form = (repr,str)*2# + * #Name = String*2,Unicode*2# + * #NAME = STRING*2,UNICODE*2# + * #extra = AndSize*2,,# + * #type = char*2, Py_UNICODE*2# + */ static PyObject * @name at type_@form@(PyObject *self) { @@ -710,9 +774,13 @@ ip = dptr = Py at Name@_AS_ at NAME@(self); len = Py at Name@_GET_SIZE(self); dptr += len-1; - while(len > 0 && *dptr-- == 0) len--; + while(len > 0 && *dptr-- == 0) { + len--; + } new = Py at Name@_From at Name@@extra@(ip, len); - if (new == NULL) return PyString_FromString(""); + if (new == NULL) { + return PyString_FromString(""); + } ret = Py at Name@_Type.tp_ at form@(new); Py_DECREF(new); return ret; @@ -737,10 +805,11 @@ * * These functions will return NULL if PyString creation fails. */ + /**begin repeat - * #name=float, double, longdouble# - * #Name=Float, Double, LongDouble# - * #NAME=FLOAT, DOUBLE, LONGDOUBLE# + * #name = float, double, longdouble# + * #Name = Float, Double, LongDouble# + * #NAME = FLOAT, DOUBLE, LONGDOUBLE# */ /**begin repeat1 * #kind = str, repr# @@ -778,38 +847,38 @@ * float type print (control print a, where a is a float type instance) */ /**begin repeat - * #name=float, double, longdouble# - * #Name=Float, Double, LongDouble# - * #NAME=FLOAT, DOUBLE, LONGDOUBLE# + * #name = float, double, longdouble# + * #Name = Float, Double, LongDouble# + * #NAME = FLOAT, DOUBLE, LONGDOUBLE# */ static int @name at type_print(PyObject *v, FILE *fp, int flags) { - char buf[100]; + char buf[100]; @name@ val = ((Py at Name@ScalarObject *)v)->obval; - format_ at name@(buf, sizeof(buf), val, - (flags & Py_PRINT_RAW) ? @NAME at PREC_STR : @NAME at PREC_REPR); - Py_BEGIN_ALLOW_THREADS - fputs(buf, fp); - Py_END_ALLOW_THREADS - return 0; + format_ at name@(buf, sizeof(buf), val, + (flags & Py_PRINT_RAW) ? @NAME at PREC_STR : @NAME at PREC_REPR); + Py_BEGIN_ALLOW_THREADS + fputs(buf, fp); + Py_END_ALLOW_THREADS + return 0; } static int c at name@type_print(PyObject *v, FILE *fp, int flags) { /* Size of buf: twice sizeof(real) + 2 (for the parenthesis) */ - char buf[202]; + char buf[202]; c at name@ val = ((PyC at Name@ScalarObject *)v)->obval; - format_c at name@(buf, sizeof(buf), val, - (flags & Py_PRINT_RAW) ? @NAME at PREC_STR : @NAME at PREC_REPR); - Py_BEGIN_ALLOW_THREADS - fputs(buf, fp); - Py_END_ALLOW_THREADS - return 0; + format_c at name@(buf, sizeof(buf), val, + (flags & Py_PRINT_RAW) ? @NAME at PREC_STR : @NAME at PREC_REPR); + Py_BEGIN_ALLOW_THREADS + fputs(buf, fp); + Py_END_ALLOW_THREADS + return 0; } /**end repeat**/ @@ -821,13 +890,13 @@ */ /**begin repeat - -#name=(int, long, hex, oct, float)*2# -#KIND=(Long*4, Float)*2# -#char=,,,,,c*5# -#CHAR=,,,,,C*5# -#POST=,,,,,.real*5# -*/ + * + * #name = (int, long, hex, oct, float)*2# + * #KIND = (Long*4, Float)*2# + * #char = ,,,,,c*5# + * #CHAR = ,,,,,C*5# + * #POST = ,,,,,.real*5# + */ static PyObject * @char at longdoubletype_@name@(PyObject *self) { @@ -844,46 +913,46 @@ static PyNumberMethods gentype_as_number = { - (binaryfunc)gentype_add, /*nb_add*/ - (binaryfunc)gentype_subtract, /*nb_subtract*/ - (binaryfunc)gentype_multiply, /*nb_multiply*/ - (binaryfunc)gentype_divide, /*nb_divide*/ - (binaryfunc)gentype_remainder, /*nb_remainder*/ - (binaryfunc)gentype_divmod, /*nb_divmod*/ - (ternaryfunc)gentype_power, /*nb_power*/ + (binaryfunc)gentype_add, /*nb_add*/ + (binaryfunc)gentype_subtract, /*nb_subtract*/ + (binaryfunc)gentype_multiply, /*nb_multiply*/ + (binaryfunc)gentype_divide, /*nb_divide*/ + (binaryfunc)gentype_remainder, /*nb_remainder*/ + (binaryfunc)gentype_divmod, /*nb_divmod*/ + (ternaryfunc)gentype_power, /*nb_power*/ (unaryfunc)gentype_negative, - (unaryfunc)gentype_positive, /*nb_pos*/ - (unaryfunc)gentype_absolute, /*(unaryfunc)gentype_abs,*/ - (inquiry)gentype_nonzero_number, /*nb_nonzero*/ - (unaryfunc)gentype_invert, /*nb_invert*/ - (binaryfunc)gentype_lshift, /*nb_lshift*/ - (binaryfunc)gentype_rshift, /*nb_rshift*/ - (binaryfunc)gentype_and, /*nb_and*/ - (binaryfunc)gentype_xor, /*nb_xor*/ - (binaryfunc)gentype_or, /*nb_or*/ - 0, /*nb_coerce*/ - (unaryfunc)gentype_int, /*nb_int*/ - (unaryfunc)gentype_long, /*nb_long*/ - (unaryfunc)gentype_float, /*nb_float*/ - (unaryfunc)gentype_oct, /*nb_oct*/ - (unaryfunc)gentype_hex, /*nb_hex*/ - 0, /*inplace_add*/ - 0, /*inplace_subtract*/ - 0, /*inplace_multiply*/ - 0, /*inplace_divide*/ - 0, /*inplace_remainder*/ - 0, /*inplace_power*/ - 0, /*inplace_lshift*/ - 0, /*inplace_rshift*/ - 0, /*inplace_and*/ - 0, /*inplace_xor*/ - 0, /*inplace_or*/ - (binaryfunc)gentype_floor_divide, /*nb_floor_divide*/ - (binaryfunc)gentype_true_divide, /*nb_true_divide*/ - 0, /*nb_inplace_floor_divide*/ - 0, /*nb_inplace_true_divide*/ + (unaryfunc)gentype_positive, /*nb_pos*/ + (unaryfunc)gentype_absolute, /*(unaryfunc)gentype_abs,*/ + (inquiry)gentype_nonzero_number, /*nb_nonzero*/ + (unaryfunc)gentype_invert, /*nb_invert*/ + (binaryfunc)gentype_lshift, /*nb_lshift*/ + (binaryfunc)gentype_rshift, /*nb_rshift*/ + (binaryfunc)gentype_and, /*nb_and*/ + (binaryfunc)gentype_xor, /*nb_xor*/ + (binaryfunc)gentype_or, /*nb_or*/ + 0, /*nb_coerce*/ + (unaryfunc)gentype_int, /*nb_int*/ + (unaryfunc)gentype_long, /*nb_long*/ + (unaryfunc)gentype_float, /*nb_float*/ + (unaryfunc)gentype_oct, /*nb_oct*/ + (unaryfunc)gentype_hex, /*nb_hex*/ + 0, /*inplace_add*/ + 0, /*inplace_subtract*/ + 0, /*inplace_multiply*/ + 0, /*inplace_divide*/ + 0, /*inplace_remainder*/ + 0, /*inplace_power*/ + 0, /*inplace_lshift*/ + 0, /*inplace_rshift*/ + 0, /*inplace_and*/ + 0, /*inplace_xor*/ + 0, /*inplace_or*/ + (binaryfunc)gentype_floor_divide, /*nb_floor_divide*/ + (binaryfunc)gentype_true_divide, /*nb_true_divide*/ + 0, /*nb_inplace_floor_divide*/ + 0, /*nb_inplace_true_divide*/ #if PY_VERSION_HEX >= 0x02050000 - (unaryfunc)NULL, /*nb_index*/ + (unaryfunc)NULL, /*nb_index*/ #endif }; @@ -894,7 +963,9 @@ PyObject *arr, *ret; arr = PyArray_FromScalar(self, NULL); - if (arr == NULL) return NULL; + if (arr == NULL) { + return NULL; + } ret = arr->ob_type->tp_richcompare(arr, other, cmp_op); Py_DECREF(arr); return ret; @@ -917,7 +988,9 @@ { PyObject *flagobj; flagobj = PyArrayFlags_Type.tp_alloc(&PyArrayFlags_Type, 0); - if (flagobj == NULL) return NULL; + if (flagobj == NULL) { + return NULL; + } ((PyArrayFlagsObject *)flagobj)->arr = NULL; ((PyArrayFlagsObject *)flagobj)->flags = self->flags; return flagobj; @@ -1016,9 +1089,13 @@ PyObject *inter; arr = (PyArrayObject *)PyArray_FromScalar(self, NULL); - if (arr == NULL) return NULL; + if (arr == NULL) { + return NULL; + } inter = PyObject_GetAttrString((PyObject *)arr, "__array_interface__"); - if (inter != NULL) PyDict_SetItemString(inter, "__ref", (PyObject *)arr); + if (inter != NULL) { + PyDict_SetItemString(inter, "__ref", (PyObject *)arr); + } Py_DECREF(arr); return inter; } @@ -1076,7 +1153,9 @@ else if (PyArray_IsScalar(self, Object)) { PyObject *obj = ((PyObjectScalarObject *)self)->obval; ret = PyObject_GetAttrString(obj, "real"); - if (ret != NULL) return ret; + if (ret != NULL) { + return ret; + } PyErr_Clear(); } Py_INCREF(self); @@ -1094,8 +1173,7 @@ char *ptr; typecode = _realdescr_fromcomplexscalar(self, &typenum); ptr = (char *)scalar_value(self, NULL); - ret = PyArray_Scalar(ptr + typecode->elsize, - typecode, NULL); + ret = PyArray_Scalar(ptr + typecode->elsize, typecode, NULL); } else if (PyArray_IsScalar(self, Object)) { PyObject *obj = ((PyObjectScalarObject *)self)->obval; @@ -1131,7 +1209,9 @@ PyObject *ret, *arr; arr = PyArray_FromScalar(self, NULL); - if (arr == NULL) return NULL; + if (arr == NULL) { + return NULL; + } ret = PyArray_IterNew(arr); Py_DECREF(arr); return ret; @@ -1279,10 +1359,11 @@ /**begin repeat - -#name=tolist, item, tostring, astype, copy, __deepcopy__, searchsorted, view, swapaxes, conj, conjugate, nonzero, flatten, ravel, fill, transpose, newbyteorder# -*/ - + * + * #name = tolist, item, tostring, astype, copy, __deepcopy__, searchsorted, + * view, swapaxes, conj, conjugate, nonzero, flatten, ravel, fill, + * transpose, newbyteorder# + */ static PyObject * gentype_ at name@(PyObject *self, PyObject *args) { @@ -1300,7 +1381,9 @@ static PyObject * gentype_squeeze(PyObject *self, PyObject *args) { - if (!PyArg_ParseTuple(args, "")) return NULL; + if (!PyArg_ParseTuple(args, "")) { + return NULL; + } Py_INCREF(self); return self; } @@ -1313,17 +1396,16 @@ { Bool inplace=FALSE; - if (!PyArg_ParseTuple(args, "|O&", PyArray_BoolConverter, &inplace)) + if (!PyArg_ParseTuple(args, "|O&", PyArray_BoolConverter, &inplace)) { return NULL; - + } if (inplace) { PyErr_SetString(PyExc_ValueError, "cannot byteswap a scalar in-place"); return NULL; } else { - /* get the data, copyswap it and pass it to a new Array scalar - */ + /* get the data, copyswap it and pass it to a new Array scalar */ char *data; int numbytes; PyArray_Descr *descr; @@ -1333,8 +1415,13 @@ numbytes = gentype_getreadbuf(self, 0, (void **)&data); descr = PyArray_DescrFromScalar(self); newmem = _pya_malloc(descr->elsize); - if (newmem == NULL) {Py_DECREF(descr); return PyErr_NoMemory();} - else memcpy(newmem, data, descr->elsize); + if (newmem == NULL) { + Py_DECREF(descr); + return PyErr_NoMemory(); + } + else { + memcpy(newmem, data, descr->elsize); + } byte_swap_vector(newmem, 1, descr->elsize); new = PyArray_Scalar(newmem, descr, NULL); _pya_free(newmem); @@ -1345,10 +1432,12 @@ /**begin repeat - -#name=take, getfield, put, repeat, tofile, mean, trace, diagonal, clip, std, var, sum, cumsum, prod, cumprod, compress, sort, argsort, round, argmax, argmin, max, min, ptp, any, all, resize, reshape, choose# -*/ - + * + * #name = take, getfield, put, repeat, tofile, mean, trace, diagonal, clip, + * std, var, sum, cumsum, prod, cumprod, compress, sort, argsort, + * round, argmax, argmin, max, min, ptp, any, all, resize, reshape, + * choose# + */ static PyObject * gentype_ at name@(PyObject *self, PyObject *args, PyObject *kwds) { @@ -1362,7 +1451,9 @@ PyObject *ret; ret = gentype_generic_method((PyObject *)self, args, kwds, "getfield"); - if (!ret) return ret; + if (!ret) { + return ret; + } if (PyArray_IsScalar(ret, Generic) && \ (!PyArray_IsScalar(ret, Void))) { PyArray_Descr *new; @@ -1388,7 +1479,7 @@ static PyObject * voidtype_setfield(PyVoidScalarObject *self, PyObject *args, PyObject *kwds) { - PyArray_Descr *typecode=NULL; + PyArray_Descr *typecode = NULL; int offset = 0; PyObject *value, *src; int mysize; @@ -1396,8 +1487,7 @@ static char *kwlist[] = {"value", "dtype", "offset", 0}; if ((self->flags & WRITEABLE) != WRITEABLE) { - PyErr_SetString(PyExc_RuntimeError, - "Can't write to memory"); + PyErr_SetString(PyExc_RuntimeError, "Can't write to memory"); return NULL; } if (!PyArg_ParseTupleAndKeywords(args, kwds, "OO&|i", kwlist, @@ -1432,7 +1522,9 @@ else { /* Copy data from value to correct place in dptr */ src = PyArray_FromAny(value, typecode, 0, 0, CARRAY, NULL); - if (src == NULL) return NULL; + if (src == NULL) { + return NULL; + } typecode->f->copyswap(dptr, PyArray_DATA(src), !PyArray_ISNBO(self->descr->byteorder), src); @@ -1446,38 +1538,44 @@ static PyObject * gentype_reduce(PyObject *self, PyObject *NPY_UNUSED(args)) { - PyObject *ret=NULL, *obj=NULL, *mod=NULL; + PyObject *ret = NULL, *obj = NULL, *mod = NULL; const char *buffer; Py_ssize_t buflen; /* Return a tuple of (callable object, arguments) */ - ret = PyTuple_New(2); - if (ret == NULL) return NULL; + if (ret == NULL) { + return NULL; + } if (PyObject_AsReadBuffer(self, (const void **)&buffer, &buflen)<0) { - Py_DECREF(ret); return NULL; + Py_DECREF(ret); + return NULL; } mod = PyImport_ImportModule("numpy.core.multiarray"); - if (mod == NULL) return NULL; + if (mod == NULL) { + return NULL; + } obj = PyObject_GetAttrString(mod, "scalar"); Py_DECREF(mod); - if (obj == NULL) return NULL; + if (obj == NULL) { + return NULL; + } PyTuple_SET_ITEM(ret, 0, obj); obj = PyObject_GetAttrString((PyObject *)self, "dtype"); if (PyArray_IsScalar(self, Object)) { mod = ((PyObjectScalarObject *)self)->obval; - PyTuple_SET_ITEM(ret, 1, - Py_BuildValue("NO", obj, mod)); + PyTuple_SET_ITEM(ret, 1, Py_BuildValue("NO", obj, mod)); } else { #ifndef Py_UNICODE_WIDE - /* We need to expand the buffer so that we always write - UCS4 to disk for pickle of unicode scalars. - - This could be in a unicode_reduce function, but - that would require re-factoring. - */ - int alloc=0; + /* + * We need to expand the buffer so that we always write + * UCS4 to disk for pickle of unicode scalars. + * + * This could be in a unicode_reduce function, but + * that would require re-factoring. + */ + int alloc = 0; char *tmp; int newlen; @@ -1526,13 +1624,16 @@ static PyObject * gentype_dump(PyObject *self, PyObject *args) { - PyObject *file=NULL; + PyObject *file = NULL; int ret; - if (!PyArg_ParseTuple(args, "O", &file)) + if (!PyArg_ParseTuple(args, "O", &file)) { return NULL; + } ret = PyArray_Dump(self, file, 2); - if (ret < 0) return NULL; + if (ret < 0) { + return NULL; + } Py_INCREF(Py_None); return Py_None; } @@ -1540,15 +1641,17 @@ static PyObject * gentype_dumps(PyObject *self, PyObject *args) { - if (!PyArg_ParseTuple(args, "")) + if (!PyArg_ParseTuple(args, "")) { return NULL; + } return PyArray_Dumps(self, 2); } /* setting flags cannot be done for scalars */ static PyObject * -gentype_setflags(PyObject *NPY_UNUSED(self), PyObject *NPY_UNUSED(args), PyObject *NPY_UNUSED(kwds)) +gentype_setflags(PyObject *NPY_UNUSED(self), PyObject *NPY_UNUSED(args), + PyObject *NPY_UNUSED(kwds)) { Py_INCREF(Py_None); return Py_None; @@ -1776,7 +1879,9 @@ } flist = self->descr->names; m = PyTuple_GET_SIZE(flist); - if (n < 0) n += m; + if (n < 0) { + n += m; + } if (n < 0 || n >= m) { PyErr_Format(PyExc_IndexError, "invalid index (%d)", (int) n); return NULL; @@ -1803,14 +1908,17 @@ if (PyString_Check(ind) || PyUnicode_Check(ind)) { /* look up in fields */ fieldinfo = PyDict_GetItem(self->descr->fields, ind); - if (!fieldinfo) goto fail; + if (!fieldinfo) { + goto fail; + } return voidtype_getfield(self, fieldinfo, NULL); } /* try to convert it to a number */ n = PyArray_PyIntAsIntp(ind); - if (error_converting(n)) goto fail; - + if (error_converting(n)) { + goto fail; + } return voidtype_item(self, (Py_ssize_t)n); fail: @@ -1833,8 +1941,12 @@ flist = self->descr->names; m = PyTuple_GET_SIZE(flist); - if (n < 0) n += m; - if (n < 0 || n >= m) goto fail; + if (n < 0) { + n += m; + } + if (n < 0 || n >= m) { + goto fail; + } fieldinfo = PyDict_GetItem(self->descr->fields, PyTuple_GET_ITEM(flist, n)); newtup = Py_BuildValue("(OOO)", val, @@ -1842,7 +1954,9 @@ PyTuple_GET_ITEM(fieldinfo, 1)); res = voidtype_setfield(self, newtup, NULL); Py_DECREF(newtup); - if (!res) return -1; + if (!res) { + return -1; + } Py_DECREF(res); return 0; @@ -1868,20 +1982,26 @@ if (PyString_Check(ind) || PyUnicode_Check(ind)) { /* look up in fields */ fieldinfo = PyDict_GetItem(self->descr->fields, ind); - if (!fieldinfo) goto fail; + if (!fieldinfo) { + goto fail; + } newtup = Py_BuildValue("(OOO)", val, PyTuple_GET_ITEM(fieldinfo, 0), PyTuple_GET_ITEM(fieldinfo, 1)); res = voidtype_setfield(self, newtup, NULL); Py_DECREF(newtup); - if (!res) return -1; + if (!res) { + return -1; + } Py_DECREF(res); return 0; } /* try to convert it to a number */ n = PyArray_PyIntAsIntp(ind); - if (error_converting(n)) goto fail; + if (error_converting(n)) { + goto fail; + } return voidtype_ass_item(self, (Py_ssize_t)n, val); fail: @@ -1891,35 +2011,35 @@ static PyMappingMethods voidtype_as_mapping = { #if PY_VERSION_HEX >= 0x02050000 - (lenfunc)voidtype_length, /*mp_length*/ + (lenfunc)voidtype_length, /*mp_length*/ #else - (inquiry)voidtype_length, /*mp_length*/ + (inquiry)voidtype_length, /*mp_length*/ #endif - (binaryfunc)voidtype_subscript, /*mp_subscript*/ - (objobjargproc)voidtype_ass_subscript, /*mp_ass_subscript*/ + (binaryfunc)voidtype_subscript, /*mp_subscript*/ + (objobjargproc)voidtype_ass_subscript, /*mp_ass_subscript*/ }; static PySequenceMethods voidtype_as_sequence = { #if PY_VERSION_HEX >= 0x02050000 - (lenfunc)voidtype_length, /*sq_length*/ - 0, /*sq_concat*/ - 0, /*sq_repeat*/ - (ssizeargfunc)voidtype_item, /*sq_item*/ - 0, /*sq_slice*/ - (ssizeobjargproc)voidtype_ass_item, /*sq_ass_item*/ + (lenfunc)voidtype_length, /*sq_length*/ + 0, /*sq_concat*/ + 0, /*sq_repeat*/ + (ssizeargfunc)voidtype_item, /*sq_item*/ + 0, /*sq_slice*/ + (ssizeobjargproc)voidtype_ass_item, /*sq_ass_item*/ #else - (inquiry)voidtype_length, /*sq_length*/ - 0, /*sq_concat*/ - 0, /*sq_repeat*/ - (intargfunc)voidtype_item, /*sq_item*/ - 0, /*sq_slice*/ - (intobjargproc)voidtype_ass_item, /*sq_ass_item*/ + (inquiry)voidtype_length, /*sq_length*/ + 0, /*sq_concat*/ + 0, /*sq_repeat*/ + (intargfunc)voidtype_item, /*sq_item*/ + 0, /*sq_slice*/ + (intobjargproc)voidtype_ass_item, /*sq_ass_item*/ #endif - 0, /* ssq_ass_slice */ - 0, /* sq_contains */ - 0, /* sq_inplace_concat */ - 0, /* sq_inplace_repeat */ + 0, /* ssq_ass_slice */ + 0, /* sq_contains */ + 0, /* sq_inplace_concat */ + 0, /* sq_inplace_repeat */ }; @@ -1970,9 +2090,10 @@ static Py_ssize_t gentype_getcharbuf(PyObject *self, Py_ssize_t segment, constchar **ptrptr) { - if (PyArray_IsScalar(self, String) || \ - PyArray_IsScalar(self, Unicode)) + if (PyArray_IsScalar(self, String) || + PyArray_IsScalar(self, Unicode)) { return gentype_getreadbuf(self, segment, (void **)ptrptr); + } else { PyErr_SetString(PyExc_TypeError, "Non-character array cannot be interpreted "\ @@ -1983,10 +2104,10 @@ static PyBufferProcs gentype_as_buffer = { - gentype_getreadbuf, /*bf_getreadbuffer*/ - NULL, /*bf_getwritebuffer*/ - gentype_getsegcount, /*bf_getsegcount*/ - gentype_getcharbuf, /*bf_getcharbuffer*/ + gentype_getreadbuf, /* bf_getreadbuffer*/ + NULL, /* bf_getwritebuffer*/ + gentype_getsegcount, /* bf_getsegcount*/ + gentype_getcharbuf, /* bf_getcharbuffer*/ }; @@ -1995,69 +2116,70 @@ static PyTypeObject PyGenericArrType_Type = { PyObject_HEAD_INIT(NULL) - 0, /*ob_size*/ - "numpy.generic", /*tp_name*/ - sizeof(PyObject), /*tp_basicsize*/ - 0, /* tp_itemsize */ + 0, /* ob_size*/ + "numpy.generic", /* tp_name*/ + sizeof(PyObject), /* tp_basicsize*/ + 0, /* tp_itemsize */ /* methods */ - 0, /* tp_dealloc */ - 0, /* tp_print */ - 0, /* tp_getattr */ - 0, /* tp_setattr */ - 0, /* tp_compare */ - 0, /* tp_repr */ - 0, /* tp_as_number */ - 0, /* tp_as_sequence */ - 0, /* tp_as_mapping */ - 0, /* tp_hash */ - 0, /* tp_call */ - 0, /* tp_str */ - 0, /* tp_getattro */ - 0, /* tp_setattro */ - 0, /* tp_as_buffer */ - 0, /* tp_flags */ - 0, /* tp_doc */ - 0, /* tp_traverse */ - 0, /* tp_clear */ - 0, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ - 0, /* tp_iter */ - 0, /* tp_iternext */ - 0, /* tp_methods */ - 0, /* tp_members */ - 0, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - 0, /* tp_init */ - 0, /* tp_alloc */ - 0, /* tp_new */ - 0, /* tp_free */ - 0, /* tp_is_gc */ - 0, /* tp_bases */ - 0, /* tp_mro */ - 0, /* tp_cache */ - 0, /* tp_subclasses */ - 0, /* tp_weaklist */ - 0, /* tp_del */ + 0, /* tp_dealloc */ + 0, /* tp_print */ + 0, /* tp_getattr */ + 0, /* tp_setattr */ + 0, /* tp_compare */ + 0, /* tp_repr */ + 0, /* tp_as_number */ + 0, /* tp_as_sequence */ + 0, /* tp_as_mapping */ + 0, /* tp_hash */ + 0, /* tp_call */ + 0, /* tp_str */ + 0, /* tp_getattro */ + 0, /* tp_setattro */ + 0, /* tp_as_buffer */ + 0, /* tp_flags */ + 0, /* tp_doc */ + 0, /* tp_traverse */ + 0, /* tp_clear */ + 0, /* tp_richcompare */ + 0, /* tp_weaklistoffset */ + 0, /* tp_iter */ + 0, /* tp_iternext */ + 0, /* tp_methods */ + 0, /* tp_members */ + 0, /* tp_getset */ + 0, /* tp_base */ + 0, /* tp_dict */ + 0, /* tp_descr_get */ + 0, /* tp_descr_set */ + 0, /* tp_dictoffset */ + 0, /* tp_init */ + 0, /* tp_alloc */ + 0, /* tp_new */ + 0, /* tp_free */ + 0, /* tp_is_gc */ + 0, /* tp_bases */ + 0, /* tp_mro */ + 0, /* tp_cache */ + 0, /* tp_subclasses */ + 0, /* tp_weaklist */ + 0, /* tp_del */ #ifdef COUNT_ALLOCS - /* these must be last and never explicitly initialized */ - 0, /* tp_allocs */ - 0, /* tp_frees */ - 0, /* tp_maxalloc */ - 0, /* tp_prev */ - 0, /* *tp_next */ + /* these must be last and never explicitly initialized */ + 0, /* tp_allocs */ + 0, /* tp_frees */ + 0, /* tp_maxalloc */ + 0, /* tp_prev */ + 0, /* *tp_next */ #endif }; static void void_dealloc(PyVoidScalarObject *v) { - if (v->flags & OWNDATA) + if (v->flags & OWNDATA) { PyDataMem_FREE(v->obval); + } Py_XDECREF(v->descr); Py_XDECREF(v->base); v->ob_type->tp_free(v); @@ -2070,11 +2192,13 @@ v->ob_type->tp_free(v); } -/* string and unicode inherit from Python Type first and so GET_ITEM is different to get to the Python Type. +/* + * string and unicode inherit from Python Type first and so GET_ITEM + * is different to get to the Python Type. + * + * ok is a work-around for a bug in complex_new that doesn't allocate + * memory from the sub-types memory allocator. */ -/* ok is a work-around for a bug in complex_new that doesn't allocate - memory from the sub-types memory allocator. -*/ #define _WORK(num) \ if (type->tp_bases && (PyTuple_GET_SIZE(type->tp_bases)==2)) { \ @@ -2093,14 +2217,18 @@ #define _WORKz _WORK(0) #define _WORK0 -/**begin repeat1 -#name=byte, short, int, long, longlong, ubyte, ushort, uint, ulong, ulonglong, float, double, longdouble, cfloat, cdouble, clongdouble, string, unicode, object# -#TYPE=BYTE, SHORT, INT, LONG, LONGLONG, UBYTE, USHORT, UINT, ULONG, ULONGLONG, FLOAT, DOUBLE, LONGDOUBLE, CFLOAT, CDOUBLE, CLONGDOUBLE, STRING, UNICODE, OBJECT# -#work=0,0,1,1,1,0,0,0,0,0,0,1,0,0,0,0,z,z,0# -#default=0*16,1*2,2# -*/ +/**begin repeat + * #name = byte, short, int, long, longlong, ubyte, ushort, uint, ulong, + * ulonglong, float, double, longdouble, cfloat, cdouble, clongdouble, + * string, unicode, object# + * #TYPE = BYTE, SHORT, INT, LONG, LONGLONG, UBYTE, USHORT, UINT, ULONG, + * ULONGLONG, FLOAT, DOUBLE, LONGDOUBLE, CFLOAT, CDOUBLE, CLONGDOUBLE, + * STRING, UNICODE, OBJECT# + * #work = 0,0,1,1,1,0,0,0,0,0,0,1,0,0,0,0,z,z,0# + * #default = 0*16,1*2,2# + */ -#define _NPY_UNUSED2_1 +#define _NPY_UNUSED2_1 #define _NPY_UNUSED2_z #define _NPY_UNUSED2_0 NPY_UNUSED #define _NPY_UNUSED1_0 @@ -2119,17 +2247,20 @@ void *dest, *src; #endif - /* allow base-class (if any) to do conversion */ - /* If successful, this will jump to finish: */ + /* + * allow base-class (if any) to do conversion + * If successful, this will jump to finish: + */ _WORK at work@ if (!PyArg_ParseTuple(args, "|O", &obj)) { return NULL; } typecode = PyArray_DescrFromType(PyArray_ at TYPE@); - /* typecode is new reference and stolen by - PyArray_FromAny but not PyArray_Scalar - */ + /* + * typecode is new reference and stolen by + * PyArray_FromAny but not PyArray_Scalar + */ if (obj == NULL) { #if @default@ == 0 char *mem = malloc(sizeof(@name@)); @@ -2140,30 +2271,32 @@ #elif @default@ == 1 robj = PyArray_Scalar(NULL, typecode, NULL); #elif @default@ == 2 - Py_INCREF(Py_None); - robj = Py_None; + Py_INCREF(Py_None); + robj = Py_None; #endif - Py_DECREF(typecode); + Py_DECREF(typecode); goto finish; } - /* It is expected at this point that robj is a PyArrayScalar - (even for Object Data Type) - */ + /* + * It is expected at this point that robj is a PyArrayScalar + * (even for Object Data Type) + */ arr = PyArray_FromAny(obj, typecode, 0, 0, FORCECAST, NULL); if ((arr == NULL) || (PyArray_NDIM(arr) > 0)) { return arr; } /* 0-d array */ robj = PyArray_ToScalar(PyArray_DATA(arr), (NPY_AO *)arr); - Py_DECREF(arr); + Py_DECREF(arr); finish: - -#if @default@ == 2 /* In OBJECT case, robj is no longer a - PyArrayScalar at this point but the - remaining code assumes it is - */ + /* + * In OBJECT case, robj is no longer a + * PyArrayScalar at this point but the + * remaining code assumes it is + */ +#if @default@ == 2 return robj; #else /* Normal return */ @@ -2171,9 +2304,11 @@ return robj; } - /* This return path occurs when the requested type is not created - but another scalar object is created instead (i.e. when - the base-class does the conversion in _WORK macro) */ + /* + * This return path occurs when the requested type is not created + * but another scalar object is created instead (i.e. when + * the base-class does the conversion in _WORK macro) + */ /* Need to allocate new type and copy data-area over */ if (type->tp_itemsize) { @@ -2196,7 +2331,7 @@ *((npy_ at name@ *)dest) = *((npy_ at name@ *)src); #elif @default@ == 1 /* unicode and strings */ if (itemsize == 0) { /* unicode */ - itemsize = ((PyUnicodeObject *)robj)->length * sizeof(Py_UNICODE); + itemsize = ((PyUnicodeObject *)robj)->length * sizeof(Py_UNICODE); } memcpy(dest, src, itemsize); /* @default@ == 2 won't get here */ @@ -2216,16 +2351,21 @@ static PyObject * bool_arrtype_new(PyTypeObject *NPY_UNUSED(type), PyObject *args, PyObject *NPY_UNUSED(kwds)) { - PyObject *obj=NULL; + PyObject *obj = NULL; PyObject *arr; - if (!PyArg_ParseTuple(args, "|O", &obj)) return NULL; - if (obj == NULL) + if (!PyArg_ParseTuple(args, "|O", &obj)) { + return NULL; + } + if (obj == NULL) { PyArrayScalar_RETURN_FALSE; - if (obj == Py_False) + } + if (obj == Py_False) { PyArrayScalar_RETURN_FALSE; - if (obj == Py_True) + } + if (obj == Py_True) { PyArrayScalar_RETURN_TRUE; + } arr = PyArray_FROM_OTF(obj, PyArray_BOOL, FORCECAST); if (arr && 0 == PyArray_NDIM(arr)) { Bool val = *((Bool *)PyArray_DATA(arr)); @@ -2238,27 +2378,30 @@ static PyObject * bool_arrtype_and(PyObject *a, PyObject *b) { - if (PyArray_IsScalar(a, Bool) && PyArray_IsScalar(b, Bool)) + if (PyArray_IsScalar(a, Bool) && PyArray_IsScalar(b, Bool)) { PyArrayScalar_RETURN_BOOL_FROM_LONG ((a == PyArrayScalar_True)&(b == PyArrayScalar_True)); + } return PyGenericArrType_Type.tp_as_number->nb_and(a, b); } static PyObject * bool_arrtype_or(PyObject *a, PyObject *b) { - if (PyArray_IsScalar(a, Bool) && PyArray_IsScalar(b, Bool)) + if (PyArray_IsScalar(a, Bool) && PyArray_IsScalar(b, Bool)) { PyArrayScalar_RETURN_BOOL_FROM_LONG ((a == PyArrayScalar_True)|(b == PyArrayScalar_True)); + } return PyGenericArrType_Type.tp_as_number->nb_or(a, b); } static PyObject * bool_arrtype_xor(PyObject *a, PyObject *b) { - if (PyArray_IsScalar(a, Bool) && PyArray_IsScalar(b, Bool)) + if (PyArray_IsScalar(a, Bool) && PyArray_IsScalar(b, Bool)) { PyArrayScalar_RETURN_BOOL_FROM_LONG ((a == PyArrayScalar_True)^(b == PyArrayScalar_True)); + } return PyGenericArrType_Type.tp_as_number->nb_xor(a, b); } @@ -2270,10 +2413,13 @@ #if PY_VERSION_HEX >= 0x02050000 /**begin repeat -#name=byte, short, int, long, ubyte, ushort, longlong, uint, ulong, ulonglong# -#Name=Byte, Short, Int, Long, UByte, UShort, LongLong, UInt, ULong, ULongLong# -#type=PyInt_FromLong*6, PyLong_FromLongLong*1, PyLong_FromUnsignedLong*2, PyLong_FromUnsignedLongLong# -*/ + * #name = byte, short, int, long, ubyte, ushort, longlong, uint, ulong, + * ulonglong# + * #Name = Byte, Short, Int, Long, UByte, UShort, LongLong, UInt, ULong, + * ULongLong# + * #type = PyInt_FromLong*6, PyLong_FromLongLong*1, PyLong_FromUnsignedLong*2, + * PyLong_FromUnsignedLongLong# + */ static PyNumberMethods @name at _arrtype_as_number; static PyObject * @name at _index(PyObject *self) @@ -2281,6 +2427,7 @@ return @type@(PyArrayScalar_VAL(self, @Name@)); } /**end repeat**/ + static PyObject * bool_index(PyObject *a) { @@ -2290,50 +2437,50 @@ /* Arithmetic methods -- only so we can override &, |, ^. */ static PyNumberMethods bool_arrtype_as_number = { - 0, /* nb_add */ - 0, /* nb_subtract */ - 0, /* nb_multiply */ - 0, /* nb_divide */ - 0, /* nb_remainder */ - 0, /* nb_divmod */ - 0, /* nb_power */ - 0, /* nb_negative */ - 0, /* nb_positive */ - 0, /* nb_absolute */ - (inquiry)bool_arrtype_nonzero, /* nb_nonzero */ - 0, /* nb_invert */ - 0, /* nb_lshift */ - 0, /* nb_rshift */ - (binaryfunc)bool_arrtype_and, /* nb_and */ - (binaryfunc)bool_arrtype_xor, /* nb_xor */ - (binaryfunc)bool_arrtype_or, /* nb_or */ - 0, /* nb_coerce */ - 0, /* nb_int */ - 0, /* nb_long */ - 0, /* nb_float */ - 0, /* nb_oct */ - 0, /* nb_hex */ + 0, /* nb_add */ + 0, /* nb_subtract */ + 0, /* nb_multiply */ + 0, /* nb_divide */ + 0, /* nb_remainder */ + 0, /* nb_divmod */ + 0, /* nb_power */ + 0, /* nb_negative */ + 0, /* nb_positive */ + 0, /* nb_absolute */ + (inquiry)bool_arrtype_nonzero, /* nb_nonzero */ + 0, /* nb_invert */ + 0, /* nb_lshift */ + 0, /* nb_rshift */ + (binaryfunc)bool_arrtype_and, /* nb_and */ + (binaryfunc)bool_arrtype_xor, /* nb_xor */ + (binaryfunc)bool_arrtype_or, /* nb_or */ + 0, /* nb_coerce */ + 0, /* nb_int */ + 0, /* nb_long */ + 0, /* nb_float */ + 0, /* nb_oct */ + 0, /* nb_hex */ /* Added in release 2.0 */ - 0, /* nb_inplace_add */ - 0, /* nb_inplace_subtract */ - 0, /* nb_inplace_multiply */ - 0, /* nb_inplace_divide */ - 0, /* nb_inplace_remainder */ - 0, /* nb_inplace_power */ - 0, /* nb_inplace_lshift */ - 0, /* nb_inplace_rshift */ - 0, /* nb_inplace_and */ - 0, /* nb_inplace_xor */ - 0, /* nb_inplace_or */ + 0, /* nb_inplace_add */ + 0, /* nb_inplace_subtract */ + 0, /* nb_inplace_multiply */ + 0, /* nb_inplace_divide */ + 0, /* nb_inplace_remainder */ + 0, /* nb_inplace_power */ + 0, /* nb_inplace_lshift */ + 0, /* nb_inplace_rshift */ + 0, /* nb_inplace_and */ + 0, /* nb_inplace_xor */ + 0, /* nb_inplace_or */ /* Added in release 2.2 */ /* The following require the Py_TPFLAGS_HAVE_CLASS flag */ - 0, /* nb_floor_divide */ - 0, /* nb_true_divide */ - 0, /* nb_inplace_floor_divide */ - 0, /* nb_inplace_true_divide */ + 0, /* nb_floor_divide */ + 0, /* nb_true_divide */ + 0, /* nb_inplace_floor_divide */ + 0, /* nb_inplace_true_divide */ /* Added in release 2.5 */ #if PY_VERSION_HEX >= 0x02050000 - 0, /* nb_index */ + 0, /* nb_index */ #endif }; @@ -2341,18 +2488,20 @@ void_arrtype_new(PyTypeObject *type, PyObject *args, PyObject *NPY_UNUSED(kwds)) { PyObject *obj, *arr; - ulonglong memu=1; - PyObject *new=NULL; + ulonglong memu = 1; + PyObject *new = NULL; char *destptr; - if (!PyArg_ParseTuple(args, "O", &obj)) return NULL; - /* For a VOID scalar first see if obj is an integer or long - and create new memory of that size (filled with 0) for the scalar - */ - - if (PyLong_Check(obj) || PyInt_Check(obj) || \ + if (!PyArg_ParseTuple(args, "O", &obj)) { + return NULL; + } + /* + * For a VOID scalar first see if obj is an integer or long + * and create new memory of that size (filled with 0) for the scalar + */ + if (PyLong_Check(obj) || PyInt_Check(obj) || PyArray_IsScalar(obj, Integer) || - (PyArray_Check(obj) && PyArray_NDIM(obj)==0 && \ + (PyArray_Check(obj) && PyArray_NDIM(obj)==0 && PyArray_ISINTEGER(obj))) { new = obj->ob_type->tp_as_number->nb_long(obj); } @@ -2368,7 +2517,9 @@ return NULL; } destptr = PyDataMem_NEW((int) memu); - if (destptr == NULL) return PyErr_NoMemory(); + if (destptr == NULL) { + return PyErr_NoMemory(); + } ret = type->tp_alloc(type, 0); if (ret == NULL) { PyDataMem_FREE(destptr); @@ -2376,8 +2527,8 @@ } ((PyVoidScalarObject *)ret)->obval = destptr; ((PyVoidScalarObject *)ret)->ob_size = (int) memu; - ((PyVoidScalarObject *)ret)->descr = \ - PyArray_DescrNewFromType(PyArray_VOID); + ((PyVoidScalarObject *)ret)->descr = + PyArray_DescrNewFromType(PyArray_VOID); ((PyVoidScalarObject *)ret)->descr->elsize = (int) memu; ((PyVoidScalarObject *)ret)->flags = BEHAVED | OWNDATA; ((PyVoidScalarObject *)ret)->base = NULL; @@ -2393,8 +2544,8 @@ /**************** Define Hash functions ********************/ /**begin repeat -#lname=bool,ubyte,ushort# -#name=Bool,UByte, UShort# + * #lname = bool,ubyte,ushort# + * #name = Bool,UByte, UShort# */ static long @lname at _arrtype_hash(PyObject *obj) @@ -2404,14 +2555,16 @@ /**end repeat**/ /**begin repeat -#lname=byte,short,uint,ulong# -#name=Byte,Short,UInt,ULong# + * #lname=byte,short,uint,ulong# + * #name=Byte,Short,UInt,ULong# */ static long @lname at _arrtype_hash(PyObject *obj) { long x = (long)(((Py at name@ScalarObject *)obj)->obval); - if (x == -1) x=-2; + if (x == -1) { + x = -2; + } return x; } /**end repeat**/ @@ -2421,16 +2574,18 @@ int_arrtype_hash(PyObject *obj) { long x = (long)(((PyIntScalarObject *)obj)->obval); - if (x == -1) x=-2; + if (x == -1) { + x = -2; + } return x; } #endif /**begin repeat -#char=,u# -#Char=,U# -#ext=&& (x >= LONG_MIN),# -*/ + * #char = ,u# + * #Char = ,U# + * #ext = && (x >= LONG_MIN),# + */ #if SIZEOF_LONG != SIZEOF_LONGLONG /* we assume SIZEOF_LONGLONG=2*SIZEOF_LONG */ static long @@ -2451,7 +2606,9 @@ both.v = x; y = both.hashvals[0] + (1000003)*both.hashvals[1]; } - if (y == -1) y = -2; + if (y == -1) { + y = -2; + } return y; } #endif @@ -2462,7 +2619,9 @@ ulonglong_arrtype_hash(PyObject *obj) { long x = (long)(((PyULongLongScalarObject *)obj)->obval); - if (x == -1) x=-2; + if (x == -1) { + x = -2; + } return x; } #endif @@ -2470,9 +2629,10 @@ /* Wrong thing to do for longdouble, but....*/ + /**begin repeat -#lname=float, longdouble# -#name=Float, LongDouble# + * #lname = float, longdouble# + * #name = Float, LongDouble# */ static long @lname at _arrtype_hash(PyObject *obj) @@ -2485,16 +2645,21 @@ c at lname@_arrtype_hash(PyObject *obj) { long hashreal, hashimag, combined; - hashreal = _Py_HashDouble((double) \ + hashreal = _Py_HashDouble((double) (((PyC at name@ScalarObject *)obj)->obval).real); - if (hashreal == -1) return -1; - hashimag = _Py_HashDouble((double) \ + if (hashreal == -1) { + return -1; + } + hashimag = _Py_HashDouble((double) (((PyC at name@ScalarObject *)obj)->obval).imag); - if (hashimag == -1) return -1; - + if (hashimag == -1) { + return -1; + } combined = hashreal + 1000003 * hashimag; - if (combined == -1) combined = -2; + if (combined == -1) { + combined = -2; + } return combined; } /**end repeat**/ @@ -2520,7 +2685,9 @@ /* first look in object and then hand off to generic type */ res = PyObject_GenericGetAttr(obj->obval, attr); - if (res) return res; + if (res) { + return res; + } PyErr_Clear(); return PyObject_GenericGetAttr((PyObject *)obj, attr); } @@ -2531,7 +2698,9 @@ /* first look in object and then hand off to generic type */ res = PyObject_GenericSetAttr(obj->obval, attr, val); - if (res >= 0) return res; + if (res >= 0) { + return res; + } PyErr_Clear(); return PyObject_GenericSetAttr((PyObject *)obj, attr, val); } @@ -2587,27 +2756,27 @@ static PySequenceMethods object_arrtype_as_sequence = { #if PY_VERSION_HEX >= 0x02050000 - (lenfunc)object_arrtype_length, /*sq_length*/ - (binaryfunc)object_arrtype_concat, /*sq_concat*/ - (ssizeargfunc)object_arrtype_repeat, /*sq_repeat*/ - 0, /*sq_item*/ - 0, /*sq_slice*/ - 0, /* sq_ass_item */ - 0, /* sq_ass_slice */ - (objobjproc)object_arrtype_contains, /* sq_contains */ - (binaryfunc)object_arrtype_inplace_concat, /* sq_inplace_concat */ - (ssizeargfunc)object_arrtype_inplace_repeat, /* sq_inplace_repeat */ + (lenfunc)object_arrtype_length, /*sq_length*/ + (binaryfunc)object_arrtype_concat, /*sq_concat*/ + (ssizeargfunc)object_arrtype_repeat, /*sq_repeat*/ + 0, /*sq_item*/ + 0, /*sq_slice*/ + 0, /* sq_ass_item */ + 0, /* sq_ass_slice */ + (objobjproc)object_arrtype_contains, /* sq_contains */ + (binaryfunc)object_arrtype_inplace_concat, /* sq_inplace_concat */ + (ssizeargfunc)object_arrtype_inplace_repeat, /* sq_inplace_repeat */ #else - (inquiry)object_arrtype_length, /*sq_length*/ - (binaryfunc)object_arrtype_concat, /*sq_concat*/ - (intargfunc)object_arrtype_repeat, /*sq_repeat*/ - 0, /*sq_item*/ - 0, /*sq_slice*/ - 0, /* sq_ass_item */ - 0, /* sq_ass_slice */ - (objobjproc)object_arrtype_contains, /* sq_contains */ - (binaryfunc)object_arrtype_inplace_concat, /* sq_inplace_concat */ - (intargfunc)object_arrtype_inplace_repeat, /* sq_inplace_repeat */ + (inquiry)object_arrtype_length, /*sq_length*/ + (binaryfunc)object_arrtype_concat, /*sq_concat*/ + (intargfunc)object_arrtype_repeat, /*sq_repeat*/ + 0, /*sq_item*/ + 0, /*sq_slice*/ + 0, /* sq_ass_item */ + 0, /* sq_ass_slice */ + (objobjproc)object_arrtype_contains, /* sq_contains */ + (binaryfunc)object_arrtype_inplace_concat, /* sq_inplace_concat */ + (intargfunc)object_arrtype_inplace_repeat, /* sq_inplace_repeat */ #endif }; @@ -2630,14 +2799,14 @@ int cnt; PyBufferProcs *pb = self->obval->ob_type->tp_as_buffer; - if (pb == NULL || \ - pb->bf_getsegcount == NULL || \ - (cnt = (*pb->bf_getsegcount)(self->obval, &newlen)) != 1) + if (pb == NULL || + pb->bf_getsegcount == NULL || + (cnt = (*pb->bf_getsegcount)(self->obval, &newlen)) != 1) { return 0; - - if (lenp) + } + if (lenp) { *lenp = newlen; - + } return cnt; } @@ -2646,14 +2815,13 @@ { PyBufferProcs *pb = self->obval->ob_type->tp_as_buffer; - if (pb == NULL || \ + if (pb == NULL || pb->bf_getreadbuffer == NULL || pb->bf_getsegcount == NULL) { PyErr_SetString(PyExc_TypeError, "expected a readable buffer object"); return -1; } - return (*pb->bf_getreadbuffer)(self->obval, segment, ptrptr); } @@ -2662,14 +2830,13 @@ { PyBufferProcs *pb = self->obval->ob_type->tp_as_buffer; - if (pb == NULL || \ + if (pb == NULL || pb->bf_getwritebuffer == NULL || pb->bf_getsegcount == NULL) { PyErr_SetString(PyExc_TypeError, "expected a writeable buffer object"); return -1; } - return (*pb->bf_getwritebuffer)(self->obval, segment, ptrptr); } @@ -2679,14 +2846,13 @@ { PyBufferProcs *pb = self->obval->ob_type->tp_as_buffer; - if (pb == NULL || \ + if (pb == NULL || pb->bf_getcharbuffer == NULL || pb->bf_getsegcount == NULL) { PyErr_SetString(PyExc_TypeError, "expected a character buffer object"); return -1; } - return (*pb->bf_getcharbuffer)(self->obval, segment, ptrptr); } @@ -2707,64 +2873,64 @@ static PyObject * object_arrtype_call(PyObjectScalarObject *obj, PyObject *args, PyObject *kwds) { - return PyObject_Call(obj->obval, args, kwds); + return PyObject_Call(obj->obval, args, kwds); } static PyTypeObject PyObjectArrType_Type = { PyObject_HEAD_INIT(NULL) - 0, /*ob_size*/ - "numpy.object_", /*tp_name*/ - sizeof(PyObjectScalarObject), /*tp_basicsize*/ - 0, /* tp_itemsize */ - (destructor)object_arrtype_dealloc, /* tp_dealloc */ - 0, /* tp_print */ - 0, /* tp_getattr */ - 0, /* tp_setattr */ - 0, /* tp_compare */ - 0, /* tp_repr */ - 0, /* tp_as_number */ - &object_arrtype_as_sequence, /* tp_as_sequence */ - &object_arrtype_as_mapping, /* tp_as_mapping */ - 0, /* tp_hash */ - (ternaryfunc)object_arrtype_call, /* tp_call */ - 0, /* tp_str */ - (getattrofunc)object_arrtype_getattro, /* tp_getattro */ - (setattrofunc)object_arrtype_setattro, /* tp_setattro */ - &object_arrtype_as_buffer, /* tp_as_buffer */ - 0, /* tp_flags */ - 0, /* tp_doc */ - 0, /* tp_traverse */ - 0, /* tp_clear */ - 0, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ - 0, /* tp_iter */ - 0, /* tp_iternext */ - 0, /* tp_methods */ - 0, /* tp_members */ - 0, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - 0, /* tp_init */ - 0, /* tp_alloc */ - 0, /* tp_new */ - 0, /* tp_free */ - 0, /* tp_is_gc */ - 0, /* tp_bases */ - 0, /* tp_mro */ - 0, /* tp_cache */ - 0, /* tp_subclasses */ - 0, /* tp_weaklist */ - 0, /* tp_del */ + 0, /* ob_size*/ + "numpy.object_", /* tp_name*/ + sizeof(PyObjectScalarObject), /* tp_basicsize*/ + 0, /* tp_itemsize */ + (destructor)object_arrtype_dealloc, /* tp_dealloc */ + 0, /* tp_print */ + 0, /* tp_getattr */ + 0, /* tp_setattr */ + 0, /* tp_compare */ + 0, /* tp_repr */ + 0, /* tp_as_number */ + &object_arrtype_as_sequence, /* tp_as_sequence */ + &object_arrtype_as_mapping, /* tp_as_mapping */ + 0, /* tp_hash */ + (ternaryfunc)object_arrtype_call, /* tp_call */ + 0, /* tp_str */ + (getattrofunc)object_arrtype_getattro, /* tp_getattro */ + (setattrofunc)object_arrtype_setattro, /* tp_setattro */ + &object_arrtype_as_buffer, /* tp_as_buffer */ + 0, /* tp_flags */ + 0, /* tp_doc */ + 0, /* tp_traverse */ + 0, /* tp_clear */ + 0, /* tp_richcompare */ + 0, /* tp_weaklistoffset */ + 0, /* tp_iter */ + 0, /* tp_iternext */ + 0, /* tp_methods */ + 0, /* tp_members */ + 0, /* tp_getset */ + 0, /* tp_base */ + 0, /* tp_dict */ + 0, /* tp_descr_get */ + 0, /* tp_descr_set */ + 0, /* tp_dictoffset */ + 0, /* tp_init */ + 0, /* tp_alloc */ + 0, /* tp_new */ + 0, /* tp_free */ + 0, /* tp_is_gc */ + 0, /* tp_bases */ + 0, /* tp_mro */ + 0, /* tp_cache */ + 0, /* tp_subclasses */ + 0, /* tp_weaklist */ + 0, /* tp_del */ #ifdef COUNT_ALLOCS /* these must be last and never explicitly initialized */ - 0, /* tp_allocs */ - 0, /* tp_frees */ - 0, /* tp_maxalloc */ - 0, /* tp_prev */ - 0, /* *tp_next */ + 0, /* tp_allocs */ + 0, /* tp_frees */ + 0, /* tp_maxalloc */ + 0, /* tp_prev */ + 0, /* *tp_next */ #endif }; @@ -2778,12 +2944,12 @@ static PyObject * gen_arrtype_subscript(PyObject *self, PyObject *key) { - /* Only [...], [...,], [, ...], - is allowed for indexing a scalar - - These return a new N-d array with a copy of - the data where N is the number of None's in . - + /* + * Only [...], [...,], [, ...], + * is allowed for indexing a scalar + * + * These return a new N-d array with a copy of + * the data where N is the number of None's in . */ PyObject *res, *ret; int N; @@ -2797,19 +2963,19 @@ "invalid index to scalar variable."); return NULL; } - - if (key == Py_Ellipsis) + if (key == Py_Ellipsis) { return res; - + } if (key == Py_None) { ret = add_new_axes_0d((PyArrayObject *)res, 1); Py_DECREF(res); return ret; } /* Must be a Tuple */ - N = count_new_axes_0d(key); - if (N < 0) return NULL; + if (N < 0) { + return NULL; + } ret = add_new_axes_0d((PyArrayObject *)res, N); Py_DECREF(res); return ret; @@ -2817,74 +2983,75 @@ /**begin repeat - * #name=bool, string, unicode, void# - * #NAME=Bool, String, Unicode, Void# - * #ex=_,_,_,# + * #name = bool, string, unicode, void# + * #NAME = Bool, String, Unicode, Void# + * #ex = _,_,_,# */ static PyTypeObject Py at NAME@ArrType_Type = { PyObject_HEAD_INIT(NULL) - 0, /*ob_size*/ - "numpy. at name@@ex@", /*tp_name*/ - sizeof(Py at NAME@ScalarObject), /*tp_basicsize*/ - 0, /* tp_itemsize */ - 0, /* tp_dealloc */ - 0, /* tp_print */ - 0, /* tp_getattr */ - 0, /* tp_setattr */ - 0, /* tp_compare */ - 0, /* tp_repr */ - 0, /* tp_as_number */ - 0, /* tp_as_sequence */ - 0, /* tp_as_mapping */ - 0, /* tp_hash */ - 0, /* tp_call */ - 0, /* tp_str */ - 0, /* tp_getattro */ - 0, /* tp_setattro */ - 0, /* tp_as_buffer */ - 0, /* tp_flags */ - 0, /* tp_doc */ - 0, /* tp_traverse */ - 0, /* tp_clear */ - 0, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ - 0, /* tp_iter */ - 0, /* tp_iternext */ - 0, /* tp_methods */ - 0, /* tp_members */ - 0, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - 0, /* tp_init */ - 0, /* tp_alloc */ - 0, /* tp_new */ - 0, /* tp_free */ - 0, /* tp_is_gc */ - 0, /* tp_bases */ - 0, /* tp_mro */ - 0, /* tp_cache */ - 0, /* tp_subclasses */ - 0, /* tp_weaklist */ - 0, /* tp_del */ + 0, /* ob_size*/ + "numpy. at name@@ex@", /* tp_name*/ + sizeof(Py at NAME@ScalarObject), /* tp_basicsize*/ + 0, /* tp_itemsize */ + 0, /* tp_dealloc */ + 0, /* tp_print */ + 0, /* tp_getattr */ + 0, /* tp_setattr */ + 0, /* tp_compare */ + 0, /* tp_repr */ + 0, /* tp_as_number */ + 0, /* tp_as_sequence */ + 0, /* tp_as_mapping */ + 0, /* tp_hash */ + 0, /* tp_call */ + 0, /* tp_str */ + 0, /* tp_getattro */ + 0, /* tp_setattro */ + 0, /* tp_as_buffer */ + 0, /* tp_flags */ + 0, /* tp_doc */ + 0, /* tp_traverse */ + 0, /* tp_clear */ + 0, /* tp_richcompare */ + 0, /* tp_weaklistoffset */ + 0, /* tp_iter */ + 0, /* tp_iternext */ + 0, /* tp_methods */ + 0, /* tp_members */ + 0, /* tp_getset */ + 0, /* tp_base */ + 0, /* tp_dict */ + 0, /* tp_descr_get */ + 0, /* tp_descr_set */ + 0, /* tp_dictoffset */ + 0, /* tp_init */ + 0, /* tp_alloc */ + 0, /* tp_new */ + 0, /* tp_free */ + 0, /* tp_is_gc */ + 0, /* tp_bases */ + 0, /* tp_mro */ + 0, /* tp_cache */ + 0, /* tp_subclasses */ + 0, /* tp_weaklist */ + 0, /* tp_del */ #ifdef COUNT_ALLOCS /* these must be last and never explicitly initialized */ - 0, /* tp_allocs */ - 0, /* tp_frees */ - 0, /* tp_maxalloc */ - 0, /* tp_prev */ - 0, /* *tp_next */ + 0, /* tp_allocs */ + 0, /* tp_frees */ + 0, /* tp_maxalloc */ + 0, /* tp_prev */ + 0, /* *tp_next */ #endif }; /**end repeat**/ /**begin repeat -#NAME=Byte, Short, Int, Long, LongLong, UByte, UShort, UInt, ULong, ULongLong, Float, Double, LongDouble# -#name=int*5, uint*5, float*3# -#CNAME=(CHAR, SHORT, INT, LONG, LONGLONG)*2, FLOAT, DOUBLE, LONGDOUBLE# -*/ + * #NAME = Byte, Short, Int, Long, LongLong, UByte, UShort, UInt, ULong, + * ULongLong, Float, Double, LongDouble# + * #name = int*5, uint*5, float*3# + * #CNAME = (CHAR, SHORT, INT, LONG, LONGLONG)*2, FLOAT, DOUBLE, LONGDOUBLE# + */ #if BITSOF_ at CNAME@ == 8 #define _THIS_SIZE "8" #elif BITSOF_ at CNAME@ == 16 @@ -2904,59 +3071,59 @@ #endif static PyTypeObject Py at NAME@ArrType_Type = { PyObject_HEAD_INIT(NULL) - 0, /*ob_size*/ - "numpy. at name@" _THIS_SIZE, /*tp_name*/ - sizeof(Py at NAME@ScalarObject), /*tp_basicsize*/ - 0, /* tp_itemsize */ - 0, /* tp_dealloc */ - 0, /* tp_print */ - 0, /* tp_getattr */ - 0, /* tp_setattr */ - 0, /* tp_compare */ - 0, /* tp_repr */ - 0, /* tp_as_number */ - 0, /* tp_as_sequence */ - 0, /* tp_as_mapping */ - 0, /* tp_hash */ - 0, /* tp_call */ - 0, /* tp_str */ - 0, /* tp_getattro */ - 0, /* tp_setattro */ - 0, /* tp_as_buffer */ - 0, /* tp_flags */ - 0, /* tp_doc */ - 0, /* tp_traverse */ - 0, /* tp_clear */ - 0, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ - 0, /* tp_iter */ - 0, /* tp_iternext */ - 0, /* tp_methods */ - 0, /* tp_members */ - 0, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - 0, /* tp_init */ - 0, /* tp_alloc */ - 0, /* tp_new */ - 0, /* tp_free */ - 0, /* tp_is_gc */ - 0, /* tp_bases */ - 0, /* tp_mro */ - 0, /* tp_cache */ - 0, /* tp_subclasses */ - 0, /* tp_weaklist */ - 0, /* tp_del */ + 0, /* ob_size*/ + "numpy. at name@" _THIS_SIZE, /* tp_name*/ + sizeof(Py at NAME@ScalarObject), /* tp_basicsize*/ + 0, /* tp_itemsize */ + 0, /* tp_dealloc */ + 0, /* tp_print */ + 0, /* tp_getattr */ + 0, /* tp_setattr */ + 0, /* tp_compare */ + 0, /* tp_repr */ + 0, /* tp_as_number */ + 0, /* tp_as_sequence */ + 0, /* tp_as_mapping */ + 0, /* tp_hash */ + 0, /* tp_call */ + 0, /* tp_str */ + 0, /* tp_getattro */ + 0, /* tp_setattro */ + 0, /* tp_as_buffer */ + 0, /* tp_flags */ + 0, /* tp_doc */ + 0, /* tp_traverse */ + 0, /* tp_clear */ + 0, /* tp_richcompare */ + 0, /* tp_weaklistoffset */ + 0, /* tp_iter */ + 0, /* tp_iternext */ + 0, /* tp_methods */ + 0, /* tp_members */ + 0, /* tp_getset */ + 0, /* tp_base */ + 0, /* tp_dict */ + 0, /* tp_descr_get */ + 0, /* tp_descr_set */ + 0, /* tp_dictoffset */ + 0, /* tp_init */ + 0, /* tp_alloc */ + 0, /* tp_new */ + 0, /* tp_free */ + 0, /* tp_is_gc */ + 0, /* tp_bases */ + 0, /* tp_mro */ + 0, /* tp_cache */ + 0, /* tp_subclasses */ + 0, /* tp_weaklist */ + 0, /* tp_del */ #ifdef COUNT_ALLOCS /* these must be last and never explicitly initialized */ - 0, /* tp_allocs */ - 0, /* tp_frees */ - 0, /* tp_maxalloc */ - 0, /* tp_prev */ - 0, /* *tp_next */ + 0, /* tp_allocs */ + 0, /* tp_frees */ + 0, /* tp_maxalloc */ + 0, /* tp_prev */ + 0, /* *tp_next */ #endif }; @@ -2972,10 +3139,10 @@ /**begin repeat -#NAME=CFloat, CDouble, CLongDouble# -#name=complex*3# -#CNAME=FLOAT, DOUBLE, LONGDOUBLE# -*/ + * #NAME = CFloat, CDouble, CLongDouble# + * #name = complex*3# + * #CNAME = FLOAT, DOUBLE, LONGDOUBLE# + */ #if BITSOF_ at CNAME@ == 16 #define _THIS_SIZE2 "16" #define _THIS_SIZE1 "32" @@ -2998,65 +3165,69 @@ #define _THIS_SIZE2 "256" #define _THIS_SIZE1 "512" #endif -static PyTypeObject Py at NAME@ArrType_Type = { + +#define _THIS_DOC "Composed of two " _THIS_SIZE2 " bit floats" + + static PyTypeObject Py at NAME@ArrType_Type = { PyObject_HEAD_INIT(NULL) - 0, /*ob_size*/ - "numpy. at name@" _THIS_SIZE1, /*tp_name*/ - sizeof(Py at NAME@ScalarObject), /*tp_basicsize*/ - 0, /*tp_itemsize*/ - 0, /*tp_dealloc*/ - 0, /*tp_print*/ - 0, /*tp_getattr*/ - 0, /*tp_setattr*/ - 0, /*tp_compare*/ - 0, /*tp_repr*/ - 0, /*tp_as_number*/ - 0, /*tp_as_sequence*/ - 0, /*tp_as_mapping*/ - 0, /*tp_hash */ - 0, /*tp_call*/ - 0, /*tp_str*/ - 0, /*tp_getattro*/ - 0, /*tp_setattro*/ - 0, /*tp_as_buffer*/ - Py_TPFLAGS_DEFAULT, /*tp_flags*/ - "Composed of two " _THIS_SIZE2 " bit floats", /* tp_doc */ - 0, /* tp_traverse */ - 0, /* tp_clear */ - 0, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ - 0, /* tp_iter */ - 0, /* tp_iternext */ - 0, /* tp_methods */ - 0, /* tp_members */ - 0, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - 0, /* tp_init */ - 0, /* tp_alloc */ - 0, /* tp_new */ - 0, /* tp_free */ - 0, /* tp_is_gc */ - 0, /* tp_bases */ - 0, /* tp_mro */ - 0, /* tp_cache */ - 0, /* tp_subclasses */ - 0, /* tp_weaklist */ - 0, /* tp_del */ + 0, /* ob_size*/ + "numpy. at name@" _THIS_SIZE1, /* tp_name*/ + sizeof(Py at NAME@ScalarObject), /* tp_basicsize*/ + 0, /* tp_itemsize*/ + 0, /* tp_dealloc*/ + 0, /* tp_print*/ + 0, /* tp_getattr*/ + 0, /* tp_setattr*/ + 0, /* tp_compare*/ + 0, /* tp_repr*/ + 0, /* tp_as_number*/ + 0, /* tp_as_sequence*/ + 0, /* tp_as_mapping*/ + 0, /* tp_hash */ + 0, /* tp_call*/ + 0, /* tp_str*/ + 0, /* tp_getattro*/ + 0, /* tp_setattro*/ + 0, /* tp_as_buffer*/ + Py_TPFLAGS_DEFAULT, /* tp_flags*/ + _THIS_DOC, /* tp_doc */ + 0, /* tp_traverse */ + 0, /* tp_clear */ + 0, /* tp_richcompare */ + 0, /* tp_weaklistoffset */ + 0, /* tp_iter */ + 0, /* tp_iternext */ + 0, /* tp_methods */ + 0, /* tp_members */ + 0, /* tp_getset */ + 0, /* tp_base */ + 0, /* tp_dict */ + 0, /* tp_descr_get */ + 0, /* tp_descr_set */ + 0, /* tp_dictoffset */ + 0, /* tp_init */ + 0, /* tp_alloc */ + 0, /* tp_new */ + 0, /* tp_free */ + 0, /* tp_is_gc */ + 0, /* tp_bases */ + 0, /* tp_mro */ + 0, /* tp_cache */ + 0, /* tp_subclasses */ + 0, /* tp_weaklist */ + 0, /* tp_del */ #ifdef COUNT_ALLOCS /* these must be last and never explicitly initialized */ - 0, /* tp_allocs */ - 0, /* tp_frees */ - 0, /* tp_maxalloc */ - 0, /* tp_prev */ - 0, /* *tp_next */ + 0, /* tp_allocs */ + 0, /* tp_frees */ + 0, /* tp_maxalloc */ + 0, /* tp_prev */ + 0, /* *tp_next */ #endif }; #undef _THIS_SIZE1 #undef _THIS_SIZE2 +#undef _THIS_DOC /**end repeat**/ @@ -3084,12 +3255,15 @@ PyBoolArrType_Type.tp_as_number = &bool_arrtype_as_number; #if PY_VERSION_HEX >= 0x02050000 - /* need to add dummy versions with filled-in nb_index - in-order for PyType_Ready to fill in .__index__() method + /* + * need to add dummy versions with filled-in nb_index + * in-order for PyType_Ready to fill in .__index__() method */ /**begin repeat -#name=byte, short, int, long, longlong, ubyte, ushort, uint, ulong, ulonglong# -#NAME=Byte, Short, Int, Long, LongLong, UByte, UShort, UInt, ULong, ULongLong# + * #name = byte, short, int, long, longlong, ubyte, ushort, + * uint, ulong, ulonglong# + * #NAME = Byte, Short, Int, Long, LongLong, UByte, UShort, + * UInt, ULong, ULongLong# */ Py at NAME@ArrType_Type.tp_as_number = &@name at _arrtype_as_number; Py at NAME@ArrType_Type.tp_as_number->nb_index = (unaryfunc)@name at _index; @@ -3113,15 +3287,19 @@ PyVoidArrType_Type.tp_as_sequence = &voidtype_as_sequence; /**begin repeat -#NAME=Number, Integer, SignedInteger, UnsignedInteger, Inexact, Floating, -ComplexFloating, Flexible, Character# + * #NAME= Number, Integer, SignedInteger, UnsignedInteger, Inexact, + * Floating, ComplexFloating, Flexible, Character# */ Py at NAME@ArrType_Type.tp_flags = BASEFLAGS; /**end repeat**/ /**begin repeat -#name=bool, byte, short, int, long, longlong, ubyte, ushort, uint, ulong, ulonglong, float, double, longdouble, cfloat, cdouble, clongdouble, string, unicode, void, object# -#NAME=Bool, Byte, Short, Int, Long, LongLong, UByte, UShort, UInt, ULong, ULongLong, Float, Double, LongDouble, CFloat, CDouble, CLongDouble, String, Unicode, Void, Object# + * #name = bool, byte, short, int, long, longlong, ubyte, ushort, uint, + * ulong, ulonglong, float, double, longdouble, cfloat, cdouble, + * clongdouble, string, unicode, void, object# + * #NAME = Bool, Byte, Short, Int, Long, LongLong, UByte, UShort, UInt, + * ULong, ULongLong, Float, Double, LongDouble, CFloat, CDouble, + * CLongDouble, String, Unicode, Void, Object# */ Py at NAME@ArrType_Type.tp_flags = BASEFLAGS; Py at NAME@ArrType_Type.tp_new = @name at _arrtype_new; @@ -3129,8 +3307,10 @@ /**end repeat**/ /**begin repeat -#name=bool, byte, short, ubyte, ushort, uint, ulong, ulonglong, float, longdouble, cfloat, clongdouble, void, object# -#NAME=Bool, Byte, Short, UByte, UShort, UInt, ULong, ULongLong, Float, LongDouble, CFloat, CLongDouble, Void, Object# + * #name = bool, byte, short, ubyte, ushort, uint, ulong, ulonglong, + * float, longdouble, cfloat, clongdouble, void, object# + * #NAME = Bool, Byte, Short, UByte, UShort, UInt, ULong, ULongLong, + * Float, LongDouble, CFloat, CLongDouble, Void, Object# */ Py at NAME@ArrType_Type.tp_hash = @name at _arrtype_hash; /**end repeat**/ @@ -3146,7 +3326,7 @@ #endif /**begin repeat - *#name = repr, str# + * #name = repr, str# */ PyFloatArrType_Type.tp_ at name@ = floattype_ at name@; PyCFloatArrType_Type.tp_ at name@ = cfloattype_ at name@; @@ -3163,15 +3343,16 @@ PyCDoubleArrType_Type.tp_print = cdoubletype_print; PyCLongDoubleArrType_Type.tp_print = clongdoubletype_print; - /* These need to be coded specially because getitem does not - return a normal Python type + /* + * These need to be coded specially because getitem does not + * return a normal Python type */ PyLongDoubleArrType_Type.tp_as_number = &longdoubletype_as_number; PyCLongDoubleArrType_Type.tp_as_number = &clongdoubletype_as_number; /**begin repeat - * #name=int, long, hex, oct, float, repr, str# - * #kind=tp_as_number->nb*5, tp*2# + * #name = int, long, hex, oct, float, repr, str# + * #kind = tp_as_number->nb*5, tp*2# */ PyLongDoubleArrType_Type. at kind@_ at name@ = longdoubletype_ at name@; PyCLongDoubleArrType_Type. at kind@_ at name@ = clongdoubletype_ at name@; @@ -3225,8 +3406,9 @@ i++; } - if (!user) return typenum; - + if (!user) { + return typenum; + } /* Search any registered types */ i = 0; while (i < PyArray_NUMUSERTYPES) { @@ -3267,36 +3449,41 @@ } /* Check the generic types */ - if ((type == (PyObject *) &PyNumberArrType_Type) || \ - (type == (PyObject *) &PyInexactArrType_Type) || \ - (type == (PyObject *) &PyFloatingArrType_Type)) + if ((type == (PyObject *) &PyNumberArrType_Type) || + (type == (PyObject *) &PyInexactArrType_Type) || + (type == (PyObject *) &PyFloatingArrType_Type)) { typenum = PyArray_DOUBLE; - else if (type == (PyObject *)&PyComplexFloatingArrType_Type) + } + else if (type == (PyObject *)&PyComplexFloatingArrType_Type) { typenum = PyArray_CDOUBLE; - else if ((type == (PyObject *)&PyIntegerArrType_Type) || \ - (type == (PyObject *)&PySignedIntegerArrType_Type)) + } + else if ((type == (PyObject *)&PyIntegerArrType_Type) || + (type == (PyObject *)&PySignedIntegerArrType_Type)) { typenum = PyArray_LONG; - else if (type == (PyObject *) &PyUnsignedIntegerArrType_Type) + } + else if (type == (PyObject *) &PyUnsignedIntegerArrType_Type) { typenum = PyArray_ULONG; - else if (type == (PyObject *) &PyCharacterArrType_Type) + } + else if (type == (PyObject *) &PyCharacterArrType_Type) { typenum = PyArray_STRING; - else if ((type == (PyObject *) &PyGenericArrType_Type) || \ - (type == (PyObject *) &PyFlexibleArrType_Type)) + } + else if ((type == (PyObject *) &PyGenericArrType_Type) || + (type == (PyObject *) &PyFlexibleArrType_Type)) { typenum = PyArray_VOID; + } if (typenum != PyArray_NOTYPE) { return PyArray_DescrFromType(typenum); } - /* Otherwise --- type is a sub-type of an array scalar - not corresponding to a registered data-type object. + /* + * Otherwise --- type is a sub-type of an array scalar + * not corresponding to a registered data-type object. */ - /* Do special thing for VOID sub-types - */ + /* Do special thing for VOID sub-types */ if (PyType_IsSubtype((PyTypeObject *)type, &PyVoidArrType_Type)) { new = PyArray_DescrNewFromType(PyArray_VOID); - conv = _arraydescr_fromobj(type); if (conv) { new->fields = conv->fields; @@ -3317,8 +3504,8 @@ } /*NUMPY_API - Return the tuple of ordered field names from a dictionary. -*/ + * Return the tuple of ordered field names from a dictionary. + */ static PyObject * PyArray_FieldNames(PyObject *fields) { @@ -3332,20 +3519,25 @@ return NULL; } _numpy_internal = PyImport_ImportModule("numpy.core._internal"); - if (_numpy_internal == NULL) return NULL; + if (_numpy_internal == NULL) { + return NULL; + } tup = PyObject_CallMethod(_numpy_internal, "_makenames_list", "O", fields); Py_DECREF(_numpy_internal); - if (tup == NULL) return NULL; + if (tup == NULL) { + return NULL; + } ret = PyTuple_GET_ITEM(tup, 0); ret = PySequence_Tuple(ret); Py_DECREF(tup); return ret; } -/* New reference */ /*NUMPY_API - Return descr object from array scalar. -*/ + * Return descr object from array scalar. + * + * New reference + */ static PyArray_Descr * PyArray_DescrFromScalar(PyObject *sc) { @@ -3361,8 +3553,9 @@ if (descr->elsize == 0) { PyArray_DESCR_REPLACE(descr); type_num = descr->type_num; - if (type_num == PyArray_STRING) + if (type_num == PyArray_STRING) { descr->elsize = PyString_GET_SIZE(sc); + } else if (type_num == PyArray_UNICODE) { descr->elsize = PyUnicode_GET_DATA_SIZE(sc); #ifndef Py_UNICODE_WIDE @@ -3378,18 +3571,20 @@ Py_XDECREF(descr->fields); descr->fields = NULL; } - if (descr->fields) + if (descr->fields) { descr->names = PyArray_FieldNames(descr->fields); + } PyErr_Clear(); } } return descr; } -/* New reference */ /*NUMPY_API - Get a typeobject from a type-number -- can return NULL. -*/ + * Get a typeobject from a type-number -- can return NULL. + * + * New reference + */ static PyObject * PyArray_TypeObjectFromType(int type) { @@ -3397,7 +3592,9 @@ PyObject *obj; descr = PyArray_DescrFromType(type); - if (descr == NULL) return NULL; + if (descr == NULL) { + return NULL; + } obj = (PyObject *)descr->typeobj; Py_XINCREF(obj); Py_DECREF(descr); From numpy-svn at scipy.org Thu Feb 19 22:41:02 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Thu, 19 Feb 2009 21:41:02 -0600 (CST) Subject: [Numpy-svn] r6423 - trunk/numpy/core/src Message-ID: <20090220034102.A5904C7C043@scipy.org> Author: charris Date: 2009-02-19 21:40:53 -0600 (Thu, 19 Feb 2009) New Revision: 6423 Modified: trunk/numpy/core/src/arrayobject.c Log: Coding style cleanups. Modified: trunk/numpy/core/src/arrayobject.c =================================================================== --- trunk/numpy/core/src/arrayobject.c 2009-02-19 23:25:01 UTC (rev 6422) +++ trunk/numpy/core/src/arrayobject.c 2009-02-20 03:40:53 UTC (rev 6423) @@ -7395,77 +7395,78 @@ static PyTypeObject PyArray_Type = { PyObject_HEAD_INIT(NULL) - 0, /*ob_size*/ - "numpy.ndarray", /*tp_name*/ - sizeof(PyArrayObject), /*tp_basicsize*/ - 0, /*tp_itemsize*/ + 0, /* ob_size */ + "numpy.ndarray", /* tp_name */ + sizeof(PyArrayObject), /* tp_basicsize */ + 0, /* tp_itemsize */ /* methods */ - (destructor)array_dealloc, /*tp_dealloc */ - (printfunc)NULL, /*tp_print*/ - 0, /*tp_getattr*/ - 0, /*tp_setattr*/ - (cmpfunc)0, /*tp_compare*/ - (reprfunc)array_repr, /*tp_repr*/ - &array_as_number, /*tp_as_number*/ - &array_as_sequence, /*tp_as_sequence*/ - &array_as_mapping, /*tp_as_mapping*/ - (hashfunc)0, /*tp_hash*/ - (ternaryfunc)0, /*tp_call*/ - (reprfunc)array_str, /*tp_str*/ - - (getattrofunc)0, /*tp_getattro*/ - (setattrofunc)0, /*tp_setattro*/ - &array_as_buffer, /*tp_as_buffer*/ + (destructor)array_dealloc, /* tp_dealloc */ + (printfunc)NULL, /* tp_print */ + 0, /* tp_getattr */ + 0, /* tp_setattr */ + (cmpfunc)0, /* tp_compare */ + (reprfunc)array_repr, /* tp_repr */ + &array_as_number, /* tp_as_number */ + &array_as_sequence, /* tp_as_sequence */ + &array_as_mapping, /* tp_as_mapping */ + (hashfunc)0, /* tp_hash */ + (ternaryfunc)0, /* tp_call */ + (reprfunc)array_str, /* tp_str */ + (getattrofunc)0, /* tp_getattro */ + (setattrofunc)0, /* tp_setattro */ + &array_as_buffer, /* tp_as_buffer */ (Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE - | Py_TPFLAGS_CHECKTYPES), /*tp_flags*/ + | Py_TPFLAGS_CHECKTYPES), /* tp_flags */ /*Documentation string */ - 0, /*tp_doc*/ + 0, /* tp_doc */ - (traverseproc)0, /*tp_traverse */ - (inquiry)0, /*tp_clear */ - (richcmpfunc)array_richcompare, /*tp_richcompare */ - offsetof(PyArrayObject, weakreflist), /*tp_weaklistoffset */ + (traverseproc)0, /* tp_traverse */ + (inquiry)0, /* tp_clear */ + (richcmpfunc)array_richcompare, /* tp_richcompare */ + offsetof(PyArrayObject, weakreflist), /* tp_weaklistoffset */ /* Iterator support (use standard) */ - (getiterfunc)array_iter, /* tp_iter */ - (iternextfunc)0, /* tp_iternext */ + (getiterfunc)array_iter, /* tp_iter */ + (iternextfunc)0, /* tp_iternext */ /* Sub-classing (new-style object) support */ - array_methods, /* tp_methods */ - 0, /* tp_members */ - array_getsetlist, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - (initproc)0, /* tp_init */ - array_alloc, /* tp_alloc */ - (newfunc)array_new, /* tp_new */ - 0, /* tp_free */ - 0, /* tp_is_gc */ - 0, /* tp_bases */ - 0, /* tp_mro */ - 0, /* tp_cache */ - 0, /* tp_subclasses */ - 0, /* tp_weaklist */ - 0, /* tp_del */ + array_methods, /* tp_methods */ + 0, /* tp_members */ + array_getsetlist, /* tp_getset */ + 0, /* tp_base */ + 0, /* tp_dict */ + 0, /* tp_descr_get */ + 0, /* tp_descr_set */ + 0, /* tp_dictoffset */ + (initproc)0, /* tp_init */ + array_alloc, /* tp_alloc */ + (newfunc)array_new, /* tp_new */ + 0, /* tp_free */ + 0, /* tp_is_gc */ + 0, /* tp_bases */ + 0, /* tp_mro */ + 0, /* tp_cache */ + 0, /* tp_subclasses */ + 0, /* tp_weaklist */ + 0, /* tp_del */ #ifdef COUNT_ALLOCS - /* these must be last and never explicitly initialized */ - 0, /* tp_allocs */ - 0, /* tp_frees */ - 0, /* tp_maxalloc */ - 0, /* tp_prev */ - 0, /* *tp_next */ + /* these must be last and never explicitly initialized */ + 0, /* tp_allocs */ + 0, /* tp_frees */ + 0, /* tp_maxalloc */ + 0, /* tp_prev */ + 0, /* *tp_next */ #endif }; -/* The rest of this code is to build the right kind of array from a python */ -/* object. */ +/* + * The rest of this code is to build the right kind of array + * from a python object. + */ static int discover_depth(PyObject *s, int max, int stop_at_string, int stop_at_tuple) @@ -10552,14 +10553,13 @@ val_it, swap) < 0) { goto finish; } - retval=0; + retval = 0; } } finish: if (!PyErr_Occurred() && retval < 0) { - PyErr_SetString(PyExc_IndexError, - "unsupported iterator index"); + PyErr_SetString(PyExc_IndexError, "unsupported iterator index"); } Py_XDECREF(indtype); Py_XDECREF(obj); @@ -10592,13 +10592,12 @@ /* Any argument ignored */ /* Two options: - 1) underlying array is contiguous - -- return 1-d wrapper around it - 2) underlying array is not contiguous - -- make new 1-d contiguous array with updateifcopy flag set - to copy back to the old array - */ - + * 1) underlying array is contiguous + * -- return 1-d wrapper around it + * 2) underlying array is not contiguous + * -- make new 1-d contiguous array with updateifcopy flag set + * to copy back to the old array + */ size = PyArray_SIZE(it->ao); Py_INCREF(it->ao->descr); if (PyArray_ISCONTIGUOUS(it->ao)) { @@ -10608,7 +10607,9 @@ NULL, it->ao->data, it->ao->flags, (PyObject *)it->ao); - if (r==NULL) return NULL; + if (r == NULL) { + return NULL; + } } else { r = PyArray_NewFromDescr(&PyArray_Type, @@ -10616,7 +10617,9 @@ 1, &size, NULL, NULL, 0, (PyObject *)it->ao); - if (r==NULL) return NULL; + if (r == NULL) { + return NULL; + } if (_flat_copyinto(r, (PyObject *)it->ao, PyArray_CORDER) < 0) { Py_DECREF(r); @@ -10634,7 +10637,9 @@ static PyObject * iter_copy(PyArrayIterObject *it, PyObject *args) { - if (!PyArg_ParseTuple(args, "")) return NULL; + if (!PyArg_ParseTuple(args, "")) { + return NULL; + } return PyArray_Flatten(it->ao, 0); } @@ -10651,7 +10656,9 @@ PyArrayObject *new; PyObject *ret; new = (PyArrayObject *)iter_array(self, NULL); - if (new == NULL) return NULL; + if (new == NULL) { + return NULL; + } ret = array_richcompare(new, other, cmp_op); Py_DECREF(new); return ret; @@ -10669,12 +10676,15 @@ { int nd; nd = self->ao->nd; - if (self->contiguous) { /* coordinates not kept track of --- need to generate - from index */ + if (self->contiguous) { + /* + * coordinates not kept track of --- + * need to generate from index + */ intp val; int i; val = self->index; - for(i=0;icoordinates[i] = val / self->factors[i]; val = val % self->factors[i]; } @@ -10691,60 +10701,60 @@ static PyTypeObject PyArrayIter_Type = { PyObject_HEAD_INIT(NULL) - 0, /* ob_size */ - "numpy.flatiter", /* tp_name */ - sizeof(PyArrayIterObject), /* tp_basicsize */ - 0, /* tp_itemsize */ + 0, /* ob_size */ + "numpy.flatiter", /* tp_name */ + sizeof(PyArrayIterObject), /* tp_basicsize */ + 0, /* tp_itemsize */ /* methods */ - (destructor)arrayiter_dealloc, /* tp_dealloc */ - 0, /* tp_print */ - 0, /* tp_getattr */ - 0, /* tp_setattr */ - 0, /* tp_compare */ - 0, /* tp_repr */ - 0, /* tp_as_number */ - 0, /* tp_as_sequence */ - &iter_as_mapping, /* tp_as_mapping */ - 0, /* tp_hash */ - 0, /* tp_call */ - 0, /* tp_str */ - 0, /* tp_getattro */ - 0, /* tp_setattro */ - 0, /* tp_as_buffer */ - Py_TPFLAGS_DEFAULT, /* tp_flags */ - 0, /* tp_doc */ - 0, /* tp_traverse */ - 0, /* tp_clear */ - (richcmpfunc)iter_richcompare, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ - 0, /* tp_iter */ - (iternextfunc)arrayiter_next, /* tp_iternext */ - iter_methods, /* tp_methods */ - iter_members, /* tp_members */ - iter_getsets, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - 0, /* tp_init */ - 0, /* tp_alloc */ - 0, /* tp_new */ - 0, /* tp_free */ - 0, /* tp_is_gc */ - 0, /* tp_bases */ - 0, /* tp_mro */ - 0, /* tp_cache */ - 0, /* tp_subclasses */ - 0, /* tp_weaklist */ - 0, /* tp_del */ + (destructor)arrayiter_dealloc, /* tp_dealloc */ + 0, /* tp_print */ + 0, /* tp_getattr */ + 0, /* tp_setattr */ + 0, /* tp_compare */ + 0, /* tp_repr */ + 0, /* tp_as_number */ + 0, /* tp_as_sequence */ + &iter_as_mapping, /* tp_as_mapping */ + 0, /* tp_hash */ + 0, /* tp_call */ + 0, /* tp_str */ + 0, /* tp_getattro */ + 0, /* tp_setattro */ + 0, /* tp_as_buffer */ + Py_TPFLAGS_DEFAULT, /* tp_flags */ + 0, /* tp_doc */ + 0, /* tp_traverse */ + 0, /* tp_clear */ + (richcmpfunc)iter_richcompare, /* tp_richcompare */ + 0, /* tp_weaklistoffset */ + 0, /* tp_iter */ + (iternextfunc)arrayiter_next, /* tp_iternext */ + iter_methods, /* tp_methods */ + iter_members, /* tp_members */ + iter_getsets, /* tp_getset */ + 0, /* tp_base */ + 0, /* tp_dict */ + 0, /* tp_descr_get */ + 0, /* tp_descr_set */ + 0, /* tp_dictoffset */ + 0, /* tp_init */ + 0, /* tp_alloc */ + 0, /* tp_new */ + 0, /* tp_free */ + 0, /* tp_is_gc */ + 0, /* tp_bases */ + 0, /* tp_mro */ + 0, /* tp_cache */ + 0, /* tp_subclasses */ + 0, /* tp_weaklist */ + 0, /* tp_del */ #ifdef COUNT_ALLOCS - /* these must be last and never explicitly initialized */ - 0, /* tp_allocs */ - 0, /* tp_frees */ - 0, /* tp_maxalloc */ - 0, /* tp_prev */ - 0, /* *tp_next */ + /* these must be last and never explicitly initialized */ + 0, /* tp_allocs */ + 0, /* tp_frees */ + 0, /* tp_maxalloc */ + 0, /* tp_prev */ + 0, /* *tp_next */ #endif }; @@ -10775,18 +10785,23 @@ PyArray_Descr *indtype; PyObject *arr; - if (PySlice_Check(obj) || (obj == Py_Ellipsis)) + if (PySlice_Check(obj) || (obj == Py_Ellipsis)) { return 0; + } else if (PyArray_Check(obj) && PyArray_ISBOOL(obj)) { return _nonzero_indices(obj, iter); } else { indtype = PyArray_DescrFromType(PyArray_INTP); arr = PyArray_FromAny(obj, indtype, 0, 0, FORCECAST, NULL); - if (arr == NULL) return -1; + if (arr == NULL) { + return -1; + } *iter = (PyArrayIterObject *)PyArray_IterNew(arr); Py_DECREF(arr); - if (*iter == NULL) return -1; + if (*iter == NULL) { + return -1; + } } return 1; } @@ -10803,23 +10818,26 @@ PyArrayIterObject *it; /* Discover the broadcast number of dimensions */ - for(i=0, nd=0; inumiter; i++) + for (i = 0, nd = 0; i < mit->numiter; i++) { nd = MAX(nd, mit->iters[i]->ao->nd); + } mit->nd = nd; /* Discover the broadcast shape in each dimension */ - for(i=0; idimensions[i] = 1; - for(j=0; jnumiter; j++) { + for (j = 0; j < mit->numiter; j++) { it = mit->iters[j]; - /* This prepends 1 to shapes not already - equal to nd */ + /* This prepends 1 to shapes not already equal to nd */ k = i + it->ao->nd - nd; - if (k>=0) { + if (k >= 0) { tmp = it->ao->dimensions[k]; - if (tmp == 1) continue; - if (mit->dimensions[i] == 1) + if (tmp == 1) { + continue; + } + if (mit->dimensions[i] == 1) { mit->dimensions[i] = tmp; + } else if (mit->dimensions[i] != tmp) { PyErr_SetString(PyExc_ValueError, "shape mismatch: objects" \ @@ -10831,9 +10849,11 @@ } } - /* Reset the iterator dimensions and strides of each iterator - object -- using 0 valued strides for broadcasting */ - /* Need to check for overflow */ + /* + * Reset the iterator dimensions and strides of each iterator + * object -- using 0 valued strides for broadcasting + * Need to check for overflow + */ tmp = PyArray_OverflowMultiplyList(mit->dimensions, mit->nd); if (tmp < 0) { PyErr_SetString(PyExc_ValueError, @@ -10841,18 +10861,20 @@ return -1; } mit->size = tmp; - for(i=0; inumiter; i++) { + for (i = 0; i < mit->numiter; i++) { it = mit->iters[i]; it->nd_m1 = mit->nd - 1; it->size = tmp; nd = it->ao->nd; it->factors[mit->nd-1] = 1; - for(j=0; j < mit->nd; j++) { + for (j = 0; j < mit->nd; j++) { it->dims_m1[j] = mit->dimensions[j] - 1; k = j + nd - mit->nd; - /* If this dimension was added or shape - of underlying array was 1 */ - if ((k < 0) || \ + /* + * If this dimension was added or shape of + * underlying array was 1 + */ + if ((k < 0) || it->ao->dimensions[k] != mit->dimensions[j]) { it->contiguous = 0; it->strides[j] = 0; @@ -10860,12 +10882,10 @@ else { it->strides[j] = it->ao->strides[k]; } - it->backstrides[j] = it->strides[j] * \ - it->dims_m1[j]; + it->backstrides[j] = it->strides[j] * it->dims_m1[j]; if (j > 0) - it->factors[mit->nd-j-1] = \ - it->factors[mit->nd-j] * \ - mit->dimensions[mit->nd-j]; + it->factors[mit->nd-j-1] = + it->factors[mit->nd-j] * mit->dimensions[mit->nd-j]; } PyArray_ITER_RESET(it); } @@ -10887,12 +10907,11 @@ if (mit->subspace != NULL) { memcpy(coord, mit->bscoord, sizeof(intp)*mit->ait->ao->nd); PyArray_ITER_RESET(mit->subspace); - for(i=0; inumiter; i++) { + for (i = 0; i < mit->numiter; i++) { it = mit->iters[i]; PyArray_ITER_RESET(it); j = mit->iteraxes[i]; - copyswap(coord+j,it->dataptr, - !PyArray_ISNOTSWAPPED(it->ao), + copyswap(coord+j,it->dataptr, !PyArray_ISNOTSWAPPED(it->ao), it->ao); } PyArray_ITER_GOTO(mit->ait, coord); @@ -10900,15 +10919,16 @@ mit->dataptr = mit->subspace->dataptr; } else { - for(i=0; inumiter; i++) { + for (i = 0; i < mit->numiter; i++) { it = mit->iters[i]; if (it->size != 0) { PyArray_ITER_RESET(it); - copyswap(coord+i,it->dataptr, - !PyArray_ISNOTSWAPPED(it->ao), + copyswap(coord+i,it->dataptr, !PyArray_ISNOTSWAPPED(it->ao), it->ao); } - else coord[i] = 0; + else { + coord[i] = 0; + } } PyArray_ITER_GOTO(mit->ait, coord); mit->dataptr = mit->ait->dataptr; @@ -10916,9 +10936,10 @@ return; } -/* This function needs to update the state of the map iterator - and point mit->dataptr to the memory-location of the next object -*/ +/* + * This function needs to update the state of the map iterator + * and point mit->dataptr to the memory-location of the next object + */ static void PyArray_MapIterNext(PyArrayMapIterObject *mit) { @@ -10928,23 +10949,22 @@ PyArray_CopySwapFunc *copyswap; mit->index += 1; - if (mit->index >= mit->size) return; + if (mit->index >= mit->size) { + return; + } copyswap = mit->iters[0]->ao->descr->f->copyswap; /* Sub-space iteration */ if (mit->subspace != NULL) { PyArray_ITER_NEXT(mit->subspace); if (mit->subspace->index >= mit->subspace->size) { - /* reset coord to coordinates of - beginning of the subspace */ - memcpy(coord, mit->bscoord, - sizeof(intp)*mit->ait->ao->nd); + /* reset coord to coordinates of beginning of the subspace */ + memcpy(coord, mit->bscoord, sizeof(intp)*mit->ait->ao->nd); PyArray_ITER_RESET(mit->subspace); - for(i=0; inumiter; i++) { + for (i = 0; i < mit->numiter; i++) { it = mit->iters[i]; PyArray_ITER_NEXT(it); j = mit->iteraxes[i]; - copyswap(coord+j,it->dataptr, - !PyArray_ISNOTSWAPPED(it->ao), + copyswap(coord+j,it->dataptr, !PyArray_ISNOTSWAPPED(it->ao), it->ao); } PyArray_ITER_GOTO(mit->ait, coord); @@ -10953,7 +10973,7 @@ mit->dataptr = mit->subspace->dataptr; } else { - for(i=0; inumiter; i++) { + for (i = 0; i < mit->numiter; i++) { it = mit->iters[i]; PyArray_ITER_NEXT(it); copyswap(coord+i,it->dataptr, @@ -10966,26 +10986,26 @@ return; } -/* Bind a mapiteration to a particular array */ - -/* Determine if subspace iteration is necessary. If so, - 1) Fill in mit->iteraxes - 2) Create subspace iterator - 3) Update nd, dimensions, and size. - - Subspace iteration is necessary if: arr->nd > mit->numiter -*/ - -/* Need to check for index-errors somewhere. - - Let's do it at bind time and also convert all <0 values to >0 here - as well. -*/ +/* + * Bind a mapiteration to a particular array + * + * Determine if subspace iteration is necessary. If so, + * 1) Fill in mit->iteraxes + * 2) Create subspace iterator + * 3) Update nd, dimensions, and size. + * + * Subspace iteration is necessary if: arr->nd > mit->numiter + * + * Need to check for index-errors somewhere. + * + * Let's do it at bind time and also convert all <0 values to >0 here + * as well. + */ static void PyArray_MapIterBind(PyArrayMapIterObject *mit, PyArrayObject *arr) { int subnd; - PyObject *sub, *obj=NULL; + PyObject *sub, *obj = NULL; int i, j, n, curraxis, ellipexp, noellip; PyArrayIterObject *it; intp dimsize; @@ -10999,22 +11019,24 @@ } mit->ait = (PyArrayIterObject *)PyArray_IterNew((PyObject *)arr); - if (mit->ait == NULL) return; - + if (mit->ait == NULL) { + return; + } /* no subspace iteration needed. Finish up and Return */ if (subnd == 0) { n = arr->nd; - for(i=0; iiteraxes[i] = i; } goto finish; } - /* all indexing arrays have been converted to 0 - therefore we can extract the subspace with a simple - getitem call which will use view semantics - */ - /* But, be sure to do it with a true array. + /* + * all indexing arrays have been converted to 0 + * therefore we can extract the subspace with a simple + * getitem call which will use view semantics + * + * But, be sure to do it with a true array. */ if (PyArray_CheckExact(arr)) { sub = array_subscript_simple(arr, mit->indexobj); @@ -11022,54 +11044,65 @@ else { Py_INCREF(arr); obj = PyArray_EnsureArray((PyObject *)arr); - if (obj == NULL) goto fail; + if (obj == NULL) { + goto fail; + } sub = array_subscript_simple((PyArrayObject *)obj, mit->indexobj); Py_DECREF(obj); } - if (sub == NULL) goto fail; + if (sub == NULL) { + goto fail; + } mit->subspace = (PyArrayIterObject *)PyArray_IterNew(sub); Py_DECREF(sub); - if (mit->subspace == NULL) goto fail; - + if (mit->subspace == NULL) { + goto fail; + } /* Expand dimensions of result */ n = mit->subspace->ao->nd; - for(i=0; idimensions[mit->nd+i] = mit->subspace->ao->dimensions[i]; + } mit->nd += n; - /* Now, we still need to interpret the ellipsis and slice objects - to determine which axes the indexing arrays are referring to - */ + /* + * Now, we still need to interpret the ellipsis and slice objects + * to determine which axes the indexing arrays are referring to + */ n = PyTuple_GET_SIZE(mit->indexobj); - /* The number of dimensions an ellipsis takes up */ ellipexp = arr->nd - n + 1; - /* Now fill in iteraxes -- remember indexing arrays have been - converted to 0's in mit->indexobj */ + /* + * Now fill in iteraxes -- remember indexing arrays have been + * converted to 0's in mit->indexobj + */ curraxis = 0; j = 0; - noellip = 1; /* Only expand the first ellipsis */ + /* Only expand the first ellipsis */ + noellip = 1; memset(mit->bscoord, 0, sizeof(intp)*arr->nd); - for(i=0; iindexobj, i); - if (PyInt_Check(obj) || PyLong_Check(obj)) + if (PyInt_Check(obj) || PyLong_Check(obj)) { mit->iteraxes[j++] = curraxis++; + } else if (noellip && obj == Py_Ellipsis) { curraxis += ellipexp; noellip = 0; } else { - intp start=0; + intp start = 0; intp stop, step; - /* Should be slice object or - another Ellipsis */ + /* Should be slice object or another Ellipsis */ if (obj == Py_Ellipsis) { mit->bscoord[curraxis] = 0; } - else if (!PySlice_Check(obj) || \ + else if (!PySlice_Check(obj) || (slice_GetIndices((PySliceObject *)obj, arr->dimensions[curraxis], &start, &stop, &step, @@ -11086,6 +11119,7 @@ curraxis += 1; } } + finish: /* Here check the indexes (now that we have iteraxes) */ mit->size = PyArray_OverflowMultiplyList(mit->dimensions, mit->nd); @@ -11100,15 +11134,17 @@ goto fail; } - for(i=0; inumiter; i++) { + for (i = 0; i < mit->numiter; i++) { intp indval; it = mit->iters[i]; PyArray_ITER_RESET(it); dimsize = arr->dimensions[mit->iteraxes[i]]; - while(it->index < it->size) { + while (it->index < it->size) { indptr = ((intp *)it->dataptr); indval = *indptr; - if (indval < 0) indval += dimsize; + if (indval < 0) { + indval += dimsize; + } if (indval < 0 || indval >= dimsize) { PyErr_Format(PyExc_IndexError, "index (%d) out of range "\ @@ -11131,14 +11167,15 @@ return; } -/* This function takes a Boolean array and constructs index objects and - iterators as if nonzero(Bool) had been called -*/ +/* + * This function takes a Boolean array and constructs index objects and + * iterators as if nonzero(Bool) had been called + */ static int _nonzero_indices(PyObject *myBool, PyArrayIterObject **iters) { PyArray_Descr *typecode; - PyArrayObject *ba =NULL, *new=NULL; + PyArrayObject *ba = NULL, *new = NULL; int nd, j; intp size, i, count; Bool *ptr; @@ -11148,45 +11185,59 @@ typecode=PyArray_DescrFromType(PyArray_BOOL); ba = (PyArrayObject *)PyArray_FromAny(myBool, typecode, 0, 0, CARRAY, NULL); - if (ba == NULL) return -1; + if (ba == NULL) { + return -1; + } nd = ba->nd; - for(j=0; jdata; count = 0; /* pre-determine how many nonzero entries there are */ - for(i=0; iao->data; coords[j] = 0; dims_m1[j] = ba->dimensions[j]-1; } - ptr = (Bool *)ba->data; + if (count == 0) { + goto finish; + } - if (count == 0) goto finish; - - /* Loop through the Boolean array and copy coordinates - for non-zero entries */ - for(i=0; i=0; j--) { + for (j = nd - 1; j >= 0; j--) { if (coords[j] < dims_m1[j]) { coords[j]++; break; @@ -11202,7 +11253,7 @@ return nd; fail: - for(j=0; jiters[i] = NULL; + } mit->index = 0; mit->ait = NULL; mit->subspace = NULL; @@ -11245,7 +11298,9 @@ if (fancy == SOBJ_LISTTUP) { PyObject *newobj; newobj = PySequence_Tuple(indexobj); - if (newobj == NULL) goto fail; + if (newobj == NULL) { + goto fail; + } Py_DECREF(indexobj); indexobj = newobj; mit->indexobj = indexobj; @@ -11257,57 +11312,72 @@ #undef SOBJ_TOOMANY #undef SOBJ_LISTTUP - if (oned) return (PyObject *)mit; + if (oned) { + return (PyObject *)mit; + } + /* + * Must have some kind of fancy indexing if we are here + * indexobj is either a list, an arrayobject, or a tuple + * (with at least 1 list or arrayobject or Bool object) + */ - /* Must have some kind of fancy indexing if we are here */ - /* indexobj is either a list, an arrayobject, or a tuple - (with at least 1 list or arrayobject or Bool object), */ - /* convert all inputs to iterators */ - if (PyArray_Check(indexobj) && \ - (PyArray_TYPE(indexobj) == PyArray_BOOL)) { + if (PyArray_Check(indexobj) && (PyArray_TYPE(indexobj) == PyArray_BOOL)) { mit->numiter = _nonzero_indices(indexobj, mit->iters); - if (mit->numiter < 0) goto fail; + if (mit->numiter < 0) { + goto fail; + } mit->nd = 1; mit->dimensions[0] = mit->iters[0]->dims_m1[0]+1; Py_DECREF(mit->indexobj); mit->indexobj = PyTuple_New(mit->numiter); - if (mit->indexobj == NULL) goto fail; - for(i=0; inumiter; i++) { - PyTuple_SET_ITEM(mit->indexobj, i, - PyInt_FromLong(0)); + if (mit->indexobj == NULL) { + goto fail; } + for (i = 0; i < mit->numiter; i++) { + PyTuple_SET_ITEM(mit->indexobj, i, PyInt_FromLong(0)); + } } else if (PyArray_Check(indexobj) || !PyTuple_Check(indexobj)) { mit->numiter = 1; indtype = PyArray_DescrFromType(PyArray_INTP); arr = PyArray_FromAny(indexobj, indtype, 0, 0, FORCECAST, NULL); - if (arr == NULL) goto fail; + if (arr == NULL) { + goto fail; + } mit->iters[0] = (PyArrayIterObject *)PyArray_IterNew(arr); - if (mit->iters[0] == NULL) {Py_DECREF(arr); goto fail;} + if (mit->iters[0] == NULL) { + Py_DECREF(arr); + goto fail; + } mit->nd = PyArray_NDIM(arr); - memcpy(mit->dimensions,PyArray_DIMS(arr),mit->nd*sizeof(intp)); + memcpy(mit->dimensions, PyArray_DIMS(arr), mit->nd*sizeof(intp)); mit->size = PyArray_SIZE(arr); Py_DECREF(arr); Py_DECREF(mit->indexobj); mit->indexobj = Py_BuildValue("(N)", PyInt_FromLong(0)); } - else { /* must be a tuple */ + else { + /* must be a tuple */ PyObject *obj; PyArrayIterObject **iterp; PyObject *new; int numiters, j, n2; - /* Make a copy of the tuple -- we will be replacing - index objects with 0's */ + /* + * Make a copy of the tuple -- we will be replacing + * index objects with 0's + */ n = PyTuple_GET_SIZE(indexobj); n2 = n; new = PyTuple_New(n2); - if (new == NULL) goto fail; + if (new == NULL) { + goto fail; + } started = 0; nonindex = 0; j = 0; - for(i=0; iiters + mit->numiter; if ((numiters=_convert_obj(obj, iterp)) < 0) { @@ -11316,39 +11386,45 @@ } if (numiters > 0) { started = 1; - if (nonindex) mit->consec = 0; + if (nonindex) { + mit->consec = 0; + } mit->numiter += numiters; if (numiters == 1) { - PyTuple_SET_ITEM(new,j++, - PyInt_FromLong(0)); + PyTuple_SET_ITEM(new,j++, PyInt_FromLong(0)); } - else { /* we need to grow the - new indexing object and fill - it with 0s for each of the iterators - produced */ + else { + /* + * we need to grow the new indexing object and fill + * it with 0s for each of the iterators produced + */ int k; n2 += numiters - 1; - if (_PyTuple_Resize(&new, n2) < 0) + if (_PyTuple_Resize(&new, n2) < 0) { goto fail; - for(k=0;kindexobj); mit->indexobj = new; - /* Store the number of iterators actually converted */ - /* These will be mapped to actual axes at bind time */ - if (PyArray_Broadcast((PyArrayMultiIterObject *)mit) < 0) + /* + * Store the number of iterators actually converted + * These will be mapped to actual axes at bind time + */ + if (PyArray_Broadcast((PyArrayMultiIterObject *)mit) < 0) { goto fail; + } } return (PyObject *)mit; @@ -11366,96 +11442,94 @@ Py_XDECREF(mit->indexobj); Py_XDECREF(mit->ait); Py_XDECREF(mit->subspace); - for(i=0; inumiter; i++) + for (i = 0; i < mit->numiter; i++) { Py_XDECREF(mit->iters[i]); + } _pya_free(mit); } -/* The mapiter object must be created new each time. It does not work - to bind to a new array, and continue. - - This was the orginal intention, but currently that does not work. - Do not expose the MapIter_Type to Python. - - It's not very useful anyway, since mapiter(indexobj); mapiter.bind(a); - mapiter is equivalent to a[indexobj].flat but the latter gets to use - slice syntax. -*/ - +/* + * The mapiter object must be created new each time. It does not work + * to bind to a new array, and continue. + * + * This was the orginal intention, but currently that does not work. + * Do not expose the MapIter_Type to Python. + * + * It's not very useful anyway, since mapiter(indexobj); mapiter.bind(a); + * mapiter is equivalent to a[indexobj].flat but the latter gets to use + * slice syntax. + */ static PyTypeObject PyArrayMapIter_Type = { PyObject_HEAD_INIT(NULL) - 0, /* ob_size */ - "numpy.mapiter", /* tp_name */ - sizeof(PyArrayIterObject), /* tp_basicsize */ - 0, /* tp_itemsize */ + 0, /* ob_size */ + "numpy.mapiter", /* tp_name */ + sizeof(PyArrayIterObject), /* tp_basicsize */ + 0, /* tp_itemsize */ /* methods */ - (destructor)arraymapiter_dealloc, /* tp_dealloc */ - 0, /* tp_print */ - 0, /* tp_getattr */ - 0, /* tp_setattr */ - 0, /* tp_compare */ - 0, /* tp_repr */ - 0, /* tp_as_number */ - 0, /* tp_as_sequence */ - 0, /* tp_as_mapping */ - 0, /* tp_hash */ - 0, /* tp_call */ - 0, /* tp_str */ - 0, /* tp_getattro */ - 0, /* tp_setattro */ - 0, /* tp_as_buffer */ - Py_TPFLAGS_DEFAULT, /* tp_flags */ - 0, /* tp_doc */ - (traverseproc)0, /* tp_traverse */ - 0, /* tp_clear */ - 0, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ - 0, /* tp_iter */ - (iternextfunc)0, /* tp_iternext */ - 0, /* tp_methods */ - 0, /* tp_members */ - 0, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - (initproc)0, /* tp_init */ - 0, /* tp_alloc */ - 0, /* tp_new */ - 0, /* tp_free */ - 0, /* tp_is_gc */ - 0, /* tp_bases */ - 0, /* tp_mro */ - 0, /* tp_cache */ - 0, /* tp_subclasses */ - 0, /* tp_weaklist */ - 0, /* tp_del */ + (destructor)arraymapiter_dealloc, /* tp_dealloc */ + 0, /* tp_print */ + 0, /* tp_getattr */ + 0, /* tp_setattr */ + 0, /* tp_compare */ + 0, /* tp_repr */ + 0, /* tp_as_number */ + 0, /* tp_as_sequence */ + 0, /* tp_as_mapping */ + 0, /* tp_hash */ + 0, /* tp_call */ + 0, /* tp_str */ + 0, /* tp_getattro */ + 0, /* tp_setattro */ + 0, /* tp_as_buffer */ + Py_TPFLAGS_DEFAULT, /* tp_flags */ + 0, /* tp_doc */ + (traverseproc)0, /* tp_traverse */ + 0, /* tp_clear */ + 0, /* tp_richcompare */ + 0, /* tp_weaklistoffset */ + 0, /* tp_iter */ + (iternextfunc)0, /* tp_iternext */ + 0, /* tp_methods */ + 0, /* tp_members */ + 0, /* tp_getset */ + 0, /* tp_base */ + 0, /* tp_dict */ + 0, /* tp_descr_get */ + 0, /* tp_descr_set */ + 0, /* tp_dictoffset */ + (initproc)0, /* tp_init */ + 0, /* tp_alloc */ + 0, /* tp_new */ + 0, /* tp_free */ + 0, /* tp_is_gc */ + 0, /* tp_bases */ + 0, /* tp_mro */ + 0, /* tp_cache */ + 0, /* tp_subclasses */ + 0, /* tp_weaklist */ + 0, /* tp_del */ #ifdef COUNT_ALLOCS - /* these must be last and never explicitly initialized */ - 0, /* tp_allocs */ - 0, /* tp_frees */ - 0, /* tp_maxalloc */ - 0, /* tp_prev */ - 0, /* *tp_next */ + /* these must be last and never explicitly initialized */ + 0, /* tp_allocs */ + 0, /* tp_frees */ + 0, /* tp_maxalloc */ + 0, /* tp_prev */ + 0, /* *tp_next */ #endif - }; /** END of Subscript Iterator **/ -/* - NUMPY_API - Get MultiIterator from array of Python objects and any additional - - PyObject **mps -- array of PyObjects - int n - number of PyObjects in the array - int nadd - number of additional arrays to include in the - iterator. - - Returns a multi-iterator object. +/*NUMPY_API + * Get MultiIterator from array of Python objects and any additional + * + * PyObject **mps -- array of PyObjects + * int n - number of PyObjects in the array + * int nadd - number of additional arrays to include in the iterator. + * + * Returns a multi-iterator object. */ static PyObject * PyArray_MultiIterFromObjects(PyObject **mps, int n, int nadd, ...) @@ -11474,17 +11548,20 @@ "array objects (inclusive).", NPY_MAXARGS); return NULL; } - multi = _pya_malloc(sizeof(PyArrayMultiIterObject)); - if (multi == NULL) return PyErr_NoMemory(); + if (multi == NULL) { + return PyErr_NoMemory(); + } PyObject_Init((PyObject *)multi, &PyArrayMultiIter_Type); - for(i=0; iiters[i] = NULL; + for (i = 0; i < ntot; i++) { + multi->iters[i] = NULL; + } multi->numiter = ntot; multi->index = 0; va_start(va, nadd); - for(i=0; iiters[i] = (PyArrayIterObject *)PyArray_IterNew(arr); Py_DECREF(arr); } } - va_end(va); - if (!err && PyArray_Broadcast(multi) < 0) err=1; - + if (!err && PyArray_Broadcast(multi) < 0) { + err = 1; + } if (err) { Py_DECREF(multi); return NULL; } - PyArray_MultiIter_RESET(multi); - - return (PyObject *)multi; + return (PyObject *)multi; } /*NUMPY_API - Get MultiIterator, -*/ + * Get MultiIterator, + */ static PyObject * PyArray_MultiIterNew(int n, ...) { @@ -11526,7 +11602,7 @@ PyObject *current; PyObject *arr; - int i, err=0; + int i, err = 0; if (n < 2 || n > NPY_MAXARGS) { PyErr_Format(PyExc_ValueError, @@ -11538,37 +11614,40 @@ /* fprintf(stderr, "multi new...");*/ multi = _pya_malloc(sizeof(PyArrayMultiIterObject)); - if (multi == NULL) return PyErr_NoMemory(); + if (multi == NULL) { + return PyErr_NoMemory(); + } PyObject_Init((PyObject *)multi, &PyArrayMultiIter_Type); - for(i=0; iiters[i] = NULL; + for (i = 0; i < n; i++) { + multi->iters[i] = NULL; + } multi->numiter = n; multi->index = 0; va_start(va, n); - for(i=0; iiters[i] = (PyArrayIterObject *)PyArray_IterNew(arr); Py_DECREF(arr); } } - va_end(va); - if (!err && PyArray_Broadcast(multi) < 0) err=1; - + if (!err && PyArray_Broadcast(multi) < 0) { + err = 1; + } if (err) { Py_DECREF(multi); return NULL; } - PyArray_MultiIter_RESET(multi); - return (PyObject *)multi; } @@ -11588,7 +11667,9 @@ n = PyTuple_Size(args); if (n < 2 || n > NPY_MAXARGS) { - if (PyErr_Occurred()) return NULL; + if (PyErr_Occurred()) { + return NULL; + } PyErr_Format(PyExc_ValueError, "Need at least two and fewer than (%d) " \ "array objects.", NPY_MAXARGS); @@ -11596,23 +11677,31 @@ } multi = _pya_malloc(sizeof(PyArrayMultiIterObject)); - if (multi == NULL) return PyErr_NoMemory(); + if (multi == NULL) { + return PyErr_NoMemory(); + } PyObject_Init((PyObject *)multi, &PyArrayMultiIter_Type); multi->numiter = n; multi->index = 0; - for(i=0; iiters[i] = NULL; - for(i=0; iiters[i] = NULL; + } + for (i = 0; i < n; i++) { arr = PyArray_FromAny(PyTuple_GET_ITEM(args, i), NULL, 0, 0, 0, NULL); - if (arr == NULL) goto fail; - if ((multi->iters[i] = \ - (PyArrayIterObject *)PyArray_IterNew(arr))==NULL) + if (arr == NULL) { goto fail; + } + if ((multi->iters[i] = (PyArrayIterObject *)PyArray_IterNew(arr)) + == NULL) { + goto fail; + } Py_DECREF(arr); } - if (PyArray_Broadcast(multi) < 0) goto fail; + if (PyArray_Broadcast(multi) < 0) { + goto fail; + } PyArray_MultiIter_RESET(multi); - return (PyObject *)multi; fail: @@ -11628,9 +11717,11 @@ n = multi->numiter; ret = PyTuple_New(n); - if (ret == NULL) return NULL; + if (ret == NULL) { + return NULL; + } if (multi->index < multi->size) { - for(i=0; i < n; i++) { + for (i = 0; i < n; i++) { PyArrayIterObject *it=multi->iters[i]; PyTuple_SET_ITEM(ret, i, PyArray_ToScalar(it->dataptr, it->ao)); @@ -11647,8 +11738,9 @@ { int i; - for(i=0; inumiter; i++) + for (i = 0; i < multi->numiter; i++) { Py_XDECREF(multi->iters[i]); + } multi->ob_type->tp_free((PyObject *)multi); } @@ -11658,10 +11750,12 @@ #if SIZEOF_INTP <= SIZEOF_LONG return PyInt_FromLong((long) self->size); #else - if (self->size < MAX_LONG) + if (self->size < MAX_LONG) { return PyInt_FromLong((long) self->size); - else + } + else { return PyLong_FromLongLong((longlong) self->size); + } #endif } @@ -11671,10 +11765,12 @@ #if SIZEOF_INTP <= SIZEOF_LONG return PyInt_FromLong((long) self->index); #else - if (self->size < MAX_LONG) + if (self->size < MAX_LONG) { return PyInt_FromLong((long) self->index); - else + } + else { return PyLong_FromLongLong((longlong) self->index); + } #endif } @@ -11689,10 +11785,13 @@ { PyObject *res; int i, n; + n = self->numiter; res = PyTuple_New(n); - if (res == NULL) return res; - for(i=0; iiters[i]); PyTuple_SET_ITEM(res, i, (PyObject *)self->iters[i]); } @@ -11725,8 +11824,9 @@ static PyObject * arraymultiter_reset(PyArrayMultiIterObject *self, PyObject *args) { - if (!PyArg_ParseTuple(args, "")) return NULL; - + if (!PyArg_ParseTuple(args, "")) { + return NULL; + } PyArray_MultiIter_RESET(self); Py_INCREF(Py_None); return Py_None; @@ -11739,61 +11839,61 @@ static PyTypeObject PyArrayMultiIter_Type = { PyObject_HEAD_INIT(NULL) - 0, /* ob_size */ - "numpy.broadcast", /* tp_name */ - sizeof(PyArrayMultiIterObject), /* tp_basicsize */ - 0, /* tp_itemsize */ + 0, /* ob_size */ + "numpy.broadcast", /* tp_name */ + sizeof(PyArrayMultiIterObject), /* tp_basicsize */ + 0, /* tp_itemsize */ /* methods */ - (destructor)arraymultiter_dealloc, /* tp_dealloc */ - 0, /* tp_print */ - 0, /* tp_getattr */ - 0, /* tp_setattr */ - 0, /* tp_compare */ - 0, /* tp_repr */ - 0, /* tp_as_number */ - 0, /* tp_as_sequence */ - 0, /* tp_as_mapping */ - 0, /* tp_hash */ - 0, /* tp_call */ - 0, /* tp_str */ - 0, /* tp_getattro */ - 0, /* tp_setattro */ - 0, /* tp_as_buffer */ - Py_TPFLAGS_DEFAULT, /* tp_flags */ - 0, /* tp_doc */ - 0, /* tp_traverse */ - 0, /* tp_clear */ - 0, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ - 0, /* tp_iter */ - (iternextfunc)arraymultiter_next, /* tp_iternext */ - arraymultiter_methods, /* tp_methods */ - arraymultiter_members, /* tp_members */ - arraymultiter_getsetlist, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - (initproc)0, /* tp_init */ - 0, /* tp_alloc */ - arraymultiter_new, /* tp_new */ - 0, /* tp_free */ - 0, /* tp_is_gc */ - 0, /* tp_bases */ - 0, /* tp_mro */ - 0, /* tp_cache */ - 0, /* tp_subclasses */ - 0, /* tp_weaklist */ - 0, /* tp_del */ + (destructor)arraymultiter_dealloc, /* tp_dealloc */ + 0, /* tp_print */ + 0, /* tp_getattr */ + 0, /* tp_setattr */ + 0, /* tp_compare */ + 0, /* tp_repr */ + 0, /* tp_as_number */ + 0, /* tp_as_sequence */ + 0, /* tp_as_mapping */ + 0, /* tp_hash */ + 0, /* tp_call */ + 0, /* tp_str */ + 0, /* tp_getattro */ + 0, /* tp_setattro */ + 0, /* tp_as_buffer */ + Py_TPFLAGS_DEFAULT, /* tp_flags */ + 0, /* tp_doc */ + 0, /* tp_traverse */ + 0, /* tp_clear */ + 0, /* tp_richcompare */ + 0, /* tp_weaklistoffset */ + 0, /* tp_iter */ + (iternextfunc)arraymultiter_next, /* tp_iternext */ + arraymultiter_methods, /* tp_methods */ + arraymultiter_members, /* tp_members */ + arraymultiter_getsetlist, /* tp_getset */ + 0, /* tp_base */ + 0, /* tp_dict */ + 0, /* tp_descr_get */ + 0, /* tp_descr_set */ + 0, /* tp_dictoffset */ + (initproc)0, /* tp_init */ + 0, /* tp_alloc */ + arraymultiter_new, /* tp_new */ + 0, /* tp_free */ + 0, /* tp_is_gc */ + 0, /* tp_bases */ + 0, /* tp_mro */ + 0, /* tp_cache */ + 0, /* tp_subclasses */ + 0, /* tp_weaklist */ + 0, /* tp_del */ #ifdef COUNT_ALLOCS - /* these must be last and never explicitly initialized */ - 0, /* tp_allocs */ - 0, /* tp_frees */ - 0, /* tp_maxalloc */ - 0, /* tp_prev */ - 0, /* *tp_next */ + /* these must be last and never explicitly initialized */ + 0, /* tp_allocs */ + 0, /* tp_frees */ + 0, /* tp_maxalloc */ + 0, /* tp_prev */ + 0, /* *tp_next */ #endif }; @@ -11810,21 +11910,23 @@ return new; } -/*** Array Descr Objects for dynamic types **/ +/** Array Descr Objects for dynamic types **/ -/** There are some statically-defined PyArray_Descr objects corresponding - to the basic built-in types. - These can and should be DECREF'd and INCREF'd as appropriate, anyway. - If a mistake is made in reference counting, deallocation on these - builtins will be attempted leading to problems. +/* + * There are some statically-defined PyArray_Descr objects corresponding + * to the basic built-in types. + * These can and should be DECREF'd and INCREF'd as appropriate, anyway. + * If a mistake is made in reference counting, deallocation on these + * builtins will be attempted leading to problems. + * + * This let's us deal with all PyArray_Descr objects using reference + * counting (regardless of whether they are statically or dynamically + * allocated). + */ - This let's us deal with all PyArray_Descr objects using reference - counting (regardless of whether they are statically or dynamically - allocated). -**/ - -/* base cannot be NULL */ -/*NUMPY_API*/ +/*NUMPY_API + * base cannot be NULL + */ static PyArray_Descr * PyArray_DescrNew(PyArray_Descr *base) { @@ -11853,9 +11955,10 @@ return new; } -/* should never be called for builtin-types unless - there is a reference-count problem -*/ +/* + * should never be called for builtin-types unless + * there is a reference-count problem + */ static void arraydescr_dealloc(PyArray_Descr *self) { @@ -11878,20 +11981,29 @@ self->ob_type->tp_free((PyObject *)self); } -/* we need to be careful about setting attributes because these - objects are pointed to by arrays that depend on them for interpreting - data. Currently no attributes of data-type objects can be set - directly except names. -*/ +/* + * we need to be careful about setting attributes because these + * objects are pointed to by arrays that depend on them for interpreting + * data. Currently no attributes of data-type objects can be set + * directly except names. + */ static PyMemberDef arraydescr_members[] = { - {"type", T_OBJECT, offsetof(PyArray_Descr, typeobj), RO, NULL}, - {"kind", T_CHAR, offsetof(PyArray_Descr, kind), RO, NULL}, - {"char", T_CHAR, offsetof(PyArray_Descr, type), RO, NULL}, - {"num", T_INT, offsetof(PyArray_Descr, type_num), RO, NULL}, - {"byteorder", T_CHAR, offsetof(PyArray_Descr, byteorder), RO, NULL}, - {"itemsize", T_INT, offsetof(PyArray_Descr, elsize), RO, NULL}, - {"alignment", T_INT, offsetof(PyArray_Descr, alignment), RO, NULL}, - {"flags", T_UBYTE, offsetof(PyArray_Descr, hasobject), RO, NULL}, + {"type", + T_OBJECT, offsetof(PyArray_Descr, typeobj), RO, NULL}, + {"kind", + T_CHAR, offsetof(PyArray_Descr, kind), RO, NULL}, + {"char", + T_CHAR, offsetof(PyArray_Descr, type), RO, NULL}, + {"num", + T_INT, offsetof(PyArray_Descr, type_num), RO, NULL}, + {"byteorder", + T_CHAR, offsetof(PyArray_Descr, byteorder), RO, NULL}, + {"itemsize", + T_INT, offsetof(PyArray_Descr, elsize), RO, NULL}, + {"alignment", + T_INT, offsetof(PyArray_Descr, alignment), RO, NULL}, + {"flags", + T_UBYTE, offsetof(PyArray_Descr, hasobject), RO, NULL}, {NULL, 0, 0, 0, NULL}, }; @@ -11909,15 +12021,16 @@ static PyObject * arraydescr_protocol_typestr_get(PyArray_Descr *self) { - char basic_=self->kind; + char basic_ = self->kind; char endian = self->byteorder; - int size=self->elsize; + int size = self->elsize; if (endian == '=') { endian = '<'; - if (!PyArray_IsNativeByteOrder(endian)) endian = '>'; + if (!PyArray_IsNativeByteOrder(endian)) { + endian = '>'; + } } - if (self->type_num == PyArray_UNICODE) { size >>= 2; } @@ -11931,7 +12044,8 @@ PyTypeObject *typeobj = self->typeobj; PyObject *res; char *s; - static int prefix_len=0; + /* fixme: not reentrant */ + static int prefix_len = 0; if (PyTypeNum_ISUSERDEF(self->type_num)) { s = strrchr(typeobj->tp_name, '.'); @@ -11939,17 +12053,18 @@ res = PyString_FromString(typeobj->tp_name); } else { - res = PyString_FromStringAndSize(s+1, strlen(s)-1); + res = PyString_FromStringAndSize(s + 1, strlen(s) - 1); } return res; } else { - if (prefix_len == 0) + if (prefix_len == 0) { prefix_len = strlen("numpy."); - + } len = strlen(typeobj->tp_name); - if (*(typeobj->tp_name + (len-1)) == '_') - len-=1; + if (*(typeobj->tp_name + (len-1)) == '_') { + len -= 1; + } len -= prefix_len; res = PyString_FromStringAndSize(typeobj->tp_name+prefix_len, len); } @@ -11994,35 +12109,45 @@ if (self->names == NULL) { /* get default */ dobj = PyTuple_New(2); - if (dobj == NULL) return NULL; + if (dobj == NULL) { + return NULL; + } PyTuple_SET_ITEM(dobj, 0, PyString_FromString("")); - PyTuple_SET_ITEM(dobj, 1, \ - arraydescr_protocol_typestr_get(self)); + PyTuple_SET_ITEM(dobj, 1, arraydescr_protocol_typestr_get(self)); res = PyList_New(1); - if (res == NULL) {Py_DECREF(dobj); return NULL;} + if (res == NULL) { + Py_DECREF(dobj); + return NULL; + } PyList_SET_ITEM(res, 0, dobj); return res; } _numpy_internal = PyImport_ImportModule("numpy.core._internal"); - if (_numpy_internal == NULL) return NULL; - res = PyObject_CallMethod(_numpy_internal, "_array_descr", - "O", self); + if (_numpy_internal == NULL) { + return NULL; + } + res = PyObject_CallMethod(_numpy_internal, "_array_descr", "O", self); Py_DECREF(_numpy_internal); return res; } -/* returns 1 for a builtin type - and 2 for a user-defined data-type descriptor - return 0 if neither (i.e. it's a copy of one) -*/ +/* + * returns 1 for a builtin type + * and 2 for a user-defined data-type descriptor + * return 0 if neither (i.e. it's a copy of one) + */ static PyObject * arraydescr_isbuiltin_get(PyArray_Descr *self) { long val; val = 0; - if (self->fields == Py_None) val = 1; - if (PyTypeNum_ISUSERDEF(self->type_num)) val = 2; + if (self->fields == Py_None) { + val = 1; + } + if (PyTypeNum_ISUSERDEF(self->type_num)) { + val = 2; + } return PyInt_FromLong(val); } @@ -12033,34 +12158,42 @@ return PyArray_ISNBO(self->byteorder); } else { - PyObject *key, *value, *title=NULL; + PyObject *key, *value, *title = NULL; PyArray_Descr *new; int offset; - Py_ssize_t pos=0; - while(PyDict_Next(self->fields, &pos, &key, &value)) { - if NPY_TITLE_KEY(key, value) continue; - if (!PyArg_ParseTuple(value, "Oi|O", &new, &offset, - &title)) return -1; - if (!_arraydescr_isnative(new)) return 0; + Py_ssize_t pos = 0; + while (PyDict_Next(self->fields, &pos, &key, &value)) { + if NPY_TITLE_KEY(key, value) { + continue; + } + if (!PyArg_ParseTuple(value, "Oi|O", &new, &offset, &title)) { + return -1; + } + if (!_arraydescr_isnative(new)) { + return 0; + } } } return 1; } -/* return Py_True if this data-type descriptor - has native byteorder if no fields are defined - - or if all sub-fields have native-byteorder if - fields are defined -*/ +/* + * return Py_True if this data-type descriptor + * has native byteorder if no fields are defined + * + * or if all sub-fields have native-byteorder if + * fields are defined + */ static PyObject * arraydescr_isnative_get(PyArray_Descr *self) { PyObject *ret; int retval; retval = _arraydescr_isnative(self); - if (retval == -1) return NULL; - ret = (retval ? Py_True : Py_False); + if (retval == -1) { + return NULL; + } + ret = retval ? Py_True : Py_False; Py_INCREF(ret); return ret; } @@ -12079,10 +12212,12 @@ arraydescr_hasobject_get(PyArray_Descr *self) { PyObject *res; - if (PyDataType_FLAGCHK(self, NPY_ITEM_HASOBJECT)) + if (PyDataType_FLAGCHK(self, NPY_ITEM_HASOBJECT)) { res = Py_True; - else + } + else { res = Py_False; + } Py_INCREF(res); return res; } @@ -12116,9 +12251,9 @@ return -1; } /* Make sure all entries are strings */ - for(i=0; ifields == Py_None) { descr = PyArray_DescrNew(conv); @@ -12226,9 +12362,11 @@ static PyObject * arraydescr_reduce(PyArray_Descr *self, PyObject *NPY_UNUSED(args)) { - /* version number of this pickle type. Increment if we need to - change the format. Be sure to handle the old versions in - arraydescr_setstate. */ + /* + * version number of this pickle type. Increment if we need to + * change the format. Be sure to handle the old versions in + * arraydescr_setstate. + */ const int version = 3; PyObject *ret, *mod, *obj; PyObject *state; @@ -12236,15 +12374,23 @@ int elsize, alignment; ret = PyTuple_New(3); - if (ret == NULL) return NULL; + if (ret == NULL) { + return NULL; + } mod = PyImport_ImportModule("numpy.core.multiarray"); - if (mod == NULL) {Py_DECREF(ret); return NULL;} + if (mod == NULL) { + Py_DECREF(ret); + return NULL; + } obj = PyObject_GetAttrString(mod, "dtype"); Py_DECREF(mod); - if (obj == NULL) {Py_DECREF(ret); return NULL;} + if (obj == NULL) { + Py_DECREF(ret); + return NULL; + } PyTuple_SET_ITEM(ret, 0, obj); - if (PyTypeNum_ISUSERDEF(self->type_num) || \ - ((self->type_num == PyArray_VOID && \ + if (PyTypeNum_ISUSERDEF(self->type_num) || + ((self->type_num == PyArray_VOID && self->typeobj != &PyVoidArrType_Type))) { obj = (PyObject *)self->typeobj; Py_INCREF(obj); @@ -12258,12 +12404,16 @@ } PyTuple_SET_ITEM(ret, 1, Py_BuildValue("(Nii)", obj, 0, 1)); - /* Now return the state which is at least - byteorder, subarray, and fields */ + /* + * Now return the state which is at least byteorder, + * subarray, and fields + */ endian = self->byteorder; if (endian == '=') { endian = '<'; - if (!PyArray_IsNativeByteOrder(endian)) endian = '>'; + if (!PyArray_IsNativeByteOrder(endian)) { + endian = '>'; + } } state = PyTuple_New(8); PyTuple_SET_ITEM(state, 0, PyInt_FromLong(version)); @@ -12287,12 +12437,13 @@ elsize = self->elsize; alignment = self->alignment; } - else {elsize = -1; alignment = -1;} - + else { + elsize = -1; + alignment = -1; + } PyTuple_SET_ITEM(state, 5, PyInt_FromLong(elsize)); PyTuple_SET_ITEM(state, 6, PyInt_FromLong(alignment)); PyTuple_SET_ITEM(state, 7, PyInt_FromLong(self->hasobject)); - PyTuple_SET_ITEM(ret, 2, state); return ret; } @@ -12304,17 +12455,20 @@ _descr_find_object(PyArray_Descr *self) { if (self->hasobject || self->type_num == PyArray_OBJECT || - self->kind == 'O') + self->kind == 'O') { return NPY_OBJECT_DTYPE_FLAGS; + } if (PyDescr_HASFIELDS(self)) { - PyObject *key, *value, *title=NULL; + PyObject *key, *value, *title = NULL; PyArray_Descr *new; int offset; - Py_ssize_t pos=0; + Py_ssize_t pos = 0; + while (PyDict_Next(self->fields, &pos, &key, &value)) { - if NPY_TITLE_KEY(key, value) continue; - if (!PyArg_ParseTuple(value, "Oi|O", &new, &offset, - &title)) { + if NPY_TITLE_KEY(key, value) { + continue; + } + if (!PyArg_ParseTuple(value, "Oi|O", &new, &offset, &title)) { PyErr_Clear(); return 0; } @@ -12327,64 +12481,68 @@ return 0; } -/* state is at least byteorder, subarray, and fields but could include elsize - and alignment for EXTENDED arrays -*/ - +/* + * state is at least byteorder, subarray, and fields but could include elsize + * and alignment for EXTENDED arrays + */ static PyObject * arraydescr_setstate(PyArray_Descr *self, PyObject *args) { int elsize = -1, alignment = -1; int version = 3; char endian; - PyObject *subarray, *fields, *names=NULL; + PyObject *subarray, *fields, *names = NULL; int incref_names = 1; - int dtypeflags=0; + int dtypeflags = 0; - if (self->fields == Py_None) {Py_INCREF(Py_None); return Py_None;} - + if (self->fields == Py_None) { + Py_INCREF(Py_None); + return Py_None; + } if (PyTuple_GET_SIZE(args) != 1 || !(PyTuple_Check(PyTuple_GET_ITEM(args, 0)))) { PyErr_BadInternalCall(); return NULL; } switch (PyTuple_GET_SIZE(PyTuple_GET_ITEM(args,0))) { - case 8: - if (!PyArg_ParseTuple(args, "(icOOOiii)", &version, &endian, - &subarray, &names, &fields, &elsize, - &alignment, &dtypeflags)) { - return NULL; - } - break; - case 7: - if (!PyArg_ParseTuple(args, "(icOOOii)", &version, &endian, - &subarray, &names, &fields, &elsize, - &alignment)) { - return NULL; - } - break; - case 6: - if (!PyArg_ParseTuple(args, "(icOOii)", &version, - &endian, &subarray, &fields, - &elsize, &alignment)) { - PyErr_Clear(); - } - break; - case 5: - version = 0; - if (!PyArg_ParseTuple(args, "(cOOii)", - &endian, &subarray, &fields, &elsize, - &alignment)) { - return NULL; - } - break; - default: - version = -1; /* raise an error */ + case 8: + if (!PyArg_ParseTuple(args, "(icOOOiii)", &version, &endian, + &subarray, &names, &fields, &elsize, + &alignment, &dtypeflags)) { + return NULL; + } + break; + case 7: + if (!PyArg_ParseTuple(args, "(icOOOii)", &version, &endian, + &subarray, &names, &fields, &elsize, + &alignment)) { + return NULL; + } + break; + case 6: + if (!PyArg_ParseTuple(args, "(icOOii)", &version, + &endian, &subarray, &fields, + &elsize, &alignment)) { + PyErr_Clear(); + } + break; + case 5: + version = 0; + if (!PyArg_ParseTuple(args, "(cOOii)", + &endian, &subarray, &fields, &elsize, + &alignment)) { + return NULL; + } + break; + default: + /* raise an error */ + version = -1; } - /* If we ever need another pickle format, increment the version - number. But we should still be able to handle the old versions. - */ + /* + * If we ever need another pickle format, increment the version + * number. But we should still be able to handle the old versions. + */ if (version < 0 || version > 3) { PyErr_Format(PyExc_ValueError, "can't handle version %d of numpy.dtype pickle", @@ -12397,7 +12555,9 @@ PyObject *key, *list; key = PyInt_FromLong(-1); list = PyDict_GetItem(fields, key); - if (!list) return NULL; + if (!list) { + return NULL; + } Py_INCREF(list); names = list; PyDict_DelItem(fields, key); @@ -12409,16 +12569,16 @@ } - if ((fields == Py_None && names != Py_None) || \ + if ((fields == Py_None && names != Py_None) || (names == Py_None && fields != Py_None)) { PyErr_Format(PyExc_ValueError, "inconsistent fields and names"); return NULL; } - if (endian != '|' && - PyArray_IsNativeByteOrder(endian)) endian = '='; - + if (endian != '|' && PyArray_IsNativeByteOrder(endian)) { + endian = '='; + } self->byteorder = endian; if (self->subarray) { Py_XDECREF(self->subarray->base); @@ -12441,8 +12601,9 @@ Py_INCREF(fields); Py_XDECREF(self->names); self->names = names; - if (incref_names) + if (incref_names) { Py_INCREF(names); + } } if (PyTypeNum_ISEXTENDED(self->type_num)) { @@ -12459,23 +12620,23 @@ } -/* returns a copy of the PyArray_Descr structure with the byteorder - altered: - no arguments: The byteorder is swapped (in all subfields as well) - single argument: The byteorder is forced to the given state - (in all subfields as well) - - Valid states: ('big', '>') or ('little' or '<') - ('native', or '=') - - If a descr structure with | is encountered it's own - byte-order is not changed but any fields are: -*/ - -/*NUMPY_API - Deep bytorder change of a data-type descriptor - *** Leaves reference count of self unchanged --- does not DECREF self *** - */ + /*NUMPY_API + * returns a copy of the PyArray_Descr structure with the byteorder + * altered: + * no arguments: The byteorder is swapped (in all subfields as well) + * single argument: The byteorder is forced to the given state + * (in all subfields as well) + * + * Valid states: ('big', '>') or ('little' or '<') + * ('native', or '=') + * + * If a descr structure with | is encountered it's own + * byte-order is not changed but any fields are: + * + * + * Deep bytorder change of a data-type descriptor + * *** Leaves reference count of self unchanged --- does not DECREF self *** + */ static PyArray_Descr * PyArray_DescrNewByteorder(PyArray_Descr *self, char newendian) { @@ -12485,9 +12646,14 @@ new = PyArray_DescrNew(self); endian = new->byteorder; if (endian != PyArray_IGNORE) { - if (newendian == PyArray_SWAP) { /* swap byteorder */ - if PyArray_ISNBO(endian) endian = PyArray_OPPBYTE; - else endian = PyArray_NATBYTE; + if (newendian == PyArray_SWAP) { + /* swap byteorder */ + if PyArray_ISNBO(endian) { + endian = PyArray_OPPBYTE; + } + else { + endian = PyArray_NATBYTE; + } new->byteorder = endian; } else if (newendian != PyArray_IGNORE) { @@ -12502,28 +12668,31 @@ PyArray_Descr *newdescr; Py_ssize_t pos = 0; int len, i; + newfields = PyDict_New(); - /* make new dictionary with replaced */ - /* PyArray_Descr Objects */ + /* make new dictionary with replaced PyArray_Descr Objects */ while(PyDict_Next(self->fields, &pos, &key, &value)) { - if NPY_TITLE_KEY(key, value) continue; - if (!PyString_Check(key) || \ - !PyTuple_Check(value) || \ - ((len=PyTuple_GET_SIZE(value)) < 2)) + if NPY_TITLE_KEY(key, value) { continue; - + } + if (!PyString_Check(key) || + !PyTuple_Check(value) || + ((len=PyTuple_GET_SIZE(value)) < 2)) { + continue; + } old = PyTuple_GET_ITEM(value, 0); - if (!PyArray_DescrCheck(old)) continue; - newdescr = PyArray_DescrNewByteorder \ - ((PyArray_Descr *)old, newendian); + if (!PyArray_DescrCheck(old)) { + continue; + } + newdescr = PyArray_DescrNewByteorder( + (PyArray_Descr *)old, newendian); if (newdescr == NULL) { Py_DECREF(newfields); Py_DECREF(new); return NULL; } newvalue = PyTuple_New(len); - PyTuple_SET_ITEM(newvalue, 0, \ - (PyObject *)newdescr); - for(i=1; isubarray) { Py_DECREF(new->subarray->base); - new->subarray->base = PyArray_DescrNewByteorder \ + new->subarray->base = PyArray_DescrNewByteorder (self->subarray->base, newendian); } return new; @@ -12549,19 +12718,20 @@ char endian=PyArray_SWAP; if (!PyArg_ParseTuple(args, "|O&", PyArray_ByteorderConverter, - &endian)) return NULL; - + &endian)) { + return NULL; + } return (PyObject *)PyArray_DescrNewByteorder(self, endian); } static PyMethodDef arraydescr_methods[] = { /* for pickling */ - {"__reduce__", (PyCFunction)arraydescr_reduce, METH_VARARGS, - NULL}, - {"__setstate__", (PyCFunction)arraydescr_setstate, METH_VARARGS, - NULL}, - {"newbyteorder", (PyCFunction)arraydescr_newbyteorder, METH_VARARGS, - NULL}, + {"__reduce__", + (PyCFunction)arraydescr_reduce, METH_VARARGS, NULL}, + {"__setstate__", + (PyCFunction)arraydescr_setstate, METH_VARARGS, NULL}, + {"newbyteorder", + (PyCFunction)arraydescr_newbyteorder, METH_VARARGS, NULL}, {NULL, NULL, 0, NULL} /* sentinel */ }; @@ -12577,7 +12747,9 @@ sub = PyString_FromString(""); PyErr_Clear(); } - else sub = PyObject_Str(lst); + else { + sub = PyObject_Str(lst); + } Py_XDECREF(lst); if (self->type_num != PyArray_VOID) { PyObject *p; @@ -12648,55 +12820,66 @@ static PyObject * arraydescr_richcompare(PyArray_Descr *self, PyObject *other, int cmp_op) { - PyArray_Descr *new=NULL; + PyArray_Descr *new = NULL; PyObject *result = Py_NotImplemented; if (!PyArray_DescrCheck(other)) { - if (PyArray_DescrConverter(other, &new) == PY_FAIL) + if (PyArray_DescrConverter(other, &new) == PY_FAIL) { return NULL; + } } else { new = (PyArray_Descr *)other; Py_INCREF(new); } switch (cmp_op) { - case Py_LT: - if (!PyArray_EquivTypes(self, new) && PyArray_CanCastTo(self, new)) - result = Py_True; - else - result = Py_False; - break; - case Py_LE: - if (PyArray_CanCastTo(self, new)) - result = Py_True; - else - result = Py_False; - break; - case Py_EQ: - if (PyArray_EquivTypes(self, new)) - result = Py_True; - else - result = Py_False; - break; - case Py_NE: - if (PyArray_EquivTypes(self, new)) - result = Py_False; - else - result = Py_True; - break; - case Py_GT: - if (!PyArray_EquivTypes(self, new) && PyArray_CanCastTo(new, self)) - result = Py_True; - else - result = Py_False; - break; - case Py_GE: - if (PyArray_CanCastTo(new, self)) - result = Py_True; - else - result = Py_False; - break; - default: - result = Py_NotImplemented; + case Py_LT: + if (!PyArray_EquivTypes(self, new) && PyArray_CanCastTo(self, new)) { + result = Py_True; + } + else { + result = Py_False; + } + break; + case Py_LE: + if (PyArray_CanCastTo(self, new)) { + result = Py_True; + } + else { + result = Py_False; + } + break; + case Py_EQ: + if (PyArray_EquivTypes(self, new)) { + result = Py_True; + } + else { + result = Py_False; + } + break; + case Py_NE: + if (PyArray_EquivTypes(self, new)) + result = Py_False; + else + result = Py_True; + break; + case Py_GT: + if (!PyArray_EquivTypes(self, new) && PyArray_CanCastTo(new, self)) { + result = Py_True; + } + else { + result = Py_False; + } + break; + case Py_GE: + if (PyArray_CanCastTo(new, self)) { + result = Py_True; + } + else { + result = Py_False; + } + break; + default: + result = Py_NotImplemented; } Py_XDECREF(new); @@ -12711,12 +12894,14 @@ static Py_ssize_t descr_length(PyObject *self0) { - PyArray_Descr *self = (PyArray_Descr *)self0; - if (self->names) + if (self->names) { return PyTuple_GET_SIZE(self->names); - else return 0; + } + else { + return 0; + } } static PyObject * @@ -12724,7 +12909,7 @@ { PyObject *tup; PyArray_Descr *new; - if (length < 0) + if (length < 0) { return PyErr_Format(PyExc_ValueError, #if (PY_VERSION_HEX < 0x02050000) "Array length must be >= 0, not %d", @@ -12732,8 +12917,11 @@ "Array length must be >= 0, not %zd", #endif length); + } tup = Py_BuildValue("O" NPY_SSIZE_T_PYFMT, self, length); - if (tup == NULL) return NULL; + if (tup == NULL) { + return NULL; + } PyArray_DescrConverter(tup, &new); Py_DECREF(tup); return (PyObject *)new; @@ -12745,11 +12933,9 @@ if (self->names) { if (PyString_Check(op) || PyUnicode_Check(op)) { - PyObject *obj; - obj = PyDict_GetItem(self->fields, op); + PyObject *obj = PyDict_GetItem(self->fields, op); if (obj != NULL) { - PyObject *descr; - descr = PyTuple_GET_ITEM(obj, 0); + PyObject *descr = PyTuple_GET_ITEM(obj, 0); Py_INCREF(descr); return descr; } @@ -12761,12 +12947,12 @@ } else { PyObject *name; - int value; - value = PyArray_PyIntAsInt(op); + int value = PyArray_PyIntAsInt(op); if (!PyErr_Occurred()) { - int size; - size = PyTuple_GET_SIZE(self->names); - if (value < 0) value += size; + int size = PyTuple_GET_SIZE(self->names); + if (value < 0) { + value += size; + } if (value < 0 || value >= size) { PyErr_Format(PyExc_IndexError, "0<=index<%d not %d", @@ -12797,17 +12983,17 @@ (binaryfunc)NULL, descr_repeat, NULL, NULL, - NULL, /* sq_ass_item */ - NULL, /* ssizessizeobjargproc sq_ass_slice */ - 0, /* sq_contains */ - 0, /* sq_inplace_concat */ - 0, /* sq_inplace_repeat */ + NULL, /* sq_ass_item */ + NULL, /* ssizessizeobjargproc sq_ass_slice */ + 0, /* sq_contains */ + 0, /* sq_inplace_concat */ + 0, /* sq_inplace_repeat */ }; static PyMappingMethods descr_as_mapping = { - descr_length, /*mp_length*/ - (binaryfunc)descr_subscript, /*mp_subscript*/ - (objobjargproc)NULL, /*mp_ass_subscript*/ + descr_length, /* mp_length*/ + (binaryfunc)descr_subscript, /* mp_subscript*/ + (objobjargproc)NULL, /* mp_ass_subscript*/ }; /****************** End of Mapping Protocol ******************************/ @@ -12815,70 +13001,71 @@ static PyTypeObject PyArrayDescr_Type = { PyObject_HEAD_INIT(NULL) - 0, /* ob_size */ - "numpy.dtype", /* tp_name */ - sizeof(PyArray_Descr), /* tp_basicsize */ - 0, /* tp_itemsize */ + 0, /* ob_size */ + "numpy.dtype", /* tp_name */ + sizeof(PyArray_Descr), /* tp_basicsize */ + 0, /* tp_itemsize */ /* methods */ - (destructor)arraydescr_dealloc, /* tp_dealloc */ - 0, /* tp_print */ - 0, /* tp_getattr */ - 0, /* tp_setattr */ - 0, /* tp_compare */ - (reprfunc)arraydescr_repr, /* tp_repr */ - 0, /* tp_as_number */ - &descr_as_sequence, /* tp_as_sequence */ - &descr_as_mapping, /* tp_as_mapping */ - 0, /* tp_hash */ - 0, /* tp_call */ - (reprfunc)arraydescr_str, /* tp_str */ - 0, /* tp_getattro */ - 0, /* tp_setattro */ - 0, /* tp_as_buffer */ - Py_TPFLAGS_DEFAULT, /* tp_flags */ - 0, /* tp_doc */ - 0, /* tp_traverse */ - 0, /* tp_clear */ - (richcmpfunc)arraydescr_richcompare, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ - 0, /* tp_iter */ - 0, /* tp_iternext */ - arraydescr_methods, /* tp_methods */ - arraydescr_members, /* tp_members */ - arraydescr_getsets, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - 0, /* tp_init */ - 0, /* tp_alloc */ - arraydescr_new, /* tp_new */ - 0, /* tp_free */ - 0, /* tp_is_gc */ - 0, /* tp_bases */ - 0, /* tp_mro */ - 0, /* tp_cache */ - 0, /* tp_subclasses */ - 0, /* tp_weaklist */ - 0, /* tp_del */ + (destructor)arraydescr_dealloc, /* tp_dealloc */ + 0, /* tp_print */ + 0, /* tp_getattr */ + 0, /* tp_setattr */ + 0, /* tp_compare */ + (reprfunc)arraydescr_repr, /* tp_repr */ + 0, /* tp_as_number */ + &descr_as_sequence, /* tp_as_sequence */ + &descr_as_mapping, /* tp_as_mapping */ + 0, /* tp_hash */ + 0, /* tp_call */ + (reprfunc)arraydescr_str, /* tp_str */ + 0, /* tp_getattro */ + 0, /* tp_setattro */ + 0, /* tp_as_buffer */ + Py_TPFLAGS_DEFAULT, /* tp_flags */ + 0, /* tp_doc */ + 0, /* tp_traverse */ + 0, /* tp_clear */ + (richcmpfunc)arraydescr_richcompare, /* tp_richcompare */ + 0, /* tp_weaklistoffset */ + 0, /* tp_iter */ + 0, /* tp_iternext */ + arraydescr_methods, /* tp_methods */ + arraydescr_members, /* tp_members */ + arraydescr_getsets, /* tp_getset */ + 0, /* tp_base */ + 0, /* tp_dict */ + 0, /* tp_descr_get */ + 0, /* tp_descr_set */ + 0, /* tp_dictoffset */ + 0, /* tp_init */ + 0, /* tp_alloc */ + arraydescr_new, /* tp_new */ + 0, /* tp_free */ + 0, /* tp_is_gc */ + 0, /* tp_bases */ + 0, /* tp_mro */ + 0, /* tp_cache */ + 0, /* tp_subclasses */ + 0, /* tp_weaklist */ + 0, /* tp_del */ #ifdef COUNT_ALLOCS - /* these must be last and never explicitly initialized */ - 0, /* tp_allocs */ - 0, /* tp_frees */ - 0, /* tp_maxalloc */ - 0, /* tp_prev */ - 0, /* *tp_next */ + /* these must be last and never explicitly initialized */ + 0, /* tp_allocs */ + 0, /* tp_frees */ + 0, /* tp_maxalloc */ + 0, /* tp_prev */ + 0, /* *tp_next */ #endif }; -/** Array Flags Object **/ +/* Array Flags Object */ /*NUMPY_API - Get New ArrayFlagsObject -*/ + * + * Get New ArrayFlagsObject + */ static PyObject * PyArray_NewFlagsObject(PyObject *obj) { @@ -12891,11 +13078,12 @@ flags = PyArray_FLAGS(obj); } flagobj = PyArrayFlags_Type.tp_alloc(&PyArrayFlags_Type, 0); - if (flagobj == NULL) return NULL; + if (flagobj == NULL) { + return NULL; + } Py_XINCREF(obj); ((PyArrayFlagsObject *)flagobj)->arr = obj; ((PyArrayFlagsObject *)flagobj)->flags = flags; - return flagobj; } @@ -12933,11 +13121,12 @@ PyObject *item; if (((self->flags & FORTRAN) == FORTRAN) || - ((self->flags & CONTIGUOUS) == CONTIGUOUS)) + ((self->flags & CONTIGUOUS) == CONTIGUOUS)) { item = Py_True; - else + } + else { item = Py_False; - + } Py_INCREF(item); return item; } @@ -12948,11 +13137,12 @@ PyObject *item; if (((self->flags & FORTRAN) == FORTRAN) && - !((self->flags & CONTIGUOUS) == CONTIGUOUS)) + !((self->flags & CONTIGUOUS) == CONTIGUOUS)) { item = Py_True; - else + } + else { item = Py_False; - + } Py_INCREF(item); return item; } @@ -12962,13 +13152,14 @@ { PyObject *item; - if (((self->flags & (ALIGNED|WRITEABLE|FORTRAN)) == \ + if (((self->flags & (ALIGNED|WRITEABLE|FORTRAN)) == (ALIGNED|WRITEABLE|FORTRAN)) && - !((self->flags & CONTIGUOUS) == CONTIGUOUS)) + !((self->flags & CONTIGUOUS) == CONTIGUOUS)) { item = Py_True; - else + } + else { item = Py_False; - + } Py_INCREF(item); return item; } @@ -12990,7 +13181,9 @@ } res = PyObject_CallMethod(self->arr, "setflags", "OOO", Py_None, Py_None, (PyObject_IsTrue(obj) ? Py_True : Py_False)); - if (res == NULL) return -1; + if (res == NULL) { + return -1; + } Py_DECREF(res); return 0; } @@ -13006,7 +13199,9 @@ res = PyObject_CallMethod(self->arr, "setflags", "OOO", Py_None, (PyObject_IsTrue(obj) ? Py_True : Py_False), Py_None); - if (res == NULL) return -1; + if (res == NULL) { + return -1; + } Py_DECREF(res); return 0; } @@ -13022,7 +13217,9 @@ res = PyObject_CallMethod(self->arr, "setflags", "OOO", (PyObject_IsTrue(obj) ? Py_True : Py_False), Py_None, Py_None); - if (res == NULL) return -1; + if (res == NULL) { + return -1; + } Py_DECREF(res); return 0; } @@ -13030,61 +13227,61 @@ static PyGetSetDef arrayflags_getsets[] = { {"contiguous", - (getter)arrayflags_contiguous_get, - NULL, - "", NULL}, + (getter)arrayflags_contiguous_get, + NULL, + "", NULL}, {"c_contiguous", - (getter)arrayflags_contiguous_get, - NULL, - "", NULL}, + (getter)arrayflags_contiguous_get, + NULL, + "", NULL}, {"f_contiguous", - (getter)arrayflags_fortran_get, - NULL, - "", NULL}, + (getter)arrayflags_fortran_get, + NULL, + "", NULL}, {"fortran", - (getter)arrayflags_fortran_get, - NULL, - "", NULL}, + (getter)arrayflags_fortran_get, + NULL, + "", NULL}, {"updateifcopy", - (getter)arrayflags_updateifcopy_get, - (setter)arrayflags_updateifcopy_set, - "", NULL}, + (getter)arrayflags_updateifcopy_get, + (setter)arrayflags_updateifcopy_set, + "", NULL}, {"owndata", - (getter)arrayflags_owndata_get, - NULL, - "", NULL}, + (getter)arrayflags_owndata_get, + NULL, + "", NULL}, {"aligned", - (getter)arrayflags_aligned_get, - (setter)arrayflags_aligned_set, - "", NULL}, + (getter)arrayflags_aligned_get, + (setter)arrayflags_aligned_set, + "", NULL}, {"writeable", - (getter)arrayflags_writeable_get, - (setter)arrayflags_writeable_set, - "", NULL}, + (getter)arrayflags_writeable_get, + (setter)arrayflags_writeable_set, + "", NULL}, {"fnc", - (getter)arrayflags_fnc_get, - NULL, - "", NULL}, + (getter)arrayflags_fnc_get, + NULL, + "", NULL}, {"forc", - (getter)arrayflags_forc_get, - NULL, - "", NULL}, + (getter)arrayflags_forc_get, + NULL, + "", NULL}, {"behaved", - (getter)arrayflags_behaved_get, - NULL, - "", NULL}, + (getter)arrayflags_behaved_get, + NULL, + "", NULL}, {"carray", - (getter)arrayflags_carray_get, - NULL, - "", NULL}, + (getter)arrayflags_carray_get, + NULL, + "", NULL}, {"farray", - (getter)arrayflags_farray_get, - NULL, - "", NULL}, + (getter)arrayflags_farray_get, + NULL, + "", NULL}, {"num", - (getter)arrayflags_num_get, - NULL, - "", NULL}, + (getter)arrayflags_num_get, + NULL, + "", NULL}, {NULL, NULL, NULL, NULL, NULL}, }; @@ -13093,76 +13290,93 @@ { char *key; int n; - if (!PyString_Check(ind)) goto fail; + if (!PyString_Check(ind)) { + goto fail; + } key = PyString_AS_STRING(ind); n = PyString_GET_SIZE(ind); switch(n) { - case 1: - switch(key[0]) { - case 'C': - return arrayflags_contiguous_get(self); - case 'F': - return arrayflags_fortran_get(self); - case 'W': - return arrayflags_writeable_get(self); - case 'B': - return arrayflags_behaved_get(self); - case 'O': - return arrayflags_owndata_get(self); - case 'A': - return arrayflags_aligned_get(self); - case 'U': - return arrayflags_updateifcopy_get(self); - default: - goto fail; - } - break; - case 2: - if (strncmp(key, "CA", n)==0) - return arrayflags_carray_get(self); - if (strncmp(key, "FA", n)==0) - return arrayflags_farray_get(self); - break; - case 3: - if (strncmp(key, "FNC", n)==0) - return arrayflags_fnc_get(self); - break; - case 4: - if (strncmp(key, "FORC", n)==0) - return arrayflags_forc_get(self); - break; - case 6: - if (strncmp(key, "CARRAY", n)==0) - return arrayflags_carray_get(self); - if (strncmp(key, "FARRAY", n)==0) - return arrayflags_farray_get(self); - break; - case 7: - if (strncmp(key,"FORTRAN",n)==0) - return arrayflags_fortran_get(self); - if (strncmp(key,"BEHAVED",n)==0) - return arrayflags_behaved_get(self); - if (strncmp(key,"OWNDATA",n)==0) - return arrayflags_owndata_get(self); - if (strncmp(key,"ALIGNED",n)==0) - return arrayflags_aligned_get(self); - break; - case 9: - if (strncmp(key,"WRITEABLE",n)==0) - return arrayflags_writeable_get(self); - break; - case 10: - if (strncmp(key,"CONTIGUOUS",n)==0) - return arrayflags_contiguous_get(self); - break; - case 12: - if (strncmp(key, "UPDATEIFCOPY", n)==0) - return arrayflags_updateifcopy_get(self); - if (strncmp(key, "C_CONTIGUOUS", n)==0) - return arrayflags_contiguous_get(self); - if (strncmp(key, "F_CONTIGUOUS", n)==0) - return arrayflags_fortran_get(self); - break; + case 1: + switch(key[0]) { + case 'C': + return arrayflags_contiguous_get(self); + case 'F': + return arrayflags_fortran_get(self); + case 'W': + return arrayflags_writeable_get(self); + case 'B': + return arrayflags_behaved_get(self); + case 'O': + return arrayflags_owndata_get(self); + case 'A': + return arrayflags_aligned_get(self); + case 'U': + return arrayflags_updateifcopy_get(self); + default: + goto fail; + } + break; + case 2: + if (strncmp(key, "CA", n) == 0) { + return arrayflags_carray_get(self); + } + if (strncmp(key, "FA", n) == 0) { + return arrayflags_farray_get(self); + } + break; + case 3: + if (strncmp(key, "FNC", n) == 0) { + return arrayflags_fnc_get(self); + } + break; + case 4: + if (strncmp(key, "FORC", n) == 0) { + return arrayflags_forc_get(self); + } + break; + case 6: + if (strncmp(key, "CARRAY", n) == 0) { + return arrayflags_carray_get(self); + } + if (strncmp(key, "FARRAY", n) == 0) { + return arrayflags_farray_get(self); + } + break; + case 7: + if (strncmp(key,"FORTRAN",n) == 0) { + return arrayflags_fortran_get(self); + } + if (strncmp(key,"BEHAVED",n) == 0) { + return arrayflags_behaved_get(self); + } + if (strncmp(key,"OWNDATA",n) == 0) { + return arrayflags_owndata_get(self); + } + if (strncmp(key,"ALIGNED",n) == 0) { + return arrayflags_aligned_get(self); + } + break; + case 9: + if (strncmp(key,"WRITEABLE",n) == 0) { + return arrayflags_writeable_get(self); + } + break; + case 10: + if (strncmp(key,"CONTIGUOUS",n) == 0) { + return arrayflags_contiguous_get(self); + } + break; + case 12: + if (strncmp(key, "UPDATEIFCOPY", n) == 0) { + return arrayflags_updateifcopy_get(self); + } + if (strncmp(key, "C_CONTIGUOUS", n) == 0) { + return arrayflags_contiguous_get(self); + } + if (strncmp(key, "F_CONTIGUOUS", n) == 0) { + return arrayflags_fortran_get(self); + } + break; } fail: @@ -13175,18 +13389,23 @@ { char *key; int n; - if (!PyString_Check(ind)) goto fail; + if (!PyString_Check(ind)) { + goto fail; + } key = PyString_AS_STRING(ind); n = PyString_GET_SIZE(ind); - if (((n==9) && (strncmp(key, "WRITEABLE", n)==0)) || - ((n==1) && (strncmp(key, "W", n)==0))) + if (((n==9) && (strncmp(key, "WRITEABLE", n) == 0)) || + ((n==1) && (strncmp(key, "W", n) == 0))) { return arrayflags_writeable_set(self, item); - else if (((n==7) && (strncmp(key, "ALIGNED", n)==0)) || - ((n==1) && (strncmp(key, "A", n)==0))) + } + else if (((n==7) && (strncmp(key, "ALIGNED", n) == 0)) || + ((n==1) && (strncmp(key, "A", n) == 0))) { return arrayflags_aligned_set(self, item); - else if (((n==12) && (strncmp(key, "UPDATEIFCOPY", n)==0)) || - ((n==1) && (strncmp(key, "U", n)==0))) + } + else if (((n==12) && (strncmp(key, "UPDATEIFCOPY", n) == 0)) || + ((n==1) && (strncmp(key, "U", n) == 0))) { return arrayflags_updateifcopy_set(self, item); + } fail: PyErr_SetString(PyExc_KeyError, "Unknown flag"); @@ -13196,8 +13415,12 @@ static char * _torf_(int flags, int val) { - if ((flags & val) == val) return "True"; - else return "False"; + if ((flags & val) == val) { + return "True"; + } + else { + return "False"; + } } static PyObject * @@ -13219,12 +13442,15 @@ static int arrayflags_compare(PyArrayFlagsObject *self, PyArrayFlagsObject *other) { - if (self->flags == other->flags) + if (self->flags == other->flags) { return 0; - else if (self->flags < other->flags) + } + else if (self->flags < other->flags) { return -1; - else + } + else { return 1; + } } static PyMappingMethods arrayflags_as_mapping = { @@ -13242,9 +13468,9 @@ arrayflags_new(PyTypeObject *NPY_UNUSED(self), PyObject *args, PyObject *NPY_UNUSED(kwds)) { PyObject *arg=NULL; - if (!PyArg_UnpackTuple(args, "flagsobj", 0, 1, &arg)) + if (!PyArg_UnpackTuple(args, "flagsobj", 0, 1, &arg)) { return NULL; - + } if ((arg != NULL) && PyArray_Check(arg)) { return PyArray_NewFlagsObject(arg); } @@ -13258,7 +13484,7 @@ 0, "numpy.flagsobj", sizeof(PyArrayFlagsObject), - 0, /* tp_itemsize */ + 0, /* tp_itemsize */ /* methods */ (destructor)arrayflags_dealloc, /* tp_dealloc */ 0, /* tp_print */ @@ -13283,32 +13509,32 @@ 0, /* tp_weaklistoffset */ 0, /* tp_iter */ 0, /* tp_iternext */ - 0, /* tp_methods */ - 0, /* tp_members */ - arrayflags_getsets, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - 0, /* tp_init */ - 0, /* tp_alloc */ - arrayflags_new, /* tp_new */ - 0, /* tp_free */ - 0, /* tp_is_gc */ - 0, /* tp_bases */ - 0, /* tp_mro */ - 0, /* tp_cache */ - 0, /* tp_subclasses */ - 0, /* tp_weaklist */ - 0, /* tp_del */ + 0, /* tp_methods */ + 0, /* tp_members */ + arrayflags_getsets, /* tp_getset */ + 0, /* tp_base */ + 0, /* tp_dict */ + 0, /* tp_descr_get */ + 0, /* tp_descr_set */ + 0, /* tp_dictoffset */ + 0, /* tp_init */ + 0, /* tp_alloc */ + arrayflags_new, /* tp_new */ + 0, /* tp_free */ + 0, /* tp_is_gc */ + 0, /* tp_bases */ + 0, /* tp_mro */ + 0, /* tp_cache */ + 0, /* tp_subclasses */ + 0, /* tp_weaklist */ + 0, /* tp_del */ #ifdef COUNT_ALLOCS - /* these must be last and never explicitly initialized */ - 0, /* tp_allocs */ - 0, /* tp_frees */ - 0, /* tp_maxalloc */ - 0, /* tp_prev */ - 0, /* *tp_next */ + /* these must be last and never explicitly initialized */ + 0, /* tp_allocs */ + 0, /* tp_frees */ + 0, /* tp_maxalloc */ + 0, /* tp_prev */ + 0, /* *tp_next */ #endif }; From numpy-svn at scipy.org Fri Feb 20 08:30:26 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Fri, 20 Feb 2009 07:30:26 -0600 (CST) Subject: [Numpy-svn] r6424 - trunk/numpy/distutils/fcompiler Message-ID: <20090220133026.219DFC7C02D@scipy.org> Author: cdavid Date: 2009-02-20 07:30:20 -0600 (Fri, 20 Feb 2009) New Revision: 6424 Modified: trunk/numpy/distutils/fcompiler/compaq.py Log: Unhelpful message for compaq fortran compiler. Modified: trunk/numpy/distutils/fcompiler/compaq.py =================================================================== --- trunk/numpy/distutils/fcompiler/compaq.py 2009-02-20 03:40:53 UTC (rev 6423) +++ trunk/numpy/distutils/fcompiler/compaq.py 2009-02-20 13:30:20 UTC (rev 6424) @@ -79,7 +79,7 @@ m.initialize() ar_exe = m.lib except DistutilsPlatformError, msg: - print 'Ignoring "%s" (one should fix me in fcompiler/compaq.py)' % (msg) + pass except AttributeError, msg: if '_MSVCCompiler__root' in str(msg): print 'Ignoring "%s" (I think it is msvccompiler.py bug)' % (msg) From numpy-svn at scipy.org Fri Feb 20 11:39:55 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Fri, 20 Feb 2009 10:39:55 -0600 (CST) Subject: [Numpy-svn] r6425 - in branches/numpy-mingw-w64: . doc/source/reference doc/sphinxext numpy numpy/core numpy/core/code_generators numpy/core/src numpy/core/tests numpy/distutils numpy/distutils/command numpy/distutils/fcompiler numpy/doc numpy/f2py numpy/lib numpy/lib/src numpy/lib/tests numpy/linalg numpy/linalg/tests numpy/ma numpy/ma/tests numpy/numarray numpy/oldnumeric numpy/testing numpy/testing/tests Message-ID: <20090220163955.AA382C7C02D@scipy.org> Author: cdavid Date: 2009-02-20 10:37:01 -0600 (Fri, 20 Feb 2009) New Revision: 6425 Added: branches/numpy-mingw-w64/doc/source/reference/maskedarray.baseclass.rst branches/numpy-mingw-w64/doc/source/reference/maskedarray.generic.rst branches/numpy-mingw-w64/doc/source/reference/maskedarray.rst branches/numpy-mingw-w64/numpy/core/src/numpyos.c branches/numpy-mingw-w64/numpy/doc/constants.py branches/numpy-mingw-w64/numpy/lib/_iotools.py branches/numpy-mingw-w64/numpy/lib/recfunctions.py branches/numpy-mingw-w64/numpy/lib/tests/test__iotools.py branches/numpy-mingw-w64/numpy/lib/tests/test_recfunctions.py branches/numpy-mingw-w64/numpy/testing/tests/test_decorators.py Removed: branches/numpy-mingw-w64/numpy/testing/parametric.py Modified: branches/numpy-mingw-w64/ branches/numpy-mingw-w64/LICENSE.txt branches/numpy-mingw-w64/MANIFEST.in branches/numpy-mingw-w64/THANKS.txt branches/numpy-mingw-w64/doc/source/reference/arrays.classes.rst branches/numpy-mingw-w64/doc/source/reference/arrays.ndarray.rst branches/numpy-mingw-w64/doc/source/reference/arrays.rst branches/numpy-mingw-w64/doc/sphinxext/docscrape.py branches/numpy-mingw-w64/doc/sphinxext/docscrape_sphinx.py branches/numpy-mingw-w64/doc/sphinxext/numpydoc.py branches/numpy-mingw-w64/doc/sphinxext/plot_directive.py branches/numpy-mingw-w64/numpy/add_newdocs.py branches/numpy-mingw-w64/numpy/core/SConscript branches/numpy-mingw-w64/numpy/core/_internal.py branches/numpy-mingw-w64/numpy/core/code_generators/generate_numpy_api.py branches/numpy-mingw-w64/numpy/core/setup.py branches/numpy-mingw-w64/numpy/core/src/arraymethods.c branches/numpy-mingw-w64/numpy/core/src/arrayobject.c branches/numpy-mingw-w64/numpy/core/src/arraytypes.inc.src branches/numpy-mingw-w64/numpy/core/src/multiarraymodule.c branches/numpy-mingw-w64/numpy/core/src/scalarmathmodule.c.src branches/numpy-mingw-w64/numpy/core/src/scalartypes.inc.src branches/numpy-mingw-w64/numpy/core/tests/test_memmap.py branches/numpy-mingw-w64/numpy/core/tests/test_multiarray.py branches/numpy-mingw-w64/numpy/core/tests/test_numerictypes.py branches/numpy-mingw-w64/numpy/core/tests/test_print.py branches/numpy-mingw-w64/numpy/core/tests/test_regression.py branches/numpy-mingw-w64/numpy/core/tests/test_unicode.py branches/numpy-mingw-w64/numpy/ctypeslib.py branches/numpy-mingw-w64/numpy/distutils/command/config.py branches/numpy-mingw-w64/numpy/distutils/command/scons.py branches/numpy-mingw-w64/numpy/distutils/fcompiler/compaq.py branches/numpy-mingw-w64/numpy/distutils/fcompiler/gnu.py branches/numpy-mingw-w64/numpy/distutils/lib2def.py branches/numpy-mingw-w64/numpy/distutils/mingw32ccompiler.py branches/numpy-mingw-w64/numpy/distutils/misc_util.py branches/numpy-mingw-w64/numpy/distutils/system_info.py branches/numpy-mingw-w64/numpy/f2py/cfuncs.py branches/numpy-mingw-w64/numpy/f2py/crackfortran.py branches/numpy-mingw-w64/numpy/f2py/f2py.1 branches/numpy-mingw-w64/numpy/f2py/f2py2e.py branches/numpy-mingw-w64/numpy/f2py/rules.py branches/numpy-mingw-w64/numpy/lib/__init__.py branches/numpy-mingw-w64/numpy/lib/arraysetops.py branches/numpy-mingw-w64/numpy/lib/function_base.py branches/numpy-mingw-w64/numpy/lib/getlimits.py branches/numpy-mingw-w64/numpy/lib/index_tricks.py branches/numpy-mingw-w64/numpy/lib/info.py branches/numpy-mingw-w64/numpy/lib/io.py branches/numpy-mingw-w64/numpy/lib/src/_compiled_base.c branches/numpy-mingw-w64/numpy/lib/tests/test_function_base.py branches/numpy-mingw-w64/numpy/lib/tests/test_getlimits.py branches/numpy-mingw-w64/numpy/lib/tests/test_io.py branches/numpy-mingw-w64/numpy/lib/utils.py branches/numpy-mingw-w64/numpy/linalg/linalg.py branches/numpy-mingw-w64/numpy/linalg/tests/test_linalg.py branches/numpy-mingw-w64/numpy/ma/core.py branches/numpy-mingw-w64/numpy/ma/extras.py branches/numpy-mingw-w64/numpy/ma/mrecords.py branches/numpy-mingw-w64/numpy/ma/tests/test_core.py branches/numpy-mingw-w64/numpy/ma/tests/test_extras.py branches/numpy-mingw-w64/numpy/ma/tests/test_mrecords.py branches/numpy-mingw-w64/numpy/ma/tests/test_subclassing.py branches/numpy-mingw-w64/numpy/ma/testutils.py branches/numpy-mingw-w64/numpy/numarray/util.py branches/numpy-mingw-w64/numpy/oldnumeric/arrayfns.py branches/numpy-mingw-w64/numpy/oldnumeric/mlab.py branches/numpy-mingw-w64/numpy/oldnumeric/rng.py branches/numpy-mingw-w64/numpy/testing/__init__.py branches/numpy-mingw-w64/numpy/testing/decorators.py branches/numpy-mingw-w64/numpy/testing/noseclasses.py branches/numpy-mingw-w64/numpy/testing/nosetester.py branches/numpy-mingw-w64/numpy/testing/numpytest.py Log: Merged revisions 6185-6187,6191-6221,6235-6238,6240-6241,6244,6250-6251,6253,6256,6258,6260-6261,6263,6265-6266,6268,6271,6283-6286,6291-6316,6320-6352,6354,6356,6358-6368,6370-6373,6398-6400,6410,6421-6424 via svnmerge from http://svn.scipy.org/svn/numpy/trunk ................ r6185 | cdavid | 2008-12-22 01:19:14 +0900 (Mon, 22 Dec 2008) | 1 line Add doc sources so that sdist tarball contains them. ................ r6186 | pierregm | 2008-12-22 19:01:51 +0900 (Mon, 22 Dec 2008) | 4 lines testutils: assert_array_compare : make sure that the comparison is performed on ndarrays, and make sure we use the np version of the comparison function. core: * Try not to touch the data in unary/binary ufuncs, (including inplace) ................ r6187 | pearu | 2008-12-22 19:05:00 +0900 (Mon, 22 Dec 2008) | 1 line Fix a bug. ................ r6191 | cdavid | 2008-12-23 13:10:59 +0900 (Tue, 23 Dec 2008) | 1 line Fix typos in the comments for manifest. ................ r6192 | cdavid | 2008-12-23 13:11:12 +0900 (Tue, 23 Dec 2008) | 1 line Use msvcrt values if available for manifest generation: only there starting from python 2.6.1. ................ r6193 | pearu | 2008-12-23 18:02:15 +0900 (Tue, 23 Dec 2008) | 1 line Fix issue 964: f2py python 2.6, 2.6.1 support. ................ r6194 | pierregm | 2008-12-24 08:43:43 +0900 (Wed, 24 Dec 2008) | 12 lines testutils: * assert_equal : use assert_equal_array on records * assert_array_compare : prevent the common mask to be back-propagated to the initial input arrays. * assert_equal_array : use operator.__eq__ instead of ma.equal * assert_equal_less: use operator.__less__ instead of ma.less core: * Fixed _check_fill_value for nested flexible types * Add a ndtype option to _make_mask_descr * Fixed mask_or for nested flexible types * Fixed the printing of masked arrays w/ flexible types. ................ r6195 | cdavid | 2008-12-26 21:16:45 +0900 (Fri, 26 Dec 2008) | 1 line Update to handle numscons 0.10.0 and above. ................ r6196 | cdavid | 2008-12-26 21:36:19 +0900 (Fri, 26 Dec 2008) | 1 line Do not import msvcrt globally in mingw32compiler module, since the module is imported on all platforms. ................ r6197 | cdavid | 2008-12-26 23:39:55 +0900 (Fri, 26 Dec 2008) | 1 line Do not test for functions already tested by python configure script. ................ r6198 | cdavid | 2008-12-27 14:56:58 +0900 (Sat, 27 Dec 2008) | 1 line BUG: Add a runtime check about endianness, to detect bug 4728 in python on Mac OS X. ................ r6199 | cdavid | 2008-12-27 19:06:25 +0900 (Sat, 27 Dec 2008) | 1 line Fix some typo/syntax errors when converting dict access to a function in manifest generation. ................ r6200 | cdavid | 2008-12-27 19:15:30 +0900 (Sat, 27 Dec 2008) | 1 line BUG (#970): fix a python 2.6 bug in distutils which caused an unhelpful Error:None message when trying to build with no VS installed and without the -c mingw32 option. ................ r6201 | cdavid | 2008-12-27 19:30:49 +0900 (Sat, 27 Dec 2008) | 1 line Improve the error message when initializing compiler failed. ................ r6202 | cdavid | 2008-12-27 19:32:05 +0900 (Sat, 27 Dec 2008) | 1 line Try to initialize the msvc compiler before the general code to detect the error early. ................ r6203 | cdavid | 2008-12-27 19:43:41 +0900 (Sat, 27 Dec 2008) | 1 line BUG (#970): this commit should fix the actual bug, which albeeit linked to commir r6200, was caused in anoter code path. ................ r6204 | cdavid | 2008-12-27 19:57:05 +0900 (Sat, 27 Dec 2008) | 1 line Fix manifest generation. ................ r6205 | cdavid | 2008-12-27 20:46:08 +0900 (Sat, 27 Dec 2008) | 1 line BUG (#827): close temp file before reopning them on windows, and make sure they are not automatically deleted on close either (2.6and higher specific). ................ r6206 | cdavid | 2008-12-27 21:18:47 +0900 (Sat, 27 Dec 2008) | 1 line Do not define the union for runtime endianness detection if we don't check endianness. ................ r6207 | cdavid | 2008-12-27 22:48:52 +0900 (Sat, 27 Dec 2008) | 1 line Start working on formatting failure on 2.6: copy how python does complex formatting. ................ r6208 | cdavid | 2008-12-27 23:44:11 +0900 (Sat, 27 Dec 2008) | 1 line Fix formatting for purely imaginary complex numbers. ................ r6209 | cdavid | 2008-12-27 23:53:15 +0900 (Sat, 27 Dec 2008) | 1 line More work on formatting float. ................ r6210 | cdavid | 2008-12-27 23:59:41 +0900 (Sat, 27 Dec 2008) | 1 line Finish formatting fixes for float scalar arrays. ................ r6211 | cdavid | 2008-12-28 00:12:20 +0900 (Sun, 28 Dec 2008) | 1 line Include umath_funcs_c99 in multiarray so that we can use isinf and co macros. ................ r6212 | cdavid | 2008-12-28 01:15:04 +0900 (Sun, 28 Dec 2008) | 1 line Include config.h before our C99 math compat layer. ................ r6213 | cdavid | 2008-12-28 01:15:41 +0900 (Sun, 28 Dec 2008) | 1 line Fix formatting. ................ r6214 | cdavid | 2008-12-28 01:16:18 +0900 (Sun, 28 Dec 2008) | 1 line Do not define FMTR and FMTI macros, as those are already defined on some platforms. ................ r6215 | cdavid | 2008-12-28 01:16:52 +0900 (Sun, 28 Dec 2008) | 1 line More formatting fixes. ................ r6216 | cdavid | 2008-12-28 01:17:27 +0900 (Sun, 28 Dec 2008) | 1 line Remove undef of removed macro. ................ r6217 | cdavid | 2008-12-28 01:33:40 +0900 (Sun, 28 Dec 2008) | 1 line Do not use PyOS_ascii_formatd, as it does not handle long double correctly. ................ r6218 | cdavid | 2008-12-28 02:19:40 +0900 (Sun, 28 Dec 2008) | 1 line Try ugly hack to circumvent long double brokenness with mingw. ................ r6219 | cdavid | 2008-12-28 02:25:50 +0900 (Sun, 28 Dec 2008) | 1 line Use ugly hack for mingw long double pb with complex format function as well. ................ r6220 | cdavid | 2008-12-28 12:18:20 +0900 (Sun, 28 Dec 2008) | 1 line Revert formatting changes: ascii_formatd only works for double, so we can't use it as it is for our formatting needs. ................ r6221 | cdavid | 2008-12-28 15:44:06 +0900 (Sun, 28 Dec 2008) | 1 line Do not add doc sources through add_data_dir: it will put the docs alongside numpy, as a separate package, which is not what we want. Use the manifest instead, since that's the only way I know of to include something in sdist-generated tarballs. ................ r6235 | cdavid | 2008-12-29 16:57:52 +0900 (Mon, 29 Dec 2008) | 13 lines Merged revisions 6233-6234 via svnmerge from http://svn.scipy.org/svn/numpy/branches/fix_float_format ........ r6233 | cdavid | 2008-12-29 12:49:09 +0900 (Mon, 29 Dec 2008) | 1 line Use parametric tests for format tests so that it is clearer which type is failing. ........ r6234 | cdavid | 2008-12-29 12:49:27 +0900 (Mon, 29 Dec 2008) | 1 line Fix formatting tests: cfloat and cdouble as well as np.float and np.double are the same; make sure we test 4 bytes float. ........ ................ r6236 | cdavid | 2008-12-29 17:02:15 +0900 (Mon, 29 Dec 2008) | 1 line Add nan/inf tests for formatting. ................ r6237 | cdavid | 2008-12-29 17:26:04 +0900 (Mon, 29 Dec 2008) | 1 line Add test for real float types locale independance. ................ r6238 | cdavid | 2008-12-29 17:35:06 +0900 (Mon, 29 Dec 2008) | 1 line Clearer error messages for formatting failures. ................ r6240 | cdavid | 2008-12-30 12:48:11 +0900 (Tue, 30 Dec 2008) | 1 line Add tests for print of float types. ................ r6241 | cdavid | 2008-12-30 12:56:54 +0900 (Tue, 30 Dec 2008) | 1 line Add print tests for complex types. ................ r6244 | cdavid | 2008-12-30 13:20:48 +0900 (Tue, 30 Dec 2008) | 1 line Fix test for print: forgot to make sure the value is a float before comparing it. ................ r6250 | cdavid | 2008-12-30 14:02:28 +0900 (Tue, 30 Dec 2008) | 17 lines Merged revisions 6247-6249 via svnmerge from http://svn.scipy.org/svn/numpy/branches/fix_float_format ........ r6247 | cdavid | 2008-12-30 13:41:37 +0900 (Tue, 30 Dec 2008) | 1 line Handle 1e10 specially, as it is the limit where exp notation is shorter than decimal for single precision, but not for double (python native one). ........ r6248 | cdavid | 2008-12-30 13:47:38 +0900 (Tue, 30 Dec 2008) | 1 line Refactor a bit redirected output print test. ........ r6249 | cdavid | 2008-12-30 13:49:31 +0900 (Tue, 30 Dec 2008) | 1 line Fix test for single precision print. ........ ................ r6251 | cdavid | 2008-12-30 14:12:50 +0900 (Tue, 30 Dec 2008) | 1 line Use np.inf instead of float('inf'), as the later does not work on windows for python < 2.6. ................ r6253 | cdavid | 2008-12-30 14:15:09 +0900 (Tue, 30 Dec 2008) | 1 line Fix typo in test. ................ r6256 | cdavid | 2008-12-30 14:34:22 +0900 (Tue, 30 Dec 2008) | 1 line Special case float tests on windows: python 2.5 and below have >=3 digits in the exp. ................ r6258 | cdavid | 2008-12-30 14:42:03 +0900 (Tue, 30 Dec 2008) | 1 line Hardcode reference for inf/nan-involved values. ................ r6260 | cdavid | 2008-12-30 14:50:18 +0900 (Tue, 30 Dec 2008) | 1 line Fix more formatting tests on win32. ................ r6261 | cdavid | 2008-12-30 14:52:16 +0900 (Tue, 30 Dec 2008) | 1 line Fix some more redirected output print tests. ................ r6263 | cdavid | 2008-12-30 15:01:31 +0900 (Tue, 30 Dec 2008) | 1 line More fixes for print tests. ................ r6265 | cdavid | 2008-12-30 15:03:56 +0900 (Tue, 30 Dec 2008) | 1 line Fix typo. ................ r6266 | cdavid | 2008-12-30 15:08:06 +0900 (Tue, 30 Dec 2008) | 1 line Fix typo. ................ r6268 | cdavid | 2008-12-30 15:12:26 +0900 (Tue, 30 Dec 2008) | 1 line complex scalar arrays cannot be created from real/imag args: wrap init values in a complex. ................ r6271 | cdavid | 2008-12-30 15:32:03 +0900 (Tue, 30 Dec 2008) | 1 line Do not use dict for reference: hashing on scalar arrays does not work as I expected. ................ r6283 | ptvirtan | 2008-12-31 10:14:47 +0900 (Wed, 31 Dec 2008) | 1 line Fix #951: make tests to clean temp files properly ................ r6284 | jarrod.millman | 2009-01-01 08:25:03 +0900 (Thu, 01 Jan 2009) | 2 lines ran reindent ................ r6285 | alan.mcintyre | 2009-01-01 08:46:34 +0900 (Thu, 01 Jan 2009) | 15 lines Remove the following deprecated items from numpy.testing: - ParametricTestCase - The following arguments from numpy.testing.Tester.test(): level, verbosity, all, sys_argv, testcase_pattern - Path manipulation functions: set_package_path, set_local_path, restore_path - NumpyTestCase, NumpyTest Also separated testing parameter setup from NoseTester.test into NoseTester.prepare_test_args for use in a utility script for valgrind testing (see NumPy ticket #784). ................ r6286 | jarrod.millman | 2009-01-01 16:56:53 +0900 (Thu, 01 Jan 2009) | 2 lines add default include dir for Fedora/Red Hat (see SciPy ticket 817) ................ r6291 | cdavid | 2009-01-04 19:57:39 +0900 (Sun, 04 Jan 2009) | 1 line Do not import md5 on python >= 2.6; use hashlib instead. ................ r6292 | cdavid | 2009-01-04 20:08:16 +0900 (Sun, 04 Jan 2009) | 1 line Do not use popen* but subprocess.Popen instead. ................ r6293 | cdavid | 2009-01-04 21:03:29 +0900 (Sun, 04 Jan 2009) | 1 line Revert md5 change: hashlib.md5 is not a drop-in replacement for md5. ................ r6294 | pierregm | 2009-01-05 05:16:00 +0900 (Mon, 05 Jan 2009) | 2 lines * adapted default_fill_value for flexible datatype * fixed max/minimum_fill_value for flexible datatype ................ r6295 | stefan | 2009-01-06 06:51:18 +0900 (Tue, 06 Jan 2009) | 1 line Credit more developers. ................ r6296 | pierregm | 2009-01-06 07:52:21 +0900 (Tue, 06 Jan 2009) | 1 line *moved the printing templates out of MaskedArray.__repr__ ................ r6297 | stefan | 2009-01-06 19:09:00 +0900 (Tue, 06 Jan 2009) | 1 line Use new-style classes with multiple-inheritance to address bug in IronPython. ................ r6298 | pierregm | 2009-01-07 05:35:37 +0900 (Wed, 07 Jan 2009) | 1 line * Bugfix #961 ................ r6299 | pierregm | 2009-01-08 03:14:12 +0900 (Thu, 08 Jan 2009) | 1 line * Fixed iadd/isub/imul when the base array has no mask but the other array does ................ r6300 | pierregm | 2009-01-08 07:34:51 +0900 (Thu, 08 Jan 2009) | 3 lines * Renamed `torecords` to `toflex`, keeping `torecords` as an alias * Introduced `fromflex`, to reconstruct a masked_array from the output of `toflex` (can?\226?\128?\153t `use fromrecords` as it would clash with `numpy.ma.mrecords.fromrecords`) * Fixed a bug in MaskedBinaryOperation (#979) (wrong array broadcasting) ................ r6301 | cdavid | 2009-01-08 18:19:00 +0900 (Thu, 08 Jan 2009) | 1 line Avoid putting things into stderr when errors occurs in f2py wrappers; put all the info in the python error string instead. ................ r6302 | cdavid | 2009-01-09 00:11:32 +0900 (Fri, 09 Jan 2009) | 1 line Fix python 2.4 issue. ................ r6303 | chanley | 2009-01-09 01:30:01 +0900 (Fri, 09 Jan 2009) | 1 line Fix test_print.py function _test_locale_independance() since str(1.2) does not use the LC_NUMERIC locale to convert numbers. Fix from Mark Sienkiewicz. ................ r6304 | cdavid | 2009-01-09 04:22:21 +0900 (Fri, 09 Jan 2009) | 1 line Revert buggy test fix for locale independecce. ................ r6305 | pierregm | 2009-01-09 05:02:29 +0900 (Fri, 09 Jan 2009) | 2 lines * Add __eq__ and __ne__ for support of flexible arrays. * Fixed .filled for nested structures ................ r6306 | pierregm | 2009-01-09 06:51:04 +0900 (Fri, 09 Jan 2009) | 1 line * Remove a debugging print statement. ................ r6307 | jarrod.millman | 2009-01-09 11:14:35 +0900 (Fri, 09 Jan 2009) | 2 lines Updated license file ................ r6308 | cdavid | 2009-01-09 14:26:58 +0900 (Fri, 09 Jan 2009) | 1 line Tag formatting unit tests as known failures. ................ r6309 | jarrod.millman | 2009-01-09 17:59:29 +0900 (Fri, 09 Jan 2009) | 2 lines should be more reliable way to determine what bit platform ................ r6310 | jarrod.millman | 2009-01-09 18:14:17 +0900 (Fri, 09 Jan 2009) | 2 lines better default library paths for 64bit arch ................ r6311 | jarrod.millman | 2009-01-09 18:57:15 +0900 (Fri, 09 Jan 2009) | 2 lines simplification suggested by stefan ................ r6312 | jarrod.millman | 2009-01-09 19:02:09 +0900 (Fri, 09 Jan 2009) | 2 lines switch the order [lib,lib64] --> [lib64,lib] ................ r6313 | jarrod.millman | 2009-01-09 19:18:29 +0900 (Fri, 09 Jan 2009) | 2 lines removed unneeded import ................ r6314 | jarrod.millman | 2009-01-10 04:37:16 +0900 (Sat, 10 Jan 2009) | 2 lines can't use append an int to a string ................ r6315 | pierregm | 2009-01-10 05:18:12 +0900 (Sat, 10 Jan 2009) | 2 lines * Added flatten_structured_arrays * Fixed _get_recordarray for nested structures ................ r6316 | pierregm | 2009-01-10 10:53:05 +0900 (Sat, 10 Jan 2009) | 1 line * Add flatten_structured_array to the namespace ................ r6320 | pierregm | 2009-01-14 06:01:58 +0900 (Wed, 14 Jan 2009) | 9 lines numpy.ma.core: * introduced baseclass, sharedmask and hardmask as readonly properties of MaskedArray * docstrings update numpy.ma.extras: * docstring updates docs/reference * introduced maskedarray, maskedarray.baseclass, maskedarray.generic ................ r6321 | stefan | 2009-01-14 16:14:27 +0900 (Wed, 14 Jan 2009) | 2 lines Docstring: remove old floating point arithmetic, parallel execution and postponed import references. ................ r6322 | stefan | 2009-01-14 16:55:16 +0900 (Wed, 14 Jan 2009) | 1 line Fix printing of limits. ................ r6323 | stefan | 2009-01-14 16:56:10 +0900 (Wed, 14 Jan 2009) | 1 line Fix finfo to work on all instances, not just NumPy scalars. ................ r6324 | pierregm | 2009-01-17 09:15:15 +0900 (Sat, 17 Jan 2009) | 1 line * fixed _arraymethod.__call__ for structured arrays ................ r6325 | ptvirtan | 2009-01-18 06:24:13 +0900 (Sun, 18 Jan 2009) | 3 lines Make `trapz` accept 1-D `x` parameter for n-d `y`, even if axis != -1. Additional tests included. ................ r6326 | pierregm | 2009-01-19 17:53:53 +0900 (Mon, 19 Jan 2009) | 3 lines * renamed FlatIter to MaskedIterator * added __getitem__ to MaskedIterator ................ r6327 | pierregm | 2009-01-19 18:01:24 +0900 (Mon, 19 Jan 2009) | 2 lines * replace np.asarray by np.asanyarray in unique1d ................ r6328 | pierregm | 2009-01-19 18:04:20 +0900 (Mon, 19 Jan 2009) | 2 lines * add intersect1d, intersect1d_nu, setdiff1d, setmember1d, setxor1d, unique1d, union1d * use np.atleast1d instead of ma.atleast1d ................ r6329 | pierregm | 2009-01-20 06:22:52 +0900 (Tue, 20 Jan 2009) | 3 lines * lib : introduced _iotools * lib.io : introduced genfromtxt, ndfromtxt, mafromtxt, recfromtxt, recfromcsv. ................ r6330 | pierregm | 2009-01-22 14:37:36 +0900 (Thu, 22 Jan 2009) | 1 line * genfromtxt : if names is True, accept a line starting with a comment character as header. ................ r6331 | pierregm | 2009-01-22 14:40:25 +0900 (Thu, 22 Jan 2009) | 1 line * added recfunctions, a collection of utilities to manipulate structured arrays. ................ r6332 | pierregm | 2009-01-23 03:21:32 +0900 (Fri, 23 Jan 2009) | 2 lines * fixed a machine-dependent issue on default int (' 0) break - if i != 0: - return i + if all_exists: + return image_names # We didn't find the files, so build them print "-- Plotting figures %s" % output_base @@ -212,31 +230,24 @@ matplotlib.rcdefaults() matplotlib.rcParams.update(config.plot_rcparams) - try: - run_code(code, code_path) - except: - raise - s = cbook.exception_to_str("Exception running plot %s" % code_path) - warnings.warn(s) - return 0 + # Run code + run_code(code, code_path) + # Collect images + image_names = [] + fig_managers = _pylab_helpers.Gcf.get_all_fig_managers() for i, figman in enumerate(fig_managers): + if len(fig_managers) == 1: + name = output_base + else: + name = "%s_%02d" % (output_base, i) + image_names.append(name) for format, dpi in formats: - if len(fig_managers) == 1: - name = output_base - else: - name = "%s_%02d" % (output_base, i) path = os.path.join(output_dir, '%s.%s' % (name, format)) - try: - figman.canvas.figure.savefig(path, dpi=dpi) - except: - s = cbook.exception_to_str("Exception running plot %s" - % code_path) - warnings.warn(s) - return 0 + figman.canvas.figure.savefig(path, dpi=dpi) - return len(fig_managers) + return image_names #------------------------------------------------------------------------------ # Generating output @@ -303,7 +314,7 @@ document.attributes['_plot_counter'] = counter output_base = '%d-%s' % (counter, os.path.basename(file_name)) - rel_name = relative_path(file_name, setup.confdir) + rel_name = relpath(file_name, setup.confdir) base, ext = os.path.splitext(output_base) if ext in ('.py', '.rst', '.txt'): @@ -334,13 +345,19 @@ f.write(unescape_doctest(code)) f.close() - source_link = relative_path(target_name, rst_dir) + source_link = relpath(target_name, rst_dir) # determine relative reference - link_dir = relative_path(output_dir, rst_dir) + link_dir = relpath(output_dir, rst_dir) # make figures - num_figs = makefig(code, file_name, output_dir, output_base, config) + try: + image_names = makefig(code, file_name, output_dir, output_base, config) + except RuntimeError, err: + reporter = state.memo.reporter + sm = reporter.system_message(3, "Exception occurred rendering plot", + line=lineno) + return [sm] # generate output if options['include-source']: @@ -353,20 +370,6 @@ else: source_code = "" - if num_figs > 0: - image_names = [] - for i in range(num_figs): - if num_figs == 1: - image_names.append(output_base) - else: - image_names.append("%s_%02d" % (output_base, i)) - else: - reporter = state.memo.reporter - sm = reporter.system_message(3, "Exception occurred rendering plot", - line=lineno) - return [sm] - - opts = [':%s: %s' % (key, val) for key, val in options.items() if key in ('alt', 'height', 'width', 'scale', 'align', 'class')] @@ -381,24 +384,49 @@ if len(lines): state_machine.insert_input( lines, state_machine.input_lines.source(0)) + return [] -def relative_path(target, base): - target = os.path.abspath(os.path.normpath(target)) - base = os.path.abspath(os.path.normpath(base)) +if hasattr(os.path, 'relpath'): + relpath = os.path.relpath +else: + def relpath(target, base=os.curdir): + """ + Return a relative path to the target from either the current + dir or an optional base dir. Base can be a directory + specified either as absolute or relative to current dir. + """ - target_parts = target.split(os.path.sep) - base_parts = base.split(os.path.sep) - rel_parts = 0 + if not os.path.exists(target): + raise OSError, 'Target does not exist: '+target - while target_parts and base_parts and target_parts[0] == base_parts[0]: - target_parts.pop(0) - base_parts.pop(0) + if not os.path.isdir(base): + raise OSError, 'Base is not a directory or does not exist: '+base - rel_parts += len(base_parts) - return os.path.sep.join([os.path.pardir] * rel_parts + target_parts) + base_list = (os.path.abspath(base)).split(os.sep) + target_list = (os.path.abspath(target)).split(os.sep) + # On the windows platform the target may be on a completely + # different drive from the base. + if os.name in ['nt','dos','os2'] and base_list[0] <> target_list[0]: + raise OSError, 'Target is on a different drive to base. Target: '+target_list[0].upper()+', base: '+base_list[0].upper() + + # Starting from the filepath root, work out how much of the + # filepath is shared by base and target. + for i in range(min(len(base_list), len(target_list))): + if base_list[i] <> target_list[i]: break + else: + # If we broke out of the loop, i is pointing to the first + # differing path elements. If we didn't break out of the + # loop, i is pointing to identical path elements. + # Increment i so that in all cases it points to the first + # differing path elements. + i+=1 + + rel_list = [os.pardir] * (len(base_list)-i) + target_list[i:] + return os.path.join(*rel_list) + #------------------------------------------------------------------------------ # plot:: directive registration etc. #------------------------------------------------------------------------------ @@ -412,21 +440,11 @@ from docutils.parsers.rst.directives.images import Image align = Image.align -try: - from docutils.parsers.rst import Directive -except ImportError: - from docutils.parsers.rst.directives import _directives +def plot_directive(name, arguments, options, content, lineno, + content_offset, block_text, state, state_machine): + return run(arguments, content, options, state_machine, state, lineno) - def plot_directive(name, arguments, options, content, lineno, - content_offset, block_text, state, state_machine): - return run(arguments, content, options, state_machine, state, lineno) - plot_directive.__doc__ = __doc__ -else: - class plot_directive(Directive): - def run(self): - return run(self.arguments, self.content, self.options, - self.state_machine, self.state, self.lineno) - plot_directive.__doc__ = __doc__ +plot_directive.__doc__ = __doc__ def _option_boolean(arg): if not arg or not arg.strip(): Modified: branches/numpy-mingw-w64/numpy/add_newdocs.py =================================================================== --- branches/numpy-mingw-w64/numpy/add_newdocs.py 2009-02-20 13:30:20 UTC (rev 6424) +++ branches/numpy-mingw-w64/numpy/add_newdocs.py 2009-02-20 16:37:01 UTC (rev 6425) @@ -8,140 +8,6 @@ from lib import add_newdoc -add_newdoc('numpy.core', 'dtype', -"""Create a data type. - -A numpy array is homogeneous, and contains elements described by a -dtype. A dtype can be constructed from different combinations of -fundamental numeric types, as illustrated below. - -Examples --------- - -Using array-scalar type: ->>> np.dtype(np.int16) -dtype('int16') - -Record, one field name 'f1', containing int16: ->>> np.dtype([('f1', np.int16)]) -dtype([('f1', '>> np.dtype([('f1', [('f1', np.int16)])]) -dtype([('f1', [('f1', '>> np.dtype([('f1', np.uint), ('f2', np.int32)]) -dtype([('f1', '>> np.dtype([('a','f8'),('b','S10')]) -dtype([('a', '>> np.dtype("i4, (2,3)f8") -dtype([('f0', '>> np.dtype([('hello',(np.int,3)),('world',np.void,10)]) -dtype([('hello', '>> np.dtype((np.int16, {'x':(np.int8,0), 'y':(np.int8,1)})) -dtype(('>> np.dtype({'names':['gender','age'], 'formats':['S1',np.uint8]}) -dtype([('gender', '|S1'), ('age', '|u1')]) - -Offsets in bytes, here 0 and 25: ->>> np.dtype({'surname':('S25',0),'age':(np.uint8,25)}) -dtype([('surname', '|S25'), ('age', '|u1')]) - -""") - -add_newdoc('numpy.core', 'dtype', - """ - dtype(obj, align=False, copy=False) - - Create a data type object. - - A numpy array is homogeneous, and contains elements described by a - dtype object. A dtype object can be constructed from different - combinations of fundamental numeric types. - - Parameters - ---------- - obj - Object to be converted to a data type object. - align : bool, optional - Add padding to the fields to match what a C compiler would output - for a similar C-struct. Can be ``True`` only if `obj` is a dictionary - or a comma-separated string. - copy : bool, optional - Make a new copy of the data-type object. If ``False``, the result - may just be a reference to a built-in data-type object. - - Examples - -------- - Using array-scalar type: - - >>> np.dtype(np.int16) - dtype('int16') - - Record, one field name 'f1', containing int16: - - >>> np.dtype([('f1', np.int16)]) - dtype([('f1', '>> np.dtype([('f1', [('f1', np.int16)])]) - dtype([('f1', [('f1', '>> np.dtype([('f1', np.uint), ('f2', np.int32)]) - dtype([('f1', '>> np.dtype([('a','f8'),('b','S10')]) - dtype([('a', '>> np.dtype("i4, (2,3)f8") - dtype([('f0', '>> np.dtype([('hello',(np.int,3)),('world',np.void,10)]) - dtype([('hello', '>> np.dtype((np.int16, {'x':(np.int8,0), 'y':(np.int8,1)})) - dtype(('>> np.dtype({'names':['gender','age'], 'formats':['S1',np.uint8]}) - dtype([('gender', '|S1'), ('age', '|u1')]) - - Offsets in bytes, here 0 and 25: - - >>> np.dtype({'surname':('S25',0),'age':(np.uint8,25)}) - dtype([('surname', '|S25'), ('age', '|u1')]) - - """) - ############################################################################### # # flatiter @@ -150,7 +16,12 @@ # ############################################################################### -# attributes +add_newdoc('numpy.core', 'flatiter', + """ + """) + +# flatiter attributes + add_newdoc('numpy.core', 'flatiter', ('base', """documentation needed @@ -170,9 +41,8 @@ """)) +# flatiter functions - -# functions add_newdoc('numpy.core', 'flatiter', ('__array__', """__array__(type=None) Get array from iterator @@ -191,37 +61,37 @@ # ############################################################################### +add_newdoc('numpy.core', 'broadcast', + """ + """) + # attributes + add_newdoc('numpy.core', 'broadcast', ('index', """current index in broadcasted result """)) - add_newdoc('numpy.core', 'broadcast', ('iters', """tuple of individual iterators """)) - add_newdoc('numpy.core', 'broadcast', ('nd', """number of dimensions of broadcasted result """)) - add_newdoc('numpy.core', 'broadcast', ('numiter', """number of iterators """)) - add_newdoc('numpy.core', 'broadcast', ('shape', """shape of broadcasted result """)) - add_newdoc('numpy.core', 'broadcast', ('size', """total size of broadcasted result @@ -1997,6 +1867,32 @@ Equivalent to a.view(a.dtype.newbytorder(byteorder)) + Return array with dtype changed to interpret array data as + specified byte order. + + Changes are also made in all fields and sub-arrays of the array + data type. + + Parameters + ---------- + new_order : string, optional + Byte order to force; a value from the byte order + specifications below. The default value ('S') results in + swapping the current byte order. + `new_order` codes can be any of: + * {'<', 'L'} - little endian + * {'>', 'B'} - big endian + * {'=', 'N'} - native order + * 'S' - swap dtype from current to opposite endian + * {'|', 'I'} - ignore (no change to byte order) + The code does a case-insensitive check on the first letter of + `new_order` for these alternatives. For example, any of '>' + or 'B' or 'b' or 'brian' are valid to specify big-endian. + + Returns + ------- + new_arr : array + array with the given change to the dtype byte order. """)) @@ -2555,6 +2451,25 @@ """)) + +############################################################################## +# +# umath functions +# +############################################################################## + +add_newdoc('numpy.core.umath', 'frexp', + """ + """) + +add_newdoc('numpy.core.umath', 'frompyfunc', + """ + """) + +add_newdoc('numpy.core.umath', 'ldexp', + """ + """) + add_newdoc('numpy.core.umath','geterrobj', """geterrobj() @@ -2584,6 +2499,102 @@ """) + +############################################################################## +# +# lib._compiled_base functions +# +############################################################################## + +add_newdoc('numpy.lib._compiled_base', 'digitize', + """ + digitize(x,bins) + + Return the index of the bin to which each value of x belongs. + + Each index i returned is such that bins[i-1] <= x < bins[i] if + bins is monotonically increasing, or bins [i-1] > x >= bins[i] if + bins is monotonically decreasing. + + Beyond the bounds of the bins 0 or len(bins) is returned as appropriate. + """) + +add_newdoc('numpy.lib._compiled_base', 'bincount', + """ + bincount(x,weights=None) + + Return the number of occurrences of each value in x. + + x must be a list of non-negative integers. The output, b[i], + represents the number of times that i is found in x. If weights + is specified, every occurrence of i at a position p contributes + weights[p] instead of 1. + + See also: histogram, digitize, unique. + """) + +add_newdoc('numpy.lib._compiled_base', 'add_docstring', + """ + docstring(obj, docstring) + + Add a docstring to a built-in obj if possible. + If the obj already has a docstring raise a RuntimeError + If this routine does not know how to add a docstring to the object + raise a TypeError + """) + +add_newdoc('numpy.lib._compiled_base', 'packbits', + """ + out = numpy.packbits(myarray, axis=None) + + myarray : an integer type array whose elements should be packed to bits + + This routine packs the elements of a binary-valued dataset into a + NumPy array of type uint8 ('B') whose bits correspond to + the logical (0 or nonzero) value of the input elements. + The dimension over-which bit-packing is done is given by axis. + The shape of the output has the same number of dimensions as the input + (unless axis is None, in which case the output is 1-d). + + Example: + >>> a = array([[[1,0,1], + ... [0,1,0]], + ... [[1,1,0], + ... [0,0,1]]]) + >>> b = numpy.packbits(a,axis=-1) + >>> b + array([[[160],[64]],[[192],[32]]], dtype=uint8) + + Note that 160 = 128 + 32 + 192 = 128 + 64 + """) + +add_newdoc('numpy.lib._compiled_base', 'unpackbits', + """ + out = numpy.unpackbits(myarray, axis=None) + + myarray - array of uint8 type where each element represents a bit-field + that should be unpacked into a boolean output array + + The shape of the output array is either 1-d (if axis is None) or + the same shape as the input array with unpacking done along the + axis specified. + """) + + +############################################################################## +# +# Documentation for ufunc attributes and methods +# +############################################################################## + + +############################################################################## +# +# ufunc object +# +############################################################################## + add_newdoc('numpy.core', 'ufunc', """ Functions that operate element by element on whole arrays. @@ -2636,6 +2647,12 @@ """) +############################################################################## +# +# ufunc methods +# +############################################################################## + add_newdoc('numpy.core', 'ufunc', ('reduce', """ reduce(array, axis=0, dtype=None, out=None) @@ -2815,3 +2832,680 @@ [12, 15, 18]]) """)) + + +############################################################################## +# +# Documentation for dtype attributes and methods +# +############################################################################## + +############################################################################## +# +# dtype object +# +############################################################################## + +add_newdoc('numpy.core.multiarray', 'dtype', + """ + dtype(obj, align=False, copy=False) + + Create a data type object. + + A numpy array is homogeneous, and contains elements described by a + dtype object. A dtype object can be constructed from different + combinations of fundamental numeric types. + + Parameters + ---------- + obj + Object to be converted to a data type object. + align : bool, optional + Add padding to the fields to match what a C compiler would output + for a similar C-struct. Can be ``True`` only if `obj` is a dictionary + or a comma-separated string. + copy : bool, optional + Make a new copy of the data-type object. If ``False``, the result + may just be a reference to a built-in data-type object. + + Examples + -------- + Using array-scalar type: + + >>> np.dtype(np.int16) + dtype('int16') + + Record, one field name 'f1', containing int16: + + >>> np.dtype([('f1', np.int16)]) + dtype([('f1', '>> np.dtype([('f1', [('f1', np.int16)])]) + dtype([('f1', [('f1', '>> np.dtype([('f1', np.uint), ('f2', np.int32)]) + dtype([('f1', '>> np.dtype([('a','f8'),('b','S10')]) + dtype([('a', '>> np.dtype("i4, (2,3)f8") + dtype([('f0', '>> np.dtype([('hello',(np.int,3)),('world',np.void,10)]) + dtype([('hello', '>> np.dtype((np.int16, {'x':(np.int8,0), 'y':(np.int8,1)})) + dtype(('>> np.dtype({'names':['gender','age'], 'formats':['S1',np.uint8]}) + dtype([('gender', '|S1'), ('age', '|u1')]) + + Offsets in bytes, here 0 and 25: + + >>> np.dtype({'surname':('S25',0),'age':(np.uint8,25)}) + dtype([('surname', '|S25'), ('age', '|u1')]) + + """) + +############################################################################## +# +# dtype attributes +# +############################################################################## + +add_newdoc('numpy.core.multiarray', 'dtype', ('alignment', + """ + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('byteorder', + ''' + dt.byteorder + + String giving byteorder of dtype + + One of: + * '=' - native byteorder + * '<' - little endian + * '>' - big endian + * '|' - endian not relevant + + Examples + -------- + >>> dt = np.dtype('i2') + >>> dt.byteorder + '=' + >>> # endian is not relevant for 8 bit numbers + >>> np.dtype('i1').byteorder + '|' + >>> # or ASCII strings + >>> np.dtype('S2').byteorder + '|' + >>> # Even if specific code is given, and it is native + >>> # '=' is the byteorder + >>> import sys + >>> sys_is_le = sys.byteorder == 'little' + >>> native_code = sys_is_le and '<' or '>' + >>> swapped_code = sys_is_le and '>' or '<' + >>> dt = np.dtype(native_code + 'i2') + >>> dt.byteorder + '=' + >>> # Swapped code shows up as itself + >>> dt = np.dtype(swapped_code + 'i2') + >>> dt.byteorder == swapped_code + True + ''')) + +add_newdoc('numpy.core.multiarray', 'dtype', ('char', + """ + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('descr', + """ + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('fields', + """ + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('flags', + """ + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('hasobject', + """ + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('isbuiltin', + """ + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('isnative', + """ + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('itemsize', + """ + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('kind', + """ + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('name', + """ + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('names', + """ + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('num', + """ + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('shape', + """ + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('str', + """ + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('subdtype', + """ + """)) + +add_newdoc('numpy.core.multiarray', 'dtype', ('type', + """ + """)) + +############################################################################## +# +# dtype methods +# +############################################################################## + +add_newdoc('numpy.core.multiarray', 'dtype', ('newbyteorder', + ''' + newbyteorder(new_order='S') + + Return a new dtype with a different byte order. + + Changes are also made in all fields and sub-arrays of the data type. + + Parameters + ---------- + new_order : string, optional + Byte order to force; a value from the byte order + specifications below. The default value ('S') results in + swapping the current byte order. + `new_order` codes can be any of: + * {'<', 'L'} - little endian + * {'>', 'B'} - big endian + * {'=', 'N'} - native order + * 'S' - swap dtype from current to opposite endian + * {'|', 'I'} - ignore (no change to byte order) + The code does a case-insensitive check on the first letter of + `new_order` for these alternatives. For example, any of '>' + or 'B' or 'b' or 'brian' are valid to specify big-endian. + + Returns + ------- + new_dtype : dtype + New dtype object with the given change to the byte order. + + Examples + -------- + >>> import sys + >>> sys_is_le = sys.byteorder == 'little' + >>> native_code = sys_is_le and '<' or '>' + >>> swapped_code = sys_is_le and '>' or '<' + >>> native_dt = np.dtype(native_code+'i2') + >>> swapped_dt = np.dtype(swapped_code+'i2') + >>> native_dt.newbyteorder('S') == swapped_dt + True + >>> native_dt.newbyteorder() == swapped_dt + True + >>> native_dt == swapped_dt.newbyteorder('S') + True + >>> native_dt == swapped_dt.newbyteorder('=') + True + >>> native_dt == swapped_dt.newbyteorder('N') + True + >>> native_dt == native_dt.newbyteorder('|') + True + >>> np.dtype('>> np.dtype('>> np.dtype('>i2') == native_dt.newbyteorder('>') + True + >>> np.dtype('>i2') == native_dt.newbyteorder('B') + True + ''')) + + +############################################################################## +# +# nd_grid instances +# +############################################################################## + +add_newdoc('numpy.lib.index_tricks', 'mgrid', + """ + Construct a multi-dimensional filled "meshgrid". + + Returns a mesh-grid when indexed. The dimension and number of the + output arrays are equal to the number of indexing dimensions. If + the step length is not a complex number, then the stop is not + inclusive. + + However, if the step length is a **complex number** (e.g. 5j), + then the integer part of its magnitude is interpreted as + specifying the number of points to create between the start and + stop values, where the stop value **is inclusive**. + + See also + -------- + ogrid + + Examples + -------- + >>> np.mgrid[0:5,0:5] + array([[[0, 0, 0, 0, 0], + [1, 1, 1, 1, 1], + [2, 2, 2, 2, 2], + [3, 3, 3, 3, 3], + [4, 4, 4, 4, 4]], + + [[0, 1, 2, 3, 4], + [0, 1, 2, 3, 4], + [0, 1, 2, 3, 4], + [0, 1, 2, 3, 4], + [0, 1, 2, 3, 4]]]) + >>> np.mgrid[-1:1:5j] + array([-1. , -0.5, 0. , 0.5, 1. ]) + """) + +add_newdoc('numpy.lib.index_tricks', 'ogrid', + """ + Construct a multi-dimensional open "meshgrid". + + Returns an 'open' mesh-grid when indexed. The dimension and + number of the output arrays are equal to the number of indexing + dimensions. If the step length is not a complex number, then the + stop is not inclusive. + + The returned mesh-grid is open (or not fleshed out), so that only + one-dimension of each returned argument is greater than 1 + + If the step length is a **complex number** (e.g. 5j), then the + integer part of its magnitude is interpreted as specifying the + number of points to create between the start and stop values, + where the stop value **is inclusive**. + + See also + -------- + mgrid + + Examples + -------- + >>> np.ogrid[0:5,0:5] + [array([[0], + [1], + [2], + [3], + [4]]), array([[0, 1, 2, 3, 4]])] + """) + + +############################################################################## +# +# Documentation for `generic` attributes and methods +# +############################################################################## + +add_newdoc('numpy.core.numerictypes', 'generic', + """ + """) + +# Attributes + +add_newdoc('numpy.core.numerictypes', 'generic', ('T', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('base', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('data', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('dtype', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('flags', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('flat', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('imag', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('itemsize', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('nbytes', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('ndim', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('real', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('shape', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('size', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('strides', + """ + """)) + +# Methods + +add_newdoc('numpy.core.numerictypes', 'generic', ('all', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('any', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('argmax', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('argmin', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('argsort', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('astype', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('byteswap', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('choose', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('clip', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('compress', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('conjugate', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('copy', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('cumprod', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('cumsum', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('diagonal', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('dump', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('dumps', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('fill', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('flatten', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('getfield', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('item', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('itemset', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('max', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('mean', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('min', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('newbyteorder', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('nonzero', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('prod', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('ptp', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('put', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('ravel', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('repeat', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('reshape', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('resize', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('round', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('searchsorted', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('setfield', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('setflags', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('sort', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('squeeze', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('std', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('sum', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('swapaxes', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('take', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('tofile', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('tolist', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('tostring', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('trace', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('transpose', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('var', + """ + """)) + +add_newdoc('numpy.core.numerictypes', 'generic', ('view', + """ + """)) + + +############################################################################## +# +# Documentation for other scalar classes +# +############################################################################## + +add_newdoc('numpy.core.numerictypes', 'bool_', + """ + """) + +add_newdoc('numpy.core.numerictypes', 'complex64', + """ + """) + +add_newdoc('numpy.core.numerictypes', 'complex128', + """ + """) + +add_newdoc('numpy.core.numerictypes', 'complex256', + """ + """) + +add_newdoc('numpy.core.numerictypes', 'float32', + """ + """) + +add_newdoc('numpy.core.numerictypes', 'float64', + """ + """) + +add_newdoc('numpy.core.numerictypes', 'float96', + """ + """) + +add_newdoc('numpy.core.numerictypes', 'float128', + """ + """) + +add_newdoc('numpy.core.numerictypes', 'int8', + """ + """) + +add_newdoc('numpy.core.numerictypes', 'int16', + """ + """) + +add_newdoc('numpy.core.numerictypes', 'int32', + """ + """) + +add_newdoc('numpy.core.numerictypes', 'int64', + """ + """) + +add_newdoc('numpy.core.numerictypes', 'object_', + """ + """) Modified: branches/numpy-mingw-w64/numpy/core/SConscript =================================================================== --- branches/numpy-mingw-w64/numpy/core/SConscript 2009-02-20 13:30:20 UTC (rev 6424) +++ branches/numpy-mingw-w64/numpy/core/SConscript 2009-02-20 16:37:01 UTC (rev 6425) @@ -211,6 +211,10 @@ config.Define('DISTUTILS_USE_SDK', distutils_use_sdk, "define to 1 to disable SMP support ") + if a == "Intel": + config.Define('FORCE_NO_LONG_DOUBLE_FORMATTING', 1, + "define to 1 to force long double format string to the" \ + " same as double (Lg -> g)") #-------------- # Checking Blas #-------------- Modified: branches/numpy-mingw-w64/numpy/core/_internal.py =================================================================== --- branches/numpy-mingw-w64/numpy/core/_internal.py 2009-02-20 13:30:20 UTC (rev 6424) +++ branches/numpy-mingw-w64/numpy/core/_internal.py 2009-02-20 16:37:01 UTC (rev 6425) @@ -292,3 +292,22 @@ raise ValueError, "unknown field name: %s" % (name,) return tuple(list(order) + nameslist) raise ValueError, "unsupported order value: %s" % (order,) + +# Given an array with fields and a sequence of field names +# construct a new array with just those fields copied over +def _index_fields(ary, fields): + from multiarray import empty, dtype + dt = ary.dtype + new_dtype = [(name, dt[name]) for name in dt.names if name in fields] + if ary.flags.f_contiguous: + order = 'F' + else: + order = 'C' + + newarray = empty(ary.shape, dtype=new_dtype, order=order) + + for name in fields: + newarray[name] = ary[name] + + return newarray + Modified: branches/numpy-mingw-w64/numpy/core/code_generators/generate_numpy_api.py =================================================================== --- branches/numpy-mingw-w64/numpy/core/code_generators/generate_numpy_api.py 2009-02-20 13:30:20 UTC (rev 6424) +++ branches/numpy-mingw-w64/numpy/core/code_generators/generate_numpy_api.py 2009-02-20 16:37:01 UTC (rev 6425) @@ -65,6 +65,13 @@ static int _import_array(void) { +#ifdef WORDS_BIGENDIAN + union { + long i; + char c[sizeof(long)]; + } bint = {1}; +#endif + PyObject *numpy = PyImport_ImportModule("numpy.core.multiarray"); PyObject *c_api = NULL; if (numpy == NULL) return -1; @@ -83,6 +90,17 @@ (int) NPY_VERSION, (int) PyArray_GetNDArrayCVersion()); return -1; } + +#ifdef WORDS_BIGENDIAN + if (bint.c[0] == 1) { + PyErr_Format(PyExc_RuntimeError, "module compiled against "\ + "python headers configured as big endian, but little endian arch "\ + "detected: this is a python 2.6.* bug (see bug 4728 in python bug "\ + "tracker )"); + return -1; + } +#endif + return 0; } Modified: branches/numpy-mingw-w64/numpy/core/setup.py =================================================================== --- branches/numpy-mingw-w64/numpy/core/setup.py 2009-02-20 13:30:20 UTC (rev 6424) +++ branches/numpy-mingw-w64/numpy/core/setup.py 2009-02-20 16:37:01 UTC (rev 6425) @@ -67,8 +67,8 @@ # Mandatory functions: if not found, fail the build mandatory_funcs = ["sin", "cos", "tan", "sinh", "cosh", "tanh", "fabs", - "floor", "ceil", "sqrt", "log10", "log", "exp", "asin", - "acos", "atan", "fmod", 'modf', 'frexp', 'ldexp'] + "floor", "ceil", "sqrt", "log10", "log", "exp", "asin", + "acos", "atan", "fmod", 'modf', 'frexp', 'ldexp'] if not check_funcs_once(mandatory_funcs): raise SystemError("One of the required function to build numpy is not" @@ -81,6 +81,14 @@ optional_stdfuncs = ["expm1", "log1p", "acosh", "asinh", "atanh", "rint", "trunc", "exp2", "log2"] + # XXX: hack to circumvent cpp pollution from python: python put its + # config.h in the public namespace, so we have a clash for the common + # functions we test. We remove every function tested by python's autoconf, + # hoping their own test are correct + if sys.version_info[0] == 2 and sys.version_info[1] >= 6: + for f in ["expm1", "log1p", "acosh", "atanh", "asinh"]: + optional_stdfuncs.remove(f) + check_funcs(optional_stdfuncs) # C99 functions: float and long double versions @@ -179,6 +187,14 @@ headers=['stdlib.h']): moredefs.append(('PyOS_ascii_strtod', 'strtod')) + if sys.platform == "win32": + from numpy.distutils.misc_util import get_build_architecture + # On win32, force long double format string to be 'g', not + # 'Lg', since the MS runtime does not support long double whose + # size is > sizeof(double) + if get_build_architecture()=="Intel": + moredefs.append('FORCE_NO_LONG_DOUBLE_FORMATTING') + target_f = open(target,'a') for d in moredefs: if isinstance(d,str): @@ -322,6 +338,7 @@ deps = [join('src','arrayobject.c'), join('src','arraymethods.c'), join('src','scalartypes.inc.src'), + join('src','numpyos.c'), join('src','arraytypes.inc.src'), join('src','_signbit.c'), join('src','ucsnarrow.c'), Modified: branches/numpy-mingw-w64/numpy/core/src/arraymethods.c =================================================================== --- branches/numpy-mingw-w64/numpy/core/src/arraymethods.c 2009-02-20 13:30:20 UTC (rev 6424) +++ branches/numpy-mingw-w64/numpy/core/src/arraymethods.c 2009-02-20 16:37:01 UTC (rev 6425) @@ -4,10 +4,10 @@ static PyObject * array_take(PyArrayObject *self, PyObject *args, PyObject *kwds) { - int dimension=MAX_DIMS; + int dimension = MAX_DIMS; PyObject *indices; - PyArrayObject *out=NULL; - NPY_CLIPMODE mode=NPY_RAISE; + PyArrayObject *out = NULL; + NPY_CLIPMODE mode = NPY_RAISE; static char *kwlist[] = {"indices", "axis", "out", "mode", NULL}; if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|O&O&O&", kwlist, @@ -26,9 +26,12 @@ array_fill(PyArrayObject *self, PyObject *args) { PyObject *obj; - if (!PyArg_ParseTuple(args, "O", &obj)) + if (!PyArg_ParseTuple(args, "O", &obj)) { return NULL; - if (PyArray_FillWithScalar(self, obj) < 0) return NULL; + } + if (PyArray_FillWithScalar(self, obj) < 0) { + return NULL; + } Py_INCREF(Py_None); return Py_None; } @@ -37,7 +40,7 @@ array_put(PyArrayObject *self, PyObject *args, PyObject *kwds) { PyObject *indices, *values; - NPY_CLIPMODE mode=NPY_RAISE; + NPY_CLIPMODE mode = NPY_RAISE; static char *kwlist[] = {"indices", "values", "mode", NULL}; if (!PyArg_ParseTupleAndKeywords(args, kwds, "OO|O&", kwlist, @@ -53,7 +56,7 @@ { PyArray_Dims newshape; PyObject *ret; - PyArray_ORDER order=PyArray_CORDER; + PyArray_ORDER order = PyArray_CORDER; int n; if (kwds != NULL) { @@ -64,16 +67,20 @@ "invalid keyword argument"); return NULL; } - if ((PyArray_OrderConverter(ref, &order) == PY_FAIL)) + if ((PyArray_OrderConverter(ref, &order) == PY_FAIL)) { return NULL; + } } n = PyTuple_Size(args); if (n <= 1) { - if (PyTuple_GET_ITEM(args, 0) == Py_None) + if (PyTuple_GET_ITEM(args, 0) == Py_None) { return PyArray_View(self, NULL, NULL); + } if (!PyArg_ParseTuple(args, "O&", PyArray_IntpConverter, - &newshape)) return NULL; + &newshape)) { + return NULL; + } } else { if (!PyArray_IntpConverter(args, &newshape)) { @@ -96,16 +103,18 @@ static PyObject * array_squeeze(PyArrayObject *self, PyObject *args) { - if (!PyArg_ParseTuple(args, "")) return NULL; + if (!PyArg_ParseTuple(args, "")) { + return NULL; + } return PyArray_Squeeze(self); } static PyObject * array_view(PyArrayObject *self, PyObject *args, PyObject *kwds) { - PyObject *out_dtype=NULL; - PyObject *out_type=NULL; - PyArray_Descr *dtype=NULL; + PyObject *out_dtype = NULL; + PyObject *out_type = NULL; + PyArray_Descr *dtype = NULL; static char *kwlist[] = {"dtype", "type", NULL}; if (!PyArg_ParseTupleAndKeywords(args, kwds, "|OO", kwlist, @@ -151,8 +160,8 @@ static PyObject * array_argmax(PyArrayObject *self, PyObject *args, PyObject *kwds) { - int axis=MAX_DIMS; - PyArrayObject *out=NULL; + int axis = MAX_DIMS; + PyArrayObject *out = NULL; static char *kwlist[] = {"axis", "out", NULL}; if (!PyArg_ParseTupleAndKeywords(args, kwds, "|O&O&", kwlist, @@ -168,8 +177,8 @@ static PyObject * array_argmin(PyArrayObject *self, PyObject *args, PyObject *kwds) { - int axis=MAX_DIMS; - PyArrayObject *out=NULL; + int axis = MAX_DIMS; + PyArrayObject *out = NULL; static char *kwlist[] = {"axis", "out", NULL}; if (!PyArg_ParseTupleAndKeywords(args, kwds, "|O&O&", kwlist, @@ -185,8 +194,8 @@ static PyObject * array_max(PyArrayObject *self, PyObject *args, PyObject *kwds) { - int axis=MAX_DIMS; - PyArrayObject *out=NULL; + int axis = MAX_DIMS; + PyArrayObject *out = NULL; static char *kwlist[] = {"axis", "out", NULL}; if (!PyArg_ParseTupleAndKeywords(args, kwds, "|O&O&", kwlist, @@ -202,8 +211,8 @@ static PyObject * array_ptp(PyArrayObject *self, PyObject *args, PyObject *kwds) { - int axis=MAX_DIMS; - PyArrayObject *out=NULL; + int axis = MAX_DIMS; + PyArrayObject *out = NULL; static char *kwlist[] = {"axis", "out", NULL}; if (!PyArg_ParseTupleAndKeywords(args, kwds, "|O&O&", kwlist, @@ -220,8 +229,8 @@ static PyObject * array_min(PyArrayObject *self, PyObject *args, PyObject *kwds) { - int axis=MAX_DIMS; - PyArrayObject *out=NULL; + int axis = MAX_DIMS; + PyArrayObject *out = NULL; static char *kwlist[] = {"axis", "out", NULL}; if (!PyArg_ParseTupleAndKeywords(args, kwds, "|O&O&", kwlist, @@ -239,8 +248,9 @@ { int axis1, axis2; - if (!PyArg_ParseTuple(args, "ii", &axis1, &axis2)) return NULL; - + if (!PyArg_ParseTuple(args, "ii", &axis1, &axis2)) { + return NULL; + } return PyArray_SwapAxes(self, axis1, axis2); } @@ -252,7 +262,7 @@ static PyObject * PyArray_GetField(PyArrayObject *self, PyArray_Descr *typed, int offset) { - PyObject *ret=NULL; + PyObject *ret = NULL; if (offset < 0 || (offset + typed->elsize) > self->descr->elsize) { PyErr_Format(PyExc_ValueError, @@ -268,7 +278,9 @@ self->strides, self->data + offset, self->flags, (PyObject *)self); - if (ret == NULL) return NULL; + if (ret == NULL) { + return NULL; + } Py_INCREF(self); ((PyArrayObject *)ret)->base = (PyObject *)self; @@ -280,7 +292,7 @@ array_getfield(PyArrayObject *self, PyObject *args, PyObject *kwds) { - PyArray_Descr *dtype=NULL; + PyArray_Descr *dtype = NULL; int offset = 0; static char *kwlist[] = {"dtype", "offset", 0}; @@ -302,7 +314,7 @@ PyArray_SetField(PyArrayObject *self, PyArray_Descr *dtype, int offset, PyObject *val) { - PyObject *ret=NULL; + PyObject *ret = NULL; int retval = 0; if (offset < 0 || (offset + dtype->elsize) > self->descr->elsize) { @@ -317,7 +329,9 @@ dtype, self->nd, self->dimensions, self->strides, self->data + offset, self->flags, (PyObject *)self); - if (ret == NULL) return -1; + if (ret == NULL) { + return -1; + } Py_INCREF(self); ((PyArrayObject *)ret)->base = (PyObject *)self; @@ -330,7 +344,7 @@ static PyObject * array_setfield(PyArrayObject *self, PyObject *args, PyObject *kwds) { - PyArray_Descr *dtype=NULL; + PyArray_Descr *dtype = NULL; int offset = 0; PyObject *value; static char *kwlist[] = {"value", "dtype", "offset", 0}; @@ -342,8 +356,9 @@ return NULL; } - if (PyArray_SetField(self, dtype, offset, value) < 0) + if (PyArray_SetField(self, dtype, offset, value) < 0) { return NULL; + } Py_INCREF(Py_None); return Py_None; } @@ -391,8 +406,9 @@ } else { PyObject *new; - if ((ret = (PyArrayObject *)PyArray_NewCopy(self,-1)) == NULL) + if ((ret = (PyArrayObject *)PyArray_NewCopy(self,-1)) == NULL) { return NULL; + } new = PyArray_Byteswap(ret, TRUE); Py_DECREF(new); return (PyObject *)ret; @@ -403,18 +419,20 @@ static PyObject * array_byteswap(PyArrayObject *self, PyObject *args) { - Bool inplace=FALSE; + Bool inplace = FALSE; - if (!PyArg_ParseTuple(args, "|O&", PyArray_BoolConverter, &inplace)) + if (!PyArg_ParseTuple(args, "|O&", PyArray_BoolConverter, &inplace)) { return NULL; - + } return PyArray_Byteswap(self, inplace); } static PyObject * array_tolist(PyArrayObject *self, PyObject *args) { - if (!PyArg_ParseTuple(args, "")) return NULL; + if (!PyArg_ParseTuple(args, "")) { + return NULL; + } return PyArray_ToList(self); } @@ -422,12 +440,14 @@ static PyObject * array_tostring(PyArrayObject *self, PyObject *args, PyObject *kwds) { - NPY_ORDER order=NPY_CORDER; + NPY_ORDER order = NPY_CORDER; static char *kwlist[] = {"order", NULL}; if (!PyArg_ParseTupleAndKeywords(args, kwds, "|O&", kwlist, PyArray_OrderConverter, - &order)) return NULL; + &order)) { + return NULL; + } return PyArray_ToString(self, order); } @@ -441,17 +461,21 @@ int ret; PyObject *file; FILE *fd; - char *sep=""; - char *format=""; + char *sep = ""; + char *format = ""; static char *kwlist[] = {"file", "sep", "format", NULL}; if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|ss", kwlist, - &file, &sep, &format)) return NULL; + &file, &sep, &format)) { + return NULL; + } if (PyString_Check(file) || PyUnicode_Check(file)) { file = PyObject_CallFunction((PyObject *)&PyFile_Type, "Os", file, "wb"); - if (file==NULL) return NULL; + if (file == NULL) { + return NULL; + } } else { Py_INCREF(file); @@ -465,7 +489,9 @@ } ret = PyArray_ToFile(self, fd, sep, format); Py_DECREF(file); - if (ret < 0) return NULL; + if (ret < 0) { + return NULL; + } Py_INCREF(Py_None); return Py_None; } @@ -476,7 +502,7 @@ int n, nd; n = PyTuple_GET_SIZE(args); - if (n==1) { + if (n == 1) { PyObject *obj; obj = PyTuple_GET_ITEM(args, 0); if (PyTuple_Check(obj)) { @@ -485,7 +511,7 @@ } } - if (n==0) { + if (n == 0) { if (self->nd == 0 || PyArray_SIZE(self) == 1) return self->descr->f->getitem(self->data, self); else { @@ -495,13 +521,13 @@ return NULL; } } - else if (n != self->nd && (n > 1 || self->nd==0)) { + else if (n != self->nd && (n > 1 || self->nd == 0)) { PyErr_SetString(PyExc_ValueError, "incorrect number of indices for " \ "array"); return NULL; } - else if (n==1) { /* allows for flat getting as well as 1-d case */ + else if (n == 1) { /* allows for flat getting as well as 1-d case */ intp value, loc, index, factor; intp factors[MAX_DIMS]; value = PyArray_PyIntAsIntp(PyTuple_GET_ITEM(args, 0)); @@ -528,7 +554,7 @@ factor *= self->dimensions[nd]; } loc = 0; - for (nd=0; nd < self->nd; nd++) { + for (nd = 0; nd < self->nd; nd++) { index = value / factors[nd]; value = value % factors[nd]; loc += self->strides[nd]*index; @@ -541,11 +567,14 @@ else { intp loc, index[MAX_DIMS]; nd = PyArray_IntpFromSequence(args, index, MAX_DIMS); - if (nd < n) return NULL; + if (nd < n) { + return NULL; + } loc = 0; while (nd--) { - if (index[nd] < 0) + if (index[nd] < 0) { index[nd] += self->dimensions[nd]; + } if (index[nd] < 0 || index[nd] >= self->dimensions[nd]) { PyErr_SetString(PyExc_ValueError, @@ -563,7 +592,7 @@ int n, nd; int ret = -1; PyObject *obj; - n = PyTuple_GET_SIZE(args)-1; + n = PyTuple_GET_SIZE(args) - 1; if (n < 0) { PyErr_SetString(PyExc_ValueError, @@ -571,7 +600,7 @@ return NULL; } obj = PyTuple_GET_ITEM(args, n); - if (n==0) { + if (n == 0) { if (self->nd == 0 || PyArray_SIZE(self) == 1) { ret = self->descr->f->setitem(obj, self->data, self); } @@ -582,13 +611,13 @@ return NULL; } } - else if (n != self->nd && (n > 1 || self->nd==0)) { + else if (n != self->nd && (n > 1 || self->nd == 0)) { PyErr_SetString(PyExc_ValueError, "incorrect number of indices for " \ "array"); return NULL; } - else if (n==1) { /* allows for flat setting as well as 1-d case */ + else if (n == 1) { /* allows for flat setting as well as 1-d case */ intp value, loc, index, factor; intp factors[MAX_DIMS]; PyObject *indobj; @@ -602,7 +631,7 @@ nn = PyTuple_GET_SIZE(indobj); newargs = PyTuple_New(nn+1); Py_INCREF(obj); - for (i=0; idimensions[nd]; } loc = 0; - for (nd=0; nd < self->nd; nd++) { + for (nd = 0; nd < self->nd; nd++) { index = value / factors[nd]; value = value % factors[nd]; loc += self->strides[nd]*index; @@ -650,11 +679,14 @@ tupargs = PyTuple_GetSlice(args, 0, n); nd = PyArray_IntpFromSequence(tupargs, index, MAX_DIMS); Py_DECREF(tupargs); - if (nd < n) return NULL; + if (nd < n) { + return NULL; + } loc = 0; while (nd--) { - if (index[nd] < 0) + if (index[nd] < 0) { index[nd] += self->dimensions[nd]; + } if (index[nd] < 0 || index[nd] >= self->dimensions[nd]) { PyErr_SetString(PyExc_ValueError, @@ -667,7 +699,9 @@ } finish: - if (ret < 0) return NULL; + if (ret < 0) { + return NULL; + } Py_INCREF(Py_None); return Py_None; } @@ -676,7 +710,7 @@ static PyObject * array_cast(PyArrayObject *self, PyObject *args) { - PyArray_Descr *descr=NULL; + PyArray_Descr *descr = NULL; PyObject *obj; if (!PyArg_ParseTuple(args, "O&", PyArray_DescrConverter, @@ -729,7 +763,9 @@ PyArray_DIMS(arr), PyArray_STRIDES(arr), PyArray_DATA(arr), PyArray_FLAGS(arr), (PyObject *)self); - if (ret == NULL) return NULL; + if (ret == NULL) { + return NULL; + } Py_INCREF(arr); PyArray_BASE(ret) = arr; return ret; @@ -739,7 +775,7 @@ static PyObject * array_getarray(PyArrayObject *self, PyObject *args) { - PyArray_Descr *newtype=NULL; + PyArray_Descr *newtype = NULL; PyObject *ret; if (!PyArg_ParseTuple(args, "|O&", PyArray_DescrConverter, @@ -765,7 +801,9 @@ PyArray_STRIDES(self), PyArray_DATA(self), PyArray_FLAGS(self), NULL); - if (new == NULL) return NULL; + if (new == NULL) { + return NULL; + } Py_INCREF(self); PyArray_BASE(new) = (PyObject *)self; self = (PyArrayObject *)new; @@ -774,7 +812,7 @@ Py_INCREF(self); } - if ((newtype == NULL) || \ + if ((newtype == NULL) || PyArray_EquivTypes(self->descr, newtype)) { return (PyObject *)self; } @@ -791,7 +829,9 @@ { PyArray_ORDER fortran=PyArray_CORDER; if (!PyArg_ParseTuple(args, "|O&", PyArray_OrderConverter, - &fortran)) return NULL; + &fortran)) { + return NULL; + } return PyArray_NewCopy(self, fortran); } @@ -804,7 +844,7 @@ PyObject *ret; int n; int refcheck = 1; - PyArray_ORDER fortran=PyArray_ANYORDER; + PyArray_ORDER fortran = PyArray_ANYORDER; if (kwds != NULL) { PyObject *ref; @@ -817,8 +857,9 @@ } ref = PyDict_GetItemString(kwds, "order"); if (ref != NULL || - (PyArray_OrderConverter(ref, &fortran) == PY_FAIL)) + (PyArray_OrderConverter(ref, &fortran) == PY_FAIL)) { return NULL; + } } n = PyTuple_Size(args); if (n <= 1) { @@ -827,7 +868,9 @@ return Py_None; } if (!PyArg_ParseTuple(args, "O&", PyArray_IntpConverter, - &newshape)) return NULL; + &newshape)) { + return NULL; + } } else { if (!PyArray_IntpConverter(args, &newshape)) { @@ -840,7 +883,9 @@ } ret = PyArray_Resize(self, &newshape, refcheck, fortran); PyDimMem_FREE(newshape.ptr); - if (ret == NULL) return NULL; + if (ret == NULL) { + return NULL; + } Py_DECREF(ret); Py_INCREF(Py_None); return Py_None; @@ -849,13 +894,14 @@ static PyObject * array_repeat(PyArrayObject *self, PyObject *args, PyObject *kwds) { PyObject *repeats; - int axis=MAX_DIMS; + int axis = MAX_DIMS; static char *kwlist[] = {"repeats", "axis", NULL}; if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|O&", kwlist, &repeats, PyArray_AxisConverter, - &axis)) return NULL; - + &axis)) { + return NULL; + } return _ARET(PyArray_Repeat(self, repeats, axis)); } @@ -864,26 +910,27 @@ { PyObject *choices; int n; - PyArrayObject *out=NULL; - NPY_CLIPMODE clipmode=NPY_RAISE; + PyArrayObject *out = NULL; + NPY_CLIPMODE clipmode = NPY_RAISE; n = PyTuple_Size(args); if (n <= 1) { - if (!PyArg_ParseTuple(args, "O", &choices)) + if (!PyArg_ParseTuple(args, "O", &choices)) { return NULL; + } } else { choices = args; } if (kwds && PyDict_Check(kwds)) { - if (PyArray_OutputConverter(PyDict_GetItemString(kwds, - "out"), - &out) == PY_FAIL) + if (PyArray_OutputConverter(PyDict_GetItemString(kwds, "out"), + &out) == PY_FAIL) { return NULL; - if (PyArray_ClipmodeConverter(PyDict_GetItemString(kwds, - "mode"), - &clipmode) == PY_FAIL) + } + if (PyArray_ClipmodeConverter(PyDict_GetItemString(kwds, "mode"), + &clipmode) == PY_FAIL) { return NULL; + } } return _ARET(PyArray_Choose(self, choices, out, clipmode)); @@ -894,18 +941,20 @@ { int axis=-1; int val; - PyArray_SORTKIND which=PyArray_QUICKSORT; - PyObject *order=NULL; - PyArray_Descr *saved=NULL; + PyArray_SORTKIND which = PyArray_QUICKSORT; + PyObject *order = NULL; + PyArray_Descr *saved = NULL; PyArray_Descr *newd; static char *kwlist[] = {"axis", "kind", "order", NULL}; if (!PyArg_ParseTupleAndKeywords(args, kwds, "|iO&O", kwlist, &axis, PyArray_SortkindConverter, &which, - &order)) + &order)) { return NULL; - - if (order == Py_None) order = NULL; + } + if (order == Py_None) { + order = NULL; + } if (order != NULL) { PyObject *new_name; PyObject *_numpy_internal; @@ -916,11 +965,15 @@ return NULL; } _numpy_internal = PyImport_ImportModule("numpy.core._internal"); - if (_numpy_internal == NULL) return NULL; + if (_numpy_internal == NULL) { + return NULL; + } new_name = PyObject_CallMethod(_numpy_internal, "_newnames", "OO", saved, order); Py_DECREF(_numpy_internal); - if (new_name == NULL) return NULL; + if (new_name == NULL) { + return NULL; + } newd = PyArray_DescrNew(saved); newd->names = new_name; self->descr = newd; @@ -931,7 +984,9 @@ Py_XDECREF(self->descr); self->descr = saved; } - if (val < 0) return NULL; + if (val < 0) { + return NULL; + } Py_INCREF(Py_None); return Py_None; } @@ -939,19 +994,21 @@ static PyObject * array_argsort(PyArrayObject *self, PyObject *args, PyObject *kwds) { - int axis=-1; - PyArray_SORTKIND which=PyArray_QUICKSORT; - PyObject *order=NULL, *res; + int axis = -1; + PyArray_SORTKIND which = PyArray_QUICKSORT; + PyObject *order = NULL, *res; PyArray_Descr *newd, *saved=NULL; static char *kwlist[] = {"axis", "kind", "order", NULL}; if (!PyArg_ParseTupleAndKeywords(args, kwds, "|O&O&O", kwlist, PyArray_AxisConverter, &axis, PyArray_SortkindConverter, &which, - &order)) + &order)) { return NULL; - - if (order == Py_None) order = NULL; + } + if (order == Py_None) { + order = NULL; + } if (order != NULL) { PyObject *new_name; PyObject *_numpy_internal; @@ -962,11 +1019,15 @@ return NULL; } _numpy_internal = PyImport_ImportModule("numpy.core._internal"); - if (_numpy_internal == NULL) return NULL; + if (_numpy_internal == NULL) { + return NULL; + } new_name = PyObject_CallMethod(_numpy_internal, "_newnames", "OO", saved, order); Py_DECREF(_numpy_internal); - if (new_name == NULL) return NULL; + if (new_name == NULL) { + return NULL; + } newd = PyArray_DescrNew(saved); newd->names = new_name; self->descr = newd; @@ -989,9 +1050,9 @@ if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|O&:searchsorted", kwlist, &keys, - PyArray_SearchsideConverter, &side)) + PyArray_SearchsideConverter, &side)) { return NULL; - + } return _ARET(PyArray_SearchSorted(self, keys, side)); } @@ -999,16 +1060,22 @@ _deepcopy_call(char *iptr, char *optr, PyArray_Descr *dtype, PyObject *deepcopy, PyObject *visit) { - if (!PyDataType_REFCHK(dtype)) return; + if (!PyDataType_REFCHK(dtype)) { + return; + } else if (PyDescr_HASFIELDS(dtype)) { - PyObject *key, *value, *title=NULL; + PyObject *key, *value, *title = NULL; PyArray_Descr *new; int offset; - Py_ssize_t pos=0; + Py_ssize_t pos = 0; while (PyDict_Next(dtype->fields, &pos, &key, &value)) { - if NPY_TITLE_KEY(key, value) continue; + if NPY_TITLE_KEY(key, value) { + continue; + } if (!PyArg_ParseTuple(value, "Oi|O", &new, &offset, - &title)) return; + &title)) { + return; + } _deepcopy_call(iptr + offset, optr + offset, new, deepcopy, visit); } @@ -1020,8 +1087,7 @@ otemp = (PyObject **)optr; Py_XINCREF(*itemp); /* call deepcopy on this argument */ - res = PyObject_CallFunctionObjArgs(deepcopy, - *itemp, visit, NULL); + res = PyObject_CallFunctionObjArgs(deepcopy, *itemp, visit, NULL); Py_XDECREF(*itemp); Py_XDECREF(*otemp); *otemp = res; @@ -1038,20 +1104,28 @@ PyArrayIterObject *it; PyObject *copy, *ret, *deepcopy; - if (!PyArg_ParseTuple(args, "O", &visit)) return NULL; + if (!PyArg_ParseTuple(args, "O", &visit)) { + return NULL; + } ret = PyArray_Copy(self); if (PyDataType_REFCHK(self->descr)) { copy = PyImport_ImportModule("copy"); - if (copy == NULL) return NULL; + if (copy == NULL) { + return NULL; + } deepcopy = PyObject_GetAttrString(copy, "deepcopy"); Py_DECREF(copy); - if (deepcopy == NULL) return NULL; + if (deepcopy == NULL) { + return NULL; + } it = (PyArrayIterObject *)PyArray_IterNew((PyObject *)self); - if (it == NULL) {Py_DECREF(deepcopy); return NULL;} + if (it == NULL) { + Py_DECREF(deepcopy); + return NULL; + } optr = PyArray_DATA(ret); while(it->index < it->size) { - _deepcopy_call(it->dataptr, optr, self->descr, - deepcopy, visit); + _deepcopy_call(it->dataptr, optr, self->descr, deepcopy, visit); optr += self->descr->elsize; PyArray_ITER_NEXT(it); } @@ -1066,15 +1140,20 @@ _getlist_pkl(PyArrayObject *self) { PyObject *theobject; - PyArrayIterObject *iter=NULL; + PyArrayIterObject *iter = NULL; PyObject *list; PyArray_GetItemFunc *getitem; getitem = self->descr->f->getitem; iter = (PyArrayIterObject *)PyArray_IterNew((PyObject *)self); - if (iter == NULL) return NULL; + if (iter == NULL) { + return NULL; + } list = PyList_New(iter->size); - if (list == NULL) {Py_DECREF(iter); return NULL;} + if (list == NULL) { + Py_DECREF(iter); + return NULL; + } while (iter->index < iter->size) { theobject = getitem(iter->dataptr, self); PyList_SET_ITEM(list, (int) iter->index, theobject); @@ -1088,12 +1167,14 @@ _setlist_pkl(PyArrayObject *self, PyObject *list) { PyObject *theobject; - PyArrayIterObject *iter=NULL; + PyArrayIterObject *iter = NULL; PyArray_SetItemFunc *setitem; setitem = self->descr->f->setitem; iter = (PyArrayIterObject *)PyArray_IterNew((PyObject *)self); - if (iter == NULL) return -1; + if (iter == NULL) { + return -1; + } while(iter->index < iter->size) { theobject = PyList_GET_ITEM(list, (int) iter->index); setitem(theobject, iter->dataptr, self); @@ -1111,8 +1192,8 @@ change the format. Be sure to handle the old versions in array_setstate. */ const int version = 1; - PyObject *ret=NULL, *state=NULL, *obj=NULL, *mod=NULL; - PyObject *mybool, *thestr=NULL; + PyObject *ret = NULL, *state = NULL, *obj = NULL, *mod = NULL; + PyObject *mybool, *thestr = NULL; PyArray_Descr *descr; /* Return a tuple of (callable object, arguments, object's state) */ @@ -1120,9 +1201,14 @@ it can use the string object as memory without a copy */ ret = PyTuple_New(3); - if (ret == NULL) return NULL; + if (ret == NULL) { + return NULL; + } mod = PyImport_ImportModule("numpy.core.multiarray"); - if (mod == NULL) {Py_DECREF(ret); return NULL;} + if (mod == NULL) { + Py_DECREF(ret); + return NULL; + } obj = PyObject_GetAttrString(mod, "_reconstruct"); Py_DECREF(mod); PyTuple_SET_ITEM(ret, 0, obj); @@ -1150,7 +1236,8 @@ state = PyTuple_New(5); if (state == NULL) { - Py_DECREF(ret); return NULL; + Py_DECREF(ret); + return NULL; } PyTuple_SET_ITEM(state, 0, PyInt_FromLong(version)); PyTuple_SET_ITEM(state, 1, PyObject_GetAttrString((PyObject *)self, @@ -1227,7 +1314,9 @@ self->descr = typecode; Py_INCREF(typecode); nd = PyArray_IntpFromSequence(shape, dimensions, MAX_DIMS); - if (nd < 0) return NULL; + if (nd < 0) { + return NULL; + } size = PyArray_MultiplyList(dimensions, nd); if (self->descr->elsize == 0) { PyErr_SetString(PyExc_ValueError, "Invalid data-type size."); @@ -1264,8 +1353,9 @@ } if ((self->flags & OWNDATA)) { - if (self->data != NULL) + if (self->data != NULL) { PyDataMem_FREE(self->data); + } self->flags &= ~OWNDATA; } Py_XDECREF(self->base); @@ -1312,10 +1402,12 @@ } else { self->descr = PyArray_DescrNew(typecode); - if (self->descr->byteorder == PyArray_BIG) + if (self->descr->byteorder == PyArray_BIG) { self->descr->byteorder = PyArray_LITTLE; - else if (self->descr->byteorder == PyArray_LITTLE) + } + else if (self->descr->byteorder == PyArray_LITTLE) { self->descr->byteorder = PyArray_BIG; + } } Py_DECREF(typecode); } @@ -1335,15 +1427,19 @@ if (self->data == NULL) { self->nd = 0; self->data = PyDataMem_NEW(self->descr->elsize); - if (self->dimensions) PyDimMem_FREE(self->dimensions); + if (self->dimensions) { + PyDimMem_FREE(self->dimensions); + } return PyErr_NoMemory(); } - if (PyDataType_FLAGCHK(self->descr, NPY_NEEDS_INIT)) + if (PyDataType_FLAGCHK(self->descr, NPY_NEEDS_INIT)) { memset(self->data, 0, PyArray_NBYTES(self)); + } self->flags |= OWNDATA; self->base = NULL; - if (_setlist_pkl(self, rawdata) < 0) + if (_setlist_pkl(self, rawdata) < 0) { return NULL; + } } PyArray_UpdateFlags(self, UPDATE_ALL); @@ -1356,24 +1452,32 @@ static int PyArray_Dump(PyObject *self, PyObject *file, int protocol) { - PyObject *cpick=NULL; + PyObject *cpick = NULL; PyObject *ret; - if (protocol < 0) protocol = 2; + if (protocol < 0) { + protocol = 2; + } cpick = PyImport_ImportModule("cPickle"); - if (cpick==NULL) return -1; - + if (cpick == NULL) { + return -1; + } if PyString_Check(file) { - file = PyFile_FromString(PyString_AS_STRING(file), "wb"); - if (file==NULL) return -1; + file = PyFile_FromString(PyString_AS_STRING(file), "wb"); + if (file == NULL) { + return -1; } - else Py_INCREF(file); - ret = PyObject_CallMethod(cpick, "dump", "OOi", self, - file, protocol); + } + else { + Py_INCREF(file); + } + ret = PyObject_CallMethod(cpick, "dump", "OOi", self, file, protocol); Py_XDECREF(ret); Py_DECREF(file); Py_DECREF(cpick); - if (PyErr_Occurred()) return -1; + if (PyErr_Occurred()) { + return -1; + } return 0; } @@ -1381,12 +1485,15 @@ static PyObject * PyArray_Dumps(PyObject *self, int protocol) { - PyObject *cpick=NULL; + PyObject *cpick = NULL; PyObject *ret; - if (protocol < 0) protocol = 2; - + if (protocol < 0) { + protocol = 2; + } cpick = PyImport_ImportModule("cPickle"); - if (cpick==NULL) return NULL; + if (cpick == NULL) { + return NULL; + } ret = PyObject_CallMethod(cpick, "dumps", "Oi", self, protocol); Py_DECREF(cpick); return ret; @@ -1396,13 +1503,16 @@ static PyObject * array_dump(PyArrayObject *self, PyObject *args) { - PyObject *file=NULL; + PyObject *file = NULL; int ret; - if (!PyArg_ParseTuple(args, "O", &file)) + if (!PyArg_ParseTuple(args, "O", &file)) { return NULL; + } ret = PyArray_Dump((PyObject *)self, file, 2); - if (ret < 0) return NULL; + if (ret < 0) { + return NULL; + } Py_INCREF(Py_None); return Py_None; } @@ -1411,8 +1521,9 @@ static PyObject * array_dumps(PyArrayObject *self, PyObject *args) { - if (!PyArg_ParseTuple(args, "")) + if (!PyArg_ParseTuple(args, "")) { return NULL; + } return PyArray_Dumps((PyObject *)self, 2); } @@ -1420,19 +1531,26 @@ static PyObject * array_transpose(PyArrayObject *self, PyObject *args) { - PyObject *shape=Py_None; + PyObject *shape = Py_None; int n; PyArray_Dims permute; PyObject *ret; n = PyTuple_Size(args); - if (n > 1) shape = args; - else if (n == 1) shape = PyTuple_GET_ITEM(args, 0); + if (n > 1) { + shape = args; + } + else if (n == 1) { + shape = PyTuple_GET_ITEM(args, 0); + } - if (shape == Py_None) + if (shape == Py_None) { ret = PyArray_Transpose(self, NULL); + } else { - if (!PyArray_IntpConverter(shape, &permute)) return NULL; + if (!PyArray_IntpConverter(shape, &permute)) { + return NULL; + } ret = PyArray_Transpose(self, &permute); PyDimMem_FREE(permute.ptr); } @@ -1447,9 +1565,9 @@ static int _get_type_num_double(PyArray_Descr *dtype1, PyArray_Descr *dtype2) { - if (dtype2 != NULL) + if (dtype2 != NULL) { return dtype2->type_num; - + } /* For integer or bool data-types */ if (dtype1->type_num < NPY_FLOAT) { return NPY_DOUBLE; @@ -1464,9 +1582,9 @@ static PyObject * array_mean(PyArrayObject *self, PyObject *args, PyObject *kwds) { - int axis=MAX_DIMS; - PyArray_Descr *dtype=NULL; - PyArrayObject *out=NULL; + int axis = MAX_DIMS; + PyArray_Descr *dtype = NULL; + PyArrayObject *out = NULL; int num; static char *kwlist[] = {"axis", "dtype", "out", NULL}; @@ -1488,9 +1606,9 @@ static PyObject * array_sum(PyArrayObject *self, PyObject *args, PyObject *kwds) { - int axis=MAX_DIMS; - PyArray_Descr *dtype=NULL; - PyArrayObject *out=NULL; + int axis = MAX_DIMS; + PyArray_Descr *dtype = NULL; + PyArrayObject *out = NULL; int rtype; static char *kwlist[] = {"axis", "dtype", "out", NULL}; @@ -1513,9 +1631,9 @@ static PyObject * array_cumsum(PyArrayObject *self, PyObject *args, PyObject *kwds) { - int axis=MAX_DIMS; - PyArray_Descr *dtype=NULL; - PyArrayObject *out=NULL; + int axis = MAX_DIMS; + PyArray_Descr *dtype = NULL; + PyArrayObject *out = NULL; int rtype; static char *kwlist[] = {"axis", "dtype", "out", NULL}; @@ -1537,9 +1655,9 @@ static PyObject * array_prod(PyArrayObject *self, PyObject *args, PyObject *kwds) { - int axis=MAX_DIMS; - PyArray_Descr *dtype=NULL; - PyArrayObject *out=NULL; + int axis = MAX_DIMS; + PyArray_Descr *dtype = NULL; + PyArrayObject *out = NULL; int rtype; static char *kwlist[] = {"axis", "dtype", "out", NULL}; @@ -1561,9 +1679,9 @@ static PyObject * array_cumprod(PyArrayObject *self, PyObject *args, PyObject *kwds) { - int axis=MAX_DIMS; - PyArray_Descr *dtype=NULL; - PyArrayObject *out=NULL; + int axis = MAX_DIMS; + PyArray_Descr *dtype = NULL; + PyArrayObject *out = NULL; int rtype; static char *kwlist[] = {"axis", "dtype", "out", NULL}; @@ -1586,8 +1704,8 @@ static PyObject * array_any(PyArrayObject *self, PyObject *args, PyObject *kwds) { - int axis=MAX_DIMS; - PyArrayObject *out=NULL; + int axis = MAX_DIMS; + PyArrayObject *out = NULL; static char *kwlist[] = {"axis", "out", NULL}; if (!PyArg_ParseTupleAndKeywords(args, kwds, "|O&O&", kwlist, @@ -1604,8 +1722,8 @@ static PyObject * array_all(PyArrayObject *self, PyObject *args, PyObject *kwds) { - int axis=MAX_DIMS; - PyArrayObject *out=NULL; + int axis = MAX_DIMS; + PyArrayObject *out = NULL; static char *kwlist[] = {"axis", "out", NULL}; if (!PyArg_ParseTupleAndKeywords(args, kwds, "|O&O&", kwlist, @@ -1625,9 +1743,9 @@ static PyObject * array_stddev(PyArrayObject *self, PyObject *args, PyObject *kwds) { - int axis=MAX_DIMS; - PyArray_Descr *dtype=NULL; - PyArrayObject *out=NULL; + int axis = MAX_DIMS; + PyArray_Descr *dtype = NULL; + PyArrayObject *out = NULL; int num; int ddof = 0; static char *kwlist[] = {"axis", "dtype", "out", "ddof", NULL}; @@ -1651,9 +1769,9 @@ static PyObject * array_variance(PyArrayObject *self, PyObject *args, PyObject *kwds) { - int axis=MAX_DIMS; - PyArray_Descr *dtype=NULL; - PyArrayObject *out=NULL; + int axis = MAX_DIMS; + PyArray_Descr *dtype = NULL; + PyArrayObject *out = NULL; int num; int ddof = 0; static char *kwlist[] = {"axis", "dtype", "out", "ddof", NULL}; @@ -1666,7 +1784,7 @@ &out, &ddof)) { Py_XDECREF(dtype); return NULL; - } + } num = _get_type_num_double(self->descr, dtype); Py_XDECREF(dtype); @@ -1677,17 +1795,18 @@ static PyObject * array_compress(PyArrayObject *self, PyObject *args, PyObject *kwds) { - int axis=MAX_DIMS; + int axis = MAX_DIMS; PyObject *condition; - PyArrayObject *out=NULL; + PyArrayObject *out = NULL; static char *kwlist[] = {"condition", "axis", "out", NULL}; if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|O&O&", kwlist, &condition, PyArray_AxisConverter, &axis, PyArray_OutputConverter, - &out)) return NULL; - + &out)) { + return NULL; + } return _ARET(PyArray_Compress(self, condition, axis, out)); } @@ -1695,8 +1814,9 @@ static PyObject * array_nonzero(PyArrayObject *self, PyObject *args) { - if (!PyArg_ParseTuple(args, "")) return NULL; - + if (!PyArg_ParseTuple(args, "")) { + return NULL; + } return PyArray_Nonzero(self); } @@ -1704,9 +1824,9 @@ static PyObject * array_trace(PyArrayObject *self, PyObject *args, PyObject *kwds) { - int axis1=0, axis2=1, offset=0; - PyArray_Descr *dtype=NULL; - PyArrayObject *out=NULL; + int axis1 = 0, axis2 = 1, offset = 0; + PyArray_Descr *dtype = NULL; + PyArrayObject *out = NULL; int rtype; static char *kwlist[] = {"offset", "axis1", "axis2", "dtype", "out", NULL}; @@ -1720,9 +1840,7 @@ rtype = _CHKTYPENUM(dtype); Py_XDECREF(dtype); - - return _ARET(PyArray_Trace(self, offset, axis1, axis2, - rtype, out)); + return _ARET(PyArray_Trace(self, offset, axis1, axis2, rtype, out)); } #undef _CHKTYPENUM @@ -1731,19 +1849,19 @@ static PyObject * array_clip(PyArrayObject *self, PyObject *args, PyObject *kwds) { - PyObject *min=NULL, *max=NULL; - PyArrayObject *out=NULL; + PyObject *min = NULL, *max = NULL; + PyArrayObject *out = NULL; static char *kwlist[] = {"min", "max", "out", NULL}; if (!PyArg_ParseTupleAndKeywords(args, kwds, "|OOO&", kwlist, &min, &max, PyArray_OutputConverter, - &out)) + &out)) { return NULL; - + } if (max == NULL && min == NULL) { PyErr_SetString(PyExc_ValueError, "One of max or min must be given."); - return NULL; + return NULL; } return _ARET(PyArray_Clip(self, min, max, out)); } @@ -1753,11 +1871,12 @@ array_conjugate(PyArrayObject *self, PyObject *args) { - PyArrayObject *out=NULL; + PyArrayObject *out = NULL; if (!PyArg_ParseTuple(args, "|O&", PyArray_OutputConverter, - &out)) return NULL; - + &out)) { + return NULL; + } return PyArray_Conjugate(self, out); } @@ -1765,13 +1884,13 @@ static PyObject * array_diagonal(PyArrayObject *self, PyObject *args, PyObject *kwds) { - int axis1=0, axis2=1, offset=0; + int axis1 = 0, axis2 = 1, offset = 0; static char *kwlist[] = {"offset", "axis1", "axis2", NULL}; if (!PyArg_ParseTupleAndKeywords(args, kwds, "|iii", kwlist, - &offset, &axis1, &axis2)) + &offset, &axis1, &axis2)) { return NULL; - + } return _ARET(PyArray_Diagonal(self, offset, axis1, axis2)); } @@ -1779,11 +1898,11 @@ static PyObject * array_flatten(PyArrayObject *self, PyObject *args) { - PyArray_ORDER fortran=PyArray_CORDER; + PyArray_ORDER fortran = PyArray_CORDER; - if (!PyArg_ParseTuple(args, "|O&", PyArray_OrderConverter, - &fortran)) return NULL; - + if (!PyArg_ParseTuple(args, "|O&", PyArray_OrderConverter, &fortran)) { + return NULL; + } return PyArray_Flatten(self, fortran); } @@ -1791,11 +1910,12 @@ static PyObject * array_ravel(PyArrayObject *self, PyObject *args) { - PyArray_ORDER fortran=PyArray_CORDER; + PyArray_ORDER fortran = PyArray_CORDER; if (!PyArg_ParseTuple(args, "|O&", PyArray_OrderConverter, - &fortran)) return NULL; - + &fortran)) { + return NULL; + } return PyArray_Ravel(self, fortran); } @@ -1804,14 +1924,14 @@ array_round(PyArrayObject *self, PyObject *args, PyObject *kwds) { int decimals = 0; - PyArrayObject *out=NULL; + PyArrayObject *out = NULL; static char *kwlist[] = {"decimals", "out", NULL}; if (!PyArg_ParseTupleAndKeywords(args, kwds, "|iO&", kwlist, &decimals, PyArray_OutputConverter, - &out)) + &out)) { return NULL; - + } return _ARET(PyArray_Round(self, decimals, out)); } @@ -1824,9 +1944,9 @@ array_setflags(PyArrayObject *self, PyObject *args, PyObject *kwds) { static char *kwlist[] = {"write", "align", "uic", NULL}; - PyObject *write=Py_None; - PyObject *align=Py_None; - PyObject *uic=Py_None; + PyObject *write = Py_None; + PyObject *align = Py_None; + PyObject *uic = Py_None; int flagback = self->flags; if (!PyArg_ParseTupleAndKeywords(args, kwds, "|OOO", kwlist, @@ -1834,8 +1954,12 @@ return NULL; if (align != Py_None) { - if (PyObject_Not(align)) self->flags &= ~ALIGNED; - else if (_IsAligned(self)) self->flags |= ALIGNED; + if (PyObject_Not(align)) { + self->flags &= ~ALIGNED; + } + else if (_IsAligned(self)) { + self->flags |= ALIGNED; + } else { PyErr_SetString(PyExc_ValueError, "cannot set aligned flag of mis-"\ @@ -1888,10 +2012,13 @@ PyArray_Descr *new; if (!PyArg_ParseTuple(args, "|O&", PyArray_ByteorderConverter, - &endian)) return NULL; - + &endian)) { + return NULL; + } new = PyArray_DescrNewByteorder(self->descr, endian); - if (!new) return NULL; + if (!new) { + return NULL; + } return PyArray_View(self, new, NULL); } Modified: branches/numpy-mingw-w64/numpy/core/src/arrayobject.c =================================================================== --- branches/numpy-mingw-w64/numpy/core/src/arrayobject.c 2009-02-20 13:30:20 UTC (rev 6424) +++ branches/numpy-mingw-w64/numpy/core/src/arrayobject.c 2009-02-20 16:37:01 UTC (rev 6425) @@ -29,13 +29,15 @@ PyArray_GetPriority(PyObject *obj, double default_) { PyObject *ret; - double priority=PyArray_PRIORITY; + double priority = PyArray_PRIORITY; if (PyArray_CheckExact(obj)) return priority; ret = PyObject_GetAttrString(obj, "__array_priority__"); - if (ret != NULL) priority = PyFloat_AsDouble(ret); + if (ret != NULL) { + priority = PyFloat_AsDouble(ret); + } if (PyErr_Occurred()) { PyErr_Clear(); priority = default_; @@ -79,7 +81,9 @@ int ret, storeflags; PyObject *obj; - if (_check_object_rec(arr->descr) < 0) return NULL; + if (_check_object_rec(arr->descr) < 0) { + return NULL; + } zeroval = PyDataMem_NEW(arr->descr->elsize); if (zeroval == NULL) { PyErr_SetNone(PyExc_MemoryError); @@ -165,13 +169,15 @@ Py_XINCREF(*temp); } else if (PyDescr_HASFIELDS(descr)) { - PyObject *key, *value, *title=NULL; + PyObject *key, *value, *title = NULL; PyArray_Descr *new; int offset; - Py_ssize_t pos=0; + Py_ssize_t pos = 0; while (PyDict_Next(descr->fields, &pos, &key, &value)) { - if NPY_TITLE_KEY(key, value) continue; + if NPY_TITLE_KEY(key, value) { + continue; + } if (!PyArg_ParseTuple(value, "Oi|O", &new, &offset, &title)) { return; @@ -199,13 +205,15 @@ Py_XDECREF(*temp); } else if PyDescr_HASFIELDS(descr) { - PyObject *key, *value, *title=NULL; + PyObject *key, *value, *title = NULL; PyArray_Descr *new; int offset; - Py_ssize_t pos=0; + Py_ssize_t pos = 0; while (PyDict_Next(descr->fields, &pos, &key, &value)) { - if NPY_TITLE_KEY(key, value) continue; + if NPY_TITLE_KEY(key, value) { + continue; + } if (!PyArg_ParseTuple(value, "Oi|O", &new, &offset, &title)) { return; @@ -250,12 +258,12 @@ data = (PyObject **)mp->data; n = PyArray_SIZE(mp); if (PyArray_ISALIGNED(mp)) { - for(i = 0; i < n; i++, data++) { + for (i = 0; i < n; i++, data++) { Py_XINCREF(*data); } } else { - for(i=0; idata; n = PyArray_SIZE(mp); if (PyArray_ISALIGNED(mp)) { - for(i = 0; i < n; i++, data++) Py_XDECREF(*data); + for (i = 0; i < n; i++, data++) Py_XDECREF(*data); } else { - for(i = 0; i < n; i++, data++) { + for (i = 0; i < n; i++, data++) { temp = data; Py_XDECREF(*temp); } @@ -358,7 +366,7 @@ case 2: _FAST_MOVE(Int16); case 16: - for(i=0; i 0; n--, a += stride-1) { + for (a = (char*)p; n > 0; n--, a += stride - 1) { b = a + 3; c = *a; *a++ = *b; *b-- = c; c = *a; *a = *b; *b = c; } break; case 8: - for(a = (char*)p ; n > 0; n--, a += stride-3) { + for (a = (char*)p; n > 0; n--, a += stride - 3) { b = a + 7; c = *a; *a++ = *b; *b-- = c; c = *a; *a++ = *b; *b-- = c; @@ -474,16 +482,16 @@ } break; case 2: - for(a = (char*)p ; n > 0; n--, a += stride) { + for (a = (char*)p; n > 0; n--, a += stride) { b = a + 1; c = *a; *a = *b; *b = c; } break; default: - m = size / 2; - for(a = (char *)p ; n > 0; n--, a += stride-m) { - b = a + (size-1); - for(j=0; j 0; n--, a += stride - m) { + b = a + (size - 1); + for (j = 0; j < m; j++) { c=*a; *a++ = *b; *b-- = c; } } @@ -508,10 +516,11 @@ char *d1 = (char *)dst; - if ((numitems == 1) || (itemsize == srcstrides)) + if ((numitems == 1) || (itemsize == srcstrides)) { memcpy(d1, s1, itemsize*numitems); + } else { - for(i = 0; i < numitems; i++) { + for (i = 0; i < numitems; i++) { memcpy(d1, s1, itemsize); d1 += itemsize; s1 += srcstrides; @@ -554,7 +563,6 @@ PyErr_SetString(PyExc_TypeError, msg); return -1; } - if (PyInt_Check(o)) { long_value = (longlong) PyInt_AS_LONG(o); goto finish; @@ -593,7 +601,7 @@ #if (PY_VERSION_HEX >= 0x02050000) if (PyIndex_Check(o)) { PyObject* value = PyNumber_Index(o); - if (value==NULL) { + if (value == NULL) { return -1; } long_value = (longlong) PyInt_AsSsize_t(value); @@ -655,7 +663,6 @@ PyErr_SetString(PyExc_TypeError, msg); return -1; } - if (PyInt_Check(o)) { long_value = (long) PyInt_AS_LONG(o); goto finish; @@ -665,7 +672,7 @@ } descr = &INT_Descr; - arr=NULL; + arr = NULL; if (PyArray_Check(o)) { if (PyArray_SIZE(o)!=1 || !PyArray_ISINTEGER(o)) { PyErr_SetString(PyExc_TypeError, msg); @@ -720,8 +727,7 @@ #if (SIZEOF_LONG > SIZEOF_INT) if ((long_value < INT_MIN) || (long_value > INT_MAX)) { - PyErr_SetString(PyExc_ValueError, - "integer won't fit into a C int"); + PyErr_SetString(PyExc_ValueError, "integer won't fit into a C int"); return -1; } #endif @@ -732,17 +738,19 @@ index2ptr(PyArrayObject *mp, intp i) { intp dim0; - if(mp->nd == 0) { - PyErr_SetString(PyExc_IndexError, - "0-d arrays can't be indexed"); + + if (mp->nd == 0) { + PyErr_SetString(PyExc_IndexError, "0-d arrays can't be indexed"); return NULL; } dim0 = mp->dimensions[0]; - if (i<0) i += dim0; - if (i==0 && dim0 > 0) + if (i < 0) { + i += dim0; + } + if (i == 0 && dim0 > 0) { return mp->data; - - if (i>0 && i < dim0) { + } + if (i > 0 && i < dim0) { return mp->data+i*mp->strides[0]; } PyErr_SetString(PyExc_IndexError,"index out of bounds"); @@ -766,11 +774,11 @@ static int _copy_from0d(PyArrayObject *dest, PyArrayObject *src, int usecopy, int swap) { - char *aligned=NULL; + char *aligned = NULL; char *sptr; int numcopies, nbytes; void (*myfunc)(char *, intp, char *, intp, intp, int); - int retval=-1; + int retval = -1; NPY_BEGIN_THREADS_DEF; numcopies = PyArray_SIZE(dest); @@ -807,10 +815,12 @@ intp dstride; dptr = dest->data; - if (dest->nd == 1) + if (dest->nd == 1) { dstride = dest->strides[0]; - else + } + else { dstride = nbytes; + } /* Refcount note: src and dest may have different sizes */ PyArray_INCREF(src); @@ -826,9 +836,10 @@ } else { PyArrayIterObject *dit; - int axis=-1; - dit = (PyArrayIterObject *)\ - PyArray_IterAllButAxis((PyObject *)dest, &axis); + int axis = -1; + + dit = (PyArrayIterObject *) + PyArray_IterAllButAxis((PyObject *)dest, &axis); if (dit == NULL) { goto finish; } @@ -837,12 +848,10 @@ PyArray_XDECREF(dest); NPY_BEGIN_THREADS; while(dit->index < dit->size) { - myfunc(dit->dataptr, PyArray_STRIDE(dest, axis), - sptr, 0, + myfunc(dit->dataptr, PyArray_STRIDE(dest, axis), sptr, 0, PyArray_DIM(dest, axis), nbytes); if (swap) { - _strided_byte_swap(dit->dataptr, - PyArray_STRIDE(dest, axis), + _strided_byte_swap(dit->dataptr, PyArray_STRIDE(dest, axis), PyArray_DIM(dest, axis), nbytes); } PyArray_ITER_NEXT(dit); @@ -928,8 +937,7 @@ PyArray_XDECREF((PyArrayObject *)dst); NPY_BEGIN_THREADS; while(it->index < it->size) { - myfunc(dptr, elsize, it->dataptr, - PyArray_STRIDE(src,axis), + myfunc(dptr, elsize, it->dataptr, PyArray_STRIDE(src,axis), PyArray_DIM(src,axis), elsize); dptr += nbytes; PyArray_ITER_NEXT(it); @@ -949,7 +957,7 @@ void (*myfunc)(char *, intp, char *, intp, intp, int), int swap) { - int maxaxis=-1, elsize; + int maxaxis = -1, elsize; intp maxdim; PyArrayIterObject *dit, *sit; NPY_BEGIN_THREADS_DEF; @@ -1323,7 +1331,7 @@ } if (!PyArray_ISNBO(descr->byteorder)) descr->byteorder = '='; - for(i = 0; i < nd; i++) { + for (i = 0; i < nd; i++) { newd[i] = (intp) d[i]; } ret = PyArray_NewFromDescr(&PyArray_Type, descr, @@ -1409,8 +1417,9 @@ int swap; type_num = descr->type_num; - if (type_num == PyArray_BOOL) + if (type_num == PyArray_BOOL) { PyArrayScalar_RETURN_BOOL_FROM_LONG(*(Bool*)data); + } else if (PyDataType_FLAGCHK(descr, NPY_USE_GETITEM)) { return descr->f->getitem(data, base); } @@ -1420,18 +1429,23 @@ swap = !PyArray_ISNBO(descr->byteorder); if PyTypeNum_ISSTRING(type_num) { /* Eliminate NULL bytes */ char *dptr = data; - dptr += itemsize-1; - while(itemsize && *dptr-- == 0) itemsize--; + + dptr += itemsize - 1; + while(itemsize && *dptr-- == 0) { + itemsize--; + } if (type_num == PyArray_UNICODE && itemsize) { /* make sure itemsize is a multiple of 4 */ /* so round up to nearest multiple */ itemsize = (((itemsize-1) >> 2) + 1) << 2; } } - if (type->tp_itemsize != 0) /* String type */ + if (type->tp_itemsize != 0) { /* String type */ obj = type->tp_alloc(type, itemsize); - else + } + else { obj = type->tp_alloc(type, 0); + } if (obj == NULL) { return NULL; } @@ -1449,7 +1463,7 @@ int length = itemsize >> 2; #ifndef Py_UNICODE_WIDE char *buffer; - int alloc=0; + int alloc = 0; length *= 2; #endif /* Need an extra slot and need to use @@ -1468,22 +1482,25 @@ uni->defenc = NULL; #ifdef Py_UNICODE_WIDE memcpy(destptr, data, itemsize); - if (swap) + if (swap) { byte_swap_vector(destptr, length, 4); + } #else /* need aligned data buffer */ if ((swap) || ((((intp)data) % descr->alignment) != 0)) { buffer = _pya_malloc(itemsize); - if (buffer == NULL) + if (buffer == NULL) { return PyErr_NoMemory(); + } alloc = 1; memcpy(buffer, data, itemsize); if (swap) { - byte_swap_vector(buffer, - itemsize >> 2, 4); + byte_swap_vector(buffer, itemsize >> 2, 4); } } - else buffer = data; + else { + buffer = data; + } /* Allocated enough for 2-characters per itemsize. Now convert from the data-buffer @@ -1491,7 +1508,9 @@ length = PyUCS2Buffer_FromUCS4(uni->str, (PyArray_UCS4 *)buffer, itemsize >> 2); - if (alloc) _pya_free(buffer); + if (alloc) { + _pya_free(buffer); + } /* Resize the unicode result */ if (MyPyUnicode_Resize(uni, length) < 0) { Py_DECREF(obj); @@ -1635,7 +1654,7 @@ copyswap = PyArray_DESCR(arr)->f->copyswap; - for(i = 0; i < n; i++) { + for (i = 0; i < n; i++) { copyswap(dstptr, srcptr, swap, arr); dstptr += dstride; srcptr += sstride; @@ -1657,12 +1676,12 @@ int i; PyArray_Descr *descr; - for(i=0; itypeobj->tp_name, str) == 0) + if (strcmp(descr->typeobj->tp_name, str) == 0) { return descr->type_num; + } } - return PyArray_NOTYPE; } @@ -1684,10 +1703,11 @@ PyArray_ArrFuncs *f; /* See if this type is already registered */ - for(i=0; itype_num; + } } typenum = PyArray_USERDEF + NPY_NUMUSERTYPES; descr->type_num = typenum; @@ -1733,6 +1753,7 @@ { PyObject *cobj, *key; int ret; + if (totype < PyArray_NTYPES) { descr->f->cast[totype] = castfunc; return 0; @@ -1743,12 +1764,19 @@ } if (descr->f->castdict == NULL) { descr->f->castdict = PyDict_New(); - if (descr->f->castdict == NULL) return -1; + if (descr->f->castdict == NULL) { + return -1; + } } key = PyInt_FromLong(totype); - if (PyErr_Occurred()) return -1; + if (PyErr_Occurred()) { + return -1; + } cobj = PyCObject_FromVoidPtr((void *)castfunc, NULL); - if (cobj == NULL) {Py_DECREF(key); return -1;} + if (cobj == NULL) { + Py_DECREF(key); + return -1; + } ret = PyDict_SetItem(descr->f->castdict, key, cobj); Py_DECREF(key); Py_DECREF(cobj); @@ -1758,13 +1786,15 @@ static int * _append_new(int *types, int insert) { - int n=0; + int n = 0; int *newtypes; - while (types[n] != PyArray_NOTYPE) n++; - newtypes = (int *)realloc(types, (n+2)*sizeof(int)); + while (types[n] != PyArray_NOTYPE) { + n++; + } + newtypes = (int *)realloc(types, (n + 2)*sizeof(int)); newtypes[n] = insert; - newtypes[n+1] = PyArray_NOTYPE; + newtypes[n + 1] = PyArray_NOTYPE; return newtypes; } @@ -1791,22 +1821,20 @@ /* register with cancastscalarkindto */ if (descr->f->cancastscalarkindto == NULL) { int i; - descr->f->cancastscalarkindto = \ - (int **)malloc(PyArray_NSCALARKINDS* \ - sizeof(int*)); - for(i=0; if->cancastscalarkindto = + (int **)malloc(PyArray_NSCALARKINDS* sizeof(int*)); + for (i = 0; i < PyArray_NSCALARKINDS; i++) { descr->f->cancastscalarkindto[i] = NULL; } } if (descr->f->cancastscalarkindto[scalar] == NULL) { - descr->f->cancastscalarkindto[scalar] = \ + descr->f->cancastscalarkindto[scalar] = (int *)malloc(1*sizeof(int)); - descr->f->cancastscalarkindto[scalar][0] = \ + descr->f->cancastscalarkindto[scalar][0] = PyArray_NOTYPE; } - descr->f->cancastscalarkindto[scalar] = \ - _append_new(descr->f->cancastscalarkindto[scalar], - totype); + descr->f->cancastscalarkindto[scalar] = + _append_new(descr->f->cancastscalarkindto[scalar], totype); } return 0; } @@ -1859,7 +1887,7 @@ it = (PyArrayIterObject *) PyArray_IterNew((PyObject *)self); NPY_BEGIN_THREADS; - while(it->index < it->size) { + while (it->index < it->size) { if (fwrite((const void *)it->dataptr, (size_t) self->descr->elsize, 1, fp) < 1) { @@ -1885,7 +1913,7 @@ it = (PyArrayIterObject *) PyArray_IterNew((PyObject *)self); n4 = (format ? strlen((const char *)format) : 0); - while(it->index < it->size) { + while (it->index < it->size) { obj = self->descr->f->getitem(it->dataptr, self); if (obj == NULL) { Py_DECREF(it); @@ -1977,7 +2005,7 @@ sz = self->dimensions[0]; lp = PyList_New(sz); - for(i = 0; i < sz; i++) { + for (i = 0; i < sz; i++) { v = (PyArrayObject *)array_big_item(self, i); if (PyArray_Check(v) && (v->nd >= self->nd)) { PyErr_SetString(PyExc_RuntimeError, @@ -2015,7 +2043,7 @@ */ numbytes = PyArray_NBYTES(self); - if ((PyArray_ISCONTIGUOUS(self) && (order == NPY_CORDER)) || \ + if ((PyArray_ISCONTIGUOUS(self) && (order == NPY_CORDER)) || (PyArray_ISFORTRAN(self) && (order == NPY_FORTRANORDER))) { ret = PyString_FromStringAndSize(self->data, (int) numbytes); } @@ -2024,7 +2052,9 @@ if (order == NPY_FORTRANORDER) { /* iterators are always in C-order */ new = PyArray_Transpose(self, NULL); - if (new == NULL) return NULL; + if (new == NULL) { + return NULL; + } } else { Py_INCREF(self); @@ -2032,13 +2062,18 @@ } it = (PyArrayIterObject *)PyArray_IterNew(new); Py_DECREF(new); - if (it==NULL) return NULL; + if (it == NULL) { + return NULL; + } ret = PyString_FromStringAndSize(NULL, (int) numbytes); - if (ret == NULL) {Py_DECREF(it); return NULL;} + if (ret == NULL) { + Py_DECREF(it); + return NULL; + } dptr = PyString_AS_STRING(ret); index = it->size; elsize = self->descr->elsize; - while(index--) { + while (index--) { memcpy(dptr, it->dataptr, elsize); dptr += elsize; PyArray_ITER_NEXT(it); @@ -2057,30 +2092,34 @@ static void array_dealloc(PyArrayObject *self) { - if (self->weakreflist != NULL) + if (self->weakreflist != NULL) { PyObject_ClearWeakRefs((PyObject *)self); - - if(self->base) { - /* UPDATEIFCOPY means that base points to an - array that should be updated with the contents - of this array upon destruction. - self->base->flags must have been WRITEABLE - (checked previously) and it was locked here - thus, unlock it. - */ + } + if (self->base) { + /* + * UPDATEIFCOPY means that base points to an + * array that should be updated with the contents + * of this array upon destruction. + * self->base->flags must have been WRITEABLE + * (checked previously) and it was locked here + * thus, unlock it. + */ if (self->flags & UPDATEIFCOPY) { ((PyArrayObject *)self->base)->flags |= WRITEABLE; Py_INCREF(self); /* hold on to self in next call */ - if (PyArray_CopyAnyInto((PyArrayObject *)self->base, - self) < 0) { + if (PyArray_CopyAnyInto((PyArrayObject *)self->base, self) < 0) { PyErr_Print(); PyErr_Clear(); } - /* Don't need to DECREF -- because we are deleting - self already... */ + /* + * Don't need to DECREF -- because we are deleting + *self already... + */ } - /* In any case base is pointing to something that we need - to DECREF -- either a view or a buffer object */ + /* + * In any case base is pointing to something that we need + * to DECREF -- either a view or a buffer object + */ Py_DECREF(self->base); } @@ -2089,16 +2128,16 @@ if (PyDataType_FLAGCHK(self->descr, NPY_ITEM_REFCOUNT)) { Py_INCREF(self); /*hold on to self */ PyArray_XDECREF(self); - /* Don't need to DECREF -- because we are deleting - self already... */ + /* + * Don't need to DECREF -- because we are deleting + * self already... + */ } PyDataMem_FREE(self->data); } PyDimMem_FREE(self->dimensions); - Py_DECREF(self->descr); - self->ob_type->tp_free((PyObject *)self); } @@ -2128,8 +2167,9 @@ "0-d arrays can't be indexed"); return NULL; } - if ((item = index2ptr(self, i)) == NULL) return NULL; - + if ((item = index2ptr(self, i)) == NULL) { + return NULL; + } Py_INCREF(self->descr); r = (PyArrayObject *)PyArray_NewFromDescr(self->ob_type, self->descr, @@ -2138,7 +2178,9 @@ self->strides+1, item, self->flags, (PyObject *)self); - if (r == NULL) return NULL; + if (r == NULL) { + return NULL; + } Py_INCREF(self); r->base = (PyObject *)self; PyArray_UpdateFlags(r, CONTIGUOUS | FORTRAN); @@ -2151,12 +2193,14 @@ { if (self->nd == 1) { char *item; - if ((item = index2ptr(self, i)) == NULL) return NULL; + if ((item = index2ptr(self, i)) == NULL) { + return NULL; + } return PyArray_Scalar(item, self->descr, (PyObject *)self); } else { - return PyArray_Return((PyArrayObject *)\ - array_big_item(self, (intp) i)); + return PyArray_Return( + (PyArrayObject *) array_big_item(self, (intp) i)); } } @@ -2185,15 +2229,20 @@ if (self->nd > 1) { - if((tmp = (PyArrayObject *)array_big_item(self, i)) == NULL) + if((tmp = (PyArrayObject *)array_big_item(self, i)) == NULL) { return -1; + } ret = PyArray_CopyObject(tmp, v); Py_DECREF(tmp); return ret; } - if ((item = index2ptr(self, i)) == NULL) return -1; - if (self->descr->f->setitem(v, item, self) == -1) return -1; + if ((item = index2ptr(self, i)) == NULL) { + return -1; + } + if (self->descr->f->setitem(v, item, self) == -1) { + return -1; + } return 0; } @@ -2239,8 +2288,11 @@ if (r->step == Py_None) { *step = 1; - } else { - if (!slice_coerce_index(r->step, step)) return -1; + } + else { + if (!slice_coerce_index(r->step, step)) { + return -1; + } if (*step == 0) { PyErr_SetString(PyExc_ValueError, "slice step cannot be zero"); @@ -2248,15 +2300,20 @@ } } /* defstart = *step < 0 ? length - 1 : 0; */ - defstop = *step < 0 ? -1 : length; - if (r->start == Py_None) { *start = *step < 0 ? length-1 : 0; - } else { - if (!slice_coerce_index(r->start, start)) return -1; - if (*start < 0) *start += length; - if (*start < 0) *start = (*step < 0) ? -1 : 0; + } + else { + if (!slice_coerce_index(r->start, start)) { + return -1; + } + if (*start < 0) { + *start += length; + } + if (*start < 0) { + *start = (*step < 0) ? -1 : 0; + } if (*start >= length) { *start = (*step < 0) ? length - 1 : length; } @@ -2264,19 +2321,30 @@ if (r->stop == Py_None) { *stop = defstop; - } else { - if (!slice_coerce_index(r->stop, stop)) return -1; - if (*stop < 0) *stop += length; - if (*stop < 0) *stop = -1; - if (*stop > length) *stop = length; } + else { + if (!slice_coerce_index(r->stop, stop)) { + return -1; + } + if (*stop < 0) { + *stop += length; + } + if (*stop < 0) { + *stop = -1; + } + if (*stop > length) { + *stop = length; + } + } - if ((*step < 0 && *stop >= *start) || \ + if ((*step < 0 && *stop >= *start) || (*step > 0 && *start >= *stop)) { *slicelength = 0; - } else if (*step < 0) { + } + else if (*step < 0) { *slicelength = (*stop - *start + 1) / (*step) + 1; - } else { + } + else { *slicelength = (*stop - *start - 1) / (*step) + 1; } @@ -2295,10 +2363,12 @@ if (op == Py_None) { *n_steps = PseudoIndex; index = 0; - } else if (op == Py_Ellipsis) { + } + else if (op == Py_Ellipsis) { *n_steps = RubberIndex; index = 0; - } else if (PySlice_Check(op)) { + } + else if (PySlice_Check(op)) { intp stop; if (slice_GetIndices((PySliceObject *)op, max, &index, &stop, step_size, n_steps) < 0) { @@ -2313,7 +2383,8 @@ *step_size = 1; index = 0; } - } else { + } + else { index = PyArray_PyIntAsIntp(op); if (error_converting(index)) { PyErr_SetString(PyExc_IndexError, @@ -2324,13 +2395,16 @@ } *n_steps = SingleIndex; *step_size = 0; - if (index < 0) index += max; + if (index < 0) { + index += max; + } if (index >= max || index < 0) { PyErr_SetString(PyExc_IndexError, "invalid index"); goto fail; } } return index; + fail: return -1; } @@ -2343,7 +2417,7 @@ int i, j, n; int nd_old, nd_new, n_add, n_pseudo; intp n_steps, start, offset, step_size; - PyObject *op1=NULL; + PyObject *op1 = NULL; int is_slice; if (PySlice_Check(op) || op == Py_Ellipsis || op == Py_None) { @@ -2367,7 +2441,7 @@ nd_old = nd_new = 0; offset = 0; - for(i=0; ind ? \ + nd_old < self->nd ? self->dimensions[nd_old] : 0); Py_DECREF(op1); - if (start == -1) break; - + if (start == -1) { + break; + } if (n_steps == PseudoIndex) { dimensions[nd_new] = 1; strides[nd_new] = 0; nd_new++; - } else { + } + else { if (n_steps == RubberIndex) { - for(j=i+1, n_pseudo=0; jnd-(n-i-n_pseudo-1+nd_old); @@ -2398,14 +2475,15 @@ "too many indices"); return -1; } - for(j=0; jdimensions[nd_old]; strides[nd_new] = \ self->strides[nd_old]; nd_new++; nd_old++; } - } else { + } + else { if (nd_old >= self->nd) { PyErr_SetString(PyExc_IndexError, "too many indices"); @@ -2422,12 +2500,15 @@ } } } - if (i < n) return -1; + if (i < n) { + return -1; + } n_add = self->nd-nd_old; - for(j=0; jdimensions[nd_old]; strides[nd_new] = self->strides[nd_old]; - nd_new++; nd_old++; + nd_new++; + nd_old++; } *offset_ptr = offset; return nd_new; @@ -2446,68 +2527,73 @@ permute.ptr = d; permute.len = mit->nd; - /* arr might not have the right number of dimensions - and need to be reshaped first by pre-pending ones */ + /* + * arr might not have the right number of dimensions + * and need to be reshaped first by pre-pending ones + */ arr = *ret; if (arr->nd != mit->nd) { - for(i=1; i<=arr->nd; i++) { + for (i = 1; i <= arr->nd; i++) { permute.ptr[mit->nd-i] = arr->dimensions[arr->nd-i]; } - for(i=0; ind-arr->nd; i++) { + for (i = 0; i < mit->nd-arr->nd; i++) { permute.ptr[i] = 1; } new = PyArray_Newshape(arr, &permute, PyArray_ANYORDER); Py_DECREF(arr); *ret = (PyArrayObject *)new; - if (new == NULL) return; + if (new == NULL) { + return; + } } - /* Setting and getting need to have different permutations. - On the get we are permuting the returned object, but on - setting we are permuting the object-to-be-set. - The set permutation is the inverse of the get permutation. - */ + /* + * Setting and getting need to have different permutations. + * On the get we are permuting the returned object, but on + * setting we are permuting the object-to-be-set. + * The set permutation is the inverse of the get permutation. + */ - /* For getting the array the tuple for transpose is - (n1,...,n1+n2-1,0,...,n1-1,n1+n2,...,n3-1) - n1 is the number of dimensions of - the broadcasted index array - n2 is the number of dimensions skipped at the - start - n3 is the number of dimensions of the - result - */ + /* + * For getting the array the tuple for transpose is + * (n1,...,n1+n2-1,0,...,n1-1,n1+n2,...,n3-1) + * n1 is the number of dimensions of the broadcast index array + * n2 is the number of dimensions skipped at the start + * n3 is the number of dimensions of the result + */ - /* For setting the array the tuple for transpose is - (n2,...,n1+n2-1,0,...,n2-1,n1+n2,...n3-1) - */ + /* + * For setting the array the tuple for transpose is + * (n2,...,n1+n2-1,0,...,n2-1,n1+n2,...n3-1) + */ n1 = mit->iters[0]->nd_m1 + 1; n2 = mit->iteraxes[0]; n3 = mit->nd; - bnd = (getmap ? n1 : n2); /* use n1 as the boundary if getting - but n2 if setting */ - + /* use n1 as the boundary if getting but n2 if setting */ + bnd = getmap ? n1 : n2; val = bnd; i = 0; - while(val < n1+n2) + while (val < n1 + n2) { permute.ptr[i++] = val++; + } val = 0; - while(val < bnd) + while (val < bnd) { permute.ptr[i++] = val++; - val = n1+n2; - while(val < n3) + } + val = n1 + n2; + while (val < n3) { permute.ptr[i++] = val++; - + } new = PyArray_Transpose(*ret, &permute); Py_DECREF(*ret); *ret = (PyArrayObject *)new; } -/* Prototypes for Mapping calls --- not part of the C-API - because only useful as part of a getitem call. -*/ - +/* + * Prototypes for Mapping calls --- not part of the C-API + * because only useful as part of a getitem call. + */ static void PyArray_MapIterReset(PyArrayMapIterObject *); static void PyArray_MapIterNext(PyArrayMapIterObject *); static void PyArray_MapIterBind(PyArrayMapIterObject *, PyArrayObject *); @@ -2524,28 +2610,33 @@ PyArray_CopySwapFunc *copyswap; /* Unbound map iterator --- Bind should have been called */ - if (mit->ait == NULL) return NULL; + if (mit->ait == NULL) { + return NULL; + } /* This relies on the map iterator object telling us the shape of the new array in nd and dimensions. */ temp = mit->ait->ao; Py_INCREF(temp->descr); - ret = (PyArrayObject *)\ + ret = (PyArrayObject *) PyArray_NewFromDescr(temp->ob_type, temp->descr, mit->nd, mit->dimensions, NULL, NULL, PyArray_ISFORTRAN(temp), (PyObject *)temp); - if (ret == NULL) return NULL; + if (ret == NULL) { + return NULL; + } - /* Now just iterate through the new array filling it in - with the next object from the original array as - defined by the mapping iterator */ + /* + * Now just iterate through the new array filling it in + * with the next object from the original array as + * defined by the mapping iterator + */ - if ((it = (PyArrayIterObject *)PyArray_IterNew((PyObject *)ret)) - == NULL) { + if ((it = (PyArrayIterObject *)PyArray_IterNew((PyObject *)ret)) == NULL) { Py_DECREF(ret); return NULL; } @@ -2572,7 +2663,7 @@ static int PyArray_SetMap(PyArrayMapIterObject *mit, PyObject *op) { - PyObject *arr=NULL; + PyObject *arr = NULL; PyArrayIterObject *it; int index; int swap; @@ -2580,17 +2671,21 @@ PyArray_Descr *descr; /* Unbound Map Iterator */ - if (mit->ait == NULL) return -1; - + if (mit->ait == NULL) { + return -1; + } descr = mit->ait->ao->descr; Py_INCREF(descr); arr = PyArray_FromAny(op, descr, 0, 0, FORCECAST, NULL); - if (arr == NULL) return -1; - + if (arr == NULL) { + return -1; + } if ((mit->subspace != NULL) && (mit->consec)) { if (mit->iteraxes[0] > 0) { /* then we need to swap */ _swap_axes(mit, (PyArrayObject **)&arr, 0); - if (arr == NULL) return -1; + if (arr == NULL) { + return -1; + } } } @@ -2604,7 +2699,7 @@ } index = mit->size; - swap = (PyArray_ISNOTSWAPPED(mit->ait->ao) != \ + swap = (PyArray_ISNOTSWAPPED(mit->ait->ao) != (PyArray_ISNOTSWAPPED(arr))); copyswap = PyArray_DESCR(arr)->f->copyswap; PyArray_MapIterReset(mit); @@ -2615,8 +2710,9 @@ PyArray_Item_INCREF(it->dataptr, PyArray_DESCR(arr)); memmove(mit->dataptr, it->dataptr, sizeof(PyObject *)); /* ignored unless VOID array with object's */ - if (swap) + if (swap) { copyswap(mit->dataptr, NULL, swap, arr); + } PyArray_MapIterNext(mit); PyArray_ITER_NEXT(it); } @@ -2626,8 +2722,9 @@ } while(index--) { memmove(mit->dataptr, it->dataptr, PyArray_ITEMSIZE(arr)); - if (swap) + if (swap) { copyswap(mit->dataptr, NULL, swap, arr); + } PyArray_MapIterNext(mit); PyArray_ITER_NEXT(it); } @@ -2644,12 +2741,17 @@ int newaxis_count = 0; argument_count = PyTuple_GET_SIZE(tuple); - - for(i = 0; i < argument_count; ++i) { + for (i = 0; i < argument_count; ++i) { PyObject *arg = PyTuple_GET_ITEM(tuple, i); - if (arg == Py_Ellipsis && !ellipsis_count) ellipsis_count++; - else if (arg == Py_None) newaxis_count++; - else break; + if (arg == Py_Ellipsis && !ellipsis_count) { + ellipsis_count++; + } + else if (arg == Py_None) { + newaxis_count++; + } + else { + break; + } } if (i < argument_count) { PyErr_SetString(PyExc_IndexError, @@ -2659,8 +2761,7 @@ return -1; } if (newaxis_count > MAX_DIMS) { - PyErr_SetString(PyExc_IndexError, - "too many dimensions"); + PyErr_SetString(PyExc_IndexError, "too many dimensions"); return -1; } return newaxis_count; @@ -2672,7 +2773,8 @@ PyArrayObject *other; intp dimensions[MAX_DIMS]; int i; - for(i = 0; i < newaxis_count; ++i) { + + for (i = 0; i < newaxis_count; ++i) { dimensions[i] = 1; } Py_INCREF(arr->descr); @@ -2706,13 +2808,16 @@ if (PyTuple_Check(args)) { n = PyTuple_GET_SIZE(args); - if (n >= MAX_DIMS) return SOBJ_TOOMANY; - for(i=0; i= MAX_DIMS) { + return SOBJ_TOOMANY; + } + for (i = 0; i < n; i++) { obj = PyTuple_GET_ITEM(args,i); if (PyArray_Check(obj)) { if (PyArray_ISINTEGER(obj) || - PyArray_ISBOOL(obj)) + PyArray_ISBOOL(obj)) { retval = SOBJ_ISFANCY; + } else { retval = SOBJ_BADARRAY; break; @@ -2725,62 +2830,69 @@ } else if (PyArray_Check(args)) { if ((PyArray_TYPE(args)==PyArray_BOOL) || - (PyArray_ISINTEGER(args))) + (PyArray_ISINTEGER(args))) { return SOBJ_ISFANCY; - else + } + else { return SOBJ_BADARRAY; + } } else if (PySequence_Check(args)) { - /* Sequences < MAX_DIMS with any slice objects - or newaxis, or Ellipsis is considered standard - as long as there are also no Arrays and or additional - sequences embedded. - */ + /* + * Sequences < MAX_DIMS with any slice objects + * or newaxis, or Ellipsis is considered standard + * as long as there are also no Arrays and or additional + * sequences embedded. + */ retval = SOBJ_ISFANCY; n = PySequence_Size(args); - if (n<0 || n>=MAX_DIMS) return SOBJ_ISFANCY; - for(i=0; i= MAX_DIMS) { + return SOBJ_ISFANCY; + } + for (i = 0; i < n; i++) { obj = PySequence_GetItem(args, i); - if (obj == NULL) return SOBJ_ISFANCY; + if (obj == NULL) { + return SOBJ_ISFANCY; + } if (PyArray_Check(obj)) { - if (PyArray_ISINTEGER(obj) || - PyArray_ISBOOL(obj)) + if (PyArray_ISINTEGER(obj) || PyArray_ISBOOL(obj)) { retval = SOBJ_LISTTUP; - else + } + else { retval = SOBJ_BADARRAY; + } } else if (PySequence_Check(obj)) { retval = SOBJ_LISTTUP; } else if (PySlice_Check(obj) || obj == Py_Ellipsis || - obj == Py_None) { + obj == Py_None) { retval = SOBJ_NOTFANCY; } Py_DECREF(obj); - if (retval > SOBJ_ISFANCY) return retval; + if (retval > SOBJ_ISFANCY) { + return retval; + } } } return retval; } -/* Called when treating array object like a mapping -- called first from - Python when using a[object] unless object is a standard slice object - (not an extended one). +/* + * Called when treating array object like a mapping -- called first from + * Python when using a[object] unless object is a standard slice object + * (not an extended one). + * + * There are two situations: + * + * 1 - the subscript is a standard view and a reference to the + * array can be returned + * + * 2 - the subscript uses Boolean masks or integer indexing and + * therefore a new array is created and returned. + */ -*/ - -/* There are two situations: - - 1 - the subscript is a standard view and a reference to the - array can be returned - - 2 - the subscript uses Boolean masks or integer indexing and - therefore a new array is created and returned. - -*/ - /* Always returns arrays */ - static PyObject *iter_subscript(PyArrayIterObject *, PyObject *); @@ -2800,24 +2912,22 @@ PyErr_Clear(); /* Standard (view-based) Indexing */ - if ((nd = parse_index(self, op, dimensions, strides, &offset)) - == -1) return NULL; - + if ((nd = parse_index(self, op, dimensions, strides, &offset)) == -1) { + return NULL; + } /* This will only work if new array will be a view */ Py_INCREF(self->descr); - if ((other = (PyArrayObject *) \ + if ((other = (PyArrayObject *) PyArray_NewFromDescr(self->ob_type, self->descr, nd, dimensions, strides, self->data+offset, self->flags, - (PyObject *)self)) == NULL) + (PyObject *)self)) == NULL) { return NULL; - + } other->base = (PyObject *)self; Py_INCREF(self); - PyArray_UpdateFlags(other, UPDATE_ALL); - return (PyObject *)other; } @@ -2827,21 +2937,19 @@ int nd, fancy; PyArrayObject *other; PyArrayMapIterObject *mit; + PyObject *obj; if (PyString_Check(op) || PyUnicode_Check(op)) { if (self->descr->names) { - PyObject *obj; obj = PyDict_GetItem(self->descr->fields, op); if (obj != NULL) { PyArray_Descr *descr; int offset; PyObject *title; - if (PyArg_ParseTuple(obj, "Oi|O", - &descr, &offset, &title)) { + if (PyArg_ParseTuple(obj, "Oi|O", &descr, &offset, &title)) { Py_INCREF(descr); - return PyArray_GetField(self, descr, - offset); + return PyArray_GetField(self, descr, offset); } } } @@ -2852,26 +2960,58 @@ return NULL; } + /* Check for multiple field access */ + if (self->descr->names && PySequence_Check(op) && !PyTuple_Check(op)) { + int seqlen, i; + seqlen = PySequence_Size(op); + for (i = 0; i < seqlen; i++) { + obj = PySequence_GetItem(op, i); + if (!PyString_Check(obj) && !PyUnicode_Check(obj)) { + Py_DECREF(obj); + break; + } + Py_DECREF(obj); + } + /* + * extract multiple fields if all elements in sequence + * are either string or unicode (i.e. no break occurred). + */ + fancy = ((seqlen > 0) && (i == seqlen)); + if (fancy) { + PyObject *_numpy_internal; + _numpy_internal = PyImport_ImportModule("numpy.core._internal"); + if (_numpy_internal == NULL) { + return NULL; + } + obj = PyObject_CallMethod(_numpy_internal, + "_index_fields", "OO", self, op); + Py_DECREF(_numpy_internal); + return obj; + } + } + if (op == Py_Ellipsis) { Py_INCREF(self); return (PyObject *)self; } if (self->nd == 0) { - if (op == Py_None) + if (op == Py_None) { return add_new_axes_0d(self, 1); + } if (PyTuple_Check(op)) { if (0 == PyTuple_GET_SIZE(op)) { Py_INCREF(self); return (PyObject *)self; } - if ((nd = count_new_axes_0d(op)) == -1) + if ((nd = count_new_axes_0d(op)) == -1) { return NULL; + } return add_new_axes_0d(self, nd); } /* Allow Boolean mask selection also */ - if ((PyArray_Check(op) && (PyArray_DIMS(op)==0) && - PyArray_ISBOOL(op))) { + if ((PyArray_Check(op) && (PyArray_DIMS(op)==0) + && PyArray_ISBOOL(op))) { if (PyObject_IsTrue(op)) { Py_INCREF(self); return (PyObject *)self; @@ -2887,28 +3027,30 @@ NULL); } } - PyErr_SetString(PyExc_IndexError, - "0-d arrays can't be indexed."); + PyErr_SetString(PyExc_IndexError, "0-d arrays can't be indexed."); return NULL; } fancy = fancy_indexing_check(op); - if (fancy != SOBJ_NOTFANCY) { int oned; + oned = ((self->nd == 1) && !(PyTuple_Check(op) && PyTuple_GET_SIZE(op) > 1)); /* wrap arguments into a mapiter object */ - mit = (PyArrayMapIterObject *)\ - PyArray_MapIterNew(op, oned, fancy); - if (mit == NULL) return NULL; + mit = (PyArrayMapIterObject *) PyArray_MapIterNew(op, oned, fancy); + if (mit == NULL) { + return NULL; + } if (oned) { PyArrayIterObject *it; PyObject *rval; - it = (PyArrayIterObject *)\ - PyArray_IterNew((PyObject *)self); - if (it == NULL) {Py_DECREF(mit); return NULL;} + it = (PyArrayIterObject *) PyArray_IterNew((PyObject *)self); + if (it == NULL) { + Py_DECREF(mit); + return NULL; + } rval = iter_subscript(it, mit->indexobj); Py_DECREF(it); Py_DECREF(mit); @@ -2924,15 +3066,13 @@ } -/* Another assignment hacked by using CopyObject. */ - -/* This only works if subscript returns a standard view. */ - -/* Again there are two cases. In the first case, PyArray_CopyObject - can be used. In the second case, a new indexing function has to be - used. -*/ - +/* + * Another assignment hacked by using CopyObject. + * This only works if subscript returns a standard view. + * Again there are two cases. In the first case, PyArray_CopyObject + * can be used. In the second case, a new indexing function has to be + * used. + */ static int iter_ass_subscript(PyArrayIterObject *, PyObject *, PyObject *); static int @@ -2952,12 +3092,16 @@ if (PyArray_CheckExact(self)) { tmp = (PyArrayObject *)array_subscript_simple(self, index); - if (tmp == NULL) return -1; + if (tmp == NULL) { + return -1; + } } else { PyObject *tmp0; tmp0 = PyObject_GetItem((PyObject *)self, index); - if (tmp0 == NULL) return -1; + if (tmp0 == NULL) { + return -1; + } if (!PyArray_Check(tmp0)) { PyErr_SetString(PyExc_RuntimeError, "Getitem not returning array."); @@ -2990,10 +3134,14 @@ for(i=0; i 0) || - PyList_Check(obj)) return -1; + if ((PyArray_Check(obj) && PyArray_NDIM(obj) > 0) + || PyList_Check(obj)) { + return -1; + } temp = PyArray_PyIntAsIntp(obj); - if (error_converting(temp)) return -1; + if (error_converting(temp)) { + return -1; + } vals[i] = temp; } return 0; @@ -3023,26 +3171,27 @@ !PySequence_Check(index))) { intp value; value = PyArray_PyIntAsIntp(index); - if (PyErr_Occurred()) + if (PyErr_Occurred()) { PyErr_Clear(); - else + } + else { return array_ass_big_item(self, value, op); + } } if (PyString_Check(index) || PyUnicode_Check(index)) { if (self->descr->names) { PyObject *obj; + obj = PyDict_GetItem(self->descr->fields, index); if (obj != NULL) { PyArray_Descr *descr; int offset; PyObject *title; - if (PyArg_ParseTuple(obj, "Oi|O", - &descr, &offset, &title)) { + if (PyArg_ParseTuple(obj, "Oi|O", &descr, &offset, &title)) { Py_INCREF(descr); - return PyArray_SetField(self, descr, - offset, op); + return PyArray_SetField(self, descr, offset, op); } } } @@ -3054,17 +3203,19 @@ } if (self->nd == 0) { - /* Several different exceptions to the 0-d no-indexing rule - - 1) ellipses - 2) empty tuple - 3) Using newaxis (None) - 4) Boolean mask indexing - */ - if (index == Py_Ellipsis || index == Py_None || \ - (PyTuple_Check(index) && (0 == PyTuple_GET_SIZE(index) || \ - count_new_axes_0d(index) > 0))) + /* + * Several different exceptions to the 0-d no-indexing rule + * + * 1) ellipses + * 2) empty tuple + * 3) Using newaxis (None) + * 4) Boolean mask indexing + */ + if (index == Py_Ellipsis || index == Py_None || + (PyTuple_Check(index) && (0 == PyTuple_GET_SIZE(index) || + count_new_axes_0d(index) > 0))) { return self->descr->f->setitem(op, self->data, self); + } if (PyBool_Check(index) || PyArray_IsScalar(index, Bool) || (PyArray_Check(index) && (PyArray_DIMS(index)==0) && PyArray_ISBOOL(index))) { @@ -3075,8 +3226,7 @@ return 0; } } - PyErr_SetString(PyExc_IndexError, - "0-d arrays can't be indexed."); + PyErr_SetString(PyExc_IndexError, "0-d arrays can't be indexed."); return -1; } @@ -3086,8 +3236,11 @@ && (_tuple_of_integers(index, vals, self->nd) >= 0)) { int i; char *item; - for(i=0; ind; i++) { - if (vals[i] < 0) vals[i] += self->dimensions[i]; + + for (i = 0; i < self->nd; i++) { + if (vals[i] < 0) { + vals[i] += self->dimensions[i]; + } if ((vals[i] < 0) || (vals[i] >= self->dimensions[i])) { PyErr_Format(PyExc_IndexError, "index (%"INTP_FMT") out of range "\ @@ -3097,25 +3250,27 @@ } } item = PyArray_GetPtr(self, vals); - /* fprintf(stderr, "Here I am...\n");*/ return self->descr->f->setitem(op, item, self); } PyErr_Clear(); fancy = fancy_indexing_check(index); - if (fancy != SOBJ_NOTFANCY) { oned = ((self->nd == 1) && !(PyTuple_Check(index) && PyTuple_GET_SIZE(index) > 1)); - - mit = (PyArrayMapIterObject *) \ - PyArray_MapIterNew(index, oned, fancy); - if (mit == NULL) return -1; + mit = (PyArrayMapIterObject *) PyArray_MapIterNew(index, oned, fancy); + if (mit == NULL) { + return -1; + } if (oned) { PyArrayIterObject *it; int rval; + it = (PyArrayIterObject *)PyArray_IterNew((PyObject *)self); - if (it == NULL) {Py_DECREF(mit); return -1;} + if (it == NULL) { + Py_DECREF(mit); + return -1; + } rval = iter_ass_subscript(it, mit->indexobj, op); Py_DECREF(it); Py_DECREF(mit); @@ -3131,10 +3286,11 @@ } -/* There are places that require that array_subscript return a PyArrayObject - and not possibly a scalar. Thus, this is the function exposed to - Python so that 0-dim arrays are passed as scalars -*/ +/* + * There are places that require that array_subscript return a PyArrayObject + * and not possibly a scalar. Thus, this is the function exposed to + * Python so that 0-dim arrays are passed as scalars + */ static PyObject * @@ -3144,13 +3300,14 @@ PyArrayObject *mp; intp vals[MAX_DIMS]; - if (PyInt_Check(op) || PyArray_IsScalar(op, Integer) || \ + if (PyInt_Check(op) || PyArray_IsScalar(op, Integer) || PyLong_Check(op) || (PyIndex_Check(op) && !PySequence_Check(op))) { intp value; value = PyArray_PyIntAsIntp(op); - if (PyErr_Occurred()) + if (PyErr_Occurred()) { PyErr_Clear(); + } else { return array_item_nice(self, (Py_ssize_t) value); } @@ -3161,8 +3318,11 @@ && (_tuple_of_integers(op, vals, self->nd) >= 0)) { int i; char *item; - for(i=0; ind; i++) { - if (vals[i] < 0) vals[i] += self->dimensions[i]; + + for (i = 0; i < self->nd; i++) { + if (vals[i] < 0) { + vals[i] += self->dimensions[i]; + } if ((vals[i] < 0) || (vals[i] >= self->dimensions[i])) { PyErr_Format(PyExc_IndexError, "index (%"INTP_FMT") out of range "\ @@ -3177,27 +3337,29 @@ PyErr_Clear(); mp = (PyArrayObject *)array_subscript(self, op); + /* + * mp could be a scalar if op is not an Int, Scalar, Long or other Index + * object and still convertable to an integer (so that the code goes to + * array_subscript_simple). So, this cast is a bit dangerous.. + */ - /* mp could be a scalar if op is not an Int, Scalar, Long or other Index - object and still convertable to an integer (so that the code goes to - array_subscript_simple). So, this cast is a bit dangerous.. - */ + /* + * The following is just a copy of PyArray_Return with an + * additional logic in the nd == 0 case. + */ - /* The following is just a copy of PyArray_Return with an - additional logic in the nd == 0 case. - */ - - if (mp == NULL) return NULL; - + if (mp == NULL) { + return NULL; + } if (PyErr_Occurred()) { Py_XDECREF(mp); return NULL; } - if (PyArray_Check(mp) && mp->nd == 0) { Bool noellipses = TRUE; - if ((op == Py_Ellipsis) || PyString_Check(op) || PyUnicode_Check(op)) + if ((op == Py_Ellipsis) || PyString_Check(op) || PyUnicode_Check(op)) { noellipses = FALSE; + } else if (PyBool_Check(op) || PyArray_IsScalar(op, Bool) || (PyArray_Check(op) && (PyArray_DIMS(op)==0) && PyArray_ISBOOL(op))) { @@ -3206,12 +3368,14 @@ else if (PySequence_Check(op)) { int n, i; PyObject *temp; + n = PySequence_Size(op); - i=0; + i = 0; while (idata; return PyArray_NBYTES(self); @@ -3283,10 +3446,11 @@ static Py_ssize_t array_getwritebuf(PyArrayObject *self, Py_ssize_t segment, void **ptrptr) { - if (PyArray_CHKFLAGS(self, WRITEABLE)) + if (PyArray_CHKFLAGS(self, WRITEABLE)) { return array_getreadbuf(self, segment, (void **) ptrptr); + } else { - PyErr_SetString(PyExc_ValueError, "array cannot be "\ + PyErr_SetString(PyExc_ValueError, "array cannot be " "accessed as a writeable buffer"); return -1; } @@ -3300,14 +3464,14 @@ static PyBufferProcs array_as_buffer = { #if PY_VERSION_HEX >= 0x02050000 - (readbufferproc)array_getreadbuf, /*bf_getreadbuffer*/ - (writebufferproc)array_getwritebuf, /*bf_getwritebuffer*/ - (segcountproc)array_getsegcount, /*bf_getsegcount*/ - (charbufferproc)array_getcharbuf, /*bf_getcharbuffer*/ + (readbufferproc)array_getreadbuf, /*bf_getreadbuffer*/ + (writebufferproc)array_getwritebuf, /*bf_getwritebuffer*/ + (segcountproc)array_getsegcount, /*bf_getsegcount*/ + (charbufferproc)array_getcharbuf, /*bf_getcharbuffer*/ #else (getreadbufferproc)array_getreadbuf, /*bf_getreadbuffer*/ (getwritebufferproc)array_getwritebuf, /*bf_getwritebuffer*/ - (getsegcountproc)array_getsegcount, /*bf_getsegcount*/ + (getsegcountproc)array_getsegcount, /*bf_getsegcount*/ (getcharbufferproc)array_getcharbuf, /*bf_getcharbuffer*/ #endif }; @@ -3321,40 +3485,40 @@ typedef struct { - PyObject *add, - *subtract, - *multiply, - *divide, - *remainder, - *power, - *square, - *reciprocal, - *ones_like, - *sqrt, - *negative, - *absolute, - *invert, - *left_shift, - *right_shift, - *bitwise_and, - *bitwise_xor, - *bitwise_or, - *less, - *less_equal, - *equal, - *not_equal, - *greater, - *greater_equal, - *floor_divide, - *true_divide, - *logical_or, - *logical_and, - *floor, - *ceil, - *maximum, - *minimum, - *rint, - *conjugate; + PyObject *add; + PyObject *subtract; + PyObject *multiply; + PyObject *divide; + PyObject *remainder; + PyObject *power; + PyObject *square; + PyObject *reciprocal; + PyObject *ones_like; + PyObject *sqrt; + PyObject *negative; + PyObject *absolute; + PyObject *invert; + PyObject *left_shift; + PyObject *right_shift; + PyObject *bitwise_and; + PyObject *bitwise_xor; + PyObject *bitwise_or; + PyObject *less; + PyObject *less_equal; + PyObject *equal; + PyObject *not_equal; + PyObject *greater; + PyObject *greater_equal; + PyObject *floor_divide; + PyObject *true_divide; + PyObject *logical_or; + PyObject *logical_and; + PyObject *floor; + PyObject *ceil; + PyObject *maximum; + PyObject *minimum; + PyObject *rint; + PyObject *conjugate; } NumericOps; static NumericOps n_ops; /* NB: static objects initialized to zero */ @@ -3472,21 +3636,19 @@ static PyObject * _get_keywords(int rtype, PyArrayObject *out) { - PyObject *kwds=NULL; + PyObject *kwds = NULL; if (rtype != PyArray_NOTYPE || out != NULL) { kwds = PyDict_New(); if (rtype != PyArray_NOTYPE) { PyArray_Descr *descr; descr = PyArray_DescrFromType(rtype); if (descr) { - PyDict_SetItemString(kwds, "dtype", - (PyObject *)descr); + PyDict_SetItemString(kwds, "dtype", (PyObject *)descr); Py_DECREF(descr); } } if (out != NULL) { - PyDict_SetItemString(kwds, "out", - (PyObject *)out); + PyDict_SetItemString(kwds, "out", (PyObject *)out); } } return kwds; @@ -3496,7 +3658,7 @@ PyArray_GenericReduceFunction(PyArrayObject *m1, PyObject *op, int axis, int rtype, PyArrayObject *out) { - PyObject *args, *ret=NULL, *meth; + PyObject *args, *ret = NULL, *meth; PyObject *kwds; if (op == NULL) { Py_INCREF(Py_NotImplemented); @@ -3519,7 +3681,7 @@ PyArray_GenericAccumulateFunction(PyArrayObject *m1, PyObject *op, int axis, int rtype, PyArrayObject *out) { - PyObject *args, *ret=NULL, *meth; + PyObject *args, *ret = NULL, *meth; PyObject *kwds; if (op == NULL) { Py_INCREF(Py_NotImplemented); @@ -3640,8 +3802,9 @@ PyObject* value = PyNumber_Index(o2); Py_ssize_t val; if (value==NULL) { - if (PyErr_Occurred()) + if (PyErr_Occurred()) { PyErr_Clear(); + } return 0; } val = PyInt_AsSsize_t(value); @@ -3658,8 +3821,10 @@ /* optimize float array or complex array to a scalar power */ static PyObject * -fast_scalar_power(PyArrayObject *a1, PyObject *o2, int inplace) { +fast_scalar_power(PyArrayObject *a1, PyObject *o2, int inplace) +{ double exp; + if (PyArray_Check(a1) && array_power_is_scalar(o2, &exp)) { PyObject *fastop = NULL; if (PyArray_ISFLOAT(a1) || PyArray_ISCOMPLEX(a1)) { @@ -3675,33 +3840,37 @@ } else { return PyArray_Copy(a1); } - } else if (exp == -1.0) { + } + else if (exp == -1.0) { fastop = n_ops.reciprocal; - } else if (exp == 0.0) { + } + else if (exp == 0.0) { fastop = n_ops.ones_like; - } else if (exp == 0.5) { + } + else if (exp == 0.5) { fastop = n_ops.sqrt; - } else if (exp == 2.0) { + } + else if (exp == 2.0) { fastop = n_ops.square; - } else { + } + else { return NULL; } + if (inplace) { - return PyArray_GenericInplaceUnaryFunction(a1, - fastop); + return PyArray_GenericInplaceUnaryFunction(a1, fastop); } else { - return PyArray_GenericUnaryFunction(a1, - fastop); + return PyArray_GenericUnaryFunction(a1, fastop); } } else if (exp==2.0) { fastop = n_ops.multiply; if (inplace) { - return PyArray_GenericInplaceBinaryFunction \ + return PyArray_GenericInplaceBinaryFunction (a1, (PyObject *)a1, fastop); } else { - return PyArray_GenericBinaryFunction \ + return PyArray_GenericBinaryFunction (a1, (PyObject *)a1, fastop); } } @@ -3877,7 +4046,9 @@ Bool anyTRUE = FALSE; it = (PyArrayIterObject *)PyArray_IterNew((PyObject *)mp); - if (it==NULL) return anyTRUE; + if (it == NULL) { + return anyTRUE; + } index = it->size; while(index--) { if (mp->descr->f->nonzero(it->dataptr, mp)) { @@ -3894,6 +4065,7 @@ _array_nonzero(PyArrayObject *mp) { intp n; + n = PyArray_SIZE(mp); if (n == 1) { return mp->descr->f->nonzero(mp->data, mp); @@ -3918,7 +4090,9 @@ PyObject *divp, *modp, *result; divp = array_floor_divide(op1, op2); - if (divp == NULL) return NULL; + if (divp == NULL) { + return NULL; + } modp = array_remainder(op1, op2); if (modp == NULL) { Py_DECREF(divp); @@ -3941,7 +4115,9 @@ return NULL; } pv = v->descr->f->getitem(v->data, v); - if (pv == NULL) return NULL; + if (pv == NULL) { + return NULL; + } if (pv->ob_type->tp_as_number == 0) { PyErr_SetString(PyExc_TypeError, "cannot convert to an int; "\ "scalar object is not a number"); @@ -3970,7 +4146,9 @@ return NULL; } pv = v->descr->f->getitem(v->data, v); - if (pv == NULL) return NULL; + if (pv == NULL) { + return NULL; + } if (pv->ob_type->tp_as_number == 0) { PyErr_SetString(PyExc_TypeError, "cannot convert to a "\ "float; scalar object is not a number"); @@ -4066,8 +4244,7 @@ static PyObject * _array_copy_nice(PyArrayObject *self) { - return PyArray_Return((PyArrayObject *) \ - PyArray_Copy(self)); + return PyArray_Return((PyArrayObject *) PyArray_Copy(self)); } #if PY_VERSION_HEX >= 0x02050000 @@ -4109,8 +4286,10 @@ (unaryfunc)array_oct, /*nb_oct*/ (unaryfunc)array_hex, /*nb_hex*/ - /*This code adds augmented assignment functionality*/ - /*that was made available in Python 2.0*/ + /* + * This code adds augmented assignment functionality + * that was made available in Python 2.0 + */ (binaryfunc)array_inplace_add, /*inplace_add*/ (binaryfunc)array_inplace_subtract, /*inplace_subtract*/ (binaryfunc)array_inplace_multiply, /*inplace_multiply*/ @@ -4160,15 +4339,26 @@ } l=self->dimensions[0]; - if (ilow < 0) ilow = 0; - else if (ilow > l) ilow = l; - if (ihigh < ilow) ihigh = ilow; - else if (ihigh > l) ihigh = l; + if (ilow < 0) { + ilow = 0; + } + else if (ilow > l) { + ilow = l; + } + if (ihigh < ilow) { + ihigh = ilow; + } + else if (ihigh > l) { + ihigh = l; + } if (ihigh != ilow) { data = index2ptr(self, ilow); - if (data == NULL) return NULL; - } else { + if (data == NULL) { + return NULL; + } + } + else { data = self->data; } @@ -4180,7 +4370,9 @@ self->strides, data, self->flags, (PyObject *)self); self->dimensions[0] = l; - if (r == NULL) return NULL; + if (r == NULL) { + return NULL; + } r->base = (PyObject *)self; Py_INCREF(self); PyArray_UpdateFlags(r, UPDATE_ALL); @@ -4204,9 +4396,9 @@ "array is not writeable"); return -1; } - if ((tmp = (PyArrayObject *)array_slice(self, ilow, ihigh)) \ - == NULL) + if ((tmp = (PyArrayObject *)array_slice(self, ilow, ihigh)) == NULL) { return -1; + } ret = PyArray_CopyObject(tmp, v); Py_DECREF(tmp); @@ -4223,7 +4415,9 @@ res = PyArray_EnsureAnyArray(PyObject_RichCompare((PyObject *)self, el, Py_EQ)); - if (res == NULL) return -1; + if (res == NULL) { + return -1; + } ret = array_any_nonzero((PyArrayObject *)res); Py_DECREF(res); return ret; @@ -4268,11 +4462,12 @@ char *ostring; int i, N; -#define CHECK_MEMORY if (*n >= *max_n-16) { *max_n *= 2; \ - *string = (char *)_pya_realloc(*string, *max_n); } +#define CHECK_MEMORY do { if (*n >= *max_n-16) { \ + *max_n *= 2; \ + *string = (char *)_pya_realloc(*string, *max_n); \ + }} while (0) if (nd == 0) { - if ((op = descr->f->getitem(data, self)) == NULL) { return -1; } @@ -4284,33 +4479,33 @@ ostring = PyString_AsString(sp); N = PyString_Size(sp)*sizeof(char); *n += N; - CHECK_MEMORY - memmove(*string + (*n - N), ostring, N); + CHECK_MEMORY; + memmove(*string + (*n - N), ostring, N); Py_DECREF(sp); Py_DECREF(op); return 0; } else { - CHECK_MEMORY - (*string)[*n] = '['; + CHECK_MEMORY; + (*string)[*n] = '['; *n += 1; - for(i = 0; i < dimensions[0]; i++) { + for (i = 0; i < dimensions[0]; i++) { if (dump_data(string, n, max_n, data + (*strides)*i, nd - 1, dimensions + 1, strides + 1, self) < 0) { return -1; } - CHECK_MEMORY - if (i < dimensions[0] - 1) { - (*string)[*n] = ','; - (*string)[*n+1] = ' '; - *n += 2; - } + CHECK_MEMORY; + if (i < dimensions[0] - 1) { + (*string)[*n] = ','; + (*string)[*n+1] = ' '; + *n += 2; + } } - CHECK_MEMORY - (*string)[*n] = ']'; - *n += 1; + CHECK_MEMORY; + (*string)[*n] = ']'; + *n += 1; return 0; } @@ -4369,8 +4564,8 @@ static PyObject *PyArray_ReprFunction = NULL; /*NUMPY_API - Set the array print function to be a Python function. -*/ + * Set the array print function to be a Python function. + */ static void PyArray_SetStringFunction(PyObject *op, int repr) { @@ -4381,7 +4576,8 @@ Py_XINCREF(op); /* Remember new callback */ PyArray_ReprFunction = op; - } else { + } + else { /* Dispose of previous callback */ Py_XDECREF(PyArray_StrFunction); /* Add a reference to new callback */ @@ -4398,7 +4594,8 @@ if (PyArray_ReprFunction == NULL) { s = array_repr_builtin(self, 1); - } else { + } + else { arglist = Py_BuildValue("(O)", self); s = PyEval_CallObject(PyArray_ReprFunction, arglist); Py_DECREF(arglist); @@ -4413,7 +4610,8 @@ if (PyArray_StrFunction == NULL) { s = array_repr_builtin(self, 0); - } else { + } + else { arglist = Py_BuildValue("(O)", self); s = PyEval_CallObject(PyArray_StrFunction, arglist); Py_DECREF(arglist); @@ -4483,29 +4681,46 @@ memcpy(s2t, s2, size); } val = PyArray_CompareUCS4(s1t, s2t, MIN(len1,len2)); - if ((val != 0) || (len1 == len2)) goto finish; - if (len2 > len1) {sptr = s2t+len1; val = -1; diff=len2-len1;} - else {sptr = s1t+len2; val = 1; diff=len1-len2;} + if ((val != 0) || (len1 == len2)) { + goto finish; + } + if (len2 > len1) { + sptr = s2t+len1; + val = -1; + diff = len2-len1; + } + else { + sptr = s1t+len2; + val = 1; + diff=len1-len2; + } while (diff--) { - if (*sptr != 0) goto finish; + if (*sptr != 0) { + goto finish; + } sptr++; } val = 0; finish: - if (s1t != s1) free(s1t); - if (s2t != s2) free(s2t); + if (s1t != s1) { + free(s1t); + } + if (s2t != s2) { + free(s2t); + } return val; } -/* Compare s1 and s2 which are not necessarily NULL-terminated. - s1 is of length len1 - s2 is of length len2 - If they are NULL terminated, then stop comparison. -*/ +/* + * Compare s1 and s2 which are not necessarily NULL-terminated. + * s1 is of length len1 + * s2 is of length len2 + * If they are NULL terminated, then stop comparison. + */ static int _mystrncmp(char *s1, char *s2, int len1, int len2) { @@ -4514,11 +4729,23 @@ int diff; val = memcmp(s1, s2, MIN(len1, len2)); - if ((val != 0) || (len1 == len2)) return val; - if (len2 > len1) {sptr = s2+len1; val = -1; diff=len2-len1;} - else {sptr = s1+len2; val = 1; diff=len1-len2;} + if ((val != 0) || (len1 == len2)) { + return val; + } + if (len2 > len1) { + sptr = s2 + len1; + val = -1; + diff = len2 - len1; + } + else { + sptr = s1 + len2; + val = 1; + diff = len1 - len2; + } while (diff--) { - if (*sptr != 0) return val; + if (*sptr != 0) { + return val; + } sptr++; } return 0; /* Only happens if NULLs are everywhere */ @@ -4536,27 +4763,30 @@ static void _rstripw(char *s, int n) { int i; - for(i=n-1; i>=1; i--) /* Never strip to length 0. */ - { - int c = s[i]; - if (!c || isspace(c)) - s[i] = 0; - else - break; + for (i = n - 1; i >= 1; i--) { /* Never strip to length 0. */ + int c = s[i]; + + if (!c || isspace(c)) { + s[i] = 0; } + else { + break; + } + } } static void _unistripw(PyArray_UCS4 *s, int n) { int i; - for(i=n-1; i>=1; i--) /* Never strip to length 0. */ - { - PyArray_UCS4 c = s[i]; - if (!c || isspace(c)) - s[i] = 0; - else - break; + for (i = n - 1; i >= 1; i--) { /* Never strip to length 0. */ + PyArray_UCS4 c = s[i]; + if (!c || isspace(c)) { + s[i] = 0; } + else { + break; + } + } } @@ -4695,8 +4925,7 @@ _loop(>=) break; default: - PyErr_SetString(PyExc_RuntimeError, - "bad comparison operator"); + PyErr_SetString(PyExc_RuntimeError, "bad comparison operator"); return -1; } return 0; @@ -4718,7 +4947,7 @@ /* Cast arrays to a common type */ if (self->descr->type_num != other->descr->type_num) { PyObject *new; - if (self->descr->type_num == PyArray_STRING && \ + if (self->descr->type_num == PyArray_STRING && other->descr->type_num == PyArray_UNICODE) { Py_INCREF(other->descr); new = PyArray_FromAny((PyObject *)self, other->descr, @@ -4729,7 +4958,7 @@ Py_INCREF(other); self = (PyArrayObject *)new; } - else if (self->descr->type_num == PyArray_UNICODE && \ + else if (self->descr->type_num == PyArray_UNICODE && other->descr->type_num == PyArray_STRING) { Py_INCREF(self->descr); new = PyArray_FromAny((PyObject *)other, self->descr, @@ -4771,12 +5000,10 @@ } if (self->descr->type_num == PyArray_UNICODE) { - val = _compare_strings(result, mit, cmp_op, _myunincmp, - rstrip); + val = _compare_strings(result, mit, cmp_op, _myunincmp, rstrip); } else { - val = _compare_strings(result, mit, cmp_op, _mystrncmp, - rstrip); + val = _compare_strings(result, mit, cmp_op, _mystrncmp, rstrip); } if (val < 0) { @@ -4788,16 +5015,16 @@ return result; } -/* VOID-type arrays can only be compared equal and not-equal - in which case the fields are all compared by extracting the fields - and testing one at a time... - equality testing is performed using logical_ands on all the fields. - in-equality testing is performed using logical_ors on all the fields. - - VOID-type arrays without fields are compared for equality by comparing their - memory at each location directly (using string-code). -*/ - +/* + * VOID-type arrays can only be compared equal and not-equal + * in which case the fields are all compared by extracting the fields + * and testing one at a time... + * equality testing is performed using logical_ands on all the fields. + * in-equality testing is performed using logical_ors on all the fields. + * + * VOID-type arrays without fields are compared for equality by comparing their + * memory at each location directly (using string-code). + */ static PyObject *array_richcompare(PyArrayObject *, PyObject *, int); @@ -4810,21 +5037,23 @@ return NULL; } if (PyArray_HASFIELDS(self)) { - PyObject *res=NULL, *temp, *a, *b; + PyObject *res = NULL, *temp, *a, *b; PyObject *key, *value, *temp2; PyObject *op; - Py_ssize_t pos=0; + Py_ssize_t pos = 0; op = (cmp_op == Py_EQ ? n_ops.logical_and : n_ops.logical_or); while (PyDict_Next(self->descr->fields, &pos, &key, &value)) { - if NPY_TITLE_KEY(key, value) continue; + if NPY_TITLE_KEY(key, value) { + continue; + } a = PyArray_EnsureAnyArray(array_subscript(self, key)); - if (a==NULL) { + if (a == NULL) { Py_XDECREF(res); return NULL; } b = array_subscript(other, key); - if (b==NULL) { + if (b == NULL) { Py_XDECREF(res); Py_DECREF(a); return NULL; @@ -4855,8 +5084,10 @@ return res; } else { - /* compare as a string */ - /* assumes self and other have same descr->type */ + /* + * compare as a string. Assumes self and + * other have same descr->type + */ return _strings_richcompare(self, other, cmp_op, 0); } } @@ -4867,15 +5098,14 @@ PyObject *array_other, *result = NULL; int typenum; - switch (cmp_op) - { + switch (cmp_op) { case Py_LT: result = PyArray_GenericBinaryFunction(self, other, - n_ops.less); + n_ops.less); break; case Py_LE: result = PyArray_GenericBinaryFunction(self, other, - n_ops.less_equal); + n_ops.less_equal); break; case Py_EQ: if (other == Py_None) { @@ -4889,15 +5119,14 @@ typenum = PyArray_NOTYPE; } array_other = PyArray_FromObject(other, - typenum, 0, 0); - /* If not successful, then return False - This fixes code that used to - allow equality comparisons between arrays - and other objects which would give a result - of False - */ - if ((array_other == NULL) || \ - (array_other == Py_None)) { + typenum, 0, 0); + /* + * If not successful, then return False. This fixes code + * that used to allow equality comparisons between arrays + * and other objects which would give a result of False. + */ + if ((array_other == NULL) || + (array_other == Py_None)) { Py_XDECREF(array_other); PyErr_Clear(); Py_INCREF(Py_False); @@ -4909,16 +5138,17 @@ array_other = other; } result = PyArray_GenericBinaryFunction(self, - array_other, - n_ops.equal); + array_other, + n_ops.equal); if ((result == Py_NotImplemented) && - (self->descr->type_num == PyArray_VOID)) { + (self->descr->type_num == PyArray_VOID)) { int _res; - _res = PyObject_RichCompareBool \ - ((PyObject *)self->descr, - (PyObject *)\ - PyArray_DESCR(array_other), - Py_EQ); + + _res = PyObject_RichCompareBool + ((PyObject *)self->descr, + (PyObject *)\ + PyArray_DESCR(array_other), + Py_EQ); if (_res < 0) { Py_DECREF(result); Py_DECREF(array_other); @@ -4926,18 +5156,19 @@ } if (_res) { Py_DECREF(result); - result = _void_compare\ - (self, - (PyArrayObject *)array_other, - cmp_op); + result = _void_compare + (self, + (PyArrayObject *)array_other, + cmp_op); Py_DECREF(array_other); } return result; } - /* If the comparison results in NULL, then the - two array objects can not be compared together so - return zero - */ + /* + * If the comparison results in NULL, then the + * two array objects can not be compared together so + * return zero + */ Py_DECREF(array_other); if (result == NULL) { PyErr_Clear(); @@ -4956,14 +5187,13 @@ if (typenum != PyArray_OBJECT) { typenum = PyArray_NOTYPE; } - array_other = PyArray_FromObject(other, - typenum, 0, 0); - /* If not successful, then objects cannot be - compared and cannot be equal, therefore, - return True; - */ - if ((array_other == NULL) || \ - (array_other == Py_None)) { + array_other = PyArray_FromObject(other, typenum, 0, 0); + /* + * If not successful, then objects cannot be + * compared and cannot be equal, therefore, + * return True; + */ + if ((array_other == NULL) || (array_other == Py_None)) { Py_XDECREF(array_other); PyErr_Clear(); Py_INCREF(Py_True); @@ -4975,16 +5205,17 @@ array_other = other; } result = PyArray_GenericBinaryFunction(self, - array_other, - n_ops.not_equal); + array_other, + n_ops.not_equal); if ((result == Py_NotImplemented) && - (self->descr->type_num == PyArray_VOID)) { + (self->descr->type_num == PyArray_VOID)) { int _res; - _res = PyObject_RichCompareBool\ - ((PyObject *)self->descr, - (PyObject *)\ - PyArray_DESCR(array_other), - Py_EQ); + + _res = PyObject_RichCompareBool( + (PyObject *)self->descr, + (PyObject *) + PyArray_DESCR(array_other), + Py_EQ); if (_res < 0) { Py_DECREF(result); Py_DECREF(array_other); @@ -4992,10 +5223,10 @@ } if (_res) { Py_DECREF(result); - result = _void_compare\ - (self, - (PyArrayObject *)array_other, - cmp_op); + result = _void_compare( + self, + (PyArrayObject *)array_other, + cmp_op); Py_DECREF(array_other); } return result; @@ -5010,19 +5241,21 @@ break; case Py_GT: result = PyArray_GenericBinaryFunction(self, other, - n_ops.greater); + n_ops.greater); break; case Py_GE: result = PyArray_GenericBinaryFunction(self, other, - n_ops.greater_equal); + n_ops.greater_equal); break; default: result = Py_NotImplemented; Py_INCREF(result); - } + } if (result == Py_NotImplemented) { /* Try to handle string comparisons */ - if (self->descr->type_num == PyArray_OBJECT) return result; + if (self->descr->type_num == PyArray_OBJECT) { + return result; + } array_other = PyArray_FromObject(other,PyArray_NOTYPE, 0, 0); if (PyArray_ISSTRING(self) && PyArray_ISSTRING(array_other)) { Py_DECREF(result); @@ -5047,7 +5280,10 @@ if ((*axis >= MAX_DIMS) || (n==0)) { if (n != 1) { temp1 = PyArray_Ravel(arr,0); - if (temp1 == NULL) {*axis=0; return NULL;} + if (temp1 == NULL) { + *axis = 0; + return NULL; + } *axis = PyArray_NDIM(temp1)-1; } else { @@ -5055,7 +5291,9 @@ Py_INCREF(temp1); *axis = 0; } - if (!flags) return temp1; + if (!flags) { + return temp1; + } } else { temp1 = (PyObject *)arr; @@ -5065,13 +5303,17 @@ temp2 = PyArray_CheckFromAny((PyObject *)temp1, NULL, 0, 0, flags, NULL); Py_DECREF(temp1); - if (temp2 == NULL) return NULL; + if (temp2 == NULL) { + return NULL; + } } else { temp2 = (PyObject *)temp1; } n = PyArray_NDIM(temp2); - if (*axis < 0) *axis += n; + if (*axis < 0) { + *axis += n; + } if ((*axis < 0) || (*axis >= n)) { PyErr_Format(PyExc_ValueError, "axis(=%d) out of bounds", *axis); @@ -5094,8 +5336,11 @@ { int i; PyObject *intTuple = PyTuple_New(len); - if (!intTuple) goto fail; - for(i=0; i= SIZEOF_INTP - if (!(op = PyNumber_Int(seq))) return -1; + if (!(op = PyNumber_Int(seq))) { + return -1; + } #else - if (!(op = PyNumber_Long(seq))) return -1; + if (!(op = PyNumber_Long(seq))) { + return -1; + } #endif nd = 1; #if SIZEOF_LONG >= SIZEOF_INTP @@ -5139,17 +5391,22 @@ vals[0] = (intp ) PyLong_AsLongLong(op); #endif Py_DECREF(op); - } else { - for(i=0; i < MIN(nd,maxvals); i++) { + } + else { + for (i = 0; i < MIN(nd,maxvals); i++) { op = PySequence_GetItem(seq, i); - if (op == NULL) return -1; + if (op == NULL) { + return -1; + } #if SIZEOF_LONG >= SIZEOF_INTP vals[i]=(intp )PyInt_AsLong(op); #else vals[i]=(intp )PyLong_AsLongLong(op); #endif Py_DECREF(op); - if(PyErr_Occurred()) return -1; + if(PyErr_Occurred()) { + return -1; + } } } return nd; @@ -5157,10 +5414,12 @@ -/* Check whether the given array is stored contiguously (row-wise) in - memory. */ - -/* 0-strided arrays are not contiguous (even if dimension == 1) */ +/* + * Check whether the given array is stored contiguously + * (row-wise) in memory. + * + * 0-strided arrays are not contiguous (even if dimension == 1) + */ static int _IsContiguous(PyArrayObject *ap) { @@ -5168,15 +5427,22 @@ register intp dim; register int i; - if (ap->nd == 0) return 1; + if (ap->nd == 0) { + return 1; + } sd = ap->descr->elsize; - if (ap->nd == 1) return (ap->dimensions[0] == 1 || \ - sd == ap->strides[0]); - for(i = ap->nd-1; i >= 0; --i) { + if (ap->nd == 1) { + return ap->dimensions[0] == 1 || sd == ap->strides[0]; + } + for (i = ap->nd - 1; i >= 0; --i) { dim = ap->dimensions[i]; /* contiguous by definition */ - if (dim == 0) return 1; - if (ap->strides[i] != sd) return 0; + if (dim == 0) { + return 1; + } + if (ap->strides[i] != sd) { + return 0; + } sd *= dim; } return 1; @@ -5191,15 +5457,22 @@ register intp dim; register int i; - if (ap->nd == 0) return 1; + if (ap->nd == 0) { + return 1; + } sd = ap->descr->elsize; - if (ap->nd == 1) return (ap->dimensions[0] == 1 || \ - sd == ap->strides[0]); - for(i=0; i< ap->nd; ++i) { + if (ap->nd == 1) { + return ap->dimensions[0] == 1 || sd == ap->strides[0]; + } + for (i = 0; i < ap->nd; ++i) { dim = ap->dimensions[i]; /* fortran contiguous by definition */ - if (dim == 0) return 1; - if (ap->strides[i] != sd) return 0; + if (dim == 0) { + return 1; + } + if (ap->strides[i] != sd) { + return 0; + } sd *= dim; } return 1; @@ -5208,20 +5481,22 @@ static int _IsAligned(PyArrayObject *ap) { - int i, alignment, aligned=1; + int i, alignment, aligned = 1; intp ptr; int type = ap->descr->type_num; - if ((type == PyArray_STRING) || (type == PyArray_VOID)) + if ((type == PyArray_STRING) || (type == PyArray_VOID)) { return 1; - + } alignment = ap->descr->alignment; - if (alignment == 1) return 1; - + if (alignment == 1) { + return 1; + } ptr = (intp) ap->data; aligned = (ptr % alignment) == 0; - for(i=0; i nd; i++) + for (i = 0; i < ap->nd; i++) { aligned &= ((ap->strides[i] % alignment) == 0); + } return aligned != 0; } @@ -5233,31 +5508,37 @@ Py_ssize_t n; /* If we own our own data, then no-problem */ - if ((base == NULL) || (ap->flags & OWNDATA)) return TRUE; + if ((base == NULL) || (ap->flags & OWNDATA)) { + return TRUE; + } + /* + * Get to the final base object + * If it is a writeable array, then return TRUE + * If we can find an array object + * or a writeable buffer object as the final base object + * or a string object (for pickling support memory savings). + * - this last could be removed if a proper pickleable + * buffer was added to Python. + */ - /* Get to the final base object - If it is a writeable array, then return TRUE - If we can find an array object - or a writeable buffer object as the final base object - or a string object (for pickling support memory savings). - - this last could be removed if a proper pickleable - buffer was added to Python. - */ - while(PyArray_Check(base)) { - if (PyArray_CHKFLAGS(base, OWNDATA)) + if (PyArray_CHKFLAGS(base, OWNDATA)) { return (Bool) (PyArray_ISWRITEABLE(base)); + } base = PyArray_BASE(base); } - /* here so pickle support works seamlessly - and unpickled array can be set and reset writeable - -- could be abused -- */ - if PyString_Check(base) return TRUE; - - if (PyObject_AsWriteBuffer(base, &dummy, &n) < 0) + /* + * here so pickle support works seamlessly + * and unpickled array can be set and reset writeable + * -- could be abused -- + */ + if PyString_Check(base) { + return TRUE; + } + if (PyObject_AsWriteBuffer(base, &dummy, &n) < 0) { return FALSE; - + } return TRUE; } @@ -5267,20 +5548,21 @@ static int PyArray_ElementStrides(PyObject *arr) { - register int itemsize = PyArray_ITEMSIZE(arr); - register int i, N=PyArray_NDIM(arr); - register intp *strides = PyArray_STRIDES(arr); + int itemsize = PyArray_ITEMSIZE(arr); + int i, N = PyArray_NDIM(arr); + intp *strides = PyArray_STRIDES(arr); - for(i=0; iflags |= FORTRAN; - if (ret->nd > 1) ret->flags &= ~CONTIGUOUS; + if (ret->nd > 1) { + ret->flags &= ~CONTIGUOUS; + } } - else ret->flags &= ~FORTRAN; + else { + ret->flags &= ~FORTRAN; + } } if (flagmask & CONTIGUOUS) { if (_IsContiguous(ret)) { ret->flags |= CONTIGUOUS; - if (ret->nd > 1) ret->flags &= ~FORTRAN; + if (ret->nd > 1) { + ret->flags &= ~FORTRAN; + } } - else ret->flags &= ~CONTIGUOUS; + else { + ret->flags &= ~CONTIGUOUS; + } } if (flagmask & ALIGNED) { - if (_IsAligned(ret)) ret->flags |= ALIGNED; - else ret->flags &= ~ALIGNED; + if (_IsAligned(ret)) { + ret->flags |= ALIGNED; + } + else { + ret->flags &= ~ALIGNED; + } } - /* This is not checked by default WRITEABLE is not - part of UPDATE_ALL */ + /* + * This is not checked by default WRITEABLE is not + * part of UPDATE_ALL + */ if (flagmask & WRITEABLE) { - if (_IsWriteable(ret)) ret->flags |= WRITEABLE; - else ret->flags &= ~WRITEABLE; + if (_IsWriteable(ret)) { + ret->flags |= WRITEABLE; + } + else { + ret->flags &= ~WRITEABLE; + } } return; } -/* This routine checks to see if newstrides (of length nd) will not - ever be able to walk outside of the memory implied numbytes and offset. +/* + * This routine checks to see if newstrides (of length nd) will not + * ever be able to walk outside of the memory implied numbytes and offset. + * + * The available memory is assumed to start at -offset and proceed + * to numbytes-offset. The strides are checked to ensure + * that accessing memory using striding will not try to reach beyond + * this memory for any of the axes. + * + * If numbytes is 0 it will be calculated using the dimensions and + * element-size. + * + * This function checks for walking beyond the beginning and right-end + * of the buffer and therefore works for any integer stride (positive + * or negative). + */ - The available memory is assumed to start at -offset and proceed - to numbytes-offset. The strides are checked to ensure - that accessing memory using striding will not try to reach beyond - this memory for any of the axes. - - If numbytes is 0 it will be calculated using the dimensions and - element-size. - - This function checks for walking beyond the beginning and right-end - of the buffer and therefore works for any integer stride (positive - or negative). -*/ - /*NUMPY_API*/ static Bool PyArray_CheckStrides(int elsize, int nd, intp numbytes, intp offset, @@ -5338,37 +5639,38 @@ intp begin; intp end; - if (numbytes == 0) + if (numbytes == 0) { numbytes = PyArray_MultiplyList(dims, nd) * elsize; - + } begin = -offset; end = numbytes - offset - elsize; - for(i=0; i end)) + for (i = 0; i < nd; i++) { + byte_begin = newstrides[i]*(dims[i] - 1); + if ((byte_begin < begin) || (byte_begin > end)) { return FALSE; + } } return TRUE; - } -/* This is the main array creation routine. */ +/* + * This is the main array creation routine. + * + * Flags argument has multiple related meanings + * depending on data and strides: + * + * If data is given, then flags is flags associated with data. + * If strides is not given, then a contiguous strides array will be created + * and the CONTIGUOUS bit will be set. If the flags argument + * has the FORTRAN bit set, then a FORTRAN-style strides array will be + * created (and of course the FORTRAN flag bit will be set). + * + * If data is not given but created here, then flags will be DEFAULT + * and a non-zero flags argument can be used to indicate a FORTRAN style + * array is desired. + */ -/* Flags argument has multiple related meanings - depending on data and strides: - - If data is given, then flags is flags associated with data. - If strides is not given, then a contiguous strides array will be created - and the CONTIGUOUS bit will be set. If the flags argument - has the FORTRAN bit set, then a FORTRAN-style strides array will be - created (and of course the FORTRAN flag bit will be set). - - If data is not given but created here, then flags will be DEFAULT - and a non-zero flags argument can be used to indicate a FORTRAN style - array is desired. -*/ - static size_t _array_fill_strides(intp *strides, intp *dims, int nd, size_t itemsize, int inflag, int *objflags) @@ -5376,29 +5678,37 @@ int i; /* Only make Fortran strides if not contiguous as well */ if ((inflag & FORTRAN) && !(inflag & CONTIGUOUS)) { - for(i=0; i 1) *objflags &= ~CONTIGUOUS; - else *objflags |= CONTIGUOUS; + if (nd > 1) { + *objflags &= ~CONTIGUOUS; + } + else { + *objflags |= CONTIGUOUS; + } } else { - for(i=nd-1;i>=0;i--) { + for (i = nd - 1; i >= 0; i--) { strides[i] = itemsize; itemsize *= dims[i] ? dims[i] : 1; } *objflags |= CONTIGUOUS; - if (nd > 1) *objflags &= ~FORTRAN; - else *objflags |= FORTRAN; + if (nd > 1) { + *objflags &= ~FORTRAN; + } + else { + *objflags |= FORTRAN; + } } return itemsize; } /*NUMPY_API - Generic new array creation routine. -*/ + * Generic new array creation routine. + */ static PyObject * PyArray_New(PyTypeObject *subtype, int nd, intp *dims, int type_num, intp *strides, void *data, int itemsize, int flags, @@ -5408,7 +5718,9 @@ PyObject *new; descr = PyArray_DescrFromType(type_num); - if (descr == NULL) return NULL; + if (descr == NULL) { + return NULL; + } if (descr->elsize == 0) { if (itemsize < 1) { PyErr_SetString(PyExc_ValueError, @@ -5424,14 +5736,16 @@ return new; } -/* Change a sub-array field to the base descriptor */ -/* and update the dimensions and strides - appropriately. Dimensions and strides are added - to the end unless we have a FORTRAN array - and then they are added to the beginning - - Strides are only added if given (because data is given). -*/ +/* + * Change a sub-array field to the base descriptor + * + * and update the dimensions and strides + * appropriately. Dimensions and strides are added + * to the end unless we have a FORTRAN array + * and then they are added to the beginning + * + * Strides are only added if given (because data is given). + */ static int _update_descr_and_dimensions(PyArray_Descr **des, intp *newdims, intp *newstrides, int oldnd, int isfortran) @@ -5458,16 +5772,17 @@ newnd = oldnd + numnew; - if (newnd > MAX_DIMS) goto finish; + if (newnd > MAX_DIMS) { + goto finish; + } if (isfortran) { memmove(newdims+numnew, newdims, oldnd*sizeof(intp)); mydim = newdims; } - if (tuple) { - for(i=0; isubarray->shape, i)); + for (i = 0; i < numnew; i++) { + mydim[i] = (intp) PyInt_AsLong( + PyTuple_GET_ITEM(old->subarray->shape, i)); } } else { @@ -5477,15 +5792,15 @@ if (newstrides) { intp tempsize; intp *mystrides; + mystrides = newstrides + oldnd; if (isfortran) { - memmove(newstrides+numnew, newstrides, - oldnd*sizeof(intp)); + memmove(newstrides+numnew, newstrides, oldnd*sizeof(intp)); mystrides = newstrides; } /* Make new strides -- alwasy C-contiguous */ tempsize = (*des)->elsize; - for(i=numnew-1; i>=0; i--) { + for (i = numnew - 1; i >= 0; i--) { mystrides[i] = tempsize; tempsize *= mydim[i] ? mydim[i] : 1; } @@ -5498,10 +5813,11 @@ } -/* steals a reference to descr (even on failure) */ /*NUMPY_API - Generic new array creation routine. -*/ + * Generic new array creation routine. + * + * steals a reference to descr (even on failure) + */ static PyObject * PyArray_NewFromDescr(PyTypeObject *subtype, PyArray_Descr *descr, int nd, intp *dims, intp *strides, void *data, @@ -5516,9 +5832,9 @@ if (descr->subarray) { PyObject *ret; intp newdims[2*MAX_DIMS]; - intp *newstrides=NULL; - int isfortran=0; - isfortran = (data && (flags & FORTRAN) && !(flags & CONTIGUOUS)) || \ + intp *newstrides = NULL; + int isfortran = 0; + isfortran = (data && (flags & FORTRAN) && !(flags & CONTIGUOUS)) || (!data && flags); memcpy(newdims, dims, nd*sizeof(intp)); if (strides) { @@ -5532,7 +5848,6 @@ data, flags, obj); return ret; } - if (nd < 0) { PyErr_SetString(PyExc_ValueError, "number of dimensions must be >=0"); @@ -5556,13 +5871,19 @@ return NULL; } PyArray_DESCR_REPLACE(descr); - if (descr->type_num == NPY_STRING) descr->elsize = 1; - else descr->elsize = sizeof(PyArray_UCS4); + if (descr->type_num == NPY_STRING) { + descr->elsize = 1; + } + else { + descr->elsize = sizeof(PyArray_UCS4); + } sd = (size_t) descr->elsize; } largest = MAX_INTP / sd; - for(i=0;iflags = DEFAULT; if (flags) { self->flags |= FORTRAN; - if (nd > 1) self->flags &= ~CONTIGUOUS; + if (nd > 1) { + self->flags &= ~CONTIGUOUS; + } flags = FORTRAN; } } - else self->flags = (flags & ~UPDATEIFCOPY); - + else { + self->flags = (flags & ~UPDATEIFCOPY); + } self->descr = descr; self->base = (PyObject *)NULL; self->weakreflist = (PyObject *)NULL; @@ -5613,84 +5937,102 @@ sd = _array_fill_strides(self->strides, dims, nd, sd, flags, &(self->flags)); } - else { /* we allow strides even when we create - the memory, but be careful with this... - */ + else { + /* + * we allow strides even when we create + * the memory, but be careful with this... + */ memcpy(self->strides, strides, sizeof(intp)*nd); sd *= size; } } - else { self->dimensions = self->strides = NULL; } + else { + self->dimensions = self->strides = NULL; + } if (data == NULL) { + /* + * Allocate something even for zero-space arrays + * e.g. shape=(0,) -- otherwise buffer exposure + * (a.data) doesn't work as it should. + */ - /* Allocate something even for zero-space arrays - e.g. shape=(0,) -- otherwise buffer exposure - (a.data) doesn't work as it should. */ - - if (sd==0) sd = descr->elsize; - - if ((data = PyDataMem_NEW(sd))==NULL) { + if (sd == 0) { + sd = descr->elsize; + } + if ((data = PyDataMem_NEW(sd)) == NULL) { PyErr_NoMemory(); goto fail; } self->flags |= OWNDATA; - /* It is bad to have unitialized OBJECT pointers */ - /* which could also be sub-fields of a VOID array */ + /* + * It is bad to have unitialized OBJECT pointers + * which could also be sub-fields of a VOID array + */ if (PyDataType_FLAGCHK(descr, NPY_NEEDS_INIT)) { memset(data, 0, sd); } } else { - self->flags &= ~OWNDATA; /* If data is passed in, - this object won't own it - by default. - Caller must arrange for - this to be reset if truly - desired */ + /* + * If data is passed in, this object won't own it by default. + * Caller must arrange for this to be reset if truly desired + */ + self->flags &= ~OWNDATA; } self->data = data; - /* call the __array_finalize__ - method if a subtype. - If obj is NULL, then call method with Py_None - */ + /* + * call the __array_finalize__ + * method if a subtype. + * If obj is NULL, then call method with Py_None + */ if ((subtype != &PyArray_Type)) { PyObject *res, *func, *args; - static PyObject *str=NULL; + static PyObject *str = NULL; if (str == NULL) { str = PyString_InternFromString("__array_finalize__"); } func = PyObject_GetAttr((PyObject *)self, str); if (func && func != Py_None) { - if (strides != NULL) { /* did not allocate own data - or funny strides */ - /* update flags before finalize function */ + if (strides != NULL) { + /* + * did not allocate own data or funny strides + * update flags before finalize function + */ PyArray_UpdateFlags(self, UPDATE_ALL); } - if PyCObject_Check(func) { /* A C-function is stored here */ - PyArray_FinalizeFunc *cfunc; - cfunc = PyCObject_AsVoidPtr(func); - Py_DECREF(func); - if (cfunc(self, obj) < 0) goto fail; + if PyCObject_Check(func) { + /* A C-function is stored here */ + PyArray_FinalizeFunc *cfunc; + cfunc = PyCObject_AsVoidPtr(func); + Py_DECREF(func); + if (cfunc(self, obj) < 0) { + goto fail; } + } else { args = PyTuple_New(1); - if (obj == NULL) obj=Py_None; + if (obj == NULL) { + obj=Py_None; + } Py_INCREF(obj); PyTuple_SET_ITEM(args, 0, obj); res = PyObject_Call(func, args, NULL); Py_DECREF(args); Py_DECREF(func); - if (res == NULL) goto fail; - else Py_DECREF(res); + if (res == NULL) { + goto fail; + } + else { + Py_DECREF(res); + } } } else Py_XDECREF(func); } - return (PyObject *)self; fail: @@ -5705,14 +6047,17 @@ memset(optr, 0, dtype->elsize); } else if (PyDescr_HASFIELDS(dtype)) { - PyObject *key, *value, *title=NULL; + PyObject *key, *value, *title = NULL; PyArray_Descr *new; int offset; - Py_ssize_t pos=0; + Py_ssize_t pos = 0; while (PyDict_Next(dtype->fields, &pos, &key, &value)) { - if NPY_TITLE_KEY(key, value) continue; - if (!PyArg_ParseTuple(value, "Oi|O", &new, &offset, - &title)) return; + if NPY_TITLE_KEY(key, value) { + continue; + } + if (!PyArg_ParseTuple(value, "Oi|O", &new, &offset, &title)) { + return; + } _putzero(optr + offset, zero, new); } } @@ -5727,13 +6072,11 @@ /*NUMPY_API - Resize (reallocate data). Only works if nothing else is referencing - this array and it is contiguous. - If refcheck is 0, then the reference count is not checked - and assumed to be 1. - You still must own this data and have no weak-references and no base - object. -*/ + * Resize (reallocate data). Only works if nothing else is referencing this + * array and it is contiguous. If refcheck is 0, then the reference count is + * not checked and assumed to be 1. You still must own this data and have no + * weak-references and no base object. + */ static PyObject * PyArray_Resize(PyArrayObject *self, PyArray_Dims *newshape, int refcheck, NPY_ORDER fortran) @@ -5754,9 +6097,9 @@ return NULL; } - if (fortran == PyArray_ANYORDER) + if (fortran == PyArray_ANYORDER) { fortran = PyArray_CORDER; - + } if (self->descr->elsize == 0) { PyErr_SetString(PyExc_ValueError, "Bad data-type size."); return NULL; @@ -5764,7 +6107,9 @@ newsize = 1; largest = MAX_INTP / self->descr->elsize; for(k=0; k 2) || (self->base != NULL) || \ + if (refcheck) { + refcnt = REFCOUNT(self); + } + else { + refcnt = 1; + } + if ((refcnt > 2) || (self->base != NULL) || (self->weakreflist != NULL)) { PyErr_SetString(PyExc_ValueError, "cannot resize an array that has "\ @@ -5797,8 +6146,12 @@ return NULL; } - if (newsize == 0) sd = self->descr->elsize; - else sd = newsize * self->descr->elsize; + if (newsize == 0) { + sd = self->descr->elsize; + } + else { + sd = newsize*self->descr->elsize; + } /* Reallocate space if needed */ new_data = PyDataMem_RENEW(self->data, sd); if (new_data == NULL) { @@ -5817,21 +6170,20 @@ char *optr; optr = self->data + oldsize*elsize; n = newsize - oldsize; - for(k=0; kdescr); optr += elsize; } Py_DECREF(zero); } else{ - memset(self->data+oldsize*elsize, 0, - (newsize-oldsize)*elsize); + memset(self->data+oldsize*elsize, 0, (newsize-oldsize)*elsize); } } - if (self->nd != new_nd) { /* Different number of dimensions. */ + if (self->nd != new_nd) { + /* Different number of dimensions. */ self->nd = new_nd; - /* Need new dimensions and strides arrays */ dimptr = PyDimMem_RENEW(self->dimensions, 2*new_nd); if (dimptr == NULL) { @@ -5848,42 +6200,44 @@ sd = (size_t) self->descr->elsize; sd = (size_t) _array_fill_strides(new_strides, new_dimensions, new_nd, sd, self->flags, &(self->flags)); - memmove(self->dimensions, new_dimensions, new_nd*sizeof(intp)); memmove(self->strides, new_strides, new_nd*sizeof(intp)); - Py_INCREF(Py_None); return Py_None; - } static void _fillobject(char *optr, PyObject *obj, PyArray_Descr *dtype) { if (!PyDataType_FLAGCHK(dtype, NPY_ITEM_REFCOUNT)) { - if ((obj == Py_None) || - (PyInt_Check(obj) && PyInt_AsLong(obj)==0)) + if ((obj == Py_None) || (PyInt_Check(obj) && PyInt_AsLong(obj)==0)) { return; + } else { PyObject *arr; Py_INCREF(dtype); arr = PyArray_NewFromDescr(&PyArray_Type, dtype, 0, NULL, NULL, NULL, 0, NULL); - if (arr!=NULL) + if (arr!=NULL) { dtype->f->setitem(obj, optr, arr); + } Py_XDECREF(arr); } } else if (PyDescr_HASFIELDS(dtype)) { - PyObject *key, *value, *title=NULL; + PyObject *key, *value, *title = NULL; PyArray_Descr *new; int offset; - Py_ssize_t pos=0; + Py_ssize_t pos = 0; + while (PyDict_Next(dtype->fields, &pos, &key, &value)) { - if NPY_TITLE_KEY(key, value) continue; - if (!PyArg_ParseTuple(value, "Oi|O", &new, &offset, - &title)) return; + if NPY_TITLE_KEY(key, value) { + continue; + } + if (!PyArg_ParseTuple(value, "Oi|O", &new, &offset, &title)) { + return; + } _fillobject(optr + offset, obj, new); } } @@ -5896,8 +6250,9 @@ } } -/* Assumes contiguous */ -/*NUMPY_API*/ +/*NUMPY_API + * Assumes contiguous + */ static void PyArray_FillObjectArray(PyArrayObject *arr, PyObject *obj) { @@ -5908,12 +6263,12 @@ optr = (PyObject **)(arr->data); n = PyArray_SIZE(arr); if (obj == NULL) { - for(i=0; idata; - for(i=0; idescr); optr += arr->descr->elsize; } @@ -5950,7 +6305,9 @@ descr = PyArray_DESCR(arr); Py_INCREF(descr); newarr = PyArray_FromAny(obj, descr, 0,0, ALIGNED, NULL); - if (newarr == NULL) return -1; + if (newarr == NULL) { + return -1; + } fromptr = PyArray_DATA(newarr); swap = (PyArray_ISNOTSWAPPED(arr) != PyArray_ISNOTSWAPPED(newarr)); } @@ -5980,7 +6337,7 @@ Py_XDECREF(newarr); return -1; } - while(size--) { + while (size--) { copyswap(iter->dataptr, fromptr, swap, arr); PyArray_ITER_NEXT(iter); } @@ -6007,14 +6364,11 @@ PyArrayObject *ret; buffer.ptr = NULL; - /* Usually called with shape and type - but can also be called with buffer, strides, and swapped info - */ - - /* For now, let's just use this to create an empty, contiguous - array of a specific type and shape. - */ - + /* + * Usually called with shape and type but can also be called with buffer, + * strides, and swapped info For now, let's just use this to create an + * empty, contiguous array of a specific type and shape. + */ if (!PyArg_ParseTupleAndKeywords(args, kwds, "O&|O&O&LO&O&", kwlist, PyArray_IntpConverter, &dims, @@ -6026,16 +6380,17 @@ &PyArray_IntpConverter, &strides, &PyArray_OrderConverter, - &order)) + &order)) { goto fail; - - if (order == PyArray_FORTRANORDER) fortran = 1; - - if (descr == NULL) + } + if (order == PyArray_FORTRANORDER) { + fortran = 1; + } + if (descr == NULL) { descr = PyArray_DescrFromType(PyArray_DEFAULT); + } itemsize = descr->elsize; - if (itemsize == 0) { PyErr_SetString(PyExc_ValueError, "data-type with unspecified variable length"); @@ -6073,27 +6428,31 @@ } if (buffer.ptr == NULL) { - ret = (PyArrayObject *) \ + ret = (PyArrayObject *) PyArray_NewFromDescr(subtype, descr, (int)dims.len, dims.ptr, strides.ptr, NULL, fortran, NULL); - if (ret == NULL) {descr=NULL;goto fail;} + if (ret == NULL) { + descr = NULL; + goto fail; + } if (PyDataType_FLAGCHK(descr, NPY_ITEM_HASOBJECT)) { /* place Py_None in object positions */ PyArray_FillObjectArray(ret, Py_None); if (PyErr_Occurred()) { - descr=NULL; + descr = NULL; goto fail; } } } - else { /* buffer given -- use it */ + else { + /* buffer given -- use it */ if (dims.len == 1 && dims.ptr[0] == -1) { dims.ptr[0] = (buffer.len-(intp)offset) / itemsize; } - else if ((strides.ptr == NULL) && \ - (buffer.len < ((intp)itemsize)* \ + else if ((strides.ptr == NULL) && + (buffer.len < ((intp)itemsize)* PyArray_MultiplyList(dims.ptr, dims.len))) { PyErr_SetString(PyExc_TypeError, "buffer is too small for " \ @@ -6101,27 +6460,38 @@ goto fail; } /* get writeable and aligned */ - if (fortran) buffer.flags |= FORTRAN; + if (fortran) { + buffer.flags |= FORTRAN; + } ret = (PyArrayObject *)\ PyArray_NewFromDescr(subtype, descr, dims.len, dims.ptr, strides.ptr, offset + (char *)buffer.ptr, buffer.flags, NULL); - if (ret == NULL) {descr=NULL; goto fail;} + if (ret == NULL) { + descr = NULL; + goto fail; + } PyArray_UpdateFlags(ret, UPDATE_ALL); ret->base = buffer.base; Py_INCREF(buffer.base); } PyDimMem_FREE(dims.ptr); - if (strides.ptr) PyDimMem_FREE(strides.ptr); + if (strides.ptr) { + PyDimMem_FREE(strides.ptr); + } return (PyObject *)ret; fail: Py_XDECREF(descr); - if (dims.ptr) PyDimMem_FREE(dims.ptr); - if (strides.ptr) PyDimMem_FREE(strides.ptr); + if (dims.ptr) { + PyDimMem_FREE(dims.ptr); + } + if (strides.ptr) { + PyDimMem_FREE(strides.ptr); + } return NULL; } @@ -6167,7 +6537,9 @@ /* Assumes C-order */ ret = PyArray_Reshape(self, val); - if (ret == NULL) return -1; + if (ret == NULL) { + return -1; + } if (PyArray_DATA(ret) != PyArray_DATA(self)) { Py_DECREF(ret); PyErr_SetString(PyExc_AttributeError, @@ -6180,7 +6552,8 @@ PyDimMem_FREE(self->dimensions); nd = PyArray_NDIM(ret); self->nd = nd; - if (nd > 0) { /* create new dimensions and strides */ + if (nd > 0) { + /* create new dimensions and strides */ self->dimensions = PyDimMem_NEW(2*nd); if (self->dimensions == NULL) { Py_DECREF(ret); @@ -6188,12 +6561,13 @@ return -1; } self->strides = self->dimensions + nd; - memcpy(self->dimensions, PyArray_DIMS(ret), - nd*sizeof(intp)); - memcpy(self->strides, PyArray_STRIDES(ret), - nd*sizeof(intp)); + memcpy(self->dimensions, PyArray_DIMS(ret), nd*sizeof(intp)); + memcpy(self->strides, PyArray_STRIDES(ret), nd*sizeof(intp)); } - else {self->dimensions=NULL; self->strides=NULL;} + else { + self->dimensions = NULL; + self->strides = NULL; + } Py_DECREF(ret); PyArray_UpdateFlags(self, CONTIGUOUS | FORTRAN); return 0; @@ -6211,12 +6585,12 @@ { PyArray_Dims newstrides = {NULL, 0}; PyArrayObject *new; - intp numbytes=0; - intp offset=0; + intp numbytes = 0; + intp offset = 0; Py_ssize_t buf_len; char *buf; - if (!PyArray_IntpConverter(obj, &newstrides) || \ + if (!PyArray_IntpConverter(obj, &newstrides) || newstrides.ptr == NULL) { PyErr_SetString(PyExc_TypeError, "invalid strides"); return -1; @@ -6230,9 +6604,10 @@ while(new->base && PyArray_Check(new->base)) { new = (PyArrayObject *)(new->base); } - /* Get the available memory through the buffer - interface on new->base or if that fails - from the current new */ + /* + * Get the available memory through the buffer interface on + * new->base or if that fails from the current new + */ if (new->base && PyObject_AsReadBuffer(new->base, (const void **)&buf, &buf_len) >= 0) { @@ -6268,10 +6643,12 @@ static PyObject * array_priority_get(PyArrayObject *self) { - if (PyArray_CheckExact(self)) + if (PyArray_CheckExact(self)) { return PyFloat_FromDouble(PyArray_PRIORITY); - else + } + else { return PyFloat_FromDouble(PyArray_SUBTYPE_PRIORITY); + } } static PyObject *arraydescr_protocol_typestr_get(PyArray_Descr *); @@ -6298,16 +6675,23 @@ PyObject *dobj; res = arraydescr_protocol_descr_get(self->descr); - if (res) return res; + if (res) { + return res; + } PyErr_Clear(); /* get default */ dobj = PyTuple_New(2); - if (dobj == NULL) return NULL; + if (dobj == NULL) { + return NULL; + } PyTuple_SET_ITEM(dobj, 0, PyString_FromString("")); PyTuple_SET_ITEM(dobj, 1, array_typestr_get(self)); res = PyList_New(1); - if (res == NULL) {Py_DECREF(dobj); return NULL;} + if (res == NULL) { + Py_DECREF(dobj); + return NULL; + } PyList_SET_ITEM(res, 0, dobj); return res; } @@ -6316,9 +6700,9 @@ array_protocol_strides_get(PyArrayObject *self) { if PyArray_ISCONTIGUOUS(self) { - Py_INCREF(Py_None); - return Py_None; - } + Py_INCREF(Py_None); + return Py_None; + } return PyArray_IntTupleFromIntp(self->nd, self->strides); } @@ -6339,9 +6723,10 @@ PyObject *_numpy_internal; PyObject *ret; _numpy_internal = PyImport_ImportModule("numpy.core._internal"); - if (_numpy_internal == NULL) return NULL; - ret = PyObject_CallMethod(_numpy_internal, "_ctypes", - "ON", self, + if (_numpy_internal == NULL) { + return NULL; + } + ret = PyObject_CallMethod(_numpy_internal, "_ctypes", "ON", self, PyLong_FromVoidPtr(self->data)); Py_DECREF(_numpy_internal); return ret; @@ -6352,8 +6737,11 @@ { PyObject *dict; PyObject *obj; + dict = PyDict_New(); - if (dict == NULL) return NULL; + if (dict == NULL) { + return NULL; + } /* dataptr */ obj = array_dataptr_get(self); @@ -6393,11 +6781,12 @@ return NULL; } nbytes = PyArray_NBYTES(self); - if PyArray_ISWRITEABLE(self) - return PyBuffer_FromReadWriteObject((PyObject *)self, 0, - (int) nbytes); - else + if PyArray_ISWRITEABLE(self) { + return PyBuffer_FromReadWriteObject((PyObject *)self, 0, (int) nbytes); + } + else { return PyBuffer_FromObject((PyObject *)self, 0, (int) nbytes); + } } static int @@ -6409,8 +6798,7 @@ if (PyObject_AsWriteBuffer(op, &buf, &buf_len) < 0) { writeable = 0; - if (PyObject_AsReadBuffer(op, (const void **)&buf, - &buf_len) < 0) { + if (PyObject_AsReadBuffer(op, (const void **)&buf, &buf_len) < 0) { PyErr_SetString(PyExc_AttributeError, "object does not have single-segment " \ "buffer interface"); @@ -6423,8 +6811,7 @@ return -1; } if (PyArray_NBYTES(self) > buf_len) { - PyErr_SetString(PyExc_AttributeError, - "not enough data for array"); + PyErr_SetString(PyExc_AttributeError, "not enough data for array"); return -1; } if (self->flags & OWNDATA) { @@ -6442,8 +6829,9 @@ self->base = op; self->data = buf; self->flags = CARRAY; - if (!writeable) + if (!writeable) { self->flags &= ~WRITEABLE; + } return 0; } @@ -6461,10 +6849,12 @@ #if SIZEOF_INTP <= SIZEOF_LONG return PyInt_FromLong((long) size); #else - if (size > MAX_LONG || size < MIN_LONG) + if (size > MAX_LONG || size < MIN_LONG) { return PyLong_FromLongLong(size); - else + } + else { return PyInt_FromLong((long) size); + } #endif } @@ -6475,28 +6865,29 @@ #if SIZEOF_INTP <= SIZEOF_LONG return PyInt_FromLong((long) nbytes); #else - if (nbytes > MAX_LONG || nbytes < MIN_LONG) + if (nbytes > MAX_LONG || nbytes < MIN_LONG) { return PyLong_FromLongLong(nbytes); - else + } + else { return PyInt_FromLong((long) nbytes); + } #endif } -/* If the type is changed. - Also needing change: strides, itemsize +/* + * If the type is changed. + * Also needing change: strides, itemsize + * + * Either itemsize is exactly the same or the array is single-segment + * (contiguous or fortran) with compatibile dimensions The shape and strides + * will be adjusted in that case as well. + */ - Either itemsize is exactly the same - or the array is single-segment (contiguous or fortran) with - compatibile dimensions - - The shape and strides will be adjusted in that case as well. -*/ - static int array_descr_set(PyArrayObject *self, PyObject *arg) { - PyArray_Descr *newtype=NULL; + PyArray_Descr *newtype = NULL; intp newdim; int index; char *msg = "new type not compatible with array."; @@ -6525,51 +6916,61 @@ } - if ((newtype->elsize != self->descr->elsize) && \ - (self->nd == 0 || !PyArray_ISONESEGMENT(self) || \ - newtype->subarray)) goto fail; - - if (PyArray_ISCONTIGUOUS(self)) index = self->nd - 1; - else index = 0; - + if ((newtype->elsize != self->descr->elsize) && + (self->nd == 0 || !PyArray_ISONESEGMENT(self) || + newtype->subarray)) { + goto fail; + } + if (PyArray_ISCONTIGUOUS(self)) { + index = self->nd - 1; + } + else { + index = 0; + } if (newtype->elsize < self->descr->elsize) { - /* if it is compatible increase the size of the - dimension at end (or at the front for FORTRAN) - */ - if (self->descr->elsize % newtype->elsize != 0) + /* + * if it is compatible increase the size of the + * dimension at end (or at the front for FORTRAN) + */ + if (self->descr->elsize % newtype->elsize != 0) { goto fail; + } newdim = self->descr->elsize / newtype->elsize; self->dimensions[index] *= newdim; self->strides[index] = newtype->elsize; } - else if (newtype->elsize > self->descr->elsize) { - - /* Determine if last (or first if FORTRAN) dimension - is compatible */ - + /* + * Determine if last (or first if FORTRAN) dimension + * is compatible + */ newdim = self->dimensions[index] * self->descr->elsize; - if ((newdim % newtype->elsize) != 0) goto fail; - + if ((newdim % newtype->elsize) != 0) { + goto fail; + } self->dimensions[index] = newdim / newtype->elsize; self->strides[index] = newtype->elsize; } /* fall through -- adjust type*/ - Py_DECREF(self->descr); if (newtype->subarray) { - /* create new array object from data and update - dimensions, strides and descr from it */ + /* + * create new array object from data and update + * dimensions, strides and descr from it + */ PyArrayObject *temp; - - /* We would decref newtype here --- temp will - steal a reference to it */ - temp = (PyArrayObject *) \ + /* + * We would decref newtype here. + * temp will steal a reference to it + */ + temp = (PyArrayObject *) PyArray_NewFromDescr(&PyArray_Type, newtype, self->nd, self->dimensions, self->strides, self->data, self->flags, NULL); - if (temp == NULL) return -1; + if (temp == NULL) { + return -1; + } PyDimMem_FREE(self->dimensions); self->dimensions = temp->dimensions; self->nd = temp->nd; @@ -6584,7 +6985,6 @@ self->descr = newtype; PyArray_UpdateFlags(self, UPDATE_ALL); - return 0; fail: @@ -6599,7 +6999,9 @@ PyArrayInterface *inter; inter = (PyArrayInterface *)_pya_malloc(sizeof(PyArrayInterface)); - if (inter==NULL) return PyErr_NoMemory(); + if (inter==NULL) { + return PyErr_NoMemory(); + } inter->two = 2; inter->nd = self->nd; inter->typekind = self->descr->kind; @@ -6608,9 +7010,10 @@ /* reset unused flags */ inter->flags &= ~(UPDATEIFCOPY | OWNDATA); if (PyArray_ISNOTSWAPPED(self)) inter->flags |= NOTSWAPPED; - /* Copy shape and strides over since these can be reset - when the array is "reshaped". - */ + /* + * Copy shape and strides over since these can be reset + *when the array is "reshaped". + */ if (self->nd > 0) { inter->shape = (intp *)_pya_malloc(2*sizeof(intp)*self->nd); if (inter->shape == NULL) { @@ -6628,10 +7031,16 @@ inter->data = self->data; if (self->descr->names) { inter->descr = arraydescr_protocol_descr_get(self->descr); - if (inter->descr == NULL) PyErr_Clear(); - else inter->flags &= ARR_HAS_DESCR; + if (inter->descr == NULL) { + PyErr_Clear(); + } + else { + inter->flags &= ARR_HAS_DESCR; + } } - else inter->descr = NULL; + else { + inter->descr = NULL; + } Py_INCREF(self); return PyCObject_FromVoidPtrAndDesc(inter, self, gentype_struct_free); } @@ -6658,7 +7067,7 @@ PyArray_FillObjectArray(ret, zero); Py_DECREF(zero); if (PyErr_Occurred()) { - Py_DECREF(ret); + Py_DECREF(ret); return -1; } } @@ -6666,14 +7075,14 @@ intp n = PyArray_NBYTES(ret); memset(ret->data, 0, n); } - return 0; + return 0; } -/* Create a view of a complex array with an equivalent data-type - except it is real instead of complex. -*/ - +/* + * Create a view of a complex array with an equivalent data-type + * except it is real instead of complex. + */ static PyArrayObject * _get_part(PyArrayObject *self, int imag) { @@ -6692,7 +7101,7 @@ Py_DECREF(type); type = new; } - ret = (PyArrayObject *) \ + ret = (PyArrayObject *) PyArray_NewFromDescr(self->ob_type, type, self->nd, @@ -6700,7 +7109,9 @@ self->strides, self->data + offset, self->flags, (PyObject *)self); - if (ret == NULL) return NULL; + if (ret == NULL) { + return NULL; + } ret->flags &= ~CONTIGUOUS; ret->flags &= ~FORTRAN; Py_INCREF(self); @@ -6733,14 +7144,19 @@ if (PyArray_ISCOMPLEX(self)) { ret = _get_part(self, 0); - if (ret == NULL) return -1; + if (ret == NULL) { + return -1; + } } else { Py_INCREF(self); ret = self; } new = (PyArrayObject *)PyArray_FromAny(val, NULL, 0, 0, 0, NULL); - if (new == NULL) {Py_DECREF(ret); return -1;} + if (new == NULL) { + Py_DECREF(ret); + return -1; + } rint = PyArray_MoveInto(ret, new); Py_DECREF(ret); Py_DECREF(new); @@ -6759,15 +7175,17 @@ Py_INCREF(self->descr); ret = (PyArrayObject *)PyArray_NewFromDescr(self->ob_type, self->descr, - self->nd, + self->nd, self->dimensions, NULL, NULL, PyArray_ISFORTRAN(self), (PyObject *)self); - if (ret == NULL) return NULL; - - if (_zerofill(ret) < 0) return NULL; - + if (ret == NULL) { + return NULL; + } + if (_zerofill(ret) < 0) { + return NULL; + } ret->flags &= ~WRITEABLE; } return (PyObject *) ret; @@ -6782,9 +7200,14 @@ int rint; ret = _get_part(self, 1); - if (ret == NULL) return -1; + if (ret == NULL) { + return -1; + } new = (PyArrayObject *)PyArray_FromAny(val, NULL, 0, 0, 0, NULL); - if (new == NULL) {Py_DECREF(ret); return -1;} + if (new == NULL) { + Py_DECREF(ret); + return -1; + } rint = PyArray_MoveInto(ret, new); Py_DECREF(ret); Py_DECREF(new); @@ -6806,9 +7229,9 @@ static int array_flat_set(PyArrayObject *self, PyObject *val) { - PyObject *arr=NULL; + PyObject *arr = NULL; int retval = -1; - PyArrayIterObject *selfit=NULL, *arrit=NULL; + PyArrayIterObject *selfit = NULL, *arrit = NULL; PyArray_Descr *typecode; int swap; PyArray_CopySwapFunc *copyswap; @@ -6817,28 +7240,36 @@ Py_INCREF(typecode); arr = PyArray_FromAny(val, typecode, 0, 0, FORCECAST | FORTRAN_IF(self), NULL); - if (arr == NULL) return -1; + if (arr == NULL) { + return -1; + } arrit = (PyArrayIterObject *)PyArray_IterNew(arr); - if (arrit == NULL) goto exit; + if (arrit == NULL) { + goto exit; + } selfit = (PyArrayIterObject *)PyArray_IterNew((PyObject *)self); - if (selfit == NULL) goto exit; - - if (arrit->size == 0) {retval = 0; goto exit;} - + if (selfit == NULL) { + goto exit; + } + if (arrit->size == 0) { + retval = 0; + goto exit; + } swap = PyArray_ISNOTSWAPPED(self) != PyArray_ISNOTSWAPPED(arr); copyswap = self->descr->f->copyswap; if (PyDataType_REFCHK(self->descr)) { - while(selfit->index < selfit->size) { + while (selfit->index < selfit->size) { PyArray_Item_XDECREF(selfit->dataptr, self->descr); PyArray_Item_INCREF(arrit->dataptr, PyArray_DESCR(arr)); - memmove(selfit->dataptr, arrit->dataptr, - sizeof(PyObject **)); - if (swap) + memmove(selfit->dataptr, arrit->dataptr, sizeof(PyObject **)); + if (swap) { copyswap(selfit->dataptr, NULL, swap, self); + } PyArray_ITER_NEXT(selfit); PyArray_ITER_NEXT(arrit); - if (arrit->index == arrit->size) + if (arrit->index == arrit->size) { PyArray_ITER_RESET(arrit); + } } retval = 0; goto exit; @@ -6846,14 +7277,17 @@ while(selfit->index < selfit->size) { memmove(selfit->dataptr, arrit->dataptr, self->descr->elsize); - if (swap) + if (swap) { copyswap(selfit->dataptr, NULL, swap, self); + } PyArray_ITER_NEXT(selfit); PyArray_ITER_NEXT(arrit); - if (arrit->index == arrit->size) + if (arrit->index == arrit->size) { PyArray_ITER_RESET(arrit); + } } retval = 0; + exit: Py_XDECREF(selfit); Py_XDECREF(arrit); @@ -6961,77 +7395,78 @@ static PyTypeObject PyArray_Type = { PyObject_HEAD_INIT(NULL) - 0, /*ob_size*/ - "numpy.ndarray", /*tp_name*/ - sizeof(PyArrayObject), /*tp_basicsize*/ - 0, /*tp_itemsize*/ + 0, /* ob_size */ + "numpy.ndarray", /* tp_name */ + sizeof(PyArrayObject), /* tp_basicsize */ + 0, /* tp_itemsize */ /* methods */ - (destructor)array_dealloc, /*tp_dealloc */ - (printfunc)NULL, /*tp_print*/ - 0, /*tp_getattr*/ - 0, /*tp_setattr*/ - (cmpfunc)0, /*tp_compare*/ - (reprfunc)array_repr, /*tp_repr*/ - &array_as_number, /*tp_as_number*/ - &array_as_sequence, /*tp_as_sequence*/ - &array_as_mapping, /*tp_as_mapping*/ - (hashfunc)0, /*tp_hash*/ - (ternaryfunc)0, /*tp_call*/ - (reprfunc)array_str, /*tp_str*/ - - (getattrofunc)0, /*tp_getattro*/ - (setattrofunc)0, /*tp_setattro*/ - &array_as_buffer, /*tp_as_buffer*/ + (destructor)array_dealloc, /* tp_dealloc */ + (printfunc)NULL, /* tp_print */ + 0, /* tp_getattr */ + 0, /* tp_setattr */ + (cmpfunc)0, /* tp_compare */ + (reprfunc)array_repr, /* tp_repr */ + &array_as_number, /* tp_as_number */ + &array_as_sequence, /* tp_as_sequence */ + &array_as_mapping, /* tp_as_mapping */ + (hashfunc)0, /* tp_hash */ + (ternaryfunc)0, /* tp_call */ + (reprfunc)array_str, /* tp_str */ + (getattrofunc)0, /* tp_getattro */ + (setattrofunc)0, /* tp_setattro */ + &array_as_buffer, /* tp_as_buffer */ (Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE - | Py_TPFLAGS_CHECKTYPES), /*tp_flags*/ + | Py_TPFLAGS_CHECKTYPES), /* tp_flags */ /*Documentation string */ - 0, /*tp_doc*/ + 0, /* tp_doc */ - (traverseproc)0, /*tp_traverse */ - (inquiry)0, /*tp_clear */ - (richcmpfunc)array_richcompare, /*tp_richcompare */ - offsetof(PyArrayObject, weakreflist), /*tp_weaklistoffset */ + (traverseproc)0, /* tp_traverse */ + (inquiry)0, /* tp_clear */ + (richcmpfunc)array_richcompare, /* tp_richcompare */ + offsetof(PyArrayObject, weakreflist), /* tp_weaklistoffset */ /* Iterator support (use standard) */ - (getiterfunc)array_iter, /* tp_iter */ - (iternextfunc)0, /* tp_iternext */ + (getiterfunc)array_iter, /* tp_iter */ + (iternextfunc)0, /* tp_iternext */ /* Sub-classing (new-style object) support */ - array_methods, /* tp_methods */ - 0, /* tp_members */ - array_getsetlist, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - (initproc)0, /* tp_init */ - array_alloc, /* tp_alloc */ - (newfunc)array_new, /* tp_new */ - 0, /* tp_free */ - 0, /* tp_is_gc */ - 0, /* tp_bases */ - 0, /* tp_mro */ - 0, /* tp_cache */ - 0, /* tp_subclasses */ - 0, /* tp_weaklist */ - 0, /* tp_del */ + array_methods, /* tp_methods */ + 0, /* tp_members */ + array_getsetlist, /* tp_getset */ + 0, /* tp_base */ + 0, /* tp_dict */ + 0, /* tp_descr_get */ + 0, /* tp_descr_set */ + 0, /* tp_dictoffset */ + (initproc)0, /* tp_init */ + array_alloc, /* tp_alloc */ + (newfunc)array_new, /* tp_new */ + 0, /* tp_free */ + 0, /* tp_is_gc */ + 0, /* tp_bases */ + 0, /* tp_mro */ + 0, /* tp_cache */ + 0, /* tp_subclasses */ + 0, /* tp_weaklist */ + 0, /* tp_del */ #ifdef COUNT_ALLOCS - /* these must be last and never explicitly initialized */ - 0, /* tp_allocs */ - 0, /* tp_frees */ - 0, /* tp_maxalloc */ - 0, /* tp_prev */ - 0, /* *tp_next */ + /* these must be last and never explicitly initialized */ + 0, /* tp_allocs */ + 0, /* tp_frees */ + 0, /* tp_maxalloc */ + 0, /* tp_prev */ + 0, /* *tp_next */ #endif }; -/* The rest of this code is to build the right kind of array from a python */ -/* object. */ +/* + * The rest of this code is to build the right kind of array + * from a python object. + */ static int discover_depth(PyObject *s, int max, int stop_at_string, int stop_at_tuple) @@ -7120,13 +7555,12 @@ } n = PyObject_Length(s); - if ((nd == 0) || PyString_Check(s) || PyUnicode_Check(s) || PyBuffer_Check(s)) { *itemsize = MAX(*itemsize, n); return 0; } - for(i = 0; i < n; i++) { + for (i = 0; i < n; i++) { if ((e = PySequence_GetItem(s,i))==NULL) { return -1; } @@ -7156,8 +7590,7 @@ } return 0; } - - n=PyObject_Length(s); + n = PyObject_Length(s); *d = n; if (*d < 0) { return -1; @@ -7207,10 +7640,11 @@ } - if (chktype->type_num > mintype->type_num) + if (chktype->type_num > mintype->type_num) { outtype_num = chktype->type_num; + } else { - if (PyDataType_ISOBJECT(chktype) && \ + if (PyDataType_ISOBJECT(chktype) && PyDataType_ISSTRING(mintype)) { return PyArray_DescrFromType(NPY_OBJECT); } @@ -7220,10 +7654,11 @@ } save_num = outtype_num; - while(outtype_num < PyArray_NTYPES && + while (outtype_num < PyArray_NTYPES && !(PyArray_CanCastSafely(chktype->type_num, outtype_num) - && PyArray_CanCastSafely(mintype->type_num, outtype_num))) + && PyArray_CanCastSafely(mintype->type_num, outtype_num))) { outtype_num++; + } if (outtype_num == PyArray_NTYPES) { outtype = PyArray_DescrFromType(save_num); } @@ -7232,11 +7667,13 @@ } if (PyTypeNum_ISEXTENDED(outtype->type_num)) { int testsize = outtype->elsize; - register int chksize, minsize; + int chksize, minsize; chksize = chktype->elsize; minsize = mintype->elsize; - /* Handle string->unicode case separately - because string itemsize is 4* as large */ + /* + * Handle string->unicode case separately + * because string itemsize is 4* as large + */ if (outtype->type_num == PyArray_UNICODE && mintype->type_num == PyArray_STRING) { testsize = MAX(chksize, 4*minsize); @@ -7269,7 +7706,8 @@ /* bools are a subclass of int */ if (PyBool_Check(op)) { return PyArray_DescrFromType(PyArray_BOOL); - } else { + } + else { return PyArray_DescrFromType(PyArray_LONG); } } @@ -7307,39 +7745,42 @@ } -/* op is an object to be converted to an ndarray. - - minitype is the minimum type-descriptor needed. - - max is the maximum number of dimensions -- used for recursive call - to avoid infinite recursion... - -*/ - +/* + * op is an object to be converted to an ndarray. + * + * minitype is the minimum type-descriptor needed. + * + * max is the maximum number of dimensions -- used for recursive call + * to avoid infinite recursion... + */ static PyArray_Descr * _array_find_type(PyObject *op, PyArray_Descr *minitype, int max) { int l; PyObject *ip; - PyArray_Descr *chktype=NULL; + PyArray_Descr *chktype = NULL; PyArray_Descr *outtype; - /* These need to come first because if op already carries - a descr structure, then we want it to be the result if minitype - is NULL. - */ - + /* + * These need to come first because if op already carries + * a descr structure, then we want it to be the result if minitype + * is NULL. + */ if (PyArray_Check(op)) { chktype = PyArray_DESCR(op); Py_INCREF(chktype); - if (minitype == NULL) return chktype; + if (minitype == NULL) { + return chktype; + } Py_INCREF(minitype); goto finish; } if (PyArray_IsScalar(op, Generic)) { chktype = PyArray_DescrFromScalar(op); - if (minitype == NULL) return chktype; + if (minitype == NULL) { + return chktype; + } Py_INCREF(minitype); goto finish; } @@ -7347,10 +7788,12 @@ if (minitype == NULL) { minitype = PyArray_DescrFromType(PyArray_BOOL); } - else Py_INCREF(minitype); - - if (max < 0) goto deflt; - + else { + Py_INCREF(minitype); + } + if (max < 0) { + goto deflt; + } chktype = _array_find_python_scalar_type(op); if (chktype) { goto finish; @@ -7361,15 +7804,17 @@ PyObject *new; new = PyDict_GetItemString(ip, "typestr"); if (new && PyString_Check(new)) { - chktype =_array_typedescr_fromstr \ - (PyString_AS_STRING(new)); + chktype =_array_typedescr_fromstr(PyString_AS_STRING(new)); } } Py_DECREF(ip); - if (chktype) goto finish; + if (chktype) { + goto finish; + } } - else PyErr_Clear(); - + else { + PyErr_Clear(); + } if ((ip=PyObject_GetAttrString(op, "__array_struct__")) != NULL) { PyArrayInterface *inter; char buf[40]; @@ -7382,9 +7827,13 @@ } } Py_DECREF(ip); - if (chktype) goto finish; + if (chktype) { + goto finish; + } } - else PyErr_Clear(); + else { + PyErr_Clear(); + } if (PyString_Check(op)) { chktype = PyArray_DescrNewFromType(PyArray_STRING); @@ -7420,10 +7869,10 @@ if (PyErr_Occurred()) PyErr_Clear(); } - if (PyInstance_Check(op)) goto deflt; - + if (PyInstance_Check(op)) { + goto deflt; + } if (PySequence_Check(op)) { - l = PyObject_Length(op); if (l < 0 && PyErr_Occurred()) { PyErr_Clear(); @@ -7457,13 +7906,14 @@ chktype = _use_default_type(op); finish: - outtype = _array_small_type(chktype, minitype); Py_DECREF(chktype); Py_DECREF(minitype); - /* VOID Arrays should not occur by "default" - unless input was already a VOID */ - if (outtype->type_num == PyArray_VOID && \ + /* + * VOID Arrays should not occur by "default" + * unless input was already a VOID + */ + if (outtype->type_num == PyArray_VOID && minitype->type_num != PyArray_VOID) { Py_DECREF(outtype); return PyArray_DescrFromType(PyArray_OBJECT); @@ -7478,15 +7928,15 @@ Py_ssize_t i, slen; int res = 0; - /* This code is to ensure that the sequence access below will - return a lower-dimensional sequence. + /* + * This code is to ensure that the sequence access below will + * return a lower-dimensional sequence. */ if (PyArray_Check(s) && !(PyArray_CheckExact(s))) { - /* FIXME: This could probably copy the entire subarray - at once here using a faster algorithm. - Right now, just make sure a base-class array - is used so that the dimensionality reduction assumption - is correct. + /* + * FIXME: This could probably copy the entire subarray at once here using + * a faster algorithm. Right now, just make sure a base-class array is + * used so that the dimensionality reduction assumption is correct. */ s = PyArray_EnsureArray(s); } @@ -7498,14 +7948,13 @@ } slen = PySequence_Length(s); - if (slen != a->dimensions[dim]) { PyErr_Format(PyExc_ValueError, "setArrayFromSequence: sequence/array shape mismatch."); return -1; } - for(i=0; ind - dim) > 1) { res = setArrayFromSequence(a, o, dim+1, offset); @@ -7514,7 +7963,9 @@ res = a->descr->f->setitem(o, (a->data + offset), a); } Py_DECREF(o); - if (res < 0) return res; + if (res < 0) { + return res; + } offset += a->strides[dim]; } return 0; @@ -7534,12 +7985,13 @@ "assignment to 0-d array"); return -1; } - return setArrayFromSequence(self, v, 0, 0); } -/* "Array Scalars don't call this code" */ -/* steals reference to typecode -- no NULL*/ +/* + * "Array Scalars don't call this code" + * steals reference to typecode -- no NULL + */ static PyObject * Array_FromPyScalar(PyObject *op, PyArray_Descr *typecode) { @@ -7552,7 +8004,6 @@ if (itemsize == 0 && PyTypeNum_ISEXTENDED(type)) { itemsize = PyObject_Length(op); - if (type == PyArray_UNICODE) { itemsize *= 4; } @@ -7579,21 +8030,21 @@ if (PyErr_Occurred()) { Py_DECREF(ret); return NULL; - } + } else { return (PyObject *)ret; } } -/* If s is not a list, return 0 - Otherwise: - - run object_depth_and_dimension on all the elements - and make sure the returned shape and size - is the same for each element - -*/ +/* + * If s is not a list, return 0 + * Otherwise: + * + * run object_depth_and_dimension on all the elements + * and make sure the returned shape and size is the + * same for each element + */ static int object_depth_and_dimension(PyObject *s, int max, intp *dims) { @@ -7631,7 +8082,7 @@ } nd = object_depth_and_dimension(obj, max - 1, newdims); - for(i = 1; i < size; i++) { + for (i = 1; i < size; i++) { if (islist) { obj = PyList_GET_ITEM(s, i); } @@ -7647,7 +8098,7 @@ } } - for(i = 1; i <= nd; i++) { + for (i = 1; i <= nd; i++) { dims[i] = newdims[i-1]; } dims[0] = size; @@ -7670,12 +8121,10 @@ if (nd == 0) { return Array_FromPyScalar(s, typecode); } - r = (PyArrayObject*)PyArray_NewFromDescr(&PyArray_Type, typecode, nd, d, NULL, NULL, fortran, NULL); - if (!r) { return NULL; } @@ -7686,12 +8135,12 @@ return (PyObject*)r; } -/* +/* * isobject means that we are constructing an * object array on-purpose with a nested list. * Only a list is interpreted as a sequence with these rules + * steals reference to typecode */ -/* steals reference to typecode */ static PyObject * Array_FromSequence(PyObject *s, PyArray_Descr *typecode, int fortran, int min_depth, int max_depth) @@ -7707,11 +8156,9 @@ int itemsize = typecode->elsize; check_it = (typecode->type != PyArray_CHARLTR); - stop_at_string = (type != PyArray_STRING) || (typecode->type == PyArray_STRINGLTR); - - stop_at_tuple = (type == PyArray_VOID && (typecode->names \ + stop_at_tuple = (type == PyArray_VOID && (typecode->names || typecode->subarray)); nd = discover_depth(s, MAX_DIMS + 1, stop_at_string, stop_at_tuple); @@ -7776,8 +8223,8 @@ /*NUMPY_API - Is the typenum valid? -*/ + * Is the typenum valid? + */ static int PyArray_ValidType(int type) { @@ -7792,11 +8239,11 @@ return res; } -/* For backward compatibility */ - -/* steals reference to at --- cannot be NULL*/ /*NUMPY_API - *Cast an array using typecode structure. + * For backward compatibility + * + * Cast an array using typecode structure. + * steals reference to at --- cannot be NULL */ static PyObject * PyArray_CastToType(PyArrayObject *mp, PyArray_Descr *at, int fortran) @@ -7807,12 +8254,11 @@ mpd = mp->descr; - if (((mpd == at) || ((mpd->type_num == at->type_num) && \ - PyArray_EquivByteorders(mpd->byteorder,\ - at->byteorder) && \ - ((mpd->elsize == at->elsize) || \ - (at->elsize==0)))) && \ - PyArray_ISBEHAVED_RO(mp)) { + if (((mpd == at) || + ((mpd->type_num == at->type_num) && + PyArray_EquivByteorders(mpd->byteorder, at->byteorder) && + ((mpd->elsize == at->elsize) || (at->elsize==0)))) && + PyArray_ISBEHAVED_RO(mp)) { Py_DECREF(at); Py_INCREF(mp); return (PyObject *)mp; @@ -7823,7 +8269,7 @@ if (at == NULL) { return NULL; } - if (mpd->type_num == PyArray_STRING && + if (mpd->type_num == PyArray_STRING && at->type_num == PyArray_UNICODE) { at->elsize = mpd->elsize << 2; } @@ -7857,14 +8303,15 @@ } /*NUMPY_API - Get a cast function to cast from the input descriptor to the - output type_number (must be a registered data-type). - Returns NULL if un-successful. -*/ + * Get a cast function to cast from the input descriptor to the + * output type_number (must be a registered data-type). + * Returns NULL if un-successful. + */ static PyArray_VectorUnaryFunc * PyArray_GetCastFunc(PyArray_Descr *descr, int type_num) { - PyArray_VectorUnaryFunc *castfunc=NULL; + PyArray_VectorUnaryFunc *castfunc = NULL; + if (type_num < PyArray_NTYPES) { castfunc = descr->f->cast[type_num]; } @@ -7889,19 +8336,19 @@ return castfunc; } - PyErr_SetString(PyExc_ValueError, - "No cast function available."); + PyErr_SetString(PyExc_ValueError, "No cast function available."); return NULL; } -/* Reference counts: - copyswapn is used which increases and decreases reference counts for OBJECT arrays. - All that needs to happen is for any reference counts in the buffers to be - decreased when completely finished with the buffers. - - buffers[0] is the destination - buffers[1] is the source -*/ +/* + * Reference counts: + * copyswapn is used which increases and decreases reference counts for OBJECT arrays. + * All that needs to happen is for any reference counts in the buffers to be + * decreased when completely finished with the buffers. + * + * buffers[0] is the destination + * buffers[1] is the source + */ static void _strided_buffered_cast(char *dptr, intp dstride, int delsize, int dswap, PyArray_CopySwapNFunc *dcopyfunc, @@ -7913,10 +8360,11 @@ { int i; if (N <= bufsize) { - /* 1. copy input to buffer and swap - 2. cast input to output - 3. swap output if necessary and copy from output buffer - */ + /* + * 1. copy input to buffer and swap + * 2. cast input to output + * 3. swap output if necessary and copy from output buffer + */ scopyfunc(buffers[1], selsize, sptr, sstride, N, sswap, src); castfunc(buffers[1], buffers[0], N, src, dest); dcopyfunc(dptr, dstride, buffers[0], delsize, N, dswap, dest); @@ -7925,9 +8373,9 @@ /* otherwise we need to divide up into bufsize pieces */ i = 0; - while(N > 0) { - int newN; - newN = MIN(N, bufsize); + while (N > 0) { + int newN = MIN(N, bufsize); + _strided_buffered_cast(dptr+i*dstride, dstride, delsize, dswap, dcopyfunc, sptr+i*sstride, sstride, selsize, @@ -8007,7 +8455,7 @@ } #endif - while(multi->index < multi->size) { + while (multi->index < multi->size) { _strided_buffered_cast(multi->iters[0]->dataptr, ostrides, delsize, oswap, ocopyfunc, @@ -8026,13 +8474,13 @@ Py_DECREF(multi); if (PyDataType_REFCHK(in->descr)) { obptr = buffers[1]; - for(i = 0; i < N; i++, obptr+=selsize) { + for (i = 0; i < N; i++, obptr+=selsize) { PyArray_Item_XDECREF(obptr, out->descr); } } if (PyDataType_REFCHK(out->descr)) { obptr = buffers[0]; - for(i = 0; i < N; i++, obptr+=delsize) { + for (i = 0; i < N; i++, obptr+=delsize) { PyArray_Item_XDECREF(obptr, out->descr); } } @@ -8062,7 +8510,7 @@ { int simple; int same; - PyArray_VectorUnaryFunc *castfunc=NULL; + PyArray_VectorUnaryFunc *castfunc = NULL; int mpsize = PyArray_SIZE(mp); int iswap, oswap; NPY_BEGIN_THREADS_DEF; @@ -8071,8 +8519,7 @@ return 0; } if (!PyArray_ISWRITEABLE(out)) { - PyErr_SetString(PyExc_ValueError, - "output array is not writeable"); + PyErr_SetString(PyExc_ValueError, "output array is not writeable"); return -1; } @@ -8127,13 +8574,13 @@ { char *inbuffer, *bptr, *optr; char *outbuffer=NULL; - PyArrayIterObject *it_in=NULL, *it_out=NULL; + PyArrayIterObject *it_in = NULL, *it_out = NULL; register intp i, index; intp ncopies = PyArray_SIZE(out) / PyArray_SIZE(in); int elsize=in->descr->elsize; int nels = PyArray_BUFSIZE; int el; - int inswap, outswap=0; + int inswap, outswap = 0; int obuf=!PyArray_ISCARRAY(out); int oelsize = out->descr->elsize; PyArray_CopySwapFunc *in_csn; @@ -8152,45 +8599,50 @@ inswap = !(PyArray_ISFLEXIBLE(in) || PyArray_ISNOTSWAPPED(in)); inbuffer = PyDataMem_NEW(PyArray_BUFSIZE*elsize); - if (inbuffer == NULL) return -1; - if (PyArray_ISOBJECT(in)) + if (inbuffer == NULL) { + return -1; + } + if (PyArray_ISOBJECT(in)) { memset(inbuffer, 0, PyArray_BUFSIZE*elsize); + } it_in = (PyArrayIterObject *)PyArray_IterNew((PyObject *)in); - if (it_in == NULL) goto exit; - + if (it_in == NULL) { + goto exit; + } if (obuf) { - outswap = !(PyArray_ISFLEXIBLE(out) || \ + outswap = !(PyArray_ISFLEXIBLE(out) || PyArray_ISNOTSWAPPED(out)); outbuffer = PyDataMem_NEW(PyArray_BUFSIZE*oelsize); - if (outbuffer == NULL) goto exit; - if (PyArray_ISOBJECT(out)) + if (outbuffer == NULL) { + goto exit; + } + if (PyArray_ISOBJECT(out)) { memset(outbuffer, 0, PyArray_BUFSIZE*oelsize); - + } it_out = (PyArrayIterObject *)PyArray_IterNew((PyObject *)out); - if (it_out == NULL) goto exit; - + if (it_out == NULL) { + goto exit; + } nels = MIN(nels, PyArray_BUFSIZE); } optr = (obuf) ? outbuffer: out->data; bptr = inbuffer; el = 0; - while(ncopies--) { + while (ncopies--) { index = it_in->size; PyArray_ITER_RESET(it_in); - while(index--) { + while (index--) { in_csn(bptr, it_in->dataptr, inswap, in); bptr += elsize; PyArray_ITER_NEXT(it_in); el += 1; if ((el == nels) || (index == 0)) { /* buffer filled, do cast */ - castfunc(inbuffer, optr, el, in, out); - if (obuf) { /* Copy from outbuffer to array */ - for(i=0; idataptr, optr, outswap, out); @@ -8208,6 +8660,7 @@ } } retval = 0; + exit: Py_XDECREF(it_in); PyDataMem_FREE(inbuffer); @@ -8219,20 +8672,21 @@ } /*NUMPY_API - Cast to an already created array. Arrays don't have to be "broadcastable" - Only requirement is they have the same number of elements. -*/ + * Cast to an already created array. Arrays don't have to be "broadcastable" + * Only requirement is they have the same number of elements. + */ static int PyArray_CastAnyTo(PyArrayObject *out, PyArrayObject *mp) { int simple; - PyArray_VectorUnaryFunc *castfunc=NULL; + PyArray_VectorUnaryFunc *castfunc = NULL; int mpsize = PyArray_SIZE(mp); - if (mpsize == 0) return 0; + if (mpsize == 0) { + return 0; + } if (!PyArray_ISWRITEABLE(out)) { - PyErr_SetString(PyExc_ValueError, - "output array is not writeable"); + PyErr_SetString(PyExc_ValueError, "output array is not writeable"); return -1; } @@ -8244,36 +8698,34 @@ } castfunc = PyArray_GetCastFunc(mp->descr, out->descr->type_num); - if (castfunc == NULL) return -1; - - + if (castfunc == NULL) { + return -1; + } simple = ((PyArray_ISCARRAY_RO(mp) && PyArray_ISCARRAY(out)) || (PyArray_ISFARRAY_RO(mp) && PyArray_ISFARRAY(out))); - if (simple) { castfunc(mp->data, out->data, mpsize, mp, out); return 0; } - if (PyArray_SAMESHAPE(out, mp)) { int iswap, oswap; iswap = PyArray_ISBYTESWAPPED(mp) && !PyArray_ISFLEXIBLE(mp); oswap = PyArray_ISBYTESWAPPED(out) && !PyArray_ISFLEXIBLE(out); return _broadcast_cast(out, mp, castfunc, iswap, oswap); } - return _bufferedcast(out, mp, castfunc); } -/* steals reference to newtype --- acc. NULL */ -/*NUMPY_API*/ +/*NUMPY_API + * steals reference to newtype --- acc. NULL + */ static PyObject * PyArray_FromArray(PyArrayObject *arr, PyArray_Descr *newtype, int flags) { - PyArrayObject *ret=NULL; + PyArrayObject *ret = NULL; int itemsize; int copy = 0; int arrflags; @@ -8282,9 +8734,7 @@ PyTypeObject *subtype; oldtype = PyArray_DESCR(arr); - subtype = arr->ob_type; - if (newtype == NULL) { newtype = oldtype; Py_INCREF(oldtype); } @@ -8298,10 +8748,11 @@ itemsize = newtype->elsize; } - /* Can't cast unless ndim-0 array, FORCECAST is specified - or the cast is safe. - */ - if (!(flags & FORCECAST) && !PyArray_NDIM(arr)==0 && + /* + * Can't cast unless ndim-0 array, FORCECAST is specified + * or the cast is safe. + */ + if (!(flags & FORCECAST) && !PyArray_NDIM(arr) == 0 && !PyArray_CanCastTo(oldtype, newtype)) { Py_DECREF(newtype); PyErr_SetString(PyExc_TypeError, @@ -8313,16 +8764,15 @@ /* Don't copy if sizes are compatible */ if ((flags & ENSURECOPY) || PyArray_EquivTypes(oldtype, newtype)) { arrflags = arr->flags; - - copy = (flags & ENSURECOPY) || \ - ((flags & CONTIGUOUS) && (!(arrflags & CONTIGUOUS))) \ - || ((flags & ALIGNED) && (!(arrflags & ALIGNED))) \ - || (arr->nd > 1 && \ - ((flags & FORTRAN) && (!(arrflags & FORTRAN)))) \ + copy = (flags & ENSURECOPY) || + ((flags & CONTIGUOUS) && (!(arrflags & CONTIGUOUS))) + || ((flags & ALIGNED) && (!(arrflags & ALIGNED))) + || (arr->nd > 1 && + ((flags & FORTRAN) && (!(arrflags & FORTRAN)))) || ((flags & WRITEABLE) && (!(arrflags & WRITEABLE))); if (copy) { - if ((flags & UPDATEIFCOPY) && \ + if ((flags & UPDATEIFCOPY) && (!PyArray_ISWRITEABLE(arr))) { Py_DECREF(newtype); PyErr_SetString(PyExc_ValueError, msg); @@ -8331,7 +8781,7 @@ if ((flags & ENSUREARRAY)) { subtype = &PyArray_Type; } - ret = (PyArrayObject *) \ + ret = (PyArrayObject *) PyArray_NewFromDescr(subtype, newtype, arr->nd, arr->dimensions, @@ -8352,14 +8802,16 @@ Py_INCREF(arr); } } - /* If no copy then just increase the reference - count and return the input */ + /* + * If no copy then just increase the reference + * count and return the input + */ else { Py_DECREF(newtype); if ((flags & ENSUREARRAY) && !PyArray_CheckExact(arr)) { Py_INCREF(arr->descr); - ret = (PyArrayObject *) \ + ret = (PyArrayObject *) PyArray_NewFromDescr(&PyArray_Type, arr->descr, arr->nd, @@ -8379,10 +8831,12 @@ } } - /* The desired output type is different than the input - array type and copy was not specified */ + /* + * The desired output type is different than the input + * array type and copy was not specified + */ else { - if ((flags & UPDATEIFCOPY) && \ + if ((flags & UPDATEIFCOPY) && (!PyArray_ISWRITEABLE(arr))) { Py_DECREF(newtype); PyErr_SetString(PyExc_ValueError, msg); @@ -8391,7 +8845,7 @@ if ((flags & ENSUREARRAY)) { subtype = &PyArray_Type; } - ret = (PyArrayObject *) \ + ret = (PyArrayObject *) PyArray_NewFromDescr(subtype, newtype, arr->nd, arr->dimensions, NULL, NULL, @@ -8429,99 +8883,137 @@ swapchar = str[0]; str += 1; -#define _MY_FAIL { \ - PyErr_SetString(PyExc_ValueError, msg); \ - return NULL; \ - } - typechar = str[0]; size = atoi(str + 1); switch (typechar) { - case 'b': - if (size == sizeof(Bool)) - type_num = PyArray_BOOL; - else _MY_FAIL - break; - case 'u': - if (size == sizeof(uintp)) - type_num = PyArray_UINTP; - else if (size == sizeof(char)) - type_num = PyArray_UBYTE; - else if (size == sizeof(short)) - type_num = PyArray_USHORT; - else if (size == sizeof(ulong)) - type_num = PyArray_ULONG; - else if (size == sizeof(int)) - type_num = PyArray_UINT; - else if (size == sizeof(ulonglong)) - type_num = PyArray_ULONGLONG; - else _MY_FAIL - break; - case 'i': - if (size == sizeof(intp)) - type_num = PyArray_INTP; - else if (size == sizeof(char)) - type_num = PyArray_BYTE; - else if (size == sizeof(short)) - type_num = PyArray_SHORT; - else if (size == sizeof(long)) - type_num = PyArray_LONG; - else if (size == sizeof(int)) - type_num = PyArray_INT; - else if (size == sizeof(longlong)) - type_num = PyArray_LONGLONG; - else _MY_FAIL - break; - case 'f': - if (size == sizeof(float)) - type_num = PyArray_FLOAT; - else if (size == sizeof(double)) - type_num = PyArray_DOUBLE; - else if (size == sizeof(longdouble)) - type_num = PyArray_LONGDOUBLE; - else _MY_FAIL - break; - case 'c': - if (size == sizeof(float)*2) - type_num = PyArray_CFLOAT; - else if (size == sizeof(double)*2) - type_num = PyArray_CDOUBLE; - else if (size == sizeof(longdouble)*2) - type_num = PyArray_CLONGDOUBLE; - else _MY_FAIL - break; - case 'O': - if (size == sizeof(PyObject *)) - type_num = PyArray_OBJECT; - else _MY_FAIL - break; - case PyArray_STRINGLTR: - type_num = PyArray_STRING; - break; - case PyArray_UNICODELTR: - type_num = PyArray_UNICODE; - size <<= 2; - break; - case 'V': - type_num = PyArray_VOID; - break; - default: - _MY_FAIL + case 'b': + if (size == sizeof(Bool)) { + type_num = PyArray_BOOL; } + else { + PyErr_SetString(PyExc_ValueError, msg); + return NULL; + } + break; + case 'u': + if (size == sizeof(uintp)) { + type_num = PyArray_UINTP; + } + else if (size == sizeof(char)) { + type_num = PyArray_UBYTE; + } + else if (size == sizeof(short)) { + type_num = PyArray_USHORT; + } + else if (size == sizeof(ulong)) { + type_num = PyArray_ULONG; + } + else if (size == sizeof(int)) { + type_num = PyArray_UINT; + } + else if (size == sizeof(ulonglong)) { + type_num = PyArray_ULONGLONG; + } + else { + PyErr_SetString(PyExc_ValueError, msg); + return NULL; + } + break; + case 'i': + if (size == sizeof(intp)) { + type_num = PyArray_INTP; + } + else if (size == sizeof(char)) { + type_num = PyArray_BYTE; + } + else if (size == sizeof(short)) { + type_num = PyArray_SHORT; + } + else if (size == sizeof(long)) { + type_num = PyArray_LONG; + } + else if (size == sizeof(int)) { + type_num = PyArray_INT; + } + else if (size == sizeof(longlong)) { + type_num = PyArray_LONGLONG; + } + else { + PyErr_SetString(PyExc_ValueError, msg); + return NULL; + } + break; + case 'f': + if (size == sizeof(float)) { + type_num = PyArray_FLOAT; + } + else if (size == sizeof(double)) { + type_num = PyArray_DOUBLE; + } + else if (size == sizeof(longdouble)) { + type_num = PyArray_LONGDOUBLE; + } + else { + PyErr_SetString(PyExc_ValueError, msg); + return NULL; + } + break; + case 'c': + if (size == sizeof(float)*2) { + type_num = PyArray_CFLOAT; + } + else if (size == sizeof(double)*2) { + type_num = PyArray_CDOUBLE; + } + else if (size == sizeof(longdouble)*2) { + type_num = PyArray_CLONGDOUBLE; + } + else { + PyErr_SetString(PyExc_ValueError, msg); + return NULL; + } + break; + case 'O': + if (size == sizeof(PyObject *)) { + type_num = PyArray_OBJECT; + } + else { + PyErr_SetString(PyExc_ValueError, msg); + return NULL; + } + break; + case PyArray_STRINGLTR: + type_num = PyArray_STRING; + break; + case PyArray_UNICODELTR: + type_num = PyArray_UNICODE; + size <<= 2; + break; + case 'V': + type_num = PyArray_VOID; + break; + default: + PyErr_SetString(PyExc_ValueError, msg); + return NULL; + } -#undef _MY_FAIL - descr = PyArray_DescrFromType(type_num); - if (descr == NULL) return NULL; + if (descr == NULL) { + return NULL; + } swap = !PyArray_ISNBO(swapchar); if (descr->elsize == 0 || swap) { /* Need to make a new PyArray_Descr */ PyArray_DESCR_REPLACE(descr); - if (descr==NULL) return NULL; - if (descr->elsize == 0) + if (descr==NULL) { + return NULL; + } + if (descr->elsize == 0) { descr->elsize = size; - if (swap) + } + if (swap) { descr->byteorder = swapchar; + } } return descr; } @@ -8530,7 +9022,7 @@ static PyObject * PyArray_FromStructInterface(PyObject *input) { - PyArray_Descr *thetype=NULL; + PyArray_Descr *thetype = NULL; char buf[40]; PyArrayInterface *inter; PyObject *attr, *r; @@ -8541,9 +9033,13 @@ PyErr_Clear(); return Py_NotImplemented; } - if (!PyCObject_Check(attr)) goto fail; + if (!PyCObject_Check(attr)) { + goto fail; + } inter = PyCObject_AsVoidPtr(attr); - if (inter->two != 2) goto fail; + if (inter->two != 2) { + goto fail; + } if ((inter->flags & NOTSWAPPED) != NOTSWAPPED) { endian = PyArray_OPPBYTE; inter->flags &= ~NOTSWAPPED; @@ -8587,10 +9083,10 @@ static PyObject * PyArray_FromInterface(PyObject *input) { - PyObject *attr=NULL, *item=NULL; - PyObject *tstr=NULL, *shape=NULL; - PyObject *inter=NULL; - PyObject *base=NULL; + PyObject *attr = NULL, *item = NULL; + PyObject *tstr = NULL, *shape = NULL; + PyObject *inter = NULL; + PyObject *base = NULL; PyArrayObject *ret; PyArray_Descr *type=NULL; char *data; @@ -8605,26 +9101,42 @@ /* Get the strides */ inter = PyObject_GetAttrString(input, "__array_interface__"); - if (inter == NULL) {PyErr_Clear(); return Py_NotImplemented;} - if (!PyDict_Check(inter)) {Py_DECREF(inter); return Py_NotImplemented;} - + if (inter == NULL) { + PyErr_Clear(); + return Py_NotImplemented; + } + if (!PyDict_Check(inter)) { + Py_DECREF(inter); + return Py_NotImplemented; + } shape = PyDict_GetItemString(inter, "shape"); - if (shape == NULL) {Py_DECREF(inter); return Py_NotImplemented;} + if (shape == NULL) { + Py_DECREF(inter); + return Py_NotImplemented; + } tstr = PyDict_GetItemString(inter, "typestr"); - if (tstr == NULL) {Py_DECREF(inter); return Py_NotImplemented;} + if (tstr == NULL) { + Py_DECREF(inter); + return Py_NotImplemented; + } attr = PyDict_GetItemString(inter, "data"); base = input; if ((attr == NULL) || (attr==Py_None) || (!PyTuple_Check(attr))) { - if (attr && (attr != Py_None)) item=attr; - else item=input; - res = PyObject_AsWriteBuffer(item, (void **)&data, - &buffer_len); + if (attr && (attr != Py_None)) { + item = attr; + } + else { + item = input; + } + res = PyObject_AsWriteBuffer(item, (void **)&data, &buffer_len); if (res < 0) { PyErr_Clear(); - res = PyObject_AsReadBuffer(item, (const void **)&data, - &buffer_len); - if (res < 0) goto fail; + res = PyObject_AsReadBuffer( + item, (const void **)&data, &buffer_len); + if (res < 0) { + goto fail; + } dataflags &= ~WRITEABLE; } attr = PyDict_GetItemString(inter, "offset"); @@ -8679,7 +9191,9 @@ goto fail; } type = _array_typedescr_fromstr(PyString_AS_STRING(attr)); - if (type==NULL) goto fail; + if (type == NULL) { + goto fail; + } attr = shape; if (!PyTuple_Check(attr)) { PyErr_SetString(PyExc_TypeError, "shape must be a tuple"); @@ -8687,17 +9201,21 @@ goto fail; } n = PyTuple_GET_SIZE(attr); - for(i=0; ibase = base; @@ -8716,12 +9234,16 @@ Py_DECREF(ret); return NULL; } - for(i=0; istrides, strides, n*sizeof(intp)); } else PyErr_Clear(); @@ -8742,35 +9264,38 @@ PyObject *array_meth; array_meth = PyObject_GetAttrString(op, "__array__"); - if (array_meth == NULL) {PyErr_Clear(); return Py_NotImplemented;} + if (array_meth == NULL) { + PyErr_Clear(); + return Py_NotImplemented; + } if (context == NULL) { - if (typecode == NULL) new = PyObject_CallFunction(array_meth, - NULL); - else new = PyObject_CallFunction(array_meth, "O", typecode); + if (typecode == NULL) { + new = PyObject_CallFunction(array_meth, NULL); + } + else { + new = PyObject_CallFunction(array_meth, "O", typecode); + } } else { if (typecode == NULL) { - new = PyObject_CallFunction(array_meth, "OO", Py_None, - context); - if (new == NULL && \ - PyErr_ExceptionMatches(PyExc_TypeError)) { + new = PyObject_CallFunction(array_meth, "OO", Py_None, context); + if (new == NULL && PyErr_ExceptionMatches(PyExc_TypeError)) { PyErr_Clear(); new = PyObject_CallFunction(array_meth, ""); } } else { - new = PyObject_CallFunction(array_meth, "OO", - typecode, context); - if (new == NULL && \ - PyErr_ExceptionMatches(PyExc_TypeError)) { + new = PyObject_CallFunction(array_meth, "OO", typecode, context); + if (new == NULL && PyErr_ExceptionMatches(PyExc_TypeError)) { PyErr_Clear(); - new = PyObject_CallFunction(array_meth, "O", - typecode); + new = PyObject_CallFunction(array_meth, "O", typecode); } } } Py_DECREF(array_meth); - if (new == NULL) return NULL; + if (new == NULL) { + return NULL; + } if (!PyArray_Check(new)) { PyErr_SetString(PyExc_ValueError, "object __array__ method not " \ @@ -8781,23 +9306,27 @@ return new; } -/* Does not check for ENSURECOPY and NOTSWAPPED in flags */ -/* Steals a reference to newtype --- which can be NULL */ -/*NUMPY_API*/ +/*NUMPY_API + * Does not check for ENSURECOPY and NOTSWAPPED in flags + * Steals a reference to newtype --- which can be NULL + */ static PyObject * PyArray_FromAny(PyObject *op, PyArray_Descr *newtype, int min_depth, int max_depth, int flags, PyObject *context) { - /* This is the main code to make a NumPy array from a Python - Object. It is called from lot's of different places which - is why there are so many checks. The comments try to - explain some of the checks. */ - - PyObject *r=NULL; + /* + * This is the main code to make a NumPy array from a Python + * Object. It is called from lot's of different places which + * is why there are so many checks. The comments try to + * explain some of the checks. + */ + PyObject *r = NULL; int seq = FALSE; - /* Is input object already an array? */ - /* This is where the flags are used */ + /* + * Is input object already an array? + * This is where the flags are used + */ if (PyArray_Check(op)) { r = PyArray_FromArray((PyArrayObject *)op, newtype, flags); } @@ -8821,8 +9350,7 @@ return NULL; } if (newtype != NULL || flags != 0) { - new = PyArray_FromArray((PyArrayObject *)r, newtype, - flags); + new = PyArray_FromArray((PyArrayObject *)r, newtype, flags); Py_DECREF(r); r = new; } @@ -8858,7 +9386,7 @@ PyErr_Clear(); if (isobject) { Py_INCREF(newtype); - r = ObjectArray_FromNestedList \ + r = ObjectArray_FromNestedList (op, newtype, flags & FORTRAN); seq = TRUE; Py_DECREF(newtype); @@ -8880,7 +9408,6 @@ } /* Be sure we succeed here */ - if(!PyArray_Check(r)) { PyErr_SetString(PyExc_RuntimeError, "internal error: PyArray_FromAny "\ @@ -8910,8 +9437,9 @@ return NULL; } -/* new reference -- accepts NULL for mintype*/ -/*NUMPY_API*/ +/*NUMPY_API +* new reference -- accepts NULL for mintype +*/ static PyArray_Descr * PyArray_DescrFromObject(PyObject *op, PyArray_Descr *mintype) { @@ -8919,9 +9447,8 @@ } /*NUMPY_API - Return the typecode of the array a Python object would be converted - to -*/ + * Return the typecode of the array a Python object would be converted to + */ static int PyArray_ObjectType(PyObject *op, int minimum_type) { @@ -8930,7 +9457,9 @@ int ret; intype = PyArray_DescrFromType(minimum_type); - if (intype == NULL) PyErr_Clear(); + if (intype == NULL) { + PyErr_Clear(); + } outtype = _array_find_type(op, intype, MAX_DIMS); ret = outtype->type_num; Py_DECREF(outtype); @@ -8939,56 +9468,57 @@ } -/* flags is any of - CONTIGUOUS, - FORTRAN, - ALIGNED, - WRITEABLE, - NOTSWAPPED, - ENSURECOPY, - UPDATEIFCOPY, - FORCECAST, - ENSUREARRAY, - ELEMENTSTRIDES +/* + * flags is any of + * CONTIGUOUS, + * FORTRAN, + * ALIGNED, + * WRITEABLE, + * NOTSWAPPED, + * ENSURECOPY, + * UPDATEIFCOPY, + * FORCECAST, + * ENSUREARRAY, + * ELEMENTSTRIDES + * + * or'd (|) together + * + * Any of these flags present means that the returned array should + * guarantee that aspect of the array. Otherwise the returned array + * won't guarantee it -- it will depend on the object as to whether or + * not it has such features. + * + * Note that ENSURECOPY is enough + * to guarantee CONTIGUOUS, ALIGNED and WRITEABLE + * and therefore it is redundant to include those as well. + * + * BEHAVED == ALIGNED | WRITEABLE + * CARRAY = CONTIGUOUS | BEHAVED + * FARRAY = FORTRAN | BEHAVED + * + * FORTRAN can be set in the FLAGS to request a FORTRAN array. + * Fortran arrays are always behaved (aligned, + * notswapped, and writeable) and not (C) CONTIGUOUS (if > 1d). + * + * UPDATEIFCOPY flag sets this flag in the returned array if a copy is + * made and the base argument points to the (possibly) misbehaved array. + * When the new array is deallocated, the original array held in base + * is updated with the contents of the new array. + * + * FORCECAST will cause a cast to occur regardless of whether or not + * it is safe. + */ - or'd (|) together - - Any of these flags present means that the returned array should - guarantee that aspect of the array. Otherwise the returned array - won't guarantee it -- it will depend on the object as to whether or - not it has such features. - - Note that ENSURECOPY is enough - to guarantee CONTIGUOUS, ALIGNED and WRITEABLE - and therefore it is redundant to include those as well. - - BEHAVED == ALIGNED | WRITEABLE - CARRAY = CONTIGUOUS | BEHAVED - FARRAY = FORTRAN | BEHAVED - - FORTRAN can be set in the FLAGS to request a FORTRAN array. - Fortran arrays are always behaved (aligned, - notswapped, and writeable) and not (C) CONTIGUOUS (if > 1d). - - UPDATEIFCOPY flag sets this flag in the returned array if a copy is - made and the base argument points to the (possibly) misbehaved array. - When the new array is deallocated, the original array held in base - is updated with the contents of the new array. - - FORCECAST will cause a cast to occur regardless of whether or not - it is safe. -*/ - - -/* steals a reference to descr -- accepts NULL */ -/*NUMPY_API*/ +/*NUMPY_API + * steals a reference to descr -- accepts NULL + */ static PyObject * PyArray_CheckFromAny(PyObject *op, PyArray_Descr *descr, int min_depth, int max_depth, int requires, PyObject *context) { PyObject *obj; if (requires & NOTSWAPPED) { - if (!descr && PyArray_Check(op) && \ + if (!descr && PyArray_Check(op) && !PyArray_ISNBO(PyArray_DESCR(op)->byteorder)) { descr = PyArray_DescrNew(PyArray_DESCR(op)); } @@ -9000,9 +9530,10 @@ } } - obj = PyArray_FromAny(op, descr, min_depth, max_depth, - requires, context); - if (obj == NULL) return NULL; + obj = PyArray_FromAny(op, descr, min_depth, max_depth, requires, context); + if (obj == NULL) { + return NULL; + } if ((requires & ELEMENTSTRIDES) && !PyArray_ElementStrides(obj)) { PyObject *new; @@ -9013,25 +9544,25 @@ return obj; } -/* This is a quick wrapper around PyArray_FromAny(op, NULL, 0, 0, - ENSUREARRAY) */ -/* that special cases Arrays and PyArray_Scalars up front */ -/* It *steals a reference* to the object */ -/* It also guarantees that the result is PyArray_Type */ - -/* Because it decrefs op if any conversion needs to take place - so it can be used like PyArray_EnsureArray(some_function(...)) */ - -/*NUMPY_API*/ +/*NUMPY_API + * This is a quick wrapper around PyArray_FromAny(op, NULL, 0, 0, ENSUREARRAY) + * that special cases Arrays and PyArray_Scalars up front + * It *steals a reference* to the object + * It also guarantees that the result is PyArray_Type + * Because it decrefs op if any conversion needs to take place + * so it can be used like PyArray_EnsureArray(some_function(...)) + */ static PyObject * PyArray_EnsureArray(PyObject *op) { PyObject *new; - if (op == NULL) return NULL; - - if (PyArray_CheckExact(op)) return op; - + if (op == NULL) { + return NULL; + } + if (PyArray_CheckExact(op)) { + return op; + } if (PyArray_Check(op)) { new = PyArray_View((PyArrayObject *)op, NULL, &PyArray_Type); Py_DECREF(op); @@ -9051,25 +9582,36 @@ static PyObject * PyArray_EnsureAnyArray(PyObject *op) { - if (op && PyArray_Check(op)) return op; + if (op && PyArray_Check(op)) { + return op; + } return PyArray_EnsureArray(op); } /*NUMPY_API - Check the type coercion rules. -*/ + *Check the type coercion rules. + */ static int PyArray_CanCastSafely(int fromtype, int totype) { PyArray_Descr *from, *to; register int felsize, telsize; - if (fromtype == totype) return 1; - if (fromtype == PyArray_BOOL) return 1; - if (totype == PyArray_BOOL) return 0; - if (totype == PyArray_OBJECT || totype == PyArray_VOID) return 1; - if (fromtype == PyArray_OBJECT || fromtype == PyArray_VOID) return 0; - + if (fromtype == totype) { + return 1; + } + if (fromtype == PyArray_BOOL) { + return 1; + } + if (totype == PyArray_BOOL) { + return 0; + } + if (totype == PyArray_OBJECT || totype == PyArray_VOID) { + return 1; + } + if (fromtype == PyArray_OBJECT || fromtype == PyArray_VOID) { + return 0; + } from = PyArray_DescrFromType(fromtype); /* * cancastto is a PyArray_NOTYPE terminated C-int-array of types that @@ -9079,11 +9621,14 @@ int *curtype; curtype = from->f->cancastto; while (*curtype != PyArray_NOTYPE) { - if (*curtype++ == totype) return 1; + if (*curtype++ == totype) { + return 1; + } } } - if (PyTypeNum_ISUSERDEF(totype)) return 0; - + if (PyTypeNum_ISUSERDEF(totype)) { + return 0; + } to = PyArray_DescrFromType(totype); telsize = to->elsize; felsize = from->elsize; @@ -9091,79 +9636,94 @@ Py_DECREF(to); switch(fromtype) { - case PyArray_BYTE: - case PyArray_SHORT: - case PyArray_INT: - case PyArray_LONG: - case PyArray_LONGLONG: - if (PyTypeNum_ISINTEGER(totype)) { - if (PyTypeNum_ISUNSIGNED(totype)) { - return 0; + case PyArray_BYTE: + case PyArray_SHORT: + case PyArray_INT: + case PyArray_LONG: + case PyArray_LONGLONG: + if (PyTypeNum_ISINTEGER(totype)) { + if (PyTypeNum_ISUNSIGNED(totype)) { + return 0; + } + else { + return telsize >= felsize; + } } + else if (PyTypeNum_ISFLOAT(totype)) { + if (felsize < 8) { + return telsize > felsize; + } + else { + return telsize >= felsize; + } + } + else if (PyTypeNum_ISCOMPLEX(totype)) { + if (felsize < 8) { + return (telsize >> 1) > felsize; + } + else { + return (telsize >> 1) >= felsize; + } + } else { - return (telsize >= felsize); + return totype > fromtype; } - } - else if (PyTypeNum_ISFLOAT(totype)) { - if (felsize < 8) - return (telsize > felsize); - else - return (telsize >= felsize); - } - else if (PyTypeNum_ISCOMPLEX(totype)) { - if (felsize < 8) - return ((telsize >> 1) > felsize); - else - return ((telsize >> 1) >= felsize); - } - else return totype > fromtype; - case PyArray_UBYTE: - case PyArray_USHORT: - case PyArray_UINT: - case PyArray_ULONG: - case PyArray_ULONGLONG: - if (PyTypeNum_ISINTEGER(totype)) { - if (PyTypeNum_ISSIGNED(totype)) { - return (telsize > felsize); + case PyArray_UBYTE: + case PyArray_USHORT: + case PyArray_UINT: + case PyArray_ULONG: + case PyArray_ULONGLONG: + if (PyTypeNum_ISINTEGER(totype)) { + if (PyTypeNum_ISSIGNED(totype)) { + return telsize > felsize; + } + else { + return telsize >= felsize; + } } + else if (PyTypeNum_ISFLOAT(totype)) { + if (felsize < 8) { + return telsize > felsize; + } + else { + return telsize >= felsize; + } + } + else if (PyTypeNum_ISCOMPLEX(totype)) { + if (felsize < 8) { + return (telsize >> 1) > felsize; + } + else { + return (telsize >> 1) >= felsize; + } + } else { - return (telsize >= felsize); + return totype > fromtype; } - } - else if (PyTypeNum_ISFLOAT(totype)) { - if (felsize < 8) - return (telsize > felsize); - else - return (telsize >= felsize); - } - else if (PyTypeNum_ISCOMPLEX(totype)) { - if (felsize < 8) - return ((telsize >> 1) > felsize); - else - return ((telsize >> 1) >= felsize); - } - else return totype > fromtype; - case PyArray_FLOAT: - case PyArray_DOUBLE: - case PyArray_LONGDOUBLE: - if (PyTypeNum_ISCOMPLEX(totype)) - return ((telsize >> 1) >= felsize); - else - return (totype > fromtype); - case PyArray_CFLOAT: - case PyArray_CDOUBLE: - case PyArray_CLONGDOUBLE: - return (totype > fromtype); - case PyArray_STRING: - case PyArray_UNICODE: - return (totype > fromtype); - default: - return 0; + case PyArray_FLOAT: + case PyArray_DOUBLE: + case PyArray_LONGDOUBLE: + if (PyTypeNum_ISCOMPLEX(totype)) { + return (telsize >> 1) >= felsize; + } + else { + return totype > fromtype; + } + case PyArray_CFLOAT: + case PyArray_CDOUBLE: + case PyArray_CLONGDOUBLE: + return totype > fromtype; + case PyArray_STRING: + case PyArray_UNICODE: + return totype > fromtype; + default: + return 0; } } -/* leaves reference count alone --- cannot be NULL*/ -/*NUMPY_API*/ +/*NUMPY_API + * leaves reference count alone --- cannot be NULL + */ static Bool PyArray_CanCastTo(PyArray_Descr *from, PyArray_Descr *to) { @@ -9172,14 +9732,14 @@ Bool ret; ret = (Bool) PyArray_CanCastSafely(fromtype, totype); - if (ret) { /* Check String and Unicode more closely */ + if (ret) { + /* Check String and Unicode more closely */ if (fromtype == PyArray_STRING) { if (totype == PyArray_STRING) { ret = (from->elsize <= to->elsize); } else if (totype == PyArray_UNICODE) { - ret = (from->elsize << 2 \ - <= to->elsize); + ret = (from->elsize << 2 <= to->elsize); } } else if (fromtype == PyArray_UNICODE) { @@ -9187,17 +9747,18 @@ ret = (from->elsize <= to->elsize); } } - /* TODO: If totype is STRING or unicode - see if the length is long enough to hold the - stringified value of the object. - */ + /* + * TODO: If totype is STRING or unicode + * see if the length is long enough to hold the + * stringified value of the object. + */ } return ret; } /*NUMPY_API - See if array scalars can be cast. -*/ + * See if array scalars can be cast. + */ static Bool PyArray_CanCastScalar(PyTypeObject *from, PyTypeObject *to) { @@ -9206,8 +9767,9 @@ fromtype = _typenum_fromtypeobj((PyObject *)from, 0); totype = _typenum_fromtypeobj((PyObject *)to, 0); - if (fromtype == PyArray_NOTYPE || totype == PyArray_NOTYPE) + if (fromtype == PyArray_NOTYPE || totype == PyArray_NOTYPE) { return FALSE; + } return (Bool) PyArray_CanCastSafely(fromtype, totype); } @@ -9217,8 +9779,8 @@ /* and Python's array iterator ***/ /*NUMPY_API - Get Iterator. -*/ + * Get Iterator. + */ static PyObject * PyArray_IterNew(PyObject *obj) { @@ -9234,26 +9796,29 @@ it = (PyArrayIterObject *)_pya_malloc(sizeof(PyArrayIterObject)); PyObject_Init((PyObject *)it, &PyArrayIter_Type); /* it = PyObject_New(PyArrayIterObject, &PyArrayIter_Type);*/ - if (it == NULL) + if (it == NULL) { return NULL; - + } nd = ao->nd; PyArray_UpdateFlags(ao, CONTIGUOUS); - if PyArray_ISCONTIGUOUS(ao) it->contiguous = 1; - else it->contiguous = 0; + if (PyArray_ISCONTIGUOUS(ao)) { + it->contiguous = 1; + } + else { + it->contiguous = 0; + } Py_INCREF(ao); it->ao = ao; it->size = PyArray_SIZE(ao); it->nd_m1 = nd - 1; it->factors[nd-1] = 1; - for(i=0; i < nd; i++) { + for (i = 0; i < nd; i++) { it->dims_m1[i] = ao->dimensions[i] - 1; it->strides[i] = ao->strides[i]; - it->backstrides[i] = it->strides[i] * \ - it->dims_m1[i]; - if (i > 0) - it->factors[nd-i-1] = it->factors[nd-i] * \ - ao->dimensions[nd-i]; + it->backstrides[i] = it->strides[i] * it->dims_m1[i]; + if (i > 0) { + it->factors[nd-i-1] = it->factors[nd-i] * ao->dimensions[nd-i]; + } } PyArray_ITER_RESET(it); @@ -9261,8 +9826,8 @@ } /*NUMPY_API - Get Iterator broadcast to a particular shape -*/ + * Get Iterator broadcast to a particular shape + */ static PyObject * PyArray_BroadcastToShape(PyObject *obj, intp *dims, int nd) { @@ -9270,51 +9835,57 @@ int i, diff, j, compat, k; PyArrayObject *ao = (PyArrayObject *)obj; - if (ao->nd > nd) goto err; + if (ao->nd > nd) { + goto err; + } compat = 1; diff = j = nd - ao->nd; - for(i=0; ind; i++, j++) { - if (ao->dimensions[i] == 1) continue; + for (i = 0; i < ao->nd; i++, j++) { + if (ao->dimensions[i] == 1) { + continue; + } if (ao->dimensions[i] != dims[j]) { compat = 0; break; } } - if (!compat) goto err; - + if (!compat) { + goto err; + } it = (PyArrayIterObject *)_pya_malloc(sizeof(PyArrayIterObject)); PyObject_Init((PyObject *)it, &PyArrayIter_Type); - if (it == NULL) + if (it == NULL) { return NULL; - + } PyArray_UpdateFlags(ao, CONTIGUOUS); - if PyArray_ISCONTIGUOUS(ao) it->contiguous = 1; - else it->contiguous = 0; + if (PyArray_ISCONTIGUOUS(ao)) { + it->contiguous = 1; + } + else { + it->contiguous = 0; + } Py_INCREF(ao); it->ao = ao; it->size = PyArray_MultiplyList(dims, nd); it->nd_m1 = nd - 1; it->factors[nd-1] = 1; - for(i=0; i < nd; i++) { + for (i = 0; i < nd; i++) { it->dims_m1[i] = dims[i] - 1; k = i - diff; - if ((k < 0) || - ao->dimensions[k] != dims[i]) { + if ((k < 0) || ao->dimensions[k] != dims[i]) { it->contiguous = 0; it->strides[i] = 0; } else { it->strides[i] = ao->strides[k]; } - it->backstrides[i] = it->strides[i] * \ - it->dims_m1[i]; - if (i > 0) - it->factors[nd-i-1] = it->factors[nd-i] * \ - dims[nd-i]; + it->backstrides[i] = it->strides[i] * it->dims_m1[i]; + if (i > 0) { + it->factors[nd-i-1] = it->factors[nd-i] * dims[nd-i]; + } } PyArray_ITER_RESET(it); - return (PyObject *)it; err: @@ -9328,29 +9899,31 @@ /*NUMPY_API - Get Iterator that iterates over all but one axis (don't use this with - PyArray_ITER_GOTO1D). The axis will be over-written if negative - with the axis having the smallest stride. -*/ + * Get Iterator that iterates over all but one axis (don't use this with + * PyArray_ITER_GOTO1D). The axis will be over-written if negative + * with the axis having the smallest stride. + */ static PyObject * PyArray_IterAllButAxis(PyObject *obj, int *inaxis) { PyArrayIterObject *it; int axis; it = (PyArrayIterObject *)PyArray_IterNew(obj); - if (it == NULL) return NULL; - - if (PyArray_NDIM(obj)==0) + if (it == NULL) { + return NULL; + } + if (PyArray_NDIM(obj)==0) { return (PyObject *)it; + } if (*inaxis < 0) { - int i, minaxis=0; - intp minstride=0; + int i, minaxis = 0; + intp minstride = 0; i = 0; - while (minstride==0 && i 0 && PyArray_STRIDE(obj, i) < minstride) { minaxis = i; @@ -9368,21 +9941,21 @@ it->dims_m1[axis] = 0; it->backstrides[axis] = 0; - /* (won't fix factors so don't use - PyArray_ITER_GOTO1D with this iterator) */ + /* + * (won't fix factors so don't use + * PyArray_ITER_GOTO1D with this iterator) + */ return (PyObject *)it; } - -/* don't use with PyArray_ITER_GOTO1D because factors are not - adjusted */ - /*NUMPY_API - Adjusts previously broadcasted iterators so that the axis with - the smallest sum of iterator strides is not iterated over. - Returns dimension which is smallest in the range [0,multi->nd). - A -1 is returned if multi->nd == 0. -*/ + * Adjusts previously broadcasted iterators so that the axis with + * the smallest sum of iterator strides is not iterated over. + * Returns dimension which is smallest in the range [0,multi->nd). + * A -1 is returned if multi->nd == 0. + * + * don't use with PyArray_ITER_GOTO1D because factors are not adjusted + */ static int PyArray_RemoveSmallest(PyArrayMultiIterObject *multi) { @@ -9392,34 +9965,33 @@ intp smallest; intp sumstrides[NPY_MAXDIMS]; - if (multi->nd == 0) return -1; - - - for(i=0; ind; i++) { + if (multi->nd == 0) { + return -1; + } + for (i = 0; i < multi->nd; i++) { sumstrides[i] = 0; - for(j=0; jnumiter; j++) { + for (j = 0; j < multi->numiter; j++) { sumstrides[i] += multi->iters[j]->strides[i]; } } - axis=0; + axis = 0; smallest = sumstrides[0]; /* Find longest dimension */ - for(i=1; ind; i++) { + for (i = 1; i < multi->nd; i++) { if (sumstrides[i] < smallest) { axis = i; smallest = sumstrides[i]; } } - - for(i=0; inumiter; i++) { + for(i = 0; i < multi->numiter; i++) { it = multi->iters[i]; it->contiguous = 0; - if (it->size != 0) + if (it->size != 0) { it->size /= (it->dims_m1[axis]+1); + } it->dims_m1[axis] = 0; it->backstrides[axis] = 0; } - multi->size = multi->iters[0]->size; return axis; } @@ -9457,7 +10029,7 @@ iter_subscript_Bool(PyArrayIterObject *self, PyArrayObject *ind) { int index, strides, itemsize; - intp count=0; + intp count = 0; char *dptr, *optr; PyObject *r; int swap; @@ -9479,9 +10051,10 @@ strides = ind->strides[0]; dptr = ind->data; /* Get size of return array */ - while(index--) { - if (*((Bool *)dptr) != 0) + while (index--) { + if (*((Bool *)dptr) != 0) { count++; + } dptr += strides; } itemsize = self->ao->descr->elsize; @@ -9490,17 +10063,17 @@ self->ao->descr, 1, &count, NULL, NULL, 0, (PyObject *)self->ao); - if (r==NULL) return NULL; - + if (r == NULL) { + return NULL; + } /* Set up loop */ optr = PyArray_DATA(r); index = ind->dimensions[0]; dptr = ind->data; - copyswap = self->ao->descr->f->copyswap; /* Loop over Boolean array */ swap = (PyArray_ISNOTSWAPPED(self->ao) != PyArray_ISNOTSWAPPED(r)); - while(index--) { + while (index--) { if (*((Bool *)dptr) != 0) { copyswap(optr, self->dataptr, swap, self->ao); optr += itemsize; @@ -9527,7 +10100,9 @@ itemsize = self->ao->descr->elsize; if (ind->nd == 0) { num = *((intp *)ind->data); - if (num < 0) num += self->size; + if (num < 0) { + num += self->size; + } if (num < 0 || num >= self->size) { PyErr_Format(PyExc_IndexError, "index %d out of bounds" \ @@ -9548,17 +10123,23 @@ ind->nd, ind->dimensions, NULL, NULL, 0, (PyObject *)self->ao); - if (r==NULL) return NULL; - + if (r == NULL) { + return NULL; + } optr = PyArray_DATA(r); ind_it = (PyArrayIterObject *)PyArray_IterNew((PyObject *)ind); - if (ind_it == NULL) {Py_DECREF(r); return NULL;} + if (ind_it == NULL) { + Py_DECREF(r); + return NULL; + } index = ind_it->size; copyswap = PyArray_DESCR(r)->f->copyswap; swap = (PyArray_ISNOTSWAPPED(r) != PyArray_ISNOTSWAPPED(self->ao)); - while(index--) { + while (index--) { num = *((intp *)(ind_it->dataptr)); - if (num < 0) num += self->size; + if (num < 0) { + num += self->size; + } if (num < 0 || num >= self->size) { PyErr_Format(PyExc_IndexError, "index %d out of bounds" \ @@ -9583,7 +10164,7 @@ static PyObject * iter_subscript(PyArrayIterObject *self, PyObject *ind) { - PyArray_Descr *indtype=NULL; + PyArray_Descr *indtype = NULL; intp start, step_size; intp n_steps; PyObject *r; @@ -9601,7 +10182,9 @@ if (PyTuple_Check(ind)) { int len; len = PyTuple_GET_SIZE(ind); - if (len > 1) goto fail; + if (len > 1) { + goto fail; + } if (len == 0) { Py_INCREF(self->ao); return (PyObject *)self->ao; @@ -9609,12 +10192,11 @@ ind = PyTuple_GET_ITEM(ind, 0); } - /* Tuples >1d not accepted --- i.e. no newaxis */ - /* Could implement this with adjusted strides - and dimensions in iterator */ - - /* Check for Boolean -- this is first becasue - Bool is a subclass of Int */ + /* + * Tuples >1d not accepted --- i.e. no newaxis + * Could implement this with adjusted strides and dimensions in iterator + * Check for Boolean -- this is first becasue Bool is a subclass of Int + */ PyArray_ITER_RESET(self); if (PyBool_Check(ind)) { @@ -9634,12 +10216,12 @@ } /* Check for Integer or Slice */ - if (PyLong_Check(ind) || PyInt_Check(ind) || PySlice_Check(ind)) { start = parse_subindex(ind, &step_size, &n_steps, self->size); - if (start == -1) + if (start == -1) { goto fail; + } if (n_steps == RubberIndex || n_steps == PseudoIndex) { PyErr_SetString(PyExc_IndexError, "cannot use Ellipsis or newaxes here"); @@ -9658,10 +10240,12 @@ 1, &n_steps, NULL, NULL, 0, (PyObject *)self->ao); - if (r==NULL) goto fail; + if (r == NULL) { + goto fail; + } dptr = PyArray_DATA(r); copyswap = PyArray_DESCR(r)->f->copyswap; - while(n_steps--) { + while (n_steps--) { copyswap(dptr, self->dataptr, 0, r); start += step_size; PyArray_ITER_GOTO1D(self, start) @@ -9672,12 +10256,13 @@ } /* convert to INTP array if Integer array scalar or List */ - indtype = PyArray_DescrFromType(PyArray_INTP); if (PyArray_IsScalar(ind, Integer) || PyList_Check(ind)) { Py_INCREF(indtype); obj = PyArray_FromAny(ind, indtype, 0, 0, FORCECAST, NULL); - if (obj == NULL) goto fail; + if (obj == NULL) { + goto fail; + } } else { Py_INCREF(ind); @@ -9695,7 +10280,9 @@ PyObject *new; new = PyArray_FromAny(obj, indtype, 0, 0, FORCECAST | ALIGNED, NULL); - if (new==NULL) goto fail; + if (new == NULL) { + goto fail; + } Py_DECREF(obj); obj = new; r = iter_subscript_int(self, (PyArrayObject *)obj); @@ -9706,12 +10293,15 @@ Py_DECREF(obj); return r; } - else Py_DECREF(indtype); + else { + Py_DECREF(indtype); + } fail: - if (!PyErr_Occurred()) + if (!PyErr_Occurred()) { PyErr_SetString(PyExc_IndexError, "unsupported iterator index"); + } Py_XDECREF(indtype); Py_XDECREF(obj); return NULL; @@ -9745,12 +10335,13 @@ PyArray_ITER_RESET(self); /* Loop over Boolean array */ copyswap = self->ao->descr->f->copyswap; - while(index--) { + while (index--) { if (*((Bool *)dptr) != 0) { copyswap(self->dataptr, val->dataptr, swap, self->ao); PyArray_ITER_NEXT(val); - if (val->index==val->size) + if (val->index == val->size) { PyArray_ITER_RESET(val); + } } dptr += strides; PyArray_ITER_NEXT(self); @@ -9778,11 +10369,15 @@ return 0; } ind_it = (PyArrayIterObject *)PyArray_IterNew((PyObject *)ind); - if (ind_it == NULL) return -1; + if (ind_it == NULL) { + return -1; + } index = ind_it->size; - while(index--) { + while (index--) { num = *((intp *)(ind_it->dataptr)); - if (num < 0) num += self->size; + if (num < 0) { + num += self->size; + } if ((num < 0) || (num >= self->size)) { PyErr_Format(PyExc_IndexError, "index %d out of bounds" \ @@ -9795,8 +10390,9 @@ copyswap(self->dataptr, val->dataptr, swap, self->ao); PyArray_ITER_NEXT(ind_it); PyArray_ITER_NEXT(val); - if (val->index == val->size) + if (val->index == val->size) { PyArray_ITER_RESET(val); + } } Py_DECREF(ind_it); return 0; @@ -9805,14 +10401,14 @@ static int iter_ass_subscript(PyArrayIterObject *self, PyObject *ind, PyObject *val) { - PyObject *arrval=NULL; - PyArrayIterObject *val_it=NULL; + PyObject *arrval = NULL; + PyArrayIterObject *val_it = NULL; PyArray_Descr *type; - PyArray_Descr *indtype=NULL; - int swap, retval=-1; + PyArray_Descr *indtype = NULL; + int swap, retval = -1; intp start, step_size; intp n_steps; - PyObject *obj=NULL; + PyObject *obj = NULL; PyArray_CopySwapFunc *copyswap; @@ -9826,15 +10422,18 @@ if (PyTuple_Check(ind)) { int len; len = PyTuple_GET_SIZE(ind); - if (len > 1) goto finish; + if (len > 1) { + goto finish; + } ind = PyTuple_GET_ITEM(ind, 0); } type = self->ao->descr; - /* Check for Boolean -- this is first becasue - Bool is a subclass of Int */ - + /* + * Check for Boolean -- this is first becasue + * Bool is a subclass of Int + */ if (PyBool_Check(ind)) { retval = 0; if (PyObject_IsTrue(ind)) { @@ -9843,9 +10442,13 @@ goto finish; } - if (PySequence_Check(ind) || PySlice_Check(ind)) goto skip; + if (PySequence_Check(ind) || PySlice_Check(ind)) { + goto skip; + } start = PyArray_PyIntAsIntp(ind); - if (start==-1 && PyErr_Occurred()) PyErr_Clear(); + if (start==-1 && PyErr_Occurred()) { + PyErr_Clear(); + } else { if (start < -self->size || start >= self->size) { PyErr_Format(PyExc_ValueError, @@ -9867,41 +10470,48 @@ skip: Py_INCREF(type); arrval = PyArray_FromAny(val, type, 0, 0, 0, NULL); - if (arrval==NULL) return -1; + if (arrval == NULL) { + return -1; + } val_it = (PyArrayIterObject *)PyArray_IterNew(arrval); - if (val_it==NULL) goto finish; - if (val_it->size == 0) {retval = 0; goto finish;} + if (val_it == NULL) { + goto finish; + } + if (val_it->size == 0) { + retval = 0; + goto finish; + } copyswap = PyArray_DESCR(arrval)->f->copyswap; swap = (PyArray_ISNOTSWAPPED(self->ao)!=PyArray_ISNOTSWAPPED(arrval)); /* Check Slice */ - if (PySlice_Check(ind)) { - start = parse_subindex(ind, &step_size, &n_steps, - self->size); - if (start == -1) goto finish; + start = parse_subindex(ind, &step_size, &n_steps, self->size); + if (start == -1) { + goto finish; + } if (n_steps == RubberIndex || n_steps == PseudoIndex) { PyErr_SetString(PyExc_IndexError, "cannot use Ellipsis or newaxes here"); goto finish; } PyArray_ITER_GOTO1D(self, start); - if (n_steps == SingleIndex) { /* Integer */ - copyswap(self->dataptr, PyArray_DATA(arrval), - swap, arrval); + if (n_steps == SingleIndex) { + /* Integer */ + copyswap(self->dataptr, PyArray_DATA(arrval), swap, arrval); PyArray_ITER_RESET(self); - retval=0; + retval = 0; goto finish; } - while(n_steps--) { - copyswap(self->dataptr, val_it->dataptr, - swap, arrval); + while (n_steps--) { + copyswap(self->dataptr, val_it->dataptr, swap, arrval); start += step_size; - PyArray_ITER_GOTO1D(self, start) - PyArray_ITER_NEXT(val_it); - if (val_it->index == val_it->size) + PyArray_ITER_GOTO1D(self, start); + PyArray_ITER_NEXT(val_it); + if (val_it->index == val_it->size) { PyArray_ITER_RESET(val_it); + } } PyArray_ITER_RESET(self); retval = 0; @@ -9909,7 +10519,6 @@ } /* convert to INTP array if Integer array scalar or List */ - indtype = PyArray_DescrFromType(PyArray_INTP); if (PyList_Check(ind)) { Py_INCREF(indtype); @@ -9924,8 +10533,9 @@ /* Check for Boolean object */ if (PyArray_TYPE(obj)==PyArray_BOOL) { if (iter_ass_sub_Bool(self, (PyArrayObject *)obj, - val_it, swap) < 0) + val_it, swap) < 0) { goto finish; + } retval=0; } /* Check for integer array */ @@ -9936,18 +10546,21 @@ FORCECAST | BEHAVED_NS, NULL); Py_DECREF(obj); obj = new; - if (new==NULL) goto finish; + if (new == NULL) { + goto finish; + } if (iter_ass_sub_int(self, (PyArrayObject *)obj, - val_it, swap) < 0) + val_it, swap) < 0) { goto finish; - retval=0; + } + retval = 0; } } finish: - if (!PyErr_Occurred() && retval < 0) - PyErr_SetString(PyExc_IndexError, - "unsupported iterator index"); + if (!PyErr_Occurred() && retval < 0) { + PyErr_SetString(PyExc_IndexError, "unsupported iterator index"); + } Py_XDECREF(indtype); Py_XDECREF(obj); Py_XDECREF(val_it); @@ -9979,13 +10592,12 @@ /* Any argument ignored */ /* Two options: - 1) underlying array is contiguous - -- return 1-d wrapper around it - 2) underlying array is not contiguous - -- make new 1-d contiguous array with updateifcopy flag set - to copy back to the old array - */ - + * 1) underlying array is contiguous + * -- return 1-d wrapper around it + * 2) underlying array is not contiguous + * -- make new 1-d contiguous array with updateifcopy flag set + * to copy back to the old array + */ size = PyArray_SIZE(it->ao); Py_INCREF(it->ao->descr); if (PyArray_ISCONTIGUOUS(it->ao)) { @@ -9995,7 +10607,9 @@ NULL, it->ao->data, it->ao->flags, (PyObject *)it->ao); - if (r==NULL) return NULL; + if (r == NULL) { + return NULL; + } } else { r = PyArray_NewFromDescr(&PyArray_Type, @@ -10003,7 +10617,9 @@ 1, &size, NULL, NULL, 0, (PyObject *)it->ao); - if (r==NULL) return NULL; + if (r == NULL) { + return NULL; + } if (_flat_copyinto(r, (PyObject *)it->ao, PyArray_CORDER) < 0) { Py_DECREF(r); @@ -10021,7 +10637,9 @@ static PyObject * iter_copy(PyArrayIterObject *it, PyObject *args) { - if (!PyArg_ParseTuple(args, "")) return NULL; + if (!PyArg_ParseTuple(args, "")) { + return NULL; + } return PyArray_Flatten(it->ao, 0); } @@ -10038,7 +10656,9 @@ PyArrayObject *new; PyObject *ret; new = (PyArrayObject *)iter_array(self, NULL); - if (new == NULL) return NULL; + if (new == NULL) { + return NULL; + } ret = array_richcompare(new, other, cmp_op); Py_DECREF(new); return ret; @@ -10056,12 +10676,15 @@ { int nd; nd = self->ao->nd; - if (self->contiguous) { /* coordinates not kept track of --- need to generate - from index */ + if (self->contiguous) { + /* + * coordinates not kept track of --- + * need to generate from index + */ intp val; int i; val = self->index; - for(i=0;icoordinates[i] = val / self->factors[i]; val = val % self->factors[i]; } @@ -10078,60 +10701,60 @@ static PyTypeObject PyArrayIter_Type = { PyObject_HEAD_INIT(NULL) - 0, /* ob_size */ - "numpy.flatiter", /* tp_name */ - sizeof(PyArrayIterObject), /* tp_basicsize */ - 0, /* tp_itemsize */ + 0, /* ob_size */ + "numpy.flatiter", /* tp_name */ + sizeof(PyArrayIterObject), /* tp_basicsize */ + 0, /* tp_itemsize */ /* methods */ - (destructor)arrayiter_dealloc, /* tp_dealloc */ - 0, /* tp_print */ - 0, /* tp_getattr */ - 0, /* tp_setattr */ - 0, /* tp_compare */ - 0, /* tp_repr */ - 0, /* tp_as_number */ - 0, /* tp_as_sequence */ - &iter_as_mapping, /* tp_as_mapping */ - 0, /* tp_hash */ - 0, /* tp_call */ - 0, /* tp_str */ - 0, /* tp_getattro */ - 0, /* tp_setattro */ - 0, /* tp_as_buffer */ - Py_TPFLAGS_DEFAULT, /* tp_flags */ - 0, /* tp_doc */ - 0, /* tp_traverse */ - 0, /* tp_clear */ - (richcmpfunc)iter_richcompare, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ - 0, /* tp_iter */ - (iternextfunc)arrayiter_next, /* tp_iternext */ - iter_methods, /* tp_methods */ - iter_members, /* tp_members */ - iter_getsets, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - 0, /* tp_init */ - 0, /* tp_alloc */ - 0, /* tp_new */ - 0, /* tp_free */ - 0, /* tp_is_gc */ - 0, /* tp_bases */ - 0, /* tp_mro */ - 0, /* tp_cache */ - 0, /* tp_subclasses */ - 0, /* tp_weaklist */ - 0, /* tp_del */ + (destructor)arrayiter_dealloc, /* tp_dealloc */ + 0, /* tp_print */ + 0, /* tp_getattr */ + 0, /* tp_setattr */ + 0, /* tp_compare */ + 0, /* tp_repr */ + 0, /* tp_as_number */ + 0, /* tp_as_sequence */ + &iter_as_mapping, /* tp_as_mapping */ + 0, /* tp_hash */ + 0, /* tp_call */ + 0, /* tp_str */ + 0, /* tp_getattro */ + 0, /* tp_setattro */ + 0, /* tp_as_buffer */ + Py_TPFLAGS_DEFAULT, /* tp_flags */ + 0, /* tp_doc */ + 0, /* tp_traverse */ + 0, /* tp_clear */ + (richcmpfunc)iter_richcompare, /* tp_richcompare */ + 0, /* tp_weaklistoffset */ + 0, /* tp_iter */ + (iternextfunc)arrayiter_next, /* tp_iternext */ + iter_methods, /* tp_methods */ + iter_members, /* tp_members */ + iter_getsets, /* tp_getset */ + 0, /* tp_base */ + 0, /* tp_dict */ + 0, /* tp_descr_get */ + 0, /* tp_descr_set */ + 0, /* tp_dictoffset */ + 0, /* tp_init */ + 0, /* tp_alloc */ + 0, /* tp_new */ + 0, /* tp_free */ + 0, /* tp_is_gc */ + 0, /* tp_bases */ + 0, /* tp_mro */ + 0, /* tp_cache */ + 0, /* tp_subclasses */ + 0, /* tp_weaklist */ + 0, /* tp_del */ #ifdef COUNT_ALLOCS - /* these must be last and never explicitly initialized */ - 0, /* tp_allocs */ - 0, /* tp_frees */ - 0, /* tp_maxalloc */ - 0, /* tp_prev */ - 0, /* *tp_next */ + /* these must be last and never explicitly initialized */ + 0, /* tp_allocs */ + 0, /* tp_frees */ + 0, /* tp_maxalloc */ + 0, /* tp_prev */ + 0, /* *tp_next */ #endif }; @@ -10162,18 +10785,23 @@ PyArray_Descr *indtype; PyObject *arr; - if (PySlice_Check(obj) || (obj == Py_Ellipsis)) + if (PySlice_Check(obj) || (obj == Py_Ellipsis)) { return 0; + } else if (PyArray_Check(obj) && PyArray_ISBOOL(obj)) { return _nonzero_indices(obj, iter); } else { indtype = PyArray_DescrFromType(PyArray_INTP); arr = PyArray_FromAny(obj, indtype, 0, 0, FORCECAST, NULL); - if (arr == NULL) return -1; + if (arr == NULL) { + return -1; + } *iter = (PyArrayIterObject *)PyArray_IterNew(arr); Py_DECREF(arr); - if (*iter == NULL) return -1; + if (*iter == NULL) { + return -1; + } } return 1; } @@ -10190,23 +10818,26 @@ PyArrayIterObject *it; /* Discover the broadcast number of dimensions */ - for(i=0, nd=0; inumiter; i++) + for (i = 0, nd = 0; i < mit->numiter; i++) { nd = MAX(nd, mit->iters[i]->ao->nd); + } mit->nd = nd; /* Discover the broadcast shape in each dimension */ - for(i=0; idimensions[i] = 1; - for(j=0; jnumiter; j++) { + for (j = 0; j < mit->numiter; j++) { it = mit->iters[j]; - /* This prepends 1 to shapes not already - equal to nd */ + /* This prepends 1 to shapes not already equal to nd */ k = i + it->ao->nd - nd; - if (k>=0) { + if (k >= 0) { tmp = it->ao->dimensions[k]; - if (tmp == 1) continue; - if (mit->dimensions[i] == 1) + if (tmp == 1) { + continue; + } + if (mit->dimensions[i] == 1) { mit->dimensions[i] = tmp; + } else if (mit->dimensions[i] != tmp) { PyErr_SetString(PyExc_ValueError, "shape mismatch: objects" \ @@ -10218,9 +10849,11 @@ } } - /* Reset the iterator dimensions and strides of each iterator - object -- using 0 valued strides for broadcasting */ - /* Need to check for overflow */ + /* + * Reset the iterator dimensions and strides of each iterator + * object -- using 0 valued strides for broadcasting + * Need to check for overflow + */ tmp = PyArray_OverflowMultiplyList(mit->dimensions, mit->nd); if (tmp < 0) { PyErr_SetString(PyExc_ValueError, @@ -10228,18 +10861,20 @@ return -1; } mit->size = tmp; - for(i=0; inumiter; i++) { + for (i = 0; i < mit->numiter; i++) { it = mit->iters[i]; it->nd_m1 = mit->nd - 1; it->size = tmp; nd = it->ao->nd; it->factors[mit->nd-1] = 1; - for(j=0; j < mit->nd; j++) { + for (j = 0; j < mit->nd; j++) { it->dims_m1[j] = mit->dimensions[j] - 1; k = j + nd - mit->nd; - /* If this dimension was added or shape - of underlying array was 1 */ - if ((k < 0) || \ + /* + * If this dimension was added or shape of + * underlying array was 1 + */ + if ((k < 0) || it->ao->dimensions[k] != mit->dimensions[j]) { it->contiguous = 0; it->strides[j] = 0; @@ -10247,12 +10882,10 @@ else { it->strides[j] = it->ao->strides[k]; } - it->backstrides[j] = it->strides[j] * \ - it->dims_m1[j]; + it->backstrides[j] = it->strides[j] * it->dims_m1[j]; if (j > 0) - it->factors[mit->nd-j-1] = \ - it->factors[mit->nd-j] * \ - mit->dimensions[mit->nd-j]; + it->factors[mit->nd-j-1] = + it->factors[mit->nd-j] * mit->dimensions[mit->nd-j]; } PyArray_ITER_RESET(it); } @@ -10274,12 +10907,11 @@ if (mit->subspace != NULL) { memcpy(coord, mit->bscoord, sizeof(intp)*mit->ait->ao->nd); PyArray_ITER_RESET(mit->subspace); - for(i=0; inumiter; i++) { + for (i = 0; i < mit->numiter; i++) { it = mit->iters[i]; PyArray_ITER_RESET(it); j = mit->iteraxes[i]; - copyswap(coord+j,it->dataptr, - !PyArray_ISNOTSWAPPED(it->ao), + copyswap(coord+j,it->dataptr, !PyArray_ISNOTSWAPPED(it->ao), it->ao); } PyArray_ITER_GOTO(mit->ait, coord); @@ -10287,15 +10919,16 @@ mit->dataptr = mit->subspace->dataptr; } else { - for(i=0; inumiter; i++) { + for (i = 0; i < mit->numiter; i++) { it = mit->iters[i]; if (it->size != 0) { PyArray_ITER_RESET(it); - copyswap(coord+i,it->dataptr, - !PyArray_ISNOTSWAPPED(it->ao), + copyswap(coord+i,it->dataptr, !PyArray_ISNOTSWAPPED(it->ao), it->ao); } - else coord[i] = 0; + else { + coord[i] = 0; + } } PyArray_ITER_GOTO(mit->ait, coord); mit->dataptr = mit->ait->dataptr; @@ -10303,9 +10936,10 @@ return; } -/* This function needs to update the state of the map iterator - and point mit->dataptr to the memory-location of the next object -*/ +/* + * This function needs to update the state of the map iterator + * and point mit->dataptr to the memory-location of the next object + */ static void PyArray_MapIterNext(PyArrayMapIterObject *mit) { @@ -10315,23 +10949,22 @@ PyArray_CopySwapFunc *copyswap; mit->index += 1; - if (mit->index >= mit->size) return; + if (mit->index >= mit->size) { + return; + } copyswap = mit->iters[0]->ao->descr->f->copyswap; /* Sub-space iteration */ if (mit->subspace != NULL) { PyArray_ITER_NEXT(mit->subspace); if (mit->subspace->index >= mit->subspace->size) { - /* reset coord to coordinates of - beginning of the subspace */ - memcpy(coord, mit->bscoord, - sizeof(intp)*mit->ait->ao->nd); + /* reset coord to coordinates of beginning of the subspace */ + memcpy(coord, mit->bscoord, sizeof(intp)*mit->ait->ao->nd); PyArray_ITER_RESET(mit->subspace); - for(i=0; inumiter; i++) { + for (i = 0; i < mit->numiter; i++) { it = mit->iters[i]; PyArray_ITER_NEXT(it); j = mit->iteraxes[i]; - copyswap(coord+j,it->dataptr, - !PyArray_ISNOTSWAPPED(it->ao), + copyswap(coord+j,it->dataptr, !PyArray_ISNOTSWAPPED(it->ao), it->ao); } PyArray_ITER_GOTO(mit->ait, coord); @@ -10340,7 +10973,7 @@ mit->dataptr = mit->subspace->dataptr; } else { - for(i=0; inumiter; i++) { + for (i = 0; i < mit->numiter; i++) { it = mit->iters[i]; PyArray_ITER_NEXT(it); copyswap(coord+i,it->dataptr, @@ -10353,26 +10986,26 @@ return; } -/* Bind a mapiteration to a particular array */ - -/* Determine if subspace iteration is necessary. If so, - 1) Fill in mit->iteraxes - 2) Create subspace iterator - 3) Update nd, dimensions, and size. - - Subspace iteration is necessary if: arr->nd > mit->numiter -*/ - -/* Need to check for index-errors somewhere. - - Let's do it at bind time and also convert all <0 values to >0 here - as well. -*/ +/* + * Bind a mapiteration to a particular array + * + * Determine if subspace iteration is necessary. If so, + * 1) Fill in mit->iteraxes + * 2) Create subspace iterator + * 3) Update nd, dimensions, and size. + * + * Subspace iteration is necessary if: arr->nd > mit->numiter + * + * Need to check for index-errors somewhere. + * + * Let's do it at bind time and also convert all <0 values to >0 here + * as well. + */ static void PyArray_MapIterBind(PyArrayMapIterObject *mit, PyArrayObject *arr) { int subnd; - PyObject *sub, *obj=NULL; + PyObject *sub, *obj = NULL; int i, j, n, curraxis, ellipexp, noellip; PyArrayIterObject *it; intp dimsize; @@ -10386,22 +11019,24 @@ } mit->ait = (PyArrayIterObject *)PyArray_IterNew((PyObject *)arr); - if (mit->ait == NULL) return; - + if (mit->ait == NULL) { + return; + } /* no subspace iteration needed. Finish up and Return */ if (subnd == 0) { n = arr->nd; - for(i=0; iiteraxes[i] = i; } goto finish; } - /* all indexing arrays have been converted to 0 - therefore we can extract the subspace with a simple - getitem call which will use view semantics - */ - /* But, be sure to do it with a true array. + /* + * all indexing arrays have been converted to 0 + * therefore we can extract the subspace with a simple + * getitem call which will use view semantics + * + * But, be sure to do it with a true array. */ if (PyArray_CheckExact(arr)) { sub = array_subscript_simple(arr, mit->indexobj); @@ -10409,54 +11044,65 @@ else { Py_INCREF(arr); obj = PyArray_EnsureArray((PyObject *)arr); - if (obj == NULL) goto fail; + if (obj == NULL) { + goto fail; + } sub = array_subscript_simple((PyArrayObject *)obj, mit->indexobj); Py_DECREF(obj); } - if (sub == NULL) goto fail; + if (sub == NULL) { + goto fail; + } mit->subspace = (PyArrayIterObject *)PyArray_IterNew(sub); Py_DECREF(sub); - if (mit->subspace == NULL) goto fail; - + if (mit->subspace == NULL) { + goto fail; + } /* Expand dimensions of result */ n = mit->subspace->ao->nd; - for(i=0; idimensions[mit->nd+i] = mit->subspace->ao->dimensions[i]; + } mit->nd += n; - /* Now, we still need to interpret the ellipsis and slice objects - to determine which axes the indexing arrays are referring to - */ + /* + * Now, we still need to interpret the ellipsis and slice objects + * to determine which axes the indexing arrays are referring to + */ n = PyTuple_GET_SIZE(mit->indexobj); - /* The number of dimensions an ellipsis takes up */ ellipexp = arr->nd - n + 1; - /* Now fill in iteraxes -- remember indexing arrays have been - converted to 0's in mit->indexobj */ + /* + * Now fill in iteraxes -- remember indexing arrays have been + * converted to 0's in mit->indexobj + */ curraxis = 0; j = 0; - noellip = 1; /* Only expand the first ellipsis */ + /* Only expand the first ellipsis */ + noellip = 1; memset(mit->bscoord, 0, sizeof(intp)*arr->nd); - for(i=0; iindexobj, i); - if (PyInt_Check(obj) || PyLong_Check(obj)) + if (PyInt_Check(obj) || PyLong_Check(obj)) { mit->iteraxes[j++] = curraxis++; + } else if (noellip && obj == Py_Ellipsis) { curraxis += ellipexp; noellip = 0; } else { - intp start=0; + intp start = 0; intp stop, step; - /* Should be slice object or - another Ellipsis */ + /* Should be slice object or another Ellipsis */ if (obj == Py_Ellipsis) { mit->bscoord[curraxis] = 0; } - else if (!PySlice_Check(obj) || \ + else if (!PySlice_Check(obj) || (slice_GetIndices((PySliceObject *)obj, arr->dimensions[curraxis], &start, &stop, &step, @@ -10473,6 +11119,7 @@ curraxis += 1; } } + finish: /* Here check the indexes (now that we have iteraxes) */ mit->size = PyArray_OverflowMultiplyList(mit->dimensions, mit->nd); @@ -10487,15 +11134,17 @@ goto fail; } - for(i=0; inumiter; i++) { + for (i = 0; i < mit->numiter; i++) { intp indval; it = mit->iters[i]; PyArray_ITER_RESET(it); dimsize = arr->dimensions[mit->iteraxes[i]]; - while(it->index < it->size) { + while (it->index < it->size) { indptr = ((intp *)it->dataptr); indval = *indptr; - if (indval < 0) indval += dimsize; + if (indval < 0) { + indval += dimsize; + } if (indval < 0 || indval >= dimsize) { PyErr_Format(PyExc_IndexError, "index (%d) out of range "\ @@ -10518,14 +11167,15 @@ return; } -/* This function takes a Boolean array and constructs index objects and - iterators as if nonzero(Bool) had been called -*/ +/* + * This function takes a Boolean array and constructs index objects and + * iterators as if nonzero(Bool) had been called + */ static int _nonzero_indices(PyObject *myBool, PyArrayIterObject **iters) { PyArray_Descr *typecode; - PyArrayObject *ba =NULL, *new=NULL; + PyArrayObject *ba = NULL, *new = NULL; int nd, j; intp size, i, count; Bool *ptr; @@ -10535,45 +11185,59 @@ typecode=PyArray_DescrFromType(PyArray_BOOL); ba = (PyArrayObject *)PyArray_FromAny(myBool, typecode, 0, 0, CARRAY, NULL); - if (ba == NULL) return -1; + if (ba == NULL) { + return -1; + } nd = ba->nd; - for(j=0; jdata; count = 0; /* pre-determine how many nonzero entries there are */ - for(i=0; iao->data; coords[j] = 0; dims_m1[j] = ba->dimensions[j]-1; } - ptr = (Bool *)ba->data; + if (count == 0) { + goto finish; + } - if (count == 0) goto finish; - - /* Loop through the Boolean array and copy coordinates - for non-zero entries */ - for(i=0; i=0; j--) { + for (j = nd - 1; j >= 0; j--) { if (coords[j] < dims_m1[j]) { coords[j]++; break; @@ -10589,7 +11253,7 @@ return nd; fail: - for(j=0; jiters[i] = NULL; + } mit->index = 0; mit->ait = NULL; mit->subspace = NULL; @@ -10632,7 +11298,9 @@ if (fancy == SOBJ_LISTTUP) { PyObject *newobj; newobj = PySequence_Tuple(indexobj); - if (newobj == NULL) goto fail; + if (newobj == NULL) { + goto fail; + } Py_DECREF(indexobj); indexobj = newobj; mit->indexobj = indexobj; @@ -10644,57 +11312,72 @@ #undef SOBJ_TOOMANY #undef SOBJ_LISTTUP - if (oned) return (PyObject *)mit; + if (oned) { + return (PyObject *)mit; + } + /* + * Must have some kind of fancy indexing if we are here + * indexobj is either a list, an arrayobject, or a tuple + * (with at least 1 list or arrayobject or Bool object) + */ - /* Must have some kind of fancy indexing if we are here */ - /* indexobj is either a list, an arrayobject, or a tuple - (with at least 1 list or arrayobject or Bool object), */ - /* convert all inputs to iterators */ - if (PyArray_Check(indexobj) && \ - (PyArray_TYPE(indexobj) == PyArray_BOOL)) { + if (PyArray_Check(indexobj) && (PyArray_TYPE(indexobj) == PyArray_BOOL)) { mit->numiter = _nonzero_indices(indexobj, mit->iters); - if (mit->numiter < 0) goto fail; + if (mit->numiter < 0) { + goto fail; + } mit->nd = 1; mit->dimensions[0] = mit->iters[0]->dims_m1[0]+1; Py_DECREF(mit->indexobj); mit->indexobj = PyTuple_New(mit->numiter); - if (mit->indexobj == NULL) goto fail; - for(i=0; inumiter; i++) { - PyTuple_SET_ITEM(mit->indexobj, i, - PyInt_FromLong(0)); + if (mit->indexobj == NULL) { + goto fail; } + for (i = 0; i < mit->numiter; i++) { + PyTuple_SET_ITEM(mit->indexobj, i, PyInt_FromLong(0)); + } } else if (PyArray_Check(indexobj) || !PyTuple_Check(indexobj)) { mit->numiter = 1; indtype = PyArray_DescrFromType(PyArray_INTP); arr = PyArray_FromAny(indexobj, indtype, 0, 0, FORCECAST, NULL); - if (arr == NULL) goto fail; + if (arr == NULL) { + goto fail; + } mit->iters[0] = (PyArrayIterObject *)PyArray_IterNew(arr); - if (mit->iters[0] == NULL) {Py_DECREF(arr); goto fail;} + if (mit->iters[0] == NULL) { + Py_DECREF(arr); + goto fail; + } mit->nd = PyArray_NDIM(arr); - memcpy(mit->dimensions,PyArray_DIMS(arr),mit->nd*sizeof(intp)); + memcpy(mit->dimensions, PyArray_DIMS(arr), mit->nd*sizeof(intp)); mit->size = PyArray_SIZE(arr); Py_DECREF(arr); Py_DECREF(mit->indexobj); mit->indexobj = Py_BuildValue("(N)", PyInt_FromLong(0)); } - else { /* must be a tuple */ + else { + /* must be a tuple */ PyObject *obj; PyArrayIterObject **iterp; PyObject *new; int numiters, j, n2; - /* Make a copy of the tuple -- we will be replacing - index objects with 0's */ + /* + * Make a copy of the tuple -- we will be replacing + * index objects with 0's + */ n = PyTuple_GET_SIZE(indexobj); n2 = n; new = PyTuple_New(n2); - if (new == NULL) goto fail; + if (new == NULL) { + goto fail; + } started = 0; nonindex = 0; j = 0; - for(i=0; iiters + mit->numiter; if ((numiters=_convert_obj(obj, iterp)) < 0) { @@ -10703,39 +11386,45 @@ } if (numiters > 0) { started = 1; - if (nonindex) mit->consec = 0; + if (nonindex) { + mit->consec = 0; + } mit->numiter += numiters; if (numiters == 1) { - PyTuple_SET_ITEM(new,j++, - PyInt_FromLong(0)); + PyTuple_SET_ITEM(new,j++, PyInt_FromLong(0)); } - else { /* we need to grow the - new indexing object and fill - it with 0s for each of the iterators - produced */ + else { + /* + * we need to grow the new indexing object and fill + * it with 0s for each of the iterators produced + */ int k; n2 += numiters - 1; - if (_PyTuple_Resize(&new, n2) < 0) + if (_PyTuple_Resize(&new, n2) < 0) { goto fail; - for(k=0;kindexobj); mit->indexobj = new; - /* Store the number of iterators actually converted */ - /* These will be mapped to actual axes at bind time */ - if (PyArray_Broadcast((PyArrayMultiIterObject *)mit) < 0) + /* + * Store the number of iterators actually converted + * These will be mapped to actual axes at bind time + */ + if (PyArray_Broadcast((PyArrayMultiIterObject *)mit) < 0) { goto fail; + } } return (PyObject *)mit; @@ -10753,96 +11442,94 @@ Py_XDECREF(mit->indexobj); Py_XDECREF(mit->ait); Py_XDECREF(mit->subspace); - for(i=0; inumiter; i++) + for (i = 0; i < mit->numiter; i++) { Py_XDECREF(mit->iters[i]); + } _pya_free(mit); } -/* The mapiter object must be created new each time. It does not work - to bind to a new array, and continue. - - This was the orginal intention, but currently that does not work. - Do not expose the MapIter_Type to Python. - - It's not very useful anyway, since mapiter(indexobj); mapiter.bind(a); - mapiter is equivalent to a[indexobj].flat but the latter gets to use - slice syntax. -*/ - +/* + * The mapiter object must be created new each time. It does not work + * to bind to a new array, and continue. + * + * This was the orginal intention, but currently that does not work. + * Do not expose the MapIter_Type to Python. + * + * It's not very useful anyway, since mapiter(indexobj); mapiter.bind(a); + * mapiter is equivalent to a[indexobj].flat but the latter gets to use + * slice syntax. + */ static PyTypeObject PyArrayMapIter_Type = { PyObject_HEAD_INIT(NULL) - 0, /* ob_size */ - "numpy.mapiter", /* tp_name */ - sizeof(PyArrayIterObject), /* tp_basicsize */ - 0, /* tp_itemsize */ + 0, /* ob_size */ + "numpy.mapiter", /* tp_name */ + sizeof(PyArrayIterObject), /* tp_basicsize */ + 0, /* tp_itemsize */ /* methods */ - (destructor)arraymapiter_dealloc, /* tp_dealloc */ - 0, /* tp_print */ - 0, /* tp_getattr */ - 0, /* tp_setattr */ - 0, /* tp_compare */ - 0, /* tp_repr */ - 0, /* tp_as_number */ - 0, /* tp_as_sequence */ - 0, /* tp_as_mapping */ - 0, /* tp_hash */ - 0, /* tp_call */ - 0, /* tp_str */ - 0, /* tp_getattro */ - 0, /* tp_setattro */ - 0, /* tp_as_buffer */ - Py_TPFLAGS_DEFAULT, /* tp_flags */ - 0, /* tp_doc */ - (traverseproc)0, /* tp_traverse */ - 0, /* tp_clear */ - 0, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ - 0, /* tp_iter */ - (iternextfunc)0, /* tp_iternext */ - 0, /* tp_methods */ - 0, /* tp_members */ - 0, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - (initproc)0, /* tp_init */ - 0, /* tp_alloc */ - 0, /* tp_new */ - 0, /* tp_free */ - 0, /* tp_is_gc */ - 0, /* tp_bases */ - 0, /* tp_mro */ - 0, /* tp_cache */ - 0, /* tp_subclasses */ - 0, /* tp_weaklist */ - 0, /* tp_del */ + (destructor)arraymapiter_dealloc, /* tp_dealloc */ + 0, /* tp_print */ + 0, /* tp_getattr */ + 0, /* tp_setattr */ + 0, /* tp_compare */ + 0, /* tp_repr */ + 0, /* tp_as_number */ + 0, /* tp_as_sequence */ + 0, /* tp_as_mapping */ + 0, /* tp_hash */ + 0, /* tp_call */ + 0, /* tp_str */ + 0, /* tp_getattro */ + 0, /* tp_setattro */ + 0, /* tp_as_buffer */ + Py_TPFLAGS_DEFAULT, /* tp_flags */ + 0, /* tp_doc */ + (traverseproc)0, /* tp_traverse */ + 0, /* tp_clear */ + 0, /* tp_richcompare */ + 0, /* tp_weaklistoffset */ + 0, /* tp_iter */ + (iternextfunc)0, /* tp_iternext */ + 0, /* tp_methods */ + 0, /* tp_members */ + 0, /* tp_getset */ + 0, /* tp_base */ + 0, /* tp_dict */ + 0, /* tp_descr_get */ + 0, /* tp_descr_set */ + 0, /* tp_dictoffset */ + (initproc)0, /* tp_init */ + 0, /* tp_alloc */ + 0, /* tp_new */ + 0, /* tp_free */ + 0, /* tp_is_gc */ + 0, /* tp_bases */ + 0, /* tp_mro */ + 0, /* tp_cache */ + 0, /* tp_subclasses */ + 0, /* tp_weaklist */ + 0, /* tp_del */ #ifdef COUNT_ALLOCS - /* these must be last and never explicitly initialized */ - 0, /* tp_allocs */ - 0, /* tp_frees */ - 0, /* tp_maxalloc */ - 0, /* tp_prev */ - 0, /* *tp_next */ + /* these must be last and never explicitly initialized */ + 0, /* tp_allocs */ + 0, /* tp_frees */ + 0, /* tp_maxalloc */ + 0, /* tp_prev */ + 0, /* *tp_next */ #endif - }; /** END of Subscript Iterator **/ -/* - NUMPY_API - Get MultiIterator from array of Python objects and any additional - - PyObject **mps -- array of PyObjects - int n - number of PyObjects in the array - int nadd - number of additional arrays to include in the - iterator. - - Returns a multi-iterator object. +/*NUMPY_API + * Get MultiIterator from array of Python objects and any additional + * + * PyObject **mps -- array of PyObjects + * int n - number of PyObjects in the array + * int nadd - number of additional arrays to include in the iterator. + * + * Returns a multi-iterator object. */ static PyObject * PyArray_MultiIterFromObjects(PyObject **mps, int n, int nadd, ...) @@ -10861,17 +11548,20 @@ "array objects (inclusive).", NPY_MAXARGS); return NULL; } - multi = _pya_malloc(sizeof(PyArrayMultiIterObject)); - if (multi == NULL) return PyErr_NoMemory(); + if (multi == NULL) { + return PyErr_NoMemory(); + } PyObject_Init((PyObject *)multi, &PyArrayMultiIter_Type); - for(i=0; iiters[i] = NULL; + for (i = 0; i < ntot; i++) { + multi->iters[i] = NULL; + } multi->numiter = ntot; multi->index = 0; va_start(va, nadd); - for(i=0; iiters[i] = (PyArrayIterObject *)PyArray_IterNew(arr); Py_DECREF(arr); } } - va_end(va); - if (!err && PyArray_Broadcast(multi) < 0) err=1; - + if (!err && PyArray_Broadcast(multi) < 0) { + err = 1; + } if (err) { Py_DECREF(multi); return NULL; } - PyArray_MultiIter_RESET(multi); - - return (PyObject *)multi; + return (PyObject *)multi; } /*NUMPY_API - Get MultiIterator, -*/ + * Get MultiIterator, + */ static PyObject * PyArray_MultiIterNew(int n, ...) { @@ -10913,7 +11602,7 @@ PyObject *current; PyObject *arr; - int i, err=0; + int i, err = 0; if (n < 2 || n > NPY_MAXARGS) { PyErr_Format(PyExc_ValueError, @@ -10925,37 +11614,40 @@ /* fprintf(stderr, "multi new...");*/ multi = _pya_malloc(sizeof(PyArrayMultiIterObject)); - if (multi == NULL) return PyErr_NoMemory(); + if (multi == NULL) { + return PyErr_NoMemory(); + } PyObject_Init((PyObject *)multi, &PyArrayMultiIter_Type); - for(i=0; iiters[i] = NULL; + for (i = 0; i < n; i++) { + multi->iters[i] = NULL; + } multi->numiter = n; multi->index = 0; va_start(va, n); - for(i=0; iiters[i] = (PyArrayIterObject *)PyArray_IterNew(arr); Py_DECREF(arr); } } - va_end(va); - if (!err && PyArray_Broadcast(multi) < 0) err=1; - + if (!err && PyArray_Broadcast(multi) < 0) { + err = 1; + } if (err) { Py_DECREF(multi); return NULL; } - PyArray_MultiIter_RESET(multi); - return (PyObject *)multi; } @@ -10975,7 +11667,9 @@ n = PyTuple_Size(args); if (n < 2 || n > NPY_MAXARGS) { - if (PyErr_Occurred()) return NULL; + if (PyErr_Occurred()) { + return NULL; + } PyErr_Format(PyExc_ValueError, "Need at least two and fewer than (%d) " \ "array objects.", NPY_MAXARGS); @@ -10983,23 +11677,31 @@ } multi = _pya_malloc(sizeof(PyArrayMultiIterObject)); - if (multi == NULL) return PyErr_NoMemory(); + if (multi == NULL) { + return PyErr_NoMemory(); + } PyObject_Init((PyObject *)multi, &PyArrayMultiIter_Type); multi->numiter = n; multi->index = 0; - for(i=0; iiters[i] = NULL; - for(i=0; iiters[i] = NULL; + } + for (i = 0; i < n; i++) { arr = PyArray_FromAny(PyTuple_GET_ITEM(args, i), NULL, 0, 0, 0, NULL); - if (arr == NULL) goto fail; - if ((multi->iters[i] = \ - (PyArrayIterObject *)PyArray_IterNew(arr))==NULL) + if (arr == NULL) { goto fail; + } + if ((multi->iters[i] = (PyArrayIterObject *)PyArray_IterNew(arr)) + == NULL) { + goto fail; + } Py_DECREF(arr); } - if (PyArray_Broadcast(multi) < 0) goto fail; + if (PyArray_Broadcast(multi) < 0) { + goto fail; + } PyArray_MultiIter_RESET(multi); - return (PyObject *)multi; fail: @@ -11015,9 +11717,11 @@ n = multi->numiter; ret = PyTuple_New(n); - if (ret == NULL) return NULL; + if (ret == NULL) { + return NULL; + } if (multi->index < multi->size) { - for(i=0; i < n; i++) { + for (i = 0; i < n; i++) { PyArrayIterObject *it=multi->iters[i]; PyTuple_SET_ITEM(ret, i, PyArray_ToScalar(it->dataptr, it->ao)); @@ -11034,8 +11738,9 @@ { int i; - for(i=0; inumiter; i++) + for (i = 0; i < multi->numiter; i++) { Py_XDECREF(multi->iters[i]); + } multi->ob_type->tp_free((PyObject *)multi); } @@ -11045,10 +11750,12 @@ #if SIZEOF_INTP <= SIZEOF_LONG return PyInt_FromLong((long) self->size); #else - if (self->size < MAX_LONG) + if (self->size < MAX_LONG) { return PyInt_FromLong((long) self->size); - else + } + else { return PyLong_FromLongLong((longlong) self->size); + } #endif } @@ -11058,10 +11765,12 @@ #if SIZEOF_INTP <= SIZEOF_LONG return PyInt_FromLong((long) self->index); #else - if (self->size < MAX_LONG) + if (self->size < MAX_LONG) { return PyInt_FromLong((long) self->index); - else + } + else { return PyLong_FromLongLong((longlong) self->index); + } #endif } @@ -11076,10 +11785,13 @@ { PyObject *res; int i, n; + n = self->numiter; res = PyTuple_New(n); - if (res == NULL) return res; - for(i=0; iiters[i]); PyTuple_SET_ITEM(res, i, (PyObject *)self->iters[i]); } @@ -11112,8 +11824,9 @@ static PyObject * arraymultiter_reset(PyArrayMultiIterObject *self, PyObject *args) { - if (!PyArg_ParseTuple(args, "")) return NULL; - + if (!PyArg_ParseTuple(args, "")) { + return NULL; + } PyArray_MultiIter_RESET(self); Py_INCREF(Py_None); return Py_None; @@ -11126,61 +11839,61 @@ static PyTypeObject PyArrayMultiIter_Type = { PyObject_HEAD_INIT(NULL) - 0, /* ob_size */ - "numpy.broadcast", /* tp_name */ - sizeof(PyArrayMultiIterObject), /* tp_basicsize */ - 0, /* tp_itemsize */ + 0, /* ob_size */ + "numpy.broadcast", /* tp_name */ + sizeof(PyArrayMultiIterObject), /* tp_basicsize */ + 0, /* tp_itemsize */ /* methods */ - (destructor)arraymultiter_dealloc, /* tp_dealloc */ - 0, /* tp_print */ - 0, /* tp_getattr */ - 0, /* tp_setattr */ - 0, /* tp_compare */ - 0, /* tp_repr */ - 0, /* tp_as_number */ - 0, /* tp_as_sequence */ - 0, /* tp_as_mapping */ - 0, /* tp_hash */ - 0, /* tp_call */ - 0, /* tp_str */ - 0, /* tp_getattro */ - 0, /* tp_setattro */ - 0, /* tp_as_buffer */ - Py_TPFLAGS_DEFAULT, /* tp_flags */ - 0, /* tp_doc */ - 0, /* tp_traverse */ - 0, /* tp_clear */ - 0, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ - 0, /* tp_iter */ - (iternextfunc)arraymultiter_next, /* tp_iternext */ - arraymultiter_methods, /* tp_methods */ - arraymultiter_members, /* tp_members */ - arraymultiter_getsetlist, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - (initproc)0, /* tp_init */ - 0, /* tp_alloc */ - arraymultiter_new, /* tp_new */ - 0, /* tp_free */ - 0, /* tp_is_gc */ - 0, /* tp_bases */ - 0, /* tp_mro */ - 0, /* tp_cache */ - 0, /* tp_subclasses */ - 0, /* tp_weaklist */ - 0, /* tp_del */ + (destructor)arraymultiter_dealloc, /* tp_dealloc */ + 0, /* tp_print */ + 0, /* tp_getattr */ + 0, /* tp_setattr */ + 0, /* tp_compare */ + 0, /* tp_repr */ + 0, /* tp_as_number */ + 0, /* tp_as_sequence */ + 0, /* tp_as_mapping */ + 0, /* tp_hash */ + 0, /* tp_call */ + 0, /* tp_str */ + 0, /* tp_getattro */ + 0, /* tp_setattro */ + 0, /* tp_as_buffer */ + Py_TPFLAGS_DEFAULT, /* tp_flags */ + 0, /* tp_doc */ + 0, /* tp_traverse */ + 0, /* tp_clear */ + 0, /* tp_richcompare */ + 0, /* tp_weaklistoffset */ + 0, /* tp_iter */ + (iternextfunc)arraymultiter_next, /* tp_iternext */ + arraymultiter_methods, /* tp_methods */ + arraymultiter_members, /* tp_members */ + arraymultiter_getsetlist, /* tp_getset */ + 0, /* tp_base */ + 0, /* tp_dict */ + 0, /* tp_descr_get */ + 0, /* tp_descr_set */ + 0, /* tp_dictoffset */ + (initproc)0, /* tp_init */ + 0, /* tp_alloc */ + arraymultiter_new, /* tp_new */ + 0, /* tp_free */ + 0, /* tp_is_gc */ + 0, /* tp_bases */ + 0, /* tp_mro */ + 0, /* tp_cache */ + 0, /* tp_subclasses */ + 0, /* tp_weaklist */ + 0, /* tp_del */ #ifdef COUNT_ALLOCS - /* these must be last and never explicitly initialized */ - 0, /* tp_allocs */ - 0, /* tp_frees */ - 0, /* tp_maxalloc */ - 0, /* tp_prev */ - 0, /* *tp_next */ + /* these must be last and never explicitly initialized */ + 0, /* tp_allocs */ + 0, /* tp_frees */ + 0, /* tp_maxalloc */ + 0, /* tp_prev */ + 0, /* *tp_next */ #endif }; @@ -11197,21 +11910,23 @@ return new; } -/*** Array Descr Objects for dynamic types **/ +/** Array Descr Objects for dynamic types **/ -/** There are some statically-defined PyArray_Descr objects corresponding - to the basic built-in types. - These can and should be DECREF'd and INCREF'd as appropriate, anyway. - If a mistake is made in reference counting, deallocation on these - builtins will be attempted leading to problems. +/* + * There are some statically-defined PyArray_Descr objects corresponding + * to the basic built-in types. + * These can and should be DECREF'd and INCREF'd as appropriate, anyway. + * If a mistake is made in reference counting, deallocation on these + * builtins will be attempted leading to problems. + * + * This let's us deal with all PyArray_Descr objects using reference + * counting (regardless of whether they are statically or dynamically + * allocated). + */ - This let's us deal with all PyArray_Descr objects using reference - counting (regardless of whether they are statically or dynamically - allocated). -**/ - -/* base cannot be NULL */ -/*NUMPY_API*/ +/*NUMPY_API + * base cannot be NULL + */ static PyArray_Descr * PyArray_DescrNew(PyArray_Descr *base) { @@ -11240,9 +11955,10 @@ return new; } -/* should never be called for builtin-types unless - there is a reference-count problem -*/ +/* + * should never be called for builtin-types unless + * there is a reference-count problem + */ static void arraydescr_dealloc(PyArray_Descr *self) { @@ -11265,20 +11981,29 @@ self->ob_type->tp_free((PyObject *)self); } -/* we need to be careful about setting attributes because these - objects are pointed to by arrays that depend on them for interpreting - data. Currently no attributes of data-type objects can be set - directly except names. -*/ +/* + * we need to be careful about setting attributes because these + * objects are pointed to by arrays that depend on them for interpreting + * data. Currently no attributes of data-type objects can be set + * directly except names. + */ static PyMemberDef arraydescr_members[] = { - {"type", T_OBJECT, offsetof(PyArray_Descr, typeobj), RO, NULL}, - {"kind", T_CHAR, offsetof(PyArray_Descr, kind), RO, NULL}, - {"char", T_CHAR, offsetof(PyArray_Descr, type), RO, NULL}, - {"num", T_INT, offsetof(PyArray_Descr, type_num), RO, NULL}, - {"byteorder", T_CHAR, offsetof(PyArray_Descr, byteorder), RO, NULL}, - {"itemsize", T_INT, offsetof(PyArray_Descr, elsize), RO, NULL}, - {"alignment", T_INT, offsetof(PyArray_Descr, alignment), RO, NULL}, - {"flags", T_UBYTE, offsetof(PyArray_Descr, hasobject), RO, NULL}, + {"type", + T_OBJECT, offsetof(PyArray_Descr, typeobj), RO, NULL}, + {"kind", + T_CHAR, offsetof(PyArray_Descr, kind), RO, NULL}, + {"char", + T_CHAR, offsetof(PyArray_Descr, type), RO, NULL}, + {"num", + T_INT, offsetof(PyArray_Descr, type_num), RO, NULL}, + {"byteorder", + T_CHAR, offsetof(PyArray_Descr, byteorder), RO, NULL}, + {"itemsize", + T_INT, offsetof(PyArray_Descr, elsize), RO, NULL}, + {"alignment", + T_INT, offsetof(PyArray_Descr, alignment), RO, NULL}, + {"flags", + T_UBYTE, offsetof(PyArray_Descr, hasobject), RO, NULL}, {NULL, 0, 0, 0, NULL}, }; @@ -11296,15 +12021,16 @@ static PyObject * arraydescr_protocol_typestr_get(PyArray_Descr *self) { - char basic_=self->kind; + char basic_ = self->kind; char endian = self->byteorder; - int size=self->elsize; + int size = self->elsize; if (endian == '=') { endian = '<'; - if (!PyArray_IsNativeByteOrder(endian)) endian = '>'; + if (!PyArray_IsNativeByteOrder(endian)) { + endian = '>'; + } } - if (self->type_num == PyArray_UNICODE) { size >>= 2; } @@ -11318,7 +12044,8 @@ PyTypeObject *typeobj = self->typeobj; PyObject *res; char *s; - static int prefix_len=0; + /* fixme: not reentrant */ + static int prefix_len = 0; if (PyTypeNum_ISUSERDEF(self->type_num)) { s = strrchr(typeobj->tp_name, '.'); @@ -11326,17 +12053,18 @@ res = PyString_FromString(typeobj->tp_name); } else { - res = PyString_FromStringAndSize(s+1, strlen(s)-1); + res = PyString_FromStringAndSize(s + 1, strlen(s) - 1); } return res; } else { - if (prefix_len == 0) + if (prefix_len == 0) { prefix_len = strlen("numpy."); - + } len = strlen(typeobj->tp_name); - if (*(typeobj->tp_name + (len-1)) == '_') - len-=1; + if (*(typeobj->tp_name + (len-1)) == '_') { + len -= 1; + } len -= prefix_len; res = PyString_FromStringAndSize(typeobj->tp_name+prefix_len, len); } @@ -11381,35 +12109,45 @@ if (self->names == NULL) { /* get default */ dobj = PyTuple_New(2); - if (dobj == NULL) return NULL; + if (dobj == NULL) { + return NULL; + } PyTuple_SET_ITEM(dobj, 0, PyString_FromString("")); - PyTuple_SET_ITEM(dobj, 1, \ - arraydescr_protocol_typestr_get(self)); + PyTuple_SET_ITEM(dobj, 1, arraydescr_protocol_typestr_get(self)); res = PyList_New(1); - if (res == NULL) {Py_DECREF(dobj); return NULL;} + if (res == NULL) { + Py_DECREF(dobj); + return NULL; + } PyList_SET_ITEM(res, 0, dobj); return res; } _numpy_internal = PyImport_ImportModule("numpy.core._internal"); - if (_numpy_internal == NULL) return NULL; - res = PyObject_CallMethod(_numpy_internal, "_array_descr", - "O", self); + if (_numpy_internal == NULL) { + return NULL; + } + res = PyObject_CallMethod(_numpy_internal, "_array_descr", "O", self); Py_DECREF(_numpy_internal); return res; } -/* returns 1 for a builtin type - and 2 for a user-defined data-type descriptor - return 0 if neither (i.e. it's a copy of one) -*/ +/* + * returns 1 for a builtin type + * and 2 for a user-defined data-type descriptor + * return 0 if neither (i.e. it's a copy of one) + */ static PyObject * arraydescr_isbuiltin_get(PyArray_Descr *self) { long val; val = 0; - if (self->fields == Py_None) val = 1; - if (PyTypeNum_ISUSERDEF(self->type_num)) val = 2; + if (self->fields == Py_None) { + val = 1; + } + if (PyTypeNum_ISUSERDEF(self->type_num)) { + val = 2; + } return PyInt_FromLong(val); } @@ -11420,34 +12158,42 @@ return PyArray_ISNBO(self->byteorder); } else { - PyObject *key, *value, *title=NULL; + PyObject *key, *value, *title = NULL; PyArray_Descr *new; int offset; - Py_ssize_t pos=0; - while(PyDict_Next(self->fields, &pos, &key, &value)) { - if NPY_TITLE_KEY(key, value) continue; - if (!PyArg_ParseTuple(value, "Oi|O", &new, &offset, - &title)) return -1; - if (!_arraydescr_isnative(new)) return 0; + Py_ssize_t pos = 0; + while (PyDict_Next(self->fields, &pos, &key, &value)) { + if NPY_TITLE_KEY(key, value) { + continue; + } + if (!PyArg_ParseTuple(value, "Oi|O", &new, &offset, &title)) { + return -1; + } + if (!_arraydescr_isnative(new)) { + return 0; + } } } return 1; } -/* return Py_True if this data-type descriptor - has native byteorder if no fields are defined - - or if all sub-fields have native-byteorder if - fields are defined -*/ +/* + * return Py_True if this data-type descriptor + * has native byteorder if no fields are defined + * + * or if all sub-fields have native-byteorder if + * fields are defined + */ static PyObject * arraydescr_isnative_get(PyArray_Descr *self) { PyObject *ret; int retval; retval = _arraydescr_isnative(self); - if (retval == -1) return NULL; - ret = (retval ? Py_True : Py_False); + if (retval == -1) { + return NULL; + } + ret = retval ? Py_True : Py_False; Py_INCREF(ret); return ret; } @@ -11466,10 +12212,12 @@ arraydescr_hasobject_get(PyArray_Descr *self) { PyObject *res; - if (PyDataType_FLAGCHK(self, NPY_ITEM_HASOBJECT)) + if (PyDataType_FLAGCHK(self, NPY_ITEM_HASOBJECT)) { res = Py_True; - else + } + else { res = Py_False; + } Py_INCREF(res); return res; } @@ -11503,9 +12251,9 @@ return -1; } /* Make sure all entries are strings */ - for(i=0; ifields == Py_None) { descr = PyArray_DescrNew(conv); @@ -11613,9 +12362,11 @@ static PyObject * arraydescr_reduce(PyArray_Descr *self, PyObject *NPY_UNUSED(args)) { - /* version number of this pickle type. Increment if we need to - change the format. Be sure to handle the old versions in - arraydescr_setstate. */ + /* + * version number of this pickle type. Increment if we need to + * change the format. Be sure to handle the old versions in + * arraydescr_setstate. + */ const int version = 3; PyObject *ret, *mod, *obj; PyObject *state; @@ -11623,15 +12374,23 @@ int elsize, alignment; ret = PyTuple_New(3); - if (ret == NULL) return NULL; + if (ret == NULL) { + return NULL; + } mod = PyImport_ImportModule("numpy.core.multiarray"); - if (mod == NULL) {Py_DECREF(ret); return NULL;} + if (mod == NULL) { + Py_DECREF(ret); + return NULL; + } obj = PyObject_GetAttrString(mod, "dtype"); Py_DECREF(mod); - if (obj == NULL) {Py_DECREF(ret); return NULL;} + if (obj == NULL) { + Py_DECREF(ret); + return NULL; + } PyTuple_SET_ITEM(ret, 0, obj); - if (PyTypeNum_ISUSERDEF(self->type_num) || \ - ((self->type_num == PyArray_VOID && \ + if (PyTypeNum_ISUSERDEF(self->type_num) || + ((self->type_num == PyArray_VOID && self->typeobj != &PyVoidArrType_Type))) { obj = (PyObject *)self->typeobj; Py_INCREF(obj); @@ -11645,12 +12404,16 @@ } PyTuple_SET_ITEM(ret, 1, Py_BuildValue("(Nii)", obj, 0, 1)); - /* Now return the state which is at least - byteorder, subarray, and fields */ + /* + * Now return the state which is at least byteorder, + * subarray, and fields + */ endian = self->byteorder; if (endian == '=') { endian = '<'; - if (!PyArray_IsNativeByteOrder(endian)) endian = '>'; + if (!PyArray_IsNativeByteOrder(endian)) { + endian = '>'; + } } state = PyTuple_New(8); PyTuple_SET_ITEM(state, 0, PyInt_FromLong(version)); @@ -11674,12 +12437,13 @@ elsize = self->elsize; alignment = self->alignment; } - else {elsize = -1; alignment = -1;} - + else { + elsize = -1; + alignment = -1; + } PyTuple_SET_ITEM(state, 5, PyInt_FromLong(elsize)); PyTuple_SET_ITEM(state, 6, PyInt_FromLong(alignment)); PyTuple_SET_ITEM(state, 7, PyInt_FromLong(self->hasobject)); - PyTuple_SET_ITEM(ret, 2, state); return ret; } @@ -11691,17 +12455,20 @@ _descr_find_object(PyArray_Descr *self) { if (self->hasobject || self->type_num == PyArray_OBJECT || - self->kind == 'O') + self->kind == 'O') { return NPY_OBJECT_DTYPE_FLAGS; + } if (PyDescr_HASFIELDS(self)) { - PyObject *key, *value, *title=NULL; + PyObject *key, *value, *title = NULL; PyArray_Descr *new; int offset; - Py_ssize_t pos=0; + Py_ssize_t pos = 0; + while (PyDict_Next(self->fields, &pos, &key, &value)) { - if NPY_TITLE_KEY(key, value) continue; - if (!PyArg_ParseTuple(value, "Oi|O", &new, &offset, - &title)) { + if NPY_TITLE_KEY(key, value) { + continue; + } + if (!PyArg_ParseTuple(value, "Oi|O", &new, &offset, &title)) { PyErr_Clear(); return 0; } @@ -11714,64 +12481,68 @@ return 0; } -/* state is at least byteorder, subarray, and fields but could include elsize - and alignment for EXTENDED arrays -*/ - +/* + * state is at least byteorder, subarray, and fields but could include elsize + * and alignment for EXTENDED arrays + */ static PyObject * arraydescr_setstate(PyArray_Descr *self, PyObject *args) { int elsize = -1, alignment = -1; int version = 3; char endian; - PyObject *subarray, *fields, *names=NULL; + PyObject *subarray, *fields, *names = NULL; int incref_names = 1; - int dtypeflags=0; + int dtypeflags = 0; - if (self->fields == Py_None) {Py_INCREF(Py_None); return Py_None;} - + if (self->fields == Py_None) { + Py_INCREF(Py_None); + return Py_None; + } if (PyTuple_GET_SIZE(args) != 1 || !(PyTuple_Check(PyTuple_GET_ITEM(args, 0)))) { PyErr_BadInternalCall(); return NULL; } switch (PyTuple_GET_SIZE(PyTuple_GET_ITEM(args,0))) { - case 8: - if (!PyArg_ParseTuple(args, "(icOOOiii)", &version, &endian, - &subarray, &names, &fields, &elsize, - &alignment, &dtypeflags)) { - return NULL; - } - break; - case 7: - if (!PyArg_ParseTuple(args, "(icOOOii)", &version, &endian, - &subarray, &names, &fields, &elsize, - &alignment)) { - return NULL; - } - break; - case 6: - if (!PyArg_ParseTuple(args, "(icOOii)", &version, - &endian, &subarray, &fields, - &elsize, &alignment)) { - PyErr_Clear(); - } - break; - case 5: - version = 0; - if (!PyArg_ParseTuple(args, "(cOOii)", - &endian, &subarray, &fields, &elsize, - &alignment)) { - return NULL; - } - break; - default: - version = -1; /* raise an error */ + case 8: + if (!PyArg_ParseTuple(args, "(icOOOiii)", &version, &endian, + &subarray, &names, &fields, &elsize, + &alignment, &dtypeflags)) { + return NULL; + } + break; + case 7: + if (!PyArg_ParseTuple(args, "(icOOOii)", &version, &endian, + &subarray, &names, &fields, &elsize, + &alignment)) { + return NULL; + } + break; + case 6: + if (!PyArg_ParseTuple(args, "(icOOii)", &version, + &endian, &subarray, &fields, + &elsize, &alignment)) { + PyErr_Clear(); + } + break; + case 5: + version = 0; + if (!PyArg_ParseTuple(args, "(cOOii)", + &endian, &subarray, &fields, &elsize, + &alignment)) { + return NULL; + } + break; + default: + /* raise an error */ + version = -1; } - /* If we ever need another pickle format, increment the version - number. But we should still be able to handle the old versions. - */ + /* + * If we ever need another pickle format, increment the version + * number. But we should still be able to handle the old versions. + */ if (version < 0 || version > 3) { PyErr_Format(PyExc_ValueError, "can't handle version %d of numpy.dtype pickle", @@ -11784,7 +12555,9 @@ PyObject *key, *list; key = PyInt_FromLong(-1); list = PyDict_GetItem(fields, key); - if (!list) return NULL; + if (!list) { + return NULL; + } Py_INCREF(list); names = list; PyDict_DelItem(fields, key); @@ -11796,16 +12569,16 @@ } - if ((fields == Py_None && names != Py_None) || \ + if ((fields == Py_None && names != Py_None) || (names == Py_None && fields != Py_None)) { PyErr_Format(PyExc_ValueError, "inconsistent fields and names"); return NULL; } - if (endian != '|' && - PyArray_IsNativeByteOrder(endian)) endian = '='; - + if (endian != '|' && PyArray_IsNativeByteOrder(endian)) { + endian = '='; + } self->byteorder = endian; if (self->subarray) { Py_XDECREF(self->subarray->base); @@ -11828,8 +12601,9 @@ Py_INCREF(fields); Py_XDECREF(self->names); self->names = names; - if (incref_names) + if (incref_names) { Py_INCREF(names); + } } if (PyTypeNum_ISEXTENDED(self->type_num)) { @@ -11846,23 +12620,23 @@ } -/* returns a copy of the PyArray_Descr structure with the byteorder - altered: - no arguments: The byteorder is swapped (in all subfields as well) - single argument: The byteorder is forced to the given state - (in all subfields as well) - - Valid states: ('big', '>') or ('little' or '<') - ('native', or '=') - - If a descr structure with | is encountered it's own - byte-order is not changed but any fields are: -*/ - -/*NUMPY_API - Deep bytorder change of a data-type descriptor - *** Leaves reference count of self unchanged --- does not DECREF self *** - */ + /*NUMPY_API + * returns a copy of the PyArray_Descr structure with the byteorder + * altered: + * no arguments: The byteorder is swapped (in all subfields as well) + * single argument: The byteorder is forced to the given state + * (in all subfields as well) + * + * Valid states: ('big', '>') or ('little' or '<') + * ('native', or '=') + * + * If a descr structure with | is encountered it's own + * byte-order is not changed but any fields are: + * + * + * Deep bytorder change of a data-type descriptor + * *** Leaves reference count of self unchanged --- does not DECREF self *** + */ static PyArray_Descr * PyArray_DescrNewByteorder(PyArray_Descr *self, char newendian) { @@ -11872,9 +12646,14 @@ new = PyArray_DescrNew(self); endian = new->byteorder; if (endian != PyArray_IGNORE) { - if (newendian == PyArray_SWAP) { /* swap byteorder */ - if PyArray_ISNBO(endian) endian = PyArray_OPPBYTE; - else endian = PyArray_NATBYTE; + if (newendian == PyArray_SWAP) { + /* swap byteorder */ + if PyArray_ISNBO(endian) { + endian = PyArray_OPPBYTE; + } + else { + endian = PyArray_NATBYTE; + } new->byteorder = endian; } else if (newendian != PyArray_IGNORE) { @@ -11889,28 +12668,31 @@ PyArray_Descr *newdescr; Py_ssize_t pos = 0; int len, i; + newfields = PyDict_New(); - /* make new dictionary with replaced */ - /* PyArray_Descr Objects */ + /* make new dictionary with replaced PyArray_Descr Objects */ while(PyDict_Next(self->fields, &pos, &key, &value)) { - if NPY_TITLE_KEY(key, value) continue; - if (!PyString_Check(key) || \ - !PyTuple_Check(value) || \ - ((len=PyTuple_GET_SIZE(value)) < 2)) + if NPY_TITLE_KEY(key, value) { continue; - + } + if (!PyString_Check(key) || + !PyTuple_Check(value) || + ((len=PyTuple_GET_SIZE(value)) < 2)) { + continue; + } old = PyTuple_GET_ITEM(value, 0); - if (!PyArray_DescrCheck(old)) continue; - newdescr = PyArray_DescrNewByteorder \ - ((PyArray_Descr *)old, newendian); + if (!PyArray_DescrCheck(old)) { + continue; + } + newdescr = PyArray_DescrNewByteorder( + (PyArray_Descr *)old, newendian); if (newdescr == NULL) { Py_DECREF(newfields); Py_DECREF(new); return NULL; } newvalue = PyTuple_New(len); - PyTuple_SET_ITEM(newvalue, 0, \ - (PyObject *)newdescr); - for(i=1; isubarray) { Py_DECREF(new->subarray->base); - new->subarray->base = PyArray_DescrNewByteorder \ + new->subarray->base = PyArray_DescrNewByteorder (self->subarray->base, newendian); } return new; @@ -11936,19 +12718,20 @@ char endian=PyArray_SWAP; if (!PyArg_ParseTuple(args, "|O&", PyArray_ByteorderConverter, - &endian)) return NULL; - + &endian)) { + return NULL; + } return (PyObject *)PyArray_DescrNewByteorder(self, endian); } static PyMethodDef arraydescr_methods[] = { /* for pickling */ - {"__reduce__", (PyCFunction)arraydescr_reduce, METH_VARARGS, - NULL}, - {"__setstate__", (PyCFunction)arraydescr_setstate, METH_VARARGS, - NULL}, - {"newbyteorder", (PyCFunction)arraydescr_newbyteorder, METH_VARARGS, - NULL}, + {"__reduce__", + (PyCFunction)arraydescr_reduce, METH_VARARGS, NULL}, + {"__setstate__", + (PyCFunction)arraydescr_setstate, METH_VARARGS, NULL}, + {"newbyteorder", + (PyCFunction)arraydescr_newbyteorder, METH_VARARGS, NULL}, {NULL, NULL, 0, NULL} /* sentinel */ }; @@ -11964,7 +12747,9 @@ sub = PyString_FromString(""); PyErr_Clear(); } - else sub = PyObject_Str(lst); + else { + sub = PyObject_Str(lst); + } Py_XDECREF(lst); if (self->type_num != PyArray_VOID) { PyObject *p; @@ -12035,55 +12820,66 @@ static PyObject * arraydescr_richcompare(PyArray_Descr *self, PyObject *other, int cmp_op) { - PyArray_Descr *new=NULL; + PyArray_Descr *new = NULL; PyObject *result = Py_NotImplemented; if (!PyArray_DescrCheck(other)) { - if (PyArray_DescrConverter(other, &new) == PY_FAIL) + if (PyArray_DescrConverter(other, &new) == PY_FAIL) { return NULL; + } } else { new = (PyArray_Descr *)other; Py_INCREF(new); } switch (cmp_op) { - case Py_LT: - if (!PyArray_EquivTypes(self, new) && PyArray_CanCastTo(self, new)) - result = Py_True; - else - result = Py_False; - break; - case Py_LE: - if (PyArray_CanCastTo(self, new)) - result = Py_True; - else - result = Py_False; - break; - case Py_EQ: - if (PyArray_EquivTypes(self, new)) - result = Py_True; - else - result = Py_False; - break; - case Py_NE: - if (PyArray_EquivTypes(self, new)) - result = Py_False; - else - result = Py_True; - break; - case Py_GT: - if (!PyArray_EquivTypes(self, new) && PyArray_CanCastTo(new, self)) - result = Py_True; - else - result = Py_False; - break; - case Py_GE: - if (PyArray_CanCastTo(new, self)) - result = Py_True; - else - result = Py_False; - break; - default: - result = Py_NotImplemented; + case Py_LT: + if (!PyArray_EquivTypes(self, new) && PyArray_CanCastTo(self, new)) { + result = Py_True; + } + else { + result = Py_False; + } + break; + case Py_LE: + if (PyArray_CanCastTo(self, new)) { + result = Py_True; + } + else { + result = Py_False; + } + break; + case Py_EQ: + if (PyArray_EquivTypes(self, new)) { + result = Py_True; + } + else { + result = Py_False; + } + break; + case Py_NE: + if (PyArray_EquivTypes(self, new)) + result = Py_False; + else + result = Py_True; + break; + case Py_GT: + if (!PyArray_EquivTypes(self, new) && PyArray_CanCastTo(new, self)) { + result = Py_True; + } + else { + result = Py_False; + } + break; + case Py_GE: + if (PyArray_CanCastTo(new, self)) { + result = Py_True; + } + else { + result = Py_False; + } + break; + default: + result = Py_NotImplemented; } Py_XDECREF(new); @@ -12098,12 +12894,14 @@ static Py_ssize_t descr_length(PyObject *self0) { - PyArray_Descr *self = (PyArray_Descr *)self0; - if (self->names) + if (self->names) { return PyTuple_GET_SIZE(self->names); - else return 0; + } + else { + return 0; + } } static PyObject * @@ -12111,7 +12909,7 @@ { PyObject *tup; PyArray_Descr *new; - if (length < 0) + if (length < 0) { return PyErr_Format(PyExc_ValueError, #if (PY_VERSION_HEX < 0x02050000) "Array length must be >= 0, not %d", @@ -12119,8 +12917,11 @@ "Array length must be >= 0, not %zd", #endif length); + } tup = Py_BuildValue("O" NPY_SSIZE_T_PYFMT, self, length); - if (tup == NULL) return NULL; + if (tup == NULL) { + return NULL; + } PyArray_DescrConverter(tup, &new); Py_DECREF(tup); return (PyObject *)new; @@ -12132,11 +12933,9 @@ if (self->names) { if (PyString_Check(op) || PyUnicode_Check(op)) { - PyObject *obj; - obj = PyDict_GetItem(self->fields, op); + PyObject *obj = PyDict_GetItem(self->fields, op); if (obj != NULL) { - PyObject *descr; - descr = PyTuple_GET_ITEM(obj, 0); + PyObject *descr = PyTuple_GET_ITEM(obj, 0); Py_INCREF(descr); return descr; } @@ -12148,12 +12947,12 @@ } else { PyObject *name; - int value; - value = PyArray_PyIntAsInt(op); + int value = PyArray_PyIntAsInt(op); if (!PyErr_Occurred()) { - int size; - size = PyTuple_GET_SIZE(self->names); - if (value < 0) value += size; + int size = PyTuple_GET_SIZE(self->names); + if (value < 0) { + value += size; + } if (value < 0 || value >= size) { PyErr_Format(PyExc_IndexError, "0<=index<%d not %d", @@ -12184,17 +12983,17 @@ (binaryfunc)NULL, descr_repeat, NULL, NULL, - NULL, /* sq_ass_item */ - NULL, /* ssizessizeobjargproc sq_ass_slice */ - 0, /* sq_contains */ - 0, /* sq_inplace_concat */ - 0, /* sq_inplace_repeat */ + NULL, /* sq_ass_item */ + NULL, /* ssizessizeobjargproc sq_ass_slice */ + 0, /* sq_contains */ + 0, /* sq_inplace_concat */ + 0, /* sq_inplace_repeat */ }; static PyMappingMethods descr_as_mapping = { - descr_length, /*mp_length*/ - (binaryfunc)descr_subscript, /*mp_subscript*/ - (objobjargproc)NULL, /*mp_ass_subscript*/ + descr_length, /* mp_length*/ + (binaryfunc)descr_subscript, /* mp_subscript*/ + (objobjargproc)NULL, /* mp_ass_subscript*/ }; /****************** End of Mapping Protocol ******************************/ @@ -12202,70 +13001,71 @@ static PyTypeObject PyArrayDescr_Type = { PyObject_HEAD_INIT(NULL) - 0, /* ob_size */ - "numpy.dtype", /* tp_name */ - sizeof(PyArray_Descr), /* tp_basicsize */ - 0, /* tp_itemsize */ + 0, /* ob_size */ + "numpy.dtype", /* tp_name */ + sizeof(PyArray_Descr), /* tp_basicsize */ + 0, /* tp_itemsize */ /* methods */ - (destructor)arraydescr_dealloc, /* tp_dealloc */ - 0, /* tp_print */ - 0, /* tp_getattr */ - 0, /* tp_setattr */ - 0, /* tp_compare */ - (reprfunc)arraydescr_repr, /* tp_repr */ - 0, /* tp_as_number */ - &descr_as_sequence, /* tp_as_sequence */ - &descr_as_mapping, /* tp_as_mapping */ - 0, /* tp_hash */ - 0, /* tp_call */ - (reprfunc)arraydescr_str, /* tp_str */ - 0, /* tp_getattro */ - 0, /* tp_setattro */ - 0, /* tp_as_buffer */ - Py_TPFLAGS_DEFAULT, /* tp_flags */ - 0, /* tp_doc */ - 0, /* tp_traverse */ - 0, /* tp_clear */ - (richcmpfunc)arraydescr_richcompare, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ - 0, /* tp_iter */ - 0, /* tp_iternext */ - arraydescr_methods, /* tp_methods */ - arraydescr_members, /* tp_members */ - arraydescr_getsets, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - 0, /* tp_init */ - 0, /* tp_alloc */ - arraydescr_new, /* tp_new */ - 0, /* tp_free */ - 0, /* tp_is_gc */ - 0, /* tp_bases */ - 0, /* tp_mro */ - 0, /* tp_cache */ - 0, /* tp_subclasses */ - 0, /* tp_weaklist */ - 0, /* tp_del */ + (destructor)arraydescr_dealloc, /* tp_dealloc */ + 0, /* tp_print */ + 0, /* tp_getattr */ + 0, /* tp_setattr */ + 0, /* tp_compare */ + (reprfunc)arraydescr_repr, /* tp_repr */ + 0, /* tp_as_number */ + &descr_as_sequence, /* tp_as_sequence */ + &descr_as_mapping, /* tp_as_mapping */ + 0, /* tp_hash */ + 0, /* tp_call */ + (reprfunc)arraydescr_str, /* tp_str */ + 0, /* tp_getattro */ + 0, /* tp_setattro */ + 0, /* tp_as_buffer */ + Py_TPFLAGS_DEFAULT, /* tp_flags */ + 0, /* tp_doc */ + 0, /* tp_traverse */ + 0, /* tp_clear */ + (richcmpfunc)arraydescr_richcompare, /* tp_richcompare */ + 0, /* tp_weaklistoffset */ + 0, /* tp_iter */ + 0, /* tp_iternext */ + arraydescr_methods, /* tp_methods */ + arraydescr_members, /* tp_members */ + arraydescr_getsets, /* tp_getset */ + 0, /* tp_base */ + 0, /* tp_dict */ + 0, /* tp_descr_get */ + 0, /* tp_descr_set */ + 0, /* tp_dictoffset */ + 0, /* tp_init */ + 0, /* tp_alloc */ + arraydescr_new, /* tp_new */ + 0, /* tp_free */ + 0, /* tp_is_gc */ + 0, /* tp_bases */ + 0, /* tp_mro */ + 0, /* tp_cache */ + 0, /* tp_subclasses */ + 0, /* tp_weaklist */ + 0, /* tp_del */ #ifdef COUNT_ALLOCS - /* these must be last and never explicitly initialized */ - 0, /* tp_allocs */ - 0, /* tp_frees */ - 0, /* tp_maxalloc */ - 0, /* tp_prev */ - 0, /* *tp_next */ + /* these must be last and never explicitly initialized */ + 0, /* tp_allocs */ + 0, /* tp_frees */ + 0, /* tp_maxalloc */ + 0, /* tp_prev */ + 0, /* *tp_next */ #endif }; -/** Array Flags Object **/ +/* Array Flags Object */ /*NUMPY_API - Get New ArrayFlagsObject -*/ + * + * Get New ArrayFlagsObject + */ static PyObject * PyArray_NewFlagsObject(PyObject *obj) { @@ -12278,11 +13078,12 @@ flags = PyArray_FLAGS(obj); } flagobj = PyArrayFlags_Type.tp_alloc(&PyArrayFlags_Type, 0); - if (flagobj == NULL) return NULL; + if (flagobj == NULL) { + return NULL; + } Py_XINCREF(obj); ((PyArrayFlagsObject *)flagobj)->arr = obj; ((PyArrayFlagsObject *)flagobj)->flags = flags; - return flagobj; } @@ -12320,11 +13121,12 @@ PyObject *item; if (((self->flags & FORTRAN) == FORTRAN) || - ((self->flags & CONTIGUOUS) == CONTIGUOUS)) + ((self->flags & CONTIGUOUS) == CONTIGUOUS)) { item = Py_True; - else + } + else { item = Py_False; - + } Py_INCREF(item); return item; } @@ -12335,11 +13137,12 @@ PyObject *item; if (((self->flags & FORTRAN) == FORTRAN) && - !((self->flags & CONTIGUOUS) == CONTIGUOUS)) + !((self->flags & CONTIGUOUS) == CONTIGUOUS)) { item = Py_True; - else + } + else { item = Py_False; - + } Py_INCREF(item); return item; } @@ -12349,13 +13152,14 @@ { PyObject *item; - if (((self->flags & (ALIGNED|WRITEABLE|FORTRAN)) == \ + if (((self->flags & (ALIGNED|WRITEABLE|FORTRAN)) == (ALIGNED|WRITEABLE|FORTRAN)) && - !((self->flags & CONTIGUOUS) == CONTIGUOUS)) + !((self->flags & CONTIGUOUS) == CONTIGUOUS)) { item = Py_True; - else + } + else { item = Py_False; - + } Py_INCREF(item); return item; } @@ -12377,7 +13181,9 @@ } res = PyObject_CallMethod(self->arr, "setflags", "OOO", Py_None, Py_None, (PyObject_IsTrue(obj) ? Py_True : Py_False)); - if (res == NULL) return -1; + if (res == NULL) { + return -1; + } Py_DECREF(res); return 0; } @@ -12393,7 +13199,9 @@ res = PyObject_CallMethod(self->arr, "setflags", "OOO", Py_None, (PyObject_IsTrue(obj) ? Py_True : Py_False), Py_None); - if (res == NULL) return -1; + if (res == NULL) { + return -1; + } Py_DECREF(res); return 0; } @@ -12409,7 +13217,9 @@ res = PyObject_CallMethod(self->arr, "setflags", "OOO", (PyObject_IsTrue(obj) ? Py_True : Py_False), Py_None, Py_None); - if (res == NULL) return -1; + if (res == NULL) { + return -1; + } Py_DECREF(res); return 0; } @@ -12417,61 +13227,61 @@ static PyGetSetDef arrayflags_getsets[] = { {"contiguous", - (getter)arrayflags_contiguous_get, - NULL, - "", NULL}, + (getter)arrayflags_contiguous_get, + NULL, + "", NULL}, {"c_contiguous", - (getter)arrayflags_contiguous_get, - NULL, - "", NULL}, + (getter)arrayflags_contiguous_get, + NULL, + "", NULL}, {"f_contiguous", - (getter)arrayflags_fortran_get, - NULL, - "", NULL}, + (getter)arrayflags_fortran_get, + NULL, + "", NULL}, {"fortran", - (getter)arrayflags_fortran_get, - NULL, - "", NULL}, + (getter)arrayflags_fortran_get, + NULL, + "", NULL}, {"updateifcopy", - (getter)arrayflags_updateifcopy_get, - (setter)arrayflags_updateifcopy_set, - "", NULL}, + (getter)arrayflags_updateifcopy_get, + (setter)arrayflags_updateifcopy_set, + "", NULL}, {"owndata", - (getter)arrayflags_owndata_get, - NULL, - "", NULL}, + (getter)arrayflags_owndata_get, + NULL, + "", NULL}, {"aligned", - (getter)arrayflags_aligned_get, - (setter)arrayflags_aligned_set, - "", NULL}, + (getter)arrayflags_aligned_get, + (setter)arrayflags_aligned_set, + "", NULL}, {"writeable", - (getter)arrayflags_writeable_get, - (setter)arrayflags_writeable_set, - "", NULL}, + (getter)arrayflags_writeable_get, + (setter)arrayflags_writeable_set, + "", NULL}, {"fnc", - (getter)arrayflags_fnc_get, - NULL, - "", NULL}, + (getter)arrayflags_fnc_get, + NULL, + "", NULL}, {"forc", - (getter)arrayflags_forc_get, - NULL, - "", NULL}, + (getter)arrayflags_forc_get, + NULL, + "", NULL}, {"behaved", - (getter)arrayflags_behaved_get, - NULL, - "", NULL}, + (getter)arrayflags_behaved_get, + NULL, + "", NULL}, {"carray", - (getter)arrayflags_carray_get, - NULL, - "", NULL}, + (getter)arrayflags_carray_get, + NULL, + "", NULL}, {"farray", - (getter)arrayflags_farray_get, - NULL, - "", NULL}, + (getter)arrayflags_farray_get, + NULL, + "", NULL}, {"num", - (getter)arrayflags_num_get, - NULL, - "", NULL}, + (getter)arrayflags_num_get, + NULL, + "", NULL}, {NULL, NULL, NULL, NULL, NULL}, }; @@ -12480,76 +13290,93 @@ { char *key; int n; - if (!PyString_Check(ind)) goto fail; + if (!PyString_Check(ind)) { + goto fail; + } key = PyString_AS_STRING(ind); n = PyString_GET_SIZE(ind); switch(n) { - case 1: - switch(key[0]) { - case 'C': - return arrayflags_contiguous_get(self); - case 'F': - return arrayflags_fortran_get(self); - case 'W': - return arrayflags_writeable_get(self); - case 'B': - return arrayflags_behaved_get(self); - case 'O': - return arrayflags_owndata_get(self); - case 'A': - return arrayflags_aligned_get(self); - case 'U': - return arrayflags_updateifcopy_get(self); - default: - goto fail; - } - break; - case 2: - if (strncmp(key, "CA", n)==0) - return arrayflags_carray_get(self); - if (strncmp(key, "FA", n)==0) - return arrayflags_farray_get(self); - break; - case 3: - if (strncmp(key, "FNC", n)==0) - return arrayflags_fnc_get(self); - break; - case 4: - if (strncmp(key, "FORC", n)==0) - return arrayflags_forc_get(self); - break; - case 6: - if (strncmp(key, "CARRAY", n)==0) - return arrayflags_carray_get(self); - if (strncmp(key, "FARRAY", n)==0) - return arrayflags_farray_get(self); - break; - case 7: - if (strncmp(key,"FORTRAN",n)==0) - return arrayflags_fortran_get(self); - if (strncmp(key,"BEHAVED",n)==0) - return arrayflags_behaved_get(self); - if (strncmp(key,"OWNDATA",n)==0) - return arrayflags_owndata_get(self); - if (strncmp(key,"ALIGNED",n)==0) - return arrayflags_aligned_get(self); - break; - case 9: - if (strncmp(key,"WRITEABLE",n)==0) - return arrayflags_writeable_get(self); - break; - case 10: - if (strncmp(key,"CONTIGUOUS",n)==0) - return arrayflags_contiguous_get(self); - break; - case 12: - if (strncmp(key, "UPDATEIFCOPY", n)==0) - return arrayflags_updateifcopy_get(self); - if (strncmp(key, "C_CONTIGUOUS", n)==0) - return arrayflags_contiguous_get(self); - if (strncmp(key, "F_CONTIGUOUS", n)==0) - return arrayflags_fortran_get(self); - break; + case 1: + switch(key[0]) { + case 'C': + return arrayflags_contiguous_get(self); + case 'F': + return arrayflags_fortran_get(self); + case 'W': + return arrayflags_writeable_get(self); + case 'B': + return arrayflags_behaved_get(self); + case 'O': + return arrayflags_owndata_get(self); + case 'A': + return arrayflags_aligned_get(self); + case 'U': + return arrayflags_updateifcopy_get(self); + default: + goto fail; + } + break; + case 2: + if (strncmp(key, "CA", n) == 0) { + return arrayflags_carray_get(self); + } + if (strncmp(key, "FA", n) == 0) { + return arrayflags_farray_get(self); + } + break; + case 3: + if (strncmp(key, "FNC", n) == 0) { + return arrayflags_fnc_get(self); + } + break; + case 4: + if (strncmp(key, "FORC", n) == 0) { + return arrayflags_forc_get(self); + } + break; + case 6: + if (strncmp(key, "CARRAY", n) == 0) { + return arrayflags_carray_get(self); + } + if (strncmp(key, "FARRAY", n) == 0) { + return arrayflags_farray_get(self); + } + break; + case 7: + if (strncmp(key,"FORTRAN",n) == 0) { + return arrayflags_fortran_get(self); + } + if (strncmp(key,"BEHAVED",n) == 0) { + return arrayflags_behaved_get(self); + } + if (strncmp(key,"OWNDATA",n) == 0) { + return arrayflags_owndata_get(self); + } + if (strncmp(key,"ALIGNED",n) == 0) { + return arrayflags_aligned_get(self); + } + break; + case 9: + if (strncmp(key,"WRITEABLE",n) == 0) { + return arrayflags_writeable_get(self); + } + break; + case 10: + if (strncmp(key,"CONTIGUOUS",n) == 0) { + return arrayflags_contiguous_get(self); + } + break; + case 12: + if (strncmp(key, "UPDATEIFCOPY", n) == 0) { + return arrayflags_updateifcopy_get(self); + } + if (strncmp(key, "C_CONTIGUOUS", n) == 0) { + return arrayflags_contiguous_get(self); + } + if (strncmp(key, "F_CONTIGUOUS", n) == 0) { + return arrayflags_fortran_get(self); + } + break; } fail: @@ -12562,18 +13389,23 @@ { char *key; int n; - if (!PyString_Check(ind)) goto fail; + if (!PyString_Check(ind)) { + goto fail; + } key = PyString_AS_STRING(ind); n = PyString_GET_SIZE(ind); - if (((n==9) && (strncmp(key, "WRITEABLE", n)==0)) || - ((n==1) && (strncmp(key, "W", n)==0))) + if (((n==9) && (strncmp(key, "WRITEABLE", n) == 0)) || + ((n==1) && (strncmp(key, "W", n) == 0))) { return arrayflags_writeable_set(self, item); - else if (((n==7) && (strncmp(key, "ALIGNED", n)==0)) || - ((n==1) && (strncmp(key, "A", n)==0))) + } + else if (((n==7) && (strncmp(key, "ALIGNED", n) == 0)) || + ((n==1) && (strncmp(key, "A", n) == 0))) { return arrayflags_aligned_set(self, item); - else if (((n==12) && (strncmp(key, "UPDATEIFCOPY", n)==0)) || - ((n==1) && (strncmp(key, "U", n)==0))) + } + else if (((n==12) && (strncmp(key, "UPDATEIFCOPY", n) == 0)) || + ((n==1) && (strncmp(key, "U", n) == 0))) { return arrayflags_updateifcopy_set(self, item); + } fail: PyErr_SetString(PyExc_KeyError, "Unknown flag"); @@ -12583,8 +13415,12 @@ static char * _torf_(int flags, int val) { - if ((flags & val) == val) return "True"; - else return "False"; + if ((flags & val) == val) { + return "True"; + } + else { + return "False"; + } } static PyObject * @@ -12606,12 +13442,15 @@ static int arrayflags_compare(PyArrayFlagsObject *self, PyArrayFlagsObject *other) { - if (self->flags == other->flags) + if (self->flags == other->flags) { return 0; - else if (self->flags < other->flags) + } + else if (self->flags < other->flags) { return -1; - else + } + else { return 1; + } } static PyMappingMethods arrayflags_as_mapping = { @@ -12629,9 +13468,9 @@ arrayflags_new(PyTypeObject *NPY_UNUSED(self), PyObject *args, PyObject *NPY_UNUSED(kwds)) { PyObject *arg=NULL; - if (!PyArg_UnpackTuple(args, "flagsobj", 0, 1, &arg)) + if (!PyArg_UnpackTuple(args, "flagsobj", 0, 1, &arg)) { return NULL; - + } if ((arg != NULL) && PyArray_Check(arg)) { return PyArray_NewFlagsObject(arg); } @@ -12645,7 +13484,7 @@ 0, "numpy.flagsobj", sizeof(PyArrayFlagsObject), - 0, /* tp_itemsize */ + 0, /* tp_itemsize */ /* methods */ (destructor)arrayflags_dealloc, /* tp_dealloc */ 0, /* tp_print */ @@ -12670,32 +13509,32 @@ 0, /* tp_weaklistoffset */ 0, /* tp_iter */ 0, /* tp_iternext */ - 0, /* tp_methods */ - 0, /* tp_members */ - arrayflags_getsets, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - 0, /* tp_init */ - 0, /* tp_alloc */ - arrayflags_new, /* tp_new */ - 0, /* tp_free */ - 0, /* tp_is_gc */ - 0, /* tp_bases */ - 0, /* tp_mro */ - 0, /* tp_cache */ - 0, /* tp_subclasses */ - 0, /* tp_weaklist */ - 0, /* tp_del */ + 0, /* tp_methods */ + 0, /* tp_members */ + arrayflags_getsets, /* tp_getset */ + 0, /* tp_base */ + 0, /* tp_dict */ + 0, /* tp_descr_get */ + 0, /* tp_descr_set */ + 0, /* tp_dictoffset */ + 0, /* tp_init */ + 0, /* tp_alloc */ + arrayflags_new, /* tp_new */ + 0, /* tp_free */ + 0, /* tp_is_gc */ + 0, /* tp_bases */ + 0, /* tp_mro */ + 0, /* tp_cache */ + 0, /* tp_subclasses */ + 0, /* tp_weaklist */ + 0, /* tp_del */ #ifdef COUNT_ALLOCS - /* these must be last and never explicitly initialized */ - 0, /* tp_allocs */ - 0, /* tp_frees */ - 0, /* tp_maxalloc */ - 0, /* tp_prev */ - 0, /* *tp_next */ + /* these must be last and never explicitly initialized */ + 0, /* tp_allocs */ + 0, /* tp_frees */ + 0, /* tp_maxalloc */ + 0, /* tp_prev */ + 0, /* *tp_next */ #endif }; Modified: branches/numpy-mingw-w64/numpy/core/src/arraytypes.inc.src =================================================================== --- branches/numpy-mingw-w64/numpy/core/src/arraytypes.inc.src 2009-02-20 13:30:20 UTC (rev 6424) +++ branches/numpy-mingw-w64/numpy/core/src/arraytypes.inc.src 2009-02-20 16:37:01 UTC (rev 6425) @@ -2,41 +2,17 @@ #include "config.h" static double -_getNAN(void) { -#ifdef NAN - return NAN; -#else - static double nan=0; - - if (nan == 0) { - double mul = 1e100; - double tmp = 0.0; - double pinf=0; - pinf = mul; - for (;;) { - pinf *= mul; - if (pinf == tmp) break; - tmp = pinf; - } - nan = pinf / pinf; - } - return nan; -#endif -} - - -static double MyPyFloat_AsDouble(PyObject *obj) { double ret = 0; PyObject *num; if (obj == Py_None) { - return _getNAN(); + return NumPyOS_NAN; } num = PyNumber_Float(obj); if (num == NULL) { - return _getNAN(); + return NumPyOS_NAN; } ret = PyFloat_AsDouble(num); Py_DECREF(num); @@ -192,7 +168,7 @@ op2 = op; Py_INCREF(op); } if (op2 == Py_None) { - oop.real = oop.imag = _getNAN(); + oop.real = oop.imag = NumPyOS_NAN; } else { oop = PyComplex_AsCComplex (op2); @@ -897,17 +873,30 @@ */ /**begin repeat - -#fname=SHORT,USHORT,INT,UINT,LONG,ULONG,LONGLONG,ULONGLONG,FLOAT,DOUBLE,LONGDOUBLE# -#type=short,ushort,int,uint,long,ulong,longlong,ulonglong,float,double,longdouble# -#format="hd","hu","d","u","ld","lu",LONGLONG_FMT,ULONGLONG_FMT,"f","lf","Lf"# +#fname=SHORT,USHORT,INT,UINT,LONG,ULONG,LONGLONG,ULONGLONG# +#type=short,ushort,int,uint,long,ulong,longlong,ulonglong# +#format="hd","hu","d","u","ld","lu",LONGLONG_FMT,ULONGLONG_FMT# */ static int @fname at _scan (FILE *fp, @type@ *ip, void *NPY_UNUSED(ignore), PyArray_Descr *NPY_UNUSED(ignored)) { return fscanf(fp, "%"@format@, ip); } +/**end repeat**/ +/**begin repeat +#fname=FLOAT,DOUBLE,LONGDOUBLE# +#type=float,double,longdouble# +*/ +static int + at fname@_scan (FILE *fp, @type@ *ip, void *NPY_UNUSED(ignore), PyArray_Descr *NPY_UNUSED(ignored)) +{ + double result; + int ret; + ret = NumPyOS_ascii_ftolf(fp, &result); + *ip = (@type@) result; + return ret; +} /**end repeat**/ /**begin repeat @@ -966,19 +955,15 @@ #fname=FLOAT,DOUBLE,LONGDOUBLE# #type=float,double,longdouble# */ -#if (PY_VERSION_HEX >= 0x02040000) || defined(PyOS_ascii_strtod) static int @fname at _fromstr(char *str, @type@ *ip, char **endptr, PyArray_Descr *NPY_UNUSED(ignore)) { double result; - result = PyOS_ascii_strtod(str, endptr); + result = NumPyOS_ascii_strtod(str, endptr); *ip = (@type@) result; return 0; } -#else -#define @fname at _fromstr NULL -#endif /**end repeat**/ Modified: branches/numpy-mingw-w64/numpy/core/src/multiarraymodule.c =================================================================== --- branches/numpy-mingw-w64/numpy/core/src/multiarraymodule.c 2009-02-20 13:30:20 UTC (rev 6424) +++ branches/numpy-mingw-w64/numpy/core/src/multiarraymodule.c 2009-02-20 16:37:01 UTC (rev 6425) @@ -81,6 +81,10 @@ return NULL; } +/* XXX: We include c99 compat math module here because it is needed for + * numpyos.c (included by arrayobject). This is bad - we should separate + * declaration/implementation and share this in a lib. */ +#include "umath_funcs_c99.inc" /* Including this file is the only way I know how to declare functions static in each file, and store the pointers from functions in both @@ -7705,6 +7709,9 @@ PyObject *m, *d, *s; PyObject *c_api; + /* Initialize constants etc. */ + NumPyOS_init(); + /* Create the module and add the functions */ m = Py_InitModule("multiarray", array_module_methods); if (!m) goto err; Copied: branches/numpy-mingw-w64/numpy/core/src/numpyos.c (from rev 6424, trunk/numpy/core/src/numpyos.c) Modified: branches/numpy-mingw-w64/numpy/core/src/scalarmathmodule.c.src =================================================================== --- branches/numpy-mingw-w64/numpy/core/src/scalarmathmodule.c.src 2009-02-20 13:30:20 UTC (rev 6424) +++ branches/numpy-mingw-w64/numpy/core/src/scalarmathmodule.c.src 2009-02-20 16:37:01 UTC (rev 6425) @@ -636,8 +636,11 @@ &errobj) < 0) return NULL; first = 1; - if (PyUFunc_handlefperr(errmask, errobj, retstatus, &first)) + if (PyUFunc_handlefperr(errmask, errobj, retstatus, &first)) { + Py_XDECREF(errobj); return NULL; + } + Py_XDECREF(errobj); } #endif @@ -736,8 +739,11 @@ &errobj) < 0) return NULL; first = 1; - if (PyUFunc_handlefperr(errmask, errobj, retstatus, &first)) + if (PyUFunc_handlefperr(errmask, errobj, retstatus, &first)) { + Py_XDECREF(errobj); return NULL; + } + Py_XDECREF(errobj); } #if @isint@ Modified: branches/numpy-mingw-w64/numpy/core/src/scalartypes.inc.src =================================================================== --- branches/numpy-mingw-w64/numpy/core/src/scalartypes.inc.src 2009-02-20 13:30:20 UTC (rev 6424) +++ branches/numpy-mingw-w64/numpy/core/src/scalartypes.inc.src 2009-02-20 16:37:01 UTC (rev 6425) @@ -5,77 +5,82 @@ #endif #include "numpy/arrayscalars.h" +#include "config.h" +#include "numpyos.c" + static PyBoolScalarObject _PyArrayScalar_BoolValues[2] = { {PyObject_HEAD_INIT(&PyBoolArrType_Type) 0}, {PyObject_HEAD_INIT(&PyBoolArrType_Type) 1}, }; -/* Inheritance established later when tp_bases is set (or tp_base for - single inheritance) */ +/* + * Inheritance is established later when tp_bases is set (or tp_base for + * single inheritance) + */ /**begin repeat - -#name=number, integer, signedinteger, unsignedinteger, inexact, floating, complexfloating, flexible, character# -#NAME=Number, Integer, SignedInteger, UnsignedInteger, Inexact, Floating, ComplexFloating, Flexible, Character# -*/ - + * #name = number, integer, signedinteger, unsignedinteger, inexact, + * floating, complexfloating, flexible, character# + * #NAME = Number, Integer, SignedInteger, UnsignedInteger, Inexact, + * Floating, ComplexFloating, Flexible, Character# + */ static PyTypeObject Py at NAME@ArrType_Type = { PyObject_HEAD_INIT(NULL) - 0, /*ob_size*/ - "numpy. at name@", /*tp_name*/ - sizeof(PyObject), /*tp_basicsize*/ - 0, /* tp_itemsize */ + 0, /* ob_size*/ + "numpy. at name@", /* tp_name*/ + sizeof(PyObject), /* tp_basicsize*/ + 0, /* tp_itemsize */ /* methods */ - 0, /* tp_dealloc */ - 0, /* tp_print */ - 0, /* tp_getattr */ - 0, /* tp_setattr */ - 0, /* tp_compare */ - 0, /* tp_repr */ - 0, /* tp_as_number */ - 0, /* tp_as_sequence */ - 0, /* tp_as_mapping */ - 0, /* tp_hash */ - 0, /* tp_call */ - 0, /* tp_str */ - 0, /* tp_getattro */ - 0, /* tp_setattro */ - 0, /* tp_as_buffer */ - 0, /* tp_flags */ - 0, /* tp_doc */ - 0, /* tp_traverse */ - 0, /* tp_clear */ - 0, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ - 0, /* tp_iter */ - 0, /* tp_iternext */ - 0, /* tp_methods */ - 0, /* tp_members */ - 0, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - 0, /* tp_init */ - 0, /* tp_alloc */ - 0, /* tp_new */ - 0, /* tp_free */ - 0, /* tp_is_gc */ - 0, /* tp_bases */ - 0, /* tp_mro */ - 0, /* tp_cache */ - 0, /* tp_subclasses */ - 0, /* tp_weaklist */ - 0, /* tp_del */ + 0, /* tp_dealloc */ + 0, /* tp_print */ + 0, /* tp_getattr */ + 0, /* tp_setattr */ + 0, /* tp_compare */ + 0, /* tp_repr */ + 0, /* tp_as_number */ + 0, /* tp_as_sequence */ + 0, /* tp_as_mapping */ + 0, /* tp_hash */ + 0, /* tp_call */ + 0, /* tp_str */ + 0, /* tp_getattro */ + 0, /* tp_setattro */ + 0, /* tp_as_buffer */ + 0, /* tp_flags */ + 0, /* tp_doc */ + 0, /* tp_traverse */ + 0, /* tp_clear */ + 0, /* tp_richcompare */ + 0, /* tp_weaklistoffset */ + 0, /* tp_iter */ + 0, /* tp_iternext */ + 0, /* tp_methods */ + 0, /* tp_members */ + 0, /* tp_getset */ + 0, /* tp_base */ + 0, /* tp_dict */ + 0, /* tp_descr_get */ + 0, /* tp_descr_set */ + 0, /* tp_dictoffset */ + 0, /* tp_init */ + 0, /* tp_alloc */ + 0, /* tp_new */ + 0, /* tp_free */ + 0, /* tp_is_gc */ + 0, /* tp_bases */ + 0, /* tp_mro */ + 0, /* tp_cache */ + 0, /* tp_subclasses */ + 0, /* tp_weaklist */ + 0, /* tp_del */ #ifdef COUNT_ALLOCS - /* these must be last and never explicitly initialized */ - 0, /* tp_allocs */ - 0, /* tp_frees */ - 0, /* tp_maxalloc */ - 0, /* tp_prev */ - 0, /* *tp_next */ + /* these must be last and never explicitly initialized */ + 0, /* tp_allocs */ + 0, /* tp_frees */ + 0, /* tp_maxalloc */ + 0, /* tp_prev */ + 0, /* *tp_next */ #endif }; /**end repeat**/ @@ -115,13 +120,18 @@ CASE(CLONGDOUBLE, CLongDouble); CASE(OBJECT, Object); #undef CASE - case NPY_STRING: return (void *)PyString_AS_STRING(scalar); - case NPY_UNICODE: return (void *)PyUnicode_AS_DATA(scalar); - case NPY_VOID: return ((PyVoidScalarObject *)scalar)->obval; + case NPY_STRING: + return (void *)PyString_AS_STRING(scalar); + case NPY_UNICODE: + return (void *)PyUnicode_AS_DATA(scalar); + case NPY_VOID: + return ((PyVoidScalarObject *)scalar)->obval; } - /* Must be a user-defined type --- check to see which - scalar it inherits from. */ + /* + * Must be a user-defined type --- check to see which + * scalar it inherits from. + */ #define _CHK(cls) (PyObject_IsInstance(scalar, \ (PyObject *)&Py##cls##ArrType_Type)) @@ -137,7 +147,8 @@ _IFCASE(Long); _IFCASE(LongLong); } - else { /* Unsigned Integer */ + else { + /* Unsigned Integer */ _IFCASE(UByte); _IFCASE(UShort); _IFCASE(UInt); @@ -145,49 +156,64 @@ _IFCASE(ULongLong); } } - else { /* Inexact */ + else { + /* Inexact */ if _CHK(Floating) { _IFCASE(Float); _IFCASE(Double); _IFCASE(LongDouble); } - else { /*ComplexFloating */ + else { + /*ComplexFloating */ _IFCASE(CFloat); _IFCASE(CDouble); _IFCASE(CLongDouble); } } } - else if _CHK(Bool) return _OBJ(Bool); - else if _CHK(Flexible) { - if _CHK(String) return (void *)PyString_AS_STRING(scalar); - if _CHK(Unicode) return (void *)PyUnicode_AS_DATA(scalar); - if _CHK(Void) return ((PyVoidScalarObject *)scalar)->obval; + else if (_CHK(Bool)) { + return _OBJ(Bool); } - else _IFCASE(Object); + else if (_CHK(Flexible)) { + if (_CHK(String)) { + return (void *)PyString_AS_STRING(scalar); + } + if (_CHK(Unicode)) { + return (void *)PyUnicode_AS_DATA(scalar); + } + if (_CHK(Void)) { + return ((PyVoidScalarObject *)scalar)->obval; + } + } + else { + _IFCASE(Object); + } - /* Use the alignment flag to figure out where the data begins - after a PyObject_HEAD + /* + * Use the alignment flag to figure out where the data begins + * after a PyObject_HEAD */ memloc = (intp)scalar; memloc += sizeof(PyObject); - /* now round-up to the nearest alignment value - */ + /* now round-up to the nearest alignment value */ align = descr->alignment; - if (align > 1) memloc = ((memloc + align - 1)/align)*align; + if (align > 1) { + memloc = ((memloc + align - 1)/align)*align; + } return (void *)memloc; #undef _IFCASE #undef _OBJ #undef _CHK } -/* no error checking is performed -- ctypeptr must be same type as scalar */ -/* in case of flexible type, the data is not copied - into ctypeptr which is expected to be a pointer to pointer */ /*NUMPY_API - Convert to c-type -*/ + * Convert to c-type + * + * no error checking is performed -- ctypeptr must be same type as scalar + * in case of flexible type, the data is not copied + * into ctypeptr which is expected to be a pointer to pointer + */ static void PyArray_ScalarAsCtype(PyObject *scalar, void *ctypeptr) { @@ -199,24 +225,23 @@ if (PyTypeNum_ISEXTENDED(typecode->type_num)) { void **ct = (void **)ctypeptr; *ct = newptr; - } else { + } + else { memcpy(ctypeptr, newptr, typecode->elsize); } Py_DECREF(typecode); return; } -/* The output buffer must be large-enough to receive the value */ -/* Even for flexible types which is different from ScalarAsCtype - where only a reference for flexible types is returned -*/ - -/* This may not work right on narrow builds for NumPy unicode scalars. +/*NUMPY_API + * Cast Scalar to c-type + * + * The output buffer must be large-enough to receive the value + * Even for flexible types which is different from ScalarAsCtype + * where only a reference for flexible types is returned + * + * This may not work right on narrow builds for NumPy unicode scalars. */ - -/*NUMPY_API - Cast Scalar to c-type -*/ static int PyArray_CastScalarToCtype(PyObject *scalar, void *ctypeptr, PyArray_Descr *outcode) @@ -226,7 +251,9 @@ descr = PyArray_DescrFromScalar(scalar); castfunc = PyArray_GetCastFunc(descr, outcode->type_num); - if (castfunc == NULL) return -1; + if (castfunc == NULL) { + return -1; + } if (PyTypeNum_ISEXTENDED(descr->type_num) || PyTypeNum_ISEXTENDED(outcode->type_num)) { PyArrayObject *ain, *aout; @@ -242,7 +269,10 @@ 0, NULL, NULL, ctypeptr, CARRAY, NULL); - if (aout == NULL) {Py_DECREF(ain); return -1;} + if (aout == NULL) { + Py_DECREF(ain); + return -1; + } castfunc(ain->data, aout->data, 1, ain, aout); Py_DECREF(ain); Py_DECREF(aout); @@ -255,8 +285,8 @@ } /*NUMPY_API - Cast Scalar to c-type -*/ + * Cast Scalar to c-type + */ static int PyArray_CastScalarDirect(PyObject *scalar, PyArray_Descr *indescr, void *ctypeptr, int outtype) @@ -264,22 +294,24 @@ PyArray_VectorUnaryFunc* castfunc; void *ptr; castfunc = PyArray_GetCastFunc(indescr, outtype); - if (castfunc == NULL) return -1; + if (castfunc == NULL) { + return -1; + } ptr = scalar_value(scalar, indescr); castfunc(ptr, ctypeptr, 1, NULL, NULL); return 0; } -/* 0-dim array from array-scalar object */ -/* always contains a copy of the data - unless outcode is NULL, it is of void type and the referrer does - not own it either. -*/ - -/* steals reference to outcode */ /*NUMPY_API - Get 0-dim array from scalar -*/ + * Get 0-dim array from scalar + * + * 0-dim array from array-scalar object + * always contains a copy of the data + * unless outcode is NULL, it is of void type and the referrer does + * not own it either. + * + * steals reference to outcode + */ static PyObject * PyArray_FromScalar(PyObject *scalar, PyArray_Descr *outcode) { @@ -307,8 +339,10 @@ typecode, 0, NULL, NULL, NULL, 0, NULL); - if (r==NULL) {Py_XDECREF(outcode); return NULL;} - + if (r==NULL) { + Py_XDECREF(outcode); + return NULL; + } if (PyDataType_FLAGCHK(typecode, NPY_USE_SETITEM)) { if (typecode->f->setitem(scalar, PyArray_DATA(r), r) < 0) { Py_XDECREF(outcode); Py_DECREF(r); @@ -325,7 +359,8 @@ (PyArray_UCS4 *)PyArray_DATA(r), PyUnicode_GET_SIZE(scalar), PyArray_ITEMSIZE(r) >> 2); - } else + } + else #endif { memcpy(PyArray_DATA(r), memptr, PyArray_ITEMSIZE(r)); @@ -335,8 +370,9 @@ } finish: - if (outcode == NULL) return r; - + if (outcode == NULL) { + return r; + } if (outcode->type_num == typecode->type_num) { if (!PyTypeNum_ISEXTENDED(typecode->type_num) || (outcode->elsize == typecode->elsize)) @@ -350,10 +386,10 @@ } /*NUMPY_API - Get an Array Scalar From a Python Object - Returns NULL if unsuccessful but error is only - set if another error occurred. Currently only Numeric-like - object supported. + * Get an Array Scalar From a Python Object + * + * Returns NULL if unsuccessful but error is only set if another error occurred. + * Currently only Numeric-like object supported. */ static PyObject * PyArray_ScalarFromObject(PyObject *object) @@ -364,17 +400,23 @@ } if (PyInt_Check(object)) { ret = PyArrayScalar_New(Long); - if (ret == NULL) return NULL; + if (ret == NULL) { + return NULL; + } PyArrayScalar_VAL(ret, Long) = PyInt_AS_LONG(object); } else if (PyFloat_Check(object)) { ret = PyArrayScalar_New(Double); - if (ret == NULL) return NULL; + if (ret == NULL) { + return NULL; + } PyArrayScalar_VAL(ret, Double) = PyFloat_AS_DOUBLE(object); } else if (PyComplex_Check(object)) { ret = PyArrayScalar_New(CDouble); - if (ret == NULL) return NULL; + if (ret == NULL) { + return NULL; + } PyArrayScalar_VAL(ret, CDouble).real = ((PyComplexObject *)object)->cval.real; PyArrayScalar_VAL(ret, CDouble).imag = @@ -388,7 +430,9 @@ return NULL; } ret = PyArrayScalar_New(LongLong); - if (ret == NULL) return NULL; + if (ret == NULL) { + return NULL; + } PyArrayScalar_VAL(ret, LongLong) = val; } else if (PyBool_Check(object)) { @@ -407,14 +451,16 @@ gentype_alloc(PyTypeObject *type, Py_ssize_t nitems) { PyObject *obj; - const size_t size = _PyObject_VAR_SIZE(type, nitems+1); + const size_t size = _PyObject_VAR_SIZE(type, nitems + 1); obj = (PyObject *)_pya_malloc(size); memset(obj, 0, size); - if (type->tp_itemsize == 0) + if (type->tp_itemsize == 0) { PyObject_INIT(obj, type); - else + } + else { (void) PyObject_INIT_VAR((PyVarObject *)obj, type, nitems); + } return obj; } @@ -433,8 +479,7 @@ if (!PyArray_IsScalar(m1,Generic)) { if (PyArray_Check(m1)) { - ret = m1->ob_type->tp_as_number->nb_power(m1,m2, - Py_None); + ret = m1->ob_type->tp_as_number->nb_power(m1,m2, Py_None); } else { if (!PyArray_IsScalar(m2,Generic)) { @@ -442,17 +487,17 @@ return NULL; } arr = PyArray_FromScalar(m2, NULL); - if (arr == NULL) return NULL; - ret = arr->ob_type->tp_as_number->nb_power(m1, arr, - Py_None); + if (arr == NULL) { + return NULL; + } + ret = arr->ob_type->tp_as_number->nb_power(m1, arr, Py_None); Py_DECREF(arr); } return ret; } if (!PyArray_IsScalar(m2, Generic)) { if (PyArray_Check(m2)) { - ret = m2->ob_type->tp_as_number->nb_power(m1,m2, - Py_None); + ret = m2->ob_type->tp_as_number->nb_power(m1,m2, Py_None); } else { if (!PyArray_IsScalar(m1, Generic)) { @@ -460,18 +505,21 @@ return NULL; } arr = PyArray_FromScalar(m1, NULL); - if (arr == NULL) return NULL; - ret = arr->ob_type->tp_as_number->nb_power(arr, m2, - Py_None); + if (arr == NULL) { + return NULL; + } + ret = arr->ob_type->tp_as_number->nb_power(arr, m2, Py_None); Py_DECREF(arr); } return ret; } - arr=arg2=NULL; + arr = arg2 = NULL; arr = PyArray_FromScalar(m1, NULL); arg2 = PyArray_FromScalar(m2, NULL); if (arr == NULL || arg2 == NULL) { - Py_XDECREF(arr); Py_XDECREF(arg2); return NULL; + Py_XDECREF(arr); + Py_XDECREF(arg2); + return NULL; } ret = arr->ob_type->tp_as_number->nb_power(arr, arg2, Py_None); Py_DECREF(arr); @@ -486,26 +534,35 @@ PyObject *arr, *meth, *ret; arr = PyArray_FromScalar(self, NULL); - if (arr == NULL) return NULL; + if (arr == NULL) { + return NULL; + } meth = PyObject_GetAttrString(arr, str); - if (meth == NULL) {Py_DECREF(arr); return NULL;} - if (kwds == NULL) + if (meth == NULL) { + Py_DECREF(arr); + return NULL; + } + if (kwds == NULL) { ret = PyObject_CallObject(meth, args); - else + } + else { ret = PyObject_Call(meth, args, kwds); + } Py_DECREF(meth); Py_DECREF(arr); - if (ret && PyArray_Check(ret)) + if (ret && PyArray_Check(ret)) { return PyArray_Return((PyArrayObject *)ret); - else + } + else { return ret; + } } /**begin repeat * - * #name=add, subtract, divide, remainder, divmod, lshift, rshift, and, xor, or, floor_divide, true_divide# + * #name = add, subtract, divide, remainder, divmod, lshift, rshift, + * and, xor, or, floor_divide, true_divide# */ - static PyObject * gentype_ at name@(PyObject *m1, PyObject *m2) { @@ -518,28 +575,30 @@ static PyObject * gentype_multiply(PyObject *m1, PyObject *m2) { - PyObject *ret=NULL; + PyObject *ret = NULL; long repeat; if (!PyArray_IsScalar(m1, Generic) && ((m1->ob_type->tp_as_number == NULL) || (m1->ob_type->tp_as_number->nb_multiply == NULL))) { - /* Try to convert m2 to an int and try sequence - repeat */ + /* Try to convert m2 to an int and try sequence repeat */ repeat = PyInt_AsLong(m2); - if (repeat == -1 && PyErr_Occurred()) return NULL; + if (repeat == -1 && PyErr_Occurred()) { + return NULL; + } ret = PySequence_Repeat(m1, (int) repeat); } else if (!PyArray_IsScalar(m2, Generic) && ((m2->ob_type->tp_as_number == NULL) || (m2->ob_type->tp_as_number->nb_multiply == NULL))) { - /* Try to convert m1 to an int and try sequence - repeat */ + /* Try to convert m1 to an int and try sequence repeat */ repeat = PyInt_AsLong(m1); - if (repeat == -1 && PyErr_Occurred()) return NULL; + if (repeat == -1 && PyErr_Occurred()) { + return NULL; + } ret = PySequence_Repeat(m2, (int) repeat); } - if (ret==NULL) { + if (ret == NULL) { PyErr_Clear(); /* no effect if not set */ ret = PyArray_Type.tp_as_number->nb_multiply(m1, m2); } @@ -547,17 +606,18 @@ } /**begin repeat - -#name=positive, negative, absolute, invert, int, long, float, oct, hex# -*/ - + * + * #name=positive, negative, absolute, invert, int, long, float, oct, hex# + */ static PyObject * gentype_ at name@(PyObject *m1) { PyObject *arr, *ret; arr = PyArray_FromScalar(m1, NULL); - if (arr == NULL) return NULL; + if (arr == NULL) { + return NULL; + } ret = arr->ob_type->tp_as_number->nb_ at name@(arr); Py_DECREF(arr); return ret; @@ -571,7 +631,9 @@ int ret; arr = PyArray_FromScalar(m1, NULL); - if (arr == NULL) return -1; + if (arr == NULL) { + return -1; + } ret = arr->ob_type->tp_as_number->nb_nonzero(arr); Py_DECREF(arr); return ret; @@ -584,7 +646,9 @@ PyObject *ret; arr = (PyArrayObject *)PyArray_FromScalar(self, NULL); - if (arr==NULL) return NULL; + if (arr == NULL) { + return NULL; + } ret = PyObject_Str((PyObject *)arr); Py_DECREF(arr); return ret; @@ -598,29 +662,44 @@ PyObject *ret; arr = (PyArrayObject *)PyArray_FromScalar(self, NULL); - if (arr==NULL) return NULL; + if (arr == NULL) { + return NULL; + } ret = PyObject_Str((PyObject *)arr); Py_DECREF(arr); return ret; } +#ifdef FORCE_NO_LONG_DOUBLE_FORMATTING +#undef NPY_LONGDOUBLE_FMT +#define NPY_LONGDOUBLE_FMT NPY_DOUBLE_FMT +#endif + /**begin repeat - * #name=float, double, longdouble# - * #NAME=FLOAT, DOUBLE, LONGDOUBLE# + * #name = float, double, longdouble# + * #NAME = FLOAT, DOUBLE, LONGDOUBLE# + * #type = f, d, l# */ -#define FMT "%.*" NPY_ at NAME@_FMT -#define CFMT1 "%.*" NPY_ at NAME@_FMT "j" -#define CFMT2 "(%.*" NPY_ at NAME@_FMT "%+.*" NPY_ at NAME@_FMT "j)" +#define _FMT1 "%%.%i" NPY_ at NAME@_FMT +#define _FMT2 "%%+.%i" NPY_ at NAME@_FMT static void format_ at name@(char *buf, size_t buflen, @name@ val, unsigned int prec) { - int cnt, i; + /* XXX: Find a correct size here for format string */ + char format[64], *res; + int i, cnt; - cnt = PyOS_snprintf(buf, buflen, FMT, prec, val); + PyOS_snprintf(format, sizeof(format), _FMT1, prec); + res = NumPyOS_ascii_format at type@(buf, buflen, format, val, 0); + if (res == NULL) { + fprintf(stderr, "Error while formatting\n"); + return; + } /* If nothing but digits after sign, append ".0" */ + cnt = strlen(buf); for (i = (val < 0) ? 1 : 0; i < cnt; ++i) { if (!isdigit(Py_CHARMASK(buf[i]))) { break; @@ -634,33 +713,56 @@ static void format_c at name@(char *buf, size_t buflen, c at name@ val, unsigned int prec) { + /* XXX: Find a correct size here for format string */ + char format[64]; + char *res; if (val.real == 0.0) { - PyOS_snprintf(buf, buflen, CFMT1, prec, val.imag); + PyOS_snprintf(format, sizeof(format), _FMT1, prec); + res = NumPyOS_ascii_format at type@(buf, buflen-1, format, val.imag, 0); + if (res == NULL) { + fprintf(stderr, "Error while formatting\n"); + return; + } + strncat(buf, "j", 1); } else { - PyOS_snprintf(buf, buflen, CFMT2, prec, val.real, prec, val.imag); + char re[64], im[64]; + PyOS_snprintf(format, sizeof(format), _FMT1, prec); + res = NumPyOS_ascii_format at type@(re, sizeof(re), format, val.real, 0); + if (res == NULL) { + fprintf(stderr, "Error while formatting\n"); + return; + } + + PyOS_snprintf(format, sizeof(format), _FMT2, prec); + res = NumPyOS_ascii_format at type@(im, sizeof(im), format, val.imag, 0); + if (res == NULL) { + fprintf(stderr, "Error while formatting\n"); + return; + } + PyOS_snprintf(buf, buflen, "(%s%sj)", re, im); } } -#undef FMT -#undef CFMT1 -#undef CFMT2 +#undef _FMT1 +#undef _FMT2 /**end repeat**/ -/* over-ride repr and str of array-scalar strings and unicode to - remove NULL bytes and then call the corresponding functions - of string and unicode. +/* + * over-ride repr and str of array-scalar strings and unicode to + * remove NULL bytes and then call the corresponding functions + * of string and unicode. */ /**begin repeat -#name=string*2,unicode*2# -#form=(repr,str)*2# -#Name=String*2,Unicode*2# -#NAME=STRING*2,UNICODE*2# -#extra=AndSize*2,,# -#type=char*2, Py_UNICODE*2# -*/ + * #name = string*2,unicode*2# + * #form = (repr,str)*2# + * #Name = String*2,Unicode*2# + * #NAME = STRING*2,UNICODE*2# + * #extra = AndSize*2,,# + * #type = char*2, Py_UNICODE*2# + */ static PyObject * @name at type_@form@(PyObject *self) { @@ -672,9 +774,13 @@ ip = dptr = Py at Name@_AS_ at NAME@(self); len = Py at Name@_GET_SIZE(self); dptr += len-1; - while(len > 0 && *dptr-- == 0) len--; + while(len > 0 && *dptr-- == 0) { + len--; + } new = Py at Name@_From at Name@@extra@(ip, len); - if (new == NULL) return PyString_FromString(""); + if (new == NULL) { + return PyString_FromString(""); + } ret = Py at Name@_Type.tp_ at form@(new); Py_DECREF(new); return ret; @@ -699,10 +805,11 @@ * * These functions will return NULL if PyString creation fails. */ + /**begin repeat - * #name=float, double, longdouble# - * #Name=Float, Double, LongDouble# - * #NAME=FLOAT, DOUBLE, LONGDOUBLE# + * #name = float, double, longdouble# + * #Name = Float, Double, LongDouble# + * #NAME = FLOAT, DOUBLE, LONGDOUBLE# */ /**begin repeat1 * #kind = str, repr# @@ -736,20 +843,60 @@ /**end repeat1**/ /**end repeat**/ +/* + * float type print (control print a, where a is a float type instance) + */ +/**begin repeat + * #name = float, double, longdouble# + * #Name = Float, Double, LongDouble# + * #NAME = FLOAT, DOUBLE, LONGDOUBLE# + */ +static int + at name@type_print(PyObject *v, FILE *fp, int flags) +{ + char buf[100]; + @name@ val = ((Py at Name@ScalarObject *)v)->obval; + + format_ at name@(buf, sizeof(buf), val, + (flags & Py_PRINT_RAW) ? @NAME at PREC_STR : @NAME at PREC_REPR); + Py_BEGIN_ALLOW_THREADS + fputs(buf, fp); + Py_END_ALLOW_THREADS + return 0; +} + +static int +c at name@type_print(PyObject *v, FILE *fp, int flags) +{ + /* Size of buf: twice sizeof(real) + 2 (for the parenthesis) */ + char buf[202]; + c at name@ val = ((PyC at Name@ScalarObject *)v)->obval; + + format_c at name@(buf, sizeof(buf), val, + (flags & Py_PRINT_RAW) ? @NAME at PREC_STR : @NAME at PREC_REPR); + Py_BEGIN_ALLOW_THREADS + fputs(buf, fp); + Py_END_ALLOW_THREADS + return 0; +} + +/**end repeat**/ + + /* * Could improve this with a PyLong_FromLongDouble(longdouble ldval) * but this would need some more work... */ /**begin repeat - -#name=(int, long, hex, oct, float)*2# -#KIND=(Long*4, Float)*2# -#char=,,,,,c*5# -#CHAR=,,,,,C*5# -#POST=,,,,,.real*5# -*/ + * + * #name = (int, long, hex, oct, float)*2# + * #KIND = (Long*4, Float)*2# + * #char = ,,,,,c*5# + * #CHAR = ,,,,,C*5# + * #POST = ,,,,,.real*5# + */ static PyObject * @char at longdoubletype_@name@(PyObject *self) { @@ -766,46 +913,46 @@ static PyNumberMethods gentype_as_number = { - (binaryfunc)gentype_add, /*nb_add*/ - (binaryfunc)gentype_subtract, /*nb_subtract*/ - (binaryfunc)gentype_multiply, /*nb_multiply*/ - (binaryfunc)gentype_divide, /*nb_divide*/ - (binaryfunc)gentype_remainder, /*nb_remainder*/ - (binaryfunc)gentype_divmod, /*nb_divmod*/ - (ternaryfunc)gentype_power, /*nb_power*/ + (binaryfunc)gentype_add, /*nb_add*/ + (binaryfunc)gentype_subtract, /*nb_subtract*/ + (binaryfunc)gentype_multiply, /*nb_multiply*/ + (binaryfunc)gentype_divide, /*nb_divide*/ + (binaryfunc)gentype_remainder, /*nb_remainder*/ + (binaryfunc)gentype_divmod, /*nb_divmod*/ + (ternaryfunc)gentype_power, /*nb_power*/ (unaryfunc)gentype_negative, - (unaryfunc)gentype_positive, /*nb_pos*/ - (unaryfunc)gentype_absolute, /*(unaryfunc)gentype_abs,*/ - (inquiry)gentype_nonzero_number, /*nb_nonzero*/ - (unaryfunc)gentype_invert, /*nb_invert*/ - (binaryfunc)gentype_lshift, /*nb_lshift*/ - (binaryfunc)gentype_rshift, /*nb_rshift*/ - (binaryfunc)gentype_and, /*nb_and*/ - (binaryfunc)gentype_xor, /*nb_xor*/ - (binaryfunc)gentype_or, /*nb_or*/ - 0, /*nb_coerce*/ - (unaryfunc)gentype_int, /*nb_int*/ - (unaryfunc)gentype_long, /*nb_long*/ - (unaryfunc)gentype_float, /*nb_float*/ - (unaryfunc)gentype_oct, /*nb_oct*/ - (unaryfunc)gentype_hex, /*nb_hex*/ - 0, /*inplace_add*/ - 0, /*inplace_subtract*/ - 0, /*inplace_multiply*/ - 0, /*inplace_divide*/ - 0, /*inplace_remainder*/ - 0, /*inplace_power*/ - 0, /*inplace_lshift*/ - 0, /*inplace_rshift*/ - 0, /*inplace_and*/ - 0, /*inplace_xor*/ - 0, /*inplace_or*/ - (binaryfunc)gentype_floor_divide, /*nb_floor_divide*/ - (binaryfunc)gentype_true_divide, /*nb_true_divide*/ - 0, /*nb_inplace_floor_divide*/ - 0, /*nb_inplace_true_divide*/ + (unaryfunc)gentype_positive, /*nb_pos*/ + (unaryfunc)gentype_absolute, /*(unaryfunc)gentype_abs,*/ + (inquiry)gentype_nonzero_number, /*nb_nonzero*/ + (unaryfunc)gentype_invert, /*nb_invert*/ + (binaryfunc)gentype_lshift, /*nb_lshift*/ + (binaryfunc)gentype_rshift, /*nb_rshift*/ + (binaryfunc)gentype_and, /*nb_and*/ + (binaryfunc)gentype_xor, /*nb_xor*/ + (binaryfunc)gentype_or, /*nb_or*/ + 0, /*nb_coerce*/ + (unaryfunc)gentype_int, /*nb_int*/ + (unaryfunc)gentype_long, /*nb_long*/ + (unaryfunc)gentype_float, /*nb_float*/ + (unaryfunc)gentype_oct, /*nb_oct*/ + (unaryfunc)gentype_hex, /*nb_hex*/ + 0, /*inplace_add*/ + 0, /*inplace_subtract*/ + 0, /*inplace_multiply*/ + 0, /*inplace_divide*/ + 0, /*inplace_remainder*/ + 0, /*inplace_power*/ + 0, /*inplace_lshift*/ + 0, /*inplace_rshift*/ + 0, /*inplace_and*/ + 0, /*inplace_xor*/ + 0, /*inplace_or*/ + (binaryfunc)gentype_floor_divide, /*nb_floor_divide*/ + (binaryfunc)gentype_true_divide, /*nb_true_divide*/ + 0, /*nb_inplace_floor_divide*/ + 0, /*nb_inplace_true_divide*/ #if PY_VERSION_HEX >= 0x02050000 - (unaryfunc)NULL, /*nb_index*/ + (unaryfunc)NULL, /*nb_index*/ #endif }; @@ -816,7 +963,9 @@ PyObject *arr, *ret; arr = PyArray_FromScalar(self, NULL); - if (arr == NULL) return NULL; + if (arr == NULL) { + return NULL; + } ret = arr->ob_type->tp_richcompare(arr, other, cmp_op); Py_DECREF(arr); return ret; @@ -839,7 +988,9 @@ { PyObject *flagobj; flagobj = PyArrayFlags_Type.tp_alloc(&PyArrayFlags_Type, 0); - if (flagobj == NULL) return NULL; + if (flagobj == NULL) { + return NULL; + } ((PyArrayFlagsObject *)flagobj)->arr = NULL; ((PyArrayFlagsObject *)flagobj)->flags = self->flags; return flagobj; @@ -938,9 +1089,13 @@ PyObject *inter; arr = (PyArrayObject *)PyArray_FromScalar(self, NULL); - if (arr == NULL) return NULL; + if (arr == NULL) { + return NULL; + } inter = PyObject_GetAttrString((PyObject *)arr, "__array_interface__"); - if (inter != NULL) PyDict_SetItemString(inter, "__ref", (PyObject *)arr); + if (inter != NULL) { + PyDict_SetItemString(inter, "__ref", (PyObject *)arr); + } Py_DECREF(arr); return inter; } @@ -998,7 +1153,9 @@ else if (PyArray_IsScalar(self, Object)) { PyObject *obj = ((PyObjectScalarObject *)self)->obval; ret = PyObject_GetAttrString(obj, "real"); - if (ret != NULL) return ret; + if (ret != NULL) { + return ret; + } PyErr_Clear(); } Py_INCREF(self); @@ -1016,8 +1173,7 @@ char *ptr; typecode = _realdescr_fromcomplexscalar(self, &typenum); ptr = (char *)scalar_value(self, NULL); - ret = PyArray_Scalar(ptr + typecode->elsize, - typecode, NULL); + ret = PyArray_Scalar(ptr + typecode->elsize, typecode, NULL); } else if (PyArray_IsScalar(self, Object)) { PyObject *obj = ((PyObjectScalarObject *)self)->obval; @@ -1053,7 +1209,9 @@ PyObject *ret, *arr; arr = PyArray_FromScalar(self, NULL); - if (arr == NULL) return NULL; + if (arr == NULL) { + return NULL; + } ret = PyArray_IterNew(arr); Py_DECREF(arr); return ret; @@ -1201,10 +1359,11 @@ /**begin repeat - -#name=tolist, item, tostring, astype, copy, __deepcopy__, searchsorted, view, swapaxes, conj, conjugate, nonzero, flatten, ravel, fill, transpose, newbyteorder# -*/ - + * + * #name = tolist, item, tostring, astype, copy, __deepcopy__, searchsorted, + * view, swapaxes, conj, conjugate, nonzero, flatten, ravel, fill, + * transpose, newbyteorder# + */ static PyObject * gentype_ at name@(PyObject *self, PyObject *args) { @@ -1222,7 +1381,9 @@ static PyObject * gentype_squeeze(PyObject *self, PyObject *args) { - if (!PyArg_ParseTuple(args, "")) return NULL; + if (!PyArg_ParseTuple(args, "")) { + return NULL; + } Py_INCREF(self); return self; } @@ -1235,17 +1396,16 @@ { Bool inplace=FALSE; - if (!PyArg_ParseTuple(args, "|O&", PyArray_BoolConverter, &inplace)) + if (!PyArg_ParseTuple(args, "|O&", PyArray_BoolConverter, &inplace)) { return NULL; - + } if (inplace) { PyErr_SetString(PyExc_ValueError, "cannot byteswap a scalar in-place"); return NULL; } else { - /* get the data, copyswap it and pass it to a new Array scalar - */ + /* get the data, copyswap it and pass it to a new Array scalar */ char *data; int numbytes; PyArray_Descr *descr; @@ -1255,8 +1415,13 @@ numbytes = gentype_getreadbuf(self, 0, (void **)&data); descr = PyArray_DescrFromScalar(self); newmem = _pya_malloc(descr->elsize); - if (newmem == NULL) {Py_DECREF(descr); return PyErr_NoMemory();} - else memcpy(newmem, data, descr->elsize); + if (newmem == NULL) { + Py_DECREF(descr); + return PyErr_NoMemory(); + } + else { + memcpy(newmem, data, descr->elsize); + } byte_swap_vector(newmem, 1, descr->elsize); new = PyArray_Scalar(newmem, descr, NULL); _pya_free(newmem); @@ -1267,10 +1432,12 @@ /**begin repeat - -#name=take, getfield, put, repeat, tofile, mean, trace, diagonal, clip, std, var, sum, cumsum, prod, cumprod, compress, sort, argsort, round, argmax, argmin, max, min, ptp, any, all, resize, reshape, choose# -*/ - + * + * #name = take, getfield, put, repeat, tofile, mean, trace, diagonal, clip, + * std, var, sum, cumsum, prod, cumprod, compress, sort, argsort, + * round, argmax, argmin, max, min, ptp, any, all, resize, reshape, + * choose# + */ static PyObject * gentype_ at name@(PyObject *self, PyObject *args, PyObject *kwds) { @@ -1284,7 +1451,9 @@ PyObject *ret; ret = gentype_generic_method((PyObject *)self, args, kwds, "getfield"); - if (!ret) return ret; + if (!ret) { + return ret; + } if (PyArray_IsScalar(ret, Generic) && \ (!PyArray_IsScalar(ret, Void))) { PyArray_Descr *new; @@ -1310,7 +1479,7 @@ static PyObject * voidtype_setfield(PyVoidScalarObject *self, PyObject *args, PyObject *kwds) { - PyArray_Descr *typecode=NULL; + PyArray_Descr *typecode = NULL; int offset = 0; PyObject *value, *src; int mysize; @@ -1318,8 +1487,7 @@ static char *kwlist[] = {"value", "dtype", "offset", 0}; if ((self->flags & WRITEABLE) != WRITEABLE) { - PyErr_SetString(PyExc_RuntimeError, - "Can't write to memory"); + PyErr_SetString(PyExc_RuntimeError, "Can't write to memory"); return NULL; } if (!PyArg_ParseTupleAndKeywords(args, kwds, "OO&|i", kwlist, @@ -1354,7 +1522,9 @@ else { /* Copy data from value to correct place in dptr */ src = PyArray_FromAny(value, typecode, 0, 0, CARRAY, NULL); - if (src == NULL) return NULL; + if (src == NULL) { + return NULL; + } typecode->f->copyswap(dptr, PyArray_DATA(src), !PyArray_ISNBO(self->descr->byteorder), src); @@ -1368,38 +1538,44 @@ static PyObject * gentype_reduce(PyObject *self, PyObject *NPY_UNUSED(args)) { - PyObject *ret=NULL, *obj=NULL, *mod=NULL; + PyObject *ret = NULL, *obj = NULL, *mod = NULL; const char *buffer; Py_ssize_t buflen; /* Return a tuple of (callable object, arguments) */ - ret = PyTuple_New(2); - if (ret == NULL) return NULL; + if (ret == NULL) { + return NULL; + } if (PyObject_AsReadBuffer(self, (const void **)&buffer, &buflen)<0) { - Py_DECREF(ret); return NULL; + Py_DECREF(ret); + return NULL; } mod = PyImport_ImportModule("numpy.core.multiarray"); - if (mod == NULL) return NULL; + if (mod == NULL) { + return NULL; + } obj = PyObject_GetAttrString(mod, "scalar"); Py_DECREF(mod); - if (obj == NULL) return NULL; + if (obj == NULL) { + return NULL; + } PyTuple_SET_ITEM(ret, 0, obj); obj = PyObject_GetAttrString((PyObject *)self, "dtype"); if (PyArray_IsScalar(self, Object)) { mod = ((PyObjectScalarObject *)self)->obval; - PyTuple_SET_ITEM(ret, 1, - Py_BuildValue("NO", obj, mod)); + PyTuple_SET_ITEM(ret, 1, Py_BuildValue("NO", obj, mod)); } else { #ifndef Py_UNICODE_WIDE - /* We need to expand the buffer so that we always write - UCS4 to disk for pickle of unicode scalars. - - This could be in a unicode_reduce function, but - that would require re-factoring. - */ - int alloc=0; + /* + * We need to expand the buffer so that we always write + * UCS4 to disk for pickle of unicode scalars. + * + * This could be in a unicode_reduce function, but + * that would require re-factoring. + */ + int alloc = 0; char *tmp; int newlen; @@ -1448,13 +1624,16 @@ static PyObject * gentype_dump(PyObject *self, PyObject *args) { - PyObject *file=NULL; + PyObject *file = NULL; int ret; - if (!PyArg_ParseTuple(args, "O", &file)) + if (!PyArg_ParseTuple(args, "O", &file)) { return NULL; + } ret = PyArray_Dump(self, file, 2); - if (ret < 0) return NULL; + if (ret < 0) { + return NULL; + } Py_INCREF(Py_None); return Py_None; } @@ -1462,15 +1641,17 @@ static PyObject * gentype_dumps(PyObject *self, PyObject *args) { - if (!PyArg_ParseTuple(args, "")) + if (!PyArg_ParseTuple(args, "")) { return NULL; + } return PyArray_Dumps(self, 2); } /* setting flags cannot be done for scalars */ static PyObject * -gentype_setflags(PyObject *NPY_UNUSED(self), PyObject *NPY_UNUSED(args), PyObject *NPY_UNUSED(kwds)) +gentype_setflags(PyObject *NPY_UNUSED(self), PyObject *NPY_UNUSED(args), + PyObject *NPY_UNUSED(kwds)) { Py_INCREF(Py_None); return Py_None; @@ -1698,7 +1879,9 @@ } flist = self->descr->names; m = PyTuple_GET_SIZE(flist); - if (n < 0) n += m; + if (n < 0) { + n += m; + } if (n < 0 || n >= m) { PyErr_Format(PyExc_IndexError, "invalid index (%d)", (int) n); return NULL; @@ -1725,14 +1908,17 @@ if (PyString_Check(ind) || PyUnicode_Check(ind)) { /* look up in fields */ fieldinfo = PyDict_GetItem(self->descr->fields, ind); - if (!fieldinfo) goto fail; + if (!fieldinfo) { + goto fail; + } return voidtype_getfield(self, fieldinfo, NULL); } /* try to convert it to a number */ n = PyArray_PyIntAsIntp(ind); - if (error_converting(n)) goto fail; - + if (error_converting(n)) { + goto fail; + } return voidtype_item(self, (Py_ssize_t)n); fail: @@ -1755,8 +1941,12 @@ flist = self->descr->names; m = PyTuple_GET_SIZE(flist); - if (n < 0) n += m; - if (n < 0 || n >= m) goto fail; + if (n < 0) { + n += m; + } + if (n < 0 || n >= m) { + goto fail; + } fieldinfo = PyDict_GetItem(self->descr->fields, PyTuple_GET_ITEM(flist, n)); newtup = Py_BuildValue("(OOO)", val, @@ -1764,7 +1954,9 @@ PyTuple_GET_ITEM(fieldinfo, 1)); res = voidtype_setfield(self, newtup, NULL); Py_DECREF(newtup); - if (!res) return -1; + if (!res) { + return -1; + } Py_DECREF(res); return 0; @@ -1790,20 +1982,26 @@ if (PyString_Check(ind) || PyUnicode_Check(ind)) { /* look up in fields */ fieldinfo = PyDict_GetItem(self->descr->fields, ind); - if (!fieldinfo) goto fail; + if (!fieldinfo) { + goto fail; + } newtup = Py_BuildValue("(OOO)", val, PyTuple_GET_ITEM(fieldinfo, 0), PyTuple_GET_ITEM(fieldinfo, 1)); res = voidtype_setfield(self, newtup, NULL); Py_DECREF(newtup); - if (!res) return -1; + if (!res) { + return -1; + } Py_DECREF(res); return 0; } /* try to convert it to a number */ n = PyArray_PyIntAsIntp(ind); - if (error_converting(n)) goto fail; + if (error_converting(n)) { + goto fail; + } return voidtype_ass_item(self, (Py_ssize_t)n, val); fail: @@ -1813,35 +2011,35 @@ static PyMappingMethods voidtype_as_mapping = { #if PY_VERSION_HEX >= 0x02050000 - (lenfunc)voidtype_length, /*mp_length*/ + (lenfunc)voidtype_length, /*mp_length*/ #else - (inquiry)voidtype_length, /*mp_length*/ + (inquiry)voidtype_length, /*mp_length*/ #endif - (binaryfunc)voidtype_subscript, /*mp_subscript*/ - (objobjargproc)voidtype_ass_subscript, /*mp_ass_subscript*/ + (binaryfunc)voidtype_subscript, /*mp_subscript*/ + (objobjargproc)voidtype_ass_subscript, /*mp_ass_subscript*/ }; static PySequenceMethods voidtype_as_sequence = { #if PY_VERSION_HEX >= 0x02050000 - (lenfunc)voidtype_length, /*sq_length*/ - 0, /*sq_concat*/ - 0, /*sq_repeat*/ - (ssizeargfunc)voidtype_item, /*sq_item*/ - 0, /*sq_slice*/ - (ssizeobjargproc)voidtype_ass_item, /*sq_ass_item*/ + (lenfunc)voidtype_length, /*sq_length*/ + 0, /*sq_concat*/ + 0, /*sq_repeat*/ + (ssizeargfunc)voidtype_item, /*sq_item*/ + 0, /*sq_slice*/ + (ssizeobjargproc)voidtype_ass_item, /*sq_ass_item*/ #else - (inquiry)voidtype_length, /*sq_length*/ - 0, /*sq_concat*/ - 0, /*sq_repeat*/ - (intargfunc)voidtype_item, /*sq_item*/ - 0, /*sq_slice*/ - (intobjargproc)voidtype_ass_item, /*sq_ass_item*/ + (inquiry)voidtype_length, /*sq_length*/ + 0, /*sq_concat*/ + 0, /*sq_repeat*/ + (intargfunc)voidtype_item, /*sq_item*/ + 0, /*sq_slice*/ + (intobjargproc)voidtype_ass_item, /*sq_ass_item*/ #endif - 0, /* ssq_ass_slice */ - 0, /* sq_contains */ - 0, /* sq_inplace_concat */ - 0, /* sq_inplace_repeat */ + 0, /* ssq_ass_slice */ + 0, /* sq_contains */ + 0, /* sq_inplace_concat */ + 0, /* sq_inplace_repeat */ }; @@ -1892,9 +2090,10 @@ static Py_ssize_t gentype_getcharbuf(PyObject *self, Py_ssize_t segment, constchar **ptrptr) { - if (PyArray_IsScalar(self, String) || \ - PyArray_IsScalar(self, Unicode)) + if (PyArray_IsScalar(self, String) || + PyArray_IsScalar(self, Unicode)) { return gentype_getreadbuf(self, segment, (void **)ptrptr); + } else { PyErr_SetString(PyExc_TypeError, "Non-character array cannot be interpreted "\ @@ -1905,10 +2104,10 @@ static PyBufferProcs gentype_as_buffer = { - gentype_getreadbuf, /*bf_getreadbuffer*/ - NULL, /*bf_getwritebuffer*/ - gentype_getsegcount, /*bf_getsegcount*/ - gentype_getcharbuf, /*bf_getcharbuffer*/ + gentype_getreadbuf, /* bf_getreadbuffer*/ + NULL, /* bf_getwritebuffer*/ + gentype_getsegcount, /* bf_getsegcount*/ + gentype_getcharbuf, /* bf_getcharbuffer*/ }; @@ -1917,69 +2116,70 @@ static PyTypeObject PyGenericArrType_Type = { PyObject_HEAD_INIT(NULL) - 0, /*ob_size*/ - "numpy.generic", /*tp_name*/ - sizeof(PyObject), /*tp_basicsize*/ - 0, /* tp_itemsize */ + 0, /* ob_size*/ + "numpy.generic", /* tp_name*/ + sizeof(PyObject), /* tp_basicsize*/ + 0, /* tp_itemsize */ /* methods */ - 0, /* tp_dealloc */ - 0, /* tp_print */ - 0, /* tp_getattr */ - 0, /* tp_setattr */ - 0, /* tp_compare */ - 0, /* tp_repr */ - 0, /* tp_as_number */ - 0, /* tp_as_sequence */ - 0, /* tp_as_mapping */ - 0, /* tp_hash */ - 0, /* tp_call */ - 0, /* tp_str */ - 0, /* tp_getattro */ - 0, /* tp_setattro */ - 0, /* tp_as_buffer */ - 0, /* tp_flags */ - 0, /* tp_doc */ - 0, /* tp_traverse */ - 0, /* tp_clear */ - 0, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ - 0, /* tp_iter */ - 0, /* tp_iternext */ - 0, /* tp_methods */ - 0, /* tp_members */ - 0, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - 0, /* tp_init */ - 0, /* tp_alloc */ - 0, /* tp_new */ - 0, /* tp_free */ - 0, /* tp_is_gc */ - 0, /* tp_bases */ - 0, /* tp_mro */ - 0, /* tp_cache */ - 0, /* tp_subclasses */ - 0, /* tp_weaklist */ - 0, /* tp_del */ + 0, /* tp_dealloc */ + 0, /* tp_print */ + 0, /* tp_getattr */ + 0, /* tp_setattr */ + 0, /* tp_compare */ + 0, /* tp_repr */ + 0, /* tp_as_number */ + 0, /* tp_as_sequence */ + 0, /* tp_as_mapping */ + 0, /* tp_hash */ + 0, /* tp_call */ + 0, /* tp_str */ + 0, /* tp_getattro */ + 0, /* tp_setattro */ + 0, /* tp_as_buffer */ + 0, /* tp_flags */ + 0, /* tp_doc */ + 0, /* tp_traverse */ + 0, /* tp_clear */ + 0, /* tp_richcompare */ + 0, /* tp_weaklistoffset */ + 0, /* tp_iter */ + 0, /* tp_iternext */ + 0, /* tp_methods */ + 0, /* tp_members */ + 0, /* tp_getset */ + 0, /* tp_base */ + 0, /* tp_dict */ + 0, /* tp_descr_get */ + 0, /* tp_descr_set */ + 0, /* tp_dictoffset */ + 0, /* tp_init */ + 0, /* tp_alloc */ + 0, /* tp_new */ + 0, /* tp_free */ + 0, /* tp_is_gc */ + 0, /* tp_bases */ + 0, /* tp_mro */ + 0, /* tp_cache */ + 0, /* tp_subclasses */ + 0, /* tp_weaklist */ + 0, /* tp_del */ #ifdef COUNT_ALLOCS - /* these must be last and never explicitly initialized */ - 0, /* tp_allocs */ - 0, /* tp_frees */ - 0, /* tp_maxalloc */ - 0, /* tp_prev */ - 0, /* *tp_next */ + /* these must be last and never explicitly initialized */ + 0, /* tp_allocs */ + 0, /* tp_frees */ + 0, /* tp_maxalloc */ + 0, /* tp_prev */ + 0, /* *tp_next */ #endif }; static void void_dealloc(PyVoidScalarObject *v) { - if (v->flags & OWNDATA) + if (v->flags & OWNDATA) { PyDataMem_FREE(v->obval); + } Py_XDECREF(v->descr); Py_XDECREF(v->base); v->ob_type->tp_free(v); @@ -1992,11 +2192,13 @@ v->ob_type->tp_free(v); } -/* string and unicode inherit from Python Type first and so GET_ITEM is different to get to the Python Type. +/* + * string and unicode inherit from Python Type first and so GET_ITEM + * is different to get to the Python Type. + * + * ok is a work-around for a bug in complex_new that doesn't allocate + * memory from the sub-types memory allocator. */ -/* ok is a work-around for a bug in complex_new that doesn't allocate - memory from the sub-types memory allocator. -*/ #define _WORK(num) \ if (type->tp_bases && (PyTuple_GET_SIZE(type->tp_bases)==2)) { \ @@ -2015,14 +2217,18 @@ #define _WORKz _WORK(0) #define _WORK0 -/**begin repeat1 -#name=byte, short, int, long, longlong, ubyte, ushort, uint, ulong, ulonglong, float, double, longdouble, cfloat, cdouble, clongdouble, string, unicode, object# -#TYPE=BYTE, SHORT, INT, LONG, LONGLONG, UBYTE, USHORT, UINT, ULONG, ULONGLONG, FLOAT, DOUBLE, LONGDOUBLE, CFLOAT, CDOUBLE, CLONGDOUBLE, STRING, UNICODE, OBJECT# -#work=0,0,1,1,1,0,0,0,0,0,0,1,0,0,0,0,z,z,0# -#default=0*16,1*2,2# -*/ +/**begin repeat + * #name = byte, short, int, long, longlong, ubyte, ushort, uint, ulong, + * ulonglong, float, double, longdouble, cfloat, cdouble, clongdouble, + * string, unicode, object# + * #TYPE = BYTE, SHORT, INT, LONG, LONGLONG, UBYTE, USHORT, UINT, ULONG, + * ULONGLONG, FLOAT, DOUBLE, LONGDOUBLE, CFLOAT, CDOUBLE, CLONGDOUBLE, + * STRING, UNICODE, OBJECT# + * #work = 0,0,1,1,1,0,0,0,0,0,0,1,0,0,0,0,z,z,0# + * #default = 0*16,1*2,2# + */ -#define _NPY_UNUSED2_1 +#define _NPY_UNUSED2_1 #define _NPY_UNUSED2_z #define _NPY_UNUSED2_0 NPY_UNUSED #define _NPY_UNUSED1_0 @@ -2041,17 +2247,20 @@ void *dest, *src; #endif - /* allow base-class (if any) to do conversion */ - /* If successful, this will jump to finish: */ + /* + * allow base-class (if any) to do conversion + * If successful, this will jump to finish: + */ _WORK at work@ if (!PyArg_ParseTuple(args, "|O", &obj)) { return NULL; } typecode = PyArray_DescrFromType(PyArray_ at TYPE@); - /* typecode is new reference and stolen by - PyArray_FromAny but not PyArray_Scalar - */ + /* + * typecode is new reference and stolen by + * PyArray_FromAny but not PyArray_Scalar + */ if (obj == NULL) { #if @default@ == 0 char *mem = malloc(sizeof(@name@)); @@ -2062,30 +2271,32 @@ #elif @default@ == 1 robj = PyArray_Scalar(NULL, typecode, NULL); #elif @default@ == 2 - Py_INCREF(Py_None); - robj = Py_None; + Py_INCREF(Py_None); + robj = Py_None; #endif - Py_DECREF(typecode); + Py_DECREF(typecode); goto finish; } - /* It is expected at this point that robj is a PyArrayScalar - (even for Object Data Type) - */ + /* + * It is expected at this point that robj is a PyArrayScalar + * (even for Object Data Type) + */ arr = PyArray_FromAny(obj, typecode, 0, 0, FORCECAST, NULL); if ((arr == NULL) || (PyArray_NDIM(arr) > 0)) { return arr; } /* 0-d array */ robj = PyArray_ToScalar(PyArray_DATA(arr), (NPY_AO *)arr); - Py_DECREF(arr); + Py_DECREF(arr); finish: - -#if @default@ == 2 /* In OBJECT case, robj is no longer a - PyArrayScalar at this point but the - remaining code assumes it is - */ + /* + * In OBJECT case, robj is no longer a + * PyArrayScalar at this point but the + * remaining code assumes it is + */ +#if @default@ == 2 return robj; #else /* Normal return */ @@ -2093,9 +2304,11 @@ return robj; } - /* This return path occurs when the requested type is not created - but another scalar object is created instead (i.e. when - the base-class does the conversion in _WORK macro) */ + /* + * This return path occurs when the requested type is not created + * but another scalar object is created instead (i.e. when + * the base-class does the conversion in _WORK macro) + */ /* Need to allocate new type and copy data-area over */ if (type->tp_itemsize) { @@ -2118,7 +2331,7 @@ *((npy_ at name@ *)dest) = *((npy_ at name@ *)src); #elif @default@ == 1 /* unicode and strings */ if (itemsize == 0) { /* unicode */ - itemsize = ((PyUnicodeObject *)robj)->length * sizeof(Py_UNICODE); + itemsize = ((PyUnicodeObject *)robj)->length * sizeof(Py_UNICODE); } memcpy(dest, src, itemsize); /* @default@ == 2 won't get here */ @@ -2138,16 +2351,21 @@ static PyObject * bool_arrtype_new(PyTypeObject *NPY_UNUSED(type), PyObject *args, PyObject *NPY_UNUSED(kwds)) { - PyObject *obj=NULL; + PyObject *obj = NULL; PyObject *arr; - if (!PyArg_ParseTuple(args, "|O", &obj)) return NULL; - if (obj == NULL) + if (!PyArg_ParseTuple(args, "|O", &obj)) { + return NULL; + } + if (obj == NULL) { PyArrayScalar_RETURN_FALSE; - if (obj == Py_False) + } + if (obj == Py_False) { PyArrayScalar_RETURN_FALSE; - if (obj == Py_True) + } + if (obj == Py_True) { PyArrayScalar_RETURN_TRUE; + } arr = PyArray_FROM_OTF(obj, PyArray_BOOL, FORCECAST); if (arr && 0 == PyArray_NDIM(arr)) { Bool val = *((Bool *)PyArray_DATA(arr)); @@ -2160,27 +2378,30 @@ static PyObject * bool_arrtype_and(PyObject *a, PyObject *b) { - if (PyArray_IsScalar(a, Bool) && PyArray_IsScalar(b, Bool)) + if (PyArray_IsScalar(a, Bool) && PyArray_IsScalar(b, Bool)) { PyArrayScalar_RETURN_BOOL_FROM_LONG ((a == PyArrayScalar_True)&(b == PyArrayScalar_True)); + } return PyGenericArrType_Type.tp_as_number->nb_and(a, b); } static PyObject * bool_arrtype_or(PyObject *a, PyObject *b) { - if (PyArray_IsScalar(a, Bool) && PyArray_IsScalar(b, Bool)) + if (PyArray_IsScalar(a, Bool) && PyArray_IsScalar(b, Bool)) { PyArrayScalar_RETURN_BOOL_FROM_LONG ((a == PyArrayScalar_True)|(b == PyArrayScalar_True)); + } return PyGenericArrType_Type.tp_as_number->nb_or(a, b); } static PyObject * bool_arrtype_xor(PyObject *a, PyObject *b) { - if (PyArray_IsScalar(a, Bool) && PyArray_IsScalar(b, Bool)) + if (PyArray_IsScalar(a, Bool) && PyArray_IsScalar(b, Bool)) { PyArrayScalar_RETURN_BOOL_FROM_LONG ((a == PyArrayScalar_True)^(b == PyArrayScalar_True)); + } return PyGenericArrType_Type.tp_as_number->nb_xor(a, b); } @@ -2192,10 +2413,13 @@ #if PY_VERSION_HEX >= 0x02050000 /**begin repeat -#name=byte, short, int, long, ubyte, ushort, longlong, uint, ulong, ulonglong# -#Name=Byte, Short, Int, Long, UByte, UShort, LongLong, UInt, ULong, ULongLong# -#type=PyInt_FromLong*6, PyLong_FromLongLong*1, PyLong_FromUnsignedLong*2, PyLong_FromUnsignedLongLong# -*/ + * #name = byte, short, int, long, ubyte, ushort, longlong, uint, ulong, + * ulonglong# + * #Name = Byte, Short, Int, Long, UByte, UShort, LongLong, UInt, ULong, + * ULongLong# + * #type = PyInt_FromLong*6, PyLong_FromLongLong*1, PyLong_FromUnsignedLong*2, + * PyLong_FromUnsignedLongLong# + */ static PyNumberMethods @name at _arrtype_as_number; static PyObject * @name at _index(PyObject *self) @@ -2203,6 +2427,7 @@ return @type@(PyArrayScalar_VAL(self, @Name@)); } /**end repeat**/ + static PyObject * bool_index(PyObject *a) { @@ -2212,67 +2437,71 @@ /* Arithmetic methods -- only so we can override &, |, ^. */ static PyNumberMethods bool_arrtype_as_number = { - 0, /* nb_add */ - 0, /* nb_subtract */ - 0, /* nb_multiply */ - 0, /* nb_divide */ - 0, /* nb_remainder */ - 0, /* nb_divmod */ - 0, /* nb_power */ - 0, /* nb_negative */ - 0, /* nb_positive */ - 0, /* nb_absolute */ - (inquiry)bool_arrtype_nonzero, /* nb_nonzero */ - 0, /* nb_invert */ - 0, /* nb_lshift */ - 0, /* nb_rshift */ - (binaryfunc)bool_arrtype_and, /* nb_and */ - (binaryfunc)bool_arrtype_xor, /* nb_xor */ - (binaryfunc)bool_arrtype_or, /* nb_or */ - 0, /* nb_coerce */ - 0, /* nb_int */ - 0, /* nb_long */ - 0, /* nb_float */ - 0, /* nb_oct */ - 0, /* nb_hex */ + 0, /* nb_add */ + 0, /* nb_subtract */ + 0, /* nb_multiply */ + 0, /* nb_divide */ + 0, /* nb_remainder */ + 0, /* nb_divmod */ + 0, /* nb_power */ + 0, /* nb_negative */ + 0, /* nb_positive */ + 0, /* nb_absolute */ + (inquiry)bool_arrtype_nonzero, /* nb_nonzero */ + 0, /* nb_invert */ + 0, /* nb_lshift */ + 0, /* nb_rshift */ + (binaryfunc)bool_arrtype_and, /* nb_and */ + (binaryfunc)bool_arrtype_xor, /* nb_xor */ + (binaryfunc)bool_arrtype_or, /* nb_or */ + 0, /* nb_coerce */ + 0, /* nb_int */ + 0, /* nb_long */ + 0, /* nb_float */ + 0, /* nb_oct */ + 0, /* nb_hex */ /* Added in release 2.0 */ - 0, /* nb_inplace_add */ - 0, /* nb_inplace_subtract */ - 0, /* nb_inplace_multiply */ - 0, /* nb_inplace_divide */ - 0, /* nb_inplace_remainder */ - 0, /* nb_inplace_power */ - 0, /* nb_inplace_lshift */ - 0, /* nb_inplace_rshift */ - 0, /* nb_inplace_and */ - 0, /* nb_inplace_xor */ - 0, /* nb_inplace_or */ + 0, /* nb_inplace_add */ + 0, /* nb_inplace_subtract */ + 0, /* nb_inplace_multiply */ + 0, /* nb_inplace_divide */ + 0, /* nb_inplace_remainder */ + 0, /* nb_inplace_power */ + 0, /* nb_inplace_lshift */ + 0, /* nb_inplace_rshift */ + 0, /* nb_inplace_and */ + 0, /* nb_inplace_xor */ + 0, /* nb_inplace_or */ /* Added in release 2.2 */ /* The following require the Py_TPFLAGS_HAVE_CLASS flag */ - 0, /* nb_floor_divide */ - 0, /* nb_true_divide */ - 0, /* nb_inplace_floor_divide */ - 0, /* nb_inplace_true_divide */ + 0, /* nb_floor_divide */ + 0, /* nb_true_divide */ + 0, /* nb_inplace_floor_divide */ + 0, /* nb_inplace_true_divide */ /* Added in release 2.5 */ - 0, /* nb_index */ +#if PY_VERSION_HEX >= 0x02050000 + 0, /* nb_index */ +#endif }; static PyObject * void_arrtype_new(PyTypeObject *type, PyObject *args, PyObject *NPY_UNUSED(kwds)) { PyObject *obj, *arr; - ulonglong memu=1; - PyObject *new=NULL; + ulonglong memu = 1; + PyObject *new = NULL; char *destptr; - if (!PyArg_ParseTuple(args, "O", &obj)) return NULL; - /* For a VOID scalar first see if obj is an integer or long - and create new memory of that size (filled with 0) for the scalar - */ - - if (PyLong_Check(obj) || PyInt_Check(obj) || \ + if (!PyArg_ParseTuple(args, "O", &obj)) { + return NULL; + } + /* + * For a VOID scalar first see if obj is an integer or long + * and create new memory of that size (filled with 0) for the scalar + */ + if (PyLong_Check(obj) || PyInt_Check(obj) || PyArray_IsScalar(obj, Integer) || - (PyArray_Check(obj) && PyArray_NDIM(obj)==0 && \ + (PyArray_Check(obj) && PyArray_NDIM(obj)==0 && PyArray_ISINTEGER(obj))) { new = obj->ob_type->tp_as_number->nb_long(obj); } @@ -2288,7 +2517,9 @@ return NULL; } destptr = PyDataMem_NEW((int) memu); - if (destptr == NULL) return PyErr_NoMemory(); + if (destptr == NULL) { + return PyErr_NoMemory(); + } ret = type->tp_alloc(type, 0); if (ret == NULL) { PyDataMem_FREE(destptr); @@ -2296,8 +2527,8 @@ } ((PyVoidScalarObject *)ret)->obval = destptr; ((PyVoidScalarObject *)ret)->ob_size = (int) memu; - ((PyVoidScalarObject *)ret)->descr = \ - PyArray_DescrNewFromType(PyArray_VOID); + ((PyVoidScalarObject *)ret)->descr = + PyArray_DescrNewFromType(PyArray_VOID); ((PyVoidScalarObject *)ret)->descr->elsize = (int) memu; ((PyVoidScalarObject *)ret)->flags = BEHAVED | OWNDATA; ((PyVoidScalarObject *)ret)->base = NULL; @@ -2313,8 +2544,8 @@ /**************** Define Hash functions ********************/ /**begin repeat -#lname=bool,ubyte,ushort# -#name=Bool,UByte, UShort# + * #lname = bool,ubyte,ushort# + * #name = Bool,UByte, UShort# */ static long @lname at _arrtype_hash(PyObject *obj) @@ -2324,14 +2555,16 @@ /**end repeat**/ /**begin repeat -#lname=byte,short,uint,ulong# -#name=Byte,Short,UInt,ULong# + * #lname=byte,short,uint,ulong# + * #name=Byte,Short,UInt,ULong# */ static long @lname at _arrtype_hash(PyObject *obj) { long x = (long)(((Py at name@ScalarObject *)obj)->obval); - if (x == -1) x=-2; + if (x == -1) { + x = -2; + } return x; } /**end repeat**/ @@ -2341,16 +2574,18 @@ int_arrtype_hash(PyObject *obj) { long x = (long)(((PyIntScalarObject *)obj)->obval); - if (x == -1) x=-2; + if (x == -1) { + x = -2; + } return x; } #endif /**begin repeat -#char=,u# -#Char=,U# -#ext=&& (x >= LONG_MIN),# -*/ + * #char = ,u# + * #Char = ,U# + * #ext = && (x >= LONG_MIN),# + */ #if SIZEOF_LONG != SIZEOF_LONGLONG /* we assume SIZEOF_LONGLONG=2*SIZEOF_LONG */ static long @@ -2371,7 +2606,9 @@ both.v = x; y = both.hashvals[0] + (1000003)*both.hashvals[1]; } - if (y == -1) y = -2; + if (y == -1) { + y = -2; + } return y; } #endif @@ -2382,7 +2619,9 @@ ulonglong_arrtype_hash(PyObject *obj) { long x = (long)(((PyULongLongScalarObject *)obj)->obval); - if (x == -1) x=-2; + if (x == -1) { + x = -2; + } return x; } #endif @@ -2390,9 +2629,10 @@ /* Wrong thing to do for longdouble, but....*/ + /**begin repeat -#lname=float, longdouble# -#name=Float, LongDouble# + * #lname = float, longdouble# + * #name = Float, LongDouble# */ static long @lname at _arrtype_hash(PyObject *obj) @@ -2405,16 +2645,21 @@ c at lname@_arrtype_hash(PyObject *obj) { long hashreal, hashimag, combined; - hashreal = _Py_HashDouble((double) \ + hashreal = _Py_HashDouble((double) (((PyC at name@ScalarObject *)obj)->obval).real); - if (hashreal == -1) return -1; - hashimag = _Py_HashDouble((double) \ + if (hashreal == -1) { + return -1; + } + hashimag = _Py_HashDouble((double) (((PyC at name@ScalarObject *)obj)->obval).imag); - if (hashimag == -1) return -1; - + if (hashimag == -1) { + return -1; + } combined = hashreal + 1000003 * hashimag; - if (combined == -1) combined = -2; + if (combined == -1) { + combined = -2; + } return combined; } /**end repeat**/ @@ -2440,7 +2685,9 @@ /* first look in object and then hand off to generic type */ res = PyObject_GenericGetAttr(obj->obval, attr); - if (res) return res; + if (res) { + return res; + } PyErr_Clear(); return PyObject_GenericGetAttr((PyObject *)obj, attr); } @@ -2451,7 +2698,9 @@ /* first look in object and then hand off to generic type */ res = PyObject_GenericSetAttr(obj->obval, attr, val); - if (res >= 0) return res; + if (res >= 0) { + return res; + } PyErr_Clear(); return PyObject_GenericSetAttr((PyObject *)obj, attr, val); } @@ -2507,27 +2756,27 @@ static PySequenceMethods object_arrtype_as_sequence = { #if PY_VERSION_HEX >= 0x02050000 - (lenfunc)object_arrtype_length, /*sq_length*/ - (binaryfunc)object_arrtype_concat, /*sq_concat*/ - (ssizeargfunc)object_arrtype_repeat, /*sq_repeat*/ - 0, /*sq_item*/ - 0, /*sq_slice*/ - 0, /* sq_ass_item */ - 0, /* sq_ass_slice */ - (objobjproc)object_arrtype_contains, /* sq_contains */ - (binaryfunc)object_arrtype_inplace_concat, /* sq_inplace_concat */ - (ssizeargfunc)object_arrtype_inplace_repeat, /* sq_inplace_repeat */ + (lenfunc)object_arrtype_length, /*sq_length*/ + (binaryfunc)object_arrtype_concat, /*sq_concat*/ + (ssizeargfunc)object_arrtype_repeat, /*sq_repeat*/ + 0, /*sq_item*/ + 0, /*sq_slice*/ + 0, /* sq_ass_item */ + 0, /* sq_ass_slice */ + (objobjproc)object_arrtype_contains, /* sq_contains */ + (binaryfunc)object_arrtype_inplace_concat, /* sq_inplace_concat */ + (ssizeargfunc)object_arrtype_inplace_repeat, /* sq_inplace_repeat */ #else - (inquiry)object_arrtype_length, /*sq_length*/ - (binaryfunc)object_arrtype_concat, /*sq_concat*/ - (intargfunc)object_arrtype_repeat, /*sq_repeat*/ - 0, /*sq_item*/ - 0, /*sq_slice*/ - 0, /* sq_ass_item */ - 0, /* sq_ass_slice */ - (objobjproc)object_arrtype_contains, /* sq_contains */ - (binaryfunc)object_arrtype_inplace_concat, /* sq_inplace_concat */ - (intargfunc)object_arrtype_inplace_repeat, /* sq_inplace_repeat */ + (inquiry)object_arrtype_length, /*sq_length*/ + (binaryfunc)object_arrtype_concat, /*sq_concat*/ + (intargfunc)object_arrtype_repeat, /*sq_repeat*/ + 0, /*sq_item*/ + 0, /*sq_slice*/ + 0, /* sq_ass_item */ + 0, /* sq_ass_slice */ + (objobjproc)object_arrtype_contains, /* sq_contains */ + (binaryfunc)object_arrtype_inplace_concat, /* sq_inplace_concat */ + (intargfunc)object_arrtype_inplace_repeat, /* sq_inplace_repeat */ #endif }; @@ -2550,14 +2799,14 @@ int cnt; PyBufferProcs *pb = self->obval->ob_type->tp_as_buffer; - if (pb == NULL || \ - pb->bf_getsegcount == NULL || \ - (cnt = (*pb->bf_getsegcount)(self->obval, &newlen)) != 1) + if (pb == NULL || + pb->bf_getsegcount == NULL || + (cnt = (*pb->bf_getsegcount)(self->obval, &newlen)) != 1) { return 0; - - if (lenp) + } + if (lenp) { *lenp = newlen; - + } return cnt; } @@ -2566,14 +2815,13 @@ { PyBufferProcs *pb = self->obval->ob_type->tp_as_buffer; - if (pb == NULL || \ + if (pb == NULL || pb->bf_getreadbuffer == NULL || pb->bf_getsegcount == NULL) { PyErr_SetString(PyExc_TypeError, "expected a readable buffer object"); return -1; } - return (*pb->bf_getreadbuffer)(self->obval, segment, ptrptr); } @@ -2582,14 +2830,13 @@ { PyBufferProcs *pb = self->obval->ob_type->tp_as_buffer; - if (pb == NULL || \ + if (pb == NULL || pb->bf_getwritebuffer == NULL || pb->bf_getsegcount == NULL) { PyErr_SetString(PyExc_TypeError, "expected a writeable buffer object"); return -1; } - return (*pb->bf_getwritebuffer)(self->obval, segment, ptrptr); } @@ -2599,14 +2846,13 @@ { PyBufferProcs *pb = self->obval->ob_type->tp_as_buffer; - if (pb == NULL || \ + if (pb == NULL || pb->bf_getcharbuffer == NULL || pb->bf_getsegcount == NULL) { PyErr_SetString(PyExc_TypeError, "expected a character buffer object"); return -1; } - return (*pb->bf_getcharbuffer)(self->obval, segment, ptrptr); } @@ -2627,64 +2873,64 @@ static PyObject * object_arrtype_call(PyObjectScalarObject *obj, PyObject *args, PyObject *kwds) { - return PyObject_Call(obj->obval, args, kwds); + return PyObject_Call(obj->obval, args, kwds); } static PyTypeObject PyObjectArrType_Type = { PyObject_HEAD_INIT(NULL) - 0, /*ob_size*/ - "numpy.object_", /*tp_name*/ - sizeof(PyObjectScalarObject), /*tp_basicsize*/ - 0, /* tp_itemsize */ - (destructor)object_arrtype_dealloc, /* tp_dealloc */ - 0, /* tp_print */ - 0, /* tp_getattr */ - 0, /* tp_setattr */ - 0, /* tp_compare */ - 0, /* tp_repr */ - 0, /* tp_as_number */ - &object_arrtype_as_sequence, /* tp_as_sequence */ - &object_arrtype_as_mapping, /* tp_as_mapping */ - 0, /* tp_hash */ - (ternaryfunc)object_arrtype_call, /* tp_call */ - 0, /* tp_str */ - (getattrofunc)object_arrtype_getattro, /* tp_getattro */ - (setattrofunc)object_arrtype_setattro, /* tp_setattro */ - &object_arrtype_as_buffer, /* tp_as_buffer */ - 0, /* tp_flags */ - 0, /* tp_doc */ - 0, /* tp_traverse */ - 0, /* tp_clear */ - 0, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ - 0, /* tp_iter */ - 0, /* tp_iternext */ - 0, /* tp_methods */ - 0, /* tp_members */ - 0, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - 0, /* tp_init */ - 0, /* tp_alloc */ - 0, /* tp_new */ - 0, /* tp_free */ - 0, /* tp_is_gc */ - 0, /* tp_bases */ - 0, /* tp_mro */ - 0, /* tp_cache */ - 0, /* tp_subclasses */ - 0, /* tp_weaklist */ - 0, /* tp_del */ + 0, /* ob_size*/ + "numpy.object_", /* tp_name*/ + sizeof(PyObjectScalarObject), /* tp_basicsize*/ + 0, /* tp_itemsize */ + (destructor)object_arrtype_dealloc, /* tp_dealloc */ + 0, /* tp_print */ + 0, /* tp_getattr */ + 0, /* tp_setattr */ + 0, /* tp_compare */ + 0, /* tp_repr */ + 0, /* tp_as_number */ + &object_arrtype_as_sequence, /* tp_as_sequence */ + &object_arrtype_as_mapping, /* tp_as_mapping */ + 0, /* tp_hash */ + (ternaryfunc)object_arrtype_call, /* tp_call */ + 0, /* tp_str */ + (getattrofunc)object_arrtype_getattro, /* tp_getattro */ + (setattrofunc)object_arrtype_setattro, /* tp_setattro */ + &object_arrtype_as_buffer, /* tp_as_buffer */ + 0, /* tp_flags */ + 0, /* tp_doc */ + 0, /* tp_traverse */ + 0, /* tp_clear */ + 0, /* tp_richcompare */ + 0, /* tp_weaklistoffset */ + 0, /* tp_iter */ + 0, /* tp_iternext */ + 0, /* tp_methods */ + 0, /* tp_members */ + 0, /* tp_getset */ + 0, /* tp_base */ + 0, /* tp_dict */ + 0, /* tp_descr_get */ + 0, /* tp_descr_set */ + 0, /* tp_dictoffset */ + 0, /* tp_init */ + 0, /* tp_alloc */ + 0, /* tp_new */ + 0, /* tp_free */ + 0, /* tp_is_gc */ + 0, /* tp_bases */ + 0, /* tp_mro */ + 0, /* tp_cache */ + 0, /* tp_subclasses */ + 0, /* tp_weaklist */ + 0, /* tp_del */ #ifdef COUNT_ALLOCS /* these must be last and never explicitly initialized */ - 0, /* tp_allocs */ - 0, /* tp_frees */ - 0, /* tp_maxalloc */ - 0, /* tp_prev */ - 0, /* *tp_next */ + 0, /* tp_allocs */ + 0, /* tp_frees */ + 0, /* tp_maxalloc */ + 0, /* tp_prev */ + 0, /* *tp_next */ #endif }; @@ -2698,12 +2944,12 @@ static PyObject * gen_arrtype_subscript(PyObject *self, PyObject *key) { - /* Only [...], [...,], [, ...], - is allowed for indexing a scalar - - These return a new N-d array with a copy of - the data where N is the number of None's in . - + /* + * Only [...], [...,], [, ...], + * is allowed for indexing a scalar + * + * These return a new N-d array with a copy of + * the data where N is the number of None's in . */ PyObject *res, *ret; int N; @@ -2717,19 +2963,19 @@ "invalid index to scalar variable."); return NULL; } - - if (key == Py_Ellipsis) + if (key == Py_Ellipsis) { return res; - + } if (key == Py_None) { ret = add_new_axes_0d((PyArrayObject *)res, 1); Py_DECREF(res); return ret; } /* Must be a Tuple */ - N = count_new_axes_0d(key); - if (N < 0) return NULL; + if (N < 0) { + return NULL; + } ret = add_new_axes_0d((PyArrayObject *)res, N); Py_DECREF(res); return ret; @@ -2737,74 +2983,75 @@ /**begin repeat - * #name=bool, string, unicode, void# - * #NAME=Bool, String, Unicode, Void# - * #ex=_,_,_,# + * #name = bool, string, unicode, void# + * #NAME = Bool, String, Unicode, Void# + * #ex = _,_,_,# */ static PyTypeObject Py at NAME@ArrType_Type = { PyObject_HEAD_INIT(NULL) - 0, /*ob_size*/ - "numpy. at name@@ex@", /*tp_name*/ - sizeof(Py at NAME@ScalarObject), /*tp_basicsize*/ - 0, /* tp_itemsize */ - 0, /* tp_dealloc */ - 0, /* tp_print */ - 0, /* tp_getattr */ - 0, /* tp_setattr */ - 0, /* tp_compare */ - 0, /* tp_repr */ - 0, /* tp_as_number */ - 0, /* tp_as_sequence */ - 0, /* tp_as_mapping */ - 0, /* tp_hash */ - 0, /* tp_call */ - 0, /* tp_str */ - 0, /* tp_getattro */ - 0, /* tp_setattro */ - 0, /* tp_as_buffer */ - 0, /* tp_flags */ - 0, /* tp_doc */ - 0, /* tp_traverse */ - 0, /* tp_clear */ - 0, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ - 0, /* tp_iter */ - 0, /* tp_iternext */ - 0, /* tp_methods */ - 0, /* tp_members */ - 0, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - 0, /* tp_init */ - 0, /* tp_alloc */ - 0, /* tp_new */ - 0, /* tp_free */ - 0, /* tp_is_gc */ - 0, /* tp_bases */ - 0, /* tp_mro */ - 0, /* tp_cache */ - 0, /* tp_subclasses */ - 0, /* tp_weaklist */ - 0, /* tp_del */ + 0, /* ob_size*/ + "numpy. at name@@ex@", /* tp_name*/ + sizeof(Py at NAME@ScalarObject), /* tp_basicsize*/ + 0, /* tp_itemsize */ + 0, /* tp_dealloc */ + 0, /* tp_print */ + 0, /* tp_getattr */ + 0, /* tp_setattr */ + 0, /* tp_compare */ + 0, /* tp_repr */ + 0, /* tp_as_number */ + 0, /* tp_as_sequence */ + 0, /* tp_as_mapping */ + 0, /* tp_hash */ + 0, /* tp_call */ + 0, /* tp_str */ + 0, /* tp_getattro */ + 0, /* tp_setattro */ + 0, /* tp_as_buffer */ + 0, /* tp_flags */ + 0, /* tp_doc */ + 0, /* tp_traverse */ + 0, /* tp_clear */ + 0, /* tp_richcompare */ + 0, /* tp_weaklistoffset */ + 0, /* tp_iter */ + 0, /* tp_iternext */ + 0, /* tp_methods */ + 0, /* tp_members */ + 0, /* tp_getset */ + 0, /* tp_base */ + 0, /* tp_dict */ + 0, /* tp_descr_get */ + 0, /* tp_descr_set */ + 0, /* tp_dictoffset */ + 0, /* tp_init */ + 0, /* tp_alloc */ + 0, /* tp_new */ + 0, /* tp_free */ + 0, /* tp_is_gc */ + 0, /* tp_bases */ + 0, /* tp_mro */ + 0, /* tp_cache */ + 0, /* tp_subclasses */ + 0, /* tp_weaklist */ + 0, /* tp_del */ #ifdef COUNT_ALLOCS /* these must be last and never explicitly initialized */ - 0, /* tp_allocs */ - 0, /* tp_frees */ - 0, /* tp_maxalloc */ - 0, /* tp_prev */ - 0, /* *tp_next */ + 0, /* tp_allocs */ + 0, /* tp_frees */ + 0, /* tp_maxalloc */ + 0, /* tp_prev */ + 0, /* *tp_next */ #endif }; /**end repeat**/ /**begin repeat -#NAME=Byte, Short, Int, Long, LongLong, UByte, UShort, UInt, ULong, ULongLong, Float, Double, LongDouble# -#name=int*5, uint*5, float*3# -#CNAME=(CHAR, SHORT, INT, LONG, LONGLONG)*2, FLOAT, DOUBLE, LONGDOUBLE# -*/ + * #NAME = Byte, Short, Int, Long, LongLong, UByte, UShort, UInt, ULong, + * ULongLong, Float, Double, LongDouble# + * #name = int*5, uint*5, float*3# + * #CNAME = (CHAR, SHORT, INT, LONG, LONGLONG)*2, FLOAT, DOUBLE, LONGDOUBLE# + */ #if BITSOF_ at CNAME@ == 8 #define _THIS_SIZE "8" #elif BITSOF_ at CNAME@ == 16 @@ -2824,59 +3071,59 @@ #endif static PyTypeObject Py at NAME@ArrType_Type = { PyObject_HEAD_INIT(NULL) - 0, /*ob_size*/ - "numpy. at name@" _THIS_SIZE, /*tp_name*/ - sizeof(Py at NAME@ScalarObject), /*tp_basicsize*/ - 0, /* tp_itemsize */ - 0, /* tp_dealloc */ - 0, /* tp_print */ - 0, /* tp_getattr */ - 0, /* tp_setattr */ - 0, /* tp_compare */ - 0, /* tp_repr */ - 0, /* tp_as_number */ - 0, /* tp_as_sequence */ - 0, /* tp_as_mapping */ - 0, /* tp_hash */ - 0, /* tp_call */ - 0, /* tp_str */ - 0, /* tp_getattro */ - 0, /* tp_setattro */ - 0, /* tp_as_buffer */ - 0, /* tp_flags */ - 0, /* tp_doc */ - 0, /* tp_traverse */ - 0, /* tp_clear */ - 0, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ - 0, /* tp_iter */ - 0, /* tp_iternext */ - 0, /* tp_methods */ - 0, /* tp_members */ - 0, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - 0, /* tp_init */ - 0, /* tp_alloc */ - 0, /* tp_new */ - 0, /* tp_free */ - 0, /* tp_is_gc */ - 0, /* tp_bases */ - 0, /* tp_mro */ - 0, /* tp_cache */ - 0, /* tp_subclasses */ - 0, /* tp_weaklist */ - 0, /* tp_del */ + 0, /* ob_size*/ + "numpy. at name@" _THIS_SIZE, /* tp_name*/ + sizeof(Py at NAME@ScalarObject), /* tp_basicsize*/ + 0, /* tp_itemsize */ + 0, /* tp_dealloc */ + 0, /* tp_print */ + 0, /* tp_getattr */ + 0, /* tp_setattr */ + 0, /* tp_compare */ + 0, /* tp_repr */ + 0, /* tp_as_number */ + 0, /* tp_as_sequence */ + 0, /* tp_as_mapping */ + 0, /* tp_hash */ + 0, /* tp_call */ + 0, /* tp_str */ + 0, /* tp_getattro */ + 0, /* tp_setattro */ + 0, /* tp_as_buffer */ + 0, /* tp_flags */ + 0, /* tp_doc */ + 0, /* tp_traverse */ + 0, /* tp_clear */ + 0, /* tp_richcompare */ + 0, /* tp_weaklistoffset */ + 0, /* tp_iter */ + 0, /* tp_iternext */ + 0, /* tp_methods */ + 0, /* tp_members */ + 0, /* tp_getset */ + 0, /* tp_base */ + 0, /* tp_dict */ + 0, /* tp_descr_get */ + 0, /* tp_descr_set */ + 0, /* tp_dictoffset */ + 0, /* tp_init */ + 0, /* tp_alloc */ + 0, /* tp_new */ + 0, /* tp_free */ + 0, /* tp_is_gc */ + 0, /* tp_bases */ + 0, /* tp_mro */ + 0, /* tp_cache */ + 0, /* tp_subclasses */ + 0, /* tp_weaklist */ + 0, /* tp_del */ #ifdef COUNT_ALLOCS /* these must be last and never explicitly initialized */ - 0, /* tp_allocs */ - 0, /* tp_frees */ - 0, /* tp_maxalloc */ - 0, /* tp_prev */ - 0, /* *tp_next */ + 0, /* tp_allocs */ + 0, /* tp_frees */ + 0, /* tp_maxalloc */ + 0, /* tp_prev */ + 0, /* *tp_next */ #endif }; @@ -2892,10 +3139,10 @@ /**begin repeat -#NAME=CFloat, CDouble, CLongDouble# -#name=complex*3# -#CNAME=FLOAT, DOUBLE, LONGDOUBLE# -*/ + * #NAME = CFloat, CDouble, CLongDouble# + * #name = complex*3# + * #CNAME = FLOAT, DOUBLE, LONGDOUBLE# + */ #if BITSOF_ at CNAME@ == 16 #define _THIS_SIZE2 "16" #define _THIS_SIZE1 "32" @@ -2918,65 +3165,69 @@ #define _THIS_SIZE2 "256" #define _THIS_SIZE1 "512" #endif -static PyTypeObject Py at NAME@ArrType_Type = { + +#define _THIS_DOC "Composed of two " _THIS_SIZE2 " bit floats" + + static PyTypeObject Py at NAME@ArrType_Type = { PyObject_HEAD_INIT(NULL) - 0, /*ob_size*/ - "numpy. at name@" _THIS_SIZE1, /*tp_name*/ - sizeof(Py at NAME@ScalarObject), /*tp_basicsize*/ - 0, /*tp_itemsize*/ - 0, /*tp_dealloc*/ - 0, /*tp_print*/ - 0, /*tp_getattr*/ - 0, /*tp_setattr*/ - 0, /*tp_compare*/ - 0, /*tp_repr*/ - 0, /*tp_as_number*/ - 0, /*tp_as_sequence*/ - 0, /*tp_as_mapping*/ - 0, /*tp_hash */ - 0, /*tp_call*/ - 0, /*tp_str*/ - 0, /*tp_getattro*/ - 0, /*tp_setattro*/ - 0, /*tp_as_buffer*/ - Py_TPFLAGS_DEFAULT, /*tp_flags*/ - "Composed of two " _THIS_SIZE2 " bit floats", /* tp_doc */ - 0, /* tp_traverse */ - 0, /* tp_clear */ - 0, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ - 0, /* tp_iter */ - 0, /* tp_iternext */ - 0, /* tp_methods */ - 0, /* tp_members */ - 0, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - 0, /* tp_init */ - 0, /* tp_alloc */ - 0, /* tp_new */ - 0, /* tp_free */ - 0, /* tp_is_gc */ - 0, /* tp_bases */ - 0, /* tp_mro */ - 0, /* tp_cache */ - 0, /* tp_subclasses */ - 0, /* tp_weaklist */ - 0, /* tp_del */ + 0, /* ob_size*/ + "numpy. at name@" _THIS_SIZE1, /* tp_name*/ + sizeof(Py at NAME@ScalarObject), /* tp_basicsize*/ + 0, /* tp_itemsize*/ + 0, /* tp_dealloc*/ + 0, /* tp_print*/ + 0, /* tp_getattr*/ + 0, /* tp_setattr*/ + 0, /* tp_compare*/ + 0, /* tp_repr*/ + 0, /* tp_as_number*/ + 0, /* tp_as_sequence*/ + 0, /* tp_as_mapping*/ + 0, /* tp_hash */ + 0, /* tp_call*/ + 0, /* tp_str*/ + 0, /* tp_getattro*/ + 0, /* tp_setattro*/ + 0, /* tp_as_buffer*/ + Py_TPFLAGS_DEFAULT, /* tp_flags*/ + _THIS_DOC, /* tp_doc */ + 0, /* tp_traverse */ + 0, /* tp_clear */ + 0, /* tp_richcompare */ + 0, /* tp_weaklistoffset */ + 0, /* tp_iter */ + 0, /* tp_iternext */ + 0, /* tp_methods */ + 0, /* tp_members */ + 0, /* tp_getset */ + 0, /* tp_base */ + 0, /* tp_dict */ + 0, /* tp_descr_get */ + 0, /* tp_descr_set */ + 0, /* tp_dictoffset */ + 0, /* tp_init */ + 0, /* tp_alloc */ + 0, /* tp_new */ + 0, /* tp_free */ + 0, /* tp_is_gc */ + 0, /* tp_bases */ + 0, /* tp_mro */ + 0, /* tp_cache */ + 0, /* tp_subclasses */ + 0, /* tp_weaklist */ + 0, /* tp_del */ #ifdef COUNT_ALLOCS /* these must be last and never explicitly initialized */ - 0, /* tp_allocs */ - 0, /* tp_frees */ - 0, /* tp_maxalloc */ - 0, /* tp_prev */ - 0, /* *tp_next */ + 0, /* tp_allocs */ + 0, /* tp_frees */ + 0, /* tp_maxalloc */ + 0, /* tp_prev */ + 0, /* *tp_next */ #endif }; #undef _THIS_SIZE1 #undef _THIS_SIZE2 +#undef _THIS_DOC /**end repeat**/ @@ -3004,12 +3255,15 @@ PyBoolArrType_Type.tp_as_number = &bool_arrtype_as_number; #if PY_VERSION_HEX >= 0x02050000 - /* need to add dummy versions with filled-in nb_index - in-order for PyType_Ready to fill in .__index__() method + /* + * need to add dummy versions with filled-in nb_index + * in-order for PyType_Ready to fill in .__index__() method */ /**begin repeat -#name=byte, short, int, long, longlong, ubyte, ushort, uint, ulong, ulonglong# -#NAME=Byte, Short, Int, Long, LongLong, UByte, UShort, UInt, ULong, ULongLong# + * #name = byte, short, int, long, longlong, ubyte, ushort, + * uint, ulong, ulonglong# + * #NAME = Byte, Short, Int, Long, LongLong, UByte, UShort, + * UInt, ULong, ULongLong# */ Py at NAME@ArrType_Type.tp_as_number = &@name at _arrtype_as_number; Py at NAME@ArrType_Type.tp_as_number->nb_index = (unaryfunc)@name at _index; @@ -3033,15 +3287,19 @@ PyVoidArrType_Type.tp_as_sequence = &voidtype_as_sequence; /**begin repeat -#NAME=Number, Integer, SignedInteger, UnsignedInteger, Inexact, Floating, -ComplexFloating, Flexible, Character# + * #NAME= Number, Integer, SignedInteger, UnsignedInteger, Inexact, + * Floating, ComplexFloating, Flexible, Character# */ Py at NAME@ArrType_Type.tp_flags = BASEFLAGS; /**end repeat**/ /**begin repeat -#name=bool, byte, short, int, long, longlong, ubyte, ushort, uint, ulong, ulonglong, float, double, longdouble, cfloat, cdouble, clongdouble, string, unicode, void, object# -#NAME=Bool, Byte, Short, Int, Long, LongLong, UByte, UShort, UInt, ULong, ULongLong, Float, Double, LongDouble, CFloat, CDouble, CLongDouble, String, Unicode, Void, Object# + * #name = bool, byte, short, int, long, longlong, ubyte, ushort, uint, + * ulong, ulonglong, float, double, longdouble, cfloat, cdouble, + * clongdouble, string, unicode, void, object# + * #NAME = Bool, Byte, Short, Int, Long, LongLong, UByte, UShort, UInt, + * ULong, ULongLong, Float, Double, LongDouble, CFloat, CDouble, + * CLongDouble, String, Unicode, Void, Object# */ Py at NAME@ArrType_Type.tp_flags = BASEFLAGS; Py at NAME@ArrType_Type.tp_new = @name at _arrtype_new; @@ -3049,8 +3307,10 @@ /**end repeat**/ /**begin repeat -#name=bool, byte, short, ubyte, ushort, uint, ulong, ulonglong, float, longdouble, cfloat, clongdouble, void, object# -#NAME=Bool, Byte, Short, UByte, UShort, UInt, ULong, ULongLong, Float, LongDouble, CFloat, CLongDouble, Void, Object# + * #name = bool, byte, short, ubyte, ushort, uint, ulong, ulonglong, + * float, longdouble, cfloat, clongdouble, void, object# + * #NAME = Bool, Byte, Short, UByte, UShort, UInt, ULong, ULongLong, + * Float, LongDouble, CFloat, CLongDouble, Void, Object# */ Py at NAME@ArrType_Type.tp_hash = @name at _arrtype_hash; /**end repeat**/ @@ -3066,7 +3326,7 @@ #endif /**begin repeat - *#name = repr, str# + * #name = repr, str# */ PyFloatArrType_Type.tp_ at name@ = floattype_ at name@; PyCFloatArrType_Type.tp_ at name@ = cfloattype_ at name@; @@ -3075,15 +3335,24 @@ PyCDoubleArrType_Type.tp_ at name@ = cdoubletype_ at name@; /**end repeat**/ - /* These need to be coded specially because getitem does not - return a normal Python type + PyFloatArrType_Type.tp_print = floattype_print; + PyDoubleArrType_Type.tp_print = doubletype_print; + PyLongDoubleArrType_Type.tp_print = longdoubletype_print; + + PyCFloatArrType_Type.tp_print = cfloattype_print; + PyCDoubleArrType_Type.tp_print = cdoubletype_print; + PyCLongDoubleArrType_Type.tp_print = clongdoubletype_print; + + /* + * These need to be coded specially because getitem does not + * return a normal Python type */ PyLongDoubleArrType_Type.tp_as_number = &longdoubletype_as_number; PyCLongDoubleArrType_Type.tp_as_number = &clongdoubletype_as_number; /**begin repeat - * #name=int, long, hex, oct, float, repr, str# - * #kind=tp_as_number->nb*5, tp*2# + * #name = int, long, hex, oct, float, repr, str# + * #kind = tp_as_number->nb*5, tp*2# */ PyLongDoubleArrType_Type. at kind@_ at name@ = longdoubletype_ at name@; PyCLongDoubleArrType_Type. at kind@_ at name@ = clongdoubletype_ at name@; @@ -3137,8 +3406,9 @@ i++; } - if (!user) return typenum; - + if (!user) { + return typenum; + } /* Search any registered types */ i = 0; while (i < PyArray_NUMUSERTYPES) { @@ -3179,36 +3449,41 @@ } /* Check the generic types */ - if ((type == (PyObject *) &PyNumberArrType_Type) || \ - (type == (PyObject *) &PyInexactArrType_Type) || \ - (type == (PyObject *) &PyFloatingArrType_Type)) + if ((type == (PyObject *) &PyNumberArrType_Type) || + (type == (PyObject *) &PyInexactArrType_Type) || + (type == (PyObject *) &PyFloatingArrType_Type)) { typenum = PyArray_DOUBLE; - else if (type == (PyObject *)&PyComplexFloatingArrType_Type) + } + else if (type == (PyObject *)&PyComplexFloatingArrType_Type) { typenum = PyArray_CDOUBLE; - else if ((type == (PyObject *)&PyIntegerArrType_Type) || \ - (type == (PyObject *)&PySignedIntegerArrType_Type)) + } + else if ((type == (PyObject *)&PyIntegerArrType_Type) || + (type == (PyObject *)&PySignedIntegerArrType_Type)) { typenum = PyArray_LONG; - else if (type == (PyObject *) &PyUnsignedIntegerArrType_Type) + } + else if (type == (PyObject *) &PyUnsignedIntegerArrType_Type) { typenum = PyArray_ULONG; - else if (type == (PyObject *) &PyCharacterArrType_Type) + } + else if (type == (PyObject *) &PyCharacterArrType_Type) { typenum = PyArray_STRING; - else if ((type == (PyObject *) &PyGenericArrType_Type) || \ - (type == (PyObject *) &PyFlexibleArrType_Type)) + } + else if ((type == (PyObject *) &PyGenericArrType_Type) || + (type == (PyObject *) &PyFlexibleArrType_Type)) { typenum = PyArray_VOID; + } if (typenum != PyArray_NOTYPE) { return PyArray_DescrFromType(typenum); } - /* Otherwise --- type is a sub-type of an array scalar - not corresponding to a registered data-type object. + /* + * Otherwise --- type is a sub-type of an array scalar + * not corresponding to a registered data-type object. */ - /* Do special thing for VOID sub-types - */ + /* Do special thing for VOID sub-types */ if (PyType_IsSubtype((PyTypeObject *)type, &PyVoidArrType_Type)) { new = PyArray_DescrNewFromType(PyArray_VOID); - conv = _arraydescr_fromobj(type); if (conv) { new->fields = conv->fields; @@ -3229,8 +3504,8 @@ } /*NUMPY_API - Return the tuple of ordered field names from a dictionary. -*/ + * Return the tuple of ordered field names from a dictionary. + */ static PyObject * PyArray_FieldNames(PyObject *fields) { @@ -3244,20 +3519,25 @@ return NULL; } _numpy_internal = PyImport_ImportModule("numpy.core._internal"); - if (_numpy_internal == NULL) return NULL; + if (_numpy_internal == NULL) { + return NULL; + } tup = PyObject_CallMethod(_numpy_internal, "_makenames_list", "O", fields); Py_DECREF(_numpy_internal); - if (tup == NULL) return NULL; + if (tup == NULL) { + return NULL; + } ret = PyTuple_GET_ITEM(tup, 0); ret = PySequence_Tuple(ret); Py_DECREF(tup); return ret; } -/* New reference */ /*NUMPY_API - Return descr object from array scalar. -*/ + * Return descr object from array scalar. + * + * New reference + */ static PyArray_Descr * PyArray_DescrFromScalar(PyObject *sc) { @@ -3273,8 +3553,9 @@ if (descr->elsize == 0) { PyArray_DESCR_REPLACE(descr); type_num = descr->type_num; - if (type_num == PyArray_STRING) + if (type_num == PyArray_STRING) { descr->elsize = PyString_GET_SIZE(sc); + } else if (type_num == PyArray_UNICODE) { descr->elsize = PyUnicode_GET_DATA_SIZE(sc); #ifndef Py_UNICODE_WIDE @@ -3290,18 +3571,20 @@ Py_XDECREF(descr->fields); descr->fields = NULL; } - if (descr->fields) + if (descr->fields) { descr->names = PyArray_FieldNames(descr->fields); + } PyErr_Clear(); } } return descr; } -/* New reference */ /*NUMPY_API - Get a typeobject from a type-number -- can return NULL. -*/ + * Get a typeobject from a type-number -- can return NULL. + * + * New reference + */ static PyObject * PyArray_TypeObjectFromType(int type) { @@ -3309,7 +3592,9 @@ PyObject *obj; descr = PyArray_DescrFromType(type); - if (descr == NULL) return NULL; + if (descr == NULL) { + return NULL; + } obj = (PyObject *)descr->typeobj; Py_XINCREF(obj); Py_DECREF(descr); Modified: branches/numpy-mingw-w64/numpy/core/tests/test_memmap.py =================================================================== --- branches/numpy-mingw-w64/numpy/core/tests/test_memmap.py 2009-02-20 13:30:20 UTC (rev 6424) +++ branches/numpy-mingw-w64/numpy/core/tests/test_memmap.py 2009-02-20 16:37:01 UTC (rev 6425) @@ -14,6 +14,9 @@ self.data = arange(12, dtype=self.dtype) self.data.resize(self.shape) + def tearDown(self): + self.tmpfp.close() + def test_roundtrip(self): # Write data to file fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+', Modified: branches/numpy-mingw-w64/numpy/core/tests/test_multiarray.py =================================================================== --- branches/numpy-mingw-w64/numpy/core/tests/test_multiarray.py 2009-02-20 13:30:20 UTC (rev 6424) +++ branches/numpy-mingw-w64/numpy/core/tests/test_multiarray.py 2009-02-20 16:37:01 UTC (rev 6425) @@ -1,9 +1,12 @@ import tempfile import sys +import os import numpy as np from numpy.testing import * from numpy.core import * +from test_print import in_foreign_locale + class TestFlags(TestCase): def setUp(self): self.a = arange(10) @@ -113,41 +116,6 @@ d2 = dtype('f8') assert_equal(d2, dtype(float64)) - -class TestFromstring(TestCase): - def test_binary(self): - a = fromstring('\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@',dtype=' 4: + assert_equal(str(tp(1e10)), str(float('1e10')), + err_msg='Failed str formatting for type %s' % tp) + else: + if sys.platform == 'win32' and sys.version_info[0] <= 2 and \ + sys.version_info[1] <= 5: + ref = '1e+010' + else: + ref = '1e+10' + assert_equal(str(tp(1e10)), ref, + err_msg='Failed str formatting for type %s' % tp) - This is only for the str function, and only for simple types. - The precision of np.float and np.longdouble aren't the same as the - python float precision. +#@dec.knownfailureif(True, "formatting tests are known to fail") +def test_float_types(): + """ Check formatting. - """ - for t in [np.cfloat, np.cdouble, np.clongdouble] : - for x in [0, 1,-1, 1e10, 1e20] : - assert_equal(str(t(x)), str(complex(x))) - assert_equal(str(t(x*1j)), str(complex(x*1j))) - assert_equal(str(t(x + x*1j)), str(complex(x + x*1j))) + This is only for the str function, and only for simple types. + The precision of np.float and np.longdouble aren't the same as the + python float precision. + """ + for t in [np.float32, np.double, np.longdouble] : + yield check_float_type, t +def check_nan_inf_float(tp): + for x in [np.inf, -np.inf, np.nan]: + assert_equal(str(tp(x)), _REF[x], + err_msg='Failed str formatting for type %s' % tp) + +#@dec.knownfailureif(True, "formatting tests are known to fail") +def test_nan_inf_float(): + """ Check formatting of nan & inf. + + This is only for the str function, and only for simple types. + The precision of np.float and np.longdouble aren't the same as the + python float precision. + + """ + for t in [np.float32, np.double, np.longdouble] : + yield check_nan_inf_float, t + +def check_complex_type(tp): + for x in [0, 1,-1, 1e20] : + assert_equal(str(tp(x)), str(complex(x)), + err_msg='Failed str formatting for type %s' % tp) + assert_equal(str(tp(x*1j)), str(complex(x*1j)), + err_msg='Failed str formatting for type %s' % tp) + assert_equal(str(tp(x + x*1j)), str(complex(x + x*1j)), + err_msg='Failed str formatting for type %s' % tp) + + if tp(1e10).itemsize > 8: + assert_equal(str(tp(1e10)), str(complex(1e10)), + err_msg='Failed str formatting for type %s' % tp) + else: + if sys.platform == 'win32' and sys.version_info[0] <= 2 and \ + sys.version_info[1] <= 5: + ref = '(1e+010+0j)' + else: + ref = '(1e+10+0j)' + assert_equal(str(tp(1e10)), ref, + err_msg='Failed str formatting for type %s' % tp) + +#@dec.knownfailureif(True, "formatting tests are known to fail") +def test_complex_types(): + """Check formatting of complex types. + + This is only for the str function, and only for simple types. + The precision of np.float and np.longdouble aren't the same as the + python float precision. + + """ + for t in [np.complex64, np.cdouble, np.clongdouble] : + yield check_complex_type, t + +# print tests +def _test_redirected_print(x, tp, ref=None): + file = StringIO() + file_tp = StringIO() + stdout = sys.stdout + try: + sys.stdout = file_tp + print tp(x) + sys.stdout = file + if ref: + print ref + else: + print x + finally: + sys.stdout = stdout + + assert_equal(file.getvalue(), file_tp.getvalue(), + err_msg='print failed for type%s' % tp) + +def check_float_type_print(tp): + for x in [0, 1,-1, 1e20]: + _test_redirected_print(float(x), tp) + + for x in [np.inf, -np.inf, np.nan]: + _test_redirected_print(float(x), tp, _REF[x]) + + if tp(1e10).itemsize > 4: + _test_redirected_print(float(1e10), tp) + else: + if sys.platform == 'win32' and sys.version_info[0] <= 2 and \ + sys.version_info[1] <= 5: + ref = '1e+010' + else: + ref = '1e+10' + _test_redirected_print(float(1e10), tp, ref) + +#@dec.knownfailureif(True, "formatting tests are known to fail") +def check_complex_type_print(tp): + # We do not create complex with inf/nan directly because the feature is + # missing in python < 2.6 + for x in [0, 1, -1, 1e20]: + _test_redirected_print(complex(x), tp) + + if tp(1e10).itemsize > 8: + _test_redirected_print(complex(1e10), tp) + else: + if sys.platform == 'win32' and sys.version_info[0] <= 2 and \ + sys.version_info[1] <= 5: + ref = '(1e+010+0j)' + else: + ref = '(1e+10+0j)' + _test_redirected_print(complex(1e10), tp, ref) + + _test_redirected_print(complex(np.inf, 1), tp, '(inf+1j)') + _test_redirected_print(complex(-np.inf, 1), tp, '(-inf+1j)') + _test_redirected_print(complex(-np.nan, 1), tp, '(nan+1j)') + +def test_float_type_print(): + """Check formatting when using print """ + for t in [np.float32, np.double, np.longdouble] : + yield check_float_type_print, t + +#@dec.knownfailureif(True, "formatting tests are known to fail") +def test_complex_type_print(): + """Check formatting when using print """ + for t in [np.complex64, np.cdouble, np.clongdouble] : + yield check_complex_type_print, t + +# Locale tests: scalar types formatting should be independent of the locale +def in_foreign_locale(func): + # XXX: How to query locale on a given system ? + + # French is one language where the decimal is ',' not '.', and should be + # relatively common on many systems + def wrapper(*args, **kwargs): + curloc = locale.getlocale(locale.LC_NUMERIC) + try: + try: + if not sys.platform == 'win32': + locale.setlocale(locale.LC_NUMERIC, 'fr_FR') + else: + locale.setlocale(locale.LC_NUMERIC, 'FRENCH') + except locale.Error: + raise nose.SkipTest("Skipping locale test, because " + "French locale not found") + return func(*args, **kwargs) + finally: + locale.setlocale(locale.LC_NUMERIC, locale=curloc) + return nose.tools.make_decorator(func)(wrapper) + +#@dec.knownfailureif(True, "formatting tests are known to fail") + at in_foreign_locale +def test_locale_single(): + assert_equal(str(np.float32(1.2)), str(float(1.2))) + +#@dec.knownfailureif(True, "formatting tests are known to fail") + at in_foreign_locale +def test_locale_double(): + assert_equal(str(np.double(1.2)), str(float(1.2))) + +#@dec.knownfailureif(True, "formatting tests are known to fail") + at in_foreign_locale +def test_locale_longdouble(): + assert_equal(str(np.longdouble(1.2)), str(float(1.2))) + if __name__ == "__main__": run_module_suite() Modified: branches/numpy-mingw-w64/numpy/core/tests/test_regression.py =================================================================== --- branches/numpy-mingw-w64/numpy/core/tests/test_regression.py 2009-02-20 13:30:20 UTC (rev 6424) +++ branches/numpy-mingw-w64/numpy/core/tests/test_regression.py 2009-02-20 16:37:01 UTC (rev 6425) @@ -1,7 +1,7 @@ - from StringIO import StringIO import pickle import sys +import gc from os import path from numpy.testing import * import numpy as np @@ -1208,5 +1208,17 @@ a = np.array(1) self.failUnlessRaises(ValueError, lambda x: x.choose([]), a) + def test_errobj_reference_leak(self, level=rlevel): + """Ticket #955""" + z = int(0) + p = np.int32(-1) + + gc.collect() + n_before = len(gc.get_objects()) + z**p # this shouldn't leak a reference to errobj + gc.collect() + n_after = len(gc.get_objects()) + assert n_before >= n_after, (n_before, n_after) + if __name__ == "__main__": run_module_suite() Modified: branches/numpy-mingw-w64/numpy/core/tests/test_unicode.py =================================================================== --- branches/numpy-mingw-w64/numpy/core/tests/test_unicode.py 2009-02-20 13:30:20 UTC (rev 6424) +++ branches/numpy-mingw-w64/numpy/core/tests/test_unicode.py 2009-02-20 16:37:01 UTC (rev 6425) @@ -17,7 +17,7 @@ # Creation tests ############################################################ -class create_zeros: +class create_zeros(object): """Check the creation of zero-valued arrays""" def content_check(self, ua, ua_scalar, nbytes): @@ -69,7 +69,7 @@ ulen = 1009 -class create_values: +class create_values(object): """Check the creation of unicode arrays with values""" def content_check(self, ua, ua_scalar, nbytes): @@ -154,7 +154,7 @@ # Assignment tests ############################################################ -class assign_values: +class assign_values(object): """Check the assignment of unicode arrays with values""" def content_check(self, ua, ua_scalar, nbytes): Modified: branches/numpy-mingw-w64/numpy/ctypeslib.py =================================================================== --- branches/numpy-mingw-w64/numpy/ctypeslib.py 2009-02-20 13:30:20 UTC (rev 6424) +++ branches/numpy-mingw-w64/numpy/ctypeslib.py 2009-02-20 16:37:01 UTC (rev 6425) @@ -353,8 +353,3 @@ result = tp.from_address(addr) result.__keep = ai return result - - -def test(level=1, verbosity=1): - from numpy.testing import NumpyTest - return NumpyTest().test(level, verbosity) Modified: branches/numpy-mingw-w64/numpy/distutils/command/config.py =================================================================== --- branches/numpy-mingw-w64/numpy/distutils/command/config.py 2009-02-20 13:30:20 UTC (rev 6424) +++ branches/numpy-mingw-w64/numpy/distutils/command/config.py 2009-02-20 16:37:01 UTC (rev 6425) @@ -5,11 +5,13 @@ import os, signal import warnings +import sys from distutils.command.config import config as old_config from distutils.command.config import LANG_EXT from distutils import log from distutils.file_util import copy_file +import distutils from numpy.distutils.exec_command import exec_command from numpy.distutils.mingw32ccompiler import generate_manifest @@ -39,6 +41,30 @@ def _check_compiler (self): old_config._check_compiler(self) from numpy.distutils.fcompiler import FCompiler, new_fcompiler + + if sys.platform == 'win32' and self.compiler.compiler_type == 'msvc': + # XXX: hack to circumvent a python 2.6 bug with msvc9compiler: + # initialize call query_vcvarsall, which throws an IOError, and + # causes an error along the way without much information. We try to + # catch it here, hoping it is early enough, and print an helpful + # message instead of Error: None. + if not self.compiler.initialized: + try: + self.compiler.initialize() + except IOError, e: + msg = """\ +Could not initialize compiler instance: do you have Visual Studio +installed ? If you are trying to build with mingw, please use python setup.py +build -c mingw32 instead ). If you have Visual Studio installed, check it is +correctly installed, and the right version (VS 2008 for python 2.6, VS 2003 for +2.5, etc...). Original exception was: %s, and the Compiler +class was %s +============================================================================""" \ + % (e, self.compiler.__class__.__name__) + print """\ +============================================================================""" + raise distutils.errors.DistutilsPlatformError(msg) + if not isinstance(self.fcompiler, FCompiler): self.fcompiler = new_fcompiler(compiler=self.fcompiler, dry_run=self.dry_run, force=1, Modified: branches/numpy-mingw-w64/numpy/distutils/command/scons.py =================================================================== --- branches/numpy-mingw-w64/numpy/distutils/command/scons.py 2009-02-20 13:30:20 UTC (rev 6424) +++ branches/numpy-mingw-w64/numpy/distutils/command/scons.py 2009-02-20 16:37:01 UTC (rev 6425) @@ -361,9 +361,13 @@ try: minver = "0.9.3" - from numscons import get_version - if get_version() < minver: - raise ValueError() + try: + # version_info was added in 0.10.0 + from numscons import version_info + except ImportError: + from numscons import get_version + if get_version() < minver: + raise ValueError() except ImportError: raise RuntimeError("You need numscons >= %s to build numpy "\ "with numscons (imported numscons path " \ Modified: branches/numpy-mingw-w64/numpy/distutils/fcompiler/compaq.py =================================================================== --- branches/numpy-mingw-w64/numpy/distutils/fcompiler/compaq.py 2009-02-20 13:30:20 UTC (rev 6424) +++ branches/numpy-mingw-w64/numpy/distutils/fcompiler/compaq.py 2009-02-20 16:37:01 UTC (rev 6425) @@ -79,12 +79,16 @@ m.initialize() ar_exe = m.lib except DistutilsPlatformError, msg: - print 'Ignoring "%s" (one should fix me in fcompiler/compaq.py)' % (msg) + pass except AttributeError, msg: if '_MSVCCompiler__root' in str(msg): print 'Ignoring "%s" (I think it is msvccompiler.py bug)' % (msg) else: raise + except IOError, e: + if not "vcvarsall.bat" in str(e): + print "Unexpected IOError in", __file__ + raise e executables = { 'version_cmd' : ['', "/what"], Modified: branches/numpy-mingw-w64/numpy/distutils/fcompiler/gnu.py =================================================================== --- branches/numpy-mingw-w64/numpy/distutils/fcompiler/gnu.py 2009-02-20 13:30:20 UTC (rev 6424) +++ branches/numpy-mingw-w64/numpy/distutils/fcompiler/gnu.py 2009-02-20 16:37:01 UTC (rev 6425) @@ -87,21 +87,29 @@ def get_flags_linker_so(self): opt = self.linker_so[1:] if sys.platform=='darwin': - # MACOSX_DEPLOYMENT_TARGET must be at least 10.3. This is - # a reasonable default value even when building on 10.4 when using - # the official Python distribution and those derived from it (when - # not broken). target = os.environ.get('MACOSX_DEPLOYMENT_TARGET', None) - if target is None or target == '': - target = '10.3' - major, minor = target.split('.') - if int(minor) < 3: - minor = '3' - warnings.warn('Environment variable ' - 'MACOSX_DEPLOYMENT_TARGET reset to %s.%s' % (major, minor)) - os.environ['MACOSX_DEPLOYMENT_TARGET'] = '%s.%s' % (major, - minor) - + # If MACOSX_DEPLOYMENT_TARGET is set, we simply trust the value + # and leave it alone. But, distutils will complain if the + # environment's value is different from the one in the Python + # Makefile used to build Python. We let disutils handle this + # error checking. + if not target: + # If MACOSX_DEPLOYMENT_TARGET is not set in the environment, + # we try to get it first from the Python Makefile and then we + # fall back to setting it to 10.3 to maximize the set of + # versions we can work with. This is a reasonable default + # even when using the official Python dist and those derived + # from it. + import distutils.sysconfig as sc + g = {} + filename = sc.get_makefile_filename() + sc.parse_makefile(filename, g) + target = g.get('MACOSX_DEPLOYMENT_TARGET', '10.3') + os.environ['MACOSX_DEPLOYMENT_TARGET'] = target + if target == '10.3': + s = 'Env. variable MACOSX_DEPLOYMENT_TARGET set to 10.3' + warnings.warn(s) + opt.extend(['-undefined', 'dynamic_lookup', '-bundle']) else: opt.append("-shared") @@ -272,30 +280,30 @@ def get_library_dirs(self): opt = GnuFCompiler.get_library_dirs(self) - if sys.platform == 'win32': - c_compiler = self.c_compiler - if c_compiler and c_compiler.compiler_type == "msvc": - target = self.get_target() - if target: + if sys.platform == 'win32': + c_compiler = self.c_compiler + if c_compiler and c_compiler.compiler_type == "msvc": + target = self.get_target() + if target: d = os.path.normpath(self.get_libgcc_dir()) - root = os.path.join(d, os.pardir, os.pardir, os.pardir, os.pardir) - mingwdir = os.path.normpath(os.path.join(root, target, "lib")) - full = os.path.join(mingwdir, "libmingwex.a") - if os.path.exists(full): - opt.append(mingwdir) - return opt + root = os.path.join(d, os.pardir, os.pardir, os.pardir, os.pardir) + mingwdir = os.path.normpath(os.path.join(root, target, "lib")) + full = os.path.join(mingwdir, "libmingwex.a") + if os.path.exists(full): + opt.append(mingwdir) + return opt def get_libraries(self): opt = GnuFCompiler.get_libraries(self) if sys.platform == 'darwin': opt.remove('cc_dynamic') - if sys.platform == 'win32': - c_compiler = self.c_compiler - if c_compiler and c_compiler.compiler_type == "msvc": - if "gcc" in opt: - i = opt.index("gcc") - opt.insert(i+1, "mingwex") - opt.insert(i+1, "mingw32") + if sys.platform == 'win32': + c_compiler = self.c_compiler + if c_compiler and c_compiler.compiler_type == "msvc": + if "gcc" in opt: + i = opt.index("gcc") + opt.insert(i+1, "mingwex") + opt.insert(i+1, "mingw32") return opt def get_target(self): @@ -303,9 +311,9 @@ ['-v'], use_tee=0) if not status: - m = TARGET_R.search(output) - if m: - return m.group(1) + m = TARGET_R.search(output) + if m: + return m.group(1) return "" if __name__ == '__main__': Modified: branches/numpy-mingw-w64/numpy/distutils/lib2def.py =================================================================== --- branches/numpy-mingw-w64/numpy/distutils/lib2def.py 2009-02-20 13:30:20 UTC (rev 6424) +++ branches/numpy-mingw-w64/numpy/distutils/lib2def.py 2009-02-20 16:37:01 UTC (rev 6425) @@ -1,6 +1,7 @@ import re import sys import os +import subprocess __doc__ = """This module generates a DEF file from the symbols in an MSVC-compiled DLL import library. It correctly discriminates between @@ -59,13 +60,13 @@ deffile = None return libfile, deffile -def getnm(nm_cmd = 'nm -Cs python%s.lib' % py_ver): +def getnm(nm_cmd = ['nm', '-Cs', 'python%s.lib' % py_ver]): """Returns the output of nm_cmd via a pipe. nm_output = getnam(nm_cmd = 'nm -Cs py_lib')""" - f = os.popen(nm_cmd) - nm_output = f.read() - f.close() + f = subprocess.Popen(nm_cmd, shell=True, stdout=subprocess.PIPE) + nm_output = f.stdout.read() + f.stdout.close() return nm_output def parse_nm(nm_output): @@ -107,7 +108,7 @@ deffile = sys.stdout else: deffile = open(deffile, 'w') - nm_cmd = '%s %s' % (DEFAULT_NM, libfile) + nm_cmd = [str(DEFAULT_NM), str(libfile)] nm_output = getnm(nm_cmd) dlist, flist = parse_nm(nm_output) output_def(dlist, flist, DEF_HEADER, deffile) Modified: branches/numpy-mingw-w64/numpy/distutils/mingw32ccompiler.py =================================================================== --- branches/numpy-mingw-w64/numpy/distutils/mingw32ccompiler.py 2009-02-20 13:30:20 UTC (rev 6424) +++ branches/numpy-mingw-w64/numpy/distutils/mingw32ccompiler.py 2009-02-20 16:37:01 UTC (rev 6425) @@ -9,6 +9,7 @@ """ import os +import subprocess import sys import log import subprocess @@ -56,9 +57,10 @@ # get_versions methods regex if self.gcc_version is None: import re - out = os.popen('gcc -dumpversion','r') - out_string = out.read() - out.close() + p = subprocess.Popen(['gcc', '-dumpversion'], shell=True, + stdout=subprocess.PIPE) + out_string = p.stdout.read() + p.stdout.close() result = re.search('(\d+\.\d+)',out_string) if result: self.gcc_version = StrictVersion(result.group(1)) @@ -336,23 +338,37 @@ # raise DistutilsPlatformError, msg return +#===================================== +# Dealing with Visual Studio MANIFESTS +#===================================== + # Functions to deal with visual studio manifests. Manifest are a mechanism to # enforce strong DLL versioning on windows, and has nothing to do with # distutils MANIFEST. manifests are XML files with version info, and used by -# the OS loader; they are necessary when linking against a DLL no in the system -# path; in particular, python 2.6 is built against the MS runtime 9 (the one -# from VS 2008), which is not available on most windows systems; python 2.6 -# installer does install it in the Win SxS (Side by side) directory, but this -# requires the manifest too. This is a big mess, thanks MS for a wonderful -# system. +# the OS loader; they are necessary when linking against a DLL not in the +# system path; in particular, official python 2.6 binary is built against the +# MS runtime 9 (the one from VS 2008), which is not available on most windows +# systems; python 2.6 installer does install it in the Win SxS (Side by side) +# directory, but this requires the manifest for this to work. This is a big +# mess, thanks MS for a wonderful system. -# XXX: ideally, we should use exactly the same version as used by python, but I -# have no idea how to obtain the exact version from python. We could use the -# strings utility on python.exe, maybe ? -_MSVCRVER_TO_FULLVER = {'90': "9.0.21022.8", - # I took one version in my SxS directory: no idea if it is the good - # one, and we can't retrieve it from python - '80': "8.0.50727.42"} +# XXX: ideally, we should use exactly the same version as used by python. I +# submitted a patch to get this version, but it was only included for python +# 2.6.1 and above. So for versions below, we use a "best guess". +_MSVCRVER_TO_FULLVER = {} +if sys.platform == 'win32': + try: + import msvcrt + if hasattr(msvcrt, "CRT_ASSEMBLY_VERSION"): + _MSVCRVER_TO_FULLVER['90'] = msvcrt.CRT_ASSEMBLY_VERSION + else: + _MSVCRVER_TO_FULLVER['90'] = "9.0.21022.8" + _MSVCRVER_TO_FULLVER['80'] = "8.0.50727.42" + except ImportError: + # If we are here, means python was not built with MSVC. Not sure what to do + # in that case: manifest building will fail, but it should not be used in + # that case anyway + log.warn('Cannot import msvcrt: using manifest will not be possible') def msvc_manifest_xml(maj, min): """Given a major and minor version of the MSVCR, returns the Modified: branches/numpy-mingw-w64/numpy/distutils/misc_util.py =================================================================== --- branches/numpy-mingw-w64/numpy/distutils/misc_util.py 2009-02-20 13:30:20 UTC (rev 6424) +++ branches/numpy-mingw-w64/numpy/distutils/misc_util.py 2009-02-20 16:37:01 UTC (rev 6425) @@ -6,6 +6,7 @@ import glob import atexit import tempfile +import subprocess try: set @@ -1340,7 +1341,10 @@ revision = None m = None try: - sin, sout = os.popen4('svnversion') + p = subprocess.Popen(['svnversion'], shell=True, + stdout=subprocess.PIPE, stderr=STDOUT, + close_fds=True) + sout = p.stdout m = re.match(r'(?P\d+)', sout.read()) except: pass Modified: branches/numpy-mingw-w64/numpy/distutils/system_info.py =================================================================== --- branches/numpy-mingw-w64/numpy/distutils/system_info.py 2009-02-20 13:30:20 UTC (rev 6424) +++ branches/numpy-mingw-w64/numpy/distutils/system_info.py 2009-02-20 16:37:01 UTC (rev 6425) @@ -128,6 +128,50 @@ from numpy.distutils.misc_util import is_sequence, is_string from numpy.distutils.command.config import config as cmd_config +# Determine number of bits +import platform +_bits = {'32bit':32,'64bit':64} +platform_bits = _bits[platform.architecture()[0]] + +def libpaths(paths,bits): + """Return a list of library paths valid on 32 or 64 bit systems. + + Inputs: + paths : sequence + A sequence of strings (typically paths) + bits : int + An integer, the only valid values are 32 or 64. A ValueError exception + is raised otherwise. + + Examples: + + Consider a list of directories + >>> paths = ['/usr/X11R6/lib','/usr/X11/lib','/usr/lib'] + + For a 32-bit platform, this is already valid: + >>> libpaths(paths,32) + ['/usr/X11R6/lib', '/usr/X11/lib', '/usr/lib'] + + On 64 bits, we prepend the '64' postfix + >>> libpaths(paths,64) + ['/usr/X11R6/lib64', '/usr/X11R6/lib', '/usr/X11/lib64', '/usr/X11/lib', + '/usr/lib64', '/usr/lib'] + """ + if bits not in (32, 64): + raise ValueError("Invalid bit size in libpaths: 32 or 64 only") + + # Handle 32bit case + if bits==32: + return paths + + # Handle 64bit case + out = [] + for p in paths: + out.extend([p+'64', p]) + + return out + + if sys.platform == 'win32': default_lib_dirs = ['C:\\', os.path.join(distutils.sysconfig.EXEC_PREFIX, @@ -137,24 +181,16 @@ default_x11_lib_dirs = [] default_x11_include_dirs = [] else: - default_lib_dirs = ['/usr/local/lib', '/opt/lib', '/usr/lib', - '/opt/local/lib', '/sw/lib'] + default_lib_dirs = libpaths(['/usr/local/lib','/opt/lib','/usr/lib', + '/opt/local/lib','/sw/lib'], platform_bits) default_include_dirs = ['/usr/local/include', '/opt/include', '/usr/include', - '/opt/local/include', '/sw/include'] + '/opt/local/include', '/sw/include', + '/usr/include/suitesparse'] default_src_dirs = ['.','/usr/local/src', '/opt/src','/sw/src'] - try: - platform = os.uname() - bit64 = platform[-1].endswith('64') - except: - bit64 = False - - if bit64: - default_x11_lib_dirs = ['/usr/lib64'] - else: - default_x11_lib_dirs = ['/usr/X11R6/lib','/usr/X11/lib','/usr/lib'] - + default_x11_lib_dirs = libpaths(['/usr/X11R6/lib','/usr/X11/lib', + '/usr/lib'], platform_bits) default_x11_include_dirs = ['/usr/X11R6/include','/usr/X11/include', '/usr/include'] @@ -364,14 +400,16 @@ self.files.extend(get_standard_file('.numpy-site.cfg')) self.files.extend(get_standard_file('site.cfg')) self.parse_config_files() - self.search_static_first = self.cp.getboolean(self.section, - 'search_static_first') + if self.section is not None: + self.search_static_first = self.cp.getboolean(self.section, + 'search_static_first') assert isinstance(self.search_static_first, int) def parse_config_files(self): self.cp.read(self.files) if not self.cp.has_section(self.section): - self.cp.add_section(self.section) + if self.section is not None: + self.cp.add_section(self.section) def calc_libraries_info(self): libs = self.get_libraries() Copied: branches/numpy-mingw-w64/numpy/doc/constants.py (from rev 6424, trunk/numpy/doc/constants.py) Modified: branches/numpy-mingw-w64/numpy/f2py/cfuncs.py =================================================================== --- branches/numpy-mingw-w64/numpy/f2py/cfuncs.py 2009-02-20 13:30:20 UTC (rev 6424) +++ branches/numpy-mingw-w64/numpy/f2py/cfuncs.py 2009-02-20 16:37:01 UTC (rev 6425) @@ -472,15 +472,17 @@ cppmacros['CHECKSTRING']="""\ #define CHECKSTRING(check,tcheck,name,show,var)\\ \tif (!(check)) {\\ -\t\tPyErr_SetString(#modulename#_error,\"(\"tcheck\") failed for \"name);\\ -\t\tfprintf(stderr,show\"\\n\",slen(var),var);\\ +\t\tchar errstring[256];\\ +\t\tsprintf(errstring, \"%s: \"show, \"(\"tcheck\") failed for \"name, slen(var), var);\\ +\t\tPyErr_SetString(#modulename#_error, errstring);\\ \t\t/*goto capi_fail;*/\\ \t} else """ cppmacros['CHECKSCALAR']="""\ #define CHECKSCALAR(check,tcheck,name,show,var)\\ \tif (!(check)) {\\ -\t\tPyErr_SetString(#modulename#_error,\"(\"tcheck\") failed for \"name);\\ -\t\tfprintf(stderr,show\"\\n\",var);\\ +\t\tchar errstring[256];\\ +\t\tsprintf(errstring, \"%s: \"show, \"(\"tcheck\") failed for \"name, var);\\ +\t\tPyErr_SetString(#modulename#_error,errstring);\\ \t\t/*goto capi_fail;*/\\ \t} else """ ## cppmacros['CHECKDIMS']="""\ Modified: branches/numpy-mingw-w64/numpy/f2py/crackfortran.py =================================================================== --- branches/numpy-mingw-w64/numpy/f2py/crackfortran.py 2009-02-20 13:30:20 UTC (rev 6424) +++ branches/numpy-mingw-w64/numpy/f2py/crackfortran.py 2009-02-20 16:37:01 UTC (rev 6425) @@ -2446,9 +2446,9 @@ global skipfuncs, onlyfuncs setmesstext(block) ret='' - if type(block) is type([]): + if isinstance(block, list): for g in block: - if g['block'] in ['function','subroutine']: + if g and g['block'] in ['function','subroutine']: if g['name'] in skipfuncs: continue if onlyfuncs and g['name'] not in onlyfuncs: Modified: branches/numpy-mingw-w64/numpy/f2py/f2py.1 =================================================================== --- branches/numpy-mingw-w64/numpy/f2py/f2py.1 2009-02-20 13:30:20 UTC (rev 6424) +++ branches/numpy-mingw-w64/numpy/f2py/f2py.1 2009-02-20 16:37:01 UTC (rev 6425) @@ -20,7 +20,7 @@ This program generates a Python C/API file (module.c) that contains wrappers for given Fortran or C functions so that they can be called from Python. -With the -c option the corresponding +With the \-c option the corresponding extension modules are built. .SH OPTIONS .TP @@ -49,8 +49,8 @@ \'untitled\'. .TP .B \-\-[no\-]lower -Do [not] lower the cases in . By default, --lower is -assumed with -h key, and --no-lower without -h key. +Do [not] lower the cases in . By default, \-\-lower is +assumed with \-h key, and \-\-no\-lower without \-h key. .TP .B \-\-build\-dir All f2py generated files are created in . Default is tempfile.mktemp(). @@ -59,14 +59,14 @@ Overwrite existing signature file. .TP .B \-\-[no\-]latex\-doc -Create (or not) module.tex. Default is --no-latex-doc. +Create (or not) module.tex. Default is \-\-no\-latex\-doc. .TP .B \-\-short\-latex Create 'incomplete' LaTeX document (without commands \\documentclass, \\tableofcontents, and \\begin{document}, \\end{document}). .TP .B \-\-[no\-]rest\-doc -Create (or not) module.rst. Default is --no-rest-doc. +Create (or not) module.rst. Default is \-\-no\-rest\-doc. .TP .B \-\-debug\-capi Create C/API code that reports the state of the wrappers during @@ -81,12 +81,12 @@ .TP .B \-\-[no\-]wrap\-functions Create Fortran subroutine wrappers to Fortran 77 -functions. --wrap-functions is default because it ensures maximum +functions. \-\-wrap\-functions is default because it ensures maximum portability/compiler independence. .TP .B \-\-help\-link [..] List system resources found by system_info.py. [..] may contain -a list of resources names. See also --link- switch below. +a list of resources names. See also \-\-link\- switch below. .TP .B \-\-quiet Run quietly. @@ -100,7 +100,7 @@ .B \-\-include_paths path1:path2:... Search include files (that f2py will scan) from the given directories. .SH "CONFIG_FC OPTIONS" -The following options are effective only when -c switch is used. +The following options are effective only when \-c switch is used. .TP .B \-\-help-compiler List available Fortran compilers [DEPRECIATED]. @@ -147,13 +147,13 @@ .B \-\-debug Compile with debugging information. .SH "EXTRA OPTIONS" -The following options are effective only when -c switch is used. +The following options are effective only when \-c switch is used. .TP .B \-\-link- Link extension module with as defined by numpy_distutils/system_info.py. E.g. to link with optimized LAPACK libraries (vecLib on MacOSX, ATLAS elsewhere), use ---link-lapack_opt. See also --help-link switch. +\-\-link\-lapack_opt. See also \-\-help\-link switch. .TP .B -L/path/to/lib/ -l Modified: branches/numpy-mingw-w64/numpy/f2py/f2py2e.py =================================================================== --- branches/numpy-mingw-w64/numpy/f2py/f2py2e.py 2009-02-20 13:30:20 UTC (rev 6424) +++ branches/numpy-mingw-w64/numpy/f2py/f2py2e.py 2009-02-20 16:37:01 UTC (rev 6425) @@ -543,7 +543,7 @@ setup(ext_modules = [ext]) if remove_build_dir and os.path.exists(build_dir): - import shutil + import shutil outmess('Removing build directory %s\n'%(build_dir)) shutil.rmtree(build_dir) Modified: branches/numpy-mingw-w64/numpy/f2py/rules.py =================================================================== --- branches/numpy-mingw-w64/numpy/f2py/rules.py 2009-02-20 13:30:20 UTC (rev 6424) +++ branches/numpy-mingw-w64/numpy/f2py/rules.py 2009-02-20 16:37:01 UTC (rev 6425) @@ -245,7 +245,7 @@ f2py_start_clock(); #endif \tif (!PyArg_ParseTupleAndKeywords(capi_args,capi_keywds,\\ -\t\t\"#argformat#|#keyformat##xaformat#:#pyname#\",\\ +\t\t\"#argformat##keyformat##xaformat#:#pyname#\",\\ \t\tcapi_kwlist#args_capi##keys_capi##keys_xa#))\n\t\treturn NULL; #frompyobj# /*end of frompyobj*/ @@ -1355,6 +1355,16 @@ rd['latexdocstrsigns']=rd['latexdocstrsigns']+rd[k][0:1]+\ ['\\begin{description}']+rd[k][1:]+\ ['\\end{description}'] + + # Workaround for Python 2.6, 2.6.1 bug: http://bugs.python.org/issue4720 + if rd['keyformat'] or rd['xaformat']: + argformat = rd['argformat'] + if isinstance(argformat, list): + argformat.append('|') + else: + assert isinstance(argformat, str),repr((argformat, type(argformat))) + rd['argformat'] += '|' + ar=applyrules(routine_rules,rd) if ismoduleroutine(rout): outmess('\t\t\t %s\n'%(ar['docshort'])) Modified: branches/numpy-mingw-w64/numpy/lib/__init__.py =================================================================== --- branches/numpy-mingw-w64/numpy/lib/__init__.py 2009-02-20 13:30:20 UTC (rev 6424) +++ branches/numpy-mingw-w64/numpy/lib/__init__.py 2009-02-20 16:37:01 UTC (rev 6425) @@ -1,151 +1,3 @@ -""" -Basic functions used by several sub-packages and -useful to have in the main name-space. - -Type Handling -------------- -================ =================== -iscomplexobj Test for complex object, scalar result -isrealobj Test for real object, scalar result -iscomplex Test for complex elements, array result -isreal Test for real elements, array result -imag Imaginary part -real Real part -real_if_close Turns complex number with tiny imaginary part to real -isneginf Tests for negative infinity, array result -isposinf Tests for positive infinity, array result -isnan Tests for nans, array result -isinf Tests for infinity, array result -isfinite Tests for finite numbers, array result -isscalar True if argument is a scalar -nan_to_num Replaces NaN's with 0 and infinities with large numbers -cast Dictionary of functions to force cast to each type -common_type Determine the minimum common type code for a group - of arrays -mintypecode Return minimal allowed common typecode. -================ =================== - -Index Tricks ------------- -================ =================== -mgrid Method which allows easy construction of N-d - 'mesh-grids' -``r_`` Append and construct arrays: turns slice objects into - ranges and concatenates them, for 2d arrays appends rows. -index_exp Konrad Hinsen's index_expression class instance which - can be useful for building complicated slicing syntax. -================ =================== - -Useful Functions ----------------- -================ =================== -select Extension of where to multiple conditions and choices -extract Extract 1d array from flattened array according to mask -insert Insert 1d array of values into Nd array according to mask -linspace Evenly spaced samples in linear space -logspace Evenly spaced samples in logarithmic space -fix Round x to nearest integer towards zero -mod Modulo mod(x,y) = x % y except keeps sign of y -amax Array maximum along axis -amin Array minimum along axis -ptp Array max-min along axis -cumsum Cumulative sum along axis -prod Product of elements along axis -cumprod Cumluative product along axis -diff Discrete differences along axis -angle Returns angle of complex argument -unwrap Unwrap phase along given axis (1-d algorithm) -sort_complex Sort a complex-array (based on real, then imaginary) -trim_zeros Trim the leading and trailing zeros from 1D array. -vectorize A class that wraps a Python function taking scalar - arguments into a generalized function which can handle - arrays of arguments using the broadcast rules of - numerix Python. -================ =================== - -Shape Manipulation ------------------- -================ =================== -squeeze Return a with length-one dimensions removed. -atleast_1d Force arrays to be > 1D -atleast_2d Force arrays to be > 2D -atleast_3d Force arrays to be > 3D -vstack Stack arrays vertically (row on row) -hstack Stack arrays horizontally (column on column) -column_stack Stack 1D arrays as columns into 2D array -dstack Stack arrays depthwise (along third dimension) -split Divide array into a list of sub-arrays -hsplit Split into columns -vsplit Split into rows -dsplit Split along third dimension -================ =================== - -Matrix (2D Array) Manipulations -------------------------------- -================ =================== -fliplr 2D array with columns flipped -flipud 2D array with rows flipped -rot90 Rotate a 2D array a multiple of 90 degrees -eye Return a 2D array with ones down a given diagonal -diag Construct a 2D array from a vector, or return a given - diagonal from a 2D array. -mat Construct a Matrix -bmat Build a Matrix from blocks -================ =================== - -Polynomials ------------ -================ =================== -poly1d A one-dimensional polynomial class -poly Return polynomial coefficients from roots -roots Find roots of polynomial given coefficients -polyint Integrate polynomial -polyder Differentiate polynomial -polyadd Add polynomials -polysub Substract polynomials -polymul Multiply polynomials -polydiv Divide polynomials -polyval Evaluate polynomial at given argument -================ =================== - -Import Tricks -------------- -================ =================== -ppimport Postpone module import until trying to use it -ppimport_attr Postpone module import until trying to use its attribute -ppresolve Import postponed module and return it. -================ =================== - -Machine Arithmetics -------------------- -================ =================== -machar_single Single precision floating point arithmetic parameters -machar_double Double precision floating point arithmetic parameters -================ =================== - -Threading Tricks ----------------- -================ =================== -ParallelExec Execute commands in parallel thread. -================ =================== - -1D Array Set Operations ------------------------ -Set operations for 1D numeric arrays based on sort() function. - -================ =================== -ediff1d Array difference (auxiliary function). -unique1d Unique elements of 1D array. -intersect1d Intersection of 1D arrays with unique elements. -intersect1d_nu Intersection of 1D arrays with any elements. -setxor1d Set exclusive-or of 1D arrays with unique elements. -setmember1d Return an array of shape of ar1 containing 1 where - the elements of ar1 are in ar2 and 0 otherwise. -union1d Union of 1D arrays with unique elements. -setdiff1d Set difference of 1D arrays with unique elements. -================ =================== - -""" from info import __doc__ from numpy.version import version as __version__ Copied: branches/numpy-mingw-w64/numpy/lib/_iotools.py (from rev 6424, trunk/numpy/lib/_iotools.py) Modified: branches/numpy-mingw-w64/numpy/lib/arraysetops.py =================================================================== --- branches/numpy-mingw-w64/numpy/lib/arraysetops.py 2009-02-20 13:30:20 UTC (rev 6424) +++ branches/numpy-mingw-w64/numpy/lib/arraysetops.py 2009-02-20 16:37:01 UTC (rev 6425) @@ -52,13 +52,19 @@ If provided, this number will be taked onto the beginning of the returned differences. + Notes + ----- + When applied to masked arrays, this function drops the mask information + if the `to_begin` and/or `to_end` parameters are used + + Returns ------- ed : array The differences. Loosely, this will be (ary[1:] - ary[:-1]). """ - ary = np.asarray(ary).flat + ary = np.asanyarray(ary).flat ed = ary[1:] - ary[:-1] arrays = [ed] if to_begin is not None: @@ -132,7 +138,7 @@ "the output was (indices, unique_arr), but " "has now been reversed to be more consistent.") - ar = np.asarray(ar1).flatten() + ar = np.asanyarray(ar1).flatten() if ar.size == 0: if return_inverse and return_index: return ar, np.empty(0, np.bool), np.empty(0, np.bool) Modified: branches/numpy-mingw-w64/numpy/lib/function_base.py =================================================================== --- branches/numpy-mingw-w64/numpy/lib/function_base.py 2009-02-20 13:30:20 UTC (rev 6424) +++ branches/numpy-mingw-w64/numpy/lib/function_base.py 2009-02-20 16:37:01 UTC (rev 6425) @@ -228,10 +228,10 @@ * None : the new behaviour is used, no warning is printed. * True : the new behaviour is used and a warning is raised about the future removal of the `new` keyword. - * False : the old behaviour is used and a DeprecationWarning + * False : the old behaviour is used and a DeprecationWarning is raised. - As of NumPy 1.3, this keyword should not be used explicitly since it - will disappear in NumPy 1.4. + As of NumPy 1.3, this keyword should not be used explicitly since it + will disappear in NumPy 1.4. Returns ------- @@ -267,9 +267,9 @@ # Old behavior if new == False: warnings.warn(""" - The histogram semantics being used is now deprecated and - will disappear in NumPy 1.4. Please update your code to - use the default semantics. + The histogram semantics being used is now deprecated and + will disappear in NumPy 1.4. Please update your code to + use the default semantics. """, DeprecationWarning) a = asarray(a).ravel() @@ -320,8 +320,8 @@ elif new in [True, None]: if new is True: warnings.warn(""" - The new semantics of histogram is now the default and the `new` - keyword will be removed in NumPy 1.4. + The new semantics of histogram is now the default and the `new` + keyword will be removed in NumPy 1.4. """, Warning) a = asarray(a) if weights is not None: @@ -1073,53 +1073,6 @@ else: return a[slice1]-a[slice2] -try: - add_docstring(digitize, -r"""digitize(x,bins) - -Return the index of the bin to which each value of x belongs. - -Each index i returned is such that bins[i-1] <= x < bins[i] if -bins is monotonically increasing, or bins [i-1] > x >= bins[i] if -bins is monotonically decreasing. - -Beyond the bounds of the bins 0 or len(bins) is returned as appropriate. - -""") -except RuntimeError: - pass - -try: - add_docstring(bincount, -r"""bincount(x,weights=None) - -Return the number of occurrences of each value in x. - -x must be a list of non-negative integers. The output, b[i], -represents the number of times that i is found in x. If weights -is specified, every occurrence of i at a position p contributes -weights[p] instead of 1. - -See also: histogram, digitize, unique. - -""") -except RuntimeError: - pass - -try: - add_docstring(add_docstring, -r"""docstring(obj, docstring) - -Add a docstring to a built-in obj if possible. -If the obj already has a docstring raise a RuntimeError -If this routine does not know how to add a docstring to the object -raise a TypeError - -""") -except RuntimeError: - pass - - def interp(x, xp, fp, left=None, right=None): """ One-dimensional linear interpolation. @@ -2818,9 +2771,9 @@ y : array_like Input array to integrate. x : array_like, optional - If `x` is None, then spacing between all `y` elements is 1. + If `x` is None, then spacing between all `y` elements is `dx`. dx : scalar, optional - If `x` is None, spacing given by `dx` is assumed. + If `x` is None, spacing given by `dx` is assumed. Default is 1. axis : int, optional Specify the axis. @@ -2836,7 +2789,15 @@ if x is None: d = dx else: - d = diff(x,axis=axis) + x = asarray(x) + if x.ndim == 1: + d = diff(x) + # reshape to correct shape + shape = [1]*y.ndim + shape[axis] = d.shape[0] + d = d.reshape(shape) + else: + d = diff(x, axis=axis) nd = len(y.shape) slice1 = [slice(None)]*nd slice2 = [slice(None)]*nd Modified: branches/numpy-mingw-w64/numpy/lib/getlimits.py =================================================================== --- branches/numpy-mingw-w64/numpy/lib/getlimits.py 2009-02-20 13:30:20 UTC (rev 6424) +++ branches/numpy-mingw-w64/numpy/lib/getlimits.py 2009-02-20 16:37:01 UTC (rev 6425) @@ -88,6 +88,12 @@ _finfo_cache = {} def __new__(cls, dtype): + try: + dtype = np.dtype(dtype) + except TypeError: + # In case a float instance was given + dtype = np.dtype(type(dtype)) + obj = cls._finfo_cache.get(dtype,None) if obj is not None: return obj @@ -115,7 +121,7 @@ return obj def _init(self, dtype): - self.dtype = dtype + self.dtype = np.dtype(dtype) if dtype is ntypes.double: itype = ntypes.int64 fmt = '%24.16e' @@ -149,23 +155,23 @@ self.nexp = machar.iexp self.nmant = machar.it self.machar = machar - self._str_tiny = machar._str_xmin - self._str_max = machar._str_xmax - self._str_epsneg = machar._str_epsneg - self._str_eps = machar._str_eps - self._str_resolution = machar._str_resolution + self._str_tiny = machar._str_xmin.strip() + self._str_max = machar._str_xmax.strip() + self._str_epsneg = machar._str_epsneg.strip() + self._str_eps = machar._str_eps.strip() + self._str_resolution = machar._str_resolution.strip() return self def __str__(self): return '''\ Machine parameters for %(dtype)s --------------------------------------------------------------------- -precision=%(precision)3s resolution=%(_str_resolution)s -machep=%(machep)6s eps= %(_str_eps)s -negep =%(negep)6s epsneg= %(_str_epsneg)s -minexp=%(minexp)6s tiny= %(_str_tiny)s -maxexp=%(maxexp)6s max= %(_str_max)s -nexp =%(nexp)6s min= -max +precision=%(precision)3s resolution= %(_str_resolution)s +machep=%(machep)6s eps= %(_str_eps)s +negep =%(negep)6s epsneg= %(_str_epsneg)s +minexp=%(minexp)6s tiny= %(_str_tiny)s +maxexp=%(maxexp)6s max= %(_str_max)s +nexp =%(nexp)6s min= -max --------------------------------------------------------------------- ''' % self.__dict__ @@ -220,8 +226,11 @@ _min_vals = {} _max_vals = {} - def __init__(self, type): - self.dtype = np.dtype(type) + def __init__(self, int_type): + try: + self.dtype = np.dtype(int_type) + except TypeError: + self.dtype = np.dtype(type(int_type)) self.kind = self.dtype.kind self.bits = self.dtype.itemsize * 8 self.key = "%s%d" % (self.kind, self.bits) @@ -256,6 +265,17 @@ max = property(max) + def __str__(self): + """String representation.""" + return '''\ +Machine parameters for %(dtype)s +--------------------------------------------------------------------- +min = %(min)s +max = %(max)s +--------------------------------------------------------------------- +''' % {'dtype': self.dtype, 'min': self.min, 'max': self.max} + + if __name__ == '__main__': f = finfo(ntypes.single) print 'single epsilon:',f.eps Modified: branches/numpy-mingw-w64/numpy/lib/index_tricks.py =================================================================== --- branches/numpy-mingw-w64/numpy/lib/index_tricks.py 2009-02-20 13:30:20 UTC (rev 6424) +++ branches/numpy-mingw-w64/numpy/lib/index_tricks.py 2009-02-20 16:37:01 UTC (rev 6425) @@ -212,6 +212,8 @@ mgrid = nd_grid(sparse=False) ogrid = nd_grid(sparse=True) +mgrid.__doc__ = None # set in numpy.add_newdocs +ogrid.__doc__ = None # set in numpy.add_newdocs class AxisConcatenator(object): """Translates slice objects to concatenation along an axis. Modified: branches/numpy-mingw-w64/numpy/lib/info.py =================================================================== --- branches/numpy-mingw-w64/numpy/lib/info.py 2009-02-20 13:30:20 UTC (rev 6424) +++ branches/numpy-mingw-w64/numpy/lib/info.py 2009-02-20 16:37:01 UTC (rev 6425) @@ -1,134 +1,149 @@ -__doc_title__ = """Basic functions used by several sub-packages and -useful to have in the main name-space.""" -__doc__ = __doc_title__ + """ +""" +Basic functions used by several sub-packages and +useful to have in the main name-space. -Type handling -============== -iscomplexobj -- Test for complex object, scalar result -isrealobj -- Test for real object, scalar result -iscomplex -- Test for complex elements, array result -isreal -- Test for real elements, array result -imag -- Imaginary part -real -- Real part -real_if_close -- Turns complex number with tiny imaginary part to real -isneginf -- Tests for negative infinity ---| -isposinf -- Tests for positive infinity | -isnan -- Tests for nans |---- array results -isinf -- Tests for infinity | -isfinite -- Tests for finite numbers ---| -isscalar -- True if argument is a scalar -nan_to_num -- Replaces NaN's with 0 and infinities with large numbers -cast -- Dictionary of functions to force cast to each type -common_type -- Determine the 'minimum common type code' for a group - of arrays -mintypecode -- Return minimal allowed common typecode. +Type Handling +------------- +================ =================== +iscomplexobj Test for complex object, scalar result +isrealobj Test for real object, scalar result +iscomplex Test for complex elements, array result +isreal Test for real elements, array result +imag Imaginary part +real Real part +real_if_close Turns complex number with tiny imaginary part to real +isneginf Tests for negative infinity, array result +isposinf Tests for positive infinity, array result +isnan Tests for nans, array result +isinf Tests for infinity, array result +isfinite Tests for finite numbers, array result +isscalar True if argument is a scalar +nan_to_num Replaces NaN's with 0 and infinities with large numbers +cast Dictionary of functions to force cast to each type +common_type Determine the minimum common type code for a group + of arrays +mintypecode Return minimal allowed common typecode. +================ =================== -Index tricks -================== -mgrid -- Method which allows easy construction of N-d 'mesh-grids' -r_ -- Append and construct arrays: turns slice objects into - ranges and concatenates them, for 2d arrays appends - rows. -index_exp -- Konrad Hinsen's index_expression class instance which - can be useful for building complicated slicing syntax. +Index Tricks +------------ +================ =================== +mgrid Method which allows easy construction of N-d + 'mesh-grids' +``r_`` Append and construct arrays: turns slice objects into + ranges and concatenates them, for 2d arrays appends rows. +index_exp Konrad Hinsen's index_expression class instance which + can be useful for building complicated slicing syntax. +================ =================== -Useful functions -================== -select -- Extension of where to multiple conditions and choices -extract -- Extract 1d array from flattened array according to mask -insert -- Insert 1d array of values into Nd array according to mask -linspace -- Evenly spaced samples in linear space -logspace -- Evenly spaced samples in logarithmic space -fix -- Round x to nearest integer towards zero -mod -- Modulo mod(x,y) = x % y except keeps sign of y -amax -- Array maximum along axis -amin -- Array minimum along axis -ptp -- Array max-min along axis -cumsum -- Cumulative sum along axis -prod -- Product of elements along axis -cumprod -- Cumluative product along axis -diff -- Discrete differences along axis -angle -- Returns angle of complex argument -unwrap -- Unwrap phase along given axis (1-d algorithm) -sort_complex -- Sort a complex-array (based on real, then imaginary) -trim_zeros -- trim the leading and trailing zeros from 1D array. +Useful Functions +---------------- +================ =================== +select Extension of where to multiple conditions and choices +extract Extract 1d array from flattened array according to mask +insert Insert 1d array of values into Nd array according to mask +linspace Evenly spaced samples in linear space +logspace Evenly spaced samples in logarithmic space +fix Round x to nearest integer towards zero +mod Modulo mod(x,y) = x % y except keeps sign of y +amax Array maximum along axis +amin Array minimum along axis +ptp Array max-min along axis +cumsum Cumulative sum along axis +prod Product of elements along axis +cumprod Cumluative product along axis +diff Discrete differences along axis +angle Returns angle of complex argument +unwrap Unwrap phase along given axis (1-d algorithm) +sort_complex Sort a complex-array (based on real, then imaginary) +trim_zeros Trim the leading and trailing zeros from 1D array. +vectorize A class that wraps a Python function taking scalar + arguments into a generalized function which can handle + arrays of arguments using the broadcast rules of + numerix Python. +================ =================== -vectorize -- a class that wraps a Python function taking scalar - arguments into a generalized function which - can handle arrays of arguments using the broadcast - rules of numerix Python. +Shape Manipulation +------------------ +================ =================== +squeeze Return a with length-one dimensions removed. +atleast_1d Force arrays to be > 1D +atleast_2d Force arrays to be > 2D +atleast_3d Force arrays to be > 3D +vstack Stack arrays vertically (row on row) +hstack Stack arrays horizontally (column on column) +column_stack Stack 1D arrays as columns into 2D array +dstack Stack arrays depthwise (along third dimension) +split Divide array into a list of sub-arrays +hsplit Split into columns +vsplit Split into rows +dsplit Split along third dimension +================ =================== -Shape manipulation -=================== -squeeze -- Return a with length-one dimensions removed. -atleast_1d -- Force arrays to be > 1D -atleast_2d -- Force arrays to be > 2D -atleast_3d -- Force arrays to be > 3D -vstack -- Stack arrays vertically (row on row) -hstack -- Stack arrays horizontally (column on column) -column_stack -- Stack 1D arrays as columns into 2D array -dstack -- Stack arrays depthwise (along third dimension) -split -- Divide array into a list of sub-arrays -hsplit -- Split into columns -vsplit -- Split into rows -dsplit -- Split along third dimension +Matrix (2D Array) Manipulations +------------------------------- +================ =================== +fliplr 2D array with columns flipped +flipud 2D array with rows flipped +rot90 Rotate a 2D array a multiple of 90 degrees +eye Return a 2D array with ones down a given diagonal +diag Construct a 2D array from a vector, or return a given + diagonal from a 2D array. +mat Construct a Matrix +bmat Build a Matrix from blocks +================ =================== -Matrix (2d array) manipluations -=============================== -fliplr -- 2D array with columns flipped -flipud -- 2D array with rows flipped -rot90 -- Rotate a 2D array a multiple of 90 degrees -eye -- Return a 2D array with ones down a given diagonal -diag -- Construct a 2D array from a vector, or return a given - diagonal from a 2D array. -mat -- Construct a Matrix -bmat -- Build a Matrix from blocks - Polynomials -============ -poly1d -- A one-dimensional polynomial class +----------- +================ =================== +poly1d A one-dimensional polynomial class +poly Return polynomial coefficients from roots +roots Find roots of polynomial given coefficients +polyint Integrate polynomial +polyder Differentiate polynomial +polyadd Add polynomials +polysub Substract polynomials +polymul Multiply polynomials +polydiv Divide polynomials +polyval Evaluate polynomial at given argument +================ =================== -poly -- Return polynomial coefficients from roots -roots -- Find roots of polynomial given coefficients -polyint -- Integrate polynomial -polyder -- Differentiate polynomial -polyadd -- Add polynomials -polysub -- Substract polynomials -polymul -- Multiply polynomials -polydiv -- Divide polynomials -polyval -- Evaluate polynomial at given argument +Import Tricks +------------- +================ =================== +ppimport Postpone module import until trying to use it +ppimport_attr Postpone module import until trying to use its attribute +ppresolve Import postponed module and return it. +================ =================== -Import tricks -============= -ppimport -- Postpone module import until trying to use it -ppimport_attr -- Postpone module import until trying to use its - attribute -ppresolve -- Import postponed module and return it. +Machine Arithmetics +------------------- +================ =================== +machar_single Single precision floating point arithmetic parameters +machar_double Double precision floating point arithmetic parameters +================ =================== -Machine arithmetics -=================== -machar_single -- MachAr instance storing the parameters of system - single precision floating point arithmetics -machar_double -- MachAr instance storing the parameters of system - double precision floating point arithmetics +Threading Tricks +---------------- +================ =================== +ParallelExec Execute commands in parallel thread. +================ =================== -Threading tricks -================ -ParallelExec -- Execute commands in parallel thread. - -1D array set operations -======================= +1D Array Set Operations +----------------------- Set operations for 1D numeric arrays based on sort() function. -ediff1d -- Array difference (auxiliary function). -unique1d -- Unique elements of 1D array. -intersect1d -- Intersection of 1D arrays with unique elements. -intersect1d_nu -- Intersection of 1D arrays with any elements. -setxor1d -- Set exclusive-or of 1D arrays with unique elements. -setmember1d -- Return an array of shape of ar1 containing 1 where - the elements of ar1 are in ar2 and 0 otherwise. -union1d -- Union of 1D arrays with unique elements. -setdiff1d -- Set difference of 1D arrays with unique elements. +================ =================== +ediff1d Array difference (auxiliary function). +unique1d Unique elements of 1D array. +intersect1d Intersection of 1D arrays with unique elements. +intersect1d_nu Intersection of 1D arrays with any elements. +setxor1d Set exclusive-or of 1D arrays with unique elements. +setmember1d Return an array of shape of ar1 containing 1 where + the elements of ar1 are in ar2 and 0 otherwise. +union1d Union of 1D arrays with unique elements. +setdiff1d Set difference of 1D arrays with unique elements. +================ =================== """ Modified: branches/numpy-mingw-w64/numpy/lib/io.py =================================================================== --- branches/numpy-mingw-w64/numpy/lib/io.py 2009-02-20 13:30:20 UTC (rev 6424) +++ branches/numpy-mingw-w64/numpy/lib/io.py 2009-02-20 16:37:01 UTC (rev 6425) @@ -1,4 +1,5 @@ __all__ = ['savetxt', 'loadtxt', + 'genfromtxt', 'ndfromtxt', 'mafromtxt', 'recfromtxt', 'recfromcsv', 'load', 'loads', 'save', 'savez', 'packbits', 'unpackbits', @@ -15,7 +16,11 @@ from _datasource import DataSource from _compiled_base import packbits, unpackbits +from _iotools import LineSplitter, NameValidator, StringConverter, \ + _is_string_like, has_nested_fields, flatten_dtype + _file = file +_string_like = _is_string_like class BagObj(object): """A simple class that converts attribute lookups to @@ -264,10 +269,6 @@ return str -def _string_like(obj): - try: obj + '' - except (TypeError, ValueError): return 0 - return 1 def loadtxt(fname, dtype=float, comments='#', delimiter=None, converters=None, skiprows=0, usecols=None, unpack=False): @@ -342,7 +343,7 @@ if usecols is not None: usecols = list(usecols) - if _string_like(fname): + if _is_string_like(fname): if fname.endswith('.gz'): import gzip fh = gzip.open(fname) @@ -520,7 +521,7 @@ """ - if _string_like(fname): + if _is_string_like(fname): if fname.endswith('.gz'): import gzip fh = gzip.open(fname,'wb') @@ -603,8 +604,508 @@ seq = regexp.findall(file.read()) if seq and not isinstance(seq[0], tuple): - # make sure np.array doesn't interpret strings as binary data - # by always producing a list of tuples - seq = [(x,) for x in seq] - output = np.array(seq, dtype=dtype) + # Only one group is in the regexp. + # Create the new array as a single data-type and then + # re-interpret as a single-field structured array. + newdtype = np.dtype(dtype[dtype.names[0]]) + output = np.array(seq, dtype=newdtype) + output.dtype = dtype + else: + output = np.array(seq, dtype=dtype) + return output + + + + +#####-------------------------------------------------------------------------- +#---- --- ASCII functions --- +#####-------------------------------------------------------------------------- + + + +def genfromtxt(fname, dtype=float, comments='#', delimiter=None, skiprows=0, + converters=None, missing='', missing_values=None, usecols=None, + names=None, excludelist=None, deletechars=None, + case_sensitive=True, unpack=None, usemask=False, loose=True): + """ + Load data from a text file. + + Each line past the first `skiprows` ones is split at the `delimiter` + character, and characters following the `comments` character are discarded. + + + + Parameters + ---------- + fname : file or string + File or filename to read. If the filename extension is `.gz` or `.bz2`, + the file is first decompressed. + dtype : data-type + Data type of the resulting array. If this is a flexible data-type, + the resulting array will be 1-dimensional, and each row will be + interpreted as an element of the array. In this case, the number + of columns used must match the number of fields in the data-type, + and the names of each field will be set by the corresponding name + of the dtype. + If None, the dtypes will be determined by the contents of each + column, individually. + comments : {string}, optional + The character used to indicate the start of a comment. + All the characters occurring on a line after a comment are discarded + delimiter : {string}, optional + The string used to separate values. By default, any consecutive + whitespace act as delimiter. + skiprows : {int}, optional + Numbers of lines to skip at the beginning of the file. + converters : {None, dictionary}, optional + A dictionary mapping column number to a function that will convert + values in the column to a number. Converters can also be used to + provide a default value for missing data: + ``converters = {3: lambda s: float(s or 0)}``. + missing : {string}, optional + A string representing a missing value, irrespective of the column where + it appears (e.g., `'missing'` or `'unused'`). + missing_values : {None, dictionary}, optional + A dictionary mapping a column number to a string indicating whether the + corresponding field should be masked. + usecols : {None, sequence}, optional + Which columns to read, with 0 being the first. For example, + ``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns. + names : {None, True, string, sequence}, optional + If `names` is True, the field names are read from the first valid line + after the first `skiprows` lines. + If `names` is a sequence or a single-string of comma-separated names, + the names will be used to define the field names in a flexible dtype. + If `names` is None, the names of the dtype fields will be used, if any. + excludelist : {sequence}, optional + A list of names to exclude. This list is appended to the default list + ['return','file','print']. Excluded names are appended an underscore: + for example, `file` would become `file_`. + deletechars : {string}, optional + A string combining invalid characters that must be deleted from the names. + case_sensitive : {True, False, 'upper', 'lower'}, optional + If True, field names are case_sensitive. + If False or 'upper', field names are converted to upper case. + If 'lower', field names are converted to lower case. + unpack : {bool}, optional + If True, the returned array is transposed, so that arguments may be + unpacked using ``x, y, z = loadtxt(...)`` + usemask : {bool}, optional + If True, returns a masked array. + If False, return a regular standard array. + + Returns + ------- + out : MaskedArray + Data read from the text file. + + Notes + -------- + * When spaces are used as delimiters, or when no delimiter has been given + as input, there should not be any missing data between two fields. + * When the variable are named (either by a flexible dtype or with `names`, + there must not be any header in the file (else a :exc:ValueError exception + is raised). + + Warnings + -------- + * Individual values are not stripped of spaces by default. + When using a custom converter, make sure the function does remove spaces. + + See Also + -------- + numpy.loadtxt : equivalent function when no data is missing. + + """ + # + if usemask: + from numpy.ma import MaskedArray, make_mask_descr + # Check the input dictionary of converters + user_converters = converters or {} + if not isinstance(user_converters, dict): + errmsg = "The input argument 'converter' should be a valid dictionary "\ + "(got '%s' instead)" + raise TypeError(errmsg % type(user_converters)) + # Check the input dictionary of missing values + user_missing_values = missing_values or {} + if not isinstance(user_missing_values, dict): + errmsg = "The input argument 'missing_values' should be a valid "\ + "dictionary (got '%s' instead)" + raise TypeError(errmsg % type(missing_values)) + defmissing = [_.strip() for _ in missing.split(',')] + [''] + + # Initialize the filehandle, the LineSplitter and the NameValidator +# fhd = _to_filehandle(fname) + if isinstance(fname, basestring): + fhd = np.lib._datasource.open(fname) + elif not hasattr(fname, 'read'): + raise TypeError("The input should be a string or a filehandle. "\ + "(got %s instead)" % type(fname)) + else: + fhd = fname + split_line = LineSplitter(delimiter=delimiter, comments=comments, + autostrip=False)._handyman + validate_names = NameValidator(excludelist=excludelist, + deletechars=deletechars, + case_sensitive=case_sensitive) + + # Get the first valid lines after the first skiprows ones + for i in xrange(skiprows): + fhd.readline() + first_values = None + while not first_values: + first_line = fhd.readline() + if first_line == '': + raise IOError('End-of-file reached before encountering data.') + if names is True: + first_values = first_line.strip().split(delimiter) + else: + first_values = split_line(first_line) + if names is True: + fval = first_values[0].strip() + if fval in comments: + del first_values[0] + + # Check the columns to use + if usecols is not None: + usecols = list(usecols) + nbcols = len(usecols or first_values) + + # Check the names and overwrite the dtype.names if needed + if dtype is not None: + dtype = np.dtype(dtype) + dtypenames = getattr(dtype, 'names', None) + if names is True: + names = validate_names([_.strip() for _ in first_values]) + first_line ='' + elif _is_string_like(names): + names = validate_names([_.strip() for _ in names.split(',')]) + elif names: + names = validate_names(names) + elif dtypenames: + dtype.names = validate_names(dtypenames) + if names and dtypenames: + dtype.names = names + + # If usecols is a list of names, convert to a list of indices + if usecols: + for (i, current) in enumerate(usecols): + if _is_string_like(current): + usecols[i] = names.index(current) + + # If user_missing_values has names as keys, transform them to indices + missing_values = {} + for (key, val) in user_missing_values.iteritems(): + # If val is a list, flatten it. In any case, add missing &'' to the list + if isinstance(val, (list, tuple)): + val = [str(_) for _ in val] + else: + val = [str(val),] + val.extend(defmissing) + if _is_string_like(key): + try: + missing_values[names.index(key)] = val + except ValueError: + pass + else: + missing_values[key] = val + + + # Initialize the default converters + if dtype is None: + # Note: we can't use a [...]*nbcols, as we would have 3 times the same + # ... converter, instead of 3 different converters. + converters = [StringConverter(None, + missing_values=missing_values.get(_, defmissing)) + for _ in range(nbcols)] + else: + flatdtypes = flatten_dtype(dtype) + # Initialize the converters + if len(flatdtypes) > 1: + # Flexible type : get a converter from each dtype + converters = [StringConverter(dt, + missing_values=missing_values.get(i, defmissing), + locked=True) + for (i, dt) in enumerate(flatdtypes)] + else: + # Set to a default converter (but w/ different missing values) + converters = [StringConverter(dtype, + missing_values=missing_values.get(_, defmissing), + locked=True) + for _ in range(nbcols)] + missing_values = [_.missing_values for _ in converters] + + # Update the converters to use the user-defined ones + uc_update = [] + for (i, conv) in user_converters.iteritems(): + # If the converter is specified by column names, use the index instead + if _is_string_like(i): + i = names.index(i) + if usecols: + try: + i = usecols.index(i) + except ValueError: + # Unused converter specified + continue + converters[i].update(conv, default=None, + missing_values=missing_values[i], + locked=True) + uc_update.append((i, conv)) + # Make sure we have the corrected keys in user_converters... + user_converters.update(uc_update) + + # Reset the names to match the usecols + if (not first_line) and usecols: + names = [names[_] for _ in usecols] + + rows = [] + append_to_rows = rows.append + if usemask: + masks = [] + append_to_masks = masks.append + # Parse each line + for line in itertools.chain([first_line,], fhd): + values = split_line(line) + # Skip an empty line + if len(values) == 0: + continue + # Select only the columns we need + if usecols: + values = [values[_] for _ in usecols] + # Check whether we need to update the converter + if dtype is None: + for (converter, item) in zip(converters, values): + converter.upgrade(item) + # Store the values + append_to_rows(tuple(values)) + if usemask: + append_to_masks(tuple([val.strip() in mss + for (val, mss) in zip(values, + missing_values)])) + + # Convert each value according to the converter: + # We want to modify the list in place to avoid creating a new one... + if loose: + conversionfuncs = [conv._loose_call for conv in converters] + else: + conversionfuncs = [conv._strict_call for conv in converters] + for (i, vals) in enumerate(rows): + rows[i] = tuple([convert(val) + for (convert, val) in zip(conversionfuncs, vals)]) + + # Reset the dtype + data = rows + if dtype is None: + # Get the dtypes from the types of the converters + coldtypes = [conv.type for conv in converters] + # Find the columns with strings... + strcolidx = [i for (i, v) in enumerate(coldtypes) + if v in (type('S'), np.string_)] + # ... and take the largest number of chars. + for i in strcolidx: + coldtypes[i] = "|S%i" % max(len(row[i]) for row in data) + # + if names is None: + # If the dtype is uniform, don't define names, else use '' + base = set([c.type for c in converters if c._checked]) + + if len(base) == 1: + (ddtype, mdtype) = (list(base)[0], np.bool) + else: + ddtype = [('', dt) for dt in coldtypes] + mdtype = [('', np.bool) for dt in coldtypes] + else: + ddtype = zip(names, coldtypes) + mdtype = zip(names, [np.bool] * len(coldtypes)) + output = np.array(data, dtype=ddtype) + if usemask: + outputmask = np.array(masks, dtype=mdtype) + else: + # Overwrite the initial dtype names if needed + if names and dtype.names: + dtype.names = names + flatdtypes = flatten_dtype(dtype) + # Case 1. We have a structured type + if len(flatdtypes) > 1: + # Nested dtype, eg [('a', int), ('b', [('b0', int), ('b1', 'f4')])] + # First, create the array using a flattened dtype: + # [('a', int), ('b1', int), ('b2', float)] + # Then, view the array using the specified dtype. + if has_nested_fields(dtype): + if 'O' in (_.char for _ in flatdtypes): + errmsg = "Nested fields involving objects "\ + "are not supported..." + raise NotImplementedError(errmsg) + rows = np.array(data, dtype=[('', t) for t in flatdtypes]) + output = rows.view(dtype) + else: + output = np.array(data, dtype=dtype) + # Now, process the rowmasks the same way + if usemask: + rowmasks = np.array(masks, + dtype=np.dtype([('', np.bool) + for t in flatdtypes])) + # Construct the new dtype + mdtype = make_mask_descr(dtype) + outputmask = rowmasks.view(mdtype) + # Case #2. We have a basic dtype + else: + # We used some user-defined converters + if user_converters: + ishomogeneous = True + descr = [] + for (i, ttype) in enumerate([conv.type for conv in converters]): + # Keep the dtype of the current converter + if i in user_converters: + ishomogeneous &= (ttype == dtype.type) + if ttype == np.string_: + ttype = "|S%i" % max(len(row[i]) for row in data) + descr.append(('', ttype)) + else: + descr.append(('', dtype)) + # So we changed the dtype ? + if not ishomogeneous: + # We have more than one field + if len(descr) > 1: + dtype = np.dtype(descr) + # We have only one field: drop the name if not needed. + else: + dtype = np.dtype(ttype) + # + output = np.array(data, dtype) + if usemask: + if dtype.names: + mdtype = [(_, np.bool) for _ in dtype.names] + else: + mdtype = np.bool + outputmask = np.array(masks, dtype=mdtype) + # Try to take care of the missing data we missed + if usemask and output.dtype.names: + for (name, conv) in zip(names or (), converters): + missing_values = [conv(_) for _ in conv.missing_values if _ != ''] + for mval in missing_values: + outputmask[name] |= (output[name] == mval) + # Construct the final array + if usemask: + output = output.view(MaskedArray) + output._mask = outputmask + if unpack: + return output.squeeze().T + return output.squeeze() + + + +def ndfromtxt(fname, dtype=float, comments='#', delimiter=None, skiprows=0, + converters=None, missing='', missing_values=None, + usecols=None, unpack=None, names=None, + excludelist=None, deletechars=None, case_sensitive=True,): + """ + Load ASCII data stored in fname and returns a ndarray. + + Complete description of all the optional input parameters is available in + the docstring of the `genfromtxt` function. + + See Also + -------- + numpy.genfromtxt : generic function. + + """ + kwargs = dict(dtype=dtype, comments=comments, delimiter=delimiter, + skiprows=skiprows, converters=converters, + missing=missing, missing_values=missing_values, + usecols=usecols, unpack=unpack, names=names, + excludelist=excludelist, deletechars=deletechars, + case_sensitive=case_sensitive, usemask=False) + return genfromtxt(fname, **kwargs) + +def mafromtxt(fname, dtype=float, comments='#', delimiter=None, skiprows=0, + converters=None, missing='', missing_values=None, + usecols=None, unpack=None, names=None, + excludelist=None, deletechars=None, case_sensitive=True,): + """ + Load ASCII data stored in fname and returns a MaskedArray. + + Complete description of all the optional input parameters is available in + the docstring of the `genfromtxt` function. + + See Also + -------- + numpy.genfromtxt : generic function. + """ + kwargs = dict(dtype=dtype, comments=comments, delimiter=delimiter, + skiprows=skiprows, converters=converters, + missing=missing, missing_values=missing_values, + usecols=usecols, unpack=unpack, names=names, + excludelist=excludelist, deletechars=deletechars, + case_sensitive=case_sensitive, + usemask=True) + return genfromtxt(fname, **kwargs) + + +def recfromtxt(fname, dtype=None, comments='#', delimiter=None, skiprows=0, + converters=None, missing='', missing_values=None, + usecols=None, unpack=None, names=None, + excludelist=None, deletechars=None, case_sensitive=True, + usemask=False): + """ + Load ASCII data stored in fname and returns a standard recarray (if + `usemask=False`) or a MaskedRecords (if `usemask=True`). + + Complete description of all the optional input parameters is available in + the docstring of the `genfromtxt` function. + + See Also + -------- + numpy.genfromtxt : generic function + + Warnings + -------- + * by default, `dtype=None`, which means that the dtype of the output array + will be determined from the data. + """ + kwargs = dict(dtype=dtype, comments=comments, delimiter=delimiter, + skiprows=skiprows, converters=converters, + missing=missing, missing_values=missing_values, + usecols=usecols, unpack=unpack, names=names, + excludelist=excludelist, deletechars=deletechars, + case_sensitive=case_sensitive, usemask=usemask) + output = genfromtxt(fname, **kwargs) + if usemask: + from numpy.ma.mrecords import MaskedRecords + output = output.view(MaskedRecords) + else: + output = output.view(np.recarray) + return output + + +def recfromcsv(fname, dtype=None, comments='#', skiprows=0, + converters=None, missing='', missing_values=None, + usecols=None, unpack=None, names=True, + excludelist=None, deletechars=None, case_sensitive='lower', + usemask=False): + """ + Load ASCII data stored in comma-separated file and returns a recarray (if + `usemask=False`) or a MaskedRecords (if `usemask=True`). + + Complete description of all the optional input parameters is available in + the docstring of the `genfromtxt` function. + + See Also + -------- + numpy.genfromtxt : generic function + """ + kwargs = dict(dtype=dtype, comments=comments, delimiter=",", + skiprows=skiprows, converters=converters, + missing=missing, missing_values=missing_values, + usecols=usecols, unpack=unpack, names=names, + excludelist=excludelist, deletechars=deletechars, + case_sensitive=case_sensitive, usemask=usemask) + output = genfromtxt(fname, **kwargs) + if usemask: + from numpy.ma.mrecords import MaskedRecords + output = output.view(MaskedRecords) + else: + output = output.view(np.recarray) + return output + Copied: branches/numpy-mingw-w64/numpy/lib/recfunctions.py (from rev 6424, trunk/numpy/lib/recfunctions.py) Modified: branches/numpy-mingw-w64/numpy/lib/src/_compiled_base.c =================================================================== --- branches/numpy-mingw-w64/numpy/lib/src/_compiled_base.c 2009-02-20 13:30:20 UTC (rev 6424) +++ branches/numpy-mingw-w64/numpy/lib/src/_compiled_base.c 2009-02-20 16:37:01 UTC (rev 6425) @@ -494,35 +494,46 @@ #define _TESTDOC1(typebase) (obj->ob_type == &Py##typebase##_Type) #define _TESTDOC2(typebase) (obj->ob_type == Py##typebase##_TypePtr) -#define _ADDDOC(typebase, doc, name) { \ +#define _ADDDOC(typebase, doc, name) do { \ Py##typebase##Object *new = (Py##typebase##Object *)obj; \ if (!(doc)) { \ doc = docstr; \ } \ else { \ - PyErr_Format(PyExc_RuntimeError, \ - "%s method %s",name, msg); \ + PyErr_Format(PyExc_RuntimeError, "%s method %s", name, msg); \ return NULL; \ } \ - } + } while (0) - if _TESTDOC1(CFunction) - _ADDDOC(CFunction, new->m_ml->ml_doc, new->m_ml->ml_name) - else if _TESTDOC1(Type) - _ADDDOC(Type, new->tp_doc, new->tp_name) - else if _TESTDOC2(MemberDescr) - _ADDDOC(MemberDescr, new->d_member->doc, new->d_member->name) - else if _TESTDOC2(GetSetDescr) - _ADDDOC(GetSetDescr, new->d_getset->doc, new->d_getset->name) - else if _TESTDOC2(MethodDescr) - _ADDDOC(MethodDescr, new->d_method->ml_doc, - new->d_method->ml_name) - else { - PyErr_SetString(PyExc_TypeError, - "Cannot set a docstring for that object"); - return NULL; - } + if (_TESTDOC1(CFunction)) + _ADDDOC(CFunction, new->m_ml->ml_doc, new->m_ml->ml_name); + else if (_TESTDOC1(Type)) + _ADDDOC(Type, new->tp_doc, new->tp_name); + else if (_TESTDOC2(MemberDescr)) + _ADDDOC(MemberDescr, new->d_member->doc, new->d_member->name); + else if (_TESTDOC2(GetSetDescr)) + _ADDDOC(GetSetDescr, new->d_getset->doc, new->d_getset->name); + else if (_TESTDOC2(MethodDescr)) + _ADDDOC(MethodDescr, new->d_method->ml_doc, new->d_method->ml_name); + else { + PyObject *doc_attr; + + doc_attr = PyObject_GetAttrString(obj, "__doc__"); + if (doc_attr != NULL && doc_attr != Py_None) { + PyErr_Format(PyExc_RuntimeError, "object %s", msg); + return NULL; + } + Py_XDECREF(doc_attr); + if (PyObject_SetAttrString(obj, "__doc__", str) < 0) { + PyErr_SetString(PyExc_TypeError, + "Cannot set a docstring for that object"); + return NULL; + } + Py_INCREF(Py_None); + return Py_None; + } + #undef _TESTDOC1 #undef _TESTDOC2 #undef _ADDDOC @@ -533,35 +544,6 @@ } -static char packbits_doc[] = - "out = numpy.packbits(myarray, axis=None)\n\n" - " myarray : an integer type array whose elements should be packed to bits\n\n" - " This routine packs the elements of a binary-valued dataset into a\n" - " NumPy array of type uint8 ('B') whose bits correspond to\n" - " the logical (0 or nonzero) value of the input elements.\n" - " The dimension over-which bit-packing is done is given by axis.\n" - " The shape of the output has the same number of dimensions as the input\n" - " (unless axis is None, in which case the output is 1-d).\n" - "\n" - " Example:\n" - " >>> a = array([[[1,0,1],\n" - " ... [0,1,0]],\n" - " ... [[1,1,0],\n" - " ... [0,0,1]]])\n" - " >>> b = numpy.packbits(a,axis=-1)\n" - " >>> b\n" - " array([[[160],[64]],[[192],[32]]], dtype=uint8)\n\n" - " Note that 160 = 128 + 32\n" - " 192 = 128 + 64\n"; - -static char unpackbits_doc[] = - "out = numpy.unpackbits(myarray, axis=None)\n\n" - " myarray - array of uint8 type where each element represents a bit-field\n" - " that should be unpacked into a boolean output array\n\n" - " The shape of the output array is either 1-d (if axis is None) or\n" - " the same shape as the input array with unpacking done along the\n" - " axis specified."; - /* PACKBITS This function packs binary (0 or 1) 1-bit per pixel arrays @@ -809,9 +791,9 @@ {"add_docstring", (PyCFunction)arr_add_docstring, METH_VARARGS, NULL}, {"packbits", (PyCFunction)io_pack, METH_VARARGS | METH_KEYWORDS, - packbits_doc}, + NULL}, {"unpackbits", (PyCFunction)io_unpack, METH_VARARGS | METH_KEYWORDS, - unpackbits_doc}, + NULL}, {NULL, NULL} /* sentinel */ }; Copied: branches/numpy-mingw-w64/numpy/lib/tests/test__iotools.py (from rev 6424, trunk/numpy/lib/tests/test__iotools.py) Modified: branches/numpy-mingw-w64/numpy/lib/tests/test_function_base.py =================================================================== --- branches/numpy-mingw-w64/numpy/lib/tests/test_function_base.py 2009-02-20 13:30:20 UTC (rev 6424) +++ branches/numpy-mingw-w64/numpy/lib/tests/test_function_base.py 2009-02-20 16:37:01 UTC (rev 6425) @@ -430,6 +430,44 @@ #check integral of normal equals 1 assert_almost_equal(sum(r,axis=0),1,7) + def test_ndim(self): + x = linspace(0, 1, 3) + y = linspace(0, 2, 8) + z = linspace(0, 3, 13) + + wx = ones_like(x) * (x[1]-x[0]) + wx[0] /= 2 + wx[-1] /= 2 + wy = ones_like(y) * (y[1]-y[0]) + wy[0] /= 2 + wy[-1] /= 2 + wz = ones_like(z) * (z[1]-z[0]) + wz[0] /= 2 + wz[-1] /= 2 + + q = x[:,None,None] + y[None,:,None] + z[None,None,:] + + qx = (q*wx[:,None,None]).sum(axis=0) + qy = (q*wy[None,:,None]).sum(axis=1) + qz = (q*wz[None,None,:]).sum(axis=2) + + # n-d `x` + r = trapz(q, x=x[:,None,None], axis=0) + assert_almost_equal(r, qx) + r = trapz(q, x=y[None,:,None], axis=1) + assert_almost_equal(r, qy) + r = trapz(q, x=z[None,None,:], axis=2) + assert_almost_equal(r, qz) + + # 1-d `x` + r = trapz(q, x=x, axis=0) + assert_almost_equal(r, qx) + r = trapz(q, x=y, axis=1) + assert_almost_equal(r, qy) + r = trapz(q, x=z, axis=2) + assert_almost_equal(r, qz) + + class TestSinc(TestCase): def test_simple(self): assert(sinc(0)==1) Modified: branches/numpy-mingw-w64/numpy/lib/tests/test_getlimits.py =================================================================== --- branches/numpy-mingw-w64/numpy/lib/tests/test_getlimits.py 2009-02-20 13:30:20 UTC (rev 6424) +++ branches/numpy-mingw-w64/numpy/lib/tests/test_getlimits.py 2009-02-20 16:37:01 UTC (rev 6425) @@ -51,5 +51,9 @@ assert_equal(iinfo(T).max, T(-1)) +def test_instances(): + iinfo(10) + finfo(3.0) + if __name__ == "__main__": run_module_suite() Modified: branches/numpy-mingw-w64/numpy/lib/tests/test_io.py =================================================================== --- branches/numpy-mingw-w64/numpy/lib/tests/test_io.py 2009-02-20 13:30:20 UTC (rev 6424) +++ branches/numpy-mingw-w64/numpy/lib/tests/test_io.py 2009-02-20 16:37:01 UTC (rev 6425) @@ -1,10 +1,25 @@ -from numpy.testing import * + import numpy as np +import numpy.ma as ma +from numpy.ma.testutils import * + import StringIO from tempfile import NamedTemporaryFile +import sys, time +from datetime import datetime -class RoundtripTest: + +MAJVER, MINVER = sys.version_info[:2] + +def strptime(s, fmt=None): + """This function is available in the datetime module only + from Python >= 2.5. + + """ + return datetime(*time.strptime(s, fmt)[:3]) + +class RoundtripTest(object): def roundtrip(self, save_func, *args, **kwargs): """ save_func : callable @@ -25,7 +40,14 @@ file_on_disk = kwargs.get('file_on_disk', False) if file_on_disk: - target_file = NamedTemporaryFile() + # Do not delete the file on windows, because we can't + # reopen an already opened file on that platform, so we + # need to close the file and reopen it, implying no + # automatic deletion. + if sys.platform == 'win32' and MAJVER >= 2 and MINVER >= 6: + target_file = NamedTemporaryFile(delete=False) + else: + target_file = NamedTemporaryFile() load_file = target_file.name else: target_file = StringIO.StringIO() @@ -37,6 +59,9 @@ target_file.flush() target_file.seek(0) + if sys.platform == 'win32' and not isinstance(target_file, StringIO.StringIO): + target_file.close() + arr_reloaded = np.load(load_file, **load_kwds) self.arr = arr @@ -59,6 +84,7 @@ a = np.array([1, 2, 3, 4], int) self.roundtrip(a) + @np.testing.dec.knownfailureif(sys.platform=='win32', "Fail on Win32") def test_mmap(self): a = np.array([[1, 2.5], [4, 7.3]]) self.roundtrip(a, file_on_disk=True, load_kwds={'mmap_mode': 'r'}) @@ -95,6 +121,7 @@ class TestSaveTxt(TestCase): + @np.testing.dec.knownfailureif(sys.platform=='win32', "Fail on Win32") def test_array(self): a =np.array([[1, 2], [3, 4]], float) c = StringIO.StringIO() @@ -319,7 +346,6 @@ assert_array_equal(x, a) def test_record_2(self): - return # pass this test until #736 is resolved c = StringIO.StringIO() c.write('1312 foo\n1534 bar\n4444 qux') c.seek(0) @@ -341,5 +367,447 @@ assert_array_equal(x, a) +#####-------------------------------------------------------------------------- + + +class TestFromTxt(TestCase): + # + def test_record(self): + "Test w/ explicit dtype" + data = StringIO.StringIO('1 2\n3 4') +# data.seek(0) + test = np.ndfromtxt(data, dtype=[('x', np.int32), ('y', np.int32)]) + control = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')]) + assert_equal(test, control) + # + data = StringIO.StringIO('M 64.0 75.0\nF 25.0 60.0') +# data.seek(0) + descriptor = {'names': ('gender','age','weight'), + 'formats': ('S1', 'i4', 'f4')} + control = np.array([('M', 64.0, 75.0), ('F', 25.0, 60.0)], + dtype=descriptor) + test = np.ndfromtxt(data, dtype=descriptor) + assert_equal(test, control) + + def test_array(self): + "Test outputing a standard ndarray" + data = StringIO.StringIO('1 2\n3 4') + control = np.array([[1,2],[3,4]], dtype=int) + test = np.ndfromtxt(data, dtype=int) + assert_array_equal(test, control) + # + data.seek(0) + control = np.array([[1,2],[3,4]], dtype=float) + test = np.loadtxt(data, dtype=float) + assert_array_equal(test, control) + + def test_1D(self): + "Test squeezing to 1D" + control = np.array([1, 2, 3, 4], int) + # + data = StringIO.StringIO('1\n2\n3\n4\n') + test = np.ndfromtxt(data, dtype=int) + assert_array_equal(test, control) + # + data = StringIO.StringIO('1,2,3,4\n') + test = np.ndfromtxt(data, dtype=int, delimiter=',') + assert_array_equal(test, control) + + def test_comments(self): + "Test the stripping of comments" + control = np.array([1, 2, 3, 5], int) + # Comment on its own line + data = StringIO.StringIO('# comment\n1,2,3,5\n') + test = np.ndfromtxt(data, dtype=int, delimiter=',', comments='#') + assert_equal(test, control) + # Comment at the end of a line + data = StringIO.StringIO('1,2,3,5# comment\n') + test = np.ndfromtxt(data, dtype=int, delimiter=',', comments='#') + assert_equal(test, control) + + def test_skiprows(self): + "Test row skipping" + control = np.array([1, 2, 3, 5], int) + # + data = StringIO.StringIO('comment\n1,2,3,5\n') + test = np.ndfromtxt(data, dtype=int, delimiter=',', skiprows=1) + assert_equal(test, control) + # + data = StringIO.StringIO('# comment\n1,2,3,5\n') + test = np.loadtxt(data, dtype=int, delimiter=',', skiprows=1) + assert_equal(test, control) + + def test_header(self): + "Test retrieving a header" + data = StringIO.StringIO('gender age weight\nM 64.0 75.0\nF 25.0 60.0') + test = np.ndfromtxt(data, dtype=None, names=True) + control = {'gender': np.array(['M', 'F']), + 'age': np.array([64.0, 25.0]), + 'weight': np.array([75.0, 60.0])} + assert_equal(test['gender'], control['gender']) + assert_equal(test['age'], control['age']) + assert_equal(test['weight'], control['weight']) + + def test_auto_dtype(self): + "Test the automatic definition of the output dtype" + data = StringIO.StringIO('A 64 75.0 3+4j True\nBCD 25 60.0 5+6j False') + test = np.ndfromtxt(data, dtype=None) + control = [np.array(['A', 'BCD']), + np.array([64, 25]), + np.array([75.0, 60.0]), + np.array([3+4j, 5+6j]), + np.array([True, False]),] + assert_equal(test.dtype.names, ['f0','f1','f2','f3','f4']) + for (i, ctrl) in enumerate(control): + assert_equal(test['f%i' % i], ctrl) + + + def test_auto_dtype_uniform(self): + "Tests whether the output dtype can be uniformized" + data = StringIO.StringIO('1 2 3 4\n5 6 7 8\n') + test = np.ndfromtxt(data, dtype=None) + control = np.array([[1,2,3,4],[5,6,7,8]]) + assert_equal(test, control) + + + def test_fancy_dtype(self): + "Check that a nested dtype isn't MIA" + data = StringIO.StringIO('1,2,3.0\n4,5,6.0\n') + fancydtype = np.dtype([('x', int), ('y', [('t', int), ('s', float)])]) + test = np.ndfromtxt(data, dtype=fancydtype, delimiter=',') + control = np.array([(1,(2,3.0)),(4,(5,6.0))], dtype=fancydtype) + assert_equal(test, control) + + + def test_names_overwrite(self): + "Test overwriting the names of the dtype" + descriptor = {'names': ('g','a','w'), + 'formats': ('S1', 'i4', 'f4')} + data = StringIO.StringIO('M 64.0 75.0\nF 25.0 60.0') + names = ('gender','age','weight') + test = np.ndfromtxt(data, dtype=descriptor, names=names) + descriptor['names'] = names + control = np.array([('M', 64.0, 75.0), + ('F', 25.0, 60.0)], dtype=descriptor) + assert_equal(test, control) + + + def test_commented_header(self): + "Check that names can be retrieved even if the line is commented out." + data = StringIO.StringIO(""" +#gender age weight +M 21 72.100000 +F 35 58.330000 +M 33 21.99 + """) + # The # is part of the first name and should be deleted automatically. + test = np.genfromtxt(data, names=True, dtype=None) + ctrl = np.array([('M', 21, 72.1), ('F', 35, 58.33), ('M', 33, 21.99)], + dtype=[('gender','|S1'), ('age', int), ('weight', float)]) + assert_equal(test, ctrl) + # Ditto, but we should get rid of the first element + data = StringIO.StringIO(""" +# gender age weight +M 21 72.100000 +F 35 58.330000 +M 33 21.99 + """) + test = np.genfromtxt(data, names=True, dtype=None) + assert_equal(test, ctrl) + + + def test_autonames_and_usecols(self): + "Tests names and usecols" + data = StringIO.StringIO('A B C D\n aaaa 121 45 9.1') + test = np.ndfromtxt(data, usecols=('A', 'C', 'D'), + names=True, dtype=None) + control = np.array(('aaaa', 45, 9.1), + dtype=[('A', '|S4'), ('C', int), ('D', float)]) + assert_equal(test, control) + + + def test_converters_with_usecols(self): + "Test the combination user-defined converters and usecol" + data = StringIO.StringIO('1,2,3,,5\n6,7,8,9,10\n') + test = np.ndfromtxt(data, dtype=int, delimiter=',', + converters={3:lambda s: int(s or -999)}, + usecols=(1, 3, )) + control = np.array([[2, -999], [7, 9]], int) + assert_equal(test, control) + + def test_converters_with_usecols_and_names(self): + "Tests names and usecols" + data = StringIO.StringIO('A B C D\n aaaa 121 45 9.1') + test = np.ndfromtxt(data, usecols=('A', 'C', 'D'), names=True, + dtype=None, converters={'C':lambda s: 2 * int(s)}) + control = np.array(('aaaa', 90, 9.1), + dtype=[('A', '|S4'), ('C', int), ('D', float)]) + assert_equal(test, control) + + def test_converters_cornercases(self): + "Test the conversion to datetime." + converter = {'date': lambda s: strptime(s, '%Y-%m-%d %H:%M:%SZ')} + data = StringIO.StringIO('2009-02-03 12:00:00Z, 72214.0') + test = np.ndfromtxt(data, delimiter=',', dtype=None, + names=['date','stid'], converters=converter) + control = np.array((datetime(2009,02,03), 72214.), + dtype=[('date', np.object_), ('stid', float)]) + assert_equal(test, control) + + + def test_unused_converter(self): + "Test whether unused converters are forgotten" + data = StringIO.StringIO("1 21\n 3 42\n") + test = np.ndfromtxt(data, usecols=(1,), + converters={0: lambda s: int(s, 16)}) + assert_equal(test, [21, 42]) + # + data.seek(0) + test = np.ndfromtxt(data, usecols=(1,), + converters={1: lambda s: int(s, 16)}) + assert_equal(test, [33, 66]) + + + def test_dtype_with_converters(self): + dstr = "2009; 23; 46" + test = np.ndfromtxt(StringIO.StringIO(dstr,), + delimiter=";", dtype=float, converters={0:str}) + control = np.array([('2009', 23., 46)], + dtype=[('f0','|S4'), ('f1', float), ('f2', float)]) + assert_equal(test, control) + test = np.ndfromtxt(StringIO.StringIO(dstr,), + delimiter=";", dtype=float, converters={0:float}) + control = np.array([2009., 23., 46],) + assert_equal(test, control) + + + def test_dtype_with_object(self): + "Test using an explicit dtype with an object" + from datetime import date + import time + data = """ + 1; 2001-01-01 + 2; 2002-01-31 + """ + ndtype = [('idx', int), ('code', np.object)] + func = lambda s: strptime(s.strip(), "%Y-%m-%d") + converters = {1: func} + test = np.genfromtxt(StringIO.StringIO(data), delimiter=";", dtype=ndtype, + converters=converters) + control = np.array([(1, datetime(2001,1,1)), (2, datetime(2002,1,31))], + dtype=ndtype) + assert_equal(test, control) + # + ndtype = [('nest', [('idx', int), ('code', np.object)])] + try: + test = np.genfromtxt(StringIO.StringIO(data), delimiter=";", + dtype=ndtype, converters=converters) + except NotImplementedError: + pass + else: + errmsg = "Nested dtype involving objects should be supported." + raise AssertionError(errmsg) + + + def test_userconverters_with_explicit_dtype(self): + "Test user_converters w/ explicit (standard) dtype" + data = StringIO.StringIO('skip,skip,2001-01-01,1.0,skip') + test = np.genfromtxt(data, delimiter=",", names=None, dtype=float, + usecols=(2, 3), converters={2: str}) + control = np.array([('2001-01-01', 1.)], + dtype=[('', '|S10'), ('', float)]) + assert_equal(test, control) + + + def test_spacedelimiter(self): + "Test space delimiter" + data = StringIO.StringIO("1 2 3 4 5\n6 7 8 9 10") + test = np.ndfromtxt(data) + control = np.array([[ 1., 2., 3., 4., 5.], + [ 6., 7., 8., 9.,10.]]) + assert_equal(test, control) + + + def test_missing(self): + data = StringIO.StringIO('1,2,3,,5\n') + test = np.ndfromtxt(data, dtype=int, delimiter=',', \ + converters={3:lambda s: int(s or -999)}) + control = np.array([1, 2, 3, -999, 5], int) + assert_equal(test, control) + + + def test_usecols(self): + "Test the selection of columns" + # Select 1 column + control = np.array( [[1, 2], [3, 4]], float) + data = StringIO.StringIO() + np.savetxt(data, control) + data.seek(0) + test = np.ndfromtxt(data, dtype=float, usecols=(1,)) + assert_equal(test, control[:, 1]) + # + control = np.array( [[1, 2, 3], [3, 4, 5]], float) + data = StringIO.StringIO() + np.savetxt(data, control) + data.seek(0) + test = np.ndfromtxt(data, dtype=float, usecols=(1, 2)) + assert_equal(test, control[:, 1:]) + # Testing with arrays instead of tuples. + data.seek(0) + test = np.ndfromtxt(data, dtype=float, usecols=np.array([1, 2])) + assert_equal(test, control[:, 1:]) + # Checking with dtypes defined converters. + data = StringIO.StringIO("""JOE 70.1 25.3\nBOB 60.5 27.9""") + names = ['stid', 'temp'] + dtypes = ['S4', 'f8'] + test = np.ndfromtxt(data, usecols=(0, 2), dtype=zip(names, dtypes)) + assert_equal(test['stid'], ["JOE", "BOB"]) + assert_equal(test['temp'], [25.3, 27.9]) + + + def test_empty_file(self): + "Test that an empty file raises the proper exception" + data = StringIO.StringIO() + assert_raises(IOError, np.ndfromtxt, data) + + + def test_fancy_dtype_alt(self): + "Check that a nested dtype isn't MIA" + data = StringIO.StringIO('1,2,3.0\n4,5,6.0\n') + fancydtype = np.dtype([('x', int), ('y', [('t', int), ('s', float)])]) + test = np.mafromtxt(data, dtype=fancydtype, delimiter=',') + control = ma.array([(1,(2,3.0)),(4,(5,6.0))], dtype=fancydtype) + assert_equal(test, control) + + + def test_withmissing(self): + data = StringIO.StringIO('A,B\n0,1\n2,N/A') + test = np.mafromtxt(data, dtype=None, delimiter=',', missing='N/A', + names=True) + control = ma.array([(0, 1), (2, -1)], + mask=[(False, False), (False, True)], + dtype=[('A', np.int), ('B', np.int)]) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + # + data.seek(0) + test = np.mafromtxt(data, delimiter=',', missing='N/A', names=True) + control = ma.array([(0, 1), (2, -1)], + mask=[[False, False], [False, True]],) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + + + def test_user_missing_values(self): + datastr ="A, B, C\n0, 0., 0j\n1, N/A, 1j\n-9, 2.2, N/A\n3, -99, 3j" + data = StringIO.StringIO(datastr) + basekwargs = dict(dtype=None, delimiter=',', names=True, missing='N/A') + mdtype = [('A', int), ('B', float), ('C', complex)] + # + test = np.mafromtxt(data, **basekwargs) + control = ma.array([( 0, 0.0, 0j), (1, -999, 1j), + ( -9, 2.2, -999j), (3, -99, 3j)], + mask=[(0, 0, 0), (0, 1, 0), (0, 0, 1), (0, 0, 0)], + dtype=mdtype) + assert_equal(test, control) + # + data.seek(0) + test = np.mafromtxt(data, + missing_values={0:-9, 1:-99, 2:-999j}, **basekwargs) + control = ma.array([( 0, 0.0, 0j), (1, -999, 1j), + ( -9, 2.2, -999j), (3, -99, 3j)], + mask=[(0, 0, 0), (0, 1, 0), (1, 0, 1), (0, 1, 0)], + dtype=mdtype) + assert_equal(test, control) + # + data.seek(0) + test = np.mafromtxt(data, + missing_values={0:-9, 'B':-99, 'C':-999j}, + **basekwargs) + control = ma.array([( 0, 0.0, 0j), (1, -999, 1j), + ( -9, 2.2, -999j), (3, -99, 3j)], + mask=[(0, 0, 0), (0, 1, 0), (1, 0, 1), (0, 1, 0)], + dtype=mdtype) + assert_equal(test, control) + + + def test_withmissing_float(self): + data = StringIO.StringIO('A,B\n0,1.5\n2,-999.00') + test = np.mafromtxt(data, dtype=None, delimiter=',', missing='-999.0', + names=True,) + control = ma.array([(0, 1.5), (2, -1.)], + mask=[(False, False), (False, True)], + dtype=[('A', np.int), ('B', np.float)]) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + + + def test_with_masked_column_uniform(self): + "Test masked column" + data = StringIO.StringIO('1 2 3\n4 5 6\n') + test = np.genfromtxt(data, missing='2,5', dtype=None, usemask=True) + control = ma.array([[1, 2, 3], [4, 5, 6]], mask=[[0, 1, 0],[0, 1, 0]]) + assert_equal(test, control) + + def test_with_masked_column_various(self): + "Test masked column" + data = StringIO.StringIO('True 2 3\nFalse 5 6\n') + test = np.genfromtxt(data, missing='2,5', dtype=None, usemask=True) + control = ma.array([(1, 2, 3), (0, 5, 6)], + mask=[(0, 1, 0),(0, 1, 0)], + dtype=[('f0', bool), ('f1', bool), ('f2', int)]) + assert_equal(test, control) + + + def test_recfromtxt(self): + # + data = StringIO.StringIO('A,B\n0,1\n2,3') + test = np.recfromtxt(data, delimiter=',', missing='N/A', names=True) + control = np.array([(0, 1), (2, 3)], + dtype=[('A', np.int), ('B', np.int)]) + self.failUnless(isinstance(test, np.recarray)) + assert_equal(test, control) + # + data = StringIO.StringIO('A,B\n0,1\n2,N/A') + test = np.recfromtxt(data, dtype=None, delimiter=',', missing='N/A', + names=True, usemask=True) + control = ma.array([(0, 1), (2, -1)], + mask=[(False, False), (False, True)], + dtype=[('A', np.int), ('B', np.int)]) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + assert_equal(test.A, [0, 2]) + + + def test_recfromcsv(self): + # + data = StringIO.StringIO('A,B\n0,1\n2,3') + test = np.recfromcsv(data, missing='N/A', + names=True, case_sensitive=True) + control = np.array([(0, 1), (2, 3)], + dtype=[('A', np.int), ('B', np.int)]) + self.failUnless(isinstance(test, np.recarray)) + assert_equal(test, control) + # + data = StringIO.StringIO('A,B\n0,1\n2,N/A') + test = np.recfromcsv(data, dtype=None, missing='N/A', + names=True, case_sensitive=True, usemask=True) + control = ma.array([(0, 1), (2, -1)], + mask=[(False, False), (False, True)], + dtype=[('A', np.int), ('B', np.int)]) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + assert_equal(test.A, [0, 2]) + # + data = StringIO.StringIO('A,B\n0,1\n2,3') + test = np.recfromcsv(data, missing='N/A',) + control = np.array([(0, 1), (2, 3)], + dtype=[('a', np.int), ('b', np.int)]) + self.failUnless(isinstance(test, np.recarray)) + assert_equal(test, control) + + + + if __name__ == "__main__": run_module_suite() Copied: branches/numpy-mingw-w64/numpy/lib/tests/test_recfunctions.py (from rev 6424, trunk/numpy/lib/tests/test_recfunctions.py) Modified: branches/numpy-mingw-w64/numpy/lib/utils.py =================================================================== --- branches/numpy-mingw-w64/numpy/lib/utils.py 2009-02-20 13:30:20 UTC (rev 6424) +++ branches/numpy-mingw-w64/numpy/lib/utils.py 2009-02-20 16:37:01 UTC (rev 6425) @@ -699,11 +699,11 @@ # import sub-packages if import_modules and hasattr(item, '__path__'): - for pth in item.__path__: - for mod_path in os.listdir(pth): - init_py = os.path.join(pth, mod_path, '__init__.py') + for pth in item.__path__: + for mod_path in os.listdir(pth): + init_py = os.path.join(pth, mod_path, '__init__.py') if not os.path.isfile(init_py): - continue + continue if _all is not None and mod_path not in _all: continue try: Modified: branches/numpy-mingw-w64/numpy/linalg/linalg.py =================================================================== --- branches/numpy-mingw-w64/numpy/linalg/linalg.py 2009-02-20 13:30:20 UTC (rev 6424) +++ branches/numpy-mingw-w64/numpy/linalg/linalg.py 2009-02-20 16:37:01 UTC (rev 6425) @@ -9,7 +9,7 @@ zgetrf, dpotrf, zpotrf, dgeqrf, zgeqrf, zungqr, dorgqr. """ -__all__ = ['matrix_power', 'solve', 'tensorsolve', 'tensorinv', 'inv', +__all__ = ['matrix_power', 'solve', 'tensorsolve', 'tensorinv', 'inv', 'cholesky', 'eigvals', 'eigvalsh', 'pinv', 'det', 'svd', 'eig', 'eigh','lstsq', 'norm', 'qr', 'cond', 'LinAlgError'] Modified: branches/numpy-mingw-w64/numpy/linalg/tests/test_linalg.py =================================================================== --- branches/numpy-mingw-w64/numpy/linalg/tests/test_linalg.py 2009-02-20 13:30:20 UTC (rev 6424) +++ branches/numpy-mingw-w64/numpy/linalg/tests/test_linalg.py 2009-02-20 16:37:01 UTC (rev 6425) @@ -202,7 +202,7 @@ assert_equal(matrix_power(A,2),A) -class HermitianTestCase: +class HermitianTestCase(object): def test_single(self): a = array([[1.,2.], [2.,1.]], dtype=single) self.do(a) Modified: branches/numpy-mingw-w64/numpy/ma/core.py =================================================================== --- branches/numpy-mingw-w64/numpy/ma/core.py 2009-02-20 13:30:20 UTC (rev 6424) +++ branches/numpy-mingw-w64/numpy/ma/core.py 2009-02-20 16:37:01 UTC (rev 6425) @@ -1,20 +1,24 @@ # pylint: disable-msg=E1002 -"""MA: a facility for dealing with missing observations -MA is generally used as a numpy.array look-alike. -by Paul F. Dubois. +""" +numpy.ma : a package to handle missing or invalid values. +This package was initially written for numarray by Paul F. Dubois +at Lawrence Livermore National Laboratory. +In 2006, the package was completely rewritten by Pierre Gerard-Marchant +(University of Georgia) to make the MaskedArray class a subclass of ndarray, +and to improve support of structured arrays. + + Copyright 1999, 2000, 2001 Regents of the University of California. Released for unlimited redistribution. -Adapted for numpy_core 2005 by Travis Oliphant and -(mainly) Paul Dubois. +* Adapted for numpy_core 2005 by Travis Oliphant and (mainly) Paul Dubois. +* Subclassing of the base ndarray 2006 by Pierre Gerard-Marchant + (pgmdevlist_AT_gmail_DOT_com) +* Improvements suggested by Reggie Dugard (reggie_AT_merfinllc_DOT_com) -Subclassing of the base ndarray 2006 by Pierre Gerard-Marchant. -pgmdevlist_AT_gmail_DOT_com -Improvements suggested by Reggie Dugard (reggie_AT_merfinllc_DOT_com) +.. moduleauthor:: Pierre Gerard-Marchant -:author: Pierre Gerard-Marchant - """ __author__ = "Pierre GF Gerard-Marchant" __docformat__ = "restructuredtext en" @@ -33,7 +37,8 @@ 'default_fill_value', 'diag', 'diagonal', 'divide', 'dump', 'dumps', 'empty', 'empty_like', 'equal', 'exp', 'expand_dims', 'fabs', 'flatten_mask', 'fmod', 'filled', 'floor', 'floor_divide', - 'fix_invalid', 'frombuffer', 'fromfunction', + 'fix_invalid', 'flatten_structured_array', 'frombuffer', 'fromflex', + 'fromfunction', 'getdata','getmask', 'getmaskarray', 'greater', 'greater_equal', 'harden_mask', 'hypot', 'identity', 'ids', 'indices', 'inner', 'innerproduct', @@ -54,7 +59,7 @@ 'rank', 'ravel', 'remainder', 'repeat', 'reshape', 'resize', 'right_shift', 'round_', 'round', 'set_fill_value', 'shape', 'sin', 'sinh', 'size', 'sometrue', - 'sort', 'soften_mask', 'sqrt', 'squeeze', 'std', 'subtract', 'sum', + 'sort', 'soften_mask', 'sqrt', 'squeeze', 'std', 'subtract', 'sum', 'swapaxes', 'take', 'tan', 'tanh', 'trace', 'transpose', 'true_divide', 'var', 'where', @@ -152,7 +157,7 @@ """ if hasattr(obj,'dtype'): - defval = default_filler[obj.dtype.kind] + defval = _check_fill_value(None, obj.dtype) elif isinstance(obj, np.dtype): if obj.subdtype: defval = default_filler[obj.subdtype[0].kind] @@ -170,6 +175,18 @@ defval = default_filler['O'] return defval + +def _recursive_extremum_fill_value(ndtype, extremum): + names = ndtype.names + if names: + deflist = [] + for name in names: + fval = _recursive_extremum_fill_value(ndtype[name], extremum) + deflist.append(fval) + return tuple(deflist) + return extremum[ndtype] + + def minimum_fill_value(obj): """ Calculate the default fill value suitable for taking the minimum of ``obj``. @@ -177,11 +194,7 @@ """ errmsg = "Unsuitable type for calculating minimum." if hasattr(obj, 'dtype'): - objtype = obj.dtype - filler = min_filler[objtype] - if filler is None: - raise TypeError(errmsg) - return filler + return _recursive_extremum_fill_value(obj.dtype, min_filler) elif isinstance(obj, float): return min_filler[ntypes.typeDict['float_']] elif isinstance(obj, int): @@ -193,6 +206,7 @@ else: raise TypeError(errmsg) + def maximum_fill_value(obj): """ Calculate the default fill value suitable for taking the maximum of ``obj``. @@ -200,11 +214,7 @@ """ errmsg = "Unsuitable type for calculating maximum." if hasattr(obj, 'dtype'): - objtype = obj.dtype - filler = max_filler[objtype] - if filler is None: - raise TypeError(errmsg) - return filler + return _recursive_extremum_fill_value(obj.dtype, max_filler) elif isinstance(obj, float): return max_filler[ntypes.typeDict['float_']] elif isinstance(obj, int): @@ -217,6 +227,28 @@ raise TypeError(errmsg) +def _recursive_set_default_fill_value(dtypedescr): + deflist = [] + for currentdescr in dtypedescr: + currenttype = currentdescr[1] + if isinstance(currenttype, list): + deflist.append(tuple(_recursive_set_default_fill_value(currenttype))) + else: + deflist.append(default_fill_value(np.dtype(currenttype))) + return tuple(deflist) + +def _recursive_set_fill_value(fillvalue, dtypedescr): + fillvalue = np.resize(fillvalue, len(dtypedescr)) + output_value = [] + for (fval, descr) in zip(fillvalue, dtypedescr): + cdtype = descr[1] + if isinstance(cdtype, list): + output_value.append(tuple(_recursive_set_fill_value(fval, cdtype))) + else: + output_value.append(np.array(fval, dtype=cdtype).item()) + return tuple(output_value) + + def _check_fill_value(fill_value, ndtype): """ Private function validating the given `fill_value` for the given dtype. @@ -233,10 +265,9 @@ fields = ndtype.fields if fill_value is None: if fields: - fdtype = [(_[0], _[1]) for _ in ndtype.descr] - fill_value = np.array(tuple([default_fill_value(fields[n][0]) - for n in ndtype.names]), - dtype=fdtype) + descr = ndtype.descr + fill_value = np.array(_recursive_set_default_fill_value(descr), + dtype=ndtype,) else: fill_value = default_fill_value(ndtype) elif fields: @@ -248,10 +279,9 @@ err_msg = "Unable to transform %s to dtype %s" raise ValueError(err_msg % (fill_value, fdtype)) else: - fval = np.resize(fill_value, len(ndtype.descr)) - fill_value = [np.asarray(f).astype(desc[1]).item() - for (f, desc) in zip(fval, ndtype.descr)] - fill_value = np.array(tuple(fill_value), copy=False, dtype=fdtype) + descr = ndtype.descr + fill_value = np.array(_recursive_set_fill_value(fill_value, descr), + dtype=ndtype) else: if isinstance(fill_value, basestring) and (ndtype.char not in 'SV'): fill_value = default_fill_value(ndtype) @@ -315,7 +345,7 @@ def filled(a, fill_value = None): """ Return `a` as an array where masked data have been replaced by `value`. - + If `a` is not a MaskedArray, `a` itself is returned. If `a` is a MaskedArray and `fill_value` is None, `fill_value` is set to `a.fill_value`. @@ -367,7 +397,7 @@ return rcls #####-------------------------------------------------------------------------- -def get_data(a, subok=True): +def getdata(a, subok=True): """ Return the `_data` part of `a` if `a` is a MaskedArray, or `a` itself. @@ -384,8 +414,8 @@ if not subok: return data.view(ndarray) return data +get_data = getdata -getdata = get_data def fix_invalid(a, mask=nomask, copy=True, fill_value=None): """ @@ -535,17 +565,20 @@ # ... but np.putmask looks more efficient, despite the copy. np.putmask(d1, dm, self.fill) # Take care of the masked singletong first ... - if not m.ndim and m: + if (not m.ndim) and m: return masked - # Get the result class ....................... - if isinstance(a, MaskedArray): - subtype = type(a) + elif m is nomask: + result = self.f(d1, *args, **kwargs) else: - subtype = MaskedArray - # Get the result as a view of the subtype ... - result = self.f(d1, *args, **kwargs).view(subtype) - # Fix the mask if we don't have a scalar - if result.ndim > 0: + result = np.where(m, d1, self.f(d1, *args, **kwargs)) + # If result is not a scalar + if result.ndim: + # Get the result subclass: + if isinstance(a, MaskedArray): + subtype = type(a) + else: + subtype = MaskedArray + result = result.view(subtype) result._mask = m result._update_from(a) return result @@ -583,20 +616,50 @@ def __call__ (self, a, b, *args, **kwargs): "Execute the call behavior." - m = mask_or(getmask(a), getmask(b)) - (d1, d2) = (get_data(a), get_data(b)) - result = self.f(d1, d2, *args, **kwargs).view(get_masked_subclass(a, b)) - if len(result.shape): - if m is not nomask: - result._mask = make_mask_none(result.shape) - result._mask.flat = m + m = mask_or(getmask(a), getmask(b), shrink=False) + (da, db) = (getdata(a), getdata(b)) + # Easy case: there's no mask... + if m is nomask: + result = self.f(da, db, *args, **kwargs) + # There are some masked elements: run only on the unmasked + else: + result = np.where(m, da, self.f(da, db, *args, **kwargs)) + # Transforms to a (subclass of) MaskedArray if we don't have a scalar + if result.shape: + result = result.view(get_masked_subclass(a, b)) + # If we have a mask, make sure it's broadcasted properly + if m.any(): + result._mask = mask_or(getmaskarray(a), getmaskarray(b)) + # If some initial masks where not shrunk, don't shrink the result + elif m.shape: + result._mask = make_mask_none(result.shape, result.dtype) if isinstance(a, MaskedArray): result._update_from(a) if isinstance(b, MaskedArray): result._update_from(b) + # ... or return masked if we have a scalar and the common mask is True elif m: return masked return result +# +# result = self.f(d1, d2, *args, **kwargs).view(get_masked_subclass(a, b)) +# if len(result.shape): +# if m is not nomask: +# result._mask = make_mask_none(result.shape) +# result._mask.flat = m +# #!!!!! +# # Force m to be at least 1D +# m.shape = m.shape or (1,) +# print "Resetting data" +# result.data[m].flat = d1.flat +# #!!!!! +# if isinstance(a, MaskedArray): +# result._update_from(a) +# if isinstance(b, MaskedArray): +# result._update_from(b) +# elif m: +# return masked +# return result def reduce(self, target, axis=0, dtype=None): """Reduce `target` along the given `axis`.""" @@ -639,11 +702,13 @@ m = umath.logical_or.outer(ma, mb) if (not m.ndim) and m: return masked - rcls = get_masked_subclass(a, b) - # We could fill the arguments first, butis it useful ? - # d = self.f.outer(filled(a, self.fillx), filled(b, self.filly)).view(rcls) - d = self.f.outer(getdata(a), getdata(b)).view(rcls) - if d.ndim > 0: + (da, db) = (getdata(a), getdata(b)) + if m is nomask: + d = self.f.outer(da, db) + else: + d = np.where(m, da, self.f.outer(da, db)) + if d.shape: + d = d.view(get_masked_subclass(a, b)) d._mask = m return d @@ -655,7 +720,7 @@ if isinstance(target, MaskedArray): tclass = type(target) else: - tclass = masked_array + tclass = MaskedArray t = filled(target, self.filly) return self.f.accumulate(t, axis).view(tclass) @@ -664,7 +729,8 @@ #.............................................................................. class _DomainedBinaryOperation: - """Define binary operations that have a domain, like divide. + """ + Define binary operations that have a domain, like divide. They have no reduce, outer or accumulate. @@ -689,26 +755,36 @@ ufunc_domain[dbfunc] = domain ufunc_fills[dbfunc] = (fillx, filly) - def __call__(self, a, b): + def __call__(self, a, b, *args, **kwargs): "Execute the call behavior." ma = getmask(a) - mb = getmask(b) - d1 = getdata(a) - d2 = get_data(b) - t = narray(self.domain(d1, d2), copy=False) + mb = getmaskarray(b) + da = getdata(a) + db = getdata(b) + t = narray(self.domain(da, db), copy=False) if t.any(None): - mb = mask_or(mb, t) + mb = mask_or(mb, t, shrink=False) # The following line controls the domain filling - if t.size == d2.size: - d2 = np.where(t, self.filly, d2) + if t.size == db.size: + db = np.where(t, self.filly, db) else: - d2 = np.where(np.resize(t, d2.shape), self.filly, d2) - m = mask_or(ma, mb) + db = np.where(np.resize(t, db.shape), self.filly, db) + # Shrink m if a.mask was nomask, otherwise don't. + m = mask_or(ma, mb, shrink=(getattr(a, '_mask', nomask) is nomask)) if (not m.ndim) and m: return masked - result = self.f(d1, d2).view(get_masked_subclass(a, b)) - if result.ndim > 0: - result._mask = m + elif (m is nomask): + result = self.f(da, db, *args, **kwargs) + else: + result = np.where(m, da, self.f(da, db, *args, **kwargs)) + if result.shape: + result = result.view(get_masked_subclass(a, b)) + # If we have a mask, make sure it's broadcasted properly + if m.any(): + result._mask = mask_or(getmaskarray(a), mb) + # If some initial masks where not shrunk, don't shrink the result + elif m.shape: + result._mask = make_mask_none(result.shape, result.dtype) if isinstance(a, MaskedArray): result._update_from(a) if isinstance(b, MaskedArray): @@ -796,36 +872,37 @@ #---- --- Mask creation functions --- #####-------------------------------------------------------------------------- +def _recursive_make_descr(datatype, newtype=bool_): + "Private function allowing recursion in make_descr." + # Do we have some name fields ? + if datatype.names: + descr = [] + for name in datatype.names: + field = datatype.fields[name] + if len(field) == 3: + # Prepend the title to the name + name = (field[-1], name) + descr.append((name, _recursive_make_descr(field[0], newtype))) + return descr + # Is this some kind of composite a la (np.float,2) + elif datatype.subdtype: + mdescr = list(datatype.subdtype) + mdescr[0] = newtype + return tuple(mdescr) + else: + return newtype + def make_mask_descr(ndtype): """Constructs a dtype description list from a given dtype. Each field is set to a bool. """ - def _make_descr(datatype): - "Private function allowing recursion." - # Do we have some name fields ? - if datatype.names: - descr = [] - for name in datatype.names: - field = datatype.fields[name] - if len(field) == 3: - # Prepend the title to the name - name = (field[-1], name) - descr.append((name, _make_descr(field[0]))) - return descr - # Is this some kind of composite a la (np.float,2) - elif datatype.subdtype: - mdescr = list(datatype.subdtype) - mdescr[0] = np.dtype(bool) - return tuple(mdescr) - else: - return np.bool # Make sure we do have a dtype if not isinstance(ndtype, np.dtype): ndtype = np.dtype(ndtype) - return np.dtype(_make_descr(ndtype)) + return np.dtype(_recursive_make_descr(ndtype, np.bool)) -def get_mask(a): +def getmask(a): """Return the mask of a, if any, or nomask. To get a full array of booleans of the same shape as a, use @@ -833,7 +910,7 @@ """ return getattr(a, '_mask', nomask) -getmask = get_mask +get_mask = getmask def getmaskarray(arr): """Return the mask of arr, if any, or a boolean array of the shape @@ -952,7 +1029,17 @@ ValueError If m1 and m2 have different flexible dtypes. - """ + """ + def _recursive_mask_or(m1, m2, newmask): + names = m1.dtype.names + for name in names: + current1 = m1[name] + if current1.dtype.names: + _recursive_mask_or(current1, m2[name], newmask[name]) + else: + umath.logical_or(current1, m2[name], newmask[name]) + return + # if (m1 is nomask) or (m1 is False): dtype = getattr(m2, 'dtype', MaskType) return make_mask(m2, copy=copy, shrink=shrink, dtype=dtype) @@ -966,8 +1053,7 @@ raise ValueError("Incompatible dtypes '%s'<>'%s'" % (dtype1, dtype2)) if dtype1.names: newmask = np.empty_like(m1) - for n in dtype1.names: - newmask[n] = umath.logical_or(m1[n], m2[n]) + _recursive_mask_or(m1, m2, newmask) return newmask return make_mask(umath.logical_or(m1, m2), copy=copy, shrink=shrink) @@ -976,7 +1062,7 @@ """ Returns a completely flattened version of the mask, where nested fields are collapsed. - + Parameters ---------- mask : array_like @@ -999,7 +1085,7 @@ >>> mask = np.array([(0, (0, 0)), (0, (0, 1))], dtype=mdtype) >>> flatten_mask(mask) array([False, False, False, False, False, True], dtype=bool) - + """ # def _flatmask(mask): @@ -1033,7 +1119,7 @@ def masked_where(condition, a, copy=True): """ - Return ``a`` as an array masked where ``condition`` is True. + Return ``a`` as an array masked where ``condition`` is ``True``. Masked values of ``a`` or ``condition`` are kept. Parameters @@ -1063,35 +1149,45 @@ result._mask = cond return result + def masked_greater(x, value, copy=True): """ - Return the array `x` masked where (x > value). + Return the array `x` masked where ``(x > value)``. Any value of mask already masked is kept masked. """ return masked_where(greater(x, value), x, copy=copy) + def masked_greater_equal(x, value, copy=True): - "Shortcut to masked_where, with condition = (x >= value)." + "Shortcut to masked_where, with condition ``(x >= value)``." return masked_where(greater_equal(x, value), x, copy=copy) + def masked_less(x, value, copy=True): - "Shortcut to masked_where, with condition = (x < value)." + "Shortcut to masked_where, with condition ``(x < value)``." return masked_where(less(x, value), x, copy=copy) + def masked_less_equal(x, value, copy=True): - "Shortcut to masked_where, with condition = (x <= value)." + "Shortcut to masked_where, with condition ``(x <= value)``." return masked_where(less_equal(x, value), x, copy=copy) + def masked_not_equal(x, value, copy=True): - "Shortcut to masked_where, with condition = (x != value)." + "Shortcut to masked_where, with condition ``(x != value)``." return masked_where(not_equal(x, value), x, copy=copy) + def masked_equal(x, value, copy=True): """ - Shortcut to masked_where, with condition = (x == value). For - floating point, consider ``masked_values(x, value)`` instead. + Shortcut to masked_where, with condition ``(x == value)``. + See Also + -------- + masked_where : base function + masked_values : equivalent function for floats. + """ # An alternative implementation relies on filling first: probably not needed. # d = filled(x, 0) @@ -1100,6 +1196,7 @@ # return array(d, mask=m, copy=copy) return masked_where(equal(x, value), x, copy=copy) + def masked_inside(x, v1, v2, copy=True): """ Shortcut to masked_where, where ``condition`` is True for x inside @@ -1117,6 +1214,7 @@ condition = (xf >= v1) & (xf <= v2) return masked_where(condition, x, copy=copy) + def masked_outside(x, v1, v2, copy=True): """ Shortcut to ``masked_where``, where ``condition`` is True for x outside @@ -1134,7 +1232,7 @@ condition = (xf < v1) | (xf > v2) return masked_where(condition, x, copy=copy) -# + def masked_object(x, value, copy=True, shrink=True): """ Mask the array `x` where the data are exactly equal to value. @@ -1163,6 +1261,7 @@ mask = mask_or(mask, make_mask(condition, shrink=shrink)) return masked_array(x, mask=mask, copy=copy, fill_value=value) + def masked_values(x, value, rtol=1.e-5, atol=1.e-8, copy=True, shrink=True): """ Mask the array x where the data are approximately equal in @@ -1200,6 +1299,7 @@ mask = mask_or(mask, make_mask(condition, shrink=shrink)) return masked_array(xnew, mask=mask, copy=copy, fill_value=value) + def masked_invalid(a, copy=True): """ Mask the array for invalid values (NaNs or infs). @@ -1221,6 +1321,7 @@ #####-------------------------------------------------------------------------- #---- --- Printing options --- #####-------------------------------------------------------------------------- + class _MaskedPrintOption: """ Handle the string used to represent missing data in a masked array. @@ -1255,10 +1356,65 @@ #if you single index into a masked location you get this object. masked_print_option = _MaskedPrintOption('--') + +def _recursive_printoption(result, mask, printopt): + """ + Puts printoptions in result where mask is True. + Private function allowing for recursion + """ + names = result.dtype.names + for name in names: + (curdata, curmask) = (result[name], mask[name]) + if curdata.dtype.names: + _recursive_printoption(curdata, curmask, printopt) + else: + np.putmask(curdata, curmask, printopt) + return + +_print_templates = dict(long = """\ +masked_%(name)s(data = + %(data)s, + %(nlen)s mask = + %(mask)s, + %(nlen)s fill_value = %(fill)s) +""", + short = """\ +masked_%(name)s(data = %(data)s, + %(nlen)s mask = %(mask)s, +%(nlen)s fill_value = %(fill)s) +""", + long_flx = """\ +masked_%(name)s(data = + %(data)s, + %(nlen)s mask = + %(mask)s, +%(nlen)s fill_value = %(fill)s, + %(nlen)s dtype = %(dtype)s) +""", + short_flx = """\ +masked_%(name)s(data = %(data)s, +%(nlen)s mask = %(mask)s, +%(nlen)s fill_value = %(fill)s, +%(nlen)s dtype = %(dtype)s) +""") + #####-------------------------------------------------------------------------- #---- --- MaskedArray class --- #####-------------------------------------------------------------------------- +def _recursive_filled(a, mask, fill_value): + """ + Recursively fill `a` with `fill_value`. + Private function + """ + names = a.dtype.names + for name in names: + current = a[name] + if current.dtype.names: + _recursive_filled(current, mask[name], fill_value[name]) + else: + np.putmask(current, mask[name], fill_value[name]) + #............................................................................... class _arraymethod(object): """ @@ -1313,17 +1469,17 @@ elif mask is not nomask: result.__setmask__(getattr(mask, methodname)(*args, **params)) else: - if mask.ndim and mask.all(): + if mask.ndim and (not mask.dtype.names and mask.all()): return masked return result #.......................................................... -class FlatIter(object): +class MaskedIterator(object): "Define an interator." def __init__(self, ma): self.ma = ma - self.ma_iter = np.asarray(ma).flat - + self.dataiter = ma._data.flat + # if ma._mask is nomask: self.maskiter = None else: @@ -1332,19 +1488,79 @@ def __iter__(self): return self + def __getitem__(self, indx): + result = self.dataiter.__getitem__(indx).view(type(self.ma)) + if self.maskiter is not None: + _mask = self.maskiter.__getitem__(indx) + _mask.shape = result.shape + result._mask = _mask + return result + ### This won't work is ravel makes a copy def __setitem__(self, index, value): - a = self.ma.ravel() - a[index] = value + self.dataiter[index] = getdata(value) + if self.maskiter is not None: + self.maskiter[index] = getmaskarray(value) +# self.ma1d[index] = value def next(self): "Returns the next element of the iterator." - d = self.ma_iter.next() + d = self.dataiter.next() if self.maskiter is not None and self.maskiter.next(): d = masked return d +def flatten_structured_array(a): + """ + Flatten a strutured array. + + The datatype of the output is the largest datatype of the (nested) fields. + + Returns + ------- + output : var + Flatten MaskedArray if the input is a MaskedArray, + standard ndarray otherwise. + + Examples + -------- + >>> ndtype = [('a', int), ('b', float)] + >>> a = np.array([(1, 1), (2, 2)], dtype=ndtype) + >>> flatten_structured_array(a) + array([[1., 1.], + [2., 2.]]) + + """ + # + def flatten_sequence(iterable): + """Flattens a compound of nested iterables.""" + for elm in iter(iterable): + if hasattr(elm,'__iter__'): + for f in flatten_sequence(elm): + yield f + else: + yield elm + # + a = np.asanyarray(a) + inishape = a.shape + a = a.ravel() + if isinstance(a, MaskedArray): + out = np.array([tuple(flatten_sequence(d.item())) for d in a._data]) + out = out.view(MaskedArray) + out._mask = np.array([tuple(flatten_sequence(d.item())) + for d in getmaskarray(a)]) + else: + out = np.array([tuple(flatten_sequence(d.item())) for d in a]) + if len(inishape) > 1: + newshape = list(out.shape) + newshape[0] = inishape + out.shape = tuple(flatten_sequence(newshape)) + return out + + + + class MaskedArray(ndarray): """ Arrays with possibly masked values. Masked values of True @@ -1358,32 +1574,32 @@ ---------- data : {var} Input data. - mask : {nomask, sequence} + mask : {nomask, sequence}, optional Mask. Must be convertible to an array of booleans with the same shape as data: True indicates a masked (eg., invalid) data. - dtype : dtype - Data type of the output. If None, the type of the data - argument is used. If dtype is not None and different from - data.dtype, a copy is performed. - copy : bool - Whether to copy the input data (True), or to use a - reference instead. Note: data are NOT copied by default. - subok : {True, boolean} + dtype : {dtype}, optional + Data type of the output. + If dtype is None, the type of the data argument (`data.dtype`) is used. + If dtype is not None and different from `data.dtype`, a copy is performed. + copy : {False, True}, optional + Whether to copy the input data (True), or to use a reference instead. + Note: data are NOT copied by default. + subok : {True, False}, optional Whether to return a subclass of MaskedArray (if possible) or a plain MaskedArray. - ndmin : {0, int} + ndmin : {0, int}, optional Minimum number of dimensions - fill_value : {var} - Value used to fill in the masked values when necessary. If - None, a default based on the datatype is used. - keep_mask : {True, boolean} + fill_value : {var}, optional + Value used to fill in the masked values when necessary. + If None, a default based on the datatype is used. + keep_mask : {True, boolean}, optional Whether to combine mask with the mask of the input data, if any (True), or to use only mask for the output (False). - hard_mask : {False, boolean} - Whether to use a hard mask or not. With a hard mask, - masked values cannot be unmasked. - shrink : {True, boolean} + hard_mask : {False, boolean}, optional + Whether to use a hard mask or not. + With a hard mask, masked values cannot be unmasked. + shrink : {True, boolean}, optional Whether to force compression of an empty mask. """ @@ -1397,10 +1613,12 @@ subok=True, ndmin=0, fill_value=None, keep_mask=True, hard_mask=None, flag=None, shrink=True, **options): - """Create a new masked array from scratch. + """ + Create a new masked array from scratch. - Note: you can also create an array with the .view(MaskedArray) - method. + Notes + ----- + A masked array can also be created by taking a .view(MaskedArray). """ if flag is not None: @@ -1564,7 +1782,8 @@ return #.................................. def __array_wrap__(self, obj, context=None): - """Special hook for ufuncs. + """ + Special hook for ufuncs. Wraps the numpy array and sets the mask according to context. """ result = obj.view(type(self)) @@ -1577,10 +1796,11 @@ # Get the domain mask................ domain = ufunc_domain.get(func, None) if domain is not None: + # Take the domain, and make sure it's a ndarray if len(args) > 2: - d = reduce(domain, args) + d = filled(reduce(domain, args), True) else: - d = domain(*args) + d = filled(domain(*args), True) # Fill the result where the domain is wrong try: # Binary domain: take the last value @@ -1598,7 +1818,8 @@ if d is not nomask: m = d else: - m |= d + # Don't modify inplace, we risk back-propagation + m = (m | d) # Make sure the mask has the proper size if result.shape == () and m: return masked @@ -1630,7 +1851,7 @@ if dtype is None: dtype = output.dtype mdtype = make_mask_descr(dtype) - + output._mask = self._mask.view(mdtype, ndarray) output._mask.shape = output.shape # Make sure to reset the _fill_value if needed @@ -1797,7 +2018,8 @@ ndarray.__setitem__(_data, indx, dindx) _mask[indx] = mindx return - #............................................ + + def __getslice__(self, i, j): """x.__getslice__(i, j) <==> x[i:j] @@ -1806,7 +2028,8 @@ """ return self.__getitem__(slice(i, j)) - #........................ + + def __setslice__(self, i, j, value): """x.__setslice__(i, j, value) <==> x[i:j]=value @@ -1815,7 +2038,8 @@ """ self.__setitem__(slice(i, j), value) - #............................................ + + def __setmask__(self, mask, copy=False): """Set the mask. @@ -1881,33 +2105,28 @@ # return self._mask.reshape(self.shape) return self._mask mask = property(fget=_get_mask, fset=__setmask__, doc="Mask") - # - def _getrecordmask(self): - """Return the mask of the records. + + + def _get_recordmask(self): + """ + Return the mask of the records. A record is masked when all the fields are masked. """ _mask = ndarray.__getattribute__(self, '_mask').view(ndarray) if _mask.dtype.names is None: return _mask - if _mask.size > 1: - axis = 1 - else: - axis = None - # - try: - return _mask.view((bool_, len(self.dtype))).all(axis) - except ValueError: - return np.all([[f[n].all() for n in _mask.dtype.names] - for f in _mask], axis=axis) + return np.all(flatten_structured_array(_mask), axis=-1) - def _setrecordmask(self): + + def _set_recordmask(self): """Return the mask of the records. A record is masked when all the fields are masked. """ raise NotImplementedError("Coming soon: setting the mask per records!") - recordmask = property(fget=_getrecordmask) + recordmask = property(fget=_get_recordmask) + #............................................ def harden_mask(self): """Force the mask to hard. @@ -1921,6 +2140,10 @@ """ self._hardmask = False + hardmask = property(fget=lambda self: self._hardmask, + doc="Hardness of the mask") + + def unshare_mask(self): """Copy the mask and set the sharedmask flag to False. @@ -1929,6 +2152,9 @@ self._mask = self._mask.copy() self._sharedmask = False + sharedmask = property(fget=lambda self: self._sharedmask, + doc="Share status of the mask (read-only).") + def shrink_mask(self): """Reduce a mask to nomask when possible. @@ -1938,6 +2164,10 @@ self._mask = nomask #............................................ + + baseclass = property(fget= lambda self:self._baseclass, + doc="Class of the underlying data (read-only).") + def _get_data(self): """Return the current data, as a view of the original underlying data. @@ -1960,7 +2190,7 @@ """Return a flat iterator. """ - return FlatIter(self) + return MaskedIterator(self) # def _set_flat (self, value): """Set a flattened version of self to value. @@ -1991,24 +2221,25 @@ fill_value = property(fget=get_fill_value, fset=set_fill_value, doc="Filling value.") + def filled(self, fill_value=None): - """Return a copy of self._data, where masked values are filled - with fill_value. + """ + Return a copy of self, where masked values are filled with `fill_value`. - If fill_value is None, self.fill_value is used instead. + If `fill_value` is None, `self.fill_value` is used instead. - Notes - ----- - + Subclassing is preserved - + The result is NOT a MaskedArray ! + Notes + ----- + + Subclassing is preserved + + The result is NOT a MaskedArray ! - Examples - -------- - >>> x = np.ma.array([1,2,3,4,5], mask=[0,0,1,0,1], fill_value=-999) - >>> x.filled() - array([1,2,-999,4,-999]) - >>> type(x.filled()) - + Examples + -------- + >>> x = np.ma.array([1,2,3,4,5], mask=[0,0,1,0,1], fill_value=-999) + >>> x.filled() + array([1,2,-999,4,-999]) + >>> type(x.filled()) + """ m = self._mask @@ -2025,9 +2256,7 @@ # if m.dtype.names: result = self._data.copy() - for n in result.dtype.names: - field = result[n] - np.putmask(field, self._mask[n], fill_value[n]) + _recursive_filled(result, self._mask, fill_value) elif not m.any(): return self._data else: @@ -2148,13 +2377,9 @@ res = self._data.astype("|O8") res[m] = f else: - rdtype = [list(_) for _ in self.dtype.descr] - for r in rdtype: - r[1] = '|O8' - rdtype = [tuple(_) for _ in rdtype] + rdtype = _recursive_make_descr(self.dtype, "|O8") res = self._data.astype(rdtype) - for field in names: - np.putmask(res[field], m[field], f) + _recursive_printoption(res, m, f) else: res = self.filled(self.fill_value) return str(res) @@ -2163,44 +2388,71 @@ """Literal string representation. """ - with_mask = """\ -masked_%(name)s(data = - %(data)s, - mask = - %(mask)s, - fill_value=%(fill)s) -""" - with_mask1 = """\ -masked_%(name)s(data = %(data)s, - mask = %(mask)s, - fill_value=%(fill)s) -""" - with_mask_flx = """\ -masked_%(name)s(data = - %(data)s, - mask = - %(mask)s, - fill_value=%(fill)s, - dtype=%(dtype)s) -""" - with_mask1_flx = """\ -masked_%(name)s(data = %(data)s, - mask = %(mask)s, - fill_value=%(fill)s - dtype=%(dtype)s) -""" n = len(self.shape) name = repr(self._data).split('(')[0] - parameters = dict(name=name, data=str(self), mask=str(self._mask), + parameters = dict(name=name, nlen=" "*len(name), + data=str(self), mask=str(self._mask), fill=str(self.fill_value), dtype=str(self.dtype)) if self.dtype.names: if n <= 1: - return with_mask1_flx % parameters - return with_mask_flx % parameters + return _print_templates['short_flx'] % parameters + return _print_templates['long_flx'] % parameters elif n <= 1: - return with_mask1 % parameters - return with_mask % parameters + return _print_templates['short'] % parameters + return _print_templates['long'] % parameters #............................................ + def __eq__(self, other): + "Check whether other equals self elementwise" + omask = getattr(other, '_mask', nomask) + if omask is nomask: + check = ndarray.__eq__(self.filled(0), other).view(type(self)) + check._mask = self._mask + else: + odata = filled(other, 0) + check = ndarray.__eq__(self.filled(0), odata).view(type(self)) + if self._mask is nomask: + check._mask = omask + else: + mask = mask_or(self._mask, omask) + if mask.dtype.names: + if mask.size > 1: + axis = 1 + else: + axis = None + try: + mask = mask.view((bool_, len(self.dtype))).all(axis) + except ValueError: + mask = np.all([[f[n].all() for n in mask.dtype.names] + for f in mask], axis=axis) + check._mask = mask + return check + # + def __ne__(self, other): + "Check whether other doesn't equal self elementwise" + omask = getattr(other, '_mask', nomask) + if omask is nomask: + check = ndarray.__ne__(self.filled(0), other).view(type(self)) + check._mask = self._mask + else: + odata = filled(other, 0) + check = ndarray.__ne__(self.filled(0), odata).view(type(self)) + if self._mask is nomask: + check._mask = omask + else: + mask = mask_or(self._mask, omask) + if mask.dtype.names: + if mask.size > 1: + axis = 1 + else: + axis = None + try: + mask = mask.view((bool_, len(self.dtype))).all(axis) + except ValueError: + mask = np.all([[f[n].all() for n in mask.dtype.names] + for f in mask], axis=axis) + check._mask = mask + return check + # def __add__(self, other): "Add other to self, and return a new masked array." return add(self, other) @@ -2223,7 +2475,7 @@ # def __rmul__(self, other): "Multiply other by self, and return a new masked array." - return multiply(other, self) + return multiply(self, other) # def __div__(self, other): "Divide other into self, and return a new masked array." @@ -2243,32 +2495,39 @@ #............................................ def __iadd__(self, other): "Add other to self in-place." - ndarray.__iadd__(self._data, getdata(other)) m = getmask(other) if self._mask is nomask: - self._mask = m - elif m is not nomask: - self._mask += m + if m is not nomask and m.any(): + self._mask = make_mask_none(self.shape, self.dtype) + self._mask += m + else: + if m is not nomask: + self._mask += m + ndarray.__iadd__(self._data, np.where(self._mask, 0, getdata(other))) return self #.... def __isub__(self, other): "Subtract other from self in-place." - ndarray.__isub__(self._data, getdata(other)) m = getmask(other) if self._mask is nomask: - self._mask = m + if m is not nomask and m.any(): + self._mask = make_mask_none(self.shape, self.dtype) + self._mask += m elif m is not nomask: self._mask += m + ndarray.__isub__(self._data, np.where(self._mask, 0, getdata(other))) return self #.... def __imul__(self, other): "Multiply self by other in-place." - ndarray.__imul__(self._data, getdata(other)) m = getmask(other) if self._mask is nomask: - self._mask = m + if m is not nomask and m.any(): + self._mask = make_mask_none(self.shape, self.dtype) + self._mask += m elif m is not nomask: self._mask += m + ndarray.__imul__(self._data, np.where(self._mask, 1, getdata(other))) return self #.... def __idiv__(self, other): @@ -2281,21 +2540,25 @@ if dom_mask.any(): (_, fval) = ufunc_fills[np.divide] other_data = np.where(dom_mask, fval, other_data) - ndarray.__idiv__(self._data, other_data) - self._mask = mask_or(self._mask, new_mask) +# self._mask = mask_or(self._mask, new_mask) + self._mask |= new_mask + ndarray.__idiv__(self._data, np.where(self._mask, 1, other_data)) return self #... def __ipow__(self, other): - "Raise self to the power other, in place" - _data = self._data + "Raise self to the power other, in place." other_data = getdata(other) other_mask = getmask(other) - ndarray.__ipow__(_data, other_data) - invalid = np.logical_not(np.isfinite(_data)) + ndarray.__ipow__(self._data, np.where(self._mask, 1, other_data)) + invalid = np.logical_not(np.isfinite(self._data)) + if invalid.any(): + if self._mask is not nomask: + self._mask |= invalid + else: + self._mask = invalid + np.putmask(self._data, invalid, self.fill_value) new_mask = mask_or(other_mask, invalid) self._mask = mask_or(self._mask, new_mask) - # The following line is potentially problematic, as we change _data... - np.putmask(self._data, invalid, self.fill_value) return self #............................................ def __float__(self): @@ -2453,25 +2716,24 @@ return result # def resize(self, newshape, refcheck=True, order=False): - """Attempt to modify the size and the shape of the array in place. + """ + Change shape and size of array in-place. - The array must own its own memory and not be referenced by - other arrays. - - Returns - ------- - None. - """ - try: - self._data.resize(newshape, refcheck, order) - if self.mask is not nomask: - self._mask.resize(newshape, refcheck, order) - except ValueError: - raise ValueError("Cannot resize an array that has been referenced " - "or is referencing another array in this way.\n" - "Use the resize function.") - return None + # Note : the 'order' keyword looks broken, let's just drop it +# try: +# ndarray.resize(self, newshape, refcheck=refcheck) +# if self.mask is not nomask: +# self._mask.resize(newshape, refcheck=refcheck) +# except ValueError: +# raise ValueError("Cannot resize an array that has been referenced " +# "or is referencing another array in this way.\n" +# "Use the numpy.ma.resize function.") +# return None + errmsg = "A masked array does not own its data "\ + "and therefore cannot be resized.\n" \ + "Use the numpy.ma.resize function instead." + raise ValueError(errmsg) # def put(self, indices, values, mode='raise'): """ @@ -3062,7 +3324,7 @@ index_array : ndarray, int Array of indices that sort `a` along the specified axis. In other words, ``a[index_array]`` yields a sorted `a`. - + See Also -------- sort : Describes sorting algorithms used. @@ -3389,7 +3651,7 @@ outmask = out._mask = make_mask_none(out.shape) outmask.flat = newmask else: - + if out.dtype.kind in 'biu': errmsg = "Masked data information would be lost in one or more"\ " location." @@ -3506,9 +3768,10 @@ def tofile(self, fid, sep="", format="%s"): raise NotImplementedError("Not implemented yet, sorry...") - def torecords(self): + def toflex(self): """ Transforms a MaskedArray into a flexible-type array with two fields: + * the ``_data`` field stores the ``_data`` part of the array; * the ``_mask`` field stores the ``_mask`` part of the array; @@ -3551,6 +3814,7 @@ record['_data'] = self._data record['_mask'] = self._mask return record + torecords = toflex #-------------------------------------------- # Pickling def __getstate__(self): @@ -3610,7 +3874,7 @@ """ _data = ndarray.__new__(baseclass, baseshape, basetype) - _mask = ndarray.__new__(ndarray, baseshape, 'b1') + _mask = ndarray.__new__(ndarray, baseshape, make_mask_descr(basetype)) return subtype.__new__(subtype, _data, mask=_mask, dtype=basetype,) @@ -3848,22 +4112,22 @@ else: basetype = MaskedArray # Get the result and view it as a (subclass of) MaskedArray - result = umath.power(fa, fb).view(basetype) + result = np.where(m, fa, umath.power(fa, fb)).view(basetype) + result._update_from(a) # Find where we're in trouble w/ NaNs and Infs invalid = np.logical_not(np.isfinite(result.view(ndarray))) - # Retrieve some extra attributes if needed - if isinstance(result, MaskedArray): - result._update_from(a) # Add the initial mask if m is not nomask: - if np.isscalar(result): + if not (result.ndim): return masked + m |= invalid result._mask = m # Fix the invalid parts if invalid.any(): if not result.ndim: return masked - result[invalid] = masked + elif result._mask is nomask: + result._mask = invalid result._data[invalid] = result.fill_value return result @@ -3934,12 +4198,12 @@ def compressed(x): """ Return a 1-D array of all the non-masked data. - + See Also -------- MaskedArray.compressed equivalent method - + """ if getmask(x) is nomask: return np.asanyarray(x) @@ -4307,8 +4571,8 @@ Returns the inner product of a and b for arrays of floating point types. Like the generic NumPy equivalent the product sum is over the last dimension - of a and b. - + of a and b. + Notes ----- The first argument is not conjugated. @@ -4343,7 +4607,8 @@ outerproduct = outer def allequal (a, b, fill_value=True): - """Return True if all entries of a and b are equal, using + """ + Return True if all entries of a and b are equal, using fill_value as a truth value where either or both are masked. """ @@ -4378,7 +4643,7 @@ fill_value : boolean, optional Whether masked values in a or b are considered equal (True) or not (False). - + rtol : Relative tolerance The relative difference is equal to `rtol` * `b`. atol : Absolute tolerance @@ -4401,7 +4666,7 @@ True. absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`)) - + Return True if all elements of a and b are equal subject to given tolerances. @@ -4434,10 +4699,10 @@ return np.all(d) #.............................................................................. -def asarray(a, dtype=None): +def asarray(a, dtype=None, order=None): """ - Convert the input to a masked array. - + Convert the input `a` to a masked array of the given datatype. + Parameters ---------- a : array_like @@ -4449,29 +4714,40 @@ order : {'C', 'F'}, optional Whether to use row-major ('C') or column-major ('FORTRAN') memory representation. Defaults to 'C'. - + Returns ------- out : ndarray MaskedArray interpretation of `a`. No copy is performed if the input - is already an ndarray. If `a` is a subclass of ndarray, a base - class ndarray is returned. - Return a as a MaskedArray object of the given dtype. - If dtype is not given or None, is is set to the dtype of a. - No copy is performed if a is already an array. - Subclasses are converted to the base class MaskedArray. + is already an ndarray. If `a` is a subclass of MaskedArray, a base + class MaskedArray is returned. """ return masked_array(a, dtype=dtype, copy=False, keep_mask=True, subok=False) def asanyarray(a, dtype=None): - """asanyarray(data, dtype) = array(data, dtype, copy=0, subok=1) + """ + Convert the input `a` to a masked array of the given datatype. + If `a` is a subclass of MaskedArray, its class is conserved. - Return a as an masked array. - If dtype is not given or None, is is set to the dtype of a. - No copy is performed if a is already an array. - Subclasses are conserved. + Parameters + ---------- + a : array_like + Input data, in any form that can be converted to an array. This + includes lists, lists of tuples, tuples, tuples of tuples, tuples + of lists and ndarrays. + dtype : data-type, optional + By default, the data-type is inferred from the input data. + order : {'C', 'F'}, optional + Whether to use row-major ('C') or column-major ('FORTRAN') memory + representation. Defaults to 'C'. + Returns + ------- + out : ndarray + MaskedArray interpretation of `a`. No copy is performed if the input + is already an ndarray. + """ return masked_array(a, dtype=dtype, copy=False, keep_mask=True, subok=True) @@ -4516,6 +4792,15 @@ raise NotImplementedError("Not yet implemented. Sorry") +def fromflex(fxarray): + """ + Rebuilds a masked_array from a flexible-type array output by the '.torecord' + array + """ + return masked_array(fxarray['_data'], mask=fxarray['_mask']) + + + class _convert2ma: """Convert functions from numpy to numpy.ma. Modified: branches/numpy-mingw-w64/numpy/ma/extras.py =================================================================== --- branches/numpy-mingw-w64/numpy/ma/extras.py 2009-02-20 13:30:20 UTC (rev 6424) +++ branches/numpy-mingw-w64/numpy/ma/extras.py 2009-02-20 16:37:01 UTC (rev 6425) @@ -19,11 +19,14 @@ 'ediff1d', 'flatnotmasked_contiguous', 'flatnotmasked_edges', 'hsplit', 'hstack', + 'intersect1d', 'intersect1d_nu', 'mask_cols', 'mask_rowcols', 'mask_rows', 'masked_all', 'masked_all_like', 'median', 'mr_', 'notmasked_contiguous', 'notmasked_edges', 'polyfit', 'row_stack', + 'setdiff1d', 'setmember1d', 'setxor1d', + 'unique1d', 'union1d', 'vander', 'vstack', ] @@ -45,22 +48,19 @@ #............................................................................... def issequence(seq): """Is seq a sequence (ndarray, list or tuple)?""" - if isinstance(seq, ndarray): + if isinstance(seq, (ndarray, tuple, list)): return True - elif isinstance(seq, tuple): - return True - elif isinstance(seq, list): - return True return False def count_masked(arr, axis=None): - """Count the number of masked elements along the given axis. + """ + Count the number of masked elements along the given axis. Parameters ---------- - axis : int, optional - Axis along which to count. - If None (default), a flattened version of the array is used. + axis : int, optional + Axis along which to count. + If None (default), a flattened version of the array is used. """ m = getmaskarray(arr) @@ -136,9 +136,12 @@ res.append(masked_array(_d, mask=_m)) return res -atleast_1d = _fromnxfunction('atleast_1d') -atleast_2d = _fromnxfunction('atleast_2d') -atleast_3d = _fromnxfunction('atleast_3d') +#atleast_1d = _fromnxfunction('atleast_1d') +#atleast_2d = _fromnxfunction('atleast_2d') +#atleast_3d = _fromnxfunction('atleast_3d') +atleast_1d = np.atleast_1d +atleast_2d = np.atleast_2d +atleast_3d = np.atleast_3d vstack = row_stack = _fromnxfunction('vstack') hstack = _fromnxfunction('hstack') @@ -252,7 +255,8 @@ def average(a, axis=None, weights=None, returned=False): - """Average the array over the given axis. + """ + Average the array over the given axis. Parameters ---------- @@ -440,10 +444,10 @@ #.............................................................................. def compress_rowcols(x, axis=None): """ - Suppress the rows and/or columns of a 2D array that contains + Suppress the rows and/or columns of a 2D array that contain masked values. - The suppression behavior is selected with the `axis`parameter. + The suppression behavior is selected with the `axis` parameter. - If axis is None, rows and columns are suppressed. - If axis is 0, only rows are suppressed. @@ -482,13 +486,15 @@ return x._data[idxr][:,idxc] def compress_rows(a): - """Suppress whole rows of a 2D array that contain masked values. + """ + Suppress whole rows of a 2D array that contain masked values. """ return compress_rowcols(a, 0) def compress_cols(a): - """Suppress whole columnss of a 2D array that contain masked values. + """ + Suppress whole columns of a 2D array that contain masked values. """ return compress_rowcols(a, 1) @@ -530,30 +536,35 @@ return a def mask_rows(a, axis=None): - """Mask whole rows of a 2D array that contain masked values. + """ + Mask whole rows of a 2D array that contain masked values. Parameters ---------- - axis : int, optional - Axis along which to perform the operation. - If None, applies to a flattened version of the array. + axis : int, optional + Axis along which to perform the operation. + If None, applies to a flattened version of the array. + """ return mask_rowcols(a, 0) def mask_cols(a, axis=None): - """Mask whole columns of a 2D array that contain masked values. + """ + Mask whole columns of a 2D array that contain masked values. Parameters ---------- - axis : int, optional - Axis along which to perform the operation. - If None, applies to a flattened version of the array. + axis : int, optional + Axis along which to perform the operation. + If None, applies to a flattened version of the array. + """ return mask_rowcols(a, 1) def dot(a,b, strict=False): - """Return the dot product of two 2D masked arrays a and b. + """ + Return the dot product of two 2D masked arrays a and b. Like the generic numpy equivalent, the product sum is over the last dimension of a and the second-to-last dimension of b. If strict is True, @@ -582,74 +593,213 @@ m = ~np.dot(am, bm) return masked_array(d, mask=m) -#............................................................................... -def ediff1d(array, to_end=None, to_begin=None): - """Return the differences between consecutive elements of an - array, possibly with prefixed and/or appended values. +#####-------------------------------------------------------------------------- +#---- --- arraysetops --- +#####-------------------------------------------------------------------------- - Parameters - ---------- - array : {array} - Input array, will be flattened before the difference is taken. - to_end : {number}, optional - If provided, this number will be tacked onto the end of the returned - differences. - to_begin : {number}, optional - If provided, this number will be taked onto the beginning of the - returned differences. +def ediff1d(arr, to_end=None, to_begin=None): + """ + Computes the differences between consecutive elements of an array. + This function is the equivalent of `numpy.ediff1d` that takes masked + values into account. + + See Also + -------- + numpy.eddif1d : equivalent function for ndarrays. + Returns ------- - ed : {array} - The differences. Loosely, this will be (ary[1:] - ary[:-1]). - + output : MaskedArray + """ - a = masked_array(array, copy=True) - if a.ndim > 1: - a.reshape((a.size,)) - (d, m, n) = (a._data, a._mask, a.size-1) - dd = d[1:]-d[:-1] - if m is nomask: - dm = nomask - else: - dm = m[1:]-m[:-1] + arr = ma.asanyarray(arr).flat + ed = arr[1:] - arr[:-1] + arrays = [ed] # + if to_begin is not None: + arrays.insert(0, to_begin) if to_end is not None: - to_end = asarray(to_end) - nend = to_end.size - if to_begin is not None: - to_begin = asarray(to_begin) - nbegin = to_begin.size - r_data = np.empty((n+nend+nbegin,), dtype=a.dtype) - r_mask = np.zeros((n+nend+nbegin,), dtype=bool) - r_data[:nbegin] = to_begin._data - r_mask[:nbegin] = to_begin._mask - r_data[nbegin:-nend] = dd - r_mask[nbegin:-nend] = dm - else: - r_data = np.empty((n+nend,), dtype=a.dtype) - r_mask = np.zeros((n+nend,), dtype=bool) - r_data[:-nend] = dd - r_mask[:-nend] = dm - r_data[-nend:] = to_end._data - r_mask[-nend:] = to_end._mask + arrays.append(to_end) # - elif to_begin is not None: - to_begin = asarray(to_begin) - nbegin = to_begin.size - r_data = np.empty((n+nbegin,), dtype=a.dtype) - r_mask = np.zeros((n+nbegin,), dtype=bool) - r_data[:nbegin] = to_begin._data - r_mask[:nbegin] = to_begin._mask - r_data[nbegin:] = dd - r_mask[nbegin:] = dm + if len(arrays) != 1: + # We'll save ourselves a copy of a potentially large array in the common + # case where neither to_begin or to_end was given. + ed = hstack(arrays) # + return ed + + +def unique1d(ar1, return_index=False, return_inverse=False): + """ + Finds the unique elements of an array. + + Masked values are considered the same element (masked). + + The output array is always a MaskedArray. + + See Also + -------- + np.unique1d : equivalent function for ndarrays. + """ + output = np.unique1d(ar1, + return_index=return_index, + return_inverse=return_inverse) + if isinstance(output, tuple): + output = list(output) + output[0] = output[0].view(MaskedArray) + output = tuple(output) else: - r_data = dd - r_mask = dm - return masked_array(r_data, mask=r_mask) + output = output.view(MaskedArray) + return output +def intersect1d(ar1, ar2): + """ + Returns the repeated or unique elements belonging to the two arrays. + + Masked values are assumed equals one to the other. + The output is always a masked array + + See Also + -------- + numpy.intersect1d : equivalent function for ndarrays. + + Examples + -------- + >>> x = array([1, 3, 3, 3], mask=[0, 0, 0, 1]) + >>> y = array([3, 1, 1, 1], mask=[0, 0, 0, 1]) + >>> intersect1d(x, y) + masked_array(data = [1 1 3 3 --], + mask = [False False False False True], + fill_value = 999999) + """ + aux = ma.concatenate((ar1,ar2)) + aux.sort() + return aux[aux[1:] == aux[:-1]] + + + +def intersect1d_nu(ar1, ar2): + """ + Returns the unique elements common to both arrays. + + Masked values are considered equal one to the other. + The output is always a masked array. + + See Also + -------- + intersect1d : Returns repeated or unique common elements. + numpy.intersect1d_nu : equivalent function for ndarrays. + + Examples + -------- + >>> x = array([1, 3, 3, 3], mask=[0, 0, 0, 1]) + >>> y = array([3, 1, 1, 1], mask=[0, 0, 0, 1]) + >>> intersect1d_nu(x, y) + masked_array(data = [1 3 --], + mask = [False False True], + fill_value = 999999) + + """ + # Might be faster than unique1d( intersect1d( ar1, ar2 ) )? + aux = ma.concatenate((unique1d(ar1), unique1d(ar2))) + aux.sort() + return aux[aux[1:] == aux[:-1]] + + + +def setxor1d(ar1, ar2): + """ + Set exclusive-or of 1D arrays with unique elements. + + See Also + -------- + numpy.setxor1d : equivalent function for ndarrays + + """ + aux = ma.concatenate((ar1, ar2)) + if aux.size == 0: + return aux + aux.sort() + auxf = aux.filled() +# flag = ediff1d( aux, to_end = 1, to_begin = 1 ) == 0 + flag = ma.concatenate(([True], (auxf[1:] != auxf[:-1]), [True])) +# flag2 = ediff1d( flag ) == 0 + flag2 = (flag[1:] == flag[:-1]) + return aux[flag2] + + +def setmember1d(ar1, ar2): + """ + Return a boolean array set True where first element is in second array. + + See Also + -------- + numpy.setmember1d : equivalent function for ndarrays. + + """ + ar1 = ma.asanyarray(ar1) + ar2 = ma.asanyarray( ar2 ) + ar = ma.concatenate((ar1, ar2 )) + b1 = ma.zeros(ar1.shape, dtype = np.int8) + b2 = ma.ones(ar2.shape, dtype = np.int8) + tt = ma.concatenate((b1, b2)) + + # We need this to be a stable sort, so always use 'mergesort' here. The + # values from the first array should always come before the values from the + # second array. + perm = ar.argsort(kind='mergesort') + aux = ar[perm] + aux2 = tt[perm] +# flag = ediff1d( aux, 1 ) == 0 + flag = ma.concatenate((aux[1:] == aux[:-1], [False])) + ii = ma.where( flag * aux2 )[0] + aux = perm[ii+1] + perm[ii+1] = perm[ii] + perm[ii] = aux + # + indx = perm.argsort(kind='mergesort')[:len( ar1 )] + # + return flag[indx] + + +def union1d(ar1, ar2): + """ + Union of 1D arrays with unique elements. + + See also + -------- + numpy.union1d : equivalent function for ndarrays. + + """ + return unique1d(ma.concatenate((ar1, ar2))) + + +def setdiff1d(ar1, ar2): + """ + Set difference of 1D arrays with unique elements. + + See Also + -------- + numpy.setdiff1d : equivalent function for ndarrays + + """ + aux = setmember1d(ar1,ar2) + if aux.size == 0: + return aux + else: + return ma.asarray(ar1)[aux == 0] + + + +#####-------------------------------------------------------------------------- +#---- --- Covariance --- +#####-------------------------------------------------------------------------- + + + + def _covhelper(x, y=None, rowvar=True, allow_masked=True): """ Private function for the computation of covariance and correlation @@ -747,7 +897,8 @@ def corrcoef(x, y=None, rowvar=True, bias=False, allow_masked=True): - """The correlation coefficients formed from the array x, where the + """ + The correlation coefficients formed from the array x, where the rows are the observations, and the columns are variables. corrcoef(x,y) where x and y are 1d arrays is the same as @@ -818,7 +969,8 @@ #####-------------------------------------------------------------------------- class MAxisConcatenator(AxisConcatenator): - """Translate slice objects to concatenation along an axis. + """ + Translate slice objects to concatenation along an axis. """ @@ -877,11 +1029,13 @@ return self._retval(res) class mr_class(MAxisConcatenator): - """Translate slice objects to concatenation along the first axis. + """ + Translate slice objects to concatenation along the first axis. - For example: - >>> np.ma.mr_[np.ma.array([1,2,3]), 0, 0, np.ma.array([4,5,6])] - array([1, 2, 3, 0, 0, 4, 5, 6]) + Examples + -------- + >>> np.ma.mr_[np.ma.array([1,2,3]), 0, 0, np.ma.array([4,5,6])] + array([1, 2, 3, 0, 0, 4, 5, 6]) """ def __init__(self): @@ -894,7 +1048,8 @@ #####-------------------------------------------------------------------------- def flatnotmasked_edges(a): - """Find the indices of the first and last not masked values in a + """ + Find the indices of the first and last not masked values in a 1D masked array. If all values are masked, returns None. """ @@ -907,8 +1062,10 @@ else: return None + def notmasked_edges(a, axis=None): - """Find the indices of the first and last not masked values along + """ + Find the indices of the first and last not masked values along the given axis in a masked array. If all values are masked, return None. Otherwise, return a list @@ -917,9 +1074,10 @@ Parameters ---------- - axis : int, optional - Axis along which to perform the operation. - If None, applies to a flattened version of the array. + axis : int, optional + Axis along which to perform the operation. + If None, applies to a flattened version of the array. + """ a = asarray(a) if axis is None or a.ndim == 1: @@ -929,8 +1087,10 @@ return [tuple([idx[i].min(axis).compressed() for i in range(a.ndim)]), tuple([idx[i].max(axis).compressed() for i in range(a.ndim)]),] + def flatnotmasked_contiguous(a): - """Find contiguous unmasked data in a flattened masked array. + """ + Find contiguous unmasked data in a flattened masked array. Return a sorted sequence of slices (start index, end index). @@ -950,22 +1110,22 @@ return result def notmasked_contiguous(a, axis=None): - """Find contiguous unmasked data in a masked array along the given - axis. + """ + Find contiguous unmasked data in a masked array along the given axis. Parameters ---------- - axis : int, optional - Axis along which to perform the operation. - If None, applies to a flattened version of the array. + axis : int, optional + Axis along which to perform the operation. + If None, applies to a flattened version of the array. Returns ------- - A sorted sequence of slices (start index, end index). + A sorted sequence of slices (start index, end index). Notes ----- - Only accepts 2D arrays at most. + Only accepts 2D arrays at most. """ a = asarray(a) Modified: branches/numpy-mingw-w64/numpy/ma/mrecords.py =================================================================== --- branches/numpy-mingw-w64/numpy/ma/mrecords.py 2009-02-20 13:30:20 UTC (rev 6424) +++ branches/numpy-mingw-w64/numpy/ma/mrecords.py 2009-02-20 16:37:01 UTC (rev 6425) @@ -357,7 +357,7 @@ dtype = None else: output = ndarray.view(self, dtype) - # OK, there's the change + # OK, there's the change except TypeError: dtype = np.dtype(dtype) # we need to revert to MaskedArray, but keeping the possibility Modified: branches/numpy-mingw-w64/numpy/ma/tests/test_core.py =================================================================== --- branches/numpy-mingw-w64/numpy/ma/tests/test_core.py 2009-02-20 13:30:20 UTC (rev 6424) +++ branches/numpy-mingw-w64/numpy/ma/tests/test_core.py 2009-02-20 16:37:01 UTC (rev 6425) @@ -474,6 +474,20 @@ np.array([(1, '1', 1.)], dtype=flexi.dtype)) + def test_filled_w_nested_dtype(self): + "Test filled w/ nested dtype" + ndtype = [('A', int), ('B', [('BA', int), ('BB', int)])] + a = array([(1, (1, 1)), (2, (2, 2))], + mask=[(0, (1, 0)), (0, (0, 1))], dtype=ndtype) + test = a.filled(0) + control = np.array([(1, (0, 1)), (2, (2, 0))], dtype=ndtype) + assert_equal(test, control) + # + test = a['B'].filled(0) + control = np.array([(0, 1), (2, 0)], dtype=a['B'].dtype) + assert_equal(test, control) + + def test_optinfo_propagation(self): "Checks that _optinfo dictionary isn't back-propagated" x = array([1,2,3,], dtype=float) @@ -483,6 +497,55 @@ y._optinfo['info'] = '!!!' assert_equal(x._optinfo['info'], '???') + + def test_fancy_printoptions(self): + "Test printing a masked array w/ fancy dtype." + fancydtype = np.dtype([('x', int), ('y', [('t', int), ('s', float)])]) + test = array([(1, (2, 3.0)), (4, (5, 6.0))], + mask=[(1, (0, 1)), (0, (1, 0))], + dtype=fancydtype) + control = "[(--, (2, --)) (4, (--, 6.0))]" + assert_equal(str(test), control) + + + def test_flatten_structured_array(self): + "Test flatten_structured_array on arrays" + # On ndarray + ndtype = [('a', int), ('b', float)] + a = np.array([(1, 1), (2, 2)], dtype=ndtype) + test = flatten_structured_array(a) + control = np.array([[1., 1.], [2., 2.]], dtype=np.float) + assert_equal(test, control) + assert_equal(test.dtype, control.dtype) + # On masked_array + a = array([(1, 1), (2, 2)], mask=[(0, 1), (1, 0)], dtype=ndtype) + test = flatten_structured_array(a) + control = array([[1., 1.], [2., 2.]], + mask=[[0, 1], [1, 0]], dtype=np.float) + assert_equal(test, control) + assert_equal(test.dtype, control.dtype) + assert_equal(test.mask, control.mask) + # On masked array with nested structure + ndtype = [('a', int), ('b', [('ba', int), ('bb', float)])] + a = array([(1, (1, 1.1)), (2, (2, 2.2))], + mask=[(0, (1, 0)), (1, (0, 1))], dtype=ndtype) + test = flatten_structured_array(a) + control = array([[1., 1., 1.1], [2., 2., 2.2]], + mask=[[0, 1, 0], [1, 0, 1]], dtype=np.float) + assert_equal(test, control) + assert_equal(test.dtype, control.dtype) + assert_equal(test.mask, control.mask) + # Keeping the initial shape + ndtype = [('a', int), ('b', float)] + a = np.array([[(1, 1),], [(2, 2),]], dtype=ndtype) + test = flatten_structured_array(a) + control = np.array([[[1., 1.],], [[2., 2.],]], dtype=np.float) + assert_equal(test, control) + assert_equal(test.dtype, control.dtype) + + + + #------------------------------------------------------------------------------ class TestMaskedArrayArithmetic(TestCase): @@ -539,6 +602,7 @@ assert_equal(np.multiply(x,y), multiply(xm, ym)) assert_equal(np.divide(x,y), divide(xm, ym)) + def test_divide_on_different_shapes(self): x = arange(6, dtype=float) x.shape = (2,3) @@ -557,6 +621,7 @@ assert_equal(z, [[-1.,-1.,-1.], [3.,4.,5.]]) assert_equal(z.mask, [[1,1,1],[0,0,0]]) + def test_mixed_arithmetic(self): "Tests mixed arithmetics." na = np.array([1]) @@ -571,6 +636,7 @@ assert_equal(getmaskarray(a/2), [0,0,0]) assert_equal(getmaskarray(2/a), [1,0,1]) + def test_masked_singleton_arithmetic(self): "Tests some scalar arithmetics on MaskedArrays." # Masked singleton should remain masked no matter what @@ -581,6 +647,7 @@ self.failUnless(maximum(xm, xm).mask) self.failUnless(minimum(xm, xm).mask) + def test_arithmetic_with_masked_singleton(self): "Checks that there's no collapsing to masked" x = masked_array([1,2]) @@ -593,6 +660,7 @@ assert_equal(y.shape, x.shape) assert_equal(y._mask, [True, True]) + def test_arithmetic_with_masked_singleton_on_1d_singleton(self): "Check that we're not losing the shape of a singleton" x = masked_array([1, ]) @@ -600,6 +668,7 @@ assert_equal(y.shape, x.shape) assert_equal(y.mask, [True, ]) + def test_scalar_arithmetic(self): x = array(0, mask=0) assert_equal(x.filled().ctypes.data, x.ctypes.data) @@ -608,6 +677,7 @@ assert_equal(xm.shape,(2,)) assert_equal(xm.mask,[1,1]) + def test_basic_ufuncs (self): "Test various functions such as sin, cos." (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d @@ -649,6 +719,7 @@ assert getmask(count(ott,0)) is nomask assert_equal([1,2],count(ott,0)) + def test_minmax_func (self): "Tests minimum and maximum." (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d @@ -672,6 +743,7 @@ x[-1,-1] = masked assert_equal(maximum(x), 2) + def test_minimummaximum_func(self): a = np.ones((2,2)) aminimum = minimum(a,a) @@ -690,6 +762,7 @@ self.failUnless(isinstance(amaximum, MaskedArray)) assert_equal(amaximum, np.maximum.outer(a,a)) + def test_minmax_funcs_with_output(self): "Tests the min/max functions with explicit outputs" mask = np.random.rand(12).round() @@ -735,7 +808,8 @@ self.failUnless(x.min() is masked) self.failUnless(x.max() is masked) self.failUnless(x.ptp() is masked) - #........................ + + def test_addsumprod (self): "Tests add, sum, product." (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d @@ -757,6 +831,98 @@ assert_equal(np.sum(x,1), sum(x,1)) assert_equal(np.product(x,1), product(x,1)) + + def test_binops_d2D(self): + "Test binary operations on 2D data" + a = array([[1.], [2.], [3.]], mask=[[False], [True], [True]]) + b = array([[2., 3.], [4., 5.], [6., 7.]]) + # + test = a * b + control = array([[2., 3.], [2., 2.], [3., 3.]], + mask=[[0, 0], [1, 1], [1, 1]]) + assert_equal(test, control) + assert_equal(test.data, control.data) + assert_equal(test.mask, control.mask) + # + test = b * a + control = array([[2., 3.], [4., 5.], [6., 7.]], + mask=[[0, 0], [1, 1], [1, 1]]) + assert_equal(test, control) + assert_equal(test.data, control.data) + assert_equal(test.mask, control.mask) + # + a = array([[1.], [2.], [3.]]) + b = array([[2., 3.], [4., 5.], [6., 7.]], + mask=[[0, 0], [0, 0], [0, 1]]) + test = a * b + control = array([[2, 3], [8, 10], [18, 3]], + mask=[[0, 0], [0, 0], [0, 1]]) + assert_equal(test, control) + assert_equal(test.data, control.data) + assert_equal(test.mask, control.mask) + # + test = b * a + control = array([[2, 3], [8, 10], [18, 7]], + mask=[[0, 0], [0, 0], [0, 1]]) + assert_equal(test, control) + assert_equal(test.data, control.data) + assert_equal(test.mask, control.mask) + + + def test_domained_binops_d2D(self): + "Test domained binary operations on 2D data" + a = array([[1.], [2.], [3.]], mask=[[False], [True], [True]]) + b = array([[2., 3.], [4., 5.], [6., 7.]]) + # + test = a / b + control = array([[1./2., 1./3.], [2., 2.], [3., 3.]], + mask=[[0, 0], [1, 1], [1, 1]]) + assert_equal(test, control) + assert_equal(test.data, control.data) + assert_equal(test.mask, control.mask) + # + test = b / a + control = array([[2./1., 3./1.], [4., 5.], [6., 7.]], + mask=[[0, 0], [1, 1], [1, 1]]) + assert_equal(test, control) + assert_equal(test.data, control.data) + assert_equal(test.mask, control.mask) + # + a = array([[1.], [2.], [3.]]) + b = array([[2., 3.], [4., 5.], [6., 7.]], + mask=[[0, 0], [0, 0], [0, 1]]) + test = a / b + control = array([[1./2, 1./3], [2./4, 2./5], [3./6, 3]], + mask=[[0, 0], [0, 0], [0, 1]]) + assert_equal(test, control) + assert_equal(test.data, control.data) + assert_equal(test.mask, control.mask) + # + test = b / a + control = array([[2/1., 3/1.], [4/2., 5/2.], [6/3., 7]], + mask=[[0, 0], [0, 0], [0, 1]]) + assert_equal(test, control) + assert_equal(test.data, control.data) + assert_equal(test.mask, control.mask) + + + def test_noshrinking(self): + "Check that we don't shrink a mask when not wanted" + # Binary operations + a = masked_array([1,2,3], mask=[False,False,False], shrink=False) + b = a + 1 + assert_equal(b.mask, [0, 0, 0]) + # In place binary operation + a += 1 + assert_equal(a.mask, [0, 0, 0]) + # Domained binary operation + b = a / 1. + assert_equal(b.mask, [0, 0, 0]) + # In place binary operation + a /= 1. + assert_equal(a.mask, [0, 0, 0]) + + def test_mod(self): "Tests mod" (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d @@ -767,7 +933,6 @@ test = mod(xm, ym) assert_equal(test, np.mod(xm, ym)) assert_equal(test.mask, mask_or(mask_or(xm.mask, ym.mask), (ym == 0))) - def test_TakeTransposeInnerOuter(self): @@ -825,6 +990,57 @@ self.failUnless(result is output) self.failUnless(output[0] is masked) + + def test_eq_on_structured(self): + "Test the equality of structured arrays" + ndtype = [('A', int), ('B', int)] + a = array([(1, 1), (2, 2)], mask=[(0, 1), (0, 0)], dtype=ndtype) + test = (a == a) + assert_equal(test, [True, True]) + assert_equal(test.mask, [False, False]) + b = array([(1, 1), (2, 2)], mask=[(1, 0), (0, 0)], dtype=ndtype) + test = (a == b) + assert_equal(test, [False, True]) + assert_equal(test.mask, [True, False]) + b = array([(1, 1), (2, 2)], mask=[(0, 1), (1, 0)], dtype=ndtype) + test = (a == b) + assert_equal(test, [True, False]) + assert_equal(test.mask, [False, False]) + + + def test_ne_on_structured(self): + "Test the equality of structured arrays" + ndtype = [('A', int), ('B', int)] + a = array([(1, 1), (2, 2)], mask=[(0, 1), (0, 0)], dtype=ndtype) + test = (a != a) + assert_equal(test, [False, False]) + assert_equal(test.mask, [False, False]) + b = array([(1, 1), (2, 2)], mask=[(1, 0), (0, 0)], dtype=ndtype) + test = (a != b) + assert_equal(test, [True, False]) + assert_equal(test.mask, [True, False]) + b = array([(1, 1), (2, 2)], mask=[(0, 1), (1, 0)], dtype=ndtype) + test = (a != b) + assert_equal(test, [False, True]) + assert_equal(test.mask, [False, False]) + + + def test_numpyarithmetics(self): + "Check that the mask is not back-propagated when using numpy functions" + a = masked_array([-1, 0, 1, 2, 3], mask=[0, 0, 0, 0, 1]) + control = masked_array([np.nan, np.nan, 0, np.log(2), -1], + mask=[1, 1, 0, 0, 1]) + # + test = log(a) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + assert_equal(a.mask, [0, 0, 0, 0, 1]) + # + test = np.log(a) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + assert_equal(a.mask, [0, 0, 0, 0, 1]) + #------------------------------------------------------------------------------ class TestMaskedArrayAttributes(TestCase): @@ -922,8 +1138,17 @@ a[1] = 1 assert_equal(a._mask, zeros(10)) - def _wtv(self): - int(np.nan) + def test_flat(self): + "Test flat on masked_matrices" + test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1]) + test.flat = masked_array([3, 2, 1], mask=[1, 0, 0]) + control = masked_array(np.matrix([[3, 2, 1]]), mask=[1, 0, 0]) + assert_equal(test, control) + # + test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1]) + testflat = test.flat + testflat[:] = testflat[[2, 1, 0]] + assert_equal(test, control) #------------------------------------------------------------------------------ @@ -1050,21 +1275,44 @@ # The shape shouldn't matter ndtype = [('f0', float, (2, 2))] control = np.array((default_fill_value(0.),), - dtype=[('f0',float)]) + dtype=[('f0',float)]).astype(ndtype) assert_equal(_check_fill_value(None, ndtype), control) - control = np.array((0,), dtype=[('f0',float)]) + control = np.array((0,), dtype=[('f0',float)]).astype(ndtype) assert_equal(_check_fill_value(0, ndtype), control) # ndtype = np.dtype("int, (2,3)float, float") control = np.array((default_fill_value(0), default_fill_value(0.), default_fill_value(0.),), - dtype="int, float, float") + dtype="int, float, float").astype(ndtype) test = _check_fill_value(None, ndtype) assert_equal(test, control) - control = np.array((0,0,0), dtype="int, float, float") + control = np.array((0,0,0), dtype="int, float, float").astype(ndtype) assert_equal(_check_fill_value(0, ndtype), control) + + def test_extremum_fill_value(self): + "Tests extremum fill values for flexible type." + a = array([(1, (2, 3)), (4, (5, 6))], + dtype=[('A', int), ('B', [('BA', int), ('BB', int)])]) + test = a.fill_value + assert_equal(test['A'], default_fill_value(a['A'])) + assert_equal(test['B']['BA'], default_fill_value(a['B']['BA'])) + assert_equal(test['B']['BB'], default_fill_value(a['B']['BB'])) + # + test = minimum_fill_value(a) + assert_equal(test[0], minimum_fill_value(a['A'])) + assert_equal(test[1][0], minimum_fill_value(a['B']['BA'])) + assert_equal(test[1][1], minimum_fill_value(a['B']['BB'])) + assert_equal(test[1], minimum_fill_value(a['B'])) + # + test = maximum_fill_value(a) + assert_equal(test[0], maximum_fill_value(a['A'])) + assert_equal(test[1][0], maximum_fill_value(a['B']['BA'])) + assert_equal(test[1][1], maximum_fill_value(a['B']['BB'])) + assert_equal(test[1], maximum_fill_value(a['B'])) + + #------------------------------------------------------------------------------ class TestUfuncs(TestCase): @@ -1126,7 +1374,17 @@ self.failUnless(amask.max(1)[0].mask) self.failUnless(amask.min(1)[0].mask) + def test_ndarray_mask(self): + "Check that the mask of the result is a ndarray (not a MaskedArray...)" + a = masked_array([-1, 0, 1, 2, 3], mask=[0, 0, 0, 0, 1]) + test = np.sqrt(a) + control = masked_array([-1, 0, 1, np.sqrt(2), -1], + mask=[1, 0, 0, 0, 1]) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + self.failUnless(not isinstance(test.mask, MaskedArray)) + #------------------------------------------------------------------------------ class TestMaskedArrayInPlaceArithmetics(TestCase): @@ -1242,23 +1500,177 @@ def test_inplace_division_misc(self): # - x = np.array([1.,1.,1.,-2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.]) - y = np.array([5.,0.,3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]) - m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] - m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0 ,0, 1] + x = [1., 1., 1.,-2., pi/2., 4., 5., -10., 10., 1., 2., 3.] + y = [5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.] + m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] + m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0 , 0, 1] xm = masked_array(x, mask=m1) ym = masked_array(y, mask=m2) # z = xm/ym assert_equal(z._mask, [1,1,1,0,0,1,1,0,0,0,1,1]) - assert_equal(z._data, [0.2,1.,1./3.,-1.,-pi/2.,-1.,5.,1.,1.,1.,2.,1.]) + assert_equal(z._data, [1.,1.,1.,-1.,-pi/2.,4.,5.,1.,1.,1.,2.,3.]) + #assert_equal(z._data, [0.2,1.,1./3.,-1.,-pi/2.,-1.,5.,1.,1.,1.,2.,1.]) # xm = xm.copy() xm /= ym assert_equal(xm._mask, [1,1,1,0,0,1,1,0,0,0,1,1]) - assert_equal(xm._data, [1/5.,1.,1./3.,-1.,-pi/2.,-1.,5.,1.,1.,1.,2.,1.]) + assert_equal(z._data, [1.,1.,1.,-1.,-pi/2.,4.,5.,1.,1.,1.,2.,3.]) + #assert_equal(xm._data, [1/5.,1.,1./3.,-1.,-pi/2.,-1.,5.,1.,1.,1.,2.,1.]) + def test_datafriendly_add(self): + "Test keeping data w/ (inplace) addition" + x = array([1, 2, 3], mask=[0, 0, 1]) + # Test add w/ scalar + xx = x + 1 + assert_equal(xx.data, [2, 3, 3]) + assert_equal(xx.mask, [0, 0, 1]) + # Test iadd w/ scalar + x += 1 + assert_equal(x.data, [2, 3, 3]) + assert_equal(x.mask, [0, 0, 1]) + # Test add w/ array + x = array([1, 2, 3], mask=[0, 0, 1]) + xx = x + array([1, 2, 3], mask=[1, 0, 0]) + assert_equal(xx.data, [1, 4, 3]) + assert_equal(xx.mask, [1, 0, 1]) + # Test iadd w/ array + x = array([1, 2, 3], mask=[0, 0, 1]) + x += array([1, 2, 3], mask=[1, 0, 0]) + assert_equal(x.data, [1, 4, 3]) + assert_equal(x.mask, [1, 0, 1]) + + + def test_datafriendly_sub(self): + "Test keeping data w/ (inplace) subtraction" + # Test sub w/ scalar + x = array([1, 2, 3], mask=[0, 0, 1]) + xx = x - 1 + assert_equal(xx.data, [0, 1, 3]) + assert_equal(xx.mask, [0, 0, 1]) + # Test isub w/ scalar + x = array([1, 2, 3], mask=[0, 0, 1]) + x -= 1 + assert_equal(x.data, [0, 1, 3]) + assert_equal(x.mask, [0, 0, 1]) + # Test sub w/ array + x = array([1, 2, 3], mask=[0, 0, 1]) + xx = x - array([1, 2, 3], mask=[1, 0, 0]) + assert_equal(xx.data, [1, 0, 3]) + assert_equal(xx.mask, [1, 0, 1]) + # Test isub w/ array + x = array([1, 2, 3], mask=[0, 0, 1]) + x -= array([1, 2, 3], mask=[1, 0, 0]) + assert_equal(x.data, [1, 0, 3]) + assert_equal(x.mask, [1, 0, 1]) + + + def test_datafriendly_mul(self): + "Test keeping data w/ (inplace) multiplication" + # Test mul w/ scalar + x = array([1, 2, 3], mask=[0, 0, 1]) + xx = x * 2 + assert_equal(xx.data, [2, 4, 3]) + assert_equal(xx.mask, [0, 0, 1]) + # Test imul w/ scalar + x = array([1, 2, 3], mask=[0, 0, 1]) + x *= 2 + assert_equal(x.data, [2, 4, 3]) + assert_equal(x.mask, [0, 0, 1]) + # Test mul w/ array + x = array([1, 2, 3], mask=[0, 0, 1]) + xx = x * array([10, 20, 30], mask=[1, 0, 0]) + assert_equal(xx.data, [1, 40, 3]) + assert_equal(xx.mask, [1, 0, 1]) + # Test imul w/ array + x = array([1, 2, 3], mask=[0, 0, 1]) + x *= array([10, 20, 30], mask=[1, 0, 0]) + assert_equal(x.data, [1, 40, 3]) + assert_equal(x.mask, [1, 0, 1]) + + + def test_datafriendly_div(self): + "Test keeping data w/ (inplace) division" + # Test div on scalar + x = array([1, 2, 3], mask=[0, 0, 1]) + xx = x / 2. + assert_equal(xx.data, [1/2., 2/2., 3]) + assert_equal(xx.mask, [0, 0, 1]) + # Test idiv on scalar + x = array([1., 2., 3.], mask=[0, 0, 1]) + x /= 2. + assert_equal(x.data, [1/2., 2/2., 3]) + assert_equal(x.mask, [0, 0, 1]) + # Test div on array + x = array([1., 2., 3.], mask=[0, 0, 1]) + xx = x / array([10., 20., 30.], mask=[1, 0, 0]) + assert_equal(xx.data, [1., 2./20., 3.]) + assert_equal(xx.mask, [1, 0, 1]) + # Test idiv on array + x = array([1., 2., 3.], mask=[0, 0, 1]) + x /= array([10., 20., 30.], mask=[1, 0, 0]) + assert_equal(x.data, [1., 2/20., 3.]) + assert_equal(x.mask, [1, 0, 1]) + + + def test_datafriendly_pow(self): + "Test keeping data w/ (inplace) power" + # Test pow on scalar + x = array([1., 2., 3.], mask=[0, 0, 1]) + xx = x ** 2.5 + assert_equal(xx.data, [1., 2.**2.5, 3.]) + assert_equal(xx.mask, [0, 0, 1]) + # Test ipow on scalar + x **= 2.5 + assert_equal(x.data, [1., 2.**2.5, 3]) + assert_equal(x.mask, [0, 0, 1]) + + + def test_datafriendly_add_arrays(self): + a = array([[1, 1], [3, 3]]) + b = array([1, 1], mask=[0, 0]) + a += b + assert_equal(a, [[2, 2], [4, 4]]) + if a.mask is not nomask: + assert_equal(a.mask, [[0, 0], [0, 0]]) + # + a = array([[1, 1], [3, 3]]) + b = array([1, 1], mask=[0, 1]) + a += b + assert_equal(a, [[2, 2], [4, 4]]) + assert_equal(a.mask, [[0, 1], [0, 1]]) + + + def test_datafriendly_sub_arrays(self): + a = array([[1, 1], [3, 3]]) + b = array([1, 1], mask=[0, 0]) + a -= b + assert_equal(a, [[0, 0], [2, 2]]) + if a.mask is not nomask: + assert_equal(a.mask, [[0, 0], [0, 0]]) + # + a = array([[1, 1], [3, 3]]) + b = array([1, 1], mask=[0, 1]) + a -= b + assert_equal(a, [[0, 0], [2, 2]]) + assert_equal(a.mask, [[0, 1], [0, 1]]) + + + def test_datafriendly_mul_arrays(self): + a = array([[1, 1], [3, 3]]) + b = array([1, 1], mask=[0, 0]) + a *= b + assert_equal(a, [[1, 1], [3, 3]]) + if a.mask is not nomask: + assert_equal(a.mask, [[0, 0], [0, 0]]) + # + a = array([[1, 1], [3, 3]]) + b = array([1, 1], mask=[0, 1]) + a *= b + assert_equal(a, [[1, 1], [3, 3]]) + assert_equal(a.mask, [[0, 1], [0, 1]]) + #------------------------------------------------------------------------------ class TestMaskedArrayMethods(TestCase): @@ -1334,8 +1746,8 @@ a *= 1e-8 a[0] = 0 self.failUnless(allclose(a, 0, masked_equal=True)) - + def test_allany(self): """Checks the any/all methods/functions.""" x = np.array([[ 0.13, 0.26, 0.90], @@ -1702,6 +2114,28 @@ assert_equal(am, an) + def test_sort_flexible(self): + "Test sort on flexible dtype." + a = array([(3, 3), (3, 2), (2, 2), (2, 1), (1, 0), (1, 1), (1, 2)], + mask=[(0, 0), (0, 1), (0, 0), (0, 0), (1, 0), (0, 0), (0, 0)], + dtype=[('A', int), ('B', int)]) + # + test = sort(a) + b = array([(1, 1), (1, 2), (2, 1), (2, 2), (3, 3), (3, 2), (1, 0)], + mask=[(0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 1), (1, 0)], + dtype=[('A', int), ('B', int)]) + assert_equal(test, b) + assert_equal(test.mask, b.mask) + # + test = sort(a, endwith=False) + b = array([(1, 0), (1, 1), (1, 2), (2, 1), (2, 2), (3, 2), (3, 3),], + mask=[(1, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 1), (0, 0),], + dtype=[('A', int), ('B', int)]) + assert_equal(test, b) + assert_equal(test.mask, b.mask) + # + + def test_squeeze(self): "Check squeeze" data = masked_array([[1,2,3]]) @@ -1775,15 +2209,15 @@ assert_equal(x.tolist(), [(1,1.1,'one'),(2,2.2,'two'),(None,None,None)]) - def test_torecords(self): + def test_toflex(self): "Test the conversion to records" data = arange(10) - record = data.torecords() + record = data.toflex() assert_equal(record['_data'], data._data) assert_equal(record['_mask'], data._mask) # data[[0,1,2,-1]] = masked - record = data.torecords() + record = data.toflex() assert_equal(record['_data'], data._data) assert_equal(record['_mask'], data._mask) # @@ -1793,7 +2227,7 @@ np.random.rand(10))], dtype=ndtype) data[[0,1,2,-1]] = masked - record = data.torecords() + record = data.toflex() assert_equal(record['_data'], data._data) assert_equal(record['_mask'], data._mask) # @@ -1803,10 +2237,29 @@ np.random.rand(10))], dtype=ndtype) data[[0,1,2,-1]] = masked - record = data.torecords() - assert_equal(record['_data'], data._data) - assert_equal(record['_mask'], data._mask) + record = data.toflex() + assert_equal_records(record['_data'], data._data) + assert_equal_records(record['_mask'], data._mask) + + def test_fromflex(self): + "Test the reconstruction of a masked_array from a record" + a = array([1, 2, 3]) + test = fromflex(a.toflex()) + assert_equal(test, a) + assert_equal(test.mask, a.mask) + # + a = array([1, 2, 3], mask=[0, 0, 1]) + test = fromflex(a.toflex()) + assert_equal(test, a) + assert_equal(test.mask, a.mask) + # + a = array([(1, 1.), (2, 2.), (3, 3.)], mask=[(1, 0), (0, 0), (0, 1)], + dtype=[('A', int), ('B', float)]) + test = fromflex(a.toflex()) + assert_equal(test, a) + assert_equal(test.data, a.data) + #------------------------------------------------------------------------------ @@ -1970,7 +2423,7 @@ assert_equal(out, [0, 4, 8]) assert_equal(out.mask, [0, 1, 0]) out = diag(out) - control = array([[0, 0, 0], [0, 4, 0], [0, 0, 8]], + control = array([[0, 0, 0], [0, 4, 0], [0, 0, 8]], mask = [[0, 0, 0], [0, 1, 0], [0, 0, 0]]) assert_equal(out, control) @@ -2155,8 +2608,8 @@ def test_power(self): x = -1.1 - assert_almost_equal(power(x,2.), 1.21) - self.failUnless(power(x,masked) is masked) + assert_almost_equal(power(x, 2.), 1.21) + self.failUnless(power(x, masked) is masked) x = array([-1.1,-1.1,1.1,1.1,0.]) b = array([0.5,2.,0.5,2.,-1.], mask=[0,0,0,0,1]) y = power(x,b) @@ -2423,6 +2876,12 @@ test = mask_or(mask, other) except ValueError: pass + # Using nested arrays + dtype = [('a', np.bool), ('b', [('ba', np.bool), ('bb', np.bool)])] + amask = np.array([(0, (1, 0)), (0, (1, 0))], dtype=dtype) + bmask = np.array([(1, (0, 1)), (0, (0, 0))], dtype=dtype) + cntrl = np.array([(1, (1, 1)), (0, (1, 0))], dtype=dtype) + assert_equal(mask_or(amask, bmask), cntrl) def test_flatten_mask(self): @@ -2435,7 +2894,7 @@ test = flatten_mask(mask) control = np.array([0, 0, 0, 1], dtype=bool) assert_equal(test, control) - + mdtype = [('a', bool), ('b', [('ba', bool), ('bb', bool)])] data = [(0, (0, 0)), (0, (0, 1))] mask = np.array(data, dtype=mdtype) @@ -2583,7 +3042,7 @@ self.failUnless(isinstance(test, MaskedArray)) assert_equal(test._data, a._data) assert_equal(test._mask, a._mask) - + # def test_view_to_type(self): (data, a, controlmask) = self.data @@ -2619,7 +3078,7 @@ assert_equal(test.dtype.names, ('A', 'B')) assert_equal(test['A'], a['a'][-1]) assert_equal(test['B'], a['b'][-1]) - + # def test_view_to_subdtype(self): (data, a, controlmask) = self.data Modified: branches/numpy-mingw-w64/numpy/ma/tests/test_extras.py =================================================================== --- branches/numpy-mingw-w64/numpy/ma/tests/test_extras.py 2009-02-20 13:30:20 UTC (rev 6424) +++ branches/numpy-mingw-w64/numpy/ma/tests/test_extras.py 2009-02-20 16:37:01 UTC (rev 6425) @@ -22,7 +22,7 @@ # def test_masked_all(self): "Tests masked_all" - # Standard dtype + # Standard dtype test = masked_all((2,), dtype=float) control = array([1, 1], mask=[1, 1], dtype=float) assert_equal(test, control) @@ -53,7 +53,7 @@ def test_masked_all_like(self): "Tests masked_all" - # Standard dtype + # Standard dtype base = array([1, 2], dtype=float) test = masked_all_like(base) control = array([1, 1], mask=[1, 1], dtype=float) @@ -338,40 +338,8 @@ c = dot(b,a,False) assert_equal(c, np.dot(b.filled(0),a.filled(0))) - def test_ediff1d(self): - "Tests mediff1d" - x = masked_array(np.arange(5), mask=[1,0,0,0,1]) - difx_d = (x._data[1:]-x._data[:-1]) - difx_m = (x._mask[1:]-x._mask[:-1]) - dx = ediff1d(x) - assert_equal(dx._data, difx_d) - assert_equal(dx._mask, difx_m) - # - dx = ediff1d(x, to_begin=masked) - assert_equal(dx._data, np.r_[0,difx_d]) - assert_equal(dx._mask, np.r_[1,difx_m]) - dx = ediff1d(x, to_begin=[1,2,3]) - assert_equal(dx._data, np.r_[[1,2,3],difx_d]) - assert_equal(dx._mask, np.r_[[0,0,0],difx_m]) - # - dx = ediff1d(x, to_end=masked) - assert_equal(dx._data, np.r_[difx_d,0]) - assert_equal(dx._mask, np.r_[difx_m,1]) - dx = ediff1d(x, to_end=[1,2,3]) - assert_equal(dx._data, np.r_[difx_d,[1,2,3]]) - assert_equal(dx._mask, np.r_[difx_m,[0,0,0]]) - # - dx = ediff1d(x, to_end=masked, to_begin=masked) - assert_equal(dx._data, np.r_[0,difx_d,0]) - assert_equal(dx._mask, np.r_[1,difx_m,1]) - dx = ediff1d(x, to_end=[1,2,3], to_begin=masked) - assert_equal(dx._data, np.r_[0,difx_d,[1,2,3]]) - assert_equal(dx._mask, np.r_[1,difx_m,[0,0,0]]) - # - dx = ediff1d(x._data, to_end=masked, to_begin=masked) - assert_equal(dx._data, np.r_[0,difx_d,0]) - assert_equal(dx._mask, np.r_[1,0,0,0,0,1]) + class TestApplyAlongAxis(TestCase): # "Tests 2D functions" @@ -383,6 +351,7 @@ assert_equal(xa,[[1,4],[7,10]]) + class TestMedian(TestCase): # def test_2d(self): @@ -422,11 +391,12 @@ assert_equal(median(x,0), [[12,10],[8,9],[16,17]]) + class TestCov(TestCase): - # + def setUp(self): self.data = array(np.random.rand(12)) - # + def test_1d_wo_missing(self): "Test cov on 1D variable w/o missing values" x = self.data @@ -434,7 +404,7 @@ assert_almost_equal(np.cov(x, rowvar=False), cov(x, rowvar=False)) assert_almost_equal(np.cov(x, rowvar=False, bias=True), cov(x, rowvar=False, bias=True)) - # + def test_2d_wo_missing(self): "Test cov on 1 2D variable w/o missing values" x = self.data.reshape(3,4) @@ -442,7 +412,7 @@ assert_almost_equal(np.cov(x, rowvar=False), cov(x, rowvar=False)) assert_almost_equal(np.cov(x, rowvar=False, bias=True), cov(x, rowvar=False, bias=True)) - # + def test_1d_w_missing(self): "Test cov 1 1D variable w/missing values" x = self.data @@ -466,7 +436,7 @@ cov(x, x[::-1], rowvar=False)) assert_almost_equal(np.cov(nx, nx[::-1], rowvar=False, bias=True), cov(x, x[::-1], rowvar=False, bias=True)) - # + def test_2d_w_missing(self): "Test cov on 2D variable w/ missing value" x = self.data @@ -486,11 +456,12 @@ np.cov(xf, rowvar=False, bias=True) * x.shape[0]/frac) + class TestCorrcoef(TestCase): - # + def setUp(self): self.data = array(np.random.rand(12)) - # + def test_1d_wo_missing(self): "Test cov on 1D variable w/o missing values" x = self.data @@ -499,7 +470,7 @@ corrcoef(x, rowvar=False)) assert_almost_equal(np.corrcoef(x, rowvar=False, bias=True), corrcoef(x, rowvar=False, bias=True)) - # + def test_2d_wo_missing(self): "Test corrcoef on 1 2D variable w/o missing values" x = self.data.reshape(3,4) @@ -508,7 +479,7 @@ corrcoef(x, rowvar=False)) assert_almost_equal(np.corrcoef(x, rowvar=False, bias=True), corrcoef(x, rowvar=False, bias=True)) - # + def test_1d_w_missing(self): "Test corrcoef 1 1D variable w/missing values" x = self.data @@ -532,7 +503,7 @@ corrcoef(x, x[::-1], rowvar=False)) assert_almost_equal(np.corrcoef(nx, nx[::-1], rowvar=False, bias=True), corrcoef(x, x[::-1], rowvar=False, bias=True)) - # + def test_2d_w_missing(self): "Test corrcoef on 2D variable w/ missing value" x = self.data @@ -575,6 +546,213 @@ assert_almost_equal(a, a_) + +class TestArraySetOps(TestCase): + # + def test_unique1d_onlist(self): + "Test unique1d on list" + data = [1, 1, 1, 2, 2, 3] + test = unique1d(data, return_index=True, return_inverse=True) + self.failUnless(isinstance(test[0], MaskedArray)) + assert_equal(test[0], masked_array([1, 2, 3], mask=[0, 0, 0])) + assert_equal(test[1], [0, 3, 5]) + assert_equal(test[2], [0, 0, 0, 1, 1, 2]) + + def test_unique1d_onmaskedarray(self): + "Test unique1d on masked data w/use_mask=True" + data = masked_array([1, 1, 1, 2, 2, 3], mask=[0, 0, 1, 0, 1, 0]) + test = unique1d(data, return_index=True, return_inverse=True) + assert_equal(test[0], masked_array([1, 2, 3, -1], mask=[0, 0, 0, 1])) + assert_equal(test[1], [0, 3, 5, 2]) + assert_equal(test[2], [0, 0, 3, 1, 3, 2]) + # + data.fill_value = 3 + data = masked_array([1, 1, 1, 2, 2, 3], + mask=[0, 0, 1, 0, 1, 0], fill_value=3) + test = unique1d(data, return_index=True, return_inverse=True) + assert_equal(test[0], masked_array([1, 2, 3, -1], mask=[0, 0, 0, 1])) + assert_equal(test[1], [0, 3, 5, 2]) + assert_equal(test[2], [0, 0, 3, 1, 3, 2]) + + def test_unique1d_allmasked(self): + "Test all masked" + data = masked_array([1, 1, 1], mask=True) + test = unique1d(data, return_index=True, return_inverse=True) + assert_equal(test[0], masked_array([1,], mask=[True])) + assert_equal(test[1], [0]) + assert_equal(test[2], [0, 0, 0]) + # + "Test masked" + data = masked + test = unique1d(data, return_index=True, return_inverse=True) + assert_equal(test[0], masked_array(masked)) + assert_equal(test[1], [0]) + assert_equal(test[2], [0]) + + def test_ediff1d(self): + "Tests mediff1d" + x = masked_array(np.arange(5), mask=[1,0,0,0,1]) + control = array([1, 1, 1, 4], mask=[1, 0, 0, 1]) + test = ediff1d(x) + assert_equal(test, control) + assert_equal(test.data, control.data) + assert_equal(test.mask, control.mask) + # + def test_ediff1d_tobegin(self): + "Test ediff1d w/ to_begin" + x = masked_array(np.arange(5), mask=[1,0,0,0,1]) + test = ediff1d(x, to_begin=masked) + control = array([0, 1, 1, 1, 4], mask=[1, 1, 0, 0, 1]) + assert_equal(test, control) + assert_equal(test.data, control.data) + assert_equal(test.mask, control.mask) + # + test = ediff1d(x, to_begin=[1,2,3]) + control = array([1, 2, 3, 1, 1, 1, 4], mask=[0, 0, 0, 1, 0, 0, 1]) + assert_equal(test, control) + assert_equal(test.data, control.data) + assert_equal(test.mask, control.mask) + # + def test_ediff1d_toend(self): + "Test ediff1d w/ to_end" + x = masked_array(np.arange(5), mask=[1,0,0,0,1]) + test = ediff1d(x, to_end=masked) + control = array([1, 1, 1, 4, 0], mask=[1, 0, 0, 1, 1]) + assert_equal(test, control) + assert_equal(test.data, control.data) + assert_equal(test.mask, control.mask) + # + test = ediff1d(x, to_end=[1,2,3]) + control = array([1, 1, 1, 4, 1, 2, 3], mask=[1, 0, 0, 1, 0, 0, 0]) + assert_equal(test, control) + assert_equal(test.data, control.data) + assert_equal(test.mask, control.mask) + # + def test_ediff1d_tobegin_toend(self): + "Test ediff1d w/ to_begin and to_end" + x = masked_array(np.arange(5), mask=[1,0,0,0,1]) + test = ediff1d(x, to_end=masked, to_begin=masked) + control = array([0, 1, 1, 1, 4, 0], mask=[1, 1, 0, 0, 1, 1]) + assert_equal(test, control) + assert_equal(test.data, control.data) + assert_equal(test.mask, control.mask) + # + test = ediff1d(x, to_end=[1,2,3], to_begin=masked) + control = array([0, 1, 1, 1, 4, 1, 2, 3], mask=[1, 1, 0, 0, 1, 0, 0, 0]) + assert_equal(test, control) + assert_equal(test.data, control.data) + assert_equal(test.mask, control.mask) + # + def test_ediff1d_ndarray(self): + "Test ediff1d w/ a ndarray" + x = np.arange(5) + test = ediff1d(x) + control = array([1, 1, 1, 1], mask=[0, 0, 0, 0]) + assert_equal(test, control) + self.failUnless(isinstance(test, MaskedArray)) + assert_equal(test.data, control.data) + assert_equal(test.mask, control.mask) + # + test = ediff1d(x, to_end=masked, to_begin=masked) + control = array([0, 1, 1, 1, 1, 0], mask=[1, 0, 0, 0, 0, 1]) + self.failUnless(isinstance(test, MaskedArray)) + assert_equal(test.data, control.data) + assert_equal(test.mask, control.mask) + + + def test_intersect1d(self): + "Test intersect1d" + x = array([1, 3, 3, 3], mask=[0, 0, 0, 1]) + y = array([3, 1, 1, 1], mask=[0, 0, 0, 1]) + test = intersect1d(x, y) + control = array([1, 1, 3, 3, -1], mask=[0, 0, 0, 0, 1]) + assert_equal(test, control) + + + def test_intersect1d_nu(self): + "Test intersect1d_nu" + x = array([1, 3, 3, 3], mask=[0, 0, 0, 1]) + y = array([3, 1, 1, 1], mask=[0, 0, 0, 1]) + test = intersect1d_nu(x, y) + control = array([1, 3, -1], mask=[0, 0, 1]) + assert_equal(test, control) + + + def test_setxor1d(self): + "Test setxor1d" + a = array([1, 2, 5, 7, -1], mask=[0, 0, 0, 0, 1]) + b = array([1, 2, 3, 4, 5, -1], mask=[0, 0, 0, 0, 0, -1]) + test = setxor1d(a, b) + assert_equal(test, array([3, 4, 7])) + # + a = array([1, 2, 5, 7, -1], mask=[0, 0, 0, 0, 1]) + b = [1, 2, 3, 4, 5] + test = setxor1d(a, b) + assert_equal(test, array([3, 4, 7, -1], mask=[0, 0, 0, 1])) + # + a = array( [1, 2, 3] ) + b = array( [6, 5, 4] ) + test = setxor1d(a, b) + assert(isinstance(test, MaskedArray)) + assert_equal(test, [1, 2, 3, 4, 5, 6]) + # + a = array([1, 8, 2, 3], mask=[0, 1, 0, 0]) + b = array([6, 5, 4, 8], mask=[0, 0, 0, 1]) + test = setxor1d(a, b) + assert(isinstance(test, MaskedArray)) + assert_equal(test, [1, 2, 3, 4, 5, 6]) + # + assert_array_equal([], setxor1d([],[])) + + + def test_setmember1d( self ): + "Test setmember1d" + a = array([1, 2, 5, 7, -1], mask=[0, 0, 0, 0, 1]) + b = array([1, 2, 3, 4, 5, -1], mask=[0, 0, 0, 0, 0, -1]) + test = setmember1d(a, b) + assert_equal(test, [True, True, True, False, True]) + # + assert_array_equal([], setmember1d([],[])) + + + def test_union1d( self ): + "Test union1d" + a = array([1, 2, 5, 7, -1], mask=[0, 0, 0, 0, 1]) + b = array([1, 2, 3, 4, 5, -1], mask=[0, 0, 0, 0, 0, -1]) + test = union1d(a, b) + control = array([1, 2, 3, 4, 5, 7, -1], mask=[0, 0, 0, 0, 0, 0, 1]) + assert_equal(test, control) + # + assert_array_equal([], setmember1d([],[])) + + + def test_setdiff1d( self ): + "Test setdiff1d" + a = array([6, 5, 4, 7, 1, 2, 1], mask=[0, 0, 0, 0, 0, 0, 1]) + b = array([2, 4, 3, 3, 2, 1, 5]) + test = setdiff1d(a, b) + assert_equal(test, array([6, 7, -1], mask=[0, 0, 1])) + # + a = arange(10) + b = arange(8) + assert_equal(setdiff1d(a, b), array([8, 9])) + + + def test_setdiff1d_char_array(self): + "Test setdiff1d_charray" + a = np.array(['a','b','c']) + b = np.array(['a','b','s']) + assert_array_equal(setdiff1d(a,b), np.array(['c'])) + + + + +class TestShapeBase(TestCase): + # + def test_atleast1d(self): + pass + + ############################################################################### #------------------------------------------------------------------------------ if __name__ == "__main__": Modified: branches/numpy-mingw-w64/numpy/ma/tests/test_mrecords.py =================================================================== --- branches/numpy-mingw-w64/numpy/ma/tests/test_mrecords.py 2009-02-20 13:30:20 UTC (rev 6424) +++ branches/numpy-mingw-w64/numpy/ma/tests/test_mrecords.py 2009-02-20 16:37:01 UTC (rev 6425) @@ -334,8 +334,8 @@ mult[0] = masked mult[1] = (1, 1, 1) mult.filled(0) - assert_equal(mult.filled(0), - np.array([(0,0,0),(1,1,1)], dtype=mult.dtype)) + assert_equal_records(mult.filled(0), + np.array([(0,0,0),(1,1,1)], dtype=mult.dtype)) class TestView(TestCase): Modified: branches/numpy-mingw-w64/numpy/ma/tests/test_subclassing.py =================================================================== --- branches/numpy-mingw-w64/numpy/ma/tests/test_subclassing.py 2009-02-20 13:30:20 UTC (rev 6424) +++ branches/numpy-mingw-w64/numpy/ma/tests/test_subclassing.py 2009-02-20 16:37:01 UTC (rev 6425) @@ -153,5 +153,3 @@ ################################################################################ if __name__ == '__main__': run_module_suite() - - Modified: branches/numpy-mingw-w64/numpy/ma/testutils.py =================================================================== --- branches/numpy-mingw-w64/numpy/ma/testutils.py 2009-02-20 13:30:20 UTC (rev 6424) +++ branches/numpy-mingw-w64/numpy/ma/testutils.py 2009-02-20 16:37:01 UTC (rev 6425) @@ -110,14 +110,14 @@ return _assert_equal_on_sequences(actual.tolist(), desired.tolist(), err_msg='') - elif actual_dtype.char in "OV" and desired_dtype.char in "OV": - if (actual_dtype != desired_dtype) and actual_dtype: - msg = build_err_msg([actual_dtype, desired_dtype], - err_msg, header='', names=('actual', 'desired')) - raise ValueError(msg) - return _assert_equal_on_sequences(actual.tolist(), - desired.tolist(), - err_msg='') +# elif actual_dtype.char in "OV" and desired_dtype.char in "OV": +# if (actual_dtype != desired_dtype) and actual_dtype: +# msg = build_err_msg([actual_dtype, desired_dtype], +# err_msg, header='', names=('actual', 'desired')) +# raise ValueError(msg) +# return _assert_equal_on_sequences(actual.tolist(), +# desired.tolist(), +# err_msg='') return assert_array_equal(actual, desired, err_msg) @@ -167,12 +167,12 @@ """Asserts that a comparison relation between two masked arrays is satisfied elementwise.""" # Fill the data first - xf = filled(x) - yf = filled(y) +# xf = filled(x) +# yf = filled(y) # Allocate a common mask and refill m = mask_or(getmask(x), getmask(y)) - x = masked_array(xf, copy=False, mask=m) - y = masked_array(yf, copy=False, mask=m) + x = masked_array(x, copy=False, mask=m, keep_mask=False, subok=False) + y = masked_array(y, copy=False, mask=m, keep_mask=False, subok=False) if ((x is masked) and not (y is masked)) or \ ((y is masked) and not (x is masked)): msg = build_err_msg([x, y], err_msg=err_msg, verbose=verbose, @@ -180,14 +180,16 @@ raise ValueError(msg) # OK, now run the basic tests on filled versions return utils.assert_array_compare(comparison, - x.filled(fill_value), y.filled(fill_value), - err_msg=err_msg, - verbose=verbose, header=header) + x.filled(fill_value), + y.filled(fill_value), + err_msg=err_msg, + verbose=verbose, header=header) def assert_array_equal(x, y, err_msg='', verbose=True): """Checks the elementwise equality of two masked arrays.""" - assert_array_compare(equal, x, y, err_msg=err_msg, verbose=verbose, + assert_array_compare(operator.__eq__, x, y, + err_msg=err_msg, verbose=verbose, header='Arrays are not equal') @@ -221,7 +223,8 @@ def assert_array_less(x, y, err_msg='', verbose=True): "Checks that x is smaller than y elementwise." - assert_array_compare(less, x, y, err_msg=err_msg, verbose=verbose, + assert_array_compare(operator.__lt__, x, y, + err_msg=err_msg, verbose=verbose, header='Arrays are not less-ordered') Modified: branches/numpy-mingw-w64/numpy/numarray/util.py =================================================================== --- branches/numpy-mingw-w64/numpy/numarray/util.py 2009-02-20 13:30:20 UTC (rev 6424) +++ branches/numpy-mingw-w64/numpy/numarray/util.py 2009-02-20 16:37:01 UTC (rev 6425) @@ -1,7 +1,7 @@ import os import numpy -__all__ = ['MathDomainError', 'UnderflowError', 'NumOverflowError', +__all__ = ['MathDomainError', 'UnderflowError', 'NumOverflowError', 'handleError', 'get_numarray_include_dirs'] class MathDomainError(ArithmeticError): pass Modified: branches/numpy-mingw-w64/numpy/oldnumeric/arrayfns.py =================================================================== --- branches/numpy-mingw-w64/numpy/oldnumeric/arrayfns.py 2009-02-20 13:30:20 UTC (rev 6424) +++ branches/numpy-mingw-w64/numpy/oldnumeric/arrayfns.py 2009-02-20 16:37:01 UTC (rev 6425) @@ -1,8 +1,8 @@ """Backward compatible with arrayfns from Numeric """ -__all__ = ['array_set', 'construct3', 'digitize', 'error', 'find_mask', - 'histogram', 'index_sort', 'interp', 'nz', 'reverse', 'span', +__all__ = ['array_set', 'construct3', 'digitize', 'error', 'find_mask', + 'histogram', 'index_sort', 'interp', 'nz', 'reverse', 'span', 'to_corners', 'zmin_zmax'] import numpy as np Modified: branches/numpy-mingw-w64/numpy/oldnumeric/mlab.py =================================================================== --- branches/numpy-mingw-w64/numpy/oldnumeric/mlab.py 2009-02-20 13:30:20 UTC (rev 6424) +++ branches/numpy-mingw-w64/numpy/oldnumeric/mlab.py 2009-02-20 16:37:01 UTC (rev 6425) @@ -1,7 +1,7 @@ # This module is for compatibility only. All functions are defined elsewhere. __all__ = ['rand', 'tril', 'trapz', 'hanning', 'rot90', 'triu', 'diff', 'angle', - 'roots', 'ptp', 'kaiser', 'randn', 'cumprod', 'diag', 'msort', + 'roots', 'ptp', 'kaiser', 'randn', 'cumprod', 'diag', 'msort', 'LinearAlgebra', 'RandomArray', 'prod', 'std', 'hamming', 'flipud', 'max', 'blackman', 'corrcoef', 'bartlett', 'eye', 'squeeze', 'sinc', 'tri', 'cov', 'svd', 'min', 'median', 'fliplr', 'eig', 'mean'] Modified: branches/numpy-mingw-w64/numpy/oldnumeric/rng.py =================================================================== --- branches/numpy-mingw-w64/numpy/oldnumeric/rng.py 2009-02-20 13:30:20 UTC (rev 6424) +++ branches/numpy-mingw-w64/numpy/oldnumeric/rng.py 2009-02-20 16:37:01 UTC (rev 6425) @@ -5,7 +5,7 @@ __all__ = ['CreateGenerator','ExponentialDistribution','LogNormalDistribution', - 'NormalDistribution', 'UniformDistribution', 'error', 'ranf', + 'NormalDistribution', 'UniformDistribution', 'error', 'ranf', 'default_distribution', 'random_sample', 'standard_generator'] import numpy.random.mtrand as mt Modified: branches/numpy-mingw-w64/numpy/testing/__init__.py =================================================================== --- branches/numpy-mingw-w64/numpy/testing/__init__.py 2009-02-20 13:30:20 UTC (rev 6424) +++ branches/numpy-mingw-w64/numpy/testing/__init__.py 2009-02-20 16:37:01 UTC (rev 6425) @@ -5,12 +5,10 @@ away. """ -#import unittest from unittest import TestCase import decorators as dec from utils import * -from parametric import ParametricTestCase from numpytest import * from nosetester import NoseTester as Tester from nosetester import run_module_suite Modified: branches/numpy-mingw-w64/numpy/testing/decorators.py =================================================================== --- branches/numpy-mingw-w64/numpy/testing/decorators.py 2009-02-20 13:30:20 UTC (rev 6424) +++ branches/numpy-mingw-w64/numpy/testing/decorators.py 2009-02-20 16:37:01 UTC (rev 6425) @@ -51,8 +51,11 @@ Parameters --------- - skip_condition : bool - Flag to determine whether to skip test (True) or not (False) + skip_condition : bool or callable. + Flag to determine whether to skip test. If the condition is a + callable, it is used at runtime to dynamically make the decision. This + is useful for tests that may require costly imports, to delay the cost + until the test suite is actually executed. msg : string Message to give on raising a SkipTest exception @@ -69,28 +72,66 @@ decorator with the nose.tools.make_decorator function in order to transmit function name, and various other metadata. ''' - if msg is None: - msg = 'Test skipped due to test condition' + def skip_decorator(f): # Local import to avoid a hard nose dependency and only incur the # import time overhead at actual test-time. import nose - def skipper(*args, **kwargs): - if skip_condition: - raise nose.SkipTest, msg + + # Allow for both boolean or callable skip conditions. + if callable(skip_condition): + skip_val = lambda : skip_condition() + else: + skip_val = lambda : skip_condition + + def get_msg(func,msg=None): + """Skip message with information about function being skipped.""" + if msg is None: + out = 'Test skipped due to test condition' + else: + out = '\n'+msg + + return "Skipping test: %s%s" % (func.__name__,out) + + # We need to define *two* skippers because Python doesn't allow both + # return with value and yield inside the same function. + def skipper_func(*args, **kwargs): + """Skipper for normal test functions.""" + if skip_val(): + raise nose.SkipTest(get_msg(f,msg)) else: return f(*args, **kwargs) + + def skipper_gen(*args, **kwargs): + """Skipper for test generators.""" + if skip_val(): + raise nose.SkipTest(get_msg(f,msg)) + else: + for x in f(*args, **kwargs): + yield x + + # Choose the right skipper to use when building the actual decorator. + if nose.util.isgenerator(f): + skipper = skipper_gen + else: + skipper = skipper_func + return nose.tools.make_decorator(f)(skipper) + return skip_decorator -def knownfailureif(skip_condition, msg=None): - ''' Make function raise KnownFailureTest exception if skip_condition is true +def knownfailureif(fail_condition, msg=None): + ''' Make function raise KnownFailureTest exception if fail_condition is true + Parameters --------- - skip_condition : bool - Flag to determine whether to mark test as known failure (True) - or not (False) + fail_condition : bool or callable. + Flag to determine whether to mark test as known failure (True) + or not (False). If the condition is a callable, it is used at + runtime to dynamically make the decision. This is useful for + tests that may require costly imports, to delay the cost + until the test suite is actually executed. msg : string Message to give on raising a KnownFailureTest exception @@ -109,15 +150,23 @@ ''' if msg is None: msg = 'Test skipped due to known failure' - def skip_decorator(f): + + # Allow for both boolean or callable known failure conditions. + if callable(fail_condition): + fail_val = lambda : fail_condition() + else: + fail_val = lambda : fail_condition + + def knownfail_decorator(f): # Local import to avoid a hard nose dependency and only incur the # import time overhead at actual test-time. import nose from noseclasses import KnownFailureTest - def skipper(*args, **kwargs): - if skip_condition: + def knownfailer(*args, **kwargs): + if fail_val(): raise KnownFailureTest, msg else: return f(*args, **kwargs) - return nose.tools.make_decorator(f)(skipper) - return skip_decorator + return nose.tools.make_decorator(f)(knownfailer) + + return knownfail_decorator Modified: branches/numpy-mingw-w64/numpy/testing/noseclasses.py =================================================================== --- branches/numpy-mingw-w64/numpy/testing/noseclasses.py 2009-02-20 13:30:20 UTC (rev 6424) +++ branches/numpy-mingw-w64/numpy/testing/noseclasses.py 2009-02-20 16:37:01 UTC (rev 6425) @@ -1,4 +1,6 @@ -# These classes implement a doctest runner plugin for nose. +# These classes implement a doctest runner plugin for nose, a "known failure" +# error class, and a customized TestProgram for NumPy. + # Because this module imports nose directly, it should not # be used except by nosetester.py to avoid a general NumPy # dependency on nose. @@ -6,6 +8,7 @@ import os import doctest +import nose from nose.plugins import doctests as npd from nose.plugins.errorclass import ErrorClass, ErrorClassPlugin from nose.plugins.base import Plugin @@ -251,7 +254,7 @@ class KnownFailure(ErrorClassPlugin): - '''Plugin that installs a KNOWNFAIL error class for the + '''Plugin that installs a KNOWNFAIL error class for the KnownFailureClass exception. When KnownFailureTest is raised, the exception will be logged in the knownfail attribute of the result, 'K' or 'KNOWNFAIL' (verbose) will be output, and the @@ -275,3 +278,25 @@ disable = getattr(options, 'noKnownFail', False) if disable: self.enabled = False + + + +# Because nose currently discards the test result object, but we need +# to return it to the user, override TestProgram.runTests to retain +# the result +class NumpyTestProgram(nose.core.TestProgram): + def runTests(self): + """Run Tests. Returns true on success, false on failure, and + sets self.success to the same value. + """ + if self.testRunner is None: + self.testRunner = nose.core.TextTestRunner(stream=self.config.stream, + verbosity=self.config.verbosity, + config=self.config) + plug_runner = self.config.plugins.prepareTestRunner(self.testRunner) + if plug_runner is not None: + self.testRunner = plug_runner + + self.result = self.testRunner.run(self.test) + self.success = self.result.wasSuccessful() + return self.success Modified: branches/numpy-mingw-w64/numpy/testing/nosetester.py =================================================================== --- branches/numpy-mingw-w64/numpy/testing/nosetester.py 2009-02-20 13:30:20 UTC (rev 6424) +++ branches/numpy-mingw-w64/numpy/testing/nosetester.py 2009-02-20 16:37:01 UTC (rev 6425) @@ -5,7 +5,6 @@ ''' import os import sys -import warnings def get_package_name(filepath): # find the package name given a path name that's part of the package @@ -28,7 +27,6 @@ pkg_name.reverse() return '.'.join(pkg_name) - def import_nose(): """ Import nose only when needed. """ @@ -166,8 +164,8 @@ print "nose version %d.%d.%d" % nose.__versioninfo__ - def test(self, label='fast', verbose=1, extra_argv=None, doctests=False, - coverage=False, **kwargs): + def prepare_test_args(self, label='fast', verbose=1, extra_argv=None, + doctests=False, coverage=False): ''' Run tests for module using nose %(test_header)s @@ -179,39 +177,6 @@ http://nedbatchelder.com/code/modules/coverage.html) ''' - old_args = set(['level', 'verbosity', 'all', 'sys_argv', - 'testcase_pattern']) - unexpected_args = set(kwargs.keys()) - old_args - if len(unexpected_args) > 0: - ua = ', '.join(unexpected_args) - raise TypeError("test() got unexpected arguments: %s" % ua) - - # issue a deprecation warning if any of the pre-1.2 arguments to - # test are given - if old_args.intersection(kwargs.keys()): - warnings.warn("This method's signature will change in the next " \ - "release; the level, verbosity, all, sys_argv, " \ - "and testcase_pattern keyword arguments will be " \ - "removed. Please update your code.", - DeprecationWarning, stacklevel=2) - - # Use old arguments if given (where it makes sense) - # For the moment, level and sys_argv are ignored - - # replace verbose with verbosity - if kwargs.get('verbosity') is not None: - verbose = kwargs.get('verbosity') - # cap verbosity at 3 because nose becomes *very* verbose beyond that - verbose = min(verbose, 3) - - import utils - utils.verbose = verbose - - # if all evaluates as True, omit attribute filter and run doctests - if kwargs.get('all'): - label = '' - doctests = True - # if doctests is in the extra args, remove it and set the doctest # flag so the NumPy doctester is used instead if extra_argv and '--with-doctest' in extra_argv: @@ -221,9 +186,6 @@ argv = self._test_argv(label, verbose, extra_argv) if doctests: argv += ['--with-numpydoctest'] - print "Running unit tests and doctests for %s" % self.package_name - else: - print "Running unit tests for %s" % self.package_name if coverage: argv+=['--cover-package=%s' % self.package_name, '--with-coverage', @@ -237,33 +199,8 @@ argv += ['--exclude','swig_ext'] argv += ['--exclude','array_from_pyobj'] - self._show_system_info() - nose = import_nose() - # Because nose currently discards the test result object, but we need - # to return it to the user, override TestProgram.runTests to retain - # the result - class NumpyTestProgram(nose.core.TestProgram): - def runTests(self): - """Run Tests. Returns true on success, false on failure, and - sets self.success to the same value. - """ - if self.testRunner is None: - self.testRunner = nose.core.TextTestRunner(stream=self.config.stream, - verbosity=self.config.verbosity, - config=self.config) - plug_runner = self.config.plugins.prepareTestRunner(self.testRunner) - if plug_runner is not None: - self.testRunner = plug_runner - self.result = self.testRunner.run(self.test) - self.success = self.result.wasSuccessful() - return self.success - - # reset doctest state on every run - import doctest - doctest.master = None - # construct list of plugins, omitting the existing doctest plugin import nose.plugins.builtin from noseclasses import NumpyDoctest, KnownFailure @@ -271,10 +208,46 @@ for p in nose.plugins.builtin.plugins: plug = p() if plug.name == 'doctest': + # skip the builtin doctest plugin continue plugins.append(plug) + return argv, plugins + + def test(self, label='fast', verbose=1, extra_argv=None, doctests=False, + coverage=False): + ''' Run tests for module using nose + + %(test_header)s + doctests : boolean + If True, run doctests in module, default False + coverage : boolean + If True, report coverage of NumPy code, default False + (Requires the coverage module: + http://nedbatchelder.com/code/modules/coverage.html) + ''' + + # cap verbosity at 3 because nose becomes *very* verbose beyond that + verbose = min(verbose, 3) + + import utils + utils.verbose = verbose + + if doctests: + print "Running unit tests and doctests for %s" % self.package_name + else: + print "Running unit tests for %s" % self.package_name + + self._show_system_info() + + # reset doctest state on every run + import doctest + doctest.master = None + + argv, plugins = self.prepare_test_args(label, verbose, extra_argv, + doctests, coverage) + from noseclasses import NumpyTestProgram t = NumpyTestProgram(argv=argv, exit=False, plugins=plugins) return t.result @@ -286,9 +259,10 @@ print "Running benchmarks for %s" % self.package_name self._show_system_info() - nose = import_nose() argv = self._test_argv(label, verbose, extra_argv) argv += ['--match', r'(?:^|[\\b_\\.%s-])[Bb]ench' % os.sep] + + nose = import_nose() return nose.run(argv=argv) # generate method docstrings Modified: branches/numpy-mingw-w64/numpy/testing/numpytest.py =================================================================== --- branches/numpy-mingw-w64/numpy/testing/numpytest.py 2009-02-20 13:30:20 UTC (rev 6424) +++ branches/numpy-mingw-w64/numpy/testing/numpytest.py 2009-02-20 16:37:01 UTC (rev 6425) @@ -1,91 +1,16 @@ import os -import re import sys -import imp -import types -import unittest import traceback -import warnings -__all__ = ['set_package_path', 'set_local_path', 'restore_path', - 'IgnoreException', 'NumpyTestCase', 'NumpyTest', 'importall',] +__all__ = ['IgnoreException', 'importall',] DEBUG=0 -from numpy.testing.utils import jiffies get_frame = sys._getframe class IgnoreException(Exception): "Ignoring this exception due to disabled feature" -def set_package_path(level=1): - """ Prepend package directory to sys.path. - - set_package_path should be called from a test_file.py that - satisfies the following tree structure: - - //test_file.py - - Then the first existing path name from the following list - - /build/lib.- - /.. - - is prepended to sys.path. - The caller is responsible for removing this path by using - - restore_path() - """ - warnings.warn("set_package_path will be removed in NumPy 1.3; please " - "update your code", DeprecationWarning, stacklevel=2) - - from distutils.util import get_platform - f = get_frame(level) - if f.f_locals['__name__']=='__main__': - testfile = sys.argv[0] - else: - testfile = f.f_locals['__file__'] - d = os.path.dirname(os.path.dirname(os.path.abspath(testfile))) - d1 = os.path.join(d,'build','lib.%s-%s'%(get_platform(),sys.version[:3])) - if not os.path.isdir(d1): - d1 = os.path.dirname(d) - if DEBUG: - print 'Inserting %r to sys.path for test_file %r' % (d1, testfile) - sys.path.insert(0,d1) - return - - -def set_local_path(reldir='', level=1): - """ Prepend local directory to sys.path. - - The caller is responsible for removing this path by using - - restore_path() - """ - warnings.warn("set_local_path will be removed in NumPy 1.3; please " - "update your code", DeprecationWarning, stacklevel=2) - - f = get_frame(level) - if f.f_locals['__name__']=='__main__': - testfile = sys.argv[0] - else: - testfile = f.f_locals['__file__'] - local_path = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(testfile)),reldir)) - if DEBUG: - print 'Inserting %r to sys.path' % (local_path) - sys.path.insert(0,local_path) - return - -def restore_path(): - warnings.warn("restore_path will be removed in NumPy 1.3; please " - "update your code", DeprecationWarning, stacklevel=2) - - if DEBUG: - print 'Removing %r from sys.path' % (sys.path[0]) - del sys.path[0] - return - - def output_exception(printstream = sys.stdout): try: type, value, tb = sys.exc_info() @@ -99,576 +24,6 @@ type = value = tb = None # clean up return - -class _dummy_stream: - def __init__(self,stream): - self.data = [] - self.stream = stream - def write(self,message): - if not self.data and not message.startswith('E'): - self.stream.write(message) - self.stream.flush() - message = '' - self.data.append(message) - def writeln(self,message): - self.write(message+'\n') - def flush(self): - self.stream.flush() - - -class NumpyTestCase (unittest.TestCase): - def __init__(self, *args, **kwds): - warnings.warn("NumpyTestCase will be removed in the next release; please update your code to use nose or unittest", - DeprecationWarning, stacklevel=2) - unittest.TestCase.__init__(self, *args, **kwds) - - def measure(self,code_str,times=1): - """ Return elapsed time for executing code_str in the - namespace of the caller for given times. - """ - frame = get_frame(1) - locs,globs = frame.f_locals,frame.f_globals - code = compile(code_str, - 'NumpyTestCase runner for '+self.__class__.__name__, - 'exec') - i = 0 - elapsed = jiffies() - while i>sys.stderr,yellow_text('Warning: %s' % (message)) - sys.stderr.flush() - def info(self, message): - print>>sys.stdout, message - sys.stdout.flush() - - def rundocs(self, filename=None): - """ Run doc string tests found in filename. - """ - import doctest - if filename is None: - f = get_frame(1) - filename = f.f_globals['__file__'] - name = os.path.splitext(os.path.basename(filename))[0] - path = [os.path.dirname(filename)] - file, pathname, description = imp.find_module(name, path) - try: - m = imp.load_module(name, file, pathname, description) - finally: - file.close() - if sys.version[:3]<'2.4': - doctest.testmod(m, verbose=False) - else: - tests = doctest.DocTestFinder().find(m) - runner = doctest.DocTestRunner(verbose=False) - for test in tests: - runner.run(test) - return - - -def _get_all_method_names(cls): - names = dir(cls) - if sys.version[:3]<='2.1': - for b in cls.__bases__: - for n in dir(b)+_get_all_method_names(b): - if n not in names: - names.append(n) - return names - - -# for debug build--check for memory leaks during the test. -class _NumPyTextTestResult(unittest._TextTestResult): - def startTest(self, test): - unittest._TextTestResult.startTest(self, test) - if self.showAll: - N = len(sys.getobjects(0)) - self._totnumobj = N - self._totrefcnt = sys.gettotalrefcount() - return - - def stopTest(self, test): - if self.showAll: - N = len(sys.getobjects(0)) - self.stream.write("objects: %d ===> %d; " % (self._totnumobj, N)) - self.stream.write("refcnts: %d ===> %d\n" % (self._totrefcnt, - sys.gettotalrefcount())) - return - -class NumPyTextTestRunner(unittest.TextTestRunner): - def _makeResult(self): - return _NumPyTextTestResult(self.stream, self.descriptions, self.verbosity) - - -class NumpyTest: - """ Numpy tests site manager. - - Usage: NumpyTest().test(level=1,verbosity=1) - - is package name or its module object. - - Package is supposed to contain a directory tests/ with test_*.py - files where * refers to the names of submodules. See .rename() - method to redefine name mapping between test_*.py files and names of - submodules. Pattern test_*.py can be overwritten by redefining - .get_testfile() method. - - test_*.py files are supposed to define a classes, derived from - NumpyTestCase or unittest.TestCase, with methods having names - starting with test or bench or check. The names of TestCase classes - must have a prefix test. This can be overwritten by redefining - .check_testcase_name() method. - - And that is it! No need to implement test or test_suite functions - in each .py file. - - Old-style test_suite(level=1) hooks are also supported. - """ - _check_testcase_name = re.compile(r'test.*|Test.*').match - def check_testcase_name(self, name): - """ Return True if name matches TestCase class. - """ - return not not self._check_testcase_name(name) - - testfile_patterns = ['test_%(modulename)s.py'] - def get_testfile(self, module, verbosity = 0): - """ Return path to module test file. - """ - mstr = self._module_str - short_module_name = self._get_short_module_name(module) - d = os.path.split(module.__file__)[0] - test_dir = os.path.join(d,'tests') - local_test_dir = os.path.join(os.getcwd(),'tests') - if os.path.basename(os.path.dirname(local_test_dir)) \ - == os.path.basename(os.path.dirname(test_dir)): - test_dir = local_test_dir - for pat in self.testfile_patterns: - fn = os.path.join(test_dir, pat % {'modulename':short_module_name}) - if os.path.isfile(fn): - return fn - if verbosity>1: - self.warn('No test file found in %s for module %s' \ - % (test_dir, mstr(module))) - return - - def __init__(self, package=None): - warnings.warn("NumpyTest will be removed in the next release; please update your code to use nose or unittest", - DeprecationWarning, stacklevel=2) - if package is None: - from numpy.distutils.misc_util import get_frame - f = get_frame(1) - package = f.f_locals.get('__name__',f.f_globals.get('__name__',None)) - assert package is not None - self.package = package - self._rename_map = {} - - def rename(self, **kws): - """Apply renaming submodule test file test_.py to - test_.py. - - Usage: self.rename(name='newname') before calling the - self.test() method. - - If 'newname' is None, then no tests will be executed for a given - module. - """ - for k,v in kws.items(): - self._rename_map[k] = v - return - - def _module_str(self, module): - filename = module.__file__[-30:] - if filename!=module.__file__: - filename = '...'+filename - return '' % (module.__name__, filename) - - def _get_method_names(self,clsobj,level): - names = [] - for mthname in _get_all_method_names(clsobj): - if mthname[:5] not in ['bench','check'] \ - and mthname[:4] not in ['test']: - continue - mth = getattr(clsobj, mthname) - if type(mth) is not types.MethodType: - continue - d = mth.im_func.func_defaults - if d is not None: - mthlevel = d[0] - else: - mthlevel = 1 - if level>=mthlevel: - if mthname not in names: - names.append(mthname) - for base in clsobj.__bases__: - for n in self._get_method_names(base,level): - if n not in names: - names.append(n) - return names - - def _get_short_module_name(self, module): - d,f = os.path.split(module.__file__) - short_module_name = os.path.splitext(os.path.basename(f))[0] - if short_module_name=='__init__': - short_module_name = module.__name__.split('.')[-1] - short_module_name = self._rename_map.get(short_module_name,short_module_name) - return short_module_name - - def _get_module_tests(self, module, level, verbosity): - mstr = self._module_str - - short_module_name = self._get_short_module_name(module) - if short_module_name is None: - return [] - - test_file = self.get_testfile(module, verbosity) - - if test_file is None: - return [] - - if not os.path.isfile(test_file): - if short_module_name[:5]=='info_' \ - and short_module_name[5:]==module.__name__.split('.')[-2]: - return [] - if short_module_name in ['__cvs_version__','__svn_version__']: - return [] - if short_module_name[-8:]=='_version' \ - and short_module_name[:-8]==module.__name__.split('.')[-2]: - return [] - if verbosity>1: - self.warn(test_file) - self.warn(' !! No test file %r found for %s' \ - % (os.path.basename(test_file), mstr(module))) - return [] - - if test_file in self.test_files: - return [] - - parent_module_name = '.'.join(module.__name__.split('.')[:-1]) - test_module_name,ext = os.path.splitext(os.path.basename(test_file)) - test_dir_module = parent_module_name+'.tests' - test_module_name = test_dir_module+'.'+test_module_name - - if test_dir_module not in sys.modules: - sys.modules[test_dir_module] = imp.new_module(test_dir_module) - - old_sys_path = sys.path[:] - try: - f = open(test_file,'r') - test_module = imp.load_module(test_module_name, f, - test_file, ('.py', 'r', 1)) - f.close() - except: - sys.path[:] = old_sys_path - self.warn('FAILURE importing tests for %s' % (mstr(module))) - output_exception(sys.stderr) - return [] - sys.path[:] = old_sys_path - - self.test_files.append(test_file) - - return self._get_suite_list(test_module, level, module.__name__) - - def _get_suite_list(self, test_module, level, module_name='__main__', - verbosity=1): - suite_list = [] - if hasattr(test_module, 'test_suite'): - suite_list.extend(test_module.test_suite(level)._tests) - for name in dir(test_module): - obj = getattr(test_module, name) - if type(obj) is not type(unittest.TestCase) \ - or not issubclass(obj, unittest.TestCase) \ - or not self.check_testcase_name(obj.__name__): - continue - for mthname in self._get_method_names(obj,level): - suite = obj(mthname) - if getattr(suite,'isrunnable',lambda mthname:1)(mthname): - suite_list.append(suite) - matched_suite_list = [suite for suite in suite_list \ - if self.testcase_match(suite.id()\ - .replace('__main__.',''))] - if verbosity>=0: - self.info(' Found %s/%s tests for %s' \ - % (len(matched_suite_list), len(suite_list), module_name)) - return matched_suite_list - - def _test_suite_from_modules(self, this_package, level, verbosity): - package_name = this_package.__name__ - modules = [] - for name, module in sys.modules.items(): - if not name.startswith(package_name) or module is None: - continue - if not hasattr(module,'__file__'): - continue - if os.path.basename(os.path.dirname(module.__file__))=='tests': - continue - modules.append((name, module)) - - modules.sort() - modules = [m[1] for m in modules] - - self.test_files = [] - suites = [] - for module in modules: - suites.extend(self._get_module_tests(module, abs(level), verbosity)) - - suites.extend(self._get_suite_list(sys.modules[package_name], - abs(level), verbosity=verbosity)) - return unittest.TestSuite(suites) - - def _test_suite_from_all_tests(self, this_package, level, verbosity): - importall(this_package) - package_name = this_package.__name__ - - # Find all tests/ directories under the package - test_dirs_names = {} - for name, module in sys.modules.items(): - if not name.startswith(package_name) or module is None: - continue - if not hasattr(module, '__file__'): - continue - d = os.path.dirname(module.__file__) - if os.path.basename(d)=='tests': - continue - d = os.path.join(d, 'tests') - if not os.path.isdir(d): - continue - if d in test_dirs_names: - continue - test_dir_module = '.'.join(name.split('.')[:-1]+['tests']) - test_dirs_names[d] = test_dir_module - - test_dirs = test_dirs_names.keys() - test_dirs.sort() - - # For each file in each tests/ directory with a test case in it, - # import the file, and add the test cases to our list - suite_list = [] - testcase_match = re.compile(r'\s*class\s+\w+\s*\(.*TestCase').match - for test_dir in test_dirs: - test_dir_module = test_dirs_names[test_dir] - - if test_dir_module not in sys.modules: - sys.modules[test_dir_module] = imp.new_module(test_dir_module) - - for fn in os.listdir(test_dir): - base, ext = os.path.splitext(fn) - if ext != '.py': - continue - f = os.path.join(test_dir, fn) - - # check that file contains TestCase class definitions: - fid = open(f, 'r') - skip = True - for line in fid: - if testcase_match(line): - skip = False - break - fid.close() - if skip: - continue - - # import the test file - n = test_dir_module + '.' + base - # in case test files import local modules - sys.path.insert(0, test_dir) - fo = None - try: - try: - fo = open(f) - test_module = imp.load_module(n, fo, f, - ('.py', 'U', 1)) - except Exception, msg: - print 'Failed importing %s: %s' % (f,msg) - continue - finally: - if fo: - fo.close() - del sys.path[0] - - suites = self._get_suite_list(test_module, level, - module_name=n, - verbosity=verbosity) - suite_list.extend(suites) - - all_tests = unittest.TestSuite(suite_list) - return all_tests - - def test(self, level=1, verbosity=1, all=True, sys_argv=[], - testcase_pattern='.*'): - """Run Numpy module test suite with level and verbosity. - - level: - None --- do nothing, return None - < 0 --- scan for tests of level=abs(level), - don't run them, return TestSuite-list - > 0 --- scan for tests of level, run them, - return TestRunner - > 10 --- run all tests (same as specifying all=True). - (backward compatibility). - - verbosity: - >= 0 --- show information messages - > 1 --- show warnings on missing tests - - all: - True --- run all test files (like self.testall()) - False (default) --- only run test files associated with a module - - sys_argv --- replacement of sys.argv[1:] during running - tests. - - testcase_pattern --- run only tests that match given pattern. - - It is assumed (when all=False) that package tests suite follows - the following convention: for each package module, there exists - file /tests/test_.py that defines - TestCase classes (with names having prefix 'test_') with methods - (with names having prefixes 'check_' or 'bench_'); each of these - methods are called when running unit tests. - """ - if level is None: # Do nothing. - return - - if isinstance(self.package, str): - exec 'import %s as this_package' % (self.package) - else: - this_package = self.package - - self.testcase_match = re.compile(testcase_pattern).match - - if all: - all_tests = self._test_suite_from_all_tests(this_package, - level, verbosity) - else: - all_tests = self._test_suite_from_modules(this_package, - level, verbosity) - - if level < 0: - return all_tests - - runner = unittest.TextTestRunner(verbosity=verbosity) - old_sys_argv = sys.argv[1:] - sys.argv[1:] = sys_argv - # Use the builtin displayhook. If the tests are being run - # under IPython (for instance), any doctest test suites will - # fail otherwise. - old_displayhook = sys.displayhook - sys.displayhook = sys.__displayhook__ - try: - r = runner.run(all_tests) - finally: - sys.displayhook = old_displayhook - sys.argv[1:] = old_sys_argv - return r - - def testall(self, level=1,verbosity=1): - """ Run Numpy module test suite with level and verbosity. - - level: - None --- do nothing, return None - < 0 --- scan for tests of level=abs(level), - don't run them, return TestSuite-list - > 0 --- scan for tests of level, run them, - return TestRunner - - verbosity: - >= 0 --- show information messages - > 1 --- show warnings on missing tests - - Different from .test(..) method, this method looks for - TestCase classes from all files in /tests/ - directory and no assumptions are made for naming the - TestCase classes or their methods. - """ - return self.test(level=level, verbosity=verbosity, all=True) - - def run(self): - """ Run Numpy module test suite with level and verbosity - taken from sys.argv. Requires optparse module. - """ - - # delayed import of shlex to reduce startup time - import shlex - - try: - from optparse import OptionParser - except ImportError: - self.warn('Failed to import optparse module, ignoring.') - return self.test() - usage = r'usage: %prog [-v ] [-l ]'\ - r' [-s ""]'\ - r' [-t ""]' - parser = OptionParser(usage) - parser.add_option("-v", "--verbosity", - action="store", - dest="verbosity", - default=1, - type='int') - parser.add_option("-l", "--level", - action="store", - dest="level", - default=1, - type='int') - parser.add_option("-s", "--sys-argv", - action="store", - dest="sys_argv", - default='', - type='string') - parser.add_option("-t", "--testcase-pattern", - action="store", - dest="testcase_pattern", - default=r'.*', - type='string') - (options, args) = parser.parse_args() - return self.test(options.level,options.verbosity, - sys_argv=shlex.split(options.sys_argv or ''), - testcase_pattern=options.testcase_pattern) - - def warn(self, message): - from numpy.distutils.misc_util import yellow_text - print>>sys.stderr,yellow_text('Warning: %s' % (message)) - sys.stderr.flush() - def info(self, message): - print>>sys.stdout, message - sys.stdout.flush() - def importall(package): """ Try recursively to import all subpackages under package. Deleted: branches/numpy-mingw-w64/numpy/testing/parametric.py =================================================================== --- branches/numpy-mingw-w64/numpy/testing/parametric.py 2009-02-20 13:30:20 UTC (rev 6424) +++ branches/numpy-mingw-w64/numpy/testing/parametric.py 2009-02-20 16:37:01 UTC (rev 6425) @@ -1,311 +0,0 @@ -"""Support for parametric tests in unittest. - -:Author: Fernando Perez - -Purpose -======= - -Briefly, the main class in this module allows you to easily and cleanly -(without the gross name-mangling hacks that are normally needed) to write -unittest TestCase classes that have parametrized tests. That is, tests which -consist of multiple sub-tests that scan for example a parameter range, but -where you want each sub-test to: - -* count as a separate test in the statistics. - -* be run even if others in the group error out or fail. - - -The class offers a simple name-based convention to create such tests (see -simple example at the end), in one of two ways: - -* Each sub-test in a group can be run fully independently, with the - setUp/tearDown methods being called each time. - -* The whole group can be run with setUp/tearDown being called only once for the - group. This lets you conveniently reuse state that may be very expensive to - compute for multiple tests. Be careful not to corrupt it!!! - - -Caveats -======= - -This code relies on implementation details of the unittest module (some key -methods are heavily modified versions of those, after copying them in). So it -may well break either if you make sophisticated use of the unittest APIs, or if -unittest itself changes in the future. I have only tested this with Python -2.5. - -""" -__docformat__ = "restructuredtext en" - -import unittest -import warnings - -class _ParametricTestCase(unittest.TestCase): - """TestCase subclass with support for parametric tests. - - Subclasses of this class can implement test methods that return a list of - tests and arguments to call those with, to do parametric testing (often - also called 'data driven' testing.""" - - #: Prefix for tests with independent state. These methods will be run with - #: a separate setUp/tearDown call for each test in the group. - _indepParTestPrefix = 'testip' - - #: Prefix for tests with shared state. These methods will be run with - #: a single setUp/tearDown call for the whole group. This is useful when - #: writing a group of tests for which the setup is expensive and one wants - #: to actually share that state. Use with care (especially be careful not - #: to mutate the state you are using, which will alter later tests). - _shareParTestPrefix = 'testsp' - - def __init__(self, methodName = 'runTest'): - warnings.warn("ParametricTestCase will be removed in the next NumPy " - "release", DeprecationWarning) - unittest.TestCase.__init__(self, methodName) - - def exec_test(self,test,args,result): - """Execute a single test. Returns a success boolean""" - - ok = False - try: - test(*args) - ok = True - except self.failureException: - result.addFailure(self, self._exc_info()) - except KeyboardInterrupt: - raise - except: - result.addError(self, self._exc_info()) - - return ok - - def set_testMethodDoc(self,doc): - self._testMethodDoc = doc - self._TestCase__testMethodDoc = doc - - def get_testMethodDoc(self): - return self._testMethodDoc - - testMethodDoc = property(fset=set_testMethodDoc, fget=get_testMethodDoc) - - def get_testMethodName(self): - try: - return getattr(self,"_testMethodName") - except: - return getattr(self,"_TestCase__testMethodName") - - testMethodName = property(fget=get_testMethodName) - - def run_test(self, testInfo,result): - """Run one test with arguments""" - - test,args = testInfo[0],testInfo[1:] - - # Reset the doc attribute to be the docstring of this particular test, - # so that in error messages it prints the actual test's docstring and - # not that of the test factory. - self.testMethodDoc = test.__doc__ - result.startTest(self) - try: - try: - self.setUp() - except KeyboardInterrupt: - raise - except: - result.addError(self, self._exc_info()) - return - - ok = self.exec_test(test,args,result) - - try: - self.tearDown() - except KeyboardInterrupt: - raise - except: - result.addError(self, self._exc_info()) - ok = False - if ok: result.addSuccess(self) - finally: - result.stopTest(self) - - def run_tests(self, tests,result): - """Run many tests with a common setUp/tearDown. - - The entire set of tests is run with a single setUp/tearDown call.""" - - try: - self.setUp() - except KeyboardInterrupt: - raise - except: - result.testsRun += 1 - result.addError(self, self._exc_info()) - return - - saved_doc = self.testMethodDoc - - try: - # Run all the tests specified - for testInfo in tests: - test,args = testInfo[0],testInfo[1:] - - # Set the doc argument for this test. Note that even if we do - # this, the fail/error tracebacks still print the docstring for - # the parent factory, because they only generate the message at - # the end of the run, AFTER we've restored it. There is no way - # to tell the unittest system (without overriding a lot of - # stuff) to extract this information right away, the logic is - # hardcoded to pull it later, since unittest assumes it doesn't - # change. - self.testMethodDoc = test.__doc__ - result.startTest(self) - ok = self.exec_test(test,args,result) - if ok: result.addSuccess(self) - - finally: - # Restore docstring info and run tearDown once only. - self.testMethodDoc = saved_doc - try: - self.tearDown() - except KeyboardInterrupt: - raise - except: - result.addError(self, self._exc_info()) - - def run(self, result=None): - """Test runner.""" - - #print - #print '*** run for method:',self._testMethodName # dbg - #print '*** doc:',self._testMethodDoc # dbg - - if result is None: result = self.defaultTestResult() - - # Independent tests: each gets its own setup/teardown - if self.testMethodName.startswith(self._indepParTestPrefix): - for t in getattr(self,self.testMethodName)(): - self.run_test(t,result) - # Shared-state test: single setup/teardown for all - elif self.testMethodName.startswith(self._shareParTestPrefix): - tests = getattr(self,self.testMethodName,'runTest')() - self.run_tests(tests,result) - # Normal unittest Test methods - else: - unittest.TestCase.run(self,result) - -# The underscore was added to the class name to keep nose from trying -# to run the test class (nose ignores class names that begin with an -# underscore by default). -ParametricTestCase = _ParametricTestCase - -############################################################################# -# Quick and dirty interactive example/test -if __name__ == '__main__': - - class ExampleTestCase(ParametricTestCase): - - #------------------------------------------------------------------- - # An instrumented setUp method so we can see when it gets called and - # how many times per instance - counter = 0 - - def setUp(self): - self.counter += 1 - print 'setUp count: %2s for: %s' % (self.counter, - self.testMethodDoc) - - #------------------------------------------------------------------- - # A standard test method, just like in the unittest docs. - def test_foo(self): - """Normal test for feature foo.""" - pass - - #------------------------------------------------------------------- - # Testing methods that need parameters. These can NOT be named test*, - # since they would be picked up by unittest and called without - # arguments. Instead, call them anything else (I use tst*) and then - # load them via the factories below. - def tstX(self,i): - "Test feature X with parameters." - print 'tstX, i=',i - if i==1 or i==3: - # Test fails - self.fail('i is bad, bad: %s' % i) - - def tstY(self,i): - "Test feature Y with parameters." - print 'tstY, i=',i - if i==1: - # Force an error - 1/0 - - def tstXX(self,i,j): - "Test feature XX with parameters." - print 'tstXX, i=',i,'j=',j - if i==1: - # Test fails - self.fail('i is bad, bad: %s' % i) - - def tstYY(self,i): - "Test feature YY with parameters." - print 'tstYY, i=',i - if i==2: - # Force an error - 1/0 - - def tstZZ(self): - """Test feature ZZ without parameters, needs multiple runs. - - This could be a random test that you want to run multiple times.""" - pass - - #------------------------------------------------------------------- - # Parametric test factories that create the test groups to call the - # above tst* methods with their required arguments. - def testip(self): - """Independent parametric test factory. - - A separate setUp() call is made for each test returned by this - method. - - You must return an iterable (list or generator is fine) containing - tuples with the actual method to be called as the first argument, - and the arguments for that call later.""" - return [(self.tstX,i) for i in range(5)] - - def testip2(self): - """Another independent parametric test factory""" - return [(self.tstY,i) for i in range(5)] - - def testip3(self): - """Test factory combining different subtests. - - This one shows how to assemble calls to different tests.""" - return [(self.tstX,3),(self.tstX,9),(self.tstXX,4,10), - (self.tstZZ,),(self.tstZZ,)] - - def testsp(self): - """Shared parametric test factory - - A single setUp() call is made for all the tests returned by this - method. - """ - return [(self.tstXX,i,i+1) for i in range(5)] - - def testsp2(self): - """Another shared parametric test factory""" - return [(self.tstYY,i) for i in range(5)] - - def testsp3(self): - """Another shared parametric test factory. - - This one simply calls the same test multiple times, without any - arguments. Note that you must still return tuples, even if there - are no arguments.""" - return [(self.tstZZ,) for i in range(10)] - - - # This test class runs normally under unittest's default runner - unittest.main() Copied: branches/numpy-mingw-w64/numpy/testing/tests/test_decorators.py (from rev 6424, trunk/numpy/testing/tests/test_decorators.py) From numpy-svn at scipy.org Fri Feb 20 17:28:23 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Fri, 20 Feb 2009 16:28:23 -0600 (CST) Subject: [Numpy-svn] r6426 - trunk/numpy/core/src Message-ID: <20090220222823.45DD1C7C011@scipy.org> Author: charris Date: 2009-02-20 16:28:05 -0600 (Fri, 20 Feb 2009) New Revision: 6426 Modified: trunk/numpy/core/src/arrayobject.c Log: Change indentation of switch statements. Modified: trunk/numpy/core/src/arrayobject.c =================================================================== --- trunk/numpy/core/src/arrayobject.c 2009-02-20 16:37:01 UTC (rev 6425) +++ trunk/numpy/core/src/arrayobject.c 2009-02-20 22:28:05 UTC (rev 6426) @@ -5099,157 +5099,157 @@ int typenum; switch (cmp_op) { - case Py_LT: - result = PyArray_GenericBinaryFunction(self, other, - n_ops.less); - break; - case Py_LE: - result = PyArray_GenericBinaryFunction(self, other, - n_ops.less_equal); - break; - case Py_EQ: - if (other == Py_None) { + case Py_LT: + result = PyArray_GenericBinaryFunction(self, other, + n_ops.less); + break; + case Py_LE: + result = PyArray_GenericBinaryFunction(self, other, + n_ops.less_equal); + break; + case Py_EQ: + if (other == Py_None) { + Py_INCREF(Py_False); + return Py_False; + } + /* Try to convert other to an array */ + if (!PyArray_Check(other)) { + typenum = self->descr->type_num; + if (typenum != PyArray_OBJECT) { + typenum = PyArray_NOTYPE; + } + array_other = PyArray_FromObject(other, + typenum, 0, 0); + /* + * If not successful, then return False. This fixes code + * that used to allow equality comparisons between arrays + * and other objects which would give a result of False. + */ + if ((array_other == NULL) || + (array_other == Py_None)) { + Py_XDECREF(array_other); + PyErr_Clear(); Py_INCREF(Py_False); return Py_False; } - /* Try to convert other to an array */ - if (!PyArray_Check(other)) { - typenum = self->descr->type_num; - if (typenum != PyArray_OBJECT) { - typenum = PyArray_NOTYPE; - } - array_other = PyArray_FromObject(other, - typenum, 0, 0); - /* - * If not successful, then return False. This fixes code - * that used to allow equality comparisons between arrays - * and other objects which would give a result of False. - */ - if ((array_other == NULL) || - (array_other == Py_None)) { - Py_XDECREF(array_other); - PyErr_Clear(); - Py_INCREF(Py_False); - return Py_False; - } + } + else { + Py_INCREF(other); + array_other = other; + } + result = PyArray_GenericBinaryFunction(self, + array_other, + n_ops.equal); + if ((result == Py_NotImplemented) && + (self->descr->type_num == PyArray_VOID)) { + int _res; + + _res = PyObject_RichCompareBool + ((PyObject *)self->descr, + (PyObject *)\ + PyArray_DESCR(array_other), + Py_EQ); + if (_res < 0) { + Py_DECREF(result); + Py_DECREF(array_other); + return NULL; } - else { - Py_INCREF(other); - array_other = other; + if (_res) { + Py_DECREF(result); + result = _void_compare + (self, + (PyArrayObject *)array_other, + cmp_op); + Py_DECREF(array_other); } - result = PyArray_GenericBinaryFunction(self, - array_other, - n_ops.equal); - if ((result == Py_NotImplemented) && - (self->descr->type_num == PyArray_VOID)) { - int _res; - - _res = PyObject_RichCompareBool - ((PyObject *)self->descr, - (PyObject *)\ - PyArray_DESCR(array_other), - Py_EQ); - if (_res < 0) { - Py_DECREF(result); - Py_DECREF(array_other); - return NULL; - } - if (_res) { - Py_DECREF(result); - result = _void_compare - (self, - (PyArrayObject *)array_other, - cmp_op); - Py_DECREF(array_other); - } - return result; + return result; + } + /* + * If the comparison results in NULL, then the + * two array objects can not be compared together so + * return zero + */ + Py_DECREF(array_other); + if (result == NULL) { + PyErr_Clear(); + Py_INCREF(Py_False); + return Py_False; + } + break; + case Py_NE: + if (other == Py_None) { + Py_INCREF(Py_True); + return Py_True; + } + /* Try to convert other to an array */ + if (!PyArray_Check(other)) { + typenum = self->descr->type_num; + if (typenum != PyArray_OBJECT) { + typenum = PyArray_NOTYPE; } + array_other = PyArray_FromObject(other, typenum, 0, 0); /* - * If the comparison results in NULL, then the - * two array objects can not be compared together so - * return zero + * If not successful, then objects cannot be + * compared and cannot be equal, therefore, + * return True; */ - Py_DECREF(array_other); - if (result == NULL) { + if ((array_other == NULL) || (array_other == Py_None)) { + Py_XDECREF(array_other); PyErr_Clear(); - Py_INCREF(Py_False); - return Py_False; - } - break; - case Py_NE: - if (other == Py_None) { Py_INCREF(Py_True); return Py_True; } - /* Try to convert other to an array */ - if (!PyArray_Check(other)) { - typenum = self->descr->type_num; - if (typenum != PyArray_OBJECT) { - typenum = PyArray_NOTYPE; - } - array_other = PyArray_FromObject(other, typenum, 0, 0); - /* - * If not successful, then objects cannot be - * compared and cannot be equal, therefore, - * return True; - */ - if ((array_other == NULL) || (array_other == Py_None)) { - Py_XDECREF(array_other); - PyErr_Clear(); - Py_INCREF(Py_True); - return Py_True; - } + } + else { + Py_INCREF(other); + array_other = other; + } + result = PyArray_GenericBinaryFunction(self, + array_other, + n_ops.not_equal); + if ((result == Py_NotImplemented) && + (self->descr->type_num == PyArray_VOID)) { + int _res; + + _res = PyObject_RichCompareBool( + (PyObject *)self->descr, + (PyObject *) + PyArray_DESCR(array_other), + Py_EQ); + if (_res < 0) { + Py_DECREF(result); + Py_DECREF(array_other); + return NULL; } - else { - Py_INCREF(other); - array_other = other; + if (_res) { + Py_DECREF(result); + result = _void_compare( + self, + (PyArrayObject *)array_other, + cmp_op); + Py_DECREF(array_other); } - result = PyArray_GenericBinaryFunction(self, - array_other, - n_ops.not_equal); - if ((result == Py_NotImplemented) && - (self->descr->type_num == PyArray_VOID)) { - int _res; + return result; + } - _res = PyObject_RichCompareBool( - (PyObject *)self->descr, - (PyObject *) - PyArray_DESCR(array_other), - Py_EQ); - if (_res < 0) { - Py_DECREF(result); - Py_DECREF(array_other); - return NULL; - } - if (_res) { - Py_DECREF(result); - result = _void_compare( - self, - (PyArrayObject *)array_other, - cmp_op); - Py_DECREF(array_other); - } - return result; - } - - Py_DECREF(array_other); - if (result == NULL) { - PyErr_Clear(); - Py_INCREF(Py_True); - return Py_True; - } - break; - case Py_GT: - result = PyArray_GenericBinaryFunction(self, other, - n_ops.greater); - break; - case Py_GE: - result = PyArray_GenericBinaryFunction(self, other, - n_ops.greater_equal); - break; - default: - result = Py_NotImplemented; - Py_INCREF(result); + Py_DECREF(array_other); + if (result == NULL) { + PyErr_Clear(); + Py_INCREF(Py_True); + return Py_True; + } + break; + case Py_GT: + result = PyArray_GenericBinaryFunction(self, other, + n_ops.greater); + break; + case Py_GE: + result = PyArray_GenericBinaryFunction(self, other, + n_ops.greater_equal); + break; + default: + result = Py_NotImplemented; + Py_INCREF(result); } if (result == Py_NotImplemented) { /* Try to handle string comparisons */ @@ -8886,115 +8886,115 @@ typechar = str[0]; size = atoi(str + 1); switch (typechar) { - case 'b': - if (size == sizeof(Bool)) { - type_num = PyArray_BOOL; - } - else { - PyErr_SetString(PyExc_ValueError, msg); - return NULL; - } - break; - case 'u': - if (size == sizeof(uintp)) { - type_num = PyArray_UINTP; - } - else if (size == sizeof(char)) { - type_num = PyArray_UBYTE; - } - else if (size == sizeof(short)) { - type_num = PyArray_USHORT; - } - else if (size == sizeof(ulong)) { - type_num = PyArray_ULONG; - } - else if (size == sizeof(int)) { - type_num = PyArray_UINT; - } - else if (size == sizeof(ulonglong)) { - type_num = PyArray_ULONGLONG; - } - else { - PyErr_SetString(PyExc_ValueError, msg); - return NULL; - } - break; - case 'i': - if (size == sizeof(intp)) { - type_num = PyArray_INTP; - } - else if (size == sizeof(char)) { - type_num = PyArray_BYTE; - } - else if (size == sizeof(short)) { - type_num = PyArray_SHORT; - } - else if (size == sizeof(long)) { - type_num = PyArray_LONG; - } - else if (size == sizeof(int)) { - type_num = PyArray_INT; - } - else if (size == sizeof(longlong)) { - type_num = PyArray_LONGLONG; - } - else { - PyErr_SetString(PyExc_ValueError, msg); - return NULL; - } - break; - case 'f': - if (size == sizeof(float)) { - type_num = PyArray_FLOAT; - } - else if (size == sizeof(double)) { - type_num = PyArray_DOUBLE; - } - else if (size == sizeof(longdouble)) { - type_num = PyArray_LONGDOUBLE; - } - else { - PyErr_SetString(PyExc_ValueError, msg); - return NULL; - } - break; - case 'c': - if (size == sizeof(float)*2) { - type_num = PyArray_CFLOAT; - } - else if (size == sizeof(double)*2) { - type_num = PyArray_CDOUBLE; - } - else if (size == sizeof(longdouble)*2) { - type_num = PyArray_CLONGDOUBLE; - } - else { - PyErr_SetString(PyExc_ValueError, msg); - return NULL; - } - break; - case 'O': - if (size == sizeof(PyObject *)) { - type_num = PyArray_OBJECT; - } - else { - PyErr_SetString(PyExc_ValueError, msg); - return NULL; - } - break; - case PyArray_STRINGLTR: - type_num = PyArray_STRING; - break; - case PyArray_UNICODELTR: - type_num = PyArray_UNICODE; - size <<= 2; - break; - case 'V': - type_num = PyArray_VOID; - break; - default: + case 'b': + if (size == sizeof(Bool)) { + type_num = PyArray_BOOL; + } + else { PyErr_SetString(PyExc_ValueError, msg); return NULL; + } + break; + case 'u': + if (size == sizeof(uintp)) { + type_num = PyArray_UINTP; + } + else if (size == sizeof(char)) { + type_num = PyArray_UBYTE; + } + else if (size == sizeof(short)) { + type_num = PyArray_USHORT; + } + else if (size == sizeof(ulong)) { + type_num = PyArray_ULONG; + } + else if (size == sizeof(int)) { + type_num = PyArray_UINT; + } + else if (size == sizeof(ulonglong)) { + type_num = PyArray_ULONGLONG; + } + else { + PyErr_SetString(PyExc_ValueError, msg); + return NULL; + } + break; + case 'i': + if (size == sizeof(intp)) { + type_num = PyArray_INTP; + } + else if (size == sizeof(char)) { + type_num = PyArray_BYTE; + } + else if (size == sizeof(short)) { + type_num = PyArray_SHORT; + } + else if (size == sizeof(long)) { + type_num = PyArray_LONG; + } + else if (size == sizeof(int)) { + type_num = PyArray_INT; + } + else if (size == sizeof(longlong)) { + type_num = PyArray_LONGLONG; + } + else { + PyErr_SetString(PyExc_ValueError, msg); + return NULL; + } + break; + case 'f': + if (size == sizeof(float)) { + type_num = PyArray_FLOAT; + } + else if (size == sizeof(double)) { + type_num = PyArray_DOUBLE; + } + else if (size == sizeof(longdouble)) { + type_num = PyArray_LONGDOUBLE; + } + else { + PyErr_SetString(PyExc_ValueError, msg); + return NULL; + } + break; + case 'c': + if (size == sizeof(float)*2) { + type_num = PyArray_CFLOAT; + } + else if (size == sizeof(double)*2) { + type_num = PyArray_CDOUBLE; + } + else if (size == sizeof(longdouble)*2) { + type_num = PyArray_CLONGDOUBLE; + } + else { + PyErr_SetString(PyExc_ValueError, msg); + return NULL; + } + break; + case 'O': + if (size == sizeof(PyObject *)) { + type_num = PyArray_OBJECT; + } + else { + PyErr_SetString(PyExc_ValueError, msg); + return NULL; + } + break; + case PyArray_STRINGLTR: + type_num = PyArray_STRING; + break; + case PyArray_UNICODELTR: + type_num = PyArray_UNICODE; + size <<= 2; + break; + case 'V': + type_num = PyArray_VOID; + break; + default: + PyErr_SetString(PyExc_ValueError, msg); + return NULL; } descr = PyArray_DescrFromType(type_num); @@ -9636,88 +9636,88 @@ Py_DECREF(to); switch(fromtype) { - case PyArray_BYTE: - case PyArray_SHORT: - case PyArray_INT: - case PyArray_LONG: - case PyArray_LONGLONG: - if (PyTypeNum_ISINTEGER(totype)) { - if (PyTypeNum_ISUNSIGNED(totype)) { - return 0; - } - else { - return telsize >= felsize; - } + case PyArray_BYTE: + case PyArray_SHORT: + case PyArray_INT: + case PyArray_LONG: + case PyArray_LONGLONG: + if (PyTypeNum_ISINTEGER(totype)) { + if (PyTypeNum_ISUNSIGNED(totype)) { + return 0; } - else if (PyTypeNum_ISFLOAT(totype)) { - if (felsize < 8) { - return telsize > felsize; - } - else { - return telsize >= felsize; - } + else { + return telsize >= felsize; } - else if (PyTypeNum_ISCOMPLEX(totype)) { - if (felsize < 8) { - return (telsize >> 1) > felsize; - } - else { - return (telsize >> 1) >= felsize; - } + } + else if (PyTypeNum_ISFLOAT(totype)) { + if (felsize < 8) { + return telsize > felsize; } else { - return totype > fromtype; + return telsize >= felsize; } - case PyArray_UBYTE: - case PyArray_USHORT: - case PyArray_UINT: - case PyArray_ULONG: - case PyArray_ULONGLONG: - if (PyTypeNum_ISINTEGER(totype)) { - if (PyTypeNum_ISSIGNED(totype)) { - return telsize > felsize; - } - else { - return telsize >= felsize; - } + } + else if (PyTypeNum_ISCOMPLEX(totype)) { + if (felsize < 8) { + return (telsize >> 1) > felsize; } - else if (PyTypeNum_ISFLOAT(totype)) { - if (felsize < 8) { - return telsize > felsize; - } - else { - return telsize >= felsize; - } + else { + return (telsize >> 1) >= felsize; } - else if (PyTypeNum_ISCOMPLEX(totype)) { - if (felsize < 8) { - return (telsize >> 1) > felsize; - } - else { - return (telsize >> 1) >= felsize; - } + } + else { + return totype > fromtype; + } + case PyArray_UBYTE: + case PyArray_USHORT: + case PyArray_UINT: + case PyArray_ULONG: + case PyArray_ULONGLONG: + if (PyTypeNum_ISINTEGER(totype)) { + if (PyTypeNum_ISSIGNED(totype)) { + return telsize > felsize; } else { - return totype > fromtype; + return telsize >= felsize; } - case PyArray_FLOAT: - case PyArray_DOUBLE: - case PyArray_LONGDOUBLE: - if (PyTypeNum_ISCOMPLEX(totype)) { - return (telsize >> 1) >= felsize; + } + else if (PyTypeNum_ISFLOAT(totype)) { + if (felsize < 8) { + return telsize > felsize; } else { - return totype > fromtype; + return telsize >= felsize; } - case PyArray_CFLOAT: - case PyArray_CDOUBLE: - case PyArray_CLONGDOUBLE: + } + else if (PyTypeNum_ISCOMPLEX(totype)) { + if (felsize < 8) { + return (telsize >> 1) > felsize; + } + else { + return (telsize >> 1) >= felsize; + } + } + else { return totype > fromtype; - case PyArray_STRING: - case PyArray_UNICODE: + } + case PyArray_FLOAT: + case PyArray_DOUBLE: + case PyArray_LONGDOUBLE: + if (PyTypeNum_ISCOMPLEX(totype)) { + return (telsize >> 1) >= felsize; + } + else { return totype > fromtype; - default: - return 0; + } + case PyArray_CFLOAT: + case PyArray_CDOUBLE: + case PyArray_CLONGDOUBLE: + return totype > fromtype; + case PyArray_STRING: + case PyArray_UNICODE: + return totype > fromtype; + default: + return 0; } } @@ -12505,38 +12505,38 @@ return NULL; } switch (PyTuple_GET_SIZE(PyTuple_GET_ITEM(args,0))) { - case 8: - if (!PyArg_ParseTuple(args, "(icOOOiii)", &version, &endian, - &subarray, &names, &fields, &elsize, - &alignment, &dtypeflags)) { - return NULL; - } - break; - case 7: - if (!PyArg_ParseTuple(args, "(icOOOii)", &version, &endian, - &subarray, &names, &fields, &elsize, - &alignment)) { - return NULL; - } - break; - case 6: - if (!PyArg_ParseTuple(args, "(icOOii)", &version, - &endian, &subarray, &fields, - &elsize, &alignment)) { - PyErr_Clear(); - } - break; - case 5: - version = 0; - if (!PyArg_ParseTuple(args, "(cOOii)", - &endian, &subarray, &fields, &elsize, - &alignment)) { - return NULL; - } - break; - default: - /* raise an error */ - version = -1; + case 8: + if (!PyArg_ParseTuple(args, "(icOOOiii)", &version, &endian, + &subarray, &names, &fields, &elsize, + &alignment, &dtypeflags)) { + return NULL; + } + break; + case 7: + if (!PyArg_ParseTuple(args, "(icOOOii)", &version, &endian, + &subarray, &names, &fields, &elsize, + &alignment)) { + return NULL; + } + break; + case 6: + if (!PyArg_ParseTuple(args, "(icOOii)", &version, + &endian, &subarray, &fields, + &elsize, &alignment)) { + PyErr_Clear(); + } + break; + case 5: + version = 0; + if (!PyArg_ParseTuple(args, "(cOOii)", + &endian, &subarray, &fields, &elsize, + &alignment)) { + return NULL; + } + break; + default: + /* raise an error */ + version = -1; } /* @@ -12832,54 +12832,54 @@ Py_INCREF(new); } switch (cmp_op) { - case Py_LT: - if (!PyArray_EquivTypes(self, new) && PyArray_CanCastTo(self, new)) { - result = Py_True; - } - else { - result = Py_False; - } - break; - case Py_LE: - if (PyArray_CanCastTo(self, new)) { - result = Py_True; - } - else { - result = Py_False; - } - break; - case Py_EQ: - if (PyArray_EquivTypes(self, new)) { - result = Py_True; - } - else { - result = Py_False; - } - break; - case Py_NE: - if (PyArray_EquivTypes(self, new)) - result = Py_False; - else - result = Py_True; - break; - case Py_GT: - if (!PyArray_EquivTypes(self, new) && PyArray_CanCastTo(new, self)) { - result = Py_True; - } - else { - result = Py_False; - } - break; - case Py_GE: - if (PyArray_CanCastTo(new, self)) { - result = Py_True; - } - else { - result = Py_False; - } - break; - default: - result = Py_NotImplemented; + case Py_LT: + if (!PyArray_EquivTypes(self, new) && PyArray_CanCastTo(self, new)) { + result = Py_True; + } + else { + result = Py_False; + } + break; + case Py_LE: + if (PyArray_CanCastTo(self, new)) { + result = Py_True; + } + else { + result = Py_False; + } + break; + case Py_EQ: + if (PyArray_EquivTypes(self, new)) { + result = Py_True; + } + else { + result = Py_False; + } + break; + case Py_NE: + if (PyArray_EquivTypes(self, new)) + result = Py_False; + else + result = Py_True; + break; + case Py_GT: + if (!PyArray_EquivTypes(self, new) && PyArray_CanCastTo(new, self)) { + result = Py_True; + } + else { + result = Py_False; + } + break; + case Py_GE: + if (PyArray_CanCastTo(new, self)) { + result = Py_True; + } + else { + result = Py_False; + } + break; + default: + result = Py_NotImplemented; } Py_XDECREF(new); @@ -13296,87 +13296,87 @@ key = PyString_AS_STRING(ind); n = PyString_GET_SIZE(ind); switch(n) { - case 1: - switch(key[0]) { - case 'C': - return arrayflags_contiguous_get(self); - case 'F': - return arrayflags_fortran_get(self); - case 'W': - return arrayflags_writeable_get(self); - case 'B': - return arrayflags_behaved_get(self); - case 'O': - return arrayflags_owndata_get(self); - case 'A': - return arrayflags_aligned_get(self); - case 'U': - return arrayflags_updateifcopy_get(self); - default: - goto fail; - } - break; - case 2: - if (strncmp(key, "CA", n) == 0) { - return arrayflags_carray_get(self); - } - if (strncmp(key, "FA", n) == 0) { - return arrayflags_farray_get(self); - } - break; - case 3: - if (strncmp(key, "FNC", n) == 0) { - return arrayflags_fnc_get(self); - } - break; - case 4: - if (strncmp(key, "FORC", n) == 0) { - return arrayflags_forc_get(self); - } - break; - case 6: - if (strncmp(key, "CARRAY", n) == 0) { - return arrayflags_carray_get(self); - } - if (strncmp(key, "FARRAY", n) == 0) { - return arrayflags_farray_get(self); - } - break; - case 7: - if (strncmp(key,"FORTRAN",n) == 0) { - return arrayflags_fortran_get(self); - } - if (strncmp(key,"BEHAVED",n) == 0) { - return arrayflags_behaved_get(self); - } - if (strncmp(key,"OWNDATA",n) == 0) { - return arrayflags_owndata_get(self); - } - if (strncmp(key,"ALIGNED",n) == 0) { - return arrayflags_aligned_get(self); - } - break; - case 9: - if (strncmp(key,"WRITEABLE",n) == 0) { - return arrayflags_writeable_get(self); - } - break; - case 10: - if (strncmp(key,"CONTIGUOUS",n) == 0) { - return arrayflags_contiguous_get(self); - } - break; - case 12: - if (strncmp(key, "UPDATEIFCOPY", n) == 0) { - return arrayflags_updateifcopy_get(self); - } - if (strncmp(key, "C_CONTIGUOUS", n) == 0) { - return arrayflags_contiguous_get(self); - } - if (strncmp(key, "F_CONTIGUOUS", n) == 0) { - return arrayflags_fortran_get(self); - } - break; + case 1: + switch(key[0]) { + case 'C': + return arrayflags_contiguous_get(self); + case 'F': + return arrayflags_fortran_get(self); + case 'W': + return arrayflags_writeable_get(self); + case 'B': + return arrayflags_behaved_get(self); + case 'O': + return arrayflags_owndata_get(self); + case 'A': + return arrayflags_aligned_get(self); + case 'U': + return arrayflags_updateifcopy_get(self); + default: + goto fail; + } + break; + case 2: + if (strncmp(key, "CA", n) == 0) { + return arrayflags_carray_get(self); + } + if (strncmp(key, "FA", n) == 0) { + return arrayflags_farray_get(self); + } + break; + case 3: + if (strncmp(key, "FNC", n) == 0) { + return arrayflags_fnc_get(self); + } + break; + case 4: + if (strncmp(key, "FORC", n) == 0) { + return arrayflags_forc_get(self); + } + break; + case 6: + if (strncmp(key, "CARRAY", n) == 0) { + return arrayflags_carray_get(self); + } + if (strncmp(key, "FARRAY", n) == 0) { + return arrayflags_farray_get(self); + } + break; + case 7: + if (strncmp(key,"FORTRAN",n) == 0) { + return arrayflags_fortran_get(self); + } + if (strncmp(key,"BEHAVED",n) == 0) { + return arrayflags_behaved_get(self); + } + if (strncmp(key,"OWNDATA",n) == 0) { + return arrayflags_owndata_get(self); + } + if (strncmp(key,"ALIGNED",n) == 0) { + return arrayflags_aligned_get(self); + } + break; + case 9: + if (strncmp(key,"WRITEABLE",n) == 0) { + return arrayflags_writeable_get(self); + } + break; + case 10: + if (strncmp(key,"CONTIGUOUS",n) == 0) { + return arrayflags_contiguous_get(self); + } + break; + case 12: + if (strncmp(key, "UPDATEIFCOPY", n) == 0) { + return arrayflags_updateifcopy_get(self); + } + if (strncmp(key, "C_CONTIGUOUS", n) == 0) { + return arrayflags_contiguous_get(self); + } + if (strncmp(key, "F_CONTIGUOUS", n) == 0) { + return arrayflags_fortran_get(self); + } + break; } fail: From numpy-svn at scipy.org Fri Feb 20 23:22:00 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Fri, 20 Feb 2009 22:22:00 -0600 (CST) Subject: [Numpy-svn] r6427 - trunk/numpy/core/src Message-ID: <20090221042200.F0E67C7C033@scipy.org> Author: charris Date: 2009-02-20 22:21:50 -0600 (Fri, 20 Feb 2009) New Revision: 6427 Modified: trunk/numpy/core/src/multiarraymodule.c trunk/numpy/core/src/scalarmathmodule.c.src Log: Coding style cleanups. Modified: trunk/numpy/core/src/multiarraymodule.c =================================================================== --- trunk/numpy/core/src/multiarraymodule.c 2009-02-20 22:28:05 UTC (rev 6426) +++ trunk/numpy/core/src/multiarraymodule.c 2009-02-21 04:21:50 UTC (rev 6427) @@ -25,7 +25,7 @@ #define PyAO PyArrayObject -static PyObject *typeDict=NULL; /* Must be explicitly loaded */ +static PyObject *typeDict = NULL; /* Must be explicitly loaded */ static PyArray_Descr * _arraydescr_fromobj(PyObject *obj) @@ -39,7 +39,9 @@ if (dtypedescr) { ret = PyArray_DescrConverter(dtypedescr, &new); Py_DECREF(dtypedescr); - if (ret == PY_SUCCEED) return new; + if (ret == PY_SUCCEED) { + return new; + } PyErr_Clear(); } /* Understand basic ctypes */ @@ -52,13 +54,16 @@ PyObject *length; length = PyObject_GetAttrString(obj, "_length_"); PyErr_Clear(); - if (length) { /* derived type */ + if (length) { + /* derived type */ PyObject *newtup; PyArray_Descr *derived; newtup = Py_BuildValue("NO", new, length); ret = PyArray_DescrConverter(newtup, &derived); Py_DECREF(newtup); - if (ret == PY_SUCCEED) return derived; + if (ret == PY_SUCCEED) { + return derived; + } PyErr_Clear(); return NULL; } @@ -75,29 +80,34 @@ if (dtypedescr) { ret = PyArray_DescrAlignConverter(dtypedescr, &new); Py_DECREF(dtypedescr); - if (ret == PY_SUCCEED) return new; + if (ret == PY_SUCCEED) { + return new; + } PyErr_Clear(); } return NULL; } -/* XXX: We include c99 compat math module here because it is needed for +/* + * XXX: We include c99 compat math module here because it is needed for * numpyos.c (included by arrayobject). This is bad - we should separate - * declaration/implementation and share this in a lib. */ + * declaration/implementation and share this in a lib. + */ #include "umath_funcs_c99.inc" -/* Including this file is the only way I know how to declare functions - static in each file, and store the pointers from functions in both - arrayobject.c and multiarraymodule.c for the C-API +/* + * Including this file is the only way I know how to declare functions + * static in each file, and store the pointers from functions in both + * arrayobject.c and multiarraymodule.c for the C-API + * + * Declarying an external pointer-containing variable in arrayobject.c + * and trying to copy it to PyArray_API, did not work. + * + * Think about two modules with a common api that import each other... + * + * This file would just be the module calls. + */ - Declarying an external pointer-containing variable in arrayobject.c - and trying to copy it to PyArray_API, did not work. - - Think about two modules with a common api that import each other... - - This file would just be the module calls. -*/ - #include "arrayobject.c" @@ -105,60 +115,72 @@ static PyObject *MultiArrayError; /*NUMPY_API - Multiply a List of ints -*/ + * Multiply a List of ints + */ static int PyArray_MultiplyIntList(register int *l1, register int n) { - register int s=1; - while (n--) s *= (*l1++); + int s = 1; + + while (n--) { + s *= (*l1++); + } return s; } /*NUMPY_API - Multiply a List -*/ + * Multiply a List + */ static intp PyArray_MultiplyList(register intp *l1, register int n) { - register intp s=1; - while (n--) s *= (*l1++); + intp s = 1; + + while (n--) { + s *= (*l1++); + } return s; } /*NUMPY_API - Multiply a List of Non-negative numbers with over-flow detection. -*/ + * Multiply a List of Non-negative numbers with over-flow detection. + */ static intp PyArray_OverflowMultiplyList(register intp *l1, register int n) { - register intp s=1; + intp s = 1; + while (n--) { - if (*l1 == 0) return 0; - if ((s > MAX_INTP / *l1) || (*l1 > MAX_INTP / s)) + if (*l1 == 0) { + return 0; + } + if ((s > MAX_INTP / *l1) || (*l1 > MAX_INTP / s)) { return -1; + } s *= (*l1++); } return s; } /*NUMPY_API - Produce a pointer into array -*/ + * Produce a pointer into array + */ static void * PyArray_GetPtr(PyArrayObject *obj, register intp* ind) { - register int n = obj->nd; - register intp *strides = obj->strides; - register char *dptr = obj->data; + int n = obj->nd; + intp *strides = obj->strides; + char *dptr = obj->data; - while (n--) dptr += (*strides++) * (*ind++); + while (n--) { + dptr += (*strides++) * (*ind++); + } return (void *)dptr; } /*NUMPY_API - Get axis from an object (possibly None) -- a converter function, -*/ + * Get axis from an object (possibly None) -- a converter function, + */ static int PyArray_AxisConverter(PyObject *obj, int *axis) { @@ -175,31 +197,37 @@ } /*NUMPY_API - Compare Lists -*/ + * Compare Lists + */ static int PyArray_CompareLists(intp *l1, intp *l2, int n) { int i; - for(i=0;iob_type; - + if (pytype) { + subtype = pytype; + } + else { + subtype = self->ob_type; + } Py_INCREF(self->descr); new = PyArray_NewFromDescr(subtype, self->descr, @@ -207,8 +235,9 @@ self->strides, self->data, self->flags, (PyObject *)self); - - if (new==NULL) return NULL; + if (new == NULL) { + return NULL; + } Py_INCREF(self); PyArray_BASE(new) = (PyObject *)self; @@ -224,20 +253,20 @@ return new; } -/* Returns a contiguous array */ /*NUMPY_API - Ravel -*/ + * Ravel + * Returns a contiguous array + */ static PyObject * PyArray_Ravel(PyArrayObject *a, NPY_ORDER fortran) { PyArray_Dims newdim = {NULL,1}; intp val[1] = {-1}; - if (fortran == PyArray_ANYORDER) + if (fortran == PyArray_ANYORDER) { fortran = PyArray_ISFORTRAN(a); - + } newdim.ptr = val; if (!fortran && PyArray_ISCONTIGUOUS(a)) { return PyArray_Newshape(a, &newdim, PyArray_CORDER); @@ -245,8 +274,9 @@ else if (fortran && PyArray_ISFORTRAN(a)) { return PyArray_Newshape(a, &newdim, PyArray_FORTRANORDER); } - else + else { return PyArray_Flatten(a, fortran); + } } static double @@ -254,23 +284,25 @@ { static const double p10[] = {1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8}; double ret; - if (n < 9) + if (n < 9) { ret = p10[n]; + } else { ret = 1e9; - while (n-- > 9) + while (n-- > 9) { ret *= 10.; + } } return ret; } /*NUMPY_API - Round -*/ + * Round + */ static PyObject * PyArray_Round(PyArrayObject *a, int decimals, PyArrayObject *out) { - PyObject *f, *ret=NULL, *tmp, *op1, *op2; + PyObject *f, *ret = NULL, *tmp, *op1, *op2; int ret_int=0; PyArray_Descr *my_descr; if (out && (PyArray_SIZE(out) != PyArray_SIZE(a))) { @@ -283,45 +315,68 @@ PyObject *round_part; PyObject *new; int res; + if (out) { new = (PyObject *)out; Py_INCREF(new); } else { new = PyArray_Copy(a); - if (new == NULL) return NULL; + if (new == NULL) { + return NULL; + } } /* new.real = a.real.round(decimals) */ part = PyObject_GetAttrString(new, "real"); - if (part == NULL) {Py_DECREF(new); return NULL;} + if (part == NULL) { + Py_DECREF(new); + return NULL; + } part = PyArray_EnsureAnyArray(part); round_part = PyArray_Round((PyArrayObject *)part, decimals, NULL); Py_DECREF(part); - if (round_part == NULL) {Py_DECREF(new); return NULL;} + if (round_part == NULL) { + Py_DECREF(new); + return NULL; + } res = PyObject_SetAttrString(new, "real", round_part); Py_DECREF(round_part); - if (res < 0) {Py_DECREF(new); return NULL;} + if (res < 0) { + Py_DECREF(new); + return NULL; + } /* new.imag = a.imag.round(decimals) */ part = PyObject_GetAttrString(new, "imag"); - if (part == NULL) {Py_DECREF(new); return NULL;} + if (part == NULL) { + Py_DECREF(new); + return NULL; + } part = PyArray_EnsureAnyArray(part); round_part = PyArray_Round((PyArrayObject *)part, decimals, NULL); Py_DECREF(part); - if (round_part == NULL) {Py_DECREF(new); return NULL;} + if (round_part == NULL) { + Py_DECREF(new); + return NULL; + } res = PyObject_SetAttrString(new, "imag", round_part); Py_DECREF(round_part); - if (res < 0) {Py_DECREF(new); return NULL;} + if (res < 0) { + Py_DECREF(new); + return NULL; + } return new; } /* do the most common case first */ if (decimals >= 0) { if (PyArray_ISINTEGER(a)) { if (out) { - if (PyArray_CopyAnyInto(out, a) < 0) return NULL; + if (PyArray_CopyAnyInto(out, a) < 0) { + return NULL; + } Py_INCREF(out); return (PyObject *)out; } @@ -332,8 +387,7 @@ } if (decimals == 0) { if (out) { - return PyObject_CallFunction(n_ops.rint, "OO", - a, out); + return PyObject_CallFunction(n_ops.rint, "OO", a, out); } return PyObject_CallFunction(n_ops.rint, "O", a); } @@ -357,18 +411,34 @@ out = (PyArrayObject *)PyArray_Empty(a->nd, a->dimensions, my_descr, PyArray_ISFORTRAN(a)); - if (out == NULL) return NULL; + if (out == NULL) { + return NULL; + } } - else Py_INCREF(out); + else { + Py_INCREF(out); + } f = PyFloat_FromDouble(power_of_ten(decimals)); - if (f==NULL) return NULL; + if (f == NULL) { + return NULL; + } ret = PyObject_CallFunction(op1, "OOO", a, f, out); - if (ret==NULL) goto finish; + if (ret == NULL) { + goto finish; + } tmp = PyObject_CallFunction(n_ops.rint, "OO", ret, ret); - if (tmp == NULL) {Py_DECREF(ret); ret=NULL; goto finish;} + if (tmp == NULL) { + Py_DECREF(ret); + ret = NULL; + goto finish; + } Py_DECREF(tmp); tmp = PyObject_CallFunction(op2, "OOO", ret, f, ret); - if (tmp == NULL) {Py_DECREF(ret); ret=NULL; goto finish;} + if (tmp == NULL) { + Py_DECREF(ret); + ret = NULL; + goto finish; + } Py_DECREF(tmp); finish: @@ -382,22 +452,21 @@ return tmp; } return ret; - } /*NUMPY_API - Flatten -*/ + * Flatten + */ static PyObject * PyArray_Flatten(PyArrayObject *a, NPY_ORDER order) { PyObject *ret; intp size; - if (order == PyArray_ANYORDER) + if (order == PyArray_ANYORDER) { order = PyArray_ISFORTRAN(a); - + } size = PyArray_SIZE(a); Py_INCREF(a->descr); ret = PyArray_NewFromDescr(a->ob_type, @@ -407,7 +476,9 @@ NULL, 0, (PyObject *)a); - if (ret== NULL) return NULL; + if (ret == NULL) { + return NULL; + } if (_flat_copyinto(ret, (PyObject *)a, order) < 0) { Py_DECREF(ret); return NULL; @@ -416,20 +487,20 @@ } -/* For back-ward compatability * +/* For back-ward compatability -- Not recommended */ - / * Not recommended */ - /*NUMPY_API - Reshape an array -*/ + * Reshape + */ static PyObject * PyArray_Reshape(PyArrayObject *self, PyObject *shape) { PyObject *ret; PyArray_Dims newdims; - if (!PyArray_IntpConverter(shape, &newdims)) return NULL; + if (!PyArray_IntpConverter(shape, &newdims)) { + return NULL; + } ret = PyArray_Newshape(self, &newdims, PyArray_CORDER); PyDimMem_FREE(newdims.ptr); return ret; @@ -447,25 +518,31 @@ nd = self->nd; dims = self->dimensions; - for (k=0, j=0; !done && (jstrides[j]; - j++; k++; + j++; + k++; } - else if ((knd; oi++) { - if (self->dimensions[oi]!=1) { + for (oi = 0; oi < self->nd; oi++) { + if (self->dimensions[oi]!= 1) { olddims[oldnd] = self->dimensions[oi]; oldstrides[oldnd] = self->strides[oi]; oldnd++; @@ -513,13 +590,17 @@ np = 1; - for (ni=0; nini;nk--) - newstrides[nk-1]=newstrides[nk]*newdims[nk]; + newstrides[ni] = oldstrides[oi]; + for (nk = ni + 1; nk < nj; nk++) { + newstrides[nk] = newstrides[nk - 1]*newdims[nk - 1]; + } } - + else { + /* C order */ + newstrides[nj - 1] = oldstrides[oj - 1]; + for (nk = nj - 1; nk > ni; nk--) { + newstrides[nk - 1] = newstrides[nk]*newdims[nk]; + } + } ni = nj++; oi = oj++; - } /* @@ -596,17 +681,19 @@ s_known = 1; i_unknown = -1; - for(i=0; iptr; PyArrayObject *ret; int n = newdims->len; - Bool same, incref=TRUE; + Bool same, incref = TRUE; intp *strides = NULL; intp newstrides[MAX_DIMS]; int flags; - if (fortran == PyArray_ANYORDER) + if (fortran == PyArray_ANYORDER) { fortran = PyArray_ISFORTRAN(self); - + } /* Quick check to make sure anything actually needs to be done */ if (n == self->nd) { same = TRUE; - i=0; - while(same && iflags; - if (strides==NULL) { /* we are really re-shaping not just adding ones - to the shape somewhere */ - - /* fix any -1 dimensions and check new-dimensions against - old size */ - if (_fix_unknown_dimension(newdims, PyArray_SIZE(self)) < 0) + if (strides == NULL) { + /* + * we are really re-shaping not just adding ones to the shape somewhere + * fix any -1 dimensions and check new-dimensions against old size + */ + if (_fix_unknown_dimension(newdims, PyArray_SIZE(self)) < 0) { return NULL; - - /* sometimes we have to create a new copy of the array - in order to get the right orientation and - because we can't just re-use the buffer with the - data in the order it is in. - */ + } + /* + * sometimes we have to create a new copy of the array + * in order to get the right orientation and + * because we can't just re-use the buffer with the + * data in the order it is in. + */ if (!(PyArray_ISONESEGMENT(self)) || (((PyArray_CHKFLAGS(self, NPY_CONTIGUOUS) && - fortran == NPY_FORTRANORDER) - || (PyArray_CHKFLAGS(self, NPY_FORTRAN) && + fortran == NPY_FORTRANORDER) || + (PyArray_CHKFLAGS(self, NPY_FORTRAN) && fortran == NPY_CORDER)) && (self->nd > 1))) { - - int success=0; + int success = 0; success = _attempt_nocopy_reshape(self,n,dimensions, newstrides,fortran); if (success) { /* no need to copy the array after all */ strides = newstrides; flags = self->flags; - } else { + } + else { PyObject *new; new = PyArray_NewCopy(self, fortran); - if (new == NULL) return NULL; + if (new == NULL) { + return NULL; + } incref = FALSE; self = (PyArrayObject *)new; flags = self->flags; } } - /* We always have to interpret the contiguous buffer correctly - */ + /* We always have to interpret the contiguous buffer correctly */ - /* Make sure the flags argument is set. - */ + /* Make sure the flags argument is set. */ if (n > 1) { if (fortran == NPY_FORTRANORDER) { flags &= ~NPY_CONTIGUOUS; @@ -728,26 +823,29 @@ } } else if (n > 0) { - /* replace any 0-valued strides with - appropriate value to preserve contiguousness - */ + /* + * replace any 0-valued strides with + * appropriate value to preserve contiguousness + */ if (fortran == PyArray_FORTRANORDER) { - if (strides[0] == 0) + if (strides[0] == 0) { strides[0] = self->descr->elsize; - for (i=1; idescr->elsize; - for (i=n-2; i>-1; i--) { - if (strides[i] == 0) - strides[i] = strides[i+1] * \ - dimensions[i+1]; } + for (i = n - 2; i > -1; i--) { + if (strides[i] == 0) { + strides[i] = strides[i+1] * dimensions[i+1]; + } + } } } @@ -759,27 +857,31 @@ self->data, flags, (PyObject *)self); - if (ret== NULL) goto fail; - - if (incref) Py_INCREF(self); + if (ret == NULL) { + goto fail; + } + if (incref) { + Py_INCREF(self); + } ret->base = (PyObject *)self; PyArray_UpdateFlags(ret, CONTIGUOUS | FORTRAN); - return (PyObject *)ret; fail: - if (!incref) {Py_DECREF(self);} + if (!incref) { + Py_DECREF(self); + } return NULL; } -/* return a new view of the array object with all of its unit-length - dimensions squeezed out if needed, otherwise - return the same array. -*/ - -/*NUMPY_API*/ +/*NUMPY_API + * + * return a new view of the array object with all of its unit-length + * dimensions squeezed out if needed, otherwise + * return the same array. + */ static PyObject * PyArray_Squeeze(PyArrayObject *self) { @@ -787,14 +889,14 @@ int newnd = nd; intp dimensions[MAX_DIMS]; intp strides[MAX_DIMS]; - int i,j; + int i, j; PyObject *ret; if (nd == 0) { Py_INCREF(self); return (PyObject *)self; } - for (j=0, i=0; idimensions[i] == 1) { newnd -= 1; } @@ -811,7 +913,9 @@ strides, self->data, self->flags, (PyObject *)self); - if (ret == NULL) return NULL; + if (ret == NULL) { + return NULL; + } PyArray_FLAGS(ret) &= ~OWNDATA; PyArray_BASE(ret) = (PyObject *)self; Py_INCREF(self); @@ -820,16 +924,17 @@ /*NUMPY_API - Mean -*/ + * Mean + */ static PyObject * PyArray_Mean(PyArrayObject *self, int axis, int rtype, PyArrayObject *out) { - PyObject *obj1=NULL, *obj2=NULL; + PyObject *obj1 = NULL, *obj2 = NULL; PyObject *new, *ret; - if ((new = _check_axis(self, &axis, 0))==NULL) return NULL; - + if ((new = _check_axis(self, &axis, 0)) == NULL) { + return NULL; + } obj1 = PyArray_GenericReduceFunction((PyAO *)new, n_ops.add, axis, rtype, out); obj2 = PyFloat_FromDouble((double) PyArray_DIM(new,axis)); @@ -850,10 +955,10 @@ return ret; } -/* Set variance to 1 to by-pass square-root calculation and return variance */ /*NUMPY_API - Std -*/ + * Set variance to 1 to by-pass square-root calculation and return variance + * Std + */ static PyObject * PyArray_Std(PyArrayObject *self, int axis, int rtype, PyArrayObject *out, int variance) @@ -865,34 +970,51 @@ __New_PyArray_Std(PyArrayObject *self, int axis, int rtype, PyArrayObject *out, int variance, int num) { - PyObject *obj1=NULL, *obj2=NULL, *obj3=NULL, *new=NULL; - PyObject *ret=NULL, *newshape=NULL; + PyObject *obj1 = NULL, *obj2 = NULL, *obj3 = NULL, *new = NULL; + PyObject *ret = NULL, *newshape = NULL; int i, n; intp val; - if ((new = _check_axis(self, &axis, 0))==NULL) return NULL; - + if ((new = _check_axis(self, &axis, 0)) == NULL) { + return NULL; + } /* Compute and reshape mean */ obj1 = PyArray_EnsureAnyArray(PyArray_Mean((PyAO *)new, axis, rtype, NULL)); - if (obj1 == NULL) {Py_DECREF(new); return NULL;} + if (obj1 == NULL) { + Py_DECREF(new); + return NULL; + } n = PyArray_NDIM(new); newshape = PyTuple_New(n); - if (newshape == NULL) {Py_DECREF(obj1); Py_DECREF(new); return NULL;} - for (i=0; iob_type == ret->ob_type) return ret; + if (ret == NULL || PyArray_CheckExact(self)) { + return ret; + } + if (PyArray_Check(self) && self->ob_type == ret->ob_type) { + return ret; + } obj1 = PyArray_EnsureArray(ret); - if (obj1 == NULL) return NULL; + if (obj1 == NULL) { + return NULL; + } ret = PyArray_View((PyAO *)obj1, NULL, self->ob_type); Py_DECREF(obj1); if (out) { @@ -971,15 +1113,16 @@ /*NUMPY_API - Sum -*/ + *Sum + */ static PyObject * PyArray_Sum(PyArrayObject *self, int axis, int rtype, PyArrayObject *out) { PyObject *new, *ret; - if ((new = _check_axis(self, &axis, 0))==NULL) return NULL; - + if ((new = _check_axis(self, &axis, 0)) == NULL) { + return NULL; + } ret = PyArray_GenericReduceFunction((PyAO *)new, n_ops.add, axis, rtype, out); Py_DECREF(new); @@ -987,15 +1130,16 @@ } /*NUMPY_API - Prod -*/ + * Prod + */ static PyObject * PyArray_Prod(PyArrayObject *self, int axis, int rtype, PyArrayObject *out) { PyObject *new, *ret; - if ((new = _check_axis(self, &axis, 0))==NULL) return NULL; - + if ((new = _check_axis(self, &axis, 0)) == NULL) { + return NULL; + } ret = PyArray_GenericReduceFunction((PyAO *)new, n_ops.multiply, axis, rtype, out); Py_DECREF(new); @@ -1003,15 +1147,16 @@ } /*NUMPY_API - CumSum -*/ + *CumSum + */ static PyObject * PyArray_CumSum(PyArrayObject *self, int axis, int rtype, PyArrayObject *out) { PyObject *new, *ret; - if ((new = _check_axis(self, &axis, 0))==NULL) return NULL; - + if ((new = _check_axis(self, &axis, 0)) == NULL) { + return NULL; + } ret = PyArray_GenericAccumulateFunction((PyAO *)new, n_ops.add, axis, rtype, out); Py_DECREF(new); @@ -1019,14 +1164,16 @@ } /*NUMPY_API - CumProd -*/ + * CumProd + */ static PyObject * PyArray_CumProd(PyArrayObject *self, int axis, int rtype, PyArrayObject *out) { PyObject *new, *ret; - if ((new = _check_axis(self, &axis, 0))==NULL) return NULL; + if ((new = _check_axis(self, &axis, 0)) == NULL) { + return NULL; + } ret = PyArray_GenericAccumulateFunction((PyAO *)new, n_ops.multiply, axis, @@ -1036,15 +1183,16 @@ } /*NUMPY_API - Any -*/ + * Any + */ static PyObject * PyArray_Any(PyArrayObject *self, int axis, PyArrayObject *out) { PyObject *new, *ret; - if ((new = _check_axis(self, &axis, 0))==NULL) return NULL; - + if ((new = _check_axis(self, &axis, 0)) == NULL) { + return NULL; + } ret = PyArray_GenericReduceFunction((PyAO *)new, n_ops.logical_or, axis, PyArray_BOOL, out); @@ -1053,15 +1201,16 @@ } /*NUMPY_API - All -*/ + * All + */ static PyObject * PyArray_All(PyArrayObject *self, int axis, PyArrayObject *out) { PyObject *new, *ret; - if ((new = _check_axis(self, &axis, 0))==NULL) return NULL; - + if ((new = _check_axis(self, &axis, 0)) == NULL) { + return NULL; + } ret = PyArray_GenericReduceFunction((PyAO *)new, n_ops.logical_and, axis, PyArray_BOOL, out); @@ -1071,8 +1220,8 @@ /*NUMPY_API - Compress -*/ + * Compress + */ static PyObject * PyArray_Compress(PyArrayObject *self, PyObject *condition, int axis, PyArrayObject *out) @@ -1081,8 +1230,9 @@ PyObject *res, *ret; cond = (PyAO *)PyArray_FROM_O(condition); - if (cond == NULL) return NULL; - + if (cond == NULL) { + return NULL; + } if (cond->nd != 1) { Py_DECREF(cond); PyErr_SetString(PyExc_ValueError, @@ -1092,7 +1242,9 @@ res = PyArray_Nonzero(cond); Py_DECREF(cond); - if (res == NULL) return res; + if (res == NULL) { + return res; + } ret = PyArray_TakeFrom(self, PyTuple_GET_ITEM(res, 0), axis, out, NPY_RAISE); Py_DECREF(res); @@ -1100,51 +1252,61 @@ } /*NUMPY_API - Nonzero -*/ + * Nonzero + */ static PyObject * PyArray_Nonzero(PyArrayObject *self) { - int n=self->nd, j; - intp count=0, i, size; - PyArrayIterObject *it=NULL; - PyObject *ret=NULL, *item; + int n = self->nd, j; + intp count = 0, i, size; + PyArrayIterObject *it = NULL; + PyObject *ret = NULL, *item; intp *dptr[MAX_DIMS]; it = (PyArrayIterObject *)PyArray_IterNew((PyObject *)self); - if (it==NULL) return NULL; - + if (it == NULL) { + return NULL; + } size = it->size; - for (i=0; idescr->f->nonzero(it->dataptr, self)) count++; + for (i = 0; i < size; i++) { + if (self->descr->f->nonzero(it->dataptr, self)) { + count++; + } PyArray_ITER_NEXT(it); } PyArray_ITER_RESET(it); ret = PyTuple_New(n); - if (ret == NULL) goto fail; - for (j=0; job_type, 1, &count, PyArray_INTP, NULL, NULL, 0, 0, (PyObject *)self); - if (item == NULL) goto fail; + if (item == NULL) { + goto fail; + } PyTuple_SET_ITEM(ret, j, item); dptr[j] = (intp *)PyArray_DATA(item); } - if (n==1) { - for (i=0; idescr->f->nonzero(it->dataptr, self)) + if (n == 1) { + for (i = 0; i < size; i++) { + if (self->descr->f->nonzero(it->dataptr, self)) { *(dptr[0])++ = i; + } PyArray_ITER_NEXT(it); } } else { /* reset contiguous so that coordinates gets updated */ it->contiguous = 0; - for (i=0; idescr->f->nonzero(it->dataptr, self)) - for (j=0; jdescr->f->nonzero(it->dataptr, self)) { + for (j = 0; j < n; j++) { *(dptr[j])++ = it->coordinates[j]; + } + } PyArray_ITER_NEXT(it); } } @@ -1163,10 +1325,12 @@ _GenericBinaryOutFunction(PyArrayObject *m1, PyObject *m2, PyArrayObject *out, PyObject *op) { - if (out == NULL) + if (out == NULL) { return PyObject_CallFunction(op, "OO", m1, m2); - else + } + else { return PyObject_CallFunction(op, "OOO", m1, m2, out); + } } static PyObject * @@ -1176,7 +1340,9 @@ if (max != NULL) { res1 = _GenericBinaryOutFunction(self, max, out, n_ops.minimum); - if (res1 == NULL) return NULL; + if (res1 == NULL) { + return NULL; + } } else { res1 = (PyObject *)self; @@ -1186,7 +1352,10 @@ if (min != NULL) { res2 = _GenericBinaryOutFunction((PyArrayObject *)res1, min, out, n_ops.maximum); - if (res2 == NULL) {Py_XDECREF(res1); return NULL;} + if (res2 == NULL) { + Py_XDECREF(res1); + return NULL; + } } else { res2 = res1; @@ -1197,16 +1366,16 @@ } /*NUMPY_API - Clip -*/ + * Clip + */ static PyObject * PyArray_Clip(PyArrayObject *self, PyObject *min, PyObject *max, PyArrayObject *out) { PyArray_FastClipFunc *func; - int outgood=0, ingood=0; - PyArrayObject *maxa=NULL; - PyArrayObject *mina=NULL; - PyArrayObject *newout=NULL, *newin=NULL; + int outgood = 0, ingood = 0; + PyArrayObject *maxa = NULL; + PyArrayObject *mina = NULL; + PyArrayObject *newout = NULL, *newin = NULL; PyArray_Descr *indescr, *newdescr; char *max_data, *min_data; PyObject *zero; @@ -1219,31 +1388,37 @@ func = self->descr->f->fastclip; if (func == NULL || (min != NULL && !PyArray_CheckAnyScalar(min)) || - (max != NULL && !PyArray_CheckAnyScalar(max))) + (max != NULL && !PyArray_CheckAnyScalar(max))) { return _slow_array_clip(self, min, max, out); - + } /* Use the fast scalar clip function */ /* First we need to figure out the correct type */ indescr = NULL; if (min != NULL) { indescr = PyArray_DescrFromObject(min, NULL); - if (indescr == NULL) return NULL; + if (indescr == NULL) { + return NULL; + } } if (max != NULL) { newdescr = PyArray_DescrFromObject(max, indescr); Py_XDECREF(indescr); - if (newdescr == NULL) return NULL; + if (newdescr == NULL) { + return NULL; + } } else { - newdescr = indescr; /* Steal the reference */ + /* Steal the reference */ + newdescr = indescr; } - /* Use the scalar descriptor only if it is of a bigger - KIND than the input array (and then find the - type that matches both). - */ + /* + * Use the scalar descriptor only if it is of a bigger + * KIND than the input array (and then find the + * type that matches both). + */ if (PyArray_ScalarKind(newdescr->type_num, NULL) > PyArray_ScalarKind(self->descr->type_num, NULL)) { indescr = _array_small_type(newdescr, self->descr); @@ -1259,7 +1434,9 @@ PyArray_Descr *descr2; descr2 = PyArray_DescrNewByteorder(indescr, '='); Py_DECREF(indescr); - if (descr2 == NULL) goto fail; + if (descr2 == NULL) { + goto fail; + } indescr = descr2; } @@ -1267,28 +1444,32 @@ if (max != NULL) { maxa = (NPY_AO *)PyArray_FromAny(max, indescr, 0, 0, NPY_DEFAULT, NULL); - if (maxa == NULL) return NULL; + if (maxa == NULL) { + return NULL; + } } else { /* Side-effect of PyArray_FromAny */ Py_DECREF(indescr); } - - /* If we are unsigned, then make sure min is not <0 */ - /* This is to match the behavior of - _slow_array_clip - - We allow min and max to go beyond the limits - for other data-types in which case they - are interpreted as their modular counterparts. + /* + * If we are unsigned, then make sure min is not < 0 + * This is to match the behavior of _slow_array_clip + * + * We allow min and max to go beyond the limits + * for other data-types in which case they + * are interpreted as their modular counterparts. */ if (min != NULL) { if (PyArray_ISUNSIGNED(self)) { int cmp; zero = PyInt_FromLong(0); cmp = PyObject_RichCompareBool(min, zero, Py_LT); - if (cmp == -1) { Py_DECREF(zero); goto fail;} + if (cmp == -1) { + Py_DECREF(zero); + goto fail; + } if (cmp == 1) { min = zero; } @@ -1306,46 +1487,61 @@ mina = (NPY_AO *)PyArray_FromAny(min, indescr, 0, 0, NPY_DEFAULT, NULL); Py_DECREF(min); - if (mina == NULL) goto fail; + if (mina == NULL) { + goto fail; + } } - /* Check to see if input is single-segment, aligned, - and in native byteorder */ + /* + * Check to see if input is single-segment, aligned, + * and in native byteorder + */ if (PyArray_ISONESEGMENT(self) && PyArray_CHKFLAGS(self, ALIGNED) && - PyArray_ISNOTSWAPPED(self) && (self->descr == indescr)) + PyArray_ISNOTSWAPPED(self) && (self->descr == indescr)) { ingood = 1; - + } if (!ingood) { int flags; - if (PyArray_ISFORTRAN(self)) flags = NPY_FARRAY; - else flags = NPY_CARRAY; + + if (PyArray_ISFORTRAN(self)) { + flags = NPY_FARRAY; + } + else { + flags = NPY_CARRAY; + } Py_INCREF(indescr); newin = (NPY_AO *)PyArray_FromArray(self, indescr, flags); - if (newin == NULL) goto fail; + if (newin == NULL) { + goto fail; + } } else { newin = self; Py_INCREF(newin); } - /* At this point, newin is a single-segment, aligned, and correct - byte-order array of the correct type + /* + * At this point, newin is a single-segment, aligned, and correct + * byte-order array of the correct type + * + * if ingood == 0, then it is a copy, otherwise, + * it is the original input. + */ - if ingood == 0, then it is a copy, otherwise, - it is the original input. - */ - - /* If we have already made a copy of the data, then use - that as the output array - */ + /* + * If we have already made a copy of the data, then use + * that as the output array + */ if (out == NULL && !ingood) { out = newin; } - /* Now, we know newin is a usable array for fastclip, - we need to make sure the output array is available - and usable */ + /* + * Now, we know newin is a usable array for fastclip, + * we need to make sure the output array is available + * and usable + */ if (out == NULL) { Py_INCREF(indescr); out = (NPY_AO*)PyArray_NewFromDescr(self->ob_type, @@ -1354,7 +1550,9 @@ NULL, NULL, PyArray_ISFORTRAN(self), (PyObject *)self); - if (out == NULL) goto fail; + if (out == NULL) { + goto fail; + } outgood = 1; } else Py_INCREF(out); @@ -1368,8 +1566,10 @@ outgood = 1; } - /* Do we still not have a suitable output array? */ - /* Create one, now */ + /* + * Do we still not have a suitable output array? + * Create one, now + */ if (!outgood) { int oflags; if (PyArray_ISFORTRAN(out)) @@ -1379,7 +1579,9 @@ oflags |= NPY_UPDATEIFCOPY | NPY_FORCECAST; Py_INCREF(indescr); newout = (NPY_AO*)PyArray_FromArray(out, indescr, oflags); - if (newout == NULL) goto fail; + if (newout == NULL) { + goto fail; + } } else { newout = out; @@ -1392,22 +1594,20 @@ "same shape as the input."); goto fail; } - if (newout->data != newin->data) { memcpy(newout->data, newin->data, PyArray_NBYTES(newin)); } /* Now we can call the fast-clip function */ - min_data = max_data = NULL; - if (mina != NULL) + if (mina != NULL) { min_data = mina->data; - if (maxa != NULL) + } + if (maxa != NULL) { max_data = maxa->data; + } + func(newin->data, PyArray_SIZE(newin), min_data, max_data, newout->data); - func(newin->data, PyArray_SIZE(newin), min_data, max_data, - newout->data); - /* Clean up temporary variables */ Py_XDECREF(mina); Py_XDECREF(maxa); @@ -1426,8 +1626,8 @@ /*NUMPY_API - Conjugate -*/ + * Conjugate + */ static PyObject * PyArray_Conjugate(PyArrayObject *self, PyArrayObject *out) { @@ -1445,35 +1645,40 @@ else { PyArrayObject *ret; if (out) { - if (PyArray_CopyAnyInto(out, self)< 0) + if (PyArray_CopyAnyInto(out, self) < 0) { return NULL; + } ret = out; } - else ret = self; + else { + ret = self; + } Py_INCREF(ret); return (PyObject *)ret; } } /*NUMPY_API - Trace -*/ + * Trace + */ static PyObject * PyArray_Trace(PyArrayObject *self, int offset, int axis1, int axis2, int rtype, PyArrayObject *out) { - PyObject *diag=NULL, *ret=NULL; + PyObject *diag = NULL, *ret = NULL; diag = PyArray_Diagonal(self, offset, axis1, axis2); - if (diag == NULL) return NULL; + if (diag == NULL) { + return NULL; + } ret = PyArray_GenericReduceFunction((PyAO *)diag, n_ops.add, -1, rtype, out); Py_DECREF(diag); return ret; } /*NUMPY_API - Diagonal -*/ + * Diagonal + */ static PyObject * PyArray_Diagonal(PyArrayObject *self, int offset, int axis1, int axis2) { @@ -1489,9 +1694,13 @@ "array.ndim must be >= 2"); return NULL; } - if (axis1 < 0) axis1 += n; - if (axis2 < 0) axis2 += n; - if ((axis1 == axis2) || (axis1 < 0) || (axis1 >= n) || \ + if (axis1 < 0) { + axis1 += n; + } + if (axis2 < 0) { + axis2 += n; + } + if ((axis1 == axis2) || (axis1 < 0) || (axis1 >= n) || (axis2 < 0) || (axis2 >= n)) { PyErr_Format(PyExc_ValueError, "axis1(=%d) and axis2(=%d) "\ "must be different and within range (nd=%d)", @@ -1504,21 +1713,26 @@ newaxes.ptr[n-2] = axis1; newaxes.ptr[n-1] = axis2; pos = 0; - for (i=0; idimensions[0]; n2 = self->dimensions[1]; - step = n2+1; + step = n2 + 1; if (offset < 0) { start = -n2 * offset; stop = MIN(n2, n1+offset)*(n2+1) - n2*offset; @@ -1530,17 +1744,22 @@ /* count = ceil((stop-start)/step) */ count = ((stop-start) / step) + (((stop-start) % step) != 0); - indices = PyArray_New(&PyArray_Type, 1, &count, PyArray_INTP, NULL, NULL, 0, 0, NULL); if (indices == NULL) { - Py_DECREF(self); return NULL; + Py_DECREF(self); + return NULL; } dptr = (intp *)PyArray_DATA(indices); - for (n1=start; n1descr; - mydiagonal = PyList_New(0); - if (mydiagonal == NULL) {Py_DECREF(self); return NULL;} + if (mydiagonal == NULL) { + Py_DECREF(self); + return NULL; + } n1 = self->dimensions[0]; - for (i=0; idata; @@ -1634,8 +1857,10 @@ case 2: n = ap->dimensions[0]; ptr2 = (char **)_pya_malloc(n * sizeof(char *)); - if (!ptr2) goto fail; - for (i=0; idata + i*ap->strides[0]; } *((char ***)ptr) = ptr2; @@ -1644,12 +1869,13 @@ n = ap->dimensions[0]; m = ap->dimensions[1]; ptr3 = (char ***)_pya_malloc(n*(m+1) * sizeof(char *)); - if (!ptr3) goto fail; - for (i=0; idata + i*ap->strides[0] + \ - j*ap->strides[1]; + for (j = 0; j < m; j++) { + ptr3[i][j] = ap->data + i*ap->strides[0] + j*ap->strides[1]; } } *((char ****)ptr) = ptr3; @@ -1666,8 +1892,8 @@ /* Deprecated --- Use PyArray_AsCArray instead */ /*NUMPY_API - Convert to a 1D C-array -*/ + * Convert to a 1D C-array + */ static int PyArray_As1D(PyObject **op, char **ptr, int *d1, int typecode) { @@ -1679,15 +1905,16 @@ return -1; } descr = PyArray_DescrFromType(typecode); - if (PyArray_AsCArray(op, (void *)ptr, &newd1, 1, descr) == -1) + if (PyArray_AsCArray(op, (void *)ptr, &newd1, 1, descr) == -1) { return -1; + } *d1 = (int) newd1; return 0; } /*NUMPY_API - Convert to a 2D C-array -*/ + * Convert to a 2D C-array + */ static int PyArray_As2D(PyObject **op, char ***ptr, int *d1, int *d2, int typecode) { @@ -1699,9 +1926,9 @@ return -1; } descr = PyArray_DescrFromType(typecode); - if (PyArray_AsCArray(op, (void *)ptr, newdims, 2, descr) == -1) + if (PyArray_AsCArray(op, (void *)ptr, newdims, 2, descr) == -1) { return -1; - + } *d1 = (int ) newdims[0]; *d2 = (int ) newdims[1]; return 0; @@ -1710,15 +1937,16 @@ /* End Deprecated */ /*NUMPY_API - Free pointers created if As2D is called -*/ + * Free pointers created if As2D is called + */ static int PyArray_Free(PyObject *op, void *ptr) { PyArrayObject *ap = (PyArrayObject *)op; - if ((ap->nd < 1) || (ap->nd > 3)) + if ((ap->nd < 1) || (ap->nd > 3)) { return -1; + } if (ap->nd >= 2) { _pya_free(ptr); } @@ -1730,25 +1958,33 @@ static PyObject * _swap_and_concat(PyObject *op, int axis, int n) { - PyObject *newtup=NULL; + PyObject *newtup = NULL; PyObject *otmp, *arr; int i; newtup = PyTuple_New(n); - if (newtup==NULL) return NULL; - for (i=0; i= MAX_DIMS) { otmp = PyArray_Ravel(mps[i],0); Py_DECREF(mps[i]); @@ -1817,9 +2056,13 @@ } new_dim = 0; - for(i=0; ind; + for (i = 0; i < n; i++) { + if (mps[i] == NULL) { + goto fail; + } + if (i == 0) { + nd = mps[i]->nd; + } else { if (nd != mps[i]->nd) { PyErr_SetString(PyExc_ValueError, @@ -1843,7 +2086,6 @@ } new_dim += mps[i]->dimensions[0]; } - tmp = mps[0]->dimensions[0]; mps[0]->dimensions[0] = new_dim; Py_INCREF(mps[0]->descr); @@ -1854,30 +2096,35 @@ (PyObject *)ret); mps[0]->dimensions[0] = tmp; - if (ret == NULL) goto fail; - + if (ret == NULL) { + goto fail; + } data = ret->data; - for(i=0; idata, numbytes); data += numbytes; } PyArray_INCREF(ret); - for(i=0; i= n)) { PyErr_SetString(PyExc_ValueError, "bad axis1 argument to swapaxes"); @@ -1912,10 +2163,16 @@ new_axes.ptr = dims; new_axes.len = n; - for (i=0; ind; - for (i=0; ilen; axes = permute->ptr; if (n != ap->nd) { @@ -1946,12 +2204,14 @@ "axes don't match array"); return NULL; } - for (i=0; ind+axis; + if (axis < 0) { + axis = ap->nd + axis; + } if (axis < 0 || axis >= ap->nd) { PyErr_SetString(PyExc_ValueError, "invalid axis for this array"); @@ -1965,12 +2225,14 @@ reverse_permutation[axis] = i; permutation[i] = axis; } - for (i=0; idata. */ + /* + * this allocates memory for dimensions and strides (but fills them + * incorrectly), sets up descr, and points data at ap->data. + */ Py_INCREF(ap->descr); ret = (PyArrayObject *)\ PyArray_NewFromDescr(ap->ob_type, @@ -1978,25 +2240,25 @@ n, ap->dimensions, NULL, ap->data, ap->flags, (PyObject *)ap); - if (ret == NULL) return NULL; - + if (ret == NULL) { + return NULL; + } /* point at true owner of memory: */ ret->base = (PyObject *)ap; Py_INCREF(ap); /* fix the dimensions and strides of the return-array */ - for(i=0; idimensions[i] = ap->dimensions[permutation[i]]; ret->strides[i] = ap->strides[permutation[i]]; } PyArray_UpdateFlags(ret, CONTIGUOUS | FORTRAN); - return (PyObject *)ret; } /*NUMPY_API - Repeat the array. -*/ + * Repeat the array. + */ static PyObject * PyArray_Repeat(PyArrayObject *aop, PyObject *op, int axis) { @@ -2004,13 +2266,15 @@ intp n, n_outer, i, j, k, chunk, total; intp tmp; int nd; - PyArrayObject *repeats=NULL; - PyObject *ap=NULL; - PyArrayObject *ret=NULL; + PyArrayObject *repeats = NULL; + PyObject *ap = NULL; + PyArrayObject *ret = NULL; char *new_data, *old_data; repeats = (PyAO *)PyArray_ContiguousFromAny(op, PyArray_INTP, 0, 1); - if (repeats == NULL) return NULL; + if (repeats == NULL) { + return NULL; + } nd = repeats->nd; counts = (intp *)repeats->data; @@ -2020,25 +2284,26 @@ } aop = (PyAO *)ap; - - if (nd == 1) + if (nd == 1) { n = repeats->dimensions[0]; - else /* nd == 0 */ + } + else { + /* nd == 0 */ n = aop->dimensions[axis]; - + } if (aop->dimensions[axis] != n) { PyErr_SetString(PyExc_ValueError, "a.shape[axis] != len(repeats)"); goto fail; } - - if (nd == 0) + if (nd == 0) { total = counts[0]*n; + } else { total = 0; - for(j=0; jdimensions[axis] = n; - - if (ret == NULL) goto fail; - + if (ret == NULL) { + goto fail; + } new_data = ret->data; old_data = aop->data; chunk = aop->descr->elsize; - for(i=axis+1; ind; i++) { + for(i = axis + 1; i < aop->nd; i++) { chunk *= aop->dimensions[i]; } n_outer = 1; - for(i=0; idimensions[i]; - - for(i=0; idimensions[i]; + } + for (i = 0; i < n_outer; i++) { + for (j = 0; j < n; j++) { + tmp = nd ? counts[j] : counts[0]; + for (k = 0; k < tmp; k++) { memcpy(new_data, old_data, chunk); new_data += chunk; } @@ -2117,7 +2383,9 @@ } -/*NUMPY_API*/ +/*NUMPY_API + * ScalarKind + */ static NPY_SCALARKIND PyArray_ScalarKind(int typenum, PyArrayObject **arr) { @@ -2146,10 +2414,12 @@ NPY_SCALARKIND retval; PyArray_Descr* descr = PyArray_DescrFromType(typenum); - if (descr->f->scalarkind) + if (descr->f->scalarkind) { retval = descr->f->scalarkind((arr ? *arr : NULL)); - else + } + else { retval = PyArray_NOSCALAR; + } Py_DECREF(descr); return retval; } @@ -2167,15 +2437,15 @@ if (scalar == PyArray_NOSCALAR) { return PyArray_CanCastSafely(thistype, neededtype); } - from = PyArray_DescrFromType(thistype); - if (from->f->cancastscalarkindto && - (castlist = from->f->cancastscalarkindto[scalar])) { - while (*castlist != PyArray_NOTYPE) + if (from->f->cancastscalarkindto + && (castlist = from->f->cancastscalarkindto[scalar])) { + while (*castlist != PyArray_NOTYPE) { if (*castlist++ == neededtype) { Py_DECREF(from); return 1; } + } } Py_DECREF(from); @@ -2191,14 +2461,15 @@ case PyArray_INTPOS_SCALAR: return (neededtype >= PyArray_BYTE); case PyArray_INTNEG_SCALAR: - return (neededtype >= PyArray_BYTE) && \ - !(PyTypeNum_ISUNSIGNED(neededtype)); + return (neededtype >= PyArray_BYTE) + && !(PyTypeNum_ISUNSIGNED(neededtype)); case PyArray_FLOAT_SCALAR: return (neededtype >= PyArray_FLOAT); case PyArray_COMPLEX_SCALAR: return (neededtype >= PyArray_CFLOAT); default: - return 1; /* should never get here... */ + /* should never get here... */ + return 1; } } } @@ -2209,19 +2480,21 @@ static PyArrayObject ** PyArray_ConvertToCommonType(PyObject *op, int *retn) { - int i, n, allscalars=0; - PyArrayObject **mps=NULL; + int i, n, allscalars = 0; + PyArrayObject **mps = NULL; PyObject *otmp; - PyArray_Descr *intype=NULL, *stype=NULL; - PyArray_Descr *newtype=NULL; - NPY_SCALARKIND scalarkind=NPY_NOSCALAR, intypekind=NPY_NOSCALAR; + PyArray_Descr *intype = NULL, *stype = NULL; + PyArray_Descr *newtype = NULL; + NPY_SCALARKIND scalarkind = NPY_NOSCALAR, intypekind = NPY_NOSCALAR; *retn = n = PySequence_Length(op); if (n == 0) { PyErr_SetString(PyExc_ValueError, "0-length sequence."); } - if (PyErr_Occurred()) {*retn = 0; return NULL;} - + if (PyErr_Occurred()) { + *retn = 0; + return NULL; + } mps = (PyArrayObject **)PyDataMem_NEW(n*sizeof(PyArrayObject *)); if (mps == NULL) { *retn = 0; @@ -2229,12 +2502,11 @@ } if (PyArray_Check(op)) { - for (i=0; itype_num, - NULL); + intypekind = PyArray_ScalarKind(intype->type_num, NULL); } else { newtype = PyArray_DescrFromObject(otmp, stype); Py_XDECREF(stype); stype = newtype; - scalarkind = PyArray_ScalarKind(newtype->type_num, - NULL); + scalarkind = PyArray_ScalarKind(newtype->type_num, NULL); mps[i] = (PyArrayObject *)Py_None; Py_INCREF(Py_None); } Py_XDECREF(otmp); } - if (intype==NULL) { /* all scalars */ + if (intype==NULL) { + /* all scalars */ allscalars = 1; intype = stype; Py_INCREF(intype); - for (i=0; itype_num, intype->type_num, scalarkind)) { @@ -2288,7 +2558,7 @@ Py_XDECREF(intype); intype = newtype; } - for (i=0; idescr); @@ -2371,29 +2652,33 @@ PyArrayObject *obj; int flags = NPY_CARRAY | NPY_UPDATEIFCOPY | NPY_FORCECAST; - if ((PyArray_NDIM(ret) != multi->nd) || - !PyArray_CompareLists(PyArray_DIMS(ret), multi->dimensions, - multi->nd)) { + if ((PyArray_NDIM(ret) != multi->nd) + || !PyArray_CompareLists( + PyArray_DIMS(ret), multi->dimensions, multi->nd)) { PyErr_SetString(PyExc_TypeError, "invalid shape for output array."); ret = NULL; goto fail; } if (clipmode == NPY_RAISE) { - /* we need to make sure and get a copy - so the input array is not changed - before the error is called - */ + /* + * we need to make sure and get a copy + * so the input array is not changed + * before the error is called + */ flags |= NPY_ENSURECOPY; } Py_INCREF(mps[0]->descr); - obj = (PyArrayObject *)PyArray_FromArray(ret, mps[0]->descr, - flags); - if (obj != ret) copyret = 1; + obj = (PyArrayObject *)PyArray_FromArray(ret, mps[0]->descr, flags); + if (obj != ret) { + copyret = 1; + } ret = obj; } - if (ret == NULL) goto fail; + if (ret == NULL) { + goto fail; + } elsize = ret->descr->elsize; ret_data = ret->data; @@ -2403,31 +2688,41 @@ switch(clipmode) { case NPY_RAISE: PyErr_SetString(PyExc_ValueError, - "invalid entry in choice "\ - "array"); + "invalid entry in choice "\ + "array"); goto fail; case NPY_WRAP: if (mi < 0) { - while(mi<0) mi += n; + while (mi < 0) { + mi += n; + } } else { - while(mi>=n) mi -= n; + while (mi >= n) { + mi -= n; + } } break; case NPY_CLIP: - if (mi < 0) mi=0; - else if (mi>=n) mi=n-1; + if (mi < 0) { + mi = 0; + } + else if (mi >= n) { + mi = n - 1; + } break; } } memmove(ret_data, PyArray_MultiIter_DATA(multi, mi), elsize); - ret_data += elsize; + ret_data += elsize; PyArray_MultiIter_NEXT(multi); } PyArray_INCREF(ret); Py_DECREF(multi); - for(i=0; idescr->elsize; astride = op->strides[axis]; - needcopy = !(op->flags & ALIGNED) || (astride != (intp) elsize) \ - || swap; + needcopy = !(op->flags & ALIGNED) || (astride != (intp) elsize) || swap; + if (needcopy) { + char *buffer = PyDataMem_NEW(N*elsize); - if (needcopy) { - char *buffer; - buffer = PyDataMem_NEW(N*elsize); while (size--) { _unaligned_strided_byte_copy(buffer, (intp) elsize, it->dataptr, astride, N, elsize); @@ -2516,7 +2808,6 @@ PyArray_ITER_NEXT(it); } } - NPY_END_THREADS_DESCR(op->descr); Py_DECREF(it); return 0; @@ -2531,10 +2822,10 @@ _new_argsort(PyArrayObject *op, int axis, NPY_SORTKIND which) { - PyArrayIterObject *it=NULL; - PyArrayIterObject *rit=NULL; + PyArrayIterObject *it = NULL; + PyArrayIterObject *rit = NULL; PyObject *ret; - int needcopy=0, i; + int needcopy = 0, i; intp N, size; int elsize, swap; intp astride, rstride, *iptr; @@ -2544,16 +2835,17 @@ ret = PyArray_New(op->ob_type, op->nd, op->dimensions, PyArray_INTP, NULL, NULL, 0, 0, (PyObject *)op); - if (ret == NULL) return NULL; - + if (ret == NULL) { + return NULL; + } it = (PyArrayIterObject *)PyArray_IterAllButAxis((PyObject *)op, &axis); rit = (PyArrayIterObject *)PyArray_IterAllButAxis(ret, &axis); - if (rit == NULL || it == NULL) goto fail; - + if (rit == NULL || it == NULL) { + goto fail; + } swap = !PyArray_ISNOTSWAPPED(op); NPY_BEGIN_THREADS_DESCR(op->descr); - argsort = op->descr->f->argsort[which]; size = it->size; N = op->dimensions[axis]; @@ -2561,19 +2853,23 @@ astride = op->strides[axis]; rstride = PyArray_STRIDE(ret,axis); - needcopy = swap || !(op->flags & ALIGNED) || (astride != (intp) elsize) || \ - (rstride != sizeof(intp)); - + needcopy = swap || !(op->flags & ALIGNED) || (astride != (intp) elsize) || + (rstride != sizeof(intp)); if (needcopy) { char *valbuffer, *indbuffer; + valbuffer = PyDataMem_NEW(N*elsize); indbuffer = PyDataMem_NEW(N*sizeof(intp)); while (size--) { _unaligned_strided_byte_copy(valbuffer, (intp) elsize, it->dataptr, astride, N, elsize); - if (swap) _strided_byte_swap(valbuffer, (intp) elsize, N, elsize); + if (swap) { + _strided_byte_swap(valbuffer, (intp) elsize, N, elsize); + } iptr = (intp *)indbuffer; - for (i=0; idataptr; - for (i=0; idataptr, (intp *)rit->dataptr, - N, op) < 0) goto fail; + for (i = 0; i < N; i++) { + *iptr++ = i; + } + if (argsort(it->dataptr, (intp *)rit->dataptr, N, op) < 0) { + goto fail; + } PyArray_ITER_NEXT(it); PyArray_ITER_NEXT(rit); } @@ -2605,9 +2904,7 @@ return ret; fail: - NPY_END_THREADS; - Py_DECREF(ret); Py_XDECREF(it); Py_XDECREF(rit); @@ -2616,7 +2913,6 @@ /* Be sure to save this global_compare when necessary */ - static PyArrayObject *global_obj; static int @@ -2625,12 +2921,11 @@ return global_obj->descr->f->compare(a,b,global_obj); } -/* Consumes reference to ap (op gets it) - op contains a version of the array with axes swapped if - local variable axis is not the last dimension. - orign must be defined locally. -*/ - +/* + * Consumes reference to ap (op gets it) op contains a version of + * the array with axes swapped if local variable axis is not the + * last dimension. Origin must be defined locally. + */ #define SWAPAXES(op, ap) { \ orign = (ap)->nd-1; \ if (axis != orign) { \ @@ -2641,11 +2936,11 @@ else (op) = (ap); \ } -/* Consumes reference to ap (op gets it) - origin must be previously defined locally. - SWAPAXES must have been called previously. - op contains the swapped version of the array. -*/ +/* + * Consumes reference to ap (op gets it) origin must be previously + * defined locally. SWAPAXES must have been called previously. + * op contains the swapped version of the array. + */ #define SWAPBACK(op, ap) { \ if (axis != orign) { \ (op) = (PyAO *)PyArray_SwapAxes((ap), axis, orign); \ @@ -2675,22 +2970,24 @@ } /*NUMPY_API - Sort an array in-place -*/ + * Sort an array in-place + */ static int PyArray_Sort(PyArrayObject *op, int axis, NPY_SORTKIND which) { - PyArrayObject *ap=NULL, *store_arr=NULL; + PyArrayObject *ap = NULL, *store_arr = NULL; char *ip; int i, n, m, elsize, orign; n = op->nd; - if ((n==0) || (PyArray_SIZE(op)==1)) return 0; - - if (axis < 0) axis += n; + if ((n == 0) || (PyArray_SIZE(op) == 1)) { + return 0; + } + if (axis < 0) { + axis += n; + } if ((axis < 0) || (axis >= n)) { - PyErr_Format(PyExc_ValueError, - "axis(=%d) out of bounds", axis); + PyErr_Format(PyExc_ValueError, "axis(=%d) out of bounds", axis); return -1; } if (!PyArray_ISWRITEABLE(op)) { @@ -2703,9 +3000,8 @@ if (op->descr->f->sort[which] != NULL) { return _new_sort(op, axis, which); } - - if ((which != PyArray_QUICKSORT) || \ - op->descr->f->compare == NULL) { + if ((which != PyArray_QUICKSORT) + || op->descr->f->compare == NULL) { PyErr_SetString(PyExc_TypeError, "desired sort not supported for this type"); return -1; @@ -2716,30 +3012,33 @@ ap = (PyArrayObject *)PyArray_FromAny((PyObject *)op, NULL, 1, 0, DEFAULT | UPDATEIFCOPY, NULL); - if (ap == NULL) goto fail; - + if (ap == NULL) { + goto fail; + } elsize = ap->descr->elsize; m = ap->dimensions[ap->nd-1]; - if (m == 0) goto finish; - + if (m == 0) { + goto finish; + } n = PyArray_SIZE(ap)/m; /* Store global -- allows re-entry -- restore before leaving*/ store_arr = global_obj; global_obj = ap; - - for (ip=ap->data, i=0; idata, i = 0; i < n; i++, ip += elsize*m) { qsort(ip, m, elsize, qsortCompare); } - global_obj = store_arr; - if (PyErr_Occurred()) goto fail; + if (PyErr_Occurred()) { + goto fail; + } finish: Py_DECREF(ap); /* Should update op if needed */ SWAPBACK2(op); return 0; + fail: Py_XDECREF(ap); SWAPBACK2(op); @@ -2761,32 +3060,35 @@ } /*NUMPY_API - ArgSort an array -*/ + * ArgSort an array + */ static PyObject * PyArray_ArgSort(PyArrayObject *op, int axis, NPY_SORTKIND which) { - PyArrayObject *ap=NULL, *ret=NULL, *store, *op2; + PyArrayObject *ap = NULL, *ret = NULL, *store, *op2; intp *ip; intp i, j, n, m, orign; int argsort_elsize; char *store_ptr; n = op->nd; - if ((n==0) || (PyArray_SIZE(op)==1)) { + if ((n == 0) || (PyArray_SIZE(op) == 1)) { ret = (PyArrayObject *)PyArray_New(op->ob_type, op->nd, op->dimensions, PyArray_INTP, NULL, NULL, 0, 0, (PyObject *)op); - if (ret == NULL) return NULL; + if (ret == NULL) { + return NULL; + } *((intp *)ret->data) = 0; return (PyObject *)ret; } /* Creates new reference op2 */ - if ((op2=(PyAO *)_check_axis(op, &axis, 0))==NULL) return NULL; - + if ((op2=(PyAO *)_check_axis(op, &axis, 0)) == NULL) { + return NULL; + } /* Determine if we should use new algorithm or not */ if (op2->descr->f->argsort[which] != NULL) { ret = (PyArrayObject *)_new_argsort(op2, axis, which); @@ -2804,39 +3106,39 @@ /* ap will contain the reference to op2 */ SWAPAXES(ap, op2); - op = (PyArrayObject *)PyArray_ContiguousFromAny((PyObject *)ap, PyArray_NOTYPE, 1, 0); - Py_DECREF(ap); - if (op == NULL) return NULL; - + if (op == NULL) { + return NULL; + } ret = (PyArrayObject *)PyArray_New(op->ob_type, op->nd, op->dimensions, PyArray_INTP, NULL, NULL, 0, 0, (PyObject *)op); - if (ret == NULL) goto fail; - - + if (ret == NULL) { + goto fail; + } ip = (intp *)ret->data; argsort_elsize = op->descr->elsize; m = op->dimensions[op->nd-1]; - if (m == 0) goto finish; - + if (m == 0) { + goto finish; + } n = PyArray_SIZE(op)/m; store_ptr = global_data; global_data = op->data; store = global_obj; global_obj = op; - for (i=0; i 0 in lexsort"); - return NULL; - } + if (!PySequence_Check(sort_keys) + || ((n=PySequence_Size(sort_keys)) <= 0)) { + PyErr_SetString(PyExc_TypeError, + "need sequence of keys with len > 0 in lexsort"); + return NULL; + } mps = (PyArrayObject **) _pya_malloc(n*sizeof(PyArrayObject)); - if (mps==NULL) return PyErr_NoMemory(); + if (mps == NULL) { + return PyErr_NoMemory(); + } its = (PyArrayIterObject **) _pya_malloc(n*sizeof(PyArrayIterObject)); - if (its == NULL) {_pya_free(mps); return PyErr_NoMemory();} - for (i=0; i0) { - if ((mps[i]->nd != mps[0]->nd) || \ - (!PyArray_CompareLists(mps[i]->dimensions, + if (mps[i] == NULL) { + goto fail; + } + if (i > 0) { + if ((mps[i]->nd != mps[0]->nd) + || (!PyArray_CompareLists(mps[i]->dimensions, mps[0]->dimensions, mps[0]->nd))) { PyErr_SetString(PyExc_ValueError, @@ -2909,78 +3220,92 @@ "merge sort not available for item %d", i); goto fail; } - if (!object && - PyDataType_FLAGCHK(mps[i]->descr, NPY_NEEDS_PYAPI)) + if (!object + && PyDataType_FLAGCHK(mps[i]->descr, NPY_NEEDS_PYAPI)) { object = 1; - its[i] = (PyArrayIterObject *)PyArray_IterAllButAxis \ + } + its[i] = (PyArrayIterObject *)PyArray_IterAllButAxis ((PyObject *)mps[i], &axis); - if (its[i]==NULL) goto fail; + if (its[i] == NULL) { + goto fail; + } } /* Now we can check the axis */ nd = mps[0]->nd; - if ((nd==0) || (PyArray_SIZE(mps[0])==1)) { + if ((nd == 0) || (PyArray_SIZE(mps[0]) == 1)) { ret = (PyArrayObject *)PyArray_New(&PyArray_Type, mps[0]->nd, mps[0]->dimensions, PyArray_INTP, NULL, NULL, 0, 0, NULL); - if (ret == NULL) goto fail; + if (ret == NULL) { + goto fail; + } *((intp *)(ret->data)) = 0; goto finish; } - if (axis < 0) axis += nd; + if (axis < 0) { + axis += nd; + } if ((axis < 0) || (axis >= nd)) { - PyErr_Format(PyExc_ValueError, - "axis(=%d) out of bounds", axis); + PyErr_Format(PyExc_ValueError, "axis(=%d) out of bounds", axis); goto fail; } /* Now do the sorting */ - ret = (PyArrayObject *)PyArray_New(&PyArray_Type, mps[0]->nd, mps[0]->dimensions, PyArray_INTP, NULL, NULL, 0, 0, NULL); - if (ret == NULL) goto fail; - - rit = (PyArrayIterObject *)\ - PyArray_IterAllButAxis((PyObject *)ret, &axis); - if (rit == NULL) goto fail; - - if (!object) {NPY_BEGIN_THREADS;} - + if (ret == NULL) { + goto fail; + } + rit = (PyArrayIterObject *) + PyArray_IterAllButAxis((PyObject *)ret, &axis); + if (rit == NULL) { + goto fail; + } + if (!object) { + NPY_BEGIN_THREADS; + } size = rit->size; N = mps[0]->dimensions[axis]; rstride = PyArray_STRIDE(ret,axis); - maxelsize = mps[0]->descr->elsize; needcopy = (rstride != sizeof(intp)); - for (j=0; jflags & ALIGNED) || \ - (mps[j]->strides[axis] != (intp)mps[j]->descr->elsize); - if (mps[j]->descr->elsize > maxelsize) + for (j = 0; j < n && !needcopy; j++) { + needcopy = PyArray_ISBYTESWAPPED(mps[j]) + || !(mps[j]->flags & ALIGNED) + || (mps[j]->strides[axis] != (intp)mps[j]->descr->elsize); + if (mps[j]->descr->elsize > maxelsize) { maxelsize = mps[j]->descr->elsize; + } } if (needcopy) { char *valbuffer, *indbuffer; int *swaps; + valbuffer = PyDataMem_NEW(N*maxelsize); indbuffer = PyDataMem_NEW(N*sizeof(intp)); swaps = malloc(n*sizeof(int)); - for (j=0; jdescr->elsize; astride = mps[j]->strides[axis]; argsort = mps[j]->descr->f->argsort[PyArray_MERGESORT]; _unaligned_strided_byte_copy(valbuffer, (intp) elsize, its[j]->dataptr, astride, N, elsize); - if (swaps[j]) + if (swaps[j]) { _strided_byte_swap(valbuffer, (intp) elsize, N, elsize); + } if (argsort(valbuffer, (intp *)indbuffer, N, mps[j]) < 0) { PyDataMem_FREE(valbuffer); PyDataMem_FREE(indbuffer); @@ -3000,21 +3325,30 @@ else { while (size--) { iptr = (intp *)rit->dataptr; - for (i=0; idescr->f->argsort[PyArray_MERGESORT]; if (argsort(its[j]->dataptr, (intp *)rit->dataptr, - N, mps[j]) < 0) goto fail; + N, mps[j]) < 0) { + goto fail; + } PyArray_ITER_NEXT(its[j]); } PyArray_ITER_NEXT(rit); } } - if (!object) {NPY_END_THREADS;} + if (!object) { + NPY_END_THREADS; + } finish: - for (i=0; idescr->elsize; intp i; - for(i = 0; i < nkeys; ++i) { + for (i = 0; i < nkeys; ++i) { intp imin = 0; intp imax = nelts; while (imin < imax) { intp imid = imin + ((imax - imin) >> 2); - if (compare(parr + elsize*imid, pkey, key) < 0) + if (compare(parr + elsize*imid, pkey, key) < 0) { imin = imid + 1; - else + } + else { imax = imid; + } } *pret = imin; pret += 1; @@ -3103,10 +3440,12 @@ intp imax = nelts; while (imin < imax) { intp imid = imin + ((imax - imin) >> 2); - if (compare(parr + elsize*imid, pkey, key) <= 0) + if (compare(parr + elsize*imid, pkey, key) <= 0) { imin = imid + 1; - else + } + else { imax = imid; + } } *pret = imin; pret += 1; @@ -3116,8 +3455,8 @@ /*NUMPY_API - Convert object to searchsorted side -*/ + * Convert object to searchsorted side + */ static int PyArray_SearchsideConverter(PyObject *obj, void *addr) { @@ -3130,10 +3469,12 @@ return PY_FAIL; } - if (str[0] == 'l' || str[0] == 'L') + if (str[0] == 'l' || str[0] == 'L') { *side = NPY_SEARCHLEFT; - else if (str[0] == 'r' || str[0] == 'R') + } + else if (str[0] == 'r' || str[0] == 'R') { *side = NPY_SEARCHRIGHT; + } else { PyErr_Format(PyExc_ValueError, "'%s' is an invalid value for keyword 'side'", str); @@ -3144,43 +3485,40 @@ /*NUMPY_API - Numeric.searchsorted(a,v) -*/ + * Numeric.searchsorted(a,v) + */ static PyObject * PyArray_SearchSorted(PyArrayObject *op1, PyObject *op2, NPY_SEARCHSIDE side) { - PyArrayObject *ap1=NULL; - PyArrayObject *ap2=NULL; - PyArrayObject *ret=NULL; + PyArrayObject *ap1 = NULL; + PyArrayObject *ap2 = NULL; + PyArrayObject *ret = NULL; PyArray_Descr *dtype; - NPY_BEGIN_THREADS_DEF; dtype = PyArray_DescrFromObject((PyObject *)op2, op1->descr); - /* need ap1 as contiguous array and of right type */ Py_INCREF(dtype); ap1 = (PyArrayObject *)PyArray_FromAny((PyObject *)op1, dtype, 1, 1, NPY_DEFAULT, NULL); - if (ap1 == NULL) { Py_DECREF(dtype); return NULL; } /* need ap2 as contiguous array and of right type */ - ap2 = (PyArrayObject *)PyArray_FromAny(op2, dtype, 0, 0, NPY_DEFAULT, NULL); - - if (ap2 == NULL) + ap2 = (PyArrayObject *)PyArray_FromAny(op2, dtype, + 0, 0, NPY_DEFAULT, NULL); + if (ap2 == NULL) { goto fail; - + } /* ret is a contiguous array of intp type to hold returned indices */ ret = (PyArrayObject *)PyArray_New(ap2->ob_type, ap2->nd, ap2->dimensions, PyArray_INTP, NULL, NULL, 0, 0, (PyObject *)ap2); - if (ret == NULL) + if (ret == NULL) { goto fail; - + } /* check that comparison function exists */ if (ap2->descr->f->compare == NULL) { PyErr_SetString(PyExc_TypeError, @@ -3210,9 +3548,9 @@ } /* - Make a new empty array, of the passed size, of a type that takes the - priority of ap1 and ap2 into account. -*/ + * Make a new empty array, of the passed size, of a type that takes the + * priority of ap1 and ap2 into account. + */ static PyArrayObject * new_array_for_sum(PyArrayObject *ap1, PyArrayObject *ap2, int nd, intp dimensions[], int typenum) @@ -3220,15 +3558,16 @@ PyArrayObject *ret; PyTypeObject *subtype; double prior1, prior2; - /* Need to choose an output array that can hold a sum - -- use priority to determine which subtype. - */ + /* + * Need to choose an output array that can hold a sum + * -- use priority to determine which subtype. + */ if (ap2->ob_type != ap1->ob_type) { prior2 = PyArray_GetPriority((PyObject *)ap2, 0.0); prior1 = PyArray_GetPriority((PyObject *)ap1, 0.0); - subtype = (prior2 > prior1 ? ap2->ob_type : ap1->ob_type); - } else { + } + else { prior1 = prior2 = 0.0; subtype = ap1->ob_type; } @@ -3240,16 +3579,15 @@ return ret; } -/* Could perhaps be redone to not make contiguous arrays - */ +/* Could perhaps be redone to not make contiguous arrays */ /*NUMPY_API - Numeric.innerproduct(a,v) -*/ + * Numeric.innerproduct(a,v) + */ static PyObject * PyArray_InnerProduct(PyObject *op1, PyObject *op2) { - PyArrayObject *ap1, *ap2, *ret=NULL; + PyArrayObject *ap1, *ap2, *ret = NULL; PyArrayIterObject *it1, *it2; intp i, j, l; int typenum, nd, axis; @@ -3258,90 +3596,86 @@ intp dimensions[MAX_DIMS]; PyArray_DotFunc *dot; PyArray_Descr *typec; - NPY_BEGIN_THREADS_DEF; - typenum = PyArray_ObjectType(op1, 0); + typenum = PyArray_ObjectType(op1, 0); typenum = PyArray_ObjectType(op2, typenum); typec = PyArray_DescrFromType(typenum); Py_INCREF(typec); - ap1 = (PyArrayObject *)PyArray_FromAny(op1, typec, 0, 0, - BEHAVED, NULL); - if (ap1 == NULL) {Py_DECREF(typec); return NULL;} - ap2 = (PyArrayObject *)PyArray_FromAny(op2, typec, 0, 0, - BEHAVED, NULL); - if (ap2 == NULL) goto fail; - + ap1 = (PyArrayObject *)PyArray_FromAny(op1, typec, 0, 0, BEHAVED, NULL); + if (ap1 == NULL) { + Py_DECREF(typec); + return NULL; + } + ap2 = (PyArrayObject *)PyArray_FromAny(op2, typec, 0, 0, BEHAVED, NULL); + if (ap2 == NULL) { + goto fail; + } if (ap1->nd == 0 || ap2->nd == 0) { ret = (ap1->nd == 0 ? ap1 : ap2); - ret = (PyArrayObject *)ret->ob_type->tp_as_number->\ - nb_multiply((PyObject *)ap1, (PyObject *)ap2); + ret = (PyArrayObject *)ret->ob_type->tp_as_number->nb_multiply( + (PyObject *)ap1, (PyObject *)ap2); Py_DECREF(ap1); Py_DECREF(ap2); return (PyObject *)ret; } - l = ap1->dimensions[ap1->nd-1]; - - if (ap2->dimensions[ap2->nd-1] != l) { + l = ap1->dimensions[ap1->nd - 1]; + if (ap2->dimensions[ap2->nd - 1] != l) { PyErr_SetString(PyExc_ValueError, "matrices are not aligned"); goto fail; } - nd = ap1->nd+ap2->nd-2; + nd = ap1->nd + ap2->nd - 2; j = 0; - for(i=0; ind-1; i++) { + for (i = 0; i < ap1->nd - 1; i++) { dimensions[j++] = ap1->dimensions[i]; } - for(i=0; ind-1; i++) { + for (i = 0; i < ap2->nd - 1; i++) { dimensions[j++] = ap2->dimensions[i]; } - - /* Need to choose an output array that can hold a sum - -- use priority to determine which subtype. - */ + /* + * Need to choose an output array that can hold a sum + * -- use priority to determine which subtype. + */ ret = new_array_for_sum(ap1, ap2, nd, dimensions, typenum); - if (ret == NULL) goto fail; - + if (ret == NULL) { + goto fail; + } dot = (ret->descr->f->dotfunc); - if (dot == NULL) { PyErr_SetString(PyExc_ValueError, "dot not available for this type"); goto fail; } - - is1 = ap1->strides[ap1->nd-1]; - is2 = ap2->strides[ap2->nd-1]; + is1 = ap1->strides[ap1->nd - 1]; + is2 = ap2->strides[ap2->nd - 1]; op = ret->data; os = ret->descr->elsize; - - axis = ap1->nd-1; - it1 = (PyArrayIterObject *)\ - PyArray_IterAllButAxis((PyObject *)ap1, &axis); - axis = ap2->nd-1; - it2 = (PyArrayIterObject *)\ - PyArray_IterAllButAxis((PyObject *)ap2, &axis); - + axis = ap1->nd - 1; + it1 = (PyArrayIterObject *) PyArray_IterAllButAxis((PyObject *)ap1, &axis); + axis = ap2->nd - 1; + it2 = (PyArrayIterObject *) PyArray_IterAllButAxis((PyObject *)ap2, &axis); NPY_BEGIN_THREADS_DESCR(ap2->descr); - while(1) { - while(it2->index < it2->size) { - dot(it1->dataptr, is1, it2->dataptr, is2, op, l, ret); - op += os; - PyArray_ITER_NEXT(it2); - } - PyArray_ITER_NEXT(it1); - if (it1->index >= it1->size) break; - PyArray_ITER_RESET(it2); + while (1) { + while (it2->index < it2->size) { + dot(it1->dataptr, is1, it2->dataptr, is2, op, l, ret); + op += os; + PyArray_ITER_NEXT(it2); } + PyArray_ITER_NEXT(it1); + if (it1->index >= it1->size) { + break; + } + PyArray_ITER_RESET(it2); + } NPY_END_THREADS_DESCR(ap2->descr); - Py_DECREF(it1); + Py_DECREF(it1); Py_DECREF(it2); - - if (PyErr_Occurred()) goto fail; - - + if (PyErr_Occurred()) { + goto fail; + } Py_DECREF(ap1); Py_DECREF(ap2); return (PyObject *)ret; @@ -3354,14 +3688,14 @@ } -/* just like inner product but does the swapaxes stuff on the fly */ /*NUMPY_API - Numeric.matrixproduct(a,v) -*/ + *Numeric.matrixproduct(a,v) + * just like inner product but does the swapaxes stuff on the fly + */ static PyObject * PyArray_MatrixProduct(PyObject *op1, PyObject *op2) { - PyArrayObject *ap1, *ap2, *ret=NULL; + PyArrayObject *ap1, *ap2, *ret = NULL; PyArrayIterObject *it1, *it2; intp i, j, l; int typenum, nd, axis, matchDim; @@ -3370,54 +3704,50 @@ intp dimensions[MAX_DIMS]; PyArray_DotFunc *dot; PyArray_Descr *typec; - NPY_BEGIN_THREADS_DEF; - typenum = PyArray_ObjectType(op1, 0); + typenum = PyArray_ObjectType(op1, 0); typenum = PyArray_ObjectType(op2, typenum); - typec = PyArray_DescrFromType(typenum); Py_INCREF(typec); - ap1 = (PyArrayObject *)PyArray_FromAny(op1, typec, 0, 0, - BEHAVED, NULL); - if (ap1 == NULL) {Py_DECREF(typec); return NULL;} - ap2 = (PyArrayObject *)PyArray_FromAny(op2, typec, 0, 0, - BEHAVED, NULL); - if (ap2 == NULL) goto fail; - + ap1 = (PyArrayObject *)PyArray_FromAny(op1, typec, 0, 0, BEHAVED, NULL); + if (ap1 == NULL) { + Py_DECREF(typec); + return NULL; + } + ap2 = (PyArrayObject *)PyArray_FromAny(op2, typec, 0, 0, BEHAVED, NULL); + if (ap2 == NULL) { + goto fail; + } if (ap1->nd == 0 || ap2->nd == 0) { ret = (ap1->nd == 0 ? ap1 : ap2); - ret = (PyArrayObject *)ret->ob_type->tp_as_number->\ - nb_multiply((PyObject *)ap1, (PyObject *)ap2); + ret = (PyArrayObject *)ret->ob_type->tp_as_number->nb_multiply( + (PyObject *)ap1, (PyObject *)ap2); Py_DECREF(ap1); Py_DECREF(ap2); return (PyObject *)ret; } - - l = ap1->dimensions[ap1->nd-1]; + l = ap1->dimensions[ap1->nd - 1]; if (ap2->nd > 1) { matchDim = ap2->nd - 2; } else { matchDim = 0; } - if (ap2->dimensions[matchDim] != l) { PyErr_SetString(PyExc_ValueError, "objects are not aligned"); goto fail; } - - nd = ap1->nd+ap2->nd-2; + nd = ap1->nd + ap2->nd - 2; if (nd > NPY_MAXDIMS) { - PyErr_SetString(PyExc_ValueError, - "dot: too many dimensions in result"); + PyErr_SetString(PyExc_ValueError, "dot: too many dimensions in result"); goto fail; } j = 0; - for(i=0; ind-1; i++) { + for (i = 0; i < ap1->nd - 1; i++) { dimensions[j++] = ap1->dimensions[i]; } - for(i=0; ind-2; i++) { + for (i = 0; i < ap2->nd - 2; i++) { dimensions[j++] = ap2->dimensions[i]; } if(ap2->nd > 1) { @@ -3431,20 +3761,20 @@ */ is1 = ap1->strides[ap1->nd-1]; is2 = ap2->strides[matchDim]; - /* Choose which subtype to return */ ret = new_array_for_sum(ap1, ap2, nd, dimensions, typenum); - if (ret == NULL) goto fail; - + if (ret == NULL) { + goto fail; + } /* Ensure that multiarray.dot(,<0xM>) -> zeros((N,M)) */ if (PyArray_SIZE(ap1) == 0 && PyArray_SIZE(ap2) == 0) { memset(PyArray_DATA(ret), 0, PyArray_NBYTES(ret)); } - else { /* Ensure that multiarray.dot([],[]) -> 0 */ + else { + /* Ensure that multiarray.dot([],[]) -> 0 */ memset(PyArray_DATA(ret), 0, PyArray_ITEMSIZE(ret)); } - dot = ret->descr->f->dotfunc; if (dot == NULL) { PyErr_SetString(PyExc_ValueError, @@ -3453,29 +3783,31 @@ } op = ret->data; os = ret->descr->elsize; - axis = ap1->nd-1; - it1 = (PyArrayIterObject *)\ + it1 = (PyArrayIterObject *) PyArray_IterAllButAxis((PyObject *)ap1, &axis); - it2 = (PyArrayIterObject *)\ + it2 = (PyArrayIterObject *) PyArray_IterAllButAxis((PyObject *)ap2, &matchDim); - NPY_BEGIN_THREADS_DESCR(ap2->descr); - while(1) { - while(it2->index < it2->size) { - dot(it1->dataptr, is1, it2->dataptr, is2, op, l, ret); - op += os; - PyArray_ITER_NEXT(it2); - } - PyArray_ITER_NEXT(it1); - if (it1->index >= it1->size) break; - PyArray_ITER_RESET(it2); + while (1) { + while (it2->index < it2->size) { + dot(it1->dataptr, is1, it2->dataptr, is2, op, l, ret); + op += os; + PyArray_ITER_NEXT(it2); } + PyArray_ITER_NEXT(it1); + if (it1->index >= it1->size) { + break; + } + PyArray_ITER_RESET(it2); + } NPY_END_THREADS_DESCR(ap2->descr); - Py_DECREF(it1); + Py_DECREF(it1); Py_DECREF(it2); - if (PyErr_Occurred()) goto fail; /* only for OBJECT arrays */ - + if (PyErr_Occurred()) { + /* only for OBJECT arrays */ + goto fail; + } Py_DECREF(ap1); Py_DECREF(ap2); return (PyObject *)ret; @@ -3488,8 +3820,8 @@ } /*NUMPY_API - Fast Copy and Transpose -*/ + * Fast Copy and Transpose + */ static PyObject * PyArray_CopyAndTranspose(PyObject *op) { @@ -3503,9 +3835,12 @@ /* make sure it is well-behaved */ arr = PyArray_FromAny(op, NULL, 0, 0, CARRAY, NULL); - if (arr == NULL) return NULL; + if (arr == NULL) { + return NULL; + } nd = PyArray_NDIM(arr); - if (nd == 1) { /* we will give in to old behavior */ + if (nd == 1) { + /* we will give in to old behavior */ ret = PyArray_Copy((PyArrayObject *)arr); Py_DECREF(arr); return ret; @@ -3521,24 +3856,23 @@ dims[0] = PyArray_DIM(arr,1); dims[1] = PyArray_DIM(arr,0); elsize = PyArray_ITEMSIZE(arr); - Py_INCREF(PyArray_DESCR(arr)); ret = PyArray_NewFromDescr(arr->ob_type, PyArray_DESCR(arr), 2, dims, NULL, NULL, 0, arr); - if (ret == NULL) { Py_DECREF(arr); return NULL; } + /* do 2-d loop */ NPY_BEGIN_ALLOW_THREADS; optr = PyArray_DATA(ret); str2 = elsize*dims[0]; - for (i=0; idimensions[0]; n2 = ap2->dimensions[0]; - if (n1 < n2) { - ret = ap1; ap1 = ap2; ap2 = ret; - ret = NULL; i = n1;n1=n2;n2=i; + ret = ap1; + ap1 = ap2; + ap2 = ret; + ret = NULL; + i = n1; + n1 = n2; + n2 = i; } length = n1; n = n2; switch(mode) { case 0: - length = length-n+1; + length = length - n + 1; n_left = n_right = 0; break; case 1: n_left = (intp)(n/2); - n_right = n-n_left-1; + n_right = n - n_left - 1; break; case 2: - n_right = n-1; - n_left = n-1; - length = length+n-1; + n_right = n - 1; + n_left = n - 1; + length = length + n - 1; break; default: - PyErr_SetString(PyExc_ValueError, - "mode must be 0, 1, or 2"); + PyErr_SetString(PyExc_ValueError, "mode must be 0, 1, or 2"); goto fail; } - /* Need to choose an output array that can hold a sum - -- use priority to determine which subtype. - */ + /* + * Need to choose an output array that can hold a sum + * -- use priority to determine which subtype. + */ ret = new_array_for_sum(ap1, ap2, 1, &length, typenum); - if (ret == NULL) goto fail; - + if (ret == NULL) { + goto fail; + } dot = ret->descr->f->dotfunc; if (dot == NULL) { PyErr_SetString(PyExc_ValueError, @@ -3622,32 +3963,34 @@ } NPY_BEGIN_THREADS_DESCR(ret->descr); - - is1 = ap1->strides[0]; is2 = ap2->strides[0]; - op = ret->data; os = ret->descr->elsize; - - ip1 = ap1->data; ip2 = ap2->data+n_left*is2; - n = n-n_left; - for(i=0; istrides[0]; + is2 = ap2->strides[0]; + op = ret->data; + os = ret->descr->elsize; + ip1 = ap1->data; + ip2 = ap2->data + n_left*is2; + n = n - n_left; + for (i = 0; i < n_left; i++) { dot(ip1, is1, ip2, is2, op, n, ret); n++; ip2 -= is2; op += os; } - for(i=0; i<(n1-n2+1); i++) { + for (i = 0; i < (n1 - n2 + 1); i++) { dot(ip1, is1, ip2, is2, op, n, ret); ip1 += is1; op += os; } - for(i=0; idescr); - - if (PyErr_Occurred()) goto fail; + if (PyErr_Occurred()) { + goto fail; + } Py_DECREF(ap1); Py_DECREF(ap2); return (PyObject *)ret; @@ -3661,8 +4004,8 @@ /*NUMPY_API - ArgMin -*/ + * ArgMin + */ static PyObject * PyArray_ArgMin(PyArrayObject *ap, int axis, PyArrayObject *out) { Modified: trunk/numpy/core/src/scalarmathmodule.c.src =================================================================== --- trunk/numpy/core/src/scalarmathmodule.c.src 2009-02-20 22:28:05 UTC (rev 6426) +++ trunk/numpy/core/src/scalarmathmodule.c.src 2009-02-21 04:21:50 UTC (rev 6427) @@ -64,8 +64,18 @@ ulonglong ah, al, bh, bl, w, x, y, z; /* Convert to non-negative quantities */ - if (a0 < 0) { a = -a0; } else { a = a0; } - if (b0 < 0) { b = -b0; } else { b = b0; } + if (a0 < 0) { + a = -a0; + } + else { + a = a0; + } + if (b0 < 0) { + b = -b0; + } + else { + b = b0; + } #if SIZEOF_LONGLONG == 64 @@ -109,55 +119,61 @@ /* Basic operations: + * + * BINARY: + * + * add, subtract, multiply, divide, remainder, divmod, power, + * floor_divide, true_divide + * + * lshift, rshift, and, or, xor (integers only) + * + * UNARY: + * + * negative, positive, absolute, nonzero, invert, int, long, float, oct, hex + * + */ - BINARY: - - add, subtract, multiply, divide, remainder, divmod, power, - floor_divide, true_divide - - lshift, rshift, and, or, xor (integers only) - - UNARY: - - negative, positive, absolute, nonzero, invert, int, long, float, oct, hex - -*/ - /**begin repeat - #name=byte,short,int,long,longlong# -**/ + * #name = byte, short, int, long, longlong# + */ static void @name at _ctype_add(@name@ a, @name@ b, @name@ *out) { *out = a + b; - if ((*out^a) >= 0 || (*out^b) >= 0) + if ((*out^a) >= 0 || (*out^b) >= 0) { return; + } generate_overflow_error(); return; } static void @name at _ctype_subtract(@name@ a, @name@ b, @name@ *out) { *out = a - b; - if ((*out^a) >= 0 || (*out^~b) >= 0) + if ((*out^a) >= 0 || (*out^~b) >= 0) { return; + } generate_overflow_error(); return; } /**end repeat**/ + /**begin repeat - #name=ubyte,ushort,uint,ulong,ulonglong# -**/ + * #name = ubyte, ushort, uint, ulong, ulonglong# + */ static void @name at _ctype_add(@name@ a, @name@ b, @name@ *out) { *out = a + b; - if (*out >= a && *out >= b) + if (*out >= a && *out >= b) { return; + } generate_overflow_error(); return; } static void @name at _ctype_subtract(@name@ a, @name@ b, @name@ *out) { *out = a - b; - if (a >= b) return; + if (a >= b) { + return; + } generate_overflow_error(); return; } @@ -168,13 +184,14 @@ #endif /**begin repeat - #name=byte,ubyte,short,ushort,int,uint,long,ulong# - #big=(int,uint)*2,(longlong,ulonglong)*2# - #NAME=BYTE,UBYTE,SHORT,USHORT,INT,UINT,LONG,ULONG# - #SIZENAME=BYTE*2,SHORT*2,INT*2,LONG*2# - #SIZE=INT*4,LONGLONG*4# - #neg=(1,0)*4# -**/ + * + * #name = byte, ubyte, short, ushort, int, uint, long, ulong# + * #big = (int,uint)*2, (longlong,ulonglong)*2# + * #NAME = BYTE, UBYTE, SHORT, USHORT, INT, UINT, LONG, ULONG# + * #SIZENAME = BYTE*2, SHORT*2, INT*2, LONG*2# + * #SIZE = INT*4,LONGLONG*4# + * #neg = (1,0)*4# + */ #if SIZEOF_ at SIZE@ > SIZEOF_ at SIZENAME@ static void @name at _ctype_multiply(@name@ a, @name@ b, @name@ *out) { @@ -193,25 +210,29 @@ /**end repeat**/ /**begin repeat - #name=int,uint,long,ulong,longlong,ulonglong# - #SIZE=INT*2,LONG*2,LONGLONG*2# - #char=(s,u)*3# -**/ + * + * #name = int, uint, long, ulong, longlong, ulonglong# + * #SIZE = INT*2, LONG*2, LONGLONG*2# + * #char = (s,u)*3# + */ #if SIZEOF_LONGLONG == SIZEOF_ at SIZE@ static void @name at _ctype_multiply(@name@ a, @name@ b, @name@ *out) { *out = a * b; - if (@char at longlong_overflow(a, b)) + if (@char at longlong_overflow(a, b)) { generate_overflow_error(); + } return; } #endif /**end repeat**/ /**begin repeat - #name=byte,ubyte,short,ushort,int,uint,long,ulong,longlong,ulonglong# - #neg=(1,0)*5# -**/ + * + * #name = byte, ubyte, short, ushort, int, uint, long, + * ulong, longlong, ulonglong# + * #neg = (1,0)*5# + */ static void @name at _ctype_divide(@name@ a, @name@ b, @name@ *out) { if (b == 0) { @@ -228,13 +249,16 @@ #if @neg@ @name@ tmp; tmp = a / b; - if (((a > 0) != (b > 0)) && (a % b != 0)) tmp--; + if (((a > 0) != (b > 0)) && (a % b != 0)) { + tmp--; + } *out = tmp; #else *out = a / b; #endif } } + #define @name at _ctype_floor_divide @name at _ctype_divide static void @name at _ctype_remainder(@name@ a, @name@ b, @name@ *out) { @@ -247,7 +271,8 @@ else if ((a > 0) == (b > 0)) { *out = a % b; } - else { /* handled like Python does */ + else { + /* handled like Python does */ *out = a % b; if (*out) *out += b; } @@ -258,18 +283,21 @@ /**end repeat**/ /**begin repeat - #name=byte,ubyte,short,ushort,int,uint,long,ulong,longlong,ulonglong# - #otyp=float*4, double*6# -**/ + * + * #name = byte, ubyte, short, ushort, int, uint, long, + * ulong, longlong, ulonglong# + * #otyp = float*4, double*6# + */ #define @name at _ctype_true_divide(a, b, out) \ *(out) = ((@otyp@) (a)) / ((@otyp@) (b)); /**end repeat**/ /* b will always be positive in this call */ /**begin repeat - #name=byte,ubyte,short,ushort,int,uint,long,ulong,longlong,ulonglong# - #upc=BYTE,UBYTE,SHORT,USHORT,INT,UINT,LONG,ULONG,LONGLONG,ULONGLONG# -**/ + * + * #name = byte, ubyte, short, ushort, int, uint, long, ulong, longlong, ulonglong# + * #upc = BYTE, UBYTE, SHORT, USHORT, INT, UINT, LONG, ULONG, LONGLONG, ULONGLONG# + */ static void @name at _ctype_power(@name@ a, @name@ b, @name@ *out) { @name@ temp, ix, mult; @@ -280,11 +308,14 @@ if (b & 1) { @name at _ctype_multiply(ix, temp, &mult); ix = mult; - if (temp == 0) - break; /* Avoid ix / 0 */ + if (temp == 0) { + break; + } } b >>= 1; /* Shift exponent down by 1 bit */ - if (b==0) break; + if (b==0) { + break; + } /* Square the value of temp */ @name at _ctype_multiply(temp, temp, &mult); temp = mult; @@ -298,16 +329,16 @@ /* QUESTION: Should we check for overflow / underflow in (l,r)shift? */ /**begin repeat - #name=(byte,ubyte,short,ushort,int,uint,long,ulong,longlong,ulonglong)*5# - #oper=and*10, xor*10, or*10, lshift*10, rshift*10# - #op=&*10, ^*10, |*10, <<*10, >>*10# -**/ + * #name = (byte,ubyte,short,ushort,int,uint,long,ulong,longlong,ulonglong)*5# + * #oper = and*10, xor*10, or*10, lshift*10, rshift*10# + * #op = &*10, ^*10, |*10, <<*10, >>*10# + */ #define @name at _ctype_@oper@(arg1, arg2, out) *(out) = (arg1) @op@ (arg2) /**end repeat**/ /**begin repeat - #name=float, double, longdouble# -**/ + * #name = float, double, longdouble# + */ static @name@ (*_basic_ at name@_floor)(@name@); static @name@ (*_basic_ at name@_sqrt)(@name@); static @name@ (*_basic_ at name@_fmod)(@name@, @name@); @@ -321,44 +352,46 @@ /**end repeat**/ /**begin repeat - #name=cfloat, cdouble, clongdouble# - #rtype=float, double, longdouble# - #c=f,,l# -**/ + * #name = cfloat, cdouble, clongdouble# + * #rtype = float, double, longdouble# + * #c = f,,l# + */ #define @name at _ctype_add(a, b, outp) do{ \ (outp)->real = (a).real + (b).real; \ (outp)->imag = (a).imag + (b).imag; \ - }while(0) + } while(0) #define @name at _ctype_subtract(a, b, outp) do{ \ (outp)->real = (a).real - (b).real; \ (outp)->imag = (a).imag - (b).imag; \ - }while(0) + } while(0) #define @name at _ctype_multiply(a, b, outp) do{ \ (outp)->real = (a).real * (b).real - (a).imag * (b).imag; \ (outp)->imag = (a).real * (b).imag + (a).imag * (b).real; \ - }while(0) + } while(0) #define @name at _ctype_divide(a, b, outp) do{ \ @rtype@ d = (b).real*(b).real + (b).imag*(b).imag; \ (outp)->real = ((a).real*(b).real + (a).imag*(b).imag)/d; \ (outp)->imag = ((a).imag*(b).real - (a).real*(b).imag)/d; \ - }while(0) + } while(0) #define @name at _ctype_true_divide @name at _ctype_divide #define @name at _ctype_floor_divide(a, b, outp) do { \ (outp)->real = _basic_ at rtype@_floor \ (((a).real*(b).real + (a).imag*(b).imag) / \ ((b).real*(b).real + (b).imag*(b).imag)); \ (outp)->imag = 0; \ - }while(0) + } while(0) /**end repeat**/ /**begin repeat - #name=float,double,longdouble# -**/ + * #name = float, double, longdouble# + */ static void @name at _ctype_remainder(@name@ a, @name@ b, @name@ *out) { @name@ mod; mod = _basic_ at name@_fmod(a, b); - if (mod && (((b < 0) != (mod < 0)))) mod += b; + if (mod && (((b < 0) != (mod < 0)))) { + mod += b; + } *out = mod; } /**end repeat**/ @@ -366,8 +399,9 @@ /**begin repeat - #name=byte,ubyte,short,ushort,int,uint,long,ulong,longlong,ulonglong,float,double,longdouble,cfloat,cdouble,clongdouble# -**/ + * #name = byte, ubyte, short, ushort, int, uint, long, ulong, longlong, + * ulonglong, float, double, longdouble, cfloat, cdouble, clongdouble# + */ #define @name at _ctype_divmod(a, b, out, out2) { \ @name at _ctype_floor_divide(a, b, out); \ @name at _ctype_remainder(a, b, out2); \ @@ -375,8 +409,8 @@ /**end repeat**/ /**begin repeat - #name= float, double, longdouble# -**/ + * #name = float, double, longdouble# + */ static @name@ (*_basic_ at name@_pow)(@name@ a, @name@ b); static void @name at _ctype_power(@name@ a, @name@ b, @name@ *out) { @@ -385,9 +419,10 @@ /**end repeat**/ /**begin repeat - #name=byte, ubyte, short, ushort, int, uint, long, ulong, longlong, ulonglong, float, double, longdouble# - #uns=(0,1)*5,0*3# -**/ + * #name = byte, ubyte, short, ushort, int, uint, long, ulong, longlong, + * ulonglong, float, double, longdouble# + * #uns = (0,1)*5,0*3# + */ static void @name at _ctype_negative(@name@ a, @name@ *out) { @@ -400,8 +435,8 @@ /**begin repeat - #name= cfloat, cdouble, clongdouble# -**/ + * #name = cfloat, cdouble, clongdouble# + */ static void @name at _ctype_negative(@name@ a, @name@ *out) { @@ -411,8 +446,9 @@ /**end repeat**/ /**begin repeat - #name=byte, ubyte, short, ushort, int, uint, long, ulong, longlong, ulonglong, float, double, longdouble# -**/ + * #name = byte, ubyte, short, ushort, int, uint, long, ulong, longlong, + * ulonglong, float, double, longdouble# + */ static void @name at _ctype_positive(@name@ a, @name@ *out) { @@ -420,13 +456,14 @@ } /**end repeat**/ -/* Get the nc_powf, nc_pow, and nc_powl functions from - the data area of the power ufunc in umathmodule. -*/ +/* + * Get the nc_powf, nc_pow, and nc_powl functions from + * the data area of the power ufunc in umathmodule. + */ /**begin repeat - #name=cfloat, cdouble, clongdouble# -**/ + * #name = cfloat, cdouble, clongdouble# + */ static void @name at _ctype_positive(@name@ a, @name@ *out) { @@ -443,15 +480,15 @@ /**begin repeat - #name=ubyte, ushort, uint, ulong, ulonglong# -**/ + * #name = ubyte, ushort, uint, ulong, ulonglong# + */ #define @name at _ctype_absolute @name at _ctype_positive /**end repeat**/ /**begin repeat - #name=byte, short, int, long, longlong, float, double, longdouble# -**/ + * #name = byte, short, int, long, longlong, float, double, longdouble# + */ static void @name at _ctype_absolute(@name@ a, @name@ *out) { @@ -460,9 +497,9 @@ /**end repeat**/ /**begin repeat - #name= cfloat, cdouble, clongdouble# - #rname= float, double, longdouble# -**/ + * #name = cfloat, cdouble, clongdouble# + * #rname = float, double, longdouble# + */ static void @name at _ctype_absolute(@name@ a, @rname@ *out) { @@ -471,8 +508,9 @@ /**end repeat**/ /**begin repeat - #name=byte, ubyte, short, ushort, int, uint, long, ulong, longlong, ulonglong# -**/ + * #name = byte, ubyte, short, ushort, int, uint, long, + * ulong, longlong, ulonglong# + */ #define @name at _ctype_invert(a, out) *(out) = ~a; /**end repeat**/ @@ -480,25 +518,27 @@ /* The general strategy for commutative binary operators is to + * + * 1) Convert the types to the common type if both are scalars (0 return) + * 2) If both are not scalars use ufunc machinery (-2 return) + * 3) If both are scalars but cannot be cast to the right type + * return NotImplmented (-1 return) + * + * 4) Perform the function on the C-type. + * 5) If an error condition occurred, check to see + * what the current error-handling is and handle the error. + * + * 6) Construct and return the output scalar. + */ - 1) Convert the types to the common type if both are scalars (0 return) - 2) If both are not scalars use ufunc machinery (-2 return) - 3) If both are scalars but cannot be cast to the right type - return NotImplmented (-1 return) - - 4) Perform the function on the C-type. - 5) If an error condition occurred, check to see - what the current error-handling is and handle the error. - - 6) Construct and return the output scalar. -*/ - - /**begin repeat - #name=byte,ubyte,short,ushort,int,uint,long,ulong,longlong,ulonglong,float,double,longdouble,cfloat,cdouble,clongdouble# - #Name=Byte, UByte, Short, UShort, Int, UInt, Long, ULong, LongLong, ULongLong, Float, Double, LongDouble, CFloat, CDouble, CLongDouble# - #NAME=BYTE, UBYTE, SHORT, USHORT, INT, UINT, LONG, ULONG, LONGLONG, ULONGLONG, FLOAT, DOUBLE, LONGDOUBLE, CFLOAT, CDOUBLE, CLONGDOUBLE# -**/ + * #name = byte, ubyte, short, ushort, int, uint, long, ulong, longlong, + * ulonglong, float, double, longdouble, cfloat, cdouble, clongdouble# + * #Name = Byte, UByte, Short, UShort, Int, UInt, Long, ULong, LongLong, + * ULongLong, Float, Double, LongDouble, CFloat, CDouble, CLongDouble# + * #NAME = BYTE, UBYTE, SHORT, USHORT, INT, UINT, LONG, ULONG, LONGLONG, + * ULONGLONG, FLOAT, DOUBLE, LONGDOUBLE, CFLOAT, CDOUBLE, CLONGDOUBLE# + */ static int _ at name@_convert_to_ctype(PyObject *a, @name@ *arg1) @@ -512,13 +552,17 @@ else if (PyArray_IsScalar(a, Generic)) { PyArray_Descr *descr1; int ret; - if (!PyArray_IsScalar(a, Number)) return -1; + if (!PyArray_IsScalar(a, Number)) { + return -1; + } descr1 = PyArray_DescrFromTypeObject((PyObject *)(a->ob_type)); if (PyArray_CanCastSafely(descr1->type_num, PyArray_ at NAME@)) { PyArray_CastScalarDirect(a, descr1, arg1, PyArray_ at NAME@); ret = 0; } - else ret = -1; + else { + ret = -1; + } Py_DECREF(descr1); return ret; } @@ -535,26 +579,29 @@ /**begin repeat - #name=byte,ubyte,short,ushort,int,uint,long,ulong,longlong,ulonglong,float,double,cfloat,cdouble# -**/ - + * #name = byte, ubyte, short, ushort, int, uint, long, ulong, + * longlong, ulonglong, float, double, cfloat, cdouble# + */ static int _ at name@_convert2_to_ctypes(PyObject *a, @name@ *arg1, PyObject *b, @name@ *arg2) { int ret; ret = _ at name@_convert_to_ctype(a, arg1); - if (ret < 0) return ret; + if (ret < 0) { + return ret; + } ret = _ at name@_convert_to_ctype(b, arg2); - if (ret < 0) return ret; + if (ret < 0) { + return ret; + } return 0; } - /**end repeat**/ /**begin repeat - #name=longdouble, clongdouble# -**/ + * #name = longdouble, clongdouble# + */ static int _ at name@_convert2_to_ctypes(PyObject *a, @name@ *arg1, @@ -562,10 +609,16 @@ { int ret; ret = _ at name@_convert_to_ctype(a, arg1); - if (ret < 0) return ret; + if (ret < 0) { + return ret; + } ret = _ at name@_convert_to_ctype(b, arg2); - if (ret == -2) ret = -3; - if (ret < 0) return ret; + if (ret == -2) { + ret = -3; + } + if (ret < 0) { + return ret; + } return 0; } @@ -600,26 +653,34 @@ #endif switch(_ at name@_convert2_to_ctypes(a, &arg1, b, &arg2)) { - case 0: - break; - case -1: /* one of them can't be cast safely - must be mixed-types*/ - return PyArray_Type.tp_as_number->nb_ at oper@(a,b); - case -2: /* use default handling */ - if (PyErr_Occurred()) return NULL; - return PyGenericArrType_Type.tp_as_number->nb_ at oper@(a,b); - case -3: /* special case for longdouble and clongdouble - because they have a recursive getitem in their dtype */ - Py_INCREF(Py_NotImplemented); - return Py_NotImplemented; + case 0: + break; + case -1: + /* one of them can't be cast safely must be mixed-types*/ + return PyArray_Type.tp_as_number->nb_ at oper@(a,b); + case -2: + /* use default handling */ + if (PyErr_Occurred()) { + return NULL; + } + return PyGenericArrType_Type.tp_as_number->nb_ at oper@(a,b); + case -3: + /* + * special case for longdouble and clongdouble + * because they have a recursive getitem in their dtype + */ + Py_INCREF(Py_NotImplemented); + return Py_NotImplemented; } #if @fperr@ PyUFunc_clearfperr(); #endif - /* here we do the actual calculation with arg1 and arg2 */ - /* as a function call. */ + /* + * here we do the actual calculation with arg1 and arg2 + * as a function call. + */ #if @twoout@ @name at _ctype_@oper@(arg1, arg2, &out, &out2); #else @@ -632,9 +693,11 @@ if (retstatus) { int bufsize, errmask; PyObject *errobj; + if (PyUFunc_GetPyValues("@name at _scalars", &bufsize, &errmask, - &errobj) < 0) + &errobj) < 0) { return NULL; + } first = 1; if (PyUFunc_handlefperr(errmask, errobj, retstatus, &first)) { Py_XDECREF(errobj); @@ -647,18 +710,28 @@ #if @twoout@ ret = PyTuple_New(2); - if (ret==NULL) return NULL; + if (ret == NULL) { + return NULL; + } obj = PyArrayScalar_New(@OName@); - if (obj == NULL) {Py_DECREF(ret); return NULL;} + if (obj == NULL) { + Py_DECREF(ret); + return NULL; + } PyArrayScalar_ASSIGN(obj, @OName@, out); PyTuple_SET_ITEM(ret, 0, obj); obj = PyArrayScalar_New(@OName@); - if (obj == NULL) {Py_DECREF(ret); return NULL;} + if (obj == NULL) { + Py_DECREF(ret); + return NULL; + } PyArrayScalar_ASSIGN(obj, @OName@, out2); PyTuple_SET_ITEM(ret, 1, obj); #else ret = PyArrayScalar_New(@OName@); - if (ret==NULL) return NULL; + if (ret == NULL) { + return NULL; + } PyArrayScalar_ASSIGN(ret, @OName@, out); #endif return ret; @@ -692,24 +765,32 @@ #endif switch(_ at name@_convert2_to_ctypes(a, &arg1, b, &arg2)) { - case 0: - break; - case -1: /* can't cast both safely - mixed-types? */ - return PyArray_Type.tp_as_number->nb_power(a,b,NULL); - case -2: /* use default handling */ - if (PyErr_Occurred()) return NULL; - return PyGenericArrType_Type.tp_as_number->nb_power(a,b,NULL); - case -3: /* special case for longdouble and clongdouble - because they have a recursive getitem in their dtype */ - Py_INCREF(Py_NotImplemented); - return Py_NotImplemented; + case 0: + break; + case -1: + /* can't cast both safely mixed-types? */ + return PyArray_Type.tp_as_number->nb_power(a,b,NULL); + case -2: + /* use default handling */ + if (PyErr_Occurred()) { + return NULL; + } + return PyGenericArrType_Type.tp_as_number->nb_power(a,b,NULL); + case -3: + /* + * special case for longdouble and clongdouble + * because they have a recursive getitem in their dtype + */ + Py_INCREF(Py_NotImplemented); + return Py_NotImplemented; } PyUFunc_clearfperr(); - /* here we do the actual calculation with arg1 and arg2 */ - /* as a function call. */ + /* + * here we do the actual calculation with arg1 and arg2 + * as a function call. + */ #if @cmplx@ if (arg2.real == 0 && arg1.real == 0) { out1.real = out.real = 1; @@ -735,9 +816,11 @@ if (retstatus) { int bufsize, errmask; PyObject *errobj; + if (PyUFunc_GetPyValues("@name at _scalars", &bufsize, &errmask, - &errobj) < 0) + &errobj) < 0) { return NULL; + } first = 1; if (PyUFunc_handlefperr(errmask, errobj, retstatus, &first)) { Py_XDECREF(errobj); @@ -749,17 +832,23 @@ #if @isint@ if (arg2 < 0) { ret = PyArrayScalar_New(@OName@); - if (ret==NULL) return NULL; + if (ret == NULL) { + return NULL; + } PyArrayScalar_ASSIGN(ret, @OName@, out1); } else { ret = PyArrayScalar_New(@Name@); - if (ret==NULL) return NULL; + if (ret == NULL) { + return NULL; + } PyArrayScalar_ASSIGN(ret, @Name@, out); } #else ret = PyArrayScalar_New(@Name@); - if (ret==NULL) return NULL; + if (ret == NULL) { + return NULL; + } PyArrayScalar_ASSIGN(ret, @Name@, out); #endif @@ -769,26 +858,26 @@ /**begin repeat - #name=(cfloat,cdouble,clongdouble)*2# - #oper=divmod*3,remainder*3# -**/ + * #name = (cfloat,cdouble,clongdouble)*2# + * #oper = divmod*3,remainder*3# + */ #define @name at _@oper@ NULL /**end repeat**/ /**begin repeat - #name=(float,double,longdouble,cfloat,cdouble,clongdouble)*5# - #oper=lshift*6, rshift*6, and*6, or*6, xor*6# -**/ + * #name = (float,double,longdouble,cfloat,cdouble,clongdouble)*5# + * #oper = lshift*6, rshift*6, and*6, or*6, xor*6# + */ #define @name at _@oper@ NULL /**end repeat**/ /**begin repeat - #name=(byte,ubyte,short,ushort,int,uint,long,ulong,longlong,ulonglong,float,double,longdouble,cfloat,cdouble,clongdouble)*3, byte,ubyte,short,ushort,int,uint,long,ulong,longlong,ulonglong# - #otyp=(byte,ubyte,short,ushort,int,uint,long,ulong,longlong,ulonglong,float,double,longdouble,cfloat,cdouble,clongdouble)*2,byte,ubyte,short,ushort,int,uint,long,ulong,longlong,ulonglong,float,double,longdouble,float,double,longdouble,byte,ubyte,short,ushort,int,uint,long,ulong,longlong,ulonglong# - #OName=(Byte, UByte, Short, UShort, Int, UInt, Long, ULong, LongLong, ULongLong, Float, Double, LongDouble, CFloat, CDouble, CLongDouble)*2, Byte, UByte, Short, UShort, Int, UInt, Long, ULong, LongLong, ULongLong, Float, Double, LongDouble, Float, Double, LongDouble, Byte, UByte, Short, UShort, Int, UInt, Long, ULong, LongLong, ULongLong# - #oper=negative*16, positive*16, absolute*16, invert*10# -**/ + * #name=(byte,ubyte,short,ushort,int,uint,long,ulong,longlong,ulonglong,float,double,longdouble,cfloat,cdouble,clongdouble)*3, byte,ubyte,short,ushort,int,uint,long,ulong,longlong,ulonglong# + * #otyp=(byte,ubyte,short,ushort,int,uint,long,ulong,longlong,ulonglong,float,double,longdouble,cfloat,cdouble,clongdouble)*2,byte,ubyte,short,ushort,int,uint,long,ulong,longlong,ulonglong,float,double,longdouble,float,double,longdouble,byte,ubyte,short,ushort,int,uint,long,ulong,longlong,ulonglong# + * #OName=(Byte, UByte, Short, UShort, Int, UInt, Long, ULong, LongLong, ULongLong, Float, Double, LongDouble, CFloat, CDouble, CLongDouble)*2, Byte, UByte, Short, UShort, Int, UInt, Long, ULong, LongLong, ULongLong, Float, Double, LongDouble, Float, Double, LongDouble, Byte, UByte, Short, UShort, Int, UInt, Long, ULong, LongLong, ULongLong# + * #oper=negative*16, positive*16, absolute*16, invert*10# + */ static PyObject * @name at _@oper@(PyObject *a) { @@ -799,16 +888,22 @@ switch(_ at name@_convert_to_ctype(a, &arg1)) { case 0: break; - case -1: /* can't cast both safely use different add function */ + case -1: + /* can't cast both safely use different add function */ Py_INCREF(Py_NotImplemented); return Py_NotImplemented; - case -2: /* use default handling */ - if (PyErr_Occurred()) return NULL; + case -2: + /* use default handling */ + if (PyErr_Occurred()) { + return NULL; + } return PyGenericArrType_Type.tp_as_number->nb_ at oper@(a); } - /* here we do the actual calculation with arg1 and arg2 */ - /* make it a function call. */ + /* + * here we do the actual calculation with arg1 and arg2 + * make it a function call. + */ @name at _ctype_@oper@(arg1, &out); @@ -820,15 +915,16 @@ /**end repeat**/ /**begin repeat - #name=float,double,longdouble,cfloat,cdouble,clongdouble# -**/ + * #name = float, double, longdouble, cfloat, cdouble, clongdouble# + */ #define @name at _invert NULL /**end repeat**/ /**begin repeat - #name=byte,ubyte,short,ushort,int,uint,long,ulong,longlong,ulonglong,float,double,longdouble,cfloat,cdouble,clongdouble# - #simp=1*13,0*3# -**/ + * #name = byte, ubyte, short, ushort, int, uint, long, ulong, longlong, + * ulonglong, float, double, longdouble, cfloat, cdouble, clongdouble# + * #simp=1*13,0*3# + */ static int @name at _nonzero(PyObject *a) { @@ -836,12 +932,16 @@ @name@ arg1; if (_ at name@_convert_to_ctype(a, &arg1) < 0) { - if (PyErr_Occurred()) return -1; + if (PyErr_Occurred()) { + return -1; + } return PyGenericArrType_Type.tp_as_number->nb_nonzero(a); } - /* here we do the actual calculation with arg1 and arg2 */ - /* make it a function call. */ + /* + * here we do the actual calculation with arg1 and arg2 + * make it a function call. + */ #if @simp@ ret = (arg1 != 0); @@ -854,14 +954,15 @@ /**end repeat**/ /**begin repeat - #name=byte,ubyte,short,ushort,int,uint,long,ulong,longlong,ulonglong,float,double,longdouble,cfloat,cdouble,clongdouble# - #Name=Byte,UByte,Short,UShort,Int,UInt,Long,ULong,LongLong,ULongLong,Float,Double,LongDouble,CFloat,CDouble,CLongDouble# - #cmplx=,,,,,,,,,,,,,.real,.real,.real# - #sign=(signed,unsigned)*5,,,,,,# - #ctype=long*8,PY_LONG_LONG*2,double*6# - #realtyp=0*10,1*6# - #func=(PyLong_FromLong,PyLong_FromUnsignedLong)*4,PyLong_FromLongLong,PyLong_FromUnsignedLongLong,PyLong_FromDouble*6# -**/ + * + * #name=byte,ubyte,short,ushort,int,uint,long,ulong,longlong,ulonglong,float,double,longdouble,cfloat,cdouble,clongdouble# + * #Name=Byte,UByte,Short,UShort,Int,UInt,Long,ULong,LongLong,ULongLong,Float,Double,LongDouble,CFloat,CDouble,CLongDouble# + * #cmplx=,,,,,,,,,,,,,.real,.real,.real# + * #sign=(signed,unsigned)*5,,,,,,# + * #ctype=long*8,PY_LONG_LONG*2,double*6# + * #realtyp=0*10,1*6# + * #func=(PyLong_FromLong,PyLong_FromUnsignedLong)*4,PyLong_FromLongLong,PyLong_FromUnsignedLongLong,PyLong_FromDouble*6# + */ static PyObject * @name at _int(PyObject *obj) { @@ -878,12 +979,13 @@ /**end repeat**/ /**begin repeat - #name=(byte,ubyte,short,ushort,int,uint,long,ulong,longlong,ulonglong,float,double,longdouble,cfloat,cdouble,clongdouble)*2# - #Name=(Byte,UByte,Short,UShort,Int,UInt,Long,ULong,LongLong,ULongLong,Float,Double,LongDouble,CFloat,CDouble,CLongDouble)*2# - #cmplx=(,,,,,,,,,,,,,.real,.real,.real)*2# - #which=long*16,float*16# - #func=(PyLong_FromLongLong, PyLong_FromUnsignedLongLong)*5,PyLong_FromDouble*6,PyFloat_FromDouble*16# -**/ + * + * #name=(byte,ubyte,short,ushort,int,uint,long,ulong,longlong,ulonglong,float,double,longdouble,cfloat,cdouble,clongdouble)*2# + * #Name=(Byte,UByte,Short,UShort,Int,UInt,Long,ULong,LongLong,ULongLong,Float,Double,LongDouble,CFloat,CDouble,CLongDouble)*2# + * #cmplx=(,,,,,,,,,,,,,.real,.real,.real)*2# + * #which=long*16,float*16# + * #func=(PyLong_FromLongLong, PyLong_FromUnsignedLongLong)*5,PyLong_FromDouble*6,PyFloat_FromDouble*16# + */ static PyObject * @name at _@which@(PyObject *obj) { @@ -893,11 +995,12 @@ /**begin repeat - #name=(byte,ubyte,short,ushort,int,uint,long,ulong,longlong,ulonglong,float,double,longdouble,cfloat,cdouble,clongdouble)*2# - #oper=oct*16, hex*16# - #kind=(int*5, long*5, int, long*2, int, long*2)*2# - #cap=(Int*5, Long*5, Int, Long*2, Int, Long*2)*2# -**/ + * + * #name=(byte,ubyte,short,ushort,int,uint,long,ulong,longlong,ulonglong,float,double,longdouble,cfloat,cdouble,clongdouble)*2# + * #oper=oct*16, hex*16# + * #kind=(int*5, long*5, int, long*2, int, long*2)*2# + * #cap=(Int*5, Long*5, Int, Long*2, Int, Long*2)*2# + */ static PyObject * @name at _@oper@(PyObject *obj) { @@ -910,9 +1013,9 @@ /**begin repeat - #oper=le,ge,lt,gt,eq,ne# - #op=<=,>=,<,>,==,!=# -**/ + * #oper=le,ge,lt,gt,eq,ne# + * #op=<=,>=,<,>,==,!=# + */ #define def_cmp_ at oper@(arg1, arg2) (arg1 @op@ arg2) #define cmplx_cmp_ at oper@(arg1, arg2) ((arg1.real == arg2.real) ? \ arg1.imag @op@ arg2.imag : \ @@ -920,9 +1023,9 @@ /**end repeat**/ /**begin repeat - #name=byte,ubyte,short,ushort,int,uint,long,ulong,longlong,ulonglong,float,double,longdouble,cfloat,cdouble,clongdouble# - #simp=def*13,cmplx*3# -**/ + * #name=byte,ubyte,short,ushort,int,uint,long,ulong,longlong,ulonglong,float,double,longdouble,cfloat,cdouble,clongdouble# + * #simp=def*13,cmplx*3# + */ static PyObject* @name at _richcompare(PyObject *self, PyObject *other, int cmp_op) { From numpy-svn at scipy.org Fri Feb 20 23:36:18 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Fri, 20 Feb 2009 22:36:18 -0600 (CST) Subject: [Numpy-svn] r6428 - trunk Message-ID: <20090221043618.5E29FC7C033@scipy.org> Author: cdavid Date: 2009-02-20 22:35:57 -0600 (Fri, 20 Feb 2009) New Revision: 6428 Modified: trunk/ Log: Removed merge tracking for "svnmerge" for http://svn.scipy.org/svn/numpy/branches/fix_float_format Property changes on: trunk ___________________________________________________________________ Name: svnmerge-integrated - /branches/distutils-revamp:1-2752 /branches/dynamic_cpu_configuration:1-6101 /branches/fix_float_format:1-6359 /branches/multicore:1-3687 /branches/numpy-mingw-w64:1-6150 /branches/visualstudio_manifest:1-6077 /trunk:1-2871 + /branches/distutils-revamp:1-2752 /branches/dynamic_cpu_configuration:1-6101 /branches/multicore:1-3687 /branches/numpy-mingw-w64:1-6150 /branches/visualstudio_manifest:1-6077 /trunk:1-2871 From numpy-svn at scipy.org Fri Feb 20 23:37:51 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Fri, 20 Feb 2009 22:37:51 -0600 (CST) Subject: [Numpy-svn] r6429 - branches/fix_float_format Message-ID: <20090221043751.C6A6FC7C033@scipy.org> Author: cdavid Date: 2009-02-20 22:37:41 -0600 (Fri, 20 Feb 2009) New Revision: 6429 Modified: branches/fix_float_format/ Log: Removed merge tracking for "svnmerge" for http://svn.scipy.org/svn/numpy/trunk Property changes on: branches/fix_float_format ___________________________________________________________________ Name: svnmerge-integrated - /branches/distutils-revamp:1-2752 /branches/dynamic_cpu_configuration:1-6101 /branches/multicore:1-3687 /branches/numpy-mingw-w64:1-6150 /branches/visualstudio_manifest:1-6077 /trunk:1-6271 + /branches/distutils-revamp:1-2752 /branches/dynamic_cpu_configuration:1-6101 /branches/multicore:1-3687 /branches/numpy-mingw-w64:1-6150 /branches/visualstudio_manifest:1-6077 From numpy-svn at scipy.org Fri Feb 20 23:39:56 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Fri, 20 Feb 2009 22:39:56 -0600 (CST) Subject: [Numpy-svn] r6430 - branches Message-ID: <20090221043956.D00D1C7C033@scipy.org> Author: cdavid Date: 2009-02-20 22:39:39 -0600 (Fri, 20 Feb 2009) New Revision: 6430 Removed: branches/fix_float_format/ Log: Remove fix_float_format branch, was integrated in trunk From numpy-svn at scipy.org Fri Feb 20 23:41:25 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Fri, 20 Feb 2009 22:41:25 -0600 (CST) Subject: [Numpy-svn] r6431 - trunk/numpy/core/include/numpy Message-ID: <20090221044125.3F1BDC7C033@scipy.org> Author: charris Date: 2009-02-20 22:41:17 -0600 (Fri, 20 Feb 2009) New Revision: 6431 Modified: trunk/numpy/core/include/numpy/ndarrayobject.h Log: Remove terminating ";" from macro to fix ticket #918. Modified: trunk/numpy/core/include/numpy/ndarrayobject.h =================================================================== --- trunk/numpy/core/include/numpy/ndarrayobject.h 2009-02-21 04:39:39 UTC (rev 6430) +++ trunk/numpy/core/include/numpy/ndarrayobject.h 2009-02-21 04:41:17 UTC (rev 6431) @@ -1692,12 +1692,12 @@ #define PyArray_GETITEM(obj,itemptr) \ ((PyArrayObject *)(obj))->descr->f->getitem((char *)(itemptr), \ - (PyArrayObject *)(obj)); + (PyArrayObject *)(obj)) #define PyArray_SETITEM(obj,itemptr,v) \ ((PyArrayObject *)(obj))->descr->f->setitem((PyObject *)(v), \ (char *)(itemptr), \ - (PyArrayObject *)(obj)); + (PyArrayObject *)(obj)) #define PyTypeNum_ISBOOL(type) ((type) == NPY_BOOL) From numpy-svn at scipy.org Sat Feb 21 00:37:17 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Fri, 20 Feb 2009 23:37:17 -0600 (CST) Subject: [Numpy-svn] r6432 - branches/visualstudio_manifest Message-ID: <20090221053717.A74DBC7C075@scipy.org> Author: cdavid Date: 2009-02-20 23:37:05 -0600 (Fri, 20 Feb 2009) New Revision: 6432 Modified: branches/visualstudio_manifest/ Log: Removed merge tracking for "svnmerge" for http://svn.scipy.org/svn/numpy/trunk Property changes on: branches/visualstudio_manifest ___________________________________________________________________ Name: svnmerge-integrated - /branches/distutils-revamp:1-2752 /branches/multicore:1-3687 /trunk:1-6076 + /branches/distutils-revamp:1-2752 /branches/multicore:1-3687 From numpy-svn at scipy.org Sat Feb 21 00:38:38 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Fri, 20 Feb 2009 23:38:38 -0600 (CST) Subject: [Numpy-svn] r6433 - trunk Message-ID: <20090221053838.5F26AC7C033@scipy.org> Author: cdavid Date: 2009-02-20 23:38:29 -0600 (Fri, 20 Feb 2009) New Revision: 6433 Modified: trunk/ Log: Removed merge tracking for "svnmerge" for http://svn.scipy.org/svn/numpy/branches/visualstudio_manifest Property changes on: trunk ___________________________________________________________________ Name: svnmerge-integrated - /branches/distutils-revamp:1-2752 /branches/dynamic_cpu_configuration:1-6101 /branches/multicore:1-3687 /branches/numpy-mingw-w64:1-6150 /branches/visualstudio_manifest:1-6077 /trunk:1-2871 + /branches/distutils-revamp:1-2752 /branches/dynamic_cpu_configuration:1-6101 /branches/multicore:1-3687 /branches/numpy-mingw-w64:1-6150 /trunk:1-2871 From numpy-svn at scipy.org Sat Feb 21 00:39:32 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Fri, 20 Feb 2009 23:39:32 -0600 (CST) Subject: [Numpy-svn] r6434 - branches Message-ID: <20090221053932.767B0C7C033@scipy.org> Author: cdavid Date: 2009-02-20 23:39:20 -0600 (Fri, 20 Feb 2009) New Revision: 6434 Removed: branches/visualstudio_manifest/ Log: Remove obsolete branch for visual studio manifest handling with mingw: integrated to the trunk From numpy-svn at scipy.org Sat Feb 21 00:41:18 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Fri, 20 Feb 2009 23:41:18 -0600 (CST) Subject: [Numpy-svn] r6435 - branches/clean_math_config Message-ID: <20090221054118.D299DC7C033@scipy.org> Author: cdavid Date: 2009-02-20 23:41:10 -0600 (Fri, 20 Feb 2009) New Revision: 6435 Modified: branches/clean_math_config/ Log: Removed merge tracking for "svnmerge" for http://svn.scipy.org/svn/numpy/trunk Property changes on: branches/clean_math_config ___________________________________________________________________ Name: svnmerge-integrated - /branches/aligned_alloca:1-5127 /branches/build_with_scons:1-4676 /branches/cdavid:1-5257 /branches/cleanconfig_rtm:1-4677 /branches/distutils-revamp:1-2752 /branches/distutils_scons_command:1-4619 /branches/multicore:1-3687 /branches/numpy.scons:1-4484 /branches/vs_longstring:1-5656 /cleaned_math_config:1-5731 /trunk:1-5911 + /branches/aligned_alloca:1-5127 /branches/build_with_scons:1-4676 /branches/cdavid:1-5257 /branches/cleanconfig_rtm:1-4677 /branches/distutils-revamp:1-2752 /branches/distutils_scons_command:1-4619 /branches/multicore:1-3687 /branches/numpy.scons:1-4484 /branches/vs_longstring:1-5656 /cleaned_math_config:1-5731 From numpy-svn at scipy.org Sat Feb 21 00:43:06 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Fri, 20 Feb 2009 23:43:06 -0600 (CST) Subject: [Numpy-svn] r6436 - branches Message-ID: <20090221054306.F15BAC7C033@scipy.org> Author: cdavid Date: 2009-02-20 23:42:52 -0600 (Fri, 20 Feb 2009) New Revision: 6436 Removed: branches/clean_math_config/ Log: Remove obsolete branch clean_math_config, has been integrated into the trunk. From numpy-svn at scipy.org Sat Feb 21 12:01:17 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sat, 21 Feb 2009 11:01:17 -0600 (CST) Subject: [Numpy-svn] r6437 - in branches/coremath/numpy/core: . include/numpy Message-ID: <20090221170117.20A42C7C029@scipy.org> Author: cdavid Date: 2009-02-21 11:00:45 -0600 (Sat, 21 Feb 2009) New Revision: 6437 Modified: branches/coremath/numpy/core/SConscript branches/coremath/numpy/core/include/numpy/numpyconfig.h.in Log: Update isnan and co checks for npymath. Modified: branches/coremath/numpy/core/SConscript =================================================================== --- branches/coremath/numpy/core/SConscript 2009-02-21 05:42:52 UTC (rev 6436) +++ branches/coremath/numpy/core/SConscript 2009-02-21 17:00:45 UTC (rev 6437) @@ -131,7 +131,7 @@ #---------------------------------- # Function to check: mfuncs = ('expl', 'expf', 'log1p', 'expm1', 'asinh', 'atanhf', 'atanhl', - 'isnan', 'isinf', 'rint', 'trunc') + 'rint', 'trunc') # Set value to 1 for each defined function (in math lib) mfuncs_defined = dict([(f, 0) for f in mfuncs]) @@ -185,7 +185,11 @@ #include #include """ - config.CheckDeclaration(f, includes=includes) + st = config.CheckDeclaration(f, includes=includes) + if st: + numpyconfig_sym.append(('DEFINE_NPY_HAVE_%s_DECL' % f.upper(), + '#define NPY_HAVE_%s_DECL' % f.upper())) + #------------------------------------------------------- # Define the function PyOS_ascii_strod if not available Modified: branches/coremath/numpy/core/include/numpy/numpyconfig.h.in =================================================================== --- branches/coremath/numpy/core/include/numpy/numpyconfig.h.in 2009-02-21 05:42:52 UTC (rev 6436) +++ branches/coremath/numpy/core/include/numpy/numpyconfig.h.in 2009-02-21 17:00:45 UTC (rev 6437) @@ -6,6 +6,11 @@ #define NPY_SIZEOF_LONGDOUBLE @SIZEOF_LONG_DOUBLE@ #define NPY_SIZEOF_PY_INTPTR_T @SIZEOF_PY_INTPTR_T@ + at DEFINE_NPY_HAVE_ISNAN_DECL@ + at DEFINE_NPY_HAVE_ISINF_DECL@ + at DEFINE_NPY_HAVE_ISFINITE_DECL@ + at DEFINE_NPY_HAVE_SIGNBIT_DECL@ + @DEFINE_NPY_NO_SIGNAL@ #define NPY_NO_SMP @NPY_NO_SMP@ From numpy-svn at scipy.org Sat Feb 21 12:01:54 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sat, 21 Feb 2009 11:01:54 -0600 (CST) Subject: [Numpy-svn] r6438 - branches/coremath/numpy/core Message-ID: <20090221170154.C5FA8C7C01F@scipy.org> Author: cdavid Date: 2009-02-21 11:01:40 -0600 (Sat, 21 Feb 2009) New Revision: 6438 Modified: branches/coremath/numpy/core/SConscript Log: Add npymath lib to scons script. Modified: branches/coremath/numpy/core/SConscript =================================================================== --- branches/coremath/numpy/core/SConscript 2009-02-21 17:00:45 UTC (rev 6437) +++ branches/coremath/numpy/core/SConscript 2009-02-21 17:01:40 UTC (rev 6438) @@ -260,7 +260,6 @@ # Generate generated code #------------------------ scalartypes_src = env.GenerateFromTemplate(pjoin('src', 'scalartypes.inc.src')) -umath_funcs_c99_src = env.GenerateFromTemplate(pjoin('src', 'umath_funcs_c99.inc.src')) umath_funcs_src = env.GenerateFromTemplate(pjoin('src', 'umath_funcs.inc.src')) umath_loops_src = env.GenerateFromTemplate(pjoin('src', 'umath_loops.inc.src')) arraytypes_src = env.GenerateFromTemplate(pjoin('src', 'arraytypes.inc.src')) @@ -280,6 +279,12 @@ env.Prepend(CPPPATH = ['include', '.']) +# npymath core lib +npymath_src = env.GenerateFromTemplate(pjoin('src', 'npy_math.c.src')) +env.DistutilsStaticExtLibrary("npymath", npymath_src) +env.Prepend(LIBS=["npymath"]) +env.Prepend(LIBPATH=["."]) + #----------------- # Build multiarray #----------------- From numpy-svn at scipy.org Sat Feb 21 12:02:37 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sat, 21 Feb 2009 11:02:37 -0600 (CST) Subject: [Numpy-svn] r6439 - in branches/coremath/numpy/core: . include/numpy Message-ID: <20090221170237.762F7C7C01F@scipy.org> Author: cdavid Date: 2009-02-21 11:02:18 -0600 (Sat, 21 Feb 2009) New Revision: 6439 Modified: branches/coremath/numpy/core/SConscript branches/coremath/numpy/core/include/numpy/numpyconfig.h.in Log: Fix numpyconfig generation in scons build. Modified: branches/coremath/numpy/core/SConscript =================================================================== --- branches/coremath/numpy/core/SConscript 2009-02-21 17:01:40 UTC (rev 6438) +++ branches/coremath/numpy/core/SConscript 2009-02-21 17:02:18 UTC (rev 6439) @@ -187,8 +187,8 @@ """ st = config.CheckDeclaration(f, includes=includes) if st: - numpyconfig_sym.append(('DEFINE_NPY_HAVE_%s_DECL' % f.upper(), - '#define NPY_HAVE_%s_DECL' % f.upper())) + numpyconfig_sym.append(('DEFINE_NPY_HAVE_DECL_%s' % f.upper(), + '#define NPY_HAVE_DECL_%s' % f.upper())) #------------------------------------------------------- Modified: branches/coremath/numpy/core/include/numpy/numpyconfig.h.in =================================================================== --- branches/coremath/numpy/core/include/numpy/numpyconfig.h.in 2009-02-21 17:01:40 UTC (rev 6438) +++ branches/coremath/numpy/core/include/numpy/numpyconfig.h.in 2009-02-21 17:02:18 UTC (rev 6439) @@ -6,10 +6,10 @@ #define NPY_SIZEOF_LONGDOUBLE @SIZEOF_LONG_DOUBLE@ #define NPY_SIZEOF_PY_INTPTR_T @SIZEOF_PY_INTPTR_T@ - at DEFINE_NPY_HAVE_ISNAN_DECL@ - at DEFINE_NPY_HAVE_ISINF_DECL@ - at DEFINE_NPY_HAVE_ISFINITE_DECL@ - at DEFINE_NPY_HAVE_SIGNBIT_DECL@ + at DEFINE_NPY_HAVE_DECL_ISNAN@ + at DEFINE_NPY_HAVE_DECL_ISINF@ + at DEFINE_NPY_HAVE_DECL_ISFINITE@ + at DEFINE_NPY_HAVE_DECL_SIGNBIT@ @DEFINE_NPY_NO_SIGNAL@ #define NPY_NO_SMP @NPY_NO_SMP@ From numpy-svn at scipy.org Sat Feb 21 12:03:29 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sat, 21 Feb 2009 11:03:29 -0600 (CST) Subject: [Numpy-svn] r6440 - in branches/coremath/numpy/core: include/numpy src Message-ID: <20090221170329.E029FC7C01F@scipy.org> Author: cdavid Date: 2009-02-21 11:03:06 -0600 (Sat, 21 Feb 2009) New Revision: 6440 Modified: branches/coremath/numpy/core/include/numpy/npy_math.h branches/coremath/numpy/core/src/umath_funcs.inc.src Log: Move math constants out of umath into npymath. Modified: branches/coremath/numpy/core/include/numpy/npy_math.h =================================================================== --- branches/coremath/numpy/core/include/numpy/npy_math.h 2009-02-21 17:02:18 UTC (rev 6439) +++ branches/coremath/numpy/core/include/numpy/npy_math.h 2009-02-21 17:03:06 UTC (rev 6440) @@ -5,6 +5,42 @@ #include /* + * Useful constants + */ +#define NPY_E 2.7182818284590452353602874713526625 /* e */ +#define NPY_LOG2E 1.4426950408889634073599246810018921 /* log_2 e */ +#define NPY_LOG10E 0.4342944819032518276511289189166051 /* log_10 e */ +#define NPY_LOGE2 0.6931471805599453094172321214581766 /* log_e 2 */ +#define NPY_LOGE10 2.3025850929940456840179914546843642 /* log_e 10 */ +#define NPY_PI 3.1415926535897932384626433832795029 /* pi */ +#define NPY_PI_2 1.5707963267948966192313216916397514 /* pi/2 */ +#define NPY_PI_4 0.7853981633974483096156608458198757 /* pi/4 */ +#define NPY_1_PI 0.3183098861837906715377675267450287 /* 1/pi */ +#define NPY_2_PI 0.6366197723675813430755350534900574 /* 2/pi */ + +#define NPY_Ef 2.7182818284590452353602874713526625F /* e */ +#define NPY_LOG2Ef 1.4426950408889634073599246810018921F /* log_2 e */ +#define NPY_LOG10Ef 0.4342944819032518276511289189166051F /* log_10 e */ +#define NPY_LOGE2f 0.6931471805599453094172321214581766F /* log_e 2 */ +#define NPY_LOGE10f 2.3025850929940456840179914546843642F /* log_e 10 */ +#define NPY_PIf 3.1415926535897932384626433832795029F /* pi */ +#define NPY_PI_2f 1.5707963267948966192313216916397514F /* pi/2 */ +#define NPY_PI_4f 0.7853981633974483096156608458198757F /* pi/4 */ +#define NPY_1_PIf 0.3183098861837906715377675267450287F /* 1/pi */ +#define NPY_2_PIf 0.6366197723675813430755350534900574F /* 2/pi */ + +#define NPY_El 2.7182818284590452353602874713526625L /* e */ +#define NPY_LOG2El 1.4426950408889634073599246810018921L /* log_2 e */ +#define NPY_LOG10El 0.4342944819032518276511289189166051L /* log_10 e */ +#define NPY_LOGE2l 0.6931471805599453094172321214581766L /* log_e 2 */ +#define NPY_LOGE10l 2.3025850929940456840179914546843642L /* log_e 10 */ +#define NPY_PIl 3.1415926535897932384626433832795029L /* pi */ +#define NPY_PI_2l 1.5707963267948966192313216916397514L /* pi/2 */ +#define NPY_PI_4l 0.7853981633974483096156608458198757L /* pi/4 */ +#define NPY_1_PIl 0.3183098861837906715377675267450287L /* 1/pi */ +#define NPY_2_PIl 0.6366197723675813430755350534900574L /* 2/pi */ + +/* * C99 double math funcs */ double npy_sin(double x); Modified: branches/coremath/numpy/core/src/umath_funcs.inc.src =================================================================== --- branches/coremath/numpy/core/src/umath_funcs.inc.src 2009-02-21 17:02:18 UTC (rev 6439) +++ branches/coremath/numpy/core/src/umath_funcs.inc.src 2009-02-21 17:03:06 UTC (rev 6440) @@ -10,24 +10,6 @@ #define M_LOG10_E 0.434294481903251827651128918916605082294397 -/* Useful constants in three precisions.*/ - -/**begin repeat - * #c = f, ,l# - * #C = F, ,L# - */ -#define NPY_E at c@ 2.7182818284590452353602874713526625 at C@ /* e */ -#define NPY_LOG2E at c@ 1.4426950408889634073599246810018921 at C@ /* log_2 e */ -#define NPY_LOG10E at c@ 0.4342944819032518276511289189166051 at C@ /* log_10 e */ -#define NPY_LOGE2 at c@ 0.6931471805599453094172321214581766 at C@ /* log_e 2 */ -#define NPY_LOGE10 at c@ 2.3025850929940456840179914546843642 at C@ /* log_e 10 */ -#define NPY_PI at c@ 3.1415926535897932384626433832795029 at C@ /* pi */ -#define NPY_PI_2 at c@ 1.5707963267948966192313216916397514 at C@ /* pi/2 */ -#define NPY_PI_4 at c@ 0.7853981633974483096156608458198757 at C@ /* pi/4 */ -#define NPY_1_PI at c@ 0.3183098861837906715377675267450287 at C@ /* 1/pi */ -#define NPY_2_PI at c@ 0.6366197723675813430755350534900574 at C@ /* 2/pi */ -/**end repeat**/ - /* ***************************************************************************** ** PYTHON OBJECT FUNCTIONS ** From numpy-svn at scipy.org Sat Feb 21 12:25:33 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sat, 21 Feb 2009 11:25:33 -0600 (CST) Subject: [Numpy-svn] r6441 - in trunk: . numpy/core numpy/distutils numpy/distutils/command Message-ID: <20090221172533.EDD30C7C029@scipy.org> Author: cdavid Date: 2009-02-21 11:25:09 -0600 (Sat, 21 Feb 2009) New Revision: 6441 Modified: trunk/ trunk/numpy/core/setup.py trunk/numpy/distutils/command/build_ext.py trunk/numpy/distutils/mingw32ccompiler.py Log: Merged revisions 6153-6173,6176-6178,6184 via svnmerge from http://svn.scipy.org/svn/numpy/branches/numpy-mingw-w64 ........ r6153 | cdavid | 2008-12-19 17:06:06 +0900 (Fri, 19 Dec 2008) | 1 line Add a function to find python dll on windows. ........ r6154 | cdavid | 2008-12-19 17:28:49 +0900 (Fri, 19 Dec 2008) | 1 line Fix typo when getting system32 location. ........ r6155 | cdavid | 2008-12-19 17:37:19 +0900 (Fri, 19 Dec 2008) | 1 line Add a function to get a dump of private headers from dll. ........ r6156 | cdavid | 2008-12-19 17:41:39 +0900 (Fri, 19 Dec 2008) | 1 line Add a function to generate a .def file from a dll. ........ r6157 | cdavid | 2008-12-19 17:43:56 +0900 (Fri, 19 Dec 2008) | 1 line Forgot to add the regex for the generate_def function. ........ r6158 | cdavid | 2008-12-19 17:53:49 +0900 (Fri, 19 Dec 2008) | 1 line Fix .def file generation. ........ r6159 | cdavid | 2008-12-19 17:56:54 +0900 (Fri, 19 Dec 2008) | 1 line Add a warning if no symbols found in the dll (if stripped, for example). ........ r6160 | cdavid | 2008-12-19 18:02:24 +0900 (Fri, 19 Dec 2008) | 1 line Refactor build_import_library to take into account multi arch. ........ r6161 | cdavid | 2008-12-19 18:10:03 +0900 (Fri, 19 Dec 2008) | 1 line Do not generate manifest when built with msver 8.*, it does not look like it is needed, and we dont support it anyway ATM. ........ r6162 | cdavid | 2008-12-19 18:18:08 +0900 (Fri, 19 Dec 2008) | 1 line Show arch in the log when building import library. ........ r6163 | cdavid | 2008-12-19 18:22:18 +0900 (Fri, 19 Dec 2008) | 1 line Fix missing out filename. ........ r6164 | cdavid | 2008-12-19 18:32:46 +0900 (Fri, 19 Dec 2008) | 1 line Actually build the import library for mingw on amd64. ........ r6165 | cdavid | 2008-12-19 18:46:30 +0900 (Fri, 19 Dec 2008) | 1 line Do not generate ordinal, and use the basename of the dll instead of the full path in the def.file. ........ r6166 | cdavid | 2008-12-19 18:48:01 +0900 (Fri, 19 Dec 2008) | 1 line Trailing spaces. ........ r6167 | cdavid | 2008-12-19 18:55:16 +0900 (Fri, 19 Dec 2008) | 1 line Add MS_WIN64 macro when built on amd64 + mingw. ........ r6168 | cdavid | 2008-12-19 18:57:06 +0900 (Fri, 19 Dec 2008) | 1 line Forgot to import get_build_architecture. ........ r6169 | cdavid | 2008-12-19 18:57:52 +0900 (Fri, 19 Dec 2008) | 1 line Use a tuple when defining the MS_WIN64 macro. ........ r6170 | cdavid | 2008-12-19 19:05:03 +0900 (Fri, 19 Dec 2008) | 1 line Fix macro def. ........ r6171 | cdavid | 2008-12-19 19:21:54 +0900 (Fri, 19 Dec 2008) | 2 lines Do not use g++ for linking on amd64. ........ r6172 | cdavid | 2008-12-19 19:25:18 +0900 (Fri, 19 Dec 2008) | 1 line do not regenerate the import library if already there. ........ r6173 | cdavid | 2008-12-19 19:28:39 +0900 (Fri, 19 Dec 2008) | 1 line Add one full msvcrt version for 80 (for manifest generation). ........ r6176 | cdavid | 2008-12-21 02:31:48 +0900 (Sun, 21 Dec 2008) | 1 line Remove optimization flags for now, to speed up builds. ........ r6177 | cdavid | 2008-12-21 02:32:11 +0900 (Sun, 21 Dec 2008) | 1 line Add MS_WIN64 for every compile command. ........ r6178 | cdavid | 2008-12-21 02:32:33 +0900 (Sun, 21 Dec 2008) | 1 line Remove handling of MS_WIN64 in commands: deal with it in mingw tool only. ........ r6184 | cdavid | 2008-12-21 16:46:28 +0900 (Sun, 21 Dec 2008) | 1 line Hardcode size of long double, because it is broken with mingw. ........ Property changes on: trunk ___________________________________________________________________ Name: svnmerge-integrated - /branches/distutils-revamp:1-2752 /branches/dynamic_cpu_configuration:1-6101 /branches/multicore:1-3687 /branches/numpy-mingw-w64:1-6150 /trunk:1-2871 + /branches/distutils-revamp:1-2752 /branches/dynamic_cpu_configuration:1-6101 /branches/multicore:1-3687 /branches/numpy-mingw-w64:1-6440 /trunk:1-2871 Modified: trunk/numpy/core/setup.py =================================================================== --- trunk/numpy/core/setup.py 2009-02-21 17:03:06 UTC (rev 6440) +++ trunk/numpy/core/setup.py 2009-02-21 17:25:09 UTC (rev 6441) @@ -474,7 +474,7 @@ ('SIZEOF_LONG', 'long'), ('SIZEOF_FLOAT', 'float'), ('SIZEOF_DOUBLE', 'double'), - ('SIZEOF_LONG_DOUBLE', 'long double'), + #('SIZEOF_LONG_DOUBLE', 'long double'), ('SIZEOF_PY_INTPTR_T', 'Py_intptr_t'), ]: testcode.append(c_size_test % {'sz' : sz, 'type' : t}) @@ -490,6 +490,7 @@ #else fprintf(fp, "/* PY_LONG_LONG not defined */\n"); #endif + fprintf(fp, "#define SIZEOF_LONG_DOUBLE 8\n"); #ifndef CHAR_BIT { unsigned char var = 2; Modified: trunk/numpy/distutils/command/build_ext.py =================================================================== --- trunk/numpy/distutils/command/build_ext.py 2009-02-21 17:03:06 UTC (rev 6440) +++ trunk/numpy/distutils/command/build_ext.py 2009-02-21 17:25:09 UTC (rev 6441) @@ -16,7 +16,7 @@ from numpy.distutils.system_info import combine_paths from numpy.distutils.misc_util import filter_sources, has_f_sources, \ has_cxx_sources, get_ext_source_files, \ - get_numpy_include_dirs, is_sequence + get_numpy_include_dirs, is_sequence, get_build_architecture from numpy.distutils.command.config_compiler import show_fortran_compilers try: Modified: trunk/numpy/distutils/mingw32ccompiler.py =================================================================== --- trunk/numpy/distutils/mingw32ccompiler.py 2009-02-21 17:03:06 UTC (rev 6440) +++ trunk/numpy/distutils/mingw32ccompiler.py 2009-02-21 17:25:09 UTC (rev 6441) @@ -12,6 +12,8 @@ import subprocess import sys import log +import subprocess +import re # Overwrite certain distutils.ccompiler functions: import numpy.distutils.ccompiler @@ -29,8 +31,12 @@ from distutils.unixccompiler import UnixCCompiler from distutils.msvccompiler import get_build_version as get_build_msvc_version -from numpy.distutils.misc_util import msvc_runtime_library +from numpy.distutils.misc_util import msvc_runtime_library, get_build_architecture +# Useful to generate table of symbols from a dll +_START = re.compile(r'\[Ordinal/Name Pointer\] Table') +_TABLE = re.compile(r'^\s+\[([\s*[0-9]*)\] ([a-zA-Z0-9_]*)') + # the same as cygwin plus some additional parameters class Mingw32CCompiler(distutils.cygwinccompiler.CygwinCCompiler): """ A modified MingW32 compiler compatible with an MSVC built Python. @@ -89,17 +95,29 @@ # linker_exe='gcc -mno-cygwin', # linker_so='%s --driver-name g++ -mno-cygwin -mdll -static %s' # % (self.linker, entry_point)) - if self.gcc_version <= "3.0.0": - self.set_executables(compiler='gcc -mno-cygwin -O2 -w', - compiler_so='gcc -mno-cygwin -mdll -O2 -w -Wstrict-prototypes', - linker_exe='g++ -mno-cygwin', - linker_so='%s -mno-cygwin -mdll -static %s' - % (self.linker, entry_point)) + + # MS_WIN64 should be defined when building for amd64 on windows, but + # python headers define it only for MS compilers, which has all kind of + # bad consequences, like using Py_ModuleInit4 instead of + # Py_ModuleInit4_64, etc... So we add it here + if get_build_architecture() == 'AMD64': + self.set_executables( + compiler='gcc -DMS_WIN64 -mno-cygwin -O0 -Wall', + compiler_so='gcc -DMS_WIN64 -mno-cygwin -O0 -Wall -Wstrict-prototypes', + linker_exe='gcc -mno-cygwin', + linker_so='gcc -mno-cygwin -shared') else: - self.set_executables(compiler='gcc -mno-cygwin -O2 -Wall', - compiler_so='gcc -mno-cygwin -O2 -Wall -Wstrict-prototypes', - linker_exe='g++ -mno-cygwin', - linker_so='g++ -mno-cygwin -shared') + if self.gcc_version <= "3.0.0": + self.set_executables(compiler='gcc -mno-cygwin -O2 -w', + compiler_so='gcc -mno-cygwin -mdll -O2 -w -Wstrict-prototypes', + linker_exe='g++ -mno-cygwin', + linker_so='%s -mno-cygwin -mdll -static %s' + % (self.linker, entry_point)) + else: + self.set_executables(compiler='gcc -mno-cygwin -O2 -Wall', + compiler_so='gcc -mno-cygwin -O2 -Wall -Wstrict-prototypes', + linker_exe='g++ -mno-cygwin', + linker_so='g++ -mno-cygwin -shared') # added for python2.3 support # we can't pass it through set_executables because pre 2.2 would fail self.compiler_cxx = ['g++'] @@ -191,11 +209,102 @@ # object_filenames () +def find_python_dll(): + maj, min, micro = [int(i) for i in sys.version_info[:3]] + dllname = 'python%d%d.dll' % (maj, min) + print "Looking for %s" % dllname + + # We can't do much here: + # - find it in python main dir + # - in system32, + # - ortherwise (Sxs), I don't know how to get it. + lib_dirs = [] + lib_dirs.append(os.path.join(sys.prefix, 'lib')) + try: + lib_dirs.append(os.path.join(os.environ['SYSTEMROOT'], 'system32')) + except KeyError: + pass + + for d in lib_dirs: + dll = os.path.join(d, dllname) + if os.path.exists(dll): + return dll + + raise ValueError("%s not found in %s" % (dllname, lib_dirs)) + +def dump_table(dll): + st = subprocess.Popen(["objdump.exe", "-p", dll], stdout=subprocess.PIPE) + return st.stdout.readlines() + +def generate_def(dll, dfile): + """Given a dll file location, get all its exported symbols and dump them + into the given def file. + + The .def file will be overwritten""" + dump = dump_table(dll) + for i in range(len(dump)): + if _START.match(dump[i]): + break + + if i == len(dump): + raise ValueError("Symbol table not found") + + syms = [] + for j in range(i+1, len(dump)): + m = _TABLE.match(dump[j]) + if m: + syms.append((int(m.group(1).strip()), m.group(2))) + else: + break + + if len(syms) == 0: + log.warn('No symbols found in %s' % dll) + + d = open(dfile, 'w') + d.write('LIBRARY %s\n' % os.path.basename(dll)) + d.write(';CODE PRELOAD MOVEABLE DISCARDABLE\n') + d.write(';DATA PRELOAD SINGLE\n') + d.write('\nEXPORTS\n') + for s in syms: + #d.write('@%d %s\n' % (s[0], s[1])) + d.write('%s\n' % s[1]) + d.close() + def build_import_library(): + if os.name != 'nt': + return + + arch = get_build_architecture() + if arch == 'AMD64': + return _build_import_library_amd64() + elif arch == 'Intel': + return _build_import_library_x86() + else: + raise ValueError("Unhandled arch %s" % arch) + +def _build_import_library_amd64(): + dll_file = find_python_dll() + + out_name = "libpython%d%d.a" % tuple(sys.version_info[:2]) + out_file = os.path.join(sys.prefix, 'libs', out_name) + if os.path.isfile(out_file): + log.debug('Skip building import library: "%s" exists' % (out_file)) + return + + def_name = "python%d%d.def" % tuple(sys.version_info[:2]) + def_file = os.path.join(sys.prefix,'libs',def_name) + + log.info('Building import library (arch=AMD64): "%s" (from %s)' \ + % (out_file, dll_file)) + + generate_def(dll_file, def_file) + + cmd = ['dlltool', '-d', def_file, '-l', out_file] + subprocess.Popen(cmd) + +def _build_import_library_x86(): """ Build the import libraries for Mingw32-gcc on Windows """ - if os.name != 'nt': - return lib_name = "python%d%d.lib" % tuple(sys.version_info[:2]) lib_file = os.path.join(sys.prefix,'libs',lib_name) out_name = "libpython%d%d.a" % tuple(sys.version_info[:2]) @@ -206,7 +315,7 @@ if os.path.isfile(out_file): log.debug('Skip building import library: "%s" exists' % (out_file)) return - log.info('Building import library: "%s"' % (out_file)) + log.info('Building import library (ARCH=x86): "%s"' % (out_file)) from numpy.distutils import lib2def @@ -254,6 +363,9 @@ _MSVCRVER_TO_FULLVER['90'] = msvcrt.CRT_ASSEMBLY_VERSION else: _MSVCRVER_TO_FULLVER['90'] = "9.0.21022.8" + # I took one version in my SxS directory: no idea if it is the good + # one, and we can't retrieve it from python + _MSVCRVER_TO_FULLVER['90'] = "8.0.50727.42" except ImportError: # If we are here, means python was not built with MSVC. Not sure what to do # in that case: manifest building will fail, but it should not be used in @@ -344,7 +456,7 @@ def generate_manifest(config): msver = get_build_msvc_version() if msver is not None: - if msver >= 8: + if msver >= 9: check_embedded_msvcr_match_linked(msver) ma = int(msver) mi = int((msver - ma) * 10) From numpy-svn at scipy.org Sat Feb 21 12:36:22 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sat, 21 Feb 2009 11:36:22 -0600 (CST) Subject: [Numpy-svn] r6442 - trunk/numpy/core Message-ID: <20090221173622.73249C7C01F@scipy.org> Author: cdavid Date: 2009-02-21 11:36:15 -0600 (Sat, 21 Feb 2009) New Revision: 6442 Modified: trunk/numpy/core/setup.py Log: Re-enable real check for long double size. Modified: trunk/numpy/core/setup.py =================================================================== --- trunk/numpy/core/setup.py 2009-02-21 17:25:09 UTC (rev 6441) +++ trunk/numpy/core/setup.py 2009-02-21 17:36:15 UTC (rev 6442) @@ -474,7 +474,7 @@ ('SIZEOF_LONG', 'long'), ('SIZEOF_FLOAT', 'float'), ('SIZEOF_DOUBLE', 'double'), - #('SIZEOF_LONG_DOUBLE', 'long double'), + ('SIZEOF_LONG_DOUBLE', 'long double'), ('SIZEOF_PY_INTPTR_T', 'Py_intptr_t'), ]: testcode.append(c_size_test % {'sz' : sz, 'type' : t}) From numpy-svn at scipy.org Sat Feb 21 12:48:22 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sat, 21 Feb 2009 11:48:22 -0600 (CST) Subject: [Numpy-svn] r6443 - branches/coremath Message-ID: <20090221174822.F269BC7C01F@scipy.org> Author: cdavid Date: 2009-02-21 11:48:15 -0600 (Sat, 21 Feb 2009) New Revision: 6443 Modified: branches/coremath/ Log: Initialized merge tracking via "svnmerge" with revisions "1-6373" from http://svn.scipy.org/svn/numpy/trunk Property changes on: branches/coremath ___________________________________________________________________ Name: svnmerge-integrated - /branches/distutils-revamp:1-2752 /branches/dynamic_cpu_configuration:1-6101 /branches/fix_float_format:1-6359 /branches/multicore:1-3687 /branches/numpy-mingw-w64:1-6150 /branches/visualstudio_manifest:1-6077 /trunk:1-2871 + /branches/distutils-revamp:1-2752 /branches/dynamic_cpu_configuration:1-6101 /branches/fix_float_format:1-6359 /branches/multicore:1-3687 /branches/numpy-mingw-w64:1-6150 /branches/visualstudio_manifest:1-6077 /trunk:1-6373 From numpy-svn at scipy.org Sat Feb 21 18:54:51 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sat, 21 Feb 2009 17:54:51 -0600 (CST) Subject: [Numpy-svn] r6444 - trunk/numpy/core/src Message-ID: <20090221235451.27BA0C7C0DD@scipy.org> Author: charris Date: 2009-02-21 17:54:31 -0600 (Sat, 21 Feb 2009) New Revision: 6444 Modified: trunk/numpy/core/src/multiarraymodule.c Log: Coding style cleanups. Finishes multiarraymodule.c. Modified: trunk/numpy/core/src/multiarraymodule.c =================================================================== --- trunk/numpy/core/src/multiarraymodule.c 2009-02-21 17:48:15 UTC (rev 6443) +++ trunk/numpy/core/src/multiarraymodule.c 2009-02-21 23:54:31 UTC (rev 6444) @@ -4016,34 +4016,37 @@ "argmax is unsupported for this type"); return NULL; } - else if (PyArray_ISUNSIGNED(ap)) + else if (PyArray_ISUNSIGNED(ap)) { obj = PyInt_FromLong((long) -1); - - else if (PyArray_TYPE(ap)==PyArray_BOOL) + } + else if (PyArray_TYPE(ap) == PyArray_BOOL) { obj = PyInt_FromLong((long) 1); - - else + } + else { obj = PyInt_FromLong((long) 0); - + } new = PyArray_EnsureAnyArray(PyNumber_Subtract(obj, (PyObject *)ap)); Py_DECREF(obj); - if (new == NULL) return NULL; + if (new == NULL) { + return NULL; + } ret = PyArray_ArgMax((PyArrayObject *)new, axis, out); Py_DECREF(new); return ret; } /*NUMPY_API - Max -*/ + * Max + */ static PyObject * PyArray_Max(PyArrayObject *ap, int axis, PyArrayObject *out) { PyArrayObject *arr; PyObject *ret; - if ((arr=(PyArrayObject *)_check_axis(ap, &axis, 0))==NULL) + if ((arr=(PyArrayObject *)_check_axis(ap, &axis, 0)) == NULL) { return NULL; + } ret = PyArray_GenericReduceFunction(arr, n_ops.maximum, axis, arr->descr->type_num, out); Py_DECREF(arr); @@ -4051,16 +4054,17 @@ } /*NUMPY_API - Min -*/ + * Min + */ static PyObject * PyArray_Min(PyArrayObject *ap, int axis, PyArrayObject *out) { PyArrayObject *arr; PyObject *ret; - if ((arr=(PyArrayObject *)_check_axis(ap, &axis, 0))==NULL) + if ((arr=(PyArrayObject *)_check_axis(ap, &axis, 0)) == NULL) { return NULL; + } ret = PyArray_GenericReduceFunction(arr, n_ops.minimum, axis, arr->descr->type_num, out); Py_DECREF(arr); @@ -4068,21 +4072,26 @@ } /*NUMPY_API - Ptp -*/ + * Ptp + */ static PyObject * PyArray_Ptp(PyArrayObject *ap, int axis, PyArrayObject *out) { PyArrayObject *arr; PyObject *ret; - PyObject *obj1=NULL, *obj2=NULL; + PyObject *obj1 = NULL, *obj2 = NULL; - if ((arr=(PyArrayObject *)_check_axis(ap, &axis, 0))==NULL) + if ((arr=(PyArrayObject *)_check_axis(ap, &axis, 0)) == NULL) { return NULL; + } obj1 = PyArray_Max(arr, axis, out); - if (obj1 == NULL) goto fail; + if (obj1 == NULL) { + goto fail; + } obj2 = PyArray_Min(arr, axis, NULL); - if (obj2 == NULL) goto fail; + if (obj2 == NULL) { + goto fail; + } Py_DECREF(arr); if (out) { ret = PyObject_CallFunction(n_ops.subtract, "OOO", out, obj2, out); @@ -4103,58 +4112,60 @@ /*NUMPY_API - ArgMax -*/ + * ArgMax + */ static PyObject * PyArray_ArgMax(PyArrayObject *op, int axis, PyArrayObject *out) { - PyArrayObject *ap=NULL, *rp=NULL; + PyArrayObject *ap = NULL, *rp = NULL; PyArray_ArgFunc* arg_func; char *ip; intp *rptr; intp i, n, m; int elsize; - int copyret=0; - + int copyret = 0; NPY_BEGIN_THREADS_DEF; - if ((ap=(PyAO *)_check_axis(op, &axis, 0))==NULL) return NULL; - - /* We need to permute the array so that axis is placed at the end. - And all other dimensions are shifted left. - */ + if ((ap=(PyAO *)_check_axis(op, &axis, 0)) == NULL) { + return NULL; + } + /* + * We need to permute the array so that axis is placed at the end. + * And all other dimensions are shifted left. + */ if (axis != ap->nd-1) { PyArray_Dims newaxes; intp dims[MAX_DIMS]; int i; + newaxes.ptr = dims; newaxes.len = ap->nd; - for (i=0; ind-1; i++) dims[i] = i+1; - dims[ap->nd-1] = axis; + for (i = 0; i < axis; i++) dims[i] = i; + for (i = axis; i < ap->nd - 1; i++) dims[i] = i + 1; + dims[ap->nd - 1] = axis; op = (PyAO *)PyArray_Transpose(ap, &newaxes); Py_DECREF(ap); - if (op == NULL) return NULL; + if (op == NULL) { + return NULL; + } } else { op = ap; } - /* Will get native-byte order contiguous copy. - */ - ap = (PyArrayObject *)\ + /* Will get native-byte order contiguous copy. */ + ap = (PyArrayObject *) PyArray_ContiguousFromAny((PyObject *)op, op->descr->type_num, 1, 0); - Py_DECREF(op); - if (ap == NULL) return NULL; - + if (ap == NULL) { + return NULL; + } arg_func = ap->descr->f->argmax; if (arg_func == NULL) { PyErr_SetString(PyExc_TypeError, "data type not ordered"); goto fail; } - elsize = ap->descr->elsize; m = ap->dimensions[ap->nd-1]; if (m == 0) { @@ -4169,11 +4180,13 @@ ap->dimensions, PyArray_INTP, NULL, NULL, 0, 0, (PyObject *)ap); - if (rp == NULL) goto fail; + if (rp == NULL) { + goto fail; + } } else { - if (PyArray_SIZE(out) != \ - PyArray_MultiplyList(ap->dimensions, ap->nd-1)) { + if (PyArray_SIZE(out) != + PyArray_MultiplyList(ap->dimensions, ap->nd - 1)) { PyErr_SetString(PyExc_TypeError, "invalid shape for output array."); } @@ -4181,14 +4194,18 @@ PyArray_FromArray(out, PyArray_DescrFromType(PyArray_INTP), NPY_CARRAY | NPY_UPDATEIFCOPY); - if (rp == NULL) goto fail; - if (rp != out) copyret = 1; + if (rp == NULL) { + goto fail; + } + if (rp != out) { + copyret = 1; + } } NPY_BEGIN_THREADS_DESCR(ap->descr); n = PyArray_SIZE(ap)/m; rptr = (intp *)rp->data; - for (ip = ap->data, i=0; idata, i = 0; i < n; i++, ip += elsize*m) { arg_func(ip, m, rptr, ap); rptr += 1; } @@ -4212,8 +4229,8 @@ /*NUMPY_API - Take -*/ + * Take + */ static PyObject * PyArray_TakeFrom(PyArrayObject *self0, PyObject *indices0, int axis, PyArrayObject *ret, NPY_CLIPMODE clipmode) @@ -4223,29 +4240,33 @@ intp nd, i, j, n, m, max_item, tmp, chunk, nelem; intp shape[MAX_DIMS]; char *src, *dest; - int copyret=0; + int copyret = 0; int err; indices = NULL; self = (PyAO *)_check_axis(self0, &axis, CARRAY); - if (self == NULL) return NULL; - + if (self == NULL) { + return NULL; + } indices = (PyArrayObject *)PyArray_ContiguousFromAny(indices0, PyArray_INTP, 1, 0); - if (indices == NULL) goto fail; - + if (indices == NULL) { + goto fail; + } n = m = chunk = 1; nd = self->nd + indices->nd - 1; - for (i=0; i< nd; i++) { + for (i = 0; i < nd; i++) { if (i < axis) { shape[i] = self->dimensions[i]; n *= shape[i]; - } else { + } + else { if (i < axis+indices->nd) { shape[i] = indices->dimensions[i-axis]; m *= shape[i]; - } else { + } + else { shape[i] = self->dimensions[i-indices->nd+1]; chunk *= shape[i]; } @@ -4259,7 +4280,9 @@ NULL, NULL, 0, (PyObject *)self); - if (ret == NULL) goto fail; + if (ret == NULL) { + goto fail; + } } else { PyArrayObject *obj; @@ -4275,17 +4298,22 @@ } if (clipmode == NPY_RAISE) { - /* we need to make sure and get a copy - so the input array is not changed - before the error is called - */ + /* + * we need to make sure and get a copy + * so the input array is not changed + * before the error is called + */ flags |= NPY_ENSURECOPY; } obj = (PyArrayObject *)PyArray_FromArray(ret, self->descr, flags); - if (obj != ret) copyret = 1; + if (obj != ret) { + copyret = 1; + } ret = obj; - if (ret == NULL) goto fail; + if (ret == NULL) { + goto fail; + } } max_item = self->dimensions[axis]; @@ -4296,63 +4324,73 @@ func = self->descr->f->fasttake; if (func == NULL) { - - switch(clipmode) { - case NPY_RAISE: - for(i=0; idata))[j]; - if (tmp < 0) tmp = tmp+max_item; - if ((tmp < 0) || (tmp >= max_item)) { - PyErr_SetString(PyExc_IndexError, - "index out of range "\ - "for array"); - goto fail; + switch(clipmode) { + case NPY_RAISE: + for (i = 0; i < n; i++) { + for (j = 0; j < m; j++) { + tmp = ((intp *)(indices->data))[j]; + if (tmp < 0) { + tmp = tmp + max_item; + } + if ((tmp < 0) || (tmp >= max_item)) { + PyErr_SetString(PyExc_IndexError, + "index out of range "\ + "for array"); + goto fail; + } + memmove(dest, src + tmp*chunk, chunk); + dest += chunk; } - memmove(dest, src+tmp*chunk, chunk); - dest += chunk; + src += chunk*max_item; } - src += chunk*max_item; - } - break; - case NPY_WRAP: - for(i=0; idata))[j]; - if (tmp < 0) while (tmp < 0) tmp += max_item; - else if (tmp >= max_item) - while (tmp >= max_item) - tmp -= max_item; - memmove(dest, src+tmp*chunk, chunk); - dest += chunk; + break; + case NPY_WRAP: + for (i = 0; i < n; i++) { + for (j = 0; j < m; j++) { + tmp = ((intp *)(indices->data))[j]; + if (tmp < 0) { + while (tmp < 0) { + tmp += max_item; + } + } + else if (tmp >= max_item) { + while (tmp >= max_item) { + tmp -= max_item; + } + } + memmove(dest, src + tmp*chunk, chunk); + dest += chunk; + } + src += chunk*max_item; } - src += chunk*max_item; - } - break; - case NPY_CLIP: - for(i=0; idata))[j]; - if (tmp < 0) - tmp = 0; - else if (tmp >= max_item) - tmp = max_item-1; - memmove(dest, src+tmp*chunk, chunk); - dest += chunk; + break; + case NPY_CLIP: + for (i = 0; i < n; i++) { + for (j = 0; j < m; j++) { + tmp = ((intp *)(indices->data))[j]; + if (tmp < 0) { + tmp = 0; + } + else if (tmp >= max_item) { + tmp = max_item - 1; + } + memmove(dest, src+tmp*chunk, chunk); + dest += chunk; + } + src += chunk*max_item; } - src += chunk*max_item; + break; } - break; } - } else { err = func(dest, src, (intp *)(indices->data), max_item, n, m, nelem, clipmode); - if (err) goto fail; + if (err) { + goto fail; + } } PyArray_INCREF(ret); - Py_XDECREF(indices); Py_XDECREF(self); if (copyret) { @@ -4362,10 +4400,8 @@ Py_DECREF(ret); ret = (PyArrayObject *)obj; } - return (PyObject *)ret; - fail: PyArray_XDECREF_ERR(ret); Py_XDECREF(indices); @@ -4374,8 +4410,8 @@ } /*NUMPY_API - Put values into an array -*/ + * Put values into an array + */ static PyObject * PyArray_PutTo(PyArrayObject *self, PyObject* values0, PyObject *indices0, NPY_CLIPMODE clipmode) @@ -4387,7 +4423,6 @@ indices = NULL; values = NULL; - if (!PyArray_Check(self)) { PyErr_SetString(PyExc_TypeError, "put: first argument must be an array"); @@ -4396,68 +4431,86 @@ if (!PyArray_ISCONTIGUOUS(self)) { PyArrayObject *obj; int flags = NPY_CARRAY | NPY_UPDATEIFCOPY; + if (clipmode == NPY_RAISE) { flags |= NPY_ENSURECOPY; } Py_INCREF(self->descr); obj = (PyArrayObject *)PyArray_FromArray(self, self->descr, flags); - if (obj != self) copied = 1; + if (obj != self) { + copied = 1; + } self = obj; } max_item = PyArray_SIZE(self); dest = self->data; chunk = self->descr->elsize; - indices = (PyArrayObject *)PyArray_ContiguousFromAny(indices0, PyArray_INTP, 0, 0); - if (indices == NULL) goto fail; + if (indices == NULL) { + goto fail; + } ni = PyArray_SIZE(indices); - Py_INCREF(self->descr); values = (PyArrayObject *)PyArray_FromAny(values0, self->descr, 0, 0, DEFAULT | FORCECAST, NULL); - if (values == NULL) goto fail; + if (values == NULL) { + goto fail; + } nv = PyArray_SIZE(values); - if (nv <= 0) goto finish; + if (nv <= 0) { + goto finish; + } if (PyDataType_REFCHK(self->descr)) { switch(clipmode) { case NPY_RAISE: - for(i=0; idata + chunk * (i % nv); + for (i = 0; i < ni; i++) { + src = values->data + chunk*(i % nv); tmp = ((intp *)(indices->data))[i]; - if (tmp < 0) tmp = tmp+max_item; + if (tmp < 0) { + tmp = tmp + max_item; + } if ((tmp < 0) || (tmp >= max_item)) { PyErr_SetString(PyExc_IndexError, - "index out of " \ - "range for array"); + "index out of " \ + "range for array"); goto fail; } PyArray_Item_INCREF(src, self->descr); PyArray_Item_XDECREF(dest+tmp*chunk, self->descr); - memmove(dest + tmp * chunk, src, chunk); + memmove(dest + tmp*chunk, src, chunk); } break; case NPY_WRAP: - for(i=0; idata + chunk * (i % nv); tmp = ((intp *)(indices->data))[i]; - if (tmp < 0) while(tmp < 0) tmp+=max_item; - else if (tmp >= max_item) - while(tmp >= max_item) + if (tmp < 0) { + while (tmp < 0) { + tmp += max_item; + } + } + else if (tmp >= max_item) { + while (tmp >= max_item) { tmp -= max_item; + } + } PyArray_Item_INCREF(src, self->descr); PyArray_Item_XDECREF(dest+tmp*chunk, self->descr); memmove(dest + tmp * chunk, src, chunk); } break; case NPY_CLIP: - for(i=0; idata + chunk * (i % nv); tmp = ((intp *)(indices->data))[i]; - if (tmp < 0) tmp = 0; - else if (tmp >= max_item) + if (tmp < 0) { + tmp = 0; + } + else if (tmp >= max_item) { tmp = max_item - 1; + } PyArray_Item_INCREF(src, self->descr); PyArray_Item_XDECREF(dest+tmp*chunk, self->descr); memmove(dest + tmp * chunk, src, chunk); @@ -4468,37 +4521,48 @@ else { switch(clipmode) { case NPY_RAISE: - for(i=0; idata + chunk * (i % nv); tmp = ((intp *)(indices->data))[i]; - if (tmp < 0) tmp = tmp+max_item; + if (tmp < 0) { + tmp = tmp + max_item; + } if ((tmp < 0) || (tmp >= max_item)) { PyErr_SetString(PyExc_IndexError, - "index out of " \ - "range for array"); + "index out of " \ + "range for array"); goto fail; } memmove(dest + tmp * chunk, src, chunk); } break; case NPY_WRAP: - for(i=0; idata + chunk * (i % nv); tmp = ((intp *)(indices->data))[i]; - if (tmp < 0) while(tmp < 0) tmp+=max_item; - else if (tmp >= max_item) - while(tmp >= max_item) + if (tmp < 0) { + while (tmp < 0) { + tmp += max_item; + } + } + else if (tmp >= max_item) { + while (tmp >= max_item) { tmp -= max_item; + } + } memmove(dest + tmp * chunk, src, chunk); } break; case NPY_CLIP: - for(i=0; idata + chunk * (i % nv); tmp = ((intp *)(indices->data))[i]; - if (tmp < 0) tmp = 0; - else if (tmp >= max_item) + if (tmp < 0) { + tmp = 0; + } + else if (tmp >= max_item) { tmp = max_item - 1; + } memmove(dest + tmp * chunk, src, chunk); } break; @@ -4533,15 +4597,15 @@ if (!PyArg_ParseTupleAndKeywords(args, kwds, "O!OO:putmask", kwlist, &PyArray_Type, - &array, &mask, &values)) + &array, &mask, &values)) { return NULL; - + } return PyArray_PutMask((PyArrayObject *)array, values, mask); } /*NUMPY_API - Put values into an array according to a mask. -*/ + * Put values into an array according to a mask. + */ static PyObject * PyArray_PutMask(PyArrayObject *self, PyObject* values0, PyObject* mask0) { @@ -4549,11 +4613,10 @@ PyArrayObject *mask, *values; int i, chunk, ni, max_item, nv, tmp; char *src, *dest; - int copied=0; + int copied = 0; mask = NULL; values = NULL; - if (!PyArray_Check(self)) { PyErr_SetString(PyExc_TypeError, "putmask: first argument must "\ @@ -4563,20 +4626,24 @@ if (!PyArray_ISCONTIGUOUS(self)) { PyArrayObject *obj; int flags = NPY_CARRAY | NPY_UPDATEIFCOPY; + Py_INCREF(self->descr); obj = (PyArrayObject *)PyArray_FromArray(self, self->descr, flags); - if (obj != self) copied = 1; + if (obj != self) { + copied = 1; + } self = obj; } max_item = PyArray_SIZE(self); dest = self->data; chunk = self->descr->elsize; - mask = (PyArrayObject *)\ PyArray_FROM_OTF(mask0, PyArray_BOOL, CARRAY | FORCECAST); - if (mask == NULL) goto fail; + if (mask == NULL) { + goto fail; + } ni = PyArray_SIZE(mask); if (ni != max_item) { PyErr_SetString(PyExc_ValueError, @@ -4587,8 +4654,10 @@ Py_INCREF(self->descr); values = (PyArrayObject *)\ PyArray_FromAny(values0, self->descr, 0, 0, NPY_CARRAY, NULL); - if (values == NULL) goto fail; - nv = PyArray_SIZE(values); /* zero if null array */ + if (values == NULL) { + goto fail; + } + nv = PyArray_SIZE(values); /* zero if null array */ if (nv <= 0) { Py_XDECREF(values); Py_XDECREF(mask); @@ -4596,7 +4665,7 @@ return Py_None; } if (PyDataType_REFCHK(self->descr)) { - for(i=0; idata))[i]; if (tmp) { src = values->data + chunk * (i % nv); @@ -4609,11 +4678,11 @@ else { func = self->descr->f->fastputmask; if (func == NULL) { - for(i=0; idata))[i]; if (tmp) { - src = values->data + chunk * (i % nv); - memmove(dest + i * chunk, src, chunk); + src = values->data + chunk*(i % nv); + memmove(dest + i*chunk, src, chunk); } } } @@ -4640,18 +4709,17 @@ } -/* This conversion function can be used with the "O&" argument for - PyArg_ParseTuple. It will immediately return an object of array type - or will convert to a CARRAY any other object. - - If you use PyArray_Converter, you must DECREF the array when finished - as you get a new reference to it. -*/ - /*NUMPY_API - Useful to pass as converter function for O& processing in - PyArgs_ParseTuple. -*/ + * + * Useful to pass as converter function for O& processing in PyArgs_ParseTuple. + * + * This conversion function can be used with the "O&" argument for + * PyArg_ParseTuple. It will immediately return an object of array type + * or will convert to a CARRAY any other object. + * + * If you use PyArray_Converter, you must DECREF the array when finished + * as you get a new reference to it. + */ static int PyArray_Converter(PyObject *object, PyObject **address) { @@ -4662,15 +4730,17 @@ } else { *address = PyArray_FromAny(object, NULL, 0, 0, CARRAY, NULL); - if (*address == NULL) return PY_FAIL; + if (*address == NULL) { + return PY_FAIL; + } return PY_SUCCEED; } } /*NUMPY_API - Useful to pass as converter function for O& processing in - PyArgs_ParseTuple for output arrays -*/ + * Useful to pass as converter function for O& processing in + * PyArgs_ParseTuple for output arrays + */ static int PyArray_OutputConverter(PyObject *object, PyArrayObject **address) { @@ -4692,22 +4762,26 @@ /*NUMPY_API - Convert an object to true / false -*/ + * Convert an object to true / false + */ static int PyArray_BoolConverter(PyObject *object, Bool *val) { - if (PyObject_IsTrue(object)) - *val=TRUE; - else *val=FALSE; - if (PyErr_Occurred()) + if (PyObject_IsTrue(object)) { + *val = TRUE; + } + else { + *val = FALSE; + } + if (PyErr_Occurred()) { return PY_FAIL; + } return PY_SUCCEED; } /*NUMPY_API - Convert an object to FORTRAN / C / ANY -*/ + * Convert an object to FORTRAN / C / ANY + */ static int PyArray_OrderConverter(PyObject *object, NPY_ORDER *val) { @@ -4716,12 +4790,15 @@ *val = PyArray_ANYORDER; } else if (!PyString_Check(object) || PyString_GET_SIZE(object) < 1) { - if (PyObject_IsTrue(object)) + if (PyObject_IsTrue(object)) { *val = PyArray_FORTRANORDER; - else + } + else { *val = PyArray_CORDER; - if (PyErr_Occurred()) + } + if (PyErr_Occurred()) { return PY_FAIL; + } return PY_SUCCEED; } else { @@ -4745,8 +4822,8 @@ } /*NUMPY_API - Convert an object to NPY_RAISE / NPY_CLIP / NPY_WRAP -*/ + * Convert an object to NPY_RAISE / NPY_CLIP / NPY_WRAP + */ static int PyArray_ClipmodeConverter(PyObject *object, NPY_CLIPMODE *val) { @@ -4772,13 +4849,17 @@ } } else { - int number; - number = PyInt_AsLong(object); - if (number == -1 && PyErr_Occurred()) goto fail; - if (number <= (int) NPY_RAISE && - number >= (int) NPY_CLIP) + int number = PyInt_AsLong(object); + if (number == -1 && PyErr_Occurred()) { + goto fail; + } + if (number <= (int) NPY_RAISE + && number >= (int) NPY_CLIP) { *val = (NPY_CLIPMODE) number; - else goto fail; + } + else { + goto fail; + } } return PY_SUCCEED; @@ -4791,18 +4872,20 @@ /*NUMPY_API - Typestr converter -*/ + * Typestr converter + */ static int PyArray_TypestrConvert(int itemsize, int gentype) { register int newtype = gentype; if (gentype == PyArray_GENBOOLLTR) { - if (itemsize == 1) + if (itemsize == 1) { newtype = PyArray_BOOL; - else + } + else { newtype = PyArray_NOTYPE; + } } else if (gentype == PyArray_SIGNEDLTR) { switch(itemsize) { @@ -4827,7 +4910,6 @@ newtype = PyArray_NOTYPE; } } - else if (gentype == PyArray_UNSIGNEDLTR) { switch(itemsize) { case 1: @@ -4879,7 +4961,6 @@ newtype = PyArray_NOTYPE; } } - else if (gentype == PyArray_COMPLEXLTR) { switch(itemsize) { case 8: @@ -4907,25 +4988,22 @@ newtype = PyArray_NOTYPE; } } - return newtype; } -/* this function takes a Python object which exposes the (single-segment) - buffer interface and returns a pointer to the data segment - - You should increment the reference count by one of buf->base - if you will hang on to a reference - - You only get a borrowed reference to the object. Do not free the - memory... -*/ - - /*NUMPY_API - Get buffer chunk from object -*/ + * Get buffer chunk from object + * + * this function takes a Python object which exposes the (single-segment) + * buffer interface and returns a pointer to the data segment + * + * You should increment the reference count by one of buf->base + * if you will hang on to a reference + * + * You only get a borrowed reference to the object. Do not free the + * memory... + */ static int PyArray_BufferConverter(PyObject *obj, PyArray_Chunk *buf) { @@ -4934,38 +5012,40 @@ buf->ptr = NULL; buf->flags = BEHAVED; buf->base = NULL; - - if (obj == Py_None) + if (obj == Py_None) { return PY_SUCCEED; - + } if (PyObject_AsWriteBuffer(obj, &(buf->ptr), &buflen) < 0) { PyErr_Clear(); buf->flags &= ~WRITEABLE; if (PyObject_AsReadBuffer(obj, (const void **)&(buf->ptr), - &buflen) < 0) + &buflen) < 0) { return PY_FAIL; + } } buf->len = (intp) buflen; /* Point to the base of the buffer object if present */ - if (PyBuffer_Check(obj)) buf->base = ((PyArray_Chunk *)obj)->base; - if (buf->base == NULL) buf->base = obj; - + if (PyBuffer_Check(obj)) { + buf->base = ((PyArray_Chunk *)obj)->base; + } + if (buf->base == NULL) { + buf->base = obj; + } return PY_SUCCEED; } -/* This function takes a Python sequence object and allocates and - fills in an intp array with the converted values. - - **Remember to free the pointer seq.ptr when done using - PyDimMem_FREE(seq.ptr)** -*/ - /*NUMPY_API - Get intp chunk from sequence -*/ + * Get intp chunk from sequence + * + * This function takes a Python sequence object and allocates and + * fills in an intp array with the converted values. + * + * Remember to free the pointer seq.ptr when done using + * PyDimMem_FREE(seq.ptr)** + */ static int PyArray_IntpConverter(PyObject *obj, PyArray_Dims *seq) { @@ -4974,10 +5054,15 @@ seq->ptr = NULL; seq->len = 0; - if (obj == Py_None) return PY_SUCCEED; + if (obj == Py_None) { + return PY_SUCCEED; + } len = PySequence_Size(obj); - if (len == -1) { /* Check to see if it is a number */ - if (PyNumber_Check(obj)) len = 1; + if (len == -1) { + /* Check to see if it is a number */ + if (PyNumber_Check(obj)) { + len = 1; + } } if (len < 0) { PyErr_SetString(PyExc_TypeError, @@ -5000,27 +5085,28 @@ nd = PyArray_IntpFromSequence(obj, (intp *)seq->ptr, len); if (nd == -1 || nd != len) { PyDimMem_FREE(seq->ptr); - seq->ptr=NULL; + seq->ptr = NULL; return PY_FAIL; } return PY_SUCCEED; } -/* A tuple type would be either (generic typeobject, typesize) - or (fixed-length data-type, shape) - - or (inheriting data-type, new-data-type) - The new data-type must have the same itemsize as the inheriting data-type - unless the latter is 0 - - Thus (int32, {'real':(int16,0),'imag',(int16,2)}) - - is one way to specify a descriptor that will give - a['real'] and a['imag'] to an int32 array. -*/ - -/* leave type reference alone */ +/* + * A tuple type would be either (generic typeobject, typesize) + * or (fixed-length data-type, shape) + * + * or (inheriting data-type, new-data-type) + * The new data-type must have the same itemsize as the inheriting data-type + * unless the latter is 0 + * + * Thus (int32, {'real':(int16,0),'imag',(int16,2)}) + * + * is one way to specify a descriptor that will give + * a['real'] and a['imag'] to an int32 array. + * + * leave type reference alone + */ static PyArray_Descr * _use_inherit(PyArray_Descr *type, PyObject *newobj, int *errflag) { @@ -5033,8 +5119,9 @@ } *errflag = 1; new = PyArray_DescrNew(type); - if (new == NULL) goto fail; - + if (new == NULL) { + goto fail; + } if (new->elsize && new->elsize != conv->elsize) { PyErr_SetString(PyExc_ValueError, "mismatch in size of old "\ @@ -5056,7 +5143,6 @@ fail: Py_DECREF(conv); return NULL; - } static PyArray_Descr * @@ -5066,26 +5152,33 @@ PyObject *val; int errflag; - if (PyTuple_GET_SIZE(obj) != 2) return NULL; - - if (!PyArray_DescrConverter(PyTuple_GET_ITEM(obj,0), &type)) + if (PyTuple_GET_SIZE(obj) != 2) { return NULL; + } + if (!PyArray_DescrConverter(PyTuple_GET_ITEM(obj,0), &type)) { + return NULL; + } val = PyTuple_GET_ITEM(obj,1); /* try to interpret next item as a type */ res = _use_inherit(type, val, &errflag); if (res || errflag) { Py_DECREF(type); - if (res) return res; - else return NULL; + if (res) { + return res; + } + else { + return NULL; + } } PyErr_Clear(); - /* We get here if res was NULL but errflag wasn't set - --- i.e. the conversion to a data-descr failed in _use_inherit - */ + /* + * We get here if res was NULL but errflag wasn't set + * --- i.e. the conversion to a data-descr failed in _use_inherit + */ + if (type->elsize == 0) { + /* interpret next item as a typesize */ + int itemsize = PyArray_PyIntAsInt(PyTuple_GET_ITEM(obj,1)); - if (type->elsize == 0) { /* interpret next item as a typesize */ - int itemsize; - itemsize = PyArray_PyIntAsInt(PyTuple_GET_ITEM(obj,1)); if (error_converting(itemsize)) { PyErr_SetString(PyExc_ValueError, "invalid itemsize in generic type "\ @@ -5093,20 +5186,24 @@ goto fail; } PyArray_DESCR_REPLACE(type); - if (type->type_num == PyArray_UNICODE) + if (type->type_num == PyArray_UNICODE) { type->elsize = itemsize << 2; - else + } + else { type->elsize = itemsize; + } } else { - /* interpret next item as shape (if it's a tuple) - and reset the type to PyArray_VOID with - a new fields attribute. - */ - PyArray_Dims shape={NULL,-1}; + /* + * interpret next item as shape (if it's a tuple) + * and reset the type to PyArray_VOID with + * a new fields attribute. + */ + PyArray_Dims shape = {NULL, -1}; PyArray_Descr *newdescr; - if (!(PyArray_IntpConverter(val, &shape)) || - (shape.len > MAX_DIMS)) { + + if (!(PyArray_IntpConverter(val, &shape)) + || (shape.len > MAX_DIMS)) { PyDimMem_FREE(shape.ptr); PyErr_SetString(PyExc_ValueError, "invalid shape in fixed-type tuple."); @@ -5114,16 +5211,18 @@ } /* If (type, 1) was given, it is equivalent to type... or (type, ()) was given it is equivalent to type... */ - if ((shape.len == 1 && shape.ptr[0] == 1 && PyNumber_Check(val)) || \ - (shape.len == 0 && PyTuple_Check(val))) { + if ((shape.len == 1 && shape.ptr[0] == 1 && PyNumber_Check(val)) + || (shape.len == 0 && PyTuple_Check(val))) { PyDimMem_FREE(shape.ptr); return type; } newdescr = PyArray_DescrNewFromType(PyArray_VOID); - if (newdescr == NULL) {PyDimMem_FREE(shape.ptr); goto fail;} + if (newdescr == NULL) { + PyDimMem_FREE(shape.ptr); + goto fail; + } newdescr->elsize = type->elsize; - newdescr->elsize *= PyArray_MultiplyList(shape.ptr, - shape.len); + newdescr->elsize *= PyArray_MultiplyList(shape.ptr, shape.len); PyDimMem_FREE(shape.ptr); newdescr->subarray = _pya_malloc(sizeof(PyArray_ArrayDescr)); newdescr->subarray->base = type; @@ -5143,11 +5242,12 @@ return NULL; } -/* obj is a list. Each item is a tuple with - - (field-name, data-type (either a list or a string), and an optional - shape parameter). -*/ +/* + * obj is a list. Each item is a tuple with + * + * (field-name, data-type (either a list or a string), and an optional + * shape parameter). + */ static PyArray_Descr * _convert_from_array_descr(PyObject *obj, int align) { @@ -5158,34 +5258,43 @@ PyObject *nameslist; PyArray_Descr *new; PyArray_Descr *conv; - int dtypeflags=0; + int dtypeflags = 0; int maxalign = 0; n = PyList_GET_SIZE(obj); nameslist = PyTuple_New(n); - if (!nameslist) return NULL; + if (!nameslist) { + return NULL; + } totalsize = 0; fields = PyDict_New(); - for (i=0; ialignment; - if (_align > 1) totalsize = \ - ((totalsize + _align - 1)/_align)*_align; + if (_align > 1) { + totalsize = ((totalsize + _align - 1)/_align)*_align; + } maxalign = MAX(maxalign, _align); } PyTuple_SET_ITEM(tup, 1, PyInt_FromLong((long) totalsize)); - /* Title can be "meta-data". Only insert it - into the fields dictionary if it is a string - */ + /* + * Title can be "meta-data". Only insert it + * into the fields dictionary if it is a string + */ if (title != NULL) { Py_INCREF(title); PyTuple_SET_ITEM(tup, 2, title); - if (PyString_Check(title) || PyUnicode_Check(title)) + if (PyString_Check(title) || PyUnicode_Check(title)) { PyDict_SetItem(fields, title, tup); + } } PyDict_SetItem(fields, name, tup); totalsize += conv->elsize; @@ -5250,7 +5366,9 @@ if (maxalign > 1) { totalsize = ((totalsize+maxalign-1)/maxalign)*maxalign; } - if (align) new->alignment = maxalign; + if (align) { + new->alignment = maxalign; + } return new; fail: @@ -5260,37 +5378,45 @@ } -/* a list specifying a data-type can just be - a list of formats. The names for the fields - will default to f0, f1, f2, and so forth. -*/ - +/* + * a list specifying a data-type can just be + * a list of formats. The names for the fields + * will default to f0, f1, f2, and so forth. + */ static PyArray_Descr * _convert_from_list(PyObject *obj, int align) { int n, i; int totalsize; PyObject *fields; - PyArray_Descr *conv=NULL; + PyArray_Descr *conv = NULL; PyArray_Descr *new; PyObject *key, *tup; - PyObject *nameslist=NULL; + PyObject *nameslist = NULL; int ret; - int maxalign=0; - int dtypeflags=0; + int maxalign = 0; + int dtypeflags = 0; n = PyList_GET_SIZE(obj); - /* Ignore any empty string at end which _internal._commastring - can produce */ + /* + * Ignore any empty string at end which _internal._commastring + * can produce + */ key = PyList_GET_ITEM(obj, n-1); - if (PyString_Check(key) && PyString_GET_SIZE(key) == 0) n = n-1; + if (PyString_Check(key) && PyString_GET_SIZE(key) == 0) { + n = n - 1; + } /* End ignore code.*/ totalsize = 0; - if (n==0) return NULL; + if (n == 0) { + return NULL; + } nameslist = PyTuple_New(n); - if (!nameslist) return NULL; + if (!nameslist) { + return NULL; + } fields = PyDict_New(); - for (i=0; ialignment; - if (_align > 1) totalsize = \ - ((totalsize + _align - 1)/_align)*_align; + if (_align > 1) { + totalsize = ((totalsize + _align - 1)/_align)*_align; + } maxalign = MAX(maxalign, _align); } PyTuple_SET_ITEM(tup, 1, PyInt_FromLong((long) totalsize)); @@ -5321,7 +5449,9 @@ if (maxalign > 1) { totalsize = ((totalsize+maxalign-1)/maxalign)*maxalign; } - if (align) new->alignment = maxalign; + if (align) { + new->alignment = maxalign; + } new->elsize = totalsize; return new; @@ -5332,15 +5462,15 @@ } -/* comma-separated string */ -/* this is the format developed by the numarray records module */ -/* and implemented by the format parser in that module */ -/* this is an alternative implementation found in the _internal.py - file patterned after that one -- the approach is to try to convert - to a list (with tuples if any repeat information is present) - and then call the _convert_from_list) -*/ - +/* + * comma-separated string + * this is the format developed by the numarray records module + * and implemented by the format parser in that module + * this is an alternative implementation found in the _internal.py + * file patterned after that one -- the approach is to try to convert + * to a list (with tuples if any repeat information is present) + * and then call the _convert_from_list) + */ static PyArray_Descr * _convert_from_commastring(PyObject *obj, int align) { @@ -5348,13 +5478,18 @@ PyArray_Descr *res; PyObject *_numpy_internal; - if (!PyString_Check(obj)) return NULL; + if (!PyString_Check(obj)) { + return NULL; + } _numpy_internal = PyImport_ImportModule("numpy.core._internal"); - if (_numpy_internal == NULL) return NULL; - listobj = PyObject_CallMethod(_numpy_internal, "_commastring", - "O", obj); + if (_numpy_internal == NULL) { + return NULL; + } + listobj = PyObject_CallMethod(_numpy_internal, "_commastring", "O", obj); Py_DECREF(_numpy_internal); - if (!listobj) return NULL; + if (!listobj) { + return NULL; + } if (!PyList_Check(listobj) || PyList_GET_SIZE(listobj)<1) { PyErr_SetString(PyExc_RuntimeError, "_commastring is " \ "not returning a list with len >= 1"); @@ -5379,48 +5514,51 @@ -/* a dictionary specifying a data-type - must have at least two and up to four - keys These must all be sequences of the same length. - - "names" --- field names - "formats" --- the data-type descriptors for the field. - - Optional: - - "offsets" --- integers indicating the offset into the - record of the start of the field. - if not given, then "consecutive offsets" - will be assumed and placed in the dictionary. - - "titles" --- Allows the use of an additional key - for the fields dictionary.(if these are strings - or unicode objects) or - this can also be meta-data to - be passed around with the field description. - - Attribute-lookup-based field names merely has to query the fields - dictionary of the data-descriptor. Any result present can be used - to return the correct field. - - So, the notion of what is a name and what is a title is really quite - arbitrary. - - What does distinguish a title, however, is that if it is not None, - it will be placed at the end of the tuple inserted into the - fields dictionary.and can therefore be used to carry meta-data around. - - If the dictionary does not have "names" and "formats" entries, - then it will be checked for conformity and used directly. -*/ - +/* + * a dictionary specifying a data-type + * must have at least two and up to four + * keys These must all be sequences of the same length. + * + * "names" --- field names + * "formats" --- the data-type descriptors for the field. + * + * Optional: + * + * "offsets" --- integers indicating the offset into the + * record of the start of the field. + * if not given, then "consecutive offsets" + * will be assumed and placed in the dictionary. + * + * "titles" --- Allows the use of an additional key + * for the fields dictionary.(if these are strings + * or unicode objects) or + * this can also be meta-data to + * be passed around with the field description. + * + * Attribute-lookup-based field names merely has to query the fields + * dictionary of the data-descriptor. Any result present can be used + * to return the correct field. + * + * So, the notion of what is a name and what is a title is really quite + * arbitrary. + * + * What does distinguish a title, however, is that if it is not None, + * it will be placed at the end of the tuple inserted into the + * fields dictionary.and can therefore be used to carry meta-data around. + * + * If the dictionary does not have "names" and "formats" entries, + * then it will be checked for conformity and used directly. + */ static PyArray_Descr * _use_fields_dict(PyObject *obj, int align) { PyObject *_numpy_internal; PyArray_Descr *res; + _numpy_internal = PyImport_ImportModule("numpy.core._internal"); - if (_numpy_internal == NULL) return NULL; + if (_numpy_internal == NULL) { + return NULL; + } res = (PyArray_Descr *)PyObject_CallMethod(_numpy_internal, "_usefields", "Oi", obj, align); @@ -5432,19 +5570,19 @@ _convert_from_dict(PyObject *obj, int align) { PyArray_Descr *new; - PyObject *fields=NULL; + PyObject *fields = NULL; PyObject *names, *offsets, *descrs, *titles; int n, i; int totalsize; - int maxalign=0; - int dtypeflags=0; + int maxalign = 0; + int dtypeflags = 0; fields = PyDict_New(); - if (fields == NULL) return (PyArray_Descr *)PyErr_NoMemory(); - + if (fields == NULL) { + return (PyArray_Descr *)PyErr_NoMemory(); + } names = PyDict_GetItemString(obj, "names"); descrs = PyDict_GetItemString(obj, "formats"); - if (!names || !descrs) { Py_DECREF(fields); return _use_fields_dict(obj, align); @@ -5452,9 +5590,9 @@ n = PyObject_Length(names); offsets = PyDict_GetItemString(obj, "offsets"); titles = PyDict_GetItemString(obj, "titles"); - if ((n > PyObject_Length(descrs)) || \ - (offsets && (n > PyObject_Length(offsets))) || \ - (titles && (n > PyObject_Length(titles)))) { + if ((n > PyObject_Length(descrs)) + || (offsets && (n > PyObject_Length(offsets))) + || (titles && (n > PyObject_Length(titles)))) { PyErr_SetString(PyExc_ValueError, "all items in the dictionary must have" \ " the same length."); @@ -5462,9 +5600,9 @@ } totalsize = 0; - for(i=0; i 1) { - totalsize = ((totalsize + _align - 1) \ - /_align)*_align; + totalsize = ((totalsize + _align - 1)/_align)*_align; } PyTuple_SET_ITEM(tup, 1, PyInt_FromLong(totalsize)); } - if (len == 3) PyTuple_SET_ITEM(tup, 2, item); + if (len == 3) { + PyTuple_SET_ITEM(tup, 2, item); + } name = PyObject_GetItem(names, index); Py_DECREF(index); if (!(PyString_Check(name) || PyUnicode_Check(name))) { @@ -5530,8 +5673,8 @@ PyDict_SetItem(fields, name, tup); Py_DECREF(name); if (len == 3) { - if ((PyString_Check(item) || PyUnicode_Check(item)) && - PyDict_GetItem(fields, item) != NULL) { + if ((PyString_Check(item) || PyUnicode_Check(item)) + && PyDict_GetItem(fields, item) != NULL) { PyErr_SetString(PyExc_ValueError, "title already used as a "\ "name or title."); @@ -5542,16 +5685,23 @@ } } Py_DECREF(tup); - if ((ret == PY_FAIL) || (newdescr->elsize == 0)) goto fail; + if ((ret == PY_FAIL) || (newdescr->elsize == 0)) { + goto fail; + } dtypeflags |= (newdescr->hasobject & NPY_FROM_FIELDS); totalsize += newdescr->elsize; } new = PyArray_DescrNewFromType(PyArray_VOID); - if (new == NULL) goto fail; - if (maxalign > 1) + if (new == NULL) { + goto fail; + } + if (maxalign > 1) { totalsize = ((totalsize + maxalign - 1)/maxalign)*maxalign; - if (align) new->alignment = maxalign; + } + if (align) { + new->alignment = maxalign; + } new->elsize = totalsize; if (!PyTuple_Check(names)) { names = PySequence_Tuple(names); @@ -5580,59 +5730,56 @@ /* Check for ints at start of string */ if ((type[0] >= '0' && type[0] <= '9') || ((len > 1) && _chk_byteorder(type[0]) && - (type[1] >= '0' && type[1] <= '9'))) + (type[1] >= '0' && type[1] <= '9'))) { return 1; - + } /* Check for empty tuple */ if (((len > 1) && (type[0] == '(' && type[1] == ')')) || ((len > 3) && _chk_byteorder(type[0]) && - (type[1] == '(' && type[2] == ')'))) + (type[1] == '(' && type[2] == ')'))) { return 1; - + } /* Check for presence of commas */ - for (i=1;i= size computed from fields - - The .fields attribute must return a convertible dictionary if - present. Result inherits from PyArray_VOID. -*/ - - /*NUMPY_API - Get type-descriptor from an object forcing alignment if possible - None goes to DEFAULT type. + * Get type-descriptor from an object forcing alignment if possible + * None goes to DEFAULT type. + * + * any object with the .fields attribute and/or .itemsize attribute (if the + *.fields attribute does not give the total size -- i.e. a partial record + * naming). If itemsize is given it must be >= size computed from fields + * + * The .fields attribute must return a convertible dictionary if present. + * Result inherits from PyArray_VOID. */ static int PyArray_DescrAlignConverter(PyObject *obj, PyArray_Descr **at) { if PyDict_Check(obj) { - *at = _convert_from_dict(obj, 1); - } + *at = _convert_from_dict(obj, 1); + } else if PyString_Check(obj) { - *at = _convert_from_commastring(obj, 1); - } + *at = _convert_from_commastring(obj, 1); + } else if PyList_Check(obj) { - *at = _convert_from_array_descr(obj, 1); - } + *at = _convert_from_array_descr(obj, 1); + } else { return PyArray_DescrConverter(obj, at); } if (*at == NULL) { if (!PyErr_Occurred()) { PyErr_SetString(PyExc_ValueError, - "data-type-descriptor not understood"); + "data-type-descriptor not understood"); } return PY_FAIL; } @@ -5640,28 +5787,28 @@ } /*NUMPY_API - Get type-descriptor from an object forcing alignment if possible - None goes to NULL. -*/ + * Get type-descriptor from an object forcing alignment if possible + * None goes to NULL. + */ static int PyArray_DescrAlignConverter2(PyObject *obj, PyArray_Descr **at) { if PyDict_Check(obj) { - *at = _convert_from_dict(obj, 1); - } + *at = _convert_from_dict(obj, 1); + } else if PyString_Check(obj) { - *at = _convert_from_commastring(obj, 1); - } + *at = _convert_from_commastring(obj, 1); + } else if PyList_Check(obj) { - *at = _convert_from_array_descr(obj, 1); - } + *at = _convert_from_array_descr(obj, 1); + } else { return PyArray_DescrConverter2(obj, at); } if (*at == NULL) { if (!PyErr_Occurred()) { PyErr_SetString(PyExc_ValueError, - "data-type-descriptor not understood"); + "data-type-descriptor not understood"); } return PY_FAIL; } @@ -5670,8 +5817,8 @@ /*NUMPY_API - Get typenum from an object -- None goes to NULL -*/ + * Get typenum from an object -- None goes to NULL + */ static int PyArray_DescrConverter2(PyObject *obj, PyArray_Descr **at) { @@ -5679,41 +5826,39 @@ *at = NULL; return PY_SUCCEED; } - else return PyArray_DescrConverter(obj, at); + else { + return PyArray_DescrConverter(obj, at); + } } -/* This function takes a Python object representing a type and converts it - to a the correct PyArray_Descr * structure to describe the type. - - Many objects can be used to represent a data-type which in NumPy is - quite a flexible concept. - - This is the central code that converts Python objects to - Type-descriptor objects that are used throughout numpy. -*/ - -/* new reference in *at */ /*NUMPY_API - Get typenum from an object -- None goes to PyArray_DEFAULT -*/ + * Get typenum from an object -- None goes to PyArray_DEFAULT + * This function takes a Python object representing a type and converts it + * to a the correct PyArray_Descr * structure to describe the type. + * + * Many objects can be used to represent a data-type which in NumPy is + * quite a flexible concept. + * + * This is the central code that converts Python objects to + * Type-descriptor objects that are used throughout numpy. + * new reference in *at + */ static int PyArray_DescrConverter(PyObject *obj, PyArray_Descr **at) { char *type; - int check_num=PyArray_NOTYPE+10; + int check_num = PyArray_NOTYPE + 10; int len; PyObject *item; int elsize = 0; char endian = '='; - *at=NULL; - + *at = NULL; /* default */ if (obj == Py_None) { *at = PyArray_DescrFromType(PyArray_DEFAULT); return PY_SUCCEED; } - if (PyArray_DescrCheck(obj)) { *at = (PyArray_Descr *)obj; Py_INCREF(*at); @@ -5724,134 +5869,165 @@ if (PyType_IsSubtype((PyTypeObject *)obj, &PyGenericArrType_Type)) { *at = PyArray_DescrFromTypeObject(obj); - if (*at) return PY_SUCCEED; - else return PY_FAIL; + if (*at) { + return PY_SUCCEED; + } + else { + return PY_FAIL; + } } check_num = PyArray_OBJECT; - if (obj == (PyObject *)(&PyInt_Type)) + if (obj == (PyObject *)(&PyInt_Type)) { check_num = PyArray_LONG; - else if (obj == (PyObject *)(&PyLong_Type)) + } + else if (obj == (PyObject *)(&PyLong_Type)) { check_num = PyArray_LONGLONG; - else if (obj == (PyObject *)(&PyFloat_Type)) + } + else if (obj == (PyObject *)(&PyFloat_Type)) { check_num = PyArray_DOUBLE; - else if (obj == (PyObject *)(&PyComplex_Type)) + } + else if (obj == (PyObject *)(&PyComplex_Type)) { check_num = PyArray_CDOUBLE; - else if (obj == (PyObject *)(&PyBool_Type)) + } + else if (obj == (PyObject *)(&PyBool_Type)) { check_num = PyArray_BOOL; - else if (obj == (PyObject *)(&PyString_Type)) + } + else if (obj == (PyObject *)(&PyString_Type)) { check_num = PyArray_STRING; - else if (obj == (PyObject *)(&PyUnicode_Type)) + } + else if (obj == (PyObject *)(&PyUnicode_Type)) { check_num = PyArray_UNICODE; - else if (obj == (PyObject *)(&PyBuffer_Type)) + } + else if (obj == (PyObject *)(&PyBuffer_Type)) { check_num = PyArray_VOID; + } else { *at = _arraydescr_fromobj(obj); - if (*at) return PY_SUCCEED; + if (*at) { + return PY_SUCCEED; + } } goto finish; } /* or a typecode string */ - if (PyString_Check(obj)) { /* Check for a string typecode. */ type = PyString_AS_STRING(obj); len = PyString_GET_SIZE(obj); - if (len <= 0) goto fail; - - /* check for commas present - or first (or second) element a digit */ + if (len <= 0) { + goto fail; + } + /* check for commas present or first (or second) element a digit */ if (_check_for_commastring(type, len)) { *at = _convert_from_commastring(obj, 0); - if (*at) return PY_SUCCEED; + if (*at) { + return PY_SUCCEED; + } return PY_FAIL; } check_num = (int) type[0]; - if ((char) check_num == '>' || (char) check_num == '<' || \ - (char) check_num == '|' || (char) check_num == '=') { - if (len <= 1) goto fail; + if ((char) check_num == '>' || (char) check_num == '<' + || (char) check_num == '|' || (char) check_num == '=') { + if (len <= 1) { + goto fail; + } endian = (char) check_num; type++; len--; check_num = (int) type[0]; - if (endian == '|') endian = '='; + if (endian == '|') { + endian = '='; + } } if (len > 1) { - elsize = atoi(type+1); + elsize = atoi(type + 1); if (elsize == 0) { check_num = PyArray_NOTYPE+10; } - /* When specifying length of UNICODE - the number of characters is given to match - the STRING interface. Each character can be - more than one byte and itemsize must be - the number of bytes. - */ + /* + * When specifying length of UNICODE + * the number of characters is given to match + * the STRING interface. Each character can be + * more than one byte and itemsize must be + * the number of bytes. + */ else if (check_num == PyArray_UNICODELTR) { elsize <<= 2; } - /* Support for generic processing - c4, i4, f8, etc... - */ - else if ((check_num != PyArray_STRINGLTR) && \ - (check_num != PyArray_VOIDLTR) && \ - (check_num != PyArray_STRINGLTR2)) { - check_num = \ - PyArray_TypestrConvert(elsize, - check_num); - if (check_num == PyArray_NOTYPE) + /* Support for generic processing c4, i4, f8, etc...*/ + else if ((check_num != PyArray_STRINGLTR) + && (check_num != PyArray_VOIDLTR) + && (check_num != PyArray_STRINGLTR2)) { + check_num = PyArray_TypestrConvert(elsize, check_num); + if (check_num == PyArray_NOTYPE) { check_num += 10; + } elsize = 0; } } } - /* or a tuple */ else if (PyTuple_Check(obj)) { + /* or a tuple */ *at = _convert_from_tuple(obj); if (*at == NULL){ - if (PyErr_Occurred()) return PY_FAIL; + if (PyErr_Occurred()) { + return PY_FAIL; + } goto fail; } return PY_SUCCEED; } - /* or a list */ else if (PyList_Check(obj)) { + /* or a list */ *at = _convert_from_array_descr(obj,0); if (*at == NULL) { - if (PyErr_Occurred()) return PY_FAIL; + if (PyErr_Occurred()) { + return PY_FAIL; + } goto fail; } return PY_SUCCEED; } - /* or a dictionary */ else if (PyDict_Check(obj)) { + /* or a dictionary */ *at = _convert_from_dict(obj,0); if (*at == NULL) { - if (PyErr_Occurred()) return PY_FAIL; + if (PyErr_Occurred()) { + return PY_FAIL; + } goto fail; } return PY_SUCCEED; } - else if (PyArray_Check(obj)) goto fail; - else /* goto fail;*/ { + else if (PyArray_Check(obj)) { + goto fail; + } + else { *at = _arraydescr_fromobj(obj); - if (*at) return PY_SUCCEED; - if (PyErr_Occurred()) return PY_FAIL; + if (*at) { + return PY_SUCCEED; + } + if (PyErr_Occurred()) { + return PY_FAIL; + } goto fail; } - if (PyErr_Occurred()) goto fail; - + if (PyErr_Occurred()) { + goto fail; + } /* if (check_num == PyArray_NOTYPE) return PY_FAIL; */ finish: - if ((check_num == PyArray_NOTYPE+10) || \ - (*at = PyArray_DescrFromType(check_num))==NULL) { - /* Now check to see if the object is registered - in typeDict */ + if ((check_num == PyArray_NOTYPE + 10) + || (*at = PyArray_DescrFromType(check_num)) == NULL) { + /* Now check to see if the object is registered in typeDict */ if (typeDict != NULL) { item = PyDict_GetItem(typeDict, obj); - if (item) return PyArray_DescrConverter(item, at); + if (item) { + return PyArray_DescrConverter(item, at); + } } goto fail; } @@ -5860,51 +6036,58 @@ PyArray_DESCR_REPLACE(*at); (*at)->elsize = elsize; } - if (endian != '=' && PyArray_ISNBO(endian)) endian = '='; - - if (endian != '=' && (*at)->byteorder != '|' && \ - (*at)->byteorder != endian) { + if (endian != '=' && PyArray_ISNBO(endian)) { + endian = '='; + } + if (endian != '=' && (*at)->byteorder != '|' + && (*at)->byteorder != endian) { PyArray_DESCR_REPLACE(*at); (*at)->byteorder = endian; } - return PY_SUCCEED; fail: - PyErr_SetString(PyExc_TypeError, - "data type not understood"); - *at=NULL; + PyErr_SetString(PyExc_TypeError, "data type not understood"); + *at = NULL; return PY_FAIL; } /*NUMPY_API - Convert object to endian -*/ + * Convert object to endian + */ static int PyArray_ByteorderConverter(PyObject *obj, char *endian) { char *str; + *endian = PyArray_SWAP; str = PyString_AsString(obj); - if (!str) return PY_FAIL; + if (!str) { + return PY_FAIL; + } if (strlen(str) < 1) { PyErr_SetString(PyExc_ValueError, "Byteorder string must be at least length 1"); return PY_FAIL; } *endian = str[0]; - if (str[0] != PyArray_BIG && str[0] != PyArray_LITTLE && \ - str[0] != PyArray_NATIVE && str[0] != PyArray_IGNORE) { - if (str[0] == 'b' || str[0] == 'B') + if (str[0] != PyArray_BIG && str[0] != PyArray_LITTLE + && str[0] != PyArray_NATIVE && str[0] != PyArray_IGNORE) { + if (str[0] == 'b' || str[0] == 'B') { *endian = PyArray_BIG; - else if (str[0] == 'l' || str[0] == 'L') + } + else if (str[0] == 'l' || str[0] == 'L') { *endian = PyArray_LITTLE; - else if (str[0] == 'n' || str[0] == 'N') + } + else if (str[0] == 'n' || str[0] == 'N') { *endian = PyArray_NATIVE; - else if (str[0] == 'i' || str[0] == 'I') + } + else if (str[0] == 'i' || str[0] == 'I') { *endian = PyArray_IGNORE; - else if (str[0] == 's' || str[0] == 'S') + } + else if (str[0] == 's' || str[0] == 'S') { *endian = PyArray_SWAP; + } else { PyErr_Format(PyExc_ValueError, "%s is an unrecognized byteorder", @@ -5916,26 +6099,32 @@ } /*NUMPY_API - Convert object to sort kind -*/ + * Convert object to sort kind + */ static int PyArray_SortkindConverter(PyObject *obj, NPY_SORTKIND *sortkind) { char *str; + *sortkind = PyArray_QUICKSORT; str = PyString_AsString(obj); - if (!str) return PY_FAIL; + if (!str) { + return PY_FAIL; + } if (strlen(str) < 1) { PyErr_SetString(PyExc_ValueError, "Sort kind string must be at least length 1"); return PY_FAIL; } - if (str[0] == 'q' || str[0] == 'Q') + if (str[0] == 'q' || str[0] == 'Q') { *sortkind = PyArray_QUICKSORT; - else if (str[0] == 'h' || str[0] == 'H') + } + else if (str[0] == 'h' || str[0] == 'H') { *sortkind = PyArray_HEAPSORT; - else if (str[0] == 'm' || str[0] == 'M') + } + else if (str[0] == 'm' || str[0] == 'M') { *sortkind = PyArray_MERGESORT; + } else { PyErr_Format(PyExc_ValueError, "%s is an unrecognized kind of sort", @@ -5946,44 +6135,54 @@ } -/* compare the field dictionary for two types - return 1 if the same or 0 if not -*/ - +/* + * compare the field dictionary for two types + * return 1 if the same or 0 if not + */ static int _equivalent_fields(PyObject *field1, PyObject *field2) { int same, val; - if (field1 == field2) return 1; - if (field1 == NULL || field2 == NULL) return 0; + if (field1 == field2) { + return 1; + } + if (field1 == NULL || field2 == NULL) { + return 0; + } val = PyObject_Compare(field1, field2); - if (val != 0 || PyErr_Occurred()) same = 0; - else same = 1; + if (val != 0 || PyErr_Occurred()) { + same = 0; + } + else { + same = 1; + } PyErr_Clear(); return same; } -/* This function returns true if the two typecodes are - equivalent (same basic kind and same itemsize). -*/ -/*NUMPY_API*/ +/*NUMPY_API + * + * This function returns true if the two typecodes are + * equivalent (same basic kind and same itemsize). + */ static unsigned char PyArray_EquivTypes(PyArray_Descr *typ1, PyArray_Descr *typ2) { - register int typenum1=typ1->type_num; - register int typenum2=typ2->type_num; - register int size1=typ1->elsize; - register int size2=typ2->elsize; + int typenum1 = typ1->type_num; + int typenum2 = typ2->type_num; + int size1 = typ1->elsize; + int size2 = typ2->elsize; - if (size1 != size2) return FALSE; - - if (PyArray_ISNBO(typ1->byteorder) != PyArray_ISNBO(typ2->byteorder)) + if (size1 != size2) { return FALSE; - - if (typenum1 == PyArray_VOID || \ - typenum2 == PyArray_VOID) { + } + if (PyArray_ISNBO(typ1->byteorder) != PyArray_ISNBO(typ2->byteorder)) { + return FALSE; + } + if (typenum1 == PyArray_VOID + || typenum2 == PyArray_VOID) { return ((typenum1 == typenum2) && _equivalent_fields(typ1->fields, typ2->fields)); } @@ -5996,6 +6195,7 @@ { PyArray_Descr *d1, *d2; Bool ret; + d1 = PyArray_DescrFromType(typenum1); d2 = PyArray_DescrFromType(typenum2); ret = PyArray_EquivTypes(d1, d2); @@ -6011,16 +6211,16 @@ { intp newdims[MAX_DIMS]; intp newstrides[MAX_DIMS]; - int i,k,num; + int i, k, num; PyObject *ret; - num = ndmin-nd; - for (i=0; idescr->elsize; } - for (i=num;idimensions[k]; newstrides[i] = arr->strides[k]; } @@ -6028,8 +6228,7 @@ ret = PyArray_NewFromDescr(arr->ob_type, arr->descr, ndmin, newdims, newstrides, arr->data, arr->flags, (PyObject *)arr); - /* steals a reference to arr --- so don't increment - here */ + /* steals a reference to arr --- so don't increment here */ PyArray_BASE(ret) = (PyObject *)arr; return ret; } @@ -6046,23 +6245,22 @@ static PyObject * _array_fromobject(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kws) { - PyObject *op, *ret=NULL; + PyObject *op, *ret = NULL; static char *kwd[]= {"object", "dtype", "copy", "order", "subok", "ndmin", NULL}; - Bool subok=FALSE; - Bool copy=TRUE; - int ndmin=0, nd; - PyArray_Descr *type=NULL; - PyArray_Descr *oldtype=NULL; + Bool subok = FALSE; + Bool copy = TRUE; + int ndmin = 0, nd; + PyArray_Descr *type = NULL; + PyArray_Descr *oldtype = NULL; NPY_ORDER order=PyArray_ANYORDER; - int flags=0; + int flags = 0; if (PyTuple_GET_SIZE(args) > 2) { PyErr_SetString(PyExc_ValueError, "only 2 non-keyword arguments accepted"); return NULL; } - if(!PyArg_ParseTupleAndKeywords(args, kws, "O|O&O&O&O&i", kwd, &op, PyArray_DescrConverter2, &type, @@ -6074,19 +6272,17 @@ return NULL; } - /* fast exit if simple call */ - if ((subok && PyArray_Check(op)) || - (!subok && PyArray_CheckExact(op))) { - if (type==NULL) { + if ((subok && PyArray_Check(op)) + || (!subok && PyArray_CheckExact(op))) { + if (type == NULL) { if (!copy && STRIDING_OK(op, order)) { Py_INCREF(op); ret = op; goto finish; } else { - ret = PyArray_NewCopy((PyArrayObject*)op, - order); + ret = PyArray_NewCopy((PyArrayObject*)op, order); goto finish; } } @@ -6099,9 +6295,10 @@ goto finish; } else { - ret = PyArray_NewCopy((PyArrayObject*)op, - order); - if (oldtype == type) goto finish; + ret = PyArray_NewCopy((PyArrayObject*)op, order); + if (oldtype == type) { + goto finish; + } Py_INCREF(oldtype); Py_DECREF(PyArray_DESCR(ret)); PyArray_DESCR(ret) = oldtype; @@ -6116,9 +6313,9 @@ if (order == PyArray_CORDER) { flags |= CONTIGUOUS; } - else if ((order == PyArray_FORTRANORDER) || + else if ((order == PyArray_FORTRANORDER) /* order == PyArray_ANYORDER && */ - (PyArray_Check(op) && PyArray_ISFORTRAN(op))) { + || (PyArray_Check(op) && PyArray_ISFORTRAN(op))) { flags |= FORTRAN; } if (!subok) { @@ -6126,23 +6323,27 @@ } flags |= NPY_FORCECAST; - Py_XINCREF(type); ret = PyArray_CheckFromAny(op, type, 0, 0, flags, NULL); finish: Py_XDECREF(type); - if (!ret || (nd=PyArray_NDIM(ret)) >= ndmin) return ret; - /* create a new array from the same data with ones in the shape */ - /* steals a reference to ret */ + if (!ret || (nd=PyArray_NDIM(ret)) >= ndmin) { + return ret; + } + /* + * create a new array from the same data with ones in the shape + * steals a reference to ret + */ return _prepend_ones((PyArrayObject *)ret, nd, ndmin); } -/* accepts NULL type */ -/* steals referenct to type */ /*NUMPY_API - Empty -*/ + * Empty + * + * accepts NULL type + * steals referenct to type + */ static PyObject * PyArray_Empty(int nd, intp *dims, PyArray_Descr *type, int fortran) { @@ -6153,11 +6354,15 @@ type, nd, dims, NULL, NULL, fortran, NULL); - if (ret == NULL) return NULL; - + if (ret == NULL) { + return NULL; + } if (PyDataType_REFCHK(type)) { PyArray_FillObjectArray(ret, Py_None); - if (PyErr_Occurred()) {Py_DECREF(ret); return NULL;} + if (PyErr_Occurred()) { + Py_DECREF(ret); + return NULL; + } } return (PyObject *)ret; } @@ -6167,23 +6372,26 @@ { static char *kwlist[] = {"shape","dtype","order",NULL}; - PyArray_Descr *typecode=NULL; + PyArray_Descr *typecode = NULL; PyArray_Dims shape = {NULL, 0}; NPY_ORDER order = PyArray_CORDER; Bool fortran; - PyObject *ret=NULL; + PyObject *ret = NULL; if (!PyArg_ParseTupleAndKeywords(args, kwds, "O&|O&O&", kwlist, PyArray_IntpConverter, &shape, PyArray_DescrConverter, &typecode, - PyArray_OrderConverter, &order)) + PyArray_OrderConverter, &order)) { goto fail; - - if (order == PyArray_FORTRANORDER) fortran = TRUE; - else fortran = FALSE; - + } + if (order == PyArray_FORTRANORDER) { + fortran = TRUE; + } + else { + fortran = FALSE; + } ret = PyArray_Empty(shape.len, shape.ptr, typecode, fortran); PyDimMem_FREE(shape.ptr); return ret; @@ -6194,17 +6402,18 @@ return NULL; } -/* This function is needed for supporting Pickles of - numpy scalar objects. -*/ +/* + * This function is needed for supporting Pickles of + * numpy scalar objects. + */ static PyObject * array_scalar(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kwds) { static char *kwlist[] = {"dtype","obj", NULL}; PyArray_Descr *typecode; - PyObject *obj=NULL; - int alloc=0; + PyObject *obj = NULL; + int alloc = 0; void *dptr; PyObject *ret; @@ -6212,17 +6421,18 @@ if (!PyArg_ParseTupleAndKeywords(args, kwds, "O!|O", kwlist, &PyArrayDescr_Type, &typecode, - &obj)) + &obj)) { return NULL; - + } if (typecode->elsize == 0) { - PyErr_SetString(PyExc_ValueError, \ - "itemsize cannot be zero"); + PyErr_SetString(PyExc_ValueError, "itemsize cannot be zero"); return NULL; } if (PyDataType_FLAGCHK(typecode, NPY_ITEM_IS_POINTER)) { - if (obj == NULL) obj = Py_None; + if (obj == NULL) { + obj = Py_None; + } dptr = &obj; } else { @@ -6254,29 +6464,37 @@ ret = PyArray_Scalar(dptr, typecode, NULL); /* free dptr which contains zeros */ - if (alloc) _pya_free(dptr); + if (alloc) { + _pya_free(dptr); + } return ret; } -/* steal a reference */ -/* accepts NULL type */ /*NUMPY_API - Zeros -*/ + * Zeros + * + * steal a reference + * accepts NULL type + */ static PyObject * PyArray_Zeros(int nd, intp *dims, PyArray_Descr *type, int fortran) { PyArrayObject *ret; - if (!type) type = PyArray_DescrFromType(PyArray_DEFAULT); + if (!type) { + type = PyArray_DescrFromType(PyArray_DEFAULT); + } ret = (PyArrayObject *)PyArray_NewFromDescr(&PyArray_Type, type, nd, dims, NULL, NULL, fortran, NULL); - if (ret == NULL) return NULL; - - if (_zerofill(ret) < 0) return NULL; + if (ret == NULL) { + return NULL; + } + if (_zerofill(ret) < 0) { + return NULL; + } return (PyObject *)ret; } @@ -6285,11 +6503,11 @@ array_zeros(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kwds) { static char *kwlist[] = {"shape","dtype","order",NULL}; /* XXX ? */ - PyArray_Descr *typecode=NULL; + PyArray_Descr *typecode = NULL; PyArray_Dims shape = {NULL, 0}; NPY_ORDER order = PyArray_CORDER; Bool fortran = FALSE; - PyObject *ret=NULL; + PyObject *ret = NULL; if (!PyArg_ParseTupleAndKeywords(args, kwds, "O&|O&O&", kwlist, PyArray_IntpConverter, @@ -6297,11 +6515,15 @@ PyArray_DescrConverter, &typecode, PyArray_OrderConverter, - &order)) + &order)) { goto fail; - - if (order == PyArray_FORTRANORDER) fortran = TRUE; - else fortran = FALSE; + } + if (order == PyArray_FORTRANORDER) { + fortran = TRUE; + } + else { + fortran = FALSE; + } ret = PyArray_Zeros(shape.len, shape.ptr, typecode, (int) fortran); PyDimMem_FREE(shape.ptr); return ret; @@ -6316,23 +6538,28 @@ array_set_typeDict(PyObject *NPY_UNUSED(ignored), PyObject *args) { PyObject *dict; - if (!PyArg_ParseTuple(args, "O", &dict)) return NULL; - Py_XDECREF(typeDict); /* Decrement old reference (if any)*/ + + if (!PyArg_ParseTuple(args, "O", &dict)) { + return NULL; + } + /* Decrement old reference (if any)*/ + Py_XDECREF(typeDict); typeDict = dict; - Py_INCREF(dict); /* Create an internal reference to it */ + /* Create an internal reference to it */ + Py_INCREF(dict); Py_INCREF(Py_None); return Py_None; } -/* Reading from a file or a string. +/* + * Reading from a file or a string. + * + * As much as possible, we try to use the same code for both files and strings, + * so the semantics for fromstring and fromfile are the same, especially with + * regards to the handling of text representations. + */ - As much as possible, we try to use the same code for both files and strings, - so the semantics for fromstring and fromfile are the same, especially with - regards to the handling of text representations. -*/ - - typedef int (*next_element)(void **, void *, PyArray_Descr *, void *); typedef int (*skip_separator)(void **, const char *, void *); @@ -6355,14 +6582,16 @@ return dtype->f->scanfunc(*fp, dptr, NULL, dtype); } -/* Remove multiple whitespace from the separator, and add a space to the - beginning and end. This simplifies the separator-skipping code below. -*/ +/* + * Remove multiple whitespace from the separator, and add a space to the + * beginning and end. This simplifies the separator-skipping code below. + */ static char * swab_separator(char *sep) { int skip_space = 0; char *s, *start; + s = start = malloc(strlen(sep)+3); /* add space to front if there isn't one */ if (*sep != '\0' && !isspace(*sep)) { @@ -6372,14 +6601,18 @@ if (isspace(*sep)) { if (skip_space) { sep++; - } else { + } + else { *s = ' '; - s++; sep++; + s++; + sep++; skip_space = 1; } - } else { + } + else { *s = *sep; - s++; sep++; + s++; + sep++; skip_space = 0; } } @@ -6392,17 +6625,17 @@ return start; } -/* Assuming that the separator is the next bit in the string (file), skip it. - - Single spaces in the separator are matched to arbitrary-long sequences - of whitespace in the input. If the separator consists only of spaces, - it matches one or more whitespace characters. - - If we can't match the separator, return -2. - If we hit the end of the string (file), return -1. - Otherwise, return 0. -*/ - +/* + * Assuming that the separator is the next bit in the string (file), skip it. + * + * Single spaces in the separator are matched to arbitrary-long sequences + * of whitespace in the input. If the separator consists only of spaces, + * it matches one or more whitespace characters. + * + * If we can't match the separator, return -2. + * If we hit the end of the string (file), return -1. + * Otherwise, return 0. + */ static int fromstr_skip_separator(char **s, const char *sep, const char *end) { @@ -6413,26 +6646,31 @@ if (c == '\0' || (end != NULL && string >= end)) { result = -1; break; - } else if (*sep == '\0') { + } + else if (*sep == '\0') { if (string != *s) { /* matched separator */ result = 0; break; - } else { + } + else { /* separator was whitespace wildcard that didn't match */ result = -2; break; } - } else if (*sep == ' ') { + } + else if (*sep == ' ') { /* whitespace wildcard */ if (!isspace(c)) { sep++; continue; } - } else if (*sep != c) { + } + else if (*sep != c) { result = -2; break; - } else { + } + else { sep++; } string++; @@ -6446,46 +6684,54 @@ { int result = 0; const char *sep_start = sep; + while (1) { int c = fgetc(*fp); + if (c == EOF) { result = -1; break; - } else if (*sep == '\0') { + } + else if (*sep == '\0') { ungetc(c, *fp); if (sep != sep_start) { /* matched separator */ result = 0; break; - } else { + } + else { /* separator was whitespace wildcard that didn't match */ result = -2; break; } - } else if (*sep == ' ') { + } + else if (*sep == ' ') { /* whitespace wildcard */ if (!isspace(c)) { sep++; sep_start++; ungetc(c, *fp); - } else if (sep == sep_start) { + } + else if (sep == sep_start) { sep_start--; } - } else if (*sep != c) { + } + else if (*sep != c) { ungetc(c, *fp); result = -2; break; - } else { + } + else { sep++; } } return result; } -/* Create an array by reading from the given stream, using the passed - next_element and skip_separator functions. -*/ - +/* + * Create an array by reading from the given stream, using the passed + * next_element and skip_separator functions. + */ #define FROM_BUFFER_SIZE 4096 static PyArrayObject * array_from_text(PyArray_Descr *dtype, intp num, char *sep, size_t *nread, @@ -6501,21 +6747,23 @@ intp bytes, totalbytes; size = (num >= 0) ? num : FROM_BUFFER_SIZE; - r = (PyArrayObject *) PyArray_NewFromDescr(&PyArray_Type, dtype, 1, &size, NULL, NULL, 0, NULL); - if (r == NULL) return NULL; + if (r == NULL) { + return NULL; + } clean_sep = swab_separator(sep); NPY_BEGIN_ALLOW_THREADS; totalbytes = bytes = size * dtype->elsize; dptr = r->data; - for (i=0; num < 0 || i < num; i++) { - if (next(&stream, dptr, dtype, stream_data) < 0) + for (i= 0; num < 0 || i < num; i++) { + if (next(&stream, dptr, dtype, stream_data) < 0) { break; + } *nread += 1; thisbuf += 1; dptr += dtype->elsize; @@ -6530,12 +6778,15 @@ dptr = tmp + (totalbytes - bytes); thisbuf = 0; } - if (skip_sep(&stream, clean_sep, stream_data) < 0) + if (skip_sep(&stream, clean_sep, stream_data) < 0) { break; + } } if (num < 0) { tmp = PyDataMem_RENEW(r->data, (*nread)*dtype->elsize); - if (tmp == NULL) err=1; + if (tmp == NULL) { + err = 1; + } else { PyArray_DIM(r,0) = *nread; r->data = tmp; @@ -6543,7 +6794,9 @@ } NPY_END_ALLOW_THREADS; free(clean_sep); - if (err == 1) PyErr_NoMemory(); + if (err == 1) { + PyErr_NoMemory(); + } if (PyErr_Occurred()) { Py_DECREF(r); return NULL; @@ -6553,26 +6806,26 @@ #undef FROM_BUFFER_SIZE /*NUMPY_API - - Given a pointer to a string ``data``, a string length ``slen``, and - a ``PyArray_Descr``, return an array corresponding to the data - encoded in that string. - - If the dtype is NULL, the default array type is used (double). - If non-null, the reference is stolen. - - If ``slen`` is < 0, then the end of string is used for text data. - It is an error for ``slen`` to be < 0 for binary data (since embedded NULLs - would be the norm). - - The number of elements to read is given as ``num``; if it is < 0, then - then as many as possible are read. - - If ``sep`` is NULL or empty, then binary data is assumed, else - text data, with ``sep`` as the separator between elements. Whitespace in - the separator matches any length of whitespace in the text, and a match - for whitespace around the separator is added. -*/ + * + * Given a pointer to a string ``data``, a string length ``slen``, and + * a ``PyArray_Descr``, return an array corresponding to the data + * encoded in that string. + * + * If the dtype is NULL, the default array type is used (double). + * If non-null, the reference is stolen. + * + * If ``slen`` is < 0, then the end of string is used for text data. + * It is an error for ``slen`` to be < 0 for binary data (since embedded NULLs + * would be the norm). + * + * The number of elements to read is given as ``num``; if it is < 0, then + * then as many as possible are read. + * + * If ``sep`` is NULL or empty, then binary data is assumed, else + * text data, with ``sep`` as the separator between elements. Whitespace in + * the separator matches any length of whitespace in the text, and a match + * for whitespace around the separator is added. + */ static PyObject * PyArray_FromString(char *data, intp slen, PyArray_Descr *dtype, intp num, char *sep) @@ -6581,9 +6834,9 @@ PyArrayObject *ret; Bool binary; - if (dtype == NULL) + if (dtype == NULL) { dtype=PyArray_DescrFromType(PyArray_DEFAULT); - + } if (PyDataType_FLAGCHK(dtype, NPY_ITEM_IS_POINTER)) { PyErr_SetString(PyExc_ValueError, "Cannot create an object array from" \ @@ -6591,7 +6844,6 @@ Py_DECREF(dtype); return NULL; } - itemsize = dtype->elsize; if (itemsize == 0) { PyErr_SetString(PyExc_ValueError, "zero-valued itemsize"); @@ -6600,7 +6852,6 @@ } binary = ((sep == NULL) || (strlen(sep) == 0)); - if (binary) { if (num < 0 ) { if (slen % itemsize != 0) { @@ -6611,7 +6862,8 @@ return NULL; } num = slen/itemsize; - } else { + } + else { if (slen < num*itemsize) { PyErr_SetString(PyExc_ValueError, "string is smaller than " \ @@ -6620,17 +6872,20 @@ return NULL; } } - ret = (PyArrayObject *) PyArray_NewFromDescr(&PyArray_Type, dtype, 1, &num, NULL, NULL, 0, NULL); - if (ret == NULL) return NULL; + if (ret == NULL) { + return NULL; + } memcpy(ret->data, data, num*dtype->elsize); - } else { + } + else { /* read from character-based string */ size_t nread = 0; char *end; + if (dtype->f->scanfunc == NULL) { PyErr_SetString(PyExc_ValueError, "don't know how to read " \ @@ -6641,7 +6896,8 @@ } if (slen < 0) { end = NULL; - } else { + } + else { end = data + slen; } ret = array_from_text(dtype, num, sep, &nread, @@ -6657,11 +6913,11 @@ array_fromstring(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *keywds) { char *data; - Py_ssize_t nin=-1; - char *sep=NULL; + Py_ssize_t nin = -1; + char *sep = NULL; Py_ssize_t s; static char *kwlist[] = {"string", "dtype", "count", "sep", NULL}; - PyArray_Descr *descr=NULL; + PyArray_Descr *descr = NULL; if (!PyArg_ParseTupleAndKeywords(args, keywds, "s#|O&" NPY_SSIZE_T_PYFMT "s", kwlist, @@ -6671,7 +6927,6 @@ Py_XDECREF(descr); return NULL; } - return PyArray_FromString(data, (intp)s, descr, (intp)nin, sep); } @@ -6684,14 +6939,23 @@ intp start, numbytes; if (num < 0) { - int fail=0; + int fail = 0; + start = (intp )ftell(fp); - if (start < 0) fail=1; - if (fseek(fp, 0, SEEK_END) < 0) fail=1; + if (start < 0) { + fail = 1; + } + if (fseek(fp, 0, SEEK_END) < 0) { + fail = 1; + } numbytes = (intp) ftell(fp); - if (numbytes < 0) fail=1; + if (numbytes < 0) { + fail = 1; + } numbytes -= start; - if (fseek(fp, start, SEEK_SET) < 0) fail=1; + if (fseek(fp, start, SEEK_SET) < 0) { + fail = 1; + } if (fail) { PyErr_SetString(PyExc_IOError, "could not seek in file"); @@ -6705,7 +6969,9 @@ 1, &num, NULL, NULL, 0, NULL); - if (r==NULL) return NULL; + if (r == NULL) { + return NULL; + } NPY_BEGIN_ALLOW_THREADS; *nread = fread(r->data, dtype->elsize, num, fp); NPY_END_ALLOW_THREADS; @@ -6713,24 +6979,24 @@ } /*NUMPY_API - - Given a ``FILE *`` pointer ``fp``, and a ``PyArray_Descr``, return an - array corresponding to the data encoded in that file. - - If the dtype is NULL, the default array type is used (double). - If non-null, the reference is stolen. - - The number of elements to read is given as ``num``; if it is < 0, then - then as many as possible are read. - - If ``sep`` is NULL or empty, then binary data is assumed, else - text data, with ``sep`` as the separator between elements. Whitespace in - the separator matches any length of whitespace in the text, and a match - for whitespace around the separator is added. - - For memory-mapped files, use the buffer interface. No more data than - necessary is read by this routine. -*/ + * + * Given a ``FILE *`` pointer ``fp``, and a ``PyArray_Descr``, return an + * array corresponding to the data encoded in that file. + * + * If the dtype is NULL, the default array type is used (double). + * If non-null, the reference is stolen. + * + * The number of elements to read is given as ``num``; if it is < 0, then + * then as many as possible are read. + * + * If ``sep`` is NULL or empty, then binary data is assumed, else + * text data, with ``sep`` as the separator between elements. Whitespace in + * the separator matches any length of whitespace in the text, and a match + * for whitespace around the separator is added. + * + * For memory-mapped files, use the buffer interface. No more data than + * necessary is read by this routine. + */ static PyObject * PyArray_FromFile(FILE *fp, PyArray_Descr *dtype, intp num, char *sep) { @@ -6749,10 +7015,10 @@ Py_DECREF(dtype); return NULL; } - if ((sep == NULL) || (strlen(sep) == 0)) { ret = array_fromfile_binary(fp, dtype, num, &nread); - } else { + } + else { if (dtype->f->scanfunc == NULL) { PyErr_SetString(PyExc_ValueError, "don't know how to read " \ @@ -6789,12 +7055,12 @@ static PyObject * array_fromfile(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *keywds) { - PyObject *file=NULL, *ret; + PyObject *file = NULL, *ret; FILE *fp; - char *sep=""; - Py_ssize_t nin=-1; + char *sep = ""; + Py_ssize_t nin = -1; static char *kwlist[] = {"file", "dtype", "count", "sep", NULL}; - PyArray_Descr *type=NULL; + PyArray_Descr *type = NULL; if (!PyArg_ParseTupleAndKeywords(args, keywds, "O|O&" NPY_SSIZE_T_PYFMT "s", @@ -6805,16 +7071,16 @@ Py_XDECREF(type); return NULL; } - if (PyString_Check(file) || PyUnicode_Check(file)) { file = PyObject_CallFunction((PyObject *)&PyFile_Type, "Os", file, "rb"); - if (file==NULL) return NULL; + if (file == NULL) { + return NULL; + } } else { Py_INCREF(file); } - fp = PyFile_AsFile(file); if (fp == NULL) { PyErr_SetString(PyExc_IOError, @@ -6822,17 +7088,19 @@ Py_DECREF(file); return NULL; } - - if (type == NULL) type = PyArray_DescrFromType(PyArray_DEFAULT); - + if (type == NULL) { + type = PyArray_DescrFromType(PyArray_DEFAULT); + } ret = PyArray_FromFile(fp, type, (intp) nin, sep); Py_DECREF(file); return ret; } -/* steals a reference to dtype (which cannot be NULL) */ -/*NUMPY_API */ +/*NUMPY_API + * + * steals a reference to dtype (which cannot be NULL) + */ static PyObject * PyArray_FromIter(PyObject *obj, PyArray_Descr *dtype, intp count) { @@ -6842,8 +7110,9 @@ intp i, elsize, elcount; char *item, *new_data; - if (iter == NULL) goto done; - + if (iter == NULL) { + goto done; + } elcount = (count < 0) ? 0 : count; if ((elsize=dtype->elsize) == 0) { PyErr_SetString(PyExc_ValueError, "Must specify length "\ @@ -6851,9 +7120,10 @@ goto done; } - /* We would need to alter the memory RENEW code to decrement any - reference counts before throwing away any memory. - */ + /* + * We would need to alter the memory RENEW code to decrement any + * reference counts before throwing away any memory. + */ if (PyDataType_REFCHK(dtype)) { PyErr_SetString(PyExc_ValueError, "cannot create "\ "object arrays from iterator"); @@ -6863,11 +7133,11 @@ ret = (PyArrayObject *)PyArray_NewFromDescr(&PyArray_Type, dtype, 1, &elcount, NULL,NULL, 0, NULL); dtype = NULL; - if (ret == NULL) goto done; - + if (ret == NULL) { + goto done; + } for (i = 0; (i < count || count == -1) && (value = PyIter_Next(iter)); i++) { - if (i >= elcount) { /* Grow ret->data: @@ -6875,10 +7145,12 @@ 50% overallocation => 0, 4, 8, 14, 23, 36, 56, 86 ... */ elcount = (i >> 1) + (i < 4 ? 4 : 2) + i; - if (elcount <= (intp)((~(size_t)0) / elsize)) + if (elcount <= (intp)((~(size_t)0) / elsize)) { new_data = PyDataMem_RENEW(ret->data, elcount * elsize); - else + } + else { new_data = NULL; + } if (new_data == NULL) { PyErr_SetString(PyExc_MemoryError, "cannot allocate array memory"); @@ -6887,15 +7159,14 @@ } ret->data = new_data; } - ret->dimensions[0] = i+1; + ret->dimensions[0] = i + 1; - if (((item = index2ptr(ret, i)) == NULL) || - (ret->descr->f->setitem(value, item, ret) == -1)) { + if (((item = index2ptr(ret, i)) == NULL) + || (ret->descr->f->setitem(value, item, ret) == -1)) { Py_DECREF(value); goto done; } Py_DECREF(value); - } if (i < count) { @@ -6904,10 +7175,12 @@ } /* - Realloc the data so that don't keep extra memory tied up - (assuming realloc is reasonably good about reusing space...) - */ - if (i==0) i = 1; + * Realloc the data so that don't keep extra memory tied up + * (assuming realloc is reasonably good about reusing space...) + */ + if (i == 0) { + i = 1; + } new_data = PyDataMem_RENEW(ret->data, i * elsize); if (new_data == NULL) { PyErr_SetString(PyExc_MemoryError, "cannot allocate array memory"); @@ -6929,9 +7202,9 @@ array_fromiter(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *keywds) { PyObject *iter; - Py_ssize_t nin=-1; + Py_ssize_t nin = -1; static char *kwlist[] = {"iter", "dtype", "count", NULL}; - PyArray_Descr *descr=NULL; + PyArray_Descr *descr = NULL; if (!PyArg_ParseTupleAndKeywords(args, keywds, "OO&|" NPY_SSIZE_T_PYFMT, @@ -6942,7 +7215,6 @@ Py_XDECREF(descr); return NULL; } - return PyArray_FromIter(iter, descr, (intp)nin); } @@ -6957,7 +7229,7 @@ Py_ssize_t ts; intp s, n; int itemsize; - int write=1; + int write = 1; if (PyDataType_REFCHK(type)) { @@ -6973,21 +7245,25 @@ Py_DECREF(type); return NULL; } - - if (buf->ob_type->tp_as_buffer == NULL || \ - (buf->ob_type->tp_as_buffer->bf_getwritebuffer == NULL && \ - buf->ob_type->tp_as_buffer->bf_getreadbuffer == NULL)) { + if (buf->ob_type->tp_as_buffer == NULL + || (buf->ob_type->tp_as_buffer->bf_getwritebuffer == NULL + && buf->ob_type->tp_as_buffer->bf_getreadbuffer == NULL)) { PyObject *newbuf; newbuf = PyObject_GetAttrString(buf, "__buffer__"); - if (newbuf == NULL) {Py_DECREF(type); return NULL;} + if (newbuf == NULL) { + Py_DECREF(type); + return NULL; + } buf = newbuf; } - else {Py_INCREF(buf);} + else { + Py_INCREF(buf); + } - if (PyObject_AsWriteBuffer(buf, (void *)&data, &ts)==-1) { + if (PyObject_AsWriteBuffer(buf, (void *)&data, &ts) == -1) { write = 0; PyErr_Clear(); - if (PyObject_AsReadBuffer(buf, (void *)&data, &ts)==-1) { + if (PyObject_AsReadBuffer(buf, (void *)&data, &ts) == -1) { Py_DECREF(buf); Py_DECREF(type); return NULL; @@ -7004,7 +7280,6 @@ s = (intp)ts - offset; n = (intp)count; itemsize = type->elsize; - if (n < 0 ) { if (s % itemsize != 0) { PyErr_SetString(PyExc_ValueError, @@ -7015,7 +7290,8 @@ return NULL; } n = s/itemsize; - } else { + } + else { if (s < n*itemsize) { PyErr_SetString(PyExc_ValueError, "buffer is smaller than requested"\ @@ -7036,8 +7312,9 @@ return NULL; } - if (!write) ret->flags &= ~WRITEABLE; - + if (!write) { + ret->flags &= ~WRITEABLE; + } /* Store a reference for decref on deallocation */ ret->base = buf; PyArray_UpdateFlags(ret, ALIGNED); @@ -7047,10 +7324,10 @@ static PyObject * array_frombuffer(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *keywds) { - PyObject *obj=NULL; - Py_ssize_t nin=-1, offset=0; + PyObject *obj = NULL; + Py_ssize_t nin = -1, offset = 0; static char *kwlist[] = {"buffer", "dtype", "count", "offset", NULL}; - PyArray_Descr *type=NULL; + PyArray_Descr *type = NULL; if (!PyArg_ParseTupleAndKeywords(args, keywds, "O|O&" NPY_SSIZE_T_PYFMT @@ -7061,9 +7338,9 @@ Py_XDECREF(type); return NULL; } - if (type==NULL) + if (type == NULL) { type = PyArray_DescrFromType(PyArray_DEFAULT); - + } return PyArray_FromBuffer(obj, type, (intp)nin, (intp)offset); } @@ -7071,48 +7348,53 @@ array_concatenate(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *kwds) { PyObject *a0; - int axis=0; + int axis = 0; static char *kwlist[] = {"seq", "axis", NULL}; if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|O&", kwlist, &a0, - PyArray_AxisConverter, &axis)) + PyArray_AxisConverter, &axis)) { return NULL; + } return PyArray_Concatenate(a0, axis); } static PyObject *array_innerproduct(PyObject *NPY_UNUSED(dummy), PyObject *args) { PyObject *b0, *a0; - if (!PyArg_ParseTuple(args, "OO", &a0, &b0)) return NULL; - + if (!PyArg_ParseTuple(args, "OO", &a0, &b0)) { + return NULL; + } return _ARET(PyArray_InnerProduct(a0, b0)); } static PyObject *array_matrixproduct(PyObject *NPY_UNUSED(dummy), PyObject *args) { PyObject *v, *a; - if (!PyArg_ParseTuple(args, "OO", &a, &v)) return NULL; - + if (!PyArg_ParseTuple(args, "OO", &a, &v)) { + return NULL; + } return _ARET(PyArray_MatrixProduct(a, v)); } static PyObject *array_fastCopyAndTranspose(PyObject *NPY_UNUSED(dummy), PyObject *args) { PyObject *a0; - if (!PyArg_ParseTuple(args, "O", &a0)) return NULL; - + if (!PyArg_ParseTuple(args, "O", &a0)) { + return NULL; + } return _ARET(PyArray_CopyAndTranspose(a0)); } static PyObject *array_correlate(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *kwds) { PyObject *shape, *a0; - int mode=0; + int mode = 0; static char *kwlist[] = {"a", "v", "mode", NULL}; if (!PyArg_ParseTupleAndKeywords(args, kwds, "OO|i", kwlist, - &a0, &shape, &mode)) return NULL; - + &a0, &shape, &mode)) { + return NULL; + } return PyArray_Correlate(a0, shape, mode); } @@ -7136,37 +7418,45 @@ return PyArray_New(&PyArray_Type, 1, &length, type_num, NULL, NULL, 0, 0, NULL); } - range = PyArray_New(&PyArray_Type, 1, &length, type_num, NULL, NULL, 0, 0, NULL); - if (range == NULL) return NULL; - + if (range == NULL) { + return NULL; + } funcs = PyArray_DESCR(range)->f; - /* place start in the buffer and the next value in the second position */ - /* if length > 2, then call the inner loop, otherwise stop */ - + /* + * place start in the buffer and the next value in the second position + * if length > 2, then call the inner loop, otherwise stop + */ obj = PyFloat_FromDouble(start); ret = funcs->setitem(obj, PyArray_DATA(range), (PyArrayObject *)range); Py_DECREF(obj); - if (ret < 0) goto fail; - if (length == 1) return range; - + if (ret < 0) { + goto fail; + } + if (length == 1) { + return range; + } obj = PyFloat_FromDouble(start + step); ret = funcs->setitem(obj, PyArray_BYTES(range)+PyArray_ITEMSIZE(range), (PyArrayObject *)range); Py_DECREF(obj); - if (ret < 0) goto fail; - if (length == 2) return range; - + if (ret < 0) { + goto fail; + } + if (length == 2) { + return range; + } if (!funcs->fill) { PyErr_SetString(PyExc_ValueError, "no fill-function for data-type."); Py_DECREF(range); return NULL; } funcs->fill(PyArray_DATA(range), length, (PyArrayObject *)range); - if (PyErr_Occurred()) goto fail; - + if (PyErr_Occurred()) { + goto fail; + } return range; fail: @@ -7174,9 +7464,9 @@ return NULL; } -/* the formula is - len = (intp) ceil((start - stop) / step); -*/ +/* + * the formula is len = (intp) ceil((start - stop) / step); + */ static intp _calc_length(PyObject *start, PyObject *stop, PyObject *step, PyObject **next, int cmplx) { @@ -7195,35 +7485,48 @@ return -1; } val = PyNumber_TrueDivide(*next, step); - Py_DECREF(*next); *next=NULL; - if (!val) return -1; + Py_DECREF(*next); + *next = NULL; + if (!val) { + return -1; + } if (cmplx && PyComplex_Check(val)) { value = PyComplex_RealAsDouble(val); - if (error_converting(value)) {Py_DECREF(val); return -1;} + if (error_converting(value)) { + Py_DECREF(val); + return -1; + } len = (intp) ceil(value); value = PyComplex_ImagAsDouble(val); Py_DECREF(val); - if (error_converting(value)) return -1; + if (error_converting(value)) { + return -1; + } len = MIN(len, (intp) ceil(value)); } else { value = PyFloat_AsDouble(val); Py_DECREF(val); - if (error_converting(value)) return -1; + if (error_converting(value)) { + return -1; + } len = (intp) ceil(value); } - if (len > 0) { *next = PyNumber_Add(start, step); - if (!next) return -1; + if (!next) { + return -1; + } } return len; } -/* this doesn't change the references */ /*NUMPY_API - ArangeObj, -*/ + * + * ArangeObj, + * + * this doesn't change the references + */ static PyObject * PyArray_ArangeObj(PyObject *start, PyObject *stop, PyObject *step, PyArray_Descr *dtype) { @@ -7231,7 +7534,7 @@ PyArray_ArrFuncs *funcs; PyObject *next; intp length; - PyArray_Descr *native=NULL; + PyArray_Descr *native = NULL; int swap; if (!dtype) { @@ -7254,33 +7557,41 @@ } dtype = deftype; } - else Py_INCREF(dtype); - + else { + Py_INCREF(dtype); + } if (!step || step == Py_None) { step = PyInt_FromLong(1); } - else Py_XINCREF(step); - + else { + Py_XINCREF(step); + } if (!stop || stop == Py_None) { stop = start; start = PyInt_FromLong(0); } - else Py_INCREF(start); - + else { + Py_INCREF(start); + } /* calculate the length and next = start + step*/ length = _calc_length(start, stop, step, &next, PyTypeNum_ISCOMPLEX(dtype->type_num)); - - if (PyErr_Occurred()) {Py_DECREF(dtype); goto fail;} + if (PyErr_Occurred()) { + Py_DECREF(dtype); + goto fail; + } if (length <= 0) { length = 0; range = PyArray_SimpleNewFromDescr(1, &length, dtype); - Py_DECREF(step); Py_DECREF(start); return range; + Py_DECREF(step); + Py_DECREF(start); + return range; } - /* If dtype is not in native byte-order then get native-byte - order version. And then swap on the way out. - */ + /* + * If dtype is not in native byte-order then get native-byte + * order version. And then swap on the way out. + */ if (!PyArray_ISNBO(dtype->byteorder)) { native = PyArray_DescrNewByteorder(dtype, PyArray_NATBYTE); swap = 1; @@ -7291,28 +7602,38 @@ } range = PyArray_SimpleNewFromDescr(1, &length, native); - if (range == NULL) goto fail; + if (range == NULL) { + goto fail; + } + /* + * place start in the buffer and the next value in the second position + * if length > 2, then call the inner loop, otherwise stop + */ funcs = PyArray_DESCR(range)->f; - - /* place start in the buffer and the next value in the second position */ - /* if length > 2, then call the inner loop, otherwise stop */ - - if (funcs->setitem(start, PyArray_DATA(range), (PyArrayObject *)range) < 0) + if (funcs->setitem( + start, PyArray_DATA(range), (PyArrayObject *)range) < 0) { goto fail; - if (length == 1) goto finish; + } + if (length == 1) { + goto finish; + } if (funcs->setitem(next, PyArray_BYTES(range)+PyArray_ITEMSIZE(range), - (PyArrayObject *)range) < 0) goto fail; - if (length == 2) goto finish; - + (PyArrayObject *)range) < 0) { + goto fail; + } + if (length == 2) { + goto finish; + } if (!funcs->fill) { PyErr_SetString(PyExc_ValueError, "no fill-function for data-type."); Py_DECREF(range); goto fail; } funcs->fill(PyArray_DATA(range), length, (PyArrayObject *)range); - if (PyErr_Occurred()) goto fail; - + if (PyErr_Occurred()) { + goto fail; + } finish: if (swap) { PyObject *new; @@ -7321,7 +7642,6 @@ Py_DECREF(PyArray_DESCR(range)); PyArray_DESCR(range) = dtype; /* steals the reference */ } - Py_DECREF(start); Py_DECREF(step); Py_DECREF(next); @@ -7336,9 +7656,9 @@ static PyObject * array_arange(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kws) { - PyObject *o_start=NULL, *o_stop=NULL, *o_step=NULL; + PyObject *o_start = NULL, *o_stop = NULL, *o_step = NULL; static char *kwd[]= {"start", "stop", "step", "dtype", NULL}; - PyArray_Descr *typecode=NULL; + PyArray_Descr *typecode = NULL; if(!PyArg_ParseTupleAndKeywords(args, kws, "O|OOO&", kwd, &o_start, &o_stop, &o_step, @@ -7347,14 +7667,13 @@ Py_XDECREF(typecode); return NULL; } - return PyArray_ArangeObj(o_start, o_stop, o_step, typecode); } /* - Included at the very first so not auto-grabbed and thus not - labeled. -*/ + * Included at the very first so not auto-grabbed and thus not + * labeled. + */ static unsigned int PyArray_GetNDArrayCVersion(void) { @@ -7365,8 +7684,10 @@ array__get_ndarray_c_version(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *kwds) { static char *kwlist[] = {NULL}; - if(!PyArg_ParseTupleAndKeywords(args, kwds, "", kwlist )) return NULL; + if(!PyArg_ParseTupleAndKeywords(args, kwds, "", kwlist )) { + return NULL; + } return PyInt_FromLong( (long) PyArray_GetNDArrayCVersion() ); } @@ -7377,46 +7698,51 @@ PyObject *ret; PyTypeObject *subtype; PyArray_Dims shape = {NULL, 0}; - PyArray_Descr *dtype=NULL; + PyArray_Descr *dtype = NULL; if (!PyArg_ParseTuple(args, "O!O&O&", &PyType_Type, &subtype, PyArray_IntpConverter, &shape, - PyArray_DescrConverter, &dtype)) + PyArray_DescrConverter, &dtype)) { goto fail; - + } if (!PyType_IsSubtype(subtype, &PyArray_Type)) { PyErr_SetString(PyExc_TypeError, "_reconstruct: First argument must be " \ "a sub-type of ndarray"); goto fail; } - ret = PyArray_NewFromDescr(subtype, dtype, (int)shape.len, shape.ptr, NULL, NULL, 0, NULL); - if (shape.ptr) PyDimMem_FREE(shape.ptr); + if (shape.ptr) { + PyDimMem_FREE(shape.ptr); + } return ret; fail: Py_XDECREF(dtype); - if (shape.ptr) PyDimMem_FREE(shape.ptr); + if (shape.ptr) { + PyDimMem_FREE(shape.ptr); + } return NULL; } static PyObject * array_set_string_function(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *kwds) { - PyObject *op=NULL; + PyObject *op = NULL; int repr=1; static char *kwlist[] = {"f", "repr", NULL}; if(!PyArg_ParseTupleAndKeywords(args, kwds, "|Oi", kwlist, - &op, &repr)) return NULL; - + &op, &repr)) { + return NULL; + } /* reset the array_repr function to built-in */ - if (op == Py_None) op = NULL; + if (op == Py_None) { + op = NULL; + } if (op != NULL && !PyCallable_Check(op)) { - PyErr_SetString(PyExc_TypeError, - "Argument must be callable."); + PyErr_SetString(PyExc_TypeError, "Argument must be callable."); return NULL; } PyArray_SetStringFunction(op, repr); @@ -7427,18 +7753,19 @@ static PyObject * array_set_ops_function(PyObject *NPY_UNUSED(self), PyObject *NPY_UNUSED(args), PyObject *kwds) { - PyObject *oldops=NULL; + PyObject *oldops = NULL; - if ((oldops = PyArray_GetNumericOps())==NULL) return NULL; - - /* Should probably ensure that objects are at least callable */ - /* Leave this to the caller for now --- error will be raised - later when use is attempted - */ + if ((oldops = PyArray_GetNumericOps()) == NULL) { + return NULL; + } + /* + * Should probably ensure that objects are at least callable + * Leave this to the caller for now --- error will be raised + * later when use is attempted + */ if (kwds && PyArray_SetNumericOps(kwds) == -1) { Py_DECREF(oldops); - PyErr_SetString(PyExc_ValueError, - "one or more objects not callable"); + PyErr_SetString(PyExc_ValueError, "one or more objects not callable"); return NULL; } return oldops; @@ -7446,26 +7773,25 @@ /*NUMPY_API - Where -*/ + * Where + */ static PyObject * PyArray_Where(PyObject *condition, PyObject *x, PyObject *y) { PyArrayObject *arr; - PyObject *tup=NULL, *obj=NULL; - PyObject *ret=NULL, *zero=NULL; + PyObject *tup = NULL, *obj = NULL; + PyObject *ret = NULL, *zero = NULL; - arr = (PyArrayObject *)PyArray_FromAny(condition, NULL, 0, 0, 0, NULL); - if (arr == NULL) return NULL; - - if ((x==NULL) && (y==NULL)) { + if (arr == NULL) { + return NULL; + } + if ((x == NULL) && (y == NULL)) { ret = PyArray_Nonzero(arr); Py_DECREF(arr); return ret; } - - if ((x==NULL) || (y==NULL)) { + if ((x == NULL) || (y == NULL)) { Py_DECREF(arr); PyErr_SetString(PyExc_ValueError, "either both or neither " "of x and y should be given"); @@ -7474,18 +7800,19 @@ zero = PyInt_FromLong((long) 0); - obj = PyArray_EnsureAnyArray(PyArray_GenericBinaryFunction(arr, zero, n_ops.not_equal)); Py_DECREF(zero); Py_DECREF(arr); - if (obj == NULL) return NULL; - + if (obj == NULL) { + return NULL; + } tup = Py_BuildValue("(OO)", y, x); - if (tup == NULL) {Py_DECREF(obj); return NULL;} - + if (tup == NULL) { + Py_DECREF(obj); + return NULL; + } ret = PyArray_Choose((PyAO *)obj, tup, NULL, NPY_RAISE); - Py_DECREF(obj); Py_DECREF(tup); return ret; @@ -7494,23 +7821,25 @@ static PyObject * array_where(PyObject *NPY_UNUSED(ignored), PyObject *args) { - PyObject *obj=NULL, *x=NULL, *y=NULL; + PyObject *obj = NULL, *x = NULL, *y = NULL; - if (!PyArg_ParseTuple(args, "O|OO", &obj, &x, &y)) return NULL; - + if (!PyArg_ParseTuple(args, "O|OO", &obj, &x, &y)) { + return NULL; + } return PyArray_Where(obj, x, y); } static PyObject * array_lexsort(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kwds) { - int axis=-1; + int axis = -1; PyObject *obj; static char *kwlist[] = {"keys", "axis", NULL}; if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|i", kwlist, - &obj, &axis)) return NULL; - + &obj, &axis)) { + return NULL; + } return _ARET(PyArray_LexSort(obj, axis)); } @@ -7519,10 +7848,10 @@ static PyObject * array_can_cast_safely(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *kwds) { - PyArray_Descr *d1=NULL; - PyArray_Descr *d2=NULL; + PyArray_Descr *d1 = NULL; + PyArray_Descr *d2 = NULL; Bool ret; - PyObject *retobj=NULL; + PyObject *retobj = NULL; static char *kwlist[] = {"from", "to", NULL}; if(!PyArg_ParseTupleAndKeywords(args, kwds, "O&O&", kwlist, @@ -7538,7 +7867,7 @@ } ret = PyArray_CanCastTo(d1, d2); - retobj = (ret ? Py_True : Py_False); + retobj = ret ? Py_True : Py_False; Py_INCREF(retobj); finish: @@ -7552,9 +7881,9 @@ { int size; - if(!PyArg_ParseTuple(args, "i", &size)) + if(!PyArg_ParseTuple(args, "i", &size)) { return NULL; - + } return PyBuffer_New(size); } @@ -7562,22 +7891,22 @@ buffer_buffer(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *kwds) { PyObject *obj; - Py_ssize_t offset=0, size=Py_END_OF_BUFFER, n; + Py_ssize_t offset = 0, size = Py_END_OF_BUFFER, n; void *unused; static char *kwlist[] = {"object", "offset", "size", NULL}; - if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|" NPY_SSIZE_T_PYFMT \ + if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|" NPY_SSIZE_T_PYFMT NPY_SSIZE_T_PYFMT, kwlist, - &obj, &offset, &size)) + &obj, &offset, &size)) { return NULL; - - + } if (PyObject_AsWriteBuffer(obj, &unused, &n) < 0) { PyErr_Clear(); return PyBuffer_FromObject(obj, offset, size); } - else + else { return PyBuffer_FromReadWriteObject(obj, offset, size); + } } #ifndef _MSC_VER @@ -7609,22 +7938,29 @@ { PyObject *mem; Py_ssize_t size; - Bool ro=FALSE, check=TRUE; + Bool ro = FALSE, check = TRUE; void *memptr; static char *kwlist[] = {"mem", "size", "readonly", "check", NULL}; + if (!PyArg_ParseTupleAndKeywords(args, kwds, "O" \ NPY_SSIZE_T_PYFMT "|O&O&", kwlist, &mem, &size, PyArray_BoolConverter, &ro, PyArray_BoolConverter, - &check)) return NULL; + &check)) { + return NULL; + } memptr = PyLong_AsVoidPtr(mem); - if (memptr == NULL) return NULL; - + if (memptr == NULL) { + return NULL; + } if (check) { - /* Try to dereference the start and end of the memory region */ - /* Catch segfault and report error if it occurs */ + /* + * Try to dereference the start and end of the memory region + * Catch segfault and report error if it occurs + */ char test; - int err=0; + int err = 0; + #ifdef _MSC_VER __try { _test_code(); @@ -7635,7 +7971,6 @@ #else PyOS_sighandler_t _npy_sig_save; _npy_sig_save = PyOS_setsig(SIGSEGV, _SigSegv_Handler); - if (setjmp(_NPY_SIGSEGV_BUF) == 0) { _test_code(); } @@ -7696,49 +8031,66 @@ Bool rstrip; char *cmp_str; Py_ssize_t strlen; - PyObject *res=NULL; - static char msg[] = \ - "comparision must be '==', '!=', '<', '>', '<=', '>='"; - + PyObject *res = NULL; + static char msg[] = "comparision must be '==', '!=', '<', '>', '<=', '>='"; static char *kwlist[] = {"a1", "a2", "cmp", "rstrip", NULL}; if (!PyArg_ParseTupleAndKeywords(args, kwds, "OOs#O&", kwlist, &array, &other, &cmp_str, &strlen, - PyArray_BoolConverter, &rstrip)) + PyArray_BoolConverter, &rstrip)) { return NULL; - - if (strlen < 1 || strlen > 2) goto err; + } + if (strlen < 1 || strlen > 2) { + goto err; + } if (strlen > 1) { - if (cmp_str[1] != '=') goto err; - if (cmp_str[0] == '=') cmp_op = Py_EQ; - else if (cmp_str[0] == '!') cmp_op = Py_NE; - else if (cmp_str[0] == '<') cmp_op = Py_LE; - else if (cmp_str[0] == '>') cmp_op = Py_GE; - else goto err; + if (cmp_str[1] != '=') { + goto err; + } + if (cmp_str[0] == '=') { + cmp_op = Py_EQ; + } + else if (cmp_str[0] == '!') { + cmp_op = Py_NE; + } + else if (cmp_str[0] == '<') { + cmp_op = Py_LE; + } + else if (cmp_str[0] == '>') { + cmp_op = Py_GE; + } + else { + goto err; + } } else { - if (cmp_str[0] == '<') cmp_op = Py_LT; - else if (cmp_str[0] == '>') cmp_op = Py_GT; - else goto err; + if (cmp_str[0] == '<') { + cmp_op = Py_LT; + } + else if (cmp_str[0] == '>') { + cmp_op = Py_GT; + } + else { + goto err; + } } newarr = (PyArrayObject *)PyArray_FROM_O(array); - if (newarr == NULL) return NULL; + if (newarr == NULL) { + return NULL; + } newoth = (PyArrayObject *)PyArray_FROM_O(other); if (newoth == NULL) { Py_DECREF(newarr); return NULL; } - if (PyArray_ISSTRING(newarr) && PyArray_ISSTRING(newoth)) { res = _strings_richcompare(newarr, newoth, cmp_op, rstrip != 0); } else { - PyErr_SetString(PyExc_TypeError, - "comparison of non-string arrays"); + PyErr_SetString(PyExc_TypeError, "comparison of non-string arrays"); } - Py_DECREF(newarr); Py_DECREF(newoth); return res; @@ -7790,120 +8142,158 @@ static PyObject * test_interrupt(PyObject *NPY_UNUSED(self), PyObject *args) { - int kind=0; + int kind = 0; int a = 0; - if (!PyArg_ParseTuple(args, "|i", &kind)) return NULL; - + if (!PyArg_ParseTuple(args, "|i", &kind)) { + return NULL; + } if (kind) { Py_BEGIN_ALLOW_THREADS; - while (a>=0) { - if ((a % 1000 == 0) && - PyOS_InterruptOccurred()) break; - a+=1; + while (a >= 0) { + if ((a % 1000 == 0) && PyOS_InterruptOccurred()) { + break; } + a += 1; + } Py_END_ALLOW_THREADS; - } + } else { - NPY_SIGINT_ON - - while(a>=0) { - a += 1; - } - + while(a >= 0) { + a += 1; + } NPY_SIGINT_OFF - } - + } return PyInt_FromLong(a); } static struct PyMethodDef array_module_methods[] = { - {"_get_ndarray_c_version", (PyCFunction)array__get_ndarray_c_version, - METH_VARARGS|METH_KEYWORDS, NULL}, - {"_reconstruct", (PyCFunction)array__reconstruct, - METH_VARARGS, NULL}, - {"set_string_function", (PyCFunction)array_set_string_function, - METH_VARARGS|METH_KEYWORDS, NULL}, - {"set_numeric_ops", (PyCFunction)array_set_ops_function, - METH_VARARGS|METH_KEYWORDS, NULL}, - {"set_typeDict", (PyCFunction)array_set_typeDict, - METH_VARARGS, NULL}, + {"_get_ndarray_c_version", + (PyCFunction)array__get_ndarray_c_version, + METH_VARARGS|METH_KEYWORDS, NULL}, + {"_reconstruct", + (PyCFunction)array__reconstruct, + METH_VARARGS, NULL}, + {"set_string_function", + (PyCFunction)array_set_string_function, + METH_VARARGS|METH_KEYWORDS, NULL}, + {"set_numeric_ops", + (PyCFunction)array_set_ops_function, + METH_VARARGS|METH_KEYWORDS, NULL}, + {"set_typeDict", + (PyCFunction)array_set_typeDict, + METH_VARARGS, NULL}, - {"array", (PyCFunction)_array_fromobject, - METH_VARARGS|METH_KEYWORDS, NULL}, - {"arange", (PyCFunction)array_arange, - METH_VARARGS|METH_KEYWORDS, NULL}, - {"zeros", (PyCFunction)array_zeros, - METH_VARARGS|METH_KEYWORDS, NULL}, - {"empty", (PyCFunction)array_empty, - METH_VARARGS|METH_KEYWORDS, NULL}, - {"scalar", (PyCFunction)array_scalar, - METH_VARARGS|METH_KEYWORDS, NULL}, - {"where", (PyCFunction)array_where, - METH_VARARGS, NULL}, - {"lexsort", (PyCFunction)array_lexsort, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"putmask", (PyCFunction)array_putmask, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"fromstring",(PyCFunction)array_fromstring, - METH_VARARGS|METH_KEYWORDS, NULL}, - {"fromiter",(PyCFunction)array_fromiter, - METH_VARARGS|METH_KEYWORDS, NULL}, - {"concatenate", (PyCFunction)array_concatenate, - METH_VARARGS|METH_KEYWORDS, NULL}, - {"inner", (PyCFunction)array_innerproduct, - METH_VARARGS, NULL}, - {"dot", (PyCFunction)array_matrixproduct, - METH_VARARGS, NULL}, - {"_fastCopyAndTranspose", (PyCFunction)array_fastCopyAndTranspose, - METH_VARARGS, NULL}, - {"correlate", (PyCFunction)array_correlate, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"frombuffer", (PyCFunction)array_frombuffer, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"fromfile", (PyCFunction)array_fromfile, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"can_cast", (PyCFunction)array_can_cast_safely, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"newbuffer", (PyCFunction)new_buffer, - METH_VARARGS, NULL}, - {"getbuffer", (PyCFunction)buffer_buffer, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"int_asbuffer", (PyCFunction)as_buffer, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"format_longfloat", (PyCFunction)format_longfloat, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"compare_chararrays", (PyCFunction)compare_chararrays, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"test_interrupt", (PyCFunction)test_interrupt, - METH_VARARGS, NULL}, + {"array", + (PyCFunction)_array_fromobject, + METH_VARARGS|METH_KEYWORDS, NULL}, + {"arange", + (PyCFunction)array_arange, + METH_VARARGS|METH_KEYWORDS, NULL}, + {"zeros", + (PyCFunction)array_zeros, + METH_VARARGS|METH_KEYWORDS, NULL}, + {"empty", + (PyCFunction)array_empty, + METH_VARARGS|METH_KEYWORDS, NULL}, + {"scalar", + (PyCFunction)array_scalar, + METH_VARARGS|METH_KEYWORDS, NULL}, + {"where", + (PyCFunction)array_where, + METH_VARARGS, NULL}, + {"lexsort", + (PyCFunction)array_lexsort, + METH_VARARGS | METH_KEYWORDS, NULL}, + {"putmask", + (PyCFunction)array_putmask, + METH_VARARGS | METH_KEYWORDS, NULL}, + {"fromstring", + (PyCFunction)array_fromstring, + METH_VARARGS|METH_KEYWORDS, NULL}, + {"fromiter", + (PyCFunction)array_fromiter, + METH_VARARGS|METH_KEYWORDS, NULL}, + {"concatenate", + (PyCFunction)array_concatenate, + METH_VARARGS|METH_KEYWORDS, NULL}, + {"inner", + (PyCFunction)array_innerproduct, + METH_VARARGS, NULL}, + {"dot", + (PyCFunction)array_matrixproduct, + METH_VARARGS, NULL}, + {"_fastCopyAndTranspose", + (PyCFunction)array_fastCopyAndTranspose, + METH_VARARGS, NULL}, + {"correlate", + (PyCFunction)array_correlate, + METH_VARARGS | METH_KEYWORDS, NULL}, + {"frombuffer", + (PyCFunction)array_frombuffer, + METH_VARARGS | METH_KEYWORDS, NULL}, + {"fromfile", + (PyCFunction)array_fromfile, + METH_VARARGS | METH_KEYWORDS, NULL}, + {"can_cast", + (PyCFunction)array_can_cast_safely, + METH_VARARGS | METH_KEYWORDS, NULL}, + {"newbuffer", + (PyCFunction)new_buffer, + METH_VARARGS, NULL}, + {"getbuffer", + (PyCFunction)buffer_buffer, + METH_VARARGS | METH_KEYWORDS, NULL}, + {"int_asbuffer", + (PyCFunction)as_buffer, + METH_VARARGS | METH_KEYWORDS, NULL}, + {"format_longfloat", + (PyCFunction)format_longfloat, + METH_VARARGS | METH_KEYWORDS, NULL}, + {"compare_chararrays", + (PyCFunction)compare_chararrays, + METH_VARARGS | METH_KEYWORDS, NULL}, + {"test_interrupt", + (PyCFunction)test_interrupt, + METH_VARARGS, NULL}, {NULL, NULL, 0, NULL} /* sentinel */ }; #include "__multiarray_api.c" -/* Establish scalar-type hierarchy */ - -/* For dual inheritance we need to make sure that the objects being - inherited from have the tp->mro object initialized. This is - not necessarily true for the basic type objects of Python (it is - checked for single inheritance but not dual in PyType_Ready). - - Thus, we call PyType_Ready on the standard Python Types, here. -*/ +/* Establish scalar-type hierarchy + * + * For dual inheritance we need to make sure that the objects being + * inherited from have the tp->mro object initialized. This is + * not necessarily true for the basic type objects of Python (it is + * checked for single inheritance but not dual in PyType_Ready). + * + * Thus, we call PyType_Ready on the standard Python Types, here. + */ static int setup_scalartypes(PyObject *NPY_UNUSED(dict)) { - initialize_numeric_types(); - if (PyType_Ready(&PyBool_Type) < 0) return -1; - if (PyType_Ready(&PyInt_Type) < 0) return -1; - if (PyType_Ready(&PyFloat_Type) < 0) return -1; - if (PyType_Ready(&PyComplex_Type) < 0) return -1; - if (PyType_Ready(&PyString_Type) < 0) return -1; - if (PyType_Ready(&PyUnicode_Type) < 0) return -1; + if (PyType_Ready(&PyBool_Type) < 0) { + return -1; + } + if (PyType_Ready(&PyInt_Type) < 0) { + return -1; + } + if (PyType_Ready(&PyFloat_Type) < 0) { + return -1; + } + if (PyType_Ready(&PyComplex_Type) < 0) { + return -1; + } + if (PyType_Ready(&PyString_Type) < 0) { + return -1; + } + if (PyType_Ready(&PyUnicode_Type) < 0) { + return -1; + } #define SINGLE_INHERIT(child, parent) \ Py##child##ArrType_Type.tp_base = &Py##parent##ArrType_Type; \ @@ -7915,9 +8305,9 @@ return -1; \ } - if (PyType_Ready(&PyGenericArrType_Type) < 0) + if (PyType_Ready(&PyGenericArrType_Type) < 0) { return -1; - + } SINGLE_INHERIT(Number, Generic); SINGLE_INHERIT(Integer, Number); SINGLE_INHERIT(Inexact, Number); @@ -7975,7 +8365,11 @@ SINGLE_INHERIT(LongLong, SignedInteger); #endif - /* fprintf(stderr, "tp_free = %p, PyObject_Del = %p, int_tp_free = %p, base.tp_free = %p\n", PyIntArrType_Type.tp_free, PyObject_Del, PyInt_Type.tp_free, PySignedIntegerArrType_Type.tp_free); + /* + fprintf(stderr, + "tp_free = %p, PyObject_Del = %p, int_tp_free = %p, base.tp_free = %p\n", + PyIntArrType_Type.tp_free, PyObject_Del, PyInt_Type.tp_free, + PySignedIntegerArrType_Type.tp_free); */ SINGLE_INHERIT(UByte, UnsignedInteger); SINGLE_INHERIT(UShort, UnsignedInteger); @@ -8003,9 +8397,10 @@ #undef SINGLE_INHERIT #undef DUAL_INHERIT - /* Clean up string and unicode array types so they act more like - strings -- get their tables from the standard types. - */ + /* + * Clean up string and unicode array types so they act more like + * strings -- get their tables from the standard types. + */ } /* place a flag dictionary in d */ @@ -8057,45 +8452,48 @@ /* Create the module and add the functions */ m = Py_InitModule("multiarray", array_module_methods); - if (!m) goto err; - + if (!m) { + goto err; + } /* Add some symbolic constants to the module */ d = PyModule_GetDict(m); - if (!d) goto err; - + if (!d) { + goto err; + } PyArray_Type.tp_free = _pya_free; - if (PyType_Ready(&PyArray_Type) < 0) + if (PyType_Ready(&PyArray_Type) < 0) { return; - - if (setup_scalartypes(d) < 0) goto err; - + } + if (setup_scalartypes(d) < 0) { + goto err; + } PyArrayIter_Type.tp_iter = PyObject_SelfIter; PyArrayMultiIter_Type.tp_iter = PyObject_SelfIter; PyArrayMultiIter_Type.tp_free = _pya_free; - if (PyType_Ready(&PyArrayIter_Type) < 0) + if (PyType_Ready(&PyArrayIter_Type) < 0) { return; - - if (PyType_Ready(&PyArrayMapIter_Type) < 0) + } + if (PyType_Ready(&PyArrayMapIter_Type) < 0) { return; - - if (PyType_Ready(&PyArrayMultiIter_Type) < 0) + } + if (PyType_Ready(&PyArrayMultiIter_Type) < 0) { return; - + } PyArrayDescr_Type.tp_hash = (hashfunc)_Py_HashPointer; - if (PyType_Ready(&PyArrayDescr_Type) < 0) + if (PyType_Ready(&PyArrayDescr_Type) < 0) { return; - - if (PyType_Ready(&PyArrayFlags_Type) < 0) + } + if (PyType_Ready(&PyArrayFlags_Type) < 0) { return; - + } c_api = PyCObject_FromVoidPtr((void *)PyArray_API, NULL); PyDict_SetItemString(d, "_ARRAY_API", c_api); Py_DECREF(c_api); - if (PyErr_Occurred()) goto err; - + if (PyErr_Occurred()) { + goto err; + } MultiArrayError = PyString_FromString ("multiarray.error"); PyDict_SetItemString (d, "error", MultiArrayError); - s = PyString_FromString("3.0"); PyDict_SetItemString(d, "__version__", s); Py_DECREF(s); @@ -8138,7 +8536,9 @@ set_flaginfo(d); - if (set_typeinfo(d) != 0) goto err; + if (set_typeinfo(d) != 0) { + goto err; + } return; err: From numpy-svn at scipy.org Sat Feb 21 22:42:03 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sat, 21 Feb 2009 21:42:03 -0600 (CST) Subject: [Numpy-svn] r6445 - in branches/coremath: . numpy/core/include/numpy numpy/core/src numpy/distutils numpy/distutils/command numpy/distutils/fcompiler numpy/lib/tests Message-ID: <20090222034203.CCDE0C7C009@scipy.org> Author: cdavid Date: 2009-02-21 21:41:06 -0600 (Sat, 21 Feb 2009) New Revision: 6445 Modified: branches/coremath/ branches/coremath/numpy/core/include/numpy/ndarrayobject.h branches/coremath/numpy/core/src/arrayobject.c branches/coremath/numpy/core/src/multiarraymodule.c branches/coremath/numpy/core/src/scalarmathmodule.c.src branches/coremath/numpy/core/src/scalartypes.inc.src branches/coremath/numpy/distutils/command/build_ext.py branches/coremath/numpy/distutils/fcompiler/compaq.py branches/coremath/numpy/distutils/mingw32ccompiler.py branches/coremath/numpy/lib/tests/test_io.py Log: Merged revisions 6398-6400,6410,6421-6424,6426-6428,6431,6433,6441-6442 via svnmerge from http://svn.scipy.org/svn/numpy/trunk ................ r6398 | charris | 2009-02-19 05:54:52 +0900 (Thu, 19 Feb 2009) | 2 lines Coding style cleanups. ................ r6399 | charris | 2009-02-19 09:45:14 +0900 (Thu, 19 Feb 2009) | 2 lines Coding style cleanups. ................ r6400 | charris | 2009-02-19 13:58:23 +0900 (Thu, 19 Feb 2009) | 1 line Coding style cleanups. ................ r6410 | cdavid | 2009-02-19 19:05:28 +0900 (Thu, 19 Feb 2009) | 1 line Tag known failure on win32. ................ r6421 | stefan | 2009-02-20 04:28:08 +0900 (Fri, 20 Feb 2009) | 1 line Fix tests using strptime to be Python 2.4 compatible. ................ r6422 | charris | 2009-02-20 08:25:01 +0900 (Fri, 20 Feb 2009) | 2 lines Coding style cleanups. ................ r6423 | charris | 2009-02-20 12:40:53 +0900 (Fri, 20 Feb 2009) | 1 line Coding style cleanups. ................ r6424 | cdavid | 2009-02-20 22:30:20 +0900 (Fri, 20 Feb 2009) | 1 line Unhelpful message for compaq fortran compiler. ................ r6426 | charris | 2009-02-21 07:28:05 +0900 (Sat, 21 Feb 2009) | 2 lines Change indentation of switch statements. ................ r6427 | charris | 2009-02-21 13:21:50 +0900 (Sat, 21 Feb 2009) | 1 line Coding style cleanups. ................ r6428 | cdavid | 2009-02-21 13:35:57 +0900 (Sat, 21 Feb 2009) | 3 lines Removed merge tracking for "svnmerge" for http://svn.scipy.org/svn/numpy/branches/fix_float_format ................ r6431 | charris | 2009-02-21 13:41:17 +0900 (Sat, 21 Feb 2009) | 1 line Remove terminating ";" from macro to fix ticket #918. ................ r6433 | cdavid | 2009-02-21 14:38:29 +0900 (Sat, 21 Feb 2009) | 3 lines Removed merge tracking for "svnmerge" for http://svn.scipy.org/svn/numpy/branches/visualstudio_manifest ................ r6441 | cdavid | 2009-02-22 02:25:09 +0900 (Sun, 22 Feb 2009) | 105 lines Merged revisions 6153-6173,6176-6178,6184 via svnmerge from http://svn.scipy.org/svn/numpy/branches/numpy-mingw-w64 ........ r6153 | cdavid | 2008-12-19 17:06:06 +0900 (Fri, 19 Dec 2008) | 1 line Add a function to find python dll on windows. ........ r6154 | cdavid | 2008-12-19 17:28:49 +0900 (Fri, 19 Dec 2008) | 1 line Fix typo when getting system32 location. ........ r6155 | cdavid | 2008-12-19 17:37:19 +0900 (Fri, 19 Dec 2008) | 1 line Add a function to get a dump of private headers from dll. ........ r6156 | cdavid | 2008-12-19 17:41:39 +0900 (Fri, 19 Dec 2008) | 1 line Add a function to generate a .def file from a dll. ........ r6157 | cdavid | 2008-12-19 17:43:56 +0900 (Fri, 19 Dec 2008) | 1 line Forgot to add the regex for the generate_def function. ........ r6158 | cdavid | 2008-12-19 17:53:49 +0900 (Fri, 19 Dec 2008) | 1 line Fix .def file generation. ........ r6159 | cdavid | 2008-12-19 17:56:54 +0900 (Fri, 19 Dec 2008) | 1 line Add a warning if no symbols found in the dll (if stripped, for example). ........ r6160 | cdavid | 2008-12-19 18:02:24 +0900 (Fri, 19 Dec 2008) | 1 line Refactor build_import_library to take into account multi arch. ........ r6161 | cdavid | 2008-12-19 18:10:03 +0900 (Fri, 19 Dec 2008) | 1 line Do not generate manifest when built with msver 8.*, it does not look like it is needed, and we dont support it anyway ATM. ........ r6162 | cdavid | 2008-12-19 18:18:08 +0900 (Fri, 19 Dec 2008) | 1 line Show arch in the log when building import library. ........ r6163 | cdavid | 2008-12-19 18:22:18 +0900 (Fri, 19 Dec 2008) | 1 line Fix missing out filename. ........ r6164 | cdavid | 2008-12-19 18:32:46 +0900 (Fri, 19 Dec 2008) | 1 line Actually build the import library for mingw on amd64. ........ r6165 | cdavid | 2008-12-19 18:46:30 +0900 (Fri, 19 Dec 2008) | 1 line Do not generate ordinal, and use the basename of the dll instead of the full path in the def.file. ........ r6166 | cdavid | 2008-12-19 18:48:01 +0900 (Fri, 19 Dec 2008) | 1 line Trailing spaces. ........ r6167 | cdavid | 2008-12-19 18:55:16 +0900 (Fri, 19 Dec 2008) | 1 line Add MS_WIN64 macro when built on amd64 + mingw. ........ r6168 | cdavid | 2008-12-19 18:57:06 +0900 (Fri, 19 Dec 2008) | 1 line Forgot to import get_build_architecture. ........ r6169 | cdavid | 2008-12-19 18:57:52 +0900 (Fri, 19 Dec 2008) | 1 line Use a tuple when defining the MS_WIN64 macro. ........ r6170 | cdavid | 2008-12-19 19:05:03 +0900 (Fri, 19 Dec 2008) | 1 line Fix macro def. ........ r6171 | cdavid | 2008-12-19 19:21:54 +0900 (Fri, 19 Dec 2008) | 2 lines Do not use g++ for linking on amd64. ........ r6172 | cdavid | 2008-12-19 19:25:18 +0900 (Fri, 19 Dec 2008) | 1 line do not regenerate the import library if already there. ........ r6173 | cdavid | 2008-12-19 19:28:39 +0900 (Fri, 19 Dec 2008) | 1 line Add one full msvcrt version for 80 (for manifest generation). ........ r6176 | cdavid | 2008-12-21 02:31:48 +0900 (Sun, 21 Dec 2008) | 1 line Remove optimization flags for now, to speed up builds. ........ r6177 | cdavid | 2008-12-21 02:32:11 +0900 (Sun, 21 Dec 2008) | 1 line Add MS_WIN64 for every compile command. ........ r6178 | cdavid | 2008-12-21 02:32:33 +0900 (Sun, 21 Dec 2008) | 1 line Remove handling of MS_WIN64 in commands: deal with it in mingw tool only. ........ r6184 | cdavid | 2008-12-21 16:46:28 +0900 (Sun, 21 Dec 2008) | 1 line Hardcode size of long double, because it is broken with mingw. ........ ................ r6442 | cdavid | 2009-02-22 02:36:15 +0900 (Sun, 22 Feb 2009) | 1 line Re-enable real check for long double size. ................ Property changes on: branches/coremath ___________________________________________________________________ Name: svnmerge-integrated - /branches/distutils-revamp:1-2752 /branches/dynamic_cpu_configuration:1-6101 /branches/fix_float_format:1-6359 /branches/multicore:1-3687 /branches/numpy-mingw-w64:1-6150 /branches/visualstudio_manifest:1-6077 /trunk:1-6373 + /branches/distutils-revamp:1-2752 /branches/dynamic_cpu_configuration:1-6101 /branches/fix_float_format:1-6359 /branches/multicore:1-3687 /branches/numpy-mingw-w64:1-6150 /branches/visualstudio_manifest:1-6077 /trunk:1-6443 Modified: branches/coremath/numpy/core/include/numpy/ndarrayobject.h =================================================================== --- branches/coremath/numpy/core/include/numpy/ndarrayobject.h 2009-02-21 23:54:31 UTC (rev 6444) +++ branches/coremath/numpy/core/include/numpy/ndarrayobject.h 2009-02-22 03:41:06 UTC (rev 6445) @@ -1627,12 +1627,12 @@ #define PyArray_GETITEM(obj,itemptr) \ ((PyArrayObject *)(obj))->descr->f->getitem((char *)(itemptr), \ - (PyArrayObject *)(obj)); + (PyArrayObject *)(obj)) #define PyArray_SETITEM(obj,itemptr,v) \ ((PyArrayObject *)(obj))->descr->f->setitem((PyObject *)(v), \ (char *)(itemptr), \ - (PyArrayObject *)(obj)); + (PyArrayObject *)(obj)) #define PyTypeNum_ISBOOL(type) ((type) == NPY_BOOL) Modified: branches/coremath/numpy/core/src/arrayobject.c =================================================================== --- branches/coremath/numpy/core/src/arrayobject.c 2009-02-21 23:54:31 UTC (rev 6444) +++ branches/coremath/numpy/core/src/arrayobject.c 2009-02-22 03:41:06 UTC (rev 6445) @@ -5099,157 +5099,157 @@ int typenum; switch (cmp_op) { - case Py_LT: - result = PyArray_GenericBinaryFunction(self, other, - n_ops.less); - break; - case Py_LE: - result = PyArray_GenericBinaryFunction(self, other, - n_ops.less_equal); - break; - case Py_EQ: - if (other == Py_None) { + case Py_LT: + result = PyArray_GenericBinaryFunction(self, other, + n_ops.less); + break; + case Py_LE: + result = PyArray_GenericBinaryFunction(self, other, + n_ops.less_equal); + break; + case Py_EQ: + if (other == Py_None) { + Py_INCREF(Py_False); + return Py_False; + } + /* Try to convert other to an array */ + if (!PyArray_Check(other)) { + typenum = self->descr->type_num; + if (typenum != PyArray_OBJECT) { + typenum = PyArray_NOTYPE; + } + array_other = PyArray_FromObject(other, + typenum, 0, 0); + /* + * If not successful, then return False. This fixes code + * that used to allow equality comparisons between arrays + * and other objects which would give a result of False. + */ + if ((array_other == NULL) || + (array_other == Py_None)) { + Py_XDECREF(array_other); + PyErr_Clear(); Py_INCREF(Py_False); return Py_False; } - /* Try to convert other to an array */ - if (!PyArray_Check(other)) { - typenum = self->descr->type_num; - if (typenum != PyArray_OBJECT) { - typenum = PyArray_NOTYPE; - } - array_other = PyArray_FromObject(other, - typenum, 0, 0); - /* - * If not successful, then return False. This fixes code - * that used to allow equality comparisons between arrays - * and other objects which would give a result of False. - */ - if ((array_other == NULL) || - (array_other == Py_None)) { - Py_XDECREF(array_other); - PyErr_Clear(); - Py_INCREF(Py_False); - return Py_False; - } + } + else { + Py_INCREF(other); + array_other = other; + } + result = PyArray_GenericBinaryFunction(self, + array_other, + n_ops.equal); + if ((result == Py_NotImplemented) && + (self->descr->type_num == PyArray_VOID)) { + int _res; + + _res = PyObject_RichCompareBool + ((PyObject *)self->descr, + (PyObject *)\ + PyArray_DESCR(array_other), + Py_EQ); + if (_res < 0) { + Py_DECREF(result); + Py_DECREF(array_other); + return NULL; } - else { - Py_INCREF(other); - array_other = other; + if (_res) { + Py_DECREF(result); + result = _void_compare + (self, + (PyArrayObject *)array_other, + cmp_op); + Py_DECREF(array_other); } - result = PyArray_GenericBinaryFunction(self, - array_other, - n_ops.equal); - if ((result == Py_NotImplemented) && - (self->descr->type_num == PyArray_VOID)) { - int _res; - - _res = PyObject_RichCompareBool - ((PyObject *)self->descr, - (PyObject *)\ - PyArray_DESCR(array_other), - Py_EQ); - if (_res < 0) { - Py_DECREF(result); - Py_DECREF(array_other); - return NULL; - } - if (_res) { - Py_DECREF(result); - result = _void_compare - (self, - (PyArrayObject *)array_other, - cmp_op); - Py_DECREF(array_other); - } - return result; + return result; + } + /* + * If the comparison results in NULL, then the + * two array objects can not be compared together so + * return zero + */ + Py_DECREF(array_other); + if (result == NULL) { + PyErr_Clear(); + Py_INCREF(Py_False); + return Py_False; + } + break; + case Py_NE: + if (other == Py_None) { + Py_INCREF(Py_True); + return Py_True; + } + /* Try to convert other to an array */ + if (!PyArray_Check(other)) { + typenum = self->descr->type_num; + if (typenum != PyArray_OBJECT) { + typenum = PyArray_NOTYPE; } + array_other = PyArray_FromObject(other, typenum, 0, 0); /* - * If the comparison results in NULL, then the - * two array objects can not be compared together so - * return zero + * If not successful, then objects cannot be + * compared and cannot be equal, therefore, + * return True; */ - Py_DECREF(array_other); - if (result == NULL) { + if ((array_other == NULL) || (array_other == Py_None)) { + Py_XDECREF(array_other); PyErr_Clear(); - Py_INCREF(Py_False); - return Py_False; - } - break; - case Py_NE: - if (other == Py_None) { Py_INCREF(Py_True); return Py_True; } - /* Try to convert other to an array */ - if (!PyArray_Check(other)) { - typenum = self->descr->type_num; - if (typenum != PyArray_OBJECT) { - typenum = PyArray_NOTYPE; - } - array_other = PyArray_FromObject(other, typenum, 0, 0); - /* - * If not successful, then objects cannot be - * compared and cannot be equal, therefore, - * return True; - */ - if ((array_other == NULL) || (array_other == Py_None)) { - Py_XDECREF(array_other); - PyErr_Clear(); - Py_INCREF(Py_True); - return Py_True; - } + } + else { + Py_INCREF(other); + array_other = other; + } + result = PyArray_GenericBinaryFunction(self, + array_other, + n_ops.not_equal); + if ((result == Py_NotImplemented) && + (self->descr->type_num == PyArray_VOID)) { + int _res; + + _res = PyObject_RichCompareBool( + (PyObject *)self->descr, + (PyObject *) + PyArray_DESCR(array_other), + Py_EQ); + if (_res < 0) { + Py_DECREF(result); + Py_DECREF(array_other); + return NULL; } - else { - Py_INCREF(other); - array_other = other; + if (_res) { + Py_DECREF(result); + result = _void_compare( + self, + (PyArrayObject *)array_other, + cmp_op); + Py_DECREF(array_other); } - result = PyArray_GenericBinaryFunction(self, - array_other, - n_ops.not_equal); - if ((result == Py_NotImplemented) && - (self->descr->type_num == PyArray_VOID)) { - int _res; + return result; + } - _res = PyObject_RichCompareBool( - (PyObject *)self->descr, - (PyObject *) - PyArray_DESCR(array_other), - Py_EQ); - if (_res < 0) { - Py_DECREF(result); - Py_DECREF(array_other); - return NULL; - } - if (_res) { - Py_DECREF(result); - result = _void_compare( - self, - (PyArrayObject *)array_other, - cmp_op); - Py_DECREF(array_other); - } - return result; - } - - Py_DECREF(array_other); - if (result == NULL) { - PyErr_Clear(); - Py_INCREF(Py_True); - return Py_True; - } - break; - case Py_GT: - result = PyArray_GenericBinaryFunction(self, other, - n_ops.greater); - break; - case Py_GE: - result = PyArray_GenericBinaryFunction(self, other, - n_ops.greater_equal); - break; - default: - result = Py_NotImplemented; - Py_INCREF(result); + Py_DECREF(array_other); + if (result == NULL) { + PyErr_Clear(); + Py_INCREF(Py_True); + return Py_True; + } + break; + case Py_GT: + result = PyArray_GenericBinaryFunction(self, other, + n_ops.greater); + break; + case Py_GE: + result = PyArray_GenericBinaryFunction(self, other, + n_ops.greater_equal); + break; + default: + result = Py_NotImplemented; + Py_INCREF(result); } if (result == Py_NotImplemented) { /* Try to handle string comparisons */ @@ -5612,22 +5612,23 @@ return; } -/* This routine checks to see if newstrides (of length nd) will not - ever be able to walk outside of the memory implied numbytes and offset. +/* + * This routine checks to see if newstrides (of length nd) will not + * ever be able to walk outside of the memory implied numbytes and offset. + * + * The available memory is assumed to start at -offset and proceed + * to numbytes-offset. The strides are checked to ensure + * that accessing memory using striding will not try to reach beyond + * this memory for any of the axes. + * + * If numbytes is 0 it will be calculated using the dimensions and + * element-size. + * + * This function checks for walking beyond the beginning and right-end + * of the buffer and therefore works for any integer stride (positive + * or negative). + */ - The available memory is assumed to start at -offset and proceed - to numbytes-offset. The strides are checked to ensure - that accessing memory using striding will not try to reach beyond - this memory for any of the axes. - - If numbytes is 0 it will be calculated using the dimensions and - element-size. - - This function checks for walking beyond the beginning and right-end - of the buffer and therefore works for any integer stride (positive - or negative). -*/ - /*NUMPY_API*/ static Bool PyArray_CheckStrides(int elsize, int nd, intp numbytes, intp offset, @@ -5638,37 +5639,38 @@ intp begin; intp end; - if (numbytes == 0) + if (numbytes == 0) { numbytes = PyArray_MultiplyList(dims, nd) * elsize; - + } begin = -offset; end = numbytes - offset - elsize; - for(i=0; i end)) + for (i = 0; i < nd; i++) { + byte_begin = newstrides[i]*(dims[i] - 1); + if ((byte_begin < begin) || (byte_begin > end)) { return FALSE; + } } return TRUE; - } -/* This is the main array creation routine. */ +/* + * This is the main array creation routine. + * + * Flags argument has multiple related meanings + * depending on data and strides: + * + * If data is given, then flags is flags associated with data. + * If strides is not given, then a contiguous strides array will be created + * and the CONTIGUOUS bit will be set. If the flags argument + * has the FORTRAN bit set, then a FORTRAN-style strides array will be + * created (and of course the FORTRAN flag bit will be set). + * + * If data is not given but created here, then flags will be DEFAULT + * and a non-zero flags argument can be used to indicate a FORTRAN style + * array is desired. + */ -/* Flags argument has multiple related meanings - depending on data and strides: - - If data is given, then flags is flags associated with data. - If strides is not given, then a contiguous strides array will be created - and the CONTIGUOUS bit will be set. If the flags argument - has the FORTRAN bit set, then a FORTRAN-style strides array will be - created (and of course the FORTRAN flag bit will be set). - - If data is not given but created here, then flags will be DEFAULT - and a non-zero flags argument can be used to indicate a FORTRAN style - array is desired. -*/ - static size_t _array_fill_strides(intp *strides, intp *dims, int nd, size_t itemsize, int inflag, int *objflags) @@ -5676,29 +5678,37 @@ int i; /* Only make Fortran strides if not contiguous as well */ if ((inflag & FORTRAN) && !(inflag & CONTIGUOUS)) { - for(i=0; i 1) *objflags &= ~CONTIGUOUS; - else *objflags |= CONTIGUOUS; + if (nd > 1) { + *objflags &= ~CONTIGUOUS; + } + else { + *objflags |= CONTIGUOUS; + } } else { - for(i=nd-1;i>=0;i--) { + for (i = nd - 1; i >= 0; i--) { strides[i] = itemsize; itemsize *= dims[i] ? dims[i] : 1; } *objflags |= CONTIGUOUS; - if (nd > 1) *objflags &= ~FORTRAN; - else *objflags |= FORTRAN; + if (nd > 1) { + *objflags &= ~FORTRAN; + } + else { + *objflags |= FORTRAN; + } } return itemsize; } /*NUMPY_API - Generic new array creation routine. -*/ + * Generic new array creation routine. + */ static PyObject * PyArray_New(PyTypeObject *subtype, int nd, intp *dims, int type_num, intp *strides, void *data, int itemsize, int flags, @@ -5708,7 +5718,9 @@ PyObject *new; descr = PyArray_DescrFromType(type_num); - if (descr == NULL) return NULL; + if (descr == NULL) { + return NULL; + } if (descr->elsize == 0) { if (itemsize < 1) { PyErr_SetString(PyExc_ValueError, @@ -5724,14 +5736,16 @@ return new; } -/* Change a sub-array field to the base descriptor */ -/* and update the dimensions and strides - appropriately. Dimensions and strides are added - to the end unless we have a FORTRAN array - and then they are added to the beginning - - Strides are only added if given (because data is given). -*/ +/* + * Change a sub-array field to the base descriptor + * + * and update the dimensions and strides + * appropriately. Dimensions and strides are added + * to the end unless we have a FORTRAN array + * and then they are added to the beginning + * + * Strides are only added if given (because data is given). + */ static int _update_descr_and_dimensions(PyArray_Descr **des, intp *newdims, intp *newstrides, int oldnd, int isfortran) @@ -5758,16 +5772,17 @@ newnd = oldnd + numnew; - if (newnd > MAX_DIMS) goto finish; + if (newnd > MAX_DIMS) { + goto finish; + } if (isfortran) { memmove(newdims+numnew, newdims, oldnd*sizeof(intp)); mydim = newdims; } - if (tuple) { - for(i=0; isubarray->shape, i)); + for (i = 0; i < numnew; i++) { + mydim[i] = (intp) PyInt_AsLong( + PyTuple_GET_ITEM(old->subarray->shape, i)); } } else { @@ -5777,15 +5792,15 @@ if (newstrides) { intp tempsize; intp *mystrides; + mystrides = newstrides + oldnd; if (isfortran) { - memmove(newstrides+numnew, newstrides, - oldnd*sizeof(intp)); + memmove(newstrides+numnew, newstrides, oldnd*sizeof(intp)); mystrides = newstrides; } /* Make new strides -- alwasy C-contiguous */ tempsize = (*des)->elsize; - for(i=numnew-1; i>=0; i--) { + for (i = numnew - 1; i >= 0; i--) { mystrides[i] = tempsize; tempsize *= mydim[i] ? mydim[i] : 1; } @@ -5798,10 +5813,11 @@ } -/* steals a reference to descr (even on failure) */ /*NUMPY_API - Generic new array creation routine. -*/ + * Generic new array creation routine. + * + * steals a reference to descr (even on failure) + */ static PyObject * PyArray_NewFromDescr(PyTypeObject *subtype, PyArray_Descr *descr, int nd, intp *dims, intp *strides, void *data, @@ -5816,9 +5832,9 @@ if (descr->subarray) { PyObject *ret; intp newdims[2*MAX_DIMS]; - intp *newstrides=NULL; - int isfortran=0; - isfortran = (data && (flags & FORTRAN) && !(flags & CONTIGUOUS)) || \ + intp *newstrides = NULL; + int isfortran = 0; + isfortran = (data && (flags & FORTRAN) && !(flags & CONTIGUOUS)) || (!data && flags); memcpy(newdims, dims, nd*sizeof(intp)); if (strides) { @@ -5832,7 +5848,6 @@ data, flags, obj); return ret; } - if (nd < 0) { PyErr_SetString(PyExc_ValueError, "number of dimensions must be >=0"); @@ -5856,13 +5871,19 @@ return NULL; } PyArray_DESCR_REPLACE(descr); - if (descr->type_num == NPY_STRING) descr->elsize = 1; - else descr->elsize = sizeof(PyArray_UCS4); + if (descr->type_num == NPY_STRING) { + descr->elsize = 1; + } + else { + descr->elsize = sizeof(PyArray_UCS4); + } sd = (size_t) descr->elsize; } largest = MAX_INTP / sd; - for(i=0;iflags = DEFAULT; if (flags) { self->flags |= FORTRAN; - if (nd > 1) self->flags &= ~CONTIGUOUS; + if (nd > 1) { + self->flags &= ~CONTIGUOUS; + } flags = FORTRAN; } } - else self->flags = (flags & ~UPDATEIFCOPY); - + else { + self->flags = (flags & ~UPDATEIFCOPY); + } self->descr = descr; self->base = (PyObject *)NULL; self->weakreflist = (PyObject *)NULL; @@ -5913,84 +5937,102 @@ sd = _array_fill_strides(self->strides, dims, nd, sd, flags, &(self->flags)); } - else { /* we allow strides even when we create - the memory, but be careful with this... - */ + else { + /* + * we allow strides even when we create + * the memory, but be careful with this... + */ memcpy(self->strides, strides, sizeof(intp)*nd); sd *= size; } } - else { self->dimensions = self->strides = NULL; } + else { + self->dimensions = self->strides = NULL; + } if (data == NULL) { + /* + * Allocate something even for zero-space arrays + * e.g. shape=(0,) -- otherwise buffer exposure + * (a.data) doesn't work as it should. + */ - /* Allocate something even for zero-space arrays - e.g. shape=(0,) -- otherwise buffer exposure - (a.data) doesn't work as it should. */ - - if (sd==0) sd = descr->elsize; - - if ((data = PyDataMem_NEW(sd))==NULL) { + if (sd == 0) { + sd = descr->elsize; + } + if ((data = PyDataMem_NEW(sd)) == NULL) { PyErr_NoMemory(); goto fail; } self->flags |= OWNDATA; - /* It is bad to have unitialized OBJECT pointers */ - /* which could also be sub-fields of a VOID array */ + /* + * It is bad to have unitialized OBJECT pointers + * which could also be sub-fields of a VOID array + */ if (PyDataType_FLAGCHK(descr, NPY_NEEDS_INIT)) { memset(data, 0, sd); } } else { - self->flags &= ~OWNDATA; /* If data is passed in, - this object won't own it - by default. - Caller must arrange for - this to be reset if truly - desired */ + /* + * If data is passed in, this object won't own it by default. + * Caller must arrange for this to be reset if truly desired + */ + self->flags &= ~OWNDATA; } self->data = data; - /* call the __array_finalize__ - method if a subtype. - If obj is NULL, then call method with Py_None - */ + /* + * call the __array_finalize__ + * method if a subtype. + * If obj is NULL, then call method with Py_None + */ if ((subtype != &PyArray_Type)) { PyObject *res, *func, *args; - static PyObject *str=NULL; + static PyObject *str = NULL; if (str == NULL) { str = PyString_InternFromString("__array_finalize__"); } func = PyObject_GetAttr((PyObject *)self, str); if (func && func != Py_None) { - if (strides != NULL) { /* did not allocate own data - or funny strides */ - /* update flags before finalize function */ + if (strides != NULL) { + /* + * did not allocate own data or funny strides + * update flags before finalize function + */ PyArray_UpdateFlags(self, UPDATE_ALL); } - if PyCObject_Check(func) { /* A C-function is stored here */ - PyArray_FinalizeFunc *cfunc; - cfunc = PyCObject_AsVoidPtr(func); - Py_DECREF(func); - if (cfunc(self, obj) < 0) goto fail; + if PyCObject_Check(func) { + /* A C-function is stored here */ + PyArray_FinalizeFunc *cfunc; + cfunc = PyCObject_AsVoidPtr(func); + Py_DECREF(func); + if (cfunc(self, obj) < 0) { + goto fail; } + } else { args = PyTuple_New(1); - if (obj == NULL) obj=Py_None; + if (obj == NULL) { + obj=Py_None; + } Py_INCREF(obj); PyTuple_SET_ITEM(args, 0, obj); res = PyObject_Call(func, args, NULL); Py_DECREF(args); Py_DECREF(func); - if (res == NULL) goto fail; - else Py_DECREF(res); + if (res == NULL) { + goto fail; + } + else { + Py_DECREF(res); + } } } else Py_XDECREF(func); } - return (PyObject *)self; fail: @@ -6005,14 +6047,17 @@ memset(optr, 0, dtype->elsize); } else if (PyDescr_HASFIELDS(dtype)) { - PyObject *key, *value, *title=NULL; + PyObject *key, *value, *title = NULL; PyArray_Descr *new; int offset; - Py_ssize_t pos=0; + Py_ssize_t pos = 0; while (PyDict_Next(dtype->fields, &pos, &key, &value)) { - if NPY_TITLE_KEY(key, value) continue; - if (!PyArg_ParseTuple(value, "Oi|O", &new, &offset, - &title)) return; + if NPY_TITLE_KEY(key, value) { + continue; + } + if (!PyArg_ParseTuple(value, "Oi|O", &new, &offset, &title)) { + return; + } _putzero(optr + offset, zero, new); } } @@ -6027,13 +6072,11 @@ /*NUMPY_API - Resize (reallocate data). Only works if nothing else is referencing - this array and it is contiguous. - If refcheck is 0, then the reference count is not checked - and assumed to be 1. - You still must own this data and have no weak-references and no base - object. -*/ + * Resize (reallocate data). Only works if nothing else is referencing this + * array and it is contiguous. If refcheck is 0, then the reference count is + * not checked and assumed to be 1. You still must own this data and have no + * weak-references and no base object. + */ static PyObject * PyArray_Resize(PyArrayObject *self, PyArray_Dims *newshape, int refcheck, NPY_ORDER fortran) @@ -6054,9 +6097,9 @@ return NULL; } - if (fortran == PyArray_ANYORDER) + if (fortran == PyArray_ANYORDER) { fortran = PyArray_CORDER; - + } if (self->descr->elsize == 0) { PyErr_SetString(PyExc_ValueError, "Bad data-type size."); return NULL; @@ -6064,7 +6107,9 @@ newsize = 1; largest = MAX_INTP / self->descr->elsize; for(k=0; k 2) || (self->base != NULL) || \ + if (refcheck) { + refcnt = REFCOUNT(self); + } + else { + refcnt = 1; + } + if ((refcnt > 2) || (self->base != NULL) || (self->weakreflist != NULL)) { PyErr_SetString(PyExc_ValueError, "cannot resize an array that has "\ @@ -6097,8 +6146,12 @@ return NULL; } - if (newsize == 0) sd = self->descr->elsize; - else sd = newsize * self->descr->elsize; + if (newsize == 0) { + sd = self->descr->elsize; + } + else { + sd = newsize*self->descr->elsize; + } /* Reallocate space if needed */ new_data = PyDataMem_RENEW(self->data, sd); if (new_data == NULL) { @@ -6117,21 +6170,20 @@ char *optr; optr = self->data + oldsize*elsize; n = newsize - oldsize; - for(k=0; kdescr); optr += elsize; } Py_DECREF(zero); } else{ - memset(self->data+oldsize*elsize, 0, - (newsize-oldsize)*elsize); + memset(self->data+oldsize*elsize, 0, (newsize-oldsize)*elsize); } } - if (self->nd != new_nd) { /* Different number of dimensions. */ + if (self->nd != new_nd) { + /* Different number of dimensions. */ self->nd = new_nd; - /* Need new dimensions and strides arrays */ dimptr = PyDimMem_RENEW(self->dimensions, 2*new_nd); if (dimptr == NULL) { @@ -6148,42 +6200,44 @@ sd = (size_t) self->descr->elsize; sd = (size_t) _array_fill_strides(new_strides, new_dimensions, new_nd, sd, self->flags, &(self->flags)); - memmove(self->dimensions, new_dimensions, new_nd*sizeof(intp)); memmove(self->strides, new_strides, new_nd*sizeof(intp)); - Py_INCREF(Py_None); return Py_None; - } static void _fillobject(char *optr, PyObject *obj, PyArray_Descr *dtype) { if (!PyDataType_FLAGCHK(dtype, NPY_ITEM_REFCOUNT)) { - if ((obj == Py_None) || - (PyInt_Check(obj) && PyInt_AsLong(obj)==0)) + if ((obj == Py_None) || (PyInt_Check(obj) && PyInt_AsLong(obj)==0)) { return; + } else { PyObject *arr; Py_INCREF(dtype); arr = PyArray_NewFromDescr(&PyArray_Type, dtype, 0, NULL, NULL, NULL, 0, NULL); - if (arr!=NULL) + if (arr!=NULL) { dtype->f->setitem(obj, optr, arr); + } Py_XDECREF(arr); } } else if (PyDescr_HASFIELDS(dtype)) { - PyObject *key, *value, *title=NULL; + PyObject *key, *value, *title = NULL; PyArray_Descr *new; int offset; - Py_ssize_t pos=0; + Py_ssize_t pos = 0; + while (PyDict_Next(dtype->fields, &pos, &key, &value)) { - if NPY_TITLE_KEY(key, value) continue; - if (!PyArg_ParseTuple(value, "Oi|O", &new, &offset, - &title)) return; + if NPY_TITLE_KEY(key, value) { + continue; + } + if (!PyArg_ParseTuple(value, "Oi|O", &new, &offset, &title)) { + return; + } _fillobject(optr + offset, obj, new); } } @@ -6196,8 +6250,9 @@ } } -/* Assumes contiguous */ -/*NUMPY_API*/ +/*NUMPY_API + * Assumes contiguous + */ static void PyArray_FillObjectArray(PyArrayObject *arr, PyObject *obj) { @@ -6208,12 +6263,12 @@ optr = (PyObject **)(arr->data); n = PyArray_SIZE(arr); if (obj == NULL) { - for(i=0; idata; - for(i=0; idescr); optr += arr->descr->elsize; } @@ -6250,7 +6305,9 @@ descr = PyArray_DESCR(arr); Py_INCREF(descr); newarr = PyArray_FromAny(obj, descr, 0,0, ALIGNED, NULL); - if (newarr == NULL) return -1; + if (newarr == NULL) { + return -1; + } fromptr = PyArray_DATA(newarr); swap = (PyArray_ISNOTSWAPPED(arr) != PyArray_ISNOTSWAPPED(newarr)); } @@ -6280,7 +6337,7 @@ Py_XDECREF(newarr); return -1; } - while(size--) { + while (size--) { copyswap(iter->dataptr, fromptr, swap, arr); PyArray_ITER_NEXT(iter); } @@ -6307,14 +6364,11 @@ PyArrayObject *ret; buffer.ptr = NULL; - /* Usually called with shape and type - but can also be called with buffer, strides, and swapped info - */ - - /* For now, let's just use this to create an empty, contiguous - array of a specific type and shape. - */ - + /* + * Usually called with shape and type but can also be called with buffer, + * strides, and swapped info For now, let's just use this to create an + * empty, contiguous array of a specific type and shape. + */ if (!PyArg_ParseTupleAndKeywords(args, kwds, "O&|O&O&LO&O&", kwlist, PyArray_IntpConverter, &dims, @@ -6326,16 +6380,17 @@ &PyArray_IntpConverter, &strides, &PyArray_OrderConverter, - &order)) + &order)) { goto fail; - - if (order == PyArray_FORTRANORDER) fortran = 1; - - if (descr == NULL) + } + if (order == PyArray_FORTRANORDER) { + fortran = 1; + } + if (descr == NULL) { descr = PyArray_DescrFromType(PyArray_DEFAULT); + } itemsize = descr->elsize; - if (itemsize == 0) { PyErr_SetString(PyExc_ValueError, "data-type with unspecified variable length"); @@ -6373,27 +6428,31 @@ } if (buffer.ptr == NULL) { - ret = (PyArrayObject *) \ + ret = (PyArrayObject *) PyArray_NewFromDescr(subtype, descr, (int)dims.len, dims.ptr, strides.ptr, NULL, fortran, NULL); - if (ret == NULL) {descr=NULL;goto fail;} + if (ret == NULL) { + descr = NULL; + goto fail; + } if (PyDataType_FLAGCHK(descr, NPY_ITEM_HASOBJECT)) { /* place Py_None in object positions */ PyArray_FillObjectArray(ret, Py_None); if (PyErr_Occurred()) { - descr=NULL; + descr = NULL; goto fail; } } } - else { /* buffer given -- use it */ + else { + /* buffer given -- use it */ if (dims.len == 1 && dims.ptr[0] == -1) { dims.ptr[0] = (buffer.len-(intp)offset) / itemsize; } - else if ((strides.ptr == NULL) && \ - (buffer.len < ((intp)itemsize)* \ + else if ((strides.ptr == NULL) && + (buffer.len < ((intp)itemsize)* PyArray_MultiplyList(dims.ptr, dims.len))) { PyErr_SetString(PyExc_TypeError, "buffer is too small for " \ @@ -6401,27 +6460,38 @@ goto fail; } /* get writeable and aligned */ - if (fortran) buffer.flags |= FORTRAN; + if (fortran) { + buffer.flags |= FORTRAN; + } ret = (PyArrayObject *)\ PyArray_NewFromDescr(subtype, descr, dims.len, dims.ptr, strides.ptr, offset + (char *)buffer.ptr, buffer.flags, NULL); - if (ret == NULL) {descr=NULL; goto fail;} + if (ret == NULL) { + descr = NULL; + goto fail; + } PyArray_UpdateFlags(ret, UPDATE_ALL); ret->base = buffer.base; Py_INCREF(buffer.base); } PyDimMem_FREE(dims.ptr); - if (strides.ptr) PyDimMem_FREE(strides.ptr); + if (strides.ptr) { + PyDimMem_FREE(strides.ptr); + } return (PyObject *)ret; fail: Py_XDECREF(descr); - if (dims.ptr) PyDimMem_FREE(dims.ptr); - if (strides.ptr) PyDimMem_FREE(strides.ptr); + if (dims.ptr) { + PyDimMem_FREE(dims.ptr); + } + if (strides.ptr) { + PyDimMem_FREE(strides.ptr); + } return NULL; } @@ -6467,7 +6537,9 @@ /* Assumes C-order */ ret = PyArray_Reshape(self, val); - if (ret == NULL) return -1; + if (ret == NULL) { + return -1; + } if (PyArray_DATA(ret) != PyArray_DATA(self)) { Py_DECREF(ret); PyErr_SetString(PyExc_AttributeError, @@ -6480,7 +6552,8 @@ PyDimMem_FREE(self->dimensions); nd = PyArray_NDIM(ret); self->nd = nd; - if (nd > 0) { /* create new dimensions and strides */ + if (nd > 0) { + /* create new dimensions and strides */ self->dimensions = PyDimMem_NEW(2*nd); if (self->dimensions == NULL) { Py_DECREF(ret); @@ -6488,12 +6561,13 @@ return -1; } self->strides = self->dimensions + nd; - memcpy(self->dimensions, PyArray_DIMS(ret), - nd*sizeof(intp)); - memcpy(self->strides, PyArray_STRIDES(ret), - nd*sizeof(intp)); + memcpy(self->dimensions, PyArray_DIMS(ret), nd*sizeof(intp)); + memcpy(self->strides, PyArray_STRIDES(ret), nd*sizeof(intp)); } - else {self->dimensions=NULL; self->strides=NULL;} + else { + self->dimensions = NULL; + self->strides = NULL; + } Py_DECREF(ret); PyArray_UpdateFlags(self, CONTIGUOUS | FORTRAN); return 0; @@ -6511,12 +6585,12 @@ { PyArray_Dims newstrides = {NULL, 0}; PyArrayObject *new; - intp numbytes=0; - intp offset=0; + intp numbytes = 0; + intp offset = 0; Py_ssize_t buf_len; char *buf; - if (!PyArray_IntpConverter(obj, &newstrides) || \ + if (!PyArray_IntpConverter(obj, &newstrides) || newstrides.ptr == NULL) { PyErr_SetString(PyExc_TypeError, "invalid strides"); return -1; @@ -6530,9 +6604,10 @@ while(new->base && PyArray_Check(new->base)) { new = (PyArrayObject *)(new->base); } - /* Get the available memory through the buffer - interface on new->base or if that fails - from the current new */ + /* + * Get the available memory through the buffer interface on + * new->base or if that fails from the current new + */ if (new->base && PyObject_AsReadBuffer(new->base, (const void **)&buf, &buf_len) >= 0) { @@ -6568,10 +6643,12 @@ static PyObject * array_priority_get(PyArrayObject *self) { - if (PyArray_CheckExact(self)) + if (PyArray_CheckExact(self)) { return PyFloat_FromDouble(PyArray_PRIORITY); - else + } + else { return PyFloat_FromDouble(PyArray_SUBTYPE_PRIORITY); + } } static PyObject *arraydescr_protocol_typestr_get(PyArray_Descr *); @@ -6598,16 +6675,23 @@ PyObject *dobj; res = arraydescr_protocol_descr_get(self->descr); - if (res) return res; + if (res) { + return res; + } PyErr_Clear(); /* get default */ dobj = PyTuple_New(2); - if (dobj == NULL) return NULL; + if (dobj == NULL) { + return NULL; + } PyTuple_SET_ITEM(dobj, 0, PyString_FromString("")); PyTuple_SET_ITEM(dobj, 1, array_typestr_get(self)); res = PyList_New(1); - if (res == NULL) {Py_DECREF(dobj); return NULL;} + if (res == NULL) { + Py_DECREF(dobj); + return NULL; + } PyList_SET_ITEM(res, 0, dobj); return res; } @@ -6616,9 +6700,9 @@ array_protocol_strides_get(PyArrayObject *self) { if PyArray_ISCONTIGUOUS(self) { - Py_INCREF(Py_None); - return Py_None; - } + Py_INCREF(Py_None); + return Py_None; + } return PyArray_IntTupleFromIntp(self->nd, self->strides); } @@ -6639,9 +6723,10 @@ PyObject *_numpy_internal; PyObject *ret; _numpy_internal = PyImport_ImportModule("numpy.core._internal"); - if (_numpy_internal == NULL) return NULL; - ret = PyObject_CallMethod(_numpy_internal, "_ctypes", - "ON", self, + if (_numpy_internal == NULL) { + return NULL; + } + ret = PyObject_CallMethod(_numpy_internal, "_ctypes", "ON", self, PyLong_FromVoidPtr(self->data)); Py_DECREF(_numpy_internal); return ret; @@ -6652,8 +6737,11 @@ { PyObject *dict; PyObject *obj; + dict = PyDict_New(); - if (dict == NULL) return NULL; + if (dict == NULL) { + return NULL; + } /* dataptr */ obj = array_dataptr_get(self); @@ -6693,11 +6781,12 @@ return NULL; } nbytes = PyArray_NBYTES(self); - if PyArray_ISWRITEABLE(self) - return PyBuffer_FromReadWriteObject((PyObject *)self, 0, - (int) nbytes); - else + if PyArray_ISWRITEABLE(self) { + return PyBuffer_FromReadWriteObject((PyObject *)self, 0, (int) nbytes); + } + else { return PyBuffer_FromObject((PyObject *)self, 0, (int) nbytes); + } } static int @@ -6709,8 +6798,7 @@ if (PyObject_AsWriteBuffer(op, &buf, &buf_len) < 0) { writeable = 0; - if (PyObject_AsReadBuffer(op, (const void **)&buf, - &buf_len) < 0) { + if (PyObject_AsReadBuffer(op, (const void **)&buf, &buf_len) < 0) { PyErr_SetString(PyExc_AttributeError, "object does not have single-segment " \ "buffer interface"); @@ -6723,8 +6811,7 @@ return -1; } if (PyArray_NBYTES(self) > buf_len) { - PyErr_SetString(PyExc_AttributeError, - "not enough data for array"); + PyErr_SetString(PyExc_AttributeError, "not enough data for array"); return -1; } if (self->flags & OWNDATA) { @@ -6742,8 +6829,9 @@ self->base = op; self->data = buf; self->flags = CARRAY; - if (!writeable) + if (!writeable) { self->flags &= ~WRITEABLE; + } return 0; } @@ -6761,10 +6849,12 @@ #if SIZEOF_INTP <= SIZEOF_LONG return PyInt_FromLong((long) size); #else - if (size > MAX_LONG || size < MIN_LONG) + if (size > MAX_LONG || size < MIN_LONG) { return PyLong_FromLongLong(size); - else + } + else { return PyInt_FromLong((long) size); + } #endif } @@ -6775,28 +6865,29 @@ #if SIZEOF_INTP <= SIZEOF_LONG return PyInt_FromLong((long) nbytes); #else - if (nbytes > MAX_LONG || nbytes < MIN_LONG) + if (nbytes > MAX_LONG || nbytes < MIN_LONG) { return PyLong_FromLongLong(nbytes); - else + } + else { return PyInt_FromLong((long) nbytes); + } #endif } -/* If the type is changed. - Also needing change: strides, itemsize +/* + * If the type is changed. + * Also needing change: strides, itemsize + * + * Either itemsize is exactly the same or the array is single-segment + * (contiguous or fortran) with compatibile dimensions The shape and strides + * will be adjusted in that case as well. + */ - Either itemsize is exactly the same - or the array is single-segment (contiguous or fortran) with - compatibile dimensions - - The shape and strides will be adjusted in that case as well. -*/ - static int array_descr_set(PyArrayObject *self, PyObject *arg) { - PyArray_Descr *newtype=NULL; + PyArray_Descr *newtype = NULL; intp newdim; int index; char *msg = "new type not compatible with array."; @@ -6825,51 +6916,61 @@ } - if ((newtype->elsize != self->descr->elsize) && \ - (self->nd == 0 || !PyArray_ISONESEGMENT(self) || \ - newtype->subarray)) goto fail; - - if (PyArray_ISCONTIGUOUS(self)) index = self->nd - 1; - else index = 0; - + if ((newtype->elsize != self->descr->elsize) && + (self->nd == 0 || !PyArray_ISONESEGMENT(self) || + newtype->subarray)) { + goto fail; + } + if (PyArray_ISCONTIGUOUS(self)) { + index = self->nd - 1; + } + else { + index = 0; + } if (newtype->elsize < self->descr->elsize) { - /* if it is compatible increase the size of the - dimension at end (or at the front for FORTRAN) - */ - if (self->descr->elsize % newtype->elsize != 0) + /* + * if it is compatible increase the size of the + * dimension at end (or at the front for FORTRAN) + */ + if (self->descr->elsize % newtype->elsize != 0) { goto fail; + } newdim = self->descr->elsize / newtype->elsize; self->dimensions[index] *= newdim; self->strides[index] = newtype->elsize; } - else if (newtype->elsize > self->descr->elsize) { - - /* Determine if last (or first if FORTRAN) dimension - is compatible */ - + /* + * Determine if last (or first if FORTRAN) dimension + * is compatible + */ newdim = self->dimensions[index] * self->descr->elsize; - if ((newdim % newtype->elsize) != 0) goto fail; - + if ((newdim % newtype->elsize) != 0) { + goto fail; + } self->dimensions[index] = newdim / newtype->elsize; self->strides[index] = newtype->elsize; } /* fall through -- adjust type*/ - Py_DECREF(self->descr); if (newtype->subarray) { - /* create new array object from data and update - dimensions, strides and descr from it */ + /* + * create new array object from data and update + * dimensions, strides and descr from it + */ PyArrayObject *temp; - - /* We would decref newtype here --- temp will - steal a reference to it */ - temp = (PyArrayObject *) \ + /* + * We would decref newtype here. + * temp will steal a reference to it + */ + temp = (PyArrayObject *) PyArray_NewFromDescr(&PyArray_Type, newtype, self->nd, self->dimensions, self->strides, self->data, self->flags, NULL); - if (temp == NULL) return -1; + if (temp == NULL) { + return -1; + } PyDimMem_FREE(self->dimensions); self->dimensions = temp->dimensions; self->nd = temp->nd; @@ -6884,7 +6985,6 @@ self->descr = newtype; PyArray_UpdateFlags(self, UPDATE_ALL); - return 0; fail: @@ -6899,7 +6999,9 @@ PyArrayInterface *inter; inter = (PyArrayInterface *)_pya_malloc(sizeof(PyArrayInterface)); - if (inter==NULL) return PyErr_NoMemory(); + if (inter==NULL) { + return PyErr_NoMemory(); + } inter->two = 2; inter->nd = self->nd; inter->typekind = self->descr->kind; @@ -6908,9 +7010,10 @@ /* reset unused flags */ inter->flags &= ~(UPDATEIFCOPY | OWNDATA); if (PyArray_ISNOTSWAPPED(self)) inter->flags |= NOTSWAPPED; - /* Copy shape and strides over since these can be reset - when the array is "reshaped". - */ + /* + * Copy shape and strides over since these can be reset + *when the array is "reshaped". + */ if (self->nd > 0) { inter->shape = (intp *)_pya_malloc(2*sizeof(intp)*self->nd); if (inter->shape == NULL) { @@ -6928,10 +7031,16 @@ inter->data = self->data; if (self->descr->names) { inter->descr = arraydescr_protocol_descr_get(self->descr); - if (inter->descr == NULL) PyErr_Clear(); - else inter->flags &= ARR_HAS_DESCR; + if (inter->descr == NULL) { + PyErr_Clear(); + } + else { + inter->flags &= ARR_HAS_DESCR; + } } - else inter->descr = NULL; + else { + inter->descr = NULL; + } Py_INCREF(self); return PyCObject_FromVoidPtrAndDesc(inter, self, gentype_struct_free); } @@ -6958,7 +7067,7 @@ PyArray_FillObjectArray(ret, zero); Py_DECREF(zero); if (PyErr_Occurred()) { - Py_DECREF(ret); + Py_DECREF(ret); return -1; } } @@ -6966,14 +7075,14 @@ intp n = PyArray_NBYTES(ret); memset(ret->data, 0, n); } - return 0; + return 0; } -/* Create a view of a complex array with an equivalent data-type - except it is real instead of complex. -*/ - +/* + * Create a view of a complex array with an equivalent data-type + * except it is real instead of complex. + */ static PyArrayObject * _get_part(PyArrayObject *self, int imag) { @@ -6992,7 +7101,7 @@ Py_DECREF(type); type = new; } - ret = (PyArrayObject *) \ + ret = (PyArrayObject *) PyArray_NewFromDescr(self->ob_type, type, self->nd, @@ -7000,7 +7109,9 @@ self->strides, self->data + offset, self->flags, (PyObject *)self); - if (ret == NULL) return NULL; + if (ret == NULL) { + return NULL; + } ret->flags &= ~CONTIGUOUS; ret->flags &= ~FORTRAN; Py_INCREF(self); @@ -7033,14 +7144,19 @@ if (PyArray_ISCOMPLEX(self)) { ret = _get_part(self, 0); - if (ret == NULL) return -1; + if (ret == NULL) { + return -1; + } } else { Py_INCREF(self); ret = self; } new = (PyArrayObject *)PyArray_FromAny(val, NULL, 0, 0, 0, NULL); - if (new == NULL) {Py_DECREF(ret); return -1;} + if (new == NULL) { + Py_DECREF(ret); + return -1; + } rint = PyArray_MoveInto(ret, new); Py_DECREF(ret); Py_DECREF(new); @@ -7059,15 +7175,17 @@ Py_INCREF(self->descr); ret = (PyArrayObject *)PyArray_NewFromDescr(self->ob_type, self->descr, - self->nd, + self->nd, self->dimensions, NULL, NULL, PyArray_ISFORTRAN(self), (PyObject *)self); - if (ret == NULL) return NULL; - - if (_zerofill(ret) < 0) return NULL; - + if (ret == NULL) { + return NULL; + } + if (_zerofill(ret) < 0) { + return NULL; + } ret->flags &= ~WRITEABLE; } return (PyObject *) ret; @@ -7082,9 +7200,14 @@ int rint; ret = _get_part(self, 1); - if (ret == NULL) return -1; + if (ret == NULL) { + return -1; + } new = (PyArrayObject *)PyArray_FromAny(val, NULL, 0, 0, 0, NULL); - if (new == NULL) {Py_DECREF(ret); return -1;} + if (new == NULL) { + Py_DECREF(ret); + return -1; + } rint = PyArray_MoveInto(ret, new); Py_DECREF(ret); Py_DECREF(new); @@ -7106,9 +7229,9 @@ static int array_flat_set(PyArrayObject *self, PyObject *val) { - PyObject *arr=NULL; + PyObject *arr = NULL; int retval = -1; - PyArrayIterObject *selfit=NULL, *arrit=NULL; + PyArrayIterObject *selfit = NULL, *arrit = NULL; PyArray_Descr *typecode; int swap; PyArray_CopySwapFunc *copyswap; @@ -7117,28 +7240,36 @@ Py_INCREF(typecode); arr = PyArray_FromAny(val, typecode, 0, 0, FORCECAST | FORTRAN_IF(self), NULL); - if (arr == NULL) return -1; + if (arr == NULL) { + return -1; + } arrit = (PyArrayIterObject *)PyArray_IterNew(arr); - if (arrit == NULL) goto exit; + if (arrit == NULL) { + goto exit; + } selfit = (PyArrayIterObject *)PyArray_IterNew((PyObject *)self); - if (selfit == NULL) goto exit; - - if (arrit->size == 0) {retval = 0; goto exit;} - + if (selfit == NULL) { + goto exit; + } + if (arrit->size == 0) { + retval = 0; + goto exit; + } swap = PyArray_ISNOTSWAPPED(self) != PyArray_ISNOTSWAPPED(arr); copyswap = self->descr->f->copyswap; if (PyDataType_REFCHK(self->descr)) { - while(selfit->index < selfit->size) { + while (selfit->index < selfit->size) { PyArray_Item_XDECREF(selfit->dataptr, self->descr); PyArray_Item_INCREF(arrit->dataptr, PyArray_DESCR(arr)); - memmove(selfit->dataptr, arrit->dataptr, - sizeof(PyObject **)); - if (swap) + memmove(selfit->dataptr, arrit->dataptr, sizeof(PyObject **)); + if (swap) { copyswap(selfit->dataptr, NULL, swap, self); + } PyArray_ITER_NEXT(selfit); PyArray_ITER_NEXT(arrit); - if (arrit->index == arrit->size) + if (arrit->index == arrit->size) { PyArray_ITER_RESET(arrit); + } } retval = 0; goto exit; @@ -7146,14 +7277,17 @@ while(selfit->index < selfit->size) { memmove(selfit->dataptr, arrit->dataptr, self->descr->elsize); - if (swap) + if (swap) { copyswap(selfit->dataptr, NULL, swap, self); + } PyArray_ITER_NEXT(selfit); PyArray_ITER_NEXT(arrit); - if (arrit->index == arrit->size) + if (arrit->index == arrit->size) { PyArray_ITER_RESET(arrit); + } } retval = 0; + exit: Py_XDECREF(selfit); Py_XDECREF(arrit); @@ -7261,77 +7395,78 @@ static PyTypeObject PyArray_Type = { PyObject_HEAD_INIT(NULL) - 0, /*ob_size*/ - "numpy.ndarray", /*tp_name*/ - sizeof(PyArrayObject), /*tp_basicsize*/ - 0, /*tp_itemsize*/ + 0, /* ob_size */ + "numpy.ndarray", /* tp_name */ + sizeof(PyArrayObject), /* tp_basicsize */ + 0, /* tp_itemsize */ /* methods */ - (destructor)array_dealloc, /*tp_dealloc */ - (printfunc)NULL, /*tp_print*/ - 0, /*tp_getattr*/ - 0, /*tp_setattr*/ - (cmpfunc)0, /*tp_compare*/ - (reprfunc)array_repr, /*tp_repr*/ - &array_as_number, /*tp_as_number*/ - &array_as_sequence, /*tp_as_sequence*/ - &array_as_mapping, /*tp_as_mapping*/ - (hashfunc)0, /*tp_hash*/ - (ternaryfunc)0, /*tp_call*/ - (reprfunc)array_str, /*tp_str*/ - - (getattrofunc)0, /*tp_getattro*/ - (setattrofunc)0, /*tp_setattro*/ - &array_as_buffer, /*tp_as_buffer*/ + (destructor)array_dealloc, /* tp_dealloc */ + (printfunc)NULL, /* tp_print */ + 0, /* tp_getattr */ + 0, /* tp_setattr */ + (cmpfunc)0, /* tp_compare */ + (reprfunc)array_repr, /* tp_repr */ + &array_as_number, /* tp_as_number */ + &array_as_sequence, /* tp_as_sequence */ + &array_as_mapping, /* tp_as_mapping */ + (hashfunc)0, /* tp_hash */ + (ternaryfunc)0, /* tp_call */ + (reprfunc)array_str, /* tp_str */ + (getattrofunc)0, /* tp_getattro */ + (setattrofunc)0, /* tp_setattro */ + &array_as_buffer, /* tp_as_buffer */ (Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE - | Py_TPFLAGS_CHECKTYPES), /*tp_flags*/ + | Py_TPFLAGS_CHECKTYPES), /* tp_flags */ /*Documentation string */ - 0, /*tp_doc*/ + 0, /* tp_doc */ - (traverseproc)0, /*tp_traverse */ - (inquiry)0, /*tp_clear */ - (richcmpfunc)array_richcompare, /*tp_richcompare */ - offsetof(PyArrayObject, weakreflist), /*tp_weaklistoffset */ + (traverseproc)0, /* tp_traverse */ + (inquiry)0, /* tp_clear */ + (richcmpfunc)array_richcompare, /* tp_richcompare */ + offsetof(PyArrayObject, weakreflist), /* tp_weaklistoffset */ /* Iterator support (use standard) */ - (getiterfunc)array_iter, /* tp_iter */ - (iternextfunc)0, /* tp_iternext */ + (getiterfunc)array_iter, /* tp_iter */ + (iternextfunc)0, /* tp_iternext */ /* Sub-classing (new-style object) support */ - array_methods, /* tp_methods */ - 0, /* tp_members */ - array_getsetlist, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - (initproc)0, /* tp_init */ - array_alloc, /* tp_alloc */ - (newfunc)array_new, /* tp_new */ - 0, /* tp_free */ - 0, /* tp_is_gc */ - 0, /* tp_bases */ - 0, /* tp_mro */ - 0, /* tp_cache */ - 0, /* tp_subclasses */ - 0, /* tp_weaklist */ - 0, /* tp_del */ + array_methods, /* tp_methods */ + 0, /* tp_members */ + array_getsetlist, /* tp_getset */ + 0, /* tp_base */ + 0, /* tp_dict */ + 0, /* tp_descr_get */ + 0, /* tp_descr_set */ + 0, /* tp_dictoffset */ + (initproc)0, /* tp_init */ + array_alloc, /* tp_alloc */ + (newfunc)array_new, /* tp_new */ + 0, /* tp_free */ + 0, /* tp_is_gc */ + 0, /* tp_bases */ + 0, /* tp_mro */ + 0, /* tp_cache */ + 0, /* tp_subclasses */ + 0, /* tp_weaklist */ + 0, /* tp_del */ #ifdef COUNT_ALLOCS - /* these must be last and never explicitly initialized */ - 0, /* tp_allocs */ - 0, /* tp_frees */ - 0, /* tp_maxalloc */ - 0, /* tp_prev */ - 0, /* *tp_next */ + /* these must be last and never explicitly initialized */ + 0, /* tp_allocs */ + 0, /* tp_frees */ + 0, /* tp_maxalloc */ + 0, /* tp_prev */ + 0, /* *tp_next */ #endif }; -/* The rest of this code is to build the right kind of array from a python */ -/* object. */ +/* + * The rest of this code is to build the right kind of array + * from a python object. + */ static int discover_depth(PyObject *s, int max, int stop_at_string, int stop_at_tuple) @@ -7420,13 +7555,12 @@ } n = PyObject_Length(s); - if ((nd == 0) || PyString_Check(s) || PyUnicode_Check(s) || PyBuffer_Check(s)) { *itemsize = MAX(*itemsize, n); return 0; } - for(i = 0; i < n; i++) { + for (i = 0; i < n; i++) { if ((e = PySequence_GetItem(s,i))==NULL) { return -1; } @@ -7456,8 +7590,7 @@ } return 0; } - - n=PyObject_Length(s); + n = PyObject_Length(s); *d = n; if (*d < 0) { return -1; @@ -7507,10 +7640,11 @@ } - if (chktype->type_num > mintype->type_num) + if (chktype->type_num > mintype->type_num) { outtype_num = chktype->type_num; + } else { - if (PyDataType_ISOBJECT(chktype) && \ + if (PyDataType_ISOBJECT(chktype) && PyDataType_ISSTRING(mintype)) { return PyArray_DescrFromType(NPY_OBJECT); } @@ -7520,10 +7654,11 @@ } save_num = outtype_num; - while(outtype_num < PyArray_NTYPES && + while (outtype_num < PyArray_NTYPES && !(PyArray_CanCastSafely(chktype->type_num, outtype_num) - && PyArray_CanCastSafely(mintype->type_num, outtype_num))) + && PyArray_CanCastSafely(mintype->type_num, outtype_num))) { outtype_num++; + } if (outtype_num == PyArray_NTYPES) { outtype = PyArray_DescrFromType(save_num); } @@ -7532,11 +7667,13 @@ } if (PyTypeNum_ISEXTENDED(outtype->type_num)) { int testsize = outtype->elsize; - register int chksize, minsize; + int chksize, minsize; chksize = chktype->elsize; minsize = mintype->elsize; - /* Handle string->unicode case separately - because string itemsize is 4* as large */ + /* + * Handle string->unicode case separately + * because string itemsize is 4* as large + */ if (outtype->type_num == PyArray_UNICODE && mintype->type_num == PyArray_STRING) { testsize = MAX(chksize, 4*minsize); @@ -7569,7 +7706,8 @@ /* bools are a subclass of int */ if (PyBool_Check(op)) { return PyArray_DescrFromType(PyArray_BOOL); - } else { + } + else { return PyArray_DescrFromType(PyArray_LONG); } } @@ -7607,39 +7745,42 @@ } -/* op is an object to be converted to an ndarray. - - minitype is the minimum type-descriptor needed. - - max is the maximum number of dimensions -- used for recursive call - to avoid infinite recursion... - -*/ - +/* + * op is an object to be converted to an ndarray. + * + * minitype is the minimum type-descriptor needed. + * + * max is the maximum number of dimensions -- used for recursive call + * to avoid infinite recursion... + */ static PyArray_Descr * _array_find_type(PyObject *op, PyArray_Descr *minitype, int max) { int l; PyObject *ip; - PyArray_Descr *chktype=NULL; + PyArray_Descr *chktype = NULL; PyArray_Descr *outtype; - /* These need to come first because if op already carries - a descr structure, then we want it to be the result if minitype - is NULL. - */ - + /* + * These need to come first because if op already carries + * a descr structure, then we want it to be the result if minitype + * is NULL. + */ if (PyArray_Check(op)) { chktype = PyArray_DESCR(op); Py_INCREF(chktype); - if (minitype == NULL) return chktype; + if (minitype == NULL) { + return chktype; + } Py_INCREF(minitype); goto finish; } if (PyArray_IsScalar(op, Generic)) { chktype = PyArray_DescrFromScalar(op); - if (minitype == NULL) return chktype; + if (minitype == NULL) { + return chktype; + } Py_INCREF(minitype); goto finish; } @@ -7647,10 +7788,12 @@ if (minitype == NULL) { minitype = PyArray_DescrFromType(PyArray_BOOL); } - else Py_INCREF(minitype); - - if (max < 0) goto deflt; - + else { + Py_INCREF(minitype); + } + if (max < 0) { + goto deflt; + } chktype = _array_find_python_scalar_type(op); if (chktype) { goto finish; @@ -7661,15 +7804,17 @@ PyObject *new; new = PyDict_GetItemString(ip, "typestr"); if (new && PyString_Check(new)) { - chktype =_array_typedescr_fromstr \ - (PyString_AS_STRING(new)); + chktype =_array_typedescr_fromstr(PyString_AS_STRING(new)); } } Py_DECREF(ip); - if (chktype) goto finish; + if (chktype) { + goto finish; + } } - else PyErr_Clear(); - + else { + PyErr_Clear(); + } if ((ip=PyObject_GetAttrString(op, "__array_struct__")) != NULL) { PyArrayInterface *inter; char buf[40]; @@ -7682,9 +7827,13 @@ } } Py_DECREF(ip); - if (chktype) goto finish; + if (chktype) { + goto finish; + } } - else PyErr_Clear(); + else { + PyErr_Clear(); + } if (PyString_Check(op)) { chktype = PyArray_DescrNewFromType(PyArray_STRING); @@ -7720,10 +7869,10 @@ if (PyErr_Occurred()) PyErr_Clear(); } - if (PyInstance_Check(op)) goto deflt; - + if (PyInstance_Check(op)) { + goto deflt; + } if (PySequence_Check(op)) { - l = PyObject_Length(op); if (l < 0 && PyErr_Occurred()) { PyErr_Clear(); @@ -7757,13 +7906,14 @@ chktype = _use_default_type(op); finish: - outtype = _array_small_type(chktype, minitype); Py_DECREF(chktype); Py_DECREF(minitype); - /* VOID Arrays should not occur by "default" - unless input was already a VOID */ - if (outtype->type_num == PyArray_VOID && \ + /* + * VOID Arrays should not occur by "default" + * unless input was already a VOID + */ + if (outtype->type_num == PyArray_VOID && minitype->type_num != PyArray_VOID) { Py_DECREF(outtype); return PyArray_DescrFromType(PyArray_OBJECT); @@ -7778,15 +7928,15 @@ Py_ssize_t i, slen; int res = 0; - /* This code is to ensure that the sequence access below will - return a lower-dimensional sequence. + /* + * This code is to ensure that the sequence access below will + * return a lower-dimensional sequence. */ if (PyArray_Check(s) && !(PyArray_CheckExact(s))) { - /* FIXME: This could probably copy the entire subarray - at once here using a faster algorithm. - Right now, just make sure a base-class array - is used so that the dimensionality reduction assumption - is correct. + /* + * FIXME: This could probably copy the entire subarray at once here using + * a faster algorithm. Right now, just make sure a base-class array is + * used so that the dimensionality reduction assumption is correct. */ s = PyArray_EnsureArray(s); } @@ -7798,14 +7948,13 @@ } slen = PySequence_Length(s); - if (slen != a->dimensions[dim]) { PyErr_Format(PyExc_ValueError, "setArrayFromSequence: sequence/array shape mismatch."); return -1; } - for(i=0; ind - dim) > 1) { res = setArrayFromSequence(a, o, dim+1, offset); @@ -7814,7 +7963,9 @@ res = a->descr->f->setitem(o, (a->data + offset), a); } Py_DECREF(o); - if (res < 0) return res; + if (res < 0) { + return res; + } offset += a->strides[dim]; } return 0; @@ -7834,12 +7985,13 @@ "assignment to 0-d array"); return -1; } - return setArrayFromSequence(self, v, 0, 0); } -/* "Array Scalars don't call this code" */ -/* steals reference to typecode -- no NULL*/ +/* + * "Array Scalars don't call this code" + * steals reference to typecode -- no NULL + */ static PyObject * Array_FromPyScalar(PyObject *op, PyArray_Descr *typecode) { @@ -7852,7 +8004,6 @@ if (itemsize == 0 && PyTypeNum_ISEXTENDED(type)) { itemsize = PyObject_Length(op); - if (type == PyArray_UNICODE) { itemsize *= 4; } @@ -7879,21 +8030,21 @@ if (PyErr_Occurred()) { Py_DECREF(ret); return NULL; - } + } else { return (PyObject *)ret; } } -/* If s is not a list, return 0 - Otherwise: - - run object_depth_and_dimension on all the elements - and make sure the returned shape and size - is the same for each element - -*/ +/* + * If s is not a list, return 0 + * Otherwise: + * + * run object_depth_and_dimension on all the elements + * and make sure the returned shape and size is the + * same for each element + */ static int object_depth_and_dimension(PyObject *s, int max, intp *dims) { @@ -7931,7 +8082,7 @@ } nd = object_depth_and_dimension(obj, max - 1, newdims); - for(i = 1; i < size; i++) { + for (i = 1; i < size; i++) { if (islist) { obj = PyList_GET_ITEM(s, i); } @@ -7947,7 +8098,7 @@ } } - for(i = 1; i <= nd; i++) { + for (i = 1; i <= nd; i++) { dims[i] = newdims[i-1]; } dims[0] = size; @@ -7970,12 +8121,10 @@ if (nd == 0) { return Array_FromPyScalar(s, typecode); } - r = (PyArrayObject*)PyArray_NewFromDescr(&PyArray_Type, typecode, nd, d, NULL, NULL, fortran, NULL); - if (!r) { return NULL; } @@ -7986,12 +8135,12 @@ return (PyObject*)r; } -/* +/* * isobject means that we are constructing an * object array on-purpose with a nested list. * Only a list is interpreted as a sequence with these rules + * steals reference to typecode */ -/* steals reference to typecode */ static PyObject * Array_FromSequence(PyObject *s, PyArray_Descr *typecode, int fortran, int min_depth, int max_depth) @@ -8007,11 +8156,9 @@ int itemsize = typecode->elsize; check_it = (typecode->type != PyArray_CHARLTR); - stop_at_string = (type != PyArray_STRING) || (typecode->type == PyArray_STRINGLTR); - - stop_at_tuple = (type == PyArray_VOID && (typecode->names \ + stop_at_tuple = (type == PyArray_VOID && (typecode->names || typecode->subarray)); nd = discover_depth(s, MAX_DIMS + 1, stop_at_string, stop_at_tuple); @@ -8076,8 +8223,8 @@ /*NUMPY_API - Is the typenum valid? -*/ + * Is the typenum valid? + */ static int PyArray_ValidType(int type) { @@ -8092,11 +8239,11 @@ return res; } -/* For backward compatibility */ - -/* steals reference to at --- cannot be NULL*/ /*NUMPY_API - *Cast an array using typecode structure. + * For backward compatibility + * + * Cast an array using typecode structure. + * steals reference to at --- cannot be NULL */ static PyObject * PyArray_CastToType(PyArrayObject *mp, PyArray_Descr *at, int fortran) @@ -8107,12 +8254,11 @@ mpd = mp->descr; - if (((mpd == at) || ((mpd->type_num == at->type_num) && \ - PyArray_EquivByteorders(mpd->byteorder,\ - at->byteorder) && \ - ((mpd->elsize == at->elsize) || \ - (at->elsize==0)))) && \ - PyArray_ISBEHAVED_RO(mp)) { + if (((mpd == at) || + ((mpd->type_num == at->type_num) && + PyArray_EquivByteorders(mpd->byteorder, at->byteorder) && + ((mpd->elsize == at->elsize) || (at->elsize==0)))) && + PyArray_ISBEHAVED_RO(mp)) { Py_DECREF(at); Py_INCREF(mp); return (PyObject *)mp; @@ -8123,7 +8269,7 @@ if (at == NULL) { return NULL; } - if (mpd->type_num == PyArray_STRING && + if (mpd->type_num == PyArray_STRING && at->type_num == PyArray_UNICODE) { at->elsize = mpd->elsize << 2; } @@ -8157,14 +8303,15 @@ } /*NUMPY_API - Get a cast function to cast from the input descriptor to the - output type_number (must be a registered data-type). - Returns NULL if un-successful. -*/ + * Get a cast function to cast from the input descriptor to the + * output type_number (must be a registered data-type). + * Returns NULL if un-successful. + */ static PyArray_VectorUnaryFunc * PyArray_GetCastFunc(PyArray_Descr *descr, int type_num) { - PyArray_VectorUnaryFunc *castfunc=NULL; + PyArray_VectorUnaryFunc *castfunc = NULL; + if (type_num < PyArray_NTYPES) { castfunc = descr->f->cast[type_num]; } @@ -8189,19 +8336,19 @@ return castfunc; } - PyErr_SetString(PyExc_ValueError, - "No cast function available."); + PyErr_SetString(PyExc_ValueError, "No cast function available."); return NULL; } -/* Reference counts: - copyswapn is used which increases and decreases reference counts for OBJECT arrays. - All that needs to happen is for any reference counts in the buffers to be - decreased when completely finished with the buffers. - - buffers[0] is the destination - buffers[1] is the source -*/ +/* + * Reference counts: + * copyswapn is used which increases and decreases reference counts for OBJECT arrays. + * All that needs to happen is for any reference counts in the buffers to be + * decreased when completely finished with the buffers. + * + * buffers[0] is the destination + * buffers[1] is the source + */ static void _strided_buffered_cast(char *dptr, intp dstride, int delsize, int dswap, PyArray_CopySwapNFunc *dcopyfunc, @@ -8213,10 +8360,11 @@ { int i; if (N <= bufsize) { - /* 1. copy input to buffer and swap - 2. cast input to output - 3. swap output if necessary and copy from output buffer - */ + /* + * 1. copy input to buffer and swap + * 2. cast input to output + * 3. swap output if necessary and copy from output buffer + */ scopyfunc(buffers[1], selsize, sptr, sstride, N, sswap, src); castfunc(buffers[1], buffers[0], N, src, dest); dcopyfunc(dptr, dstride, buffers[0], delsize, N, dswap, dest); @@ -8225,9 +8373,9 @@ /* otherwise we need to divide up into bufsize pieces */ i = 0; - while(N > 0) { - int newN; - newN = MIN(N, bufsize); + while (N > 0) { + int newN = MIN(N, bufsize); + _strided_buffered_cast(dptr+i*dstride, dstride, delsize, dswap, dcopyfunc, sptr+i*sstride, sstride, selsize, @@ -8307,7 +8455,7 @@ } #endif - while(multi->index < multi->size) { + while (multi->index < multi->size) { _strided_buffered_cast(multi->iters[0]->dataptr, ostrides, delsize, oswap, ocopyfunc, @@ -8326,13 +8474,13 @@ Py_DECREF(multi); if (PyDataType_REFCHK(in->descr)) { obptr = buffers[1]; - for(i = 0; i < N; i++, obptr+=selsize) { + for (i = 0; i < N; i++, obptr+=selsize) { PyArray_Item_XDECREF(obptr, out->descr); } } if (PyDataType_REFCHK(out->descr)) { obptr = buffers[0]; - for(i = 0; i < N; i++, obptr+=delsize) { + for (i = 0; i < N; i++, obptr+=delsize) { PyArray_Item_XDECREF(obptr, out->descr); } } @@ -8362,7 +8510,7 @@ { int simple; int same; - PyArray_VectorUnaryFunc *castfunc=NULL; + PyArray_VectorUnaryFunc *castfunc = NULL; int mpsize = PyArray_SIZE(mp); int iswap, oswap; NPY_BEGIN_THREADS_DEF; @@ -8371,8 +8519,7 @@ return 0; } if (!PyArray_ISWRITEABLE(out)) { - PyErr_SetString(PyExc_ValueError, - "output array is not writeable"); + PyErr_SetString(PyExc_ValueError, "output array is not writeable"); return -1; } @@ -8427,13 +8574,13 @@ { char *inbuffer, *bptr, *optr; char *outbuffer=NULL; - PyArrayIterObject *it_in=NULL, *it_out=NULL; + PyArrayIterObject *it_in = NULL, *it_out = NULL; register intp i, index; intp ncopies = PyArray_SIZE(out) / PyArray_SIZE(in); int elsize=in->descr->elsize; int nels = PyArray_BUFSIZE; int el; - int inswap, outswap=0; + int inswap, outswap = 0; int obuf=!PyArray_ISCARRAY(out); int oelsize = out->descr->elsize; PyArray_CopySwapFunc *in_csn; @@ -8452,45 +8599,50 @@ inswap = !(PyArray_ISFLEXIBLE(in) || PyArray_ISNOTSWAPPED(in)); inbuffer = PyDataMem_NEW(PyArray_BUFSIZE*elsize); - if (inbuffer == NULL) return -1; - if (PyArray_ISOBJECT(in)) + if (inbuffer == NULL) { + return -1; + } + if (PyArray_ISOBJECT(in)) { memset(inbuffer, 0, PyArray_BUFSIZE*elsize); + } it_in = (PyArrayIterObject *)PyArray_IterNew((PyObject *)in); - if (it_in == NULL) goto exit; - + if (it_in == NULL) { + goto exit; + } if (obuf) { - outswap = !(PyArray_ISFLEXIBLE(out) || \ + outswap = !(PyArray_ISFLEXIBLE(out) || PyArray_ISNOTSWAPPED(out)); outbuffer = PyDataMem_NEW(PyArray_BUFSIZE*oelsize); - if (outbuffer == NULL) goto exit; - if (PyArray_ISOBJECT(out)) + if (outbuffer == NULL) { + goto exit; + } + if (PyArray_ISOBJECT(out)) { memset(outbuffer, 0, PyArray_BUFSIZE*oelsize); - + } it_out = (PyArrayIterObject *)PyArray_IterNew((PyObject *)out); - if (it_out == NULL) goto exit; - + if (it_out == NULL) { + goto exit; + } nels = MIN(nels, PyArray_BUFSIZE); } optr = (obuf) ? outbuffer: out->data; bptr = inbuffer; el = 0; - while(ncopies--) { + while (ncopies--) { index = it_in->size; PyArray_ITER_RESET(it_in); - while(index--) { + while (index--) { in_csn(bptr, it_in->dataptr, inswap, in); bptr += elsize; PyArray_ITER_NEXT(it_in); el += 1; if ((el == nels) || (index == 0)) { /* buffer filled, do cast */ - castfunc(inbuffer, optr, el, in, out); - if (obuf) { /* Copy from outbuffer to array */ - for(i=0; idataptr, optr, outswap, out); @@ -8508,6 +8660,7 @@ } } retval = 0; + exit: Py_XDECREF(it_in); PyDataMem_FREE(inbuffer); @@ -8519,20 +8672,21 @@ } /*NUMPY_API - Cast to an already created array. Arrays don't have to be "broadcastable" - Only requirement is they have the same number of elements. -*/ + * Cast to an already created array. Arrays don't have to be "broadcastable" + * Only requirement is they have the same number of elements. + */ static int PyArray_CastAnyTo(PyArrayObject *out, PyArrayObject *mp) { int simple; - PyArray_VectorUnaryFunc *castfunc=NULL; + PyArray_VectorUnaryFunc *castfunc = NULL; int mpsize = PyArray_SIZE(mp); - if (mpsize == 0) return 0; + if (mpsize == 0) { + return 0; + } if (!PyArray_ISWRITEABLE(out)) { - PyErr_SetString(PyExc_ValueError, - "output array is not writeable"); + PyErr_SetString(PyExc_ValueError, "output array is not writeable"); return -1; } @@ -8544,36 +8698,34 @@ } castfunc = PyArray_GetCastFunc(mp->descr, out->descr->type_num); - if (castfunc == NULL) return -1; - - + if (castfunc == NULL) { + return -1; + } simple = ((PyArray_ISCARRAY_RO(mp) && PyArray_ISCARRAY(out)) || (PyArray_ISFARRAY_RO(mp) && PyArray_ISFARRAY(out))); - if (simple) { castfunc(mp->data, out->data, mpsize, mp, out); return 0; } - if (PyArray_SAMESHAPE(out, mp)) { int iswap, oswap; iswap = PyArray_ISBYTESWAPPED(mp) && !PyArray_ISFLEXIBLE(mp); oswap = PyArray_ISBYTESWAPPED(out) && !PyArray_ISFLEXIBLE(out); return _broadcast_cast(out, mp, castfunc, iswap, oswap); } - return _bufferedcast(out, mp, castfunc); } -/* steals reference to newtype --- acc. NULL */ -/*NUMPY_API*/ +/*NUMPY_API + * steals reference to newtype --- acc. NULL + */ static PyObject * PyArray_FromArray(PyArrayObject *arr, PyArray_Descr *newtype, int flags) { - PyArrayObject *ret=NULL; + PyArrayObject *ret = NULL; int itemsize; int copy = 0; int arrflags; @@ -8582,9 +8734,7 @@ PyTypeObject *subtype; oldtype = PyArray_DESCR(arr); - subtype = arr->ob_type; - if (newtype == NULL) { newtype = oldtype; Py_INCREF(oldtype); } @@ -8598,10 +8748,11 @@ itemsize = newtype->elsize; } - /* Can't cast unless ndim-0 array, FORCECAST is specified - or the cast is safe. - */ - if (!(flags & FORCECAST) && !PyArray_NDIM(arr)==0 && + /* + * Can't cast unless ndim-0 array, FORCECAST is specified + * or the cast is safe. + */ + if (!(flags & FORCECAST) && !PyArray_NDIM(arr) == 0 && !PyArray_CanCastTo(oldtype, newtype)) { Py_DECREF(newtype); PyErr_SetString(PyExc_TypeError, @@ -8613,16 +8764,15 @@ /* Don't copy if sizes are compatible */ if ((flags & ENSURECOPY) || PyArray_EquivTypes(oldtype, newtype)) { arrflags = arr->flags; - - copy = (flags & ENSURECOPY) || \ - ((flags & CONTIGUOUS) && (!(arrflags & CONTIGUOUS))) \ - || ((flags & ALIGNED) && (!(arrflags & ALIGNED))) \ - || (arr->nd > 1 && \ - ((flags & FORTRAN) && (!(arrflags & FORTRAN)))) \ + copy = (flags & ENSURECOPY) || + ((flags & CONTIGUOUS) && (!(arrflags & CONTIGUOUS))) + || ((flags & ALIGNED) && (!(arrflags & ALIGNED))) + || (arr->nd > 1 && + ((flags & FORTRAN) && (!(arrflags & FORTRAN)))) || ((flags & WRITEABLE) && (!(arrflags & WRITEABLE))); if (copy) { - if ((flags & UPDATEIFCOPY) && \ + if ((flags & UPDATEIFCOPY) && (!PyArray_ISWRITEABLE(arr))) { Py_DECREF(newtype); PyErr_SetString(PyExc_ValueError, msg); @@ -8631,7 +8781,7 @@ if ((flags & ENSUREARRAY)) { subtype = &PyArray_Type; } - ret = (PyArrayObject *) \ + ret = (PyArrayObject *) PyArray_NewFromDescr(subtype, newtype, arr->nd, arr->dimensions, @@ -8652,14 +8802,16 @@ Py_INCREF(arr); } } - /* If no copy then just increase the reference - count and return the input */ + /* + * If no copy then just increase the reference + * count and return the input + */ else { Py_DECREF(newtype); if ((flags & ENSUREARRAY) && !PyArray_CheckExact(arr)) { Py_INCREF(arr->descr); - ret = (PyArrayObject *) \ + ret = (PyArrayObject *) PyArray_NewFromDescr(&PyArray_Type, arr->descr, arr->nd, @@ -8679,10 +8831,12 @@ } } - /* The desired output type is different than the input - array type and copy was not specified */ + /* + * The desired output type is different than the input + * array type and copy was not specified + */ else { - if ((flags & UPDATEIFCOPY) && \ + if ((flags & UPDATEIFCOPY) && (!PyArray_ISWRITEABLE(arr))) { Py_DECREF(newtype); PyErr_SetString(PyExc_ValueError, msg); @@ -8691,7 +8845,7 @@ if ((flags & ENSUREARRAY)) { subtype = &PyArray_Type; } - ret = (PyArrayObject *) \ + ret = (PyArrayObject *) PyArray_NewFromDescr(subtype, newtype, arr->nd, arr->dimensions, NULL, NULL, @@ -8729,72 +8883,105 @@ swapchar = str[0]; str += 1; -#define _MY_FAIL { \ - PyErr_SetString(PyExc_ValueError, msg); \ - return NULL; \ - } - typechar = str[0]; size = atoi(str + 1); switch (typechar) { case 'b': - if (size == sizeof(Bool)) + if (size == sizeof(Bool)) { type_num = PyArray_BOOL; - else _MY_FAIL - break; + } + else { + PyErr_SetString(PyExc_ValueError, msg); + return NULL; + } + break; case 'u': - if (size == sizeof(uintp)) + if (size == sizeof(uintp)) { type_num = PyArray_UINTP; - else if (size == sizeof(char)) + } + else if (size == sizeof(char)) { type_num = PyArray_UBYTE; - else if (size == sizeof(short)) + } + else if (size == sizeof(short)) { type_num = PyArray_USHORT; - else if (size == sizeof(ulong)) + } + else if (size == sizeof(ulong)) { type_num = PyArray_ULONG; - else if (size == sizeof(int)) + } + else if (size == sizeof(int)) { type_num = PyArray_UINT; - else if (size == sizeof(ulonglong)) + } + else if (size == sizeof(ulonglong)) { type_num = PyArray_ULONGLONG; - else _MY_FAIL - break; + } + else { + PyErr_SetString(PyExc_ValueError, msg); + return NULL; + } + break; case 'i': - if (size == sizeof(intp)) + if (size == sizeof(intp)) { type_num = PyArray_INTP; - else if (size == sizeof(char)) + } + else if (size == sizeof(char)) { type_num = PyArray_BYTE; - else if (size == sizeof(short)) + } + else if (size == sizeof(short)) { type_num = PyArray_SHORT; - else if (size == sizeof(long)) + } + else if (size == sizeof(long)) { type_num = PyArray_LONG; - else if (size == sizeof(int)) + } + else if (size == sizeof(int)) { type_num = PyArray_INT; - else if (size == sizeof(longlong)) + } + else if (size == sizeof(longlong)) { type_num = PyArray_LONGLONG; - else _MY_FAIL - break; + } + else { + PyErr_SetString(PyExc_ValueError, msg); + return NULL; + } + break; case 'f': - if (size == sizeof(float)) + if (size == sizeof(float)) { type_num = PyArray_FLOAT; - else if (size == sizeof(double)) + } + else if (size == sizeof(double)) { type_num = PyArray_DOUBLE; - else if (size == sizeof(longdouble)) + } + else if (size == sizeof(longdouble)) { type_num = PyArray_LONGDOUBLE; - else _MY_FAIL - break; + } + else { + PyErr_SetString(PyExc_ValueError, msg); + return NULL; + } + break; case 'c': - if (size == sizeof(float)*2) + if (size == sizeof(float)*2) { type_num = PyArray_CFLOAT; - else if (size == sizeof(double)*2) + } + else if (size == sizeof(double)*2) { type_num = PyArray_CDOUBLE; - else if (size == sizeof(longdouble)*2) + } + else if (size == sizeof(longdouble)*2) { type_num = PyArray_CLONGDOUBLE; - else _MY_FAIL - break; + } + else { + PyErr_SetString(PyExc_ValueError, msg); + return NULL; + } + break; case 'O': - if (size == sizeof(PyObject *)) + if (size == sizeof(PyObject *)) { type_num = PyArray_OBJECT; - else _MY_FAIL - break; + } + else { + PyErr_SetString(PyExc_ValueError, msg); + return NULL; + } + break; case PyArray_STRINGLTR: type_num = PyArray_STRING; break; @@ -8806,22 +8993,27 @@ type_num = PyArray_VOID; break; default: - _MY_FAIL - } + PyErr_SetString(PyExc_ValueError, msg); + return NULL; + } -#undef _MY_FAIL - descr = PyArray_DescrFromType(type_num); - if (descr == NULL) return NULL; + if (descr == NULL) { + return NULL; + } swap = !PyArray_ISNBO(swapchar); if (descr->elsize == 0 || swap) { /* Need to make a new PyArray_Descr */ PyArray_DESCR_REPLACE(descr); - if (descr==NULL) return NULL; - if (descr->elsize == 0) + if (descr==NULL) { + return NULL; + } + if (descr->elsize == 0) { descr->elsize = size; - if (swap) + } + if (swap) { descr->byteorder = swapchar; + } } return descr; } @@ -8830,7 +9022,7 @@ static PyObject * PyArray_FromStructInterface(PyObject *input) { - PyArray_Descr *thetype=NULL; + PyArray_Descr *thetype = NULL; char buf[40]; PyArrayInterface *inter; PyObject *attr, *r; @@ -8841,9 +9033,13 @@ PyErr_Clear(); return Py_NotImplemented; } - if (!PyCObject_Check(attr)) goto fail; + if (!PyCObject_Check(attr)) { + goto fail; + } inter = PyCObject_AsVoidPtr(attr); - if (inter->two != 2) goto fail; + if (inter->two != 2) { + goto fail; + } if ((inter->flags & NOTSWAPPED) != NOTSWAPPED) { endian = PyArray_OPPBYTE; inter->flags &= ~NOTSWAPPED; @@ -8887,10 +9083,10 @@ static PyObject * PyArray_FromInterface(PyObject *input) { - PyObject *attr=NULL, *item=NULL; - PyObject *tstr=NULL, *shape=NULL; - PyObject *inter=NULL; - PyObject *base=NULL; + PyObject *attr = NULL, *item = NULL; + PyObject *tstr = NULL, *shape = NULL; + PyObject *inter = NULL; + PyObject *base = NULL; PyArrayObject *ret; PyArray_Descr *type=NULL; char *data; @@ -8905,26 +9101,42 @@ /* Get the strides */ inter = PyObject_GetAttrString(input, "__array_interface__"); - if (inter == NULL) {PyErr_Clear(); return Py_NotImplemented;} - if (!PyDict_Check(inter)) {Py_DECREF(inter); return Py_NotImplemented;} - + if (inter == NULL) { + PyErr_Clear(); + return Py_NotImplemented; + } + if (!PyDict_Check(inter)) { + Py_DECREF(inter); + return Py_NotImplemented; + } shape = PyDict_GetItemString(inter, "shape"); - if (shape == NULL) {Py_DECREF(inter); return Py_NotImplemented;} + if (shape == NULL) { + Py_DECREF(inter); + return Py_NotImplemented; + } tstr = PyDict_GetItemString(inter, "typestr"); - if (tstr == NULL) {Py_DECREF(inter); return Py_NotImplemented;} + if (tstr == NULL) { + Py_DECREF(inter); + return Py_NotImplemented; + } attr = PyDict_GetItemString(inter, "data"); base = input; if ((attr == NULL) || (attr==Py_None) || (!PyTuple_Check(attr))) { - if (attr && (attr != Py_None)) item=attr; - else item=input; - res = PyObject_AsWriteBuffer(item, (void **)&data, - &buffer_len); + if (attr && (attr != Py_None)) { + item = attr; + } + else { + item = input; + } + res = PyObject_AsWriteBuffer(item, (void **)&data, &buffer_len); if (res < 0) { PyErr_Clear(); - res = PyObject_AsReadBuffer(item, (const void **)&data, - &buffer_len); - if (res < 0) goto fail; + res = PyObject_AsReadBuffer( + item, (const void **)&data, &buffer_len); + if (res < 0) { + goto fail; + } dataflags &= ~WRITEABLE; } attr = PyDict_GetItemString(inter, "offset"); @@ -8979,7 +9191,9 @@ goto fail; } type = _array_typedescr_fromstr(PyString_AS_STRING(attr)); - if (type==NULL) goto fail; + if (type == NULL) { + goto fail; + } attr = shape; if (!PyTuple_Check(attr)) { PyErr_SetString(PyExc_TypeError, "shape must be a tuple"); @@ -8987,17 +9201,21 @@ goto fail; } n = PyTuple_GET_SIZE(attr); - for(i=0; ibase = base; @@ -9016,12 +9234,16 @@ Py_DECREF(ret); return NULL; } - for(i=0; istrides, strides, n*sizeof(intp)); } else PyErr_Clear(); @@ -9042,35 +9264,38 @@ PyObject *array_meth; array_meth = PyObject_GetAttrString(op, "__array__"); - if (array_meth == NULL) {PyErr_Clear(); return Py_NotImplemented;} + if (array_meth == NULL) { + PyErr_Clear(); + return Py_NotImplemented; + } if (context == NULL) { - if (typecode == NULL) new = PyObject_CallFunction(array_meth, - NULL); - else new = PyObject_CallFunction(array_meth, "O", typecode); + if (typecode == NULL) { + new = PyObject_CallFunction(array_meth, NULL); + } + else { + new = PyObject_CallFunction(array_meth, "O", typecode); + } } else { if (typecode == NULL) { - new = PyObject_CallFunction(array_meth, "OO", Py_None, - context); - if (new == NULL && \ - PyErr_ExceptionMatches(PyExc_TypeError)) { + new = PyObject_CallFunction(array_meth, "OO", Py_None, context); + if (new == NULL && PyErr_ExceptionMatches(PyExc_TypeError)) { PyErr_Clear(); new = PyObject_CallFunction(array_meth, ""); } } else { - new = PyObject_CallFunction(array_meth, "OO", - typecode, context); - if (new == NULL && \ - PyErr_ExceptionMatches(PyExc_TypeError)) { + new = PyObject_CallFunction(array_meth, "OO", typecode, context); + if (new == NULL && PyErr_ExceptionMatches(PyExc_TypeError)) { PyErr_Clear(); - new = PyObject_CallFunction(array_meth, "O", - typecode); + new = PyObject_CallFunction(array_meth, "O", typecode); } } } Py_DECREF(array_meth); - if (new == NULL) return NULL; + if (new == NULL) { + return NULL; + } if (!PyArray_Check(new)) { PyErr_SetString(PyExc_ValueError, "object __array__ method not " \ @@ -9081,23 +9306,27 @@ return new; } -/* Does not check for ENSURECOPY and NOTSWAPPED in flags */ -/* Steals a reference to newtype --- which can be NULL */ -/*NUMPY_API*/ +/*NUMPY_API + * Does not check for ENSURECOPY and NOTSWAPPED in flags + * Steals a reference to newtype --- which can be NULL + */ static PyObject * PyArray_FromAny(PyObject *op, PyArray_Descr *newtype, int min_depth, int max_depth, int flags, PyObject *context) { - /* This is the main code to make a NumPy array from a Python - Object. It is called from lot's of different places which - is why there are so many checks. The comments try to - explain some of the checks. */ - - PyObject *r=NULL; + /* + * This is the main code to make a NumPy array from a Python + * Object. It is called from lot's of different places which + * is why there are so many checks. The comments try to + * explain some of the checks. + */ + PyObject *r = NULL; int seq = FALSE; - /* Is input object already an array? */ - /* This is where the flags are used */ + /* + * Is input object already an array? + * This is where the flags are used + */ if (PyArray_Check(op)) { r = PyArray_FromArray((PyArrayObject *)op, newtype, flags); } @@ -9121,8 +9350,7 @@ return NULL; } if (newtype != NULL || flags != 0) { - new = PyArray_FromArray((PyArrayObject *)r, newtype, - flags); + new = PyArray_FromArray((PyArrayObject *)r, newtype, flags); Py_DECREF(r); r = new; } @@ -9158,7 +9386,7 @@ PyErr_Clear(); if (isobject) { Py_INCREF(newtype); - r = ObjectArray_FromNestedList \ + r = ObjectArray_FromNestedList (op, newtype, flags & FORTRAN); seq = TRUE; Py_DECREF(newtype); @@ -9180,7 +9408,6 @@ } /* Be sure we succeed here */ - if(!PyArray_Check(r)) { PyErr_SetString(PyExc_RuntimeError, "internal error: PyArray_FromAny "\ @@ -9210,8 +9437,9 @@ return NULL; } -/* new reference -- accepts NULL for mintype*/ -/*NUMPY_API*/ +/*NUMPY_API +* new reference -- accepts NULL for mintype +*/ static PyArray_Descr * PyArray_DescrFromObject(PyObject *op, PyArray_Descr *mintype) { @@ -9219,9 +9447,8 @@ } /*NUMPY_API - Return the typecode of the array a Python object would be converted - to -*/ + * Return the typecode of the array a Python object would be converted to + */ static int PyArray_ObjectType(PyObject *op, int minimum_type) { @@ -9230,7 +9457,9 @@ int ret; intype = PyArray_DescrFromType(minimum_type); - if (intype == NULL) PyErr_Clear(); + if (intype == NULL) { + PyErr_Clear(); + } outtype = _array_find_type(op, intype, MAX_DIMS); ret = outtype->type_num; Py_DECREF(outtype); @@ -9239,56 +9468,57 @@ } -/* flags is any of - CONTIGUOUS, - FORTRAN, - ALIGNED, - WRITEABLE, - NOTSWAPPED, - ENSURECOPY, - UPDATEIFCOPY, - FORCECAST, - ENSUREARRAY, - ELEMENTSTRIDES +/* + * flags is any of + * CONTIGUOUS, + * FORTRAN, + * ALIGNED, + * WRITEABLE, + * NOTSWAPPED, + * ENSURECOPY, + * UPDATEIFCOPY, + * FORCECAST, + * ENSUREARRAY, + * ELEMENTSTRIDES + * + * or'd (|) together + * + * Any of these flags present means that the returned array should + * guarantee that aspect of the array. Otherwise the returned array + * won't guarantee it -- it will depend on the object as to whether or + * not it has such features. + * + * Note that ENSURECOPY is enough + * to guarantee CONTIGUOUS, ALIGNED and WRITEABLE + * and therefore it is redundant to include those as well. + * + * BEHAVED == ALIGNED | WRITEABLE + * CARRAY = CONTIGUOUS | BEHAVED + * FARRAY = FORTRAN | BEHAVED + * + * FORTRAN can be set in the FLAGS to request a FORTRAN array. + * Fortran arrays are always behaved (aligned, + * notswapped, and writeable) and not (C) CONTIGUOUS (if > 1d). + * + * UPDATEIFCOPY flag sets this flag in the returned array if a copy is + * made and the base argument points to the (possibly) misbehaved array. + * When the new array is deallocated, the original array held in base + * is updated with the contents of the new array. + * + * FORCECAST will cause a cast to occur regardless of whether or not + * it is safe. + */ - or'd (|) together - - Any of these flags present means that the returned array should - guarantee that aspect of the array. Otherwise the returned array - won't guarantee it -- it will depend on the object as to whether or - not it has such features. - - Note that ENSURECOPY is enough - to guarantee CONTIGUOUS, ALIGNED and WRITEABLE - and therefore it is redundant to include those as well. - - BEHAVED == ALIGNED | WRITEABLE - CARRAY = CONTIGUOUS | BEHAVED - FARRAY = FORTRAN | BEHAVED - - FORTRAN can be set in the FLAGS to request a FORTRAN array. - Fortran arrays are always behaved (aligned, - notswapped, and writeable) and not (C) CONTIGUOUS (if > 1d). - - UPDATEIFCOPY flag sets this flag in the returned array if a copy is - made and the base argument points to the (possibly) misbehaved array. - When the new array is deallocated, the original array held in base - is updated with the contents of the new array. - - FORCECAST will cause a cast to occur regardless of whether or not - it is safe. -*/ - - -/* steals a reference to descr -- accepts NULL */ -/*NUMPY_API*/ +/*NUMPY_API + * steals a reference to descr -- accepts NULL + */ static PyObject * PyArray_CheckFromAny(PyObject *op, PyArray_Descr *descr, int min_depth, int max_depth, int requires, PyObject *context) { PyObject *obj; if (requires & NOTSWAPPED) { - if (!descr && PyArray_Check(op) && \ + if (!descr && PyArray_Check(op) && !PyArray_ISNBO(PyArray_DESCR(op)->byteorder)) { descr = PyArray_DescrNew(PyArray_DESCR(op)); } @@ -9300,9 +9530,10 @@ } } - obj = PyArray_FromAny(op, descr, min_depth, max_depth, - requires, context); - if (obj == NULL) return NULL; + obj = PyArray_FromAny(op, descr, min_depth, max_depth, requires, context); + if (obj == NULL) { + return NULL; + } if ((requires & ELEMENTSTRIDES) && !PyArray_ElementStrides(obj)) { PyObject *new; @@ -9313,25 +9544,25 @@ return obj; } -/* This is a quick wrapper around PyArray_FromAny(op, NULL, 0, 0, - ENSUREARRAY) */ -/* that special cases Arrays and PyArray_Scalars up front */ -/* It *steals a reference* to the object */ -/* It also guarantees that the result is PyArray_Type */ - -/* Because it decrefs op if any conversion needs to take place - so it can be used like PyArray_EnsureArray(some_function(...)) */ - -/*NUMPY_API*/ +/*NUMPY_API + * This is a quick wrapper around PyArray_FromAny(op, NULL, 0, 0, ENSUREARRAY) + * that special cases Arrays and PyArray_Scalars up front + * It *steals a reference* to the object + * It also guarantees that the result is PyArray_Type + * Because it decrefs op if any conversion needs to take place + * so it can be used like PyArray_EnsureArray(some_function(...)) + */ static PyObject * PyArray_EnsureArray(PyObject *op) { PyObject *new; - if (op == NULL) return NULL; - - if (PyArray_CheckExact(op)) return op; - + if (op == NULL) { + return NULL; + } + if (PyArray_CheckExact(op)) { + return op; + } if (PyArray_Check(op)) { new = PyArray_View((PyArrayObject *)op, NULL, &PyArray_Type); Py_DECREF(op); @@ -9351,25 +9582,36 @@ static PyObject * PyArray_EnsureAnyArray(PyObject *op) { - if (op && PyArray_Check(op)) return op; + if (op && PyArray_Check(op)) { + return op; + } return PyArray_EnsureArray(op); } /*NUMPY_API - Check the type coercion rules. -*/ + *Check the type coercion rules. + */ static int PyArray_CanCastSafely(int fromtype, int totype) { PyArray_Descr *from, *to; register int felsize, telsize; - if (fromtype == totype) return 1; - if (fromtype == PyArray_BOOL) return 1; - if (totype == PyArray_BOOL) return 0; - if (totype == PyArray_OBJECT || totype == PyArray_VOID) return 1; - if (fromtype == PyArray_OBJECT || fromtype == PyArray_VOID) return 0; - + if (fromtype == totype) { + return 1; + } + if (fromtype == PyArray_BOOL) { + return 1; + } + if (totype == PyArray_BOOL) { + return 0; + } + if (totype == PyArray_OBJECT || totype == PyArray_VOID) { + return 1; + } + if (fromtype == PyArray_OBJECT || fromtype == PyArray_VOID) { + return 0; + } from = PyArray_DescrFromType(fromtype); /* * cancastto is a PyArray_NOTYPE terminated C-int-array of types that @@ -9379,11 +9621,14 @@ int *curtype; curtype = from->f->cancastto; while (*curtype != PyArray_NOTYPE) { - if (*curtype++ == totype) return 1; + if (*curtype++ == totype) { + return 1; + } } } - if (PyTypeNum_ISUSERDEF(totype)) return 0; - + if (PyTypeNum_ISUSERDEF(totype)) { + return 0; + } to = PyArray_DescrFromType(totype); telsize = to->elsize; felsize = from->elsize; @@ -9401,22 +9646,28 @@ return 0; } else { - return (telsize >= felsize); + return telsize >= felsize; } } else if (PyTypeNum_ISFLOAT(totype)) { - if (felsize < 8) - return (telsize > felsize); - else - return (telsize >= felsize); + if (felsize < 8) { + return telsize > felsize; + } + else { + return telsize >= felsize; + } } else if (PyTypeNum_ISCOMPLEX(totype)) { - if (felsize < 8) - return ((telsize >> 1) > felsize); - else - return ((telsize >> 1) >= felsize); + if (felsize < 8) { + return (telsize >> 1) > felsize; + } + else { + return (telsize >> 1) >= felsize; + } } - else return totype > fromtype; + else { + return totype > fromtype; + } case PyArray_UBYTE: case PyArray_USHORT: case PyArray_UINT: @@ -9424,46 +9675,55 @@ case PyArray_ULONGLONG: if (PyTypeNum_ISINTEGER(totype)) { if (PyTypeNum_ISSIGNED(totype)) { - return (telsize > felsize); + return telsize > felsize; } else { - return (telsize >= felsize); + return telsize >= felsize; } } else if (PyTypeNum_ISFLOAT(totype)) { - if (felsize < 8) - return (telsize > felsize); - else - return (telsize >= felsize); + if (felsize < 8) { + return telsize > felsize; + } + else { + return telsize >= felsize; + } } else if (PyTypeNum_ISCOMPLEX(totype)) { - if (felsize < 8) - return ((telsize >> 1) > felsize); - else - return ((telsize >> 1) >= felsize); + if (felsize < 8) { + return (telsize >> 1) > felsize; + } + else { + return (telsize >> 1) >= felsize; + } } - else return totype > fromtype; + else { + return totype > fromtype; + } case PyArray_FLOAT: case PyArray_DOUBLE: case PyArray_LONGDOUBLE: - if (PyTypeNum_ISCOMPLEX(totype)) - return ((telsize >> 1) >= felsize); - else - return (totype > fromtype); + if (PyTypeNum_ISCOMPLEX(totype)) { + return (telsize >> 1) >= felsize; + } + else { + return totype > fromtype; + } case PyArray_CFLOAT: case PyArray_CDOUBLE: case PyArray_CLONGDOUBLE: - return (totype > fromtype); + return totype > fromtype; case PyArray_STRING: case PyArray_UNICODE: - return (totype > fromtype); + return totype > fromtype; default: return 0; } } -/* leaves reference count alone --- cannot be NULL*/ -/*NUMPY_API*/ +/*NUMPY_API + * leaves reference count alone --- cannot be NULL + */ static Bool PyArray_CanCastTo(PyArray_Descr *from, PyArray_Descr *to) { @@ -9472,14 +9732,14 @@ Bool ret; ret = (Bool) PyArray_CanCastSafely(fromtype, totype); - if (ret) { /* Check String and Unicode more closely */ + if (ret) { + /* Check String and Unicode more closely */ if (fromtype == PyArray_STRING) { if (totype == PyArray_STRING) { ret = (from->elsize <= to->elsize); } else if (totype == PyArray_UNICODE) { - ret = (from->elsize << 2 \ - <= to->elsize); + ret = (from->elsize << 2 <= to->elsize); } } else if (fromtype == PyArray_UNICODE) { @@ -9487,17 +9747,18 @@ ret = (from->elsize <= to->elsize); } } - /* TODO: If totype is STRING or unicode - see if the length is long enough to hold the - stringified value of the object. - */ + /* + * TODO: If totype is STRING or unicode + * see if the length is long enough to hold the + * stringified value of the object. + */ } return ret; } /*NUMPY_API - See if array scalars can be cast. -*/ + * See if array scalars can be cast. + */ static Bool PyArray_CanCastScalar(PyTypeObject *from, PyTypeObject *to) { @@ -9506,8 +9767,9 @@ fromtype = _typenum_fromtypeobj((PyObject *)from, 0); totype = _typenum_fromtypeobj((PyObject *)to, 0); - if (fromtype == PyArray_NOTYPE || totype == PyArray_NOTYPE) + if (fromtype == PyArray_NOTYPE || totype == PyArray_NOTYPE) { return FALSE; + } return (Bool) PyArray_CanCastSafely(fromtype, totype); } @@ -9517,8 +9779,8 @@ /* and Python's array iterator ***/ /*NUMPY_API - Get Iterator. -*/ + * Get Iterator. + */ static PyObject * PyArray_IterNew(PyObject *obj) { @@ -9534,26 +9796,29 @@ it = (PyArrayIterObject *)_pya_malloc(sizeof(PyArrayIterObject)); PyObject_Init((PyObject *)it, &PyArrayIter_Type); /* it = PyObject_New(PyArrayIterObject, &PyArrayIter_Type);*/ - if (it == NULL) + if (it == NULL) { return NULL; - + } nd = ao->nd; PyArray_UpdateFlags(ao, CONTIGUOUS); - if PyArray_ISCONTIGUOUS(ao) it->contiguous = 1; - else it->contiguous = 0; + if (PyArray_ISCONTIGUOUS(ao)) { + it->contiguous = 1; + } + else { + it->contiguous = 0; + } Py_INCREF(ao); it->ao = ao; it->size = PyArray_SIZE(ao); it->nd_m1 = nd - 1; it->factors[nd-1] = 1; - for(i=0; i < nd; i++) { + for (i = 0; i < nd; i++) { it->dims_m1[i] = ao->dimensions[i] - 1; it->strides[i] = ao->strides[i]; - it->backstrides[i] = it->strides[i] * \ - it->dims_m1[i]; - if (i > 0) - it->factors[nd-i-1] = it->factors[nd-i] * \ - ao->dimensions[nd-i]; + it->backstrides[i] = it->strides[i] * it->dims_m1[i]; + if (i > 0) { + it->factors[nd-i-1] = it->factors[nd-i] * ao->dimensions[nd-i]; + } } PyArray_ITER_RESET(it); @@ -9561,8 +9826,8 @@ } /*NUMPY_API - Get Iterator broadcast to a particular shape -*/ + * Get Iterator broadcast to a particular shape + */ static PyObject * PyArray_BroadcastToShape(PyObject *obj, intp *dims, int nd) { @@ -9570,51 +9835,57 @@ int i, diff, j, compat, k; PyArrayObject *ao = (PyArrayObject *)obj; - if (ao->nd > nd) goto err; + if (ao->nd > nd) { + goto err; + } compat = 1; diff = j = nd - ao->nd; - for(i=0; ind; i++, j++) { - if (ao->dimensions[i] == 1) continue; + for (i = 0; i < ao->nd; i++, j++) { + if (ao->dimensions[i] == 1) { + continue; + } if (ao->dimensions[i] != dims[j]) { compat = 0; break; } } - if (!compat) goto err; - + if (!compat) { + goto err; + } it = (PyArrayIterObject *)_pya_malloc(sizeof(PyArrayIterObject)); PyObject_Init((PyObject *)it, &PyArrayIter_Type); - if (it == NULL) + if (it == NULL) { return NULL; - + } PyArray_UpdateFlags(ao, CONTIGUOUS); - if PyArray_ISCONTIGUOUS(ao) it->contiguous = 1; - else it->contiguous = 0; + if (PyArray_ISCONTIGUOUS(ao)) { + it->contiguous = 1; + } + else { + it->contiguous = 0; + } Py_INCREF(ao); it->ao = ao; it->size = PyArray_MultiplyList(dims, nd); it->nd_m1 = nd - 1; it->factors[nd-1] = 1; - for(i=0; i < nd; i++) { + for (i = 0; i < nd; i++) { it->dims_m1[i] = dims[i] - 1; k = i - diff; - if ((k < 0) || - ao->dimensions[k] != dims[i]) { + if ((k < 0) || ao->dimensions[k] != dims[i]) { it->contiguous = 0; it->strides[i] = 0; } else { it->strides[i] = ao->strides[k]; } - it->backstrides[i] = it->strides[i] * \ - it->dims_m1[i]; - if (i > 0) - it->factors[nd-i-1] = it->factors[nd-i] * \ - dims[nd-i]; + it->backstrides[i] = it->strides[i] * it->dims_m1[i]; + if (i > 0) { + it->factors[nd-i-1] = it->factors[nd-i] * dims[nd-i]; + } } PyArray_ITER_RESET(it); - return (PyObject *)it; err: @@ -9628,29 +9899,31 @@ /*NUMPY_API - Get Iterator that iterates over all but one axis (don't use this with - PyArray_ITER_GOTO1D). The axis will be over-written if negative - with the axis having the smallest stride. -*/ + * Get Iterator that iterates over all but one axis (don't use this with + * PyArray_ITER_GOTO1D). The axis will be over-written if negative + * with the axis having the smallest stride. + */ static PyObject * PyArray_IterAllButAxis(PyObject *obj, int *inaxis) { PyArrayIterObject *it; int axis; it = (PyArrayIterObject *)PyArray_IterNew(obj); - if (it == NULL) return NULL; - - if (PyArray_NDIM(obj)==0) + if (it == NULL) { + return NULL; + } + if (PyArray_NDIM(obj)==0) { return (PyObject *)it; + } if (*inaxis < 0) { - int i, minaxis=0; - intp minstride=0; + int i, minaxis = 0; + intp minstride = 0; i = 0; - while (minstride==0 && i 0 && PyArray_STRIDE(obj, i) < minstride) { minaxis = i; @@ -9668,21 +9941,21 @@ it->dims_m1[axis] = 0; it->backstrides[axis] = 0; - /* (won't fix factors so don't use - PyArray_ITER_GOTO1D with this iterator) */ + /* + * (won't fix factors so don't use + * PyArray_ITER_GOTO1D with this iterator) + */ return (PyObject *)it; } - -/* don't use with PyArray_ITER_GOTO1D because factors are not - adjusted */ - /*NUMPY_API - Adjusts previously broadcasted iterators so that the axis with - the smallest sum of iterator strides is not iterated over. - Returns dimension which is smallest in the range [0,multi->nd). - A -1 is returned if multi->nd == 0. -*/ + * Adjusts previously broadcasted iterators so that the axis with + * the smallest sum of iterator strides is not iterated over. + * Returns dimension which is smallest in the range [0,multi->nd). + * A -1 is returned if multi->nd == 0. + * + * don't use with PyArray_ITER_GOTO1D because factors are not adjusted + */ static int PyArray_RemoveSmallest(PyArrayMultiIterObject *multi) { @@ -9692,34 +9965,33 @@ intp smallest; intp sumstrides[NPY_MAXDIMS]; - if (multi->nd == 0) return -1; - - - for(i=0; ind; i++) { + if (multi->nd == 0) { + return -1; + } + for (i = 0; i < multi->nd; i++) { sumstrides[i] = 0; - for(j=0; jnumiter; j++) { + for (j = 0; j < multi->numiter; j++) { sumstrides[i] += multi->iters[j]->strides[i]; } } - axis=0; + axis = 0; smallest = sumstrides[0]; /* Find longest dimension */ - for(i=1; ind; i++) { + for (i = 1; i < multi->nd; i++) { if (sumstrides[i] < smallest) { axis = i; smallest = sumstrides[i]; } } - - for(i=0; inumiter; i++) { + for(i = 0; i < multi->numiter; i++) { it = multi->iters[i]; it->contiguous = 0; - if (it->size != 0) + if (it->size != 0) { it->size /= (it->dims_m1[axis]+1); + } it->dims_m1[axis] = 0; it->backstrides[axis] = 0; } - multi->size = multi->iters[0]->size; return axis; } @@ -9757,7 +10029,7 @@ iter_subscript_Bool(PyArrayIterObject *self, PyArrayObject *ind) { int index, strides, itemsize; - intp count=0; + intp count = 0; char *dptr, *optr; PyObject *r; int swap; @@ -9779,9 +10051,10 @@ strides = ind->strides[0]; dptr = ind->data; /* Get size of return array */ - while(index--) { - if (*((Bool *)dptr) != 0) + while (index--) { + if (*((Bool *)dptr) != 0) { count++; + } dptr += strides; } itemsize = self->ao->descr->elsize; @@ -9790,17 +10063,17 @@ self->ao->descr, 1, &count, NULL, NULL, 0, (PyObject *)self->ao); - if (r==NULL) return NULL; - + if (r == NULL) { + return NULL; + } /* Set up loop */ optr = PyArray_DATA(r); index = ind->dimensions[0]; dptr = ind->data; - copyswap = self->ao->descr->f->copyswap; /* Loop over Boolean array */ swap = (PyArray_ISNOTSWAPPED(self->ao) != PyArray_ISNOTSWAPPED(r)); - while(index--) { + while (index--) { if (*((Bool *)dptr) != 0) { copyswap(optr, self->dataptr, swap, self->ao); optr += itemsize; @@ -9827,7 +10100,9 @@ itemsize = self->ao->descr->elsize; if (ind->nd == 0) { num = *((intp *)ind->data); - if (num < 0) num += self->size; + if (num < 0) { + num += self->size; + } if (num < 0 || num >= self->size) { PyErr_Format(PyExc_IndexError, "index %d out of bounds" \ @@ -9848,17 +10123,23 @@ ind->nd, ind->dimensions, NULL, NULL, 0, (PyObject *)self->ao); - if (r==NULL) return NULL; - + if (r == NULL) { + return NULL; + } optr = PyArray_DATA(r); ind_it = (PyArrayIterObject *)PyArray_IterNew((PyObject *)ind); - if (ind_it == NULL) {Py_DECREF(r); return NULL;} + if (ind_it == NULL) { + Py_DECREF(r); + return NULL; + } index = ind_it->size; copyswap = PyArray_DESCR(r)->f->copyswap; swap = (PyArray_ISNOTSWAPPED(r) != PyArray_ISNOTSWAPPED(self->ao)); - while(index--) { + while (index--) { num = *((intp *)(ind_it->dataptr)); - if (num < 0) num += self->size; + if (num < 0) { + num += self->size; + } if (num < 0 || num >= self->size) { PyErr_Format(PyExc_IndexError, "index %d out of bounds" \ @@ -9883,7 +10164,7 @@ static PyObject * iter_subscript(PyArrayIterObject *self, PyObject *ind) { - PyArray_Descr *indtype=NULL; + PyArray_Descr *indtype = NULL; intp start, step_size; intp n_steps; PyObject *r; @@ -9901,7 +10182,9 @@ if (PyTuple_Check(ind)) { int len; len = PyTuple_GET_SIZE(ind); - if (len > 1) goto fail; + if (len > 1) { + goto fail; + } if (len == 0) { Py_INCREF(self->ao); return (PyObject *)self->ao; @@ -9909,12 +10192,11 @@ ind = PyTuple_GET_ITEM(ind, 0); } - /* Tuples >1d not accepted --- i.e. no newaxis */ - /* Could implement this with adjusted strides - and dimensions in iterator */ - - /* Check for Boolean -- this is first becasue - Bool is a subclass of Int */ + /* + * Tuples >1d not accepted --- i.e. no newaxis + * Could implement this with adjusted strides and dimensions in iterator + * Check for Boolean -- this is first becasue Bool is a subclass of Int + */ PyArray_ITER_RESET(self); if (PyBool_Check(ind)) { @@ -9934,12 +10216,12 @@ } /* Check for Integer or Slice */ - if (PyLong_Check(ind) || PyInt_Check(ind) || PySlice_Check(ind)) { start = parse_subindex(ind, &step_size, &n_steps, self->size); - if (start == -1) + if (start == -1) { goto fail; + } if (n_steps == RubberIndex || n_steps == PseudoIndex) { PyErr_SetString(PyExc_IndexError, "cannot use Ellipsis or newaxes here"); @@ -9958,10 +10240,12 @@ 1, &n_steps, NULL, NULL, 0, (PyObject *)self->ao); - if (r==NULL) goto fail; + if (r == NULL) { + goto fail; + } dptr = PyArray_DATA(r); copyswap = PyArray_DESCR(r)->f->copyswap; - while(n_steps--) { + while (n_steps--) { copyswap(dptr, self->dataptr, 0, r); start += step_size; PyArray_ITER_GOTO1D(self, start) @@ -9972,12 +10256,13 @@ } /* convert to INTP array if Integer array scalar or List */ - indtype = PyArray_DescrFromType(PyArray_INTP); if (PyArray_IsScalar(ind, Integer) || PyList_Check(ind)) { Py_INCREF(indtype); obj = PyArray_FromAny(ind, indtype, 0, 0, FORCECAST, NULL); - if (obj == NULL) goto fail; + if (obj == NULL) { + goto fail; + } } else { Py_INCREF(ind); @@ -9995,7 +10280,9 @@ PyObject *new; new = PyArray_FromAny(obj, indtype, 0, 0, FORCECAST | ALIGNED, NULL); - if (new==NULL) goto fail; + if (new == NULL) { + goto fail; + } Py_DECREF(obj); obj = new; r = iter_subscript_int(self, (PyArrayObject *)obj); @@ -10006,12 +10293,15 @@ Py_DECREF(obj); return r; } - else Py_DECREF(indtype); + else { + Py_DECREF(indtype); + } fail: - if (!PyErr_Occurred()) + if (!PyErr_Occurred()) { PyErr_SetString(PyExc_IndexError, "unsupported iterator index"); + } Py_XDECREF(indtype); Py_XDECREF(obj); return NULL; @@ -10045,12 +10335,13 @@ PyArray_ITER_RESET(self); /* Loop over Boolean array */ copyswap = self->ao->descr->f->copyswap; - while(index--) { + while (index--) { if (*((Bool *)dptr) != 0) { copyswap(self->dataptr, val->dataptr, swap, self->ao); PyArray_ITER_NEXT(val); - if (val->index==val->size) + if (val->index == val->size) { PyArray_ITER_RESET(val); + } } dptr += strides; PyArray_ITER_NEXT(self); @@ -10078,11 +10369,15 @@ return 0; } ind_it = (PyArrayIterObject *)PyArray_IterNew((PyObject *)ind); - if (ind_it == NULL) return -1; + if (ind_it == NULL) { + return -1; + } index = ind_it->size; - while(index--) { + while (index--) { num = *((intp *)(ind_it->dataptr)); - if (num < 0) num += self->size; + if (num < 0) { + num += self->size; + } if ((num < 0) || (num >= self->size)) { PyErr_Format(PyExc_IndexError, "index %d out of bounds" \ @@ -10095,8 +10390,9 @@ copyswap(self->dataptr, val->dataptr, swap, self->ao); PyArray_ITER_NEXT(ind_it); PyArray_ITER_NEXT(val); - if (val->index == val->size) + if (val->index == val->size) { PyArray_ITER_RESET(val); + } } Py_DECREF(ind_it); return 0; @@ -10105,14 +10401,14 @@ static int iter_ass_subscript(PyArrayIterObject *self, PyObject *ind, PyObject *val) { - PyObject *arrval=NULL; - PyArrayIterObject *val_it=NULL; + PyObject *arrval = NULL; + PyArrayIterObject *val_it = NULL; PyArray_Descr *type; - PyArray_Descr *indtype=NULL; - int swap, retval=-1; + PyArray_Descr *indtype = NULL; + int swap, retval = -1; intp start, step_size; intp n_steps; - PyObject *obj=NULL; + PyObject *obj = NULL; PyArray_CopySwapFunc *copyswap; @@ -10126,15 +10422,18 @@ if (PyTuple_Check(ind)) { int len; len = PyTuple_GET_SIZE(ind); - if (len > 1) goto finish; + if (len > 1) { + goto finish; + } ind = PyTuple_GET_ITEM(ind, 0); } type = self->ao->descr; - /* Check for Boolean -- this is first becasue - Bool is a subclass of Int */ - + /* + * Check for Boolean -- this is first becasue + * Bool is a subclass of Int + */ if (PyBool_Check(ind)) { retval = 0; if (PyObject_IsTrue(ind)) { @@ -10143,9 +10442,13 @@ goto finish; } - if (PySequence_Check(ind) || PySlice_Check(ind)) goto skip; + if (PySequence_Check(ind) || PySlice_Check(ind)) { + goto skip; + } start = PyArray_PyIntAsIntp(ind); - if (start==-1 && PyErr_Occurred()) PyErr_Clear(); + if (start==-1 && PyErr_Occurred()) { + PyErr_Clear(); + } else { if (start < -self->size || start >= self->size) { PyErr_Format(PyExc_ValueError, @@ -10167,41 +10470,48 @@ skip: Py_INCREF(type); arrval = PyArray_FromAny(val, type, 0, 0, 0, NULL); - if (arrval==NULL) return -1; + if (arrval == NULL) { + return -1; + } val_it = (PyArrayIterObject *)PyArray_IterNew(arrval); - if (val_it==NULL) goto finish; - if (val_it->size == 0) {retval = 0; goto finish;} + if (val_it == NULL) { + goto finish; + } + if (val_it->size == 0) { + retval = 0; + goto finish; + } copyswap = PyArray_DESCR(arrval)->f->copyswap; swap = (PyArray_ISNOTSWAPPED(self->ao)!=PyArray_ISNOTSWAPPED(arrval)); /* Check Slice */ - if (PySlice_Check(ind)) { - start = parse_subindex(ind, &step_size, &n_steps, - self->size); - if (start == -1) goto finish; + start = parse_subindex(ind, &step_size, &n_steps, self->size); + if (start == -1) { + goto finish; + } if (n_steps == RubberIndex || n_steps == PseudoIndex) { PyErr_SetString(PyExc_IndexError, "cannot use Ellipsis or newaxes here"); goto finish; } PyArray_ITER_GOTO1D(self, start); - if (n_steps == SingleIndex) { /* Integer */ - copyswap(self->dataptr, PyArray_DATA(arrval), - swap, arrval); + if (n_steps == SingleIndex) { + /* Integer */ + copyswap(self->dataptr, PyArray_DATA(arrval), swap, arrval); PyArray_ITER_RESET(self); - retval=0; + retval = 0; goto finish; } - while(n_steps--) { - copyswap(self->dataptr, val_it->dataptr, - swap, arrval); + while (n_steps--) { + copyswap(self->dataptr, val_it->dataptr, swap, arrval); start += step_size; - PyArray_ITER_GOTO1D(self, start) - PyArray_ITER_NEXT(val_it); - if (val_it->index == val_it->size) + PyArray_ITER_GOTO1D(self, start); + PyArray_ITER_NEXT(val_it); + if (val_it->index == val_it->size) { PyArray_ITER_RESET(val_it); + } } PyArray_ITER_RESET(self); retval = 0; @@ -10209,7 +10519,6 @@ } /* convert to INTP array if Integer array scalar or List */ - indtype = PyArray_DescrFromType(PyArray_INTP); if (PyList_Check(ind)) { Py_INCREF(indtype); @@ -10224,8 +10533,9 @@ /* Check for Boolean object */ if (PyArray_TYPE(obj)==PyArray_BOOL) { if (iter_ass_sub_Bool(self, (PyArrayObject *)obj, - val_it, swap) < 0) + val_it, swap) < 0) { goto finish; + } retval=0; } /* Check for integer array */ @@ -10236,18 +10546,21 @@ FORCECAST | BEHAVED_NS, NULL); Py_DECREF(obj); obj = new; - if (new==NULL) goto finish; + if (new == NULL) { + goto finish; + } if (iter_ass_sub_int(self, (PyArrayObject *)obj, - val_it, swap) < 0) + val_it, swap) < 0) { goto finish; - retval=0; + } + retval = 0; } } finish: - if (!PyErr_Occurred() && retval < 0) - PyErr_SetString(PyExc_IndexError, - "unsupported iterator index"); + if (!PyErr_Occurred() && retval < 0) { + PyErr_SetString(PyExc_IndexError, "unsupported iterator index"); + } Py_XDECREF(indtype); Py_XDECREF(obj); Py_XDECREF(val_it); @@ -10279,13 +10592,12 @@ /* Any argument ignored */ /* Two options: - 1) underlying array is contiguous - -- return 1-d wrapper around it - 2) underlying array is not contiguous - -- make new 1-d contiguous array with updateifcopy flag set - to copy back to the old array - */ - + * 1) underlying array is contiguous + * -- return 1-d wrapper around it + * 2) underlying array is not contiguous + * -- make new 1-d contiguous array with updateifcopy flag set + * to copy back to the old array + */ size = PyArray_SIZE(it->ao); Py_INCREF(it->ao->descr); if (PyArray_ISCONTIGUOUS(it->ao)) { @@ -10295,7 +10607,9 @@ NULL, it->ao->data, it->ao->flags, (PyObject *)it->ao); - if (r==NULL) return NULL; + if (r == NULL) { + return NULL; + } } else { r = PyArray_NewFromDescr(&PyArray_Type, @@ -10303,7 +10617,9 @@ 1, &size, NULL, NULL, 0, (PyObject *)it->ao); - if (r==NULL) return NULL; + if (r == NULL) { + return NULL; + } if (_flat_copyinto(r, (PyObject *)it->ao, PyArray_CORDER) < 0) { Py_DECREF(r); @@ -10321,7 +10637,9 @@ static PyObject * iter_copy(PyArrayIterObject *it, PyObject *args) { - if (!PyArg_ParseTuple(args, "")) return NULL; + if (!PyArg_ParseTuple(args, "")) { + return NULL; + } return PyArray_Flatten(it->ao, 0); } @@ -10338,7 +10656,9 @@ PyArrayObject *new; PyObject *ret; new = (PyArrayObject *)iter_array(self, NULL); - if (new == NULL) return NULL; + if (new == NULL) { + return NULL; + } ret = array_richcompare(new, other, cmp_op); Py_DECREF(new); return ret; @@ -10356,12 +10676,15 @@ { int nd; nd = self->ao->nd; - if (self->contiguous) { /* coordinates not kept track of --- need to generate - from index */ + if (self->contiguous) { + /* + * coordinates not kept track of --- + * need to generate from index + */ intp val; int i; val = self->index; - for(i=0;icoordinates[i] = val / self->factors[i]; val = val % self->factors[i]; } @@ -10378,60 +10701,60 @@ static PyTypeObject PyArrayIter_Type = { PyObject_HEAD_INIT(NULL) - 0, /* ob_size */ - "numpy.flatiter", /* tp_name */ - sizeof(PyArrayIterObject), /* tp_basicsize */ - 0, /* tp_itemsize */ + 0, /* ob_size */ + "numpy.flatiter", /* tp_name */ + sizeof(PyArrayIterObject), /* tp_basicsize */ + 0, /* tp_itemsize */ /* methods */ - (destructor)arrayiter_dealloc, /* tp_dealloc */ - 0, /* tp_print */ - 0, /* tp_getattr */ - 0, /* tp_setattr */ - 0, /* tp_compare */ - 0, /* tp_repr */ - 0, /* tp_as_number */ - 0, /* tp_as_sequence */ - &iter_as_mapping, /* tp_as_mapping */ - 0, /* tp_hash */ - 0, /* tp_call */ - 0, /* tp_str */ - 0, /* tp_getattro */ - 0, /* tp_setattro */ - 0, /* tp_as_buffer */ - Py_TPFLAGS_DEFAULT, /* tp_flags */ - 0, /* tp_doc */ - 0, /* tp_traverse */ - 0, /* tp_clear */ - (richcmpfunc)iter_richcompare, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ - 0, /* tp_iter */ - (iternextfunc)arrayiter_next, /* tp_iternext */ - iter_methods, /* tp_methods */ - iter_members, /* tp_members */ - iter_getsets, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - 0, /* tp_init */ - 0, /* tp_alloc */ - 0, /* tp_new */ - 0, /* tp_free */ - 0, /* tp_is_gc */ - 0, /* tp_bases */ - 0, /* tp_mro */ - 0, /* tp_cache */ - 0, /* tp_subclasses */ - 0, /* tp_weaklist */ - 0, /* tp_del */ + (destructor)arrayiter_dealloc, /* tp_dealloc */ + 0, /* tp_print */ + 0, /* tp_getattr */ + 0, /* tp_setattr */ + 0, /* tp_compare */ + 0, /* tp_repr */ + 0, /* tp_as_number */ + 0, /* tp_as_sequence */ + &iter_as_mapping, /* tp_as_mapping */ + 0, /* tp_hash */ + 0, /* tp_call */ + 0, /* tp_str */ + 0, /* tp_getattro */ + 0, /* tp_setattro */ + 0, /* tp_as_buffer */ + Py_TPFLAGS_DEFAULT, /* tp_flags */ + 0, /* tp_doc */ + 0, /* tp_traverse */ + 0, /* tp_clear */ + (richcmpfunc)iter_richcompare, /* tp_richcompare */ + 0, /* tp_weaklistoffset */ + 0, /* tp_iter */ + (iternextfunc)arrayiter_next, /* tp_iternext */ + iter_methods, /* tp_methods */ + iter_members, /* tp_members */ + iter_getsets, /* tp_getset */ + 0, /* tp_base */ + 0, /* tp_dict */ + 0, /* tp_descr_get */ + 0, /* tp_descr_set */ + 0, /* tp_dictoffset */ + 0, /* tp_init */ + 0, /* tp_alloc */ + 0, /* tp_new */ + 0, /* tp_free */ + 0, /* tp_is_gc */ + 0, /* tp_bases */ + 0, /* tp_mro */ + 0, /* tp_cache */ + 0, /* tp_subclasses */ + 0, /* tp_weaklist */ + 0, /* tp_del */ #ifdef COUNT_ALLOCS - /* these must be last and never explicitly initialized */ - 0, /* tp_allocs */ - 0, /* tp_frees */ - 0, /* tp_maxalloc */ - 0, /* tp_prev */ - 0, /* *tp_next */ + /* these must be last and never explicitly initialized */ + 0, /* tp_allocs */ + 0, /* tp_frees */ + 0, /* tp_maxalloc */ + 0, /* tp_prev */ + 0, /* *tp_next */ #endif }; @@ -10462,18 +10785,23 @@ PyArray_Descr *indtype; PyObject *arr; - if (PySlice_Check(obj) || (obj == Py_Ellipsis)) + if (PySlice_Check(obj) || (obj == Py_Ellipsis)) { return 0; + } else if (PyArray_Check(obj) && PyArray_ISBOOL(obj)) { return _nonzero_indices(obj, iter); } else { indtype = PyArray_DescrFromType(PyArray_INTP); arr = PyArray_FromAny(obj, indtype, 0, 0, FORCECAST, NULL); - if (arr == NULL) return -1; + if (arr == NULL) { + return -1; + } *iter = (PyArrayIterObject *)PyArray_IterNew(arr); Py_DECREF(arr); - if (*iter == NULL) return -1; + if (*iter == NULL) { + return -1; + } } return 1; } @@ -10490,23 +10818,26 @@ PyArrayIterObject *it; /* Discover the broadcast number of dimensions */ - for(i=0, nd=0; inumiter; i++) + for (i = 0, nd = 0; i < mit->numiter; i++) { nd = MAX(nd, mit->iters[i]->ao->nd); + } mit->nd = nd; /* Discover the broadcast shape in each dimension */ - for(i=0; idimensions[i] = 1; - for(j=0; jnumiter; j++) { + for (j = 0; j < mit->numiter; j++) { it = mit->iters[j]; - /* This prepends 1 to shapes not already - equal to nd */ + /* This prepends 1 to shapes not already equal to nd */ k = i + it->ao->nd - nd; - if (k>=0) { + if (k >= 0) { tmp = it->ao->dimensions[k]; - if (tmp == 1) continue; - if (mit->dimensions[i] == 1) + if (tmp == 1) { + continue; + } + if (mit->dimensions[i] == 1) { mit->dimensions[i] = tmp; + } else if (mit->dimensions[i] != tmp) { PyErr_SetString(PyExc_ValueError, "shape mismatch: objects" \ @@ -10518,9 +10849,11 @@ } } - /* Reset the iterator dimensions and strides of each iterator - object -- using 0 valued strides for broadcasting */ - /* Need to check for overflow */ + /* + * Reset the iterator dimensions and strides of each iterator + * object -- using 0 valued strides for broadcasting + * Need to check for overflow + */ tmp = PyArray_OverflowMultiplyList(mit->dimensions, mit->nd); if (tmp < 0) { PyErr_SetString(PyExc_ValueError, @@ -10528,18 +10861,20 @@ return -1; } mit->size = tmp; - for(i=0; inumiter; i++) { + for (i = 0; i < mit->numiter; i++) { it = mit->iters[i]; it->nd_m1 = mit->nd - 1; it->size = tmp; nd = it->ao->nd; it->factors[mit->nd-1] = 1; - for(j=0; j < mit->nd; j++) { + for (j = 0; j < mit->nd; j++) { it->dims_m1[j] = mit->dimensions[j] - 1; k = j + nd - mit->nd; - /* If this dimension was added or shape - of underlying array was 1 */ - if ((k < 0) || \ + /* + * If this dimension was added or shape of + * underlying array was 1 + */ + if ((k < 0) || it->ao->dimensions[k] != mit->dimensions[j]) { it->contiguous = 0; it->strides[j] = 0; @@ -10547,12 +10882,10 @@ else { it->strides[j] = it->ao->strides[k]; } - it->backstrides[j] = it->strides[j] * \ - it->dims_m1[j]; + it->backstrides[j] = it->strides[j] * it->dims_m1[j]; if (j > 0) - it->factors[mit->nd-j-1] = \ - it->factors[mit->nd-j] * \ - mit->dimensions[mit->nd-j]; + it->factors[mit->nd-j-1] = + it->factors[mit->nd-j] * mit->dimensions[mit->nd-j]; } PyArray_ITER_RESET(it); } @@ -10574,12 +10907,11 @@ if (mit->subspace != NULL) { memcpy(coord, mit->bscoord, sizeof(intp)*mit->ait->ao->nd); PyArray_ITER_RESET(mit->subspace); - for(i=0; inumiter; i++) { + for (i = 0; i < mit->numiter; i++) { it = mit->iters[i]; PyArray_ITER_RESET(it); j = mit->iteraxes[i]; - copyswap(coord+j,it->dataptr, - !PyArray_ISNOTSWAPPED(it->ao), + copyswap(coord+j,it->dataptr, !PyArray_ISNOTSWAPPED(it->ao), it->ao); } PyArray_ITER_GOTO(mit->ait, coord); @@ -10587,15 +10919,16 @@ mit->dataptr = mit->subspace->dataptr; } else { - for(i=0; inumiter; i++) { + for (i = 0; i < mit->numiter; i++) { it = mit->iters[i]; if (it->size != 0) { PyArray_ITER_RESET(it); - copyswap(coord+i,it->dataptr, - !PyArray_ISNOTSWAPPED(it->ao), + copyswap(coord+i,it->dataptr, !PyArray_ISNOTSWAPPED(it->ao), it->ao); } - else coord[i] = 0; + else { + coord[i] = 0; + } } PyArray_ITER_GOTO(mit->ait, coord); mit->dataptr = mit->ait->dataptr; @@ -10603,9 +10936,10 @@ return; } -/* This function needs to update the state of the map iterator - and point mit->dataptr to the memory-location of the next object -*/ +/* + * This function needs to update the state of the map iterator + * and point mit->dataptr to the memory-location of the next object + */ static void PyArray_MapIterNext(PyArrayMapIterObject *mit) { @@ -10615,23 +10949,22 @@ PyArray_CopySwapFunc *copyswap; mit->index += 1; - if (mit->index >= mit->size) return; + if (mit->index >= mit->size) { + return; + } copyswap = mit->iters[0]->ao->descr->f->copyswap; /* Sub-space iteration */ if (mit->subspace != NULL) { PyArray_ITER_NEXT(mit->subspace); if (mit->subspace->index >= mit->subspace->size) { - /* reset coord to coordinates of - beginning of the subspace */ - memcpy(coord, mit->bscoord, - sizeof(intp)*mit->ait->ao->nd); + /* reset coord to coordinates of beginning of the subspace */ + memcpy(coord, mit->bscoord, sizeof(intp)*mit->ait->ao->nd); PyArray_ITER_RESET(mit->subspace); - for(i=0; inumiter; i++) { + for (i = 0; i < mit->numiter; i++) { it = mit->iters[i]; PyArray_ITER_NEXT(it); j = mit->iteraxes[i]; - copyswap(coord+j,it->dataptr, - !PyArray_ISNOTSWAPPED(it->ao), + copyswap(coord+j,it->dataptr, !PyArray_ISNOTSWAPPED(it->ao), it->ao); } PyArray_ITER_GOTO(mit->ait, coord); @@ -10640,7 +10973,7 @@ mit->dataptr = mit->subspace->dataptr; } else { - for(i=0; inumiter; i++) { + for (i = 0; i < mit->numiter; i++) { it = mit->iters[i]; PyArray_ITER_NEXT(it); copyswap(coord+i,it->dataptr, @@ -10653,26 +10986,26 @@ return; } -/* Bind a mapiteration to a particular array */ - -/* Determine if subspace iteration is necessary. If so, - 1) Fill in mit->iteraxes - 2) Create subspace iterator - 3) Update nd, dimensions, and size. - - Subspace iteration is necessary if: arr->nd > mit->numiter -*/ - -/* Need to check for index-errors somewhere. - - Let's do it at bind time and also convert all <0 values to >0 here - as well. -*/ +/* + * Bind a mapiteration to a particular array + * + * Determine if subspace iteration is necessary. If so, + * 1) Fill in mit->iteraxes + * 2) Create subspace iterator + * 3) Update nd, dimensions, and size. + * + * Subspace iteration is necessary if: arr->nd > mit->numiter + * + * Need to check for index-errors somewhere. + * + * Let's do it at bind time and also convert all <0 values to >0 here + * as well. + */ static void PyArray_MapIterBind(PyArrayMapIterObject *mit, PyArrayObject *arr) { int subnd; - PyObject *sub, *obj=NULL; + PyObject *sub, *obj = NULL; int i, j, n, curraxis, ellipexp, noellip; PyArrayIterObject *it; intp dimsize; @@ -10686,22 +11019,24 @@ } mit->ait = (PyArrayIterObject *)PyArray_IterNew((PyObject *)arr); - if (mit->ait == NULL) return; - + if (mit->ait == NULL) { + return; + } /* no subspace iteration needed. Finish up and Return */ if (subnd == 0) { n = arr->nd; - for(i=0; iiteraxes[i] = i; } goto finish; } - /* all indexing arrays have been converted to 0 - therefore we can extract the subspace with a simple - getitem call which will use view semantics - */ - /* But, be sure to do it with a true array. + /* + * all indexing arrays have been converted to 0 + * therefore we can extract the subspace with a simple + * getitem call which will use view semantics + * + * But, be sure to do it with a true array. */ if (PyArray_CheckExact(arr)) { sub = array_subscript_simple(arr, mit->indexobj); @@ -10709,54 +11044,65 @@ else { Py_INCREF(arr); obj = PyArray_EnsureArray((PyObject *)arr); - if (obj == NULL) goto fail; + if (obj == NULL) { + goto fail; + } sub = array_subscript_simple((PyArrayObject *)obj, mit->indexobj); Py_DECREF(obj); } - if (sub == NULL) goto fail; + if (sub == NULL) { + goto fail; + } mit->subspace = (PyArrayIterObject *)PyArray_IterNew(sub); Py_DECREF(sub); - if (mit->subspace == NULL) goto fail; - + if (mit->subspace == NULL) { + goto fail; + } /* Expand dimensions of result */ n = mit->subspace->ao->nd; - for(i=0; idimensions[mit->nd+i] = mit->subspace->ao->dimensions[i]; + } mit->nd += n; - /* Now, we still need to interpret the ellipsis and slice objects - to determine which axes the indexing arrays are referring to - */ + /* + * Now, we still need to interpret the ellipsis and slice objects + * to determine which axes the indexing arrays are referring to + */ n = PyTuple_GET_SIZE(mit->indexobj); - /* The number of dimensions an ellipsis takes up */ ellipexp = arr->nd - n + 1; - /* Now fill in iteraxes -- remember indexing arrays have been - converted to 0's in mit->indexobj */ + /* + * Now fill in iteraxes -- remember indexing arrays have been + * converted to 0's in mit->indexobj + */ curraxis = 0; j = 0; - noellip = 1; /* Only expand the first ellipsis */ + /* Only expand the first ellipsis */ + noellip = 1; memset(mit->bscoord, 0, sizeof(intp)*arr->nd); - for(i=0; iindexobj, i); - if (PyInt_Check(obj) || PyLong_Check(obj)) + if (PyInt_Check(obj) || PyLong_Check(obj)) { mit->iteraxes[j++] = curraxis++; + } else if (noellip && obj == Py_Ellipsis) { curraxis += ellipexp; noellip = 0; } else { - intp start=0; + intp start = 0; intp stop, step; - /* Should be slice object or - another Ellipsis */ + /* Should be slice object or another Ellipsis */ if (obj == Py_Ellipsis) { mit->bscoord[curraxis] = 0; } - else if (!PySlice_Check(obj) || \ + else if (!PySlice_Check(obj) || (slice_GetIndices((PySliceObject *)obj, arr->dimensions[curraxis], &start, &stop, &step, @@ -10773,6 +11119,7 @@ curraxis += 1; } } + finish: /* Here check the indexes (now that we have iteraxes) */ mit->size = PyArray_OverflowMultiplyList(mit->dimensions, mit->nd); @@ -10787,15 +11134,17 @@ goto fail; } - for(i=0; inumiter; i++) { + for (i = 0; i < mit->numiter; i++) { intp indval; it = mit->iters[i]; PyArray_ITER_RESET(it); dimsize = arr->dimensions[mit->iteraxes[i]]; - while(it->index < it->size) { + while (it->index < it->size) { indptr = ((intp *)it->dataptr); indval = *indptr; - if (indval < 0) indval += dimsize; + if (indval < 0) { + indval += dimsize; + } if (indval < 0 || indval >= dimsize) { PyErr_Format(PyExc_IndexError, "index (%d) out of range "\ @@ -10818,14 +11167,15 @@ return; } -/* This function takes a Boolean array and constructs index objects and - iterators as if nonzero(Bool) had been called -*/ +/* + * This function takes a Boolean array and constructs index objects and + * iterators as if nonzero(Bool) had been called + */ static int _nonzero_indices(PyObject *myBool, PyArrayIterObject **iters) { PyArray_Descr *typecode; - PyArrayObject *ba =NULL, *new=NULL; + PyArrayObject *ba = NULL, *new = NULL; int nd, j; intp size, i, count; Bool *ptr; @@ -10835,45 +11185,59 @@ typecode=PyArray_DescrFromType(PyArray_BOOL); ba = (PyArrayObject *)PyArray_FromAny(myBool, typecode, 0, 0, CARRAY, NULL); - if (ba == NULL) return -1; + if (ba == NULL) { + return -1; + } nd = ba->nd; - for(j=0; jdata; count = 0; /* pre-determine how many nonzero entries there are */ - for(i=0; iao->data; coords[j] = 0; dims_m1[j] = ba->dimensions[j]-1; } - ptr = (Bool *)ba->data; + if (count == 0) { + goto finish; + } - if (count == 0) goto finish; - - /* Loop through the Boolean array and copy coordinates - for non-zero entries */ - for(i=0; i=0; j--) { + for (j = nd - 1; j >= 0; j--) { if (coords[j] < dims_m1[j]) { coords[j]++; break; @@ -10889,7 +11253,7 @@ return nd; fail: - for(j=0; jiters[i] = NULL; + } mit->index = 0; mit->ait = NULL; mit->subspace = NULL; @@ -10932,7 +11298,9 @@ if (fancy == SOBJ_LISTTUP) { PyObject *newobj; newobj = PySequence_Tuple(indexobj); - if (newobj == NULL) goto fail; + if (newobj == NULL) { + goto fail; + } Py_DECREF(indexobj); indexobj = newobj; mit->indexobj = indexobj; @@ -10944,57 +11312,72 @@ #undef SOBJ_TOOMANY #undef SOBJ_LISTTUP - if (oned) return (PyObject *)mit; + if (oned) { + return (PyObject *)mit; + } + /* + * Must have some kind of fancy indexing if we are here + * indexobj is either a list, an arrayobject, or a tuple + * (with at least 1 list or arrayobject or Bool object) + */ - /* Must have some kind of fancy indexing if we are here */ - /* indexobj is either a list, an arrayobject, or a tuple - (with at least 1 list or arrayobject or Bool object), */ - /* convert all inputs to iterators */ - if (PyArray_Check(indexobj) && \ - (PyArray_TYPE(indexobj) == PyArray_BOOL)) { + if (PyArray_Check(indexobj) && (PyArray_TYPE(indexobj) == PyArray_BOOL)) { mit->numiter = _nonzero_indices(indexobj, mit->iters); - if (mit->numiter < 0) goto fail; + if (mit->numiter < 0) { + goto fail; + } mit->nd = 1; mit->dimensions[0] = mit->iters[0]->dims_m1[0]+1; Py_DECREF(mit->indexobj); mit->indexobj = PyTuple_New(mit->numiter); - if (mit->indexobj == NULL) goto fail; - for(i=0; inumiter; i++) { - PyTuple_SET_ITEM(mit->indexobj, i, - PyInt_FromLong(0)); + if (mit->indexobj == NULL) { + goto fail; } + for (i = 0; i < mit->numiter; i++) { + PyTuple_SET_ITEM(mit->indexobj, i, PyInt_FromLong(0)); + } } else if (PyArray_Check(indexobj) || !PyTuple_Check(indexobj)) { mit->numiter = 1; indtype = PyArray_DescrFromType(PyArray_INTP); arr = PyArray_FromAny(indexobj, indtype, 0, 0, FORCECAST, NULL); - if (arr == NULL) goto fail; + if (arr == NULL) { + goto fail; + } mit->iters[0] = (PyArrayIterObject *)PyArray_IterNew(arr); - if (mit->iters[0] == NULL) {Py_DECREF(arr); goto fail;} + if (mit->iters[0] == NULL) { + Py_DECREF(arr); + goto fail; + } mit->nd = PyArray_NDIM(arr); - memcpy(mit->dimensions,PyArray_DIMS(arr),mit->nd*sizeof(intp)); + memcpy(mit->dimensions, PyArray_DIMS(arr), mit->nd*sizeof(intp)); mit->size = PyArray_SIZE(arr); Py_DECREF(arr); Py_DECREF(mit->indexobj); mit->indexobj = Py_BuildValue("(N)", PyInt_FromLong(0)); } - else { /* must be a tuple */ + else { + /* must be a tuple */ PyObject *obj; PyArrayIterObject **iterp; PyObject *new; int numiters, j, n2; - /* Make a copy of the tuple -- we will be replacing - index objects with 0's */ + /* + * Make a copy of the tuple -- we will be replacing + * index objects with 0's + */ n = PyTuple_GET_SIZE(indexobj); n2 = n; new = PyTuple_New(n2); - if (new == NULL) goto fail; + if (new == NULL) { + goto fail; + } started = 0; nonindex = 0; j = 0; - for(i=0; iiters + mit->numiter; if ((numiters=_convert_obj(obj, iterp)) < 0) { @@ -11003,39 +11386,45 @@ } if (numiters > 0) { started = 1; - if (nonindex) mit->consec = 0; + if (nonindex) { + mit->consec = 0; + } mit->numiter += numiters; if (numiters == 1) { - PyTuple_SET_ITEM(new,j++, - PyInt_FromLong(0)); + PyTuple_SET_ITEM(new,j++, PyInt_FromLong(0)); } - else { /* we need to grow the - new indexing object and fill - it with 0s for each of the iterators - produced */ + else { + /* + * we need to grow the new indexing object and fill + * it with 0s for each of the iterators produced + */ int k; n2 += numiters - 1; - if (_PyTuple_Resize(&new, n2) < 0) + if (_PyTuple_Resize(&new, n2) < 0) { goto fail; - for(k=0;kindexobj); mit->indexobj = new; - /* Store the number of iterators actually converted */ - /* These will be mapped to actual axes at bind time */ - if (PyArray_Broadcast((PyArrayMultiIterObject *)mit) < 0) + /* + * Store the number of iterators actually converted + * These will be mapped to actual axes at bind time + */ + if (PyArray_Broadcast((PyArrayMultiIterObject *)mit) < 0) { goto fail; + } } return (PyObject *)mit; @@ -11053,96 +11442,94 @@ Py_XDECREF(mit->indexobj); Py_XDECREF(mit->ait); Py_XDECREF(mit->subspace); - for(i=0; inumiter; i++) + for (i = 0; i < mit->numiter; i++) { Py_XDECREF(mit->iters[i]); + } _pya_free(mit); } -/* The mapiter object must be created new each time. It does not work - to bind to a new array, and continue. - - This was the orginal intention, but currently that does not work. - Do not expose the MapIter_Type to Python. - - It's not very useful anyway, since mapiter(indexobj); mapiter.bind(a); - mapiter is equivalent to a[indexobj].flat but the latter gets to use - slice syntax. -*/ - +/* + * The mapiter object must be created new each time. It does not work + * to bind to a new array, and continue. + * + * This was the orginal intention, but currently that does not work. + * Do not expose the MapIter_Type to Python. + * + * It's not very useful anyway, since mapiter(indexobj); mapiter.bind(a); + * mapiter is equivalent to a[indexobj].flat but the latter gets to use + * slice syntax. + */ static PyTypeObject PyArrayMapIter_Type = { PyObject_HEAD_INIT(NULL) - 0, /* ob_size */ - "numpy.mapiter", /* tp_name */ - sizeof(PyArrayIterObject), /* tp_basicsize */ - 0, /* tp_itemsize */ + 0, /* ob_size */ + "numpy.mapiter", /* tp_name */ + sizeof(PyArrayIterObject), /* tp_basicsize */ + 0, /* tp_itemsize */ /* methods */ - (destructor)arraymapiter_dealloc, /* tp_dealloc */ - 0, /* tp_print */ - 0, /* tp_getattr */ - 0, /* tp_setattr */ - 0, /* tp_compare */ - 0, /* tp_repr */ - 0, /* tp_as_number */ - 0, /* tp_as_sequence */ - 0, /* tp_as_mapping */ - 0, /* tp_hash */ - 0, /* tp_call */ - 0, /* tp_str */ - 0, /* tp_getattro */ - 0, /* tp_setattro */ - 0, /* tp_as_buffer */ - Py_TPFLAGS_DEFAULT, /* tp_flags */ - 0, /* tp_doc */ - (traverseproc)0, /* tp_traverse */ - 0, /* tp_clear */ - 0, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ - 0, /* tp_iter */ - (iternextfunc)0, /* tp_iternext */ - 0, /* tp_methods */ - 0, /* tp_members */ - 0, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - (initproc)0, /* tp_init */ - 0, /* tp_alloc */ - 0, /* tp_new */ - 0, /* tp_free */ - 0, /* tp_is_gc */ - 0, /* tp_bases */ - 0, /* tp_mro */ - 0, /* tp_cache */ - 0, /* tp_subclasses */ - 0, /* tp_weaklist */ - 0, /* tp_del */ + (destructor)arraymapiter_dealloc, /* tp_dealloc */ + 0, /* tp_print */ + 0, /* tp_getattr */ + 0, /* tp_setattr */ + 0, /* tp_compare */ + 0, /* tp_repr */ + 0, /* tp_as_number */ + 0, /* tp_as_sequence */ + 0, /* tp_as_mapping */ + 0, /* tp_hash */ + 0, /* tp_call */ + 0, /* tp_str */ + 0, /* tp_getattro */ + 0, /* tp_setattro */ + 0, /* tp_as_buffer */ + Py_TPFLAGS_DEFAULT, /* tp_flags */ + 0, /* tp_doc */ + (traverseproc)0, /* tp_traverse */ + 0, /* tp_clear */ + 0, /* tp_richcompare */ + 0, /* tp_weaklistoffset */ + 0, /* tp_iter */ + (iternextfunc)0, /* tp_iternext */ + 0, /* tp_methods */ + 0, /* tp_members */ + 0, /* tp_getset */ + 0, /* tp_base */ + 0, /* tp_dict */ + 0, /* tp_descr_get */ + 0, /* tp_descr_set */ + 0, /* tp_dictoffset */ + (initproc)0, /* tp_init */ + 0, /* tp_alloc */ + 0, /* tp_new */ + 0, /* tp_free */ + 0, /* tp_is_gc */ + 0, /* tp_bases */ + 0, /* tp_mro */ + 0, /* tp_cache */ + 0, /* tp_subclasses */ + 0, /* tp_weaklist */ + 0, /* tp_del */ #ifdef COUNT_ALLOCS - /* these must be last and never explicitly initialized */ - 0, /* tp_allocs */ - 0, /* tp_frees */ - 0, /* tp_maxalloc */ - 0, /* tp_prev */ - 0, /* *tp_next */ + /* these must be last and never explicitly initialized */ + 0, /* tp_allocs */ + 0, /* tp_frees */ + 0, /* tp_maxalloc */ + 0, /* tp_prev */ + 0, /* *tp_next */ #endif - }; /** END of Subscript Iterator **/ -/* - NUMPY_API - Get MultiIterator from array of Python objects and any additional - - PyObject **mps -- array of PyObjects - int n - number of PyObjects in the array - int nadd - number of additional arrays to include in the - iterator. - - Returns a multi-iterator object. +/*NUMPY_API + * Get MultiIterator from array of Python objects and any additional + * + * PyObject **mps -- array of PyObjects + * int n - number of PyObjects in the array + * int nadd - number of additional arrays to include in the iterator. + * + * Returns a multi-iterator object. */ static PyObject * PyArray_MultiIterFromObjects(PyObject **mps, int n, int nadd, ...) @@ -11161,17 +11548,20 @@ "array objects (inclusive).", NPY_MAXARGS); return NULL; } - multi = _pya_malloc(sizeof(PyArrayMultiIterObject)); - if (multi == NULL) return PyErr_NoMemory(); + if (multi == NULL) { + return PyErr_NoMemory(); + } PyObject_Init((PyObject *)multi, &PyArrayMultiIter_Type); - for(i=0; iiters[i] = NULL; + for (i = 0; i < ntot; i++) { + multi->iters[i] = NULL; + } multi->numiter = ntot; multi->index = 0; va_start(va, nadd); - for(i=0; iiters[i] = (PyArrayIterObject *)PyArray_IterNew(arr); Py_DECREF(arr); } } - va_end(va); - if (!err && PyArray_Broadcast(multi) < 0) err=1; - + if (!err && PyArray_Broadcast(multi) < 0) { + err = 1; + } if (err) { Py_DECREF(multi); return NULL; } - PyArray_MultiIter_RESET(multi); - - return (PyObject *)multi; + return (PyObject *)multi; } /*NUMPY_API - Get MultiIterator, -*/ + * Get MultiIterator, + */ static PyObject * PyArray_MultiIterNew(int n, ...) { @@ -11213,7 +11602,7 @@ PyObject *current; PyObject *arr; - int i, err=0; + int i, err = 0; if (n < 2 || n > NPY_MAXARGS) { PyErr_Format(PyExc_ValueError, @@ -11225,37 +11614,40 @@ /* fprintf(stderr, "multi new...");*/ multi = _pya_malloc(sizeof(PyArrayMultiIterObject)); - if (multi == NULL) return PyErr_NoMemory(); + if (multi == NULL) { + return PyErr_NoMemory(); + } PyObject_Init((PyObject *)multi, &PyArrayMultiIter_Type); - for(i=0; iiters[i] = NULL; + for (i = 0; i < n; i++) { + multi->iters[i] = NULL; + } multi->numiter = n; multi->index = 0; va_start(va, n); - for(i=0; iiters[i] = (PyArrayIterObject *)PyArray_IterNew(arr); Py_DECREF(arr); } } - va_end(va); - if (!err && PyArray_Broadcast(multi) < 0) err=1; - + if (!err && PyArray_Broadcast(multi) < 0) { + err = 1; + } if (err) { Py_DECREF(multi); return NULL; } - PyArray_MultiIter_RESET(multi); - return (PyObject *)multi; } @@ -11275,7 +11667,9 @@ n = PyTuple_Size(args); if (n < 2 || n > NPY_MAXARGS) { - if (PyErr_Occurred()) return NULL; + if (PyErr_Occurred()) { + return NULL; + } PyErr_Format(PyExc_ValueError, "Need at least two and fewer than (%d) " \ "array objects.", NPY_MAXARGS); @@ -11283,23 +11677,31 @@ } multi = _pya_malloc(sizeof(PyArrayMultiIterObject)); - if (multi == NULL) return PyErr_NoMemory(); + if (multi == NULL) { + return PyErr_NoMemory(); + } PyObject_Init((PyObject *)multi, &PyArrayMultiIter_Type); multi->numiter = n; multi->index = 0; - for(i=0; iiters[i] = NULL; - for(i=0; iiters[i] = NULL; + } + for (i = 0; i < n; i++) { arr = PyArray_FromAny(PyTuple_GET_ITEM(args, i), NULL, 0, 0, 0, NULL); - if (arr == NULL) goto fail; - if ((multi->iters[i] = \ - (PyArrayIterObject *)PyArray_IterNew(arr))==NULL) + if (arr == NULL) { goto fail; + } + if ((multi->iters[i] = (PyArrayIterObject *)PyArray_IterNew(arr)) + == NULL) { + goto fail; + } Py_DECREF(arr); } - if (PyArray_Broadcast(multi) < 0) goto fail; + if (PyArray_Broadcast(multi) < 0) { + goto fail; + } PyArray_MultiIter_RESET(multi); - return (PyObject *)multi; fail: @@ -11315,9 +11717,11 @@ n = multi->numiter; ret = PyTuple_New(n); - if (ret == NULL) return NULL; + if (ret == NULL) { + return NULL; + } if (multi->index < multi->size) { - for(i=0; i < n; i++) { + for (i = 0; i < n; i++) { PyArrayIterObject *it=multi->iters[i]; PyTuple_SET_ITEM(ret, i, PyArray_ToScalar(it->dataptr, it->ao)); @@ -11334,8 +11738,9 @@ { int i; - for(i=0; inumiter; i++) + for (i = 0; i < multi->numiter; i++) { Py_XDECREF(multi->iters[i]); + } multi->ob_type->tp_free((PyObject *)multi); } @@ -11345,10 +11750,12 @@ #if SIZEOF_INTP <= SIZEOF_LONG return PyInt_FromLong((long) self->size); #else - if (self->size < MAX_LONG) + if (self->size < MAX_LONG) { return PyInt_FromLong((long) self->size); - else + } + else { return PyLong_FromLongLong((longlong) self->size); + } #endif } @@ -11358,10 +11765,12 @@ #if SIZEOF_INTP <= SIZEOF_LONG return PyInt_FromLong((long) self->index); #else - if (self->size < MAX_LONG) + if (self->size < MAX_LONG) { return PyInt_FromLong((long) self->index); - else + } + else { return PyLong_FromLongLong((longlong) self->index); + } #endif } @@ -11376,10 +11785,13 @@ { PyObject *res; int i, n; + n = self->numiter; res = PyTuple_New(n); - if (res == NULL) return res; - for(i=0; iiters[i]); PyTuple_SET_ITEM(res, i, (PyObject *)self->iters[i]); } @@ -11412,8 +11824,9 @@ static PyObject * arraymultiter_reset(PyArrayMultiIterObject *self, PyObject *args) { - if (!PyArg_ParseTuple(args, "")) return NULL; - + if (!PyArg_ParseTuple(args, "")) { + return NULL; + } PyArray_MultiIter_RESET(self); Py_INCREF(Py_None); return Py_None; @@ -11426,61 +11839,61 @@ static PyTypeObject PyArrayMultiIter_Type = { PyObject_HEAD_INIT(NULL) - 0, /* ob_size */ - "numpy.broadcast", /* tp_name */ - sizeof(PyArrayMultiIterObject), /* tp_basicsize */ - 0, /* tp_itemsize */ + 0, /* ob_size */ + "numpy.broadcast", /* tp_name */ + sizeof(PyArrayMultiIterObject), /* tp_basicsize */ + 0, /* tp_itemsize */ /* methods */ - (destructor)arraymultiter_dealloc, /* tp_dealloc */ - 0, /* tp_print */ - 0, /* tp_getattr */ - 0, /* tp_setattr */ - 0, /* tp_compare */ - 0, /* tp_repr */ - 0, /* tp_as_number */ - 0, /* tp_as_sequence */ - 0, /* tp_as_mapping */ - 0, /* tp_hash */ - 0, /* tp_call */ - 0, /* tp_str */ - 0, /* tp_getattro */ - 0, /* tp_setattro */ - 0, /* tp_as_buffer */ - Py_TPFLAGS_DEFAULT, /* tp_flags */ - 0, /* tp_doc */ - 0, /* tp_traverse */ - 0, /* tp_clear */ - 0, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ - 0, /* tp_iter */ - (iternextfunc)arraymultiter_next, /* tp_iternext */ - arraymultiter_methods, /* tp_methods */ - arraymultiter_members, /* tp_members */ - arraymultiter_getsetlist, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - (initproc)0, /* tp_init */ - 0, /* tp_alloc */ - arraymultiter_new, /* tp_new */ - 0, /* tp_free */ - 0, /* tp_is_gc */ - 0, /* tp_bases */ - 0, /* tp_mro */ - 0, /* tp_cache */ - 0, /* tp_subclasses */ - 0, /* tp_weaklist */ - 0, /* tp_del */ + (destructor)arraymultiter_dealloc, /* tp_dealloc */ + 0, /* tp_print */ + 0, /* tp_getattr */ + 0, /* tp_setattr */ + 0, /* tp_compare */ + 0, /* tp_repr */ + 0, /* tp_as_number */ + 0, /* tp_as_sequence */ + 0, /* tp_as_mapping */ + 0, /* tp_hash */ + 0, /* tp_call */ + 0, /* tp_str */ + 0, /* tp_getattro */ + 0, /* tp_setattro */ + 0, /* tp_as_buffer */ + Py_TPFLAGS_DEFAULT, /* tp_flags */ + 0, /* tp_doc */ + 0, /* tp_traverse */ + 0, /* tp_clear */ + 0, /* tp_richcompare */ + 0, /* tp_weaklistoffset */ + 0, /* tp_iter */ + (iternextfunc)arraymultiter_next, /* tp_iternext */ + arraymultiter_methods, /* tp_methods */ + arraymultiter_members, /* tp_members */ + arraymultiter_getsetlist, /* tp_getset */ + 0, /* tp_base */ + 0, /* tp_dict */ + 0, /* tp_descr_get */ + 0, /* tp_descr_set */ + 0, /* tp_dictoffset */ + (initproc)0, /* tp_init */ + 0, /* tp_alloc */ + arraymultiter_new, /* tp_new */ + 0, /* tp_free */ + 0, /* tp_is_gc */ + 0, /* tp_bases */ + 0, /* tp_mro */ + 0, /* tp_cache */ + 0, /* tp_subclasses */ + 0, /* tp_weaklist */ + 0, /* tp_del */ #ifdef COUNT_ALLOCS - /* these must be last and never explicitly initialized */ - 0, /* tp_allocs */ - 0, /* tp_frees */ - 0, /* tp_maxalloc */ - 0, /* tp_prev */ - 0, /* *tp_next */ + /* these must be last and never explicitly initialized */ + 0, /* tp_allocs */ + 0, /* tp_frees */ + 0, /* tp_maxalloc */ + 0, /* tp_prev */ + 0, /* *tp_next */ #endif }; @@ -11497,21 +11910,23 @@ return new; } -/*** Array Descr Objects for dynamic types **/ +/** Array Descr Objects for dynamic types **/ -/** There are some statically-defined PyArray_Descr objects corresponding - to the basic built-in types. - These can and should be DECREF'd and INCREF'd as appropriate, anyway. - If a mistake is made in reference counting, deallocation on these - builtins will be attempted leading to problems. +/* + * There are some statically-defined PyArray_Descr objects corresponding + * to the basic built-in types. + * These can and should be DECREF'd and INCREF'd as appropriate, anyway. + * If a mistake is made in reference counting, deallocation on these + * builtins will be attempted leading to problems. + * + * This let's us deal with all PyArray_Descr objects using reference + * counting (regardless of whether they are statically or dynamically + * allocated). + */ - This let's us deal with all PyArray_Descr objects using reference - counting (regardless of whether they are statically or dynamically - allocated). -**/ - -/* base cannot be NULL */ -/*NUMPY_API*/ +/*NUMPY_API + * base cannot be NULL + */ static PyArray_Descr * PyArray_DescrNew(PyArray_Descr *base) { @@ -11540,9 +11955,10 @@ return new; } -/* should never be called for builtin-types unless - there is a reference-count problem -*/ +/* + * should never be called for builtin-types unless + * there is a reference-count problem + */ static void arraydescr_dealloc(PyArray_Descr *self) { @@ -11565,20 +11981,29 @@ self->ob_type->tp_free((PyObject *)self); } -/* we need to be careful about setting attributes because these - objects are pointed to by arrays that depend on them for interpreting - data. Currently no attributes of data-type objects can be set - directly except names. -*/ +/* + * we need to be careful about setting attributes because these + * objects are pointed to by arrays that depend on them for interpreting + * data. Currently no attributes of data-type objects can be set + * directly except names. + */ static PyMemberDef arraydescr_members[] = { - {"type", T_OBJECT, offsetof(PyArray_Descr, typeobj), RO, NULL}, - {"kind", T_CHAR, offsetof(PyArray_Descr, kind), RO, NULL}, - {"char", T_CHAR, offsetof(PyArray_Descr, type), RO, NULL}, - {"num", T_INT, offsetof(PyArray_Descr, type_num), RO, NULL}, - {"byteorder", T_CHAR, offsetof(PyArray_Descr, byteorder), RO, NULL}, - {"itemsize", T_INT, offsetof(PyArray_Descr, elsize), RO, NULL}, - {"alignment", T_INT, offsetof(PyArray_Descr, alignment), RO, NULL}, - {"flags", T_UBYTE, offsetof(PyArray_Descr, hasobject), RO, NULL}, + {"type", + T_OBJECT, offsetof(PyArray_Descr, typeobj), RO, NULL}, + {"kind", + T_CHAR, offsetof(PyArray_Descr, kind), RO, NULL}, + {"char", + T_CHAR, offsetof(PyArray_Descr, type), RO, NULL}, + {"num", + T_INT, offsetof(PyArray_Descr, type_num), RO, NULL}, + {"byteorder", + T_CHAR, offsetof(PyArray_Descr, byteorder), RO, NULL}, + {"itemsize", + T_INT, offsetof(PyArray_Descr, elsize), RO, NULL}, + {"alignment", + T_INT, offsetof(PyArray_Descr, alignment), RO, NULL}, + {"flags", + T_UBYTE, offsetof(PyArray_Descr, hasobject), RO, NULL}, {NULL, 0, 0, 0, NULL}, }; @@ -11596,15 +12021,16 @@ static PyObject * arraydescr_protocol_typestr_get(PyArray_Descr *self) { - char basic_=self->kind; + char basic_ = self->kind; char endian = self->byteorder; - int size=self->elsize; + int size = self->elsize; if (endian == '=') { endian = '<'; - if (!PyArray_IsNativeByteOrder(endian)) endian = '>'; + if (!PyArray_IsNativeByteOrder(endian)) { + endian = '>'; + } } - if (self->type_num == PyArray_UNICODE) { size >>= 2; } @@ -11618,7 +12044,8 @@ PyTypeObject *typeobj = self->typeobj; PyObject *res; char *s; - static int prefix_len=0; + /* fixme: not reentrant */ + static int prefix_len = 0; if (PyTypeNum_ISUSERDEF(self->type_num)) { s = strrchr(typeobj->tp_name, '.'); @@ -11626,17 +12053,18 @@ res = PyString_FromString(typeobj->tp_name); } else { - res = PyString_FromStringAndSize(s+1, strlen(s)-1); + res = PyString_FromStringAndSize(s + 1, strlen(s) - 1); } return res; } else { - if (prefix_len == 0) + if (prefix_len == 0) { prefix_len = strlen("numpy."); - + } len = strlen(typeobj->tp_name); - if (*(typeobj->tp_name + (len-1)) == '_') - len-=1; + if (*(typeobj->tp_name + (len-1)) == '_') { + len -= 1; + } len -= prefix_len; res = PyString_FromStringAndSize(typeobj->tp_name+prefix_len, len); } @@ -11681,35 +12109,45 @@ if (self->names == NULL) { /* get default */ dobj = PyTuple_New(2); - if (dobj == NULL) return NULL; + if (dobj == NULL) { + return NULL; + } PyTuple_SET_ITEM(dobj, 0, PyString_FromString("")); - PyTuple_SET_ITEM(dobj, 1, \ - arraydescr_protocol_typestr_get(self)); + PyTuple_SET_ITEM(dobj, 1, arraydescr_protocol_typestr_get(self)); res = PyList_New(1); - if (res == NULL) {Py_DECREF(dobj); return NULL;} + if (res == NULL) { + Py_DECREF(dobj); + return NULL; + } PyList_SET_ITEM(res, 0, dobj); return res; } _numpy_internal = PyImport_ImportModule("numpy.core._internal"); - if (_numpy_internal == NULL) return NULL; - res = PyObject_CallMethod(_numpy_internal, "_array_descr", - "O", self); + if (_numpy_internal == NULL) { + return NULL; + } + res = PyObject_CallMethod(_numpy_internal, "_array_descr", "O", self); Py_DECREF(_numpy_internal); return res; } -/* returns 1 for a builtin type - and 2 for a user-defined data-type descriptor - return 0 if neither (i.e. it's a copy of one) -*/ +/* + * returns 1 for a builtin type + * and 2 for a user-defined data-type descriptor + * return 0 if neither (i.e. it's a copy of one) + */ static PyObject * arraydescr_isbuiltin_get(PyArray_Descr *self) { long val; val = 0; - if (self->fields == Py_None) val = 1; - if (PyTypeNum_ISUSERDEF(self->type_num)) val = 2; + if (self->fields == Py_None) { + val = 1; + } + if (PyTypeNum_ISUSERDEF(self->type_num)) { + val = 2; + } return PyInt_FromLong(val); } @@ -11720,34 +12158,42 @@ return PyArray_ISNBO(self->byteorder); } else { - PyObject *key, *value, *title=NULL; + PyObject *key, *value, *title = NULL; PyArray_Descr *new; int offset; - Py_ssize_t pos=0; - while(PyDict_Next(self->fields, &pos, &key, &value)) { - if NPY_TITLE_KEY(key, value) continue; - if (!PyArg_ParseTuple(value, "Oi|O", &new, &offset, - &title)) return -1; - if (!_arraydescr_isnative(new)) return 0; + Py_ssize_t pos = 0; + while (PyDict_Next(self->fields, &pos, &key, &value)) { + if NPY_TITLE_KEY(key, value) { + continue; + } + if (!PyArg_ParseTuple(value, "Oi|O", &new, &offset, &title)) { + return -1; + } + if (!_arraydescr_isnative(new)) { + return 0; + } } } return 1; } -/* return Py_True if this data-type descriptor - has native byteorder if no fields are defined - - or if all sub-fields have native-byteorder if - fields are defined -*/ +/* + * return Py_True if this data-type descriptor + * has native byteorder if no fields are defined + * + * or if all sub-fields have native-byteorder if + * fields are defined + */ static PyObject * arraydescr_isnative_get(PyArray_Descr *self) { PyObject *ret; int retval; retval = _arraydescr_isnative(self); - if (retval == -1) return NULL; - ret = (retval ? Py_True : Py_False); + if (retval == -1) { + return NULL; + } + ret = retval ? Py_True : Py_False; Py_INCREF(ret); return ret; } @@ -11766,10 +12212,12 @@ arraydescr_hasobject_get(PyArray_Descr *self) { PyObject *res; - if (PyDataType_FLAGCHK(self, NPY_ITEM_HASOBJECT)) + if (PyDataType_FLAGCHK(self, NPY_ITEM_HASOBJECT)) { res = Py_True; - else + } + else { res = Py_False; + } Py_INCREF(res); return res; } @@ -11803,9 +12251,9 @@ return -1; } /* Make sure all entries are strings */ - for(i=0; ifields == Py_None) { descr = PyArray_DescrNew(conv); @@ -11913,9 +12362,11 @@ static PyObject * arraydescr_reduce(PyArray_Descr *self, PyObject *NPY_UNUSED(args)) { - /* version number of this pickle type. Increment if we need to - change the format. Be sure to handle the old versions in - arraydescr_setstate. */ + /* + * version number of this pickle type. Increment if we need to + * change the format. Be sure to handle the old versions in + * arraydescr_setstate. + */ const int version = 3; PyObject *ret, *mod, *obj; PyObject *state; @@ -11923,15 +12374,23 @@ int elsize, alignment; ret = PyTuple_New(3); - if (ret == NULL) return NULL; + if (ret == NULL) { + return NULL; + } mod = PyImport_ImportModule("numpy.core.multiarray"); - if (mod == NULL) {Py_DECREF(ret); return NULL;} + if (mod == NULL) { + Py_DECREF(ret); + return NULL; + } obj = PyObject_GetAttrString(mod, "dtype"); Py_DECREF(mod); - if (obj == NULL) {Py_DECREF(ret); return NULL;} + if (obj == NULL) { + Py_DECREF(ret); + return NULL; + } PyTuple_SET_ITEM(ret, 0, obj); - if (PyTypeNum_ISUSERDEF(self->type_num) || \ - ((self->type_num == PyArray_VOID && \ + if (PyTypeNum_ISUSERDEF(self->type_num) || + ((self->type_num == PyArray_VOID && self->typeobj != &PyVoidArrType_Type))) { obj = (PyObject *)self->typeobj; Py_INCREF(obj); @@ -11945,12 +12404,16 @@ } PyTuple_SET_ITEM(ret, 1, Py_BuildValue("(Nii)", obj, 0, 1)); - /* Now return the state which is at least - byteorder, subarray, and fields */ + /* + * Now return the state which is at least byteorder, + * subarray, and fields + */ endian = self->byteorder; if (endian == '=') { endian = '<'; - if (!PyArray_IsNativeByteOrder(endian)) endian = '>'; + if (!PyArray_IsNativeByteOrder(endian)) { + endian = '>'; + } } state = PyTuple_New(8); PyTuple_SET_ITEM(state, 0, PyInt_FromLong(version)); @@ -11974,12 +12437,13 @@ elsize = self->elsize; alignment = self->alignment; } - else {elsize = -1; alignment = -1;} - + else { + elsize = -1; + alignment = -1; + } PyTuple_SET_ITEM(state, 5, PyInt_FromLong(elsize)); PyTuple_SET_ITEM(state, 6, PyInt_FromLong(alignment)); PyTuple_SET_ITEM(state, 7, PyInt_FromLong(self->hasobject)); - PyTuple_SET_ITEM(ret, 2, state); return ret; } @@ -11991,17 +12455,20 @@ _descr_find_object(PyArray_Descr *self) { if (self->hasobject || self->type_num == PyArray_OBJECT || - self->kind == 'O') + self->kind == 'O') { return NPY_OBJECT_DTYPE_FLAGS; + } if (PyDescr_HASFIELDS(self)) { - PyObject *key, *value, *title=NULL; + PyObject *key, *value, *title = NULL; PyArray_Descr *new; int offset; - Py_ssize_t pos=0; + Py_ssize_t pos = 0; + while (PyDict_Next(self->fields, &pos, &key, &value)) { - if NPY_TITLE_KEY(key, value) continue; - if (!PyArg_ParseTuple(value, "Oi|O", &new, &offset, - &title)) { + if NPY_TITLE_KEY(key, value) { + continue; + } + if (!PyArg_ParseTuple(value, "Oi|O", &new, &offset, &title)) { PyErr_Clear(); return 0; } @@ -12014,22 +12481,24 @@ return 0; } -/* state is at least byteorder, subarray, and fields but could include elsize - and alignment for EXTENDED arrays -*/ - +/* + * state is at least byteorder, subarray, and fields but could include elsize + * and alignment for EXTENDED arrays + */ static PyObject * arraydescr_setstate(PyArray_Descr *self, PyObject *args) { int elsize = -1, alignment = -1; int version = 3; char endian; - PyObject *subarray, *fields, *names=NULL; + PyObject *subarray, *fields, *names = NULL; int incref_names = 1; - int dtypeflags=0; + int dtypeflags = 0; - if (self->fields == Py_None) {Py_INCREF(Py_None); return Py_None;} - + if (self->fields == Py_None) { + Py_INCREF(Py_None); + return Py_None; + } if (PyTuple_GET_SIZE(args) != 1 || !(PyTuple_Check(PyTuple_GET_ITEM(args, 0)))) { PyErr_BadInternalCall(); @@ -12038,40 +12507,42 @@ switch (PyTuple_GET_SIZE(PyTuple_GET_ITEM(args,0))) { case 8: if (!PyArg_ParseTuple(args, "(icOOOiii)", &version, &endian, - &subarray, &names, &fields, &elsize, - &alignment, &dtypeflags)) { + &subarray, &names, &fields, &elsize, + &alignment, &dtypeflags)) { return NULL; } break; case 7: if (!PyArg_ParseTuple(args, "(icOOOii)", &version, &endian, - &subarray, &names, &fields, &elsize, - &alignment)) { + &subarray, &names, &fields, &elsize, + &alignment)) { return NULL; } break; case 6: if (!PyArg_ParseTuple(args, "(icOOii)", &version, - &endian, &subarray, &fields, - &elsize, &alignment)) { + &endian, &subarray, &fields, + &elsize, &alignment)) { PyErr_Clear(); } break; case 5: version = 0; if (!PyArg_ParseTuple(args, "(cOOii)", - &endian, &subarray, &fields, &elsize, - &alignment)) { + &endian, &subarray, &fields, &elsize, + &alignment)) { return NULL; } break; default: - version = -1; /* raise an error */ + /* raise an error */ + version = -1; } - /* If we ever need another pickle format, increment the version - number. But we should still be able to handle the old versions. - */ + /* + * If we ever need another pickle format, increment the version + * number. But we should still be able to handle the old versions. + */ if (version < 0 || version > 3) { PyErr_Format(PyExc_ValueError, "can't handle version %d of numpy.dtype pickle", @@ -12084,7 +12555,9 @@ PyObject *key, *list; key = PyInt_FromLong(-1); list = PyDict_GetItem(fields, key); - if (!list) return NULL; + if (!list) { + return NULL; + } Py_INCREF(list); names = list; PyDict_DelItem(fields, key); @@ -12096,16 +12569,16 @@ } - if ((fields == Py_None && names != Py_None) || \ + if ((fields == Py_None && names != Py_None) || (names == Py_None && fields != Py_None)) { PyErr_Format(PyExc_ValueError, "inconsistent fields and names"); return NULL; } - if (endian != '|' && - PyArray_IsNativeByteOrder(endian)) endian = '='; - + if (endian != '|' && PyArray_IsNativeByteOrder(endian)) { + endian = '='; + } self->byteorder = endian; if (self->subarray) { Py_XDECREF(self->subarray->base); @@ -12128,8 +12601,9 @@ Py_INCREF(fields); Py_XDECREF(self->names); self->names = names; - if (incref_names) + if (incref_names) { Py_INCREF(names); + } } if (PyTypeNum_ISEXTENDED(self->type_num)) { @@ -12146,23 +12620,23 @@ } -/* returns a copy of the PyArray_Descr structure with the byteorder - altered: - no arguments: The byteorder is swapped (in all subfields as well) - single argument: The byteorder is forced to the given state - (in all subfields as well) - - Valid states: ('big', '>') or ('little' or '<') - ('native', or '=') - - If a descr structure with | is encountered it's own - byte-order is not changed but any fields are: -*/ - -/*NUMPY_API - Deep bytorder change of a data-type descriptor - *** Leaves reference count of self unchanged --- does not DECREF self *** - */ + /*NUMPY_API + * returns a copy of the PyArray_Descr structure with the byteorder + * altered: + * no arguments: The byteorder is swapped (in all subfields as well) + * single argument: The byteorder is forced to the given state + * (in all subfields as well) + * + * Valid states: ('big', '>') or ('little' or '<') + * ('native', or '=') + * + * If a descr structure with | is encountered it's own + * byte-order is not changed but any fields are: + * + * + * Deep bytorder change of a data-type descriptor + * *** Leaves reference count of self unchanged --- does not DECREF self *** + */ static PyArray_Descr * PyArray_DescrNewByteorder(PyArray_Descr *self, char newendian) { @@ -12172,9 +12646,14 @@ new = PyArray_DescrNew(self); endian = new->byteorder; if (endian != PyArray_IGNORE) { - if (newendian == PyArray_SWAP) { /* swap byteorder */ - if PyArray_ISNBO(endian) endian = PyArray_OPPBYTE; - else endian = PyArray_NATBYTE; + if (newendian == PyArray_SWAP) { + /* swap byteorder */ + if PyArray_ISNBO(endian) { + endian = PyArray_OPPBYTE; + } + else { + endian = PyArray_NATBYTE; + } new->byteorder = endian; } else if (newendian != PyArray_IGNORE) { @@ -12189,28 +12668,31 @@ PyArray_Descr *newdescr; Py_ssize_t pos = 0; int len, i; + newfields = PyDict_New(); - /* make new dictionary with replaced */ - /* PyArray_Descr Objects */ + /* make new dictionary with replaced PyArray_Descr Objects */ while(PyDict_Next(self->fields, &pos, &key, &value)) { - if NPY_TITLE_KEY(key, value) continue; - if (!PyString_Check(key) || \ - !PyTuple_Check(value) || \ - ((len=PyTuple_GET_SIZE(value)) < 2)) + if NPY_TITLE_KEY(key, value) { continue; - + } + if (!PyString_Check(key) || + !PyTuple_Check(value) || + ((len=PyTuple_GET_SIZE(value)) < 2)) { + continue; + } old = PyTuple_GET_ITEM(value, 0); - if (!PyArray_DescrCheck(old)) continue; - newdescr = PyArray_DescrNewByteorder \ - ((PyArray_Descr *)old, newendian); + if (!PyArray_DescrCheck(old)) { + continue; + } + newdescr = PyArray_DescrNewByteorder( + (PyArray_Descr *)old, newendian); if (newdescr == NULL) { Py_DECREF(newfields); Py_DECREF(new); return NULL; } newvalue = PyTuple_New(len); - PyTuple_SET_ITEM(newvalue, 0, \ - (PyObject *)newdescr); - for(i=1; isubarray) { Py_DECREF(new->subarray->base); - new->subarray->base = PyArray_DescrNewByteorder \ + new->subarray->base = PyArray_DescrNewByteorder (self->subarray->base, newendian); } return new; @@ -12236,19 +12718,20 @@ char endian=PyArray_SWAP; if (!PyArg_ParseTuple(args, "|O&", PyArray_ByteorderConverter, - &endian)) return NULL; - + &endian)) { + return NULL; + } return (PyObject *)PyArray_DescrNewByteorder(self, endian); } static PyMethodDef arraydescr_methods[] = { /* for pickling */ - {"__reduce__", (PyCFunction)arraydescr_reduce, METH_VARARGS, - NULL}, - {"__setstate__", (PyCFunction)arraydescr_setstate, METH_VARARGS, - NULL}, - {"newbyteorder", (PyCFunction)arraydescr_newbyteorder, METH_VARARGS, - NULL}, + {"__reduce__", + (PyCFunction)arraydescr_reduce, METH_VARARGS, NULL}, + {"__setstate__", + (PyCFunction)arraydescr_setstate, METH_VARARGS, NULL}, + {"newbyteorder", + (PyCFunction)arraydescr_newbyteorder, METH_VARARGS, NULL}, {NULL, NULL, 0, NULL} /* sentinel */ }; @@ -12264,7 +12747,9 @@ sub = PyString_FromString(""); PyErr_Clear(); } - else sub = PyObject_Str(lst); + else { + sub = PyObject_Str(lst); + } Py_XDECREF(lst); if (self->type_num != PyArray_VOID) { PyObject *p; @@ -12335,11 +12820,12 @@ static PyObject * arraydescr_richcompare(PyArray_Descr *self, PyObject *other, int cmp_op) { - PyArray_Descr *new=NULL; + PyArray_Descr *new = NULL; PyObject *result = Py_NotImplemented; if (!PyArray_DescrCheck(other)) { - if (PyArray_DescrConverter(other, &new) == PY_FAIL) + if (PyArray_DescrConverter(other, &new) == PY_FAIL) { return NULL; + } } else { new = (PyArray_Descr *)other; @@ -12347,22 +12833,28 @@ } switch (cmp_op) { case Py_LT: - if (!PyArray_EquivTypes(self, new) && PyArray_CanCastTo(self, new)) + if (!PyArray_EquivTypes(self, new) && PyArray_CanCastTo(self, new)) { result = Py_True; - else + } + else { result = Py_False; + } break; case Py_LE: - if (PyArray_CanCastTo(self, new)) + if (PyArray_CanCastTo(self, new)) { result = Py_True; - else + } + else { result = Py_False; + } break; case Py_EQ: - if (PyArray_EquivTypes(self, new)) + if (PyArray_EquivTypes(self, new)) { result = Py_True; - else + } + else { result = Py_False; + } break; case Py_NE: if (PyArray_EquivTypes(self, new)) @@ -12371,16 +12863,20 @@ result = Py_True; break; case Py_GT: - if (!PyArray_EquivTypes(self, new) && PyArray_CanCastTo(new, self)) + if (!PyArray_EquivTypes(self, new) && PyArray_CanCastTo(new, self)) { result = Py_True; - else + } + else { result = Py_False; + } break; case Py_GE: - if (PyArray_CanCastTo(new, self)) + if (PyArray_CanCastTo(new, self)) { result = Py_True; - else + } + else { result = Py_False; + } break; default: result = Py_NotImplemented; @@ -12398,12 +12894,14 @@ static Py_ssize_t descr_length(PyObject *self0) { - PyArray_Descr *self = (PyArray_Descr *)self0; - if (self->names) + if (self->names) { return PyTuple_GET_SIZE(self->names); - else return 0; + } + else { + return 0; + } } static PyObject * @@ -12411,7 +12909,7 @@ { PyObject *tup; PyArray_Descr *new; - if (length < 0) + if (length < 0) { return PyErr_Format(PyExc_ValueError, #if (PY_VERSION_HEX < 0x02050000) "Array length must be >= 0, not %d", @@ -12419,8 +12917,11 @@ "Array length must be >= 0, not %zd", #endif length); + } tup = Py_BuildValue("O" NPY_SSIZE_T_PYFMT, self, length); - if (tup == NULL) return NULL; + if (tup == NULL) { + return NULL; + } PyArray_DescrConverter(tup, &new); Py_DECREF(tup); return (PyObject *)new; @@ -12432,11 +12933,9 @@ if (self->names) { if (PyString_Check(op) || PyUnicode_Check(op)) { - PyObject *obj; - obj = PyDict_GetItem(self->fields, op); + PyObject *obj = PyDict_GetItem(self->fields, op); if (obj != NULL) { - PyObject *descr; - descr = PyTuple_GET_ITEM(obj, 0); + PyObject *descr = PyTuple_GET_ITEM(obj, 0); Py_INCREF(descr); return descr; } @@ -12448,12 +12947,12 @@ } else { PyObject *name; - int value; - value = PyArray_PyIntAsInt(op); + int value = PyArray_PyIntAsInt(op); if (!PyErr_Occurred()) { - int size; - size = PyTuple_GET_SIZE(self->names); - if (value < 0) value += size; + int size = PyTuple_GET_SIZE(self->names); + if (value < 0) { + value += size; + } if (value < 0 || value >= size) { PyErr_Format(PyExc_IndexError, "0<=index<%d not %d", @@ -12484,17 +12983,17 @@ (binaryfunc)NULL, descr_repeat, NULL, NULL, - NULL, /* sq_ass_item */ - NULL, /* ssizessizeobjargproc sq_ass_slice */ - 0, /* sq_contains */ - 0, /* sq_inplace_concat */ - 0, /* sq_inplace_repeat */ + NULL, /* sq_ass_item */ + NULL, /* ssizessizeobjargproc sq_ass_slice */ + 0, /* sq_contains */ + 0, /* sq_inplace_concat */ + 0, /* sq_inplace_repeat */ }; static PyMappingMethods descr_as_mapping = { - descr_length, /*mp_length*/ - (binaryfunc)descr_subscript, /*mp_subscript*/ - (objobjargproc)NULL, /*mp_ass_subscript*/ + descr_length, /* mp_length*/ + (binaryfunc)descr_subscript, /* mp_subscript*/ + (objobjargproc)NULL, /* mp_ass_subscript*/ }; /****************** End of Mapping Protocol ******************************/ @@ -12502,70 +13001,71 @@ static PyTypeObject PyArrayDescr_Type = { PyObject_HEAD_INIT(NULL) - 0, /* ob_size */ - "numpy.dtype", /* tp_name */ - sizeof(PyArray_Descr), /* tp_basicsize */ - 0, /* tp_itemsize */ + 0, /* ob_size */ + "numpy.dtype", /* tp_name */ + sizeof(PyArray_Descr), /* tp_basicsize */ + 0, /* tp_itemsize */ /* methods */ - (destructor)arraydescr_dealloc, /* tp_dealloc */ - 0, /* tp_print */ - 0, /* tp_getattr */ - 0, /* tp_setattr */ - 0, /* tp_compare */ - (reprfunc)arraydescr_repr, /* tp_repr */ - 0, /* tp_as_number */ - &descr_as_sequence, /* tp_as_sequence */ - &descr_as_mapping, /* tp_as_mapping */ - 0, /* tp_hash */ - 0, /* tp_call */ - (reprfunc)arraydescr_str, /* tp_str */ - 0, /* tp_getattro */ - 0, /* tp_setattro */ - 0, /* tp_as_buffer */ - Py_TPFLAGS_DEFAULT, /* tp_flags */ - 0, /* tp_doc */ - 0, /* tp_traverse */ - 0, /* tp_clear */ - (richcmpfunc)arraydescr_richcompare, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ - 0, /* tp_iter */ - 0, /* tp_iternext */ - arraydescr_methods, /* tp_methods */ - arraydescr_members, /* tp_members */ - arraydescr_getsets, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - 0, /* tp_init */ - 0, /* tp_alloc */ - arraydescr_new, /* tp_new */ - 0, /* tp_free */ - 0, /* tp_is_gc */ - 0, /* tp_bases */ - 0, /* tp_mro */ - 0, /* tp_cache */ - 0, /* tp_subclasses */ - 0, /* tp_weaklist */ - 0, /* tp_del */ + (destructor)arraydescr_dealloc, /* tp_dealloc */ + 0, /* tp_print */ + 0, /* tp_getattr */ + 0, /* tp_setattr */ + 0, /* tp_compare */ + (reprfunc)arraydescr_repr, /* tp_repr */ + 0, /* tp_as_number */ + &descr_as_sequence, /* tp_as_sequence */ + &descr_as_mapping, /* tp_as_mapping */ + 0, /* tp_hash */ + 0, /* tp_call */ + (reprfunc)arraydescr_str, /* tp_str */ + 0, /* tp_getattro */ + 0, /* tp_setattro */ + 0, /* tp_as_buffer */ + Py_TPFLAGS_DEFAULT, /* tp_flags */ + 0, /* tp_doc */ + 0, /* tp_traverse */ + 0, /* tp_clear */ + (richcmpfunc)arraydescr_richcompare, /* tp_richcompare */ + 0, /* tp_weaklistoffset */ + 0, /* tp_iter */ + 0, /* tp_iternext */ + arraydescr_methods, /* tp_methods */ + arraydescr_members, /* tp_members */ + arraydescr_getsets, /* tp_getset */ + 0, /* tp_base */ + 0, /* tp_dict */ + 0, /* tp_descr_get */ + 0, /* tp_descr_set */ + 0, /* tp_dictoffset */ + 0, /* tp_init */ + 0, /* tp_alloc */ + arraydescr_new, /* tp_new */ + 0, /* tp_free */ + 0, /* tp_is_gc */ + 0, /* tp_bases */ + 0, /* tp_mro */ + 0, /* tp_cache */ + 0, /* tp_subclasses */ + 0, /* tp_weaklist */ + 0, /* tp_del */ #ifdef COUNT_ALLOCS - /* these must be last and never explicitly initialized */ - 0, /* tp_allocs */ - 0, /* tp_frees */ - 0, /* tp_maxalloc */ - 0, /* tp_prev */ - 0, /* *tp_next */ + /* these must be last and never explicitly initialized */ + 0, /* tp_allocs */ + 0, /* tp_frees */ + 0, /* tp_maxalloc */ + 0, /* tp_prev */ + 0, /* *tp_next */ #endif }; -/** Array Flags Object **/ +/* Array Flags Object */ /*NUMPY_API - Get New ArrayFlagsObject -*/ + * + * Get New ArrayFlagsObject + */ static PyObject * PyArray_NewFlagsObject(PyObject *obj) { @@ -12578,11 +13078,12 @@ flags = PyArray_FLAGS(obj); } flagobj = PyArrayFlags_Type.tp_alloc(&PyArrayFlags_Type, 0); - if (flagobj == NULL) return NULL; + if (flagobj == NULL) { + return NULL; + } Py_XINCREF(obj); ((PyArrayFlagsObject *)flagobj)->arr = obj; ((PyArrayFlagsObject *)flagobj)->flags = flags; - return flagobj; } @@ -12620,11 +13121,12 @@ PyObject *item; if (((self->flags & FORTRAN) == FORTRAN) || - ((self->flags & CONTIGUOUS) == CONTIGUOUS)) + ((self->flags & CONTIGUOUS) == CONTIGUOUS)) { item = Py_True; - else + } + else { item = Py_False; - + } Py_INCREF(item); return item; } @@ -12635,11 +13137,12 @@ PyObject *item; if (((self->flags & FORTRAN) == FORTRAN) && - !((self->flags & CONTIGUOUS) == CONTIGUOUS)) + !((self->flags & CONTIGUOUS) == CONTIGUOUS)) { item = Py_True; - else + } + else { item = Py_False; - + } Py_INCREF(item); return item; } @@ -12649,13 +13152,14 @@ { PyObject *item; - if (((self->flags & (ALIGNED|WRITEABLE|FORTRAN)) == \ + if (((self->flags & (ALIGNED|WRITEABLE|FORTRAN)) == (ALIGNED|WRITEABLE|FORTRAN)) && - !((self->flags & CONTIGUOUS) == CONTIGUOUS)) + !((self->flags & CONTIGUOUS) == CONTIGUOUS)) { item = Py_True; - else + } + else { item = Py_False; - + } Py_INCREF(item); return item; } @@ -12677,7 +13181,9 @@ } res = PyObject_CallMethod(self->arr, "setflags", "OOO", Py_None, Py_None, (PyObject_IsTrue(obj) ? Py_True : Py_False)); - if (res == NULL) return -1; + if (res == NULL) { + return -1; + } Py_DECREF(res); return 0; } @@ -12693,7 +13199,9 @@ res = PyObject_CallMethod(self->arr, "setflags", "OOO", Py_None, (PyObject_IsTrue(obj) ? Py_True : Py_False), Py_None); - if (res == NULL) return -1; + if (res == NULL) { + return -1; + } Py_DECREF(res); return 0; } @@ -12709,7 +13217,9 @@ res = PyObject_CallMethod(self->arr, "setflags", "OOO", (PyObject_IsTrue(obj) ? Py_True : Py_False), Py_None, Py_None); - if (res == NULL) return -1; + if (res == NULL) { + return -1; + } Py_DECREF(res); return 0; } @@ -12717,61 +13227,61 @@ static PyGetSetDef arrayflags_getsets[] = { {"contiguous", - (getter)arrayflags_contiguous_get, - NULL, - "", NULL}, + (getter)arrayflags_contiguous_get, + NULL, + "", NULL}, {"c_contiguous", - (getter)arrayflags_contiguous_get, - NULL, - "", NULL}, + (getter)arrayflags_contiguous_get, + NULL, + "", NULL}, {"f_contiguous", - (getter)arrayflags_fortran_get, - NULL, - "", NULL}, + (getter)arrayflags_fortran_get, + NULL, + "", NULL}, {"fortran", - (getter)arrayflags_fortran_get, - NULL, - "", NULL}, + (getter)arrayflags_fortran_get, + NULL, + "", NULL}, {"updateifcopy", - (getter)arrayflags_updateifcopy_get, - (setter)arrayflags_updateifcopy_set, - "", NULL}, + (getter)arrayflags_updateifcopy_get, + (setter)arrayflags_updateifcopy_set, + "", NULL}, {"owndata", - (getter)arrayflags_owndata_get, - NULL, - "", NULL}, + (getter)arrayflags_owndata_get, + NULL, + "", NULL}, {"aligned", - (getter)arrayflags_aligned_get, - (setter)arrayflags_aligned_set, - "", NULL}, + (getter)arrayflags_aligned_get, + (setter)arrayflags_aligned_set, + "", NULL}, {"writeable", - (getter)arrayflags_writeable_get, - (setter)arrayflags_writeable_set, - "", NULL}, + (getter)arrayflags_writeable_get, + (setter)arrayflags_writeable_set, + "", NULL}, {"fnc", - (getter)arrayflags_fnc_get, - NULL, - "", NULL}, + (getter)arrayflags_fnc_get, + NULL, + "", NULL}, {"forc", - (getter)arrayflags_forc_get, - NULL, - "", NULL}, + (getter)arrayflags_forc_get, + NULL, + "", NULL}, {"behaved", - (getter)arrayflags_behaved_get, - NULL, - "", NULL}, + (getter)arrayflags_behaved_get, + NULL, + "", NULL}, {"carray", - (getter)arrayflags_carray_get, - NULL, - "", NULL}, + (getter)arrayflags_carray_get, + NULL, + "", NULL}, {"farray", - (getter)arrayflags_farray_get, - NULL, - "", NULL}, + (getter)arrayflags_farray_get, + NULL, + "", NULL}, {"num", - (getter)arrayflags_num_get, - NULL, - "", NULL}, + (getter)arrayflags_num_get, + NULL, + "", NULL}, {NULL, NULL, NULL, NULL, NULL}, }; @@ -12780,7 +13290,9 @@ { char *key; int n; - if (!PyString_Check(ind)) goto fail; + if (!PyString_Check(ind)) { + goto fail; + } key = PyString_AS_STRING(ind); n = PyString_GET_SIZE(ind); switch(n) { @@ -12805,50 +13317,65 @@ } break; case 2: - if (strncmp(key, "CA", n)==0) + if (strncmp(key, "CA", n) == 0) { return arrayflags_carray_get(self); - if (strncmp(key, "FA", n)==0) + } + if (strncmp(key, "FA", n) == 0) { return arrayflags_farray_get(self); + } break; case 3: - if (strncmp(key, "FNC", n)==0) + if (strncmp(key, "FNC", n) == 0) { return arrayflags_fnc_get(self); + } break; case 4: - if (strncmp(key, "FORC", n)==0) + if (strncmp(key, "FORC", n) == 0) { return arrayflags_forc_get(self); + } break; case 6: - if (strncmp(key, "CARRAY", n)==0) + if (strncmp(key, "CARRAY", n) == 0) { return arrayflags_carray_get(self); - if (strncmp(key, "FARRAY", n)==0) + } + if (strncmp(key, "FARRAY", n) == 0) { return arrayflags_farray_get(self); + } break; case 7: - if (strncmp(key,"FORTRAN",n)==0) + if (strncmp(key,"FORTRAN",n) == 0) { return arrayflags_fortran_get(self); - if (strncmp(key,"BEHAVED",n)==0) + } + if (strncmp(key,"BEHAVED",n) == 0) { return arrayflags_behaved_get(self); - if (strncmp(key,"OWNDATA",n)==0) + } + if (strncmp(key,"OWNDATA",n) == 0) { return arrayflags_owndata_get(self); - if (strncmp(key,"ALIGNED",n)==0) + } + if (strncmp(key,"ALIGNED",n) == 0) { return arrayflags_aligned_get(self); + } break; case 9: - if (strncmp(key,"WRITEABLE",n)==0) + if (strncmp(key,"WRITEABLE",n) == 0) { return arrayflags_writeable_get(self); + } break; case 10: - if (strncmp(key,"CONTIGUOUS",n)==0) + if (strncmp(key,"CONTIGUOUS",n) == 0) { return arrayflags_contiguous_get(self); + } break; case 12: - if (strncmp(key, "UPDATEIFCOPY", n)==0) + if (strncmp(key, "UPDATEIFCOPY", n) == 0) { return arrayflags_updateifcopy_get(self); - if (strncmp(key, "C_CONTIGUOUS", n)==0) + } + if (strncmp(key, "C_CONTIGUOUS", n) == 0) { return arrayflags_contiguous_get(self); - if (strncmp(key, "F_CONTIGUOUS", n)==0) + } + if (strncmp(key, "F_CONTIGUOUS", n) == 0) { return arrayflags_fortran_get(self); + } break; } @@ -12862,18 +13389,23 @@ { char *key; int n; - if (!PyString_Check(ind)) goto fail; + if (!PyString_Check(ind)) { + goto fail; + } key = PyString_AS_STRING(ind); n = PyString_GET_SIZE(ind); - if (((n==9) && (strncmp(key, "WRITEABLE", n)==0)) || - ((n==1) && (strncmp(key, "W", n)==0))) + if (((n==9) && (strncmp(key, "WRITEABLE", n) == 0)) || + ((n==1) && (strncmp(key, "W", n) == 0))) { return arrayflags_writeable_set(self, item); - else if (((n==7) && (strncmp(key, "ALIGNED", n)==0)) || - ((n==1) && (strncmp(key, "A", n)==0))) + } + else if (((n==7) && (strncmp(key, "ALIGNED", n) == 0)) || + ((n==1) && (strncmp(key, "A", n) == 0))) { return arrayflags_aligned_set(self, item); - else if (((n==12) && (strncmp(key, "UPDATEIFCOPY", n)==0)) || - ((n==1) && (strncmp(key, "U", n)==0))) + } + else if (((n==12) && (strncmp(key, "UPDATEIFCOPY", n) == 0)) || + ((n==1) && (strncmp(key, "U", n) == 0))) { return arrayflags_updateifcopy_set(self, item); + } fail: PyErr_SetString(PyExc_KeyError, "Unknown flag"); @@ -12883,8 +13415,12 @@ static char * _torf_(int flags, int val) { - if ((flags & val) == val) return "True"; - else return "False"; + if ((flags & val) == val) { + return "True"; + } + else { + return "False"; + } } static PyObject * @@ -12906,12 +13442,15 @@ static int arrayflags_compare(PyArrayFlagsObject *self, PyArrayFlagsObject *other) { - if (self->flags == other->flags) + if (self->flags == other->flags) { return 0; - else if (self->flags < other->flags) + } + else if (self->flags < other->flags) { return -1; - else + } + else { return 1; + } } static PyMappingMethods arrayflags_as_mapping = { @@ -12929,9 +13468,9 @@ arrayflags_new(PyTypeObject *NPY_UNUSED(self), PyObject *args, PyObject *NPY_UNUSED(kwds)) { PyObject *arg=NULL; - if (!PyArg_UnpackTuple(args, "flagsobj", 0, 1, &arg)) + if (!PyArg_UnpackTuple(args, "flagsobj", 0, 1, &arg)) { return NULL; - + } if ((arg != NULL) && PyArray_Check(arg)) { return PyArray_NewFlagsObject(arg); } @@ -12945,7 +13484,7 @@ 0, "numpy.flagsobj", sizeof(PyArrayFlagsObject), - 0, /* tp_itemsize */ + 0, /* tp_itemsize */ /* methods */ (destructor)arrayflags_dealloc, /* tp_dealloc */ 0, /* tp_print */ @@ -12970,32 +13509,32 @@ 0, /* tp_weaklistoffset */ 0, /* tp_iter */ 0, /* tp_iternext */ - 0, /* tp_methods */ - 0, /* tp_members */ - arrayflags_getsets, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - 0, /* tp_init */ - 0, /* tp_alloc */ - arrayflags_new, /* tp_new */ - 0, /* tp_free */ - 0, /* tp_is_gc */ - 0, /* tp_bases */ - 0, /* tp_mro */ - 0, /* tp_cache */ - 0, /* tp_subclasses */ - 0, /* tp_weaklist */ - 0, /* tp_del */ + 0, /* tp_methods */ + 0, /* tp_members */ + arrayflags_getsets, /* tp_getset */ + 0, /* tp_base */ + 0, /* tp_dict */ + 0, /* tp_descr_get */ + 0, /* tp_descr_set */ + 0, /* tp_dictoffset */ + 0, /* tp_init */ + 0, /* tp_alloc */ + arrayflags_new, /* tp_new */ + 0, /* tp_free */ + 0, /* tp_is_gc */ + 0, /* tp_bases */ + 0, /* tp_mro */ + 0, /* tp_cache */ + 0, /* tp_subclasses */ + 0, /* tp_weaklist */ + 0, /* tp_del */ #ifdef COUNT_ALLOCS - /* these must be last and never explicitly initialized */ - 0, /* tp_allocs */ - 0, /* tp_frees */ - 0, /* tp_maxalloc */ - 0, /* tp_prev */ - 0, /* *tp_next */ + /* these must be last and never explicitly initialized */ + 0, /* tp_allocs */ + 0, /* tp_frees */ + 0, /* tp_maxalloc */ + 0, /* tp_prev */ + 0, /* *tp_next */ #endif }; Modified: branches/coremath/numpy/core/src/multiarraymodule.c =================================================================== --- branches/coremath/numpy/core/src/multiarraymodule.c 2009-02-21 23:54:31 UTC (rev 6444) +++ branches/coremath/numpy/core/src/multiarraymodule.c 2009-02-22 03:41:06 UTC (rev 6445) @@ -25,7 +25,7 @@ #define PyAO PyArrayObject -static PyObject *typeDict=NULL; /* Must be explicitly loaded */ +static PyObject *typeDict = NULL; /* Must be explicitly loaded */ static PyArray_Descr * _arraydescr_fromobj(PyObject *obj) @@ -39,7 +39,9 @@ if (dtypedescr) { ret = PyArray_DescrConverter(dtypedescr, &new); Py_DECREF(dtypedescr); - if (ret == PY_SUCCEED) return new; + if (ret == PY_SUCCEED) { + return new; + } PyErr_Clear(); } /* Understand basic ctypes */ @@ -52,13 +54,16 @@ PyObject *length; length = PyObject_GetAttrString(obj, "_length_"); PyErr_Clear(); - if (length) { /* derived type */ + if (length) { + /* derived type */ PyObject *newtup; PyArray_Descr *derived; newtup = Py_BuildValue("NO", new, length); ret = PyArray_DescrConverter(newtup, &derived); Py_DECREF(newtup); - if (ret == PY_SUCCEED) return derived; + if (ret == PY_SUCCEED) { + return derived; + } PyErr_Clear(); return NULL; } @@ -75,24 +80,27 @@ if (dtypedescr) { ret = PyArray_DescrAlignConverter(dtypedescr, &new); Py_DECREF(dtypedescr); - if (ret == PY_SUCCEED) return new; + if (ret == PY_SUCCEED) { + return new; + } PyErr_Clear(); } return NULL; } -/* Including this file is the only way I know how to declare functions - static in each file, and store the pointers from functions in both - arrayobject.c and multiarraymodule.c for the C-API +/* + * Including this file is the only way I know how to declare functions + * static in each file, and store the pointers from functions in both + * arrayobject.c and multiarraymodule.c for the C-API + * + * Declarying an external pointer-containing variable in arrayobject.c + * and trying to copy it to PyArray_API, did not work. + * + * Think about two modules with a common api that import each other... + * + * This file would just be the module calls. + */ - Declarying an external pointer-containing variable in arrayobject.c - and trying to copy it to PyArray_API, did not work. - - Think about two modules with a common api that import each other... - - This file would just be the module calls. -*/ - #include "arrayobject.c" @@ -100,60 +108,72 @@ static PyObject *MultiArrayError; /*NUMPY_API - Multiply a List of ints -*/ + * Multiply a List of ints + */ static int PyArray_MultiplyIntList(register int *l1, register int n) { - register int s=1; - while (n--) s *= (*l1++); + int s = 1; + + while (n--) { + s *= (*l1++); + } return s; } /*NUMPY_API - Multiply a List -*/ + * Multiply a List + */ static intp PyArray_MultiplyList(register intp *l1, register int n) { - register intp s=1; - while (n--) s *= (*l1++); + intp s = 1; + + while (n--) { + s *= (*l1++); + } return s; } /*NUMPY_API - Multiply a List of Non-negative numbers with over-flow detection. -*/ + * Multiply a List of Non-negative numbers with over-flow detection. + */ static intp PyArray_OverflowMultiplyList(register intp *l1, register int n) { - register intp s=1; + intp s = 1; + while (n--) { - if (*l1 == 0) return 0; - if ((s > MAX_INTP / *l1) || (*l1 > MAX_INTP / s)) + if (*l1 == 0) { + return 0; + } + if ((s > MAX_INTP / *l1) || (*l1 > MAX_INTP / s)) { return -1; + } s *= (*l1++); } return s; } /*NUMPY_API - Produce a pointer into array -*/ + * Produce a pointer into array + */ static void * PyArray_GetPtr(PyArrayObject *obj, register intp* ind) { - register int n = obj->nd; - register intp *strides = obj->strides; - register char *dptr = obj->data; + int n = obj->nd; + intp *strides = obj->strides; + char *dptr = obj->data; - while (n--) dptr += (*strides++) * (*ind++); + while (n--) { + dptr += (*strides++) * (*ind++); + } return (void *)dptr; } /*NUMPY_API - Get axis from an object (possibly None) -- a converter function, -*/ + * Get axis from an object (possibly None) -- a converter function, + */ static int PyArray_AxisConverter(PyObject *obj, int *axis) { @@ -170,31 +190,37 @@ } /*NUMPY_API - Compare Lists -*/ + * Compare Lists + */ static int PyArray_CompareLists(intp *l1, intp *l2, int n) { int i; - for(i=0;iob_type; - + if (pytype) { + subtype = pytype; + } + else { + subtype = self->ob_type; + } Py_INCREF(self->descr); new = PyArray_NewFromDescr(subtype, self->descr, @@ -202,8 +228,9 @@ self->strides, self->data, self->flags, (PyObject *)self); - - if (new==NULL) return NULL; + if (new == NULL) { + return NULL; + } Py_INCREF(self); PyArray_BASE(new) = (PyObject *)self; @@ -219,20 +246,20 @@ return new; } -/* Returns a contiguous array */ /*NUMPY_API - Ravel -*/ + * Ravel + * Returns a contiguous array + */ static PyObject * PyArray_Ravel(PyArrayObject *a, NPY_ORDER fortran) { PyArray_Dims newdim = {NULL,1}; intp val[1] = {-1}; - if (fortran == PyArray_ANYORDER) + if (fortran == PyArray_ANYORDER) { fortran = PyArray_ISFORTRAN(a); - + } newdim.ptr = val; if (!fortran && PyArray_ISCONTIGUOUS(a)) { return PyArray_Newshape(a, &newdim, PyArray_CORDER); @@ -240,8 +267,9 @@ else if (fortran && PyArray_ISFORTRAN(a)) { return PyArray_Newshape(a, &newdim, PyArray_FORTRANORDER); } - else + else { return PyArray_Flatten(a, fortran); + } } static double @@ -249,23 +277,25 @@ { static const double p10[] = {1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8}; double ret; - if (n < 9) + if (n < 9) { ret = p10[n]; + } else { ret = 1e9; - while (n-- > 9) + while (n-- > 9) { ret *= 10.; + } } return ret; } /*NUMPY_API - Round -*/ + * Round + */ static PyObject * PyArray_Round(PyArrayObject *a, int decimals, PyArrayObject *out) { - PyObject *f, *ret=NULL, *tmp, *op1, *op2; + PyObject *f, *ret = NULL, *tmp, *op1, *op2; int ret_int=0; PyArray_Descr *my_descr; if (out && (PyArray_SIZE(out) != PyArray_SIZE(a))) { @@ -278,45 +308,68 @@ PyObject *round_part; PyObject *new; int res; + if (out) { new = (PyObject *)out; Py_INCREF(new); } else { new = PyArray_Copy(a); - if (new == NULL) return NULL; + if (new == NULL) { + return NULL; + } } /* new.real = a.real.round(decimals) */ part = PyObject_GetAttrString(new, "real"); - if (part == NULL) {Py_DECREF(new); return NULL;} + if (part == NULL) { + Py_DECREF(new); + return NULL; + } part = PyArray_EnsureAnyArray(part); round_part = PyArray_Round((PyArrayObject *)part, decimals, NULL); Py_DECREF(part); - if (round_part == NULL) {Py_DECREF(new); return NULL;} + if (round_part == NULL) { + Py_DECREF(new); + return NULL; + } res = PyObject_SetAttrString(new, "real", round_part); Py_DECREF(round_part); - if (res < 0) {Py_DECREF(new); return NULL;} + if (res < 0) { + Py_DECREF(new); + return NULL; + } /* new.imag = a.imag.round(decimals) */ part = PyObject_GetAttrString(new, "imag"); - if (part == NULL) {Py_DECREF(new); return NULL;} + if (part == NULL) { + Py_DECREF(new); + return NULL; + } part = PyArray_EnsureAnyArray(part); round_part = PyArray_Round((PyArrayObject *)part, decimals, NULL); Py_DECREF(part); - if (round_part == NULL) {Py_DECREF(new); return NULL;} + if (round_part == NULL) { + Py_DECREF(new); + return NULL; + } res = PyObject_SetAttrString(new, "imag", round_part); Py_DECREF(round_part); - if (res < 0) {Py_DECREF(new); return NULL;} + if (res < 0) { + Py_DECREF(new); + return NULL; + } return new; } /* do the most common case first */ if (decimals >= 0) { if (PyArray_ISINTEGER(a)) { if (out) { - if (PyArray_CopyAnyInto(out, a) < 0) return NULL; + if (PyArray_CopyAnyInto(out, a) < 0) { + return NULL; + } Py_INCREF(out); return (PyObject *)out; } @@ -327,8 +380,7 @@ } if (decimals == 0) { if (out) { - return PyObject_CallFunction(n_ops.rint, "OO", - a, out); + return PyObject_CallFunction(n_ops.rint, "OO", a, out); } return PyObject_CallFunction(n_ops.rint, "O", a); } @@ -352,18 +404,34 @@ out = (PyArrayObject *)PyArray_Empty(a->nd, a->dimensions, my_descr, PyArray_ISFORTRAN(a)); - if (out == NULL) return NULL; + if (out == NULL) { + return NULL; + } } - else Py_INCREF(out); + else { + Py_INCREF(out); + } f = PyFloat_FromDouble(power_of_ten(decimals)); - if (f==NULL) return NULL; + if (f == NULL) { + return NULL; + } ret = PyObject_CallFunction(op1, "OOO", a, f, out); - if (ret==NULL) goto finish; + if (ret == NULL) { + goto finish; + } tmp = PyObject_CallFunction(n_ops.rint, "OO", ret, ret); - if (tmp == NULL) {Py_DECREF(ret); ret=NULL; goto finish;} + if (tmp == NULL) { + Py_DECREF(ret); + ret = NULL; + goto finish; + } Py_DECREF(tmp); tmp = PyObject_CallFunction(op2, "OOO", ret, f, ret); - if (tmp == NULL) {Py_DECREF(ret); ret=NULL; goto finish;} + if (tmp == NULL) { + Py_DECREF(ret); + ret = NULL; + goto finish; + } Py_DECREF(tmp); finish: @@ -377,22 +445,21 @@ return tmp; } return ret; - } /*NUMPY_API - Flatten -*/ + * Flatten + */ static PyObject * PyArray_Flatten(PyArrayObject *a, NPY_ORDER order) { PyObject *ret; intp size; - if (order == PyArray_ANYORDER) + if (order == PyArray_ANYORDER) { order = PyArray_ISFORTRAN(a); - + } size = PyArray_SIZE(a); Py_INCREF(a->descr); ret = PyArray_NewFromDescr(a->ob_type, @@ -402,7 +469,9 @@ NULL, 0, (PyObject *)a); - if (ret== NULL) return NULL; + if (ret == NULL) { + return NULL; + } if (_flat_copyinto(ret, (PyObject *)a, order) < 0) { Py_DECREF(ret); return NULL; @@ -411,20 +480,20 @@ } -/* For back-ward compatability * +/* For back-ward compatability -- Not recommended */ - / * Not recommended */ - /*NUMPY_API - Reshape an array -*/ + * Reshape + */ static PyObject * PyArray_Reshape(PyArrayObject *self, PyObject *shape) { PyObject *ret; PyArray_Dims newdims; - if (!PyArray_IntpConverter(shape, &newdims)) return NULL; + if (!PyArray_IntpConverter(shape, &newdims)) { + return NULL; + } ret = PyArray_Newshape(self, &newdims, PyArray_CORDER); PyDimMem_FREE(newdims.ptr); return ret; @@ -442,25 +511,31 @@ nd = self->nd; dims = self->dimensions; - for (k=0, j=0; !done && (jstrides[j]; - j++; k++; + j++; + k++; } - else if ((knd; oi++) { - if (self->dimensions[oi]!=1) { + for (oi = 0; oi < self->nd; oi++) { + if (self->dimensions[oi]!= 1) { olddims[oldnd] = self->dimensions[oi]; oldstrides[oldnd] = self->strides[oi]; oldnd++; @@ -508,13 +583,17 @@ np = 1; - for (ni=0; nini;nk--) - newstrides[nk-1]=newstrides[nk]*newdims[nk]; + newstrides[ni] = oldstrides[oi]; + for (nk = ni + 1; nk < nj; nk++) { + newstrides[nk] = newstrides[nk - 1]*newdims[nk - 1]; + } } - + else { + /* C order */ + newstrides[nj - 1] = oldstrides[oj - 1]; + for (nk = nj - 1; nk > ni; nk--) { + newstrides[nk - 1] = newstrides[nk]*newdims[nk]; + } + } ni = nj++; oi = oj++; - } /* @@ -591,17 +674,19 @@ s_known = 1; i_unknown = -1; - for(i=0; iptr; PyArrayObject *ret; int n = newdims->len; - Bool same, incref=TRUE; + Bool same, incref = TRUE; intp *strides = NULL; intp newstrides[MAX_DIMS]; int flags; - if (fortran == PyArray_ANYORDER) + if (fortran == PyArray_ANYORDER) { fortran = PyArray_ISFORTRAN(self); - + } /* Quick check to make sure anything actually needs to be done */ if (n == self->nd) { same = TRUE; - i=0; - while(same && iflags; - if (strides==NULL) { /* we are really re-shaping not just adding ones - to the shape somewhere */ - - /* fix any -1 dimensions and check new-dimensions against - old size */ - if (_fix_unknown_dimension(newdims, PyArray_SIZE(self)) < 0) + if (strides == NULL) { + /* + * we are really re-shaping not just adding ones to the shape somewhere + * fix any -1 dimensions and check new-dimensions against old size + */ + if (_fix_unknown_dimension(newdims, PyArray_SIZE(self)) < 0) { return NULL; - - /* sometimes we have to create a new copy of the array - in order to get the right orientation and - because we can't just re-use the buffer with the - data in the order it is in. - */ + } + /* + * sometimes we have to create a new copy of the array + * in order to get the right orientation and + * because we can't just re-use the buffer with the + * data in the order it is in. + */ if (!(PyArray_ISONESEGMENT(self)) || (((PyArray_CHKFLAGS(self, NPY_CONTIGUOUS) && - fortran == NPY_FORTRANORDER) - || (PyArray_CHKFLAGS(self, NPY_FORTRAN) && + fortran == NPY_FORTRANORDER) || + (PyArray_CHKFLAGS(self, NPY_FORTRAN) && fortran == NPY_CORDER)) && (self->nd > 1))) { - - int success=0; + int success = 0; success = _attempt_nocopy_reshape(self,n,dimensions, newstrides,fortran); if (success) { /* no need to copy the array after all */ strides = newstrides; flags = self->flags; - } else { + } + else { PyObject *new; new = PyArray_NewCopy(self, fortran); - if (new == NULL) return NULL; + if (new == NULL) { + return NULL; + } incref = FALSE; self = (PyArrayObject *)new; flags = self->flags; } } - /* We always have to interpret the contiguous buffer correctly - */ + /* We always have to interpret the contiguous buffer correctly */ - /* Make sure the flags argument is set. - */ + /* Make sure the flags argument is set. */ if (n > 1) { if (fortran == NPY_FORTRANORDER) { flags &= ~NPY_CONTIGUOUS; @@ -723,26 +816,29 @@ } } else if (n > 0) { - /* replace any 0-valued strides with - appropriate value to preserve contiguousness - */ + /* + * replace any 0-valued strides with + * appropriate value to preserve contiguousness + */ if (fortran == PyArray_FORTRANORDER) { - if (strides[0] == 0) + if (strides[0] == 0) { strides[0] = self->descr->elsize; - for (i=1; idescr->elsize; - for (i=n-2; i>-1; i--) { - if (strides[i] == 0) - strides[i] = strides[i+1] * \ - dimensions[i+1]; } + for (i = n - 2; i > -1; i--) { + if (strides[i] == 0) { + strides[i] = strides[i+1] * dimensions[i+1]; + } + } } } @@ -754,27 +850,31 @@ self->data, flags, (PyObject *)self); - if (ret== NULL) goto fail; - - if (incref) Py_INCREF(self); + if (ret == NULL) { + goto fail; + } + if (incref) { + Py_INCREF(self); + } ret->base = (PyObject *)self; PyArray_UpdateFlags(ret, CONTIGUOUS | FORTRAN); - return (PyObject *)ret; fail: - if (!incref) {Py_DECREF(self);} + if (!incref) { + Py_DECREF(self); + } return NULL; } -/* return a new view of the array object with all of its unit-length - dimensions squeezed out if needed, otherwise - return the same array. -*/ - -/*NUMPY_API*/ +/*NUMPY_API + * + * return a new view of the array object with all of its unit-length + * dimensions squeezed out if needed, otherwise + * return the same array. + */ static PyObject * PyArray_Squeeze(PyArrayObject *self) { @@ -782,14 +882,14 @@ int newnd = nd; intp dimensions[MAX_DIMS]; intp strides[MAX_DIMS]; - int i,j; + int i, j; PyObject *ret; if (nd == 0) { Py_INCREF(self); return (PyObject *)self; } - for (j=0, i=0; idimensions[i] == 1) { newnd -= 1; } @@ -806,7 +906,9 @@ strides, self->data, self->flags, (PyObject *)self); - if (ret == NULL) return NULL; + if (ret == NULL) { + return NULL; + } PyArray_FLAGS(ret) &= ~OWNDATA; PyArray_BASE(ret) = (PyObject *)self; Py_INCREF(self); @@ -815,16 +917,17 @@ /*NUMPY_API - Mean -*/ + * Mean + */ static PyObject * PyArray_Mean(PyArrayObject *self, int axis, int rtype, PyArrayObject *out) { - PyObject *obj1=NULL, *obj2=NULL; + PyObject *obj1 = NULL, *obj2 = NULL; PyObject *new, *ret; - if ((new = _check_axis(self, &axis, 0))==NULL) return NULL; - + if ((new = _check_axis(self, &axis, 0)) == NULL) { + return NULL; + } obj1 = PyArray_GenericReduceFunction((PyAO *)new, n_ops.add, axis, rtype, out); obj2 = PyFloat_FromDouble((double) PyArray_DIM(new,axis)); @@ -845,10 +948,10 @@ return ret; } -/* Set variance to 1 to by-pass square-root calculation and return variance */ /*NUMPY_API - Std -*/ + * Set variance to 1 to by-pass square-root calculation and return variance + * Std + */ static PyObject * PyArray_Std(PyArrayObject *self, int axis, int rtype, PyArrayObject *out, int variance) @@ -860,34 +963,51 @@ __New_PyArray_Std(PyArrayObject *self, int axis, int rtype, PyArrayObject *out, int variance, int num) { - PyObject *obj1=NULL, *obj2=NULL, *obj3=NULL, *new=NULL; - PyObject *ret=NULL, *newshape=NULL; + PyObject *obj1 = NULL, *obj2 = NULL, *obj3 = NULL, *new = NULL; + PyObject *ret = NULL, *newshape = NULL; int i, n; intp val; - if ((new = _check_axis(self, &axis, 0))==NULL) return NULL; - + if ((new = _check_axis(self, &axis, 0)) == NULL) { + return NULL; + } /* Compute and reshape mean */ obj1 = PyArray_EnsureAnyArray(PyArray_Mean((PyAO *)new, axis, rtype, NULL)); - if (obj1 == NULL) {Py_DECREF(new); return NULL;} + if (obj1 == NULL) { + Py_DECREF(new); + return NULL; + } n = PyArray_NDIM(new); newshape = PyTuple_New(n); - if (newshape == NULL) {Py_DECREF(obj1); Py_DECREF(new); return NULL;} - for (i=0; iob_type == ret->ob_type) return ret; + if (ret == NULL || PyArray_CheckExact(self)) { + return ret; + } + if (PyArray_Check(self) && self->ob_type == ret->ob_type) { + return ret; + } obj1 = PyArray_EnsureArray(ret); - if (obj1 == NULL) return NULL; + if (obj1 == NULL) { + return NULL; + } ret = PyArray_View((PyAO *)obj1, NULL, self->ob_type); Py_DECREF(obj1); if (out) { @@ -966,15 +1106,16 @@ /*NUMPY_API - Sum -*/ + *Sum + */ static PyObject * PyArray_Sum(PyArrayObject *self, int axis, int rtype, PyArrayObject *out) { PyObject *new, *ret; - if ((new = _check_axis(self, &axis, 0))==NULL) return NULL; - + if ((new = _check_axis(self, &axis, 0)) == NULL) { + return NULL; + } ret = PyArray_GenericReduceFunction((PyAO *)new, n_ops.add, axis, rtype, out); Py_DECREF(new); @@ -982,15 +1123,16 @@ } /*NUMPY_API - Prod -*/ + * Prod + */ static PyObject * PyArray_Prod(PyArrayObject *self, int axis, int rtype, PyArrayObject *out) { PyObject *new, *ret; - if ((new = _check_axis(self, &axis, 0))==NULL) return NULL; - + if ((new = _check_axis(self, &axis, 0)) == NULL) { + return NULL; + } ret = PyArray_GenericReduceFunction((PyAO *)new, n_ops.multiply, axis, rtype, out); Py_DECREF(new); @@ -998,15 +1140,16 @@ } /*NUMPY_API - CumSum -*/ + *CumSum + */ static PyObject * PyArray_CumSum(PyArrayObject *self, int axis, int rtype, PyArrayObject *out) { PyObject *new, *ret; - if ((new = _check_axis(self, &axis, 0))==NULL) return NULL; - + if ((new = _check_axis(self, &axis, 0)) == NULL) { + return NULL; + } ret = PyArray_GenericAccumulateFunction((PyAO *)new, n_ops.add, axis, rtype, out); Py_DECREF(new); @@ -1014,14 +1157,16 @@ } /*NUMPY_API - CumProd -*/ + * CumProd + */ static PyObject * PyArray_CumProd(PyArrayObject *self, int axis, int rtype, PyArrayObject *out) { PyObject *new, *ret; - if ((new = _check_axis(self, &axis, 0))==NULL) return NULL; + if ((new = _check_axis(self, &axis, 0)) == NULL) { + return NULL; + } ret = PyArray_GenericAccumulateFunction((PyAO *)new, n_ops.multiply, axis, @@ -1031,15 +1176,16 @@ } /*NUMPY_API - Any -*/ + * Any + */ static PyObject * PyArray_Any(PyArrayObject *self, int axis, PyArrayObject *out) { PyObject *new, *ret; - if ((new = _check_axis(self, &axis, 0))==NULL) return NULL; - + if ((new = _check_axis(self, &axis, 0)) == NULL) { + return NULL; + } ret = PyArray_GenericReduceFunction((PyAO *)new, n_ops.logical_or, axis, PyArray_BOOL, out); @@ -1048,15 +1194,16 @@ } /*NUMPY_API - All -*/ + * All + */ static PyObject * PyArray_All(PyArrayObject *self, int axis, PyArrayObject *out) { PyObject *new, *ret; - if ((new = _check_axis(self, &axis, 0))==NULL) return NULL; - + if ((new = _check_axis(self, &axis, 0)) == NULL) { + return NULL; + } ret = PyArray_GenericReduceFunction((PyAO *)new, n_ops.logical_and, axis, PyArray_BOOL, out); @@ -1066,8 +1213,8 @@ /*NUMPY_API - Compress -*/ + * Compress + */ static PyObject * PyArray_Compress(PyArrayObject *self, PyObject *condition, int axis, PyArrayObject *out) @@ -1076,8 +1223,9 @@ PyObject *res, *ret; cond = (PyAO *)PyArray_FROM_O(condition); - if (cond == NULL) return NULL; - + if (cond == NULL) { + return NULL; + } if (cond->nd != 1) { Py_DECREF(cond); PyErr_SetString(PyExc_ValueError, @@ -1087,7 +1235,9 @@ res = PyArray_Nonzero(cond); Py_DECREF(cond); - if (res == NULL) return res; + if (res == NULL) { + return res; + } ret = PyArray_TakeFrom(self, PyTuple_GET_ITEM(res, 0), axis, out, NPY_RAISE); Py_DECREF(res); @@ -1095,51 +1245,61 @@ } /*NUMPY_API - Nonzero -*/ + * Nonzero + */ static PyObject * PyArray_Nonzero(PyArrayObject *self) { - int n=self->nd, j; - intp count=0, i, size; - PyArrayIterObject *it=NULL; - PyObject *ret=NULL, *item; + int n = self->nd, j; + intp count = 0, i, size; + PyArrayIterObject *it = NULL; + PyObject *ret = NULL, *item; intp *dptr[MAX_DIMS]; it = (PyArrayIterObject *)PyArray_IterNew((PyObject *)self); - if (it==NULL) return NULL; - + if (it == NULL) { + return NULL; + } size = it->size; - for (i=0; idescr->f->nonzero(it->dataptr, self)) count++; + for (i = 0; i < size; i++) { + if (self->descr->f->nonzero(it->dataptr, self)) { + count++; + } PyArray_ITER_NEXT(it); } PyArray_ITER_RESET(it); ret = PyTuple_New(n); - if (ret == NULL) goto fail; - for (j=0; job_type, 1, &count, PyArray_INTP, NULL, NULL, 0, 0, (PyObject *)self); - if (item == NULL) goto fail; + if (item == NULL) { + goto fail; + } PyTuple_SET_ITEM(ret, j, item); dptr[j] = (intp *)PyArray_DATA(item); } - if (n==1) { - for (i=0; idescr->f->nonzero(it->dataptr, self)) + if (n == 1) { + for (i = 0; i < size; i++) { + if (self->descr->f->nonzero(it->dataptr, self)) { *(dptr[0])++ = i; + } PyArray_ITER_NEXT(it); } } else { /* reset contiguous so that coordinates gets updated */ it->contiguous = 0; - for (i=0; idescr->f->nonzero(it->dataptr, self)) - for (j=0; jdescr->f->nonzero(it->dataptr, self)) { + for (j = 0; j < n; j++) { *(dptr[j])++ = it->coordinates[j]; + } + } PyArray_ITER_NEXT(it); } } @@ -1158,10 +1318,12 @@ _GenericBinaryOutFunction(PyArrayObject *m1, PyObject *m2, PyArrayObject *out, PyObject *op) { - if (out == NULL) + if (out == NULL) { return PyObject_CallFunction(op, "OO", m1, m2); - else + } + else { return PyObject_CallFunction(op, "OOO", m1, m2, out); + } } static PyObject * @@ -1171,7 +1333,9 @@ if (max != NULL) { res1 = _GenericBinaryOutFunction(self, max, out, n_ops.minimum); - if (res1 == NULL) return NULL; + if (res1 == NULL) { + return NULL; + } } else { res1 = (PyObject *)self; @@ -1181,7 +1345,10 @@ if (min != NULL) { res2 = _GenericBinaryOutFunction((PyArrayObject *)res1, min, out, n_ops.maximum); - if (res2 == NULL) {Py_XDECREF(res1); return NULL;} + if (res2 == NULL) { + Py_XDECREF(res1); + return NULL; + } } else { res2 = res1; @@ -1192,16 +1359,16 @@ } /*NUMPY_API - Clip -*/ + * Clip + */ static PyObject * PyArray_Clip(PyArrayObject *self, PyObject *min, PyObject *max, PyArrayObject *out) { PyArray_FastClipFunc *func; - int outgood=0, ingood=0; - PyArrayObject *maxa=NULL; - PyArrayObject *mina=NULL; - PyArrayObject *newout=NULL, *newin=NULL; + int outgood = 0, ingood = 0; + PyArrayObject *maxa = NULL; + PyArrayObject *mina = NULL; + PyArrayObject *newout = NULL, *newin = NULL; PyArray_Descr *indescr, *newdescr; char *max_data, *min_data; PyObject *zero; @@ -1214,31 +1381,37 @@ func = self->descr->f->fastclip; if (func == NULL || (min != NULL && !PyArray_CheckAnyScalar(min)) || - (max != NULL && !PyArray_CheckAnyScalar(max))) + (max != NULL && !PyArray_CheckAnyScalar(max))) { return _slow_array_clip(self, min, max, out); - + } /* Use the fast scalar clip function */ /* First we need to figure out the correct type */ indescr = NULL; if (min != NULL) { indescr = PyArray_DescrFromObject(min, NULL); - if (indescr == NULL) return NULL; + if (indescr == NULL) { + return NULL; + } } if (max != NULL) { newdescr = PyArray_DescrFromObject(max, indescr); Py_XDECREF(indescr); - if (newdescr == NULL) return NULL; + if (newdescr == NULL) { + return NULL; + } } else { - newdescr = indescr; /* Steal the reference */ + /* Steal the reference */ + newdescr = indescr; } - /* Use the scalar descriptor only if it is of a bigger - KIND than the input array (and then find the - type that matches both). - */ + /* + * Use the scalar descriptor only if it is of a bigger + * KIND than the input array (and then find the + * type that matches both). + */ if (PyArray_ScalarKind(newdescr->type_num, NULL) > PyArray_ScalarKind(self->descr->type_num, NULL)) { indescr = _array_small_type(newdescr, self->descr); @@ -1254,7 +1427,9 @@ PyArray_Descr *descr2; descr2 = PyArray_DescrNewByteorder(indescr, '='); Py_DECREF(indescr); - if (descr2 == NULL) goto fail; + if (descr2 == NULL) { + goto fail; + } indescr = descr2; } @@ -1262,28 +1437,32 @@ if (max != NULL) { maxa = (NPY_AO *)PyArray_FromAny(max, indescr, 0, 0, NPY_DEFAULT, NULL); - if (maxa == NULL) return NULL; + if (maxa == NULL) { + return NULL; + } } else { /* Side-effect of PyArray_FromAny */ Py_DECREF(indescr); } - - /* If we are unsigned, then make sure min is not <0 */ - /* This is to match the behavior of - _slow_array_clip - - We allow min and max to go beyond the limits - for other data-types in which case they - are interpreted as their modular counterparts. + /* + * If we are unsigned, then make sure min is not < 0 + * This is to match the behavior of _slow_array_clip + * + * We allow min and max to go beyond the limits + * for other data-types in which case they + * are interpreted as their modular counterparts. */ if (min != NULL) { if (PyArray_ISUNSIGNED(self)) { int cmp; zero = PyInt_FromLong(0); cmp = PyObject_RichCompareBool(min, zero, Py_LT); - if (cmp == -1) { Py_DECREF(zero); goto fail;} + if (cmp == -1) { + Py_DECREF(zero); + goto fail; + } if (cmp == 1) { min = zero; } @@ -1301,46 +1480,61 @@ mina = (NPY_AO *)PyArray_FromAny(min, indescr, 0, 0, NPY_DEFAULT, NULL); Py_DECREF(min); - if (mina == NULL) goto fail; + if (mina == NULL) { + goto fail; + } } - /* Check to see if input is single-segment, aligned, - and in native byteorder */ + /* + * Check to see if input is single-segment, aligned, + * and in native byteorder + */ if (PyArray_ISONESEGMENT(self) && PyArray_CHKFLAGS(self, ALIGNED) && - PyArray_ISNOTSWAPPED(self) && (self->descr == indescr)) + PyArray_ISNOTSWAPPED(self) && (self->descr == indescr)) { ingood = 1; - + } if (!ingood) { int flags; - if (PyArray_ISFORTRAN(self)) flags = NPY_FARRAY; - else flags = NPY_CARRAY; + + if (PyArray_ISFORTRAN(self)) { + flags = NPY_FARRAY; + } + else { + flags = NPY_CARRAY; + } Py_INCREF(indescr); newin = (NPY_AO *)PyArray_FromArray(self, indescr, flags); - if (newin == NULL) goto fail; + if (newin == NULL) { + goto fail; + } } else { newin = self; Py_INCREF(newin); } - /* At this point, newin is a single-segment, aligned, and correct - byte-order array of the correct type + /* + * At this point, newin is a single-segment, aligned, and correct + * byte-order array of the correct type + * + * if ingood == 0, then it is a copy, otherwise, + * it is the original input. + */ - if ingood == 0, then it is a copy, otherwise, - it is the original input. - */ - - /* If we have already made a copy of the data, then use - that as the output array - */ + /* + * If we have already made a copy of the data, then use + * that as the output array + */ if (out == NULL && !ingood) { out = newin; } - /* Now, we know newin is a usable array for fastclip, - we need to make sure the output array is available - and usable */ + /* + * Now, we know newin is a usable array for fastclip, + * we need to make sure the output array is available + * and usable + */ if (out == NULL) { Py_INCREF(indescr); out = (NPY_AO*)PyArray_NewFromDescr(self->ob_type, @@ -1349,7 +1543,9 @@ NULL, NULL, PyArray_ISFORTRAN(self), (PyObject *)self); - if (out == NULL) goto fail; + if (out == NULL) { + goto fail; + } outgood = 1; } else Py_INCREF(out); @@ -1363,8 +1559,10 @@ outgood = 1; } - /* Do we still not have a suitable output array? */ - /* Create one, now */ + /* + * Do we still not have a suitable output array? + * Create one, now + */ if (!outgood) { int oflags; if (PyArray_ISFORTRAN(out)) @@ -1374,7 +1572,9 @@ oflags |= NPY_UPDATEIFCOPY | NPY_FORCECAST; Py_INCREF(indescr); newout = (NPY_AO*)PyArray_FromArray(out, indescr, oflags); - if (newout == NULL) goto fail; + if (newout == NULL) { + goto fail; + } } else { newout = out; @@ -1387,22 +1587,20 @@ "same shape as the input."); goto fail; } - if (newout->data != newin->data) { memcpy(newout->data, newin->data, PyArray_NBYTES(newin)); } /* Now we can call the fast-clip function */ - min_data = max_data = NULL; - if (mina != NULL) + if (mina != NULL) { min_data = mina->data; - if (maxa != NULL) + } + if (maxa != NULL) { max_data = maxa->data; + } + func(newin->data, PyArray_SIZE(newin), min_data, max_data, newout->data); - func(newin->data, PyArray_SIZE(newin), min_data, max_data, - newout->data); - /* Clean up temporary variables */ Py_XDECREF(mina); Py_XDECREF(maxa); @@ -1421,8 +1619,8 @@ /*NUMPY_API - Conjugate -*/ + * Conjugate + */ static PyObject * PyArray_Conjugate(PyArrayObject *self, PyArrayObject *out) { @@ -1440,35 +1638,40 @@ else { PyArrayObject *ret; if (out) { - if (PyArray_CopyAnyInto(out, self)< 0) + if (PyArray_CopyAnyInto(out, self) < 0) { return NULL; + } ret = out; } - else ret = self; + else { + ret = self; + } Py_INCREF(ret); return (PyObject *)ret; } } /*NUMPY_API - Trace -*/ + * Trace + */ static PyObject * PyArray_Trace(PyArrayObject *self, int offset, int axis1, int axis2, int rtype, PyArrayObject *out) { - PyObject *diag=NULL, *ret=NULL; + PyObject *diag = NULL, *ret = NULL; diag = PyArray_Diagonal(self, offset, axis1, axis2); - if (diag == NULL) return NULL; + if (diag == NULL) { + return NULL; + } ret = PyArray_GenericReduceFunction((PyAO *)diag, n_ops.add, -1, rtype, out); Py_DECREF(diag); return ret; } /*NUMPY_API - Diagonal -*/ + * Diagonal + */ static PyObject * PyArray_Diagonal(PyArrayObject *self, int offset, int axis1, int axis2) { @@ -1484,9 +1687,13 @@ "array.ndim must be >= 2"); return NULL; } - if (axis1 < 0) axis1 += n; - if (axis2 < 0) axis2 += n; - if ((axis1 == axis2) || (axis1 < 0) || (axis1 >= n) || \ + if (axis1 < 0) { + axis1 += n; + } + if (axis2 < 0) { + axis2 += n; + } + if ((axis1 == axis2) || (axis1 < 0) || (axis1 >= n) || (axis2 < 0) || (axis2 >= n)) { PyErr_Format(PyExc_ValueError, "axis1(=%d) and axis2(=%d) "\ "must be different and within range (nd=%d)", @@ -1499,21 +1706,26 @@ newaxes.ptr[n-2] = axis1; newaxes.ptr[n-1] = axis2; pos = 0; - for (i=0; idimensions[0]; n2 = self->dimensions[1]; - step = n2+1; + step = n2 + 1; if (offset < 0) { start = -n2 * offset; stop = MIN(n2, n1+offset)*(n2+1) - n2*offset; @@ -1525,17 +1737,22 @@ /* count = ceil((stop-start)/step) */ count = ((stop-start) / step) + (((stop-start) % step) != 0); - indices = PyArray_New(&PyArray_Type, 1, &count, PyArray_INTP, NULL, NULL, 0, 0, NULL); if (indices == NULL) { - Py_DECREF(self); return NULL; + Py_DECREF(self); + return NULL; } dptr = (intp *)PyArray_DATA(indices); - for (n1=start; n1descr; - mydiagonal = PyList_New(0); - if (mydiagonal == NULL) {Py_DECREF(self); return NULL;} + if (mydiagonal == NULL) { + Py_DECREF(self); + return NULL; + } n1 = self->dimensions[0]; - for (i=0; idata; @@ -1629,8 +1850,10 @@ case 2: n = ap->dimensions[0]; ptr2 = (char **)_pya_malloc(n * sizeof(char *)); - if (!ptr2) goto fail; - for (i=0; idata + i*ap->strides[0]; } *((char ***)ptr) = ptr2; @@ -1639,12 +1862,13 @@ n = ap->dimensions[0]; m = ap->dimensions[1]; ptr3 = (char ***)_pya_malloc(n*(m+1) * sizeof(char *)); - if (!ptr3) goto fail; - for (i=0; idata + i*ap->strides[0] + \ - j*ap->strides[1]; + for (j = 0; j < m; j++) { + ptr3[i][j] = ap->data + i*ap->strides[0] + j*ap->strides[1]; } } *((char ****)ptr) = ptr3; @@ -1661,8 +1885,8 @@ /* Deprecated --- Use PyArray_AsCArray instead */ /*NUMPY_API - Convert to a 1D C-array -*/ + * Convert to a 1D C-array + */ static int PyArray_As1D(PyObject **op, char **ptr, int *d1, int typecode) { @@ -1674,15 +1898,16 @@ return -1; } descr = PyArray_DescrFromType(typecode); - if (PyArray_AsCArray(op, (void *)ptr, &newd1, 1, descr) == -1) + if (PyArray_AsCArray(op, (void *)ptr, &newd1, 1, descr) == -1) { return -1; + } *d1 = (int) newd1; return 0; } /*NUMPY_API - Convert to a 2D C-array -*/ + * Convert to a 2D C-array + */ static int PyArray_As2D(PyObject **op, char ***ptr, int *d1, int *d2, int typecode) { @@ -1694,9 +1919,9 @@ return -1; } descr = PyArray_DescrFromType(typecode); - if (PyArray_AsCArray(op, (void *)ptr, newdims, 2, descr) == -1) + if (PyArray_AsCArray(op, (void *)ptr, newdims, 2, descr) == -1) { return -1; - + } *d1 = (int ) newdims[0]; *d2 = (int ) newdims[1]; return 0; @@ -1705,15 +1930,16 @@ /* End Deprecated */ /*NUMPY_API - Free pointers created if As2D is called -*/ + * Free pointers created if As2D is called + */ static int PyArray_Free(PyObject *op, void *ptr) { PyArrayObject *ap = (PyArrayObject *)op; - if ((ap->nd < 1) || (ap->nd > 3)) + if ((ap->nd < 1) || (ap->nd > 3)) { return -1; + } if (ap->nd >= 2) { _pya_free(ptr); } @@ -1725,25 +1951,33 @@ static PyObject * _swap_and_concat(PyObject *op, int axis, int n) { - PyObject *newtup=NULL; + PyObject *newtup = NULL; PyObject *otmp, *arr; int i; newtup = PyTuple_New(n); - if (newtup==NULL) return NULL; - for (i=0; i= MAX_DIMS) { otmp = PyArray_Ravel(mps[i],0); Py_DECREF(mps[i]); @@ -1812,9 +2049,13 @@ } new_dim = 0; - for(i=0; ind; + for (i = 0; i < n; i++) { + if (mps[i] == NULL) { + goto fail; + } + if (i == 0) { + nd = mps[i]->nd; + } else { if (nd != mps[i]->nd) { PyErr_SetString(PyExc_ValueError, @@ -1838,7 +2079,6 @@ } new_dim += mps[i]->dimensions[0]; } - tmp = mps[0]->dimensions[0]; mps[0]->dimensions[0] = new_dim; Py_INCREF(mps[0]->descr); @@ -1849,30 +2089,35 @@ (PyObject *)ret); mps[0]->dimensions[0] = tmp; - if (ret == NULL) goto fail; - + if (ret == NULL) { + goto fail; + } data = ret->data; - for(i=0; idata, numbytes); data += numbytes; } PyArray_INCREF(ret); - for(i=0; i= n)) { PyErr_SetString(PyExc_ValueError, "bad axis1 argument to swapaxes"); @@ -1907,10 +2156,16 @@ new_axes.ptr = dims; new_axes.len = n; - for (i=0; ind; - for (i=0; ilen; axes = permute->ptr; if (n != ap->nd) { @@ -1941,12 +2197,14 @@ "axes don't match array"); return NULL; } - for (i=0; ind+axis; + if (axis < 0) { + axis = ap->nd + axis; + } if (axis < 0 || axis >= ap->nd) { PyErr_SetString(PyExc_ValueError, "invalid axis for this array"); @@ -1960,12 +2218,14 @@ reverse_permutation[axis] = i; permutation[i] = axis; } - for (i=0; idata. */ + /* + * this allocates memory for dimensions and strides (but fills them + * incorrectly), sets up descr, and points data at ap->data. + */ Py_INCREF(ap->descr); ret = (PyArrayObject *)\ PyArray_NewFromDescr(ap->ob_type, @@ -1973,25 +2233,25 @@ n, ap->dimensions, NULL, ap->data, ap->flags, (PyObject *)ap); - if (ret == NULL) return NULL; - + if (ret == NULL) { + return NULL; + } /* point at true owner of memory: */ ret->base = (PyObject *)ap; Py_INCREF(ap); /* fix the dimensions and strides of the return-array */ - for(i=0; idimensions[i] = ap->dimensions[permutation[i]]; ret->strides[i] = ap->strides[permutation[i]]; } PyArray_UpdateFlags(ret, CONTIGUOUS | FORTRAN); - return (PyObject *)ret; } /*NUMPY_API - Repeat the array. -*/ + * Repeat the array. + */ static PyObject * PyArray_Repeat(PyArrayObject *aop, PyObject *op, int axis) { @@ -1999,13 +2259,15 @@ intp n, n_outer, i, j, k, chunk, total; intp tmp; int nd; - PyArrayObject *repeats=NULL; - PyObject *ap=NULL; - PyArrayObject *ret=NULL; + PyArrayObject *repeats = NULL; + PyObject *ap = NULL; + PyArrayObject *ret = NULL; char *new_data, *old_data; repeats = (PyAO *)PyArray_ContiguousFromAny(op, PyArray_INTP, 0, 1); - if (repeats == NULL) return NULL; + if (repeats == NULL) { + return NULL; + } nd = repeats->nd; counts = (intp *)repeats->data; @@ -2015,25 +2277,26 @@ } aop = (PyAO *)ap; - - if (nd == 1) + if (nd == 1) { n = repeats->dimensions[0]; - else /* nd == 0 */ + } + else { + /* nd == 0 */ n = aop->dimensions[axis]; - + } if (aop->dimensions[axis] != n) { PyErr_SetString(PyExc_ValueError, "a.shape[axis] != len(repeats)"); goto fail; } - - if (nd == 0) + if (nd == 0) { total = counts[0]*n; + } else { total = 0; - for(j=0; jdimensions[axis] = n; - - if (ret == NULL) goto fail; - + if (ret == NULL) { + goto fail; + } new_data = ret->data; old_data = aop->data; chunk = aop->descr->elsize; - for(i=axis+1; ind; i++) { + for(i = axis + 1; i < aop->nd; i++) { chunk *= aop->dimensions[i]; } n_outer = 1; - for(i=0; idimensions[i]; - - for(i=0; idimensions[i]; + } + for (i = 0; i < n_outer; i++) { + for (j = 0; j < n; j++) { + tmp = nd ? counts[j] : counts[0]; + for (k = 0; k < tmp; k++) { memcpy(new_data, old_data, chunk); new_data += chunk; } @@ -2112,7 +2376,9 @@ } -/*NUMPY_API*/ +/*NUMPY_API + * ScalarKind + */ static NPY_SCALARKIND PyArray_ScalarKind(int typenum, PyArrayObject **arr) { @@ -2141,10 +2407,12 @@ NPY_SCALARKIND retval; PyArray_Descr* descr = PyArray_DescrFromType(typenum); - if (descr->f->scalarkind) + if (descr->f->scalarkind) { retval = descr->f->scalarkind((arr ? *arr : NULL)); - else + } + else { retval = PyArray_NOSCALAR; + } Py_DECREF(descr); return retval; } @@ -2162,15 +2430,15 @@ if (scalar == PyArray_NOSCALAR) { return PyArray_CanCastSafely(thistype, neededtype); } - from = PyArray_DescrFromType(thistype); - if (from->f->cancastscalarkindto && - (castlist = from->f->cancastscalarkindto[scalar])) { - while (*castlist != PyArray_NOTYPE) + if (from->f->cancastscalarkindto + && (castlist = from->f->cancastscalarkindto[scalar])) { + while (*castlist != PyArray_NOTYPE) { if (*castlist++ == neededtype) { Py_DECREF(from); return 1; } + } } Py_DECREF(from); @@ -2186,14 +2454,15 @@ case PyArray_INTPOS_SCALAR: return (neededtype >= PyArray_BYTE); case PyArray_INTNEG_SCALAR: - return (neededtype >= PyArray_BYTE) && \ - !(PyTypeNum_ISUNSIGNED(neededtype)); + return (neededtype >= PyArray_BYTE) + && !(PyTypeNum_ISUNSIGNED(neededtype)); case PyArray_FLOAT_SCALAR: return (neededtype >= PyArray_FLOAT); case PyArray_COMPLEX_SCALAR: return (neededtype >= PyArray_CFLOAT); default: - return 1; /* should never get here... */ + /* should never get here... */ + return 1; } } } @@ -2204,19 +2473,21 @@ static PyArrayObject ** PyArray_ConvertToCommonType(PyObject *op, int *retn) { - int i, n, allscalars=0; - PyArrayObject **mps=NULL; + int i, n, allscalars = 0; + PyArrayObject **mps = NULL; PyObject *otmp; - PyArray_Descr *intype=NULL, *stype=NULL; - PyArray_Descr *newtype=NULL; - NPY_SCALARKIND scalarkind=NPY_NOSCALAR, intypekind=NPY_NOSCALAR; + PyArray_Descr *intype = NULL, *stype = NULL; + PyArray_Descr *newtype = NULL; + NPY_SCALARKIND scalarkind = NPY_NOSCALAR, intypekind = NPY_NOSCALAR; *retn = n = PySequence_Length(op); if (n == 0) { PyErr_SetString(PyExc_ValueError, "0-length sequence."); } - if (PyErr_Occurred()) {*retn = 0; return NULL;} - + if (PyErr_Occurred()) { + *retn = 0; + return NULL; + } mps = (PyArrayObject **)PyDataMem_NEW(n*sizeof(PyArrayObject *)); if (mps == NULL) { *retn = 0; @@ -2224,12 +2495,11 @@ } if (PyArray_Check(op)) { - for (i=0; itype_num, - NULL); + intypekind = PyArray_ScalarKind(intype->type_num, NULL); } else { newtype = PyArray_DescrFromObject(otmp, stype); Py_XDECREF(stype); stype = newtype; - scalarkind = PyArray_ScalarKind(newtype->type_num, - NULL); + scalarkind = PyArray_ScalarKind(newtype->type_num, NULL); mps[i] = (PyArrayObject *)Py_None; Py_INCREF(Py_None); } Py_XDECREF(otmp); } - if (intype==NULL) { /* all scalars */ + if (intype==NULL) { + /* all scalars */ allscalars = 1; intype = stype; Py_INCREF(intype); - for (i=0; itype_num, intype->type_num, scalarkind)) { @@ -2283,7 +2551,7 @@ Py_XDECREF(intype); intype = newtype; } - for (i=0; idescr); @@ -2366,29 +2645,33 @@ PyArrayObject *obj; int flags = NPY_CARRAY | NPY_UPDATEIFCOPY | NPY_FORCECAST; - if ((PyArray_NDIM(ret) != multi->nd) || - !PyArray_CompareLists(PyArray_DIMS(ret), multi->dimensions, - multi->nd)) { + if ((PyArray_NDIM(ret) != multi->nd) + || !PyArray_CompareLists( + PyArray_DIMS(ret), multi->dimensions, multi->nd)) { PyErr_SetString(PyExc_TypeError, "invalid shape for output array."); ret = NULL; goto fail; } if (clipmode == NPY_RAISE) { - /* we need to make sure and get a copy - so the input array is not changed - before the error is called - */ + /* + * we need to make sure and get a copy + * so the input array is not changed + * before the error is called + */ flags |= NPY_ENSURECOPY; } Py_INCREF(mps[0]->descr); - obj = (PyArrayObject *)PyArray_FromArray(ret, mps[0]->descr, - flags); - if (obj != ret) copyret = 1; + obj = (PyArrayObject *)PyArray_FromArray(ret, mps[0]->descr, flags); + if (obj != ret) { + copyret = 1; + } ret = obj; } - if (ret == NULL) goto fail; + if (ret == NULL) { + goto fail; + } elsize = ret->descr->elsize; ret_data = ret->data; @@ -2398,31 +2681,41 @@ switch(clipmode) { case NPY_RAISE: PyErr_SetString(PyExc_ValueError, - "invalid entry in choice "\ - "array"); + "invalid entry in choice "\ + "array"); goto fail; case NPY_WRAP: if (mi < 0) { - while(mi<0) mi += n; + while (mi < 0) { + mi += n; + } } else { - while(mi>=n) mi -= n; + while (mi >= n) { + mi -= n; + } } break; case NPY_CLIP: - if (mi < 0) mi=0; - else if (mi>=n) mi=n-1; + if (mi < 0) { + mi = 0; + } + else if (mi >= n) { + mi = n - 1; + } break; } } memmove(ret_data, PyArray_MultiIter_DATA(multi, mi), elsize); - ret_data += elsize; + ret_data += elsize; PyArray_MultiIter_NEXT(multi); } PyArray_INCREF(ret); Py_DECREF(multi); - for(i=0; idescr->elsize; astride = op->strides[axis]; - needcopy = !(op->flags & ALIGNED) || (astride != (intp) elsize) \ - || swap; + needcopy = !(op->flags & ALIGNED) || (astride != (intp) elsize) || swap; + if (needcopy) { + char *buffer = PyDataMem_NEW(N*elsize); - if (needcopy) { - char *buffer; - buffer = PyDataMem_NEW(N*elsize); while (size--) { _unaligned_strided_byte_copy(buffer, (intp) elsize, it->dataptr, astride, N, elsize); @@ -2511,7 +2801,6 @@ PyArray_ITER_NEXT(it); } } - NPY_END_THREADS_DESCR(op->descr); Py_DECREF(it); return 0; @@ -2526,10 +2815,10 @@ _new_argsort(PyArrayObject *op, int axis, NPY_SORTKIND which) { - PyArrayIterObject *it=NULL; - PyArrayIterObject *rit=NULL; + PyArrayIterObject *it = NULL; + PyArrayIterObject *rit = NULL; PyObject *ret; - int needcopy=0, i; + int needcopy = 0, i; intp N, size; int elsize, swap; intp astride, rstride, *iptr; @@ -2539,16 +2828,17 @@ ret = PyArray_New(op->ob_type, op->nd, op->dimensions, PyArray_INTP, NULL, NULL, 0, 0, (PyObject *)op); - if (ret == NULL) return NULL; - + if (ret == NULL) { + return NULL; + } it = (PyArrayIterObject *)PyArray_IterAllButAxis((PyObject *)op, &axis); rit = (PyArrayIterObject *)PyArray_IterAllButAxis(ret, &axis); - if (rit == NULL || it == NULL) goto fail; - + if (rit == NULL || it == NULL) { + goto fail; + } swap = !PyArray_ISNOTSWAPPED(op); NPY_BEGIN_THREADS_DESCR(op->descr); - argsort = op->descr->f->argsort[which]; size = it->size; N = op->dimensions[axis]; @@ -2556,19 +2846,23 @@ astride = op->strides[axis]; rstride = PyArray_STRIDE(ret,axis); - needcopy = swap || !(op->flags & ALIGNED) || (astride != (intp) elsize) || \ - (rstride != sizeof(intp)); - + needcopy = swap || !(op->flags & ALIGNED) || (astride != (intp) elsize) || + (rstride != sizeof(intp)); if (needcopy) { char *valbuffer, *indbuffer; + valbuffer = PyDataMem_NEW(N*elsize); indbuffer = PyDataMem_NEW(N*sizeof(intp)); while (size--) { _unaligned_strided_byte_copy(valbuffer, (intp) elsize, it->dataptr, astride, N, elsize); - if (swap) _strided_byte_swap(valbuffer, (intp) elsize, N, elsize); + if (swap) { + _strided_byte_swap(valbuffer, (intp) elsize, N, elsize); + } iptr = (intp *)indbuffer; - for (i=0; idataptr; - for (i=0; idataptr, (intp *)rit->dataptr, - N, op) < 0) goto fail; + for (i = 0; i < N; i++) { + *iptr++ = i; + } + if (argsort(it->dataptr, (intp *)rit->dataptr, N, op) < 0) { + goto fail; + } PyArray_ITER_NEXT(it); PyArray_ITER_NEXT(rit); } @@ -2600,9 +2897,7 @@ return ret; fail: - NPY_END_THREADS; - Py_DECREF(ret); Py_XDECREF(it); Py_XDECREF(rit); @@ -2611,7 +2906,6 @@ /* Be sure to save this global_compare when necessary */ - static PyArrayObject *global_obj; static int @@ -2620,12 +2914,11 @@ return global_obj->descr->f->compare(a,b,global_obj); } -/* Consumes reference to ap (op gets it) - op contains a version of the array with axes swapped if - local variable axis is not the last dimension. - orign must be defined locally. -*/ - +/* + * Consumes reference to ap (op gets it) op contains a version of + * the array with axes swapped if local variable axis is not the + * last dimension. Origin must be defined locally. + */ #define SWAPAXES(op, ap) { \ orign = (ap)->nd-1; \ if (axis != orign) { \ @@ -2636,11 +2929,11 @@ else (op) = (ap); \ } -/* Consumes reference to ap (op gets it) - origin must be previously defined locally. - SWAPAXES must have been called previously. - op contains the swapped version of the array. -*/ +/* + * Consumes reference to ap (op gets it) origin must be previously + * defined locally. SWAPAXES must have been called previously. + * op contains the swapped version of the array. + */ #define SWAPBACK(op, ap) { \ if (axis != orign) { \ (op) = (PyAO *)PyArray_SwapAxes((ap), axis, orign); \ @@ -2670,22 +2963,24 @@ } /*NUMPY_API - Sort an array in-place -*/ + * Sort an array in-place + */ static int PyArray_Sort(PyArrayObject *op, int axis, NPY_SORTKIND which) { - PyArrayObject *ap=NULL, *store_arr=NULL; + PyArrayObject *ap = NULL, *store_arr = NULL; char *ip; int i, n, m, elsize, orign; n = op->nd; - if ((n==0) || (PyArray_SIZE(op)==1)) return 0; - - if (axis < 0) axis += n; + if ((n == 0) || (PyArray_SIZE(op) == 1)) { + return 0; + } + if (axis < 0) { + axis += n; + } if ((axis < 0) || (axis >= n)) { - PyErr_Format(PyExc_ValueError, - "axis(=%d) out of bounds", axis); + PyErr_Format(PyExc_ValueError, "axis(=%d) out of bounds", axis); return -1; } if (!PyArray_ISWRITEABLE(op)) { @@ -2698,9 +2993,8 @@ if (op->descr->f->sort[which] != NULL) { return _new_sort(op, axis, which); } - - if ((which != PyArray_QUICKSORT) || \ - op->descr->f->compare == NULL) { + if ((which != PyArray_QUICKSORT) + || op->descr->f->compare == NULL) { PyErr_SetString(PyExc_TypeError, "desired sort not supported for this type"); return -1; @@ -2711,30 +3005,33 @@ ap = (PyArrayObject *)PyArray_FromAny((PyObject *)op, NULL, 1, 0, DEFAULT | UPDATEIFCOPY, NULL); - if (ap == NULL) goto fail; - + if (ap == NULL) { + goto fail; + } elsize = ap->descr->elsize; m = ap->dimensions[ap->nd-1]; - if (m == 0) goto finish; - + if (m == 0) { + goto finish; + } n = PyArray_SIZE(ap)/m; /* Store global -- allows re-entry -- restore before leaving*/ store_arr = global_obj; global_obj = ap; - - for (ip=ap->data, i=0; idata, i = 0; i < n; i++, ip += elsize*m) { qsort(ip, m, elsize, qsortCompare); } - global_obj = store_arr; - if (PyErr_Occurred()) goto fail; + if (PyErr_Occurred()) { + goto fail; + } finish: Py_DECREF(ap); /* Should update op if needed */ SWAPBACK2(op); return 0; + fail: Py_XDECREF(ap); SWAPBACK2(op); @@ -2756,32 +3053,35 @@ } /*NUMPY_API - ArgSort an array -*/ + * ArgSort an array + */ static PyObject * PyArray_ArgSort(PyArrayObject *op, int axis, NPY_SORTKIND which) { - PyArrayObject *ap=NULL, *ret=NULL, *store, *op2; + PyArrayObject *ap = NULL, *ret = NULL, *store, *op2; intp *ip; intp i, j, n, m, orign; int argsort_elsize; char *store_ptr; n = op->nd; - if ((n==0) || (PyArray_SIZE(op)==1)) { + if ((n == 0) || (PyArray_SIZE(op) == 1)) { ret = (PyArrayObject *)PyArray_New(op->ob_type, op->nd, op->dimensions, PyArray_INTP, NULL, NULL, 0, 0, (PyObject *)op); - if (ret == NULL) return NULL; + if (ret == NULL) { + return NULL; + } *((intp *)ret->data) = 0; return (PyObject *)ret; } /* Creates new reference op2 */ - if ((op2=(PyAO *)_check_axis(op, &axis, 0))==NULL) return NULL; - + if ((op2=(PyAO *)_check_axis(op, &axis, 0)) == NULL) { + return NULL; + } /* Determine if we should use new algorithm or not */ if (op2->descr->f->argsort[which] != NULL) { ret = (PyArrayObject *)_new_argsort(op2, axis, which); @@ -2799,39 +3099,39 @@ /* ap will contain the reference to op2 */ SWAPAXES(ap, op2); - op = (PyArrayObject *)PyArray_ContiguousFromAny((PyObject *)ap, PyArray_NOTYPE, 1, 0); - Py_DECREF(ap); - if (op == NULL) return NULL; - + if (op == NULL) { + return NULL; + } ret = (PyArrayObject *)PyArray_New(op->ob_type, op->nd, op->dimensions, PyArray_INTP, NULL, NULL, 0, 0, (PyObject *)op); - if (ret == NULL) goto fail; - - + if (ret == NULL) { + goto fail; + } ip = (intp *)ret->data; argsort_elsize = op->descr->elsize; m = op->dimensions[op->nd-1]; - if (m == 0) goto finish; - + if (m == 0) { + goto finish; + } n = PyArray_SIZE(op)/m; store_ptr = global_data; global_data = op->data; store = global_obj; global_obj = op; - for (i=0; i 0 in lexsort"); - return NULL; - } + if (!PySequence_Check(sort_keys) + || ((n=PySequence_Size(sort_keys)) <= 0)) { + PyErr_SetString(PyExc_TypeError, + "need sequence of keys with len > 0 in lexsort"); + return NULL; + } mps = (PyArrayObject **) _pya_malloc(n*sizeof(PyArrayObject)); - if (mps==NULL) return PyErr_NoMemory(); + if (mps == NULL) { + return PyErr_NoMemory(); + } its = (PyArrayIterObject **) _pya_malloc(n*sizeof(PyArrayIterObject)); - if (its == NULL) {_pya_free(mps); return PyErr_NoMemory();} - for (i=0; i0) { - if ((mps[i]->nd != mps[0]->nd) || \ - (!PyArray_CompareLists(mps[i]->dimensions, + if (mps[i] == NULL) { + goto fail; + } + if (i > 0) { + if ((mps[i]->nd != mps[0]->nd) + || (!PyArray_CompareLists(mps[i]->dimensions, mps[0]->dimensions, mps[0]->nd))) { PyErr_SetString(PyExc_ValueError, @@ -2904,78 +3213,92 @@ "merge sort not available for item %d", i); goto fail; } - if (!object && - PyDataType_FLAGCHK(mps[i]->descr, NPY_NEEDS_PYAPI)) + if (!object + && PyDataType_FLAGCHK(mps[i]->descr, NPY_NEEDS_PYAPI)) { object = 1; - its[i] = (PyArrayIterObject *)PyArray_IterAllButAxis \ + } + its[i] = (PyArrayIterObject *)PyArray_IterAllButAxis ((PyObject *)mps[i], &axis); - if (its[i]==NULL) goto fail; + if (its[i] == NULL) { + goto fail; + } } /* Now we can check the axis */ nd = mps[0]->nd; - if ((nd==0) || (PyArray_SIZE(mps[0])==1)) { + if ((nd == 0) || (PyArray_SIZE(mps[0]) == 1)) { ret = (PyArrayObject *)PyArray_New(&PyArray_Type, mps[0]->nd, mps[0]->dimensions, PyArray_INTP, NULL, NULL, 0, 0, NULL); - if (ret == NULL) goto fail; + if (ret == NULL) { + goto fail; + } *((intp *)(ret->data)) = 0; goto finish; } - if (axis < 0) axis += nd; + if (axis < 0) { + axis += nd; + } if ((axis < 0) || (axis >= nd)) { - PyErr_Format(PyExc_ValueError, - "axis(=%d) out of bounds", axis); + PyErr_Format(PyExc_ValueError, "axis(=%d) out of bounds", axis); goto fail; } /* Now do the sorting */ - ret = (PyArrayObject *)PyArray_New(&PyArray_Type, mps[0]->nd, mps[0]->dimensions, PyArray_INTP, NULL, NULL, 0, 0, NULL); - if (ret == NULL) goto fail; - - rit = (PyArrayIterObject *)\ - PyArray_IterAllButAxis((PyObject *)ret, &axis); - if (rit == NULL) goto fail; - - if (!object) {NPY_BEGIN_THREADS;} - + if (ret == NULL) { + goto fail; + } + rit = (PyArrayIterObject *) + PyArray_IterAllButAxis((PyObject *)ret, &axis); + if (rit == NULL) { + goto fail; + } + if (!object) { + NPY_BEGIN_THREADS; + } size = rit->size; N = mps[0]->dimensions[axis]; rstride = PyArray_STRIDE(ret,axis); - maxelsize = mps[0]->descr->elsize; needcopy = (rstride != sizeof(intp)); - for (j=0; jflags & ALIGNED) || \ - (mps[j]->strides[axis] != (intp)mps[j]->descr->elsize); - if (mps[j]->descr->elsize > maxelsize) + for (j = 0; j < n && !needcopy; j++) { + needcopy = PyArray_ISBYTESWAPPED(mps[j]) + || !(mps[j]->flags & ALIGNED) + || (mps[j]->strides[axis] != (intp)mps[j]->descr->elsize); + if (mps[j]->descr->elsize > maxelsize) { maxelsize = mps[j]->descr->elsize; + } } if (needcopy) { char *valbuffer, *indbuffer; int *swaps; + valbuffer = PyDataMem_NEW(N*maxelsize); indbuffer = PyDataMem_NEW(N*sizeof(intp)); swaps = malloc(n*sizeof(int)); - for (j=0; jdescr->elsize; astride = mps[j]->strides[axis]; argsort = mps[j]->descr->f->argsort[PyArray_MERGESORT]; _unaligned_strided_byte_copy(valbuffer, (intp) elsize, its[j]->dataptr, astride, N, elsize); - if (swaps[j]) + if (swaps[j]) { _strided_byte_swap(valbuffer, (intp) elsize, N, elsize); + } if (argsort(valbuffer, (intp *)indbuffer, N, mps[j]) < 0) { PyDataMem_FREE(valbuffer); PyDataMem_FREE(indbuffer); @@ -2995,21 +3318,30 @@ else { while (size--) { iptr = (intp *)rit->dataptr; - for (i=0; idescr->f->argsort[PyArray_MERGESORT]; if (argsort(its[j]->dataptr, (intp *)rit->dataptr, - N, mps[j]) < 0) goto fail; + N, mps[j]) < 0) { + goto fail; + } PyArray_ITER_NEXT(its[j]); } PyArray_ITER_NEXT(rit); } } - if (!object) {NPY_END_THREADS;} + if (!object) { + NPY_END_THREADS; + } finish: - for (i=0; idescr->elsize; intp i; - for(i = 0; i < nkeys; ++i) { + for (i = 0; i < nkeys; ++i) { intp imin = 0; intp imax = nelts; while (imin < imax) { intp imid = imin + ((imax - imin) >> 2); - if (compare(parr + elsize*imid, pkey, key) < 0) + if (compare(parr + elsize*imid, pkey, key) < 0) { imin = imid + 1; - else + } + else { imax = imid; + } } *pret = imin; pret += 1; @@ -3098,10 +3433,12 @@ intp imax = nelts; while (imin < imax) { intp imid = imin + ((imax - imin) >> 2); - if (compare(parr + elsize*imid, pkey, key) <= 0) + if (compare(parr + elsize*imid, pkey, key) <= 0) { imin = imid + 1; - else + } + else { imax = imid; + } } *pret = imin; pret += 1; @@ -3111,8 +3448,8 @@ /*NUMPY_API - Convert object to searchsorted side -*/ + * Convert object to searchsorted side + */ static int PyArray_SearchsideConverter(PyObject *obj, void *addr) { @@ -3125,10 +3462,12 @@ return PY_FAIL; } - if (str[0] == 'l' || str[0] == 'L') + if (str[0] == 'l' || str[0] == 'L') { *side = NPY_SEARCHLEFT; - else if (str[0] == 'r' || str[0] == 'R') + } + else if (str[0] == 'r' || str[0] == 'R') { *side = NPY_SEARCHRIGHT; + } else { PyErr_Format(PyExc_ValueError, "'%s' is an invalid value for keyword 'side'", str); @@ -3139,43 +3478,40 @@ /*NUMPY_API - Numeric.searchsorted(a,v) -*/ + * Numeric.searchsorted(a,v) + */ static PyObject * PyArray_SearchSorted(PyArrayObject *op1, PyObject *op2, NPY_SEARCHSIDE side) { - PyArrayObject *ap1=NULL; - PyArrayObject *ap2=NULL; - PyArrayObject *ret=NULL; + PyArrayObject *ap1 = NULL; + PyArrayObject *ap2 = NULL; + PyArrayObject *ret = NULL; PyArray_Descr *dtype; - NPY_BEGIN_THREADS_DEF; dtype = PyArray_DescrFromObject((PyObject *)op2, op1->descr); - /* need ap1 as contiguous array and of right type */ Py_INCREF(dtype); ap1 = (PyArrayObject *)PyArray_FromAny((PyObject *)op1, dtype, 1, 1, NPY_DEFAULT, NULL); - if (ap1 == NULL) { Py_DECREF(dtype); return NULL; } /* need ap2 as contiguous array and of right type */ - ap2 = (PyArrayObject *)PyArray_FromAny(op2, dtype, 0, 0, NPY_DEFAULT, NULL); - - if (ap2 == NULL) + ap2 = (PyArrayObject *)PyArray_FromAny(op2, dtype, + 0, 0, NPY_DEFAULT, NULL); + if (ap2 == NULL) { goto fail; - + } /* ret is a contiguous array of intp type to hold returned indices */ ret = (PyArrayObject *)PyArray_New(ap2->ob_type, ap2->nd, ap2->dimensions, PyArray_INTP, NULL, NULL, 0, 0, (PyObject *)ap2); - if (ret == NULL) + if (ret == NULL) { goto fail; - + } /* check that comparison function exists */ if (ap2->descr->f->compare == NULL) { PyErr_SetString(PyExc_TypeError, @@ -3205,9 +3541,9 @@ } /* - Make a new empty array, of the passed size, of a type that takes the - priority of ap1 and ap2 into account. -*/ + * Make a new empty array, of the passed size, of a type that takes the + * priority of ap1 and ap2 into account. + */ static PyArrayObject * new_array_for_sum(PyArrayObject *ap1, PyArrayObject *ap2, int nd, intp dimensions[], int typenum) @@ -3215,15 +3551,16 @@ PyArrayObject *ret; PyTypeObject *subtype; double prior1, prior2; - /* Need to choose an output array that can hold a sum - -- use priority to determine which subtype. - */ + /* + * Need to choose an output array that can hold a sum + * -- use priority to determine which subtype. + */ if (ap2->ob_type != ap1->ob_type) { prior2 = PyArray_GetPriority((PyObject *)ap2, 0.0); prior1 = PyArray_GetPriority((PyObject *)ap1, 0.0); - subtype = (prior2 > prior1 ? ap2->ob_type : ap1->ob_type); - } else { + } + else { prior1 = prior2 = 0.0; subtype = ap1->ob_type; } @@ -3235,16 +3572,15 @@ return ret; } -/* Could perhaps be redone to not make contiguous arrays - */ +/* Could perhaps be redone to not make contiguous arrays */ /*NUMPY_API - Numeric.innerproduct(a,v) -*/ + * Numeric.innerproduct(a,v) + */ static PyObject * PyArray_InnerProduct(PyObject *op1, PyObject *op2) { - PyArrayObject *ap1, *ap2, *ret=NULL; + PyArrayObject *ap1, *ap2, *ret = NULL; PyArrayIterObject *it1, *it2; intp i, j, l; int typenum, nd, axis; @@ -3253,90 +3589,86 @@ intp dimensions[MAX_DIMS]; PyArray_DotFunc *dot; PyArray_Descr *typec; - NPY_BEGIN_THREADS_DEF; - typenum = PyArray_ObjectType(op1, 0); + typenum = PyArray_ObjectType(op1, 0); typenum = PyArray_ObjectType(op2, typenum); typec = PyArray_DescrFromType(typenum); Py_INCREF(typec); - ap1 = (PyArrayObject *)PyArray_FromAny(op1, typec, 0, 0, - BEHAVED, NULL); - if (ap1 == NULL) {Py_DECREF(typec); return NULL;} - ap2 = (PyArrayObject *)PyArray_FromAny(op2, typec, 0, 0, - BEHAVED, NULL); - if (ap2 == NULL) goto fail; - + ap1 = (PyArrayObject *)PyArray_FromAny(op1, typec, 0, 0, BEHAVED, NULL); + if (ap1 == NULL) { + Py_DECREF(typec); + return NULL; + } + ap2 = (PyArrayObject *)PyArray_FromAny(op2, typec, 0, 0, BEHAVED, NULL); + if (ap2 == NULL) { + goto fail; + } if (ap1->nd == 0 || ap2->nd == 0) { ret = (ap1->nd == 0 ? ap1 : ap2); - ret = (PyArrayObject *)ret->ob_type->tp_as_number->\ - nb_multiply((PyObject *)ap1, (PyObject *)ap2); + ret = (PyArrayObject *)ret->ob_type->tp_as_number->nb_multiply( + (PyObject *)ap1, (PyObject *)ap2); Py_DECREF(ap1); Py_DECREF(ap2); return (PyObject *)ret; } - l = ap1->dimensions[ap1->nd-1]; - - if (ap2->dimensions[ap2->nd-1] != l) { + l = ap1->dimensions[ap1->nd - 1]; + if (ap2->dimensions[ap2->nd - 1] != l) { PyErr_SetString(PyExc_ValueError, "matrices are not aligned"); goto fail; } - nd = ap1->nd+ap2->nd-2; + nd = ap1->nd + ap2->nd - 2; j = 0; - for(i=0; ind-1; i++) { + for (i = 0; i < ap1->nd - 1; i++) { dimensions[j++] = ap1->dimensions[i]; } - for(i=0; ind-1; i++) { + for (i = 0; i < ap2->nd - 1; i++) { dimensions[j++] = ap2->dimensions[i]; } - - /* Need to choose an output array that can hold a sum - -- use priority to determine which subtype. - */ + /* + * Need to choose an output array that can hold a sum + * -- use priority to determine which subtype. + */ ret = new_array_for_sum(ap1, ap2, nd, dimensions, typenum); - if (ret == NULL) goto fail; - + if (ret == NULL) { + goto fail; + } dot = (ret->descr->f->dotfunc); - if (dot == NULL) { PyErr_SetString(PyExc_ValueError, "dot not available for this type"); goto fail; } - - is1 = ap1->strides[ap1->nd-1]; - is2 = ap2->strides[ap2->nd-1]; + is1 = ap1->strides[ap1->nd - 1]; + is2 = ap2->strides[ap2->nd - 1]; op = ret->data; os = ret->descr->elsize; - - axis = ap1->nd-1; - it1 = (PyArrayIterObject *)\ - PyArray_IterAllButAxis((PyObject *)ap1, &axis); - axis = ap2->nd-1; - it2 = (PyArrayIterObject *)\ - PyArray_IterAllButAxis((PyObject *)ap2, &axis); - + axis = ap1->nd - 1; + it1 = (PyArrayIterObject *) PyArray_IterAllButAxis((PyObject *)ap1, &axis); + axis = ap2->nd - 1; + it2 = (PyArrayIterObject *) PyArray_IterAllButAxis((PyObject *)ap2, &axis); NPY_BEGIN_THREADS_DESCR(ap2->descr); - while(1) { - while(it2->index < it2->size) { - dot(it1->dataptr, is1, it2->dataptr, is2, op, l, ret); - op += os; - PyArray_ITER_NEXT(it2); - } - PyArray_ITER_NEXT(it1); - if (it1->index >= it1->size) break; - PyArray_ITER_RESET(it2); + while (1) { + while (it2->index < it2->size) { + dot(it1->dataptr, is1, it2->dataptr, is2, op, l, ret); + op += os; + PyArray_ITER_NEXT(it2); } + PyArray_ITER_NEXT(it1); + if (it1->index >= it1->size) { + break; + } + PyArray_ITER_RESET(it2); + } NPY_END_THREADS_DESCR(ap2->descr); - Py_DECREF(it1); + Py_DECREF(it1); Py_DECREF(it2); - - if (PyErr_Occurred()) goto fail; - - + if (PyErr_Occurred()) { + goto fail; + } Py_DECREF(ap1); Py_DECREF(ap2); return (PyObject *)ret; @@ -3349,14 +3681,14 @@ } -/* just like inner product but does the swapaxes stuff on the fly */ /*NUMPY_API - Numeric.matrixproduct(a,v) -*/ + *Numeric.matrixproduct(a,v) + * just like inner product but does the swapaxes stuff on the fly + */ static PyObject * PyArray_MatrixProduct(PyObject *op1, PyObject *op2) { - PyArrayObject *ap1, *ap2, *ret=NULL; + PyArrayObject *ap1, *ap2, *ret = NULL; PyArrayIterObject *it1, *it2; intp i, j, l; int typenum, nd, axis, matchDim; @@ -3365,54 +3697,50 @@ intp dimensions[MAX_DIMS]; PyArray_DotFunc *dot; PyArray_Descr *typec; - NPY_BEGIN_THREADS_DEF; - typenum = PyArray_ObjectType(op1, 0); + typenum = PyArray_ObjectType(op1, 0); typenum = PyArray_ObjectType(op2, typenum); - typec = PyArray_DescrFromType(typenum); Py_INCREF(typec); - ap1 = (PyArrayObject *)PyArray_FromAny(op1, typec, 0, 0, - BEHAVED, NULL); - if (ap1 == NULL) {Py_DECREF(typec); return NULL;} - ap2 = (PyArrayObject *)PyArray_FromAny(op2, typec, 0, 0, - BEHAVED, NULL); - if (ap2 == NULL) goto fail; - + ap1 = (PyArrayObject *)PyArray_FromAny(op1, typec, 0, 0, BEHAVED, NULL); + if (ap1 == NULL) { + Py_DECREF(typec); + return NULL; + } + ap2 = (PyArrayObject *)PyArray_FromAny(op2, typec, 0, 0, BEHAVED, NULL); + if (ap2 == NULL) { + goto fail; + } if (ap1->nd == 0 || ap2->nd == 0) { ret = (ap1->nd == 0 ? ap1 : ap2); - ret = (PyArrayObject *)ret->ob_type->tp_as_number->\ - nb_multiply((PyObject *)ap1, (PyObject *)ap2); + ret = (PyArrayObject *)ret->ob_type->tp_as_number->nb_multiply( + (PyObject *)ap1, (PyObject *)ap2); Py_DECREF(ap1); Py_DECREF(ap2); return (PyObject *)ret; } - - l = ap1->dimensions[ap1->nd-1]; + l = ap1->dimensions[ap1->nd - 1]; if (ap2->nd > 1) { matchDim = ap2->nd - 2; } else { matchDim = 0; } - if (ap2->dimensions[matchDim] != l) { PyErr_SetString(PyExc_ValueError, "objects are not aligned"); goto fail; } - - nd = ap1->nd+ap2->nd-2; + nd = ap1->nd + ap2->nd - 2; if (nd > NPY_MAXDIMS) { - PyErr_SetString(PyExc_ValueError, - "dot: too many dimensions in result"); + PyErr_SetString(PyExc_ValueError, "dot: too many dimensions in result"); goto fail; } j = 0; - for(i=0; ind-1; i++) { + for (i = 0; i < ap1->nd - 1; i++) { dimensions[j++] = ap1->dimensions[i]; } - for(i=0; ind-2; i++) { + for (i = 0; i < ap2->nd - 2; i++) { dimensions[j++] = ap2->dimensions[i]; } if(ap2->nd > 1) { @@ -3426,20 +3754,20 @@ */ is1 = ap1->strides[ap1->nd-1]; is2 = ap2->strides[matchDim]; - /* Choose which subtype to return */ ret = new_array_for_sum(ap1, ap2, nd, dimensions, typenum); - if (ret == NULL) goto fail; - + if (ret == NULL) { + goto fail; + } /* Ensure that multiarray.dot(,<0xM>) -> zeros((N,M)) */ if (PyArray_SIZE(ap1) == 0 && PyArray_SIZE(ap2) == 0) { memset(PyArray_DATA(ret), 0, PyArray_NBYTES(ret)); } - else { /* Ensure that multiarray.dot([],[]) -> 0 */ + else { + /* Ensure that multiarray.dot([],[]) -> 0 */ memset(PyArray_DATA(ret), 0, PyArray_ITEMSIZE(ret)); } - dot = ret->descr->f->dotfunc; if (dot == NULL) { PyErr_SetString(PyExc_ValueError, @@ -3448,29 +3776,31 @@ } op = ret->data; os = ret->descr->elsize; - axis = ap1->nd-1; - it1 = (PyArrayIterObject *)\ + it1 = (PyArrayIterObject *) PyArray_IterAllButAxis((PyObject *)ap1, &axis); - it2 = (PyArrayIterObject *)\ + it2 = (PyArrayIterObject *) PyArray_IterAllButAxis((PyObject *)ap2, &matchDim); - NPY_BEGIN_THREADS_DESCR(ap2->descr); - while(1) { - while(it2->index < it2->size) { - dot(it1->dataptr, is1, it2->dataptr, is2, op, l, ret); - op += os; - PyArray_ITER_NEXT(it2); - } - PyArray_ITER_NEXT(it1); - if (it1->index >= it1->size) break; - PyArray_ITER_RESET(it2); + while (1) { + while (it2->index < it2->size) { + dot(it1->dataptr, is1, it2->dataptr, is2, op, l, ret); + op += os; + PyArray_ITER_NEXT(it2); } + PyArray_ITER_NEXT(it1); + if (it1->index >= it1->size) { + break; + } + PyArray_ITER_RESET(it2); + } NPY_END_THREADS_DESCR(ap2->descr); - Py_DECREF(it1); + Py_DECREF(it1); Py_DECREF(it2); - if (PyErr_Occurred()) goto fail; /* only for OBJECT arrays */ - + if (PyErr_Occurred()) { + /* only for OBJECT arrays */ + goto fail; + } Py_DECREF(ap1); Py_DECREF(ap2); return (PyObject *)ret; @@ -3483,8 +3813,8 @@ } /*NUMPY_API - Fast Copy and Transpose -*/ + * Fast Copy and Transpose + */ static PyObject * PyArray_CopyAndTranspose(PyObject *op) { @@ -3498,9 +3828,12 @@ /* make sure it is well-behaved */ arr = PyArray_FromAny(op, NULL, 0, 0, CARRAY, NULL); - if (arr == NULL) return NULL; + if (arr == NULL) { + return NULL; + } nd = PyArray_NDIM(arr); - if (nd == 1) { /* we will give in to old behavior */ + if (nd == 1) { + /* we will give in to old behavior */ ret = PyArray_Copy((PyArrayObject *)arr); Py_DECREF(arr); return ret; @@ -3516,24 +3849,23 @@ dims[0] = PyArray_DIM(arr,1); dims[1] = PyArray_DIM(arr,0); elsize = PyArray_ITEMSIZE(arr); - Py_INCREF(PyArray_DESCR(arr)); ret = PyArray_NewFromDescr(arr->ob_type, PyArray_DESCR(arr), 2, dims, NULL, NULL, 0, arr); - if (ret == NULL) { Py_DECREF(arr); return NULL; } + /* do 2-d loop */ NPY_BEGIN_ALLOW_THREADS; optr = PyArray_DATA(ret); str2 = elsize*dims[0]; - for (i=0; idimensions[0]; n2 = ap2->dimensions[0]; - if (n1 < n2) { - ret = ap1; ap1 = ap2; ap2 = ret; - ret = NULL; i = n1;n1=n2;n2=i; + ret = ap1; + ap1 = ap2; + ap2 = ret; + ret = NULL; + i = n1; + n1 = n2; + n2 = i; } length = n1; n = n2; switch(mode) { case 0: - length = length-n+1; + length = length - n + 1; n_left = n_right = 0; break; case 1: n_left = (intp)(n/2); - n_right = n-n_left-1; + n_right = n - n_left - 1; break; case 2: - n_right = n-1; - n_left = n-1; - length = length+n-1; + n_right = n - 1; + n_left = n - 1; + length = length + n - 1; break; default: - PyErr_SetString(PyExc_ValueError, - "mode must be 0, 1, or 2"); + PyErr_SetString(PyExc_ValueError, "mode must be 0, 1, or 2"); goto fail; } - /* Need to choose an output array that can hold a sum - -- use priority to determine which subtype. - */ + /* + * Need to choose an output array that can hold a sum + * -- use priority to determine which subtype. + */ ret = new_array_for_sum(ap1, ap2, 1, &length, typenum); - if (ret == NULL) goto fail; - + if (ret == NULL) { + goto fail; + } dot = ret->descr->f->dotfunc; if (dot == NULL) { PyErr_SetString(PyExc_ValueError, @@ -3617,32 +3956,34 @@ } NPY_BEGIN_THREADS_DESCR(ret->descr); - - is1 = ap1->strides[0]; is2 = ap2->strides[0]; - op = ret->data; os = ret->descr->elsize; - - ip1 = ap1->data; ip2 = ap2->data+n_left*is2; - n = n-n_left; - for(i=0; istrides[0]; + is2 = ap2->strides[0]; + op = ret->data; + os = ret->descr->elsize; + ip1 = ap1->data; + ip2 = ap2->data + n_left*is2; + n = n - n_left; + for (i = 0; i < n_left; i++) { dot(ip1, is1, ip2, is2, op, n, ret); n++; ip2 -= is2; op += os; } - for(i=0; i<(n1-n2+1); i++) { + for (i = 0; i < (n1 - n2 + 1); i++) { dot(ip1, is1, ip2, is2, op, n, ret); ip1 += is1; op += os; } - for(i=0; idescr); - - if (PyErr_Occurred()) goto fail; + if (PyErr_Occurred()) { + goto fail; + } Py_DECREF(ap1); Py_DECREF(ap2); return (PyObject *)ret; @@ -3656,8 +3997,8 @@ /*NUMPY_API - ArgMin -*/ + * ArgMin + */ static PyObject * PyArray_ArgMin(PyArrayObject *ap, int axis, PyArrayObject *out) { Modified: branches/coremath/numpy/core/src/scalarmathmodule.c.src =================================================================== --- branches/coremath/numpy/core/src/scalarmathmodule.c.src 2009-02-21 23:54:31 UTC (rev 6444) +++ branches/coremath/numpy/core/src/scalarmathmodule.c.src 2009-02-22 03:41:06 UTC (rev 6445) @@ -64,8 +64,18 @@ ulonglong ah, al, bh, bl, w, x, y, z; /* Convert to non-negative quantities */ - if (a0 < 0) { a = -a0; } else { a = a0; } - if (b0 < 0) { b = -b0; } else { b = b0; } + if (a0 < 0) { + a = -a0; + } + else { + a = a0; + } + if (b0 < 0) { + b = -b0; + } + else { + b = b0; + } #if SIZEOF_LONGLONG == 64 @@ -109,55 +119,61 @@ /* Basic operations: + * + * BINARY: + * + * add, subtract, multiply, divide, remainder, divmod, power, + * floor_divide, true_divide + * + * lshift, rshift, and, or, xor (integers only) + * + * UNARY: + * + * negative, positive, absolute, nonzero, invert, int, long, float, oct, hex + * + */ - BINARY: - - add, subtract, multiply, divide, remainder, divmod, power, - floor_divide, true_divide - - lshift, rshift, and, or, xor (integers only) - - UNARY: - - negative, positive, absolute, nonzero, invert, int, long, float, oct, hex - -*/ - /**begin repeat - #name=byte,short,int,long,longlong# -**/ + * #name = byte, short, int, long, longlong# + */ static void @name at _ctype_add(@name@ a, @name@ b, @name@ *out) { *out = a + b; - if ((*out^a) >= 0 || (*out^b) >= 0) + if ((*out^a) >= 0 || (*out^b) >= 0) { return; + } generate_overflow_error(); return; } static void @name at _ctype_subtract(@name@ a, @name@ b, @name@ *out) { *out = a - b; - if ((*out^a) >= 0 || (*out^~b) >= 0) + if ((*out^a) >= 0 || (*out^~b) >= 0) { return; + } generate_overflow_error(); return; } /**end repeat**/ + /**begin repeat - #name=ubyte,ushort,uint,ulong,ulonglong# -**/ + * #name = ubyte, ushort, uint, ulong, ulonglong# + */ static void @name at _ctype_add(@name@ a, @name@ b, @name@ *out) { *out = a + b; - if (*out >= a && *out >= b) + if (*out >= a && *out >= b) { return; + } generate_overflow_error(); return; } static void @name at _ctype_subtract(@name@ a, @name@ b, @name@ *out) { *out = a - b; - if (a >= b) return; + if (a >= b) { + return; + } generate_overflow_error(); return; } @@ -168,13 +184,14 @@ #endif /**begin repeat - #name=byte,ubyte,short,ushort,int,uint,long,ulong# - #big=(int,uint)*2,(longlong,ulonglong)*2# - #NAME=BYTE,UBYTE,SHORT,USHORT,INT,UINT,LONG,ULONG# - #SIZENAME=BYTE*2,SHORT*2,INT*2,LONG*2# - #SIZE=INT*4,LONGLONG*4# - #neg=(1,0)*4# -**/ + * + * #name = byte, ubyte, short, ushort, int, uint, long, ulong# + * #big = (int,uint)*2, (longlong,ulonglong)*2# + * #NAME = BYTE, UBYTE, SHORT, USHORT, INT, UINT, LONG, ULONG# + * #SIZENAME = BYTE*2, SHORT*2, INT*2, LONG*2# + * #SIZE = INT*4,LONGLONG*4# + * #neg = (1,0)*4# + */ #if SIZEOF_ at SIZE@ > SIZEOF_ at SIZENAME@ static void @name at _ctype_multiply(@name@ a, @name@ b, @name@ *out) { @@ -193,25 +210,29 @@ /**end repeat**/ /**begin repeat - #name=int,uint,long,ulong,longlong,ulonglong# - #SIZE=INT*2,LONG*2,LONGLONG*2# - #char=(s,u)*3# -**/ + * + * #name = int, uint, long, ulong, longlong, ulonglong# + * #SIZE = INT*2, LONG*2, LONGLONG*2# + * #char = (s,u)*3# + */ #if SIZEOF_LONGLONG == SIZEOF_ at SIZE@ static void @name at _ctype_multiply(@name@ a, @name@ b, @name@ *out) { *out = a * b; - if (@char at longlong_overflow(a, b)) + if (@char at longlong_overflow(a, b)) { generate_overflow_error(); + } return; } #endif /**end repeat**/ /**begin repeat - #name=byte,ubyte,short,ushort,int,uint,long,ulong,longlong,ulonglong# - #neg=(1,0)*5# -**/ + * + * #name = byte, ubyte, short, ushort, int, uint, long, + * ulong, longlong, ulonglong# + * #neg = (1,0)*5# + */ static void @name at _ctype_divide(@name@ a, @name@ b, @name@ *out) { if (b == 0) { @@ -228,13 +249,16 @@ #if @neg@ @name@ tmp; tmp = a / b; - if (((a > 0) != (b > 0)) && (a % b != 0)) tmp--; + if (((a > 0) != (b > 0)) && (a % b != 0)) { + tmp--; + } *out = tmp; #else *out = a / b; #endif } } + #define @name at _ctype_floor_divide @name at _ctype_divide static void @name at _ctype_remainder(@name@ a, @name@ b, @name@ *out) { @@ -247,7 +271,8 @@ else if ((a > 0) == (b > 0)) { *out = a % b; } - else { /* handled like Python does */ + else { + /* handled like Python does */ *out = a % b; if (*out) *out += b; } @@ -258,18 +283,21 @@ /**end repeat**/ /**begin repeat - #name=byte,ubyte,short,ushort,int,uint,long,ulong,longlong,ulonglong# - #otyp=float*4, double*6# -**/ + * + * #name = byte, ubyte, short, ushort, int, uint, long, + * ulong, longlong, ulonglong# + * #otyp = float*4, double*6# + */ #define @name at _ctype_true_divide(a, b, out) \ *(out) = ((@otyp@) (a)) / ((@otyp@) (b)); /**end repeat**/ /* b will always be positive in this call */ /**begin repeat - #name=byte,ubyte,short,ushort,int,uint,long,ulong,longlong,ulonglong# - #upc=BYTE,UBYTE,SHORT,USHORT,INT,UINT,LONG,ULONG,LONGLONG,ULONGLONG# -**/ + * + * #name = byte, ubyte, short, ushort, int, uint, long, ulong, longlong, ulonglong# + * #upc = BYTE, UBYTE, SHORT, USHORT, INT, UINT, LONG, ULONG, LONGLONG, ULONGLONG# + */ static void @name at _ctype_power(@name@ a, @name@ b, @name@ *out) { @name@ temp, ix, mult; @@ -280,11 +308,14 @@ if (b & 1) { @name at _ctype_multiply(ix, temp, &mult); ix = mult; - if (temp == 0) - break; /* Avoid ix / 0 */ + if (temp == 0) { + break; + } } b >>= 1; /* Shift exponent down by 1 bit */ - if (b==0) break; + if (b==0) { + break; + } /* Square the value of temp */ @name at _ctype_multiply(temp, temp, &mult); temp = mult; @@ -298,16 +329,16 @@ /* QUESTION: Should we check for overflow / underflow in (l,r)shift? */ /**begin repeat - #name=(byte,ubyte,short,ushort,int,uint,long,ulong,longlong,ulonglong)*5# - #oper=and*10, xor*10, or*10, lshift*10, rshift*10# - #op=&*10, ^*10, |*10, <<*10, >>*10# -**/ + * #name = (byte,ubyte,short,ushort,int,uint,long,ulong,longlong,ulonglong)*5# + * #oper = and*10, xor*10, or*10, lshift*10, rshift*10# + * #op = &*10, ^*10, |*10, <<*10, >>*10# + */ #define @name at _ctype_@oper@(arg1, arg2, out) *(out) = (arg1) @op@ (arg2) /**end repeat**/ /**begin repeat - #name=float, double, longdouble# -**/ + * #name = float, double, longdouble# + */ static @name@ (*_basic_ at name@_floor)(@name@); static @name@ (*_basic_ at name@_sqrt)(@name@); static @name@ (*_basic_ at name@_fmod)(@name@, @name@); @@ -321,44 +352,46 @@ /**end repeat**/ /**begin repeat - #name=cfloat, cdouble, clongdouble# - #rtype=float, double, longdouble# - #c=f,,l# -**/ + * #name = cfloat, cdouble, clongdouble# + * #rtype = float, double, longdouble# + * #c = f,,l# + */ #define @name at _ctype_add(a, b, outp) do{ \ (outp)->real = (a).real + (b).real; \ (outp)->imag = (a).imag + (b).imag; \ - }while(0) + } while(0) #define @name at _ctype_subtract(a, b, outp) do{ \ (outp)->real = (a).real - (b).real; \ (outp)->imag = (a).imag - (b).imag; \ - }while(0) + } while(0) #define @name at _ctype_multiply(a, b, outp) do{ \ (outp)->real = (a).real * (b).real - (a).imag * (b).imag; \ (outp)->imag = (a).real * (b).imag + (a).imag * (b).real; \ - }while(0) + } while(0) #define @name at _ctype_divide(a, b, outp) do{ \ @rtype@ d = (b).real*(b).real + (b).imag*(b).imag; \ (outp)->real = ((a).real*(b).real + (a).imag*(b).imag)/d; \ (outp)->imag = ((a).imag*(b).real - (a).real*(b).imag)/d; \ - }while(0) + } while(0) #define @name at _ctype_true_divide @name at _ctype_divide #define @name at _ctype_floor_divide(a, b, outp) do { \ (outp)->real = _basic_ at rtype@_floor \ (((a).real*(b).real + (a).imag*(b).imag) / \ ((b).real*(b).real + (b).imag*(b).imag)); \ (outp)->imag = 0; \ - }while(0) + } while(0) /**end repeat**/ /**begin repeat - #name=float,double,longdouble# -**/ + * #name = float, double, longdouble# + */ static void @name at _ctype_remainder(@name@ a, @name@ b, @name@ *out) { @name@ mod; mod = _basic_ at name@_fmod(a, b); - if (mod && (((b < 0) != (mod < 0)))) mod += b; + if (mod && (((b < 0) != (mod < 0)))) { + mod += b; + } *out = mod; } /**end repeat**/ @@ -366,8 +399,9 @@ /**begin repeat - #name=byte,ubyte,short,ushort,int,uint,long,ulong,longlong,ulonglong,float,double,longdouble,cfloat,cdouble,clongdouble# -**/ + * #name = byte, ubyte, short, ushort, int, uint, long, ulong, longlong, + * ulonglong, float, double, longdouble, cfloat, cdouble, clongdouble# + */ #define @name at _ctype_divmod(a, b, out, out2) { \ @name at _ctype_floor_divide(a, b, out); \ @name at _ctype_remainder(a, b, out2); \ @@ -375,8 +409,8 @@ /**end repeat**/ /**begin repeat - #name= float, double, longdouble# -**/ + * #name = float, double, longdouble# + */ static @name@ (*_basic_ at name@_pow)(@name@ a, @name@ b); static void @name at _ctype_power(@name@ a, @name@ b, @name@ *out) { @@ -385,9 +419,10 @@ /**end repeat**/ /**begin repeat - #name=byte, ubyte, short, ushort, int, uint, long, ulong, longlong, ulonglong, float, double, longdouble# - #uns=(0,1)*5,0*3# -**/ + * #name = byte, ubyte, short, ushort, int, uint, long, ulong, longlong, + * ulonglong, float, double, longdouble# + * #uns = (0,1)*5,0*3# + */ static void @name at _ctype_negative(@name@ a, @name@ *out) { @@ -400,8 +435,8 @@ /**begin repeat - #name= cfloat, cdouble, clongdouble# -**/ + * #name = cfloat, cdouble, clongdouble# + */ static void @name at _ctype_negative(@name@ a, @name@ *out) { @@ -411,8 +446,9 @@ /**end repeat**/ /**begin repeat - #name=byte, ubyte, short, ushort, int, uint, long, ulong, longlong, ulonglong, float, double, longdouble# -**/ + * #name = byte, ubyte, short, ushort, int, uint, long, ulong, longlong, + * ulonglong, float, double, longdouble# + */ static void @name at _ctype_positive(@name@ a, @name@ *out) { @@ -420,13 +456,14 @@ } /**end repeat**/ -/* Get the nc_powf, nc_pow, and nc_powl functions from - the data area of the power ufunc in umathmodule. -*/ +/* + * Get the nc_powf, nc_pow, and nc_powl functions from + * the data area of the power ufunc in umathmodule. + */ /**begin repeat - #name=cfloat, cdouble, clongdouble# -**/ + * #name = cfloat, cdouble, clongdouble# + */ static void @name at _ctype_positive(@name@ a, @name@ *out) { @@ -443,15 +480,15 @@ /**begin repeat - #name=ubyte, ushort, uint, ulong, ulonglong# -**/ + * #name = ubyte, ushort, uint, ulong, ulonglong# + */ #define @name at _ctype_absolute @name at _ctype_positive /**end repeat**/ /**begin repeat - #name=byte, short, int, long, longlong, float, double, longdouble# -**/ + * #name = byte, short, int, long, longlong, float, double, longdouble# + */ static void @name at _ctype_absolute(@name@ a, @name@ *out) { @@ -460,9 +497,9 @@ /**end repeat**/ /**begin repeat - #name= cfloat, cdouble, clongdouble# - #rname= float, double, longdouble# -**/ + * #name = cfloat, cdouble, clongdouble# + * #rname = float, double, longdouble# + */ static void @name at _ctype_absolute(@name@ a, @rname@ *out) { @@ -471,8 +508,9 @@ /**end repeat**/ /**begin repeat - #name=byte, ubyte, short, ushort, int, uint, long, ulong, longlong, ulonglong# -**/ + * #name = byte, ubyte, short, ushort, int, uint, long, + * ulong, longlong, ulonglong# + */ #define @name at _ctype_invert(a, out) *(out) = ~a; /**end repeat**/ @@ -480,25 +518,27 @@ /* The general strategy for commutative binary operators is to + * + * 1) Convert the types to the common type if both are scalars (0 return) + * 2) If both are not scalars use ufunc machinery (-2 return) + * 3) If both are scalars but cannot be cast to the right type + * return NotImplmented (-1 return) + * + * 4) Perform the function on the C-type. + * 5) If an error condition occurred, check to see + * what the current error-handling is and handle the error. + * + * 6) Construct and return the output scalar. + */ - 1) Convert the types to the common type if both are scalars (0 return) - 2) If both are not scalars use ufunc machinery (-2 return) - 3) If both are scalars but cannot be cast to the right type - return NotImplmented (-1 return) - - 4) Perform the function on the C-type. - 5) If an error condition occurred, check to see - what the current error-handling is and handle the error. - - 6) Construct and return the output scalar. -*/ - - /**begin repeat - #name=byte,ubyte,short,ushort,int,uint,long,ulong,longlong,ulonglong,float,double,longdouble,cfloat,cdouble,clongdouble# - #Name=Byte, UByte, Short, UShort, Int, UInt, Long, ULong, LongLong, ULongLong, Float, Double, LongDouble, CFloat, CDouble, CLongDouble# - #NAME=BYTE, UBYTE, SHORT, USHORT, INT, UINT, LONG, ULONG, LONGLONG, ULONGLONG, FLOAT, DOUBLE, LONGDOUBLE, CFLOAT, CDOUBLE, CLONGDOUBLE# -**/ + * #name = byte, ubyte, short, ushort, int, uint, long, ulong, longlong, + * ulonglong, float, double, longdouble, cfloat, cdouble, clongdouble# + * #Name = Byte, UByte, Short, UShort, Int, UInt, Long, ULong, LongLong, + * ULongLong, Float, Double, LongDouble, CFloat, CDouble, CLongDouble# + * #NAME = BYTE, UBYTE, SHORT, USHORT, INT, UINT, LONG, ULONG, LONGLONG, + * ULONGLONG, FLOAT, DOUBLE, LONGDOUBLE, CFLOAT, CDOUBLE, CLONGDOUBLE# + */ static int _ at name@_convert_to_ctype(PyObject *a, @name@ *arg1) @@ -512,13 +552,17 @@ else if (PyArray_IsScalar(a, Generic)) { PyArray_Descr *descr1; int ret; - if (!PyArray_IsScalar(a, Number)) return -1; + if (!PyArray_IsScalar(a, Number)) { + return -1; + } descr1 = PyArray_DescrFromTypeObject((PyObject *)(a->ob_type)); if (PyArray_CanCastSafely(descr1->type_num, PyArray_ at NAME@)) { PyArray_CastScalarDirect(a, descr1, arg1, PyArray_ at NAME@); ret = 0; } - else ret = -1; + else { + ret = -1; + } Py_DECREF(descr1); return ret; } @@ -535,26 +579,29 @@ /**begin repeat - #name=byte,ubyte,short,ushort,int,uint,long,ulong,longlong,ulonglong,float,double,cfloat,cdouble# -**/ - + * #name = byte, ubyte, short, ushort, int, uint, long, ulong, + * longlong, ulonglong, float, double, cfloat, cdouble# + */ static int _ at name@_convert2_to_ctypes(PyObject *a, @name@ *arg1, PyObject *b, @name@ *arg2) { int ret; ret = _ at name@_convert_to_ctype(a, arg1); - if (ret < 0) return ret; + if (ret < 0) { + return ret; + } ret = _ at name@_convert_to_ctype(b, arg2); - if (ret < 0) return ret; + if (ret < 0) { + return ret; + } return 0; } - /**end repeat**/ /**begin repeat - #name=longdouble, clongdouble# -**/ + * #name = longdouble, clongdouble# + */ static int _ at name@_convert2_to_ctypes(PyObject *a, @name@ *arg1, @@ -562,10 +609,16 @@ { int ret; ret = _ at name@_convert_to_ctype(a, arg1); - if (ret < 0) return ret; + if (ret < 0) { + return ret; + } ret = _ at name@_convert_to_ctype(b, arg2); - if (ret == -2) ret = -3; - if (ret < 0) return ret; + if (ret == -2) { + ret = -3; + } + if (ret < 0) { + return ret; + } return 0; } @@ -600,26 +653,34 @@ #endif switch(_ at name@_convert2_to_ctypes(a, &arg1, b, &arg2)) { - case 0: - break; - case -1: /* one of them can't be cast safely - must be mixed-types*/ - return PyArray_Type.tp_as_number->nb_ at oper@(a,b); - case -2: /* use default handling */ - if (PyErr_Occurred()) return NULL; - return PyGenericArrType_Type.tp_as_number->nb_ at oper@(a,b); - case -3: /* special case for longdouble and clongdouble - because they have a recursive getitem in their dtype */ - Py_INCREF(Py_NotImplemented); - return Py_NotImplemented; + case 0: + break; + case -1: + /* one of them can't be cast safely must be mixed-types*/ + return PyArray_Type.tp_as_number->nb_ at oper@(a,b); + case -2: + /* use default handling */ + if (PyErr_Occurred()) { + return NULL; + } + return PyGenericArrType_Type.tp_as_number->nb_ at oper@(a,b); + case -3: + /* + * special case for longdouble and clongdouble + * because they have a recursive getitem in their dtype + */ + Py_INCREF(Py_NotImplemented); + return Py_NotImplemented; } #if @fperr@ PyUFunc_clearfperr(); #endif - /* here we do the actual calculation with arg1 and arg2 */ - /* as a function call. */ + /* + * here we do the actual calculation with arg1 and arg2 + * as a function call. + */ #if @twoout@ @name at _ctype_@oper@(arg1, arg2, &out, &out2); #else @@ -632,9 +693,11 @@ if (retstatus) { int bufsize, errmask; PyObject *errobj; + if (PyUFunc_GetPyValues("@name at _scalars", &bufsize, &errmask, - &errobj) < 0) + &errobj) < 0) { return NULL; + } first = 1; if (PyUFunc_handlefperr(errmask, errobj, retstatus, &first)) { Py_XDECREF(errobj); @@ -647,18 +710,28 @@ #if @twoout@ ret = PyTuple_New(2); - if (ret==NULL) return NULL; + if (ret == NULL) { + return NULL; + } obj = PyArrayScalar_New(@OName@); - if (obj == NULL) {Py_DECREF(ret); return NULL;} + if (obj == NULL) { + Py_DECREF(ret); + return NULL; + } PyArrayScalar_ASSIGN(obj, @OName@, out); PyTuple_SET_ITEM(ret, 0, obj); obj = PyArrayScalar_New(@OName@); - if (obj == NULL) {Py_DECREF(ret); return NULL;} + if (obj == NULL) { + Py_DECREF(ret); + return NULL; + } PyArrayScalar_ASSIGN(obj, @OName@, out2); PyTuple_SET_ITEM(ret, 1, obj); #else ret = PyArrayScalar_New(@OName@); - if (ret==NULL) return NULL; + if (ret == NULL) { + return NULL; + } PyArrayScalar_ASSIGN(ret, @OName@, out); #endif return ret; @@ -692,24 +765,32 @@ #endif switch(_ at name@_convert2_to_ctypes(a, &arg1, b, &arg2)) { - case 0: - break; - case -1: /* can't cast both safely - mixed-types? */ - return PyArray_Type.tp_as_number->nb_power(a,b,NULL); - case -2: /* use default handling */ - if (PyErr_Occurred()) return NULL; - return PyGenericArrType_Type.tp_as_number->nb_power(a,b,NULL); - case -3: /* special case for longdouble and clongdouble - because they have a recursive getitem in their dtype */ - Py_INCREF(Py_NotImplemented); - return Py_NotImplemented; + case 0: + break; + case -1: + /* can't cast both safely mixed-types? */ + return PyArray_Type.tp_as_number->nb_power(a,b,NULL); + case -2: + /* use default handling */ + if (PyErr_Occurred()) { + return NULL; + } + return PyGenericArrType_Type.tp_as_number->nb_power(a,b,NULL); + case -3: + /* + * special case for longdouble and clongdouble + * because they have a recursive getitem in their dtype + */ + Py_INCREF(Py_NotImplemented); + return Py_NotImplemented; } PyUFunc_clearfperr(); - /* here we do the actual calculation with arg1 and arg2 */ - /* as a function call. */ + /* + * here we do the actual calculation with arg1 and arg2 + * as a function call. + */ #if @cmplx@ if (arg2.real == 0 && arg1.real == 0) { out1.real = out.real = 1; @@ -735,9 +816,11 @@ if (retstatus) { int bufsize, errmask; PyObject *errobj; + if (PyUFunc_GetPyValues("@name at _scalars", &bufsize, &errmask, - &errobj) < 0) + &errobj) < 0) { return NULL; + } first = 1; if (PyUFunc_handlefperr(errmask, errobj, retstatus, &first)) { Py_XDECREF(errobj); @@ -749,17 +832,23 @@ #if @isint@ if (arg2 < 0) { ret = PyArrayScalar_New(@OName@); - if (ret==NULL) return NULL; + if (ret == NULL) { + return NULL; + } PyArrayScalar_ASSIGN(ret, @OName@, out1); } else { ret = PyArrayScalar_New(@Name@); - if (ret==NULL) return NULL; + if (ret == NULL) { + return NULL; + } PyArrayScalar_ASSIGN(ret, @Name@, out); } #else ret = PyArrayScalar_New(@Name@); - if (ret==NULL) return NULL; + if (ret == NULL) { + return NULL; + } PyArrayScalar_ASSIGN(ret, @Name@, out); #endif @@ -769,26 +858,26 @@ /**begin repeat - #name=(cfloat,cdouble,clongdouble)*2# - #oper=divmod*3,remainder*3# -**/ + * #name = (cfloat,cdouble,clongdouble)*2# + * #oper = divmod*3,remainder*3# + */ #define @name at _@oper@ NULL /**end repeat**/ /**begin repeat - #name=(float,double,longdouble,cfloat,cdouble,clongdouble)*5# - #oper=lshift*6, rshift*6, and*6, or*6, xor*6# -**/ + * #name = (float,double,longdouble,cfloat,cdouble,clongdouble)*5# + * #oper = lshift*6, rshift*6, and*6, or*6, xor*6# + */ #define @name at _@oper@ NULL /**end repeat**/ /**begin repeat - #name=(byte,ubyte,short,ushort,int,uint,long,ulong,longlong,ulonglong,float,double,longdouble,cfloat,cdouble,clongdouble)*3, byte,ubyte,short,ushort,int,uint,long,ulong,longlong,ulonglong# - #otyp=(byte,ubyte,short,ushort,int,uint,long,ulong,longlong,ulonglong,float,double,longdouble,cfloat,cdouble,clongdouble)*2,byte,ubyte,short,ushort,int,uint,long,ulong,longlong,ulonglong,float,double,longdouble,float,double,longdouble,byte,ubyte,short,ushort,int,uint,long,ulong,longlong,ulonglong# - #OName=(Byte, UByte, Short, UShort, Int, UInt, Long, ULong, LongLong, ULongLong, Float, Double, LongDouble, CFloat, CDouble, CLongDouble)*2, Byte, UByte, Short, UShort, Int, UInt, Long, ULong, LongLong, ULongLong, Float, Double, LongDouble, Float, Double, LongDouble, Byte, UByte, Short, UShort, Int, UInt, Long, ULong, LongLong, ULongLong# - #oper=negative*16, positive*16, absolute*16, invert*10# -**/ + * #name=(byte,ubyte,short,ushort,int,uint,long,ulong,longlong,ulonglong,float,double,longdouble,cfloat,cdouble,clongdouble)*3, byte,ubyte,short,ushort,int,uint,long,ulong,longlong,ulonglong# + * #otyp=(byte,ubyte,short,ushort,int,uint,long,ulong,longlong,ulonglong,float,double,longdouble,cfloat,cdouble,clongdouble)*2,byte,ubyte,short,ushort,int,uint,long,ulong,longlong,ulonglong,float,double,longdouble,float,double,longdouble,byte,ubyte,short,ushort,int,uint,long,ulong,longlong,ulonglong# + * #OName=(Byte, UByte, Short, UShort, Int, UInt, Long, ULong, LongLong, ULongLong, Float, Double, LongDouble, CFloat, CDouble, CLongDouble)*2, Byte, UByte, Short, UShort, Int, UInt, Long, ULong, LongLong, ULongLong, Float, Double, LongDouble, Float, Double, LongDouble, Byte, UByte, Short, UShort, Int, UInt, Long, ULong, LongLong, ULongLong# + * #oper=negative*16, positive*16, absolute*16, invert*10# + */ static PyObject * @name at _@oper@(PyObject *a) { @@ -799,16 +888,22 @@ switch(_ at name@_convert_to_ctype(a, &arg1)) { case 0: break; - case -1: /* can't cast both safely use different add function */ + case -1: + /* can't cast both safely use different add function */ Py_INCREF(Py_NotImplemented); return Py_NotImplemented; - case -2: /* use default handling */ - if (PyErr_Occurred()) return NULL; + case -2: + /* use default handling */ + if (PyErr_Occurred()) { + return NULL; + } return PyGenericArrType_Type.tp_as_number->nb_ at oper@(a); } - /* here we do the actual calculation with arg1 and arg2 */ - /* make it a function call. */ + /* + * here we do the actual calculation with arg1 and arg2 + * make it a function call. + */ @name at _ctype_@oper@(arg1, &out); @@ -820,15 +915,16 @@ /**end repeat**/ /**begin repeat - #name=float,double,longdouble,cfloat,cdouble,clongdouble# -**/ + * #name = float, double, longdouble, cfloat, cdouble, clongdouble# + */ #define @name at _invert NULL /**end repeat**/ /**begin repeat - #name=byte,ubyte,short,ushort,int,uint,long,ulong,longlong,ulonglong,float,double,longdouble,cfloat,cdouble,clongdouble# - #simp=1*13,0*3# -**/ + * #name = byte, ubyte, short, ushort, int, uint, long, ulong, longlong, + * ulonglong, float, double, longdouble, cfloat, cdouble, clongdouble# + * #simp=1*13,0*3# + */ static int @name at _nonzero(PyObject *a) { @@ -836,12 +932,16 @@ @name@ arg1; if (_ at name@_convert_to_ctype(a, &arg1) < 0) { - if (PyErr_Occurred()) return -1; + if (PyErr_Occurred()) { + return -1; + } return PyGenericArrType_Type.tp_as_number->nb_nonzero(a); } - /* here we do the actual calculation with arg1 and arg2 */ - /* make it a function call. */ + /* + * here we do the actual calculation with arg1 and arg2 + * make it a function call. + */ #if @simp@ ret = (arg1 != 0); @@ -854,14 +954,15 @@ /**end repeat**/ /**begin repeat - #name=byte,ubyte,short,ushort,int,uint,long,ulong,longlong,ulonglong,float,double,longdouble,cfloat,cdouble,clongdouble# - #Name=Byte,UByte,Short,UShort,Int,UInt,Long,ULong,LongLong,ULongLong,Float,Double,LongDouble,CFloat,CDouble,CLongDouble# - #cmplx=,,,,,,,,,,,,,.real,.real,.real# - #sign=(signed,unsigned)*5,,,,,,# - #ctype=long*8,PY_LONG_LONG*2,double*6# - #realtyp=0*10,1*6# - #func=(PyLong_FromLong,PyLong_FromUnsignedLong)*4,PyLong_FromLongLong,PyLong_FromUnsignedLongLong,PyLong_FromDouble*6# -**/ + * + * #name=byte,ubyte,short,ushort,int,uint,long,ulong,longlong,ulonglong,float,double,longdouble,cfloat,cdouble,clongdouble# + * #Name=Byte,UByte,Short,UShort,Int,UInt,Long,ULong,LongLong,ULongLong,Float,Double,LongDouble,CFloat,CDouble,CLongDouble# + * #cmplx=,,,,,,,,,,,,,.real,.real,.real# + * #sign=(signed,unsigned)*5,,,,,,# + * #ctype=long*8,PY_LONG_LONG*2,double*6# + * #realtyp=0*10,1*6# + * #func=(PyLong_FromLong,PyLong_FromUnsignedLong)*4,PyLong_FromLongLong,PyLong_FromUnsignedLongLong,PyLong_FromDouble*6# + */ static PyObject * @name at _int(PyObject *obj) { @@ -878,12 +979,13 @@ /**end repeat**/ /**begin repeat - #name=(byte,ubyte,short,ushort,int,uint,long,ulong,longlong,ulonglong,float,double,longdouble,cfloat,cdouble,clongdouble)*2# - #Name=(Byte,UByte,Short,UShort,Int,UInt,Long,ULong,LongLong,ULongLong,Float,Double,LongDouble,CFloat,CDouble,CLongDouble)*2# - #cmplx=(,,,,,,,,,,,,,.real,.real,.real)*2# - #which=long*16,float*16# - #func=(PyLong_FromLongLong, PyLong_FromUnsignedLongLong)*5,PyLong_FromDouble*6,PyFloat_FromDouble*16# -**/ + * + * #name=(byte,ubyte,short,ushort,int,uint,long,ulong,longlong,ulonglong,float,double,longdouble,cfloat,cdouble,clongdouble)*2# + * #Name=(Byte,UByte,Short,UShort,Int,UInt,Long,ULong,LongLong,ULongLong,Float,Double,LongDouble,CFloat,CDouble,CLongDouble)*2# + * #cmplx=(,,,,,,,,,,,,,.real,.real,.real)*2# + * #which=long*16,float*16# + * #func=(PyLong_FromLongLong, PyLong_FromUnsignedLongLong)*5,PyLong_FromDouble*6,PyFloat_FromDouble*16# + */ static PyObject * @name at _@which@(PyObject *obj) { @@ -893,11 +995,12 @@ /**begin repeat - #name=(byte,ubyte,short,ushort,int,uint,long,ulong,longlong,ulonglong,float,double,longdouble,cfloat,cdouble,clongdouble)*2# - #oper=oct*16, hex*16# - #kind=(int*5, long*5, int, long*2, int, long*2)*2# - #cap=(Int*5, Long*5, Int, Long*2, Int, Long*2)*2# -**/ + * + * #name=(byte,ubyte,short,ushort,int,uint,long,ulong,longlong,ulonglong,float,double,longdouble,cfloat,cdouble,clongdouble)*2# + * #oper=oct*16, hex*16# + * #kind=(int*5, long*5, int, long*2, int, long*2)*2# + * #cap=(Int*5, Long*5, Int, Long*2, Int, Long*2)*2# + */ static PyObject * @name at _@oper@(PyObject *obj) { @@ -910,9 +1013,9 @@ /**begin repeat - #oper=le,ge,lt,gt,eq,ne# - #op=<=,>=,<,>,==,!=# -**/ + * #oper=le,ge,lt,gt,eq,ne# + * #op=<=,>=,<,>,==,!=# + */ #define def_cmp_ at oper@(arg1, arg2) (arg1 @op@ arg2) #define cmplx_cmp_ at oper@(arg1, arg2) ((arg1.real == arg2.real) ? \ arg1.imag @op@ arg2.imag : \ @@ -920,9 +1023,9 @@ /**end repeat**/ /**begin repeat - #name=byte,ubyte,short,ushort,int,uint,long,ulong,longlong,ulonglong,float,double,longdouble,cfloat,cdouble,clongdouble# - #simp=def*13,cmplx*3# -**/ + * #name=byte,ubyte,short,ushort,int,uint,long,ulong,longlong,ulonglong,float,double,longdouble,cfloat,cdouble,clongdouble# + * #simp=def*13,cmplx*3# + */ static PyObject* @name at _richcompare(PyObject *self, PyObject *other, int cmp_op) { Modified: branches/coremath/numpy/core/src/scalartypes.inc.src =================================================================== --- branches/coremath/numpy/core/src/scalartypes.inc.src 2009-02-21 23:54:31 UTC (rev 6444) +++ branches/coremath/numpy/core/src/scalartypes.inc.src 2009-02-22 03:41:06 UTC (rev 6445) @@ -13,72 +13,74 @@ {PyObject_HEAD_INIT(&PyBoolArrType_Type) 1}, }; -/* Inheritance established later when tp_bases is set (or tp_base for - single inheritance) */ +/* + * Inheritance is established later when tp_bases is set (or tp_base for + * single inheritance) + */ /**begin repeat - -#name=number, integer, signedinteger, unsignedinteger, inexact, floating, complexfloating, flexible, character# -#NAME=Number, Integer, SignedInteger, UnsignedInteger, Inexact, Floating, ComplexFloating, Flexible, Character# -*/ - + * #name = number, integer, signedinteger, unsignedinteger, inexact, + * floating, complexfloating, flexible, character# + * #NAME = Number, Integer, SignedInteger, UnsignedInteger, Inexact, + * Floating, ComplexFloating, Flexible, Character# + */ static PyTypeObject Py at NAME@ArrType_Type = { PyObject_HEAD_INIT(NULL) - 0, /*ob_size*/ - "numpy. at name@", /*tp_name*/ - sizeof(PyObject), /*tp_basicsize*/ - 0, /* tp_itemsize */ + 0, /* ob_size*/ + "numpy. at name@", /* tp_name*/ + sizeof(PyObject), /* tp_basicsize*/ + 0, /* tp_itemsize */ /* methods */ - 0, /* tp_dealloc */ - 0, /* tp_print */ - 0, /* tp_getattr */ - 0, /* tp_setattr */ - 0, /* tp_compare */ - 0, /* tp_repr */ - 0, /* tp_as_number */ - 0, /* tp_as_sequence */ - 0, /* tp_as_mapping */ - 0, /* tp_hash */ - 0, /* tp_call */ - 0, /* tp_str */ - 0, /* tp_getattro */ - 0, /* tp_setattro */ - 0, /* tp_as_buffer */ - 0, /* tp_flags */ - 0, /* tp_doc */ - 0, /* tp_traverse */ - 0, /* tp_clear */ - 0, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ - 0, /* tp_iter */ - 0, /* tp_iternext */ - 0, /* tp_methods */ - 0, /* tp_members */ - 0, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - 0, /* tp_init */ - 0, /* tp_alloc */ - 0, /* tp_new */ - 0, /* tp_free */ - 0, /* tp_is_gc */ - 0, /* tp_bases */ - 0, /* tp_mro */ - 0, /* tp_cache */ - 0, /* tp_subclasses */ - 0, /* tp_weaklist */ - 0, /* tp_del */ + 0, /* tp_dealloc */ + 0, /* tp_print */ + 0, /* tp_getattr */ + 0, /* tp_setattr */ + 0, /* tp_compare */ + 0, /* tp_repr */ + 0, /* tp_as_number */ + 0, /* tp_as_sequence */ + 0, /* tp_as_mapping */ + 0, /* tp_hash */ + 0, /* tp_call */ + 0, /* tp_str */ + 0, /* tp_getattro */ + 0, /* tp_setattro */ + 0, /* tp_as_buffer */ + 0, /* tp_flags */ + 0, /* tp_doc */ + 0, /* tp_traverse */ + 0, /* tp_clear */ + 0, /* tp_richcompare */ + 0, /* tp_weaklistoffset */ + 0, /* tp_iter */ + 0, /* tp_iternext */ + 0, /* tp_methods */ + 0, /* tp_members */ + 0, /* tp_getset */ + 0, /* tp_base */ + 0, /* tp_dict */ + 0, /* tp_descr_get */ + 0, /* tp_descr_set */ + 0, /* tp_dictoffset */ + 0, /* tp_init */ + 0, /* tp_alloc */ + 0, /* tp_new */ + 0, /* tp_free */ + 0, /* tp_is_gc */ + 0, /* tp_bases */ + 0, /* tp_mro */ + 0, /* tp_cache */ + 0, /* tp_subclasses */ + 0, /* tp_weaklist */ + 0, /* tp_del */ #ifdef COUNT_ALLOCS - /* these must be last and never explicitly initialized */ - 0, /* tp_allocs */ - 0, /* tp_frees */ - 0, /* tp_maxalloc */ - 0, /* tp_prev */ - 0, /* *tp_next */ + /* these must be last and never explicitly initialized */ + 0, /* tp_allocs */ + 0, /* tp_frees */ + 0, /* tp_maxalloc */ + 0, /* tp_prev */ + 0, /* *tp_next */ #endif }; /**end repeat**/ @@ -118,13 +120,18 @@ CASE(CLONGDOUBLE, CLongDouble); CASE(OBJECT, Object); #undef CASE - case NPY_STRING: return (void *)PyString_AS_STRING(scalar); - case NPY_UNICODE: return (void *)PyUnicode_AS_DATA(scalar); - case NPY_VOID: return ((PyVoidScalarObject *)scalar)->obval; + case NPY_STRING: + return (void *)PyString_AS_STRING(scalar); + case NPY_UNICODE: + return (void *)PyUnicode_AS_DATA(scalar); + case NPY_VOID: + return ((PyVoidScalarObject *)scalar)->obval; } - /* Must be a user-defined type --- check to see which - scalar it inherits from. */ + /* + * Must be a user-defined type --- check to see which + * scalar it inherits from. + */ #define _CHK(cls) (PyObject_IsInstance(scalar, \ (PyObject *)&Py##cls##ArrType_Type)) @@ -140,7 +147,8 @@ _IFCASE(Long); _IFCASE(LongLong); } - else { /* Unsigned Integer */ + else { + /* Unsigned Integer */ _IFCASE(UByte); _IFCASE(UShort); _IFCASE(UInt); @@ -148,49 +156,64 @@ _IFCASE(ULongLong); } } - else { /* Inexact */ + else { + /* Inexact */ if _CHK(Floating) { _IFCASE(Float); _IFCASE(Double); _IFCASE(LongDouble); } - else { /*ComplexFloating */ + else { + /*ComplexFloating */ _IFCASE(CFloat); _IFCASE(CDouble); _IFCASE(CLongDouble); } } } - else if _CHK(Bool) return _OBJ(Bool); - else if _CHK(Flexible) { - if _CHK(String) return (void *)PyString_AS_STRING(scalar); - if _CHK(Unicode) return (void *)PyUnicode_AS_DATA(scalar); - if _CHK(Void) return ((PyVoidScalarObject *)scalar)->obval; + else if (_CHK(Bool)) { + return _OBJ(Bool); } - else _IFCASE(Object); + else if (_CHK(Flexible)) { + if (_CHK(String)) { + return (void *)PyString_AS_STRING(scalar); + } + if (_CHK(Unicode)) { + return (void *)PyUnicode_AS_DATA(scalar); + } + if (_CHK(Void)) { + return ((PyVoidScalarObject *)scalar)->obval; + } + } + else { + _IFCASE(Object); + } - /* Use the alignment flag to figure out where the data begins - after a PyObject_HEAD + /* + * Use the alignment flag to figure out where the data begins + * after a PyObject_HEAD */ memloc = (intp)scalar; memloc += sizeof(PyObject); - /* now round-up to the nearest alignment value - */ + /* now round-up to the nearest alignment value */ align = descr->alignment; - if (align > 1) memloc = ((memloc + align - 1)/align)*align; + if (align > 1) { + memloc = ((memloc + align - 1)/align)*align; + } return (void *)memloc; #undef _IFCASE #undef _OBJ #undef _CHK } -/* no error checking is performed -- ctypeptr must be same type as scalar */ -/* in case of flexible type, the data is not copied - into ctypeptr which is expected to be a pointer to pointer */ /*NUMPY_API - Convert to c-type -*/ + * Convert to c-type + * + * no error checking is performed -- ctypeptr must be same type as scalar + * in case of flexible type, the data is not copied + * into ctypeptr which is expected to be a pointer to pointer + */ static void PyArray_ScalarAsCtype(PyObject *scalar, void *ctypeptr) { @@ -202,24 +225,23 @@ if (PyTypeNum_ISEXTENDED(typecode->type_num)) { void **ct = (void **)ctypeptr; *ct = newptr; - } else { + } + else { memcpy(ctypeptr, newptr, typecode->elsize); } Py_DECREF(typecode); return; } -/* The output buffer must be large-enough to receive the value */ -/* Even for flexible types which is different from ScalarAsCtype - where only a reference for flexible types is returned -*/ - -/* This may not work right on narrow builds for NumPy unicode scalars. +/*NUMPY_API + * Cast Scalar to c-type + * + * The output buffer must be large-enough to receive the value + * Even for flexible types which is different from ScalarAsCtype + * where only a reference for flexible types is returned + * + * This may not work right on narrow builds for NumPy unicode scalars. */ - -/*NUMPY_API - Cast Scalar to c-type -*/ static int PyArray_CastScalarToCtype(PyObject *scalar, void *ctypeptr, PyArray_Descr *outcode) @@ -229,7 +251,9 @@ descr = PyArray_DescrFromScalar(scalar); castfunc = PyArray_GetCastFunc(descr, outcode->type_num); - if (castfunc == NULL) return -1; + if (castfunc == NULL) { + return -1; + } if (PyTypeNum_ISEXTENDED(descr->type_num) || PyTypeNum_ISEXTENDED(outcode->type_num)) { PyArrayObject *ain, *aout; @@ -245,7 +269,10 @@ 0, NULL, NULL, ctypeptr, CARRAY, NULL); - if (aout == NULL) {Py_DECREF(ain); return -1;} + if (aout == NULL) { + Py_DECREF(ain); + return -1; + } castfunc(ain->data, aout->data, 1, ain, aout); Py_DECREF(ain); Py_DECREF(aout); @@ -258,8 +285,8 @@ } /*NUMPY_API - Cast Scalar to c-type -*/ + * Cast Scalar to c-type + */ static int PyArray_CastScalarDirect(PyObject *scalar, PyArray_Descr *indescr, void *ctypeptr, int outtype) @@ -267,22 +294,24 @@ PyArray_VectorUnaryFunc* castfunc; void *ptr; castfunc = PyArray_GetCastFunc(indescr, outtype); - if (castfunc == NULL) return -1; + if (castfunc == NULL) { + return -1; + } ptr = scalar_value(scalar, indescr); castfunc(ptr, ctypeptr, 1, NULL, NULL); return 0; } -/* 0-dim array from array-scalar object */ -/* always contains a copy of the data - unless outcode is NULL, it is of void type and the referrer does - not own it either. -*/ - -/* steals reference to outcode */ /*NUMPY_API - Get 0-dim array from scalar -*/ + * Get 0-dim array from scalar + * + * 0-dim array from array-scalar object + * always contains a copy of the data + * unless outcode is NULL, it is of void type and the referrer does + * not own it either. + * + * steals reference to outcode + */ static PyObject * PyArray_FromScalar(PyObject *scalar, PyArray_Descr *outcode) { @@ -310,8 +339,10 @@ typecode, 0, NULL, NULL, NULL, 0, NULL); - if (r==NULL) {Py_XDECREF(outcode); return NULL;} - + if (r==NULL) { + Py_XDECREF(outcode); + return NULL; + } if (PyDataType_FLAGCHK(typecode, NPY_USE_SETITEM)) { if (typecode->f->setitem(scalar, PyArray_DATA(r), r) < 0) { Py_XDECREF(outcode); Py_DECREF(r); @@ -328,7 +359,8 @@ (PyArray_UCS4 *)PyArray_DATA(r), PyUnicode_GET_SIZE(scalar), PyArray_ITEMSIZE(r) >> 2); - } else + } + else #endif { memcpy(PyArray_DATA(r), memptr, PyArray_ITEMSIZE(r)); @@ -338,8 +370,9 @@ } finish: - if (outcode == NULL) return r; - + if (outcode == NULL) { + return r; + } if (outcode->type_num == typecode->type_num) { if (!PyTypeNum_ISEXTENDED(typecode->type_num) || (outcode->elsize == typecode->elsize)) @@ -353,10 +386,10 @@ } /*NUMPY_API - Get an Array Scalar From a Python Object - Returns NULL if unsuccessful but error is only - set if another error occurred. Currently only Numeric-like - object supported. + * Get an Array Scalar From a Python Object + * + * Returns NULL if unsuccessful but error is only set if another error occurred. + * Currently only Numeric-like object supported. */ static PyObject * PyArray_ScalarFromObject(PyObject *object) @@ -367,17 +400,23 @@ } if (PyInt_Check(object)) { ret = PyArrayScalar_New(Long); - if (ret == NULL) return NULL; + if (ret == NULL) { + return NULL; + } PyArrayScalar_VAL(ret, Long) = PyInt_AS_LONG(object); } else if (PyFloat_Check(object)) { ret = PyArrayScalar_New(Double); - if (ret == NULL) return NULL; + if (ret == NULL) { + return NULL; + } PyArrayScalar_VAL(ret, Double) = PyFloat_AS_DOUBLE(object); } else if (PyComplex_Check(object)) { ret = PyArrayScalar_New(CDouble); - if (ret == NULL) return NULL; + if (ret == NULL) { + return NULL; + } PyArrayScalar_VAL(ret, CDouble).real = ((PyComplexObject *)object)->cval.real; PyArrayScalar_VAL(ret, CDouble).imag = @@ -391,7 +430,9 @@ return NULL; } ret = PyArrayScalar_New(LongLong); - if (ret == NULL) return NULL; + if (ret == NULL) { + return NULL; + } PyArrayScalar_VAL(ret, LongLong) = val; } else if (PyBool_Check(object)) { @@ -410,14 +451,16 @@ gentype_alloc(PyTypeObject *type, Py_ssize_t nitems) { PyObject *obj; - const size_t size = _PyObject_VAR_SIZE(type, nitems+1); + const size_t size = _PyObject_VAR_SIZE(type, nitems + 1); obj = (PyObject *)_pya_malloc(size); memset(obj, 0, size); - if (type->tp_itemsize == 0) + if (type->tp_itemsize == 0) { PyObject_INIT(obj, type); - else + } + else { (void) PyObject_INIT_VAR((PyVarObject *)obj, type, nitems); + } return obj; } @@ -436,8 +479,7 @@ if (!PyArray_IsScalar(m1,Generic)) { if (PyArray_Check(m1)) { - ret = m1->ob_type->tp_as_number->nb_power(m1,m2, - Py_None); + ret = m1->ob_type->tp_as_number->nb_power(m1,m2, Py_None); } else { if (!PyArray_IsScalar(m2,Generic)) { @@ -445,17 +487,17 @@ return NULL; } arr = PyArray_FromScalar(m2, NULL); - if (arr == NULL) return NULL; - ret = arr->ob_type->tp_as_number->nb_power(m1, arr, - Py_None); + if (arr == NULL) { + return NULL; + } + ret = arr->ob_type->tp_as_number->nb_power(m1, arr, Py_None); Py_DECREF(arr); } return ret; } if (!PyArray_IsScalar(m2, Generic)) { if (PyArray_Check(m2)) { - ret = m2->ob_type->tp_as_number->nb_power(m1,m2, - Py_None); + ret = m2->ob_type->tp_as_number->nb_power(m1,m2, Py_None); } else { if (!PyArray_IsScalar(m1, Generic)) { @@ -463,18 +505,21 @@ return NULL; } arr = PyArray_FromScalar(m1, NULL); - if (arr == NULL) return NULL; - ret = arr->ob_type->tp_as_number->nb_power(arr, m2, - Py_None); + if (arr == NULL) { + return NULL; + } + ret = arr->ob_type->tp_as_number->nb_power(arr, m2, Py_None); Py_DECREF(arr); } return ret; } - arr=arg2=NULL; + arr = arg2 = NULL; arr = PyArray_FromScalar(m1, NULL); arg2 = PyArray_FromScalar(m2, NULL); if (arr == NULL || arg2 == NULL) { - Py_XDECREF(arr); Py_XDECREF(arg2); return NULL; + Py_XDECREF(arr); + Py_XDECREF(arg2); + return NULL; } ret = arr->ob_type->tp_as_number->nb_power(arr, arg2, Py_None); Py_DECREF(arr); @@ -489,26 +534,35 @@ PyObject *arr, *meth, *ret; arr = PyArray_FromScalar(self, NULL); - if (arr == NULL) return NULL; + if (arr == NULL) { + return NULL; + } meth = PyObject_GetAttrString(arr, str); - if (meth == NULL) {Py_DECREF(arr); return NULL;} - if (kwds == NULL) + if (meth == NULL) { + Py_DECREF(arr); + return NULL; + } + if (kwds == NULL) { ret = PyObject_CallObject(meth, args); - else + } + else { ret = PyObject_Call(meth, args, kwds); + } Py_DECREF(meth); Py_DECREF(arr); - if (ret && PyArray_Check(ret)) + if (ret && PyArray_Check(ret)) { return PyArray_Return((PyArrayObject *)ret); - else + } + else { return ret; + } } /**begin repeat * - * #name=add, subtract, divide, remainder, divmod, lshift, rshift, and, xor, or, floor_divide, true_divide# + * #name = add, subtract, divide, remainder, divmod, lshift, rshift, + * and, xor, or, floor_divide, true_divide# */ - static PyObject * gentype_ at name@(PyObject *m1, PyObject *m2) { @@ -521,28 +575,30 @@ static PyObject * gentype_multiply(PyObject *m1, PyObject *m2) { - PyObject *ret=NULL; + PyObject *ret = NULL; long repeat; if (!PyArray_IsScalar(m1, Generic) && ((m1->ob_type->tp_as_number == NULL) || (m1->ob_type->tp_as_number->nb_multiply == NULL))) { - /* Try to convert m2 to an int and try sequence - repeat */ + /* Try to convert m2 to an int and try sequence repeat */ repeat = PyInt_AsLong(m2); - if (repeat == -1 && PyErr_Occurred()) return NULL; + if (repeat == -1 && PyErr_Occurred()) { + return NULL; + } ret = PySequence_Repeat(m1, (int) repeat); } else if (!PyArray_IsScalar(m2, Generic) && ((m2->ob_type->tp_as_number == NULL) || (m2->ob_type->tp_as_number->nb_multiply == NULL))) { - /* Try to convert m1 to an int and try sequence - repeat */ + /* Try to convert m1 to an int and try sequence repeat */ repeat = PyInt_AsLong(m1); - if (repeat == -1 && PyErr_Occurred()) return NULL; + if (repeat == -1 && PyErr_Occurred()) { + return NULL; + } ret = PySequence_Repeat(m2, (int) repeat); } - if (ret==NULL) { + if (ret == NULL) { PyErr_Clear(); /* no effect if not set */ ret = PyArray_Type.tp_as_number->nb_multiply(m1, m2); } @@ -550,17 +606,18 @@ } /**begin repeat - -#name=positive, negative, absolute, invert, int, long, float, oct, hex# -*/ - + * + * #name=positive, negative, absolute, invert, int, long, float, oct, hex# + */ static PyObject * gentype_ at name@(PyObject *m1) { PyObject *arr, *ret; arr = PyArray_FromScalar(m1, NULL); - if (arr == NULL) return NULL; + if (arr == NULL) { + return NULL; + } ret = arr->ob_type->tp_as_number->nb_ at name@(arr); Py_DECREF(arr); return ret; @@ -574,7 +631,9 @@ int ret; arr = PyArray_FromScalar(m1, NULL); - if (arr == NULL) return -1; + if (arr == NULL) { + return -1; + } ret = arr->ob_type->tp_as_number->nb_nonzero(arr); Py_DECREF(arr); return ret; @@ -587,7 +646,9 @@ PyObject *ret; arr = (PyArrayObject *)PyArray_FromScalar(self, NULL); - if (arr==NULL) return NULL; + if (arr == NULL) { + return NULL; + } ret = PyObject_Str((PyObject *)arr); Py_DECREF(arr); return ret; @@ -601,7 +662,9 @@ PyObject *ret; arr = (PyArrayObject *)PyArray_FromScalar(self, NULL); - if (arr==NULL) return NULL; + if (arr == NULL) { + return NULL; + } ret = PyObject_Str((PyObject *)arr); Py_DECREF(arr); return ret; @@ -613,9 +676,9 @@ #endif /**begin repeat - * #name=float, double, longdouble# - * #NAME=FLOAT, DOUBLE, LONGDOUBLE# - * #type=f, d, l# + * #name = float, double, longdouble# + * #NAME = FLOAT, DOUBLE, LONGDOUBLE# + * #type = f, d, l# */ #define _FMT1 "%%.%i" NPY_ at NAME@_FMT @@ -632,7 +695,7 @@ res = NumPyOS_ascii_format at type@(buf, buflen, format, val, 0); if (res == NULL) { fprintf(stderr, "Error while formatting\n"); - return; + return; } /* If nothing but digits after sign, append ".0" */ @@ -656,28 +719,28 @@ if (val.real == 0.0) { PyOS_snprintf(format, sizeof(format), _FMT1, prec); res = NumPyOS_ascii_format at type@(buf, buflen-1, format, val.imag, 0); - if (res == NULL) { + if (res == NULL) { fprintf(stderr, "Error while formatting\n"); - return; - } - strncat(buf, "j", 1); + return; + } + strncat(buf, "j", 1); } else { - char re[64], im[64]; - PyOS_snprintf(format, sizeof(format), _FMT1, prec); + char re[64], im[64]; + PyOS_snprintf(format, sizeof(format), _FMT1, prec); res = NumPyOS_ascii_format at type@(re, sizeof(re), format, val.real, 0); - if (res == NULL) { + if (res == NULL) { fprintf(stderr, "Error while formatting\n"); - return; - } + return; + } - PyOS_snprintf(format, sizeof(format), _FMT2, prec); + PyOS_snprintf(format, sizeof(format), _FMT2, prec); res = NumPyOS_ascii_format at type@(im, sizeof(im), format, val.imag, 0); - if (res == NULL) { + if (res == NULL) { fprintf(stderr, "Error while formatting\n"); - return; - } - PyOS_snprintf(buf, buflen, "(%s%sj)", re, im); + return; + } + PyOS_snprintf(buf, buflen, "(%s%sj)", re, im); } } @@ -686,19 +749,20 @@ /**end repeat**/ -/* over-ride repr and str of array-scalar strings and unicode to - remove NULL bytes and then call the corresponding functions - of string and unicode. +/* + * over-ride repr and str of array-scalar strings and unicode to + * remove NULL bytes and then call the corresponding functions + * of string and unicode. */ /**begin repeat -#name=string*2,unicode*2# -#form=(repr,str)*2# -#Name=String*2,Unicode*2# -#NAME=STRING*2,UNICODE*2# -#extra=AndSize*2,,# -#type=char*2, Py_UNICODE*2# -*/ + * #name = string*2,unicode*2# + * #form = (repr,str)*2# + * #Name = String*2,Unicode*2# + * #NAME = STRING*2,UNICODE*2# + * #extra = AndSize*2,,# + * #type = char*2, Py_UNICODE*2# + */ static PyObject * @name at type_@form@(PyObject *self) { @@ -710,9 +774,13 @@ ip = dptr = Py at Name@_AS_ at NAME@(self); len = Py at Name@_GET_SIZE(self); dptr += len-1; - while(len > 0 && *dptr-- == 0) len--; + while(len > 0 && *dptr-- == 0) { + len--; + } new = Py at Name@_From at Name@@extra@(ip, len); - if (new == NULL) return PyString_FromString(""); + if (new == NULL) { + return PyString_FromString(""); + } ret = Py at Name@_Type.tp_ at form@(new); Py_DECREF(new); return ret; @@ -737,10 +805,11 @@ * * These functions will return NULL if PyString creation fails. */ + /**begin repeat - * #name=float, double, longdouble# - * #Name=Float, Double, LongDouble# - * #NAME=FLOAT, DOUBLE, LONGDOUBLE# + * #name = float, double, longdouble# + * #Name = Float, Double, LongDouble# + * #NAME = FLOAT, DOUBLE, LONGDOUBLE# */ /**begin repeat1 * #kind = str, repr# @@ -778,38 +847,38 @@ * float type print (control print a, where a is a float type instance) */ /**begin repeat - * #name=float, double, longdouble# - * #Name=Float, Double, LongDouble# - * #NAME=FLOAT, DOUBLE, LONGDOUBLE# + * #name = float, double, longdouble# + * #Name = Float, Double, LongDouble# + * #NAME = FLOAT, DOUBLE, LONGDOUBLE# */ static int @name at type_print(PyObject *v, FILE *fp, int flags) { - char buf[100]; + char buf[100]; @name@ val = ((Py at Name@ScalarObject *)v)->obval; - format_ at name@(buf, sizeof(buf), val, - (flags & Py_PRINT_RAW) ? @NAME at PREC_STR : @NAME at PREC_REPR); - Py_BEGIN_ALLOW_THREADS - fputs(buf, fp); - Py_END_ALLOW_THREADS - return 0; + format_ at name@(buf, sizeof(buf), val, + (flags & Py_PRINT_RAW) ? @NAME at PREC_STR : @NAME at PREC_REPR); + Py_BEGIN_ALLOW_THREADS + fputs(buf, fp); + Py_END_ALLOW_THREADS + return 0; } static int c at name@type_print(PyObject *v, FILE *fp, int flags) { /* Size of buf: twice sizeof(real) + 2 (for the parenthesis) */ - char buf[202]; + char buf[202]; c at name@ val = ((PyC at Name@ScalarObject *)v)->obval; - format_c at name@(buf, sizeof(buf), val, - (flags & Py_PRINT_RAW) ? @NAME at PREC_STR : @NAME at PREC_REPR); - Py_BEGIN_ALLOW_THREADS - fputs(buf, fp); - Py_END_ALLOW_THREADS - return 0; + format_c at name@(buf, sizeof(buf), val, + (flags & Py_PRINT_RAW) ? @NAME at PREC_STR : @NAME at PREC_REPR); + Py_BEGIN_ALLOW_THREADS + fputs(buf, fp); + Py_END_ALLOW_THREADS + return 0; } /**end repeat**/ @@ -821,13 +890,13 @@ */ /**begin repeat - -#name=(int, long, hex, oct, float)*2# -#KIND=(Long*4, Float)*2# -#char=,,,,,c*5# -#CHAR=,,,,,C*5# -#POST=,,,,,.real*5# -*/ + * + * #name = (int, long, hex, oct, float)*2# + * #KIND = (Long*4, Float)*2# + * #char = ,,,,,c*5# + * #CHAR = ,,,,,C*5# + * #POST = ,,,,,.real*5# + */ static PyObject * @char at longdoubletype_@name@(PyObject *self) { @@ -844,46 +913,46 @@ static PyNumberMethods gentype_as_number = { - (binaryfunc)gentype_add, /*nb_add*/ - (binaryfunc)gentype_subtract, /*nb_subtract*/ - (binaryfunc)gentype_multiply, /*nb_multiply*/ - (binaryfunc)gentype_divide, /*nb_divide*/ - (binaryfunc)gentype_remainder, /*nb_remainder*/ - (binaryfunc)gentype_divmod, /*nb_divmod*/ - (ternaryfunc)gentype_power, /*nb_power*/ + (binaryfunc)gentype_add, /*nb_add*/ + (binaryfunc)gentype_subtract, /*nb_subtract*/ + (binaryfunc)gentype_multiply, /*nb_multiply*/ + (binaryfunc)gentype_divide, /*nb_divide*/ + (binaryfunc)gentype_remainder, /*nb_remainder*/ + (binaryfunc)gentype_divmod, /*nb_divmod*/ + (ternaryfunc)gentype_power, /*nb_power*/ (unaryfunc)gentype_negative, - (unaryfunc)gentype_positive, /*nb_pos*/ - (unaryfunc)gentype_absolute, /*(unaryfunc)gentype_abs,*/ - (inquiry)gentype_nonzero_number, /*nb_nonzero*/ - (unaryfunc)gentype_invert, /*nb_invert*/ - (binaryfunc)gentype_lshift, /*nb_lshift*/ - (binaryfunc)gentype_rshift, /*nb_rshift*/ - (binaryfunc)gentype_and, /*nb_and*/ - (binaryfunc)gentype_xor, /*nb_xor*/ - (binaryfunc)gentype_or, /*nb_or*/ - 0, /*nb_coerce*/ - (unaryfunc)gentype_int, /*nb_int*/ - (unaryfunc)gentype_long, /*nb_long*/ - (unaryfunc)gentype_float, /*nb_float*/ - (unaryfunc)gentype_oct, /*nb_oct*/ - (unaryfunc)gentype_hex, /*nb_hex*/ - 0, /*inplace_add*/ - 0, /*inplace_subtract*/ - 0, /*inplace_multiply*/ - 0, /*inplace_divide*/ - 0, /*inplace_remainder*/ - 0, /*inplace_power*/ - 0, /*inplace_lshift*/ - 0, /*inplace_rshift*/ - 0, /*inplace_and*/ - 0, /*inplace_xor*/ - 0, /*inplace_or*/ - (binaryfunc)gentype_floor_divide, /*nb_floor_divide*/ - (binaryfunc)gentype_true_divide, /*nb_true_divide*/ - 0, /*nb_inplace_floor_divide*/ - 0, /*nb_inplace_true_divide*/ + (unaryfunc)gentype_positive, /*nb_pos*/ + (unaryfunc)gentype_absolute, /*(unaryfunc)gentype_abs,*/ + (inquiry)gentype_nonzero_number, /*nb_nonzero*/ + (unaryfunc)gentype_invert, /*nb_invert*/ + (binaryfunc)gentype_lshift, /*nb_lshift*/ + (binaryfunc)gentype_rshift, /*nb_rshift*/ + (binaryfunc)gentype_and, /*nb_and*/ + (binaryfunc)gentype_xor, /*nb_xor*/ + (binaryfunc)gentype_or, /*nb_or*/ + 0, /*nb_coerce*/ + (unaryfunc)gentype_int, /*nb_int*/ + (unaryfunc)gentype_long, /*nb_long*/ + (unaryfunc)gentype_float, /*nb_float*/ + (unaryfunc)gentype_oct, /*nb_oct*/ + (unaryfunc)gentype_hex, /*nb_hex*/ + 0, /*inplace_add*/ + 0, /*inplace_subtract*/ + 0, /*inplace_multiply*/ + 0, /*inplace_divide*/ + 0, /*inplace_remainder*/ + 0, /*inplace_power*/ + 0, /*inplace_lshift*/ + 0, /*inplace_rshift*/ + 0, /*inplace_and*/ + 0, /*inplace_xor*/ + 0, /*inplace_or*/ + (binaryfunc)gentype_floor_divide, /*nb_floor_divide*/ + (binaryfunc)gentype_true_divide, /*nb_true_divide*/ + 0, /*nb_inplace_floor_divide*/ + 0, /*nb_inplace_true_divide*/ #if PY_VERSION_HEX >= 0x02050000 - (unaryfunc)NULL, /*nb_index*/ + (unaryfunc)NULL, /*nb_index*/ #endif }; @@ -894,7 +963,9 @@ PyObject *arr, *ret; arr = PyArray_FromScalar(self, NULL); - if (arr == NULL) return NULL; + if (arr == NULL) { + return NULL; + } ret = arr->ob_type->tp_richcompare(arr, other, cmp_op); Py_DECREF(arr); return ret; @@ -917,7 +988,9 @@ { PyObject *flagobj; flagobj = PyArrayFlags_Type.tp_alloc(&PyArrayFlags_Type, 0); - if (flagobj == NULL) return NULL; + if (flagobj == NULL) { + return NULL; + } ((PyArrayFlagsObject *)flagobj)->arr = NULL; ((PyArrayFlagsObject *)flagobj)->flags = self->flags; return flagobj; @@ -1016,9 +1089,13 @@ PyObject *inter; arr = (PyArrayObject *)PyArray_FromScalar(self, NULL); - if (arr == NULL) return NULL; + if (arr == NULL) { + return NULL; + } inter = PyObject_GetAttrString((PyObject *)arr, "__array_interface__"); - if (inter != NULL) PyDict_SetItemString(inter, "__ref", (PyObject *)arr); + if (inter != NULL) { + PyDict_SetItemString(inter, "__ref", (PyObject *)arr); + } Py_DECREF(arr); return inter; } @@ -1076,7 +1153,9 @@ else if (PyArray_IsScalar(self, Object)) { PyObject *obj = ((PyObjectScalarObject *)self)->obval; ret = PyObject_GetAttrString(obj, "real"); - if (ret != NULL) return ret; + if (ret != NULL) { + return ret; + } PyErr_Clear(); } Py_INCREF(self); @@ -1094,8 +1173,7 @@ char *ptr; typecode = _realdescr_fromcomplexscalar(self, &typenum); ptr = (char *)scalar_value(self, NULL); - ret = PyArray_Scalar(ptr + typecode->elsize, - typecode, NULL); + ret = PyArray_Scalar(ptr + typecode->elsize, typecode, NULL); } else if (PyArray_IsScalar(self, Object)) { PyObject *obj = ((PyObjectScalarObject *)self)->obval; @@ -1131,7 +1209,9 @@ PyObject *ret, *arr; arr = PyArray_FromScalar(self, NULL); - if (arr == NULL) return NULL; + if (arr == NULL) { + return NULL; + } ret = PyArray_IterNew(arr); Py_DECREF(arr); return ret; @@ -1279,10 +1359,11 @@ /**begin repeat - -#name=tolist, item, tostring, astype, copy, __deepcopy__, searchsorted, view, swapaxes, conj, conjugate, nonzero, flatten, ravel, fill, transpose, newbyteorder# -*/ - + * + * #name = tolist, item, tostring, astype, copy, __deepcopy__, searchsorted, + * view, swapaxes, conj, conjugate, nonzero, flatten, ravel, fill, + * transpose, newbyteorder# + */ static PyObject * gentype_ at name@(PyObject *self, PyObject *args) { @@ -1300,7 +1381,9 @@ static PyObject * gentype_squeeze(PyObject *self, PyObject *args) { - if (!PyArg_ParseTuple(args, "")) return NULL; + if (!PyArg_ParseTuple(args, "")) { + return NULL; + } Py_INCREF(self); return self; } @@ -1313,17 +1396,16 @@ { Bool inplace=FALSE; - if (!PyArg_ParseTuple(args, "|O&", PyArray_BoolConverter, &inplace)) + if (!PyArg_ParseTuple(args, "|O&", PyArray_BoolConverter, &inplace)) { return NULL; - + } if (inplace) { PyErr_SetString(PyExc_ValueError, "cannot byteswap a scalar in-place"); return NULL; } else { - /* get the data, copyswap it and pass it to a new Array scalar - */ + /* get the data, copyswap it and pass it to a new Array scalar */ char *data; int numbytes; PyArray_Descr *descr; @@ -1333,8 +1415,13 @@ numbytes = gentype_getreadbuf(self, 0, (void **)&data); descr = PyArray_DescrFromScalar(self); newmem = _pya_malloc(descr->elsize); - if (newmem == NULL) {Py_DECREF(descr); return PyErr_NoMemory();} - else memcpy(newmem, data, descr->elsize); + if (newmem == NULL) { + Py_DECREF(descr); + return PyErr_NoMemory(); + } + else { + memcpy(newmem, data, descr->elsize); + } byte_swap_vector(newmem, 1, descr->elsize); new = PyArray_Scalar(newmem, descr, NULL); _pya_free(newmem); @@ -1345,10 +1432,12 @@ /**begin repeat - -#name=take, getfield, put, repeat, tofile, mean, trace, diagonal, clip, std, var, sum, cumsum, prod, cumprod, compress, sort, argsort, round, argmax, argmin, max, min, ptp, any, all, resize, reshape, choose# -*/ - + * + * #name = take, getfield, put, repeat, tofile, mean, trace, diagonal, clip, + * std, var, sum, cumsum, prod, cumprod, compress, sort, argsort, + * round, argmax, argmin, max, min, ptp, any, all, resize, reshape, + * choose# + */ static PyObject * gentype_ at name@(PyObject *self, PyObject *args, PyObject *kwds) { @@ -1362,7 +1451,9 @@ PyObject *ret; ret = gentype_generic_method((PyObject *)self, args, kwds, "getfield"); - if (!ret) return ret; + if (!ret) { + return ret; + } if (PyArray_IsScalar(ret, Generic) && \ (!PyArray_IsScalar(ret, Void))) { PyArray_Descr *new; @@ -1388,7 +1479,7 @@ static PyObject * voidtype_setfield(PyVoidScalarObject *self, PyObject *args, PyObject *kwds) { - PyArray_Descr *typecode=NULL; + PyArray_Descr *typecode = NULL; int offset = 0; PyObject *value, *src; int mysize; @@ -1396,8 +1487,7 @@ static char *kwlist[] = {"value", "dtype", "offset", 0}; if ((self->flags & WRITEABLE) != WRITEABLE) { - PyErr_SetString(PyExc_RuntimeError, - "Can't write to memory"); + PyErr_SetString(PyExc_RuntimeError, "Can't write to memory"); return NULL; } if (!PyArg_ParseTupleAndKeywords(args, kwds, "OO&|i", kwlist, @@ -1432,7 +1522,9 @@ else { /* Copy data from value to correct place in dptr */ src = PyArray_FromAny(value, typecode, 0, 0, CARRAY, NULL); - if (src == NULL) return NULL; + if (src == NULL) { + return NULL; + } typecode->f->copyswap(dptr, PyArray_DATA(src), !PyArray_ISNBO(self->descr->byteorder), src); @@ -1446,38 +1538,44 @@ static PyObject * gentype_reduce(PyObject *self, PyObject *NPY_UNUSED(args)) { - PyObject *ret=NULL, *obj=NULL, *mod=NULL; + PyObject *ret = NULL, *obj = NULL, *mod = NULL; const char *buffer; Py_ssize_t buflen; /* Return a tuple of (callable object, arguments) */ - ret = PyTuple_New(2); - if (ret == NULL) return NULL; + if (ret == NULL) { + return NULL; + } if (PyObject_AsReadBuffer(self, (const void **)&buffer, &buflen)<0) { - Py_DECREF(ret); return NULL; + Py_DECREF(ret); + return NULL; } mod = PyImport_ImportModule("numpy.core.multiarray"); - if (mod == NULL) return NULL; + if (mod == NULL) { + return NULL; + } obj = PyObject_GetAttrString(mod, "scalar"); Py_DECREF(mod); - if (obj == NULL) return NULL; + if (obj == NULL) { + return NULL; + } PyTuple_SET_ITEM(ret, 0, obj); obj = PyObject_GetAttrString((PyObject *)self, "dtype"); if (PyArray_IsScalar(self, Object)) { mod = ((PyObjectScalarObject *)self)->obval; - PyTuple_SET_ITEM(ret, 1, - Py_BuildValue("NO", obj, mod)); + PyTuple_SET_ITEM(ret, 1, Py_BuildValue("NO", obj, mod)); } else { #ifndef Py_UNICODE_WIDE - /* We need to expand the buffer so that we always write - UCS4 to disk for pickle of unicode scalars. - - This could be in a unicode_reduce function, but - that would require re-factoring. - */ - int alloc=0; + /* + * We need to expand the buffer so that we always write + * UCS4 to disk for pickle of unicode scalars. + * + * This could be in a unicode_reduce function, but + * that would require re-factoring. + */ + int alloc = 0; char *tmp; int newlen; @@ -1526,13 +1624,16 @@ static PyObject * gentype_dump(PyObject *self, PyObject *args) { - PyObject *file=NULL; + PyObject *file = NULL; int ret; - if (!PyArg_ParseTuple(args, "O", &file)) + if (!PyArg_ParseTuple(args, "O", &file)) { return NULL; + } ret = PyArray_Dump(self, file, 2); - if (ret < 0) return NULL; + if (ret < 0) { + return NULL; + } Py_INCREF(Py_None); return Py_None; } @@ -1540,15 +1641,17 @@ static PyObject * gentype_dumps(PyObject *self, PyObject *args) { - if (!PyArg_ParseTuple(args, "")) + if (!PyArg_ParseTuple(args, "")) { return NULL; + } return PyArray_Dumps(self, 2); } /* setting flags cannot be done for scalars */ static PyObject * -gentype_setflags(PyObject *NPY_UNUSED(self), PyObject *NPY_UNUSED(args), PyObject *NPY_UNUSED(kwds)) +gentype_setflags(PyObject *NPY_UNUSED(self), PyObject *NPY_UNUSED(args), + PyObject *NPY_UNUSED(kwds)) { Py_INCREF(Py_None); return Py_None; @@ -1776,7 +1879,9 @@ } flist = self->descr->names; m = PyTuple_GET_SIZE(flist); - if (n < 0) n += m; + if (n < 0) { + n += m; + } if (n < 0 || n >= m) { PyErr_Format(PyExc_IndexError, "invalid index (%d)", (int) n); return NULL; @@ -1803,14 +1908,17 @@ if (PyString_Check(ind) || PyUnicode_Check(ind)) { /* look up in fields */ fieldinfo = PyDict_GetItem(self->descr->fields, ind); - if (!fieldinfo) goto fail; + if (!fieldinfo) { + goto fail; + } return voidtype_getfield(self, fieldinfo, NULL); } /* try to convert it to a number */ n = PyArray_PyIntAsIntp(ind); - if (error_converting(n)) goto fail; - + if (error_converting(n)) { + goto fail; + } return voidtype_item(self, (Py_ssize_t)n); fail: @@ -1833,8 +1941,12 @@ flist = self->descr->names; m = PyTuple_GET_SIZE(flist); - if (n < 0) n += m; - if (n < 0 || n >= m) goto fail; + if (n < 0) { + n += m; + } + if (n < 0 || n >= m) { + goto fail; + } fieldinfo = PyDict_GetItem(self->descr->fields, PyTuple_GET_ITEM(flist, n)); newtup = Py_BuildValue("(OOO)", val, @@ -1842,7 +1954,9 @@ PyTuple_GET_ITEM(fieldinfo, 1)); res = voidtype_setfield(self, newtup, NULL); Py_DECREF(newtup); - if (!res) return -1; + if (!res) { + return -1; + } Py_DECREF(res); return 0; @@ -1868,20 +1982,26 @@ if (PyString_Check(ind) || PyUnicode_Check(ind)) { /* look up in fields */ fieldinfo = PyDict_GetItem(self->descr->fields, ind); - if (!fieldinfo) goto fail; + if (!fieldinfo) { + goto fail; + } newtup = Py_BuildValue("(OOO)", val, PyTuple_GET_ITEM(fieldinfo, 0), PyTuple_GET_ITEM(fieldinfo, 1)); res = voidtype_setfield(self, newtup, NULL); Py_DECREF(newtup); - if (!res) return -1; + if (!res) { + return -1; + } Py_DECREF(res); return 0; } /* try to convert it to a number */ n = PyArray_PyIntAsIntp(ind); - if (error_converting(n)) goto fail; + if (error_converting(n)) { + goto fail; + } return voidtype_ass_item(self, (Py_ssize_t)n, val); fail: @@ -1891,35 +2011,35 @@ static PyMappingMethods voidtype_as_mapping = { #if PY_VERSION_HEX >= 0x02050000 - (lenfunc)voidtype_length, /*mp_length*/ + (lenfunc)voidtype_length, /*mp_length*/ #else - (inquiry)voidtype_length, /*mp_length*/ + (inquiry)voidtype_length, /*mp_length*/ #endif - (binaryfunc)voidtype_subscript, /*mp_subscript*/ - (objobjargproc)voidtype_ass_subscript, /*mp_ass_subscript*/ + (binaryfunc)voidtype_subscript, /*mp_subscript*/ + (objobjargproc)voidtype_ass_subscript, /*mp_ass_subscript*/ }; static PySequenceMethods voidtype_as_sequence = { #if PY_VERSION_HEX >= 0x02050000 - (lenfunc)voidtype_length, /*sq_length*/ - 0, /*sq_concat*/ - 0, /*sq_repeat*/ - (ssizeargfunc)voidtype_item, /*sq_item*/ - 0, /*sq_slice*/ - (ssizeobjargproc)voidtype_ass_item, /*sq_ass_item*/ + (lenfunc)voidtype_length, /*sq_length*/ + 0, /*sq_concat*/ + 0, /*sq_repeat*/ + (ssizeargfunc)voidtype_item, /*sq_item*/ + 0, /*sq_slice*/ + (ssizeobjargproc)voidtype_ass_item, /*sq_ass_item*/ #else - (inquiry)voidtype_length, /*sq_length*/ - 0, /*sq_concat*/ - 0, /*sq_repeat*/ - (intargfunc)voidtype_item, /*sq_item*/ - 0, /*sq_slice*/ - (intobjargproc)voidtype_ass_item, /*sq_ass_item*/ + (inquiry)voidtype_length, /*sq_length*/ + 0, /*sq_concat*/ + 0, /*sq_repeat*/ + (intargfunc)voidtype_item, /*sq_item*/ + 0, /*sq_slice*/ + (intobjargproc)voidtype_ass_item, /*sq_ass_item*/ #endif - 0, /* ssq_ass_slice */ - 0, /* sq_contains */ - 0, /* sq_inplace_concat */ - 0, /* sq_inplace_repeat */ + 0, /* ssq_ass_slice */ + 0, /* sq_contains */ + 0, /* sq_inplace_concat */ + 0, /* sq_inplace_repeat */ }; @@ -1970,9 +2090,10 @@ static Py_ssize_t gentype_getcharbuf(PyObject *self, Py_ssize_t segment, constchar **ptrptr) { - if (PyArray_IsScalar(self, String) || \ - PyArray_IsScalar(self, Unicode)) + if (PyArray_IsScalar(self, String) || + PyArray_IsScalar(self, Unicode)) { return gentype_getreadbuf(self, segment, (void **)ptrptr); + } else { PyErr_SetString(PyExc_TypeError, "Non-character array cannot be interpreted "\ @@ -1983,10 +2104,10 @@ static PyBufferProcs gentype_as_buffer = { - gentype_getreadbuf, /*bf_getreadbuffer*/ - NULL, /*bf_getwritebuffer*/ - gentype_getsegcount, /*bf_getsegcount*/ - gentype_getcharbuf, /*bf_getcharbuffer*/ + gentype_getreadbuf, /* bf_getreadbuffer*/ + NULL, /* bf_getwritebuffer*/ + gentype_getsegcount, /* bf_getsegcount*/ + gentype_getcharbuf, /* bf_getcharbuffer*/ }; @@ -1995,69 +2116,70 @@ static PyTypeObject PyGenericArrType_Type = { PyObject_HEAD_INIT(NULL) - 0, /*ob_size*/ - "numpy.generic", /*tp_name*/ - sizeof(PyObject), /*tp_basicsize*/ - 0, /* tp_itemsize */ + 0, /* ob_size*/ + "numpy.generic", /* tp_name*/ + sizeof(PyObject), /* tp_basicsize*/ + 0, /* tp_itemsize */ /* methods */ - 0, /* tp_dealloc */ - 0, /* tp_print */ - 0, /* tp_getattr */ - 0, /* tp_setattr */ - 0, /* tp_compare */ - 0, /* tp_repr */ - 0, /* tp_as_number */ - 0, /* tp_as_sequence */ - 0, /* tp_as_mapping */ - 0, /* tp_hash */ - 0, /* tp_call */ - 0, /* tp_str */ - 0, /* tp_getattro */ - 0, /* tp_setattro */ - 0, /* tp_as_buffer */ - 0, /* tp_flags */ - 0, /* tp_doc */ - 0, /* tp_traverse */ - 0, /* tp_clear */ - 0, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ - 0, /* tp_iter */ - 0, /* tp_iternext */ - 0, /* tp_methods */ - 0, /* tp_members */ - 0, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - 0, /* tp_init */ - 0, /* tp_alloc */ - 0, /* tp_new */ - 0, /* tp_free */ - 0, /* tp_is_gc */ - 0, /* tp_bases */ - 0, /* tp_mro */ - 0, /* tp_cache */ - 0, /* tp_subclasses */ - 0, /* tp_weaklist */ - 0, /* tp_del */ + 0, /* tp_dealloc */ + 0, /* tp_print */ + 0, /* tp_getattr */ + 0, /* tp_setattr */ + 0, /* tp_compare */ + 0, /* tp_repr */ + 0, /* tp_as_number */ + 0, /* tp_as_sequence */ + 0, /* tp_as_mapping */ + 0, /* tp_hash */ + 0, /* tp_call */ + 0, /* tp_str */ + 0, /* tp_getattro */ + 0, /* tp_setattro */ + 0, /* tp_as_buffer */ + 0, /* tp_flags */ + 0, /* tp_doc */ + 0, /* tp_traverse */ + 0, /* tp_clear */ + 0, /* tp_richcompare */ + 0, /* tp_weaklistoffset */ + 0, /* tp_iter */ + 0, /* tp_iternext */ + 0, /* tp_methods */ + 0, /* tp_members */ + 0, /* tp_getset */ + 0, /* tp_base */ + 0, /* tp_dict */ + 0, /* tp_descr_get */ + 0, /* tp_descr_set */ + 0, /* tp_dictoffset */ + 0, /* tp_init */ + 0, /* tp_alloc */ + 0, /* tp_new */ + 0, /* tp_free */ + 0, /* tp_is_gc */ + 0, /* tp_bases */ + 0, /* tp_mro */ + 0, /* tp_cache */ + 0, /* tp_subclasses */ + 0, /* tp_weaklist */ + 0, /* tp_del */ #ifdef COUNT_ALLOCS - /* these must be last and never explicitly initialized */ - 0, /* tp_allocs */ - 0, /* tp_frees */ - 0, /* tp_maxalloc */ - 0, /* tp_prev */ - 0, /* *tp_next */ + /* these must be last and never explicitly initialized */ + 0, /* tp_allocs */ + 0, /* tp_frees */ + 0, /* tp_maxalloc */ + 0, /* tp_prev */ + 0, /* *tp_next */ #endif }; static void void_dealloc(PyVoidScalarObject *v) { - if (v->flags & OWNDATA) + if (v->flags & OWNDATA) { PyDataMem_FREE(v->obval); + } Py_XDECREF(v->descr); Py_XDECREF(v->base); v->ob_type->tp_free(v); @@ -2070,11 +2192,13 @@ v->ob_type->tp_free(v); } -/* string and unicode inherit from Python Type first and so GET_ITEM is different to get to the Python Type. +/* + * string and unicode inherit from Python Type first and so GET_ITEM + * is different to get to the Python Type. + * + * ok is a work-around for a bug in complex_new that doesn't allocate + * memory from the sub-types memory allocator. */ -/* ok is a work-around for a bug in complex_new that doesn't allocate - memory from the sub-types memory allocator. -*/ #define _WORK(num) \ if (type->tp_bases && (PyTuple_GET_SIZE(type->tp_bases)==2)) { \ @@ -2093,14 +2217,18 @@ #define _WORKz _WORK(0) #define _WORK0 -/**begin repeat1 -#name=byte, short, int, long, longlong, ubyte, ushort, uint, ulong, ulonglong, float, double, longdouble, cfloat, cdouble, clongdouble, string, unicode, object# -#TYPE=BYTE, SHORT, INT, LONG, LONGLONG, UBYTE, USHORT, UINT, ULONG, ULONGLONG, FLOAT, DOUBLE, LONGDOUBLE, CFLOAT, CDOUBLE, CLONGDOUBLE, STRING, UNICODE, OBJECT# -#work=0,0,1,1,1,0,0,0,0,0,0,1,0,0,0,0,z,z,0# -#default=0*16,1*2,2# -*/ +/**begin repeat + * #name = byte, short, int, long, longlong, ubyte, ushort, uint, ulong, + * ulonglong, float, double, longdouble, cfloat, cdouble, clongdouble, + * string, unicode, object# + * #TYPE = BYTE, SHORT, INT, LONG, LONGLONG, UBYTE, USHORT, UINT, ULONG, + * ULONGLONG, FLOAT, DOUBLE, LONGDOUBLE, CFLOAT, CDOUBLE, CLONGDOUBLE, + * STRING, UNICODE, OBJECT# + * #work = 0,0,1,1,1,0,0,0,0,0,0,1,0,0,0,0,z,z,0# + * #default = 0*16,1*2,2# + */ -#define _NPY_UNUSED2_1 +#define _NPY_UNUSED2_1 #define _NPY_UNUSED2_z #define _NPY_UNUSED2_0 NPY_UNUSED #define _NPY_UNUSED1_0 @@ -2119,17 +2247,20 @@ void *dest, *src; #endif - /* allow base-class (if any) to do conversion */ - /* If successful, this will jump to finish: */ + /* + * allow base-class (if any) to do conversion + * If successful, this will jump to finish: + */ _WORK at work@ if (!PyArg_ParseTuple(args, "|O", &obj)) { return NULL; } typecode = PyArray_DescrFromType(PyArray_ at TYPE@); - /* typecode is new reference and stolen by - PyArray_FromAny but not PyArray_Scalar - */ + /* + * typecode is new reference and stolen by + * PyArray_FromAny but not PyArray_Scalar + */ if (obj == NULL) { #if @default@ == 0 char *mem = malloc(sizeof(@name@)); @@ -2140,30 +2271,32 @@ #elif @default@ == 1 robj = PyArray_Scalar(NULL, typecode, NULL); #elif @default@ == 2 - Py_INCREF(Py_None); - robj = Py_None; + Py_INCREF(Py_None); + robj = Py_None; #endif - Py_DECREF(typecode); + Py_DECREF(typecode); goto finish; } - /* It is expected at this point that robj is a PyArrayScalar - (even for Object Data Type) - */ + /* + * It is expected at this point that robj is a PyArrayScalar + * (even for Object Data Type) + */ arr = PyArray_FromAny(obj, typecode, 0, 0, FORCECAST, NULL); if ((arr == NULL) || (PyArray_NDIM(arr) > 0)) { return arr; } /* 0-d array */ robj = PyArray_ToScalar(PyArray_DATA(arr), (NPY_AO *)arr); - Py_DECREF(arr); + Py_DECREF(arr); finish: - -#if @default@ == 2 /* In OBJECT case, robj is no longer a - PyArrayScalar at this point but the - remaining code assumes it is - */ + /* + * In OBJECT case, robj is no longer a + * PyArrayScalar at this point but the + * remaining code assumes it is + */ +#if @default@ == 2 return robj; #else /* Normal return */ @@ -2171,9 +2304,11 @@ return robj; } - /* This return path occurs when the requested type is not created - but another scalar object is created instead (i.e. when - the base-class does the conversion in _WORK macro) */ + /* + * This return path occurs when the requested type is not created + * but another scalar object is created instead (i.e. when + * the base-class does the conversion in _WORK macro) + */ /* Need to allocate new type and copy data-area over */ if (type->tp_itemsize) { @@ -2196,7 +2331,7 @@ *((npy_ at name@ *)dest) = *((npy_ at name@ *)src); #elif @default@ == 1 /* unicode and strings */ if (itemsize == 0) { /* unicode */ - itemsize = ((PyUnicodeObject *)robj)->length * sizeof(Py_UNICODE); + itemsize = ((PyUnicodeObject *)robj)->length * sizeof(Py_UNICODE); } memcpy(dest, src, itemsize); /* @default@ == 2 won't get here */ @@ -2216,16 +2351,21 @@ static PyObject * bool_arrtype_new(PyTypeObject *NPY_UNUSED(type), PyObject *args, PyObject *NPY_UNUSED(kwds)) { - PyObject *obj=NULL; + PyObject *obj = NULL; PyObject *arr; - if (!PyArg_ParseTuple(args, "|O", &obj)) return NULL; - if (obj == NULL) + if (!PyArg_ParseTuple(args, "|O", &obj)) { + return NULL; + } + if (obj == NULL) { PyArrayScalar_RETURN_FALSE; - if (obj == Py_False) + } + if (obj == Py_False) { PyArrayScalar_RETURN_FALSE; - if (obj == Py_True) + } + if (obj == Py_True) { PyArrayScalar_RETURN_TRUE; + } arr = PyArray_FROM_OTF(obj, PyArray_BOOL, FORCECAST); if (arr && 0 == PyArray_NDIM(arr)) { Bool val = *((Bool *)PyArray_DATA(arr)); @@ -2238,27 +2378,30 @@ static PyObject * bool_arrtype_and(PyObject *a, PyObject *b) { - if (PyArray_IsScalar(a, Bool) && PyArray_IsScalar(b, Bool)) + if (PyArray_IsScalar(a, Bool) && PyArray_IsScalar(b, Bool)) { PyArrayScalar_RETURN_BOOL_FROM_LONG ((a == PyArrayScalar_True)&(b == PyArrayScalar_True)); + } return PyGenericArrType_Type.tp_as_number->nb_and(a, b); } static PyObject * bool_arrtype_or(PyObject *a, PyObject *b) { - if (PyArray_IsScalar(a, Bool) && PyArray_IsScalar(b, Bool)) + if (PyArray_IsScalar(a, Bool) && PyArray_IsScalar(b, Bool)) { PyArrayScalar_RETURN_BOOL_FROM_LONG ((a == PyArrayScalar_True)|(b == PyArrayScalar_True)); + } return PyGenericArrType_Type.tp_as_number->nb_or(a, b); } static PyObject * bool_arrtype_xor(PyObject *a, PyObject *b) { - if (PyArray_IsScalar(a, Bool) && PyArray_IsScalar(b, Bool)) + if (PyArray_IsScalar(a, Bool) && PyArray_IsScalar(b, Bool)) { PyArrayScalar_RETURN_BOOL_FROM_LONG ((a == PyArrayScalar_True)^(b == PyArrayScalar_True)); + } return PyGenericArrType_Type.tp_as_number->nb_xor(a, b); } @@ -2270,10 +2413,13 @@ #if PY_VERSION_HEX >= 0x02050000 /**begin repeat -#name=byte, short, int, long, ubyte, ushort, longlong, uint, ulong, ulonglong# -#Name=Byte, Short, Int, Long, UByte, UShort, LongLong, UInt, ULong, ULongLong# -#type=PyInt_FromLong*6, PyLong_FromLongLong*1, PyLong_FromUnsignedLong*2, PyLong_FromUnsignedLongLong# -*/ + * #name = byte, short, int, long, ubyte, ushort, longlong, uint, ulong, + * ulonglong# + * #Name = Byte, Short, Int, Long, UByte, UShort, LongLong, UInt, ULong, + * ULongLong# + * #type = PyInt_FromLong*6, PyLong_FromLongLong*1, PyLong_FromUnsignedLong*2, + * PyLong_FromUnsignedLongLong# + */ static PyNumberMethods @name at _arrtype_as_number; static PyObject * @name at _index(PyObject *self) @@ -2281,6 +2427,7 @@ return @type@(PyArrayScalar_VAL(self, @Name@)); } /**end repeat**/ + static PyObject * bool_index(PyObject *a) { @@ -2290,50 +2437,50 @@ /* Arithmetic methods -- only so we can override &, |, ^. */ static PyNumberMethods bool_arrtype_as_number = { - 0, /* nb_add */ - 0, /* nb_subtract */ - 0, /* nb_multiply */ - 0, /* nb_divide */ - 0, /* nb_remainder */ - 0, /* nb_divmod */ - 0, /* nb_power */ - 0, /* nb_negative */ - 0, /* nb_positive */ - 0, /* nb_absolute */ - (inquiry)bool_arrtype_nonzero, /* nb_nonzero */ - 0, /* nb_invert */ - 0, /* nb_lshift */ - 0, /* nb_rshift */ - (binaryfunc)bool_arrtype_and, /* nb_and */ - (binaryfunc)bool_arrtype_xor, /* nb_xor */ - (binaryfunc)bool_arrtype_or, /* nb_or */ - 0, /* nb_coerce */ - 0, /* nb_int */ - 0, /* nb_long */ - 0, /* nb_float */ - 0, /* nb_oct */ - 0, /* nb_hex */ + 0, /* nb_add */ + 0, /* nb_subtract */ + 0, /* nb_multiply */ + 0, /* nb_divide */ + 0, /* nb_remainder */ + 0, /* nb_divmod */ + 0, /* nb_power */ + 0, /* nb_negative */ + 0, /* nb_positive */ + 0, /* nb_absolute */ + (inquiry)bool_arrtype_nonzero, /* nb_nonzero */ + 0, /* nb_invert */ + 0, /* nb_lshift */ + 0, /* nb_rshift */ + (binaryfunc)bool_arrtype_and, /* nb_and */ + (binaryfunc)bool_arrtype_xor, /* nb_xor */ + (binaryfunc)bool_arrtype_or, /* nb_or */ + 0, /* nb_coerce */ + 0, /* nb_int */ + 0, /* nb_long */ + 0, /* nb_float */ + 0, /* nb_oct */ + 0, /* nb_hex */ /* Added in release 2.0 */ - 0, /* nb_inplace_add */ - 0, /* nb_inplace_subtract */ - 0, /* nb_inplace_multiply */ - 0, /* nb_inplace_divide */ - 0, /* nb_inplace_remainder */ - 0, /* nb_inplace_power */ - 0, /* nb_inplace_lshift */ - 0, /* nb_inplace_rshift */ - 0, /* nb_inplace_and */ - 0, /* nb_inplace_xor */ - 0, /* nb_inplace_or */ + 0, /* nb_inplace_add */ + 0, /* nb_inplace_subtract */ + 0, /* nb_inplace_multiply */ + 0, /* nb_inplace_divide */ + 0, /* nb_inplace_remainder */ + 0, /* nb_inplace_power */ + 0, /* nb_inplace_lshift */ + 0, /* nb_inplace_rshift */ + 0, /* nb_inplace_and */ + 0, /* nb_inplace_xor */ + 0, /* nb_inplace_or */ /* Added in release 2.2 */ /* The following require the Py_TPFLAGS_HAVE_CLASS flag */ - 0, /* nb_floor_divide */ - 0, /* nb_true_divide */ - 0, /* nb_inplace_floor_divide */ - 0, /* nb_inplace_true_divide */ + 0, /* nb_floor_divide */ + 0, /* nb_true_divide */ + 0, /* nb_inplace_floor_divide */ + 0, /* nb_inplace_true_divide */ /* Added in release 2.5 */ #if PY_VERSION_HEX >= 0x02050000 - 0, /* nb_index */ + 0, /* nb_index */ #endif }; @@ -2341,18 +2488,20 @@ void_arrtype_new(PyTypeObject *type, PyObject *args, PyObject *NPY_UNUSED(kwds)) { PyObject *obj, *arr; - ulonglong memu=1; - PyObject *new=NULL; + ulonglong memu = 1; + PyObject *new = NULL; char *destptr; - if (!PyArg_ParseTuple(args, "O", &obj)) return NULL; - /* For a VOID scalar first see if obj is an integer or long - and create new memory of that size (filled with 0) for the scalar - */ - - if (PyLong_Check(obj) || PyInt_Check(obj) || \ + if (!PyArg_ParseTuple(args, "O", &obj)) { + return NULL; + } + /* + * For a VOID scalar first see if obj is an integer or long + * and create new memory of that size (filled with 0) for the scalar + */ + if (PyLong_Check(obj) || PyInt_Check(obj) || PyArray_IsScalar(obj, Integer) || - (PyArray_Check(obj) && PyArray_NDIM(obj)==0 && \ + (PyArray_Check(obj) && PyArray_NDIM(obj)==0 && PyArray_ISINTEGER(obj))) { new = obj->ob_type->tp_as_number->nb_long(obj); } @@ -2368,7 +2517,9 @@ return NULL; } destptr = PyDataMem_NEW((int) memu); - if (destptr == NULL) return PyErr_NoMemory(); + if (destptr == NULL) { + return PyErr_NoMemory(); + } ret = type->tp_alloc(type, 0); if (ret == NULL) { PyDataMem_FREE(destptr); @@ -2376,8 +2527,8 @@ } ((PyVoidScalarObject *)ret)->obval = destptr; ((PyVoidScalarObject *)ret)->ob_size = (int) memu; - ((PyVoidScalarObject *)ret)->descr = \ - PyArray_DescrNewFromType(PyArray_VOID); + ((PyVoidScalarObject *)ret)->descr = + PyArray_DescrNewFromType(PyArray_VOID); ((PyVoidScalarObject *)ret)->descr->elsize = (int) memu; ((PyVoidScalarObject *)ret)->flags = BEHAVED | OWNDATA; ((PyVoidScalarObject *)ret)->base = NULL; @@ -2393,8 +2544,8 @@ /**************** Define Hash functions ********************/ /**begin repeat -#lname=bool,ubyte,ushort# -#name=Bool,UByte, UShort# + * #lname = bool,ubyte,ushort# + * #name = Bool,UByte, UShort# */ static long @lname at _arrtype_hash(PyObject *obj) @@ -2404,14 +2555,16 @@ /**end repeat**/ /**begin repeat -#lname=byte,short,uint,ulong# -#name=Byte,Short,UInt,ULong# + * #lname=byte,short,uint,ulong# + * #name=Byte,Short,UInt,ULong# */ static long @lname at _arrtype_hash(PyObject *obj) { long x = (long)(((Py at name@ScalarObject *)obj)->obval); - if (x == -1) x=-2; + if (x == -1) { + x = -2; + } return x; } /**end repeat**/ @@ -2421,16 +2574,18 @@ int_arrtype_hash(PyObject *obj) { long x = (long)(((PyIntScalarObject *)obj)->obval); - if (x == -1) x=-2; + if (x == -1) { + x = -2; + } return x; } #endif /**begin repeat -#char=,u# -#Char=,U# -#ext=&& (x >= LONG_MIN),# -*/ + * #char = ,u# + * #Char = ,U# + * #ext = && (x >= LONG_MIN),# + */ #if SIZEOF_LONG != SIZEOF_LONGLONG /* we assume SIZEOF_LONGLONG=2*SIZEOF_LONG */ static long @@ -2451,7 +2606,9 @@ both.v = x; y = both.hashvals[0] + (1000003)*both.hashvals[1]; } - if (y == -1) y = -2; + if (y == -1) { + y = -2; + } return y; } #endif @@ -2462,7 +2619,9 @@ ulonglong_arrtype_hash(PyObject *obj) { long x = (long)(((PyULongLongScalarObject *)obj)->obval); - if (x == -1) x=-2; + if (x == -1) { + x = -2; + } return x; } #endif @@ -2470,9 +2629,10 @@ /* Wrong thing to do for longdouble, but....*/ + /**begin repeat -#lname=float, longdouble# -#name=Float, LongDouble# + * #lname = float, longdouble# + * #name = Float, LongDouble# */ static long @lname at _arrtype_hash(PyObject *obj) @@ -2485,16 +2645,21 @@ c at lname@_arrtype_hash(PyObject *obj) { long hashreal, hashimag, combined; - hashreal = _Py_HashDouble((double) \ + hashreal = _Py_HashDouble((double) (((PyC at name@ScalarObject *)obj)->obval).real); - if (hashreal == -1) return -1; - hashimag = _Py_HashDouble((double) \ + if (hashreal == -1) { + return -1; + } + hashimag = _Py_HashDouble((double) (((PyC at name@ScalarObject *)obj)->obval).imag); - if (hashimag == -1) return -1; - + if (hashimag == -1) { + return -1; + } combined = hashreal + 1000003 * hashimag; - if (combined == -1) combined = -2; + if (combined == -1) { + combined = -2; + } return combined; } /**end repeat**/ @@ -2520,7 +2685,9 @@ /* first look in object and then hand off to generic type */ res = PyObject_GenericGetAttr(obj->obval, attr); - if (res) return res; + if (res) { + return res; + } PyErr_Clear(); return PyObject_GenericGetAttr((PyObject *)obj, attr); } @@ -2531,7 +2698,9 @@ /* first look in object and then hand off to generic type */ res = PyObject_GenericSetAttr(obj->obval, attr, val); - if (res >= 0) return res; + if (res >= 0) { + return res; + } PyErr_Clear(); return PyObject_GenericSetAttr((PyObject *)obj, attr, val); } @@ -2587,27 +2756,27 @@ static PySequenceMethods object_arrtype_as_sequence = { #if PY_VERSION_HEX >= 0x02050000 - (lenfunc)object_arrtype_length, /*sq_length*/ - (binaryfunc)object_arrtype_concat, /*sq_concat*/ - (ssizeargfunc)object_arrtype_repeat, /*sq_repeat*/ - 0, /*sq_item*/ - 0, /*sq_slice*/ - 0, /* sq_ass_item */ - 0, /* sq_ass_slice */ - (objobjproc)object_arrtype_contains, /* sq_contains */ - (binaryfunc)object_arrtype_inplace_concat, /* sq_inplace_concat */ - (ssizeargfunc)object_arrtype_inplace_repeat, /* sq_inplace_repeat */ + (lenfunc)object_arrtype_length, /*sq_length*/ + (binaryfunc)object_arrtype_concat, /*sq_concat*/ + (ssizeargfunc)object_arrtype_repeat, /*sq_repeat*/ + 0, /*sq_item*/ + 0, /*sq_slice*/ + 0, /* sq_ass_item */ + 0, /* sq_ass_slice */ + (objobjproc)object_arrtype_contains, /* sq_contains */ + (binaryfunc)object_arrtype_inplace_concat, /* sq_inplace_concat */ + (ssizeargfunc)object_arrtype_inplace_repeat, /* sq_inplace_repeat */ #else - (inquiry)object_arrtype_length, /*sq_length*/ - (binaryfunc)object_arrtype_concat, /*sq_concat*/ - (intargfunc)object_arrtype_repeat, /*sq_repeat*/ - 0, /*sq_item*/ - 0, /*sq_slice*/ - 0, /* sq_ass_item */ - 0, /* sq_ass_slice */ - (objobjproc)object_arrtype_contains, /* sq_contains */ - (binaryfunc)object_arrtype_inplace_concat, /* sq_inplace_concat */ - (intargfunc)object_arrtype_inplace_repeat, /* sq_inplace_repeat */ + (inquiry)object_arrtype_length, /*sq_length*/ + (binaryfunc)object_arrtype_concat, /*sq_concat*/ + (intargfunc)object_arrtype_repeat, /*sq_repeat*/ + 0, /*sq_item*/ + 0, /*sq_slice*/ + 0, /* sq_ass_item */ + 0, /* sq_ass_slice */ + (objobjproc)object_arrtype_contains, /* sq_contains */ + (binaryfunc)object_arrtype_inplace_concat, /* sq_inplace_concat */ + (intargfunc)object_arrtype_inplace_repeat, /* sq_inplace_repeat */ #endif }; @@ -2630,14 +2799,14 @@ int cnt; PyBufferProcs *pb = self->obval->ob_type->tp_as_buffer; - if (pb == NULL || \ - pb->bf_getsegcount == NULL || \ - (cnt = (*pb->bf_getsegcount)(self->obval, &newlen)) != 1) + if (pb == NULL || + pb->bf_getsegcount == NULL || + (cnt = (*pb->bf_getsegcount)(self->obval, &newlen)) != 1) { return 0; - - if (lenp) + } + if (lenp) { *lenp = newlen; - + } return cnt; } @@ -2646,14 +2815,13 @@ { PyBufferProcs *pb = self->obval->ob_type->tp_as_buffer; - if (pb == NULL || \ + if (pb == NULL || pb->bf_getreadbuffer == NULL || pb->bf_getsegcount == NULL) { PyErr_SetString(PyExc_TypeError, "expected a readable buffer object"); return -1; } - return (*pb->bf_getreadbuffer)(self->obval, segment, ptrptr); } @@ -2662,14 +2830,13 @@ { PyBufferProcs *pb = self->obval->ob_type->tp_as_buffer; - if (pb == NULL || \ + if (pb == NULL || pb->bf_getwritebuffer == NULL || pb->bf_getsegcount == NULL) { PyErr_SetString(PyExc_TypeError, "expected a writeable buffer object"); return -1; } - return (*pb->bf_getwritebuffer)(self->obval, segment, ptrptr); } @@ -2679,14 +2846,13 @@ { PyBufferProcs *pb = self->obval->ob_type->tp_as_buffer; - if (pb == NULL || \ + if (pb == NULL || pb->bf_getcharbuffer == NULL || pb->bf_getsegcount == NULL) { PyErr_SetString(PyExc_TypeError, "expected a character buffer object"); return -1; } - return (*pb->bf_getcharbuffer)(self->obval, segment, ptrptr); } @@ -2707,64 +2873,64 @@ static PyObject * object_arrtype_call(PyObjectScalarObject *obj, PyObject *args, PyObject *kwds) { - return PyObject_Call(obj->obval, args, kwds); + return PyObject_Call(obj->obval, args, kwds); } static PyTypeObject PyObjectArrType_Type = { PyObject_HEAD_INIT(NULL) - 0, /*ob_size*/ - "numpy.object_", /*tp_name*/ - sizeof(PyObjectScalarObject), /*tp_basicsize*/ - 0, /* tp_itemsize */ - (destructor)object_arrtype_dealloc, /* tp_dealloc */ - 0, /* tp_print */ - 0, /* tp_getattr */ - 0, /* tp_setattr */ - 0, /* tp_compare */ - 0, /* tp_repr */ - 0, /* tp_as_number */ - &object_arrtype_as_sequence, /* tp_as_sequence */ - &object_arrtype_as_mapping, /* tp_as_mapping */ - 0, /* tp_hash */ - (ternaryfunc)object_arrtype_call, /* tp_call */ - 0, /* tp_str */ - (getattrofunc)object_arrtype_getattro, /* tp_getattro */ - (setattrofunc)object_arrtype_setattro, /* tp_setattro */ - &object_arrtype_as_buffer, /* tp_as_buffer */ - 0, /* tp_flags */ - 0, /* tp_doc */ - 0, /* tp_traverse */ - 0, /* tp_clear */ - 0, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ - 0, /* tp_iter */ - 0, /* tp_iternext */ - 0, /* tp_methods */ - 0, /* tp_members */ - 0, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - 0, /* tp_init */ - 0, /* tp_alloc */ - 0, /* tp_new */ - 0, /* tp_free */ - 0, /* tp_is_gc */ - 0, /* tp_bases */ - 0, /* tp_mro */ - 0, /* tp_cache */ - 0, /* tp_subclasses */ - 0, /* tp_weaklist */ - 0, /* tp_del */ + 0, /* ob_size*/ + "numpy.object_", /* tp_name*/ + sizeof(PyObjectScalarObject), /* tp_basicsize*/ + 0, /* tp_itemsize */ + (destructor)object_arrtype_dealloc, /* tp_dealloc */ + 0, /* tp_print */ + 0, /* tp_getattr */ + 0, /* tp_setattr */ + 0, /* tp_compare */ + 0, /* tp_repr */ + 0, /* tp_as_number */ + &object_arrtype_as_sequence, /* tp_as_sequence */ + &object_arrtype_as_mapping, /* tp_as_mapping */ + 0, /* tp_hash */ + (ternaryfunc)object_arrtype_call, /* tp_call */ + 0, /* tp_str */ + (getattrofunc)object_arrtype_getattro, /* tp_getattro */ + (setattrofunc)object_arrtype_setattro, /* tp_setattro */ + &object_arrtype_as_buffer, /* tp_as_buffer */ + 0, /* tp_flags */ + 0, /* tp_doc */ + 0, /* tp_traverse */ + 0, /* tp_clear */ + 0, /* tp_richcompare */ + 0, /* tp_weaklistoffset */ + 0, /* tp_iter */ + 0, /* tp_iternext */ + 0, /* tp_methods */ + 0, /* tp_members */ + 0, /* tp_getset */ + 0, /* tp_base */ + 0, /* tp_dict */ + 0, /* tp_descr_get */ + 0, /* tp_descr_set */ + 0, /* tp_dictoffset */ + 0, /* tp_init */ + 0, /* tp_alloc */ + 0, /* tp_new */ + 0, /* tp_free */ + 0, /* tp_is_gc */ + 0, /* tp_bases */ + 0, /* tp_mro */ + 0, /* tp_cache */ + 0, /* tp_subclasses */ + 0, /* tp_weaklist */ + 0, /* tp_del */ #ifdef COUNT_ALLOCS /* these must be last and never explicitly initialized */ - 0, /* tp_allocs */ - 0, /* tp_frees */ - 0, /* tp_maxalloc */ - 0, /* tp_prev */ - 0, /* *tp_next */ + 0, /* tp_allocs */ + 0, /* tp_frees */ + 0, /* tp_maxalloc */ + 0, /* tp_prev */ + 0, /* *tp_next */ #endif }; @@ -2778,12 +2944,12 @@ static PyObject * gen_arrtype_subscript(PyObject *self, PyObject *key) { - /* Only [...], [...,], [, ...], - is allowed for indexing a scalar - - These return a new N-d array with a copy of - the data where N is the number of None's in . - + /* + * Only [...], [...,], [, ...], + * is allowed for indexing a scalar + * + * These return a new N-d array with a copy of + * the data where N is the number of None's in . */ PyObject *res, *ret; int N; @@ -2797,19 +2963,19 @@ "invalid index to scalar variable."); return NULL; } - - if (key == Py_Ellipsis) + if (key == Py_Ellipsis) { return res; - + } if (key == Py_None) { ret = add_new_axes_0d((PyArrayObject *)res, 1); Py_DECREF(res); return ret; } /* Must be a Tuple */ - N = count_new_axes_0d(key); - if (N < 0) return NULL; + if (N < 0) { + return NULL; + } ret = add_new_axes_0d((PyArrayObject *)res, N); Py_DECREF(res); return ret; @@ -2817,74 +2983,75 @@ /**begin repeat - * #name=bool, string, unicode, void# - * #NAME=Bool, String, Unicode, Void# - * #ex=_,_,_,# + * #name = bool, string, unicode, void# + * #NAME = Bool, String, Unicode, Void# + * #ex = _,_,_,# */ static PyTypeObject Py at NAME@ArrType_Type = { PyObject_HEAD_INIT(NULL) - 0, /*ob_size*/ - "numpy. at name@@ex@", /*tp_name*/ - sizeof(Py at NAME@ScalarObject), /*tp_basicsize*/ - 0, /* tp_itemsize */ - 0, /* tp_dealloc */ - 0, /* tp_print */ - 0, /* tp_getattr */ - 0, /* tp_setattr */ - 0, /* tp_compare */ - 0, /* tp_repr */ - 0, /* tp_as_number */ - 0, /* tp_as_sequence */ - 0, /* tp_as_mapping */ - 0, /* tp_hash */ - 0, /* tp_call */ - 0, /* tp_str */ - 0, /* tp_getattro */ - 0, /* tp_setattro */ - 0, /* tp_as_buffer */ - 0, /* tp_flags */ - 0, /* tp_doc */ - 0, /* tp_traverse */ - 0, /* tp_clear */ - 0, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ - 0, /* tp_iter */ - 0, /* tp_iternext */ - 0, /* tp_methods */ - 0, /* tp_members */ - 0, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - 0, /* tp_init */ - 0, /* tp_alloc */ - 0, /* tp_new */ - 0, /* tp_free */ - 0, /* tp_is_gc */ - 0, /* tp_bases */ - 0, /* tp_mro */ - 0, /* tp_cache */ - 0, /* tp_subclasses */ - 0, /* tp_weaklist */ - 0, /* tp_del */ + 0, /* ob_size*/ + "numpy. at name@@ex@", /* tp_name*/ + sizeof(Py at NAME@ScalarObject), /* tp_basicsize*/ + 0, /* tp_itemsize */ + 0, /* tp_dealloc */ + 0, /* tp_print */ + 0, /* tp_getattr */ + 0, /* tp_setattr */ + 0, /* tp_compare */ + 0, /* tp_repr */ + 0, /* tp_as_number */ + 0, /* tp_as_sequence */ + 0, /* tp_as_mapping */ + 0, /* tp_hash */ + 0, /* tp_call */ + 0, /* tp_str */ + 0, /* tp_getattro */ + 0, /* tp_setattro */ + 0, /* tp_as_buffer */ + 0, /* tp_flags */ + 0, /* tp_doc */ + 0, /* tp_traverse */ + 0, /* tp_clear */ + 0, /* tp_richcompare */ + 0, /* tp_weaklistoffset */ + 0, /* tp_iter */ + 0, /* tp_iternext */ + 0, /* tp_methods */ + 0, /* tp_members */ + 0, /* tp_getset */ + 0, /* tp_base */ + 0, /* tp_dict */ + 0, /* tp_descr_get */ + 0, /* tp_descr_set */ + 0, /* tp_dictoffset */ + 0, /* tp_init */ + 0, /* tp_alloc */ + 0, /* tp_new */ + 0, /* tp_free */ + 0, /* tp_is_gc */ + 0, /* tp_bases */ + 0, /* tp_mro */ + 0, /* tp_cache */ + 0, /* tp_subclasses */ + 0, /* tp_weaklist */ + 0, /* tp_del */ #ifdef COUNT_ALLOCS /* these must be last and never explicitly initialized */ - 0, /* tp_allocs */ - 0, /* tp_frees */ - 0, /* tp_maxalloc */ - 0, /* tp_prev */ - 0, /* *tp_next */ + 0, /* tp_allocs */ + 0, /* tp_frees */ + 0, /* tp_maxalloc */ + 0, /* tp_prev */ + 0, /* *tp_next */ #endif }; /**end repeat**/ /**begin repeat -#NAME=Byte, Short, Int, Long, LongLong, UByte, UShort, UInt, ULong, ULongLong, Float, Double, LongDouble# -#name=int*5, uint*5, float*3# -#CNAME=(CHAR, SHORT, INT, LONG, LONGLONG)*2, FLOAT, DOUBLE, LONGDOUBLE# -*/ + * #NAME = Byte, Short, Int, Long, LongLong, UByte, UShort, UInt, ULong, + * ULongLong, Float, Double, LongDouble# + * #name = int*5, uint*5, float*3# + * #CNAME = (CHAR, SHORT, INT, LONG, LONGLONG)*2, FLOAT, DOUBLE, LONGDOUBLE# + */ #if BITSOF_ at CNAME@ == 8 #define _THIS_SIZE "8" #elif BITSOF_ at CNAME@ == 16 @@ -2904,59 +3071,59 @@ #endif static PyTypeObject Py at NAME@ArrType_Type = { PyObject_HEAD_INIT(NULL) - 0, /*ob_size*/ - "numpy. at name@" _THIS_SIZE, /*tp_name*/ - sizeof(Py at NAME@ScalarObject), /*tp_basicsize*/ - 0, /* tp_itemsize */ - 0, /* tp_dealloc */ - 0, /* tp_print */ - 0, /* tp_getattr */ - 0, /* tp_setattr */ - 0, /* tp_compare */ - 0, /* tp_repr */ - 0, /* tp_as_number */ - 0, /* tp_as_sequence */ - 0, /* tp_as_mapping */ - 0, /* tp_hash */ - 0, /* tp_call */ - 0, /* tp_str */ - 0, /* tp_getattro */ - 0, /* tp_setattro */ - 0, /* tp_as_buffer */ - 0, /* tp_flags */ - 0, /* tp_doc */ - 0, /* tp_traverse */ - 0, /* tp_clear */ - 0, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ - 0, /* tp_iter */ - 0, /* tp_iternext */ - 0, /* tp_methods */ - 0, /* tp_members */ - 0, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - 0, /* tp_init */ - 0, /* tp_alloc */ - 0, /* tp_new */ - 0, /* tp_free */ - 0, /* tp_is_gc */ - 0, /* tp_bases */ - 0, /* tp_mro */ - 0, /* tp_cache */ - 0, /* tp_subclasses */ - 0, /* tp_weaklist */ - 0, /* tp_del */ + 0, /* ob_size*/ + "numpy. at name@" _THIS_SIZE, /* tp_name*/ + sizeof(Py at NAME@ScalarObject), /* tp_basicsize*/ + 0, /* tp_itemsize */ + 0, /* tp_dealloc */ + 0, /* tp_print */ + 0, /* tp_getattr */ + 0, /* tp_setattr */ + 0, /* tp_compare */ + 0, /* tp_repr */ + 0, /* tp_as_number */ + 0, /* tp_as_sequence */ + 0, /* tp_as_mapping */ + 0, /* tp_hash */ + 0, /* tp_call */ + 0, /* tp_str */ + 0, /* tp_getattro */ + 0, /* tp_setattro */ + 0, /* tp_as_buffer */ + 0, /* tp_flags */ + 0, /* tp_doc */ + 0, /* tp_traverse */ + 0, /* tp_clear */ + 0, /* tp_richcompare */ + 0, /* tp_weaklistoffset */ + 0, /* tp_iter */ + 0, /* tp_iternext */ + 0, /* tp_methods */ + 0, /* tp_members */ + 0, /* tp_getset */ + 0, /* tp_base */ + 0, /* tp_dict */ + 0, /* tp_descr_get */ + 0, /* tp_descr_set */ + 0, /* tp_dictoffset */ + 0, /* tp_init */ + 0, /* tp_alloc */ + 0, /* tp_new */ + 0, /* tp_free */ + 0, /* tp_is_gc */ + 0, /* tp_bases */ + 0, /* tp_mro */ + 0, /* tp_cache */ + 0, /* tp_subclasses */ + 0, /* tp_weaklist */ + 0, /* tp_del */ #ifdef COUNT_ALLOCS /* these must be last and never explicitly initialized */ - 0, /* tp_allocs */ - 0, /* tp_frees */ - 0, /* tp_maxalloc */ - 0, /* tp_prev */ - 0, /* *tp_next */ + 0, /* tp_allocs */ + 0, /* tp_frees */ + 0, /* tp_maxalloc */ + 0, /* tp_prev */ + 0, /* *tp_next */ #endif }; @@ -2972,10 +3139,10 @@ /**begin repeat -#NAME=CFloat, CDouble, CLongDouble# -#name=complex*3# -#CNAME=FLOAT, DOUBLE, LONGDOUBLE# -*/ + * #NAME = CFloat, CDouble, CLongDouble# + * #name = complex*3# + * #CNAME = FLOAT, DOUBLE, LONGDOUBLE# + */ #if BITSOF_ at CNAME@ == 16 #define _THIS_SIZE2 "16" #define _THIS_SIZE1 "32" @@ -2998,65 +3165,69 @@ #define _THIS_SIZE2 "256" #define _THIS_SIZE1 "512" #endif -static PyTypeObject Py at NAME@ArrType_Type = { + +#define _THIS_DOC "Composed of two " _THIS_SIZE2 " bit floats" + + static PyTypeObject Py at NAME@ArrType_Type = { PyObject_HEAD_INIT(NULL) - 0, /*ob_size*/ - "numpy. at name@" _THIS_SIZE1, /*tp_name*/ - sizeof(Py at NAME@ScalarObject), /*tp_basicsize*/ - 0, /*tp_itemsize*/ - 0, /*tp_dealloc*/ - 0, /*tp_print*/ - 0, /*tp_getattr*/ - 0, /*tp_setattr*/ - 0, /*tp_compare*/ - 0, /*tp_repr*/ - 0, /*tp_as_number*/ - 0, /*tp_as_sequence*/ - 0, /*tp_as_mapping*/ - 0, /*tp_hash */ - 0, /*tp_call*/ - 0, /*tp_str*/ - 0, /*tp_getattro*/ - 0, /*tp_setattro*/ - 0, /*tp_as_buffer*/ - Py_TPFLAGS_DEFAULT, /*tp_flags*/ - "Composed of two " _THIS_SIZE2 " bit floats", /* tp_doc */ - 0, /* tp_traverse */ - 0, /* tp_clear */ - 0, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ - 0, /* tp_iter */ - 0, /* tp_iternext */ - 0, /* tp_methods */ - 0, /* tp_members */ - 0, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - 0, /* tp_init */ - 0, /* tp_alloc */ - 0, /* tp_new */ - 0, /* tp_free */ - 0, /* tp_is_gc */ - 0, /* tp_bases */ - 0, /* tp_mro */ - 0, /* tp_cache */ - 0, /* tp_subclasses */ - 0, /* tp_weaklist */ - 0, /* tp_del */ + 0, /* ob_size*/ + "numpy. at name@" _THIS_SIZE1, /* tp_name*/ + sizeof(Py at NAME@ScalarObject), /* tp_basicsize*/ + 0, /* tp_itemsize*/ + 0, /* tp_dealloc*/ + 0, /* tp_print*/ + 0, /* tp_getattr*/ + 0, /* tp_setattr*/ + 0, /* tp_compare*/ + 0, /* tp_repr*/ + 0, /* tp_as_number*/ + 0, /* tp_as_sequence*/ + 0, /* tp_as_mapping*/ + 0, /* tp_hash */ + 0, /* tp_call*/ + 0, /* tp_str*/ + 0, /* tp_getattro*/ + 0, /* tp_setattro*/ + 0, /* tp_as_buffer*/ + Py_TPFLAGS_DEFAULT, /* tp_flags*/ + _THIS_DOC, /* tp_doc */ + 0, /* tp_traverse */ + 0, /* tp_clear */ + 0, /* tp_richcompare */ + 0, /* tp_weaklistoffset */ + 0, /* tp_iter */ + 0, /* tp_iternext */ + 0, /* tp_methods */ + 0, /* tp_members */ + 0, /* tp_getset */ + 0, /* tp_base */ + 0, /* tp_dict */ + 0, /* tp_descr_get */ + 0, /* tp_descr_set */ + 0, /* tp_dictoffset */ + 0, /* tp_init */ + 0, /* tp_alloc */ + 0, /* tp_new */ + 0, /* tp_free */ + 0, /* tp_is_gc */ + 0, /* tp_bases */ + 0, /* tp_mro */ + 0, /* tp_cache */ + 0, /* tp_subclasses */ + 0, /* tp_weaklist */ + 0, /* tp_del */ #ifdef COUNT_ALLOCS /* these must be last and never explicitly initialized */ - 0, /* tp_allocs */ - 0, /* tp_frees */ - 0, /* tp_maxalloc */ - 0, /* tp_prev */ - 0, /* *tp_next */ + 0, /* tp_allocs */ + 0, /* tp_frees */ + 0, /* tp_maxalloc */ + 0, /* tp_prev */ + 0, /* *tp_next */ #endif }; #undef _THIS_SIZE1 #undef _THIS_SIZE2 +#undef _THIS_DOC /**end repeat**/ @@ -3084,12 +3255,15 @@ PyBoolArrType_Type.tp_as_number = &bool_arrtype_as_number; #if PY_VERSION_HEX >= 0x02050000 - /* need to add dummy versions with filled-in nb_index - in-order for PyType_Ready to fill in .__index__() method + /* + * need to add dummy versions with filled-in nb_index + * in-order for PyType_Ready to fill in .__index__() method */ /**begin repeat -#name=byte, short, int, long, longlong, ubyte, ushort, uint, ulong, ulonglong# -#NAME=Byte, Short, Int, Long, LongLong, UByte, UShort, UInt, ULong, ULongLong# + * #name = byte, short, int, long, longlong, ubyte, ushort, + * uint, ulong, ulonglong# + * #NAME = Byte, Short, Int, Long, LongLong, UByte, UShort, + * UInt, ULong, ULongLong# */ Py at NAME@ArrType_Type.tp_as_number = &@name at _arrtype_as_number; Py at NAME@ArrType_Type.tp_as_number->nb_index = (unaryfunc)@name at _index; @@ -3113,15 +3287,19 @@ PyVoidArrType_Type.tp_as_sequence = &voidtype_as_sequence; /**begin repeat -#NAME=Number, Integer, SignedInteger, UnsignedInteger, Inexact, Floating, -ComplexFloating, Flexible, Character# + * #NAME= Number, Integer, SignedInteger, UnsignedInteger, Inexact, + * Floating, ComplexFloating, Flexible, Character# */ Py at NAME@ArrType_Type.tp_flags = BASEFLAGS; /**end repeat**/ /**begin repeat -#name=bool, byte, short, int, long, longlong, ubyte, ushort, uint, ulong, ulonglong, float, double, longdouble, cfloat, cdouble, clongdouble, string, unicode, void, object# -#NAME=Bool, Byte, Short, Int, Long, LongLong, UByte, UShort, UInt, ULong, ULongLong, Float, Double, LongDouble, CFloat, CDouble, CLongDouble, String, Unicode, Void, Object# + * #name = bool, byte, short, int, long, longlong, ubyte, ushort, uint, + * ulong, ulonglong, float, double, longdouble, cfloat, cdouble, + * clongdouble, string, unicode, void, object# + * #NAME = Bool, Byte, Short, Int, Long, LongLong, UByte, UShort, UInt, + * ULong, ULongLong, Float, Double, LongDouble, CFloat, CDouble, + * CLongDouble, String, Unicode, Void, Object# */ Py at NAME@ArrType_Type.tp_flags = BASEFLAGS; Py at NAME@ArrType_Type.tp_new = @name at _arrtype_new; @@ -3129,8 +3307,10 @@ /**end repeat**/ /**begin repeat -#name=bool, byte, short, ubyte, ushort, uint, ulong, ulonglong, float, longdouble, cfloat, clongdouble, void, object# -#NAME=Bool, Byte, Short, UByte, UShort, UInt, ULong, ULongLong, Float, LongDouble, CFloat, CLongDouble, Void, Object# + * #name = bool, byte, short, ubyte, ushort, uint, ulong, ulonglong, + * float, longdouble, cfloat, clongdouble, void, object# + * #NAME = Bool, Byte, Short, UByte, UShort, UInt, ULong, ULongLong, + * Float, LongDouble, CFloat, CLongDouble, Void, Object# */ Py at NAME@ArrType_Type.tp_hash = @name at _arrtype_hash; /**end repeat**/ @@ -3146,7 +3326,7 @@ #endif /**begin repeat - *#name = repr, str# + * #name = repr, str# */ PyFloatArrType_Type.tp_ at name@ = floattype_ at name@; PyCFloatArrType_Type.tp_ at name@ = cfloattype_ at name@; @@ -3163,15 +3343,16 @@ PyCDoubleArrType_Type.tp_print = cdoubletype_print; PyCLongDoubleArrType_Type.tp_print = clongdoubletype_print; - /* These need to be coded specially because getitem does not - return a normal Python type + /* + * These need to be coded specially because getitem does not + * return a normal Python type */ PyLongDoubleArrType_Type.tp_as_number = &longdoubletype_as_number; PyCLongDoubleArrType_Type.tp_as_number = &clongdoubletype_as_number; /**begin repeat - * #name=int, long, hex, oct, float, repr, str# - * #kind=tp_as_number->nb*5, tp*2# + * #name = int, long, hex, oct, float, repr, str# + * #kind = tp_as_number->nb*5, tp*2# */ PyLongDoubleArrType_Type. at kind@_ at name@ = longdoubletype_ at name@; PyCLongDoubleArrType_Type. at kind@_ at name@ = clongdoubletype_ at name@; @@ -3225,8 +3406,9 @@ i++; } - if (!user) return typenum; - + if (!user) { + return typenum; + } /* Search any registered types */ i = 0; while (i < PyArray_NUMUSERTYPES) { @@ -3267,36 +3449,41 @@ } /* Check the generic types */ - if ((type == (PyObject *) &PyNumberArrType_Type) || \ - (type == (PyObject *) &PyInexactArrType_Type) || \ - (type == (PyObject *) &PyFloatingArrType_Type)) + if ((type == (PyObject *) &PyNumberArrType_Type) || + (type == (PyObject *) &PyInexactArrType_Type) || + (type == (PyObject *) &PyFloatingArrType_Type)) { typenum = PyArray_DOUBLE; - else if (type == (PyObject *)&PyComplexFloatingArrType_Type) + } + else if (type == (PyObject *)&PyComplexFloatingArrType_Type) { typenum = PyArray_CDOUBLE; - else if ((type == (PyObject *)&PyIntegerArrType_Type) || \ - (type == (PyObject *)&PySignedIntegerArrType_Type)) + } + else if ((type == (PyObject *)&PyIntegerArrType_Type) || + (type == (PyObject *)&PySignedIntegerArrType_Type)) { typenum = PyArray_LONG; - else if (type == (PyObject *) &PyUnsignedIntegerArrType_Type) + } + else if (type == (PyObject *) &PyUnsignedIntegerArrType_Type) { typenum = PyArray_ULONG; - else if (type == (PyObject *) &PyCharacterArrType_Type) + } + else if (type == (PyObject *) &PyCharacterArrType_Type) { typenum = PyArray_STRING; - else if ((type == (PyObject *) &PyGenericArrType_Type) || \ - (type == (PyObject *) &PyFlexibleArrType_Type)) + } + else if ((type == (PyObject *) &PyGenericArrType_Type) || + (type == (PyObject *) &PyFlexibleArrType_Type)) { typenum = PyArray_VOID; + } if (typenum != PyArray_NOTYPE) { return PyArray_DescrFromType(typenum); } - /* Otherwise --- type is a sub-type of an array scalar - not corresponding to a registered data-type object. + /* + * Otherwise --- type is a sub-type of an array scalar + * not corresponding to a registered data-type object. */ - /* Do special thing for VOID sub-types - */ + /* Do special thing for VOID sub-types */ if (PyType_IsSubtype((PyTypeObject *)type, &PyVoidArrType_Type)) { new = PyArray_DescrNewFromType(PyArray_VOID); - conv = _arraydescr_fromobj(type); if (conv) { new->fields = conv->fields; @@ -3317,8 +3504,8 @@ } /*NUMPY_API - Return the tuple of ordered field names from a dictionary. -*/ + * Return the tuple of ordered field names from a dictionary. + */ static PyObject * PyArray_FieldNames(PyObject *fields) { @@ -3332,20 +3519,25 @@ return NULL; } _numpy_internal = PyImport_ImportModule("numpy.core._internal"); - if (_numpy_internal == NULL) return NULL; + if (_numpy_internal == NULL) { + return NULL; + } tup = PyObject_CallMethod(_numpy_internal, "_makenames_list", "O", fields); Py_DECREF(_numpy_internal); - if (tup == NULL) return NULL; + if (tup == NULL) { + return NULL; + } ret = PyTuple_GET_ITEM(tup, 0); ret = PySequence_Tuple(ret); Py_DECREF(tup); return ret; } -/* New reference */ /*NUMPY_API - Return descr object from array scalar. -*/ + * Return descr object from array scalar. + * + * New reference + */ static PyArray_Descr * PyArray_DescrFromScalar(PyObject *sc) { @@ -3361,8 +3553,9 @@ if (descr->elsize == 0) { PyArray_DESCR_REPLACE(descr); type_num = descr->type_num; - if (type_num == PyArray_STRING) + if (type_num == PyArray_STRING) { descr->elsize = PyString_GET_SIZE(sc); + } else if (type_num == PyArray_UNICODE) { descr->elsize = PyUnicode_GET_DATA_SIZE(sc); #ifndef Py_UNICODE_WIDE @@ -3378,18 +3571,20 @@ Py_XDECREF(descr->fields); descr->fields = NULL; } - if (descr->fields) + if (descr->fields) { descr->names = PyArray_FieldNames(descr->fields); + } PyErr_Clear(); } } return descr; } -/* New reference */ /*NUMPY_API - Get a typeobject from a type-number -- can return NULL. -*/ + * Get a typeobject from a type-number -- can return NULL. + * + * New reference + */ static PyObject * PyArray_TypeObjectFromType(int type) { @@ -3397,7 +3592,9 @@ PyObject *obj; descr = PyArray_DescrFromType(type); - if (descr == NULL) return NULL; + if (descr == NULL) { + return NULL; + } obj = (PyObject *)descr->typeobj; Py_XINCREF(obj); Py_DECREF(descr); Modified: branches/coremath/numpy/distutils/command/build_ext.py =================================================================== --- branches/coremath/numpy/distutils/command/build_ext.py 2009-02-21 23:54:31 UTC (rev 6444) +++ branches/coremath/numpy/distutils/command/build_ext.py 2009-02-22 03:41:06 UTC (rev 6445) @@ -16,7 +16,7 @@ from numpy.distutils.system_info import combine_paths from numpy.distutils.misc_util import filter_sources, has_f_sources, \ has_cxx_sources, get_ext_source_files, \ - get_numpy_include_dirs, is_sequence + get_numpy_include_dirs, is_sequence, get_build_architecture from numpy.distutils.command.config_compiler import show_fortran_compilers try: Modified: branches/coremath/numpy/distutils/fcompiler/compaq.py =================================================================== --- branches/coremath/numpy/distutils/fcompiler/compaq.py 2009-02-21 23:54:31 UTC (rev 6444) +++ branches/coremath/numpy/distutils/fcompiler/compaq.py 2009-02-22 03:41:06 UTC (rev 6445) @@ -79,7 +79,7 @@ m.initialize() ar_exe = m.lib except DistutilsPlatformError, msg: - print 'Ignoring "%s" (one should fix me in fcompiler/compaq.py)' % (msg) + pass except AttributeError, msg: if '_MSVCCompiler__root' in str(msg): print 'Ignoring "%s" (I think it is msvccompiler.py bug)' % (msg) Modified: branches/coremath/numpy/distutils/mingw32ccompiler.py =================================================================== --- branches/coremath/numpy/distutils/mingw32ccompiler.py 2009-02-21 23:54:31 UTC (rev 6444) +++ branches/coremath/numpy/distutils/mingw32ccompiler.py 2009-02-22 03:41:06 UTC (rev 6445) @@ -12,6 +12,8 @@ import subprocess import sys import log +import subprocess +import re # Overwrite certain distutils.ccompiler functions: import numpy.distutils.ccompiler @@ -29,8 +31,12 @@ from distutils.unixccompiler import UnixCCompiler from distutils.msvccompiler import get_build_version as get_build_msvc_version -from numpy.distutils.misc_util import msvc_runtime_library +from numpy.distutils.misc_util import msvc_runtime_library, get_build_architecture +# Useful to generate table of symbols from a dll +_START = re.compile(r'\[Ordinal/Name Pointer\] Table') +_TABLE = re.compile(r'^\s+\[([\s*[0-9]*)\] ([a-zA-Z0-9_]*)') + # the same as cygwin plus some additional parameters class Mingw32CCompiler(distutils.cygwinccompiler.CygwinCCompiler): """ A modified MingW32 compiler compatible with an MSVC built Python. @@ -89,17 +95,29 @@ # linker_exe='gcc -mno-cygwin', # linker_so='%s --driver-name g++ -mno-cygwin -mdll -static %s' # % (self.linker, entry_point)) - if self.gcc_version <= "3.0.0": - self.set_executables(compiler='gcc -mno-cygwin -O2 -w', - compiler_so='gcc -mno-cygwin -mdll -O2 -w -Wstrict-prototypes', - linker_exe='g++ -mno-cygwin', - linker_so='%s -mno-cygwin -mdll -static %s' - % (self.linker, entry_point)) + + # MS_WIN64 should be defined when building for amd64 on windows, but + # python headers define it only for MS compilers, which has all kind of + # bad consequences, like using Py_ModuleInit4 instead of + # Py_ModuleInit4_64, etc... So we add it here + if get_build_architecture() == 'AMD64': + self.set_executables( + compiler='gcc -DMS_WIN64 -mno-cygwin -O0 -Wall', + compiler_so='gcc -DMS_WIN64 -mno-cygwin -O0 -Wall -Wstrict-prototypes', + linker_exe='gcc -mno-cygwin', + linker_so='gcc -mno-cygwin -shared') else: - self.set_executables(compiler='gcc -mno-cygwin -O2 -Wall', - compiler_so='gcc -mno-cygwin -O2 -Wall -Wstrict-prototypes', - linker_exe='g++ -mno-cygwin', - linker_so='g++ -mno-cygwin -shared') + if self.gcc_version <= "3.0.0": + self.set_executables(compiler='gcc -mno-cygwin -O2 -w', + compiler_so='gcc -mno-cygwin -mdll -O2 -w -Wstrict-prototypes', + linker_exe='g++ -mno-cygwin', + linker_so='%s -mno-cygwin -mdll -static %s' + % (self.linker, entry_point)) + else: + self.set_executables(compiler='gcc -mno-cygwin -O2 -Wall', + compiler_so='gcc -mno-cygwin -O2 -Wall -Wstrict-prototypes', + linker_exe='g++ -mno-cygwin', + linker_so='g++ -mno-cygwin -shared') # added for python2.3 support # we can't pass it through set_executables because pre 2.2 would fail self.compiler_cxx = ['g++'] @@ -191,11 +209,102 @@ # object_filenames () +def find_python_dll(): + maj, min, micro = [int(i) for i in sys.version_info[:3]] + dllname = 'python%d%d.dll' % (maj, min) + print "Looking for %s" % dllname + + # We can't do much here: + # - find it in python main dir + # - in system32, + # - ortherwise (Sxs), I don't know how to get it. + lib_dirs = [] + lib_dirs.append(os.path.join(sys.prefix, 'lib')) + try: + lib_dirs.append(os.path.join(os.environ['SYSTEMROOT'], 'system32')) + except KeyError: + pass + + for d in lib_dirs: + dll = os.path.join(d, dllname) + if os.path.exists(dll): + return dll + + raise ValueError("%s not found in %s" % (dllname, lib_dirs)) + +def dump_table(dll): + st = subprocess.Popen(["objdump.exe", "-p", dll], stdout=subprocess.PIPE) + return st.stdout.readlines() + +def generate_def(dll, dfile): + """Given a dll file location, get all its exported symbols and dump them + into the given def file. + + The .def file will be overwritten""" + dump = dump_table(dll) + for i in range(len(dump)): + if _START.match(dump[i]): + break + + if i == len(dump): + raise ValueError("Symbol table not found") + + syms = [] + for j in range(i+1, len(dump)): + m = _TABLE.match(dump[j]) + if m: + syms.append((int(m.group(1).strip()), m.group(2))) + else: + break + + if len(syms) == 0: + log.warn('No symbols found in %s' % dll) + + d = open(dfile, 'w') + d.write('LIBRARY %s\n' % os.path.basename(dll)) + d.write(';CODE PRELOAD MOVEABLE DISCARDABLE\n') + d.write(';DATA PRELOAD SINGLE\n') + d.write('\nEXPORTS\n') + for s in syms: + #d.write('@%d %s\n' % (s[0], s[1])) + d.write('%s\n' % s[1]) + d.close() + def build_import_library(): + if os.name != 'nt': + return + + arch = get_build_architecture() + if arch == 'AMD64': + return _build_import_library_amd64() + elif arch == 'Intel': + return _build_import_library_x86() + else: + raise ValueError("Unhandled arch %s" % arch) + +def _build_import_library_amd64(): + dll_file = find_python_dll() + + out_name = "libpython%d%d.a" % tuple(sys.version_info[:2]) + out_file = os.path.join(sys.prefix, 'libs', out_name) + if os.path.isfile(out_file): + log.debug('Skip building import library: "%s" exists' % (out_file)) + return + + def_name = "python%d%d.def" % tuple(sys.version_info[:2]) + def_file = os.path.join(sys.prefix,'libs',def_name) + + log.info('Building import library (arch=AMD64): "%s" (from %s)' \ + % (out_file, dll_file)) + + generate_def(dll_file, def_file) + + cmd = ['dlltool', '-d', def_file, '-l', out_file] + subprocess.Popen(cmd) + +def _build_import_library_x86(): """ Build the import libraries for Mingw32-gcc on Windows """ - if os.name != 'nt': - return lib_name = "python%d%d.lib" % tuple(sys.version_info[:2]) lib_file = os.path.join(sys.prefix,'libs',lib_name) out_name = "libpython%d%d.a" % tuple(sys.version_info[:2]) @@ -206,7 +315,7 @@ if os.path.isfile(out_file): log.debug('Skip building import library: "%s" exists' % (out_file)) return - log.info('Building import library: "%s"' % (out_file)) + log.info('Building import library (ARCH=x86): "%s"' % (out_file)) from numpy.distutils import lib2def @@ -254,6 +363,9 @@ _MSVCRVER_TO_FULLVER['90'] = msvcrt.CRT_ASSEMBLY_VERSION else: _MSVCRVER_TO_FULLVER['90'] = "9.0.21022.8" + # I took one version in my SxS directory: no idea if it is the good + # one, and we can't retrieve it from python + _MSVCRVER_TO_FULLVER['90'] = "8.0.50727.42" except ImportError: # If we are here, means python was not built with MSVC. Not sure what to do # in that case: manifest building will fail, but it should not be used in @@ -344,7 +456,7 @@ def generate_manifest(config): msver = get_build_msvc_version() if msver is not None: - if msver >= 8: + if msver >= 9: check_embedded_msvcr_match_linked(msver) ma = int(msver) mi = int((msver - ma) * 10) Modified: branches/coremath/numpy/lib/tests/test_io.py =================================================================== --- branches/coremath/numpy/lib/tests/test_io.py 2009-02-21 23:54:31 UTC (rev 6444) +++ branches/coremath/numpy/lib/tests/test_io.py 2009-02-22 03:41:06 UTC (rev 6445) @@ -6,11 +6,19 @@ import StringIO from tempfile import NamedTemporaryFile -import sys +import sys, time +from datetime import datetime MAJVER, MINVER = sys.version_info[:2] +def strptime(s, fmt=None): + """This function is available in the datetime module only + from Python >= 2.5. + + """ + return datetime(*time.strptime(s, fmt)[:3]) + class RoundtripTest(object): def roundtrip(self, save_func, *args, **kwargs): """ @@ -538,12 +546,11 @@ def test_converters_cornercases(self): "Test the conversion to datetime." - from datetime import datetime - converter = {'date':lambda s: datetime.strptime(s,'%Y-%m-%d %H:%M:%SZ')} + converter = {'date': lambda s: strptime(s, '%Y-%m-%d %H:%M:%SZ')} data = StringIO.StringIO('2009-02-03 12:00:00Z, 72214.0') test = np.ndfromtxt(data, delimiter=',', dtype=None, names=['date','stid'], converters=converter) - control = np.array((datetime(2009,02,03,12,0), 72214.), + control = np.array((datetime(2009,02,03), 72214.), dtype=[('date', np.object_), ('stid', float)]) assert_equal(test, control) @@ -583,11 +590,11 @@ 2; 2002-01-31 """ ndtype = [('idx', int), ('code', np.object)] - func = lambda s: date(*(time.strptime(s.strip(), "%Y-%m-%d")[:3])) + func = lambda s: strptime(s.strip(), "%Y-%m-%d") converters = {1: func} test = np.genfromtxt(StringIO.StringIO(data), delimiter=";", dtype=ndtype, converters=converters) - control = np.array([(1, date(2001,1,1)), (2, date(2002,1,31))], + control = np.array([(1, datetime(2001,1,1)), (2, datetime(2002,1,31))], dtype=ndtype) assert_equal(test, control) # From numpy-svn at scipy.org Sat Feb 21 22:53:47 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sat, 21 Feb 2009 21:53:47 -0600 (CST) Subject: [Numpy-svn] r6446 - trunk/numpy/core Message-ID: <20090222035347.0E261C7C009@scipy.org> Author: cdavid Date: 2009-02-21 21:53:41 -0600 (Sat, 21 Feb 2009) New Revision: 6446 Modified: trunk/numpy/core/setup.py Log: Remove hardcoded size of long double in config.h generation - should have been removed before merging. Modified: trunk/numpy/core/setup.py =================================================================== --- trunk/numpy/core/setup.py 2009-02-22 03:41:06 UTC (rev 6445) +++ trunk/numpy/core/setup.py 2009-02-22 03:53:41 UTC (rev 6446) @@ -490,7 +490,6 @@ #else fprintf(fp, "/* PY_LONG_LONG not defined */\n"); #endif - fprintf(fp, "#define SIZEOF_LONG_DOUBLE 8\n"); #ifndef CHAR_BIT { unsigned char var = 2; From numpy-svn at scipy.org Sat Feb 21 23:13:06 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sat, 21 Feb 2009 22:13:06 -0600 (CST) Subject: [Numpy-svn] r6447 - trunk Message-ID: <20090222041306.896A4C7C009@scipy.org> Author: cdavid Date: 2009-02-21 22:12:48 -0600 (Sat, 21 Feb 2009) New Revision: 6447 Modified: trunk/ Log: Removed merge tracking for "svnmerge" for http://svn.scipy.org/svn/numpy/branches/numpy-mingw-w64 Property changes on: trunk ___________________________________________________________________ Name: svnmerge-integrated - /branches/distutils-revamp:1-2752 /branches/dynamic_cpu_configuration:1-6101 /branches/multicore:1-3687 /branches/numpy-mingw-w64:1-6440 /trunk:1-2871 + /branches/distutils-revamp:1-2752 /branches/dynamic_cpu_configuration:1-6101 /branches/multicore:1-3687 /trunk:1-2871 From numpy-svn at scipy.org Sat Feb 21 23:13:55 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sat, 21 Feb 2009 22:13:55 -0600 (CST) Subject: [Numpy-svn] r6448 - branches/numpy-mingw-w64 Message-ID: <20090222041355.5AE8BC8412A@scipy.org> Author: cdavid Date: 2009-02-21 22:13:41 -0600 (Sat, 21 Feb 2009) New Revision: 6448 Modified: branches/numpy-mingw-w64/ Log: Removed merge tracking for "svnmerge" for http://svn.scipy.org/svn/numpy/trunk Property changes on: branches/numpy-mingw-w64 ___________________________________________________________________ Name: svnmerge-integrated - /branches/distutils-revamp:1-2752 /branches/dynamic_cpu_configuration:1-6101 /branches/multicore:1-3687 /branches/visualstudio_manifest:1-6077 /trunk:1-6424 + /branches/distutils-revamp:1-2752 /branches/dynamic_cpu_configuration:1-6101 /branches/multicore:1-3687 /branches/visualstudio_manifest:1-6077 From numpy-svn at scipy.org Sat Feb 21 23:15:12 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sat, 21 Feb 2009 22:15:12 -0600 (CST) Subject: [Numpy-svn] r6449 - branches Message-ID: <20090222041512.95A26C8412C@scipy.org> Author: cdavid Date: 2009-02-21 22:14:57 -0600 (Sat, 21 Feb 2009) New Revision: 6449 Removed: branches/numpy-mingw-w64/ Log: Remove numpy-mingw-w64 branch: integrated into trunk From numpy-svn at scipy.org Sun Feb 22 00:27:52 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sat, 21 Feb 2009 23:27:52 -0600 (CST) Subject: [Numpy-svn] r6450 - in branches/coremath: . numpy/core/src Message-ID: <20090222052752.0F29EC7C031@scipy.org> Author: cdavid Date: 2009-02-21 23:27:40 -0600 (Sat, 21 Feb 2009) New Revision: 6450 Modified: branches/coremath/ branches/coremath/numpy/core/src/multiarraymodule.c Log: Merged revisions 6444,6446-6447 via svnmerge from http://svn.scipy.org/svn/numpy/trunk ........ r6444 | charris | 2009-02-22 08:54:31 +0900 (Sun, 22 Feb 2009) | 1 line Coding style cleanups. Finishes multiarraymodule.c. ........ r6446 | cdavid | 2009-02-22 12:53:41 +0900 (Sun, 22 Feb 2009) | 1 line Remove hardcoded size of long double in config.h generation - should have been removed before merging. ........ r6447 | cdavid | 2009-02-22 13:12:48 +0900 (Sun, 22 Feb 2009) | 3 lines Removed merge tracking for "svnmerge" for http://svn.scipy.org/svn/numpy/branches/numpy-mingw-w64 ........ Property changes on: branches/coremath ___________________________________________________________________ Name: svnmerge-integrated - /branches/distutils-revamp:1-2752 /branches/dynamic_cpu_configuration:1-6101 /branches/fix_float_format:1-6359 /branches/multicore:1-3687 /branches/numpy-mingw-w64:1-6150 /branches/visualstudio_manifest:1-6077 /trunk:1-6443 + /branches/distutils-revamp:1-2752 /branches/dynamic_cpu_configuration:1-6101 /branches/fix_float_format:1-6359 /branches/multicore:1-3687 /branches/numpy-mingw-w64:1-6150 /branches/visualstudio_manifest:1-6077 /trunk:1-6449 Modified: branches/coremath/numpy/core/src/multiarraymodule.c =================================================================== --- branches/coremath/numpy/core/src/multiarraymodule.c 2009-02-22 04:14:57 UTC (rev 6449) +++ branches/coremath/numpy/core/src/multiarraymodule.c 2009-02-22 05:27:40 UTC (rev 6450) @@ -4009,34 +4009,37 @@ "argmax is unsupported for this type"); return NULL; } - else if (PyArray_ISUNSIGNED(ap)) + else if (PyArray_ISUNSIGNED(ap)) { obj = PyInt_FromLong((long) -1); - - else if (PyArray_TYPE(ap)==PyArray_BOOL) + } + else if (PyArray_TYPE(ap) == PyArray_BOOL) { obj = PyInt_FromLong((long) 1); - - else + } + else { obj = PyInt_FromLong((long) 0); - + } new = PyArray_EnsureAnyArray(PyNumber_Subtract(obj, (PyObject *)ap)); Py_DECREF(obj); - if (new == NULL) return NULL; + if (new == NULL) { + return NULL; + } ret = PyArray_ArgMax((PyArrayObject *)new, axis, out); Py_DECREF(new); return ret; } /*NUMPY_API - Max -*/ + * Max + */ static PyObject * PyArray_Max(PyArrayObject *ap, int axis, PyArrayObject *out) { PyArrayObject *arr; PyObject *ret; - if ((arr=(PyArrayObject *)_check_axis(ap, &axis, 0))==NULL) + if ((arr=(PyArrayObject *)_check_axis(ap, &axis, 0)) == NULL) { return NULL; + } ret = PyArray_GenericReduceFunction(arr, n_ops.maximum, axis, arr->descr->type_num, out); Py_DECREF(arr); @@ -4044,16 +4047,17 @@ } /*NUMPY_API - Min -*/ + * Min + */ static PyObject * PyArray_Min(PyArrayObject *ap, int axis, PyArrayObject *out) { PyArrayObject *arr; PyObject *ret; - if ((arr=(PyArrayObject *)_check_axis(ap, &axis, 0))==NULL) + if ((arr=(PyArrayObject *)_check_axis(ap, &axis, 0)) == NULL) { return NULL; + } ret = PyArray_GenericReduceFunction(arr, n_ops.minimum, axis, arr->descr->type_num, out); Py_DECREF(arr); @@ -4061,21 +4065,26 @@ } /*NUMPY_API - Ptp -*/ + * Ptp + */ static PyObject * PyArray_Ptp(PyArrayObject *ap, int axis, PyArrayObject *out) { PyArrayObject *arr; PyObject *ret; - PyObject *obj1=NULL, *obj2=NULL; + PyObject *obj1 = NULL, *obj2 = NULL; - if ((arr=(PyArrayObject *)_check_axis(ap, &axis, 0))==NULL) + if ((arr=(PyArrayObject *)_check_axis(ap, &axis, 0)) == NULL) { return NULL; + } obj1 = PyArray_Max(arr, axis, out); - if (obj1 == NULL) goto fail; + if (obj1 == NULL) { + goto fail; + } obj2 = PyArray_Min(arr, axis, NULL); - if (obj2 == NULL) goto fail; + if (obj2 == NULL) { + goto fail; + } Py_DECREF(arr); if (out) { ret = PyObject_CallFunction(n_ops.subtract, "OOO", out, obj2, out); @@ -4096,58 +4105,60 @@ /*NUMPY_API - ArgMax -*/ + * ArgMax + */ static PyObject * PyArray_ArgMax(PyArrayObject *op, int axis, PyArrayObject *out) { - PyArrayObject *ap=NULL, *rp=NULL; + PyArrayObject *ap = NULL, *rp = NULL; PyArray_ArgFunc* arg_func; char *ip; intp *rptr; intp i, n, m; int elsize; - int copyret=0; - + int copyret = 0; NPY_BEGIN_THREADS_DEF; - if ((ap=(PyAO *)_check_axis(op, &axis, 0))==NULL) return NULL; - - /* We need to permute the array so that axis is placed at the end. - And all other dimensions are shifted left. - */ + if ((ap=(PyAO *)_check_axis(op, &axis, 0)) == NULL) { + return NULL; + } + /* + * We need to permute the array so that axis is placed at the end. + * And all other dimensions are shifted left. + */ if (axis != ap->nd-1) { PyArray_Dims newaxes; intp dims[MAX_DIMS]; int i; + newaxes.ptr = dims; newaxes.len = ap->nd; - for (i=0; ind-1; i++) dims[i] = i+1; - dims[ap->nd-1] = axis; + for (i = 0; i < axis; i++) dims[i] = i; + for (i = axis; i < ap->nd - 1; i++) dims[i] = i + 1; + dims[ap->nd - 1] = axis; op = (PyAO *)PyArray_Transpose(ap, &newaxes); Py_DECREF(ap); - if (op == NULL) return NULL; + if (op == NULL) { + return NULL; + } } else { op = ap; } - /* Will get native-byte order contiguous copy. - */ - ap = (PyArrayObject *)\ + /* Will get native-byte order contiguous copy. */ + ap = (PyArrayObject *) PyArray_ContiguousFromAny((PyObject *)op, op->descr->type_num, 1, 0); - Py_DECREF(op); - if (ap == NULL) return NULL; - + if (ap == NULL) { + return NULL; + } arg_func = ap->descr->f->argmax; if (arg_func == NULL) { PyErr_SetString(PyExc_TypeError, "data type not ordered"); goto fail; } - elsize = ap->descr->elsize; m = ap->dimensions[ap->nd-1]; if (m == 0) { @@ -4162,11 +4173,13 @@ ap->dimensions, PyArray_INTP, NULL, NULL, 0, 0, (PyObject *)ap); - if (rp == NULL) goto fail; + if (rp == NULL) { + goto fail; + } } else { - if (PyArray_SIZE(out) != \ - PyArray_MultiplyList(ap->dimensions, ap->nd-1)) { + if (PyArray_SIZE(out) != + PyArray_MultiplyList(ap->dimensions, ap->nd - 1)) { PyErr_SetString(PyExc_TypeError, "invalid shape for output array."); } @@ -4174,14 +4187,18 @@ PyArray_FromArray(out, PyArray_DescrFromType(PyArray_INTP), NPY_CARRAY | NPY_UPDATEIFCOPY); - if (rp == NULL) goto fail; - if (rp != out) copyret = 1; + if (rp == NULL) { + goto fail; + } + if (rp != out) { + copyret = 1; + } } NPY_BEGIN_THREADS_DESCR(ap->descr); n = PyArray_SIZE(ap)/m; rptr = (intp *)rp->data; - for (ip = ap->data, i=0; idata, i = 0; i < n; i++, ip += elsize*m) { arg_func(ip, m, rptr, ap); rptr += 1; } @@ -4205,8 +4222,8 @@ /*NUMPY_API - Take -*/ + * Take + */ static PyObject * PyArray_TakeFrom(PyArrayObject *self0, PyObject *indices0, int axis, PyArrayObject *ret, NPY_CLIPMODE clipmode) @@ -4216,29 +4233,33 @@ intp nd, i, j, n, m, max_item, tmp, chunk, nelem; intp shape[MAX_DIMS]; char *src, *dest; - int copyret=0; + int copyret = 0; int err; indices = NULL; self = (PyAO *)_check_axis(self0, &axis, CARRAY); - if (self == NULL) return NULL; - + if (self == NULL) { + return NULL; + } indices = (PyArrayObject *)PyArray_ContiguousFromAny(indices0, PyArray_INTP, 1, 0); - if (indices == NULL) goto fail; - + if (indices == NULL) { + goto fail; + } n = m = chunk = 1; nd = self->nd + indices->nd - 1; - for (i=0; i< nd; i++) { + for (i = 0; i < nd; i++) { if (i < axis) { shape[i] = self->dimensions[i]; n *= shape[i]; - } else { + } + else { if (i < axis+indices->nd) { shape[i] = indices->dimensions[i-axis]; m *= shape[i]; - } else { + } + else { shape[i] = self->dimensions[i-indices->nd+1]; chunk *= shape[i]; } @@ -4252,7 +4273,9 @@ NULL, NULL, 0, (PyObject *)self); - if (ret == NULL) goto fail; + if (ret == NULL) { + goto fail; + } } else { PyArrayObject *obj; @@ -4268,17 +4291,22 @@ } if (clipmode == NPY_RAISE) { - /* we need to make sure and get a copy - so the input array is not changed - before the error is called - */ + /* + * we need to make sure and get a copy + * so the input array is not changed + * before the error is called + */ flags |= NPY_ENSURECOPY; } obj = (PyArrayObject *)PyArray_FromArray(ret, self->descr, flags); - if (obj != ret) copyret = 1; + if (obj != ret) { + copyret = 1; + } ret = obj; - if (ret == NULL) goto fail; + if (ret == NULL) { + goto fail; + } } max_item = self->dimensions[axis]; @@ -4289,63 +4317,73 @@ func = self->descr->f->fasttake; if (func == NULL) { - - switch(clipmode) { - case NPY_RAISE: - for(i=0; idata))[j]; - if (tmp < 0) tmp = tmp+max_item; - if ((tmp < 0) || (tmp >= max_item)) { - PyErr_SetString(PyExc_IndexError, - "index out of range "\ - "for array"); - goto fail; + switch(clipmode) { + case NPY_RAISE: + for (i = 0; i < n; i++) { + for (j = 0; j < m; j++) { + tmp = ((intp *)(indices->data))[j]; + if (tmp < 0) { + tmp = tmp + max_item; + } + if ((tmp < 0) || (tmp >= max_item)) { + PyErr_SetString(PyExc_IndexError, + "index out of range "\ + "for array"); + goto fail; + } + memmove(dest, src + tmp*chunk, chunk); + dest += chunk; } - memmove(dest, src+tmp*chunk, chunk); - dest += chunk; + src += chunk*max_item; } - src += chunk*max_item; - } - break; - case NPY_WRAP: - for(i=0; idata))[j]; - if (tmp < 0) while (tmp < 0) tmp += max_item; - else if (tmp >= max_item) - while (tmp >= max_item) - tmp -= max_item; - memmove(dest, src+tmp*chunk, chunk); - dest += chunk; + break; + case NPY_WRAP: + for (i = 0; i < n; i++) { + for (j = 0; j < m; j++) { + tmp = ((intp *)(indices->data))[j]; + if (tmp < 0) { + while (tmp < 0) { + tmp += max_item; + } + } + else if (tmp >= max_item) { + while (tmp >= max_item) { + tmp -= max_item; + } + } + memmove(dest, src + tmp*chunk, chunk); + dest += chunk; + } + src += chunk*max_item; } - src += chunk*max_item; - } - break; - case NPY_CLIP: - for(i=0; idata))[j]; - if (tmp < 0) - tmp = 0; - else if (tmp >= max_item) - tmp = max_item-1; - memmove(dest, src+tmp*chunk, chunk); - dest += chunk; + break; + case NPY_CLIP: + for (i = 0; i < n; i++) { + for (j = 0; j < m; j++) { + tmp = ((intp *)(indices->data))[j]; + if (tmp < 0) { + tmp = 0; + } + else if (tmp >= max_item) { + tmp = max_item - 1; + } + memmove(dest, src+tmp*chunk, chunk); + dest += chunk; + } + src += chunk*max_item; } - src += chunk*max_item; + break; } - break; } - } else { err = func(dest, src, (intp *)(indices->data), max_item, n, m, nelem, clipmode); - if (err) goto fail; + if (err) { + goto fail; + } } PyArray_INCREF(ret); - Py_XDECREF(indices); Py_XDECREF(self); if (copyret) { @@ -4355,10 +4393,8 @@ Py_DECREF(ret); ret = (PyArrayObject *)obj; } - return (PyObject *)ret; - fail: PyArray_XDECREF_ERR(ret); Py_XDECREF(indices); @@ -4367,8 +4403,8 @@ } /*NUMPY_API - Put values into an array -*/ + * Put values into an array + */ static PyObject * PyArray_PutTo(PyArrayObject *self, PyObject* values0, PyObject *indices0, NPY_CLIPMODE clipmode) @@ -4380,7 +4416,6 @@ indices = NULL; values = NULL; - if (!PyArray_Check(self)) { PyErr_SetString(PyExc_TypeError, "put: first argument must be an array"); @@ -4389,68 +4424,86 @@ if (!PyArray_ISCONTIGUOUS(self)) { PyArrayObject *obj; int flags = NPY_CARRAY | NPY_UPDATEIFCOPY; + if (clipmode == NPY_RAISE) { flags |= NPY_ENSURECOPY; } Py_INCREF(self->descr); obj = (PyArrayObject *)PyArray_FromArray(self, self->descr, flags); - if (obj != self) copied = 1; + if (obj != self) { + copied = 1; + } self = obj; } max_item = PyArray_SIZE(self); dest = self->data; chunk = self->descr->elsize; - indices = (PyArrayObject *)PyArray_ContiguousFromAny(indices0, PyArray_INTP, 0, 0); - if (indices == NULL) goto fail; + if (indices == NULL) { + goto fail; + } ni = PyArray_SIZE(indices); - Py_INCREF(self->descr); values = (PyArrayObject *)PyArray_FromAny(values0, self->descr, 0, 0, DEFAULT | FORCECAST, NULL); - if (values == NULL) goto fail; + if (values == NULL) { + goto fail; + } nv = PyArray_SIZE(values); - if (nv <= 0) goto finish; + if (nv <= 0) { + goto finish; + } if (PyDataType_REFCHK(self->descr)) { switch(clipmode) { case NPY_RAISE: - for(i=0; idata + chunk * (i % nv); + for (i = 0; i < ni; i++) { + src = values->data + chunk*(i % nv); tmp = ((intp *)(indices->data))[i]; - if (tmp < 0) tmp = tmp+max_item; + if (tmp < 0) { + tmp = tmp + max_item; + } if ((tmp < 0) || (tmp >= max_item)) { PyErr_SetString(PyExc_IndexError, - "index out of " \ - "range for array"); + "index out of " \ + "range for array"); goto fail; } PyArray_Item_INCREF(src, self->descr); PyArray_Item_XDECREF(dest+tmp*chunk, self->descr); - memmove(dest + tmp * chunk, src, chunk); + memmove(dest + tmp*chunk, src, chunk); } break; case NPY_WRAP: - for(i=0; idata + chunk * (i % nv); tmp = ((intp *)(indices->data))[i]; - if (tmp < 0) while(tmp < 0) tmp+=max_item; - else if (tmp >= max_item) - while(tmp >= max_item) + if (tmp < 0) { + while (tmp < 0) { + tmp += max_item; + } + } + else if (tmp >= max_item) { + while (tmp >= max_item) { tmp -= max_item; + } + } PyArray_Item_INCREF(src, self->descr); PyArray_Item_XDECREF(dest+tmp*chunk, self->descr); memmove(dest + tmp * chunk, src, chunk); } break; case NPY_CLIP: - for(i=0; idata + chunk * (i % nv); tmp = ((intp *)(indices->data))[i]; - if (tmp < 0) tmp = 0; - else if (tmp >= max_item) + if (tmp < 0) { + tmp = 0; + } + else if (tmp >= max_item) { tmp = max_item - 1; + } PyArray_Item_INCREF(src, self->descr); PyArray_Item_XDECREF(dest+tmp*chunk, self->descr); memmove(dest + tmp * chunk, src, chunk); @@ -4461,37 +4514,48 @@ else { switch(clipmode) { case NPY_RAISE: - for(i=0; idata + chunk * (i % nv); tmp = ((intp *)(indices->data))[i]; - if (tmp < 0) tmp = tmp+max_item; + if (tmp < 0) { + tmp = tmp + max_item; + } if ((tmp < 0) || (tmp >= max_item)) { PyErr_SetString(PyExc_IndexError, - "index out of " \ - "range for array"); + "index out of " \ + "range for array"); goto fail; } memmove(dest + tmp * chunk, src, chunk); } break; case NPY_WRAP: - for(i=0; idata + chunk * (i % nv); tmp = ((intp *)(indices->data))[i]; - if (tmp < 0) while(tmp < 0) tmp+=max_item; - else if (tmp >= max_item) - while(tmp >= max_item) + if (tmp < 0) { + while (tmp < 0) { + tmp += max_item; + } + } + else if (tmp >= max_item) { + while (tmp >= max_item) { tmp -= max_item; + } + } memmove(dest + tmp * chunk, src, chunk); } break; case NPY_CLIP: - for(i=0; idata + chunk * (i % nv); tmp = ((intp *)(indices->data))[i]; - if (tmp < 0) tmp = 0; - else if (tmp >= max_item) + if (tmp < 0) { + tmp = 0; + } + else if (tmp >= max_item) { tmp = max_item - 1; + } memmove(dest + tmp * chunk, src, chunk); } break; @@ -4526,15 +4590,15 @@ if (!PyArg_ParseTupleAndKeywords(args, kwds, "O!OO:putmask", kwlist, &PyArray_Type, - &array, &mask, &values)) + &array, &mask, &values)) { return NULL; - + } return PyArray_PutMask((PyArrayObject *)array, values, mask); } /*NUMPY_API - Put values into an array according to a mask. -*/ + * Put values into an array according to a mask. + */ static PyObject * PyArray_PutMask(PyArrayObject *self, PyObject* values0, PyObject* mask0) { @@ -4542,11 +4606,10 @@ PyArrayObject *mask, *values; int i, chunk, ni, max_item, nv, tmp; char *src, *dest; - int copied=0; + int copied = 0; mask = NULL; values = NULL; - if (!PyArray_Check(self)) { PyErr_SetString(PyExc_TypeError, "putmask: first argument must "\ @@ -4556,20 +4619,24 @@ if (!PyArray_ISCONTIGUOUS(self)) { PyArrayObject *obj; int flags = NPY_CARRAY | NPY_UPDATEIFCOPY; + Py_INCREF(self->descr); obj = (PyArrayObject *)PyArray_FromArray(self, self->descr, flags); - if (obj != self) copied = 1; + if (obj != self) { + copied = 1; + } self = obj; } max_item = PyArray_SIZE(self); dest = self->data; chunk = self->descr->elsize; - mask = (PyArrayObject *)\ PyArray_FROM_OTF(mask0, PyArray_BOOL, CARRAY | FORCECAST); - if (mask == NULL) goto fail; + if (mask == NULL) { + goto fail; + } ni = PyArray_SIZE(mask); if (ni != max_item) { PyErr_SetString(PyExc_ValueError, @@ -4580,8 +4647,10 @@ Py_INCREF(self->descr); values = (PyArrayObject *)\ PyArray_FromAny(values0, self->descr, 0, 0, NPY_CARRAY, NULL); - if (values == NULL) goto fail; - nv = PyArray_SIZE(values); /* zero if null array */ + if (values == NULL) { + goto fail; + } + nv = PyArray_SIZE(values); /* zero if null array */ if (nv <= 0) { Py_XDECREF(values); Py_XDECREF(mask); @@ -4589,7 +4658,7 @@ return Py_None; } if (PyDataType_REFCHK(self->descr)) { - for(i=0; idata))[i]; if (tmp) { src = values->data + chunk * (i % nv); @@ -4602,11 +4671,11 @@ else { func = self->descr->f->fastputmask; if (func == NULL) { - for(i=0; idata))[i]; if (tmp) { - src = values->data + chunk * (i % nv); - memmove(dest + i * chunk, src, chunk); + src = values->data + chunk*(i % nv); + memmove(dest + i*chunk, src, chunk); } } } @@ -4633,18 +4702,17 @@ } -/* This conversion function can be used with the "O&" argument for - PyArg_ParseTuple. It will immediately return an object of array type - or will convert to a CARRAY any other object. - - If you use PyArray_Converter, you must DECREF the array when finished - as you get a new reference to it. -*/ - /*NUMPY_API - Useful to pass as converter function for O& processing in - PyArgs_ParseTuple. -*/ + * + * Useful to pass as converter function for O& processing in PyArgs_ParseTuple. + * + * This conversion function can be used with the "O&" argument for + * PyArg_ParseTuple. It will immediately return an object of array type + * or will convert to a CARRAY any other object. + * + * If you use PyArray_Converter, you must DECREF the array when finished + * as you get a new reference to it. + */ static int PyArray_Converter(PyObject *object, PyObject **address) { @@ -4655,15 +4723,17 @@ } else { *address = PyArray_FromAny(object, NULL, 0, 0, CARRAY, NULL); - if (*address == NULL) return PY_FAIL; + if (*address == NULL) { + return PY_FAIL; + } return PY_SUCCEED; } } /*NUMPY_API - Useful to pass as converter function for O& processing in - PyArgs_ParseTuple for output arrays -*/ + * Useful to pass as converter function for O& processing in + * PyArgs_ParseTuple for output arrays + */ static int PyArray_OutputConverter(PyObject *object, PyArrayObject **address) { @@ -4685,22 +4755,26 @@ /*NUMPY_API - Convert an object to true / false -*/ + * Convert an object to true / false + */ static int PyArray_BoolConverter(PyObject *object, Bool *val) { - if (PyObject_IsTrue(object)) - *val=TRUE; - else *val=FALSE; - if (PyErr_Occurred()) + if (PyObject_IsTrue(object)) { + *val = TRUE; + } + else { + *val = FALSE; + } + if (PyErr_Occurred()) { return PY_FAIL; + } return PY_SUCCEED; } /*NUMPY_API - Convert an object to FORTRAN / C / ANY -*/ + * Convert an object to FORTRAN / C / ANY + */ static int PyArray_OrderConverter(PyObject *object, NPY_ORDER *val) { @@ -4709,12 +4783,15 @@ *val = PyArray_ANYORDER; } else if (!PyString_Check(object) || PyString_GET_SIZE(object) < 1) { - if (PyObject_IsTrue(object)) + if (PyObject_IsTrue(object)) { *val = PyArray_FORTRANORDER; - else + } + else { *val = PyArray_CORDER; - if (PyErr_Occurred()) + } + if (PyErr_Occurred()) { return PY_FAIL; + } return PY_SUCCEED; } else { @@ -4738,8 +4815,8 @@ } /*NUMPY_API - Convert an object to NPY_RAISE / NPY_CLIP / NPY_WRAP -*/ + * Convert an object to NPY_RAISE / NPY_CLIP / NPY_WRAP + */ static int PyArray_ClipmodeConverter(PyObject *object, NPY_CLIPMODE *val) { @@ -4765,13 +4842,17 @@ } } else { - int number; - number = PyInt_AsLong(object); - if (number == -1 && PyErr_Occurred()) goto fail; - if (number <= (int) NPY_RAISE && - number >= (int) NPY_CLIP) + int number = PyInt_AsLong(object); + if (number == -1 && PyErr_Occurred()) { + goto fail; + } + if (number <= (int) NPY_RAISE + && number >= (int) NPY_CLIP) { *val = (NPY_CLIPMODE) number; - else goto fail; + } + else { + goto fail; + } } return PY_SUCCEED; @@ -4784,18 +4865,20 @@ /*NUMPY_API - Typestr converter -*/ + * Typestr converter + */ static int PyArray_TypestrConvert(int itemsize, int gentype) { register int newtype = gentype; if (gentype == PyArray_GENBOOLLTR) { - if (itemsize == 1) + if (itemsize == 1) { newtype = PyArray_BOOL; - else + } + else { newtype = PyArray_NOTYPE; + } } else if (gentype == PyArray_SIGNEDLTR) { switch(itemsize) { @@ -4820,7 +4903,6 @@ newtype = PyArray_NOTYPE; } } - else if (gentype == PyArray_UNSIGNEDLTR) { switch(itemsize) { case 1: @@ -4872,7 +4954,6 @@ newtype = PyArray_NOTYPE; } } - else if (gentype == PyArray_COMPLEXLTR) { switch(itemsize) { case 8: @@ -4900,25 +4981,22 @@ newtype = PyArray_NOTYPE; } } - return newtype; } -/* this function takes a Python object which exposes the (single-segment) - buffer interface and returns a pointer to the data segment - - You should increment the reference count by one of buf->base - if you will hang on to a reference - - You only get a borrowed reference to the object. Do not free the - memory... -*/ - - /*NUMPY_API - Get buffer chunk from object -*/ + * Get buffer chunk from object + * + * this function takes a Python object which exposes the (single-segment) + * buffer interface and returns a pointer to the data segment + * + * You should increment the reference count by one of buf->base + * if you will hang on to a reference + * + * You only get a borrowed reference to the object. Do not free the + * memory... + */ static int PyArray_BufferConverter(PyObject *obj, PyArray_Chunk *buf) { @@ -4927,38 +5005,40 @@ buf->ptr = NULL; buf->flags = BEHAVED; buf->base = NULL; - - if (obj == Py_None) + if (obj == Py_None) { return PY_SUCCEED; - + } if (PyObject_AsWriteBuffer(obj, &(buf->ptr), &buflen) < 0) { PyErr_Clear(); buf->flags &= ~WRITEABLE; if (PyObject_AsReadBuffer(obj, (const void **)&(buf->ptr), - &buflen) < 0) + &buflen) < 0) { return PY_FAIL; + } } buf->len = (intp) buflen; /* Point to the base of the buffer object if present */ - if (PyBuffer_Check(obj)) buf->base = ((PyArray_Chunk *)obj)->base; - if (buf->base == NULL) buf->base = obj; - + if (PyBuffer_Check(obj)) { + buf->base = ((PyArray_Chunk *)obj)->base; + } + if (buf->base == NULL) { + buf->base = obj; + } return PY_SUCCEED; } -/* This function takes a Python sequence object and allocates and - fills in an intp array with the converted values. - - **Remember to free the pointer seq.ptr when done using - PyDimMem_FREE(seq.ptr)** -*/ - /*NUMPY_API - Get intp chunk from sequence -*/ + * Get intp chunk from sequence + * + * This function takes a Python sequence object and allocates and + * fills in an intp array with the converted values. + * + * Remember to free the pointer seq.ptr when done using + * PyDimMem_FREE(seq.ptr)** + */ static int PyArray_IntpConverter(PyObject *obj, PyArray_Dims *seq) { @@ -4967,10 +5047,15 @@ seq->ptr = NULL; seq->len = 0; - if (obj == Py_None) return PY_SUCCEED; + if (obj == Py_None) { + return PY_SUCCEED; + } len = PySequence_Size(obj); - if (len == -1) { /* Check to see if it is a number */ - if (PyNumber_Check(obj)) len = 1; + if (len == -1) { + /* Check to see if it is a number */ + if (PyNumber_Check(obj)) { + len = 1; + } } if (len < 0) { PyErr_SetString(PyExc_TypeError, @@ -4993,27 +5078,28 @@ nd = PyArray_IntpFromSequence(obj, (intp *)seq->ptr, len); if (nd == -1 || nd != len) { PyDimMem_FREE(seq->ptr); - seq->ptr=NULL; + seq->ptr = NULL; return PY_FAIL; } return PY_SUCCEED; } -/* A tuple type would be either (generic typeobject, typesize) - or (fixed-length data-type, shape) - - or (inheriting data-type, new-data-type) - The new data-type must have the same itemsize as the inheriting data-type - unless the latter is 0 - - Thus (int32, {'real':(int16,0),'imag',(int16,2)}) - - is one way to specify a descriptor that will give - a['real'] and a['imag'] to an int32 array. -*/ - -/* leave type reference alone */ +/* + * A tuple type would be either (generic typeobject, typesize) + * or (fixed-length data-type, shape) + * + * or (inheriting data-type, new-data-type) + * The new data-type must have the same itemsize as the inheriting data-type + * unless the latter is 0 + * + * Thus (int32, {'real':(int16,0),'imag',(int16,2)}) + * + * is one way to specify a descriptor that will give + * a['real'] and a['imag'] to an int32 array. + * + * leave type reference alone + */ static PyArray_Descr * _use_inherit(PyArray_Descr *type, PyObject *newobj, int *errflag) { @@ -5026,8 +5112,9 @@ } *errflag = 1; new = PyArray_DescrNew(type); - if (new == NULL) goto fail; - + if (new == NULL) { + goto fail; + } if (new->elsize && new->elsize != conv->elsize) { PyErr_SetString(PyExc_ValueError, "mismatch in size of old "\ @@ -5049,7 +5136,6 @@ fail: Py_DECREF(conv); return NULL; - } static PyArray_Descr * @@ -5059,26 +5145,33 @@ PyObject *val; int errflag; - if (PyTuple_GET_SIZE(obj) != 2) return NULL; - - if (!PyArray_DescrConverter(PyTuple_GET_ITEM(obj,0), &type)) + if (PyTuple_GET_SIZE(obj) != 2) { return NULL; + } + if (!PyArray_DescrConverter(PyTuple_GET_ITEM(obj,0), &type)) { + return NULL; + } val = PyTuple_GET_ITEM(obj,1); /* try to interpret next item as a type */ res = _use_inherit(type, val, &errflag); if (res || errflag) { Py_DECREF(type); - if (res) return res; - else return NULL; + if (res) { + return res; + } + else { + return NULL; + } } PyErr_Clear(); - /* We get here if res was NULL but errflag wasn't set - --- i.e. the conversion to a data-descr failed in _use_inherit - */ + /* + * We get here if res was NULL but errflag wasn't set + * --- i.e. the conversion to a data-descr failed in _use_inherit + */ + if (type->elsize == 0) { + /* interpret next item as a typesize */ + int itemsize = PyArray_PyIntAsInt(PyTuple_GET_ITEM(obj,1)); - if (type->elsize == 0) { /* interpret next item as a typesize */ - int itemsize; - itemsize = PyArray_PyIntAsInt(PyTuple_GET_ITEM(obj,1)); if (error_converting(itemsize)) { PyErr_SetString(PyExc_ValueError, "invalid itemsize in generic type "\ @@ -5086,20 +5179,24 @@ goto fail; } PyArray_DESCR_REPLACE(type); - if (type->type_num == PyArray_UNICODE) + if (type->type_num == PyArray_UNICODE) { type->elsize = itemsize << 2; - else + } + else { type->elsize = itemsize; + } } else { - /* interpret next item as shape (if it's a tuple) - and reset the type to PyArray_VOID with - a new fields attribute. - */ - PyArray_Dims shape={NULL,-1}; + /* + * interpret next item as shape (if it's a tuple) + * and reset the type to PyArray_VOID with + * a new fields attribute. + */ + PyArray_Dims shape = {NULL, -1}; PyArray_Descr *newdescr; - if (!(PyArray_IntpConverter(val, &shape)) || - (shape.len > MAX_DIMS)) { + + if (!(PyArray_IntpConverter(val, &shape)) + || (shape.len > MAX_DIMS)) { PyDimMem_FREE(shape.ptr); PyErr_SetString(PyExc_ValueError, "invalid shape in fixed-type tuple."); @@ -5107,16 +5204,18 @@ } /* If (type, 1) was given, it is equivalent to type... or (type, ()) was given it is equivalent to type... */ - if ((shape.len == 1 && shape.ptr[0] == 1 && PyNumber_Check(val)) || \ - (shape.len == 0 && PyTuple_Check(val))) { + if ((shape.len == 1 && shape.ptr[0] == 1 && PyNumber_Check(val)) + || (shape.len == 0 && PyTuple_Check(val))) { PyDimMem_FREE(shape.ptr); return type; } newdescr = PyArray_DescrNewFromType(PyArray_VOID); - if (newdescr == NULL) {PyDimMem_FREE(shape.ptr); goto fail;} + if (newdescr == NULL) { + PyDimMem_FREE(shape.ptr); + goto fail; + } newdescr->elsize = type->elsize; - newdescr->elsize *= PyArray_MultiplyList(shape.ptr, - shape.len); + newdescr->elsize *= PyArray_MultiplyList(shape.ptr, shape.len); PyDimMem_FREE(shape.ptr); newdescr->subarray = _pya_malloc(sizeof(PyArray_ArrayDescr)); newdescr->subarray->base = type; @@ -5136,11 +5235,12 @@ return NULL; } -/* obj is a list. Each item is a tuple with - - (field-name, data-type (either a list or a string), and an optional - shape parameter). -*/ +/* + * obj is a list. Each item is a tuple with + * + * (field-name, data-type (either a list or a string), and an optional + * shape parameter). + */ static PyArray_Descr * _convert_from_array_descr(PyObject *obj, int align) { @@ -5151,34 +5251,43 @@ PyObject *nameslist; PyArray_Descr *new; PyArray_Descr *conv; - int dtypeflags=0; + int dtypeflags = 0; int maxalign = 0; n = PyList_GET_SIZE(obj); nameslist = PyTuple_New(n); - if (!nameslist) return NULL; + if (!nameslist) { + return NULL; + } totalsize = 0; fields = PyDict_New(); - for (i=0; ialignment; - if (_align > 1) totalsize = \ - ((totalsize + _align - 1)/_align)*_align; + if (_align > 1) { + totalsize = ((totalsize + _align - 1)/_align)*_align; + } maxalign = MAX(maxalign, _align); } PyTuple_SET_ITEM(tup, 1, PyInt_FromLong((long) totalsize)); - /* Title can be "meta-data". Only insert it - into the fields dictionary if it is a string - */ + /* + * Title can be "meta-data". Only insert it + * into the fields dictionary if it is a string + */ if (title != NULL) { Py_INCREF(title); PyTuple_SET_ITEM(tup, 2, title); - if (PyString_Check(title) || PyUnicode_Check(title)) + if (PyString_Check(title) || PyUnicode_Check(title)) { PyDict_SetItem(fields, title, tup); + } } PyDict_SetItem(fields, name, tup); totalsize += conv->elsize; @@ -5243,7 +5359,9 @@ if (maxalign > 1) { totalsize = ((totalsize+maxalign-1)/maxalign)*maxalign; } - if (align) new->alignment = maxalign; + if (align) { + new->alignment = maxalign; + } return new; fail: @@ -5253,37 +5371,45 @@ } -/* a list specifying a data-type can just be - a list of formats. The names for the fields - will default to f0, f1, f2, and so forth. -*/ - +/* + * a list specifying a data-type can just be + * a list of formats. The names for the fields + * will default to f0, f1, f2, and so forth. + */ static PyArray_Descr * _convert_from_list(PyObject *obj, int align) { int n, i; int totalsize; PyObject *fields; - PyArray_Descr *conv=NULL; + PyArray_Descr *conv = NULL; PyArray_Descr *new; PyObject *key, *tup; - PyObject *nameslist=NULL; + PyObject *nameslist = NULL; int ret; - int maxalign=0; - int dtypeflags=0; + int maxalign = 0; + int dtypeflags = 0; n = PyList_GET_SIZE(obj); - /* Ignore any empty string at end which _internal._commastring - can produce */ + /* + * Ignore any empty string at end which _internal._commastring + * can produce + */ key = PyList_GET_ITEM(obj, n-1); - if (PyString_Check(key) && PyString_GET_SIZE(key) == 0) n = n-1; + if (PyString_Check(key) && PyString_GET_SIZE(key) == 0) { + n = n - 1; + } /* End ignore code.*/ totalsize = 0; - if (n==0) return NULL; + if (n == 0) { + return NULL; + } nameslist = PyTuple_New(n); - if (!nameslist) return NULL; + if (!nameslist) { + return NULL; + } fields = PyDict_New(); - for (i=0; ialignment; - if (_align > 1) totalsize = \ - ((totalsize + _align - 1)/_align)*_align; + if (_align > 1) { + totalsize = ((totalsize + _align - 1)/_align)*_align; + } maxalign = MAX(maxalign, _align); } PyTuple_SET_ITEM(tup, 1, PyInt_FromLong((long) totalsize)); @@ -5314,7 +5442,9 @@ if (maxalign > 1) { totalsize = ((totalsize+maxalign-1)/maxalign)*maxalign; } - if (align) new->alignment = maxalign; + if (align) { + new->alignment = maxalign; + } new->elsize = totalsize; return new; @@ -5325,15 +5455,15 @@ } -/* comma-separated string */ -/* this is the format developed by the numarray records module */ -/* and implemented by the format parser in that module */ -/* this is an alternative implementation found in the _internal.py - file patterned after that one -- the approach is to try to convert - to a list (with tuples if any repeat information is present) - and then call the _convert_from_list) -*/ - +/* + * comma-separated string + * this is the format developed by the numarray records module + * and implemented by the format parser in that module + * this is an alternative implementation found in the _internal.py + * file patterned after that one -- the approach is to try to convert + * to a list (with tuples if any repeat information is present) + * and then call the _convert_from_list) + */ static PyArray_Descr * _convert_from_commastring(PyObject *obj, int align) { @@ -5341,13 +5471,18 @@ PyArray_Descr *res; PyObject *_numpy_internal; - if (!PyString_Check(obj)) return NULL; + if (!PyString_Check(obj)) { + return NULL; + } _numpy_internal = PyImport_ImportModule("numpy.core._internal"); - if (_numpy_internal == NULL) return NULL; - listobj = PyObject_CallMethod(_numpy_internal, "_commastring", - "O", obj); + if (_numpy_internal == NULL) { + return NULL; + } + listobj = PyObject_CallMethod(_numpy_internal, "_commastring", "O", obj); Py_DECREF(_numpy_internal); - if (!listobj) return NULL; + if (!listobj) { + return NULL; + } if (!PyList_Check(listobj) || PyList_GET_SIZE(listobj)<1) { PyErr_SetString(PyExc_RuntimeError, "_commastring is " \ "not returning a list with len >= 1"); @@ -5372,48 +5507,51 @@ -/* a dictionary specifying a data-type - must have at least two and up to four - keys These must all be sequences of the same length. - - "names" --- field names - "formats" --- the data-type descriptors for the field. - - Optional: - - "offsets" --- integers indicating the offset into the - record of the start of the field. - if not given, then "consecutive offsets" - will be assumed and placed in the dictionary. - - "titles" --- Allows the use of an additional key - for the fields dictionary.(if these are strings - or unicode objects) or - this can also be meta-data to - be passed around with the field description. - - Attribute-lookup-based field names merely has to query the fields - dictionary of the data-descriptor. Any result present can be used - to return the correct field. - - So, the notion of what is a name and what is a title is really quite - arbitrary. - - What does distinguish a title, however, is that if it is not None, - it will be placed at the end of the tuple inserted into the - fields dictionary.and can therefore be used to carry meta-data around. - - If the dictionary does not have "names" and "formats" entries, - then it will be checked for conformity and used directly. -*/ - +/* + * a dictionary specifying a data-type + * must have at least two and up to four + * keys These must all be sequences of the same length. + * + * "names" --- field names + * "formats" --- the data-type descriptors for the field. + * + * Optional: + * + * "offsets" --- integers indicating the offset into the + * record of the start of the field. + * if not given, then "consecutive offsets" + * will be assumed and placed in the dictionary. + * + * "titles" --- Allows the use of an additional key + * for the fields dictionary.(if these are strings + * or unicode objects) or + * this can also be meta-data to + * be passed around with the field description. + * + * Attribute-lookup-based field names merely has to query the fields + * dictionary of the data-descriptor. Any result present can be used + * to return the correct field. + * + * So, the notion of what is a name and what is a title is really quite + * arbitrary. + * + * What does distinguish a title, however, is that if it is not None, + * it will be placed at the end of the tuple inserted into the + * fields dictionary.and can therefore be used to carry meta-data around. + * + * If the dictionary does not have "names" and "formats" entries, + * then it will be checked for conformity and used directly. + */ static PyArray_Descr * _use_fields_dict(PyObject *obj, int align) { PyObject *_numpy_internal; PyArray_Descr *res; + _numpy_internal = PyImport_ImportModule("numpy.core._internal"); - if (_numpy_internal == NULL) return NULL; + if (_numpy_internal == NULL) { + return NULL; + } res = (PyArray_Descr *)PyObject_CallMethod(_numpy_internal, "_usefields", "Oi", obj, align); @@ -5425,19 +5563,19 @@ _convert_from_dict(PyObject *obj, int align) { PyArray_Descr *new; - PyObject *fields=NULL; + PyObject *fields = NULL; PyObject *names, *offsets, *descrs, *titles; int n, i; int totalsize; - int maxalign=0; - int dtypeflags=0; + int maxalign = 0; + int dtypeflags = 0; fields = PyDict_New(); - if (fields == NULL) return (PyArray_Descr *)PyErr_NoMemory(); - + if (fields == NULL) { + return (PyArray_Descr *)PyErr_NoMemory(); + } names = PyDict_GetItemString(obj, "names"); descrs = PyDict_GetItemString(obj, "formats"); - if (!names || !descrs) { Py_DECREF(fields); return _use_fields_dict(obj, align); @@ -5445,9 +5583,9 @@ n = PyObject_Length(names); offsets = PyDict_GetItemString(obj, "offsets"); titles = PyDict_GetItemString(obj, "titles"); - if ((n > PyObject_Length(descrs)) || \ - (offsets && (n > PyObject_Length(offsets))) || \ - (titles && (n > PyObject_Length(titles)))) { + if ((n > PyObject_Length(descrs)) + || (offsets && (n > PyObject_Length(offsets))) + || (titles && (n > PyObject_Length(titles)))) { PyErr_SetString(PyExc_ValueError, "all items in the dictionary must have" \ " the same length."); @@ -5455,9 +5593,9 @@ } totalsize = 0; - for(i=0; i 1) { - totalsize = ((totalsize + _align - 1) \ - /_align)*_align; + totalsize = ((totalsize + _align - 1)/_align)*_align; } PyTuple_SET_ITEM(tup, 1, PyInt_FromLong(totalsize)); } - if (len == 3) PyTuple_SET_ITEM(tup, 2, item); + if (len == 3) { + PyTuple_SET_ITEM(tup, 2, item); + } name = PyObject_GetItem(names, index); Py_DECREF(index); if (!(PyString_Check(name) || PyUnicode_Check(name))) { @@ -5523,8 +5666,8 @@ PyDict_SetItem(fields, name, tup); Py_DECREF(name); if (len == 3) { - if ((PyString_Check(item) || PyUnicode_Check(item)) && - PyDict_GetItem(fields, item) != NULL) { + if ((PyString_Check(item) || PyUnicode_Check(item)) + && PyDict_GetItem(fields, item) != NULL) { PyErr_SetString(PyExc_ValueError, "title already used as a "\ "name or title."); @@ -5535,16 +5678,23 @@ } } Py_DECREF(tup); - if ((ret == PY_FAIL) || (newdescr->elsize == 0)) goto fail; + if ((ret == PY_FAIL) || (newdescr->elsize == 0)) { + goto fail; + } dtypeflags |= (newdescr->hasobject & NPY_FROM_FIELDS); totalsize += newdescr->elsize; } new = PyArray_DescrNewFromType(PyArray_VOID); - if (new == NULL) goto fail; - if (maxalign > 1) + if (new == NULL) { + goto fail; + } + if (maxalign > 1) { totalsize = ((totalsize + maxalign - 1)/maxalign)*maxalign; - if (align) new->alignment = maxalign; + } + if (align) { + new->alignment = maxalign; + } new->elsize = totalsize; if (!PyTuple_Check(names)) { names = PySequence_Tuple(names); @@ -5573,59 +5723,56 @@ /* Check for ints at start of string */ if ((type[0] >= '0' && type[0] <= '9') || ((len > 1) && _chk_byteorder(type[0]) && - (type[1] >= '0' && type[1] <= '9'))) + (type[1] >= '0' && type[1] <= '9'))) { return 1; - + } /* Check for empty tuple */ if (((len > 1) && (type[0] == '(' && type[1] == ')')) || ((len > 3) && _chk_byteorder(type[0]) && - (type[1] == '(' && type[2] == ')'))) + (type[1] == '(' && type[2] == ')'))) { return 1; - + } /* Check for presence of commas */ - for (i=1;i= size computed from fields - - The .fields attribute must return a convertible dictionary if - present. Result inherits from PyArray_VOID. -*/ - - /*NUMPY_API - Get type-descriptor from an object forcing alignment if possible - None goes to DEFAULT type. + * Get type-descriptor from an object forcing alignment if possible + * None goes to DEFAULT type. + * + * any object with the .fields attribute and/or .itemsize attribute (if the + *.fields attribute does not give the total size -- i.e. a partial record + * naming). If itemsize is given it must be >= size computed from fields + * + * The .fields attribute must return a convertible dictionary if present. + * Result inherits from PyArray_VOID. */ static int PyArray_DescrAlignConverter(PyObject *obj, PyArray_Descr **at) { if PyDict_Check(obj) { - *at = _convert_from_dict(obj, 1); - } + *at = _convert_from_dict(obj, 1); + } else if PyString_Check(obj) { - *at = _convert_from_commastring(obj, 1); - } + *at = _convert_from_commastring(obj, 1); + } else if PyList_Check(obj) { - *at = _convert_from_array_descr(obj, 1); - } + *at = _convert_from_array_descr(obj, 1); + } else { return PyArray_DescrConverter(obj, at); } if (*at == NULL) { if (!PyErr_Occurred()) { PyErr_SetString(PyExc_ValueError, - "data-type-descriptor not understood"); + "data-type-descriptor not understood"); } return PY_FAIL; } @@ -5633,28 +5780,28 @@ } /*NUMPY_API - Get type-descriptor from an object forcing alignment if possible - None goes to NULL. -*/ + * Get type-descriptor from an object forcing alignment if possible + * None goes to NULL. + */ static int PyArray_DescrAlignConverter2(PyObject *obj, PyArray_Descr **at) { if PyDict_Check(obj) { - *at = _convert_from_dict(obj, 1); - } + *at = _convert_from_dict(obj, 1); + } else if PyString_Check(obj) { - *at = _convert_from_commastring(obj, 1); - } + *at = _convert_from_commastring(obj, 1); + } else if PyList_Check(obj) { - *at = _convert_from_array_descr(obj, 1); - } + *at = _convert_from_array_descr(obj, 1); + } else { return PyArray_DescrConverter2(obj, at); } if (*at == NULL) { if (!PyErr_Occurred()) { PyErr_SetString(PyExc_ValueError, - "data-type-descriptor not understood"); + "data-type-descriptor not understood"); } return PY_FAIL; } @@ -5663,8 +5810,8 @@ /*NUMPY_API - Get typenum from an object -- None goes to NULL -*/ + * Get typenum from an object -- None goes to NULL + */ static int PyArray_DescrConverter2(PyObject *obj, PyArray_Descr **at) { @@ -5672,41 +5819,39 @@ *at = NULL; return PY_SUCCEED; } - else return PyArray_DescrConverter(obj, at); + else { + return PyArray_DescrConverter(obj, at); + } } -/* This function takes a Python object representing a type and converts it - to a the correct PyArray_Descr * structure to describe the type. - - Many objects can be used to represent a data-type which in NumPy is - quite a flexible concept. - - This is the central code that converts Python objects to - Type-descriptor objects that are used throughout numpy. -*/ - -/* new reference in *at */ /*NUMPY_API - Get typenum from an object -- None goes to PyArray_DEFAULT -*/ + * Get typenum from an object -- None goes to PyArray_DEFAULT + * This function takes a Python object representing a type and converts it + * to a the correct PyArray_Descr * structure to describe the type. + * + * Many objects can be used to represent a data-type which in NumPy is + * quite a flexible concept. + * + * This is the central code that converts Python objects to + * Type-descriptor objects that are used throughout numpy. + * new reference in *at + */ static int PyArray_DescrConverter(PyObject *obj, PyArray_Descr **at) { char *type; - int check_num=PyArray_NOTYPE+10; + int check_num = PyArray_NOTYPE + 10; int len; PyObject *item; int elsize = 0; char endian = '='; - *at=NULL; - + *at = NULL; /* default */ if (obj == Py_None) { *at = PyArray_DescrFromType(PyArray_DEFAULT); return PY_SUCCEED; } - if (PyArray_DescrCheck(obj)) { *at = (PyArray_Descr *)obj; Py_INCREF(*at); @@ -5717,134 +5862,165 @@ if (PyType_IsSubtype((PyTypeObject *)obj, &PyGenericArrType_Type)) { *at = PyArray_DescrFromTypeObject(obj); - if (*at) return PY_SUCCEED; - else return PY_FAIL; + if (*at) { + return PY_SUCCEED; + } + else { + return PY_FAIL; + } } check_num = PyArray_OBJECT; - if (obj == (PyObject *)(&PyInt_Type)) + if (obj == (PyObject *)(&PyInt_Type)) { check_num = PyArray_LONG; - else if (obj == (PyObject *)(&PyLong_Type)) + } + else if (obj == (PyObject *)(&PyLong_Type)) { check_num = PyArray_LONGLONG; - else if (obj == (PyObject *)(&PyFloat_Type)) + } + else if (obj == (PyObject *)(&PyFloat_Type)) { check_num = PyArray_DOUBLE; - else if (obj == (PyObject *)(&PyComplex_Type)) + } + else if (obj == (PyObject *)(&PyComplex_Type)) { check_num = PyArray_CDOUBLE; - else if (obj == (PyObject *)(&PyBool_Type)) + } + else if (obj == (PyObject *)(&PyBool_Type)) { check_num = PyArray_BOOL; - else if (obj == (PyObject *)(&PyString_Type)) + } + else if (obj == (PyObject *)(&PyString_Type)) { check_num = PyArray_STRING; - else if (obj == (PyObject *)(&PyUnicode_Type)) + } + else if (obj == (PyObject *)(&PyUnicode_Type)) { check_num = PyArray_UNICODE; - else if (obj == (PyObject *)(&PyBuffer_Type)) + } + else if (obj == (PyObject *)(&PyBuffer_Type)) { check_num = PyArray_VOID; + } else { *at = _arraydescr_fromobj(obj); - if (*at) return PY_SUCCEED; + if (*at) { + return PY_SUCCEED; + } } goto finish; } /* or a typecode string */ - if (PyString_Check(obj)) { /* Check for a string typecode. */ type = PyString_AS_STRING(obj); len = PyString_GET_SIZE(obj); - if (len <= 0) goto fail; - - /* check for commas present - or first (or second) element a digit */ + if (len <= 0) { + goto fail; + } + /* check for commas present or first (or second) element a digit */ if (_check_for_commastring(type, len)) { *at = _convert_from_commastring(obj, 0); - if (*at) return PY_SUCCEED; + if (*at) { + return PY_SUCCEED; + } return PY_FAIL; } check_num = (int) type[0]; - if ((char) check_num == '>' || (char) check_num == '<' || \ - (char) check_num == '|' || (char) check_num == '=') { - if (len <= 1) goto fail; + if ((char) check_num == '>' || (char) check_num == '<' + || (char) check_num == '|' || (char) check_num == '=') { + if (len <= 1) { + goto fail; + } endian = (char) check_num; type++; len--; check_num = (int) type[0]; - if (endian == '|') endian = '='; + if (endian == '|') { + endian = '='; + } } if (len > 1) { - elsize = atoi(type+1); + elsize = atoi(type + 1); if (elsize == 0) { check_num = PyArray_NOTYPE+10; } - /* When specifying length of UNICODE - the number of characters is given to match - the STRING interface. Each character can be - more than one byte and itemsize must be - the number of bytes. - */ + /* + * When specifying length of UNICODE + * the number of characters is given to match + * the STRING interface. Each character can be + * more than one byte and itemsize must be + * the number of bytes. + */ else if (check_num == PyArray_UNICODELTR) { elsize <<= 2; } - /* Support for generic processing - c4, i4, f8, etc... - */ - else if ((check_num != PyArray_STRINGLTR) && \ - (check_num != PyArray_VOIDLTR) && \ - (check_num != PyArray_STRINGLTR2)) { - check_num = \ - PyArray_TypestrConvert(elsize, - check_num); - if (check_num == PyArray_NOTYPE) + /* Support for generic processing c4, i4, f8, etc...*/ + else if ((check_num != PyArray_STRINGLTR) + && (check_num != PyArray_VOIDLTR) + && (check_num != PyArray_STRINGLTR2)) { + check_num = PyArray_TypestrConvert(elsize, check_num); + if (check_num == PyArray_NOTYPE) { check_num += 10; + } elsize = 0; } } } - /* or a tuple */ else if (PyTuple_Check(obj)) { + /* or a tuple */ *at = _convert_from_tuple(obj); if (*at == NULL){ - if (PyErr_Occurred()) return PY_FAIL; + if (PyErr_Occurred()) { + return PY_FAIL; + } goto fail; } return PY_SUCCEED; } - /* or a list */ else if (PyList_Check(obj)) { + /* or a list */ *at = _convert_from_array_descr(obj,0); if (*at == NULL) { - if (PyErr_Occurred()) return PY_FAIL; + if (PyErr_Occurred()) { + return PY_FAIL; + } goto fail; } return PY_SUCCEED; } - /* or a dictionary */ else if (PyDict_Check(obj)) { + /* or a dictionary */ *at = _convert_from_dict(obj,0); if (*at == NULL) { - if (PyErr_Occurred()) return PY_FAIL; + if (PyErr_Occurred()) { + return PY_FAIL; + } goto fail; } return PY_SUCCEED; } - else if (PyArray_Check(obj)) goto fail; - else /* goto fail;*/ { + else if (PyArray_Check(obj)) { + goto fail; + } + else { *at = _arraydescr_fromobj(obj); - if (*at) return PY_SUCCEED; - if (PyErr_Occurred()) return PY_FAIL; + if (*at) { + return PY_SUCCEED; + } + if (PyErr_Occurred()) { + return PY_FAIL; + } goto fail; } - if (PyErr_Occurred()) goto fail; - + if (PyErr_Occurred()) { + goto fail; + } /* if (check_num == PyArray_NOTYPE) return PY_FAIL; */ finish: - if ((check_num == PyArray_NOTYPE+10) || \ - (*at = PyArray_DescrFromType(check_num))==NULL) { - /* Now check to see if the object is registered - in typeDict */ + if ((check_num == PyArray_NOTYPE + 10) + || (*at = PyArray_DescrFromType(check_num)) == NULL) { + /* Now check to see if the object is registered in typeDict */ if (typeDict != NULL) { item = PyDict_GetItem(typeDict, obj); - if (item) return PyArray_DescrConverter(item, at); + if (item) { + return PyArray_DescrConverter(item, at); + } } goto fail; } @@ -5853,51 +6029,58 @@ PyArray_DESCR_REPLACE(*at); (*at)->elsize = elsize; } - if (endian != '=' && PyArray_ISNBO(endian)) endian = '='; - - if (endian != '=' && (*at)->byteorder != '|' && \ - (*at)->byteorder != endian) { + if (endian != '=' && PyArray_ISNBO(endian)) { + endian = '='; + } + if (endian != '=' && (*at)->byteorder != '|' + && (*at)->byteorder != endian) { PyArray_DESCR_REPLACE(*at); (*at)->byteorder = endian; } - return PY_SUCCEED; fail: - PyErr_SetString(PyExc_TypeError, - "data type not understood"); - *at=NULL; + PyErr_SetString(PyExc_TypeError, "data type not understood"); + *at = NULL; return PY_FAIL; } /*NUMPY_API - Convert object to endian -*/ + * Convert object to endian + */ static int PyArray_ByteorderConverter(PyObject *obj, char *endian) { char *str; + *endian = PyArray_SWAP; str = PyString_AsString(obj); - if (!str) return PY_FAIL; + if (!str) { + return PY_FAIL; + } if (strlen(str) < 1) { PyErr_SetString(PyExc_ValueError, "Byteorder string must be at least length 1"); return PY_FAIL; } *endian = str[0]; - if (str[0] != PyArray_BIG && str[0] != PyArray_LITTLE && \ - str[0] != PyArray_NATIVE && str[0] != PyArray_IGNORE) { - if (str[0] == 'b' || str[0] == 'B') + if (str[0] != PyArray_BIG && str[0] != PyArray_LITTLE + && str[0] != PyArray_NATIVE && str[0] != PyArray_IGNORE) { + if (str[0] == 'b' || str[0] == 'B') { *endian = PyArray_BIG; - else if (str[0] == 'l' || str[0] == 'L') + } + else if (str[0] == 'l' || str[0] == 'L') { *endian = PyArray_LITTLE; - else if (str[0] == 'n' || str[0] == 'N') + } + else if (str[0] == 'n' || str[0] == 'N') { *endian = PyArray_NATIVE; - else if (str[0] == 'i' || str[0] == 'I') + } + else if (str[0] == 'i' || str[0] == 'I') { *endian = PyArray_IGNORE; - else if (str[0] == 's' || str[0] == 'S') + } + else if (str[0] == 's' || str[0] == 'S') { *endian = PyArray_SWAP; + } else { PyErr_Format(PyExc_ValueError, "%s is an unrecognized byteorder", @@ -5909,26 +6092,32 @@ } /*NUMPY_API - Convert object to sort kind -*/ + * Convert object to sort kind + */ static int PyArray_SortkindConverter(PyObject *obj, NPY_SORTKIND *sortkind) { char *str; + *sortkind = PyArray_QUICKSORT; str = PyString_AsString(obj); - if (!str) return PY_FAIL; + if (!str) { + return PY_FAIL; + } if (strlen(str) < 1) { PyErr_SetString(PyExc_ValueError, "Sort kind string must be at least length 1"); return PY_FAIL; } - if (str[0] == 'q' || str[0] == 'Q') + if (str[0] == 'q' || str[0] == 'Q') { *sortkind = PyArray_QUICKSORT; - else if (str[0] == 'h' || str[0] == 'H') + } + else if (str[0] == 'h' || str[0] == 'H') { *sortkind = PyArray_HEAPSORT; - else if (str[0] == 'm' || str[0] == 'M') + } + else if (str[0] == 'm' || str[0] == 'M') { *sortkind = PyArray_MERGESORT; + } else { PyErr_Format(PyExc_ValueError, "%s is an unrecognized kind of sort", @@ -5939,44 +6128,54 @@ } -/* compare the field dictionary for two types - return 1 if the same or 0 if not -*/ - +/* + * compare the field dictionary for two types + * return 1 if the same or 0 if not + */ static int _equivalent_fields(PyObject *field1, PyObject *field2) { int same, val; - if (field1 == field2) return 1; - if (field1 == NULL || field2 == NULL) return 0; + if (field1 == field2) { + return 1; + } + if (field1 == NULL || field2 == NULL) { + return 0; + } val = PyObject_Compare(field1, field2); - if (val != 0 || PyErr_Occurred()) same = 0; - else same = 1; + if (val != 0 || PyErr_Occurred()) { + same = 0; + } + else { + same = 1; + } PyErr_Clear(); return same; } -/* This function returns true if the two typecodes are - equivalent (same basic kind and same itemsize). -*/ -/*NUMPY_API*/ +/*NUMPY_API + * + * This function returns true if the two typecodes are + * equivalent (same basic kind and same itemsize). + */ static unsigned char PyArray_EquivTypes(PyArray_Descr *typ1, PyArray_Descr *typ2) { - register int typenum1=typ1->type_num; - register int typenum2=typ2->type_num; - register int size1=typ1->elsize; - register int size2=typ2->elsize; + int typenum1 = typ1->type_num; + int typenum2 = typ2->type_num; + int size1 = typ1->elsize; + int size2 = typ2->elsize; - if (size1 != size2) return FALSE; - - if (PyArray_ISNBO(typ1->byteorder) != PyArray_ISNBO(typ2->byteorder)) + if (size1 != size2) { return FALSE; - - if (typenum1 == PyArray_VOID || \ - typenum2 == PyArray_VOID) { + } + if (PyArray_ISNBO(typ1->byteorder) != PyArray_ISNBO(typ2->byteorder)) { + return FALSE; + } + if (typenum1 == PyArray_VOID + || typenum2 == PyArray_VOID) { return ((typenum1 == typenum2) && _equivalent_fields(typ1->fields, typ2->fields)); } @@ -5989,6 +6188,7 @@ { PyArray_Descr *d1, *d2; Bool ret; + d1 = PyArray_DescrFromType(typenum1); d2 = PyArray_DescrFromType(typenum2); ret = PyArray_EquivTypes(d1, d2); @@ -6004,16 +6204,16 @@ { intp newdims[MAX_DIMS]; intp newstrides[MAX_DIMS]; - int i,k,num; + int i, k, num; PyObject *ret; - num = ndmin-nd; - for (i=0; idescr->elsize; } - for (i=num;idimensions[k]; newstrides[i] = arr->strides[k]; } @@ -6021,8 +6221,7 @@ ret = PyArray_NewFromDescr(arr->ob_type, arr->descr, ndmin, newdims, newstrides, arr->data, arr->flags, (PyObject *)arr); - /* steals a reference to arr --- so don't increment - here */ + /* steals a reference to arr --- so don't increment here */ PyArray_BASE(ret) = (PyObject *)arr; return ret; } @@ -6039,23 +6238,22 @@ static PyObject * _array_fromobject(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kws) { - PyObject *op, *ret=NULL; + PyObject *op, *ret = NULL; static char *kwd[]= {"object", "dtype", "copy", "order", "subok", "ndmin", NULL}; - Bool subok=FALSE; - Bool copy=TRUE; - int ndmin=0, nd; - PyArray_Descr *type=NULL; - PyArray_Descr *oldtype=NULL; + Bool subok = FALSE; + Bool copy = TRUE; + int ndmin = 0, nd; + PyArray_Descr *type = NULL; + PyArray_Descr *oldtype = NULL; NPY_ORDER order=PyArray_ANYORDER; - int flags=0; + int flags = 0; if (PyTuple_GET_SIZE(args) > 2) { PyErr_SetString(PyExc_ValueError, "only 2 non-keyword arguments accepted"); return NULL; } - if(!PyArg_ParseTupleAndKeywords(args, kws, "O|O&O&O&O&i", kwd, &op, PyArray_DescrConverter2, &type, @@ -6067,19 +6265,17 @@ return NULL; } - /* fast exit if simple call */ - if ((subok && PyArray_Check(op)) || - (!subok && PyArray_CheckExact(op))) { - if (type==NULL) { + if ((subok && PyArray_Check(op)) + || (!subok && PyArray_CheckExact(op))) { + if (type == NULL) { if (!copy && STRIDING_OK(op, order)) { Py_INCREF(op); ret = op; goto finish; } else { - ret = PyArray_NewCopy((PyArrayObject*)op, - order); + ret = PyArray_NewCopy((PyArrayObject*)op, order); goto finish; } } @@ -6092,9 +6288,10 @@ goto finish; } else { - ret = PyArray_NewCopy((PyArrayObject*)op, - order); - if (oldtype == type) goto finish; + ret = PyArray_NewCopy((PyArrayObject*)op, order); + if (oldtype == type) { + goto finish; + } Py_INCREF(oldtype); Py_DECREF(PyArray_DESCR(ret)); PyArray_DESCR(ret) = oldtype; @@ -6109,9 +6306,9 @@ if (order == PyArray_CORDER) { flags |= CONTIGUOUS; } - else if ((order == PyArray_FORTRANORDER) || + else if ((order == PyArray_FORTRANORDER) /* order == PyArray_ANYORDER && */ - (PyArray_Check(op) && PyArray_ISFORTRAN(op))) { + || (PyArray_Check(op) && PyArray_ISFORTRAN(op))) { flags |= FORTRAN; } if (!subok) { @@ -6119,23 +6316,27 @@ } flags |= NPY_FORCECAST; - Py_XINCREF(type); ret = PyArray_CheckFromAny(op, type, 0, 0, flags, NULL); finish: Py_XDECREF(type); - if (!ret || (nd=PyArray_NDIM(ret)) >= ndmin) return ret; - /* create a new array from the same data with ones in the shape */ - /* steals a reference to ret */ + if (!ret || (nd=PyArray_NDIM(ret)) >= ndmin) { + return ret; + } + /* + * create a new array from the same data with ones in the shape + * steals a reference to ret + */ return _prepend_ones((PyArrayObject *)ret, nd, ndmin); } -/* accepts NULL type */ -/* steals referenct to type */ /*NUMPY_API - Empty -*/ + * Empty + * + * accepts NULL type + * steals referenct to type + */ static PyObject * PyArray_Empty(int nd, intp *dims, PyArray_Descr *type, int fortran) { @@ -6146,11 +6347,15 @@ type, nd, dims, NULL, NULL, fortran, NULL); - if (ret == NULL) return NULL; - + if (ret == NULL) { + return NULL; + } if (PyDataType_REFCHK(type)) { PyArray_FillObjectArray(ret, Py_None); - if (PyErr_Occurred()) {Py_DECREF(ret); return NULL;} + if (PyErr_Occurred()) { + Py_DECREF(ret); + return NULL; + } } return (PyObject *)ret; } @@ -6160,23 +6365,26 @@ { static char *kwlist[] = {"shape","dtype","order",NULL}; - PyArray_Descr *typecode=NULL; + PyArray_Descr *typecode = NULL; PyArray_Dims shape = {NULL, 0}; NPY_ORDER order = PyArray_CORDER; Bool fortran; - PyObject *ret=NULL; + PyObject *ret = NULL; if (!PyArg_ParseTupleAndKeywords(args, kwds, "O&|O&O&", kwlist, PyArray_IntpConverter, &shape, PyArray_DescrConverter, &typecode, - PyArray_OrderConverter, &order)) + PyArray_OrderConverter, &order)) { goto fail; - - if (order == PyArray_FORTRANORDER) fortran = TRUE; - else fortran = FALSE; - + } + if (order == PyArray_FORTRANORDER) { + fortran = TRUE; + } + else { + fortran = FALSE; + } ret = PyArray_Empty(shape.len, shape.ptr, typecode, fortran); PyDimMem_FREE(shape.ptr); return ret; @@ -6187,17 +6395,18 @@ return NULL; } -/* This function is needed for supporting Pickles of - numpy scalar objects. -*/ +/* + * This function is needed for supporting Pickles of + * numpy scalar objects. + */ static PyObject * array_scalar(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kwds) { static char *kwlist[] = {"dtype","obj", NULL}; PyArray_Descr *typecode; - PyObject *obj=NULL; - int alloc=0; + PyObject *obj = NULL; + int alloc = 0; void *dptr; PyObject *ret; @@ -6205,17 +6414,18 @@ if (!PyArg_ParseTupleAndKeywords(args, kwds, "O!|O", kwlist, &PyArrayDescr_Type, &typecode, - &obj)) + &obj)) { return NULL; - + } if (typecode->elsize == 0) { - PyErr_SetString(PyExc_ValueError, \ - "itemsize cannot be zero"); + PyErr_SetString(PyExc_ValueError, "itemsize cannot be zero"); return NULL; } if (PyDataType_FLAGCHK(typecode, NPY_ITEM_IS_POINTER)) { - if (obj == NULL) obj = Py_None; + if (obj == NULL) { + obj = Py_None; + } dptr = &obj; } else { @@ -6247,29 +6457,37 @@ ret = PyArray_Scalar(dptr, typecode, NULL); /* free dptr which contains zeros */ - if (alloc) _pya_free(dptr); + if (alloc) { + _pya_free(dptr); + } return ret; } -/* steal a reference */ -/* accepts NULL type */ /*NUMPY_API - Zeros -*/ + * Zeros + * + * steal a reference + * accepts NULL type + */ static PyObject * PyArray_Zeros(int nd, intp *dims, PyArray_Descr *type, int fortran) { PyArrayObject *ret; - if (!type) type = PyArray_DescrFromType(PyArray_DEFAULT); + if (!type) { + type = PyArray_DescrFromType(PyArray_DEFAULT); + } ret = (PyArrayObject *)PyArray_NewFromDescr(&PyArray_Type, type, nd, dims, NULL, NULL, fortran, NULL); - if (ret == NULL) return NULL; - - if (_zerofill(ret) < 0) return NULL; + if (ret == NULL) { + return NULL; + } + if (_zerofill(ret) < 0) { + return NULL; + } return (PyObject *)ret; } @@ -6278,11 +6496,11 @@ array_zeros(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kwds) { static char *kwlist[] = {"shape","dtype","order",NULL}; /* XXX ? */ - PyArray_Descr *typecode=NULL; + PyArray_Descr *typecode = NULL; PyArray_Dims shape = {NULL, 0}; NPY_ORDER order = PyArray_CORDER; Bool fortran = FALSE; - PyObject *ret=NULL; + PyObject *ret = NULL; if (!PyArg_ParseTupleAndKeywords(args, kwds, "O&|O&O&", kwlist, PyArray_IntpConverter, @@ -6290,11 +6508,15 @@ PyArray_DescrConverter, &typecode, PyArray_OrderConverter, - &order)) + &order)) { goto fail; - - if (order == PyArray_FORTRANORDER) fortran = TRUE; - else fortran = FALSE; + } + if (order == PyArray_FORTRANORDER) { + fortran = TRUE; + } + else { + fortran = FALSE; + } ret = PyArray_Zeros(shape.len, shape.ptr, typecode, (int) fortran); PyDimMem_FREE(shape.ptr); return ret; @@ -6309,23 +6531,28 @@ array_set_typeDict(PyObject *NPY_UNUSED(ignored), PyObject *args) { PyObject *dict; - if (!PyArg_ParseTuple(args, "O", &dict)) return NULL; - Py_XDECREF(typeDict); /* Decrement old reference (if any)*/ + + if (!PyArg_ParseTuple(args, "O", &dict)) { + return NULL; + } + /* Decrement old reference (if any)*/ + Py_XDECREF(typeDict); typeDict = dict; - Py_INCREF(dict); /* Create an internal reference to it */ + /* Create an internal reference to it */ + Py_INCREF(dict); Py_INCREF(Py_None); return Py_None; } -/* Reading from a file or a string. +/* + * Reading from a file or a string. + * + * As much as possible, we try to use the same code for both files and strings, + * so the semantics for fromstring and fromfile are the same, especially with + * regards to the handling of text representations. + */ - As much as possible, we try to use the same code for both files and strings, - so the semantics for fromstring and fromfile are the same, especially with - regards to the handling of text representations. -*/ - - typedef int (*next_element)(void **, void *, PyArray_Descr *, void *); typedef int (*skip_separator)(void **, const char *, void *); @@ -6348,14 +6575,16 @@ return dtype->f->scanfunc(*fp, dptr, NULL, dtype); } -/* Remove multiple whitespace from the separator, and add a space to the - beginning and end. This simplifies the separator-skipping code below. -*/ +/* + * Remove multiple whitespace from the separator, and add a space to the + * beginning and end. This simplifies the separator-skipping code below. + */ static char * swab_separator(char *sep) { int skip_space = 0; char *s, *start; + s = start = malloc(strlen(sep)+3); /* add space to front if there isn't one */ if (*sep != '\0' && !isspace(*sep)) { @@ -6365,14 +6594,18 @@ if (isspace(*sep)) { if (skip_space) { sep++; - } else { + } + else { *s = ' '; - s++; sep++; + s++; + sep++; skip_space = 1; } - } else { + } + else { *s = *sep; - s++; sep++; + s++; + sep++; skip_space = 0; } } @@ -6385,17 +6618,17 @@ return start; } -/* Assuming that the separator is the next bit in the string (file), skip it. - - Single spaces in the separator are matched to arbitrary-long sequences - of whitespace in the input. If the separator consists only of spaces, - it matches one or more whitespace characters. - - If we can't match the separator, return -2. - If we hit the end of the string (file), return -1. - Otherwise, return 0. -*/ - +/* + * Assuming that the separator is the next bit in the string (file), skip it. + * + * Single spaces in the separator are matched to arbitrary-long sequences + * of whitespace in the input. If the separator consists only of spaces, + * it matches one or more whitespace characters. + * + * If we can't match the separator, return -2. + * If we hit the end of the string (file), return -1. + * Otherwise, return 0. + */ static int fromstr_skip_separator(char **s, const char *sep, const char *end) { @@ -6406,26 +6639,31 @@ if (c == '\0' || (end != NULL && string >= end)) { result = -1; break; - } else if (*sep == '\0') { + } + else if (*sep == '\0') { if (string != *s) { /* matched separator */ result = 0; break; - } else { + } + else { /* separator was whitespace wildcard that didn't match */ result = -2; break; } - } else if (*sep == ' ') { + } + else if (*sep == ' ') { /* whitespace wildcard */ if (!isspace(c)) { sep++; continue; } - } else if (*sep != c) { + } + else if (*sep != c) { result = -2; break; - } else { + } + else { sep++; } string++; @@ -6439,46 +6677,54 @@ { int result = 0; const char *sep_start = sep; + while (1) { int c = fgetc(*fp); + if (c == EOF) { result = -1; break; - } else if (*sep == '\0') { + } + else if (*sep == '\0') { ungetc(c, *fp); if (sep != sep_start) { /* matched separator */ result = 0; break; - } else { + } + else { /* separator was whitespace wildcard that didn't match */ result = -2; break; } - } else if (*sep == ' ') { + } + else if (*sep == ' ') { /* whitespace wildcard */ if (!isspace(c)) { sep++; sep_start++; ungetc(c, *fp); - } else if (sep == sep_start) { + } + else if (sep == sep_start) { sep_start--; } - } else if (*sep != c) { + } + else if (*sep != c) { ungetc(c, *fp); result = -2; break; - } else { + } + else { sep++; } } return result; } -/* Create an array by reading from the given stream, using the passed - next_element and skip_separator functions. -*/ - +/* + * Create an array by reading from the given stream, using the passed + * next_element and skip_separator functions. + */ #define FROM_BUFFER_SIZE 4096 static PyArrayObject * array_from_text(PyArray_Descr *dtype, intp num, char *sep, size_t *nread, @@ -6494,21 +6740,23 @@ intp bytes, totalbytes; size = (num >= 0) ? num : FROM_BUFFER_SIZE; - r = (PyArrayObject *) PyArray_NewFromDescr(&PyArray_Type, dtype, 1, &size, NULL, NULL, 0, NULL); - if (r == NULL) return NULL; + if (r == NULL) { + return NULL; + } clean_sep = swab_separator(sep); NPY_BEGIN_ALLOW_THREADS; totalbytes = bytes = size * dtype->elsize; dptr = r->data; - for (i=0; num < 0 || i < num; i++) { - if (next(&stream, dptr, dtype, stream_data) < 0) + for (i= 0; num < 0 || i < num; i++) { + if (next(&stream, dptr, dtype, stream_data) < 0) { break; + } *nread += 1; thisbuf += 1; dptr += dtype->elsize; @@ -6523,12 +6771,15 @@ dptr = tmp + (totalbytes - bytes); thisbuf = 0; } - if (skip_sep(&stream, clean_sep, stream_data) < 0) + if (skip_sep(&stream, clean_sep, stream_data) < 0) { break; + } } if (num < 0) { tmp = PyDataMem_RENEW(r->data, (*nread)*dtype->elsize); - if (tmp == NULL) err=1; + if (tmp == NULL) { + err = 1; + } else { PyArray_DIM(r,0) = *nread; r->data = tmp; @@ -6536,7 +6787,9 @@ } NPY_END_ALLOW_THREADS; free(clean_sep); - if (err == 1) PyErr_NoMemory(); + if (err == 1) { + PyErr_NoMemory(); + } if (PyErr_Occurred()) { Py_DECREF(r); return NULL; @@ -6546,26 +6799,26 @@ #undef FROM_BUFFER_SIZE /*NUMPY_API - - Given a pointer to a string ``data``, a string length ``slen``, and - a ``PyArray_Descr``, return an array corresponding to the data - encoded in that string. - - If the dtype is NULL, the default array type is used (double). - If non-null, the reference is stolen. - - If ``slen`` is < 0, then the end of string is used for text data. - It is an error for ``slen`` to be < 0 for binary data (since embedded NULLs - would be the norm). - - The number of elements to read is given as ``num``; if it is < 0, then - then as many as possible are read. - - If ``sep`` is NULL or empty, then binary data is assumed, else - text data, with ``sep`` as the separator between elements. Whitespace in - the separator matches any length of whitespace in the text, and a match - for whitespace around the separator is added. -*/ + * + * Given a pointer to a string ``data``, a string length ``slen``, and + * a ``PyArray_Descr``, return an array corresponding to the data + * encoded in that string. + * + * If the dtype is NULL, the default array type is used (double). + * If non-null, the reference is stolen. + * + * If ``slen`` is < 0, then the end of string is used for text data. + * It is an error for ``slen`` to be < 0 for binary data (since embedded NULLs + * would be the norm). + * + * The number of elements to read is given as ``num``; if it is < 0, then + * then as many as possible are read. + * + * If ``sep`` is NULL or empty, then binary data is assumed, else + * text data, with ``sep`` as the separator between elements. Whitespace in + * the separator matches any length of whitespace in the text, and a match + * for whitespace around the separator is added. + */ static PyObject * PyArray_FromString(char *data, intp slen, PyArray_Descr *dtype, intp num, char *sep) @@ -6574,9 +6827,9 @@ PyArrayObject *ret; Bool binary; - if (dtype == NULL) + if (dtype == NULL) { dtype=PyArray_DescrFromType(PyArray_DEFAULT); - + } if (PyDataType_FLAGCHK(dtype, NPY_ITEM_IS_POINTER)) { PyErr_SetString(PyExc_ValueError, "Cannot create an object array from" \ @@ -6584,7 +6837,6 @@ Py_DECREF(dtype); return NULL; } - itemsize = dtype->elsize; if (itemsize == 0) { PyErr_SetString(PyExc_ValueError, "zero-valued itemsize"); @@ -6593,7 +6845,6 @@ } binary = ((sep == NULL) || (strlen(sep) == 0)); - if (binary) { if (num < 0 ) { if (slen % itemsize != 0) { @@ -6604,7 +6855,8 @@ return NULL; } num = slen/itemsize; - } else { + } + else { if (slen < num*itemsize) { PyErr_SetString(PyExc_ValueError, "string is smaller than " \ @@ -6613,17 +6865,20 @@ return NULL; } } - ret = (PyArrayObject *) PyArray_NewFromDescr(&PyArray_Type, dtype, 1, &num, NULL, NULL, 0, NULL); - if (ret == NULL) return NULL; + if (ret == NULL) { + return NULL; + } memcpy(ret->data, data, num*dtype->elsize); - } else { + } + else { /* read from character-based string */ size_t nread = 0; char *end; + if (dtype->f->scanfunc == NULL) { PyErr_SetString(PyExc_ValueError, "don't know how to read " \ @@ -6634,7 +6889,8 @@ } if (slen < 0) { end = NULL; - } else { + } + else { end = data + slen; } ret = array_from_text(dtype, num, sep, &nread, @@ -6650,11 +6906,11 @@ array_fromstring(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *keywds) { char *data; - Py_ssize_t nin=-1; - char *sep=NULL; + Py_ssize_t nin = -1; + char *sep = NULL; Py_ssize_t s; static char *kwlist[] = {"string", "dtype", "count", "sep", NULL}; - PyArray_Descr *descr=NULL; + PyArray_Descr *descr = NULL; if (!PyArg_ParseTupleAndKeywords(args, keywds, "s#|O&" NPY_SSIZE_T_PYFMT "s", kwlist, @@ -6664,7 +6920,6 @@ Py_XDECREF(descr); return NULL; } - return PyArray_FromString(data, (intp)s, descr, (intp)nin, sep); } @@ -6677,14 +6932,23 @@ intp start, numbytes; if (num < 0) { - int fail=0; + int fail = 0; + start = (intp )ftell(fp); - if (start < 0) fail=1; - if (fseek(fp, 0, SEEK_END) < 0) fail=1; + if (start < 0) { + fail = 1; + } + if (fseek(fp, 0, SEEK_END) < 0) { + fail = 1; + } numbytes = (intp) ftell(fp); - if (numbytes < 0) fail=1; + if (numbytes < 0) { + fail = 1; + } numbytes -= start; - if (fseek(fp, start, SEEK_SET) < 0) fail=1; + if (fseek(fp, start, SEEK_SET) < 0) { + fail = 1; + } if (fail) { PyErr_SetString(PyExc_IOError, "could not seek in file"); @@ -6698,7 +6962,9 @@ 1, &num, NULL, NULL, 0, NULL); - if (r==NULL) return NULL; + if (r == NULL) { + return NULL; + } NPY_BEGIN_ALLOW_THREADS; *nread = fread(r->data, dtype->elsize, num, fp); NPY_END_ALLOW_THREADS; @@ -6706,24 +6972,24 @@ } /*NUMPY_API - - Given a ``FILE *`` pointer ``fp``, and a ``PyArray_Descr``, return an - array corresponding to the data encoded in that file. - - If the dtype is NULL, the default array type is used (double). - If non-null, the reference is stolen. - - The number of elements to read is given as ``num``; if it is < 0, then - then as many as possible are read. - - If ``sep`` is NULL or empty, then binary data is assumed, else - text data, with ``sep`` as the separator between elements. Whitespace in - the separator matches any length of whitespace in the text, and a match - for whitespace around the separator is added. - - For memory-mapped files, use the buffer interface. No more data than - necessary is read by this routine. -*/ + * + * Given a ``FILE *`` pointer ``fp``, and a ``PyArray_Descr``, return an + * array corresponding to the data encoded in that file. + * + * If the dtype is NULL, the default array type is used (double). + * If non-null, the reference is stolen. + * + * The number of elements to read is given as ``num``; if it is < 0, then + * then as many as possible are read. + * + * If ``sep`` is NULL or empty, then binary data is assumed, else + * text data, with ``sep`` as the separator between elements. Whitespace in + * the separator matches any length of whitespace in the text, and a match + * for whitespace around the separator is added. + * + * For memory-mapped files, use the buffer interface. No more data than + * necessary is read by this routine. + */ static PyObject * PyArray_FromFile(FILE *fp, PyArray_Descr *dtype, intp num, char *sep) { @@ -6742,10 +7008,10 @@ Py_DECREF(dtype); return NULL; } - if ((sep == NULL) || (strlen(sep) == 0)) { ret = array_fromfile_binary(fp, dtype, num, &nread); - } else { + } + else { if (dtype->f->scanfunc == NULL) { PyErr_SetString(PyExc_ValueError, "don't know how to read " \ @@ -6782,12 +7048,12 @@ static PyObject * array_fromfile(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *keywds) { - PyObject *file=NULL, *ret; + PyObject *file = NULL, *ret; FILE *fp; - char *sep=""; - Py_ssize_t nin=-1; + char *sep = ""; + Py_ssize_t nin = -1; static char *kwlist[] = {"file", "dtype", "count", "sep", NULL}; - PyArray_Descr *type=NULL; + PyArray_Descr *type = NULL; if (!PyArg_ParseTupleAndKeywords(args, keywds, "O|O&" NPY_SSIZE_T_PYFMT "s", @@ -6798,16 +7064,16 @@ Py_XDECREF(type); return NULL; } - if (PyString_Check(file) || PyUnicode_Check(file)) { file = PyObject_CallFunction((PyObject *)&PyFile_Type, "Os", file, "rb"); - if (file==NULL) return NULL; + if (file == NULL) { + return NULL; + } } else { Py_INCREF(file); } - fp = PyFile_AsFile(file); if (fp == NULL) { PyErr_SetString(PyExc_IOError, @@ -6815,17 +7081,19 @@ Py_DECREF(file); return NULL; } - - if (type == NULL) type = PyArray_DescrFromType(PyArray_DEFAULT); - + if (type == NULL) { + type = PyArray_DescrFromType(PyArray_DEFAULT); + } ret = PyArray_FromFile(fp, type, (intp) nin, sep); Py_DECREF(file); return ret; } -/* steals a reference to dtype (which cannot be NULL) */ -/*NUMPY_API */ +/*NUMPY_API + * + * steals a reference to dtype (which cannot be NULL) + */ static PyObject * PyArray_FromIter(PyObject *obj, PyArray_Descr *dtype, intp count) { @@ -6835,8 +7103,9 @@ intp i, elsize, elcount; char *item, *new_data; - if (iter == NULL) goto done; - + if (iter == NULL) { + goto done; + } elcount = (count < 0) ? 0 : count; if ((elsize=dtype->elsize) == 0) { PyErr_SetString(PyExc_ValueError, "Must specify length "\ @@ -6844,9 +7113,10 @@ goto done; } - /* We would need to alter the memory RENEW code to decrement any - reference counts before throwing away any memory. - */ + /* + * We would need to alter the memory RENEW code to decrement any + * reference counts before throwing away any memory. + */ if (PyDataType_REFCHK(dtype)) { PyErr_SetString(PyExc_ValueError, "cannot create "\ "object arrays from iterator"); @@ -6856,11 +7126,11 @@ ret = (PyArrayObject *)PyArray_NewFromDescr(&PyArray_Type, dtype, 1, &elcount, NULL,NULL, 0, NULL); dtype = NULL; - if (ret == NULL) goto done; - + if (ret == NULL) { + goto done; + } for (i = 0; (i < count || count == -1) && (value = PyIter_Next(iter)); i++) { - if (i >= elcount) { /* Grow ret->data: @@ -6868,10 +7138,12 @@ 50% overallocation => 0, 4, 8, 14, 23, 36, 56, 86 ... */ elcount = (i >> 1) + (i < 4 ? 4 : 2) + i; - if (elcount <= (intp)((~(size_t)0) / elsize)) + if (elcount <= (intp)((~(size_t)0) / elsize)) { new_data = PyDataMem_RENEW(ret->data, elcount * elsize); - else + } + else { new_data = NULL; + } if (new_data == NULL) { PyErr_SetString(PyExc_MemoryError, "cannot allocate array memory"); @@ -6880,15 +7152,14 @@ } ret->data = new_data; } - ret->dimensions[0] = i+1; + ret->dimensions[0] = i + 1; - if (((item = index2ptr(ret, i)) == NULL) || - (ret->descr->f->setitem(value, item, ret) == -1)) { + if (((item = index2ptr(ret, i)) == NULL) + || (ret->descr->f->setitem(value, item, ret) == -1)) { Py_DECREF(value); goto done; } Py_DECREF(value); - } if (i < count) { @@ -6897,10 +7168,12 @@ } /* - Realloc the data so that don't keep extra memory tied up - (assuming realloc is reasonably good about reusing space...) - */ - if (i==0) i = 1; + * Realloc the data so that don't keep extra memory tied up + * (assuming realloc is reasonably good about reusing space...) + */ + if (i == 0) { + i = 1; + } new_data = PyDataMem_RENEW(ret->data, i * elsize); if (new_data == NULL) { PyErr_SetString(PyExc_MemoryError, "cannot allocate array memory"); @@ -6922,9 +7195,9 @@ array_fromiter(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *keywds) { PyObject *iter; - Py_ssize_t nin=-1; + Py_ssize_t nin = -1; static char *kwlist[] = {"iter", "dtype", "count", NULL}; - PyArray_Descr *descr=NULL; + PyArray_Descr *descr = NULL; if (!PyArg_ParseTupleAndKeywords(args, keywds, "OO&|" NPY_SSIZE_T_PYFMT, @@ -6935,7 +7208,6 @@ Py_XDECREF(descr); return NULL; } - return PyArray_FromIter(iter, descr, (intp)nin); } @@ -6950,7 +7222,7 @@ Py_ssize_t ts; intp s, n; int itemsize; - int write=1; + int write = 1; if (PyDataType_REFCHK(type)) { @@ -6966,21 +7238,25 @@ Py_DECREF(type); return NULL; } - - if (buf->ob_type->tp_as_buffer == NULL || \ - (buf->ob_type->tp_as_buffer->bf_getwritebuffer == NULL && \ - buf->ob_type->tp_as_buffer->bf_getreadbuffer == NULL)) { + if (buf->ob_type->tp_as_buffer == NULL + || (buf->ob_type->tp_as_buffer->bf_getwritebuffer == NULL + && buf->ob_type->tp_as_buffer->bf_getreadbuffer == NULL)) { PyObject *newbuf; newbuf = PyObject_GetAttrString(buf, "__buffer__"); - if (newbuf == NULL) {Py_DECREF(type); return NULL;} + if (newbuf == NULL) { + Py_DECREF(type); + return NULL; + } buf = newbuf; } - else {Py_INCREF(buf);} + else { + Py_INCREF(buf); + } - if (PyObject_AsWriteBuffer(buf, (void *)&data, &ts)==-1) { + if (PyObject_AsWriteBuffer(buf, (void *)&data, &ts) == -1) { write = 0; PyErr_Clear(); - if (PyObject_AsReadBuffer(buf, (void *)&data, &ts)==-1) { + if (PyObject_AsReadBuffer(buf, (void *)&data, &ts) == -1) { Py_DECREF(buf); Py_DECREF(type); return NULL; @@ -6997,7 +7273,6 @@ s = (intp)ts - offset; n = (intp)count; itemsize = type->elsize; - if (n < 0 ) { if (s % itemsize != 0) { PyErr_SetString(PyExc_ValueError, @@ -7008,7 +7283,8 @@ return NULL; } n = s/itemsize; - } else { + } + else { if (s < n*itemsize) { PyErr_SetString(PyExc_ValueError, "buffer is smaller than requested"\ @@ -7029,8 +7305,9 @@ return NULL; } - if (!write) ret->flags &= ~WRITEABLE; - + if (!write) { + ret->flags &= ~WRITEABLE; + } /* Store a reference for decref on deallocation */ ret->base = buf; PyArray_UpdateFlags(ret, ALIGNED); @@ -7040,10 +7317,10 @@ static PyObject * array_frombuffer(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *keywds) { - PyObject *obj=NULL; - Py_ssize_t nin=-1, offset=0; + PyObject *obj = NULL; + Py_ssize_t nin = -1, offset = 0; static char *kwlist[] = {"buffer", "dtype", "count", "offset", NULL}; - PyArray_Descr *type=NULL; + PyArray_Descr *type = NULL; if (!PyArg_ParseTupleAndKeywords(args, keywds, "O|O&" NPY_SSIZE_T_PYFMT @@ -7054,9 +7331,9 @@ Py_XDECREF(type); return NULL; } - if (type==NULL) + if (type == NULL) { type = PyArray_DescrFromType(PyArray_DEFAULT); - + } return PyArray_FromBuffer(obj, type, (intp)nin, (intp)offset); } @@ -7064,48 +7341,53 @@ array_concatenate(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *kwds) { PyObject *a0; - int axis=0; + int axis = 0; static char *kwlist[] = {"seq", "axis", NULL}; if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|O&", kwlist, &a0, - PyArray_AxisConverter, &axis)) + PyArray_AxisConverter, &axis)) { return NULL; + } return PyArray_Concatenate(a0, axis); } static PyObject *array_innerproduct(PyObject *NPY_UNUSED(dummy), PyObject *args) { PyObject *b0, *a0; - if (!PyArg_ParseTuple(args, "OO", &a0, &b0)) return NULL; - + if (!PyArg_ParseTuple(args, "OO", &a0, &b0)) { + return NULL; + } return _ARET(PyArray_InnerProduct(a0, b0)); } static PyObject *array_matrixproduct(PyObject *NPY_UNUSED(dummy), PyObject *args) { PyObject *v, *a; - if (!PyArg_ParseTuple(args, "OO", &a, &v)) return NULL; - + if (!PyArg_ParseTuple(args, "OO", &a, &v)) { + return NULL; + } return _ARET(PyArray_MatrixProduct(a, v)); } static PyObject *array_fastCopyAndTranspose(PyObject *NPY_UNUSED(dummy), PyObject *args) { PyObject *a0; - if (!PyArg_ParseTuple(args, "O", &a0)) return NULL; - + if (!PyArg_ParseTuple(args, "O", &a0)) { + return NULL; + } return _ARET(PyArray_CopyAndTranspose(a0)); } static PyObject *array_correlate(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *kwds) { PyObject *shape, *a0; - int mode=0; + int mode = 0; static char *kwlist[] = {"a", "v", "mode", NULL}; if (!PyArg_ParseTupleAndKeywords(args, kwds, "OO|i", kwlist, - &a0, &shape, &mode)) return NULL; - + &a0, &shape, &mode)) { + return NULL; + } return PyArray_Correlate(a0, shape, mode); } @@ -7129,37 +7411,45 @@ return PyArray_New(&PyArray_Type, 1, &length, type_num, NULL, NULL, 0, 0, NULL); } - range = PyArray_New(&PyArray_Type, 1, &length, type_num, NULL, NULL, 0, 0, NULL); - if (range == NULL) return NULL; - + if (range == NULL) { + return NULL; + } funcs = PyArray_DESCR(range)->f; - /* place start in the buffer and the next value in the second position */ - /* if length > 2, then call the inner loop, otherwise stop */ - + /* + * place start in the buffer and the next value in the second position + * if length > 2, then call the inner loop, otherwise stop + */ obj = PyFloat_FromDouble(start); ret = funcs->setitem(obj, PyArray_DATA(range), (PyArrayObject *)range); Py_DECREF(obj); - if (ret < 0) goto fail; - if (length == 1) return range; - + if (ret < 0) { + goto fail; + } + if (length == 1) { + return range; + } obj = PyFloat_FromDouble(start + step); ret = funcs->setitem(obj, PyArray_BYTES(range)+PyArray_ITEMSIZE(range), (PyArrayObject *)range); Py_DECREF(obj); - if (ret < 0) goto fail; - if (length == 2) return range; - + if (ret < 0) { + goto fail; + } + if (length == 2) { + return range; + } if (!funcs->fill) { PyErr_SetString(PyExc_ValueError, "no fill-function for data-type."); Py_DECREF(range); return NULL; } funcs->fill(PyArray_DATA(range), length, (PyArrayObject *)range); - if (PyErr_Occurred()) goto fail; - + if (PyErr_Occurred()) { + goto fail; + } return range; fail: @@ -7167,9 +7457,9 @@ return NULL; } -/* the formula is - len = (intp) ceil((start - stop) / step); -*/ +/* + * the formula is len = (intp) ceil((start - stop) / step); + */ static intp _calc_length(PyObject *start, PyObject *stop, PyObject *step, PyObject **next, int cmplx) { @@ -7188,35 +7478,48 @@ return -1; } val = PyNumber_TrueDivide(*next, step); - Py_DECREF(*next); *next=NULL; - if (!val) return -1; + Py_DECREF(*next); + *next = NULL; + if (!val) { + return -1; + } if (cmplx && PyComplex_Check(val)) { value = PyComplex_RealAsDouble(val); - if (error_converting(value)) {Py_DECREF(val); return -1;} + if (error_converting(value)) { + Py_DECREF(val); + return -1; + } len = (intp) ceil(value); value = PyComplex_ImagAsDouble(val); Py_DECREF(val); - if (error_converting(value)) return -1; + if (error_converting(value)) { + return -1; + } len = MIN(len, (intp) ceil(value)); } else { value = PyFloat_AsDouble(val); Py_DECREF(val); - if (error_converting(value)) return -1; + if (error_converting(value)) { + return -1; + } len = (intp) ceil(value); } - if (len > 0) { *next = PyNumber_Add(start, step); - if (!next) return -1; + if (!next) { + return -1; + } } return len; } -/* this doesn't change the references */ /*NUMPY_API - ArangeObj, -*/ + * + * ArangeObj, + * + * this doesn't change the references + */ static PyObject * PyArray_ArangeObj(PyObject *start, PyObject *stop, PyObject *step, PyArray_Descr *dtype) { @@ -7224,7 +7527,7 @@ PyArray_ArrFuncs *funcs; PyObject *next; intp length; - PyArray_Descr *native=NULL; + PyArray_Descr *native = NULL; int swap; if (!dtype) { @@ -7247,33 +7550,41 @@ } dtype = deftype; } - else Py_INCREF(dtype); - + else { + Py_INCREF(dtype); + } if (!step || step == Py_None) { step = PyInt_FromLong(1); } - else Py_XINCREF(step); - + else { + Py_XINCREF(step); + } if (!stop || stop == Py_None) { stop = start; start = PyInt_FromLong(0); } - else Py_INCREF(start); - + else { + Py_INCREF(start); + } /* calculate the length and next = start + step*/ length = _calc_length(start, stop, step, &next, PyTypeNum_ISCOMPLEX(dtype->type_num)); - - if (PyErr_Occurred()) {Py_DECREF(dtype); goto fail;} + if (PyErr_Occurred()) { + Py_DECREF(dtype); + goto fail; + } if (length <= 0) { length = 0; range = PyArray_SimpleNewFromDescr(1, &length, dtype); - Py_DECREF(step); Py_DECREF(start); return range; + Py_DECREF(step); + Py_DECREF(start); + return range; } - /* If dtype is not in native byte-order then get native-byte - order version. And then swap on the way out. - */ + /* + * If dtype is not in native byte-order then get native-byte + * order version. And then swap on the way out. + */ if (!PyArray_ISNBO(dtype->byteorder)) { native = PyArray_DescrNewByteorder(dtype, PyArray_NATBYTE); swap = 1; @@ -7284,28 +7595,38 @@ } range = PyArray_SimpleNewFromDescr(1, &length, native); - if (range == NULL) goto fail; + if (range == NULL) { + goto fail; + } + /* + * place start in the buffer and the next value in the second position + * if length > 2, then call the inner loop, otherwise stop + */ funcs = PyArray_DESCR(range)->f; - - /* place start in the buffer and the next value in the second position */ - /* if length > 2, then call the inner loop, otherwise stop */ - - if (funcs->setitem(start, PyArray_DATA(range), (PyArrayObject *)range) < 0) + if (funcs->setitem( + start, PyArray_DATA(range), (PyArrayObject *)range) < 0) { goto fail; - if (length == 1) goto finish; + } + if (length == 1) { + goto finish; + } if (funcs->setitem(next, PyArray_BYTES(range)+PyArray_ITEMSIZE(range), - (PyArrayObject *)range) < 0) goto fail; - if (length == 2) goto finish; - + (PyArrayObject *)range) < 0) { + goto fail; + } + if (length == 2) { + goto finish; + } if (!funcs->fill) { PyErr_SetString(PyExc_ValueError, "no fill-function for data-type."); Py_DECREF(range); goto fail; } funcs->fill(PyArray_DATA(range), length, (PyArrayObject *)range); - if (PyErr_Occurred()) goto fail; - + if (PyErr_Occurred()) { + goto fail; + } finish: if (swap) { PyObject *new; @@ -7314,7 +7635,6 @@ Py_DECREF(PyArray_DESCR(range)); PyArray_DESCR(range) = dtype; /* steals the reference */ } - Py_DECREF(start); Py_DECREF(step); Py_DECREF(next); @@ -7329,9 +7649,9 @@ static PyObject * array_arange(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kws) { - PyObject *o_start=NULL, *o_stop=NULL, *o_step=NULL; + PyObject *o_start = NULL, *o_stop = NULL, *o_step = NULL; static char *kwd[]= {"start", "stop", "step", "dtype", NULL}; - PyArray_Descr *typecode=NULL; + PyArray_Descr *typecode = NULL; if(!PyArg_ParseTupleAndKeywords(args, kws, "O|OOO&", kwd, &o_start, &o_stop, &o_step, @@ -7340,14 +7660,13 @@ Py_XDECREF(typecode); return NULL; } - return PyArray_ArangeObj(o_start, o_stop, o_step, typecode); } /* - Included at the very first so not auto-grabbed and thus not - labeled. -*/ + * Included at the very first so not auto-grabbed and thus not + * labeled. + */ static unsigned int PyArray_GetNDArrayCVersion(void) { @@ -7358,8 +7677,10 @@ array__get_ndarray_c_version(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *kwds) { static char *kwlist[] = {NULL}; - if(!PyArg_ParseTupleAndKeywords(args, kwds, "", kwlist )) return NULL; + if(!PyArg_ParseTupleAndKeywords(args, kwds, "", kwlist )) { + return NULL; + } return PyInt_FromLong( (long) PyArray_GetNDArrayCVersion() ); } @@ -7370,46 +7691,51 @@ PyObject *ret; PyTypeObject *subtype; PyArray_Dims shape = {NULL, 0}; - PyArray_Descr *dtype=NULL; + PyArray_Descr *dtype = NULL; if (!PyArg_ParseTuple(args, "O!O&O&", &PyType_Type, &subtype, PyArray_IntpConverter, &shape, - PyArray_DescrConverter, &dtype)) + PyArray_DescrConverter, &dtype)) { goto fail; - + } if (!PyType_IsSubtype(subtype, &PyArray_Type)) { PyErr_SetString(PyExc_TypeError, "_reconstruct: First argument must be " \ "a sub-type of ndarray"); goto fail; } - ret = PyArray_NewFromDescr(subtype, dtype, (int)shape.len, shape.ptr, NULL, NULL, 0, NULL); - if (shape.ptr) PyDimMem_FREE(shape.ptr); + if (shape.ptr) { + PyDimMem_FREE(shape.ptr); + } return ret; fail: Py_XDECREF(dtype); - if (shape.ptr) PyDimMem_FREE(shape.ptr); + if (shape.ptr) { + PyDimMem_FREE(shape.ptr); + } return NULL; } static PyObject * array_set_string_function(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *kwds) { - PyObject *op=NULL; + PyObject *op = NULL; int repr=1; static char *kwlist[] = {"f", "repr", NULL}; if(!PyArg_ParseTupleAndKeywords(args, kwds, "|Oi", kwlist, - &op, &repr)) return NULL; - + &op, &repr)) { + return NULL; + } /* reset the array_repr function to built-in */ - if (op == Py_None) op = NULL; + if (op == Py_None) { + op = NULL; + } if (op != NULL && !PyCallable_Check(op)) { - PyErr_SetString(PyExc_TypeError, - "Argument must be callable."); + PyErr_SetString(PyExc_TypeError, "Argument must be callable."); return NULL; } PyArray_SetStringFunction(op, repr); @@ -7420,18 +7746,19 @@ static PyObject * array_set_ops_function(PyObject *NPY_UNUSED(self), PyObject *NPY_UNUSED(args), PyObject *kwds) { - PyObject *oldops=NULL; + PyObject *oldops = NULL; - if ((oldops = PyArray_GetNumericOps())==NULL) return NULL; - - /* Should probably ensure that objects are at least callable */ - /* Leave this to the caller for now --- error will be raised - later when use is attempted - */ + if ((oldops = PyArray_GetNumericOps()) == NULL) { + return NULL; + } + /* + * Should probably ensure that objects are at least callable + * Leave this to the caller for now --- error will be raised + * later when use is attempted + */ if (kwds && PyArray_SetNumericOps(kwds) == -1) { Py_DECREF(oldops); - PyErr_SetString(PyExc_ValueError, - "one or more objects not callable"); + PyErr_SetString(PyExc_ValueError, "one or more objects not callable"); return NULL; } return oldops; @@ -7439,26 +7766,25 @@ /*NUMPY_API - Where -*/ + * Where + */ static PyObject * PyArray_Where(PyObject *condition, PyObject *x, PyObject *y) { PyArrayObject *arr; - PyObject *tup=NULL, *obj=NULL; - PyObject *ret=NULL, *zero=NULL; + PyObject *tup = NULL, *obj = NULL; + PyObject *ret = NULL, *zero = NULL; - arr = (PyArrayObject *)PyArray_FromAny(condition, NULL, 0, 0, 0, NULL); - if (arr == NULL) return NULL; - - if ((x==NULL) && (y==NULL)) { + if (arr == NULL) { + return NULL; + } + if ((x == NULL) && (y == NULL)) { ret = PyArray_Nonzero(arr); Py_DECREF(arr); return ret; } - - if ((x==NULL) || (y==NULL)) { + if ((x == NULL) || (y == NULL)) { Py_DECREF(arr); PyErr_SetString(PyExc_ValueError, "either both or neither " "of x and y should be given"); @@ -7467,18 +7793,19 @@ zero = PyInt_FromLong((long) 0); - obj = PyArray_EnsureAnyArray(PyArray_GenericBinaryFunction(arr, zero, n_ops.not_equal)); Py_DECREF(zero); Py_DECREF(arr); - if (obj == NULL) return NULL; - + if (obj == NULL) { + return NULL; + } tup = Py_BuildValue("(OO)", y, x); - if (tup == NULL) {Py_DECREF(obj); return NULL;} - + if (tup == NULL) { + Py_DECREF(obj); + return NULL; + } ret = PyArray_Choose((PyAO *)obj, tup, NULL, NPY_RAISE); - Py_DECREF(obj); Py_DECREF(tup); return ret; @@ -7487,23 +7814,25 @@ static PyObject * array_where(PyObject *NPY_UNUSED(ignored), PyObject *args) { - PyObject *obj=NULL, *x=NULL, *y=NULL; + PyObject *obj = NULL, *x = NULL, *y = NULL; - if (!PyArg_ParseTuple(args, "O|OO", &obj, &x, &y)) return NULL; - + if (!PyArg_ParseTuple(args, "O|OO", &obj, &x, &y)) { + return NULL; + } return PyArray_Where(obj, x, y); } static PyObject * array_lexsort(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kwds) { - int axis=-1; + int axis = -1; PyObject *obj; static char *kwlist[] = {"keys", "axis", NULL}; if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|i", kwlist, - &obj, &axis)) return NULL; - + &obj, &axis)) { + return NULL; + } return _ARET(PyArray_LexSort(obj, axis)); } @@ -7512,10 +7841,10 @@ static PyObject * array_can_cast_safely(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *kwds) { - PyArray_Descr *d1=NULL; - PyArray_Descr *d2=NULL; + PyArray_Descr *d1 = NULL; + PyArray_Descr *d2 = NULL; Bool ret; - PyObject *retobj=NULL; + PyObject *retobj = NULL; static char *kwlist[] = {"from", "to", NULL}; if(!PyArg_ParseTupleAndKeywords(args, kwds, "O&O&", kwlist, @@ -7531,7 +7860,7 @@ } ret = PyArray_CanCastTo(d1, d2); - retobj = (ret ? Py_True : Py_False); + retobj = ret ? Py_True : Py_False; Py_INCREF(retobj); finish: @@ -7545,9 +7874,9 @@ { int size; - if(!PyArg_ParseTuple(args, "i", &size)) + if(!PyArg_ParseTuple(args, "i", &size)) { return NULL; - + } return PyBuffer_New(size); } @@ -7555,22 +7884,22 @@ buffer_buffer(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *kwds) { PyObject *obj; - Py_ssize_t offset=0, size=Py_END_OF_BUFFER, n; + Py_ssize_t offset = 0, size = Py_END_OF_BUFFER, n; void *unused; static char *kwlist[] = {"object", "offset", "size", NULL}; - if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|" NPY_SSIZE_T_PYFMT \ + if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|" NPY_SSIZE_T_PYFMT NPY_SSIZE_T_PYFMT, kwlist, - &obj, &offset, &size)) + &obj, &offset, &size)) { return NULL; - - + } if (PyObject_AsWriteBuffer(obj, &unused, &n) < 0) { PyErr_Clear(); return PyBuffer_FromObject(obj, offset, size); } - else + else { return PyBuffer_FromReadWriteObject(obj, offset, size); + } } #ifndef _MSC_VER @@ -7602,22 +7931,29 @@ { PyObject *mem; Py_ssize_t size; - Bool ro=FALSE, check=TRUE; + Bool ro = FALSE, check = TRUE; void *memptr; static char *kwlist[] = {"mem", "size", "readonly", "check", NULL}; + if (!PyArg_ParseTupleAndKeywords(args, kwds, "O" \ NPY_SSIZE_T_PYFMT "|O&O&", kwlist, &mem, &size, PyArray_BoolConverter, &ro, PyArray_BoolConverter, - &check)) return NULL; + &check)) { + return NULL; + } memptr = PyLong_AsVoidPtr(mem); - if (memptr == NULL) return NULL; - + if (memptr == NULL) { + return NULL; + } if (check) { - /* Try to dereference the start and end of the memory region */ - /* Catch segfault and report error if it occurs */ + /* + * Try to dereference the start and end of the memory region + * Catch segfault and report error if it occurs + */ char test; - int err=0; + int err = 0; + #ifdef _MSC_VER __try { _test_code(); @@ -7628,7 +7964,6 @@ #else PyOS_sighandler_t _npy_sig_save; _npy_sig_save = PyOS_setsig(SIGSEGV, _SigSegv_Handler); - if (setjmp(_NPY_SIGSEGV_BUF) == 0) { _test_code(); } @@ -7689,49 +8024,66 @@ Bool rstrip; char *cmp_str; Py_ssize_t strlen; - PyObject *res=NULL; - static char msg[] = \ - "comparision must be '==', '!=', '<', '>', '<=', '>='"; - + PyObject *res = NULL; + static char msg[] = "comparision must be '==', '!=', '<', '>', '<=', '>='"; static char *kwlist[] = {"a1", "a2", "cmp", "rstrip", NULL}; if (!PyArg_ParseTupleAndKeywords(args, kwds, "OOs#O&", kwlist, &array, &other, &cmp_str, &strlen, - PyArray_BoolConverter, &rstrip)) + PyArray_BoolConverter, &rstrip)) { return NULL; - - if (strlen < 1 || strlen > 2) goto err; + } + if (strlen < 1 || strlen > 2) { + goto err; + } if (strlen > 1) { - if (cmp_str[1] != '=') goto err; - if (cmp_str[0] == '=') cmp_op = Py_EQ; - else if (cmp_str[0] == '!') cmp_op = Py_NE; - else if (cmp_str[0] == '<') cmp_op = Py_LE; - else if (cmp_str[0] == '>') cmp_op = Py_GE; - else goto err; + if (cmp_str[1] != '=') { + goto err; + } + if (cmp_str[0] == '=') { + cmp_op = Py_EQ; + } + else if (cmp_str[0] == '!') { + cmp_op = Py_NE; + } + else if (cmp_str[0] == '<') { + cmp_op = Py_LE; + } + else if (cmp_str[0] == '>') { + cmp_op = Py_GE; + } + else { + goto err; + } } else { - if (cmp_str[0] == '<') cmp_op = Py_LT; - else if (cmp_str[0] == '>') cmp_op = Py_GT; - else goto err; + if (cmp_str[0] == '<') { + cmp_op = Py_LT; + } + else if (cmp_str[0] == '>') { + cmp_op = Py_GT; + } + else { + goto err; + } } newarr = (PyArrayObject *)PyArray_FROM_O(array); - if (newarr == NULL) return NULL; + if (newarr == NULL) { + return NULL; + } newoth = (PyArrayObject *)PyArray_FROM_O(other); if (newoth == NULL) { Py_DECREF(newarr); return NULL; } - if (PyArray_ISSTRING(newarr) && PyArray_ISSTRING(newoth)) { res = _strings_richcompare(newarr, newoth, cmp_op, rstrip != 0); } else { - PyErr_SetString(PyExc_TypeError, - "comparison of non-string arrays"); + PyErr_SetString(PyExc_TypeError, "comparison of non-string arrays"); } - Py_DECREF(newarr); Py_DECREF(newoth); return res; @@ -7783,120 +8135,158 @@ static PyObject * test_interrupt(PyObject *NPY_UNUSED(self), PyObject *args) { - int kind=0; + int kind = 0; int a = 0; - if (!PyArg_ParseTuple(args, "|i", &kind)) return NULL; - + if (!PyArg_ParseTuple(args, "|i", &kind)) { + return NULL; + } if (kind) { Py_BEGIN_ALLOW_THREADS; - while (a>=0) { - if ((a % 1000 == 0) && - PyOS_InterruptOccurred()) break; - a+=1; + while (a >= 0) { + if ((a % 1000 == 0) && PyOS_InterruptOccurred()) { + break; } + a += 1; + } Py_END_ALLOW_THREADS; - } + } else { - NPY_SIGINT_ON - - while(a>=0) { - a += 1; - } - + while(a >= 0) { + a += 1; + } NPY_SIGINT_OFF - } - + } return PyInt_FromLong(a); } static struct PyMethodDef array_module_methods[] = { - {"_get_ndarray_c_version", (PyCFunction)array__get_ndarray_c_version, - METH_VARARGS|METH_KEYWORDS, NULL}, - {"_reconstruct", (PyCFunction)array__reconstruct, - METH_VARARGS, NULL}, - {"set_string_function", (PyCFunction)array_set_string_function, - METH_VARARGS|METH_KEYWORDS, NULL}, - {"set_numeric_ops", (PyCFunction)array_set_ops_function, - METH_VARARGS|METH_KEYWORDS, NULL}, - {"set_typeDict", (PyCFunction)array_set_typeDict, - METH_VARARGS, NULL}, + {"_get_ndarray_c_version", + (PyCFunction)array__get_ndarray_c_version, + METH_VARARGS|METH_KEYWORDS, NULL}, + {"_reconstruct", + (PyCFunction)array__reconstruct, + METH_VARARGS, NULL}, + {"set_string_function", + (PyCFunction)array_set_string_function, + METH_VARARGS|METH_KEYWORDS, NULL}, + {"set_numeric_ops", + (PyCFunction)array_set_ops_function, + METH_VARARGS|METH_KEYWORDS, NULL}, + {"set_typeDict", + (PyCFunction)array_set_typeDict, + METH_VARARGS, NULL}, - {"array", (PyCFunction)_array_fromobject, - METH_VARARGS|METH_KEYWORDS, NULL}, - {"arange", (PyCFunction)array_arange, - METH_VARARGS|METH_KEYWORDS, NULL}, - {"zeros", (PyCFunction)array_zeros, - METH_VARARGS|METH_KEYWORDS, NULL}, - {"empty", (PyCFunction)array_empty, - METH_VARARGS|METH_KEYWORDS, NULL}, - {"scalar", (PyCFunction)array_scalar, - METH_VARARGS|METH_KEYWORDS, NULL}, - {"where", (PyCFunction)array_where, - METH_VARARGS, NULL}, - {"lexsort", (PyCFunction)array_lexsort, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"putmask", (PyCFunction)array_putmask, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"fromstring",(PyCFunction)array_fromstring, - METH_VARARGS|METH_KEYWORDS, NULL}, - {"fromiter",(PyCFunction)array_fromiter, - METH_VARARGS|METH_KEYWORDS, NULL}, - {"concatenate", (PyCFunction)array_concatenate, - METH_VARARGS|METH_KEYWORDS, NULL}, - {"inner", (PyCFunction)array_innerproduct, - METH_VARARGS, NULL}, - {"dot", (PyCFunction)array_matrixproduct, - METH_VARARGS, NULL}, - {"_fastCopyAndTranspose", (PyCFunction)array_fastCopyAndTranspose, - METH_VARARGS, NULL}, - {"correlate", (PyCFunction)array_correlate, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"frombuffer", (PyCFunction)array_frombuffer, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"fromfile", (PyCFunction)array_fromfile, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"can_cast", (PyCFunction)array_can_cast_safely, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"newbuffer", (PyCFunction)new_buffer, - METH_VARARGS, NULL}, - {"getbuffer", (PyCFunction)buffer_buffer, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"int_asbuffer", (PyCFunction)as_buffer, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"format_longfloat", (PyCFunction)format_longfloat, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"compare_chararrays", (PyCFunction)compare_chararrays, - METH_VARARGS | METH_KEYWORDS, NULL}, - {"test_interrupt", (PyCFunction)test_interrupt, - METH_VARARGS, NULL}, + {"array", + (PyCFunction)_array_fromobject, + METH_VARARGS|METH_KEYWORDS, NULL}, + {"arange", + (PyCFunction)array_arange, + METH_VARARGS|METH_KEYWORDS, NULL}, + {"zeros", + (PyCFunction)array_zeros, + METH_VARARGS|METH_KEYWORDS, NULL}, + {"empty", + (PyCFunction)array_empty, + METH_VARARGS|METH_KEYWORDS, NULL}, + {"scalar", + (PyCFunction)array_scalar, + METH_VARARGS|METH_KEYWORDS, NULL}, + {"where", + (PyCFunction)array_where, + METH_VARARGS, NULL}, + {"lexsort", + (PyCFunction)array_lexsort, + METH_VARARGS | METH_KEYWORDS, NULL}, + {"putmask", + (PyCFunction)array_putmask, + METH_VARARGS | METH_KEYWORDS, NULL}, + {"fromstring", + (PyCFunction)array_fromstring, + METH_VARARGS|METH_KEYWORDS, NULL}, + {"fromiter", + (PyCFunction)array_fromiter, + METH_VARARGS|METH_KEYWORDS, NULL}, + {"concatenate", + (PyCFunction)array_concatenate, + METH_VARARGS|METH_KEYWORDS, NULL}, + {"inner", + (PyCFunction)array_innerproduct, + METH_VARARGS, NULL}, + {"dot", + (PyCFunction)array_matrixproduct, + METH_VARARGS, NULL}, + {"_fastCopyAndTranspose", + (PyCFunction)array_fastCopyAndTranspose, + METH_VARARGS, NULL}, + {"correlate", + (PyCFunction)array_correlate, + METH_VARARGS | METH_KEYWORDS, NULL}, + {"frombuffer", + (PyCFunction)array_frombuffer, + METH_VARARGS | METH_KEYWORDS, NULL}, + {"fromfile", + (PyCFunction)array_fromfile, + METH_VARARGS | METH_KEYWORDS, NULL}, + {"can_cast", + (PyCFunction)array_can_cast_safely, + METH_VARARGS | METH_KEYWORDS, NULL}, + {"newbuffer", + (PyCFunction)new_buffer, + METH_VARARGS, NULL}, + {"getbuffer", + (PyCFunction)buffer_buffer, + METH_VARARGS | METH_KEYWORDS, NULL}, + {"int_asbuffer", + (PyCFunction)as_buffer, + METH_VARARGS | METH_KEYWORDS, NULL}, + {"format_longfloat", + (PyCFunction)format_longfloat, + METH_VARARGS | METH_KEYWORDS, NULL}, + {"compare_chararrays", + (PyCFunction)compare_chararrays, + METH_VARARGS | METH_KEYWORDS, NULL}, + {"test_interrupt", + (PyCFunction)test_interrupt, + METH_VARARGS, NULL}, {NULL, NULL, 0, NULL} /* sentinel */ }; #include "__multiarray_api.c" -/* Establish scalar-type hierarchy */ - -/* For dual inheritance we need to make sure that the objects being - inherited from have the tp->mro object initialized. This is - not necessarily true for the basic type objects of Python (it is - checked for single inheritance but not dual in PyType_Ready). - - Thus, we call PyType_Ready on the standard Python Types, here. -*/ +/* Establish scalar-type hierarchy + * + * For dual inheritance we need to make sure that the objects being + * inherited from have the tp->mro object initialized. This is + * not necessarily true for the basic type objects of Python (it is + * checked for single inheritance but not dual in PyType_Ready). + * + * Thus, we call PyType_Ready on the standard Python Types, here. + */ static int setup_scalartypes(PyObject *NPY_UNUSED(dict)) { - initialize_numeric_types(); - if (PyType_Ready(&PyBool_Type) < 0) return -1; - if (PyType_Ready(&PyInt_Type) < 0) return -1; - if (PyType_Ready(&PyFloat_Type) < 0) return -1; - if (PyType_Ready(&PyComplex_Type) < 0) return -1; - if (PyType_Ready(&PyString_Type) < 0) return -1; - if (PyType_Ready(&PyUnicode_Type) < 0) return -1; + if (PyType_Ready(&PyBool_Type) < 0) { + return -1; + } + if (PyType_Ready(&PyInt_Type) < 0) { + return -1; + } + if (PyType_Ready(&PyFloat_Type) < 0) { + return -1; + } + if (PyType_Ready(&PyComplex_Type) < 0) { + return -1; + } + if (PyType_Ready(&PyString_Type) < 0) { + return -1; + } + if (PyType_Ready(&PyUnicode_Type) < 0) { + return -1; + } #define SINGLE_INHERIT(child, parent) \ Py##child##ArrType_Type.tp_base = &Py##parent##ArrType_Type; \ @@ -7908,9 +8298,9 @@ return -1; \ } - if (PyType_Ready(&PyGenericArrType_Type) < 0) + if (PyType_Ready(&PyGenericArrType_Type) < 0) { return -1; - + } SINGLE_INHERIT(Number, Generic); SINGLE_INHERIT(Integer, Number); SINGLE_INHERIT(Inexact, Number); @@ -7968,7 +8358,11 @@ SINGLE_INHERIT(LongLong, SignedInteger); #endif - /* fprintf(stderr, "tp_free = %p, PyObject_Del = %p, int_tp_free = %p, base.tp_free = %p\n", PyIntArrType_Type.tp_free, PyObject_Del, PyInt_Type.tp_free, PySignedIntegerArrType_Type.tp_free); + /* + fprintf(stderr, + "tp_free = %p, PyObject_Del = %p, int_tp_free = %p, base.tp_free = %p\n", + PyIntArrType_Type.tp_free, PyObject_Del, PyInt_Type.tp_free, + PySignedIntegerArrType_Type.tp_free); */ SINGLE_INHERIT(UByte, UnsignedInteger); SINGLE_INHERIT(UShort, UnsignedInteger); @@ -7996,9 +8390,10 @@ #undef SINGLE_INHERIT #undef DUAL_INHERIT - /* Clean up string and unicode array types so they act more like - strings -- get their tables from the standard types. - */ + /* + * Clean up string and unicode array types so they act more like + * strings -- get their tables from the standard types. + */ } /* place a flag dictionary in d */ @@ -8050,45 +8445,48 @@ /* Create the module and add the functions */ m = Py_InitModule("multiarray", array_module_methods); - if (!m) goto err; - + if (!m) { + goto err; + } /* Add some symbolic constants to the module */ d = PyModule_GetDict(m); - if (!d) goto err; - + if (!d) { + goto err; + } PyArray_Type.tp_free = _pya_free; - if (PyType_Ready(&PyArray_Type) < 0) + if (PyType_Ready(&PyArray_Type) < 0) { return; - - if (setup_scalartypes(d) < 0) goto err; - + } + if (setup_scalartypes(d) < 0) { + goto err; + } PyArrayIter_Type.tp_iter = PyObject_SelfIter; PyArrayMultiIter_Type.tp_iter = PyObject_SelfIter; PyArrayMultiIter_Type.tp_free = _pya_free; - if (PyType_Ready(&PyArrayIter_Type) < 0) + if (PyType_Ready(&PyArrayIter_Type) < 0) { return; - - if (PyType_Ready(&PyArrayMapIter_Type) < 0) + } + if (PyType_Ready(&PyArrayMapIter_Type) < 0) { return; - - if (PyType_Ready(&PyArrayMultiIter_Type) < 0) + } + if (PyType_Ready(&PyArrayMultiIter_Type) < 0) { return; - + } PyArrayDescr_Type.tp_hash = (hashfunc)_Py_HashPointer; - if (PyType_Ready(&PyArrayDescr_Type) < 0) + if (PyType_Ready(&PyArrayDescr_Type) < 0) { return; - - if (PyType_Ready(&PyArrayFlags_Type) < 0) + } + if (PyType_Ready(&PyArrayFlags_Type) < 0) { return; - + } c_api = PyCObject_FromVoidPtr((void *)PyArray_API, NULL); PyDict_SetItemString(d, "_ARRAY_API", c_api); Py_DECREF(c_api); - if (PyErr_Occurred()) goto err; - + if (PyErr_Occurred()) { + goto err; + } MultiArrayError = PyString_FromString ("multiarray.error"); PyDict_SetItemString (d, "error", MultiArrayError); - s = PyString_FromString("3.0"); PyDict_SetItemString(d, "__version__", s); Py_DECREF(s); @@ -8131,7 +8529,9 @@ set_flaginfo(d); - if (set_typeinfo(d) != 0) goto err; + if (set_typeinfo(d) != 0) { + goto err; + } return; err: From numpy-svn at scipy.org Sun Feb 22 04:12:10 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sun, 22 Feb 2009 03:12:10 -0600 (CST) Subject: [Numpy-svn] r6451 - trunk/numpy/distutils Message-ID: <20090222091210.B3FCCC7C02E@scipy.org> Author: cdavid Date: 2009-02-22 03:12:04 -0600 (Sun, 22 Feb 2009) New Revision: 6451 Modified: trunk/numpy/distutils/mingw32ccompiler.py Log: Fix typo in MSVC runtime info for mingw. Modified: trunk/numpy/distutils/mingw32ccompiler.py =================================================================== --- trunk/numpy/distutils/mingw32ccompiler.py 2009-02-22 05:27:40 UTC (rev 6450) +++ trunk/numpy/distutils/mingw32ccompiler.py 2009-02-22 09:12:04 UTC (rev 6451) @@ -365,7 +365,7 @@ _MSVCRVER_TO_FULLVER['90'] = "9.0.21022.8" # I took one version in my SxS directory: no idea if it is the good # one, and we can't retrieve it from python - _MSVCRVER_TO_FULLVER['90'] = "8.0.50727.42" + _MSVCRVER_TO_FULLVER['80'] = "8.0.50727.42" except ImportError: # If we are here, means python was not built with MSVC. Not sure what to do # in that case: manifest building will fail, but it should not be used in From numpy-svn at scipy.org Sun Feb 22 04:14:26 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sun, 22 Feb 2009 03:14:26 -0600 (CST) Subject: [Numpy-svn] r6452 - in branches/coremath: . numpy/distutils Message-ID: <20090222091426.96189C7C02E@scipy.org> Author: cdavid Date: 2009-02-22 03:14:18 -0600 (Sun, 22 Feb 2009) New Revision: 6452 Modified: branches/coremath/ branches/coremath/numpy/distutils/mingw32ccompiler.py Log: Merged revisions 6451 via svnmerge from http://svn.scipy.org/svn/numpy/trunk ........ r6451 | cdavid | 2009-02-22 18:12:04 +0900 (Sun, 22 Feb 2009) | 1 line Fix typo in MSVC runtime info for mingw. ........ Property changes on: branches/coremath ___________________________________________________________________ Name: svnmerge-integrated - /branches/distutils-revamp:1-2752 /branches/dynamic_cpu_configuration:1-6101 /branches/fix_float_format:1-6359 /branches/multicore:1-3687 /branches/numpy-mingw-w64:1-6150 /branches/visualstudio_manifest:1-6077 /trunk:1-6449 + /branches/distutils-revamp:1-2752 /branches/dynamic_cpu_configuration:1-6101 /branches/fix_float_format:1-6359 /branches/multicore:1-3687 /branches/numpy-mingw-w64:1-6150 /branches/visualstudio_manifest:1-6077 /trunk:1-6451 Modified: branches/coremath/numpy/distutils/mingw32ccompiler.py =================================================================== --- branches/coremath/numpy/distutils/mingw32ccompiler.py 2009-02-22 09:12:04 UTC (rev 6451) +++ branches/coremath/numpy/distutils/mingw32ccompiler.py 2009-02-22 09:14:18 UTC (rev 6452) @@ -365,7 +365,7 @@ _MSVCRVER_TO_FULLVER['90'] = "9.0.21022.8" # I took one version in my SxS directory: no idea if it is the good # one, and we can't retrieve it from python - _MSVCRVER_TO_FULLVER['90'] = "8.0.50727.42" + _MSVCRVER_TO_FULLVER['80'] = "8.0.50727.42" except ImportError: # If we are here, means python was not built with MSVC. Not sure what to do # in that case: manifest building will fail, but it should not be used in From numpy-svn at scipy.org Sun Feb 22 04:15:32 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sun, 22 Feb 2009 03:15:32 -0600 (CST) Subject: [Numpy-svn] r6453 - trunk/numpy/distutils Message-ID: <20090222091532.B3A16C7C02E@scipy.org> Author: cdavid Date: 2009-02-22 03:15:26 -0600 (Sun, 22 Feb 2009) New Revision: 6453 Modified: trunk/numpy/distutils/mingw32ccompiler.py Log: Fix wrong merge for manifest version. Modified: trunk/numpy/distutils/mingw32ccompiler.py =================================================================== --- trunk/numpy/distutils/mingw32ccompiler.py 2009-02-22 09:14:18 UTC (rev 6452) +++ trunk/numpy/distutils/mingw32ccompiler.py 2009-02-22 09:15:26 UTC (rev 6453) @@ -456,7 +456,7 @@ def generate_manifest(config): msver = get_build_msvc_version() if msver is not None: - if msver >= 9: + if msver >= 8: check_embedded_msvcr_match_linked(msver) ma = int(msver) mi = int((msver - ma) * 10) From numpy-svn at scipy.org Sun Feb 22 08:45:30 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sun, 22 Feb 2009 07:45:30 -0600 (CST) Subject: [Numpy-svn] r6454 - trunk/numpy/lib Message-ID: <20090222134530.EF4C8C7C042@scipy.org> Author: stefan Date: 2009-02-22 07:45:15 -0600 (Sun, 22 Feb 2009) New Revision: 6454 Modified: trunk/numpy/lib/io.py Log: Add GzipFile wrapper to support the "whence" keyword in GzipFile.seek. Modified: trunk/numpy/lib/io.py =================================================================== --- trunk/numpy/lib/io.py 2009-02-22 09:15:26 UTC (rev 6453) +++ trunk/numpy/lib/io.py 2009-02-22 13:45:15 UTC (rev 6454) @@ -22,6 +22,37 @@ _file = file _string_like = _is_string_like +def seek_gzip_factory(f): + """Use this factory to produce the class so that we can do a lazy + import on gzip. + + """ + import gzip, new + + def seek(self, offset, whence=0): + # figure out new position (we can only seek forwards) + if whence == 1: + offset = self.offset + offset + + if whence not in [0, 1]: + raise IOError, "Illegal argument" + + if offset < self.offset: + # for negative seek, rewind and do positive seek + self.rewind() + count = offset - self.offset + for i in range(count // 1024): + self.read(1024) + self.read(count % 1024) + + def tell(self): + return self.offset + + f.seek = new.instancemethod(seek, f) + f.tell = new.instancemethod(tell, f) + + return f + class BagObj(object): """A simple class that converts attribute lookups to getitems on the class passed in. @@ -138,8 +169,12 @@ memmap([4, 5, 6]) """ + import gzip + if isinstance(file, basestring): fid = _file(file,"rb") + elif isinstance(file, gzip.GzipFile): + fid = seek_gzip_factory(file) else: fid = file @@ -346,7 +381,7 @@ if _is_string_like(fname): if fname.endswith('.gz'): import gzip - fh = gzip.open(fname) + fh = seek_gzip_factory(fname) elif fname.endswith('.bz2'): import bz2 fh = bz2.BZ2File(fname) From numpy-svn at scipy.org Sun Feb 22 08:47:34 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sun, 22 Feb 2009 07:47:34 -0600 (CST) Subject: [Numpy-svn] r6455 - trunk/numpy/lib/tests Message-ID: <20090222134734.DB784C7C042@scipy.org> Author: stefan Date: 2009-02-22 07:47:14 -0600 (Sun, 22 Feb 2009) New Revision: 6455 Modified: trunk/numpy/lib/tests/test_io.py Log: Whitespace cleanup. Modified: trunk/numpy/lib/tests/test_io.py =================================================================== --- trunk/numpy/lib/tests/test_io.py 2009-02-22 13:45:15 UTC (rev 6454) +++ trunk/numpy/lib/tests/test_io.py 2009-02-22 13:47:14 UTC (rev 6455) @@ -1,4 +1,3 @@ - import numpy as np import numpy.ma as ma from numpy.ma.testutils import * @@ -699,7 +698,7 @@ def test_user_missing_values(self): - datastr ="A, B, C\n0, 0., 0j\n1, N/A, 1j\n-9, 2.2, N/A\n3, -99, 3j" + datastr ="A, B, C\n0, 0., 0j\n1, N/A, 1j\n-9, 2.2, N/A\n3, -99, 3j" data = StringIO.StringIO(datastr) basekwargs = dict(dtype=None, delimiter=',', names=True, missing='N/A') mdtype = [('A', int), ('B', float), ('C', complex)] @@ -712,7 +711,7 @@ assert_equal(test, control) # data.seek(0) - test = np.mafromtxt(data, + test = np.mafromtxt(data, missing_values={0:-9, 1:-99, 2:-999j}, **basekwargs) control = ma.array([( 0, 0.0, 0j), (1, -999, 1j), ( -9, 2.2, -999j), (3, -99, 3j)], @@ -721,7 +720,7 @@ assert_equal(test, control) # data.seek(0) - test = np.mafromtxt(data, + test = np.mafromtxt(data, missing_values={0:-9, 'B':-99, 'C':-999j}, **basekwargs) control = ma.array([( 0, 0.0, 0j), (1, -999, 1j), From numpy-svn at scipy.org Sun Feb 22 08:48:43 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sun, 22 Feb 2009 07:48:43 -0600 (CST) Subject: [Numpy-svn] r6456 - trunk/numpy/lib/tests Message-ID: <20090222134843.8706CC7C042@scipy.org> Author: stefan Date: 2009-02-22 07:48:21 -0600 (Sun, 22 Feb 2009) New Revision: 6456 Modified: trunk/numpy/lib/tests/test_io.py Log: Add test for Gzip loader. Modified: trunk/numpy/lib/tests/test_io.py =================================================================== --- trunk/numpy/lib/tests/test_io.py 2009-02-22 13:47:14 UTC (rev 6455) +++ trunk/numpy/lib/tests/test_io.py 2009-02-22 13:48:21 UTC (rev 6456) @@ -805,8 +805,22 @@ self.failUnless(isinstance(test, np.recarray)) assert_equal(test, control) +def test_gzip_load(): + import gzip + from StringIO import StringIO + a = np.random.random((5, 5)) + s = StringIO() + f = gzip.GzipFile(fileobj=s, mode="w") + np.save(f, a) + f.close() + s.seek(0) + + f = gzip.GzipFile(fileobj=s, mode="r") + assert_array_equal(np.load(f), a) + + if __name__ == "__main__": run_module_suite() From numpy-svn at scipy.org Sun Feb 22 17:10:42 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sun, 22 Feb 2009 16:10:42 -0600 (CST) Subject: [Numpy-svn] r6457 - trunk/numpy/testing Message-ID: <20090222221042.C10F2C7C01D@scipy.org> Author: stefan Date: 2009-02-22 16:10:30 -0600 (Sun, 22 Feb 2009) New Revision: 6457 Modified: trunk/numpy/testing/utils.py Log: Allow subclasses of arrays in testing. Modified: trunk/numpy/testing/utils.py =================================================================== --- trunk/numpy/testing/utils.py 2009-02-22 13:48:21 UTC (rev 6456) +++ trunk/numpy/testing/utils.py 2009-02-22 22:10:30 UTC (rev 6457) @@ -240,9 +240,9 @@ def assert_array_compare(comparison, x, y, err_msg='', verbose=True, header=''): - from numpy.core import asarray, isnan, any - x = asarray(x) - y = asarray(y) + from numpy.core import array, isnan, any + x = array(x, copy=False, subok=True) + y = array(y, copy=False, subok=True) def isnumber(x): return x.dtype.char in '?bhilqpBHILQPfdgFDG' From numpy-svn at scipy.org Sun Feb 22 17:36:24 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Sun, 22 Feb 2009 16:36:24 -0600 (CST) Subject: [Numpy-svn] r6458 - trunk/numpy/core/src Message-ID: <20090222223624.52A08C7C01D@scipy.org> Author: charris Date: 2009-02-22 16:36:18 -0600 (Sun, 22 Feb 2009) New Revision: 6458 Modified: trunk/numpy/core/src/umath_ufunc_object.inc Log: Coding style cleanups. This finishes umath_ufunc_object.inc. Modified: trunk/numpy/core/src/umath_ufunc_object.inc =================================================================== --- trunk/numpy/core/src/umath_ufunc_object.inc 2009-02-22 22:10:30 UTC (rev 6457) +++ trunk/numpy/core/src/umath_ufunc_object.inc 2009-02-22 22:36:18 UTC (rev 6458) @@ -24,102 +24,100 @@ * */ - #define USE_USE_DEFAULTS 1 - - - /* ---------------------------------------------------------------- */ +/* + * fpstatus is the ufunc_formatted hardware status + * errmask is the handling mask specified by the user. + * errobj is a Python object with (string, callable object or None) + * or NULL + */ -/* fpstatus is the ufunc_formatted hardware status - errmask is the handling mask specified by the user. - errobj is a Python object with (string, callable object or None) - or NULL -*/ - /* - 2. for each of the flags - determine whether to ignore, warn, raise error, or call Python function. - If ignore, do nothing - If warn, print a warning and continue - If raise return an error - If call, call a user-defined function with string -*/ + * 2. for each of the flags + * determine whether to ignore, warn, raise error, or call Python function. + * If ignore, do nothing + * If warn, print a warning and continue + * If raise return an error + * If call, call a user-defined function with string + */ static int _error_handler(int method, PyObject *errobj, char *errtype, int retstatus, int *first) { PyObject *pyfunc, *ret, *args; - char *name=PyString_AS_STRING(PyTuple_GET_ITEM(errobj,0)); + char *name = PyString_AS_STRING(PyTuple_GET_ITEM(errobj,0)); char msg[100]; + ALLOW_C_API_DEF; - ALLOW_C_API_DEF - - ALLOW_C_API - - switch(method) { - case UFUNC_ERR_WARN: - PyOS_snprintf(msg, sizeof(msg), - "%s encountered in %s", errtype, name); - if (PyErr_Warn(PyExc_RuntimeWarning, msg) < 0) goto fail; - break; - case UFUNC_ERR_RAISE: - PyErr_Format(PyExc_FloatingPointError, - "%s encountered in %s", - errtype, name); + ALLOW_C_API; + switch(method) { + case UFUNC_ERR_WARN: + PyOS_snprintf(msg, sizeof(msg), "%s encountered in %s", errtype, name); + if (PyErr_Warn(PyExc_RuntimeWarning, msg) < 0) { goto fail; - case UFUNC_ERR_CALL: + } + break; + case UFUNC_ERR_RAISE: + PyErr_Format(PyExc_FloatingPointError, "%s encountered in %s", + errtype, name); + goto fail; + case UFUNC_ERR_CALL: + pyfunc = PyTuple_GET_ITEM(errobj, 1); + if (pyfunc == Py_None) { + PyErr_Format(PyExc_NameError, + "python callback specified for %s (in " \ + " %s) but no function found.", + errtype, name); + goto fail; + } + args = Py_BuildValue("NN", PyString_FromString(errtype), + PyInt_FromLong((long) retstatus)); + if (args == NULL) { + goto fail; + } + ret = PyObject_CallObject(pyfunc, args); + Py_DECREF(args); + if (ret == NULL) { + goto fail; + } + Py_DECREF(ret); + break; + case UFUNC_ERR_PRINT: + if (*first) { + fprintf(stderr, "Warning: %s encountered in %s\n", errtype, name); + *first = 0; + } + break; + case UFUNC_ERR_LOG: + if (first) { + *first = 0; pyfunc = PyTuple_GET_ITEM(errobj, 1); - if (pyfunc == Py_None) { PyErr_Format(PyExc_NameError, - "python callback specified for %s (in " \ - " %s) but no function found.", - errtype, name); + "log specified for %s (in %s) but no " \ + "object with write method found.", + errtype, name); goto fail; } - args = Py_BuildValue("NN", PyString_FromString(errtype), - PyInt_FromLong((long) retstatus)); - if (args == NULL) goto fail; - ret = PyObject_CallObject(pyfunc, args); - Py_DECREF(args); - if (ret == NULL) goto fail; + PyOS_snprintf(msg, sizeof(msg), + "Warning: %s encountered in %s\n", errtype, name); + ret = PyObject_CallMethod(pyfunc, "write", "s", msg); + if (ret == NULL) { + goto fail; + } Py_DECREF(ret); - - break; - case UFUNC_ERR_PRINT: - if (*first) { - fprintf(stderr, "Warning: %s encountered in %s\n", errtype, name); - *first = 0; - } - break; - case UFUNC_ERR_LOG: - if (first) { - *first = 0; - pyfunc = PyTuple_GET_ITEM(errobj, 1); - if (pyfunc == Py_None) { - PyErr_Format(PyExc_NameError, - "log specified for %s (in %s) but no " \ - "object with write method found.", - errtype, name); - goto fail; - } - PyOS_snprintf(msg, sizeof(msg), - "Warning: %s encountered in %s\n", errtype, name); - ret = PyObject_CallMethod(pyfunc, "write", "s", msg); - if (ret == NULL) goto fail; - Py_DECREF(ret); - } - break; } - DISABLE_C_API - return 0; + break; + } + DISABLE_C_API; + return 0; - fail: - DISABLE_C_API - return -1; +fail: + DISABLE_C_API; + return -1; } @@ -184,8 +182,8 @@ #define ONE_EL_REDUCELOOP 1 #define NOBUFFER_UFUNCLOOP 2 #define NOBUFFER_REDUCELOOP 2 -#define BUFFER_UFUNCLOOP 3 -#define BUFFER_REDUCELOOP 3 +#define BUFFER_UFUNCLOOP 3 +#define BUFFER_REDUCELOOP 3 #define SIGNATURE_NOBUFFER_UFUNCLOOP 4 @@ -221,12 +219,13 @@ static char *_types_msg = "function not supported for these types, " \ "and can't coerce safely to supported types"; -/* Called for non-NULL user-defined functions. - The object should be a CObject pointing to a linked-list of functions - storing the function, data, and signature of all user-defined functions. - There must be a match with the input argument types or an error - will occur. -*/ +/* + * Called for non-NULL user-defined functions. + * The object should be a CObject pointing to a linked-list of functions + * storing the function, data, and signature of all user-defined functions. + * There must be a match with the input argument types or an error + * will occur. + */ static int _find_matching_userloop(PyObject *obj, int *arg_types, PyArray_SCALARKIND *scalars, @@ -235,20 +234,21 @@ { PyUFunc_Loop1d *funcdata; int i; + funcdata = (PyUFunc_Loop1d *)PyCObject_AsVoidPtr(obj); while (funcdata != NULL) { - for(i=0; iarg_types[i], scalars[i])) break; } - if (i==nin) { /* match found */ + if (i == nin) { + /* match found */ *function = funcdata->func; *data = funcdata->data; - /* Make sure actual arg_types supported - by the loop are used */ - for(i=0; iarg_types[i]; } return 0; @@ -258,27 +258,27 @@ return -1; } -/* if only one type is specified then it is the "first" output data-type - and the first signature matching this output data-type is returned. - - if a tuple of types is specified then an exact match to the signature - is searched and it much match exactly or an error occurs -*/ +/* + * if only one type is specified then it is the "first" output data-type + * and the first signature matching this output data-type is returned. + * + * if a tuple of types is specified then an exact match to the signature + * is searched and it much match exactly or an error occurs + */ static int extract_specified_loop(PyUFuncObject *self, int *arg_types, PyUFuncGenericFunction *function, void **data, PyObject *type_tup, int userdef) { - Py_ssize_t n=1; + Py_ssize_t n = 1; int *rtypenums; static char msg[] = "loop written to specified type(s) not found"; PyArray_Descr *dtype; int nargs; int i, j; - int strtype=0; + int strtype = 0; nargs = self->nargs; - if (PyTuple_Check(type_tup)) { n = PyTuple_GET_SIZE(type_tup); if (n != 1 && n != nargs) { @@ -292,17 +292,19 @@ else if PyString_Check(type_tup) { Py_ssize_t slen; char *thestr; + slen = PyString_GET_SIZE(type_tup); thestr = PyString_AS_STRING(type_tup); - for(i=0; i < slen-2; i++) { - if (thestr[i] == '-' && thestr[i+1] == '>') + for (i = 0; i < slen - 2; i++) { + if (thestr[i] == '-' && thestr[i+1] == '>') { break; + } } if (i < slen-2) { strtype = 1; - n = slen-2; - if (i != self->nin || - slen-2-i != self->nout) { + n = slen - 2; + if (i != self->nin + || slen - 2 - i != self->nout) { PyErr_Format(PyExc_ValueError, "a type-string for %s, " \ "requires %d typecode(s) before " \ @@ -314,7 +316,7 @@ } } rtypenums = (int *)_pya_malloc(n*sizeof(int)); - if (rtypenums==NULL) { + if (rtypenums == NULL) { PyErr_NoMemory(); return -1; } @@ -329,18 +331,21 @@ continue; } dtype = PyArray_DescrFromType((int) *ptr); - if (dtype == NULL) goto fail; + if (dtype == NULL) { + goto fail; + } rtypenums[i] = dtype->type_num; Py_DECREF(dtype); - ptr++; i++; + ptr++; + i++; } } else if (PyTuple_Check(type_tup)) { - for(i=0; itype_num; Py_DECREF(dtype); } @@ -353,12 +358,16 @@ Py_DECREF(dtype); } - if (userdef > 0) { /* search in the user-defined functions */ + if (userdef > 0) { + /* search in the user-defined functions */ PyObject *key, *obj; PyUFunc_Loop1d *funcdata; + obj = NULL; key = PyInt_FromLong((long) userdef); - if (key == NULL) goto fail; + if (key == NULL) { + goto fail; + } obj = PyDict_GetItem(self->userloops, key); Py_DECREF(key); if (obj == NULL) { @@ -367,25 +376,29 @@ " with no registered loops"); goto fail; } - /* extract the correct function - data and argtypes - */ + /* + * extract the correct function + * data and argtypes + */ funcdata = (PyUFunc_Loop1d *)PyCObject_AsVoidPtr(obj); while (funcdata != NULL) { if (n != 1) { - for(i=0; iarg_types[i]) + for (i = 0; i < nargs; i++) { + if (rtypenums[i] != funcdata->arg_types[i]) { break; + } } } else if (rtypenums[0] == funcdata->arg_types[self->nin]) { i = nargs; } - else i = -1; + else { + i = -1; + } if (i == nargs) { *function = funcdata->func; *data = funcdata->data; - for(i=0; iarg_types[i]; } Py_DECREF(obj); @@ -398,22 +411,24 @@ } /* look for match in self->functions */ - - for(j=0; jntypes; j++) { + for (j = 0; j < self->ntypes; j++) { if (n != 1) { - for(i=0; itypes[j*nargs + i]) + for(i = 0; i < nargs; i++) { + if (rtypenums[i] != self->types[j*nargs + i]) { break; + } } } else if (rtypenums[0] == self->types[j*nargs+self->nin]) { i = nargs; } - else i = -1; + else { + i = -1; + } if (i == nargs) { *function = self->functions[j]; *data = self->data[j]; - for(i=0; itypes[j*nargs+i]; } goto finish; @@ -421,7 +436,6 @@ } PyErr_SetString(PyExc_TypeError, msg); - fail: _pya_free(rtypenums); return -1; @@ -429,7 +443,6 @@ finish: _pya_free(rtypenums); return 0; - } @@ -437,7 +450,6 @@ * Called to determine coercion * Can change arg_types. */ - static int select_types(PyUFuncObject *self, int *arg_types, PyUFuncGenericFunction *function, void **data, @@ -547,9 +559,9 @@ } #if USE_USE_DEFAULTS==1 -static int PyUFunc_NUM_NODEFAULTS=0; +static int PyUFunc_NUM_NODEFAULTS = 0; #endif -static PyObject *PyUFunc_PYVALS_NAME=NULL; +static PyObject *PyUFunc_PYVALS_NAME = NULL; static int @@ -569,9 +581,9 @@ if ((*bufsize == -1) && PyErr_Occurred()) { return -1; } - if ((*bufsize < PyArray_MIN_BUFSIZE) || - (*bufsize > PyArray_MAX_BUFSIZE) || - (*bufsize % 16 != 0)) { + if ((*bufsize < PyArray_MIN_BUFSIZE) + || (*bufsize > PyArray_MAX_BUFSIZE) + || (*bufsize % 16 != 0)) { PyErr_Format(PyExc_ValueError, "buffer size (%d) is not in range " "(%"INTP_FMT" - %"INTP_FMT") or not a multiple of 16", @@ -605,13 +617,10 @@ Py_DECREF(temp); } - *errobj = Py_BuildValue("NO", - PyString_FromString(name), - retval); + *errobj = Py_BuildValue("NO", PyString_FromString(name), retval); if (*errobj == NULL) { return -1; } - return 0; } @@ -628,8 +637,7 @@ if (PyUFunc_NUM_NODEFAULTS != 0) { #endif if (PyUFunc_PYVALS_NAME == NULL) { - PyUFunc_PYVALS_NAME = \ - PyString_InternFromString(UFUNC_PYVALS_NAME); + PyUFunc_PYVALS_NAME = PyString_InternFromString(UFUNC_PYVALS_NAME); } thedict = PyThreadState_GetDict(); if (thedict == NULL) { @@ -641,20 +649,18 @@ #endif if (ref == NULL) { *errmask = UFUNC_ERR_DEFAULT; - *errobj = Py_BuildValue("NO", - PyString_FromString(name), - Py_None); + *errobj = Py_BuildValue("NO", PyString_FromString(name), Py_None); *bufsize = PyArray_BUFSIZE; return 0; } return _extract_pyvals(ref, name, bufsize, errmask, errobj); } -/* Create copies for any arrays that are less than loop->bufsize - in total size (or core_enabled) and are mis-behaved or in need - of casting. -*/ - +/* + * Create copies for any arrays that are less than loop->bufsize + * in total size (or core_enabled) and are mis-behaved or in need + * of casting. + */ static int _create_copies(PyUFuncLoopObject *loop, int *arg_types, PyArrayObject **mps) { @@ -665,12 +671,12 @@ PyArray_Descr *ntype; PyArray_Descr *atype; - for(i=0; idescr; atype = PyArray_DescrFromType(arg_types[i]); @@ -680,19 +686,20 @@ Py_DECREF(atype); } if (size < loop->bufsize || loop->ufunc->core_enabled) { - if (!(PyArray_ISBEHAVED_RO(mps[i])) || \ - PyArray_TYPE(mps[i]) != arg_types[i]) { + if (!(PyArray_ISBEHAVED_RO(mps[i])) + || PyArray_TYPE(mps[i]) != arg_types[i]) { ntype = PyArray_DescrFromType(arg_types[i]); new = PyArray_FromAny((PyObject *)mps[i], ntype, 0, 0, FORCECAST | ALIGNED, NULL); - if (new == NULL) return -1; + if (new == NULL) { + return -1; + } Py_DECREF(mps[i]); mps[i] = (PyArrayObject *)new; } } } - return 0; } @@ -721,13 +728,14 @@ #undef _GETATTR_ -/* Return the position of next non-white-space char in the string -*/ +/* Return the position of next non-white-space char in the string */ static int _next_non_white_space(const char* str, int offset) { int ret = offset; - while (str[ret] == ' ' || str[ret] == '\t') ret++; + while (str[ret] == ' ' || str[ret] == '\t') { + ret++; + } return ret; } @@ -743,34 +751,41 @@ return _is_alpha_underscore(ch) || (ch >= '0' && ch <= '9'); } -/* Return the ending position of a variable name -*/ +/* + * Return the ending position of a variable name + */ static int _get_end_of_name(const char* str, int offset) { int ret = offset; - while (_is_alnum_underscore(str[ret])) ret++; + while (_is_alnum_underscore(str[ret])) { + ret++; + } return ret; } -/* Returns 1 if the dimension names pointed by s1 and s2 are the same, - otherwise returns 0. -*/ +/* + * Returns 1 if the dimension names pointed by s1 and s2 are the same, + * otherwise returns 0. + */ static int _is_same_name(const char* s1, const char* s2) { while (_is_alnum_underscore(*s1) && _is_alnum_underscore(*s2)) { - if (*s1 != *s2) return 0; + if (*s1 != *s2) { + return 0; + } s1++; s2++; } return !_is_alnum_underscore(*s1) && !_is_alnum_underscore(*s2); } -/* Sets core_num_dim_ix, core_num_dims, core_dim_ixs, core_offsets, - and core_signature in PyUFuncObject "self". Returns 0 unless an - error occured. -*/ +/* + * Sets core_num_dim_ix, core_num_dims, core_dim_ixs, core_offsets, + * and core_signature in PyUFuncObject "self". Returns 0 unless an + * error occured. + */ static int _parse_signature(PyUFuncObject *self, const char *signature) { @@ -790,54 +805,59 @@ len = strlen(signature); self->core_signature = _pya_malloc(sizeof(char) * (len+1)); - if (self->core_signature) + if (self->core_signature) { strcpy(self->core_signature, signature); - + } /* Allocate sufficient memory to store pointers to all dimension names */ var_names = _pya_malloc(sizeof(char const*) * len); if (var_names == NULL) { PyErr_NoMemory(); return -1; } - + self->core_enabled = 1; self->core_num_dim_ix = 0; self->core_num_dims = _pya_malloc(sizeof(int) * self->nargs); self->core_dim_ixs = _pya_malloc(sizeof(int) * len); /* shrink this later */ self->core_offsets = _pya_malloc(sizeof(int) * self->nargs); - if (self->core_num_dims == NULL || self->core_dim_ixs == NULL || - self->core_offsets == NULL) { + if (self->core_num_dims == NULL || self->core_dim_ixs == NULL + || self->core_offsets == NULL) { PyErr_NoMemory(); goto fail; } i = _next_non_white_space(signature, 0); - - while (signature[i] != '\0') { /* loop over input/output arguments */ + while (signature[i] != '\0') { + /* loop over input/output arguments */ if (cur_arg == self->nin) { /* expect "->" */ if (signature[i] != '-' || signature[i+1] != '>') { parse_error = "expect '->'"; goto fail; } - i = _next_non_white_space(signature, i+2); + i = _next_non_white_space(signature, i + 2); } - /* parse core dimensions of one argument, e.g. "()", "(i)", or - "(i,j)" */ + /* + * parse core dimensions of one argument, + * e.g. "()", "(i)", or "(i,j)" + */ if (signature[i] != '(') { parse_error = "expect '('"; goto fail; } - i = _next_non_white_space(signature, i+1); - while (signature[i] != ')') { /* loop over core dimensions */ + i = _next_non_white_space(signature, i + 1); + while (signature[i] != ')') { + /* loop over core dimensions */ int j = 0; if (!_is_alpha_underscore(signature[i])) { parse_error = "expect dimension name"; goto fail; } while (j < self->core_num_dim_ix) { - if (_is_same_name(signature+i, var_names[j])) break; + if (_is_same_name(signature+i, var_names[j])) { + break; + } j++; } if (j >= self->core_num_dim_ix) { @@ -855,7 +875,7 @@ } if (signature[i] == ',') { - i = _next_non_white_space(signature, i+1); + i = _next_non_white_space(signature, i + 1); if (signature[i] == ')') { parse_error = "',' must not be followed by ')'"; goto fail; @@ -866,16 +886,18 @@ self->core_offsets[cur_arg] = cur_core_dim-nd; cur_arg++; nd = 0; - i = _next_non_white_space(signature, i+1); + i = _next_non_white_space(signature, i + 1); if (cur_arg != self->nin && cur_arg != self->nargs) { - /* The list of input arguments (or output arguments) was - only read partially */ + /* + * The list of input arguments (or output arguments) was + * only read partially + */ if (signature[i] != ',') { parse_error = "expect ','"; goto fail; } - i = _next_non_white_space(signature, i+1); + i = _next_non_white_space(signature, i + 1); } } if (cur_arg != self->nargs) { @@ -883,12 +905,14 @@ goto fail; } self->core_dim_ixs = _pya_realloc(self->core_dim_ixs, - sizeof(int) * cur_core_dim); + sizeof(int)*cur_core_dim); /* check for trivial core-signature, e.g. "(),()->()" */ - if (cur_core_dim == 0) + if (cur_core_dim == 0) { self->core_enabled = 0; + } _pya_free((void*)var_names); return 0; + fail: _pya_free((void*)var_names); if (parse_error) { @@ -906,10 +930,11 @@ return -1; } -/* Concatenate the loop and core dimensions of - PyArrayMultiIterObject's iarg-th argument, to recover a full - dimension array (used for output arguments). -*/ +/* + * Concatenate the loop and core dimensions of + * PyArrayMultiIterObject's iarg-th argument, to recover a full + * dimension array (used for output arguments). + */ static npy_intp* _compute_output_dims(PyUFuncLoopObject *loop, int iarg, int *out_nd, npy_intp *tmp_dims) @@ -933,14 +958,14 @@ memcpy(tmp_dims, loop->dimensions, sizeof(npy_intp) * loop->nd); /* copy core dimension */ - for (i = 0; i < ufunc->core_num_dims[iarg]; i++) + for (i = 0; i < ufunc->core_num_dims[iarg]; i++) { tmp_dims[loop->nd + i] = loop->core_dim_sizes[1 + - ufunc->core_dim_ixs[ufunc->core_offsets[iarg]+i]]; + ufunc->core_dim_ixs[ufunc->core_offsets[iarg] + i]]; + } return tmp_dims; } -/* Check and set core_dim_sizes and core_strides for the i-th argument. -*/ +/* Check and set core_dim_sizes and core_strides for the i-th argument. */ static int _compute_dimension_size(PyUFuncLoopObject *loop, PyArrayObject **mps, int i) { @@ -949,7 +974,7 @@ int k = PyArray_NDIM(mps[i]) - ufunc->core_num_dims[i]; int ind; for (ind = 0; ind < ufunc->core_num_dims[i]; ind++, j++, k++) { - npy_intp dim = k<0 ? 1 : PyArray_DIM(mps[i], k); + npy_intp dim = k < 0 ? 1 : PyArray_DIM(mps[i], k); /* First element of core_dim_sizes will be used for looping */ int dim_ix = ufunc->core_dim_ixs[j] + 1; if (loop->core_dim_sizes[dim_ix] == 1) { @@ -957,8 +982,7 @@ loop->core_dim_sizes[dim_ix] = dim; } else if (dim != 1 && dim != loop->core_dim_sizes[dim_ix]) { - PyErr_SetString(PyExc_ValueError, - "core dimensions mismatch"); + PyErr_SetString(PyExc_ValueError, "core dimensions mismatch"); return -1; } /* First ufunc->nargs elements will be used for looping */ @@ -974,23 +998,25 @@ { PyArrayObject *ret; int nd = ap->nd - core_nd; - if (nd < 0) nd = 0; + if (nd < 0) { + nd = 0; + } /* The following code is basically taken from PyArray_Transpose */ - Py_INCREF(ap->descr); /* NewFromDescr will steal this reference */ + /* NewFromDescr will steal this reference */ + Py_INCREF(ap->descr); ret = (PyArrayObject *) PyArray_NewFromDescr(ap->ob_type, ap->descr, nd, ap->dimensions, ap->strides, ap->data, ap->flags, (PyObject *)ap); - if (ret == NULL) return NULL; - + if (ret == NULL) { + return NULL; + } /* point at true owner of memory: */ ret->base = (PyObject *)ap; Py_INCREF(ap); - PyArray_UpdateFlags(ret, CONTIGUOUS | FORTRAN); - return ret; } @@ -1018,8 +1044,7 @@ /* Check number of arguments */ nargs = PyTuple_Size(args); if ((nargs < self->nin) || (nargs > self->nargs)) { - PyErr_SetString(PyExc_ValueError, - "invalid number of arguments"); + PyErr_SetString(PyExc_ValueError, "invalid number of arguments"); return -1; } @@ -1046,7 +1071,8 @@ if (!object && PyTypeNum_ISOBJECT(arg_types[i])) { object = 1; } - /* debug + /* + * debug * fprintf(stderr, "array %d has reference %d\n", i, * (mps[i])->ob_refcnt); */ @@ -1083,31 +1109,30 @@ * different kinds of lesser kinds then use normal coercion rules */ if (allscalars || (maxsckind > maxarrkind)) { - for(i = 0; i < self->nin; i++) { + for (i = 0; i < self->nin; i++) { scalars[i] = PyArray_NOSCALAR; } } /* Select an appropriate function for these argument types. */ if (select_types(loop->ufunc, arg_types, &(loop->function), - &(loop->funcdata), scalars, typetup) == -1) + &(loop->funcdata), scalars, typetup) == -1) { return -1; - + } /* * FAIL with NotImplemented if the other object has * the __r__ method and has __array_priority__ as * an attribute (signalling it can handle ndarray's) * and is not already an ndarray or a subtype of the same type. */ - if ((arg_types[1] == PyArray_OBJECT) && \ - (loop->ufunc->nin==2) && (loop->ufunc->nout == 1)) { + if ((arg_types[1] == PyArray_OBJECT) + && (loop->ufunc->nin==2) && (loop->ufunc->nout == 1)) { PyObject *_obj = PyTuple_GET_ITEM(args, 1); - if (!PyArray_CheckExact(_obj) && + if (!PyArray_CheckExact(_obj) /* If both are same subtype of object arrays, then proceed */ - !(_obj->ob_type == (PyTuple_GET_ITEM(args, 0))->ob_type) && \ - - PyObject_HasAttrString(_obj, "__array_priority__") && \ - _has_reflected_op(_obj, loop->ufunc->name)) { + && !(_obj->ob_type == (PyTuple_GET_ITEM(args, 0))->ob_type) + && PyObject_HasAttrString(_obj, "__array_priority__") + && _has_reflected_op(_obj, loop->ufunc->name)) { loop->notimplemented = 1; return nargs; } @@ -1121,33 +1146,34 @@ return -1; } - /* Only use loop dimensions when constructing Iterator: + /* + * Only use loop dimensions when constructing Iterator: * temporarily replace mps[i] (will be recovered below). */ if (self->core_enabled) { for (i = 0; i < self->nin; i++) { PyArrayObject *ao; - if (_compute_dimension_size(loop, mps, i) < 0) + if (_compute_dimension_size(loop, mps, i) < 0) { return -1; - + } ao = _trunc_coredim(mps[i], self->core_num_dims[i]); - if (ao == NULL) + if (ao == NULL) { return -1; + } mps[i] = ao; } } /* Create Iterators for the Inputs */ - for(i = 0; i < self->nin; i++) { - loop->iters[i] = (PyArrayIterObject *) \ + for (i = 0; i < self->nin; i++) { + loop->iters[i] = (PyArrayIterObject *) PyArray_IterNew((PyObject *)mps[i]); if (loop->iters[i] == NULL) { return -1; } } - /* Recover mps[i]. */ if (self->core_enabled) { for (i = 0; i < self->nin; i++) { @@ -1164,7 +1190,7 @@ } /* Get any return arguments */ - for(i = self->nin; i < nargs; i++) { + for (i = self->nin; i < nargs; i++) { mps[i] = (PyArrayObject *)PyTuple_GET_ITEM(args, i); if (((PyObject *)mps[i])==Py_None) { mps[i] = NULL; @@ -1188,27 +1214,25 @@ return -1; } } - if (self->core_enabled) { - if (_compute_dimension_size(loop, mps, i) < 0) + if (_compute_dimension_size(loop, mps, i) < 0) { return -1; + } } out_dims = _compute_output_dims(loop, i, &out_nd, temp_dims); - if (!out_dims) return -1; - - if (mps[i]->nd != out_nd || - !PyArray_CompareLists(mps[i]->dimensions, - out_dims, out_nd)) { - PyErr_SetString(PyExc_ValueError, - "invalid return array shape"); + if (!out_dims) { + return -1; + } + if (mps[i]->nd != out_nd + || !PyArray_CompareLists(mps[i]->dimensions, out_dims, out_nd)) { + PyErr_SetString(PyExc_ValueError, "invalid return array shape"); Py_DECREF(mps[i]); mps[i] = NULL; return -1; } if (!PyArray_ISWRITEABLE(mps[i])) { - PyErr_SetString(PyExc_ValueError, - "return array is not writeable"); + PyErr_SetString(PyExc_ValueError, "return array is not writeable"); Py_DECREF(mps[i]); mps[i] = NULL; return -1; @@ -1221,8 +1245,9 @@ if (mps[i] == NULL) { out_dims = _compute_output_dims(loop, i, &out_nd, temp_dims); - if (!out_dims) return -1; - + if (!out_dims) { + return -1; + } mps[i] = (PyArrayObject *)PyArray_New(subtype, out_nd, out_dims, @@ -1250,8 +1275,8 @@ } /* still not the same -- or will we have to use buffers?*/ - if (mps[i]->descr->type_num != arg_types[i] || - !PyArray_ISBEHAVED_RO(mps[i])) { + if (mps[i]->descr->type_num != arg_types[i] + || !PyArray_ISBEHAVED_RO(mps[i])) { if (loop->size < loop->bufsize || self->core_enabled) { PyObject *new; /* @@ -1276,17 +1301,18 @@ PyArrayObject *ao; /* computer for all output arguments, and set strides in "loop" */ - if (_compute_dimension_size(loop, mps, i) < 0) + if (_compute_dimension_size(loop, mps, i) < 0) { return -1; - + } ao = _trunc_coredim(mps[i], self->core_num_dims[i]); - if (ao == NULL) + if (ao == NULL) { return -1; + } /* Temporarily modify mps[i] for constructing iterator. */ mps[i] = ao; } - loop->iters[i] = (PyArrayIterObject *) \ + loop->iters[i] = (PyArrayIterObject *) PyArray_IterNew((PyObject *)mps[i]); if (loop->iters[i] == NULL) { return -1; @@ -1307,22 +1333,18 @@ */ loop->bufcnt = 0; loop->obj = 0; - /* Determine looping method needed */ loop->meth = NO_UFUNCLOOP; - if (loop->size == 0) { return nargs; } - if (self->core_enabled) { loop->meth = SIGNATURE_NOBUFFER_UFUNCLOOP; } - - for(i = 0; i < self->nargs; i++) { + for (i = 0; i < self->nargs; i++) { loop->needbuffer[i] = 0; - if (arg_types[i] != mps[i]->descr->type_num || - !PyArray_ISBEHAVED_RO(mps[i])) { + if (arg_types[i] != mps[i]->descr->type_num + || !PyArray_ISBEHAVED_RO(mps[i])) { if (self->core_enabled) { PyErr_SetString(PyExc_RuntimeError, "never reached; copy should have been made"); @@ -1331,25 +1353,24 @@ loop->meth = BUFFER_UFUNCLOOP; loop->needbuffer[i] = 1; } - if (!loop->obj && ((mps[i]->descr->type_num == PyArray_OBJECT) || - (arg_types[i] == PyArray_OBJECT))) { + if (!loop->obj + && ((mps[i]->descr->type_num == PyArray_OBJECT) + || (arg_types[i] == PyArray_OBJECT))) { loop->obj = 1; } } - if (self->core_enabled && loop->obj) { PyErr_SetString(PyExc_TypeError, "Object type not allowed in ufunc with signature"); return -1; } - if (loop->meth == NO_UFUNCLOOP) { loop->meth = ONE_UFUNCLOOP; /* All correct type and BEHAVED */ /* Check for non-uniform stridedness */ - for(i = 0; i < self->nargs; i++) { + for (i = 0; i < self->nargs; i++) { if (!(loop->iters[i]->contiguous)) { /* * May still have uniform stride @@ -1363,7 +1384,7 @@ } } if (loop->meth == ONE_UFUNCLOOP) { - for(i = 0; i < self->nargs; i++) { + for (i = 0; i < self->nargs; i++) { loop->bufptr[i] = mps[i]->data; } } @@ -1398,23 +1419,21 @@ * Thus, choose the axis for which strides of the last iterator is * smallest but non-zero. */ - - for(i = 0; i < loop->nd; i++) { + for (i = 0; i < loop->nd; i++) { stride_sum[i] = 0; - for(j = 0; j < loop->numiter; j++) { + for (j = 0; j < loop->numiter; j++) { stride_sum[i] += loop->iters[j]->strides[i]; } } ldim = loop->nd - 1; - minsum = stride_sum[loop->nd-1]; - for(i = loop->nd - 2; i >= 0; i--) { + minsum = stride_sum[loop->nd - 1]; + for (i = loop->nd - 2; i >= 0; i--) { if (stride_sum[i] < minsum ) { ldim = i; minsum = stride_sum[i]; } } - maxdim = loop->dimensions[ldim]; loop->size /= maxdim; loop->bufcnt = maxdim; @@ -1426,10 +1445,10 @@ * setting the size to 1 in that dimension * (just in the iterators) */ - for(i = 0; i < loop->numiter; i++) { + for (i = 0; i < loop->numiter; i++) { it = loop->iters[i]; it->contiguous = 0; - it->size /= (it->dims_m1[ldim]+1); + it->size /= (it->dims_m1[ldim] + 1); it->dims_m1[ldim] = 0; it->backstrides[ldim] = 0; @@ -1461,7 +1480,7 @@ if (loop->meth == BUFFER_UFUNCLOOP) { loop->leftover = maxdim % loop->bufsize; loop->ninnerloops = (maxdim / loop->bufsize) + 1; - for(i = 0; i < self->nargs; i++) { + for (i = 0; i < self->nargs; i++) { if (loop->needbuffer[i] && loop->steps[i]) { loop->steps[i] = mps[i]->descr->elsize; } @@ -1471,11 +1490,13 @@ } else if (loop->meth == ONE_UFUNCLOOP) { /* uniformly-strided case */ - for(i = 0; i < self->nargs; i++) { - if (PyArray_SIZE(mps[i]) == 1) + for (i = 0; i < self->nargs; i++) { + if (PyArray_SIZE(mps[i]) == 1) { loop->steps[i] = 0; - else - loop->steps[i] = mps[i]->strides[mps[i]->nd-1]; + } + else { + loop->steps[i] = mps[i]->strides[mps[i]->nd - 1]; + } } } @@ -1487,20 +1508,20 @@ * not copied multiple times */ if (loop->meth == BUFFER_UFUNCLOOP) { - int cnt = 0, cntcast = 0; /* keeps track of bytes to allocate */ + int cnt = 0, cntcast = 0; int scnt = 0, scntcast = 0; char *castptr; char *bufptr; - int last_was_scalar=0; - int last_cast_was_scalar=0; - int oldbufsize=0; - int oldsize=0; + int last_was_scalar = 0; + int last_cast_was_scalar = 0; + int oldbufsize = 0; + int oldsize = 0; int scbufsize = 4*sizeof(double); int memsize; PyArray_Descr *descr; /* compute the element size */ - for(i = 0; i < self->nargs; i++) { + for (i = 0; i < self->nargs; i++) { if (!loop->needbuffer[i]) { continue; } @@ -1536,11 +1557,11 @@ memsize = loop->bufsize*(cnt+cntcast) + scbufsize*(scnt+scntcast); loop->buffer[0] = PyDataMem_NEW(memsize); - /* debug + /* + * debug * fprintf(stderr, "Allocated buffer at %p of size %d, cnt=%d, cntcast=%d\n", * loop->buffer[0], loop->bufsize * (cnt + cntcast), cnt, cntcast); */ - if (loop->buffer[0] == NULL) { PyErr_NoMemory(); return -1; @@ -1551,11 +1572,11 @@ castptr = loop->buffer[0] + loop->bufsize*cnt + scbufsize*scnt; bufptr = loop->buffer[0]; loop->objfunc = 0; - for(i = 0; i < self->nargs; i++) { + for (i = 0; i < self->nargs; i++) { if (!loop->needbuffer[i]) { continue; } - loop->buffer[i] = bufptr + (last_was_scalar ? scbufsize : \ + loop->buffer[i] = bufptr + (last_was_scalar ? scbufsize : loop->bufsize)*oldbufsize; last_was_scalar = (loop->steps[i] == 0); bufptr = loop->buffer[i]; @@ -1563,7 +1584,7 @@ /* fprintf(stderr, "buffer[%d] = %p\n", i, loop->buffer[i]); */ if (loop->cast[i]) { PyArray_Descr *descr; - loop->castbuf[i] = castptr + (last_cast_was_scalar ? scbufsize : \ + loop->castbuf[i] = castptr + (last_cast_was_scalar ? scbufsize : loop->bufsize)*oldsize; last_cast_was_scalar = last_was_scalar; /* fprintf(stderr, "castbuf[%d] = %p\n", i, loop->castbuf[i]); */ @@ -1572,8 +1593,9 @@ Py_DECREF(descr); loop->bufptr[i] = loop->castbuf[i]; castptr = loop->castbuf[i]; - if (loop->steps[i]) + if (loop->steps[i]) { loop->steps[i] = oldsize; + } } else { loop->bufptr[i] = loop->buffer[i]; @@ -1597,7 +1619,9 @@ Py_XDECREF(self->ret); Py_XDECREF(self->errobj); Py_XDECREF(self->decref); - if (self->buffer) PyDataMem_FREE(self->buffer); + if (self->buffer) { + PyDataMem_FREE(self->buffer); + } Py_DECREF(self->ufunc); } _pya_free(self); @@ -1609,12 +1633,15 @@ int i; if (self->ufunc != NULL) { - if (self->core_dim_sizes) + if (self->core_dim_sizes) { _pya_free(self->core_dim_sizes); - if (self->core_strides) + } + if (self->core_strides) { _pya_free(self->core_strides); - for(i = 0; i < self->ufunc->nargs; i++) + } + for (i = 0; i < self->ufunc->nargs; i++) { Py_XDECREF(self->iters[i]); + } if (self->buffer[0]) { PyDataMem_FREE(self->buffer[0]); } @@ -1646,7 +1673,7 @@ loop->ufunc = self; Py_INCREF(self); loop->buffer[0] = NULL; - for(i = 0; i < self->nargs; i++) { + for (i = 0; i < self->nargs; i++) { loop->iters[i] = NULL; loop->cast[i] = NULL; } @@ -1658,19 +1685,19 @@ if (self->core_enabled) { int num_dim_ix = 1 + self->core_num_dim_ix; - int nstrides = self->nargs + self->core_offsets[self->nargs-1] - + self->core_num_dims[self->nargs-1]; - loop->core_dim_sizes = _pya_malloc(sizeof(npy_intp) * num_dim_ix); - loop->core_strides = _pya_malloc(sizeof(npy_intp) * nstrides); + int nstrides = self->nargs + self->core_offsets[self->nargs - 1] + + self->core_num_dims[self->nargs - 1]; + loop->core_dim_sizes = _pya_malloc(sizeof(npy_intp)*num_dim_ix); + loop->core_strides = _pya_malloc(sizeof(npy_intp)*nstrides); if (loop->core_dim_sizes == NULL || loop->core_strides == NULL) { PyErr_NoMemory(); goto fail; } memset(loop->core_strides, 0, sizeof(npy_intp) * nstrides); - for (i = 0; i < num_dim_ix; i++) + for (i = 0; i < num_dim_ix; i++) { loop->core_dim_sizes[i] = 1; + } } - name = self->name ? self->name : ""; /* @@ -1680,9 +1707,10 @@ */ if (kwds != NULL) { PyObject *key, *value; - Py_ssize_t pos=0; + Py_ssize_t pos = 0; while (PyDict_Next(kwds, &pos, &key, &value)) { char *keystring = PyString_AsString(key); + if (keystring == NULL) { PyErr_Clear(); PyErr_SetString(PyExc_TypeError, "invalid keyword"); @@ -1721,7 +1749,6 @@ if (construct_arrays(loop, args, mps, typetup) < 0) { goto fail; } - PyUFunc_clearfperr(); return loop; @@ -1794,13 +1821,12 @@ * */ -/* +/*UFUNC_API + * * This generic function is called with the ufunc object, the arguments to it, * and an array of (pointers to) PyArrayObjects which are NULL. The * arguments are parsed and placed in mps in construct_loop (construct_arrays) */ - -/*UFUNC_API*/ static int PyUFunc_GenericFunction(PyUFuncObject *self, PyObject *args, PyObject *kwds, PyArrayObject **mps) @@ -1824,298 +1850,296 @@ NPY_LOOP_BEGIN_THREADS; switch(loop->meth) { - case ONE_UFUNCLOOP: - /* - * Everything is contiguous, notswapped, aligned, - * and of the right type. -- Fastest. - * Or if not contiguous, then a single-stride - * increment moves through the entire array. - */ - /*fprintf(stderr, "ONE...%d\n", loop->size);*/ - loop->function((char **)loop->bufptr, &(loop->size), + case ONE_UFUNCLOOP: + /* + * Everything is contiguous, notswapped, aligned, + * and of the right type. -- Fastest. + * Or if not contiguous, then a single-stride + * increment moves through the entire array. + */ + /*fprintf(stderr, "ONE...%d\n", loop->size);*/ + loop->function((char **)loop->bufptr, &(loop->size), + loop->steps, loop->funcdata); + UFUNC_CHECK_ERROR(loop); + break; + case NOBUFFER_UFUNCLOOP: + /* + * Everything is notswapped, aligned and of the + * right type but not contiguous. -- Almost as fast. + */ + /*fprintf(stderr, "NOBUFFER...%d\n", loop->size);*/ + while (loop->index < loop->size) { + for (i = 0; i < self->nargs; i++) { + loop->bufptr[i] = loop->iters[i]->dataptr; + } + loop->function((char **)loop->bufptr, &(loop->bufcnt), loop->steps, loop->funcdata); UFUNC_CHECK_ERROR(loop); - break; - case NOBUFFER_UFUNCLOOP: - /* - * Everything is notswapped, aligned and of the - * right type but not contiguous. -- Almost as fast. - */ - /*fprintf(stderr, "NOBUFFER...%d\n", loop->size);*/ - - while (loop->index < loop->size) { - for(i = 0; i < self->nargs; i++) { - loop->bufptr[i] = loop->iters[i]->dataptr; - } - loop->function((char **)loop->bufptr, &(loop->bufcnt), - loop->steps, loop->funcdata); - UFUNC_CHECK_ERROR(loop); + /* Adjust loop pointers */ + for (i = 0; i < self->nargs; i++) { + PyArray_ITER_NEXT(loop->iters[i]); + } + loop->index++; + } + break; + case SIGNATURE_NOBUFFER_UFUNCLOOP: + while (loop->index < loop->size) { + for (i = 0; i < self->nargs; i++) { + loop->bufptr[i] = loop->iters[i]->dataptr; + } + loop->function((char **)loop->bufptr, loop->core_dim_sizes, + loop->core_strides, loop->funcdata); + UFUNC_CHECK_ERROR(loop); - /* Adjust loop pointers */ - for(i = 0; i < self->nargs; i++) { - PyArray_ITER_NEXT(loop->iters[i]); - } - loop->index++; + /* Adjust loop pointers */ + for (i = 0; i < self->nargs; i++) { + PyArray_ITER_NEXT(loop->iters[i]); } - break; + loop->index++; + } + break; + case BUFFER_UFUNCLOOP: { + /* This should be a function */ + PyArray_CopySwapNFunc *copyswapn[NPY_MAXARGS]; + PyArrayIterObject **iters=loop->iters; + int *swap=loop->swap; + char **dptr=loop->dptr; + int mpselsize[NPY_MAXARGS]; + intp laststrides[NPY_MAXARGS]; + int fastmemcpy[NPY_MAXARGS]; + int *needbuffer = loop->needbuffer; + intp index=loop->index, size=loop->size; + int bufsize; + intp bufcnt; + int copysizes[NPY_MAXARGS]; + char **bufptr = loop->bufptr; + char **buffer = loop->buffer; + char **castbuf = loop->castbuf; + intp *steps = loop->steps; + char *tptr[NPY_MAXARGS]; + int ninnerloops = loop->ninnerloops; + Bool pyobject[NPY_MAXARGS]; + int datasize[NPY_MAXARGS]; + int j, k, stopcondition; + char *myptr1, *myptr2; - case SIGNATURE_NOBUFFER_UFUNCLOOP: - while (loop->index < loop->size) { - for(i = 0; i < self->nargs; i++) { - loop->bufptr[i] = loop->iters[i]->dataptr; - } - loop->function((char **)loop->bufptr, loop->core_dim_sizes, - loop->core_strides, loop->funcdata); - UFUNC_CHECK_ERROR(loop); + for (i = 0; i nargs; i++) { + copyswapn[i] = mps[i]->descr->f->copyswapn; + mpselsize[i] = mps[i]->descr->elsize; + pyobject[i] = (loop->obj + && (mps[i]->descr->type_num == PyArray_OBJECT)); + laststrides[i] = iters[i]->strides[loop->lastdim]; + if (steps[i] && laststrides[i] != mpselsize[i]) { + fastmemcpy[i] = 0; + } + else { + fastmemcpy[i] = 1; + } + } + /* Do generic buffered looping here (works for any kind of + * arrays -- some need buffers, some don't. + * + * + * New algorithm: N is the largest dimension. B is the buffer-size. + * quotient is loop->ninnerloops-1 + * remainder is loop->leftover + * + * Compute N = quotient * B + remainder. + * quotient = N / B # integer math + * (store quotient + 1) as the number of innerloops + * remainder = N % B # integer remainder + * + * On the inner-dimension we will have (quotient + 1) loops where + * the size of the inner function is B for all but the last when the niter size is + * remainder. + * + * So, the code looks very similar to NOBUFFER_LOOP except the inner-most loop is + * replaced with... + * + * for(i=0; inargs; i++) { - PyArray_ITER_NEXT(loop->iters[i]); - } - loop->index++; - } - break; - case BUFFER_UFUNCLOOP: { - PyArray_CopySwapNFunc *copyswapn[NPY_MAXARGS]; - PyArrayIterObject **iters=loop->iters; - int *swap=loop->swap; - char **dptr=loop->dptr; - int mpselsize[NPY_MAXARGS]; - intp laststrides[NPY_MAXARGS]; - int fastmemcpy[NPY_MAXARGS]; - int *needbuffer=loop->needbuffer; - intp index=loop->index, size=loop->size; - int bufsize; - intp bufcnt; - int copysizes[NPY_MAXARGS]; - char **bufptr = loop->bufptr; - char **buffer = loop->buffer; - char **castbuf = loop->castbuf; - intp *steps = loop->steps; - char *tptr[NPY_MAXARGS]; - int ninnerloops = loop->ninnerloops; - Bool pyobject[NPY_MAXARGS]; - int datasize[NPY_MAXARGS]; - int j, k, stopcondition; - char *myptr1, *myptr2; + /* + * fprintf(stderr, "BUFFER...%d,%d,%d\n", loop->size, + * loop->ninnerloops, loop->leftover); + */ + /* + * for(i=0; inargs; i++) { + * fprintf(stderr, "iters[%d]->dataptr = %p, %p of size %d\n", i, + * iters[i], iters[i]->ao->data, PyArray_NBYTES(iters[i]->ao)); + * } + */ + stopcondition = ninnerloops; + if (loop->leftover == 0) { + stopcondition--; + } + while (index < size) { + bufsize=loop->bufsize; + for(i = 0; inargs; i++) { + tptr[i] = loop->iters[i]->dataptr; + if (needbuffer[i]) { + dptr[i] = bufptr[i]; + datasize[i] = (steps[i] ? bufsize : 1); + copysizes[i] = datasize[i] * mpselsize[i]; + } + else { + dptr[i] = tptr[i]; + } + } - for(i = 0; i nargs; i++) { - copyswapn[i] = mps[i]->descr->f->copyswapn; - mpselsize[i] = mps[i]->descr->elsize; - pyobject[i] = (loop->obj && \ - (mps[i]->descr->type_num == PyArray_OBJECT)); - laststrides[i] = iters[i]->strides[loop->lastdim]; - if (steps[i] && laststrides[i] != mpselsize[i]) { - fastmemcpy[i] = 0; - } - else { - fastmemcpy[i] = 1; - } - } - /* Do generic buffered looping here (works for any kind of - * arrays -- some need buffers, some don't. - * - * - * New algorithm: N is the largest dimension. B is the buffer-size. - * quotient is loop->ninnerloops-1 - * remainder is loop->leftover - * - * Compute N = quotient * B + remainder. - * quotient = N / B # integer math - * (store quotient + 1) as the number of innerloops - * remainder = N % B # integer remainder - * - * On the inner-dimension we will have (quotient + 1) loops where - * the size of the inner function is B for all but the last when the niter size is - * remainder. - * - * So, the code looks very similar to NOBUFFER_LOOP except the inner-most loop is - * replaced with... - * - * for(i=0; ileftover; + for (i=0; inargs;i++) { + if (!needbuffer[i]) { + continue; + } + datasize[i] = (steps[i] ? bufsize : 1); + copysizes[i] = datasize[i] * mpselsize[i]; + } + } + for (i = 0; i < self->nin; i++) { + if (!needbuffer[i]) { + continue; + } + if (fastmemcpy[i]) { + memcpy(buffer[i], tptr[i], copysizes[i]); + } + else { + myptr1 = buffer[i]; + myptr2 = tptr[i]; + for (j = 0; j < bufsize; j++) { + memcpy(myptr1, myptr2, mpselsize[i]); + myptr1 += mpselsize[i]; + myptr2 += laststrides[i]; + } + } + /* swap the buffer if necessary */ + if (swap[i]) { + /* fprintf(stderr, "swapping...\n");*/ + copyswapn[i](buffer[i], mpselsize[i], NULL, -1, + (intp) datasize[i], 1, + mps[i]); + } + /* cast to the other buffer if necessary */ + if (loop->cast[i]) { + /* fprintf(stderr, "casting... %d, %p %p\n", i, buffer[i]); */ + loop->cast[i](buffer[i], castbuf[i], + (intp) datasize[i], + NULL, NULL); + } + } - /* - * fprintf(stderr, "BUFFER...%d,%d,%d\n", loop->size, - * loop->ninnerloops, loop->leftover); - */ - /* - * for(i=0; inargs; i++) { - * fprintf(stderr, "iters[%d]->dataptr = %p, %p of size %d\n", i, - * iters[i], iters[i]->ao->data, PyArray_NBYTES(iters[i]->ao)); - * } - */ - stopcondition = ninnerloops; - if (loop->leftover == 0) stopcondition--; - while (index < size) { - bufsize=loop->bufsize; - for(i = 0; inargs; i++) { - tptr[i] = loop->iters[i]->dataptr; - if (needbuffer[i]) { - dptr[i] = bufptr[i]; - datasize[i] = (steps[i] ? bufsize : 1); - copysizes[i] = datasize[i] * mpselsize[i]; - } - else { - dptr[i] = tptr[i]; - } - } + bufcnt = (intp) bufsize; + loop->function((char **)dptr, &bufcnt, steps, loop->funcdata); + UFUNC_CHECK_ERROR(loop); - /* This is the inner function over the last dimension */ - for(k = 1; k<=stopcondition; k++) { - if (k == ninnerloops) { - bufsize = loop->leftover; - for(i=0; inargs;i++) { - if (!needbuffer[i]) { - continue; - } - datasize[i] = (steps[i] ? bufsize : 1); - copysizes[i] = datasize[i] * mpselsize[i]; - } - } - for(i = 0; i < self->nin; i++) { - if (!needbuffer[i]) { - continue; - } - if (fastmemcpy[i]) { - memcpy(buffer[i], tptr[i], copysizes[i]); - } - else { - myptr1 = buffer[i]; - myptr2 = tptr[i]; - for(j = 0; j < bufsize; j++) { - memcpy(myptr1, myptr2, mpselsize[i]); - myptr1 += mpselsize[i]; - myptr2 += laststrides[i]; - } - } + for (i = self->nin; i < self->nargs; i++) { + if (!needbuffer[i]) { + continue; + } + if (loop->cast[i]) { + /* fprintf(stderr, "casting back... %d, %p", i, castbuf[i]); */ + loop->cast[i](castbuf[i], + buffer[i], + (intp) datasize[i], + NULL, NULL); + } + if (swap[i]) { + copyswapn[i](buffer[i], mpselsize[i], NULL, -1, + (intp) datasize[i], 1, + mps[i]); + } + /* + * copy back to output arrays + * decref what's already there for object arrays + */ + if (pyobject[i]) { + myptr1 = tptr[i]; + for (j = 0; j < datasize[i]; j++) { + Py_XDECREF(*((PyObject **)myptr1)); + myptr1 += laststrides[i]; + } + } + if (fastmemcpy[i]) { + memcpy(tptr[i], buffer[i], copysizes[i]); + } + else { + myptr2 = buffer[i]; + myptr1 = tptr[i]; + for (j = 0; j < bufsize; j++) { + memcpy(myptr1, myptr2, mpselsize[i]); + myptr1 += laststrides[i]; + myptr2 += mpselsize[i]; + } + } + } + if (k == stopcondition) { + continue; + } + for (i = 0; i < self->nargs; i++) { + tptr[i] += bufsize * laststrides[i]; + if (!needbuffer[i]) { + dptr[i] = tptr[i]; + } + } + } + /* end inner function over last dimension */ - /* swap the buffer if necessary */ - if (swap[i]) { - /* fprintf(stderr, "swapping...\n");*/ - copyswapn[i](buffer[i], mpselsize[i], NULL, -1, - (intp) datasize[i], 1, - mps[i]); - } - /* cast to the other buffer if necessary */ - if (loop->cast[i]) { - /* fprintf(stderr, "casting... %d, %p %p\n", i, buffer[i]); */ - loop->cast[i](buffer[i], castbuf[i], - (intp) datasize[i], - NULL, NULL); - } - } + if (loop->objfunc) { + /* + * DECREF castbuf when underlying function used + * object arrays and casting was needed to get + * to object arrays + */ + for (i = 0; i < self->nargs; i++) { + if (loop->cast[i]) { + if (steps[i] == 0) { + Py_XDECREF(*((PyObject **)castbuf[i])); + } + else { + int size = loop->bufsize; - bufcnt = (intp) bufsize; - loop->function((char **)dptr, &bufcnt, steps, loop->funcdata); - UFUNC_CHECK_ERROR(loop); + PyObject **objptr = (PyObject **)castbuf[i]; + /* + * size is loop->bufsize unless there + * was only one loop + */ + if (ninnerloops == 1) { + size = loop->leftover; + } + for (j = 0; j < size; j++) { + Py_XDECREF(*objptr); + *objptr = NULL; + objptr += 1; + } + } + } + } + } + /* fixme -- probably not needed here*/ + UFUNC_CHECK_ERROR(loop); - for(i=self->nin; inargs; i++) { - if (!needbuffer[i]) { - continue; - } - if (loop->cast[i]) { - /* fprintf(stderr, "casting back... %d, %p", i, castbuf[i]); */ - loop->cast[i](castbuf[i], - buffer[i], - (intp) datasize[i], - NULL, NULL); - } - if (swap[i]) { - copyswapn[i](buffer[i], mpselsize[i], NULL, -1, - (intp) datasize[i], 1, - mps[i]); - } - /* - * copy back to output arrays - * decref what's already there for object arrays - */ - if (pyobject[i]) { - myptr1 = tptr[i]; - for(j = 0; j < datasize[i]; j++) { - Py_XDECREF(*((PyObject **)myptr1)); - myptr1 += laststrides[i]; - } - } - if (fastmemcpy[i]) - memcpy(tptr[i], buffer[i], copysizes[i]); - else { - myptr2 = buffer[i]; - myptr1 = tptr[i]; - for(j = 0; j < bufsize; j++) { - memcpy(myptr1, myptr2, - mpselsize[i]); - myptr1 += laststrides[i]; - myptr2 += mpselsize[i]; - } - } - } - if (k == stopcondition) { - continue; - } - for(i = 0; i < self->nargs; i++) { - tptr[i] += bufsize * laststrides[i]; - if (!needbuffer[i]) { - dptr[i] = tptr[i]; - } - } - } - /* end inner function over last dimension */ - - if (loop->objfunc) { - /* - * DECREF castbuf when underlying function used - * object arrays and casting was needed to get - * to object arrays - */ - for(i = 0; i < self->nargs; i++) { - if (loop->cast[i]) { - if (steps[i] == 0) { - Py_XDECREF(*((PyObject **)castbuf[i])); - } - else { - int size = loop->bufsize; - - PyObject **objptr = (PyObject **)castbuf[i]; - /* - * size is loop->bufsize unless there - * was only one loop - */ - if (ninnerloops == 1) { - size = loop->leftover; - } - for(j = 0; j < size; j++) { - Py_XDECREF(*objptr); - *objptr = NULL; - objptr += 1; - } - } - } - } - - } - /* fixme -- probably not needed here*/ - UFUNC_CHECK_ERROR(loop); - - for(i=0; inargs; i++) { - PyArray_ITER_NEXT(loop->iters[i]); - } - index++; - } - } + for (i = 0; i < self->nargs; i++) { + PyArray_ITER_NEXT(loop->iters[i]); + } + index++; + } + } /* end of last case statement */ } NPY_LOOP_END_THREADS; @@ -2124,7 +2148,9 @@ fail: NPY_LOOP_END_THREADS; - if (loop) ufuncloop_dealloc(loop); + if (loop) { + ufuncloop_dealloc(loop); + } return -1; } @@ -2162,8 +2188,8 @@ maxsize = PyArray_SIZE(*arr); if (maxsize < loop->bufsize) { - if (!(PyArray_ISBEHAVED_RO(*arr)) || - PyArray_TYPE(*arr) != rtype) { + if (!(PyArray_ISBEHAVED_RO(*arr)) + || PyArray_TYPE(*arr) != rtype) { ntype = PyArray_DescrFromType(rtype); new = PyArray_FromAny((PyObject *)(*arr), ntype, 0, 0, @@ -2176,14 +2202,14 @@ } } - /* Don't decref *arr before re-assigning - because it was not going to be DECREF'd anyway. - - If a copy is made, then the copy will be removed - on deallocation of the loop structure by setting - loop->decref. - */ - + /* + * Don't decref *arr before re-assigning + * because it was not going to be DECREF'd anyway. + * + * If a copy is made, then the copy will be removed + * on deallocation of the loop structure by setting + * loop->decref. + */ return 0; } @@ -2194,30 +2220,29 @@ PyUFuncReduceObject *loop; PyArrayObject *idarr; PyArrayObject *aar; - intp loop_i[MAX_DIMS], outsize=0; + intp loop_i[MAX_DIMS], outsize = 0; int arg_types[3]; PyArray_SCALARKIND scalars[3] = {PyArray_NOSCALAR, PyArray_NOSCALAR, PyArray_NOSCALAR}; int i, j, nd; int flags; - /* Reduce type is the type requested of the input - during reduction */ + /* Reduce type is the type requested of the input during reduction */ if (self->core_enabled) { PyErr_Format(PyExc_RuntimeError, "construct_reduce not allowed on ufunc with signature"); return NULL; } - nd = (*arr)->nd; arg_types[0] = otype; arg_types[1] = otype; arg_types[2] = otype; - if ((loop = _pya_malloc(sizeof(PyUFuncReduceObject)))==NULL) { - PyErr_NoMemory(); return loop; + if ((loop = _pya_malloc(sizeof(PyUFuncReduceObject))) == NULL) { + PyErr_NoMemory(); + return loop; } - loop->retbase=0; + loop->retbase = 0; loop->swap = 0; loop->index = 0; loop->ufunc = self; @@ -2229,39 +2254,43 @@ loop->rit = NULL; loop->errobj = NULL; loop->first = 1; - loop->decref=NULL; + loop->decref = NULL; loop->N = (*arr)->dimensions[axis]; loop->instrides = (*arr)->strides[axis]; - if (select_types(loop->ufunc, arg_types, &(loop->function), - &(loop->funcdata), scalars, NULL) == -1) goto fail; - - /* output type may change -- if it does - reduction is forced into that type - and we need to select the reduction function again - */ + &(loop->funcdata), scalars, NULL) == -1) { + goto fail; + } + /* + * output type may change -- if it does + * reduction is forced into that type + * and we need to select the reduction function again + */ if (otype != arg_types[2]) { otype = arg_types[2]; arg_types[0] = otype; arg_types[1] = otype; if (select_types(loop->ufunc, arg_types, &(loop->function), - &(loop->funcdata), scalars, NULL) == -1) + &(loop->funcdata), scalars, NULL) == -1) { goto fail; + } } /* get looping parameters from Python */ if (PyUFunc_GetPyValues(str, &(loop->bufsize), &(loop->errormask), - &(loop->errobj)) < 0) goto fail; - + &(loop->errobj)) < 0) { + goto fail; + } /* Make copy if misbehaved or not otype for small arrays */ - if (_create_reduce_copy(loop, arr, otype) < 0) goto fail; + if (_create_reduce_copy(loop, arr, otype) < 0) { + goto fail; + } aar = *arr; if (loop->N == 0) { loop->meth = ZERO_EL_REDUCELOOP; } - else if (PyArray_ISBEHAVED_RO(aar) && \ - otype == (aar)->descr->type_num) { + else if (PyArray_ISBEHAVED_RO(aar) && otype == (aar)->descr->type_num) { if (loop->N == 1) { loop->meth = ONE_EL_REDUCELOOP; } @@ -2277,14 +2306,17 @@ } /* Determine if object arrays are involved */ - if (otype == PyArray_OBJECT || aar->descr->type_num == PyArray_OBJECT) + if (otype == PyArray_OBJECT || aar->descr->type_num == PyArray_OBJECT) { loop->obj = 1; - else + } + else { loop->obj = 0; - + } if (loop->meth == ZERO_EL_REDUCELOOP) { idarr = _getidentity(self, otype, str); - if (idarr == NULL) goto fail; + if (idarr == NULL) { + goto fail; + } if (idarr->descr->elsize > UFUNC_MAXIDENTITY) { PyErr_Format(PyExc_RuntimeError, "UFUNC_MAXIDENTITY (%d)" \ @@ -2301,24 +2333,24 @@ flags = NPY_CARRAY | NPY_UPDATEIFCOPY | NPY_FORCECAST; switch(operation) { case UFUNC_REDUCE: - for(j=0, i=0; idimensions[i]; - + } } if (out == NULL) { - loop->ret = (PyArrayObject *) \ + loop->ret = (PyArrayObject *) PyArray_New(aar->ob_type, aar->nd-1, loop_i, otype, NULL, NULL, 0, 0, (PyObject *)aar); } else { - outsize = PyArray_MultiplyList(loop_i, aar->nd-1); + outsize = PyArray_MultiplyList(loop_i, aar->nd - 1); } break; case UFUNC_ACCUMULATE: if (out == NULL) { - loop->ret = (PyArrayObject *) \ + loop->ret = (PyArrayObject *) PyArray_New(aar->ob_type, aar->nd, aar->dimensions, otype, NULL, NULL, 0, 0, (PyObject *)aar); } @@ -2331,7 +2363,7 @@ /* Index is 1-d array */ loop_i[axis] = ind_size; if (out == NULL) { - loop->ret = (PyArrayObject *) \ + loop->ret = (PyArrayObject *) PyArray_New(aar->ob_type, aar->nd, loop_i, otype, NULL, NULL, 0, 0, (PyObject *)aar); } @@ -2342,8 +2374,9 @@ loop->meth = ZERO_EL_REDUCELOOP; return loop; } - if (loop->meth == ONE_EL_REDUCELOOP) + if (loop->meth == ONE_EL_REDUCELOOP) { loop->meth = NOBUFFER_REDUCELOOP; + } break; } if (out) { @@ -2352,14 +2385,15 @@ "wrong shape for output"); goto fail; } - loop->ret = (PyArrayObject *) \ - PyArray_FromArray(out, PyArray_DescrFromType(otype), - flags); + loop->ret = (PyArrayObject *) + PyArray_FromArray(out, PyArray_DescrFromType(otype), flags); if (loop->ret && loop->ret != out) { loop->retbase = 1; } } - if (loop->ret == NULL) goto fail; + if (loop->ret == NULL) { + goto fail; + } loop->insize = aar->descr->elsize; loop->outsize = loop->ret->descr->elsize; loop->bufptr[0] = loop->ret->data; @@ -2370,74 +2404,82 @@ } loop->it = (PyArrayIterObject *)PyArray_IterNew((PyObject *)aar); - if (loop->it == NULL) return NULL; - + if (loop->it == NULL) { + return NULL; + } if (loop->meth == ONE_EL_REDUCELOOP) { loop->size = loop->it->size; return loop; } - /* Fix iterator to loop over correct dimension */ - /* Set size in axis dimension to 1 */ - + /* + * Fix iterator to loop over correct dimension + * Set size in axis dimension to 1 + */ loop->it->contiguous = 0; loop->it->size /= (loop->it->dims_m1[axis]+1); loop->it->dims_m1[axis] = 0; loop->it->backstrides[axis] = 0; - - loop->size = loop->it->size; - if (operation == UFUNC_REDUCE) { loop->steps[0] = 0; } else { loop->rit = (PyArrayIterObject *) \ PyArray_IterNew((PyObject *)(loop->ret)); - if (loop->rit == NULL) return NULL; - - /* Fix iterator to loop over correct dimension */ - /* Set size in axis dimension to 1 */ - + if (loop->rit == NULL) { + return NULL; + } + /* + * Fix iterator to loop over correct dimension + * Set size in axis dimension to 1 + */ loop->rit->contiguous = 0; - loop->rit->size /= (loop->rit->dims_m1[axis]+1); + loop->rit->size /= (loop->rit->dims_m1[axis] + 1); loop->rit->dims_m1[axis] = 0; loop->rit->backstrides[axis] = 0; - if (operation == UFUNC_ACCUMULATE) + if (operation == UFUNC_ACCUMULATE) { loop->steps[0] = loop->ret->strides[axis]; - else + } + else { loop->steps[0] = 0; + } } loop->steps[2] = loop->steps[0]; loop->bufptr[2] = loop->bufptr[0] + loop->steps[2]; - - if (loop->meth == BUFFER_UFUNCLOOP) { int _size; + loop->steps[1] = loop->outsize; if (otype != aar->descr->type_num) { - _size=loop->bufsize*(loop->outsize + \ - aar->descr->elsize); + _size=loop->bufsize*(loop->outsize + aar->descr->elsize); loop->buffer = PyDataMem_NEW(_size); - if (loop->buffer == NULL) goto fail; - if (loop->obj) memset(loop->buffer, 0, _size); - loop->castbuf = loop->buffer + \ - loop->bufsize*aar->descr->elsize; + if (loop->buffer == NULL) { + goto fail; + } + if (loop->obj) { + memset(loop->buffer, 0, _size); + } + loop->castbuf = loop->buffer + loop->bufsize*aar->descr->elsize; loop->bufptr[1] = loop->castbuf; loop->cast = PyArray_GetCastFunc(aar->descr, otype); - if (loop->cast == NULL) goto fail; + if (loop->cast == NULL) { + goto fail; + } } else { _size = loop->bufsize * loop->outsize; loop->buffer = PyDataMem_NEW(_size); - if (loop->buffer == NULL) goto fail; - if (loop->obj) memset(loop->buffer, 0, _size); + if (loop->buffer == NULL) { + goto fail; + } + if (loop->obj) { + memset(loop->buffer, 0, _size); + } loop->bufptr[1] = loop->buffer; } } - - PyUFunc_clearfperr(); return loop; @@ -2447,19 +2489,18 @@ } -/* We have two basic kinds of loops */ -/* One is used when arr is not-swapped and aligned and output type - is the same as input type. - and another using buffers when one of these is not satisfied. - - Zero-length and one-length axes-to-be-reduced are handled separately. -*/ - - static PyObject * +/* + * We have two basic kinds of loops. One is used when arr is not-swapped + * and aligned and output type is the same as input type. The other uses + * buffers when one of these is not satisfied. + * + * Zero-length and one-length axes-to-be-reduced are handled separately. + */ +static PyObject * PyUFunc_Reduce(PyUFuncObject *self, PyArrayObject *arr, PyArrayObject *out, int axis, int otype) { - PyArrayObject *ret=NULL; + PyArrayObject *ret = NULL; PyUFuncReduceObject *loop; intp i, n; char *dptr; @@ -2468,133 +2509,130 @@ /* Construct loop object */ loop = construct_reduce(self, &arr, out, axis, otype, UFUNC_REDUCE, 0, "reduce"); - if (!loop) return NULL; + if (!loop) { + return NULL; + } NPY_LOOP_BEGIN_THREADS; - switch(loop->meth) { - case ZERO_EL_REDUCELOOP: - /* fprintf(stderr, "ZERO..%d\n", loop->size); */ - for(i=0; isize; i++) { - if (loop->obj) Py_INCREF(*((PyObject **)loop->idptr)); - memmove(loop->bufptr[0], loop->idptr, loop->outsize); - loop->bufptr[0] += loop->outsize; + switch(loop->meth) { + case ZERO_EL_REDUCELOOP: + /* fprintf(stderr, "ZERO..%d\n", loop->size); */ + for (i = 0; i < loop->size; i++) { + if (loop->obj) { + Py_INCREF(*((PyObject **)loop->idptr)); } - break; - case ONE_EL_REDUCELOOP: - /*fprintf(stderr, "ONEDIM..%d\n", loop->size); */ - while(loop->index < loop->size) { - if (loop->obj) - Py_INCREF(*((PyObject **)loop->it->dataptr)); - memmove(loop->bufptr[0], loop->it->dataptr, - loop->outsize); - PyArray_ITER_NEXT(loop->it); - loop->bufptr[0] += loop->outsize; - loop->index++; + memmove(loop->bufptr[0], loop->idptr, loop->outsize); + loop->bufptr[0] += loop->outsize; + } + break; + case ONE_EL_REDUCELOOP: + /*fprintf(stderr, "ONEDIM..%d\n", loop->size); */ + while (loop->index < loop->size) { + if (loop->obj) { + Py_INCREF(*((PyObject **)loop->it->dataptr)); } - break; - case NOBUFFER_UFUNCLOOP: - /*fprintf(stderr, "NOBUFFER..%d\n", loop->size); */ - while(loop->index < loop->size) { - /* Copy first element to output */ - if (loop->obj) - Py_INCREF(*((PyObject **)loop->it->dataptr)); - memmove(loop->bufptr[0], loop->it->dataptr, - loop->outsize); - /* Adjust input pointer */ - loop->bufptr[1] = loop->it->dataptr+loop->steps[1]; - loop->function((char **)loop->bufptr, - &(loop->N), - loop->steps, loop->funcdata); - UFUNC_CHECK_ERROR(loop); - - PyArray_ITER_NEXT(loop->it) - loop->bufptr[0] += loop->outsize; - loop->bufptr[2] = loop->bufptr[0]; - loop->index++; + memmove(loop->bufptr[0], loop->it->dataptr, loop->outsize); + PyArray_ITER_NEXT(loop->it); + loop->bufptr[0] += loop->outsize; + loop->index++; + } + break; + case NOBUFFER_UFUNCLOOP: + /*fprintf(stderr, "NOBUFFER..%d\n", loop->size); */ + while (loop->index < loop->size) { + /* Copy first element to output */ + if (loop->obj) { + Py_INCREF(*((PyObject **)loop->it->dataptr)); } - break; - case BUFFER_UFUNCLOOP: - /* use buffer for arr */ - /* - For each row to reduce - 1. copy first item over to output (casting if necessary) - 2. Fill inner buffer - 3. When buffer is filled or end of row - a. Cast input buffers if needed - b. Call inner function. - 4. Repeat 2 until row is done. - */ - /* fprintf(stderr, "BUFFERED..%d %d\n", loop->size, - loop->swap); */ - while(loop->index < loop->size) { - loop->inptr = loop->it->dataptr; - /* Copy (cast) First term over to output */ - if (loop->cast) { - /* A little tricky because we need to - cast it first */ - arr->descr->f->copyswap(loop->buffer, - loop->inptr, - loop->swap, - NULL); - loop->cast(loop->buffer, loop->castbuf, - 1, NULL, NULL); - if (loop->obj) { - Py_XINCREF(*((PyObject **)loop->castbuf)); + memmove(loop->bufptr[0], loop->it->dataptr, loop->outsize); + /* Adjust input pointer */ + loop->bufptr[1] = loop->it->dataptr+loop->steps[1]; + loop->function((char **)loop->bufptr, &(loop->N), + loop->steps, loop->funcdata); + UFUNC_CHECK_ERROR(loop); + PyArray_ITER_NEXT(loop->it); + loop->bufptr[0] += loop->outsize; + loop->bufptr[2] = loop->bufptr[0]; + loop->index++; + } + break; + case BUFFER_UFUNCLOOP: + /* + * use buffer for arr + * + * For each row to reduce + * 1. copy first item over to output (casting if necessary) + * 2. Fill inner buffer + * 3. When buffer is filled or end of row + * a. Cast input buffers if needed + * b. Call inner function. + * 4. Repeat 2 until row is done. + */ + /* fprintf(stderr, "BUFFERED..%d %d\n", loop->size, loop->swap); */ + while(loop->index < loop->size) { + loop->inptr = loop->it->dataptr; + /* Copy (cast) First term over to output */ + if (loop->cast) { + /* A little tricky because we need to cast it first */ + arr->descr->f->copyswap(loop->buffer, loop->inptr, + loop->swap, NULL); + loop->cast(loop->buffer, loop->castbuf, 1, NULL, NULL); + if (loop->obj) { + Py_XINCREF(*((PyObject **)loop->castbuf)); + } + memcpy(loop->bufptr[0], loop->castbuf, loop->outsize); + } + else { + /* Simple copy */ + arr->descr->f->copyswap(loop->bufptr[0], loop->inptr, + loop->swap, NULL); + } + loop->inptr += loop->instrides; + n = 1; + while(n < loop->N) { + /* Copy up to loop->bufsize elements to buffer */ + dptr = loop->buffer; + for (i = 0; i < loop->bufsize; i++, n++) { + if (n == loop->N) { + break; } - memcpy(loop->bufptr[0], loop->castbuf, - loop->outsize); + arr->descr->f->copyswap(dptr, loop->inptr, + loop->swap, NULL); + loop->inptr += loop->instrides; + dptr += loop->insize; } - else { /* Simple copy */ - arr->descr->f->copyswap(loop->bufptr[0], - loop->inptr, - loop->swap, NULL); + if (loop->cast) { + loop->cast(loop->buffer, loop->castbuf, i, NULL, NULL); } - loop->inptr += loop->instrides; - n = 1; - while(n < loop->N) { - /* Copy up to loop->bufsize elements to - buffer */ - dptr = loop->buffer; - for(i=0; ibufsize; i++, n++) { - if (n == loop->N) break; - arr->descr->f->copyswap(dptr, - loop->inptr, - loop->swap, - NULL); - loop->inptr += loop->instrides; - dptr += loop->insize; - } - if (loop->cast) - loop->cast(loop->buffer, - loop->castbuf, - i, NULL, NULL); - loop->function((char **)loop->bufptr, - &i, - loop->steps, loop->funcdata); - loop->bufptr[0] += loop->steps[0]*i; - loop->bufptr[2] += loop->steps[2]*i; - UFUNC_CHECK_ERROR(loop); - } - PyArray_ITER_NEXT(loop->it); - loop->bufptr[0] += loop->outsize; - loop->bufptr[2] = loop->bufptr[0]; - loop->index++; + loop->function((char **)loop->bufptr, &i, + loop->steps, loop->funcdata); + loop->bufptr[0] += loop->steps[0]*i; + loop->bufptr[2] += loop->steps[2]*i; + UFUNC_CHECK_ERROR(loop); } + PyArray_ITER_NEXT(loop->it); + loop->bufptr[0] += loop->outsize; + loop->bufptr[2] = loop->bufptr[0]; + loop->index++; } + } + NPY_LOOP_END_THREADS; + /* Hang on to this reference -- will be decref'd with loop */ + if (loop->retbase) { + ret = (PyArrayObject *)loop->ret->base; + } + else { + ret = loop->ret; + } + Py_INCREF(ret); + ufuncreduce_dealloc(loop); + return (PyObject *)ret; - NPY_LOOP_END_THREADS; - - /* Hang on to this reference -- will be decref'd with loop */ - if (loop->retbase) ret = (PyArrayObject *)loop->ret->base; - else ret = loop->ret; - Py_INCREF(ret); - ufuncreduce_dealloc(loop); - return (PyObject *)ret; - fail: NPY_LOOP_END_THREADS; - - if (loop) ufuncreduce_dealloc(loop); + if (loop) { + ufuncreduce_dealloc(loop); + } return NULL; } @@ -2603,55 +2641,59 @@ PyUFunc_Accumulate(PyUFuncObject *self, PyArrayObject *arr, PyArrayObject *out, int axis, int otype) { - PyArrayObject *ret=NULL; + PyArrayObject *ret = NULL; PyUFuncReduceObject *loop; intp i, n; char *dptr; NPY_BEGIN_THREADS_DEF; - /* Construct loop object */ - loop = construct_reduce(self, &arr, out, axis, otype, UFUNC_ACCUMULATE, 0, - "accumulate"); - if (!loop) return NULL; + /* Construct loop object */ + loop = construct_reduce(self, &arr, out, axis, otype, + UFUNC_ACCUMULATE, 0, "accumulate"); + if (!loop) { + return NULL; + } NPY_LOOP_BEGIN_THREADS; switch(loop->meth) { - case ZERO_EL_REDUCELOOP: /* Accumulate */ + case ZERO_EL_REDUCELOOP: + /* Accumulate */ /* fprintf(stderr, "ZERO..%d\n", loop->size); */ - for(i=0; isize; i++) { - if (loop->obj) + for (i = 0; i < loop->size; i++) { + if (loop->obj) { Py_INCREF(*((PyObject **)loop->idptr)); + } memcpy(loop->bufptr[0], loop->idptr, loop->outsize); loop->bufptr[0] += loop->outsize; } break; - case ONE_EL_REDUCELOOP: /* Accumulate */ + case ONE_EL_REDUCELOOP: + /* Accumulate */ /* fprintf(stderr, "ONEDIM..%d\n", loop->size); */ - while(loop->index < loop->size) { - if (loop->obj) + while (loop->index < loop->size) { + if (loop->obj) { Py_INCREF(*((PyObject **)loop->it->dataptr)); - memmove(loop->bufptr[0], loop->it->dataptr, - loop->outsize); + } + memmove(loop->bufptr[0], loop->it->dataptr, loop->outsize); PyArray_ITER_NEXT(loop->it); loop->bufptr[0] += loop->outsize; loop->index++; } break; - case NOBUFFER_UFUNCLOOP: /* Accumulate */ + case NOBUFFER_UFUNCLOOP: + /* Accumulate */ /* fprintf(stderr, "NOBUFFER..%d\n", loop->size); */ - while(loop->index < loop->size) { + while (loop->index < loop->size) { /* Copy first element to output */ - if (loop->obj) + if (loop->obj) { Py_INCREF(*((PyObject **)loop->it->dataptr)); - memmove(loop->bufptr[0], loop->it->dataptr, - loop->outsize); + } + memmove(loop->bufptr[0], loop->it->dataptr, loop->outsize); /* Adjust input pointer */ - loop->bufptr[1] = loop->it->dataptr+loop->steps[1]; - loop->function((char **)loop->bufptr, - &(loop->N), + loop->bufptr[1] = loop->it->dataptr + loop->steps[1]; + loop->function((char **)loop->bufptr, &(loop->N), loop->steps, loop->funcdata); UFUNC_CHECK_ERROR(loop); - PyArray_ITER_NEXT(loop->it); PyArray_ITER_NEXT(loop->rit); loop->bufptr[0] = loop->rit->dataptr; @@ -2659,64 +2701,57 @@ loop->index++; } break; - case BUFFER_UFUNCLOOP: /* Accumulate */ - /* use buffer for arr */ - /* - For each row to reduce - 1. copy identity over to output (casting if necessary) - 2. Fill inner buffer - 3. When buffer is filled or end of row - a. Cast input buffers if needed - b. Call inner function. - 4. Repeat 2 until row is done. - */ - /* fprintf(stderr, "BUFFERED..%d %p\n", loop->size, - loop->cast); */ - while(loop->index < loop->size) { + case BUFFER_UFUNCLOOP: + /* Accumulate + * + * use buffer for arr + * + * For each row to reduce + * 1. copy identity over to output (casting if necessary) + * 2. Fill inner buffer + * 3. When buffer is filled or end of row + * a. Cast input buffers if needed + * b. Call inner function. + * 4. Repeat 2 until row is done. + */ + /* fprintf(stderr, "BUFFERED..%d %p\n", loop->size, loop->cast); */ + while (loop->index < loop->size) { loop->inptr = loop->it->dataptr; /* Copy (cast) First term over to output */ if (loop->cast) { /* A little tricky because we need to cast it first */ - arr->descr->f->copyswap(loop->buffer, - loop->inptr, - loop->swap, - NULL); - loop->cast(loop->buffer, loop->castbuf, - 1, NULL, NULL); + arr->descr->f->copyswap(loop->buffer, loop->inptr, + loop->swap, NULL); + loop->cast(loop->buffer, loop->castbuf, 1, NULL, NULL); if (loop->obj) { Py_XINCREF(*((PyObject **)loop->castbuf)); } - memcpy(loop->bufptr[0], loop->castbuf, - loop->outsize); + memcpy(loop->bufptr[0], loop->castbuf, loop->outsize); } - else { /* Simple copy */ - arr->descr->f->copyswap(loop->bufptr[0], - loop->inptr, - loop->swap, - NULL); + else { + /* Simple copy */ + arr->descr->f->copyswap(loop->bufptr[0], loop->inptr, + loop->swap, NULL); } loop->inptr += loop->instrides; n = 1; - while(n < loop->N) { - /* Copy up to loop->bufsize elements to - buffer */ + while (n < loop->N) { + /* Copy up to loop->bufsize elements to buffer */ dptr = loop->buffer; - for(i=0; ibufsize; i++, n++) { - if (n == loop->N) break; - arr->descr->f->copyswap(dptr, - loop->inptr, - loop->swap, - NULL); + for (i = 0; i < loop->bufsize; i++, n++) { + if (n == loop->N) { + break; + } + arr->descr->f->copyswap(dptr, loop->inptr, + loop->swap, NULL); loop->inptr += loop->instrides; dptr += loop->insize; } - if (loop->cast) - loop->cast(loop->buffer, - loop->castbuf, - i, NULL, NULL); - loop->function((char **)loop->bufptr, - &i, + if (loop->cast) { + loop->cast(loop->buffer, loop->castbuf, i, NULL, NULL); + } + loop->function((char **)loop->bufptr, &i, loop->steps, loop->funcdata); loop->bufptr[0] += loop->steps[0]*i; loop->bufptr[2] += loop->steps[2]*i; @@ -2729,57 +2764,60 @@ loop->index++; } } - NPY_LOOP_END_THREADS; - /* Hang on to this reference -- will be decref'd with loop */ - if (loop->retbase) ret = (PyArrayObject *)loop->ret->base; - else ret = loop->ret; + if (loop->retbase) { + ret = (PyArrayObject *)loop->ret->base; + } + else { + ret = loop->ret; + } Py_INCREF(ret); ufuncreduce_dealloc(loop); return (PyObject *)ret; fail: NPY_LOOP_END_THREADS; - - if (loop) ufuncreduce_dealloc(loop); + if (loop) { + ufuncreduce_dealloc(loop); + } return NULL; } -/* Reduceat performs a reduce over an axis using the indices as a guide - - op.reduceat(array,indices) computes - op.reduce(array[indices[i]:indices[i+1]] - for i=0..end with an implicit indices[i+1]=len(array) - assumed when i=end-1 - - if indices[i+1] <= indices[i]+1 - then the result is array[indices[i]] for that value - - op.accumulate(array) is the same as - op.reduceat(array,indices)[::2] - where indices is range(len(array)-1) with a zero placed in every other sample - indices = zeros(len(array)*2-1) - indices[1::2] = range(1,len(array)) - - output shape is based on the size of indices -*/ - +/* + * Reduceat performs a reduce over an axis using the indices as a guide + * + * op.reduceat(array,indices) computes + * op.reduce(array[indices[i]:indices[i+1]] + * for i=0..end with an implicit indices[i+1]=len(array) + * assumed when i=end-1 + * + * if indices[i+1] <= indices[i]+1 + * then the result is array[indices[i]] for that value + * + * op.accumulate(array) is the same as + * op.reduceat(array,indices)[::2] + * where indices is range(len(array)-1) with a zero placed in every other sample + * indices = zeros(len(array)*2-1) + * indices[1::2] = range(1,len(array)) + * + * output shape is based on the size of indices + */ static PyObject * PyUFunc_Reduceat(PyUFuncObject *self, PyArrayObject *arr, PyArrayObject *ind, PyArrayObject *out, int axis, int otype) { PyArrayObject *ret; PyUFuncReduceObject *loop; - intp *ptr=(intp *)ind->data; - intp nn=ind->dimensions[0]; - intp mm=arr->dimensions[axis]-1; + intp *ptr = (intp *)ind->data; + intp nn = ind->dimensions[0]; + intp mm = arr->dimensions[axis] - 1; intp n, i, j; char *dptr; NPY_BEGIN_THREADS_DEF; /* Check for out-of-bounds values in indices array */ - for(i=0; i mm)) { PyErr_Format(PyExc_IndexError, "index out-of-bounds (0, %d)", (int) mm); @@ -2790,37 +2828,38 @@ ptr = (intp *)ind->data; /* Construct loop object */ - loop = construct_reduce(self, &arr, out, axis, otype, UFUNC_REDUCEAT, nn, - "reduceat"); - if (!loop) return NULL; + loop = construct_reduce(self, &arr, out, axis, otype, + UFUNC_REDUCEAT, nn, "reduceat"); + if (!loop) { + return NULL; + } NPY_LOOP_BEGIN_THREADS; switch(loop->meth) { - /* zero-length index -- return array immediately */ case ZERO_EL_REDUCELOOP: + /* zero-length index -- return array immediately */ /* fprintf(stderr, "ZERO..\n"); */ break; - /* NOBUFFER -- behaved array and same type */ - case NOBUFFER_UFUNCLOOP: /* Reduceat */ + case NOBUFFER_UFUNCLOOP: + /* Reduceat + * NOBUFFER -- behaved array and same type + */ /* fprintf(stderr, "NOBUFFER..%d\n", loop->size); */ - while(loop->index < loop->size) { + while (loop->index < loop->size) { ptr = (intp *)ind->data; - for(i=0; ibufptr[1] = loop->it->dataptr + \ - (*ptr)*loop->instrides; + for (i = 0; i < nn; i++) { + loop->bufptr[1] = loop->it->dataptr + (*ptr)*loop->instrides; if (loop->obj) { Py_XINCREF(*((PyObject **)loop->bufptr[1])); } - memcpy(loop->bufptr[0], loop->bufptr[1], - loop->outsize); - mm = (i==nn-1 ? arr->dimensions[axis]-*ptr : \ - *(ptr+1) - *ptr) - 1; + memcpy(loop->bufptr[0], loop->bufptr[1], loop->outsize); + mm = (i == nn - 1 ? arr->dimensions[axis] - *ptr : + *(ptr + 1) - *ptr) - 1; if (mm > 0) { loop->bufptr[1] += loop->instrides; loop->bufptr[2] = loop->bufptr[0]; - loop->function((char **)loop->bufptr, - &mm, loop->steps, - loop->funcdata); + loop->function((char **)loop->bufptr, &mm, + loop->steps, loop->funcdata); UFUNC_CHECK_ERROR(loop); } loop->bufptr[0] += loop->ret->strides[axis]; @@ -2833,44 +2872,43 @@ } break; - /* BUFFER -- misbehaved array or different types */ - case BUFFER_UFUNCLOOP: /* Reduceat */ - /* fprintf(stderr, "BUFFERED..%d\n", loop->size); */ - while(loop->index < loop->size) { + case BUFFER_UFUNCLOOP: + /* Reduceat + * BUFFER -- misbehaved array or different types + */ + /* fprintf(stderr, "BUFFERED..%d\n", loop->size); */ + while (loop->index < loop->size) { ptr = (intp *)ind->data; - for(i=0; iobj) { Py_XINCREF(*((PyObject **)loop->idptr)); } - memcpy(loop->bufptr[0], loop->idptr, - loop->outsize); + memcpy(loop->bufptr[0], loop->idptr, loop->outsize); n = 0; - mm = (i==nn-1 ? arr->dimensions[axis] - *ptr :\ - *(ptr+1) - *ptr); - if (mm < 1) mm = 1; - loop->inptr = loop->it->dataptr + \ - (*ptr)*loop->instrides; + mm = (i == nn - 1 ? arr->dimensions[axis] - *ptr : + *(ptr + 1) - *ptr); + if (mm < 1) { + mm = 1; + } + loop->inptr = loop->it->dataptr + (*ptr)*loop->instrides; while (n < mm) { - /* Copy up to loop->bufsize elements - to buffer */ + /* Copy up to loop->bufsize elements to buffer */ dptr = loop->buffer; - for(j=0; jbufsize; j++, n++) { - if (n == mm) break; - arr->descr->f->copyswap\ - (dptr, - loop->inptr, + for (j = 0; j < loop->bufsize; j++, n++) { + if (n == mm) { + break; + } + arr->descr->f->copyswap(dptr, loop->inptr, loop->swap, NULL); loop->inptr += loop->instrides; dptr += loop->insize; } - if (loop->cast) - loop->cast(loop->buffer, - loop->castbuf, - j, NULL, NULL); + if (loop->cast) { + loop->cast(loop->buffer, loop->castbuf, j, NULL, NULL); + } loop->bufptr[2] = loop->bufptr[0]; - loop->function((char **)loop->bufptr, - &j, loop->steps, - loop->funcdata); + loop->function((char **)loop->bufptr, &j, + loop->steps, loop->funcdata); UFUNC_CHECK_ERROR(loop); loop->bufptr[0] += j*loop->steps[0]; } @@ -2884,55 +2922,56 @@ } break; } - NPY_LOOP_END_THREADS; - - /* Hang on to this reference -- will be decref'd with loop */ - if (loop->retbase) ret = (PyArrayObject *)loop->ret->base; - else ret = loop->ret; + /* Hang on to this reference -- will be decref'd with loop */ + if (loop->retbase) { + ret = (PyArrayObject *)loop->ret->base; + } + else { + ret = loop->ret; + } Py_INCREF(ret); ufuncreduce_dealloc(loop); return (PyObject *)ret; fail: NPY_LOOP_END_THREADS; - - if (loop) ufuncreduce_dealloc(loop); + if (loop) { + ufuncreduce_dealloc(loop); + } return NULL; } -/* This code handles reduce, reduceat, and accumulate - (accumulate and reduce are special cases of the more general reduceat - but they are handled separately for speed) -*/ - +/* + * This code handles reduce, reduceat, and accumulate + * (accumulate and reduce are special cases of the more general reduceat + * but they are handled separately for speed) + */ static PyObject * PyUFunc_GenericReduction(PyUFuncObject *self, PyObject *args, PyObject *kwds, int operation) { int axis=0; PyArrayObject *mp, *ret = NULL; - PyObject *op, *res=NULL; + PyObject *op, *res = NULL; PyObject *obj_ind, *context; PyArrayObject *indices = NULL; - PyArray_Descr *otype=NULL; - PyArrayObject *out=NULL; + PyArray_Descr *otype = NULL; + PyArrayObject *out = NULL; static char *kwlist1[] = {"array", "axis", "dtype", "out", NULL}; static char *kwlist2[] = {"array", "indices", "axis", "dtype", "out", NULL}; - static char *_reduce_type[] = {"reduce", "accumulate", \ - "reduceat", NULL}; + static char *_reduce_type[] = {"reduce", "accumulate", "reduceat", NULL}; + if (self == NULL) { PyErr_SetString(PyExc_ValueError, "function not supported"); return NULL; } - if (self->core_enabled) { PyErr_Format(PyExc_RuntimeError, "Reduction not defined on ufunc with signature"); return NULL; } - if (self->nin != 2) { PyErr_Format(PyExc_ValueError, "%s only supported for binary functions", @@ -2961,7 +3000,10 @@ } indices = (PyArrayObject *)PyArray_FromAny(obj_ind, indtype, 1, 1, CARRAY, NULL); - if (indices == NULL) {Py_XDECREF(otype); return NULL;} + if (indices == NULL) { + Py_XDECREF(otype); + return NULL; + } } else { if(!PyArg_ParseTupleAndKeywords(args, kwds, "O|iO&O&", kwlist1, @@ -2974,7 +3016,6 @@ return NULL; } } - /* Ensure input is an array */ if (!PyArray_Check(op) && !PyArray_IsScalar(op, Generic)) { context = Py_BuildValue("O(O)i", self, op, 0); @@ -2984,8 +3025,9 @@ } mp = (PyArrayObject *)PyArray_FromAny(op, NULL, 0, 0, 0, context); Py_XDECREF(context); - if (mp == NULL) return NULL; - + if (mp == NULL) { + return NULL; + } /* Check to see if input is zero-dimensional */ if (mp->nd == 0) { PyErr_Format(PyExc_TypeError, "cannot %s on a scalar", @@ -2994,7 +3036,6 @@ Py_DECREF(mp); return NULL; } - /* Check to see that type (and otype) is not FLEXIBLE */ if (PyArray_ISFLEXIBLE(mp) || (otype && PyTypeNum_ISFLEXIBLE(otype->type_num))) { @@ -3006,37 +3047,42 @@ return NULL; } - if (axis < 0) axis += mp->nd; + if (axis < 0) { + axis += mp->nd; + } if (axis < 0 || axis >= mp->nd) { PyErr_SetString(PyExc_ValueError, "axis not in array"); Py_XDECREF(otype); Py_DECREF(mp); return NULL; } - - /* If out is specified it determines otype unless otype - already specified. - */ + /* + * If out is specified it determines otype + * unless otype already specified. + */ if (otype == NULL && out != NULL) { otype = out->descr; Py_INCREF(otype); } - if (otype == NULL) { - /* For integer types --- make sure at - least a long is used for add and multiply - reduction --- to avoid overflow */ + /* + * For integer types --- make sure at least a long + * is used for add and multiply reduction to avoid overflow + */ int typenum = PyArray_TYPE(mp); - if ((typenum < NPY_FLOAT) && \ - ((strcmp(self->name,"add")==0) || \ - (strcmp(self->name,"multiply")==0))) { - if (PyTypeNum_ISBOOL(typenum)) + if ((typenum < NPY_FLOAT) + && ((strcmp(self->name,"add") == 0) + || (strcmp(self->name,"multiply") == 0))) { + if (PyTypeNum_ISBOOL(typenum)) { typenum = PyArray_LONG; + } else if (mp->descr->elsize < sizeof(long)) { - if (PyTypeNum_ISUNSIGNED(typenum)) + if (PyTypeNum_ISUNSIGNED(typenum)) { typenum = PyArray_ULONG; - else + } + else { typenum = PyArray_LONG; + } } } otype = PyArray_DescrFromType(typenum); @@ -3060,36 +3106,41 @@ } Py_DECREF(mp); Py_DECREF(otype); - if (ret==NULL) return NULL; + if (ret == NULL) { + return NULL; + } if (op->ob_type != ret->ob_type) { res = PyObject_CallMethod(op, "__array_wrap__", "O", ret); - if (res == NULL) PyErr_Clear(); - else if (res == Py_None) Py_DECREF(res); + if (res == NULL) { + PyErr_Clear(); + } + else if (res == Py_None) { + Py_DECREF(res); + } else { Py_DECREF(ret); return res; } } return PyArray_Return(ret); - } -/* This function analyzes the input arguments - and determines an appropriate __array_wrap__ function to call - for the outputs. - - If an output argument is provided, then it is wrapped - with its own __array_wrap__ not with the one determined by - the input arguments. - - if the provided output argument is already an array, - the wrapping function is None (which means no wrapping will - be done --- not even PyArray_Return). - - A NULL is placed in output_wrap for outputs that - should just have PyArray_Return called. -*/ - +/* + * This function analyzes the input arguments + * and determines an appropriate __array_wrap__ function to call + * for the outputs. + * + * If an output argument is provided, then it is wrapped + * with its own __array_wrap__ not with the one determined by + * the input arguments. + * + * if the provided output argument is already an array, + * the wrapping function is None (which means no wrapping will + * be done --- not even PyArray_Return). + * + * A NULL is placed in output_wrap for outputs that + * should just have PyArray_Return called. + */ static void _find_array_wrap(PyObject *args, PyObject **output_wrap, int nin, int nout) { @@ -3101,11 +3152,11 @@ PyObject *obj, *wrap = NULL; nargs = PyTuple_GET_SIZE(args); - for(i = 0; i < nin; i++) { + for (i = 0; i < nin; i++) { obj = PyTuple_GET_ITEM(args, i); - if (PyArray_CheckExact(obj) || \ - PyArray_IsAnyScalar(obj)) + if (PyArray_CheckExact(obj) || PyArray_IsAnyScalar(obj)) { continue; + } wrap = PyObject_GetAttrString(obj, "__array_wrap__"); if (wrap) { if (PyCallable_Check(wrap)) { @@ -3125,50 +3176,50 @@ if (np >= 2) { wrap = wraps[0]; maxpriority = PyArray_GetPriority(with_wrap[0], - PyArray_SUBTYPE_PRIORITY); - for(i = 1; i < np; ++i) { - priority = \ - PyArray_GetPriority(with_wrap[i], - PyArray_SUBTYPE_PRIORITY); + PyArray_SUBTYPE_PRIORITY); + for (i = 1; i < np; ++i) { + priority = PyArray_GetPriority(with_wrap[i], + PyArray_SUBTYPE_PRIORITY); if (priority > maxpriority) { maxpriority = priority; Py_DECREF(wrap); wrap = wraps[i]; - } else { + } + else { Py_DECREF(wraps[i]); } } } - /* Here wrap is the wrapping function determined from the - input arrays (could be NULL). - - For all the output arrays decide what to do. - - 1) Use the wrap function determined from the input arrays - This is the default if the output array is not - passed in. - - 2) Use the __array_wrap__ method of the output object - passed in. -- this is special cased for - exact ndarray so that no PyArray_Return is - done in that case. - */ - - for(i=0; inargs; i++) { mps[i] = NULL; } - errval = PyUFunc_GenericFunction(self, args, kwds, mps); if (errval < 0) { - for(i = 0; i < self->nargs; i++) { + for (i = 0; i < self->nargs; i++) { PyArray_XDECREF_ERR(mps[i]); } if (errval == -1) @@ -3223,12 +3272,9 @@ return Py_NotImplemented; } } - - for(i = 0; i < self->nin; i++) { + for (i = 0; i < self->nin; i++) { Py_DECREF(mps[i]); } - - /* * Use __array_wrap__ on all outputs * if present on one of the input arguments. @@ -3249,10 +3295,9 @@ _find_array_wrap(args, wraparr, self->nin, self->nout); /* wrap outputs */ - for(i = 0; i < self->nout; i++) { - int j=self->nin+i; + for (i = 0; i < self->nout; i++) { + int j = self->nin+i; PyObject *wrap; - /* * check to see if any UPDATEIFCOPY flags are set * which meant that a temporary output was generated @@ -3272,14 +3317,10 @@ retobj[i] = (PyObject *)mps[j]; continue; } - res = PyObject_CallFunction(wrap, "O(OOi)", - mps[j], self, args, i); - if (res == NULL && \ - PyErr_ExceptionMatches(PyExc_TypeError)) { + res = PyObject_CallFunction(wrap, "O(OOi)", mps[j], self, args, i); + if (res == NULL && PyErr_ExceptionMatches(PyExc_TypeError)) { PyErr_Clear(); - res = PyObject_CallFunctionObjArgs(wrap, - mps[j], - NULL); + res = PyObject_CallFunctionObjArgs(wrap, mps[j], NULL); } Py_DECREF(wrap); if (res == NULL) { @@ -3300,15 +3341,17 @@ if (self->nout == 1) { return retobj[0]; - } else { + } + else { ret = (PyTupleObject *)PyTuple_New(self->nout); - for(i = 0; i < self->nout; i++) { + for (i = 0; i < self->nout; i++) { PyTuple_SET_ITEM(ret, i, retobj[i]); } return (PyObject *)ret; } + fail: - for(i = self->nin; i < self->nargs; i++) { + for (i = self->nin; i < self->nargs; i++) { Py_XDECREF(mps[i]); } return NULL; @@ -3320,8 +3363,9 @@ PyObject *thedict; PyObject *res; - if (!PyArg_ParseTuple(args, "")) return NULL; - + if (!PyArg_ParseTuple(args, "")) { + return NULL; + } if (PyUFunc_PYVALS_NAME == NULL) { PyUFunc_PYVALS_NAME = PyString_InternFromString(UFUNC_PYVALS_NAME); } @@ -3336,7 +3380,9 @@ } /* Construct list of defaults */ res = PyList_New(3); - if (res == NULL) return NULL; + if (res == NULL) { + return NULL; + } PyList_SET_ITEM(res, 0, PyInt_FromLong(PyArray_BUFSIZE)); PyList_SET_ITEM(res, 1, PyInt_FromLong(UFUNC_ERR_DEFAULT)); PyList_SET_ITEM(res, 2, Py_None); Py_INCREF(Py_None); @@ -3345,28 +3391,27 @@ #if USE_USE_DEFAULTS==1 /* - This is a strategy to buy a little speed up and avoid the dictionary - look-up in the default case. It should work in the presence of - threads. If it is deemed too complicated or it doesn't actually work - it could be taken out. -*/ + * This is a strategy to buy a little speed up and avoid the dictionary + * look-up in the default case. It should work in the presence of + * threads. If it is deemed too complicated or it doesn't actually work + * it could be taken out. + */ static int ufunc_update_use_defaults(void) { - PyObject *errobj=NULL; + PyObject *errobj = NULL; int errmask, bufsize; int res; PyUFunc_NUM_NODEFAULTS += 1; - res = PyUFunc_GetPyValues("test", &bufsize, &errmask, - &errobj); + res = PyUFunc_GetPyValues("test", &bufsize, &errmask, &errobj); PyUFunc_NUM_NODEFAULTS -= 1; - - if (res < 0) {Py_XDECREF(errobj); return -1;} - - if ((errmask != UFUNC_ERR_DEFAULT) || \ - (bufsize != PyArray_BUFSIZE) || \ - (PyTuple_GET_ITEM(errobj, 1) != Py_None)) { + if (res < 0) { + Py_XDECREF(errobj); + return -1; + } + if ((errmask != UFUNC_ERR_DEFAULT) || (bufsize != PyArray_BUFSIZE) + || (PyTuple_GET_ITEM(errobj, 1) != Py_None)) { PyUFunc_NUM_NODEFAULTS += 1; } else if (PyUFunc_NUM_NODEFAULTS > 0) { @@ -3385,8 +3430,9 @@ PyObject *val; static char *msg = "Error object must be a list of length 3"; - if (!PyArg_ParseTuple(args, "O", &val)) return NULL; - + if (!PyArg_ParseTuple(args, "O", &val)) { + return NULL; + } if (!PyList_CheckExact(val) || PyList_GET_SIZE(val) != 3) { PyErr_SetString(PyExc_ValueError, msg); return NULL; @@ -3399,9 +3445,13 @@ thedict = PyEval_GetBuiltins(); } res = PyDict_SetItem(thedict, PyUFunc_PYVALS_NAME, val); - if (res < 0) return NULL; + if (res < 0) { + return NULL; + } #if USE_USE_DEFAULTS==1 - if (ufunc_update_use_defaults() < 0) return NULL; + if (ufunc_update_use_defaults() < 0) { + return NULL; + } #endif Py_INCREF(Py_None); return Py_None; @@ -3412,41 +3462,45 @@ static PyUFuncGenericFunction pyfunc_functions[] = {PyUFunc_On_Om}; static char -doc_frompyfunc[] = "frompyfunc(func, nin, nout) take an arbitrary python function that takes nin objects as input and returns nout objects and return a universal function (ufunc). This ufunc always returns PyObject arrays"; +doc_frompyfunc[] = "frompyfunc(func, nin, nout) take an arbitrary python\n" \ + "function that takes nin objects as input and returns\n" \ + "nout objects and return a universal function (ufunc).\n" \ + "This ufunc always returns PyObject arrays\n"; static PyObject * ufunc_frompyfunc(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *NPY_UNUSED(kwds)) { /* Keywords are ignored for now */ - PyObject *function, *pyname=NULL; + PyObject *function, *pyname = NULL; int nin, nout, i; PyUFunc_PyFuncData *fdata; PyUFuncObject *self; char *fname, *str; - Py_ssize_t fname_len=-1; + Py_ssize_t fname_len = -1; int offset[2]; - if (!PyArg_ParseTuple(args, "Oii", &function, &nin, &nout)) return NULL; - + if (!PyArg_ParseTuple(args, "Oii", &function, &nin, &nout)) { + return NULL; + } if (!PyCallable_Check(function)) { PyErr_SetString(PyExc_TypeError, "function must be callable"); return NULL; } - self = _pya_malloc(sizeof(PyUFuncObject)); - if (self == NULL) return NULL; + if (self == NULL) { + return NULL; + } PyObject_Init((PyObject *)self, &PyUFunc_Type); self->userloops = NULL; self->nin = nin; self->nout = nout; - self->nargs = nin+nout; + self->nargs = nin + nout; self->identity = PyUFunc_None; self->functions = pyfunc_functions; - self->ntypes = 1; self->check_return = 0; - + /* generalized ufunc */ self->core_enabled = 0; self->core_num_dim_ix = 0; @@ -3456,9 +3510,9 @@ self->core_signature = NULL; pyname = PyObject_GetAttrString(function, "__name__"); - if (pyname) + if (pyname) { (void) PyString_AsStringAndSize(pyname, &fname, &fname_len); - + } if (PyErr_Occurred()) { fname = "?"; fname_len = 1; @@ -3466,28 +3520,31 @@ } Py_XDECREF(pyname); - - - /* self->ptr holds a pointer for enough memory for - self->data[0] (fdata) - self->data - self->name - self->types - - To be safest, all of these need their memory aligned on void * pointers - Therefore, we may need to allocate extra space. - */ + /* + * self->ptr holds a pointer for enough memory for + * self->data[0] (fdata) + * self->data + * self->name + * self->types + * + * To be safest, all of these need their memory aligned on void * pointers + * Therefore, we may need to allocate extra space. + */ offset[0] = sizeof(PyUFunc_PyFuncData); i = (sizeof(PyUFunc_PyFuncData) % sizeof(void *)); - if (i) offset[0] += (sizeof(void *) - i); + if (i) { + offset[0] += (sizeof(void *) - i); + } offset[1] = self->nargs; i = (self->nargs % sizeof(void *)); - if (i) offset[1] += (sizeof(void *)-i); - - self->ptr = _pya_malloc(offset[0] + offset[1] + sizeof(void *) + \ - (fname_len+14)); - - if (self->ptr == NULL) return PyErr_NoMemory(); + if (i) { + offset[1] += (sizeof(void *)-i); + } + self->ptr = _pya_malloc(offset[0] + offset[1] + sizeof(void *) + + (fname_len + 14)); + if (self->ptr == NULL) { + return PyErr_NoMemory(); + } Py_INCREF(function); self->obj = function; fdata = (PyUFunc_PyFuncData *)(self->ptr); @@ -3497,20 +3554,18 @@ self->data = (void **)(((char *)self->ptr) + offset[0]); self->data[0] = (void *)fdata; - self->types = (char *)self->data + sizeof(void *); - for(i=0; inargs; i++) self->types[i] = PyArray_OBJECT; - + for (i = 0; i < self->nargs; i++) { + self->types[i] = PyArray_OBJECT; + } str = self->types + offset[1]; memcpy(str, fname, fname_len); memcpy(str+fname_len, " (vectorized)", 14); - self->name = str; /* Do a better job someday */ self->doc = "dynamic ufunc based on a python function"; - return (PyObject *)self; } @@ -3521,16 +3576,18 @@ int *signature, PyUFuncGenericFunction *oldfunc) { - int i,j; + int i, j; int res = -1; /* Find the location of the matching signature */ - for(i=0; intypes; i++) { - for(j=0; jnargs; j++) { - if (signature[j] != func->types[i*func->nargs+j]) + for (i = 0; i < func->ntypes; i++) { + for (j = 0; j < func->nargs; j++) { + if (signature[j] != func->types[i*func->nargs+j]) { break; + } } - if (j < func->nargs) continue; - + if (j < func->nargs) { + continue; + } if (oldfunc != NULL) { *oldfunc = func->functions[i]; } @@ -3551,7 +3608,7 @@ return PyUFunc_FromFuncAndDataAndSignature(func, data, types, ntypes, nin, nout, identity, name, doc, check_return, NULL); } - + /*UFUNC_API*/ static PyObject * PyUFunc_FromFuncAndDataAndSignature(PyUFuncGenericFunction *func, void **data, @@ -3563,7 +3620,9 @@ PyUFuncObject *self; self = _pya_malloc(sizeof(PyUFuncObject)); - if (self == NULL) return NULL; + if (self == NULL) { + return NULL; + } PyObject_Init((PyObject *)self, &PyUFunc_Type); self->nin = nin; @@ -3580,12 +3639,19 @@ self->obj = NULL; self->userloops=NULL; - if (name == NULL) self->name = "?"; - else self->name = name; + if (name == NULL) { + self->name = "?"; + } + else { + self->name = name; + } + if (doc == NULL) { + self->doc = "NULL"; + } + else { + self->doc = doc; + } - if (doc == NULL) self->doc = "NULL"; - else self->doc = doc; - /* generalized ufunc */ self->core_enabled = 0; self->core_num_dim_ix = 0; @@ -3594,22 +3660,22 @@ self->core_offsets = NULL; self->core_signature = NULL; if (signature != NULL) { - if (_parse_signature(self, signature) != 0) + if (_parse_signature(self, signature) != 0) { return NULL; + } } - return (PyObject *)self; } -/* This is the first-part of the CObject structure. - - I don't think this will change, but if it should, then - this needs to be fixed. The exposed C-API was insufficient - because I needed to replace the pointer and it wouldn't - let me with a destructor set (even though it works fine - with the destructor). -*/ - +/* + * This is the first-part of the CObject structure. + * + * I don't think this will change, but if it should, then + * this needs to be fixed. The exposed C-API was insufficient + * because I needed to replace the pointer and it wouldn't + * let me with a destructor set (even though it works fine + * with the destructor). + */ typedef struct { PyObject_HEAD void *c_obj; @@ -3617,31 +3683,37 @@ #define _SETCPTR(cobj, val) ((_simple_cobj *)(cobj))->c_obj = (val) -/* return 1 if arg1 > arg2, 0 if arg1 == arg2, and -1 if arg1 < arg2 - */ +/* return 1 if arg1 > arg2, 0 if arg1 == arg2, and -1 if arg1 < arg2 */ static int cmp_arg_types(int *arg1, int *arg2, int n) { - for(;n>0; n--, arg1++, arg2++) { - if (PyArray_EquivTypenums(*arg1, *arg2)) continue; - if (PyArray_CanCastSafely(*arg1, *arg2)) + for (; n > 0; n--, arg1++, arg2++) { + if (PyArray_EquivTypenums(*arg1, *arg2)) { + continue; + } + if (PyArray_CanCastSafely(*arg1, *arg2)) { return -1; + } return 1; } return 0; } -/* This frees the linked-list structure - when the CObject is destroyed (removed - from the internal dictionary) +/* + * This frees the linked-list structure when the CObject + * is destroyed (removed from the internal dictionary) */ static void _loop1d_list_free(void *ptr) { PyUFunc_Loop1d *funcdata; - if (ptr == NULL) return; + if (ptr == NULL) { + return; + } funcdata = (PyUFunc_Loop1d *)ptr; - if (funcdata == NULL) return; + if (funcdata == NULL) { + return; + } _pya_free(funcdata->arg_types); _loop1d_list_free(funcdata->next); _pya_free(funcdata); @@ -3664,8 +3736,7 @@ descr=PyArray_DescrFromType(usertype); if ((usertype < PyArray_USERDEF) || (descr==NULL)) { - PyErr_SetString(PyExc_TypeError, - "unknown user-defined type"); + PyErr_SetString(PyExc_TypeError, "unknown user-defined type"); return -1; } Py_DECREF(descr); @@ -3674,18 +3745,24 @@ ufunc->userloops = PyDict_New(); } key = PyInt_FromLong((long) usertype); - if (key == NULL) return -1; + if (key == NULL) { + return -1; + } funcdata = _pya_malloc(sizeof(PyUFunc_Loop1d)); - if (funcdata == NULL) goto fail; + if (funcdata == NULL) { + goto fail; + } newtypes = _pya_malloc(sizeof(int)*ufunc->nargs); - if (newtypes == NULL) goto fail; + if (newtypes == NULL) { + goto fail; + } if (arg_types != NULL) { - for(i=0; inargs; i++) { + for (i = 0; i < ufunc->nargs; i++) { newtypes[i] = arg_types[i]; } } else { - for(i=0; inargs; i++) { + for (i = 0; i < ufunc->nargs; i++) { newtypes[i] = usertype; } } @@ -3697,46 +3774,51 @@ /* Get entry for this user-defined type*/ cobj = PyDict_GetItem(ufunc->userloops, key); - /* If it's not there, then make one and return. */ if (cobj == NULL) { - cobj = PyCObject_FromVoidPtr((void *)funcdata, - _loop1d_list_free); - if (cobj == NULL) goto fail; + cobj = PyCObject_FromVoidPtr((void *)funcdata, _loop1d_list_free); + if (cobj == NULL) { + goto fail; + } PyDict_SetItem(ufunc->userloops, key, cobj); Py_DECREF(cobj); Py_DECREF(key); return 0; } else { - PyUFunc_Loop1d *current, *prev=NULL; - int cmp=1; - /* There is already at least 1 loop. Place this one in - lexicographic order. If the next one signature - is exactly like this one, then just replace. - Otherwise insert. - */ + PyUFunc_Loop1d *current, *prev = NULL; + int cmp = 1; + /* + * There is already at least 1 loop. Place this one in + * lexicographic order. If the next one signature + * is exactly like this one, then just replace. + * Otherwise insert. + */ current = (PyUFunc_Loop1d *)PyCObject_AsVoidPtr(cobj); while (current != NULL) { - cmp = cmp_arg_types(current->arg_types, newtypes, - ufunc->nargs); - if (cmp >= 0) break; + cmp = cmp_arg_types(current->arg_types, newtypes, ufunc->nargs); + if (cmp >= 0) { + break; + } prev = current; current = current->next; } - if (cmp == 0) { /* just replace it with new function */ + if (cmp == 0) { + /* just replace it with new function */ current->func = function; current->data = data; _pya_free(newtypes); _pya_free(funcdata); } - else { /* insert it before the current one - by hacking the internals of cobject to - replace the function pointer --- - can't use CObject API because destructor is set. - */ + else { + /* + * insert it before the current one by hacking the internals + * of cobject to replace the function pointer --- can't use + * CObject API because destructor is set. + */ funcdata->next = current; - if (prev == NULL) { /* place this at front */ + if (prev == NULL) { + /* place this at front */ _SETCPTR(cobj, funcdata); } else { @@ -3747,7 +3829,6 @@ Py_DECREF(key); return 0; - fail: Py_DECREF(key); _pya_free(funcdata); @@ -3762,11 +3843,21 @@ static void ufunc_dealloc(PyUFuncObject *self) { - if (self->core_num_dims) _pya_free(self->core_num_dims); - if (self->core_dim_ixs) _pya_free(self->core_dim_ixs); - if (self->core_offsets) _pya_free(self->core_offsets); - if (self->core_signature) _pya_free(self->core_signature); - if (self->ptr) _pya_free(self->ptr); + if (self->core_num_dims) { + _pya_free(self->core_num_dims); + } + if (self->core_dim_ixs) { + _pya_free(self->core_dim_ixs); + } + if (self->core_offsets) { + _pya_free(self->core_offsets); + } + if (self->core_signature) { + _pya_free(self->core_signature); + } + if (self->ptr) { + _pya_free(self->ptr); + } Py_XDECREF(self->userloops); Py_XDECREF(self->obj); _pya_free(self); @@ -3778,25 +3869,24 @@ char buf[100]; sprintf(buf, "", self->name); - return PyString_FromString(buf); } /* -------------------------------------------------------- */ -/* op.outer(a,b) is equivalent to op(a[:,NewAxis,NewAxis,etc.],b) - where a has b.ndim NewAxis terms appended. - - The result has dimensions a.ndim + b.ndim -*/ - +/* + * op.outer(a,b) is equivalent to op(a[:,NewAxis,NewAxis,etc.],b) + * where a has b.ndim NewAxis terms appended. + * + * The result has dimensions a.ndim + b.ndim + */ static PyObject * ufunc_outer(PyUFuncObject *self, PyObject *args, PyObject *kwds) { int i; PyObject *ret; - PyArrayObject *ap1=NULL, *ap2=NULL, *ap_new=NULL; + PyArrayObject *ap1 = NULL, *ap2 = NULL, *ap_new = NULL; PyObject *new_args, *tmp; PyObject *shape1, *shape2, *newshape; @@ -3806,7 +3896,7 @@ " signature"); return NULL; } - + if(self->nin != 2) { PyErr_SetString(PyExc_ValueError, "outer product only supported "\ @@ -3815,45 +3905,57 @@ } if (PySequence_Length(args) != 2) { - PyErr_SetString(PyExc_TypeError, - "exactly two arguments expected"); + PyErr_SetString(PyExc_TypeError, "exactly two arguments expected"); return NULL; } tmp = PySequence_GetItem(args, 0); - if (tmp == NULL) return NULL; - ap1 = (PyArrayObject *) \ - PyArray_FromObject(tmp, PyArray_NOTYPE, 0, 0); + if (tmp == NULL) { + return NULL; + } + ap1 = (PyArrayObject *) PyArray_FromObject(tmp, PyArray_NOTYPE, 0, 0); Py_DECREF(tmp); - if (ap1 == NULL) return NULL; - + if (ap1 == NULL) { + return NULL; + } tmp = PySequence_GetItem(args, 1); - if (tmp == NULL) return NULL; + if (tmp == NULL) { + return NULL; + } ap2 = (PyArrayObject *)PyArray_FromObject(tmp, PyArray_NOTYPE, 0, 0); Py_DECREF(tmp); - if (ap2 == NULL) {Py_DECREF(ap1); return NULL;} - + if (ap2 == NULL) { + Py_DECREF(ap1); + return NULL; + } /* Construct new shape tuple */ shape1 = PyTuple_New(ap1->nd); - if (shape1 == NULL) goto fail; - for(i=0; ind; i++) + if (shape1 == NULL) { + goto fail; + } + for (i = 0; i < ap1->nd; i++) { PyTuple_SET_ITEM(shape1, i, - PyLong_FromLongLong((longlong)ap1-> \ - dimensions[i])); - + PyLong_FromLongLong((longlong)ap1->dimensions[i])); + } shape2 = PyTuple_New(ap2->nd); - for(i=0; ind; i++) + for (i = 0; i < ap2->nd; i++) { PyTuple_SET_ITEM(shape2, i, PyInt_FromLong((long) 1)); - if (shape2 == NULL) {Py_DECREF(shape1); goto fail;} + } + if (shape2 == NULL) { + Py_DECREF(shape1); + goto fail; + } newshape = PyNumber_Add(shape1, shape2); Py_DECREF(shape1); Py_DECREF(shape2); - if (newshape == NULL) goto fail; - + if (newshape == NULL) { + goto fail; + } ap_new = (PyArrayObject *)PyArray_Reshape(ap1, newshape); Py_DECREF(newshape); - if (ap_new == NULL) goto fail; - + if (ap_new == NULL) { + goto fail; + } new_args = Py_BuildValue("(OO)", ap_new, ap2); Py_DECREF(ap1); Py_DECREF(ap2); @@ -3873,14 +3975,12 @@ static PyObject * ufunc_reduce(PyUFuncObject *self, PyObject *args, PyObject *kwds) { - return PyUFunc_GenericReduction(self, args, kwds, UFUNC_REDUCE); } static PyObject * ufunc_accumulate(PyUFuncObject *self, PyObject *args, PyObject *kwds) { - return PyUFunc_GenericReduction(self, args, kwds, UFUNC_ACCUMULATE); } @@ -3892,34 +3992,41 @@ static struct PyMethodDef ufunc_methods[] = { - {"reduce", (PyCFunction)ufunc_reduce, METH_VARARGS | METH_KEYWORDS, NULL }, - {"accumulate", (PyCFunction)ufunc_accumulate, - METH_VARARGS | METH_KEYWORDS, NULL }, - {"reduceat", (PyCFunction)ufunc_reduceat, - METH_VARARGS | METH_KEYWORDS, NULL }, - {"outer", (PyCFunction)ufunc_outer, METH_VARARGS | METH_KEYWORDS, NULL}, + {"reduce", + (PyCFunction)ufunc_reduce, + METH_VARARGS | METH_KEYWORDS, NULL }, + {"accumulate", + (PyCFunction)ufunc_accumulate, + METH_VARARGS | METH_KEYWORDS, NULL }, + {"reduceat", + (PyCFunction)ufunc_reduceat, + METH_VARARGS | METH_KEYWORDS, NULL }, + {"outer", + (PyCFunction)ufunc_outer, + METH_VARARGS | METH_KEYWORDS, NULL}, {NULL, NULL, 0, NULL} /* sentinel */ }; -/* construct the string - y1,y2,...,yn -*/ +/* construct the string y1,y2,...,yn */ static PyObject * _makeargs(int num, char *ltr, int null_if_none) { PyObject *str; int i; + switch (num) { case 0: - if (null_if_none) return NULL; + if (null_if_none) { + return NULL; + } return PyString_FromString(""); case 1: return PyString_FromString(ltr); } str = PyString_FromFormat("%s1, %s2", ltr, ltr); - for(i = 3; i <= num; ++i) { + for (i = 3; i <= num; ++i) { PyString_ConcatAndDel(&str, PyString_FromFormat(", %s%d", ltr, i)); } return str; @@ -3939,15 +4046,11 @@ static PyObject * ufunc_get_doc(PyUFuncObject *self) { - /* Put docstring first or FindMethod finds it...*/ - /* could so some introspection on name and nin + nout */ - /* to automate the first part of it */ - /* the doc string shouldn't need the calling convention */ - /* construct - name(x1, x2, ...,[ out1, out2, ...]) - - __doc__ - */ + /* Put docstring first or FindMethod finds it... could so some + * introspection on name and nin + nout to automate the first part + * of it the doc string shouldn't need the calling convention + * construct name(x1, x2, ...,[ out1, out2, ...]) __doc__ + */ PyObject *outargs, *inargs, *doc; outargs = _makeargs(self->nout, "out", 1); inargs = _makeargs(self->nin, "x", 0); @@ -3956,7 +4059,8 @@ self->name, PyString_AS_STRING(inargs), self->doc); - } else { + } + else { doc = PyString_FromFormat("%s(%s[, %s])\n\n%s", self->name, PyString_AS_STRING(inargs), @@ -3995,31 +4099,31 @@ static PyObject * ufunc_get_types(PyUFuncObject *self) { - /* return a list with types grouped - input->output */ + /* return a list with types grouped input->output */ PyObject *list; PyObject *str; - int k, j, n, nt=self->ntypes; + int k, j, n, nt = self->ntypes; int ni = self->nin; int no = self->nout; char *t; list = PyList_New(nt); - if (list == NULL) return NULL; + if (list == NULL) { + return NULL; + } t = _pya_malloc(no+ni+2); n = 0; - for(k=0; ktypes[n]); n++; } t[ni] = '-'; t[ni+1] = '>'; - for(j=0; jtypes[n]); + for (j = 0; j < no; j++) { + t[ni + 2 + j] = _typecharfromnum(self->types[n]); n++; } - str = PyString_FromStringAndSize(t, no+ni+2); + str = PyString_FromStringAndSize(t, no + ni + 2); PyList_SET_ITEM(list, k, str); } _pya_free(t); @@ -4047,86 +4151,106 @@ static PyObject * ufunc_get_signature(PyUFuncObject *self) { - if (!self->core_enabled) + if (!self->core_enabled) { Py_RETURN_NONE; + } return PyString_FromString(self->core_signature); } #undef _typecharfromnum -/* Docstring is now set from python */ -/* static char *Ufunctype__doc__ = NULL; */ - +/* + * Docstring is now set from python + * static char *Ufunctype__doc__ = NULL; + */ static PyGetSetDef ufunc_getset[] = { - {"__doc__", (getter)ufunc_get_doc, NULL, "documentation string", NULL}, - {"nin", (getter)ufunc_get_nin, NULL, "number of inputs", NULL}, - {"nout", (getter)ufunc_get_nout, NULL, "number of outputs", NULL}, - {"nargs", (getter)ufunc_get_nargs, NULL, "number of arguments", NULL}, - {"ntypes", (getter)ufunc_get_ntypes, NULL, "number of types", NULL}, - {"types", (getter)ufunc_get_types, NULL, "return a list with types grouped input->output", NULL}, - {"__name__", (getter)ufunc_get_name, NULL, "function name", NULL}, - {"identity", (getter)ufunc_get_identity, NULL, "identity value", NULL}, - {"signature",(getter)ufunc_get_signature,NULL, "signature"}, + {"__doc__", + (getter)ufunc_get_doc, + NULL, "documentation string", NULL}, + {"nin", + (getter)ufunc_get_nin, + NULL, "number of inputs", NULL}, + {"nout", + (getter)ufunc_get_nout, + NULL, "number of outputs", NULL}, + {"nargs", + (getter)ufunc_get_nargs, + NULL, "number of arguments", NULL}, + {"ntypes", + (getter)ufunc_get_ntypes, + NULL, "number of types", NULL}, + {"types", + (getter)ufunc_get_types, + NULL, "return a list with types grouped input->output", NULL}, + {"__name__", + (getter)ufunc_get_name, + NULL, "function name", NULL}, + {"identity", + (getter)ufunc_get_identity, + NULL, "identity value", NULL}, + {"signature", + (getter)ufunc_get_signature, + NULL, "signature"}, {NULL, NULL, NULL, NULL, NULL}, /* Sentinel */ }; static PyTypeObject PyUFunc_Type = { PyObject_HEAD_INIT(0) - 0, /*ob_size*/ - "numpy.ufunc", /*tp_name*/ - sizeof(PyUFuncObject), /*tp_basicsize*/ - 0, /*tp_itemsize*/ + 0, /* ob_size */ + "numpy.ufunc", /* tp_name */ + sizeof(PyUFuncObject), /* tp_basicsize */ + 0, /* tp_itemsize */ /* methods */ - (destructor)ufunc_dealloc, /*tp_dealloc*/ - (printfunc)0, /*tp_print*/ - (getattrfunc)0, /*tp_getattr*/ - (setattrfunc)0, /*tp_setattr*/ - (cmpfunc)0, /*tp_compare*/ - (reprfunc)ufunc_repr, /*tp_repr*/ - 0, /*tp_as_number*/ - 0, /*tp_as_sequence*/ - 0, /*tp_as_mapping*/ - (hashfunc)0, /*tp_hash*/ - (ternaryfunc)ufunc_generic_call, /*tp_call*/ - (reprfunc)ufunc_repr, /*tp_str*/ - 0, /* tp_getattro */ - 0, /* tp_setattro */ - 0, /* tp_as_buffer */ - Py_TPFLAGS_DEFAULT, /* tp_flags */ - NULL, /* tp_doc */ /* was Ufunctype__doc__ */ - 0, /* tp_traverse */ - 0, /* tp_clear */ - 0, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ - 0, /* tp_iter */ - 0, /* tp_iternext */ - ufunc_methods, /* tp_methods */ - 0, /* tp_members */ - ufunc_getset, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - 0, /* tp_init */ - 0, /* tp_alloc */ - 0, /* tp_new */ - 0, /* tp_free */ - 0, /* tp_is_gc */ - 0, /* tp_bases */ - 0, /* tp_mro */ - 0, /* tp_cache */ - 0, /* tp_subclasses */ - 0, /* tp_weaklist */ - 0, /* tp_del */ + (destructor)ufunc_dealloc, /* tp_dealloc */ + (printfunc)0, /* tp_print */ + (getattrfunc)0, /* tp_getattr */ + (setattrfunc)0, /* tp_setattr */ + (cmpfunc)0, /* tp_compare */ + (reprfunc)ufunc_repr, /* tp_repr */ + 0, /* tp_as_number */ + 0, /* tp_as_sequence */ + 0, /* tp_as_mapping */ + (hashfunc)0, /* tp_hash */ + (ternaryfunc)ufunc_generic_call, /* tp_call */ + (reprfunc)ufunc_repr, /* tp_str */ + 0, /* tp_getattro */ + 0, /* tp_setattro */ + 0, /* tp_as_buffer */ + Py_TPFLAGS_DEFAULT, /* tp_flags */ + NULL, /* tp_doc */ /* was Ufunctype__doc__ */ + 0, /* tp_traverse */ + 0, /* tp_clear */ + 0, /* tp_richcompare */ + 0, /* tp_weaklistoffset */ + 0, /* tp_iter */ + 0, /* tp_iternext */ + ufunc_methods, /* tp_methods */ + 0, /* tp_members */ + ufunc_getset, /* tp_getset */ + 0, /* tp_base */ + 0, /* tp_dict */ + 0, /* tp_descr_get */ + 0, /* tp_descr_set */ + 0, /* tp_dictoffset */ + 0, /* tp_init */ + 0, /* tp_alloc */ + 0, /* tp_new */ + 0, /* tp_free */ + 0, /* tp_is_gc */ + 0, /* tp_bases */ + 0, /* tp_mro */ + 0, /* tp_cache */ + 0, /* tp_subclasses */ + 0, /* tp_weaklist */ + 0, /* tp_del */ #ifdef COUNT_ALLOCS - /* these must be last and never explicitly initialized */ - 0, /* tp_allocs */ - 0, /* tp_frees */ - 0, /* tp_maxalloc */ - 0, /* tp_prev */ - 0, /* *tp_next */ + /* these must be last and never explicitly initialized */ + 0, /* tp_allocs */ + 0, /* tp_frees */ + 0, /* tp_maxalloc */ + 0, /* tp_prev */ + 0, /* *tp_next */ #endif }; From numpy-svn at scipy.org Mon Feb 23 11:15:31 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Mon, 23 Feb 2009 10:15:31 -0600 (CST) Subject: [Numpy-svn] r6459 - trunk/numpy/core/include/numpy Message-ID: <20090223161531.F24A0C7C029@scipy.org> Author: cdavid Date: 2009-02-23 10:14:55 -0600 (Mon, 23 Feb 2009) New Revision: 6459 Added: trunk/numpy/core/include/numpy/mingw_amd64_fenv.h Modified: trunk/numpy/core/include/numpy/ufuncobject.h Log: Add custom code for FPU error handling on windows x64 with mingw compilers (mingw-w64). Added: trunk/numpy/core/include/numpy/mingw_amd64_fenv.h =================================================================== --- trunk/numpy/core/include/numpy/mingw_amd64_fenv.h 2009-02-22 22:36:18 UTC (rev 6458) +++ trunk/numpy/core/include/numpy/mingw_amd64_fenv.h 2009-02-23 16:14:55 UTC (rev 6459) @@ -0,0 +1,98 @@ +/* + * Those are mostly copies from BSD mlib + */ +#include +typedef struct { + struct { + uint32_t __control; + uint32_t __status; + uint32_t __tag; + char __other[16]; + } __x87; + uint32_t __mxcsr; +} npy_fenv_t; + +typedef uint16_t npy_fexcept_t; + +/* Exception flags */ +#define NPY_FE_INVALID 0x01 +#define NPY_FE_DENORMAL 0x02 +#define NPY_FE_DIVBYZERO 0x04 +#define NPY_FE_OVERFLOW 0x08 +#define NPY_FE_UNDERFLOW 0x10 +#define NPY_FE_INEXACT 0x20 +#define NPY_FE_ALL_EXCEPT (NPY_FE_DIVBYZERO | NPY_FE_DENORMAL | \ + NPY_FE_INEXACT | NPY_FE_INVALID | \ + NPY_FE_OVERFLOW | NPY_FE_UNDERFLOW) + +/* Assembly macros */ +#define __fldcw(__cw) __asm __volatile("fldcw %0" : : "m" (__cw)) +#define __fldenv(__env) __asm __volatile("fldenv %0" : : "m" (__env)) +#define __fldenvx(__env) __asm __volatile("fldenv %0" : : "m" (__env) \ + : "st", "st(1)", "st(2)", "st(3)", "st(4)", \ + "st(5)", "st(6)", "st(7)") +#define __fnclex() __asm __volatile("fnclex") +#define __fnstenv(__env) __asm __volatile("fnstenv %0" : "=m" (*(__env))) +#define __fnstcw(__cw) __asm __volatile("fnstcw %0" : "=m" (*(__cw))) +#define __fnstsw(__sw) __asm __volatile("fnstsw %0" : "=am" (*(__sw))) +#define __fwait() __asm __volatile("fwait") +#define __ldmxcsr(__csr) __asm __volatile("ldmxcsr %0" : : "m" (__csr)) +#define __stmxcsr(__csr) __asm __volatile("stmxcsr %0" : "=m" (*(__csr))) + +static __inline int npy_feclearexcept(int __excepts) +{ + npy_fenv_t __env; + + if (__excepts == NPY_FE_ALL_EXCEPT) { + __fnclex(); + } else { + __fnstenv(&__env.__x87); + __env.__x87.__status &= ~__excepts; + __fldenv(__env.__x87); + } + __stmxcsr(&__env.__mxcsr); + __env.__mxcsr &= ~__excepts; + __ldmxcsr(__env.__mxcsr); + return (0); +} + +static __inline int npy_fetestexcept(int __excepts) +{ + int __mxcsr, __status; + + __stmxcsr(&__mxcsr); + __fnstsw(&__status); + return ((__status | __mxcsr) & __excepts); +} + +static __inline int +npy_fegetexceptflag(npy_fexcept_t *__flagp, int __excepts) +{ + int __mxcsr, __status; + + __stmxcsr(&__mxcsr); + __fnstsw(&__status); + *__flagp = (__mxcsr | __status) & __excepts; + return (0); +} + +int npy_feraiseexcept(int excepts) +{ + npy_fexcept_t ex = excepts; + + npy_fesetexceptflag(&ex, excepts); + __fwait(); + return (0); +} + +#undef __fldcw +#undef __fldenv +#undef __fldenvx +#undef __fnclex +#undef __fnstenv +#undef __fnstcw +#undef __fnstsw +#undef __fwait +#undef __ldmxcsr +#undef __stmxcsr + Modified: trunk/numpy/core/include/numpy/ufuncobject.h =================================================================== --- trunk/numpy/core/include/numpy/ufuncobject.h 2009-02-22 22:36:18 UTC (rev 6458) +++ trunk/numpy/core/include/numpy/ufuncobject.h 2009-02-23 16:14:55 UTC (rev 6459) @@ -299,6 +299,23 @@ (void) fpsetsticky(0); \ } +#elif defined(__MINGW32__) && defined(__amd64__) +#include "mingw_amd64_fenv.h" + +#define UFUNC_CHECK_STATUS(ret) { \ + int fpstatus = (int) npy_fetestexcept(NPY_FE_DIVBYZERO | \ + NPY_FE_OVERFLOW | NPY_FE_UNDERFLOW | NPY_FE_INVALID); \ + ret = ((NPY_FE_DIVBYZERO & fpstatus) ? UFUNC_FPE_DIVIDEBYZERO : 0) \ + | ((NPY_FE_OVERFLOW & fpstatus) ? UFUNC_FPE_OVERFLOW : 0) \ + | ((NPY_FE_UNDERFLOW & fpstatus) ? UFUNC_FPE_UNDERFLOW : 0) \ + | ((NPY_FE_INVALID & fpstatus) ? UFUNC_FPE_INVALID : 0); \ + (void) npy_feclearexcept(NPY_FE_DIVBYZERO | NPY_FE_OVERFLOW | \ + NPY_FE_UNDERFLOW | NPY_FE_INVALID); \ +} + +#define generate_divbyzero_error() npy_feraiseexcept(NPY_FE_DIVBYZERO) +#define generate_overflow_error() npy_feraiseexcept(NPY_FE_OVERFLOW) + #elif defined(__GLIBC__) || defined(__APPLE__) || defined(__CYGWIN__) || defined(__MINGW32__) || (defined(__FreeBSD__) && (__FreeBSD_version >= 502114)) #if defined(__GLIBC__) || defined(__APPLE__) || defined(__MINGW32__) || defined(__FreeBSD__) From numpy-svn at scipy.org Mon Feb 23 11:26:19 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Mon, 23 Feb 2009 10:26:19 -0600 (CST) Subject: [Numpy-svn] r6460 - in branches/coremath: . numpy/core/include/numpy numpy/core/src numpy/distutils numpy/lib numpy/lib/tests numpy/testing Message-ID: <20090223162619.54AD8C7C029@scipy.org> Author: cdavid Date: 2009-02-23 10:25:45 -0600 (Mon, 23 Feb 2009) New Revision: 6460 Added: branches/coremath/numpy/core/include/numpy/mingw_amd64_fenv.h Modified: branches/coremath/ branches/coremath/numpy/core/include/numpy/ufuncobject.h branches/coremath/numpy/core/src/umath_ufunc_object.inc branches/coremath/numpy/distutils/mingw32ccompiler.py branches/coremath/numpy/lib/io.py branches/coremath/numpy/lib/tests/test_io.py branches/coremath/numpy/testing/utils.py Log: Merged revisions 6453-6459 via svnmerge from http://svn.scipy.org/svn/numpy/trunk ........ r6453 | cdavid | 2009-02-22 18:15:26 +0900 (Sun, 22 Feb 2009) | 1 line Fix wrong merge for manifest version. ........ r6454 | stefan | 2009-02-22 22:45:15 +0900 (Sun, 22 Feb 2009) | 1 line Add GzipFile wrapper to support the "whence" keyword in GzipFile.seek. ........ r6455 | stefan | 2009-02-22 22:47:14 +0900 (Sun, 22 Feb 2009) | 1 line Whitespace cleanup. ........ r6456 | stefan | 2009-02-22 22:48:21 +0900 (Sun, 22 Feb 2009) | 1 line Add test for Gzip loader. ........ r6457 | stefan | 2009-02-23 07:10:30 +0900 (Mon, 23 Feb 2009) | 1 line Allow subclasses of arrays in testing. ........ r6458 | charris | 2009-02-23 07:36:18 +0900 (Mon, 23 Feb 2009) | 1 line Coding style cleanups. This finishes umath_ufunc_object.inc. ........ r6459 | cdavid | 2009-02-24 01:14:55 +0900 (Tue, 24 Feb 2009) | 1 line Add custom code for FPU error handling on windows x64 with mingw compilers (mingw-w64). ........ Property changes on: branches/coremath ___________________________________________________________________ Name: svnmerge-integrated - /branches/distutils-revamp:1-2752 /branches/dynamic_cpu_configuration:1-6101 /branches/fix_float_format:1-6359 /branches/multicore:1-3687 /branches/numpy-mingw-w64:1-6150 /branches/visualstudio_manifest:1-6077 /trunk:1-6451 + /branches/distutils-revamp:1-2752 /branches/dynamic_cpu_configuration:1-6101 /branches/fix_float_format:1-6359 /branches/multicore:1-3687 /branches/numpy-mingw-w64:1-6150 /branches/visualstudio_manifest:1-6077 /trunk:1-6459 Copied: branches/coremath/numpy/core/include/numpy/mingw_amd64_fenv.h (from rev 6459, trunk/numpy/core/include/numpy/mingw_amd64_fenv.h) Modified: branches/coremath/numpy/core/include/numpy/ufuncobject.h =================================================================== --- branches/coremath/numpy/core/include/numpy/ufuncobject.h 2009-02-23 16:14:55 UTC (rev 6459) +++ branches/coremath/numpy/core/include/numpy/ufuncobject.h 2009-02-23 16:25:45 UTC (rev 6460) @@ -299,6 +299,23 @@ (void) fpsetsticky(0); \ } +#elif defined(__MINGW32__) && defined(__amd64__) +#include "mingw_amd64_fenv.h" + +#define UFUNC_CHECK_STATUS(ret) { \ + int fpstatus = (int) npy_fetestexcept(NPY_FE_DIVBYZERO | \ + NPY_FE_OVERFLOW | NPY_FE_UNDERFLOW | NPY_FE_INVALID); \ + ret = ((NPY_FE_DIVBYZERO & fpstatus) ? UFUNC_FPE_DIVIDEBYZERO : 0) \ + | ((NPY_FE_OVERFLOW & fpstatus) ? UFUNC_FPE_OVERFLOW : 0) \ + | ((NPY_FE_UNDERFLOW & fpstatus) ? UFUNC_FPE_UNDERFLOW : 0) \ + | ((NPY_FE_INVALID & fpstatus) ? UFUNC_FPE_INVALID : 0); \ + (void) npy_feclearexcept(NPY_FE_DIVBYZERO | NPY_FE_OVERFLOW | \ + NPY_FE_UNDERFLOW | NPY_FE_INVALID); \ +} + +#define generate_divbyzero_error() npy_feraiseexcept(NPY_FE_DIVBYZERO) +#define generate_overflow_error() npy_feraiseexcept(NPY_FE_OVERFLOW) + #elif defined(__GLIBC__) || defined(__APPLE__) || defined(__CYGWIN__) || defined(__MINGW32__) || (defined(__FreeBSD__) && (__FreeBSD_version >= 502114)) #if defined(__GLIBC__) || defined(__APPLE__) || defined(__MINGW32__) || defined(__FreeBSD__) Modified: branches/coremath/numpy/core/src/umath_ufunc_object.inc =================================================================== --- branches/coremath/numpy/core/src/umath_ufunc_object.inc 2009-02-23 16:14:55 UTC (rev 6459) +++ branches/coremath/numpy/core/src/umath_ufunc_object.inc 2009-02-23 16:25:45 UTC (rev 6460) @@ -24,102 +24,100 @@ * */ - #define USE_USE_DEFAULTS 1 - - - /* ---------------------------------------------------------------- */ +/* + * fpstatus is the ufunc_formatted hardware status + * errmask is the handling mask specified by the user. + * errobj is a Python object with (string, callable object or None) + * or NULL + */ -/* fpstatus is the ufunc_formatted hardware status - errmask is the handling mask specified by the user. - errobj is a Python object with (string, callable object or None) - or NULL -*/ - /* - 2. for each of the flags - determine whether to ignore, warn, raise error, or call Python function. - If ignore, do nothing - If warn, print a warning and continue - If raise return an error - If call, call a user-defined function with string -*/ + * 2. for each of the flags + * determine whether to ignore, warn, raise error, or call Python function. + * If ignore, do nothing + * If warn, print a warning and continue + * If raise return an error + * If call, call a user-defined function with string + */ static int _error_handler(int method, PyObject *errobj, char *errtype, int retstatus, int *first) { PyObject *pyfunc, *ret, *args; - char *name=PyString_AS_STRING(PyTuple_GET_ITEM(errobj,0)); + char *name = PyString_AS_STRING(PyTuple_GET_ITEM(errobj,0)); char msg[100]; + ALLOW_C_API_DEF; - ALLOW_C_API_DEF - - ALLOW_C_API - - switch(method) { - case UFUNC_ERR_WARN: - PyOS_snprintf(msg, sizeof(msg), - "%s encountered in %s", errtype, name); - if (PyErr_Warn(PyExc_RuntimeWarning, msg) < 0) goto fail; - break; - case UFUNC_ERR_RAISE: - PyErr_Format(PyExc_FloatingPointError, - "%s encountered in %s", - errtype, name); + ALLOW_C_API; + switch(method) { + case UFUNC_ERR_WARN: + PyOS_snprintf(msg, sizeof(msg), "%s encountered in %s", errtype, name); + if (PyErr_Warn(PyExc_RuntimeWarning, msg) < 0) { goto fail; - case UFUNC_ERR_CALL: + } + break; + case UFUNC_ERR_RAISE: + PyErr_Format(PyExc_FloatingPointError, "%s encountered in %s", + errtype, name); + goto fail; + case UFUNC_ERR_CALL: + pyfunc = PyTuple_GET_ITEM(errobj, 1); + if (pyfunc == Py_None) { + PyErr_Format(PyExc_NameError, + "python callback specified for %s (in " \ + " %s) but no function found.", + errtype, name); + goto fail; + } + args = Py_BuildValue("NN", PyString_FromString(errtype), + PyInt_FromLong((long) retstatus)); + if (args == NULL) { + goto fail; + } + ret = PyObject_CallObject(pyfunc, args); + Py_DECREF(args); + if (ret == NULL) { + goto fail; + } + Py_DECREF(ret); + break; + case UFUNC_ERR_PRINT: + if (*first) { + fprintf(stderr, "Warning: %s encountered in %s\n", errtype, name); + *first = 0; + } + break; + case UFUNC_ERR_LOG: + if (first) { + *first = 0; pyfunc = PyTuple_GET_ITEM(errobj, 1); - if (pyfunc == Py_None) { PyErr_Format(PyExc_NameError, - "python callback specified for %s (in " \ - " %s) but no function found.", - errtype, name); + "log specified for %s (in %s) but no " \ + "object with write method found.", + errtype, name); goto fail; } - args = Py_BuildValue("NN", PyString_FromString(errtype), - PyInt_FromLong((long) retstatus)); - if (args == NULL) goto fail; - ret = PyObject_CallObject(pyfunc, args); - Py_DECREF(args); - if (ret == NULL) goto fail; + PyOS_snprintf(msg, sizeof(msg), + "Warning: %s encountered in %s\n", errtype, name); + ret = PyObject_CallMethod(pyfunc, "write", "s", msg); + if (ret == NULL) { + goto fail; + } Py_DECREF(ret); - - break; - case UFUNC_ERR_PRINT: - if (*first) { - fprintf(stderr, "Warning: %s encountered in %s\n", errtype, name); - *first = 0; - } - break; - case UFUNC_ERR_LOG: - if (first) { - *first = 0; - pyfunc = PyTuple_GET_ITEM(errobj, 1); - if (pyfunc == Py_None) { - PyErr_Format(PyExc_NameError, - "log specified for %s (in %s) but no " \ - "object with write method found.", - errtype, name); - goto fail; - } - PyOS_snprintf(msg, sizeof(msg), - "Warning: %s encountered in %s\n", errtype, name); - ret = PyObject_CallMethod(pyfunc, "write", "s", msg); - if (ret == NULL) goto fail; - Py_DECREF(ret); - } - break; } - DISABLE_C_API - return 0; + break; + } + DISABLE_C_API; + return 0; - fail: - DISABLE_C_API - return -1; +fail: + DISABLE_C_API; + return -1; } @@ -184,8 +182,8 @@ #define ONE_EL_REDUCELOOP 1 #define NOBUFFER_UFUNCLOOP 2 #define NOBUFFER_REDUCELOOP 2 -#define BUFFER_UFUNCLOOP 3 -#define BUFFER_REDUCELOOP 3 +#define BUFFER_UFUNCLOOP 3 +#define BUFFER_REDUCELOOP 3 #define SIGNATURE_NOBUFFER_UFUNCLOOP 4 @@ -221,12 +219,13 @@ static char *_types_msg = "function not supported for these types, " \ "and can't coerce safely to supported types"; -/* Called for non-NULL user-defined functions. - The object should be a CObject pointing to a linked-list of functions - storing the function, data, and signature of all user-defined functions. - There must be a match with the input argument types or an error - will occur. -*/ +/* + * Called for non-NULL user-defined functions. + * The object should be a CObject pointing to a linked-list of functions + * storing the function, data, and signature of all user-defined functions. + * There must be a match with the input argument types or an error + * will occur. + */ static int _find_matching_userloop(PyObject *obj, int *arg_types, PyArray_SCALARKIND *scalars, @@ -235,20 +234,21 @@ { PyUFunc_Loop1d *funcdata; int i; + funcdata = (PyUFunc_Loop1d *)PyCObject_AsVoidPtr(obj); while (funcdata != NULL) { - for(i=0; iarg_types[i], scalars[i])) break; } - if (i==nin) { /* match found */ + if (i == nin) { + /* match found */ *function = funcdata->func; *data = funcdata->data; - /* Make sure actual arg_types supported - by the loop are used */ - for(i=0; iarg_types[i]; } return 0; @@ -258,27 +258,27 @@ return -1; } -/* if only one type is specified then it is the "first" output data-type - and the first signature matching this output data-type is returned. - - if a tuple of types is specified then an exact match to the signature - is searched and it much match exactly or an error occurs -*/ +/* + * if only one type is specified then it is the "first" output data-type + * and the first signature matching this output data-type is returned. + * + * if a tuple of types is specified then an exact match to the signature + * is searched and it much match exactly or an error occurs + */ static int extract_specified_loop(PyUFuncObject *self, int *arg_types, PyUFuncGenericFunction *function, void **data, PyObject *type_tup, int userdef) { - Py_ssize_t n=1; + Py_ssize_t n = 1; int *rtypenums; static char msg[] = "loop written to specified type(s) not found"; PyArray_Descr *dtype; int nargs; int i, j; - int strtype=0; + int strtype = 0; nargs = self->nargs; - if (PyTuple_Check(type_tup)) { n = PyTuple_GET_SIZE(type_tup); if (n != 1 && n != nargs) { @@ -292,17 +292,19 @@ else if PyString_Check(type_tup) { Py_ssize_t slen; char *thestr; + slen = PyString_GET_SIZE(type_tup); thestr = PyString_AS_STRING(type_tup); - for(i=0; i < slen-2; i++) { - if (thestr[i] == '-' && thestr[i+1] == '>') + for (i = 0; i < slen - 2; i++) { + if (thestr[i] == '-' && thestr[i+1] == '>') { break; + } } if (i < slen-2) { strtype = 1; - n = slen-2; - if (i != self->nin || - slen-2-i != self->nout) { + n = slen - 2; + if (i != self->nin + || slen - 2 - i != self->nout) { PyErr_Format(PyExc_ValueError, "a type-string for %s, " \ "requires %d typecode(s) before " \ @@ -314,7 +316,7 @@ } } rtypenums = (int *)_pya_malloc(n*sizeof(int)); - if (rtypenums==NULL) { + if (rtypenums == NULL) { PyErr_NoMemory(); return -1; } @@ -329,18 +331,21 @@ continue; } dtype = PyArray_DescrFromType((int) *ptr); - if (dtype == NULL) goto fail; + if (dtype == NULL) { + goto fail; + } rtypenums[i] = dtype->type_num; Py_DECREF(dtype); - ptr++; i++; + ptr++; + i++; } } else if (PyTuple_Check(type_tup)) { - for(i=0; itype_num; Py_DECREF(dtype); } @@ -353,12 +358,16 @@ Py_DECREF(dtype); } - if (userdef > 0) { /* search in the user-defined functions */ + if (userdef > 0) { + /* search in the user-defined functions */ PyObject *key, *obj; PyUFunc_Loop1d *funcdata; + obj = NULL; key = PyInt_FromLong((long) userdef); - if (key == NULL) goto fail; + if (key == NULL) { + goto fail; + } obj = PyDict_GetItem(self->userloops, key); Py_DECREF(key); if (obj == NULL) { @@ -367,25 +376,29 @@ " with no registered loops"); goto fail; } - /* extract the correct function - data and argtypes - */ + /* + * extract the correct function + * data and argtypes + */ funcdata = (PyUFunc_Loop1d *)PyCObject_AsVoidPtr(obj); while (funcdata != NULL) { if (n != 1) { - for(i=0; iarg_types[i]) + for (i = 0; i < nargs; i++) { + if (rtypenums[i] != funcdata->arg_types[i]) { break; + } } } else if (rtypenums[0] == funcdata->arg_types[self->nin]) { i = nargs; } - else i = -1; + else { + i = -1; + } if (i == nargs) { *function = funcdata->func; *data = funcdata->data; - for(i=0; iarg_types[i]; } Py_DECREF(obj); @@ -398,22 +411,24 @@ } /* look for match in self->functions */ - - for(j=0; jntypes; j++) { + for (j = 0; j < self->ntypes; j++) { if (n != 1) { - for(i=0; itypes[j*nargs + i]) + for(i = 0; i < nargs; i++) { + if (rtypenums[i] != self->types[j*nargs + i]) { break; + } } } else if (rtypenums[0] == self->types[j*nargs+self->nin]) { i = nargs; } - else i = -1; + else { + i = -1; + } if (i == nargs) { *function = self->functions[j]; *data = self->data[j]; - for(i=0; itypes[j*nargs+i]; } goto finish; @@ -421,7 +436,6 @@ } PyErr_SetString(PyExc_TypeError, msg); - fail: _pya_free(rtypenums); return -1; @@ -429,7 +443,6 @@ finish: _pya_free(rtypenums); return 0; - } @@ -437,7 +450,6 @@ * Called to determine coercion * Can change arg_types. */ - static int select_types(PyUFuncObject *self, int *arg_types, PyUFuncGenericFunction *function, void **data, @@ -547,9 +559,9 @@ } #if USE_USE_DEFAULTS==1 -static int PyUFunc_NUM_NODEFAULTS=0; +static int PyUFunc_NUM_NODEFAULTS = 0; #endif -static PyObject *PyUFunc_PYVALS_NAME=NULL; +static PyObject *PyUFunc_PYVALS_NAME = NULL; static int @@ -569,9 +581,9 @@ if ((*bufsize == -1) && PyErr_Occurred()) { return -1; } - if ((*bufsize < PyArray_MIN_BUFSIZE) || - (*bufsize > PyArray_MAX_BUFSIZE) || - (*bufsize % 16 != 0)) { + if ((*bufsize < PyArray_MIN_BUFSIZE) + || (*bufsize > PyArray_MAX_BUFSIZE) + || (*bufsize % 16 != 0)) { PyErr_Format(PyExc_ValueError, "buffer size (%d) is not in range " "(%"INTP_FMT" - %"INTP_FMT") or not a multiple of 16", @@ -605,13 +617,10 @@ Py_DECREF(temp); } - *errobj = Py_BuildValue("NO", - PyString_FromString(name), - retval); + *errobj = Py_BuildValue("NO", PyString_FromString(name), retval); if (*errobj == NULL) { return -1; } - return 0; } @@ -628,8 +637,7 @@ if (PyUFunc_NUM_NODEFAULTS != 0) { #endif if (PyUFunc_PYVALS_NAME == NULL) { - PyUFunc_PYVALS_NAME = \ - PyString_InternFromString(UFUNC_PYVALS_NAME); + PyUFunc_PYVALS_NAME = PyString_InternFromString(UFUNC_PYVALS_NAME); } thedict = PyThreadState_GetDict(); if (thedict == NULL) { @@ -641,20 +649,18 @@ #endif if (ref == NULL) { *errmask = UFUNC_ERR_DEFAULT; - *errobj = Py_BuildValue("NO", - PyString_FromString(name), - Py_None); + *errobj = Py_BuildValue("NO", PyString_FromString(name), Py_None); *bufsize = PyArray_BUFSIZE; return 0; } return _extract_pyvals(ref, name, bufsize, errmask, errobj); } -/* Create copies for any arrays that are less than loop->bufsize - in total size (or core_enabled) and are mis-behaved or in need - of casting. -*/ - +/* + * Create copies for any arrays that are less than loop->bufsize + * in total size (or core_enabled) and are mis-behaved or in need + * of casting. + */ static int _create_copies(PyUFuncLoopObject *loop, int *arg_types, PyArrayObject **mps) { @@ -665,12 +671,12 @@ PyArray_Descr *ntype; PyArray_Descr *atype; - for(i=0; idescr; atype = PyArray_DescrFromType(arg_types[i]); @@ -680,19 +686,20 @@ Py_DECREF(atype); } if (size < loop->bufsize || loop->ufunc->core_enabled) { - if (!(PyArray_ISBEHAVED_RO(mps[i])) || \ - PyArray_TYPE(mps[i]) != arg_types[i]) { + if (!(PyArray_ISBEHAVED_RO(mps[i])) + || PyArray_TYPE(mps[i]) != arg_types[i]) { ntype = PyArray_DescrFromType(arg_types[i]); new = PyArray_FromAny((PyObject *)mps[i], ntype, 0, 0, FORCECAST | ALIGNED, NULL); - if (new == NULL) return -1; + if (new == NULL) { + return -1; + } Py_DECREF(mps[i]); mps[i] = (PyArrayObject *)new; } } } - return 0; } @@ -721,13 +728,14 @@ #undef _GETATTR_ -/* Return the position of next non-white-space char in the string -*/ +/* Return the position of next non-white-space char in the string */ static int _next_non_white_space(const char* str, int offset) { int ret = offset; - while (str[ret] == ' ' || str[ret] == '\t') ret++; + while (str[ret] == ' ' || str[ret] == '\t') { + ret++; + } return ret; } @@ -743,34 +751,41 @@ return _is_alpha_underscore(ch) || (ch >= '0' && ch <= '9'); } -/* Return the ending position of a variable name -*/ +/* + * Return the ending position of a variable name + */ static int _get_end_of_name(const char* str, int offset) { int ret = offset; - while (_is_alnum_underscore(str[ret])) ret++; + while (_is_alnum_underscore(str[ret])) { + ret++; + } return ret; } -/* Returns 1 if the dimension names pointed by s1 and s2 are the same, - otherwise returns 0. -*/ +/* + * Returns 1 if the dimension names pointed by s1 and s2 are the same, + * otherwise returns 0. + */ static int _is_same_name(const char* s1, const char* s2) { while (_is_alnum_underscore(*s1) && _is_alnum_underscore(*s2)) { - if (*s1 != *s2) return 0; + if (*s1 != *s2) { + return 0; + } s1++; s2++; } return !_is_alnum_underscore(*s1) && !_is_alnum_underscore(*s2); } -/* Sets core_num_dim_ix, core_num_dims, core_dim_ixs, core_offsets, - and core_signature in PyUFuncObject "self". Returns 0 unless an - error occured. -*/ +/* + * Sets core_num_dim_ix, core_num_dims, core_dim_ixs, core_offsets, + * and core_signature in PyUFuncObject "self". Returns 0 unless an + * error occured. + */ static int _parse_signature(PyUFuncObject *self, const char *signature) { @@ -790,54 +805,59 @@ len = strlen(signature); self->core_signature = _pya_malloc(sizeof(char) * (len+1)); - if (self->core_signature) + if (self->core_signature) { strcpy(self->core_signature, signature); - + } /* Allocate sufficient memory to store pointers to all dimension names */ var_names = _pya_malloc(sizeof(char const*) * len); if (var_names == NULL) { PyErr_NoMemory(); return -1; } - + self->core_enabled = 1; self->core_num_dim_ix = 0; self->core_num_dims = _pya_malloc(sizeof(int) * self->nargs); self->core_dim_ixs = _pya_malloc(sizeof(int) * len); /* shrink this later */ self->core_offsets = _pya_malloc(sizeof(int) * self->nargs); - if (self->core_num_dims == NULL || self->core_dim_ixs == NULL || - self->core_offsets == NULL) { + if (self->core_num_dims == NULL || self->core_dim_ixs == NULL + || self->core_offsets == NULL) { PyErr_NoMemory(); goto fail; } i = _next_non_white_space(signature, 0); - - while (signature[i] != '\0') { /* loop over input/output arguments */ + while (signature[i] != '\0') { + /* loop over input/output arguments */ if (cur_arg == self->nin) { /* expect "->" */ if (signature[i] != '-' || signature[i+1] != '>') { parse_error = "expect '->'"; goto fail; } - i = _next_non_white_space(signature, i+2); + i = _next_non_white_space(signature, i + 2); } - /* parse core dimensions of one argument, e.g. "()", "(i)", or - "(i,j)" */ + /* + * parse core dimensions of one argument, + * e.g. "()", "(i)", or "(i,j)" + */ if (signature[i] != '(') { parse_error = "expect '('"; goto fail; } - i = _next_non_white_space(signature, i+1); - while (signature[i] != ')') { /* loop over core dimensions */ + i = _next_non_white_space(signature, i + 1); + while (signature[i] != ')') { + /* loop over core dimensions */ int j = 0; if (!_is_alpha_underscore(signature[i])) { parse_error = "expect dimension name"; goto fail; } while (j < self->core_num_dim_ix) { - if (_is_same_name(signature+i, var_names[j])) break; + if (_is_same_name(signature+i, var_names[j])) { + break; + } j++; } if (j >= self->core_num_dim_ix) { @@ -855,7 +875,7 @@ } if (signature[i] == ',') { - i = _next_non_white_space(signature, i+1); + i = _next_non_white_space(signature, i + 1); if (signature[i] == ')') { parse_error = "',' must not be followed by ')'"; goto fail; @@ -866,16 +886,18 @@ self->core_offsets[cur_arg] = cur_core_dim-nd; cur_arg++; nd = 0; - i = _next_non_white_space(signature, i+1); + i = _next_non_white_space(signature, i + 1); if (cur_arg != self->nin && cur_arg != self->nargs) { - /* The list of input arguments (or output arguments) was - only read partially */ + /* + * The list of input arguments (or output arguments) was + * only read partially + */ if (signature[i] != ',') { parse_error = "expect ','"; goto fail; } - i = _next_non_white_space(signature, i+1); + i = _next_non_white_space(signature, i + 1); } } if (cur_arg != self->nargs) { @@ -883,12 +905,14 @@ goto fail; } self->core_dim_ixs = _pya_realloc(self->core_dim_ixs, - sizeof(int) * cur_core_dim); + sizeof(int)*cur_core_dim); /* check for trivial core-signature, e.g. "(),()->()" */ - if (cur_core_dim == 0) + if (cur_core_dim == 0) { self->core_enabled = 0; + } _pya_free((void*)var_names); return 0; + fail: _pya_free((void*)var_names); if (parse_error) { @@ -906,10 +930,11 @@ return -1; } -/* Concatenate the loop and core dimensions of - PyArrayMultiIterObject's iarg-th argument, to recover a full - dimension array (used for output arguments). -*/ +/* + * Concatenate the loop and core dimensions of + * PyArrayMultiIterObject's iarg-th argument, to recover a full + * dimension array (used for output arguments). + */ static npy_intp* _compute_output_dims(PyUFuncLoopObject *loop, int iarg, int *out_nd, npy_intp *tmp_dims) @@ -933,14 +958,14 @@ memcpy(tmp_dims, loop->dimensions, sizeof(npy_intp) * loop->nd); /* copy core dimension */ - for (i = 0; i < ufunc->core_num_dims[iarg]; i++) + for (i = 0; i < ufunc->core_num_dims[iarg]; i++) { tmp_dims[loop->nd + i] = loop->core_dim_sizes[1 + - ufunc->core_dim_ixs[ufunc->core_offsets[iarg]+i]]; + ufunc->core_dim_ixs[ufunc->core_offsets[iarg] + i]]; + } return tmp_dims; } -/* Check and set core_dim_sizes and core_strides for the i-th argument. -*/ +/* Check and set core_dim_sizes and core_strides for the i-th argument. */ static int _compute_dimension_size(PyUFuncLoopObject *loop, PyArrayObject **mps, int i) { @@ -949,7 +974,7 @@ int k = PyArray_NDIM(mps[i]) - ufunc->core_num_dims[i]; int ind; for (ind = 0; ind < ufunc->core_num_dims[i]; ind++, j++, k++) { - npy_intp dim = k<0 ? 1 : PyArray_DIM(mps[i], k); + npy_intp dim = k < 0 ? 1 : PyArray_DIM(mps[i], k); /* First element of core_dim_sizes will be used for looping */ int dim_ix = ufunc->core_dim_ixs[j] + 1; if (loop->core_dim_sizes[dim_ix] == 1) { @@ -957,8 +982,7 @@ loop->core_dim_sizes[dim_ix] = dim; } else if (dim != 1 && dim != loop->core_dim_sizes[dim_ix]) { - PyErr_SetString(PyExc_ValueError, - "core dimensions mismatch"); + PyErr_SetString(PyExc_ValueError, "core dimensions mismatch"); return -1; } /* First ufunc->nargs elements will be used for looping */ @@ -974,23 +998,25 @@ { PyArrayObject *ret; int nd = ap->nd - core_nd; - if (nd < 0) nd = 0; + if (nd < 0) { + nd = 0; + } /* The following code is basically taken from PyArray_Transpose */ - Py_INCREF(ap->descr); /* NewFromDescr will steal this reference */ + /* NewFromDescr will steal this reference */ + Py_INCREF(ap->descr); ret = (PyArrayObject *) PyArray_NewFromDescr(ap->ob_type, ap->descr, nd, ap->dimensions, ap->strides, ap->data, ap->flags, (PyObject *)ap); - if (ret == NULL) return NULL; - + if (ret == NULL) { + return NULL; + } /* point at true owner of memory: */ ret->base = (PyObject *)ap; Py_INCREF(ap); - PyArray_UpdateFlags(ret, CONTIGUOUS | FORTRAN); - return ret; } @@ -1018,8 +1044,7 @@ /* Check number of arguments */ nargs = PyTuple_Size(args); if ((nargs < self->nin) || (nargs > self->nargs)) { - PyErr_SetString(PyExc_ValueError, - "invalid number of arguments"); + PyErr_SetString(PyExc_ValueError, "invalid number of arguments"); return -1; } @@ -1046,7 +1071,8 @@ if (!object && PyTypeNum_ISOBJECT(arg_types[i])) { object = 1; } - /* debug + /* + * debug * fprintf(stderr, "array %d has reference %d\n", i, * (mps[i])->ob_refcnt); */ @@ -1083,31 +1109,30 @@ * different kinds of lesser kinds then use normal coercion rules */ if (allscalars || (maxsckind > maxarrkind)) { - for(i = 0; i < self->nin; i++) { + for (i = 0; i < self->nin; i++) { scalars[i] = PyArray_NOSCALAR; } } /* Select an appropriate function for these argument types. */ if (select_types(loop->ufunc, arg_types, &(loop->function), - &(loop->funcdata), scalars, typetup) == -1) + &(loop->funcdata), scalars, typetup) == -1) { return -1; - + } /* * FAIL with NotImplemented if the other object has * the __r__ method and has __array_priority__ as * an attribute (signalling it can handle ndarray's) * and is not already an ndarray or a subtype of the same type. */ - if ((arg_types[1] == PyArray_OBJECT) && \ - (loop->ufunc->nin==2) && (loop->ufunc->nout == 1)) { + if ((arg_types[1] == PyArray_OBJECT) + && (loop->ufunc->nin==2) && (loop->ufunc->nout == 1)) { PyObject *_obj = PyTuple_GET_ITEM(args, 1); - if (!PyArray_CheckExact(_obj) && + if (!PyArray_CheckExact(_obj) /* If both are same subtype of object arrays, then proceed */ - !(_obj->ob_type == (PyTuple_GET_ITEM(args, 0))->ob_type) && \ - - PyObject_HasAttrString(_obj, "__array_priority__") && \ - _has_reflected_op(_obj, loop->ufunc->name)) { + && !(_obj->ob_type == (PyTuple_GET_ITEM(args, 0))->ob_type) + && PyObject_HasAttrString(_obj, "__array_priority__") + && _has_reflected_op(_obj, loop->ufunc->name)) { loop->notimplemented = 1; return nargs; } @@ -1121,33 +1146,34 @@ return -1; } - /* Only use loop dimensions when constructing Iterator: + /* + * Only use loop dimensions when constructing Iterator: * temporarily replace mps[i] (will be recovered below). */ if (self->core_enabled) { for (i = 0; i < self->nin; i++) { PyArrayObject *ao; - if (_compute_dimension_size(loop, mps, i) < 0) + if (_compute_dimension_size(loop, mps, i) < 0) { return -1; - + } ao = _trunc_coredim(mps[i], self->core_num_dims[i]); - if (ao == NULL) + if (ao == NULL) { return -1; + } mps[i] = ao; } } /* Create Iterators for the Inputs */ - for(i = 0; i < self->nin; i++) { - loop->iters[i] = (PyArrayIterObject *) \ + for (i = 0; i < self->nin; i++) { + loop->iters[i] = (PyArrayIterObject *) PyArray_IterNew((PyObject *)mps[i]); if (loop->iters[i] == NULL) { return -1; } } - /* Recover mps[i]. */ if (self->core_enabled) { for (i = 0; i < self->nin; i++) { @@ -1164,7 +1190,7 @@ } /* Get any return arguments */ - for(i = self->nin; i < nargs; i++) { + for (i = self->nin; i < nargs; i++) { mps[i] = (PyArrayObject *)PyTuple_GET_ITEM(args, i); if (((PyObject *)mps[i])==Py_None) { mps[i] = NULL; @@ -1188,27 +1214,25 @@ return -1; } } - if (self->core_enabled) { - if (_compute_dimension_size(loop, mps, i) < 0) + if (_compute_dimension_size(loop, mps, i) < 0) { return -1; + } } out_dims = _compute_output_dims(loop, i, &out_nd, temp_dims); - if (!out_dims) return -1; - - if (mps[i]->nd != out_nd || - !PyArray_CompareLists(mps[i]->dimensions, - out_dims, out_nd)) { - PyErr_SetString(PyExc_ValueError, - "invalid return array shape"); + if (!out_dims) { + return -1; + } + if (mps[i]->nd != out_nd + || !PyArray_CompareLists(mps[i]->dimensions, out_dims, out_nd)) { + PyErr_SetString(PyExc_ValueError, "invalid return array shape"); Py_DECREF(mps[i]); mps[i] = NULL; return -1; } if (!PyArray_ISWRITEABLE(mps[i])) { - PyErr_SetString(PyExc_ValueError, - "return array is not writeable"); + PyErr_SetString(PyExc_ValueError, "return array is not writeable"); Py_DECREF(mps[i]); mps[i] = NULL; return -1; @@ -1221,8 +1245,9 @@ if (mps[i] == NULL) { out_dims = _compute_output_dims(loop, i, &out_nd, temp_dims); - if (!out_dims) return -1; - + if (!out_dims) { + return -1; + } mps[i] = (PyArrayObject *)PyArray_New(subtype, out_nd, out_dims, @@ -1250,8 +1275,8 @@ } /* still not the same -- or will we have to use buffers?*/ - if (mps[i]->descr->type_num != arg_types[i] || - !PyArray_ISBEHAVED_RO(mps[i])) { + if (mps[i]->descr->type_num != arg_types[i] + || !PyArray_ISBEHAVED_RO(mps[i])) { if (loop->size < loop->bufsize || self->core_enabled) { PyObject *new; /* @@ -1276,17 +1301,18 @@ PyArrayObject *ao; /* computer for all output arguments, and set strides in "loop" */ - if (_compute_dimension_size(loop, mps, i) < 0) + if (_compute_dimension_size(loop, mps, i) < 0) { return -1; - + } ao = _trunc_coredim(mps[i], self->core_num_dims[i]); - if (ao == NULL) + if (ao == NULL) { return -1; + } /* Temporarily modify mps[i] for constructing iterator. */ mps[i] = ao; } - loop->iters[i] = (PyArrayIterObject *) \ + loop->iters[i] = (PyArrayIterObject *) PyArray_IterNew((PyObject *)mps[i]); if (loop->iters[i] == NULL) { return -1; @@ -1307,22 +1333,18 @@ */ loop->bufcnt = 0; loop->obj = 0; - /* Determine looping method needed */ loop->meth = NO_UFUNCLOOP; - if (loop->size == 0) { return nargs; } - if (self->core_enabled) { loop->meth = SIGNATURE_NOBUFFER_UFUNCLOOP; } - - for(i = 0; i < self->nargs; i++) { + for (i = 0; i < self->nargs; i++) { loop->needbuffer[i] = 0; - if (arg_types[i] != mps[i]->descr->type_num || - !PyArray_ISBEHAVED_RO(mps[i])) { + if (arg_types[i] != mps[i]->descr->type_num + || !PyArray_ISBEHAVED_RO(mps[i])) { if (self->core_enabled) { PyErr_SetString(PyExc_RuntimeError, "never reached; copy should have been made"); @@ -1331,25 +1353,24 @@ loop->meth = BUFFER_UFUNCLOOP; loop->needbuffer[i] = 1; } - if (!loop->obj && ((mps[i]->descr->type_num == PyArray_OBJECT) || - (arg_types[i] == PyArray_OBJECT))) { + if (!loop->obj + && ((mps[i]->descr->type_num == PyArray_OBJECT) + || (arg_types[i] == PyArray_OBJECT))) { loop->obj = 1; } } - if (self->core_enabled && loop->obj) { PyErr_SetString(PyExc_TypeError, "Object type not allowed in ufunc with signature"); return -1; } - if (loop->meth == NO_UFUNCLOOP) { loop->meth = ONE_UFUNCLOOP; /* All correct type and BEHAVED */ /* Check for non-uniform stridedness */ - for(i = 0; i < self->nargs; i++) { + for (i = 0; i < self->nargs; i++) { if (!(loop->iters[i]->contiguous)) { /* * May still have uniform stride @@ -1363,7 +1384,7 @@ } } if (loop->meth == ONE_UFUNCLOOP) { - for(i = 0; i < self->nargs; i++) { + for (i = 0; i < self->nargs; i++) { loop->bufptr[i] = mps[i]->data; } } @@ -1398,23 +1419,21 @@ * Thus, choose the axis for which strides of the last iterator is * smallest but non-zero. */ - - for(i = 0; i < loop->nd; i++) { + for (i = 0; i < loop->nd; i++) { stride_sum[i] = 0; - for(j = 0; j < loop->numiter; j++) { + for (j = 0; j < loop->numiter; j++) { stride_sum[i] += loop->iters[j]->strides[i]; } } ldim = loop->nd - 1; - minsum = stride_sum[loop->nd-1]; - for(i = loop->nd - 2; i >= 0; i--) { + minsum = stride_sum[loop->nd - 1]; + for (i = loop->nd - 2; i >= 0; i--) { if (stride_sum[i] < minsum ) { ldim = i; minsum = stride_sum[i]; } } - maxdim = loop->dimensions[ldim]; loop->size /= maxdim; loop->bufcnt = maxdim; @@ -1426,10 +1445,10 @@ * setting the size to 1 in that dimension * (just in the iterators) */ - for(i = 0; i < loop->numiter; i++) { + for (i = 0; i < loop->numiter; i++) { it = loop->iters[i]; it->contiguous = 0; - it->size /= (it->dims_m1[ldim]+1); + it->size /= (it->dims_m1[ldim] + 1); it->dims_m1[ldim] = 0; it->backstrides[ldim] = 0; @@ -1461,7 +1480,7 @@ if (loop->meth == BUFFER_UFUNCLOOP) { loop->leftover = maxdim % loop->bufsize; loop->ninnerloops = (maxdim / loop->bufsize) + 1; - for(i = 0; i < self->nargs; i++) { + for (i = 0; i < self->nargs; i++) { if (loop->needbuffer[i] && loop->steps[i]) { loop->steps[i] = mps[i]->descr->elsize; } @@ -1471,11 +1490,13 @@ } else if (loop->meth == ONE_UFUNCLOOP) { /* uniformly-strided case */ - for(i = 0; i < self->nargs; i++) { - if (PyArray_SIZE(mps[i]) == 1) + for (i = 0; i < self->nargs; i++) { + if (PyArray_SIZE(mps[i]) == 1) { loop->steps[i] = 0; - else - loop->steps[i] = mps[i]->strides[mps[i]->nd-1]; + } + else { + loop->steps[i] = mps[i]->strides[mps[i]->nd - 1]; + } } } @@ -1487,20 +1508,20 @@ * not copied multiple times */ if (loop->meth == BUFFER_UFUNCLOOP) { - int cnt = 0, cntcast = 0; /* keeps track of bytes to allocate */ + int cnt = 0, cntcast = 0; int scnt = 0, scntcast = 0; char *castptr; char *bufptr; - int last_was_scalar=0; - int last_cast_was_scalar=0; - int oldbufsize=0; - int oldsize=0; + int last_was_scalar = 0; + int last_cast_was_scalar = 0; + int oldbufsize = 0; + int oldsize = 0; int scbufsize = 4*sizeof(double); int memsize; PyArray_Descr *descr; /* compute the element size */ - for(i = 0; i < self->nargs; i++) { + for (i = 0; i < self->nargs; i++) { if (!loop->needbuffer[i]) { continue; } @@ -1536,11 +1557,11 @@ memsize = loop->bufsize*(cnt+cntcast) + scbufsize*(scnt+scntcast); loop->buffer[0] = PyDataMem_NEW(memsize); - /* debug + /* + * debug * fprintf(stderr, "Allocated buffer at %p of size %d, cnt=%d, cntcast=%d\n", * loop->buffer[0], loop->bufsize * (cnt + cntcast), cnt, cntcast); */ - if (loop->buffer[0] == NULL) { PyErr_NoMemory(); return -1; @@ -1551,11 +1572,11 @@ castptr = loop->buffer[0] + loop->bufsize*cnt + scbufsize*scnt; bufptr = loop->buffer[0]; loop->objfunc = 0; - for(i = 0; i < self->nargs; i++) { + for (i = 0; i < self->nargs; i++) { if (!loop->needbuffer[i]) { continue; } - loop->buffer[i] = bufptr + (last_was_scalar ? scbufsize : \ + loop->buffer[i] = bufptr + (last_was_scalar ? scbufsize : loop->bufsize)*oldbufsize; last_was_scalar = (loop->steps[i] == 0); bufptr = loop->buffer[i]; @@ -1563,7 +1584,7 @@ /* fprintf(stderr, "buffer[%d] = %p\n", i, loop->buffer[i]); */ if (loop->cast[i]) { PyArray_Descr *descr; - loop->castbuf[i] = castptr + (last_cast_was_scalar ? scbufsize : \ + loop->castbuf[i] = castptr + (last_cast_was_scalar ? scbufsize : loop->bufsize)*oldsize; last_cast_was_scalar = last_was_scalar; /* fprintf(stderr, "castbuf[%d] = %p\n", i, loop->castbuf[i]); */ @@ -1572,8 +1593,9 @@ Py_DECREF(descr); loop->bufptr[i] = loop->castbuf[i]; castptr = loop->castbuf[i]; - if (loop->steps[i]) + if (loop->steps[i]) { loop->steps[i] = oldsize; + } } else { loop->bufptr[i] = loop->buffer[i]; @@ -1597,7 +1619,9 @@ Py_XDECREF(self->ret); Py_XDECREF(self->errobj); Py_XDECREF(self->decref); - if (self->buffer) PyDataMem_FREE(self->buffer); + if (self->buffer) { + PyDataMem_FREE(self->buffer); + } Py_DECREF(self->ufunc); } _pya_free(self); @@ -1609,12 +1633,15 @@ int i; if (self->ufunc != NULL) { - if (self->core_dim_sizes) + if (self->core_dim_sizes) { _pya_free(self->core_dim_sizes); - if (self->core_strides) + } + if (self->core_strides) { _pya_free(self->core_strides); - for(i = 0; i < self->ufunc->nargs; i++) + } + for (i = 0; i < self->ufunc->nargs; i++) { Py_XDECREF(self->iters[i]); + } if (self->buffer[0]) { PyDataMem_FREE(self->buffer[0]); } @@ -1646,7 +1673,7 @@ loop->ufunc = self; Py_INCREF(self); loop->buffer[0] = NULL; - for(i = 0; i < self->nargs; i++) { + for (i = 0; i < self->nargs; i++) { loop->iters[i] = NULL; loop->cast[i] = NULL; } @@ -1658,19 +1685,19 @@ if (self->core_enabled) { int num_dim_ix = 1 + self->core_num_dim_ix; - int nstrides = self->nargs + self->core_offsets[self->nargs-1] - + self->core_num_dims[self->nargs-1]; - loop->core_dim_sizes = _pya_malloc(sizeof(npy_intp) * num_dim_ix); - loop->core_strides = _pya_malloc(sizeof(npy_intp) * nstrides); + int nstrides = self->nargs + self->core_offsets[self->nargs - 1] + + self->core_num_dims[self->nargs - 1]; + loop->core_dim_sizes = _pya_malloc(sizeof(npy_intp)*num_dim_ix); + loop->core_strides = _pya_malloc(sizeof(npy_intp)*nstrides); if (loop->core_dim_sizes == NULL || loop->core_strides == NULL) { PyErr_NoMemory(); goto fail; } memset(loop->core_strides, 0, sizeof(npy_intp) * nstrides); - for (i = 0; i < num_dim_ix; i++) + for (i = 0; i < num_dim_ix; i++) { loop->core_dim_sizes[i] = 1; + } } - name = self->name ? self->name : ""; /* @@ -1680,9 +1707,10 @@ */ if (kwds != NULL) { PyObject *key, *value; - Py_ssize_t pos=0; + Py_ssize_t pos = 0; while (PyDict_Next(kwds, &pos, &key, &value)) { char *keystring = PyString_AsString(key); + if (keystring == NULL) { PyErr_Clear(); PyErr_SetString(PyExc_TypeError, "invalid keyword"); @@ -1721,7 +1749,6 @@ if (construct_arrays(loop, args, mps, typetup) < 0) { goto fail; } - PyUFunc_clearfperr(); return loop; @@ -1794,13 +1821,12 @@ * */ -/* +/*UFUNC_API + * * This generic function is called with the ufunc object, the arguments to it, * and an array of (pointers to) PyArrayObjects which are NULL. The * arguments are parsed and placed in mps in construct_loop (construct_arrays) */ - -/*UFUNC_API*/ static int PyUFunc_GenericFunction(PyUFuncObject *self, PyObject *args, PyObject *kwds, PyArrayObject **mps) @@ -1824,298 +1850,296 @@ NPY_LOOP_BEGIN_THREADS; switch(loop->meth) { - case ONE_UFUNCLOOP: - /* - * Everything is contiguous, notswapped, aligned, - * and of the right type. -- Fastest. - * Or if not contiguous, then a single-stride - * increment moves through the entire array. - */ - /*fprintf(stderr, "ONE...%d\n", loop->size);*/ - loop->function((char **)loop->bufptr, &(loop->size), + case ONE_UFUNCLOOP: + /* + * Everything is contiguous, notswapped, aligned, + * and of the right type. -- Fastest. + * Or if not contiguous, then a single-stride + * increment moves through the entire array. + */ + /*fprintf(stderr, "ONE...%d\n", loop->size);*/ + loop->function((char **)loop->bufptr, &(loop->size), + loop->steps, loop->funcdata); + UFUNC_CHECK_ERROR(loop); + break; + case NOBUFFER_UFUNCLOOP: + /* + * Everything is notswapped, aligned and of the + * right type but not contiguous. -- Almost as fast. + */ + /*fprintf(stderr, "NOBUFFER...%d\n", loop->size);*/ + while (loop->index < loop->size) { + for (i = 0; i < self->nargs; i++) { + loop->bufptr[i] = loop->iters[i]->dataptr; + } + loop->function((char **)loop->bufptr, &(loop->bufcnt), loop->steps, loop->funcdata); UFUNC_CHECK_ERROR(loop); - break; - case NOBUFFER_UFUNCLOOP: - /* - * Everything is notswapped, aligned and of the - * right type but not contiguous. -- Almost as fast. - */ - /*fprintf(stderr, "NOBUFFER...%d\n", loop->size);*/ - - while (loop->index < loop->size) { - for(i = 0; i < self->nargs; i++) { - loop->bufptr[i] = loop->iters[i]->dataptr; - } - loop->function((char **)loop->bufptr, &(loop->bufcnt), - loop->steps, loop->funcdata); - UFUNC_CHECK_ERROR(loop); + /* Adjust loop pointers */ + for (i = 0; i < self->nargs; i++) { + PyArray_ITER_NEXT(loop->iters[i]); + } + loop->index++; + } + break; + case SIGNATURE_NOBUFFER_UFUNCLOOP: + while (loop->index < loop->size) { + for (i = 0; i < self->nargs; i++) { + loop->bufptr[i] = loop->iters[i]->dataptr; + } + loop->function((char **)loop->bufptr, loop->core_dim_sizes, + loop->core_strides, loop->funcdata); + UFUNC_CHECK_ERROR(loop); - /* Adjust loop pointers */ - for(i = 0; i < self->nargs; i++) { - PyArray_ITER_NEXT(loop->iters[i]); - } - loop->index++; + /* Adjust loop pointers */ + for (i = 0; i < self->nargs; i++) { + PyArray_ITER_NEXT(loop->iters[i]); } - break; + loop->index++; + } + break; + case BUFFER_UFUNCLOOP: { + /* This should be a function */ + PyArray_CopySwapNFunc *copyswapn[NPY_MAXARGS]; + PyArrayIterObject **iters=loop->iters; + int *swap=loop->swap; + char **dptr=loop->dptr; + int mpselsize[NPY_MAXARGS]; + intp laststrides[NPY_MAXARGS]; + int fastmemcpy[NPY_MAXARGS]; + int *needbuffer = loop->needbuffer; + intp index=loop->index, size=loop->size; + int bufsize; + intp bufcnt; + int copysizes[NPY_MAXARGS]; + char **bufptr = loop->bufptr; + char **buffer = loop->buffer; + char **castbuf = loop->castbuf; + intp *steps = loop->steps; + char *tptr[NPY_MAXARGS]; + int ninnerloops = loop->ninnerloops; + Bool pyobject[NPY_MAXARGS]; + int datasize[NPY_MAXARGS]; + int j, k, stopcondition; + char *myptr1, *myptr2; - case SIGNATURE_NOBUFFER_UFUNCLOOP: - while (loop->index < loop->size) { - for(i = 0; i < self->nargs; i++) { - loop->bufptr[i] = loop->iters[i]->dataptr; - } - loop->function((char **)loop->bufptr, loop->core_dim_sizes, - loop->core_strides, loop->funcdata); - UFUNC_CHECK_ERROR(loop); + for (i = 0; i nargs; i++) { + copyswapn[i] = mps[i]->descr->f->copyswapn; + mpselsize[i] = mps[i]->descr->elsize; + pyobject[i] = (loop->obj + && (mps[i]->descr->type_num == PyArray_OBJECT)); + laststrides[i] = iters[i]->strides[loop->lastdim]; + if (steps[i] && laststrides[i] != mpselsize[i]) { + fastmemcpy[i] = 0; + } + else { + fastmemcpy[i] = 1; + } + } + /* Do generic buffered looping here (works for any kind of + * arrays -- some need buffers, some don't. + * + * + * New algorithm: N is the largest dimension. B is the buffer-size. + * quotient is loop->ninnerloops-1 + * remainder is loop->leftover + * + * Compute N = quotient * B + remainder. + * quotient = N / B # integer math + * (store quotient + 1) as the number of innerloops + * remainder = N % B # integer remainder + * + * On the inner-dimension we will have (quotient + 1) loops where + * the size of the inner function is B for all but the last when the niter size is + * remainder. + * + * So, the code looks very similar to NOBUFFER_LOOP except the inner-most loop is + * replaced with... + * + * for(i=0; inargs; i++) { - PyArray_ITER_NEXT(loop->iters[i]); - } - loop->index++; - } - break; - case BUFFER_UFUNCLOOP: { - PyArray_CopySwapNFunc *copyswapn[NPY_MAXARGS]; - PyArrayIterObject **iters=loop->iters; - int *swap=loop->swap; - char **dptr=loop->dptr; - int mpselsize[NPY_MAXARGS]; - intp laststrides[NPY_MAXARGS]; - int fastmemcpy[NPY_MAXARGS]; - int *needbuffer=loop->needbuffer; - intp index=loop->index, size=loop->size; - int bufsize; - intp bufcnt; - int copysizes[NPY_MAXARGS]; - char **bufptr = loop->bufptr; - char **buffer = loop->buffer; - char **castbuf = loop->castbuf; - intp *steps = loop->steps; - char *tptr[NPY_MAXARGS]; - int ninnerloops = loop->ninnerloops; - Bool pyobject[NPY_MAXARGS]; - int datasize[NPY_MAXARGS]; - int j, k, stopcondition; - char *myptr1, *myptr2; + /* + * fprintf(stderr, "BUFFER...%d,%d,%d\n", loop->size, + * loop->ninnerloops, loop->leftover); + */ + /* + * for(i=0; inargs; i++) { + * fprintf(stderr, "iters[%d]->dataptr = %p, %p of size %d\n", i, + * iters[i], iters[i]->ao->data, PyArray_NBYTES(iters[i]->ao)); + * } + */ + stopcondition = ninnerloops; + if (loop->leftover == 0) { + stopcondition--; + } + while (index < size) { + bufsize=loop->bufsize; + for(i = 0; inargs; i++) { + tptr[i] = loop->iters[i]->dataptr; + if (needbuffer[i]) { + dptr[i] = bufptr[i]; + datasize[i] = (steps[i] ? bufsize : 1); + copysizes[i] = datasize[i] * mpselsize[i]; + } + else { + dptr[i] = tptr[i]; + } + } - for(i = 0; i nargs; i++) { - copyswapn[i] = mps[i]->descr->f->copyswapn; - mpselsize[i] = mps[i]->descr->elsize; - pyobject[i] = (loop->obj && \ - (mps[i]->descr->type_num == PyArray_OBJECT)); - laststrides[i] = iters[i]->strides[loop->lastdim]; - if (steps[i] && laststrides[i] != mpselsize[i]) { - fastmemcpy[i] = 0; - } - else { - fastmemcpy[i] = 1; - } - } - /* Do generic buffered looping here (works for any kind of - * arrays -- some need buffers, some don't. - * - * - * New algorithm: N is the largest dimension. B is the buffer-size. - * quotient is loop->ninnerloops-1 - * remainder is loop->leftover - * - * Compute N = quotient * B + remainder. - * quotient = N / B # integer math - * (store quotient + 1) as the number of innerloops - * remainder = N % B # integer remainder - * - * On the inner-dimension we will have (quotient + 1) loops where - * the size of the inner function is B for all but the last when the niter size is - * remainder. - * - * So, the code looks very similar to NOBUFFER_LOOP except the inner-most loop is - * replaced with... - * - * for(i=0; ileftover; + for (i=0; inargs;i++) { + if (!needbuffer[i]) { + continue; + } + datasize[i] = (steps[i] ? bufsize : 1); + copysizes[i] = datasize[i] * mpselsize[i]; + } + } + for (i = 0; i < self->nin; i++) { + if (!needbuffer[i]) { + continue; + } + if (fastmemcpy[i]) { + memcpy(buffer[i], tptr[i], copysizes[i]); + } + else { + myptr1 = buffer[i]; + myptr2 = tptr[i]; + for (j = 0; j < bufsize; j++) { + memcpy(myptr1, myptr2, mpselsize[i]); + myptr1 += mpselsize[i]; + myptr2 += laststrides[i]; + } + } + /* swap the buffer if necessary */ + if (swap[i]) { + /* fprintf(stderr, "swapping...\n");*/ + copyswapn[i](buffer[i], mpselsize[i], NULL, -1, + (intp) datasize[i], 1, + mps[i]); + } + /* cast to the other buffer if necessary */ + if (loop->cast[i]) { + /* fprintf(stderr, "casting... %d, %p %p\n", i, buffer[i]); */ + loop->cast[i](buffer[i], castbuf[i], + (intp) datasize[i], + NULL, NULL); + } + } - /* - * fprintf(stderr, "BUFFER...%d,%d,%d\n", loop->size, - * loop->ninnerloops, loop->leftover); - */ - /* - * for(i=0; inargs; i++) { - * fprintf(stderr, "iters[%d]->dataptr = %p, %p of size %d\n", i, - * iters[i], iters[i]->ao->data, PyArray_NBYTES(iters[i]->ao)); - * } - */ - stopcondition = ninnerloops; - if (loop->leftover == 0) stopcondition--; - while (index < size) { - bufsize=loop->bufsize; - for(i = 0; inargs; i++) { - tptr[i] = loop->iters[i]->dataptr; - if (needbuffer[i]) { - dptr[i] = bufptr[i]; - datasize[i] = (steps[i] ? bufsize : 1); - copysizes[i] = datasize[i] * mpselsize[i]; - } - else { - dptr[i] = tptr[i]; - } - } + bufcnt = (intp) bufsize; + loop->function((char **)dptr, &bufcnt, steps, loop->funcdata); + UFUNC_CHECK_ERROR(loop); - /* This is the inner function over the last dimension */ - for(k = 1; k<=stopcondition; k++) { - if (k == ninnerloops) { - bufsize = loop->leftover; - for(i=0; inargs;i++) { - if (!needbuffer[i]) { - continue; - } - datasize[i] = (steps[i] ? bufsize : 1); - copysizes[i] = datasize[i] * mpselsize[i]; - } - } - for(i = 0; i < self->nin; i++) { - if (!needbuffer[i]) { - continue; - } - if (fastmemcpy[i]) { - memcpy(buffer[i], tptr[i], copysizes[i]); - } - else { - myptr1 = buffer[i]; - myptr2 = tptr[i]; - for(j = 0; j < bufsize; j++) { - memcpy(myptr1, myptr2, mpselsize[i]); - myptr1 += mpselsize[i]; - myptr2 += laststrides[i]; - } - } + for (i = self->nin; i < self->nargs; i++) { + if (!needbuffer[i]) { + continue; + } + if (loop->cast[i]) { + /* fprintf(stderr, "casting back... %d, %p", i, castbuf[i]); */ + loop->cast[i](castbuf[i], + buffer[i], + (intp) datasize[i], + NULL, NULL); + } + if (swap[i]) { + copyswapn[i](buffer[i], mpselsize[i], NULL, -1, + (intp) datasize[i], 1, + mps[i]); + } + /* + * copy back to output arrays + * decref what's already there for object arrays + */ + if (pyobject[i]) { + myptr1 = tptr[i]; + for (j = 0; j < datasize[i]; j++) { + Py_XDECREF(*((PyObject **)myptr1)); + myptr1 += laststrides[i]; + } + } + if (fastmemcpy[i]) { + memcpy(tptr[i], buffer[i], copysizes[i]); + } + else { + myptr2 = buffer[i]; + myptr1 = tptr[i]; + for (j = 0; j < bufsize; j++) { + memcpy(myptr1, myptr2, mpselsize[i]); + myptr1 += laststrides[i]; + myptr2 += mpselsize[i]; + } + } + } + if (k == stopcondition) { + continue; + } + for (i = 0; i < self->nargs; i++) { + tptr[i] += bufsize * laststrides[i]; + if (!needbuffer[i]) { + dptr[i] = tptr[i]; + } + } + } + /* end inner function over last dimension */ - /* swap the buffer if necessary */ - if (swap[i]) { - /* fprintf(stderr, "swapping...\n");*/ - copyswapn[i](buffer[i], mpselsize[i], NULL, -1, - (intp) datasize[i], 1, - mps[i]); - } - /* cast to the other buffer if necessary */ - if (loop->cast[i]) { - /* fprintf(stderr, "casting... %d, %p %p\n", i, buffer[i]); */ - loop->cast[i](buffer[i], castbuf[i], - (intp) datasize[i], - NULL, NULL); - } - } + if (loop->objfunc) { + /* + * DECREF castbuf when underlying function used + * object arrays and casting was needed to get + * to object arrays + */ + for (i = 0; i < self->nargs; i++) { + if (loop->cast[i]) { + if (steps[i] == 0) { + Py_XDECREF(*((PyObject **)castbuf[i])); + } + else { + int size = loop->bufsize; - bufcnt = (intp) bufsize; - loop->function((char **)dptr, &bufcnt, steps, loop->funcdata); - UFUNC_CHECK_ERROR(loop); + PyObject **objptr = (PyObject **)castbuf[i]; + /* + * size is loop->bufsize unless there + * was only one loop + */ + if (ninnerloops == 1) { + size = loop->leftover; + } + for (j = 0; j < size; j++) { + Py_XDECREF(*objptr); + *objptr = NULL; + objptr += 1; + } + } + } + } + } + /* fixme -- probably not needed here*/ + UFUNC_CHECK_ERROR(loop); - for(i=self->nin; inargs; i++) { - if (!needbuffer[i]) { - continue; - } - if (loop->cast[i]) { - /* fprintf(stderr, "casting back... %d, %p", i, castbuf[i]); */ - loop->cast[i](castbuf[i], - buffer[i], - (intp) datasize[i], - NULL, NULL); - } - if (swap[i]) { - copyswapn[i](buffer[i], mpselsize[i], NULL, -1, - (intp) datasize[i], 1, - mps[i]); - } - /* - * copy back to output arrays - * decref what's already there for object arrays - */ - if (pyobject[i]) { - myptr1 = tptr[i]; - for(j = 0; j < datasize[i]; j++) { - Py_XDECREF(*((PyObject **)myptr1)); - myptr1 += laststrides[i]; - } - } - if (fastmemcpy[i]) - memcpy(tptr[i], buffer[i], copysizes[i]); - else { - myptr2 = buffer[i]; - myptr1 = tptr[i]; - for(j = 0; j < bufsize; j++) { - memcpy(myptr1, myptr2, - mpselsize[i]); - myptr1 += laststrides[i]; - myptr2 += mpselsize[i]; - } - } - } - if (k == stopcondition) { - continue; - } - for(i = 0; i < self->nargs; i++) { - tptr[i] += bufsize * laststrides[i]; - if (!needbuffer[i]) { - dptr[i] = tptr[i]; - } - } - } - /* end inner function over last dimension */ - - if (loop->objfunc) { - /* - * DECREF castbuf when underlying function used - * object arrays and casting was needed to get - * to object arrays - */ - for(i = 0; i < self->nargs; i++) { - if (loop->cast[i]) { - if (steps[i] == 0) { - Py_XDECREF(*((PyObject **)castbuf[i])); - } - else { - int size = loop->bufsize; - - PyObject **objptr = (PyObject **)castbuf[i]; - /* - * size is loop->bufsize unless there - * was only one loop - */ - if (ninnerloops == 1) { - size = loop->leftover; - } - for(j = 0; j < size; j++) { - Py_XDECREF(*objptr); - *objptr = NULL; - objptr += 1; - } - } - } - } - - } - /* fixme -- probably not needed here*/ - UFUNC_CHECK_ERROR(loop); - - for(i=0; inargs; i++) { - PyArray_ITER_NEXT(loop->iters[i]); - } - index++; - } - } + for (i = 0; i < self->nargs; i++) { + PyArray_ITER_NEXT(loop->iters[i]); + } + index++; + } + } /* end of last case statement */ } NPY_LOOP_END_THREADS; @@ -2124,7 +2148,9 @@ fail: NPY_LOOP_END_THREADS; - if (loop) ufuncloop_dealloc(loop); + if (loop) { + ufuncloop_dealloc(loop); + } return -1; } @@ -2162,8 +2188,8 @@ maxsize = PyArray_SIZE(*arr); if (maxsize < loop->bufsize) { - if (!(PyArray_ISBEHAVED_RO(*arr)) || - PyArray_TYPE(*arr) != rtype) { + if (!(PyArray_ISBEHAVED_RO(*arr)) + || PyArray_TYPE(*arr) != rtype) { ntype = PyArray_DescrFromType(rtype); new = PyArray_FromAny((PyObject *)(*arr), ntype, 0, 0, @@ -2176,14 +2202,14 @@ } } - /* Don't decref *arr before re-assigning - because it was not going to be DECREF'd anyway. - - If a copy is made, then the copy will be removed - on deallocation of the loop structure by setting - loop->decref. - */ - + /* + * Don't decref *arr before re-assigning + * because it was not going to be DECREF'd anyway. + * + * If a copy is made, then the copy will be removed + * on deallocation of the loop structure by setting + * loop->decref. + */ return 0; } @@ -2194,30 +2220,29 @@ PyUFuncReduceObject *loop; PyArrayObject *idarr; PyArrayObject *aar; - intp loop_i[MAX_DIMS], outsize=0; + intp loop_i[MAX_DIMS], outsize = 0; int arg_types[3]; PyArray_SCALARKIND scalars[3] = {PyArray_NOSCALAR, PyArray_NOSCALAR, PyArray_NOSCALAR}; int i, j, nd; int flags; - /* Reduce type is the type requested of the input - during reduction */ + /* Reduce type is the type requested of the input during reduction */ if (self->core_enabled) { PyErr_Format(PyExc_RuntimeError, "construct_reduce not allowed on ufunc with signature"); return NULL; } - nd = (*arr)->nd; arg_types[0] = otype; arg_types[1] = otype; arg_types[2] = otype; - if ((loop = _pya_malloc(sizeof(PyUFuncReduceObject)))==NULL) { - PyErr_NoMemory(); return loop; + if ((loop = _pya_malloc(sizeof(PyUFuncReduceObject))) == NULL) { + PyErr_NoMemory(); + return loop; } - loop->retbase=0; + loop->retbase = 0; loop->swap = 0; loop->index = 0; loop->ufunc = self; @@ -2229,39 +2254,43 @@ loop->rit = NULL; loop->errobj = NULL; loop->first = 1; - loop->decref=NULL; + loop->decref = NULL; loop->N = (*arr)->dimensions[axis]; loop->instrides = (*arr)->strides[axis]; - if (select_types(loop->ufunc, arg_types, &(loop->function), - &(loop->funcdata), scalars, NULL) == -1) goto fail; - - /* output type may change -- if it does - reduction is forced into that type - and we need to select the reduction function again - */ + &(loop->funcdata), scalars, NULL) == -1) { + goto fail; + } + /* + * output type may change -- if it does + * reduction is forced into that type + * and we need to select the reduction function again + */ if (otype != arg_types[2]) { otype = arg_types[2]; arg_types[0] = otype; arg_types[1] = otype; if (select_types(loop->ufunc, arg_types, &(loop->function), - &(loop->funcdata), scalars, NULL) == -1) + &(loop->funcdata), scalars, NULL) == -1) { goto fail; + } } /* get looping parameters from Python */ if (PyUFunc_GetPyValues(str, &(loop->bufsize), &(loop->errormask), - &(loop->errobj)) < 0) goto fail; - + &(loop->errobj)) < 0) { + goto fail; + } /* Make copy if misbehaved or not otype for small arrays */ - if (_create_reduce_copy(loop, arr, otype) < 0) goto fail; + if (_create_reduce_copy(loop, arr, otype) < 0) { + goto fail; + } aar = *arr; if (loop->N == 0) { loop->meth = ZERO_EL_REDUCELOOP; } - else if (PyArray_ISBEHAVED_RO(aar) && \ - otype == (aar)->descr->type_num) { + else if (PyArray_ISBEHAVED_RO(aar) && otype == (aar)->descr->type_num) { if (loop->N == 1) { loop->meth = ONE_EL_REDUCELOOP; } @@ -2277,14 +2306,17 @@ } /* Determine if object arrays are involved */ - if (otype == PyArray_OBJECT || aar->descr->type_num == PyArray_OBJECT) + if (otype == PyArray_OBJECT || aar->descr->type_num == PyArray_OBJECT) { loop->obj = 1; - else + } + else { loop->obj = 0; - + } if (loop->meth == ZERO_EL_REDUCELOOP) { idarr = _getidentity(self, otype, str); - if (idarr == NULL) goto fail; + if (idarr == NULL) { + goto fail; + } if (idarr->descr->elsize > UFUNC_MAXIDENTITY) { PyErr_Format(PyExc_RuntimeError, "UFUNC_MAXIDENTITY (%d)" \ @@ -2301,24 +2333,24 @@ flags = NPY_CARRAY | NPY_UPDATEIFCOPY | NPY_FORCECAST; switch(operation) { case UFUNC_REDUCE: - for(j=0, i=0; idimensions[i]; - + } } if (out == NULL) { - loop->ret = (PyArrayObject *) \ + loop->ret = (PyArrayObject *) PyArray_New(aar->ob_type, aar->nd-1, loop_i, otype, NULL, NULL, 0, 0, (PyObject *)aar); } else { - outsize = PyArray_MultiplyList(loop_i, aar->nd-1); + outsize = PyArray_MultiplyList(loop_i, aar->nd - 1); } break; case UFUNC_ACCUMULATE: if (out == NULL) { - loop->ret = (PyArrayObject *) \ + loop->ret = (PyArrayObject *) PyArray_New(aar->ob_type, aar->nd, aar->dimensions, otype, NULL, NULL, 0, 0, (PyObject *)aar); } @@ -2331,7 +2363,7 @@ /* Index is 1-d array */ loop_i[axis] = ind_size; if (out == NULL) { - loop->ret = (PyArrayObject *) \ + loop->ret = (PyArrayObject *) PyArray_New(aar->ob_type, aar->nd, loop_i, otype, NULL, NULL, 0, 0, (PyObject *)aar); } @@ -2342,8 +2374,9 @@ loop->meth = ZERO_EL_REDUCELOOP; return loop; } - if (loop->meth == ONE_EL_REDUCELOOP) + if (loop->meth == ONE_EL_REDUCELOOP) { loop->meth = NOBUFFER_REDUCELOOP; + } break; } if (out) { @@ -2352,14 +2385,15 @@ "wrong shape for output"); goto fail; } - loop->ret = (PyArrayObject *) \ - PyArray_FromArray(out, PyArray_DescrFromType(otype), - flags); + loop->ret = (PyArrayObject *) + PyArray_FromArray(out, PyArray_DescrFromType(otype), flags); if (loop->ret && loop->ret != out) { loop->retbase = 1; } } - if (loop->ret == NULL) goto fail; + if (loop->ret == NULL) { + goto fail; + } loop->insize = aar->descr->elsize; loop->outsize = loop->ret->descr->elsize; loop->bufptr[0] = loop->ret->data; @@ -2370,74 +2404,82 @@ } loop->it = (PyArrayIterObject *)PyArray_IterNew((PyObject *)aar); - if (loop->it == NULL) return NULL; - + if (loop->it == NULL) { + return NULL; + } if (loop->meth == ONE_EL_REDUCELOOP) { loop->size = loop->it->size; return loop; } - /* Fix iterator to loop over correct dimension */ - /* Set size in axis dimension to 1 */ - + /* + * Fix iterator to loop over correct dimension + * Set size in axis dimension to 1 + */ loop->it->contiguous = 0; loop->it->size /= (loop->it->dims_m1[axis]+1); loop->it->dims_m1[axis] = 0; loop->it->backstrides[axis] = 0; - - loop->size = loop->it->size; - if (operation == UFUNC_REDUCE) { loop->steps[0] = 0; } else { loop->rit = (PyArrayIterObject *) \ PyArray_IterNew((PyObject *)(loop->ret)); - if (loop->rit == NULL) return NULL; - - /* Fix iterator to loop over correct dimension */ - /* Set size in axis dimension to 1 */ - + if (loop->rit == NULL) { + return NULL; + } + /* + * Fix iterator to loop over correct dimension + * Set size in axis dimension to 1 + */ loop->rit->contiguous = 0; - loop->rit->size /= (loop->rit->dims_m1[axis]+1); + loop->rit->size /= (loop->rit->dims_m1[axis] + 1); loop->rit->dims_m1[axis] = 0; loop->rit->backstrides[axis] = 0; - if (operation == UFUNC_ACCUMULATE) + if (operation == UFUNC_ACCUMULATE) { loop->steps[0] = loop->ret->strides[axis]; - else + } + else { loop->steps[0] = 0; + } } loop->steps[2] = loop->steps[0]; loop->bufptr[2] = loop->bufptr[0] + loop->steps[2]; - - if (loop->meth == BUFFER_UFUNCLOOP) { int _size; + loop->steps[1] = loop->outsize; if (otype != aar->descr->type_num) { - _size=loop->bufsize*(loop->outsize + \ - aar->descr->elsize); + _size=loop->bufsize*(loop->outsize + aar->descr->elsize); loop->buffer = PyDataMem_NEW(_size); - if (loop->buffer == NULL) goto fail; - if (loop->obj) memset(loop->buffer, 0, _size); - loop->castbuf = loop->buffer + \ - loop->bufsize*aar->descr->elsize; + if (loop->buffer == NULL) { + goto fail; + } + if (loop->obj) { + memset(loop->buffer, 0, _size); + } + loop->castbuf = loop->buffer + loop->bufsize*aar->descr->elsize; loop->bufptr[1] = loop->castbuf; loop->cast = PyArray_GetCastFunc(aar->descr, otype); - if (loop->cast == NULL) goto fail; + if (loop->cast == NULL) { + goto fail; + } } else { _size = loop->bufsize * loop->outsize; loop->buffer = PyDataMem_NEW(_size); - if (loop->buffer == NULL) goto fail; - if (loop->obj) memset(loop->buffer, 0, _size); + if (loop->buffer == NULL) { + goto fail; + } + if (loop->obj) { + memset(loop->buffer, 0, _size); + } loop->bufptr[1] = loop->buffer; } } - - PyUFunc_clearfperr(); return loop; @@ -2447,19 +2489,18 @@ } -/* We have two basic kinds of loops */ -/* One is used when arr is not-swapped and aligned and output type - is the same as input type. - and another using buffers when one of these is not satisfied. - - Zero-length and one-length axes-to-be-reduced are handled separately. -*/ - - static PyObject * +/* + * We have two basic kinds of loops. One is used when arr is not-swapped + * and aligned and output type is the same as input type. The other uses + * buffers when one of these is not satisfied. + * + * Zero-length and one-length axes-to-be-reduced are handled separately. + */ +static PyObject * PyUFunc_Reduce(PyUFuncObject *self, PyArrayObject *arr, PyArrayObject *out, int axis, int otype) { - PyArrayObject *ret=NULL; + PyArrayObject *ret = NULL; PyUFuncReduceObject *loop; intp i, n; char *dptr; @@ -2468,133 +2509,130 @@ /* Construct loop object */ loop = construct_reduce(self, &arr, out, axis, otype, UFUNC_REDUCE, 0, "reduce"); - if (!loop) return NULL; + if (!loop) { + return NULL; + } NPY_LOOP_BEGIN_THREADS; - switch(loop->meth) { - case ZERO_EL_REDUCELOOP: - /* fprintf(stderr, "ZERO..%d\n", loop->size); */ - for(i=0; isize; i++) { - if (loop->obj) Py_INCREF(*((PyObject **)loop->idptr)); - memmove(loop->bufptr[0], loop->idptr, loop->outsize); - loop->bufptr[0] += loop->outsize; + switch(loop->meth) { + case ZERO_EL_REDUCELOOP: + /* fprintf(stderr, "ZERO..%d\n", loop->size); */ + for (i = 0; i < loop->size; i++) { + if (loop->obj) { + Py_INCREF(*((PyObject **)loop->idptr)); } - break; - case ONE_EL_REDUCELOOP: - /*fprintf(stderr, "ONEDIM..%d\n", loop->size); */ - while(loop->index < loop->size) { - if (loop->obj) - Py_INCREF(*((PyObject **)loop->it->dataptr)); - memmove(loop->bufptr[0], loop->it->dataptr, - loop->outsize); - PyArray_ITER_NEXT(loop->it); - loop->bufptr[0] += loop->outsize; - loop->index++; + memmove(loop->bufptr[0], loop->idptr, loop->outsize); + loop->bufptr[0] += loop->outsize; + } + break; + case ONE_EL_REDUCELOOP: + /*fprintf(stderr, "ONEDIM..%d\n", loop->size); */ + while (loop->index < loop->size) { + if (loop->obj) { + Py_INCREF(*((PyObject **)loop->it->dataptr)); } - break; - case NOBUFFER_UFUNCLOOP: - /*fprintf(stderr, "NOBUFFER..%d\n", loop->size); */ - while(loop->index < loop->size) { - /* Copy first element to output */ - if (loop->obj) - Py_INCREF(*((PyObject **)loop->it->dataptr)); - memmove(loop->bufptr[0], loop->it->dataptr, - loop->outsize); - /* Adjust input pointer */ - loop->bufptr[1] = loop->it->dataptr+loop->steps[1]; - loop->function((char **)loop->bufptr, - &(loop->N), - loop->steps, loop->funcdata); - UFUNC_CHECK_ERROR(loop); - - PyArray_ITER_NEXT(loop->it) - loop->bufptr[0] += loop->outsize; - loop->bufptr[2] = loop->bufptr[0]; - loop->index++; + memmove(loop->bufptr[0], loop->it->dataptr, loop->outsize); + PyArray_ITER_NEXT(loop->it); + loop->bufptr[0] += loop->outsize; + loop->index++; + } + break; + case NOBUFFER_UFUNCLOOP: + /*fprintf(stderr, "NOBUFFER..%d\n", loop->size); */ + while (loop->index < loop->size) { + /* Copy first element to output */ + if (loop->obj) { + Py_INCREF(*((PyObject **)loop->it->dataptr)); } - break; - case BUFFER_UFUNCLOOP: - /* use buffer for arr */ - /* - For each row to reduce - 1. copy first item over to output (casting if necessary) - 2. Fill inner buffer - 3. When buffer is filled or end of row - a. Cast input buffers if needed - b. Call inner function. - 4. Repeat 2 until row is done. - */ - /* fprintf(stderr, "BUFFERED..%d %d\n", loop->size, - loop->swap); */ - while(loop->index < loop->size) { - loop->inptr = loop->it->dataptr; - /* Copy (cast) First term over to output */ - if (loop->cast) { - /* A little tricky because we need to - cast it first */ - arr->descr->f->copyswap(loop->buffer, - loop->inptr, - loop->swap, - NULL); - loop->cast(loop->buffer, loop->castbuf, - 1, NULL, NULL); - if (loop->obj) { - Py_XINCREF(*((PyObject **)loop->castbuf)); + memmove(loop->bufptr[0], loop->it->dataptr, loop->outsize); + /* Adjust input pointer */ + loop->bufptr[1] = loop->it->dataptr+loop->steps[1]; + loop->function((char **)loop->bufptr, &(loop->N), + loop->steps, loop->funcdata); + UFUNC_CHECK_ERROR(loop); + PyArray_ITER_NEXT(loop->it); + loop->bufptr[0] += loop->outsize; + loop->bufptr[2] = loop->bufptr[0]; + loop->index++; + } + break; + case BUFFER_UFUNCLOOP: + /* + * use buffer for arr + * + * For each row to reduce + * 1. copy first item over to output (casting if necessary) + * 2. Fill inner buffer + * 3. When buffer is filled or end of row + * a. Cast input buffers if needed + * b. Call inner function. + * 4. Repeat 2 until row is done. + */ + /* fprintf(stderr, "BUFFERED..%d %d\n", loop->size, loop->swap); */ + while(loop->index < loop->size) { + loop->inptr = loop->it->dataptr; + /* Copy (cast) First term over to output */ + if (loop->cast) { + /* A little tricky because we need to cast it first */ + arr->descr->f->copyswap(loop->buffer, loop->inptr, + loop->swap, NULL); + loop->cast(loop->buffer, loop->castbuf, 1, NULL, NULL); + if (loop->obj) { + Py_XINCREF(*((PyObject **)loop->castbuf)); + } + memcpy(loop->bufptr[0], loop->castbuf, loop->outsize); + } + else { + /* Simple copy */ + arr->descr->f->copyswap(loop->bufptr[0], loop->inptr, + loop->swap, NULL); + } + loop->inptr += loop->instrides; + n = 1; + while(n < loop->N) { + /* Copy up to loop->bufsize elements to buffer */ + dptr = loop->buffer; + for (i = 0; i < loop->bufsize; i++, n++) { + if (n == loop->N) { + break; } - memcpy(loop->bufptr[0], loop->castbuf, - loop->outsize); + arr->descr->f->copyswap(dptr, loop->inptr, + loop->swap, NULL); + loop->inptr += loop->instrides; + dptr += loop->insize; } - else { /* Simple copy */ - arr->descr->f->copyswap(loop->bufptr[0], - loop->inptr, - loop->swap, NULL); + if (loop->cast) { + loop->cast(loop->buffer, loop->castbuf, i, NULL, NULL); } - loop->inptr += loop->instrides; - n = 1; - while(n < loop->N) { - /* Copy up to loop->bufsize elements to - buffer */ - dptr = loop->buffer; - for(i=0; ibufsize; i++, n++) { - if (n == loop->N) break; - arr->descr->f->copyswap(dptr, - loop->inptr, - loop->swap, - NULL); - loop->inptr += loop->instrides; - dptr += loop->insize; - } - if (loop->cast) - loop->cast(loop->buffer, - loop->castbuf, - i, NULL, NULL); - loop->function((char **)loop->bufptr, - &i, - loop->steps, loop->funcdata); - loop->bufptr[0] += loop->steps[0]*i; - loop->bufptr[2] += loop->steps[2]*i; - UFUNC_CHECK_ERROR(loop); - } - PyArray_ITER_NEXT(loop->it); - loop->bufptr[0] += loop->outsize; - loop->bufptr[2] = loop->bufptr[0]; - loop->index++; + loop->function((char **)loop->bufptr, &i, + loop->steps, loop->funcdata); + loop->bufptr[0] += loop->steps[0]*i; + loop->bufptr[2] += loop->steps[2]*i; + UFUNC_CHECK_ERROR(loop); } + PyArray_ITER_NEXT(loop->it); + loop->bufptr[0] += loop->outsize; + loop->bufptr[2] = loop->bufptr[0]; + loop->index++; } + } + NPY_LOOP_END_THREADS; + /* Hang on to this reference -- will be decref'd with loop */ + if (loop->retbase) { + ret = (PyArrayObject *)loop->ret->base; + } + else { + ret = loop->ret; + } + Py_INCREF(ret); + ufuncreduce_dealloc(loop); + return (PyObject *)ret; - NPY_LOOP_END_THREADS; - - /* Hang on to this reference -- will be decref'd with loop */ - if (loop->retbase) ret = (PyArrayObject *)loop->ret->base; - else ret = loop->ret; - Py_INCREF(ret); - ufuncreduce_dealloc(loop); - return (PyObject *)ret; - fail: NPY_LOOP_END_THREADS; - - if (loop) ufuncreduce_dealloc(loop); + if (loop) { + ufuncreduce_dealloc(loop); + } return NULL; } @@ -2603,55 +2641,59 @@ PyUFunc_Accumulate(PyUFuncObject *self, PyArrayObject *arr, PyArrayObject *out, int axis, int otype) { - PyArrayObject *ret=NULL; + PyArrayObject *ret = NULL; PyUFuncReduceObject *loop; intp i, n; char *dptr; NPY_BEGIN_THREADS_DEF; - /* Construct loop object */ - loop = construct_reduce(self, &arr, out, axis, otype, UFUNC_ACCUMULATE, 0, - "accumulate"); - if (!loop) return NULL; + /* Construct loop object */ + loop = construct_reduce(self, &arr, out, axis, otype, + UFUNC_ACCUMULATE, 0, "accumulate"); + if (!loop) { + return NULL; + } NPY_LOOP_BEGIN_THREADS; switch(loop->meth) { - case ZERO_EL_REDUCELOOP: /* Accumulate */ + case ZERO_EL_REDUCELOOP: + /* Accumulate */ /* fprintf(stderr, "ZERO..%d\n", loop->size); */ - for(i=0; isize; i++) { - if (loop->obj) + for (i = 0; i < loop->size; i++) { + if (loop->obj) { Py_INCREF(*((PyObject **)loop->idptr)); + } memcpy(loop->bufptr[0], loop->idptr, loop->outsize); loop->bufptr[0] += loop->outsize; } break; - case ONE_EL_REDUCELOOP: /* Accumulate */ + case ONE_EL_REDUCELOOP: + /* Accumulate */ /* fprintf(stderr, "ONEDIM..%d\n", loop->size); */ - while(loop->index < loop->size) { - if (loop->obj) + while (loop->index < loop->size) { + if (loop->obj) { Py_INCREF(*((PyObject **)loop->it->dataptr)); - memmove(loop->bufptr[0], loop->it->dataptr, - loop->outsize); + } + memmove(loop->bufptr[0], loop->it->dataptr, loop->outsize); PyArray_ITER_NEXT(loop->it); loop->bufptr[0] += loop->outsize; loop->index++; } break; - case NOBUFFER_UFUNCLOOP: /* Accumulate */ + case NOBUFFER_UFUNCLOOP: + /* Accumulate */ /* fprintf(stderr, "NOBUFFER..%d\n", loop->size); */ - while(loop->index < loop->size) { + while (loop->index < loop->size) { /* Copy first element to output */ - if (loop->obj) + if (loop->obj) { Py_INCREF(*((PyObject **)loop->it->dataptr)); - memmove(loop->bufptr[0], loop->it->dataptr, - loop->outsize); + } + memmove(loop->bufptr[0], loop->it->dataptr, loop->outsize); /* Adjust input pointer */ - loop->bufptr[1] = loop->it->dataptr+loop->steps[1]; - loop->function((char **)loop->bufptr, - &(loop->N), + loop->bufptr[1] = loop->it->dataptr + loop->steps[1]; + loop->function((char **)loop->bufptr, &(loop->N), loop->steps, loop->funcdata); UFUNC_CHECK_ERROR(loop); - PyArray_ITER_NEXT(loop->it); PyArray_ITER_NEXT(loop->rit); loop->bufptr[0] = loop->rit->dataptr; @@ -2659,64 +2701,57 @@ loop->index++; } break; - case BUFFER_UFUNCLOOP: /* Accumulate */ - /* use buffer for arr */ - /* - For each row to reduce - 1. copy identity over to output (casting if necessary) - 2. Fill inner buffer - 3. When buffer is filled or end of row - a. Cast input buffers if needed - b. Call inner function. - 4. Repeat 2 until row is done. - */ - /* fprintf(stderr, "BUFFERED..%d %p\n", loop->size, - loop->cast); */ - while(loop->index < loop->size) { + case BUFFER_UFUNCLOOP: + /* Accumulate + * + * use buffer for arr + * + * For each row to reduce + * 1. copy identity over to output (casting if necessary) + * 2. Fill inner buffer + * 3. When buffer is filled or end of row + * a. Cast input buffers if needed + * b. Call inner function. + * 4. Repeat 2 until row is done. + */ + /* fprintf(stderr, "BUFFERED..%d %p\n", loop->size, loop->cast); */ + while (loop->index < loop->size) { loop->inptr = loop->it->dataptr; /* Copy (cast) First term over to output */ if (loop->cast) { /* A little tricky because we need to cast it first */ - arr->descr->f->copyswap(loop->buffer, - loop->inptr, - loop->swap, - NULL); - loop->cast(loop->buffer, loop->castbuf, - 1, NULL, NULL); + arr->descr->f->copyswap(loop->buffer, loop->inptr, + loop->swap, NULL); + loop->cast(loop->buffer, loop->castbuf, 1, NULL, NULL); if (loop->obj) { Py_XINCREF(*((PyObject **)loop->castbuf)); } - memcpy(loop->bufptr[0], loop->castbuf, - loop->outsize); + memcpy(loop->bufptr[0], loop->castbuf, loop->outsize); } - else { /* Simple copy */ - arr->descr->f->copyswap(loop->bufptr[0], - loop->inptr, - loop->swap, - NULL); + else { + /* Simple copy */ + arr->descr->f->copyswap(loop->bufptr[0], loop->inptr, + loop->swap, NULL); } loop->inptr += loop->instrides; n = 1; - while(n < loop->N) { - /* Copy up to loop->bufsize elements to - buffer */ + while (n < loop->N) { + /* Copy up to loop->bufsize elements to buffer */ dptr = loop->buffer; - for(i=0; ibufsize; i++, n++) { - if (n == loop->N) break; - arr->descr->f->copyswap(dptr, - loop->inptr, - loop->swap, - NULL); + for (i = 0; i < loop->bufsize; i++, n++) { + if (n == loop->N) { + break; + } + arr->descr->f->copyswap(dptr, loop->inptr, + loop->swap, NULL); loop->inptr += loop->instrides; dptr += loop->insize; } - if (loop->cast) - loop->cast(loop->buffer, - loop->castbuf, - i, NULL, NULL); - loop->function((char **)loop->bufptr, - &i, + if (loop->cast) { + loop->cast(loop->buffer, loop->castbuf, i, NULL, NULL); + } + loop->function((char **)loop->bufptr, &i, loop->steps, loop->funcdata); loop->bufptr[0] += loop->steps[0]*i; loop->bufptr[2] += loop->steps[2]*i; @@ -2729,57 +2764,60 @@ loop->index++; } } - NPY_LOOP_END_THREADS; - /* Hang on to this reference -- will be decref'd with loop */ - if (loop->retbase) ret = (PyArrayObject *)loop->ret->base; - else ret = loop->ret; + if (loop->retbase) { + ret = (PyArrayObject *)loop->ret->base; + } + else { + ret = loop->ret; + } Py_INCREF(ret); ufuncreduce_dealloc(loop); return (PyObject *)ret; fail: NPY_LOOP_END_THREADS; - - if (loop) ufuncreduce_dealloc(loop); + if (loop) { + ufuncreduce_dealloc(loop); + } return NULL; } -/* Reduceat performs a reduce over an axis using the indices as a guide - - op.reduceat(array,indices) computes - op.reduce(array[indices[i]:indices[i+1]] - for i=0..end with an implicit indices[i+1]=len(array) - assumed when i=end-1 - - if indices[i+1] <= indices[i]+1 - then the result is array[indices[i]] for that value - - op.accumulate(array) is the same as - op.reduceat(array,indices)[::2] - where indices is range(len(array)-1) with a zero placed in every other sample - indices = zeros(len(array)*2-1) - indices[1::2] = range(1,len(array)) - - output shape is based on the size of indices -*/ - +/* + * Reduceat performs a reduce over an axis using the indices as a guide + * + * op.reduceat(array,indices) computes + * op.reduce(array[indices[i]:indices[i+1]] + * for i=0..end with an implicit indices[i+1]=len(array) + * assumed when i=end-1 + * + * if indices[i+1] <= indices[i]+1 + * then the result is array[indices[i]] for that value + * + * op.accumulate(array) is the same as + * op.reduceat(array,indices)[::2] + * where indices is range(len(array)-1) with a zero placed in every other sample + * indices = zeros(len(array)*2-1) + * indices[1::2] = range(1,len(array)) + * + * output shape is based on the size of indices + */ static PyObject * PyUFunc_Reduceat(PyUFuncObject *self, PyArrayObject *arr, PyArrayObject *ind, PyArrayObject *out, int axis, int otype) { PyArrayObject *ret; PyUFuncReduceObject *loop; - intp *ptr=(intp *)ind->data; - intp nn=ind->dimensions[0]; - intp mm=arr->dimensions[axis]-1; + intp *ptr = (intp *)ind->data; + intp nn = ind->dimensions[0]; + intp mm = arr->dimensions[axis] - 1; intp n, i, j; char *dptr; NPY_BEGIN_THREADS_DEF; /* Check for out-of-bounds values in indices array */ - for(i=0; i mm)) { PyErr_Format(PyExc_IndexError, "index out-of-bounds (0, %d)", (int) mm); @@ -2790,37 +2828,38 @@ ptr = (intp *)ind->data; /* Construct loop object */ - loop = construct_reduce(self, &arr, out, axis, otype, UFUNC_REDUCEAT, nn, - "reduceat"); - if (!loop) return NULL; + loop = construct_reduce(self, &arr, out, axis, otype, + UFUNC_REDUCEAT, nn, "reduceat"); + if (!loop) { + return NULL; + } NPY_LOOP_BEGIN_THREADS; switch(loop->meth) { - /* zero-length index -- return array immediately */ case ZERO_EL_REDUCELOOP: + /* zero-length index -- return array immediately */ /* fprintf(stderr, "ZERO..\n"); */ break; - /* NOBUFFER -- behaved array and same type */ - case NOBUFFER_UFUNCLOOP: /* Reduceat */ + case NOBUFFER_UFUNCLOOP: + /* Reduceat + * NOBUFFER -- behaved array and same type + */ /* fprintf(stderr, "NOBUFFER..%d\n", loop->size); */ - while(loop->index < loop->size) { + while (loop->index < loop->size) { ptr = (intp *)ind->data; - for(i=0; ibufptr[1] = loop->it->dataptr + \ - (*ptr)*loop->instrides; + for (i = 0; i < nn; i++) { + loop->bufptr[1] = loop->it->dataptr + (*ptr)*loop->instrides; if (loop->obj) { Py_XINCREF(*((PyObject **)loop->bufptr[1])); } - memcpy(loop->bufptr[0], loop->bufptr[1], - loop->outsize); - mm = (i==nn-1 ? arr->dimensions[axis]-*ptr : \ - *(ptr+1) - *ptr) - 1; + memcpy(loop->bufptr[0], loop->bufptr[1], loop->outsize); + mm = (i == nn - 1 ? arr->dimensions[axis] - *ptr : + *(ptr + 1) - *ptr) - 1; if (mm > 0) { loop->bufptr[1] += loop->instrides; loop->bufptr[2] = loop->bufptr[0]; - loop->function((char **)loop->bufptr, - &mm, loop->steps, - loop->funcdata); + loop->function((char **)loop->bufptr, &mm, + loop->steps, loop->funcdata); UFUNC_CHECK_ERROR(loop); } loop->bufptr[0] += loop->ret->strides[axis]; @@ -2833,44 +2872,43 @@ } break; - /* BUFFER -- misbehaved array or different types */ - case BUFFER_UFUNCLOOP: /* Reduceat */ - /* fprintf(stderr, "BUFFERED..%d\n", loop->size); */ - while(loop->index < loop->size) { + case BUFFER_UFUNCLOOP: + /* Reduceat + * BUFFER -- misbehaved array or different types + */ + /* fprintf(stderr, "BUFFERED..%d\n", loop->size); */ + while (loop->index < loop->size) { ptr = (intp *)ind->data; - for(i=0; iobj) { Py_XINCREF(*((PyObject **)loop->idptr)); } - memcpy(loop->bufptr[0], loop->idptr, - loop->outsize); + memcpy(loop->bufptr[0], loop->idptr, loop->outsize); n = 0; - mm = (i==nn-1 ? arr->dimensions[axis] - *ptr :\ - *(ptr+1) - *ptr); - if (mm < 1) mm = 1; - loop->inptr = loop->it->dataptr + \ - (*ptr)*loop->instrides; + mm = (i == nn - 1 ? arr->dimensions[axis] - *ptr : + *(ptr + 1) - *ptr); + if (mm < 1) { + mm = 1; + } + loop->inptr = loop->it->dataptr + (*ptr)*loop->instrides; while (n < mm) { - /* Copy up to loop->bufsize elements - to buffer */ + /* Copy up to loop->bufsize elements to buffer */ dptr = loop->buffer; - for(j=0; jbufsize; j++, n++) { - if (n == mm) break; - arr->descr->f->copyswap\ - (dptr, - loop->inptr, + for (j = 0; j < loop->bufsize; j++, n++) { + if (n == mm) { + break; + } + arr->descr->f->copyswap(dptr, loop->inptr, loop->swap, NULL); loop->inptr += loop->instrides; dptr += loop->insize; } - if (loop->cast) - loop->cast(loop->buffer, - loop->castbuf, - j, NULL, NULL); + if (loop->cast) { + loop->cast(loop->buffer, loop->castbuf, j, NULL, NULL); + } loop->bufptr[2] = loop->bufptr[0]; - loop->function((char **)loop->bufptr, - &j, loop->steps, - loop->funcdata); + loop->function((char **)loop->bufptr, &j, + loop->steps, loop->funcdata); UFUNC_CHECK_ERROR(loop); loop->bufptr[0] += j*loop->steps[0]; } @@ -2884,55 +2922,56 @@ } break; } - NPY_LOOP_END_THREADS; - - /* Hang on to this reference -- will be decref'd with loop */ - if (loop->retbase) ret = (PyArrayObject *)loop->ret->base; - else ret = loop->ret; + /* Hang on to this reference -- will be decref'd with loop */ + if (loop->retbase) { + ret = (PyArrayObject *)loop->ret->base; + } + else { + ret = loop->ret; + } Py_INCREF(ret); ufuncreduce_dealloc(loop); return (PyObject *)ret; fail: NPY_LOOP_END_THREADS; - - if (loop) ufuncreduce_dealloc(loop); + if (loop) { + ufuncreduce_dealloc(loop); + } return NULL; } -/* This code handles reduce, reduceat, and accumulate - (accumulate and reduce are special cases of the more general reduceat - but they are handled separately for speed) -*/ - +/* + * This code handles reduce, reduceat, and accumulate + * (accumulate and reduce are special cases of the more general reduceat + * but they are handled separately for speed) + */ static PyObject * PyUFunc_GenericReduction(PyUFuncObject *self, PyObject *args, PyObject *kwds, int operation) { int axis=0; PyArrayObject *mp, *ret = NULL; - PyObject *op, *res=NULL; + PyObject *op, *res = NULL; PyObject *obj_ind, *context; PyArrayObject *indices = NULL; - PyArray_Descr *otype=NULL; - PyArrayObject *out=NULL; + PyArray_Descr *otype = NULL; + PyArrayObject *out = NULL; static char *kwlist1[] = {"array", "axis", "dtype", "out", NULL}; static char *kwlist2[] = {"array", "indices", "axis", "dtype", "out", NULL}; - static char *_reduce_type[] = {"reduce", "accumulate", \ - "reduceat", NULL}; + static char *_reduce_type[] = {"reduce", "accumulate", "reduceat", NULL}; + if (self == NULL) { PyErr_SetString(PyExc_ValueError, "function not supported"); return NULL; } - if (self->core_enabled) { PyErr_Format(PyExc_RuntimeError, "Reduction not defined on ufunc with signature"); return NULL; } - if (self->nin != 2) { PyErr_Format(PyExc_ValueError, "%s only supported for binary functions", @@ -2961,7 +3000,10 @@ } indices = (PyArrayObject *)PyArray_FromAny(obj_ind, indtype, 1, 1, CARRAY, NULL); - if (indices == NULL) {Py_XDECREF(otype); return NULL;} + if (indices == NULL) { + Py_XDECREF(otype); + return NULL; + } } else { if(!PyArg_ParseTupleAndKeywords(args, kwds, "O|iO&O&", kwlist1, @@ -2974,7 +3016,6 @@ return NULL; } } - /* Ensure input is an array */ if (!PyArray_Check(op) && !PyArray_IsScalar(op, Generic)) { context = Py_BuildValue("O(O)i", self, op, 0); @@ -2984,8 +3025,9 @@ } mp = (PyArrayObject *)PyArray_FromAny(op, NULL, 0, 0, 0, context); Py_XDECREF(context); - if (mp == NULL) return NULL; - + if (mp == NULL) { + return NULL; + } /* Check to see if input is zero-dimensional */ if (mp->nd == 0) { PyErr_Format(PyExc_TypeError, "cannot %s on a scalar", @@ -2994,7 +3036,6 @@ Py_DECREF(mp); return NULL; } - /* Check to see that type (and otype) is not FLEXIBLE */ if (PyArray_ISFLEXIBLE(mp) || (otype && PyTypeNum_ISFLEXIBLE(otype->type_num))) { @@ -3006,37 +3047,42 @@ return NULL; } - if (axis < 0) axis += mp->nd; + if (axis < 0) { + axis += mp->nd; + } if (axis < 0 || axis >= mp->nd) { PyErr_SetString(PyExc_ValueError, "axis not in array"); Py_XDECREF(otype); Py_DECREF(mp); return NULL; } - - /* If out is specified it determines otype unless otype - already specified. - */ + /* + * If out is specified it determines otype + * unless otype already specified. + */ if (otype == NULL && out != NULL) { otype = out->descr; Py_INCREF(otype); } - if (otype == NULL) { - /* For integer types --- make sure at - least a long is used for add and multiply - reduction --- to avoid overflow */ + /* + * For integer types --- make sure at least a long + * is used for add and multiply reduction to avoid overflow + */ int typenum = PyArray_TYPE(mp); - if ((typenum < NPY_FLOAT) && \ - ((strcmp(self->name,"add")==0) || \ - (strcmp(self->name,"multiply")==0))) { - if (PyTypeNum_ISBOOL(typenum)) + if ((typenum < NPY_FLOAT) + && ((strcmp(self->name,"add") == 0) + || (strcmp(self->name,"multiply") == 0))) { + if (PyTypeNum_ISBOOL(typenum)) { typenum = PyArray_LONG; + } else if (mp->descr->elsize < sizeof(long)) { - if (PyTypeNum_ISUNSIGNED(typenum)) + if (PyTypeNum_ISUNSIGNED(typenum)) { typenum = PyArray_ULONG; - else + } + else { typenum = PyArray_LONG; + } } } otype = PyArray_DescrFromType(typenum); @@ -3060,36 +3106,41 @@ } Py_DECREF(mp); Py_DECREF(otype); - if (ret==NULL) return NULL; + if (ret == NULL) { + return NULL; + } if (op->ob_type != ret->ob_type) { res = PyObject_CallMethod(op, "__array_wrap__", "O", ret); - if (res == NULL) PyErr_Clear(); - else if (res == Py_None) Py_DECREF(res); + if (res == NULL) { + PyErr_Clear(); + } + else if (res == Py_None) { + Py_DECREF(res); + } else { Py_DECREF(ret); return res; } } return PyArray_Return(ret); - } -/* This function analyzes the input arguments - and determines an appropriate __array_wrap__ function to call - for the outputs. - - If an output argument is provided, then it is wrapped - with its own __array_wrap__ not with the one determined by - the input arguments. - - if the provided output argument is already an array, - the wrapping function is None (which means no wrapping will - be done --- not even PyArray_Return). - - A NULL is placed in output_wrap for outputs that - should just have PyArray_Return called. -*/ - +/* + * This function analyzes the input arguments + * and determines an appropriate __array_wrap__ function to call + * for the outputs. + * + * If an output argument is provided, then it is wrapped + * with its own __array_wrap__ not with the one determined by + * the input arguments. + * + * if the provided output argument is already an array, + * the wrapping function is None (which means no wrapping will + * be done --- not even PyArray_Return). + * + * A NULL is placed in output_wrap for outputs that + * should just have PyArray_Return called. + */ static void _find_array_wrap(PyObject *args, PyObject **output_wrap, int nin, int nout) { @@ -3101,11 +3152,11 @@ PyObject *obj, *wrap = NULL; nargs = PyTuple_GET_SIZE(args); - for(i = 0; i < nin; i++) { + for (i = 0; i < nin; i++) { obj = PyTuple_GET_ITEM(args, i); - if (PyArray_CheckExact(obj) || \ - PyArray_IsAnyScalar(obj)) + if (PyArray_CheckExact(obj) || PyArray_IsAnyScalar(obj)) { continue; + } wrap = PyObject_GetAttrString(obj, "__array_wrap__"); if (wrap) { if (PyCallable_Check(wrap)) { @@ -3125,50 +3176,50 @@ if (np >= 2) { wrap = wraps[0]; maxpriority = PyArray_GetPriority(with_wrap[0], - PyArray_SUBTYPE_PRIORITY); - for(i = 1; i < np; ++i) { - priority = \ - PyArray_GetPriority(with_wrap[i], - PyArray_SUBTYPE_PRIORITY); + PyArray_SUBTYPE_PRIORITY); + for (i = 1; i < np; ++i) { + priority = PyArray_GetPriority(with_wrap[i], + PyArray_SUBTYPE_PRIORITY); if (priority > maxpriority) { maxpriority = priority; Py_DECREF(wrap); wrap = wraps[i]; - } else { + } + else { Py_DECREF(wraps[i]); } } } - /* Here wrap is the wrapping function determined from the - input arrays (could be NULL). - - For all the output arrays decide what to do. - - 1) Use the wrap function determined from the input arrays - This is the default if the output array is not - passed in. - - 2) Use the __array_wrap__ method of the output object - passed in. -- this is special cased for - exact ndarray so that no PyArray_Return is - done in that case. - */ - - for(i=0; inargs; i++) { mps[i] = NULL; } - errval = PyUFunc_GenericFunction(self, args, kwds, mps); if (errval < 0) { - for(i = 0; i < self->nargs; i++) { + for (i = 0; i < self->nargs; i++) { PyArray_XDECREF_ERR(mps[i]); } if (errval == -1) @@ -3223,12 +3272,9 @@ return Py_NotImplemented; } } - - for(i = 0; i < self->nin; i++) { + for (i = 0; i < self->nin; i++) { Py_DECREF(mps[i]); } - - /* * Use __array_wrap__ on all outputs * if present on one of the input arguments. @@ -3249,10 +3295,9 @@ _find_array_wrap(args, wraparr, self->nin, self->nout); /* wrap outputs */ - for(i = 0; i < self->nout; i++) { - int j=self->nin+i; + for (i = 0; i < self->nout; i++) { + int j = self->nin+i; PyObject *wrap; - /* * check to see if any UPDATEIFCOPY flags are set * which meant that a temporary output was generated @@ -3272,14 +3317,10 @@ retobj[i] = (PyObject *)mps[j]; continue; } - res = PyObject_CallFunction(wrap, "O(OOi)", - mps[j], self, args, i); - if (res == NULL && \ - PyErr_ExceptionMatches(PyExc_TypeError)) { + res = PyObject_CallFunction(wrap, "O(OOi)", mps[j], self, args, i); + if (res == NULL && PyErr_ExceptionMatches(PyExc_TypeError)) { PyErr_Clear(); - res = PyObject_CallFunctionObjArgs(wrap, - mps[j], - NULL); + res = PyObject_CallFunctionObjArgs(wrap, mps[j], NULL); } Py_DECREF(wrap); if (res == NULL) { @@ -3300,15 +3341,17 @@ if (self->nout == 1) { return retobj[0]; - } else { + } + else { ret = (PyTupleObject *)PyTuple_New(self->nout); - for(i = 0; i < self->nout; i++) { + for (i = 0; i < self->nout; i++) { PyTuple_SET_ITEM(ret, i, retobj[i]); } return (PyObject *)ret; } + fail: - for(i = self->nin; i < self->nargs; i++) { + for (i = self->nin; i < self->nargs; i++) { Py_XDECREF(mps[i]); } return NULL; @@ -3320,8 +3363,9 @@ PyObject *thedict; PyObject *res; - if (!PyArg_ParseTuple(args, "")) return NULL; - + if (!PyArg_ParseTuple(args, "")) { + return NULL; + } if (PyUFunc_PYVALS_NAME == NULL) { PyUFunc_PYVALS_NAME = PyString_InternFromString(UFUNC_PYVALS_NAME); } @@ -3336,7 +3380,9 @@ } /* Construct list of defaults */ res = PyList_New(3); - if (res == NULL) return NULL; + if (res == NULL) { + return NULL; + } PyList_SET_ITEM(res, 0, PyInt_FromLong(PyArray_BUFSIZE)); PyList_SET_ITEM(res, 1, PyInt_FromLong(UFUNC_ERR_DEFAULT)); PyList_SET_ITEM(res, 2, Py_None); Py_INCREF(Py_None); @@ -3345,28 +3391,27 @@ #if USE_USE_DEFAULTS==1 /* - This is a strategy to buy a little speed up and avoid the dictionary - look-up in the default case. It should work in the presence of - threads. If it is deemed too complicated or it doesn't actually work - it could be taken out. -*/ + * This is a strategy to buy a little speed up and avoid the dictionary + * look-up in the default case. It should work in the presence of + * threads. If it is deemed too complicated or it doesn't actually work + * it could be taken out. + */ static int ufunc_update_use_defaults(void) { - PyObject *errobj=NULL; + PyObject *errobj = NULL; int errmask, bufsize; int res; PyUFunc_NUM_NODEFAULTS += 1; - res = PyUFunc_GetPyValues("test", &bufsize, &errmask, - &errobj); + res = PyUFunc_GetPyValues("test", &bufsize, &errmask, &errobj); PyUFunc_NUM_NODEFAULTS -= 1; - - if (res < 0) {Py_XDECREF(errobj); return -1;} - - if ((errmask != UFUNC_ERR_DEFAULT) || \ - (bufsize != PyArray_BUFSIZE) || \ - (PyTuple_GET_ITEM(errobj, 1) != Py_None)) { + if (res < 0) { + Py_XDECREF(errobj); + return -1; + } + if ((errmask != UFUNC_ERR_DEFAULT) || (bufsize != PyArray_BUFSIZE) + || (PyTuple_GET_ITEM(errobj, 1) != Py_None)) { PyUFunc_NUM_NODEFAULTS += 1; } else if (PyUFunc_NUM_NODEFAULTS > 0) { @@ -3385,8 +3430,9 @@ PyObject *val; static char *msg = "Error object must be a list of length 3"; - if (!PyArg_ParseTuple(args, "O", &val)) return NULL; - + if (!PyArg_ParseTuple(args, "O", &val)) { + return NULL; + } if (!PyList_CheckExact(val) || PyList_GET_SIZE(val) != 3) { PyErr_SetString(PyExc_ValueError, msg); return NULL; @@ -3399,9 +3445,13 @@ thedict = PyEval_GetBuiltins(); } res = PyDict_SetItem(thedict, PyUFunc_PYVALS_NAME, val); - if (res < 0) return NULL; + if (res < 0) { + return NULL; + } #if USE_USE_DEFAULTS==1 - if (ufunc_update_use_defaults() < 0) return NULL; + if (ufunc_update_use_defaults() < 0) { + return NULL; + } #endif Py_INCREF(Py_None); return Py_None; @@ -3412,41 +3462,45 @@ static PyUFuncGenericFunction pyfunc_functions[] = {PyUFunc_On_Om}; static char -doc_frompyfunc[] = "frompyfunc(func, nin, nout) take an arbitrary python function that takes nin objects as input and returns nout objects and return a universal function (ufunc). This ufunc always returns PyObject arrays"; +doc_frompyfunc[] = "frompyfunc(func, nin, nout) take an arbitrary python\n" \ + "function that takes nin objects as input and returns\n" \ + "nout objects and return a universal function (ufunc).\n" \ + "This ufunc always returns PyObject arrays\n"; static PyObject * ufunc_frompyfunc(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *NPY_UNUSED(kwds)) { /* Keywords are ignored for now */ - PyObject *function, *pyname=NULL; + PyObject *function, *pyname = NULL; int nin, nout, i; PyUFunc_PyFuncData *fdata; PyUFuncObject *self; char *fname, *str; - Py_ssize_t fname_len=-1; + Py_ssize_t fname_len = -1; int offset[2]; - if (!PyArg_ParseTuple(args, "Oii", &function, &nin, &nout)) return NULL; - + if (!PyArg_ParseTuple(args, "Oii", &function, &nin, &nout)) { + return NULL; + } if (!PyCallable_Check(function)) { PyErr_SetString(PyExc_TypeError, "function must be callable"); return NULL; } - self = _pya_malloc(sizeof(PyUFuncObject)); - if (self == NULL) return NULL; + if (self == NULL) { + return NULL; + } PyObject_Init((PyObject *)self, &PyUFunc_Type); self->userloops = NULL; self->nin = nin; self->nout = nout; - self->nargs = nin+nout; + self->nargs = nin + nout; self->identity = PyUFunc_None; self->functions = pyfunc_functions; - self->ntypes = 1; self->check_return = 0; - + /* generalized ufunc */ self->core_enabled = 0; self->core_num_dim_ix = 0; @@ -3456,9 +3510,9 @@ self->core_signature = NULL; pyname = PyObject_GetAttrString(function, "__name__"); - if (pyname) + if (pyname) { (void) PyString_AsStringAndSize(pyname, &fname, &fname_len); - + } if (PyErr_Occurred()) { fname = "?"; fname_len = 1; @@ -3466,28 +3520,31 @@ } Py_XDECREF(pyname); - - - /* self->ptr holds a pointer for enough memory for - self->data[0] (fdata) - self->data - self->name - self->types - - To be safest, all of these need their memory aligned on void * pointers - Therefore, we may need to allocate extra space. - */ + /* + * self->ptr holds a pointer for enough memory for + * self->data[0] (fdata) + * self->data + * self->name + * self->types + * + * To be safest, all of these need their memory aligned on void * pointers + * Therefore, we may need to allocate extra space. + */ offset[0] = sizeof(PyUFunc_PyFuncData); i = (sizeof(PyUFunc_PyFuncData) % sizeof(void *)); - if (i) offset[0] += (sizeof(void *) - i); + if (i) { + offset[0] += (sizeof(void *) - i); + } offset[1] = self->nargs; i = (self->nargs % sizeof(void *)); - if (i) offset[1] += (sizeof(void *)-i); - - self->ptr = _pya_malloc(offset[0] + offset[1] + sizeof(void *) + \ - (fname_len+14)); - - if (self->ptr == NULL) return PyErr_NoMemory(); + if (i) { + offset[1] += (sizeof(void *)-i); + } + self->ptr = _pya_malloc(offset[0] + offset[1] + sizeof(void *) + + (fname_len + 14)); + if (self->ptr == NULL) { + return PyErr_NoMemory(); + } Py_INCREF(function); self->obj = function; fdata = (PyUFunc_PyFuncData *)(self->ptr); @@ -3497,20 +3554,18 @@ self->data = (void **)(((char *)self->ptr) + offset[0]); self->data[0] = (void *)fdata; - self->types = (char *)self->data + sizeof(void *); - for(i=0; inargs; i++) self->types[i] = PyArray_OBJECT; - + for (i = 0; i < self->nargs; i++) { + self->types[i] = PyArray_OBJECT; + } str = self->types + offset[1]; memcpy(str, fname, fname_len); memcpy(str+fname_len, " (vectorized)", 14); - self->name = str; /* Do a better job someday */ self->doc = "dynamic ufunc based on a python function"; - return (PyObject *)self; } @@ -3521,16 +3576,18 @@ int *signature, PyUFuncGenericFunction *oldfunc) { - int i,j; + int i, j; int res = -1; /* Find the location of the matching signature */ - for(i=0; intypes; i++) { - for(j=0; jnargs; j++) { - if (signature[j] != func->types[i*func->nargs+j]) + for (i = 0; i < func->ntypes; i++) { + for (j = 0; j < func->nargs; j++) { + if (signature[j] != func->types[i*func->nargs+j]) { break; + } } - if (j < func->nargs) continue; - + if (j < func->nargs) { + continue; + } if (oldfunc != NULL) { *oldfunc = func->functions[i]; } @@ -3551,7 +3608,7 @@ return PyUFunc_FromFuncAndDataAndSignature(func, data, types, ntypes, nin, nout, identity, name, doc, check_return, NULL); } - + /*UFUNC_API*/ static PyObject * PyUFunc_FromFuncAndDataAndSignature(PyUFuncGenericFunction *func, void **data, @@ -3563,7 +3620,9 @@ PyUFuncObject *self; self = _pya_malloc(sizeof(PyUFuncObject)); - if (self == NULL) return NULL; + if (self == NULL) { + return NULL; + } PyObject_Init((PyObject *)self, &PyUFunc_Type); self->nin = nin; @@ -3580,12 +3639,19 @@ self->obj = NULL; self->userloops=NULL; - if (name == NULL) self->name = "?"; - else self->name = name; + if (name == NULL) { + self->name = "?"; + } + else { + self->name = name; + } + if (doc == NULL) { + self->doc = "NULL"; + } + else { + self->doc = doc; + } - if (doc == NULL) self->doc = "NULL"; - else self->doc = doc; - /* generalized ufunc */ self->core_enabled = 0; self->core_num_dim_ix = 0; @@ -3594,22 +3660,22 @@ self->core_offsets = NULL; self->core_signature = NULL; if (signature != NULL) { - if (_parse_signature(self, signature) != 0) + if (_parse_signature(self, signature) != 0) { return NULL; + } } - return (PyObject *)self; } -/* This is the first-part of the CObject structure. - - I don't think this will change, but if it should, then - this needs to be fixed. The exposed C-API was insufficient - because I needed to replace the pointer and it wouldn't - let me with a destructor set (even though it works fine - with the destructor). -*/ - +/* + * This is the first-part of the CObject structure. + * + * I don't think this will change, but if it should, then + * this needs to be fixed. The exposed C-API was insufficient + * because I needed to replace the pointer and it wouldn't + * let me with a destructor set (even though it works fine + * with the destructor). + */ typedef struct { PyObject_HEAD void *c_obj; @@ -3617,31 +3683,37 @@ #define _SETCPTR(cobj, val) ((_simple_cobj *)(cobj))->c_obj = (val) -/* return 1 if arg1 > arg2, 0 if arg1 == arg2, and -1 if arg1 < arg2 - */ +/* return 1 if arg1 > arg2, 0 if arg1 == arg2, and -1 if arg1 < arg2 */ static int cmp_arg_types(int *arg1, int *arg2, int n) { - for(;n>0; n--, arg1++, arg2++) { - if (PyArray_EquivTypenums(*arg1, *arg2)) continue; - if (PyArray_CanCastSafely(*arg1, *arg2)) + for (; n > 0; n--, arg1++, arg2++) { + if (PyArray_EquivTypenums(*arg1, *arg2)) { + continue; + } + if (PyArray_CanCastSafely(*arg1, *arg2)) { return -1; + } return 1; } return 0; } -/* This frees the linked-list structure - when the CObject is destroyed (removed - from the internal dictionary) +/* + * This frees the linked-list structure when the CObject + * is destroyed (removed from the internal dictionary) */ static void _loop1d_list_free(void *ptr) { PyUFunc_Loop1d *funcdata; - if (ptr == NULL) return; + if (ptr == NULL) { + return; + } funcdata = (PyUFunc_Loop1d *)ptr; - if (funcdata == NULL) return; + if (funcdata == NULL) { + return; + } _pya_free(funcdata->arg_types); _loop1d_list_free(funcdata->next); _pya_free(funcdata); @@ -3664,8 +3736,7 @@ descr=PyArray_DescrFromType(usertype); if ((usertype < PyArray_USERDEF) || (descr==NULL)) { - PyErr_SetString(PyExc_TypeError, - "unknown user-defined type"); + PyErr_SetString(PyExc_TypeError, "unknown user-defined type"); return -1; } Py_DECREF(descr); @@ -3674,18 +3745,24 @@ ufunc->userloops = PyDict_New(); } key = PyInt_FromLong((long) usertype); - if (key == NULL) return -1; + if (key == NULL) { + return -1; + } funcdata = _pya_malloc(sizeof(PyUFunc_Loop1d)); - if (funcdata == NULL) goto fail; + if (funcdata == NULL) { + goto fail; + } newtypes = _pya_malloc(sizeof(int)*ufunc->nargs); - if (newtypes == NULL) goto fail; + if (newtypes == NULL) { + goto fail; + } if (arg_types != NULL) { - for(i=0; inargs; i++) { + for (i = 0; i < ufunc->nargs; i++) { newtypes[i] = arg_types[i]; } } else { - for(i=0; inargs; i++) { + for (i = 0; i < ufunc->nargs; i++) { newtypes[i] = usertype; } } @@ -3697,46 +3774,51 @@ /* Get entry for this user-defined type*/ cobj = PyDict_GetItem(ufunc->userloops, key); - /* If it's not there, then make one and return. */ if (cobj == NULL) { - cobj = PyCObject_FromVoidPtr((void *)funcdata, - _loop1d_list_free); - if (cobj == NULL) goto fail; + cobj = PyCObject_FromVoidPtr((void *)funcdata, _loop1d_list_free); + if (cobj == NULL) { + goto fail; + } PyDict_SetItem(ufunc->userloops, key, cobj); Py_DECREF(cobj); Py_DECREF(key); return 0; } else { - PyUFunc_Loop1d *current, *prev=NULL; - int cmp=1; - /* There is already at least 1 loop. Place this one in - lexicographic order. If the next one signature - is exactly like this one, then just replace. - Otherwise insert. - */ + PyUFunc_Loop1d *current, *prev = NULL; + int cmp = 1; + /* + * There is already at least 1 loop. Place this one in + * lexicographic order. If the next one signature + * is exactly like this one, then just replace. + * Otherwise insert. + */ current = (PyUFunc_Loop1d *)PyCObject_AsVoidPtr(cobj); while (current != NULL) { - cmp = cmp_arg_types(current->arg_types, newtypes, - ufunc->nargs); - if (cmp >= 0) break; + cmp = cmp_arg_types(current->arg_types, newtypes, ufunc->nargs); + if (cmp >= 0) { + break; + } prev = current; current = current->next; } - if (cmp == 0) { /* just replace it with new function */ + if (cmp == 0) { + /* just replace it with new function */ current->func = function; current->data = data; _pya_free(newtypes); _pya_free(funcdata); } - else { /* insert it before the current one - by hacking the internals of cobject to - replace the function pointer --- - can't use CObject API because destructor is set. - */ + else { + /* + * insert it before the current one by hacking the internals + * of cobject to replace the function pointer --- can't use + * CObject API because destructor is set. + */ funcdata->next = current; - if (prev == NULL) { /* place this at front */ + if (prev == NULL) { + /* place this at front */ _SETCPTR(cobj, funcdata); } else { @@ -3747,7 +3829,6 @@ Py_DECREF(key); return 0; - fail: Py_DECREF(key); _pya_free(funcdata); @@ -3762,11 +3843,21 @@ static void ufunc_dealloc(PyUFuncObject *self) { - if (self->core_num_dims) _pya_free(self->core_num_dims); - if (self->core_dim_ixs) _pya_free(self->core_dim_ixs); - if (self->core_offsets) _pya_free(self->core_offsets); - if (self->core_signature) _pya_free(self->core_signature); - if (self->ptr) _pya_free(self->ptr); + if (self->core_num_dims) { + _pya_free(self->core_num_dims); + } + if (self->core_dim_ixs) { + _pya_free(self->core_dim_ixs); + } + if (self->core_offsets) { + _pya_free(self->core_offsets); + } + if (self->core_signature) { + _pya_free(self->core_signature); + } + if (self->ptr) { + _pya_free(self->ptr); + } Py_XDECREF(self->userloops); Py_XDECREF(self->obj); _pya_free(self); @@ -3778,25 +3869,24 @@ char buf[100]; sprintf(buf, "", self->name); - return PyString_FromString(buf); } /* -------------------------------------------------------- */ -/* op.outer(a,b) is equivalent to op(a[:,NewAxis,NewAxis,etc.],b) - where a has b.ndim NewAxis terms appended. - - The result has dimensions a.ndim + b.ndim -*/ - +/* + * op.outer(a,b) is equivalent to op(a[:,NewAxis,NewAxis,etc.],b) + * where a has b.ndim NewAxis terms appended. + * + * The result has dimensions a.ndim + b.ndim + */ static PyObject * ufunc_outer(PyUFuncObject *self, PyObject *args, PyObject *kwds) { int i; PyObject *ret; - PyArrayObject *ap1=NULL, *ap2=NULL, *ap_new=NULL; + PyArrayObject *ap1 = NULL, *ap2 = NULL, *ap_new = NULL; PyObject *new_args, *tmp; PyObject *shape1, *shape2, *newshape; @@ -3806,7 +3896,7 @@ " signature"); return NULL; } - + if(self->nin != 2) { PyErr_SetString(PyExc_ValueError, "outer product only supported "\ @@ -3815,45 +3905,57 @@ } if (PySequence_Length(args) != 2) { - PyErr_SetString(PyExc_TypeError, - "exactly two arguments expected"); + PyErr_SetString(PyExc_TypeError, "exactly two arguments expected"); return NULL; } tmp = PySequence_GetItem(args, 0); - if (tmp == NULL) return NULL; - ap1 = (PyArrayObject *) \ - PyArray_FromObject(tmp, PyArray_NOTYPE, 0, 0); + if (tmp == NULL) { + return NULL; + } + ap1 = (PyArrayObject *) PyArray_FromObject(tmp, PyArray_NOTYPE, 0, 0); Py_DECREF(tmp); - if (ap1 == NULL) return NULL; - + if (ap1 == NULL) { + return NULL; + } tmp = PySequence_GetItem(args, 1); - if (tmp == NULL) return NULL; + if (tmp == NULL) { + return NULL; + } ap2 = (PyArrayObject *)PyArray_FromObject(tmp, PyArray_NOTYPE, 0, 0); Py_DECREF(tmp); - if (ap2 == NULL) {Py_DECREF(ap1); return NULL;} - + if (ap2 == NULL) { + Py_DECREF(ap1); + return NULL; + } /* Construct new shape tuple */ shape1 = PyTuple_New(ap1->nd); - if (shape1 == NULL) goto fail; - for(i=0; ind; i++) + if (shape1 == NULL) { + goto fail; + } + for (i = 0; i < ap1->nd; i++) { PyTuple_SET_ITEM(shape1, i, - PyLong_FromLongLong((longlong)ap1-> \ - dimensions[i])); - + PyLong_FromLongLong((longlong)ap1->dimensions[i])); + } shape2 = PyTuple_New(ap2->nd); - for(i=0; ind; i++) + for (i = 0; i < ap2->nd; i++) { PyTuple_SET_ITEM(shape2, i, PyInt_FromLong((long) 1)); - if (shape2 == NULL) {Py_DECREF(shape1); goto fail;} + } + if (shape2 == NULL) { + Py_DECREF(shape1); + goto fail; + } newshape = PyNumber_Add(shape1, shape2); Py_DECREF(shape1); Py_DECREF(shape2); - if (newshape == NULL) goto fail; - + if (newshape == NULL) { + goto fail; + } ap_new = (PyArrayObject *)PyArray_Reshape(ap1, newshape); Py_DECREF(newshape); - if (ap_new == NULL) goto fail; - + if (ap_new == NULL) { + goto fail; + } new_args = Py_BuildValue("(OO)", ap_new, ap2); Py_DECREF(ap1); Py_DECREF(ap2); @@ -3873,14 +3975,12 @@ static PyObject * ufunc_reduce(PyUFuncObject *self, PyObject *args, PyObject *kwds) { - return PyUFunc_GenericReduction(self, args, kwds, UFUNC_REDUCE); } static PyObject * ufunc_accumulate(PyUFuncObject *self, PyObject *args, PyObject *kwds) { - return PyUFunc_GenericReduction(self, args, kwds, UFUNC_ACCUMULATE); } @@ -3892,34 +3992,41 @@ static struct PyMethodDef ufunc_methods[] = { - {"reduce", (PyCFunction)ufunc_reduce, METH_VARARGS | METH_KEYWORDS, NULL }, - {"accumulate", (PyCFunction)ufunc_accumulate, - METH_VARARGS | METH_KEYWORDS, NULL }, - {"reduceat", (PyCFunction)ufunc_reduceat, - METH_VARARGS | METH_KEYWORDS, NULL }, - {"outer", (PyCFunction)ufunc_outer, METH_VARARGS | METH_KEYWORDS, NULL}, + {"reduce", + (PyCFunction)ufunc_reduce, + METH_VARARGS | METH_KEYWORDS, NULL }, + {"accumulate", + (PyCFunction)ufunc_accumulate, + METH_VARARGS | METH_KEYWORDS, NULL }, + {"reduceat", + (PyCFunction)ufunc_reduceat, + METH_VARARGS | METH_KEYWORDS, NULL }, + {"outer", + (PyCFunction)ufunc_outer, + METH_VARARGS | METH_KEYWORDS, NULL}, {NULL, NULL, 0, NULL} /* sentinel */ }; -/* construct the string - y1,y2,...,yn -*/ +/* construct the string y1,y2,...,yn */ static PyObject * _makeargs(int num, char *ltr, int null_if_none) { PyObject *str; int i; + switch (num) { case 0: - if (null_if_none) return NULL; + if (null_if_none) { + return NULL; + } return PyString_FromString(""); case 1: return PyString_FromString(ltr); } str = PyString_FromFormat("%s1, %s2", ltr, ltr); - for(i = 3; i <= num; ++i) { + for (i = 3; i <= num; ++i) { PyString_ConcatAndDel(&str, PyString_FromFormat(", %s%d", ltr, i)); } return str; @@ -3939,15 +4046,11 @@ static PyObject * ufunc_get_doc(PyUFuncObject *self) { - /* Put docstring first or FindMethod finds it...*/ - /* could so some introspection on name and nin + nout */ - /* to automate the first part of it */ - /* the doc string shouldn't need the calling convention */ - /* construct - name(x1, x2, ...,[ out1, out2, ...]) - - __doc__ - */ + /* Put docstring first or FindMethod finds it... could so some + * introspection on name and nin + nout to automate the first part + * of it the doc string shouldn't need the calling convention + * construct name(x1, x2, ...,[ out1, out2, ...]) __doc__ + */ PyObject *outargs, *inargs, *doc; outargs = _makeargs(self->nout, "out", 1); inargs = _makeargs(self->nin, "x", 0); @@ -3956,7 +4059,8 @@ self->name, PyString_AS_STRING(inargs), self->doc); - } else { + } + else { doc = PyString_FromFormat("%s(%s[, %s])\n\n%s", self->name, PyString_AS_STRING(inargs), @@ -3995,31 +4099,31 @@ static PyObject * ufunc_get_types(PyUFuncObject *self) { - /* return a list with types grouped - input->output */ + /* return a list with types grouped input->output */ PyObject *list; PyObject *str; - int k, j, n, nt=self->ntypes; + int k, j, n, nt = self->ntypes; int ni = self->nin; int no = self->nout; char *t; list = PyList_New(nt); - if (list == NULL) return NULL; + if (list == NULL) { + return NULL; + } t = _pya_malloc(no+ni+2); n = 0; - for(k=0; ktypes[n]); n++; } t[ni] = '-'; t[ni+1] = '>'; - for(j=0; jtypes[n]); + for (j = 0; j < no; j++) { + t[ni + 2 + j] = _typecharfromnum(self->types[n]); n++; } - str = PyString_FromStringAndSize(t, no+ni+2); + str = PyString_FromStringAndSize(t, no + ni + 2); PyList_SET_ITEM(list, k, str); } _pya_free(t); @@ -4047,86 +4151,106 @@ static PyObject * ufunc_get_signature(PyUFuncObject *self) { - if (!self->core_enabled) + if (!self->core_enabled) { Py_RETURN_NONE; + } return PyString_FromString(self->core_signature); } #undef _typecharfromnum -/* Docstring is now set from python */ -/* static char *Ufunctype__doc__ = NULL; */ - +/* + * Docstring is now set from python + * static char *Ufunctype__doc__ = NULL; + */ static PyGetSetDef ufunc_getset[] = { - {"__doc__", (getter)ufunc_get_doc, NULL, "documentation string", NULL}, - {"nin", (getter)ufunc_get_nin, NULL, "number of inputs", NULL}, - {"nout", (getter)ufunc_get_nout, NULL, "number of outputs", NULL}, - {"nargs", (getter)ufunc_get_nargs, NULL, "number of arguments", NULL}, - {"ntypes", (getter)ufunc_get_ntypes, NULL, "number of types", NULL}, - {"types", (getter)ufunc_get_types, NULL, "return a list with types grouped input->output", NULL}, - {"__name__", (getter)ufunc_get_name, NULL, "function name", NULL}, - {"identity", (getter)ufunc_get_identity, NULL, "identity value", NULL}, - {"signature",(getter)ufunc_get_signature,NULL, "signature"}, + {"__doc__", + (getter)ufunc_get_doc, + NULL, "documentation string", NULL}, + {"nin", + (getter)ufunc_get_nin, + NULL, "number of inputs", NULL}, + {"nout", + (getter)ufunc_get_nout, + NULL, "number of outputs", NULL}, + {"nargs", + (getter)ufunc_get_nargs, + NULL, "number of arguments", NULL}, + {"ntypes", + (getter)ufunc_get_ntypes, + NULL, "number of types", NULL}, + {"types", + (getter)ufunc_get_types, + NULL, "return a list with types grouped input->output", NULL}, + {"__name__", + (getter)ufunc_get_name, + NULL, "function name", NULL}, + {"identity", + (getter)ufunc_get_identity, + NULL, "identity value", NULL}, + {"signature", + (getter)ufunc_get_signature, + NULL, "signature"}, {NULL, NULL, NULL, NULL, NULL}, /* Sentinel */ }; static PyTypeObject PyUFunc_Type = { PyObject_HEAD_INIT(0) - 0, /*ob_size*/ - "numpy.ufunc", /*tp_name*/ - sizeof(PyUFuncObject), /*tp_basicsize*/ - 0, /*tp_itemsize*/ + 0, /* ob_size */ + "numpy.ufunc", /* tp_name */ + sizeof(PyUFuncObject), /* tp_basicsize */ + 0, /* tp_itemsize */ /* methods */ - (destructor)ufunc_dealloc, /*tp_dealloc*/ - (printfunc)0, /*tp_print*/ - (getattrfunc)0, /*tp_getattr*/ - (setattrfunc)0, /*tp_setattr*/ - (cmpfunc)0, /*tp_compare*/ - (reprfunc)ufunc_repr, /*tp_repr*/ - 0, /*tp_as_number*/ - 0, /*tp_as_sequence*/ - 0, /*tp_as_mapping*/ - (hashfunc)0, /*tp_hash*/ - (ternaryfunc)ufunc_generic_call, /*tp_call*/ - (reprfunc)ufunc_repr, /*tp_str*/ - 0, /* tp_getattro */ - 0, /* tp_setattro */ - 0, /* tp_as_buffer */ - Py_TPFLAGS_DEFAULT, /* tp_flags */ - NULL, /* tp_doc */ /* was Ufunctype__doc__ */ - 0, /* tp_traverse */ - 0, /* tp_clear */ - 0, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ - 0, /* tp_iter */ - 0, /* tp_iternext */ - ufunc_methods, /* tp_methods */ - 0, /* tp_members */ - ufunc_getset, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - 0, /* tp_init */ - 0, /* tp_alloc */ - 0, /* tp_new */ - 0, /* tp_free */ - 0, /* tp_is_gc */ - 0, /* tp_bases */ - 0, /* tp_mro */ - 0, /* tp_cache */ - 0, /* tp_subclasses */ - 0, /* tp_weaklist */ - 0, /* tp_del */ + (destructor)ufunc_dealloc, /* tp_dealloc */ + (printfunc)0, /* tp_print */ + (getattrfunc)0, /* tp_getattr */ + (setattrfunc)0, /* tp_setattr */ + (cmpfunc)0, /* tp_compare */ + (reprfunc)ufunc_repr, /* tp_repr */ + 0, /* tp_as_number */ + 0, /* tp_as_sequence */ + 0, /* tp_as_mapping */ + (hashfunc)0, /* tp_hash */ + (ternaryfunc)ufunc_generic_call, /* tp_call */ + (reprfunc)ufunc_repr, /* tp_str */ + 0, /* tp_getattro */ + 0, /* tp_setattro */ + 0, /* tp_as_buffer */ + Py_TPFLAGS_DEFAULT, /* tp_flags */ + NULL, /* tp_doc */ /* was Ufunctype__doc__ */ + 0, /* tp_traverse */ + 0, /* tp_clear */ + 0, /* tp_richcompare */ + 0, /* tp_weaklistoffset */ + 0, /* tp_iter */ + 0, /* tp_iternext */ + ufunc_methods, /* tp_methods */ + 0, /* tp_members */ + ufunc_getset, /* tp_getset */ + 0, /* tp_base */ + 0, /* tp_dict */ + 0, /* tp_descr_get */ + 0, /* tp_descr_set */ + 0, /* tp_dictoffset */ + 0, /* tp_init */ + 0, /* tp_alloc */ + 0, /* tp_new */ + 0, /* tp_free */ + 0, /* tp_is_gc */ + 0, /* tp_bases */ + 0, /* tp_mro */ + 0, /* tp_cache */ + 0, /* tp_subclasses */ + 0, /* tp_weaklist */ + 0, /* tp_del */ #ifdef COUNT_ALLOCS - /* these must be last and never explicitly initialized */ - 0, /* tp_allocs */ - 0, /* tp_frees */ - 0, /* tp_maxalloc */ - 0, /* tp_prev */ - 0, /* *tp_next */ + /* these must be last and never explicitly initialized */ + 0, /* tp_allocs */ + 0, /* tp_frees */ + 0, /* tp_maxalloc */ + 0, /* tp_prev */ + 0, /* *tp_next */ #endif }; Modified: branches/coremath/numpy/distutils/mingw32ccompiler.py =================================================================== --- branches/coremath/numpy/distutils/mingw32ccompiler.py 2009-02-23 16:14:55 UTC (rev 6459) +++ branches/coremath/numpy/distutils/mingw32ccompiler.py 2009-02-23 16:25:45 UTC (rev 6460) @@ -456,7 +456,7 @@ def generate_manifest(config): msver = get_build_msvc_version() if msver is not None: - if msver >= 9: + if msver >= 8: check_embedded_msvcr_match_linked(msver) ma = int(msver) mi = int((msver - ma) * 10) Modified: branches/coremath/numpy/lib/io.py =================================================================== --- branches/coremath/numpy/lib/io.py 2009-02-23 16:14:55 UTC (rev 6459) +++ branches/coremath/numpy/lib/io.py 2009-02-23 16:25:45 UTC (rev 6460) @@ -22,6 +22,37 @@ _file = file _string_like = _is_string_like +def seek_gzip_factory(f): + """Use this factory to produce the class so that we can do a lazy + import on gzip. + + """ + import gzip, new + + def seek(self, offset, whence=0): + # figure out new position (we can only seek forwards) + if whence == 1: + offset = self.offset + offset + + if whence not in [0, 1]: + raise IOError, "Illegal argument" + + if offset < self.offset: + # for negative seek, rewind and do positive seek + self.rewind() + count = offset - self.offset + for i in range(count // 1024): + self.read(1024) + self.read(count % 1024) + + def tell(self): + return self.offset + + f.seek = new.instancemethod(seek, f) + f.tell = new.instancemethod(tell, f) + + return f + class BagObj(object): """A simple class that converts attribute lookups to getitems on the class passed in. @@ -138,8 +169,12 @@ memmap([4, 5, 6]) """ + import gzip + if isinstance(file, basestring): fid = _file(file,"rb") + elif isinstance(file, gzip.GzipFile): + fid = seek_gzip_factory(file) else: fid = file @@ -346,7 +381,7 @@ if _is_string_like(fname): if fname.endswith('.gz'): import gzip - fh = gzip.open(fname) + fh = seek_gzip_factory(fname) elif fname.endswith('.bz2'): import bz2 fh = bz2.BZ2File(fname) Modified: branches/coremath/numpy/lib/tests/test_io.py =================================================================== --- branches/coremath/numpy/lib/tests/test_io.py 2009-02-23 16:14:55 UTC (rev 6459) +++ branches/coremath/numpy/lib/tests/test_io.py 2009-02-23 16:25:45 UTC (rev 6460) @@ -1,4 +1,3 @@ - import numpy as np import numpy.ma as ma from numpy.ma.testutils import * @@ -699,7 +698,7 @@ def test_user_missing_values(self): - datastr ="A, B, C\n0, 0., 0j\n1, N/A, 1j\n-9, 2.2, N/A\n3, -99, 3j" + datastr ="A, B, C\n0, 0., 0j\n1, N/A, 1j\n-9, 2.2, N/A\n3, -99, 3j" data = StringIO.StringIO(datastr) basekwargs = dict(dtype=None, delimiter=',', names=True, missing='N/A') mdtype = [('A', int), ('B', float), ('C', complex)] @@ -712,7 +711,7 @@ assert_equal(test, control) # data.seek(0) - test = np.mafromtxt(data, + test = np.mafromtxt(data, missing_values={0:-9, 1:-99, 2:-999j}, **basekwargs) control = ma.array([( 0, 0.0, 0j), (1, -999, 1j), ( -9, 2.2, -999j), (3, -99, 3j)], @@ -721,7 +720,7 @@ assert_equal(test, control) # data.seek(0) - test = np.mafromtxt(data, + test = np.mafromtxt(data, missing_values={0:-9, 'B':-99, 'C':-999j}, **basekwargs) control = ma.array([( 0, 0.0, 0j), (1, -999, 1j), @@ -806,8 +805,22 @@ self.failUnless(isinstance(test, np.recarray)) assert_equal(test, control) +def test_gzip_load(): + import gzip + from StringIO import StringIO + a = np.random.random((5, 5)) + s = StringIO() + f = gzip.GzipFile(fileobj=s, mode="w") + np.save(f, a) + f.close() + s.seek(0) + + f = gzip.GzipFile(fileobj=s, mode="r") + assert_array_equal(np.load(f), a) + + if __name__ == "__main__": run_module_suite() Modified: branches/coremath/numpy/testing/utils.py =================================================================== --- branches/coremath/numpy/testing/utils.py 2009-02-23 16:14:55 UTC (rev 6459) +++ branches/coremath/numpy/testing/utils.py 2009-02-23 16:25:45 UTC (rev 6460) @@ -240,9 +240,9 @@ def assert_array_compare(comparison, x, y, err_msg='', verbose=True, header=''): - from numpy.core import asarray, isnan, any - x = asarray(x) - y = asarray(y) + from numpy.core import array, isnan, any + x = array(x, copy=False, subok=True) + y = array(y, copy=False, subok=True) def isnumber(x): return x.dtype.char in '?bhilqpBHILQPfdgFDG' From numpy-svn at scipy.org Mon Feb 23 11:32:53 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Mon, 23 Feb 2009 10:32:53 -0600 (CST) Subject: [Numpy-svn] r6461 - trunk/numpy/core/include/numpy Message-ID: <20090223163253.AA8D2C7C029@scipy.org> Author: cdavid Date: 2009-02-23 10:32:46 -0600 (Mon, 23 Feb 2009) New Revision: 6461 Modified: trunk/numpy/core/include/numpy/mingw_amd64_fenv.h Log: Add fesetexceptflag func for mingw-w64 support. Modified: trunk/numpy/core/include/numpy/mingw_amd64_fenv.h =================================================================== --- trunk/numpy/core/include/numpy/mingw_amd64_fenv.h 2009-02-23 16:25:45 UTC (rev 6460) +++ trunk/numpy/core/include/numpy/mingw_amd64_fenv.h 2009-02-23 16:32:46 UTC (rev 6461) @@ -66,13 +66,20 @@ } static __inline int -npy_fegetexceptflag(npy_fexcept_t *__flagp, int __excepts) +npy_fesetexceptflag(const npy_fexcept_t *flagp, int excepts) { - int __mxcsr, __status; + npy_fenv_t env; - __stmxcsr(&__mxcsr); - __fnstsw(&__status); - *__flagp = (__mxcsr | __status) & __excepts; + __fnstenv(&env.__x87); + env.__x87.__status &= ~excepts; + env.__x87.__status |= *flagp & excepts; + __fldenv(env.__x87); + + __stmxcsr(&env.__mxcsr); + env.__mxcsr &= ~excepts; + env.__mxcsr |= *flagp & excepts; + __ldmxcsr(env.__mxcsr); + return (0); } From numpy-svn at scipy.org Mon Feb 23 12:10:20 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Mon, 23 Feb 2009 11:10:20 -0600 (CST) Subject: [Numpy-svn] r6462 - in branches/coremath: . numpy/core/include/numpy Message-ID: <20090223171020.14606C7C029@scipy.org> Author: cdavid Date: 2009-02-23 11:10:02 -0600 (Mon, 23 Feb 2009) New Revision: 6462 Modified: branches/coremath/ branches/coremath/numpy/core/include/numpy/mingw_amd64_fenv.h Log: Merged revisions 6461 via svnmerge from http://svn.scipy.org/svn/numpy/trunk ........ r6461 | cdavid | 2009-02-24 01:32:46 +0900 (Tue, 24 Feb 2009) | 1 line Add fesetexceptflag func for mingw-w64 support. ........ Property changes on: branches/coremath ___________________________________________________________________ Name: svnmerge-integrated - /branches/distutils-revamp:1-2752 /branches/dynamic_cpu_configuration:1-6101 /branches/fix_float_format:1-6359 /branches/multicore:1-3687 /branches/numpy-mingw-w64:1-6150 /branches/visualstudio_manifest:1-6077 /trunk:1-6459 + /branches/distutils-revamp:1-2752 /branches/dynamic_cpu_configuration:1-6101 /branches/fix_float_format:1-6359 /branches/multicore:1-3687 /branches/numpy-mingw-w64:1-6150 /branches/visualstudio_manifest:1-6077 /trunk:1-6461 Modified: branches/coremath/numpy/core/include/numpy/mingw_amd64_fenv.h =================================================================== --- branches/coremath/numpy/core/include/numpy/mingw_amd64_fenv.h 2009-02-23 16:32:46 UTC (rev 6461) +++ branches/coremath/numpy/core/include/numpy/mingw_amd64_fenv.h 2009-02-23 17:10:02 UTC (rev 6462) @@ -66,13 +66,20 @@ } static __inline int -npy_fegetexceptflag(npy_fexcept_t *__flagp, int __excepts) +npy_fesetexceptflag(const npy_fexcept_t *flagp, int excepts) { - int __mxcsr, __status; + npy_fenv_t env; - __stmxcsr(&__mxcsr); - __fnstsw(&__status); - *__flagp = (__mxcsr | __status) & __excepts; + __fnstenv(&env.__x87); + env.__x87.__status &= ~excepts; + env.__x87.__status |= *flagp & excepts; + __fldenv(env.__x87); + + __stmxcsr(&env.__mxcsr); + env.__mxcsr &= ~excepts; + env.__mxcsr |= *flagp & excepts; + __ldmxcsr(env.__mxcsr); + return (0); } From numpy-svn at scipy.org Mon Feb 23 12:33:18 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Mon, 23 Feb 2009 11:33:18 -0600 (CST) Subject: [Numpy-svn] r6463 - in trunk/numpy/ma: . tests Message-ID: <20090223173318.66477C7C029@scipy.org> Author: pierregm Date: 2009-02-23 11:33:11 -0600 (Mon, 23 Feb 2009) New Revision: 6463 Modified: trunk/numpy/ma/core.py trunk/numpy/ma/tests/test_core.py Log: * MaskedArray.__setstate__ : fixed for structured array Modified: trunk/numpy/ma/core.py =================================================================== --- trunk/numpy/ma/core.py 2009-02-23 17:10:02 UTC (rev 6462) +++ trunk/numpy/ma/core.py 2009-02-23 17:33:11 UTC (rev 6463) @@ -3846,7 +3846,7 @@ """ (ver, shp, typ, isf, raw, msk, flv) = state ndarray.__setstate__(self, (shp, typ, isf, raw)) - self._mask.__setstate__((shp, np.dtype(bool), isf, msk)) + self._mask.__setstate__((shp, make_mask_descr(typ), isf, msk)) self.fill_value = flv # def __reduce__(self): Modified: trunk/numpy/ma/tests/test_core.py =================================================================== --- trunk/numpy/ma/tests/test_core.py 2009-02-23 17:10:02 UTC (rev 6462) +++ trunk/numpy/ma/tests/test_core.py 2009-02-23 17:33:11 UTC (rev 6463) @@ -369,14 +369,26 @@ assert_equal(a_pickled._mask, a._mask) assert_equal(a_pickled._data, a._data) assert_equal(a_pickled.fill_value, 999) - # + + def test_pickling_subbaseclass(self): + "Test pickling w/ a subclass of ndarray" + import cPickle a = array(np.matrix(range(10)), mask=[1,0,1,0,0]*2) a_pickled = cPickle.loads(a.dumps()) assert_equal(a_pickled._mask, a._mask) assert_equal(a_pickled, a) self.failUnless(isinstance(a_pickled._data,np.matrix)) + def test_pickling_wstructured(self): + "Tests pickling w/ structured array" + import cPickle + a = array([(1, 1.), (2, 2.)], mask=[(0, 0), (0, 1)], + dtype=[('a', int), ('b', float)]) + a_pickled = cPickle.loads(a.dumps()) + assert_equal(a_pickled._mask, a._mask) + assert_equal(a_pickled, a) + def test_single_element_subscript(self): "Tests single element subscripts of Maskedarrays." a = array([1,3,2]) From numpy-svn at scipy.org Mon Feb 23 14:02:57 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Mon, 23 Feb 2009 13:02:57 -0600 (CST) Subject: [Numpy-svn] r6464 - trunk/numpy/core/src Message-ID: <20090223190257.BC575C7C024@scipy.org> Author: charris Date: 2009-02-23 13:02:43 -0600 (Mon, 23 Feb 2009) New Revision: 6464 Modified: trunk/numpy/core/src/arraytypes.inc.src Log: Coding style cleanups. Start on arraytypes.inc.src. I'm a bit hesitant to commit this as tests are lacking for the functionality, but I want the changes somewhere central. Modified: trunk/numpy/core/src/arraytypes.inc.src =================================================================== --- trunk/numpy/core/src/arraytypes.inc.src 2009-02-23 17:33:11 UTC (rev 6463) +++ trunk/numpy/core/src/arraytypes.inc.src 2009-02-23 19:02:43 UTC (rev 6464) @@ -21,10 +21,9 @@ /**begin repeat -#type=long,longlong# -#Type=Long,LongLong# -*/ - + * #type = long, longlong# + * #Type = Long, LongLong# + */ static @type@ MyPyLong_As at Type@ (PyObject *obj) { @@ -39,7 +38,6 @@ return ret; } - static u at type@ MyPyLong_AsUnsigned at Type@ (PyObject *obj) { @@ -64,15 +62,21 @@ /****************** getitem and setitem **********************/ /**begin repeat - -#TYP=BOOL,BYTE,UBYTE,SHORT,USHORT,INT,LONG,UINT,ULONG,LONGLONG,ULONGLONG,FLOAT,DOUBLE# -#func1=PyBool_FromLong, PyInt_FromLong*6, PyLong_FromUnsignedLong*2, PyLong_FromLongLong, PyLong_FromUnsignedLongLong, PyFloat_FromDouble*2# -#func2=PyObject_IsTrue, MyPyLong_AsLong*6, MyPyLong_AsUnsignedLong*2, MyPyLong_AsLongLong, MyPyLong_AsUnsignedLongLong, MyPyFloat_AsDouble*2# -#typ=Bool, byte, ubyte, short, ushort, int, long, uint, ulong, longlong, ulonglong, float, double# -#typ1=long*7, ulong*2, longlong, ulonglong, float, double# -#kind=Bool, Byte, UByte, Short, UShort, Int, Long, UInt, ULong, LongLong, ULongLong, Float, Double# + * + * #TYP = BOOL, BYTE, UBYTE, SHORT, USHORT, INT, LONG, UINT, ULONG, + * LONGLONG, ULONGLONG, FLOAT, DOUBLE# + * #func1 = PyBool_FromLong, PyInt_FromLong*6, PyLong_FromUnsignedLong*2, + * PyLong_FromLongLong, PyLong_FromUnsignedLongLong, + * PyFloat_FromDouble*2# + * #func2 = PyObject_IsTrue, MyPyLong_AsLong*6, MyPyLong_AsUnsignedLong*2, + * MyPyLong_AsLongLong, MyPyLong_AsUnsignedLongLong, + * MyPyFloat_AsDouble*2# + * #typ = Bool, byte, ubyte, short, ushort, int, long, uint, ulong, + * longlong, ulonglong, float, double# + * #typ1 = long*7, ulong*2, longlong, ulonglong, float, double# + * #kind = Bool, Byte, UByte, Short, UShort, Int, Long, UInt, ULong, + * LongLong, ULongLong, Float, Double# */ - static PyObject * @TYP at _getitem(char *ip, PyArrayObject *ap) { @typ@ t1; @@ -82,8 +86,7 @@ return @func1@((@typ1@)t1); } else { - ap->descr->f->copyswap(&t1, ip, !PyArray_ISNOTSWAPPED(ap), - ap); + ap->descr->f->copyswap(&t1, ip, !PyArray_ISNOTSWAPPED(ap), ap); return @func1@((@typ1@)t1); } } @@ -92,7 +95,6 @@ @TYP at _setitem(PyObject *op, char *ov, PyArrayObject *ap) { @typ@ temp; /* ensures alignment */ - if (PyArray_IsScalar(op, @kind@)) { temp = ((Py at kind@ScalarObject *)op)->obval; } @@ -113,19 +115,16 @@ ap->descr->f->copyswap(ov, &temp, !PyArray_ISNOTSWAPPED(ap), ap); } - return 0; } /**end repeat**/ - /**begin repeat - -#TYP=CFLOAT,CDOUBLE# -#typ=float, double# -*/ - + * + * #TYP = CFLOAT, CDOUBLE# + * #typ = float, double# + */ static PyObject * @TYP at _getitem(char *ip, PyArrayObject *ap) { @typ@ t1, t2; @@ -142,14 +141,15 @@ return PyComplex_FromDoubles((double)t1, (double)t2); } } + /**end repeat**/ /**begin repeat - -#TYP=CFLOAT, CDOUBLE, CLONGDOUBLE# -#typ=float, double, longdouble# -#kind=CFloat, CDouble, CLongDouble# -*/ + * + * #TYP = CFLOAT, CDOUBLE, CLONGDOUBLE# + * #typ = float, double, longdouble# + * #kind = CFloat, CDouble, CLongDouble# + */ static int @TYP at _setitem(PyObject *op, char *ov, PyArrayObject *ap) { @@ -159,8 +159,8 @@ int rsize; if (!(PyArray_IsScalar(op, @kind@))) { - if (PyArray_Check(op) && (PyArray_NDIM(op)==0)) { - op2 = ((PyArrayObject *)op)->descr->f->getitem \ + if (PyArray_Check(op) && (PyArray_NDIM(op) == 0)) { + op2 = ((PyArrayObject *)op)->descr->f->getitem (((PyArrayObject *)op)->data, (PyArrayObject *)op); } @@ -174,22 +174,24 @@ oop = PyComplex_AsCComplex (op2); } Py_DECREF(op2); - if (PyErr_Occurred()) return -1; + if (PyErr_Occurred()) { + return -1; + } temp.real = (@typ@) oop.real; temp.imag = (@typ@) oop.imag; } else { temp = ((Py at kind@ScalarObject *)op)->obval; } - memcpy(ov, &temp, ap->descr->elsize); - if (!PyArray_ISNOTSWAPPED(ap)) + if (!PyArray_ISNOTSWAPPED(ap)) { byte_swap_vector(ov, 2, sizeof(@typ@)); - + } rsize = sizeof(@typ@); copy_and_swap(ov, &temp, rsize, 2, rsize, !PyArray_ISNOTSWAPPED(ap)); return 0; } + /**end repeat**/ static PyObject * @@ -208,9 +210,12 @@ else { temp = (longdouble) MyPyFloat_AsDouble(op); } - if (PyErr_Occurred()) return -1; - if (ap == NULL || PyArray_ISBEHAVED(ap)) + if (PyErr_Occurred()) { + return -1; + } + if (ap == NULL || PyArray_ISBEHAVED(ap)) { *((longdouble *)ov)=temp; + } else { copy_and_swap(ov, &temp, ap->descr->elsize, 1, 0, !PyArray_ISNOTSWAPPED(ap)); @@ -297,8 +302,12 @@ return -1; } /* Sequence_Size might have returned an error */ - if (PyErr_Occurred()) PyErr_Clear(); - if ((temp=PyObject_Unicode(op)) == NULL) return -1; + if (PyErr_Occurred()) { + PyErr_Clear(); + } + if ((temp=PyObject_Unicode(op)) == NULL) { + return -1; + } ptr = PyUnicode_AS_UNICODE(temp); if ((ptr == NULL) || (PyErr_Occurred())) { Py_DECREF(temp); @@ -317,7 +326,9 @@ return -1; } } - else buffer = ov; + else { + buffer = ov; + } datalen = PyUCS2Buffer_AsUCS4(ptr, (PyArray_UCS4 *)buffer, datalen >> 1, ap->descr->elsize >> 2); @@ -331,16 +342,18 @@ if (ap->descr->elsize > datalen) { memset(ov + datalen, 0, (ap->descr->elsize - datalen)); } - - if (!PyArray_ISNOTSWAPPED(ap)) + if (!PyArray_ISNOTSWAPPED(ap)) { byte_swap_vector(ov, ap->descr->elsize >> 2, 4); + } Py_DECREF(temp); return 0; } -/* STRING -- can handle both NULL-terminated and not NULL-terminated cases - will truncate all ending NULLs in returned string. -*/ +/* STRING + * + * can handle both NULL-terminated and not NULL-terminated cases + * will truncate all ending NULLs in returned string. + */ static PyObject * STRING_getitem(char *ip, PyArrayObject *ap) { @@ -348,8 +361,10 @@ char *ptr; int size = ap->descr->elsize; - ptr = ip + size-1; - while (*ptr-- == '\0' && size > 0) size--; + ptr = ip + size - 1; + while (*ptr-- == '\0' && size > 0) { + size--; + } return PyString_FromStringAndSize(ip,size); } @@ -358,7 +373,7 @@ { char *ptr; Py_ssize_t len; - PyObject *temp=NULL; + PyObject *temp = NULL; if (!PyString_Check(op) && !PyUnicode_Check(op) && PySequence_Check(op) && PySequence_Size(op) > 0) { @@ -367,17 +382,21 @@ return -1; } /* Sequence_Size might have returned an error */ - if (PyErr_Occurred()) PyErr_Clear(); - if ((temp = PyObject_Str(op)) == NULL) return -1; - + if (PyErr_Occurred()) { + PyErr_Clear(); + } + if ((temp = PyObject_Str(op)) == NULL) { + return -1; + } if (PyString_AsStringAndSize(temp, &ptr, &len) == -1) { Py_DECREF(temp); return -1; } memcpy(ov, ptr, MIN(ap->descr->elsize,len)); - /* If string lenth is smaller than room in array - Then fill the rest of the element size - with NULL */ + /* + * If string lenth is smaller than room in array + * Then fill the rest of the element size with NULL + */ if (ap->descr->elsize > len) { memset(ov + len, 0, (ap->descr->elsize - len)); } @@ -429,7 +448,7 @@ static PyObject * VOID_getitem(char *ip, PyArrayObject *ap) { - PyObject *u=NULL; + PyObject *u = NULL; PyArray_Descr* descr; int itemsize; @@ -446,29 +465,30 @@ /* get the names from the fields dictionary*/ names = descr->names; - if (!names) goto finish; + if (!names) { + goto finish; + } n = PyTuple_GET_SIZE(names); ret = PyTuple_New(n); savedflags = ap->flags; - for (i=0; ifields, key); - if (!PyArg_ParseTuple(tup, "Oi|O", &new, &offset, - &title)) { + if (!PyArg_ParseTuple(tup, "Oi|O", &new, &offset, &title)) { Py_DECREF(ret); ap->descr = descr; return NULL; } ap->descr = new; /* update alignment based on offset */ - if ((new->alignment > 1) && \ - ((((intp)(ip+offset)) % new->alignment) != 0)) + if ((new->alignment > 1) + && ((((intp)(ip+offset)) % new->alignment) != 0)) { ap->flags &= ~ALIGNED; - else + } + else { ap->flags |= ALIGNED; - - PyTuple_SET_ITEM(ret, i, \ - new->f->getitem(ip+offset, ap)); + } + PyTuple_SET_ITEM(ret, i, new->f->getitem(ip+offset, ap)); ap->flags = savedflags; } ap->descr = descr; @@ -477,10 +497,10 @@ if (descr->subarray) { /* return an array of the basic type */ - PyArray_Dims shape={NULL,-1}; + PyArray_Dims shape = {NULL, -1}; PyObject *ret; - if (!(PyArray_IntpConverter(descr->subarray->shape, - &shape))) { + + if (!(PyArray_IntpConverter(descr->subarray->shape, &shape))) { PyDimMem_FREE(shape.ptr); PyErr_SetString(PyExc_ValueError, "invalid shape in fixed-type tuple."); @@ -488,11 +508,12 @@ } Py_INCREF(descr->subarray->base); ret = PyArray_NewFromDescr(&PyArray_Type, - descr->subarray->base, - shape.len, shape.ptr, + descr->subarray->base, shape.len, shape.ptr, NULL, ip, ap->flags, NULL); PyDimMem_FREE(shape.ptr); - if (!ret) return NULL; + if (!ret) { + return NULL; + } PyArray_BASE(ret) = (PyObject *)ap; Py_INCREF(ap); PyArray_UpdateFlags((PyArrayObject *)ret, UPDATE_ALL); @@ -500,23 +521,27 @@ } finish: - if (PyDataType_FLAGCHK(descr, NPY_ITEM_HASOBJECT) || - PyDataType_FLAGCHK(descr, NPY_ITEM_IS_POINTER)) { + if (PyDataType_FLAGCHK(descr, NPY_ITEM_HASOBJECT) + || PyDataType_FLAGCHK(descr, NPY_ITEM_IS_POINTER)) { PyErr_SetString(PyExc_ValueError, "tried to get void-array with object" " members as buffer."); return NULL; } - - itemsize=ap->descr->elsize; - if (PyArray_ISWRITEABLE(ap)) + itemsize = ap->descr->elsize; + if (PyArray_ISWRITEABLE(ap)) { u = PyBuffer_FromReadWriteMemory(ip, itemsize); - else + } + else { u = PyBuffer_FromMemory(ip, itemsize); - if (u==NULL) goto fail; - - /* default is to return buffer object pointing to current item */ - /* a view of it */ + } + if (u == NULL) { + goto fail; + } + /* + * default is to return buffer object pointing to + * current item a view of it + */ return u; fail: @@ -543,6 +568,7 @@ PyArray_Descr *new; int offset; int savedflags; + res = -1; /* get the names from the fields dictionary*/ names = descr->names; @@ -554,26 +580,27 @@ return -1; } savedflags = ap->flags; - for (i=0; ifields, key); - if (!PyArg_ParseTuple(tup, "Oi|O", &new, &offset, - &title)) { + if (!PyArg_ParseTuple(tup, "Oi|O", &new, &offset, &title)) { ap->descr = descr; return -1; } ap->descr = new; /* remember to update alignment flags */ - if ((new->alignment > 1) && \ - ((((intp)(ip+offset)) % new->alignment) != 0)) + if ((new->alignment > 1) + && ((((intp)(ip+offset)) % new->alignment) != 0)) { ap->flags &= ~ALIGNED; - else + } + else { ap->flags |= ALIGNED; - - res = new->f->setitem(PyTuple_GET_ITEM(op, i), - ip+offset, ap); + } + res = new->f->setitem(PyTuple_GET_ITEM(op, i), ip+offset, ap); ap->flags = savedflags; - if (res < 0) break; + if (res < 0) { + break; + } } ap->descr = descr; return res; @@ -581,10 +608,9 @@ if (descr->subarray) { /* copy into an array of the same basic type */ - PyArray_Dims shape={NULL,-1}; + PyArray_Dims shape = {NULL, -1}; PyObject *ret; - if (!(PyArray_IntpConverter(descr->subarray->shape, - &shape))) { + if (!(PyArray_IntpConverter(descr->subarray->shape, &shape))) { PyDimMem_FREE(shape.ptr); PyErr_SetString(PyExc_ValueError, "invalid shape in fixed-type tuple."); @@ -592,11 +618,12 @@ } Py_INCREF(descr->subarray->base); ret = PyArray_NewFromDescr(&PyArray_Type, - descr->subarray->base, - shape.len, shape.ptr, + descr->subarray->base, shape.len, shape.ptr, NULL, ip, ap->flags, NULL); PyDimMem_FREE(shape.ptr); - if (!ret) return -1; + if (!ret) { + return -1; + } PyArray_BASE(ret) = (PyObject *)ap; Py_INCREF(ap); PyArray_UpdateFlags((PyArrayObject *)ret, UPDATE_ALL); @@ -609,15 +636,17 @@ { const void *buffer; Py_ssize_t buflen; - if (PyDataType_FLAGCHK(descr, NPY_ITEM_HASOBJECT) || - PyDataType_FLAGCHK(descr, NPY_ITEM_IS_POINTER)) { + if (PyDataType_FLAGCHK(descr, NPY_ITEM_HASOBJECT) + || PyDataType_FLAGCHK(descr, NPY_ITEM_IS_POINTER)) { PyErr_SetString(PyExc_ValueError, "tried to set void-array with object" " members using buffer."); return -1; } res = PyObject_AsReadBuffer(op, &buffer, &buflen); - if (res == -1) goto fail; + if (res == -1) { + goto fail; + } memcpy(ip, buffer, NPY_MIN(buflen, itemsize)); if (itemsize > buflen) { memset(ip+buflen, 0, (itemsize-buflen)); @@ -636,29 +665,58 @@ /**begin repeat -#to=(BYTE,UBYTE,SHORT,USHORT,INT,UINT,LONG,ULONG,LONGLONG,ULONGLONG,FLOAT,DOUBLE,LONGDOUBLE)*16# -#from=BYTE*13,UBYTE*13,SHORT*13,USHORT*13,INT*13,UINT*13,LONG*13,ULONG*13,LONGLONG*13,ULONGLONG*13,FLOAT*13,DOUBLE*13,LONGDOUBLE*13,CFLOAT*13,CDOUBLE*13,CLONGDOUBLE*13# -#totyp=(byte, ubyte, short, ushort, int, uint, long, ulong, longlong, ulonglong, float, double, longdouble)*16# -#fromtyp=byte*13, ubyte*13, short*13, ushort*13, int*13, uint*13, long*13, ulong*13, longlong*13, ulonglong*13, float*13, double*13, longdouble*13, float*13, double*13, longdouble*13# -#incr= (ip++)*169,(ip+=2)*39# + * + * #TOTYPE = BYTE, UBYTE, SHORT, USHORT, INT, UINT, LONG, ULONG, + * LONGLONG, ULONGLONG, FLOAT, DOUBLE, LONGDOUBLE# + * #totype = byte, ubyte, short, ushort, int, uint, long, ulong, + * longlong, ulonglong, float, double, longdouble# */ + +/**begin repeat1 + * + * #FROMTYPE = BYTE, UBYTE, SHORT, USHORT, INT, UINT, LONG, ULONG, + * LONGLONG, ULONGLONG, FLOAT, DOUBLE, LONGDOUBLE# + * #fromtype = byte, ubyte, short, ushort, int, uint, long, ulong, + * longlong, ulonglong, float, double, longdouble# + */ static void - at from@_to_ at to@(register @fromtyp@ *ip, register @totyp@ *op, register intp n, + at FROMTYPE@_to_ at TOTYPE@(@fromtype@ *ip, @totype@ *op, intp n, PyArrayObject *NPY_UNUSED(aip), PyArrayObject *NPY_UNUSED(aop)) { while (n--) { - *op++ = (@totyp@)*ip; - @incr@; + *op++ = (@totype@)*ip++; } } +/**end repeat1**/ + +/**begin repeat1 + * + * #FROMTYPE = CFLOAT, CDOUBLE, CLONGDOUBLE# + * #fromtype = float, double, longdouble# + */ +static void + at FROMTYPE@_to_ at TOTYPE@(@fromtype@ *ip, @totype@ *op, intp n, + PyArrayObject *NPY_UNUSED(aip), PyArrayObject *NPY_UNUSED(aop)) +{ + while (n--) { + *op++ = (@totype@)*ip; + ip += 2; + } +} +/**end repeat1**/ + /**end repeat**/ + /**begin repeat -#from=BOOL,BYTE,UBYTE,SHORT,USHORT,INT,UINT,LONG,ULONG,LONGLONG,ULONGLONG,FLOAT,DOUBLE,LONGDOUBLE# -#fromtyp=Bool, byte, ubyte, short, ushort, int, uint, long, ulong, longlong, ulonglong, float, double, longdouble# + * + * #FROMTYPE = BOOL, BYTE, UBYTE, SHORT, USHORT, INT, UINT, LONG, ULONG, + * LONGLONG, ULONGLONG, FLOAT, DOUBLE, LONGDOUBLE# + * #fromtype = Bool, byte, ubyte, short, ushort, int, uint, long, ulong, + * longlong, ulonglong, float, double, longdouble# */ static void - at from@_to_BOOL(register @fromtyp@ *ip, register Bool *op, register intp n, + at FROMTYPE@_to_BOOL(@fromtype@ *ip, Bool *op, intp n, PyArrayObject *NPY_UNUSED(aip), PyArrayObject *NPY_UNUSED(aop)) { while (n--) { @@ -668,87 +726,105 @@ /**end repeat**/ /**begin repeat -#from=CFLOAT, CDOUBLE, CLONGDOUBLE# -#fromtyp=cfloat, cdouble, clongdouble# + * + * #FROMTYPE = CFLOAT, CDOUBLE, CLONGDOUBLE# + * #fromtype = cfloat, cdouble, clongdouble# */ static void - at from@_to_BOOL(register @fromtyp@ *ip, register Bool *op, register intp n, + at FROMTYPE@_to_BOOL(@fromtype@ *ip, Bool *op, intp n, PyArrayObject *NPY_UNUSED(aip), PyArrayObject *NPY_UNUSED(aop)) { while (n--) { *op = (Bool)(((*ip).real != FALSE) || ((*ip).imag != FALSE)); - op++; ip++; + op++; + ip++; } } /**end repeat**/ /**begin repeat -#to=BYTE,UBYTE,SHORT,USHORT,INT,UINT,LONG,ULONG,LONGLONG,ULONGLONG,FLOAT,DOUBLE,LONGDOUBLE# -#totyp=byte, ubyte, short, ushort, int, uint, long, ulong, longlong, ulonglong, float, double, longdouble# + * #TOTYPE = BYTE, UBYTE, SHORT, USHORT, INT, UINT, LONG, ULONG, + * LONGLONG, ULONGLONG, FLOAT, DOUBLE, LONGDOUBLE# + * #totype = byte, ubyte, short, ushort, int, uint, long, ulong, + * longlong, ulonglong, float, double, longdouble# */ static void -BOOL_to_ at to@(register Bool *ip, register @totyp@ *op, register intp n, +BOOL_to_ at TOTYPE@(Bool *ip, @totype@ *op, intp n, PyArrayObject *NPY_UNUSED(aip), PyArrayObject *NPY_UNUSED(aop)) { while (n--) { - *op++ = (@totyp@)(*ip++ != FALSE); + *op++ = (@totype@)(*ip++ != FALSE); } } /**end repeat**/ /**begin repeat + * + * #TOTYPE = CFLOAT,CDOUBLE,CLONGDOUBLE# + * #totype = float, double, longdouble# + */ -#to=(CFLOAT,CDOUBLE,CLONGDOUBLE)*14# -#from=BOOL*3,BYTE*3,UBYTE*3,SHORT*3,USHORT*3,INT*3,UINT*3,LONG*3,ULONG*3,LONGLONG*3,ULONGLONG*3,FLOAT*3,DOUBLE*3,LONGDOUBLE*3# -#fromtyp=Bool*3,byte*3, ubyte*3, short*3, ushort*3, int*3, uint*3, long*3, ulong*3, longlong*3, ulonglong*3, float*3, double*3, longdouble*3# -#totyp= (float, double, longdouble)*14# -*/ +/**begin repeat1 + * #FROMTYPE = BOOL, BYTE, UBYTE, SHORT, USHORT, INT, UINT, LONG, ULONG, + * LONGLONG, ULONGLONG, FLOAT, DOUBLE, LONGDOUBLE# + * #fromtype = Bool, byte, ubyte, short, ushort, int, uint, long, ulong, + * longlong, ulonglong, float, double, longdouble# + */ static void - at from@_to_ at to@(register @fromtyp@ *ip, register @totyp@ *op, register intp n, + at FROMTYPE@_to_ at TOTYPE@(register @fromtype@ *ip, register @totype@ *op, register intp n, PyArrayObject *NPY_UNUSED(aip), PyArrayObject *NPY_UNUSED(aop)) { while (n--) { - *op++ = (@totyp@)*ip++; + *op++ = (@totype@)*ip++; *op++ = 0.0; } } +/**end repeat1**/ /**end repeat**/ /**begin repeat + * + * #TOTYPE = CFLOAT,CDOUBLE,CLONGDOUBLE# + * #totype = float, double, longdouble# + */ -#to=(CFLOAT,CDOUBLE,CLONGDOUBLE)*3# -#from=CFLOAT*3,CDOUBLE*3,CLONGDOUBLE*3# -#totyp=(float, double, longdouble)*3# -#fromtyp=float*3, double*3, longdouble*3# -*/ +/**begin repeat1 + * #FROMTYPE = CFLOAT,CDOUBLE,CLONGDOUBLE# + * #fromtype = float, double, longdouble# + */ static void - at from@_to_ at to@(register @fromtyp@ *ip, register @totyp@ *op, register intp n, + at FROMTYPE@_to_ at TOTYPE@(@fromtype@ *ip, @totype@ *op, intp n, PyArrayObject *NPY_UNUSED(aip), PyArrayObject *NPY_UNUSED(aop)) { n <<= 1; while (n--) { - *op++ = (@totyp@)*ip++; + *op++ = (@totype@)*ip++; } +} -} +/**end repeat1**/ /**end repeat**/ /**begin repeat - -#from=BOOL,BYTE,UBYTE,SHORT,USHORT,INT,UINT,LONG,ULONG,LONGLONG,ULONGLONG,FLOAT,DOUBLE,LONGDOUBLE,CFLOAT,CDOUBLE,CLONGDOUBLE, STRING, UNICODE, VOID, OBJECT# -#fromtyp=Bool, byte, ubyte, short, ushort, int, uint, long, ulong, longlong, ulonglong, float, double, longdouble, cfloat, cdouble, clongdouble, char, char, char, PyObject *# -#skip= 1*17, aip->descr->elsize*3, 1# -*/ + * + * #FROMTYPE = BOOL, BYTE, UBYTE, SHORT, USHORT, INT, UINT, LONG, ULONG, + * LONGLONG, ULONGLONG, FLOAT, DOUBLE, LONGDOUBLE, + * CFLOAT, CDOUBLE, CLONGDOUBLE, STRING, UNICODE, VOID, OBJECT# + * #fromtype = Bool, byte, ubyte, short, ushort, int, uint, long, ulong, + * longlong, ulonglong, float, double, longdouble, + * cfloat, cdouble, clongdouble, char, char, char, PyObject *# + * #skip = 1*17, aip->descr->elsize*3, 1# + */ static void - at from@_to_OBJECT(@fromtyp@ *ip, PyObject **op, intp n, PyArrayObject *aip, + at FROMTYPE@_to_OBJECT(@fromtype@ *ip, PyObject **op, intp n, PyArrayObject *aip, PyArrayObject *NPY_UNUSED(aop)) { - register intp i; - int skip=@skip@; - for(i=0;idescr->elsize*3# -*/ + * + * #TOTYPE = BOOL, BYTE, UBYTE, SHORT, USHORT, INT, UINT, LONG, ULONG, + * LONGLONG, ULONGLONG, FLOAT, DOUBLE, LONGDOUBLE, + * CFLOAT, CDOUBLE, CLONGDOUBLE, STRING, UNICODE, VOID# + * #totype = Bool, byte, ubyte, short, ushort, int, uint, long, ulong, + * longlong, ulonglong, float, double, longdouble, + * cfloat, cdouble, clongdouble, char, char, char# + * #skip = 1*17, aip->descr->elsize*3# + */ static void -OBJECT_to_ at to@(PyObject **ip, @totyp@ *op, intp n, PyArrayObject *_NPY_UNUSED at to@(aip), - PyArrayObject *aop) +OBJECT_to_ at TOTYPE@(PyObject **ip, @totype@ *op, intp n, + PyArrayObject *_NPY_UNUSED at TOTYPE@(aip), PyArrayObject *aop) { - register intp i; - int skip=@skip@; - for(i=0;idescr->elsize*3)*3# -#convert=1*17,0*3,1*17,0*3,0*20# -#convstr=(Int*9,Long*2,Float*3,Complex*3,Tuple*3)*3# + * + * #from = STRING*20, UNICODE*20, VOID*20# + * #fromtyp = char*60# + * #to = (BOOL, BYTE, UBYTE, SHORT, USHORT, INT, UINT, LONG, ULONG, LONGLONG, ULONGLONG, FLOAT, DOUBLE, LONGDOUBLE, CFLOAT, CDOUBLE, CLONGDOUBLE, STRING, UNICODE, VOID)*3# + * #totyp = (Bool, byte, ubyte, short, ushort, int, uint, long, ulong, longlong, ulonglong, float, double, longdouble, cfloat, cdouble, clongdouble, char, char, char)*3# + * #oskip = (1*17,aop->descr->elsize*3)*3# + * #convert = 1*17, 0*3, 1*17, 0*3, 0*20# + * #convstr = (Int*9, Long*2, Float*3, Complex*3, Tuple*3)*3# */ static void @from at _to_@to@(@fromtyp@ *ip, @totyp@ *op, intp n, PyArrayObject *aip, PyArrayObject *aop) { register intp i; - PyObject *temp=NULL; - int skip=aip->descr->elsize; - int oskip=@oskip@; - for(i=0; idescr->elsize; + int oskip = @oskip@; + for (i = 0; i < n; i++, ip+=skip, op+=oskip) { temp = @from at _getitem((char *)ip, aip); - if (temp==NULL) return; + if (temp == NULL) { + return; + } /* convert from Python object to needed one */ if (@convert@) { PyObject *new, *args; @@ -826,9 +910,10 @@ new = Py at convstr@_Type.tp_new(&Py at convstr@_Type, args, NULL); Py_DECREF(args); temp = new; - if (temp==NULL) return; + if (temp == NULL) { + return; + } } - @to at _setitem(temp,(char *)op, aop); Py_DECREF(temp); } @@ -837,24 +922,23 @@ /**end repeat**/ /**begin repeat - -#to=STRING*17, UNICODE*17, VOID*17# -#totyp=char*17, char*17, char*17# -#from=(BOOL,BYTE,UBYTE,SHORT,USHORT,INT,UINT,LONG,ULONG,LONGLONG,ULONGLONG,FLOAT,DOUBLE,LONGDOUBLE,CFLOAT,CDOUBLE,CLONGDOUBLE)*3# -#fromtyp=(Bool, byte, ubyte, short, ushort, int, uint, long, ulong, longlong, ulonglong, float, double, longdouble, cfloat, cdouble, clongdouble)*3# -*/ - + * + * #to = STRING*17, UNICODE*17, VOID*17# + * #totyp = char*17, char*17, char*17# + * #from = (BOOL, BYTE, UBYTE, SHORT, USHORT, INT, UINT, LONG, ULONG, LONGLONG, ULONGLONG, FLOAT, DOUBLE, LONGDOUBLE, CFLOAT, CDOUBLE, CLONGDOUBLE)*3# + * #fromtyp = (Bool, byte, ubyte, short, ushort, int, uint, long, ulong, longlong, ulonglong, float, double, longdouble, cfloat, cdouble, clongdouble)*3# + */ static void @from at _to_@to@(@fromtyp@ *ip, @totyp@ *op, intp n, PyArrayObject *aip, PyArrayObject *aop) { - register intp i; - PyObject *temp=NULL; - int skip=1; - int oskip=aop->descr->elsize; - for(i=0; idescr->elsize; + for (i = 0; i < n; i++, ip += skip, op += oskip) { temp = @from at _getitem((char *)ip, aip); - if (temp==NULL) { + if (temp == NULL) { Py_INCREF(Py_False); temp = Py_False; } @@ -868,31 +952,33 @@ /****************** scan *************************************/ -/* The first ignore argument is for backwards compatibility. - Should be removed when the API version is bumped up. +/* + * The first ignore argument is for backwards compatibility. + * Should be removed when the API version is bumped up. */ /**begin repeat -#fname=SHORT,USHORT,INT,UINT,LONG,ULONG,LONGLONG,ULONGLONG# -#type=short,ushort,int,uint,long,ulong,longlong,ulonglong# -#format="hd","hu","d","u","ld","lu",LONGLONG_FMT,ULONGLONG_FMT# -*/ + * #fname = SHORT, USHORT, INT, UINT, LONG, ULONG, LONGLONG, ULONGLONG# + * #type = short, ushort, int, uint, long, ulong, longlong, ulonglong# + * #format = "hd", "hu", "d", "u", "ld", "lu", LONGLONG_FMT, ULONGLONG_FMT# + */ static int - at fname@_scan (FILE *fp, @type@ *ip, void *NPY_UNUSED(ignore), PyArray_Descr *NPY_UNUSED(ignored)) + at fname@_scan(FILE *fp, @type@ *ip, void *NPY_UNUSED(ignore), PyArray_Descr *NPY_UNUSED(ignored)) { return fscanf(fp, "%"@format@, ip); } /**end repeat**/ /**begin repeat -#fname=FLOAT,DOUBLE,LONGDOUBLE# -#type=float,double,longdouble# -*/ + * #fname = FLOAT, DOUBLE, LONGDOUBLE# + * #type = float, double, longdouble# + */ static int - at fname@_scan (FILE *fp, @type@ *ip, void *NPY_UNUSED(ignore), PyArray_Descr *NPY_UNUSED(ignored)) + at fname@_scan(FILE *fp, @type@ *ip, void *NPY_UNUSED(ignore), PyArray_Descr *NPY_UNUSED(ignored)) { double result; int ret; + ret = NumPyOS_ascii_ftolf(fp, &result); *ip = (@type@) result; return ret; @@ -900,16 +986,17 @@ /**end repeat**/ /**begin repeat -#fname=BYTE,UBYTE# -#type=byte,ubyte# -#btype=int,uint# -#format="d","u"# -*/ + * #fname = BYTE, UBYTE# + * #type = byte, ubyte# + * #btype = int, uint# + * #format = "d", "u"# + */ static int - at fname@_scan (FILE *fp, @type@ *ip, void *NPY_UNUSED(ignore), PyArray_Descr *NPY_UNUSED(ignore2)) + at fname@_scan(FILE *fp, @type@ *ip, void *NPY_UNUSED(ignore), PyArray_Descr *NPY_UNUSED(ignore2)) { @btype@ temp; int num; + num = fscanf(fp, "%"@format@, &temp); *ip = (@type@) temp; return num; @@ -917,29 +1004,30 @@ /**end repeat**/ static int -BOOL_scan (FILE *fp, Bool *ip, void *NPY_UNUSED(ignore), PyArray_Descr *NPY_UNUSED(ignore2)) +BOOL_scan(FILE *fp, Bool *ip, void *NPY_UNUSED(ignore), PyArray_Descr *NPY_UNUSED(ignore2)) { int temp; int num; + num = fscanf(fp, "%d", &temp); *ip = (Bool) (temp != 0); return num; } /**begin repeat -#fname=CFLOAT,CDOUBLE,CLONGDOUBLE,OBJECT,STRING,UNICODE,VOID# -*/ + * #fname = CFLOAT, CDOUBLE, CLONGDOUBLE, OBJECT, STRING, UNICODE, VOID# + */ #define @fname at _scan NULL /**end repeat**/ /****************** fromstr *************************************/ /**begin repeat -#fname=BYTE,UBYTE,SHORT,USHORT,INT,UINT,LONG,ULONG,LONGLONG,ULONGLONG# -#type=byte,ubyte,short,ushort,int,uint,long,ulong,longlong,ulonglong# -#func=(l,ul)*5# -#btype=(long,ulong)*5# -*/ + * #fname = BYTE, UBYTE, SHORT, USHORT, INT, UINT, LONG, ULONG, LONGLONG, ULONGLONG# + * #type = byte, ubyte, short, ushort, int, uint, long, ulong, longlong, ulonglong# + * #func = (l, ul)*5# + * #btype = (long, ulong)*5# + */ static int @fname at _fromstr(char *str, @type@ *ip, char **endptr, PyArray_Descr *NPY_UNUSED(ignore)) { @@ -952,9 +1040,10 @@ /**end repeat**/ /**begin repeat -#fname=FLOAT,DOUBLE,LONGDOUBLE# -#type=float,double,longdouble# -*/ + * + * #fname=FLOAT,DOUBLE,LONGDOUBLE# + * #type=float,double,longdouble# + */ static int @fname at _fromstr(char *str, @type@ *ip, char **endptr, PyArray_Descr *NPY_UNUSED(ignore)) { @@ -969,8 +1058,8 @@ /**begin repeat -#fname=BOOL,CFLOAT,CDOUBLE,CLONGDOUBLE,OBJECT,STRING,UNICODE,VOID# -*/ + * #fname = BOOL, CFLOAT, CDOUBLE, CLONGDOUBLE, OBJECT, STRING, UNICODE, VOID# + */ #define @fname at _fromstr NULL /**end repeat**/ @@ -978,11 +1067,11 @@ /****************** copyswapn *************************************/ /**begin repeat - -#fname=SHORT,USHORT,INT,UINT,LONG,ULONG,LONGLONG,ULONGLONG,FLOAT,DOUBLE,LONGDOUBLE# -#fsize=SHORT,SHORT,INT,INT,LONG,LONG,LONGLONG,LONGLONG,FLOAT,DOUBLE,LONGDOUBLE# -#type=short,ushort,int,uint,long,ulong,longlong,ulonglong,float,double,longdouble# -*/ + * + * #fname = SHORT, USHORT, INT, UINT, LONG, ULONG, LONGLONG, ULONGLONG, FLOAT, DOUBLE, LONGDOUBLE# + * #fsize = SHORT, SHORT, INT, INT, LONG, LONG, LONGLONG, LONGLONG, FLOAT, DOUBLE, LONGDOUBLE# + * #type = short, ushort, int, uint, long, ulong, longlong, ulonglong, float, double, longdouble# + */ static void @fname at _copyswapn (void *dst, intp dstride, void *src, intp sstride, intp n, int swap, void *NPY_UNUSED(arr)) @@ -1005,11 +1094,13 @@ @fname at _copyswap (void *dst, void *src, int swap, void *NPY_UNUSED(arr)) { - if (src != NULL) /* copy first if needed */ + if (src != NULL) { + /* copy first if needed */ memcpy(dst, src, sizeof(@type@)); + } + if (swap) { + char *a, *b, c; - if (swap) { - register char *a, *b, c; a = (char *)dst; #if SIZEOF_ at fsize@ == 2 b = a + 1; @@ -1051,25 +1142,27 @@ c = *a; *a++ = *b; *b = c; #else { - register int i, nn; + int i, nn; + b = a + (SIZEOF_ at fsize@-1); nn = SIZEOF_ at fsize@ / 2; - for (i=0; i Author: matthew.brett at gmail.com Date: 2009-02-24 14:53:10 -0600 (Tue, 24 Feb 2009) New Revision: 6465 Modified: trunk/numpy/add_newdocs.py Log: Added docstring for isbuiltin Modified: trunk/numpy/add_newdocs.py =================================================================== --- trunk/numpy/add_newdocs.py 2009-02-23 19:02:43 UTC (rev 6464) +++ trunk/numpy/add_newdocs.py 2009-02-24 20:53:10 UTC (rev 6465) @@ -2937,7 +2937,7 @@ add_newdoc('numpy.core.multiarray', 'dtype', ('byteorder', ''' - dt.byteorder + byteorder String giving byteorder of dtype @@ -2995,6 +2995,33 @@ add_newdoc('numpy.core.multiarray', 'dtype', ('isbuiltin', """ + isbuiltin + + Value identifying if numpy dtype is a numpy builtin type + + Read-only + + Returns + ------- + val : {0,1,2} + 0 if this is a structured array type, with fields + 1 if this is a dtype compiled into numpy (such as ints, floats etc) + 2 if the dtype is for a user-defined numpy type + A user-defined type uses the numpy C-API machinery to extend + numpy to handle a new array type. See the Guide to Numpy for + details. + + Examples + -------- + >>> dt = np.dtype('i2') + >>> dt.isbuiltin + 1 + >>> dt = np.dtype('f8') + >>> dt.isbuiltin + 1 + >>> dt = np.dtype([('field1', 'f8')]) + >>> dt.isbuiltin + 0 """)) add_newdoc('numpy.core.multiarray', 'dtype', ('isnative', From numpy-svn at scipy.org Wed Feb 25 00:15:53 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Tue, 24 Feb 2009 23:15:53 -0600 (CST) Subject: [Numpy-svn] r6466 - in trunk/numpy/core: src tests Message-ID: <20090225051553.EDF48C7C01E@scipy.org> Author: oliphant Date: 2009-02-24 23:15:47 -0600 (Tue, 24 Feb 2009) New Revision: 6466 Modified: trunk/numpy/core/src/scalartypes.inc.src trunk/numpy/core/tests/test_regression.py Log: Fix void-scalar element access when data-type contains titles (meta-data). Modified: trunk/numpy/core/src/scalartypes.inc.src =================================================================== --- trunk/numpy/core/src/scalartypes.inc.src 2009-02-24 20:53:10 UTC (rev 6465) +++ trunk/numpy/core/src/scalartypes.inc.src 2009-02-25 05:15:47 UTC (rev 6466) @@ -1448,9 +1448,11 @@ static PyObject * voidtype_getfield(PyVoidScalarObject *self, PyObject *args, PyObject *kwds) { - PyObject *ret; + PyObject *ret, *newargs; - ret = gentype_generic_method((PyObject *)self, args, kwds, "getfield"); + newargs = PyTuple_GetSlice(args, 0, 2); + ret = gentype_generic_method((PyObject *)self, newargs, kwds, "getfield"); + Py_DECREF(newargs); if (!ret) { return ret; } Modified: trunk/numpy/core/tests/test_regression.py =================================================================== --- trunk/numpy/core/tests/test_regression.py 2009-02-24 20:53:10 UTC (rev 6465) +++ trunk/numpy/core/tests/test_regression.py 2009-02-25 05:15:47 UTC (rev 6466) @@ -1220,5 +1220,13 @@ n_after = len(gc.get_objects()) assert n_before >= n_after, (n_before, n_after) + def test_void_scalar_with_titles(self, level=rlevel): + """No ticket""" + data = [('john', 4), ('mary', 5)] + dtype1 = [(('source:yy', 'name'), 'O'), (('source:xx', 'id'), int)] + arr = array(data, dtype=dtype1) + assert arr[0][0] == 'john' + assert arr[0][1] == 4 + if __name__ == "__main__": run_module_suite() From numpy-svn at scipy.org Wed Feb 25 00:27:47 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Tue, 24 Feb 2009 23:27:47 -0600 (CST) Subject: [Numpy-svn] r6467 - in branches/1.2.x/numpy/core: src tests Message-ID: <20090225052747.9A30FC7C01E@scipy.org> Author: oliphant Date: 2009-02-24 23:27:46 -0600 (Tue, 24 Feb 2009) New Revision: 6467 Modified: branches/1.2.x/numpy/core/src/scalartypes.inc.src branches/1.2.x/numpy/core/tests/test_regression.py Log: Patch 1.2.x branch with titles fix. Modified: branches/1.2.x/numpy/core/src/scalartypes.inc.src =================================================================== --- branches/1.2.x/numpy/core/src/scalartypes.inc.src 2009-02-25 05:15:47 UTC (rev 6466) +++ branches/1.2.x/numpy/core/src/scalartypes.inc.src 2009-02-25 05:27:46 UTC (rev 6467) @@ -1211,9 +1211,11 @@ static PyObject * voidtype_getfield(PyVoidScalarObject *self, PyObject *args, PyObject *kwds) { - PyObject *ret; + PyObject *ret, *newargs; - ret = gentype_generic_method((PyObject *)self, args, kwds, "getfield"); + newargs = PyTuple_GetSlice(args, 0, 2); + ret = gentype_generic_method((PyObject *)self, newargs, kwds, "getfield"); + Py_DECREF(newargs); if (!ret) return ret; if (PyArray_IsScalar(ret, Generic) && \ (!PyArray_IsScalar(ret, Void))) { Modified: branches/1.2.x/numpy/core/tests/test_regression.py =================================================================== --- branches/1.2.x/numpy/core/tests/test_regression.py 2009-02-25 05:15:47 UTC (rev 6466) +++ branches/1.2.x/numpy/core/tests/test_regression.py 2009-02-25 05:27:46 UTC (rev 6467) @@ -1209,5 +1209,13 @@ a = np.array(1) self.failUnlessRaises(ValueError, lambda x: x.choose([]), a) + def test_void_scalar_with_titles(self, level=rlevel): + """No ticket""" + data = [('john', 4), ('mary', 5)] + dtype1 = [(('source:yy', 'name'), 'O'), (('source:xx', 'id'), int)] + arr = array(data, dtype=dtype1) + assert arr[0][0] == 'john' + assert arr[0][1] == 4 + if __name__ == "__main__": run_module_suite() From numpy-svn at scipy.org Wed Feb 25 00:29:37 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Tue, 24 Feb 2009 23:29:37 -0600 (CST) Subject: [Numpy-svn] r6468 - in branches/1.1.x/numpy/core: src tests Message-ID: <20090225052937.7D472C7C01E@scipy.org> Author: oliphant Date: 2009-02-24 23:29:33 -0600 (Tue, 24 Feb 2009) New Revision: 6468 Modified: branches/1.1.x/numpy/core/src/scalartypes.inc.src branches/1.1.x/numpy/core/tests/test_regression.py Log: Patch 1.1.x branch with fix to void getitem when field-names have titles. Modified: branches/1.1.x/numpy/core/src/scalartypes.inc.src =================================================================== --- branches/1.1.x/numpy/core/src/scalartypes.inc.src 2009-02-25 05:27:46 UTC (rev 6467) +++ branches/1.1.x/numpy/core/src/scalartypes.inc.src 2009-02-25 05:29:33 UTC (rev 6468) @@ -1211,9 +1211,11 @@ static PyObject * voidtype_getfield(PyVoidScalarObject *self, PyObject *args, PyObject *kwds) { - PyObject *ret; + PyObject *ret, *newargs; - ret = gentype_generic_method((PyObject *)self, args, kwds, "getfield"); + newargs = PyTuple_GetSlice(args, 0, 2); + ret = gentype_generic_method((PyObject *)self, newargs, kwds, "getfield"); + Py_DECREF(newargs); if (!ret) return ret; if (PyArray_IsScalar(ret, Generic) && \ (!PyArray_IsScalar(ret, Void))) { Modified: branches/1.1.x/numpy/core/tests/test_regression.py =================================================================== --- branches/1.1.x/numpy/core/tests/test_regression.py 2009-02-25 05:27:46 UTC (rev 6467) +++ branches/1.1.x/numpy/core/tests/test_regression.py 2009-02-25 05:29:33 UTC (rev 6468) @@ -1095,5 +1095,13 @@ assert_equal(have, want) + def test_void_scalar_with_titles(self, level=rlevel): + """No ticket""" + data = [('john', 4), ('mary', 5)] + dtype1 = [(('source:yy', 'name'), 'O'), (('source:xx', 'id'), int)] + arr = array(data, dtype=dtype1) + assert arr[0][0] == 'john' + assert arr[0][1] == 4 + if __name__ == "__main__": NumpyTest().run() From numpy-svn at scipy.org Wed Feb 25 08:45:31 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Wed, 25 Feb 2009 07:45:31 -0600 (CST) Subject: [Numpy-svn] r6469 - trunk/numpy/core/src Message-ID: <20090225134531.A908BC7C172@scipy.org> Author: oliphant Date: 2009-02-25 07:45:30 -0600 (Wed, 25 Feb 2009) New Revision: 6469 Modified: trunk/numpy/core/src/scalartypes.inc.src Log: Add Error checking. Modified: trunk/numpy/core/src/scalartypes.inc.src =================================================================== --- trunk/numpy/core/src/scalartypes.inc.src 2009-02-25 05:29:33 UTC (rev 6468) +++ trunk/numpy/core/src/scalartypes.inc.src 2009-02-25 13:45:30 UTC (rev 6469) @@ -1451,6 +1451,7 @@ PyObject *ret, *newargs; newargs = PyTuple_GetSlice(args, 0, 2); + if (newargs == NULL) return NULL; ret = gentype_generic_method((PyObject *)self, newargs, kwds, "getfield"); Py_DECREF(newargs); if (!ret) { From numpy-svn at scipy.org Wed Feb 25 08:45:54 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Wed, 25 Feb 2009 07:45:54 -0600 (CST) Subject: [Numpy-svn] r6470 - branches/1.2.x/numpy/core/src Message-ID: <20090225134554.C03D4C7C153@scipy.org> Author: oliphant Date: 2009-02-25 07:45:53 -0600 (Wed, 25 Feb 2009) New Revision: 6470 Modified: branches/1.2.x/numpy/core/src/scalartypes.inc.src Log: Add Error checking to 1.2.x branch Modified: branches/1.2.x/numpy/core/src/scalartypes.inc.src =================================================================== --- branches/1.2.x/numpy/core/src/scalartypes.inc.src 2009-02-25 13:45:30 UTC (rev 6469) +++ branches/1.2.x/numpy/core/src/scalartypes.inc.src 2009-02-25 13:45:53 UTC (rev 6470) @@ -1214,6 +1214,7 @@ PyObject *ret, *newargs; newargs = PyTuple_GetSlice(args, 0, 2); + if (newargs == NULL) return NULL; ret = gentype_generic_method((PyObject *)self, newargs, kwds, "getfield"); Py_DECREF(newargs); if (!ret) return ret; From numpy-svn at scipy.org Wed Feb 25 08:46:14 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Wed, 25 Feb 2009 07:46:14 -0600 (CST) Subject: [Numpy-svn] r6471 - branches/1.1.x/numpy/core/src Message-ID: <20090225134614.270EFC7C153@scipy.org> Author: oliphant Date: 2009-02-25 07:46:13 -0600 (Wed, 25 Feb 2009) New Revision: 6471 Modified: branches/1.1.x/numpy/core/src/scalartypes.inc.src Log: Add Error checking on titles fix to 1.1.x branch Modified: branches/1.1.x/numpy/core/src/scalartypes.inc.src =================================================================== --- branches/1.1.x/numpy/core/src/scalartypes.inc.src 2009-02-25 13:45:53 UTC (rev 6470) +++ branches/1.1.x/numpy/core/src/scalartypes.inc.src 2009-02-25 13:46:13 UTC (rev 6471) @@ -1214,6 +1214,7 @@ PyObject *ret, *newargs; newargs = PyTuple_GetSlice(args, 0, 2); + if (newargs == NULL) return NULL; ret = gentype_generic_method((PyObject *)self, newargs, kwds, "getfield"); Py_DECREF(newargs); if (!ret) return ret; From numpy-svn at scipy.org Wed Feb 25 11:51:40 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Wed, 25 Feb 2009 10:51:40 -0600 (CST) Subject: [Numpy-svn] r6472 - in trunk/numpy/core: src tests Message-ID: <20090225165140.7232AC7C026@scipy.org> Author: charris Date: 2009-02-25 10:51:29 -0600 (Wed, 25 Feb 2009) New Revision: 6472 Modified: trunk/numpy/core/src/scalartypes.inc.src trunk/numpy/core/tests/test_regression.py Log: Fix coding style. Fix test_void_scalar_with_titles. Modified: trunk/numpy/core/src/scalartypes.inc.src =================================================================== --- trunk/numpy/core/src/scalartypes.inc.src 2009-02-25 13:46:13 UTC (rev 6471) +++ trunk/numpy/core/src/scalartypes.inc.src 2009-02-25 16:51:29 UTC (rev 6472) @@ -1451,7 +1451,9 @@ PyObject *ret, *newargs; newargs = PyTuple_GetSlice(args, 0, 2); - if (newargs == NULL) return NULL; + if (newargs == NULL) { + return NULL; + } ret = gentype_generic_method((PyObject *)self, newargs, kwds, "getfield"); Py_DECREF(newargs); if (!ret) { Modified: trunk/numpy/core/tests/test_regression.py =================================================================== --- trunk/numpy/core/tests/test_regression.py 2009-02-25 13:46:13 UTC (rev 6471) +++ trunk/numpy/core/tests/test_regression.py 2009-02-25 16:51:29 UTC (rev 6472) @@ -1224,7 +1224,7 @@ """No ticket""" data = [('john', 4), ('mary', 5)] dtype1 = [(('source:yy', 'name'), 'O'), (('source:xx', 'id'), int)] - arr = array(data, dtype=dtype1) + arr = np.array(data, dtype=dtype1) assert arr[0][0] == 'john' assert arr[0][1] == 4 From numpy-svn at scipy.org Wed Feb 25 13:27:29 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Wed, 25 Feb 2009 12:27:29 -0600 (CST) Subject: [Numpy-svn] r6473 - trunk/numpy/distutils/fcompiler Message-ID: <20090225182729.BBD2BC7C0E0@scipy.org> Author: cdavid Date: 2009-02-25 12:27:19 -0600 (Wed, 25 Feb 2009) New Revision: 6473 Modified: trunk/numpy/distutils/fcompiler/compaq.py Log: Trap another kind of exception for MSVC9 in compaq fcompiler. Modified: trunk/numpy/distutils/fcompiler/compaq.py =================================================================== --- trunk/numpy/distutils/fcompiler/compaq.py 2009-02-25 16:51:29 UTC (rev 6472) +++ trunk/numpy/distutils/fcompiler/compaq.py 2009-02-25 18:27:19 UTC (rev 6473) @@ -89,6 +89,10 @@ if not "vcvarsall.bat" in str(e): print "Unexpected IOError in", __file__ raise e + except ValueError, e: + if not "path']" in str(e): + print "Unexpected ValueError in", __file__ + raise e executables = { 'version_cmd' : ['', "/what"], From numpy-svn at scipy.org Wed Feb 25 13:56:41 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Wed, 25 Feb 2009 12:56:41 -0600 (CST) Subject: [Numpy-svn] r6474 - trunk/numpy/core/tests Message-ID: <20090225185641.D1F71C7C0E0@scipy.org> Author: cdavid Date: 2009-02-25 12:56:15 -0600 (Wed, 25 Feb 2009) New Revision: 6474 Modified: trunk/numpy/core/tests/test_multiarray.py trunk/numpy/core/tests/test_regression.py Log: Mark mingw-w64 crashing tests as known failures. Modified: trunk/numpy/core/tests/test_multiarray.py =================================================================== --- trunk/numpy/core/tests/test_multiarray.py 2009-02-25 18:27:19 UTC (rev 6473) +++ trunk/numpy/core/tests/test_multiarray.py 2009-02-25 18:56:15 UTC (rev 6474) @@ -7,6 +7,10 @@ from test_print import in_foreign_locale +def iswin64(): + import platform + return platform.architecture()[0] == "64bit" and sys.platform == "win32" + class TestFlags(TestCase): def setUp(self): self.a = arange(10) @@ -556,6 +560,7 @@ class TestStringCompare(TestCase): + @dec.knownfailureif(iswin64(), "Crash on win64") def test_string(self): g1 = array(["This","is","example"]) g2 = array(["This","was","example"]) @@ -566,6 +571,7 @@ assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0,1,2]]) assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0,1,2]]) + @dec.knownfailureif(iswin64(), "Crash on win64") def test_mixed(self): g1 = array(["spam","spa","spammer","and eggs"]) g2 = "spam" @@ -577,6 +583,7 @@ assert_array_equal(g1 >= g2, [x >= g2 for x in g1]) + @dec.knownfailureif(iswin64(), "Crash on win64") def test_unicode(self): g1 = array([u"This",u"is",u"example"]) g2 = array([u"This",u"was",u"example"]) @@ -796,6 +803,7 @@ os.unlink(self.filename) #tmp_file.close() + @dec.knownfailureif(iswin64(), "Crash on win64") def test_roundtrip_file(self): f = open(self.filename, 'wb') self.x.tofile(f) @@ -807,11 +815,13 @@ assert_array_equal(y, self.x.flat) os.unlink(self.filename) + @dec.knownfailureif(iswin64(), "Crash on win64") def test_roundtrip_filename(self): self.x.tofile(self.filename) y = np.fromfile(self.filename, dtype=self.dtype) assert_array_equal(y, self.x.flat) + @dec.knownfailureif(iswin64(), "Crash on win64") def test_roundtrip_binary_str(self): s = self.x.tostring() y = np.fromstring(s, dtype=self.dtype) @@ -821,6 +831,7 @@ y = np.fromstring(s, dtype=self.dtype) assert_array_equal(y, self.x.flatten('F')) + @dec.knownfailureif(iswin64(), "Crash on win64") def test_roundtrip_str(self): x = self.x.real.ravel() s = "@".join(map(str, x)) @@ -830,12 +841,14 @@ assert_array_equal(x[nan_mask], y[nan_mask]) assert_array_almost_equal(x[~nan_mask], y[~nan_mask], decimal=5) + @dec.knownfailureif(iswin64(), "Crash on win64") def test_roundtrip_repr(self): x = self.x.real.ravel() s = "@".join(map(repr, x)) y = np.fromstring(s, sep="@") assert_array_equal(x, y) + @dec.knownfailureif(iswin64(), "Crash on win64") def _check_from(self, s, value, **kw): y = np.fromstring(s, **kw) assert_array_equal(y, value) @@ -846,53 +859,66 @@ y = np.fromfile(self.filename, **kw) assert_array_equal(y, value) + @dec.knownfailureif(iswin64(), "Crash on win64") def test_nan(self): self._check_from("nan +nan -nan NaN nan(foo) +NaN(BAR) -NAN(q_u_u_x_)", [nan, nan, nan, nan, nan, nan, nan], sep=' ') + @dec.knownfailureif(iswin64(), "Crash on win64") def test_inf(self): self._check_from("inf +inf -inf infinity -Infinity iNfInItY -inF", [inf, inf, -inf, inf, -inf, inf, -inf], sep=' ') + @dec.knownfailureif(iswin64(), "Crash on win64") def test_numbers(self): self._check_from("1.234 -1.234 .3 .3e55 -123133.1231e+133", [1.234, -1.234, .3, .3e55, -123133.1231e+133], sep=' ') + @dec.knownfailureif(iswin64(), "Crash on win64") def test_binary(self): self._check_from('\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@', array([1,2,3,4]), dtype=' Author: cdavid Date: 2009-02-25 13:04:59 -0600 (Wed, 25 Feb 2009) New Revision: 6475 Modified: trunk/numpy/core/tests/test_scalarmath.py trunk/numpy/core/tests/test_unicode.py Log: Tag more mingw-w64 failures as such. Modified: trunk/numpy/core/tests/test_scalarmath.py =================================================================== --- trunk/numpy/core/tests/test_scalarmath.py 2009-02-25 18:56:15 UTC (rev 6474) +++ trunk/numpy/core/tests/test_scalarmath.py 2009-02-25 19:04:59 UTC (rev 6475) @@ -1,3 +1,4 @@ +import sys from numpy.testing import * import numpy as np @@ -6,6 +7,10 @@ np.single, np.double, np.longdouble, np.csingle, np.cdouble, np.clongdouble] +def iswin64(): + import platform + return platform.architecture()[0] == "64bit" and sys.platform == "win32" + # This compares scalarmath against ufuncs. class TestTypes(TestCase): @@ -42,6 +47,7 @@ b = a ** 4 assert b == 81, "error with %r: got %r" % (t,b) + @dec.knownfailureif(iswin64(), "Crash on win64") def test_large_types(self): for t in [np.int32, np.int64, np.float32, np.float64, np.longdouble]: a = t(51) Modified: trunk/numpy/core/tests/test_unicode.py =================================================================== --- trunk/numpy/core/tests/test_unicode.py 2009-02-25 18:56:15 UTC (rev 6474) +++ trunk/numpy/core/tests/test_unicode.py 2009-02-25 19:04:59 UTC (rev 6475) @@ -1,6 +1,12 @@ +import sys + from numpy.testing import * from numpy.core import * +def iswin64(): + import platform + return platform.architecture()[0] == "64bit" and sys.platform == "win32" + # Guess the UCS length for this python interpreter if len(buffer(u'u')) == 4: ucs4 = True @@ -36,17 +42,20 @@ else: self.assert_(len(buffer(ua_scalar)) == 0) + @dec.knownfailureif(iswin64(), "Crash on win64") def test_zeros0D(self): """Check creation of 0-dimensional objects""" ua = zeros((), dtype='U%s' % self.ulen) self.content_check(ua, ua[()], 4*self.ulen) + @dec.knownfailureif(iswin64(), "Crash on win64") def test_zerosSD(self): """Check creation of single-dimensional objects""" ua = zeros((2,), dtype='U%s' % self.ulen) self.content_check(ua, ua[0], 4*self.ulen*2) self.content_check(ua, ua[1], 4*self.ulen*2) + @dec.knownfailureif(iswin64(), "Crash on win64") def test_zerosMD(self): """Check creation of multi-dimensional objects""" ua = zeros((2,3,4), dtype='U%s' % self.ulen) @@ -96,17 +105,20 @@ # regular 2-byte word self.assert_(len(buffer(ua_scalar)) == 2*self.ulen) + @dec.knownfailureif(iswin64(), "Crash on win64") def test_values0D(self): """Check creation of 0-dimensional objects with values""" ua = array(self.ucs_value*self.ulen, dtype='U%s' % self.ulen) self.content_check(ua, ua[()], 4*self.ulen) + @dec.knownfailureif(iswin64(), "Crash on win64") def test_valuesSD(self): """Check creation of single-dimensional objects with values""" ua = array([self.ucs_value*self.ulen]*2, dtype='U%s' % self.ulen) self.content_check(ua, ua[0], 4*self.ulen*2) self.content_check(ua, ua[1], 4*self.ulen*2) + @dec.knownfailureif(iswin64(), "Crash on win64") def test_valuesMD(self): """Check creation of multi-dimensional objects with values""" ua = array([[[self.ucs_value*self.ulen]*2]*3]*4, dtype='U%s' % self.ulen) @@ -181,12 +193,14 @@ # regular 2-byte word self.assert_(len(buffer(ua_scalar)) == 2*self.ulen) + @dec.knownfailureif(iswin64(), "Crash on win64") def test_values0D(self): """Check assignment of 0-dimensional objects with values""" ua = zeros((), dtype='U%s' % self.ulen) ua[()] = self.ucs_value*self.ulen self.content_check(ua, ua[()], 4*self.ulen) + @dec.knownfailureif(iswin64(), "Crash on win64") def test_valuesSD(self): """Check assignment of single-dimensional objects with values""" ua = zeros((2,), dtype='U%s' % self.ulen) @@ -195,6 +209,7 @@ ua[1] = self.ucs_value*self.ulen self.content_check(ua, ua[1], 4*self.ulen*2) + @dec.knownfailureif(iswin64(), "Crash on win64") def test_valuesMD(self): """Check assignment of multi-dimensional objects with values""" ua = zeros((2,3,4), dtype='U%s' % self.ulen) @@ -248,6 +263,7 @@ class byteorder_values: """Check the byteorder of unicode arrays in round-trip conversions""" + @dec.knownfailureif(iswin64(), "Crash on win64") def test_values0D(self): """Check byteorder of 0-dimensional objects""" ua = array(self.ucs_value*self.ulen, dtype='U%s' % self.ulen) @@ -260,6 +276,7 @@ # Arrays must be equal after the round-trip assert_equal(ua, ua3) + @dec.knownfailureif(iswin64(), "Crash on win64") def test_valuesSD(self): """Check byteorder of single-dimensional objects""" ua = array([self.ucs_value*self.ulen]*2, dtype='U%s' % self.ulen) @@ -270,6 +287,7 @@ # Arrays must be equal after the round-trip assert_equal(ua, ua3) + @dec.knownfailureif(iswin64(), "Crash on win64") def test_valuesMD(self): """Check byteorder of multi-dimensional objects""" ua = array([[[self.ucs_value*self.ulen]*2]*3]*4, From numpy-svn at scipy.org Thu Feb 26 01:26:07 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Thu, 26 Feb 2009 00:26:07 -0600 (CST) Subject: [Numpy-svn] r6476 - in branches/coremath: . numpy numpy/core/src numpy/core/tests numpy/distutils/fcompiler numpy/ma numpy/ma/tests Message-ID: <20090226062607.2F35EC7C19A@scipy.org> Author: cdavid Date: 2009-02-26 00:24:22 -0600 (Thu, 26 Feb 2009) New Revision: 6476 Modified: branches/coremath/ branches/coremath/numpy/add_newdocs.py branches/coremath/numpy/core/src/arraytypes.inc.src branches/coremath/numpy/core/src/scalartypes.inc.src branches/coremath/numpy/core/tests/test_multiarray.py branches/coremath/numpy/core/tests/test_regression.py branches/coremath/numpy/core/tests/test_scalarmath.py branches/coremath/numpy/core/tests/test_unicode.py branches/coremath/numpy/distutils/fcompiler/compaq.py branches/coremath/numpy/ma/core.py branches/coremath/numpy/ma/tests/test_core.py Log: Merged revisions 6463-6466,6469,6472-6475 via svnmerge from http://svn.scipy.org/svn/numpy/trunk ........ r6463 | pierregm | 2009-02-24 02:33:11 +0900 (Tue, 24 Feb 2009) | 1 line * MaskedArray.__setstate__ : fixed for structured array ........ r6464 | charris | 2009-02-24 04:02:43 +0900 (Tue, 24 Feb 2009) | 4 lines Coding style cleanups. Start on arraytypes.inc.src. I'm a bit hesitant to commit this as tests are lacking for the functionality, but I want the changes somewhere central. ........ r6465 | matthew.brett at gmail.com | 2009-02-25 05:53:10 +0900 (Wed, 25 Feb 2009) | 1 line Added docstring for isbuiltin ........ r6466 | oliphant | 2009-02-25 14:15:47 +0900 (Wed, 25 Feb 2009) | 1 line Fix void-scalar element access when data-type contains titles (meta-data). ........ r6469 | oliphant | 2009-02-25 22:45:30 +0900 (Wed, 25 Feb 2009) | 1 line Add Error checking. ........ r6472 | charris | 2009-02-26 01:51:29 +0900 (Thu, 26 Feb 2009) | 1 line Fix coding style. Fix test_void_scalar_with_titles. ........ r6473 | cdavid | 2009-02-26 03:27:19 +0900 (Thu, 26 Feb 2009) | 1 line Trap another kind of exception for MSVC9 in compaq fcompiler. ........ r6474 | cdavid | 2009-02-26 03:56:15 +0900 (Thu, 26 Feb 2009) | 1 line Mark mingw-w64 crashing tests as known failures. ........ r6475 | cdavid | 2009-02-26 04:04:59 +0900 (Thu, 26 Feb 2009) | 1 line Tag more mingw-w64 failures as such. ........ Property changes on: branches/coremath ___________________________________________________________________ Name: svnmerge-integrated - /branches/distutils-revamp:1-2752 /branches/dynamic_cpu_configuration:1-6101 /branches/fix_float_format:1-6359 /branches/multicore:1-3687 /branches/numpy-mingw-w64:1-6150 /branches/visualstudio_manifest:1-6077 /trunk:1-6461 + /branches/distutils-revamp:1-2752 /branches/dynamic_cpu_configuration:1-6101 /branches/fix_float_format:1-6359 /branches/multicore:1-3687 /branches/numpy-mingw-w64:1-6150 /branches/visualstudio_manifest:1-6077 /trunk:1-6475 Modified: branches/coremath/numpy/add_newdocs.py =================================================================== --- branches/coremath/numpy/add_newdocs.py 2009-02-25 19:04:59 UTC (rev 6475) +++ branches/coremath/numpy/add_newdocs.py 2009-02-26 06:24:22 UTC (rev 6476) @@ -2937,7 +2937,7 @@ add_newdoc('numpy.core.multiarray', 'dtype', ('byteorder', ''' - dt.byteorder + byteorder String giving byteorder of dtype @@ -2995,6 +2995,33 @@ add_newdoc('numpy.core.multiarray', 'dtype', ('isbuiltin', """ + isbuiltin + + Value identifying if numpy dtype is a numpy builtin type + + Read-only + + Returns + ------- + val : {0,1,2} + 0 if this is a structured array type, with fields + 1 if this is a dtype compiled into numpy (such as ints, floats etc) + 2 if the dtype is for a user-defined numpy type + A user-defined type uses the numpy C-API machinery to extend + numpy to handle a new array type. See the Guide to Numpy for + details. + + Examples + -------- + >>> dt = np.dtype('i2') + >>> dt.isbuiltin + 1 + >>> dt = np.dtype('f8') + >>> dt.isbuiltin + 1 + >>> dt = np.dtype([('field1', 'f8')]) + >>> dt.isbuiltin + 0 """)) add_newdoc('numpy.core.multiarray', 'dtype', ('isnative', Modified: branches/coremath/numpy/core/src/arraytypes.inc.src =================================================================== --- branches/coremath/numpy/core/src/arraytypes.inc.src 2009-02-25 19:04:59 UTC (rev 6475) +++ branches/coremath/numpy/core/src/arraytypes.inc.src 2009-02-26 06:24:22 UTC (rev 6476) @@ -21,10 +21,9 @@ /**begin repeat -#type=long,longlong# -#Type=Long,LongLong# -*/ - + * #type = long, longlong# + * #Type = Long, LongLong# + */ static @type@ MyPyLong_As at Type@ (PyObject *obj) { @@ -39,7 +38,6 @@ return ret; } - static u at type@ MyPyLong_AsUnsigned at Type@ (PyObject *obj) { @@ -64,15 +62,21 @@ /****************** getitem and setitem **********************/ /**begin repeat - -#TYP=BOOL,BYTE,UBYTE,SHORT,USHORT,INT,LONG,UINT,ULONG,LONGLONG,ULONGLONG,FLOAT,DOUBLE# -#func1=PyBool_FromLong, PyInt_FromLong*6, PyLong_FromUnsignedLong*2, PyLong_FromLongLong, PyLong_FromUnsignedLongLong, PyFloat_FromDouble*2# -#func2=PyObject_IsTrue, MyPyLong_AsLong*6, MyPyLong_AsUnsignedLong*2, MyPyLong_AsLongLong, MyPyLong_AsUnsignedLongLong, MyPyFloat_AsDouble*2# -#typ=Bool, byte, ubyte, short, ushort, int, long, uint, ulong, longlong, ulonglong, float, double# -#typ1=long*7, ulong*2, longlong, ulonglong, float, double# -#kind=Bool, Byte, UByte, Short, UShort, Int, Long, UInt, ULong, LongLong, ULongLong, Float, Double# + * + * #TYP = BOOL, BYTE, UBYTE, SHORT, USHORT, INT, LONG, UINT, ULONG, + * LONGLONG, ULONGLONG, FLOAT, DOUBLE# + * #func1 = PyBool_FromLong, PyInt_FromLong*6, PyLong_FromUnsignedLong*2, + * PyLong_FromLongLong, PyLong_FromUnsignedLongLong, + * PyFloat_FromDouble*2# + * #func2 = PyObject_IsTrue, MyPyLong_AsLong*6, MyPyLong_AsUnsignedLong*2, + * MyPyLong_AsLongLong, MyPyLong_AsUnsignedLongLong, + * MyPyFloat_AsDouble*2# + * #typ = Bool, byte, ubyte, short, ushort, int, long, uint, ulong, + * longlong, ulonglong, float, double# + * #typ1 = long*7, ulong*2, longlong, ulonglong, float, double# + * #kind = Bool, Byte, UByte, Short, UShort, Int, Long, UInt, ULong, + * LongLong, ULongLong, Float, Double# */ - static PyObject * @TYP at _getitem(char *ip, PyArrayObject *ap) { @typ@ t1; @@ -82,8 +86,7 @@ return @func1@((@typ1@)t1); } else { - ap->descr->f->copyswap(&t1, ip, !PyArray_ISNOTSWAPPED(ap), - ap); + ap->descr->f->copyswap(&t1, ip, !PyArray_ISNOTSWAPPED(ap), ap); return @func1@((@typ1@)t1); } } @@ -92,7 +95,6 @@ @TYP at _setitem(PyObject *op, char *ov, PyArrayObject *ap) { @typ@ temp; /* ensures alignment */ - if (PyArray_IsScalar(op, @kind@)) { temp = ((Py at kind@ScalarObject *)op)->obval; } @@ -113,19 +115,16 @@ ap->descr->f->copyswap(ov, &temp, !PyArray_ISNOTSWAPPED(ap), ap); } - return 0; } /**end repeat**/ - /**begin repeat - -#TYP=CFLOAT,CDOUBLE# -#typ=float, double# -*/ - + * + * #TYP = CFLOAT, CDOUBLE# + * #typ = float, double# + */ static PyObject * @TYP at _getitem(char *ip, PyArrayObject *ap) { @typ@ t1, t2; @@ -142,14 +141,15 @@ return PyComplex_FromDoubles((double)t1, (double)t2); } } + /**end repeat**/ /**begin repeat - -#TYP=CFLOAT, CDOUBLE, CLONGDOUBLE# -#typ=float, double, longdouble# -#kind=CFloat, CDouble, CLongDouble# -*/ + * + * #TYP = CFLOAT, CDOUBLE, CLONGDOUBLE# + * #typ = float, double, longdouble# + * #kind = CFloat, CDouble, CLongDouble# + */ static int @TYP at _setitem(PyObject *op, char *ov, PyArrayObject *ap) { @@ -159,8 +159,8 @@ int rsize; if (!(PyArray_IsScalar(op, @kind@))) { - if (PyArray_Check(op) && (PyArray_NDIM(op)==0)) { - op2 = ((PyArrayObject *)op)->descr->f->getitem \ + if (PyArray_Check(op) && (PyArray_NDIM(op) == 0)) { + op2 = ((PyArrayObject *)op)->descr->f->getitem (((PyArrayObject *)op)->data, (PyArrayObject *)op); } @@ -174,22 +174,24 @@ oop = PyComplex_AsCComplex (op2); } Py_DECREF(op2); - if (PyErr_Occurred()) return -1; + if (PyErr_Occurred()) { + return -1; + } temp.real = (@typ@) oop.real; temp.imag = (@typ@) oop.imag; } else { temp = ((Py at kind@ScalarObject *)op)->obval; } - memcpy(ov, &temp, ap->descr->elsize); - if (!PyArray_ISNOTSWAPPED(ap)) + if (!PyArray_ISNOTSWAPPED(ap)) { byte_swap_vector(ov, 2, sizeof(@typ@)); - + } rsize = sizeof(@typ@); copy_and_swap(ov, &temp, rsize, 2, rsize, !PyArray_ISNOTSWAPPED(ap)); return 0; } + /**end repeat**/ static PyObject * @@ -208,9 +210,12 @@ else { temp = (longdouble) MyPyFloat_AsDouble(op); } - if (PyErr_Occurred()) return -1; - if (ap == NULL || PyArray_ISBEHAVED(ap)) + if (PyErr_Occurred()) { + return -1; + } + if (ap == NULL || PyArray_ISBEHAVED(ap)) { *((longdouble *)ov)=temp; + } else { copy_and_swap(ov, &temp, ap->descr->elsize, 1, 0, !PyArray_ISNOTSWAPPED(ap)); @@ -297,8 +302,12 @@ return -1; } /* Sequence_Size might have returned an error */ - if (PyErr_Occurred()) PyErr_Clear(); - if ((temp=PyObject_Unicode(op)) == NULL) return -1; + if (PyErr_Occurred()) { + PyErr_Clear(); + } + if ((temp=PyObject_Unicode(op)) == NULL) { + return -1; + } ptr = PyUnicode_AS_UNICODE(temp); if ((ptr == NULL) || (PyErr_Occurred())) { Py_DECREF(temp); @@ -317,7 +326,9 @@ return -1; } } - else buffer = ov; + else { + buffer = ov; + } datalen = PyUCS2Buffer_AsUCS4(ptr, (PyArray_UCS4 *)buffer, datalen >> 1, ap->descr->elsize >> 2); @@ -331,16 +342,18 @@ if (ap->descr->elsize > datalen) { memset(ov + datalen, 0, (ap->descr->elsize - datalen)); } - - if (!PyArray_ISNOTSWAPPED(ap)) + if (!PyArray_ISNOTSWAPPED(ap)) { byte_swap_vector(ov, ap->descr->elsize >> 2, 4); + } Py_DECREF(temp); return 0; } -/* STRING -- can handle both NULL-terminated and not NULL-terminated cases - will truncate all ending NULLs in returned string. -*/ +/* STRING + * + * can handle both NULL-terminated and not NULL-terminated cases + * will truncate all ending NULLs in returned string. + */ static PyObject * STRING_getitem(char *ip, PyArrayObject *ap) { @@ -348,8 +361,10 @@ char *ptr; int size = ap->descr->elsize; - ptr = ip + size-1; - while (*ptr-- == '\0' && size > 0) size--; + ptr = ip + size - 1; + while (*ptr-- == '\0' && size > 0) { + size--; + } return PyString_FromStringAndSize(ip,size); } @@ -358,7 +373,7 @@ { char *ptr; Py_ssize_t len; - PyObject *temp=NULL; + PyObject *temp = NULL; if (!PyString_Check(op) && !PyUnicode_Check(op) && PySequence_Check(op) && PySequence_Size(op) > 0) { @@ -367,17 +382,21 @@ return -1; } /* Sequence_Size might have returned an error */ - if (PyErr_Occurred()) PyErr_Clear(); - if ((temp = PyObject_Str(op)) == NULL) return -1; - + if (PyErr_Occurred()) { + PyErr_Clear(); + } + if ((temp = PyObject_Str(op)) == NULL) { + return -1; + } if (PyString_AsStringAndSize(temp, &ptr, &len) == -1) { Py_DECREF(temp); return -1; } memcpy(ov, ptr, MIN(ap->descr->elsize,len)); - /* If string lenth is smaller than room in array - Then fill the rest of the element size - with NULL */ + /* + * If string lenth is smaller than room in array + * Then fill the rest of the element size with NULL + */ if (ap->descr->elsize > len) { memset(ov + len, 0, (ap->descr->elsize - len)); } @@ -429,7 +448,7 @@ static PyObject * VOID_getitem(char *ip, PyArrayObject *ap) { - PyObject *u=NULL; + PyObject *u = NULL; PyArray_Descr* descr; int itemsize; @@ -446,29 +465,30 @@ /* get the names from the fields dictionary*/ names = descr->names; - if (!names) goto finish; + if (!names) { + goto finish; + } n = PyTuple_GET_SIZE(names); ret = PyTuple_New(n); savedflags = ap->flags; - for (i=0; ifields, key); - if (!PyArg_ParseTuple(tup, "Oi|O", &new, &offset, - &title)) { + if (!PyArg_ParseTuple(tup, "Oi|O", &new, &offset, &title)) { Py_DECREF(ret); ap->descr = descr; return NULL; } ap->descr = new; /* update alignment based on offset */ - if ((new->alignment > 1) && \ - ((((intp)(ip+offset)) % new->alignment) != 0)) + if ((new->alignment > 1) + && ((((intp)(ip+offset)) % new->alignment) != 0)) { ap->flags &= ~ALIGNED; - else + } + else { ap->flags |= ALIGNED; - - PyTuple_SET_ITEM(ret, i, \ - new->f->getitem(ip+offset, ap)); + } + PyTuple_SET_ITEM(ret, i, new->f->getitem(ip+offset, ap)); ap->flags = savedflags; } ap->descr = descr; @@ -477,10 +497,10 @@ if (descr->subarray) { /* return an array of the basic type */ - PyArray_Dims shape={NULL,-1}; + PyArray_Dims shape = {NULL, -1}; PyObject *ret; - if (!(PyArray_IntpConverter(descr->subarray->shape, - &shape))) { + + if (!(PyArray_IntpConverter(descr->subarray->shape, &shape))) { PyDimMem_FREE(shape.ptr); PyErr_SetString(PyExc_ValueError, "invalid shape in fixed-type tuple."); @@ -488,11 +508,12 @@ } Py_INCREF(descr->subarray->base); ret = PyArray_NewFromDescr(&PyArray_Type, - descr->subarray->base, - shape.len, shape.ptr, + descr->subarray->base, shape.len, shape.ptr, NULL, ip, ap->flags, NULL); PyDimMem_FREE(shape.ptr); - if (!ret) return NULL; + if (!ret) { + return NULL; + } PyArray_BASE(ret) = (PyObject *)ap; Py_INCREF(ap); PyArray_UpdateFlags((PyArrayObject *)ret, UPDATE_ALL); @@ -500,23 +521,27 @@ } finish: - if (PyDataType_FLAGCHK(descr, NPY_ITEM_HASOBJECT) || - PyDataType_FLAGCHK(descr, NPY_ITEM_IS_POINTER)) { + if (PyDataType_FLAGCHK(descr, NPY_ITEM_HASOBJECT) + || PyDataType_FLAGCHK(descr, NPY_ITEM_IS_POINTER)) { PyErr_SetString(PyExc_ValueError, "tried to get void-array with object" " members as buffer."); return NULL; } - - itemsize=ap->descr->elsize; - if (PyArray_ISWRITEABLE(ap)) + itemsize = ap->descr->elsize; + if (PyArray_ISWRITEABLE(ap)) { u = PyBuffer_FromReadWriteMemory(ip, itemsize); - else + } + else { u = PyBuffer_FromMemory(ip, itemsize); - if (u==NULL) goto fail; - - /* default is to return buffer object pointing to current item */ - /* a view of it */ + } + if (u == NULL) { + goto fail; + } + /* + * default is to return buffer object pointing to + * current item a view of it + */ return u; fail: @@ -543,6 +568,7 @@ PyArray_Descr *new; int offset; int savedflags; + res = -1; /* get the names from the fields dictionary*/ names = descr->names; @@ -554,26 +580,27 @@ return -1; } savedflags = ap->flags; - for (i=0; ifields, key); - if (!PyArg_ParseTuple(tup, "Oi|O", &new, &offset, - &title)) { + if (!PyArg_ParseTuple(tup, "Oi|O", &new, &offset, &title)) { ap->descr = descr; return -1; } ap->descr = new; /* remember to update alignment flags */ - if ((new->alignment > 1) && \ - ((((intp)(ip+offset)) % new->alignment) != 0)) + if ((new->alignment > 1) + && ((((intp)(ip+offset)) % new->alignment) != 0)) { ap->flags &= ~ALIGNED; - else + } + else { ap->flags |= ALIGNED; - - res = new->f->setitem(PyTuple_GET_ITEM(op, i), - ip+offset, ap); + } + res = new->f->setitem(PyTuple_GET_ITEM(op, i), ip+offset, ap); ap->flags = savedflags; - if (res < 0) break; + if (res < 0) { + break; + } } ap->descr = descr; return res; @@ -581,10 +608,9 @@ if (descr->subarray) { /* copy into an array of the same basic type */ - PyArray_Dims shape={NULL,-1}; + PyArray_Dims shape = {NULL, -1}; PyObject *ret; - if (!(PyArray_IntpConverter(descr->subarray->shape, - &shape))) { + if (!(PyArray_IntpConverter(descr->subarray->shape, &shape))) { PyDimMem_FREE(shape.ptr); PyErr_SetString(PyExc_ValueError, "invalid shape in fixed-type tuple."); @@ -592,11 +618,12 @@ } Py_INCREF(descr->subarray->base); ret = PyArray_NewFromDescr(&PyArray_Type, - descr->subarray->base, - shape.len, shape.ptr, + descr->subarray->base, shape.len, shape.ptr, NULL, ip, ap->flags, NULL); PyDimMem_FREE(shape.ptr); - if (!ret) return -1; + if (!ret) { + return -1; + } PyArray_BASE(ret) = (PyObject *)ap; Py_INCREF(ap); PyArray_UpdateFlags((PyArrayObject *)ret, UPDATE_ALL); @@ -609,15 +636,17 @@ { const void *buffer; Py_ssize_t buflen; - if (PyDataType_FLAGCHK(descr, NPY_ITEM_HASOBJECT) || - PyDataType_FLAGCHK(descr, NPY_ITEM_IS_POINTER)) { + if (PyDataType_FLAGCHK(descr, NPY_ITEM_HASOBJECT) + || PyDataType_FLAGCHK(descr, NPY_ITEM_IS_POINTER)) { PyErr_SetString(PyExc_ValueError, "tried to set void-array with object" " members using buffer."); return -1; } res = PyObject_AsReadBuffer(op, &buffer, &buflen); - if (res == -1) goto fail; + if (res == -1) { + goto fail; + } memcpy(ip, buffer, NPY_MIN(buflen, itemsize)); if (itemsize > buflen) { memset(ip+buflen, 0, (itemsize-buflen)); @@ -636,29 +665,58 @@ /**begin repeat -#to=(BYTE,UBYTE,SHORT,USHORT,INT,UINT,LONG,ULONG,LONGLONG,ULONGLONG,FLOAT,DOUBLE,LONGDOUBLE)*16# -#from=BYTE*13,UBYTE*13,SHORT*13,USHORT*13,INT*13,UINT*13,LONG*13,ULONG*13,LONGLONG*13,ULONGLONG*13,FLOAT*13,DOUBLE*13,LONGDOUBLE*13,CFLOAT*13,CDOUBLE*13,CLONGDOUBLE*13# -#totyp=(byte, ubyte, short, ushort, int, uint, long, ulong, longlong, ulonglong, float, double, longdouble)*16# -#fromtyp=byte*13, ubyte*13, short*13, ushort*13, int*13, uint*13, long*13, ulong*13, longlong*13, ulonglong*13, float*13, double*13, longdouble*13, float*13, double*13, longdouble*13# -#incr= (ip++)*169,(ip+=2)*39# + * + * #TOTYPE = BYTE, UBYTE, SHORT, USHORT, INT, UINT, LONG, ULONG, + * LONGLONG, ULONGLONG, FLOAT, DOUBLE, LONGDOUBLE# + * #totype = byte, ubyte, short, ushort, int, uint, long, ulong, + * longlong, ulonglong, float, double, longdouble# */ + +/**begin repeat1 + * + * #FROMTYPE = BYTE, UBYTE, SHORT, USHORT, INT, UINT, LONG, ULONG, + * LONGLONG, ULONGLONG, FLOAT, DOUBLE, LONGDOUBLE# + * #fromtype = byte, ubyte, short, ushort, int, uint, long, ulong, + * longlong, ulonglong, float, double, longdouble# + */ static void - at from@_to_ at to@(register @fromtyp@ *ip, register @totyp@ *op, register intp n, + at FROMTYPE@_to_ at TOTYPE@(@fromtype@ *ip, @totype@ *op, intp n, PyArrayObject *NPY_UNUSED(aip), PyArrayObject *NPY_UNUSED(aop)) { while (n--) { - *op++ = (@totyp@)*ip; - @incr@; + *op++ = (@totype@)*ip++; } } +/**end repeat1**/ + +/**begin repeat1 + * + * #FROMTYPE = CFLOAT, CDOUBLE, CLONGDOUBLE# + * #fromtype = float, double, longdouble# + */ +static void + at FROMTYPE@_to_ at TOTYPE@(@fromtype@ *ip, @totype@ *op, intp n, + PyArrayObject *NPY_UNUSED(aip), PyArrayObject *NPY_UNUSED(aop)) +{ + while (n--) { + *op++ = (@totype@)*ip; + ip += 2; + } +} +/**end repeat1**/ + /**end repeat**/ + /**begin repeat -#from=BOOL,BYTE,UBYTE,SHORT,USHORT,INT,UINT,LONG,ULONG,LONGLONG,ULONGLONG,FLOAT,DOUBLE,LONGDOUBLE# -#fromtyp=Bool, byte, ubyte, short, ushort, int, uint, long, ulong, longlong, ulonglong, float, double, longdouble# + * + * #FROMTYPE = BOOL, BYTE, UBYTE, SHORT, USHORT, INT, UINT, LONG, ULONG, + * LONGLONG, ULONGLONG, FLOAT, DOUBLE, LONGDOUBLE# + * #fromtype = Bool, byte, ubyte, short, ushort, int, uint, long, ulong, + * longlong, ulonglong, float, double, longdouble# */ static void - at from@_to_BOOL(register @fromtyp@ *ip, register Bool *op, register intp n, + at FROMTYPE@_to_BOOL(@fromtype@ *ip, Bool *op, intp n, PyArrayObject *NPY_UNUSED(aip), PyArrayObject *NPY_UNUSED(aop)) { while (n--) { @@ -668,87 +726,105 @@ /**end repeat**/ /**begin repeat -#from=CFLOAT, CDOUBLE, CLONGDOUBLE# -#fromtyp=cfloat, cdouble, clongdouble# + * + * #FROMTYPE = CFLOAT, CDOUBLE, CLONGDOUBLE# + * #fromtype = cfloat, cdouble, clongdouble# */ static void - at from@_to_BOOL(register @fromtyp@ *ip, register Bool *op, register intp n, + at FROMTYPE@_to_BOOL(@fromtype@ *ip, Bool *op, intp n, PyArrayObject *NPY_UNUSED(aip), PyArrayObject *NPY_UNUSED(aop)) { while (n--) { *op = (Bool)(((*ip).real != FALSE) || ((*ip).imag != FALSE)); - op++; ip++; + op++; + ip++; } } /**end repeat**/ /**begin repeat -#to=BYTE,UBYTE,SHORT,USHORT,INT,UINT,LONG,ULONG,LONGLONG,ULONGLONG,FLOAT,DOUBLE,LONGDOUBLE# -#totyp=byte, ubyte, short, ushort, int, uint, long, ulong, longlong, ulonglong, float, double, longdouble# + * #TOTYPE = BYTE, UBYTE, SHORT, USHORT, INT, UINT, LONG, ULONG, + * LONGLONG, ULONGLONG, FLOAT, DOUBLE, LONGDOUBLE# + * #totype = byte, ubyte, short, ushort, int, uint, long, ulong, + * longlong, ulonglong, float, double, longdouble# */ static void -BOOL_to_ at to@(register Bool *ip, register @totyp@ *op, register intp n, +BOOL_to_ at TOTYPE@(Bool *ip, @totype@ *op, intp n, PyArrayObject *NPY_UNUSED(aip), PyArrayObject *NPY_UNUSED(aop)) { while (n--) { - *op++ = (@totyp@)(*ip++ != FALSE); + *op++ = (@totype@)(*ip++ != FALSE); } } /**end repeat**/ /**begin repeat + * + * #TOTYPE = CFLOAT,CDOUBLE,CLONGDOUBLE# + * #totype = float, double, longdouble# + */ -#to=(CFLOAT,CDOUBLE,CLONGDOUBLE)*14# -#from=BOOL*3,BYTE*3,UBYTE*3,SHORT*3,USHORT*3,INT*3,UINT*3,LONG*3,ULONG*3,LONGLONG*3,ULONGLONG*3,FLOAT*3,DOUBLE*3,LONGDOUBLE*3# -#fromtyp=Bool*3,byte*3, ubyte*3, short*3, ushort*3, int*3, uint*3, long*3, ulong*3, longlong*3, ulonglong*3, float*3, double*3, longdouble*3# -#totyp= (float, double, longdouble)*14# -*/ +/**begin repeat1 + * #FROMTYPE = BOOL, BYTE, UBYTE, SHORT, USHORT, INT, UINT, LONG, ULONG, + * LONGLONG, ULONGLONG, FLOAT, DOUBLE, LONGDOUBLE# + * #fromtype = Bool, byte, ubyte, short, ushort, int, uint, long, ulong, + * longlong, ulonglong, float, double, longdouble# + */ static void - at from@_to_ at to@(register @fromtyp@ *ip, register @totyp@ *op, register intp n, + at FROMTYPE@_to_ at TOTYPE@(register @fromtype@ *ip, register @totype@ *op, register intp n, PyArrayObject *NPY_UNUSED(aip), PyArrayObject *NPY_UNUSED(aop)) { while (n--) { - *op++ = (@totyp@)*ip++; + *op++ = (@totype@)*ip++; *op++ = 0.0; } } +/**end repeat1**/ /**end repeat**/ /**begin repeat + * + * #TOTYPE = CFLOAT,CDOUBLE,CLONGDOUBLE# + * #totype = float, double, longdouble# + */ -#to=(CFLOAT,CDOUBLE,CLONGDOUBLE)*3# -#from=CFLOAT*3,CDOUBLE*3,CLONGDOUBLE*3# -#totyp=(float, double, longdouble)*3# -#fromtyp=float*3, double*3, longdouble*3# -*/ +/**begin repeat1 + * #FROMTYPE = CFLOAT,CDOUBLE,CLONGDOUBLE# + * #fromtype = float, double, longdouble# + */ static void - at from@_to_ at to@(register @fromtyp@ *ip, register @totyp@ *op, register intp n, + at FROMTYPE@_to_ at TOTYPE@(@fromtype@ *ip, @totype@ *op, intp n, PyArrayObject *NPY_UNUSED(aip), PyArrayObject *NPY_UNUSED(aop)) { n <<= 1; while (n--) { - *op++ = (@totyp@)*ip++; + *op++ = (@totype@)*ip++; } +} -} +/**end repeat1**/ /**end repeat**/ /**begin repeat - -#from=BOOL,BYTE,UBYTE,SHORT,USHORT,INT,UINT,LONG,ULONG,LONGLONG,ULONGLONG,FLOAT,DOUBLE,LONGDOUBLE,CFLOAT,CDOUBLE,CLONGDOUBLE, STRING, UNICODE, VOID, OBJECT# -#fromtyp=Bool, byte, ubyte, short, ushort, int, uint, long, ulong, longlong, ulonglong, float, double, longdouble, cfloat, cdouble, clongdouble, char, char, char, PyObject *# -#skip= 1*17, aip->descr->elsize*3, 1# -*/ + * + * #FROMTYPE = BOOL, BYTE, UBYTE, SHORT, USHORT, INT, UINT, LONG, ULONG, + * LONGLONG, ULONGLONG, FLOAT, DOUBLE, LONGDOUBLE, + * CFLOAT, CDOUBLE, CLONGDOUBLE, STRING, UNICODE, VOID, OBJECT# + * #fromtype = Bool, byte, ubyte, short, ushort, int, uint, long, ulong, + * longlong, ulonglong, float, double, longdouble, + * cfloat, cdouble, clongdouble, char, char, char, PyObject *# + * #skip = 1*17, aip->descr->elsize*3, 1# + */ static void - at from@_to_OBJECT(@fromtyp@ *ip, PyObject **op, intp n, PyArrayObject *aip, + at FROMTYPE@_to_OBJECT(@fromtype@ *ip, PyObject **op, intp n, PyArrayObject *aip, PyArrayObject *NPY_UNUSED(aop)) { - register intp i; - int skip=@skip@; - for(i=0;idescr->elsize*3# -*/ + * + * #TOTYPE = BOOL, BYTE, UBYTE, SHORT, USHORT, INT, UINT, LONG, ULONG, + * LONGLONG, ULONGLONG, FLOAT, DOUBLE, LONGDOUBLE, + * CFLOAT, CDOUBLE, CLONGDOUBLE, STRING, UNICODE, VOID# + * #totype = Bool, byte, ubyte, short, ushort, int, uint, long, ulong, + * longlong, ulonglong, float, double, longdouble, + * cfloat, cdouble, clongdouble, char, char, char# + * #skip = 1*17, aip->descr->elsize*3# + */ static void -OBJECT_to_ at to@(PyObject **ip, @totyp@ *op, intp n, PyArrayObject *_NPY_UNUSED at to@(aip), - PyArrayObject *aop) +OBJECT_to_ at TOTYPE@(PyObject **ip, @totype@ *op, intp n, + PyArrayObject *_NPY_UNUSED at TOTYPE@(aip), PyArrayObject *aop) { - register intp i; - int skip=@skip@; - for(i=0;idescr->elsize*3)*3# -#convert=1*17,0*3,1*17,0*3,0*20# -#convstr=(Int*9,Long*2,Float*3,Complex*3,Tuple*3)*3# + * + * #from = STRING*20, UNICODE*20, VOID*20# + * #fromtyp = char*60# + * #to = (BOOL, BYTE, UBYTE, SHORT, USHORT, INT, UINT, LONG, ULONG, LONGLONG, ULONGLONG, FLOAT, DOUBLE, LONGDOUBLE, CFLOAT, CDOUBLE, CLONGDOUBLE, STRING, UNICODE, VOID)*3# + * #totyp = (Bool, byte, ubyte, short, ushort, int, uint, long, ulong, longlong, ulonglong, float, double, longdouble, cfloat, cdouble, clongdouble, char, char, char)*3# + * #oskip = (1*17,aop->descr->elsize*3)*3# + * #convert = 1*17, 0*3, 1*17, 0*3, 0*20# + * #convstr = (Int*9, Long*2, Float*3, Complex*3, Tuple*3)*3# */ static void @from at _to_@to@(@fromtyp@ *ip, @totyp@ *op, intp n, PyArrayObject *aip, PyArrayObject *aop) { register intp i; - PyObject *temp=NULL; - int skip=aip->descr->elsize; - int oskip=@oskip@; - for(i=0; idescr->elsize; + int oskip = @oskip@; + for (i = 0; i < n; i++, ip+=skip, op+=oskip) { temp = @from at _getitem((char *)ip, aip); - if (temp==NULL) return; + if (temp == NULL) { + return; + } /* convert from Python object to needed one */ if (@convert@) { PyObject *new, *args; @@ -826,9 +910,10 @@ new = Py at convstr@_Type.tp_new(&Py at convstr@_Type, args, NULL); Py_DECREF(args); temp = new; - if (temp==NULL) return; + if (temp == NULL) { + return; + } } - @to at _setitem(temp,(char *)op, aop); Py_DECREF(temp); } @@ -837,24 +922,23 @@ /**end repeat**/ /**begin repeat - -#to=STRING*17, UNICODE*17, VOID*17# -#totyp=char*17, char*17, char*17# -#from=(BOOL,BYTE,UBYTE,SHORT,USHORT,INT,UINT,LONG,ULONG,LONGLONG,ULONGLONG,FLOAT,DOUBLE,LONGDOUBLE,CFLOAT,CDOUBLE,CLONGDOUBLE)*3# -#fromtyp=(Bool, byte, ubyte, short, ushort, int, uint, long, ulong, longlong, ulonglong, float, double, longdouble, cfloat, cdouble, clongdouble)*3# -*/ - + * + * #to = STRING*17, UNICODE*17, VOID*17# + * #totyp = char*17, char*17, char*17# + * #from = (BOOL, BYTE, UBYTE, SHORT, USHORT, INT, UINT, LONG, ULONG, LONGLONG, ULONGLONG, FLOAT, DOUBLE, LONGDOUBLE, CFLOAT, CDOUBLE, CLONGDOUBLE)*3# + * #fromtyp = (Bool, byte, ubyte, short, ushort, int, uint, long, ulong, longlong, ulonglong, float, double, longdouble, cfloat, cdouble, clongdouble)*3# + */ static void @from at _to_@to@(@fromtyp@ *ip, @totyp@ *op, intp n, PyArrayObject *aip, PyArrayObject *aop) { - register intp i; - PyObject *temp=NULL; - int skip=1; - int oskip=aop->descr->elsize; - for(i=0; idescr->elsize; + for (i = 0; i < n; i++, ip += skip, op += oskip) { temp = @from at _getitem((char *)ip, aip); - if (temp==NULL) { + if (temp == NULL) { Py_INCREF(Py_False); temp = Py_False; } @@ -868,31 +952,33 @@ /****************** scan *************************************/ -/* The first ignore argument is for backwards compatibility. - Should be removed when the API version is bumped up. +/* + * The first ignore argument is for backwards compatibility. + * Should be removed when the API version is bumped up. */ /**begin repeat -#fname=SHORT,USHORT,INT,UINT,LONG,ULONG,LONGLONG,ULONGLONG# -#type=short,ushort,int,uint,long,ulong,longlong,ulonglong# -#format="hd","hu","d","u","ld","lu",LONGLONG_FMT,ULONGLONG_FMT# -*/ + * #fname = SHORT, USHORT, INT, UINT, LONG, ULONG, LONGLONG, ULONGLONG# + * #type = short, ushort, int, uint, long, ulong, longlong, ulonglong# + * #format = "hd", "hu", "d", "u", "ld", "lu", LONGLONG_FMT, ULONGLONG_FMT# + */ static int - at fname@_scan (FILE *fp, @type@ *ip, void *NPY_UNUSED(ignore), PyArray_Descr *NPY_UNUSED(ignored)) + at fname@_scan(FILE *fp, @type@ *ip, void *NPY_UNUSED(ignore), PyArray_Descr *NPY_UNUSED(ignored)) { return fscanf(fp, "%"@format@, ip); } /**end repeat**/ /**begin repeat -#fname=FLOAT,DOUBLE,LONGDOUBLE# -#type=float,double,longdouble# -*/ + * #fname = FLOAT, DOUBLE, LONGDOUBLE# + * #type = float, double, longdouble# + */ static int - at fname@_scan (FILE *fp, @type@ *ip, void *NPY_UNUSED(ignore), PyArray_Descr *NPY_UNUSED(ignored)) + at fname@_scan(FILE *fp, @type@ *ip, void *NPY_UNUSED(ignore), PyArray_Descr *NPY_UNUSED(ignored)) { double result; int ret; + ret = NumPyOS_ascii_ftolf(fp, &result); *ip = (@type@) result; return ret; @@ -900,16 +986,17 @@ /**end repeat**/ /**begin repeat -#fname=BYTE,UBYTE# -#type=byte,ubyte# -#btype=int,uint# -#format="d","u"# -*/ + * #fname = BYTE, UBYTE# + * #type = byte, ubyte# + * #btype = int, uint# + * #format = "d", "u"# + */ static int - at fname@_scan (FILE *fp, @type@ *ip, void *NPY_UNUSED(ignore), PyArray_Descr *NPY_UNUSED(ignore2)) + at fname@_scan(FILE *fp, @type@ *ip, void *NPY_UNUSED(ignore), PyArray_Descr *NPY_UNUSED(ignore2)) { @btype@ temp; int num; + num = fscanf(fp, "%"@format@, &temp); *ip = (@type@) temp; return num; @@ -917,29 +1004,30 @@ /**end repeat**/ static int -BOOL_scan (FILE *fp, Bool *ip, void *NPY_UNUSED(ignore), PyArray_Descr *NPY_UNUSED(ignore2)) +BOOL_scan(FILE *fp, Bool *ip, void *NPY_UNUSED(ignore), PyArray_Descr *NPY_UNUSED(ignore2)) { int temp; int num; + num = fscanf(fp, "%d", &temp); *ip = (Bool) (temp != 0); return num; } /**begin repeat -#fname=CFLOAT,CDOUBLE,CLONGDOUBLE,OBJECT,STRING,UNICODE,VOID# -*/ + * #fname = CFLOAT, CDOUBLE, CLONGDOUBLE, OBJECT, STRING, UNICODE, VOID# + */ #define @fname at _scan NULL /**end repeat**/ /****************** fromstr *************************************/ /**begin repeat -#fname=BYTE,UBYTE,SHORT,USHORT,INT,UINT,LONG,ULONG,LONGLONG,ULONGLONG# -#type=byte,ubyte,short,ushort,int,uint,long,ulong,longlong,ulonglong# -#func=(l,ul)*5# -#btype=(long,ulong)*5# -*/ + * #fname = BYTE, UBYTE, SHORT, USHORT, INT, UINT, LONG, ULONG, LONGLONG, ULONGLONG# + * #type = byte, ubyte, short, ushort, int, uint, long, ulong, longlong, ulonglong# + * #func = (l, ul)*5# + * #btype = (long, ulong)*5# + */ static int @fname at _fromstr(char *str, @type@ *ip, char **endptr, PyArray_Descr *NPY_UNUSED(ignore)) { @@ -952,9 +1040,10 @@ /**end repeat**/ /**begin repeat -#fname=FLOAT,DOUBLE,LONGDOUBLE# -#type=float,double,longdouble# -*/ + * + * #fname=FLOAT,DOUBLE,LONGDOUBLE# + * #type=float,double,longdouble# + */ static int @fname at _fromstr(char *str, @type@ *ip, char **endptr, PyArray_Descr *NPY_UNUSED(ignore)) { @@ -969,8 +1058,8 @@ /**begin repeat -#fname=BOOL,CFLOAT,CDOUBLE,CLONGDOUBLE,OBJECT,STRING,UNICODE,VOID# -*/ + * #fname = BOOL, CFLOAT, CDOUBLE, CLONGDOUBLE, OBJECT, STRING, UNICODE, VOID# + */ #define @fname at _fromstr NULL /**end repeat**/ @@ -978,11 +1067,11 @@ /****************** copyswapn *************************************/ /**begin repeat - -#fname=SHORT,USHORT,INT,UINT,LONG,ULONG,LONGLONG,ULONGLONG,FLOAT,DOUBLE,LONGDOUBLE# -#fsize=SHORT,SHORT,INT,INT,LONG,LONG,LONGLONG,LONGLONG,FLOAT,DOUBLE,LONGDOUBLE# -#type=short,ushort,int,uint,long,ulong,longlong,ulonglong,float,double,longdouble# -*/ + * + * #fname = SHORT, USHORT, INT, UINT, LONG, ULONG, LONGLONG, ULONGLONG, FLOAT, DOUBLE, LONGDOUBLE# + * #fsize = SHORT, SHORT, INT, INT, LONG, LONG, LONGLONG, LONGLONG, FLOAT, DOUBLE, LONGDOUBLE# + * #type = short, ushort, int, uint, long, ulong, longlong, ulonglong, float, double, longdouble# + */ static void @fname at _copyswapn (void *dst, intp dstride, void *src, intp sstride, intp n, int swap, void *NPY_UNUSED(arr)) @@ -1005,11 +1094,13 @@ @fname at _copyswap (void *dst, void *src, int swap, void *NPY_UNUSED(arr)) { - if (src != NULL) /* copy first if needed */ + if (src != NULL) { + /* copy first if needed */ memcpy(dst, src, sizeof(@type@)); + } + if (swap) { + char *a, *b, c; - if (swap) { - register char *a, *b, c; a = (char *)dst; #if SIZEOF_ at fsize@ == 2 b = a + 1; @@ -1051,25 +1142,27 @@ c = *a; *a++ = *b; *b = c; #else { - register int i, nn; + int i, nn; + b = a + (SIZEOF_ at fsize@-1); nn = SIZEOF_ at fsize@ / 2; - for (i=0; i g2, [g1[i] > g2[i] for i in [0,1,2]]) + @dec.knownfailureif(iswin64(), "Crash on win64") def test_mixed(self): g1 = array(["spam","spa","spammer","and eggs"]) g2 = "spam" @@ -577,6 +583,7 @@ assert_array_equal(g1 >= g2, [x >= g2 for x in g1]) + @dec.knownfailureif(iswin64(), "Crash on win64") def test_unicode(self): g1 = array([u"This",u"is",u"example"]) g2 = array([u"This",u"was",u"example"]) @@ -796,6 +803,7 @@ os.unlink(self.filename) #tmp_file.close() + @dec.knownfailureif(iswin64(), "Crash on win64") def test_roundtrip_file(self): f = open(self.filename, 'wb') self.x.tofile(f) @@ -807,11 +815,13 @@ assert_array_equal(y, self.x.flat) os.unlink(self.filename) + @dec.knownfailureif(iswin64(), "Crash on win64") def test_roundtrip_filename(self): self.x.tofile(self.filename) y = np.fromfile(self.filename, dtype=self.dtype) assert_array_equal(y, self.x.flat) + @dec.knownfailureif(iswin64(), "Crash on win64") def test_roundtrip_binary_str(self): s = self.x.tostring() y = np.fromstring(s, dtype=self.dtype) @@ -821,6 +831,7 @@ y = np.fromstring(s, dtype=self.dtype) assert_array_equal(y, self.x.flatten('F')) + @dec.knownfailureif(iswin64(), "Crash on win64") def test_roundtrip_str(self): x = self.x.real.ravel() s = "@".join(map(str, x)) @@ -830,12 +841,14 @@ assert_array_equal(x[nan_mask], y[nan_mask]) assert_array_almost_equal(x[~nan_mask], y[~nan_mask], decimal=5) + @dec.knownfailureif(iswin64(), "Crash on win64") def test_roundtrip_repr(self): x = self.x.real.ravel() s = "@".join(map(repr, x)) y = np.fromstring(s, sep="@") assert_array_equal(x, y) + @dec.knownfailureif(iswin64(), "Crash on win64") def _check_from(self, s, value, **kw): y = np.fromstring(s, **kw) assert_array_equal(y, value) @@ -846,53 +859,66 @@ y = np.fromfile(self.filename, **kw) assert_array_equal(y, value) + @dec.knownfailureif(iswin64(), "Crash on win64") def test_nan(self): self._check_from("nan +nan -nan NaN nan(foo) +NaN(BAR) -NAN(q_u_u_x_)", [nan, nan, nan, nan, nan, nan, nan], sep=' ') + @dec.knownfailureif(iswin64(), "Crash on win64") def test_inf(self): self._check_from("inf +inf -inf infinity -Infinity iNfInItY -inF", [inf, inf, -inf, inf, -inf, inf, -inf], sep=' ') + @dec.knownfailureif(iswin64(), "Crash on win64") def test_numbers(self): self._check_from("1.234 -1.234 .3 .3e55 -123133.1231e+133", [1.234, -1.234, .3, .3e55, -123133.1231e+133], sep=' ') + @dec.knownfailureif(iswin64(), "Crash on win64") def test_binary(self): self._check_from('\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@', array([1,2,3,4]), dtype='= n_after, (n_before, n_after) + def test_void_scalar_with_titles(self, level=rlevel): + """No ticket""" + data = [('john', 4), ('mary', 5)] + dtype1 = [(('source:yy', 'name'), 'O'), (('source:xx', 'id'), int)] + arr = np.array(data, dtype=dtype1) + assert arr[0][0] == 'john' + assert arr[0][1] == 4 + if __name__ == "__main__": run_module_suite() Modified: branches/coremath/numpy/core/tests/test_scalarmath.py =================================================================== --- branches/coremath/numpy/core/tests/test_scalarmath.py 2009-02-25 19:04:59 UTC (rev 6475) +++ branches/coremath/numpy/core/tests/test_scalarmath.py 2009-02-26 06:24:22 UTC (rev 6476) @@ -1,3 +1,4 @@ +import sys from numpy.testing import * import numpy as np @@ -6,6 +7,10 @@ np.single, np.double, np.longdouble, np.csingle, np.cdouble, np.clongdouble] +def iswin64(): + import platform + return platform.architecture()[0] == "64bit" and sys.platform == "win32" + # This compares scalarmath against ufuncs. class TestTypes(TestCase): @@ -42,6 +47,7 @@ b = a ** 4 assert b == 81, "error with %r: got %r" % (t,b) + @dec.knownfailureif(iswin64(), "Crash on win64") def test_large_types(self): for t in [np.int32, np.int64, np.float32, np.float64, np.longdouble]: a = t(51) Modified: branches/coremath/numpy/core/tests/test_unicode.py =================================================================== --- branches/coremath/numpy/core/tests/test_unicode.py 2009-02-25 19:04:59 UTC (rev 6475) +++ branches/coremath/numpy/core/tests/test_unicode.py 2009-02-26 06:24:22 UTC (rev 6476) @@ -1,6 +1,12 @@ +import sys + from numpy.testing import * from numpy.core import * +def iswin64(): + import platform + return platform.architecture()[0] == "64bit" and sys.platform == "win32" + # Guess the UCS length for this python interpreter if len(buffer(u'u')) == 4: ucs4 = True @@ -36,17 +42,20 @@ else: self.assert_(len(buffer(ua_scalar)) == 0) + @dec.knownfailureif(iswin64(), "Crash on win64") def test_zeros0D(self): """Check creation of 0-dimensional objects""" ua = zeros((), dtype='U%s' % self.ulen) self.content_check(ua, ua[()], 4*self.ulen) + @dec.knownfailureif(iswin64(), "Crash on win64") def test_zerosSD(self): """Check creation of single-dimensional objects""" ua = zeros((2,), dtype='U%s' % self.ulen) self.content_check(ua, ua[0], 4*self.ulen*2) self.content_check(ua, ua[1], 4*self.ulen*2) + @dec.knownfailureif(iswin64(), "Crash on win64") def test_zerosMD(self): """Check creation of multi-dimensional objects""" ua = zeros((2,3,4), dtype='U%s' % self.ulen) @@ -96,17 +105,20 @@ # regular 2-byte word self.assert_(len(buffer(ua_scalar)) == 2*self.ulen) + @dec.knownfailureif(iswin64(), "Crash on win64") def test_values0D(self): """Check creation of 0-dimensional objects with values""" ua = array(self.ucs_value*self.ulen, dtype='U%s' % self.ulen) self.content_check(ua, ua[()], 4*self.ulen) + @dec.knownfailureif(iswin64(), "Crash on win64") def test_valuesSD(self): """Check creation of single-dimensional objects with values""" ua = array([self.ucs_value*self.ulen]*2, dtype='U%s' % self.ulen) self.content_check(ua, ua[0], 4*self.ulen*2) self.content_check(ua, ua[1], 4*self.ulen*2) + @dec.knownfailureif(iswin64(), "Crash on win64") def test_valuesMD(self): """Check creation of multi-dimensional objects with values""" ua = array([[[self.ucs_value*self.ulen]*2]*3]*4, dtype='U%s' % self.ulen) @@ -181,12 +193,14 @@ # regular 2-byte word self.assert_(len(buffer(ua_scalar)) == 2*self.ulen) + @dec.knownfailureif(iswin64(), "Crash on win64") def test_values0D(self): """Check assignment of 0-dimensional objects with values""" ua = zeros((), dtype='U%s' % self.ulen) ua[()] = self.ucs_value*self.ulen self.content_check(ua, ua[()], 4*self.ulen) + @dec.knownfailureif(iswin64(), "Crash on win64") def test_valuesSD(self): """Check assignment of single-dimensional objects with values""" ua = zeros((2,), dtype='U%s' % self.ulen) @@ -195,6 +209,7 @@ ua[1] = self.ucs_value*self.ulen self.content_check(ua, ua[1], 4*self.ulen*2) + @dec.knownfailureif(iswin64(), "Crash on win64") def test_valuesMD(self): """Check assignment of multi-dimensional objects with values""" ua = zeros((2,3,4), dtype='U%s' % self.ulen) @@ -248,6 +263,7 @@ class byteorder_values: """Check the byteorder of unicode arrays in round-trip conversions""" + @dec.knownfailureif(iswin64(), "Crash on win64") def test_values0D(self): """Check byteorder of 0-dimensional objects""" ua = array(self.ucs_value*self.ulen, dtype='U%s' % self.ulen) @@ -260,6 +276,7 @@ # Arrays must be equal after the round-trip assert_equal(ua, ua3) + @dec.knownfailureif(iswin64(), "Crash on win64") def test_valuesSD(self): """Check byteorder of single-dimensional objects""" ua = array([self.ucs_value*self.ulen]*2, dtype='U%s' % self.ulen) @@ -270,6 +287,7 @@ # Arrays must be equal after the round-trip assert_equal(ua, ua3) + @dec.knownfailureif(iswin64(), "Crash on win64") def test_valuesMD(self): """Check byteorder of multi-dimensional objects""" ua = array([[[self.ucs_value*self.ulen]*2]*3]*4, Modified: branches/coremath/numpy/distutils/fcompiler/compaq.py =================================================================== --- branches/coremath/numpy/distutils/fcompiler/compaq.py 2009-02-25 19:04:59 UTC (rev 6475) +++ branches/coremath/numpy/distutils/fcompiler/compaq.py 2009-02-26 06:24:22 UTC (rev 6476) @@ -89,6 +89,10 @@ if not "vcvarsall.bat" in str(e): print "Unexpected IOError in", __file__ raise e + except ValueError, e: + if not "path']" in str(e): + print "Unexpected ValueError in", __file__ + raise e executables = { 'version_cmd' : ['', "/what"], Modified: branches/coremath/numpy/ma/core.py =================================================================== --- branches/coremath/numpy/ma/core.py 2009-02-25 19:04:59 UTC (rev 6475) +++ branches/coremath/numpy/ma/core.py 2009-02-26 06:24:22 UTC (rev 6476) @@ -3846,7 +3846,7 @@ """ (ver, shp, typ, isf, raw, msk, flv) = state ndarray.__setstate__(self, (shp, typ, isf, raw)) - self._mask.__setstate__((shp, np.dtype(bool), isf, msk)) + self._mask.__setstate__((shp, make_mask_descr(typ), isf, msk)) self.fill_value = flv # def __reduce__(self): Modified: branches/coremath/numpy/ma/tests/test_core.py =================================================================== --- branches/coremath/numpy/ma/tests/test_core.py 2009-02-25 19:04:59 UTC (rev 6475) +++ branches/coremath/numpy/ma/tests/test_core.py 2009-02-26 06:24:22 UTC (rev 6476) @@ -369,14 +369,26 @@ assert_equal(a_pickled._mask, a._mask) assert_equal(a_pickled._data, a._data) assert_equal(a_pickled.fill_value, 999) - # + + def test_pickling_subbaseclass(self): + "Test pickling w/ a subclass of ndarray" + import cPickle a = array(np.matrix(range(10)), mask=[1,0,1,0,0]*2) a_pickled = cPickle.loads(a.dumps()) assert_equal(a_pickled._mask, a._mask) assert_equal(a_pickled, a) self.failUnless(isinstance(a_pickled._data,np.matrix)) + def test_pickling_wstructured(self): + "Tests pickling w/ structured array" + import cPickle + a = array([(1, 1.), (2, 2.)], mask=[(0, 0), (0, 1)], + dtype=[('a', int), ('b', float)]) + a_pickled = cPickle.loads(a.dumps()) + assert_equal(a_pickled._mask, a._mask) + assert_equal(a_pickled, a) + def test_single_element_subscript(self): "Tests single element subscripts of Maskedarrays." a = array([1,3,2]) From numpy-svn at scipy.org Thu Feb 26 03:09:45 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Thu, 26 Feb 2009 02:09:45 -0600 (CST) Subject: [Numpy-svn] r6477 - in trunk/tools/win32build: . misc misc/msvcrt90 Message-ID: <20090226080945.76AF9C7C026@scipy.org> Author: cdavid Date: 2009-02-26 02:09:39 -0600 (Thu, 26 Feb 2009) New Revision: 6477 Added: trunk/tools/win32build/misc/ trunk/tools/win32build/misc/msvcrt90/ trunk/tools/win32build/misc/msvcrt90/msvcrt.def.in trunk/tools/win32build/misc/msvcrt90/yop.sh Log: Add script to build msvcrt90 on mingw-w64. Added: trunk/tools/win32build/misc/msvcrt90/msvcrt.def.in =================================================================== --- trunk/tools/win32build/misc/msvcrt90/msvcrt.def.in 2009-02-26 06:24:22 UTC (rev 6476) +++ trunk/tools/win32build/misc/msvcrt90/msvcrt.def.in 2009-02-26 08:09:39 UTC (rev 6477) @@ -0,0 +1,825 @@ +; +; __FILENAME__ +; created from msvcrt.def.in +;* This file has no copyright assigned and is placed in the Public Domain. +;* This file is a part of the mingw-runtime package. +;* No warranty is given; refer to the file DISCLAIMER within the package. +; +; Exports from msvcrt.dll, msvcr70.dll, msvcr71.dll, msvcr80.dll and msvcr90.dll +; +; NOTE: All exports, except for what appeared to be C++ mangled names, +; are included. Not all functions have prototypes in the headers +; (and some are not functions at all). +; +EXPORTS +_CIacos +_CIasin +_CIatan +_CIatan2 +_CIcos +_CIcosh +_CIexp +_CIfmod +_CIlog +_CIlog10 +_CIpow +_CIsin +_CIsinh +_CIsqrt +_CItan +_CItanh +_CxxThrowException +_EH_prolog +_Getdays +_Getmonths +_Gettnames +_HUGE DATA +_Strftime +_XcptFilter +__CxxFrameHandler +__CxxLongjmpUnwind +__RTCastToVoid +__RTDynamicCast +__RTtypeid +__STRINGTOLD +__argc DATA +__argv DATA +__badioinfo DATA +__crtCompareStringA +__crtGetLocaleInfoW +__crtLCMapStringA +__dllonexit +__doserrno +__fpecode +__getmainargs +__initenv DATA +__isascii +__iscsym +__iscsymf +__lc_codepage DATA +__lc_handle DATA +__lconv_init +__mb_cur_max DATA +__p___argc +__p___argv +__p___initenv +__p___mb_cur_max +__p___wargv +__p___winitenv +__p__acmdln +__p__amblksiz +__p__commode +__p__daylight +__p__dstbias +__p__environ +__p__fileinfo +__p__fmode +__p__iob +__p__mbctype +__p__osver +__p__pctype +__p__pgmptr +__p__pwctype +__p__timezone +__p__tzname +__p__wcmdln +__p__wenviron +__p__winmajor +__p__winminor +__p__winver +__p__wpgmptr +__pioinfo DATA +__pxcptinfoptrs +__set_app_type +__setlc_active DATA +__setusermatherr +__threadhandle +__threadid +__toascii +__unDName +__unguarded_readlc_active DATA +__wargv DATA +__wgetmainargs +__winitenv DATA +_abnormal_termination +_access +_acmdln DATA +_adj_fdiv_m16i +_adj_fdiv_m32 +_adj_fdiv_m32i +_adj_fdiv_m64 +_adj_fdiv_r +_adj_fdivr_m16i +_adj_fdivr_m32 +_adj_fdivr_m32i +_adj_fdivr_m64 +_adj_fpatan +_adj_fprem +_adj_fprem1 +_adj_fptan +_adjust_fdiv DATA +_aexit_rtn DATA +_amsg_exit +_assert +_atodbl +_atoi64 +_atoldbl +_beep +_beginthread +_beginthreadex +_c_exit +_cabs +_callnewh +_cexit +_cgets +_chdir +_chdrive +_chgsign +_chmod +_chsize +_clearfp +_close +_commit +_commode DATA +_control87 +_controlfp +_copysign +_cprintf +_cputs +_creat +_cscanf +#if !(__msvcr71__ || __msvcr71d__ || __msvcr80__ || __msvcr80d__ || __msvcr90__ || msvcr90d__) +_ctype DATA +#endif +_cwait +_daylight DATA +_dstbias DATA +_dup +_dup2 +_ecvt +_endthread +_endthreadex +_environ DATA +_eof +_errno +_except_handler2 +_except_handler3 +_execl +_execle +_execlp +_execlpe +_execv +_execve +_execvp +_execvpe +_exit +_expand +_fcloseall +_fcvt +_fdopen +_fgetchar +_fgetwchar +_filbuf +_fileinfo DATA +_filelength +_filelengthi64 +_fileno +_findclose +_findfirst +_findfirsti64 +_findnext +_findnexti64 +_finite +_flsbuf +_flushall +_fmode DATA +_fpclass +_fpieee_flt +_fpreset DATA +_fputchar +_fputwchar +_fsopen +_fstat +_fstati64 +_ftime +_ftol +_fullpath +_futime +_gcvt +_get_osfhandle +_get_sbh_threshold +_getch +_getche +_getcwd +_getdcwd +_getdiskfree +_getdllprocaddr +_getdrive +_getdrives +_getmaxstdio +_getmbcp +_getpid +_getsystime +_getw +_getws +_global_unwind2 +_heapadd +_heapchk +_heapmin +_heapset +_heapused +_heapwalk +_hypot +_i64toa +_i64tow +_initterm +_inp +_inpd +_inpw +_iob DATA +_isatty +_isctype +_ismbbalnum +_ismbbalpha +_ismbbgraph +_ismbbkalnum +_ismbbkana +_ismbbkprint +_ismbbkpunct +_ismbblead +_ismbbprint +_ismbbpunct +_ismbbtrail +_ismbcalnum +_ismbcalpha +_ismbcdigit +_ismbcgraph +_ismbchira +_ismbckata +_ismbcl0 +_ismbcl1 +_ismbcl2 +_ismbclegal +_ismbclower +_ismbcprint +_ismbcpunct +_ismbcspace +_ismbcsymbol +_ismbcupper +_ismbslead +_ismbstrail +_isnan +_itoa +_itow +_j0 +_j1 +_jn +_kbhit +_lfind +_loaddll +_local_unwind2 +_lock +_locking +_logb +_longjmpex +_lrotl +_lrotr +_lsearch +_lseek +_lseeki64 +_ltoa +_ltow +_makepath +_mbbtombc +_mbbtype +_mbccpy +_mbcjistojms +_mbcjmstojis +_mbclen +_mbctohira +_mbctokata +_mbctolower +_mbctombb +_mbctoupper +_mbctype DATA +_mbsbtype +_mbscat +_mbschr +_mbscmp +_mbscoll +_mbscpy +_mbscspn +_mbsdec +_mbsdup +_mbsicmp +_mbsicoll +_mbsinc +_mbslen +_mbslwr +_mbsnbcat +_mbsnbcmp +_mbsnbcnt +_mbsnbcoll +_mbsnbcpy +_mbsnbicmp +_mbsnbicoll +_mbsnbset +_mbsncat +_mbsnccnt +_mbsncmp +_mbsncoll +_mbsncpy +_mbsnextc +_mbsnicmp +_mbsnicoll +_mbsninc +_mbsnset +_mbspbrk +_mbsrchr +_mbsrev +_mbsset +_mbsspn +_mbsspnp +_mbsstr +_mbstok +_mbstrlen +_mbsupr +_memccpy +_memicmp +_mkdir +_mktemp +_msize +_nextafter +_onexit DATA +_open +_open_osfhandle +_osver DATA +_outp +_outpd +_outpw +_pclose +_pctype DATA +_pgmptr DATA +_pipe +_popen +_purecall +_putch +_putenv +_putw +_putws +_pwctype DATA +_read +_rmdir +_rmtmp +_rotl +_rotr +_safe_fdiv +_safe_fdivr +_safe_fprem +_safe_fprem1 +_scalb +_searchenv +_seh_longjmp_unwind +_set_error_mode +_set_sbh_threshold +_seterrormode +_setjmp +_setjmp3 +_setmaxstdio +_setmbcp +_setmode +_setsystime +_sleep +_snprintf +_snwprintf +_sopen +_spawnl +_spawnle +_spawnlp +_spawnlpe +_spawnv +_spawnve +_spawnvp +_spawnvpe +_splitpath +_stat +_stati64 +_statusfp +_strcmpi +_strdate +_strdup +_strerror +_stricmp +_stricoll +_strlwr +_strncoll +_strnicmp +_strnicoll +_strnset +_strrev +_strset +_strtime +_strupr +_swab +_sys_errlist DATA +_sys_nerr DATA +_tell +_telli64 +_tempnam +_timezone DATA +_tolower +_toupper +_tzname DATA +_tzset +_ui64toa +_ui64tow +_ultoa +_ultow +_umask +_ungetch +_unlink +_unloaddll +_unlock +_utime +_vsnprintf +_vsnwprintf +_waccess +_wasctime +_wchdir +_wchmod +_wcmdln DATA +_wcreat +_wcsdup +_wcsicmp +_wcsicoll +_wcslwr +_wcsncoll +_wcsnicmp +_wcsnicoll +_wcsnset +_wcsrev +_wcsset +_wcsupr +_wctime +_wenviron DATA +_wexecl +_wexecle +_wexeclp +_wexeclpe +_wexecv +_wexecve +_wexecvp +_wexecvpe +_wfdopen +_wfindfirst +_wfindfirsti64 +_wfindnext +_wfindnexti64 +_wfopen +_wfreopen +_wfsopen +_wfullpath +_wgetcwd +_wgetdcwd +_wgetenv +_winmajor DATA +_winminor DATA +_winver DATA +_wmakepath +_wmkdir +_wmktemp +_wopen +_wperror +_wpgmptr DATA +_wpopen +_wputenv +_wremove +_wrename +_write +_wrmdir +_wsearchenv +_wsetlocale +_wsopen +_wspawnl +_wspawnle +_wspawnlp +_wspawnlpe +_wspawnv +_wspawnve +_wspawnvp +_wspawnvpe +_wsplitpath +_wstat +_wstati64 +_wstrdate +_wstrtime +_wsystem +_wtempnam +_wtmpnam +_wtoi +_wtoi64 +_wtol +_wunlink +_wutime +_y0 +_y1 +_yn +abort +abs +acos +asctime +asin +atan +atan2 +atexit DATA +atof +atoi +atol +bsearch +calloc +ceil +clearerr +clock +cos +cosh +ctime +difftime +div +exit +exp +fabs +fclose +feof +ferror +fflush +fgetc +fgetpos +fgets +fgetwc +fgetws +floor +fmod +fopen +fprintf +fputc +fputs +fputwc +fputws +fread +free +freopen +frexp +fscanf +fseek +fsetpos +ftell +fwprintf +fwrite +fwscanf +getc +getchar +getenv +gets +getwc +getwchar +gmtime +is_wctype +isalnum +isalpha +iscntrl +isdigit +isgraph +isleadbyte +islower +isprint +ispunct +isspace +isupper +iswalnum +iswalpha +iswascii +iswcntrl +iswctype +iswdigit +iswgraph +iswlower +iswprint +iswpunct +iswspace +iswupper +iswxdigit +isxdigit +labs +ldexp +ldiv +localeconv +localtime +log +log10 +;longjmp +malloc +mblen +mbstowcs +mbtowc +memchr +memcmp +memcpy +memmove +memset +mktime +modf +perror +pow +printf +putc +putchar +puts +putwc +putwchar +qsort +raise +rand +realloc +remove +rename +rewind +scanf +setbuf +setlocale +setvbuf +signal +sin +sinh +sprintf +sqrt +srand +sscanf +strcat +strchr +strcmp +strcoll +strcpy +strcspn +strerror +strftime +strlen +strncat +strncmp +strncpy +strpbrk +strrchr +strspn +strstr +strtod +strtok +strtol +strtoul +strxfrm +swprintf +swscanf +system +tan +tanh +time +tmpfile +tmpnam +tolower +toupper +towlower +towupper +ungetc +ungetwc +vfprintf +vfwprintf +vprintf +vsprintf +vswprintf +vwprintf +wcscat +wcschr +wcscmp +wcscoll +wcscpy +wcscspn +wcsftime +wcslen +wcsncat +wcsncmp +wcsncpy +wcspbrk +wcsrchr +wcsspn +wcsstr +wcstod +wcstok +wcstol +wcstombs +wcstoul +wcsxfrm +wctomb +wprintf +wscanf +; msvcrt.dll(version 6.10) and later +__lc_collate_cp DATA +__p__mbcasemap +__unDNameEx +_chkesp +_ctime64 +_findfirst64 +_findnext64 +_fstat64 +_ftime64 +_futime64 +_gmtime64 +_localtime64 +_mbcasemap +_mktime64 +_osplatform DATA +_stat64 +_time64 +_utime64 +_wctime64 +_wfindfirst64 +_wfindnext64 +_wstat64 +_wutime64 +#if ( __msvcr70__ || __msvcr70d__ || __msvcr71__ || __msvcr71d__ || __msvcr80__ || __msvcr80d__ || __msvcr90__ || __msvcr90d__) +; msvcr70.dll amd later +__buffer_overrun +__CxxDetectRethrow +__CxxExceptionFilter +__CxxQueryExceptionSize +__CxxRegisterExceptionObject +__CxxUnregisterExceptionObject +__DestructExceptionObject +__lc_clike +__security_error_handler +__set_buffer_overrun_handler +__uncaught_exception +__wcserror +_aligned_free +_aligned_malloc +_aligned_offset_malloc +_aligned_offset_realloc +_aligned_realloc +_cgetws +_cputws +_CRT_RTC_INIT +_cwprintf +_cwscanf +_getwch +_getwche +_putwch +_resetstkoflw +_scprintf +_scwprintf +_set_security_error_handler +_snscanf +_snwscanf +_strtoi64 +_strtoui64 +_ungetwch +_vscprintf +_vscwprintf +_wcserror +_wcstoi64 +_wcstoui64 +_wctype +_wtof +#endif /* 7.0 */ +#if (__msvcr71__ || __msvcr71d__ || __msvcr80__ || __msvcr80d__ || __msvcr90__ || __msvcr90d__) +; msvcr71.dll +___lc_codepage_func +___lc_collate_cp_func +___lc_handle_func +___mb_cur_max_func +___setlc_active_func +___unguarded_readlc_active_add_func +__CppXcptFilter +__crtCompareStringW +__crtGetStringTypeW +__crtLCMapStringW +__CxxCallUnwindDtor +__CxxCallUnwindVecDtor +__iob_func +__pctype_func +__pwctype_func +_get_heap_handle +_set_purecall_handler +_set_SSE2_enable +#endif /* 7.1 */ +#if ( __msvcr80__ || __msvcr80d__ || __msvcr90__ || __msvcr90d__) +; msvcr80.dll +_get_output_format +_set_output_format +_get_printf_count_output +_set_printf_count_output +_set_abort_behavior +_set_invalid_parameter_handler +_fseek_nolock +_ftell_nolock +_fseeki64 +_ftelli64 +_fseeki64_nolock +_ftelli64_nolock +#endif /* 8.0 */ Added: trunk/tools/win32build/misc/msvcrt90/yop.sh =================================================================== --- trunk/tools/win32build/misc/msvcrt90/yop.sh 2009-02-26 06:24:22 UTC (rev 6476) +++ trunk/tools/win32build/misc/msvcrt90/yop.sh 2009-02-26 08:09:39 UTC (rev 6477) @@ -0,0 +1,17 @@ +PATH=/cygdive/c/Mingw-w64/bin:$PATH +gcc -DRUNTIME=msvcr90 -D__msvcr90__=1 -D__MSVCRT__ -C -E -P -xc-header msvcrt.def.in > msvcr90.def +dlltool --as=as -k --dllname msvcr90.dll --output-lib libmsvcr90.a --def msvcr90.def +for key in printf fprintf sprintf vprintf vfprintf vsprintf; do + src=`nm libmsvcr90.a | sed -n -e '/:$/h;/^[0-7][0-7]* *T */{s///;H;g;s/\n//p' -e '}' | sed -n 's/:_'"$key"'$//p'`; + if test -n "$src"; then + dst=`echo "$src" | sed 's/0/4/'`; repl="$repl $dst"; + tmpfiles="$tmpfiles $src $dst"; + ar x libmsvcr90.a $src; + objcopy --redefine-sym _$key=___msvcrt_$key \ + --redefine-sym __imp__$key=__imp____msvcrt_$key \ + $src $dst; + fi; +done; +test `key=_get_output_format; nm libmsvcr90.a | sed -n -e '/:$/h;/^[0-7][0-7]* *T */{s///;H;g;s/\n//p' -e '}' | sed -n 's/:_'"$key"'$//p'` || repl="$repl ofmt_stub.o"; +test -n "$repl" && ar rcs libmsvcr90.a $repl; +rm -f $tmpfiles From numpy-svn at scipy.org Thu Feb 26 03:25:03 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Thu, 26 Feb 2009 02:25:03 -0600 (CST) Subject: [Numpy-svn] r6478 - in branches/coremath: . tools/win32build tools/win32build/misc tools/win32build/misc/msvcrt90 Message-ID: <20090226082503.5F6C9C84116@scipy.org> Author: cdavid Date: 2009-02-26 02:24:54 -0600 (Thu, 26 Feb 2009) New Revision: 6478 Added: branches/coremath/tools/win32build/misc/ branches/coremath/tools/win32build/misc/msvcrt90/ branches/coremath/tools/win32build/misc/msvcrt90/msvcrt.def.in branches/coremath/tools/win32build/misc/msvcrt90/yop.sh Removed: branches/coremath/tools/win32build/misc/msvcrt90/ branches/coremath/tools/win32build/misc/msvcrt90/msvcrt.def.in branches/coremath/tools/win32build/misc/msvcrt90/yop.sh Modified: branches/coremath/ Log: Merged revisions 6477 via svnmerge from http://svn.scipy.org/svn/numpy/trunk ........ r6477 | cdavid | 2009-02-26 17:09:39 +0900 (Thu, 26 Feb 2009) | 1 line Add script to build msvcrt90 on mingw-w64. ........ Property changes on: branches/coremath ___________________________________________________________________ Name: svnmerge-integrated - /branches/distutils-revamp:1-2752 /branches/dynamic_cpu_configuration:1-6101 /branches/fix_float_format:1-6359 /branches/multicore:1-3687 /branches/numpy-mingw-w64:1-6150 /branches/visualstudio_manifest:1-6077 /trunk:1-6475 + /branches/distutils-revamp:1-2752 /branches/dynamic_cpu_configuration:1-6101 /branches/fix_float_format:1-6359 /branches/multicore:1-3687 /branches/numpy-mingw-w64:1-6150 /branches/visualstudio_manifest:1-6077 /trunk:1-6477 Copied: branches/coremath/tools/win32build/misc (from rev 6477, trunk/tools/win32build/misc) Copied: branches/coremath/tools/win32build/misc/msvcrt90 (from rev 6477, trunk/tools/win32build/misc/msvcrt90) Deleted: branches/coremath/tools/win32build/misc/msvcrt90/msvcrt.def.in =================================================================== --- trunk/tools/win32build/misc/msvcrt90/msvcrt.def.in 2009-02-26 08:09:39 UTC (rev 6477) +++ branches/coremath/tools/win32build/misc/msvcrt90/msvcrt.def.in 2009-02-26 08:24:54 UTC (rev 6478) @@ -1,825 +0,0 @@ -; -; __FILENAME__ -; created from msvcrt.def.in -;* This file has no copyright assigned and is placed in the Public Domain. -;* This file is a part of the mingw-runtime package. -;* No warranty is given; refer to the file DISCLAIMER within the package. -; -; Exports from msvcrt.dll, msvcr70.dll, msvcr71.dll, msvcr80.dll and msvcr90.dll -; -; NOTE: All exports, except for what appeared to be C++ mangled names, -; are included. Not all functions have prototypes in the headers -; (and some are not functions at all). -; -EXPORTS -_CIacos -_CIasin -_CIatan -_CIatan2 -_CIcos -_CIcosh -_CIexp -_CIfmod -_CIlog -_CIlog10 -_CIpow -_CIsin -_CIsinh -_CIsqrt -_CItan -_CItanh -_CxxThrowException -_EH_prolog -_Getdays -_Getmonths -_Gettnames -_HUGE DATA -_Strftime -_XcptFilter -__CxxFrameHandler -__CxxLongjmpUnwind -__RTCastToVoid -__RTDynamicCast -__RTtypeid -__STRINGTOLD -__argc DATA -__argv DATA -__badioinfo DATA -__crtCompareStringA -__crtGetLocaleInfoW -__crtLCMapStringA -__dllonexit -__doserrno -__fpecode -__getmainargs -__initenv DATA -__isascii -__iscsym -__iscsymf -__lc_codepage DATA -__lc_handle DATA -__lconv_init -__mb_cur_max DATA -__p___argc -__p___argv -__p___initenv -__p___mb_cur_max -__p___wargv -__p___winitenv -__p__acmdln -__p__amblksiz -__p__commode -__p__daylight -__p__dstbias -__p__environ -__p__fileinfo -__p__fmode -__p__iob -__p__mbctype -__p__osver -__p__pctype -__p__pgmptr -__p__pwctype -__p__timezone -__p__tzname -__p__wcmdln -__p__wenviron -__p__winmajor -__p__winminor -__p__winver -__p__wpgmptr -__pioinfo DATA -__pxcptinfoptrs -__set_app_type -__setlc_active DATA -__setusermatherr -__threadhandle -__threadid -__toascii -__unDName -__unguarded_readlc_active DATA -__wargv DATA -__wgetmainargs -__winitenv DATA -_abnormal_termination -_access -_acmdln DATA -_adj_fdiv_m16i -_adj_fdiv_m32 -_adj_fdiv_m32i -_adj_fdiv_m64 -_adj_fdiv_r -_adj_fdivr_m16i -_adj_fdivr_m32 -_adj_fdivr_m32i -_adj_fdivr_m64 -_adj_fpatan -_adj_fprem -_adj_fprem1 -_adj_fptan -_adjust_fdiv DATA -_aexit_rtn DATA -_amsg_exit -_assert -_atodbl -_atoi64 -_atoldbl -_beep -_beginthread -_beginthreadex -_c_exit -_cabs -_callnewh -_cexit -_cgets -_chdir -_chdrive -_chgsign -_chmod -_chsize -_clearfp -_close -_commit -_commode DATA -_control87 -_controlfp -_copysign -_cprintf -_cputs -_creat -_cscanf -#if !(__msvcr71__ || __msvcr71d__ || __msvcr80__ || __msvcr80d__ || __msvcr90__ || msvcr90d__) -_ctype DATA -#endif -_cwait -_daylight DATA -_dstbias DATA -_dup -_dup2 -_ecvt -_endthread -_endthreadex -_environ DATA -_eof -_errno -_except_handler2 -_except_handler3 -_execl -_execle -_execlp -_execlpe -_execv -_execve -_execvp -_execvpe -_exit -_expand -_fcloseall -_fcvt -_fdopen -_fgetchar -_fgetwchar -_filbuf -_fileinfo DATA -_filelength -_filelengthi64 -_fileno -_findclose -_findfirst -_findfirsti64 -_findnext -_findnexti64 -_finite -_flsbuf -_flushall -_fmode DATA -_fpclass -_fpieee_flt -_fpreset DATA -_fputchar -_fputwchar -_fsopen -_fstat -_fstati64 -_ftime -_ftol -_fullpath -_futime -_gcvt -_get_osfhandle -_get_sbh_threshold -_getch -_getche -_getcwd -_getdcwd -_getdiskfree -_getdllprocaddr -_getdrive -_getdrives -_getmaxstdio -_getmbcp -_getpid -_getsystime -_getw -_getws -_global_unwind2 -_heapadd -_heapchk -_heapmin -_heapset -_heapused -_heapwalk -_hypot -_i64toa -_i64tow -_initterm -_inp -_inpd -_inpw -_iob DATA -_isatty -_isctype -_ismbbalnum -_ismbbalpha -_ismbbgraph -_ismbbkalnum -_ismbbkana -_ismbbkprint -_ismbbkpunct -_ismbblead -_ismbbprint -_ismbbpunct -_ismbbtrail -_ismbcalnum -_ismbcalpha -_ismbcdigit -_ismbcgraph -_ismbchira -_ismbckata -_ismbcl0 -_ismbcl1 -_ismbcl2 -_ismbclegal -_ismbclower -_ismbcprint -_ismbcpunct -_ismbcspace -_ismbcsymbol -_ismbcupper -_ismbslead -_ismbstrail -_isnan -_itoa -_itow -_j0 -_j1 -_jn -_kbhit -_lfind -_loaddll -_local_unwind2 -_lock -_locking -_logb -_longjmpex -_lrotl -_lrotr -_lsearch -_lseek -_lseeki64 -_ltoa -_ltow -_makepath -_mbbtombc -_mbbtype -_mbccpy -_mbcjistojms -_mbcjmstojis -_mbclen -_mbctohira -_mbctokata -_mbctolower -_mbctombb -_mbctoupper -_mbctype DATA -_mbsbtype -_mbscat -_mbschr -_mbscmp -_mbscoll -_mbscpy -_mbscspn -_mbsdec -_mbsdup -_mbsicmp -_mbsicoll -_mbsinc -_mbslen -_mbslwr -_mbsnbcat -_mbsnbcmp -_mbsnbcnt -_mbsnbcoll -_mbsnbcpy -_mbsnbicmp -_mbsnbicoll -_mbsnbset -_mbsncat -_mbsnccnt -_mbsncmp -_mbsncoll -_mbsncpy -_mbsnextc -_mbsnicmp -_mbsnicoll -_mbsninc -_mbsnset -_mbspbrk -_mbsrchr -_mbsrev -_mbsset -_mbsspn -_mbsspnp -_mbsstr -_mbstok -_mbstrlen -_mbsupr -_memccpy -_memicmp -_mkdir -_mktemp -_msize -_nextafter -_onexit DATA -_open -_open_osfhandle -_osver DATA -_outp -_outpd -_outpw -_pclose -_pctype DATA -_pgmptr DATA -_pipe -_popen -_purecall -_putch -_putenv -_putw -_putws -_pwctype DATA -_read -_rmdir -_rmtmp -_rotl -_rotr -_safe_fdiv -_safe_fdivr -_safe_fprem -_safe_fprem1 -_scalb -_searchenv -_seh_longjmp_unwind -_set_error_mode -_set_sbh_threshold -_seterrormode -_setjmp -_setjmp3 -_setmaxstdio -_setmbcp -_setmode -_setsystime -_sleep -_snprintf -_snwprintf -_sopen -_spawnl -_spawnle -_spawnlp -_spawnlpe -_spawnv -_spawnve -_spawnvp -_spawnvpe -_splitpath -_stat -_stati64 -_statusfp -_strcmpi -_strdate -_strdup -_strerror -_stricmp -_stricoll -_strlwr -_strncoll -_strnicmp -_strnicoll -_strnset -_strrev -_strset -_strtime -_strupr -_swab -_sys_errlist DATA -_sys_nerr DATA -_tell -_telli64 -_tempnam -_timezone DATA -_tolower -_toupper -_tzname DATA -_tzset -_ui64toa -_ui64tow -_ultoa -_ultow -_umask -_ungetch -_unlink -_unloaddll -_unlock -_utime -_vsnprintf -_vsnwprintf -_waccess -_wasctime -_wchdir -_wchmod -_wcmdln DATA -_wcreat -_wcsdup -_wcsicmp -_wcsicoll -_wcslwr -_wcsncoll -_wcsnicmp -_wcsnicoll -_wcsnset -_wcsrev -_wcsset -_wcsupr -_wctime -_wenviron DATA -_wexecl -_wexecle -_wexeclp -_wexeclpe -_wexecv -_wexecve -_wexecvp -_wexecvpe -_wfdopen -_wfindfirst -_wfindfirsti64 -_wfindnext -_wfindnexti64 -_wfopen -_wfreopen -_wfsopen -_wfullpath -_wgetcwd -_wgetdcwd -_wgetenv -_winmajor DATA -_winminor DATA -_winver DATA -_wmakepath -_wmkdir -_wmktemp -_wopen -_wperror -_wpgmptr DATA -_wpopen -_wputenv -_wremove -_wrename -_write -_wrmdir -_wsearchenv -_wsetlocale -_wsopen -_wspawnl -_wspawnle -_wspawnlp -_wspawnlpe -_wspawnv -_wspawnve -_wspawnvp -_wspawnvpe -_wsplitpath -_wstat -_wstati64 -_wstrdate -_wstrtime -_wsystem -_wtempnam -_wtmpnam -_wtoi -_wtoi64 -_wtol -_wunlink -_wutime -_y0 -_y1 -_yn -abort -abs -acos -asctime -asin -atan -atan2 -atexit DATA -atof -atoi -atol -bsearch -calloc -ceil -clearerr -clock -cos -cosh -ctime -difftime -div -exit -exp -fabs -fclose -feof -ferror -fflush -fgetc -fgetpos -fgets -fgetwc -fgetws -floor -fmod -fopen -fprintf -fputc -fputs -fputwc -fputws -fread -free -freopen -frexp -fscanf -fseek -fsetpos -ftell -fwprintf -fwrite -fwscanf -getc -getchar -getenv -gets -getwc -getwchar -gmtime -is_wctype -isalnum -isalpha -iscntrl -isdigit -isgraph -isleadbyte -islower -isprint -ispunct -isspace -isupper -iswalnum -iswalpha -iswascii -iswcntrl -iswctype -iswdigit -iswgraph -iswlower -iswprint -iswpunct -iswspace -iswupper -iswxdigit -isxdigit -labs -ldexp -ldiv -localeconv -localtime -log -log10 -;longjmp -malloc -mblen -mbstowcs -mbtowc -memchr -memcmp -memcpy -memmove -memset -mktime -modf -perror -pow -printf -putc -putchar -puts -putwc -putwchar -qsort -raise -rand -realloc -remove -rename -rewind -scanf -setbuf -setlocale -setvbuf -signal -sin -sinh -sprintf -sqrt -srand -sscanf -strcat -strchr -strcmp -strcoll -strcpy -strcspn -strerror -strftime -strlen -strncat -strncmp -strncpy -strpbrk -strrchr -strspn -strstr -strtod -strtok -strtol -strtoul -strxfrm -swprintf -swscanf -system -tan -tanh -time -tmpfile -tmpnam -tolower -toupper -towlower -towupper -ungetc -ungetwc -vfprintf -vfwprintf -vprintf -vsprintf -vswprintf -vwprintf -wcscat -wcschr -wcscmp -wcscoll -wcscpy -wcscspn -wcsftime -wcslen -wcsncat -wcsncmp -wcsncpy -wcspbrk -wcsrchr -wcsspn -wcsstr -wcstod -wcstok -wcstol -wcstombs -wcstoul -wcsxfrm -wctomb -wprintf -wscanf -; msvcrt.dll(version 6.10) and later -__lc_collate_cp DATA -__p__mbcasemap -__unDNameEx -_chkesp -_ctime64 -_findfirst64 -_findnext64 -_fstat64 -_ftime64 -_futime64 -_gmtime64 -_localtime64 -_mbcasemap -_mktime64 -_osplatform DATA -_stat64 -_time64 -_utime64 -_wctime64 -_wfindfirst64 -_wfindnext64 -_wstat64 -_wutime64 -#if ( __msvcr70__ || __msvcr70d__ || __msvcr71__ || __msvcr71d__ || __msvcr80__ || __msvcr80d__ || __msvcr90__ || __msvcr90d__) -; msvcr70.dll amd later -__buffer_overrun -__CxxDetectRethrow -__CxxExceptionFilter -__CxxQueryExceptionSize -__CxxRegisterExceptionObject -__CxxUnregisterExceptionObject -__DestructExceptionObject -__lc_clike -__security_error_handler -__set_buffer_overrun_handler -__uncaught_exception -__wcserror -_aligned_free -_aligned_malloc -_aligned_offset_malloc -_aligned_offset_realloc -_aligned_realloc -_cgetws -_cputws -_CRT_RTC_INIT -_cwprintf -_cwscanf -_getwch -_getwche -_putwch -_resetstkoflw -_scprintf -_scwprintf -_set_security_error_handler -_snscanf -_snwscanf -_strtoi64 -_strtoui64 -_ungetwch -_vscprintf -_vscwprintf -_wcserror -_wcstoi64 -_wcstoui64 -_wctype -_wtof -#endif /* 7.0 */ -#if (__msvcr71__ || __msvcr71d__ || __msvcr80__ || __msvcr80d__ || __msvcr90__ || __msvcr90d__) -; msvcr71.dll -___lc_codepage_func -___lc_collate_cp_func -___lc_handle_func -___mb_cur_max_func -___setlc_active_func -___unguarded_readlc_active_add_func -__CppXcptFilter -__crtCompareStringW -__crtGetStringTypeW -__crtLCMapStringW -__CxxCallUnwindDtor -__CxxCallUnwindVecDtor -__iob_func -__pctype_func -__pwctype_func -_get_heap_handle -_set_purecall_handler -_set_SSE2_enable -#endif /* 7.1 */ -#if ( __msvcr80__ || __msvcr80d__ || __msvcr90__ || __msvcr90d__) -; msvcr80.dll -_get_output_format -_set_output_format -_get_printf_count_output -_set_printf_count_output -_set_abort_behavior -_set_invalid_parameter_handler -_fseek_nolock -_ftell_nolock -_fseeki64 -_ftelli64 -_fseeki64_nolock -_ftelli64_nolock -#endif /* 8.0 */ Copied: branches/coremath/tools/win32build/misc/msvcrt90/msvcrt.def.in (from rev 6477, trunk/tools/win32build/misc/msvcrt90/msvcrt.def.in) Deleted: branches/coremath/tools/win32build/misc/msvcrt90/yop.sh =================================================================== --- trunk/tools/win32build/misc/msvcrt90/yop.sh 2009-02-26 08:09:39 UTC (rev 6477) +++ branches/coremath/tools/win32build/misc/msvcrt90/yop.sh 2009-02-26 08:24:54 UTC (rev 6478) @@ -1,17 +0,0 @@ -PATH=/cygdive/c/Mingw-w64/bin:$PATH -gcc -DRUNTIME=msvcr90 -D__msvcr90__=1 -D__MSVCRT__ -C -E -P -xc-header msvcrt.def.in > msvcr90.def -dlltool --as=as -k --dllname msvcr90.dll --output-lib libmsvcr90.a --def msvcr90.def -for key in printf fprintf sprintf vprintf vfprintf vsprintf; do - src=`nm libmsvcr90.a | sed -n -e '/:$/h;/^[0-7][0-7]* *T */{s///;H;g;s/\n//p' -e '}' | sed -n 's/:_'"$key"'$//p'`; - if test -n "$src"; then - dst=`echo "$src" | sed 's/0/4/'`; repl="$repl $dst"; - tmpfiles="$tmpfiles $src $dst"; - ar x libmsvcr90.a $src; - objcopy --redefine-sym _$key=___msvcrt_$key \ - --redefine-sym __imp__$key=__imp____msvcrt_$key \ - $src $dst; - fi; -done; -test `key=_get_output_format; nm libmsvcr90.a | sed -n -e '/:$/h;/^[0-7][0-7]* *T */{s///;H;g;s/\n//p' -e '}' | sed -n 's/:_'"$key"'$//p'` || repl="$repl ofmt_stub.o"; -test -n "$repl" && ar rcs libmsvcr90.a $repl; -rm -f $tmpfiles Copied: branches/coremath/tools/win32build/misc/msvcrt90/yop.sh (from rev 6477, trunk/tools/win32build/misc/msvcrt90/yop.sh) From numpy-svn at scipy.org Thu Feb 26 03:46:38 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Thu, 26 Feb 2009 02:46:38 -0600 (CST) Subject: [Numpy-svn] r6479 - trunk/numpy/distutils/command Message-ID: <20090226084638.73779C7C19A@scipy.org> Author: cdavid Date: 2009-02-26 02:46:34 -0600 (Thu, 26 Feb 2009) New Revision: 6479 Modified: trunk/numpy/distutils/command/config.py Log: Fix docstring format for check_funcs_once. Modified: trunk/numpy/distutils/command/config.py =================================================================== --- trunk/numpy/distutils/command/config.py 2009-02-26 08:24:54 UTC (rev 6478) +++ trunk/numpy/distutils/command/config.py 2009-02-26 08:46:34 UTC (rev 6479) @@ -200,22 +200,22 @@ Arguments --------- - - funcs: seq - list of functions to test - include_dirs : seq - list of header paths - libraries : seq - list of libraries to link the code snippet to - libraru_dirs : seq - list of library paths - decl : dict - for every (key, value), the declaration in the value will be - used for function in key. If a function is not in the - dictionay, no declaration will be used. - call : dict - for every item (f, value), if the value is True, a call will be - done to the function f""" + funcs: seq + list of functions to test + include_dirs : seq + list of header paths + libraries : seq + list of libraries to link the code snippet to + libraru_dirs : seq + list of library paths + decl : dict + for every (key, value), the declaration in the value will be + used for function in key. If a function is not in the + dictionay, no declaration will be used. + call : dict + for every item (f, value), if the value is True, a call will be + done to the function f. + """ self._check_compiler() body = [] if decl: From numpy-svn at scipy.org Thu Feb 26 05:38:35 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Thu, 26 Feb 2009 04:38:35 -0600 (CST) Subject: [Numpy-svn] r6480 - in branches/coremath: . numpy/distutils/command Message-ID: <20090226103835.AED12C7C066@scipy.org> Author: cdavid Date: 2009-02-26 04:38:30 -0600 (Thu, 26 Feb 2009) New Revision: 6480 Modified: branches/coremath/ branches/coremath/numpy/distutils/command/config.py Log: Merged revisions 6479 via svnmerge from http://svn.scipy.org/svn/numpy/trunk ........ r6479 | cdavid | 2009-02-26 17:46:34 +0900 (Thu, 26 Feb 2009) | 1 line Fix docstring format for check_funcs_once. ........ Property changes on: branches/coremath ___________________________________________________________________ Name: svnmerge-integrated - /branches/distutils-revamp:1-2752 /branches/dynamic_cpu_configuration:1-6101 /branches/fix_float_format:1-6359 /branches/multicore:1-3687 /branches/numpy-mingw-w64:1-6150 /branches/visualstudio_manifest:1-6077 /trunk:1-6477 + /branches/distutils-revamp:1-2752 /branches/dynamic_cpu_configuration:1-6101 /branches/fix_float_format:1-6359 /branches/multicore:1-3687 /branches/numpy-mingw-w64:1-6150 /branches/visualstudio_manifest:1-6077 /trunk:1-6479 Modified: branches/coremath/numpy/distutils/command/config.py =================================================================== --- branches/coremath/numpy/distutils/command/config.py 2009-02-26 08:46:34 UTC (rev 6479) +++ branches/coremath/numpy/distutils/command/config.py 2009-02-26 10:38:30 UTC (rev 6480) @@ -282,22 +282,22 @@ Arguments --------- - - funcs: seq - list of functions to test - include_dirs : seq - list of header paths - libraries : seq - list of libraries to link the code snippet to - libraru_dirs : seq - list of library paths - decl : dict - for every (key, value), the declaration in the value will be - used for function in key. If a function is not in the - dictionay, no declaration will be used. - call : dict - for every item (f, value), if the value is True, a call will be - done to the function f""" + funcs: seq + list of functions to test + include_dirs : seq + list of header paths + libraries : seq + list of libraries to link the code snippet to + libraru_dirs : seq + list of library paths + decl : dict + for every (key, value), the declaration in the value will be + used for function in key. If a function is not in the + dictionay, no declaration will be used. + call : dict + for every item (f, value), if the value is True, a call will be + done to the function f. + """ self._check_compiler() body = [] if decl: From numpy-svn at scipy.org Thu Feb 26 05:41:50 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Thu, 26 Feb 2009 04:41:50 -0600 (CST) Subject: [Numpy-svn] r6481 - in trunk/numpy: core core/code_generators core/include/numpy core/src distutils/command Message-ID: <20090226104150.7F6E4C7C066@scipy.org> Author: cdavid Date: 2009-02-26 04:41:32 -0600 (Thu, 26 Feb 2009) New Revision: 6481 Added: trunk/numpy/core/include/numpy/npy_common.h trunk/numpy/core/include/numpy/npy_math.h trunk/numpy/core/src/npy_math.c.src Removed: trunk/numpy/core/src/umath_funcs_c99.inc.src Modified: trunk/numpy/core/SConscript trunk/numpy/core/code_generators/generate_umath.py trunk/numpy/core/include/numpy/ndarrayobject.h trunk/numpy/core/include/numpy/numpyconfig.h.in trunk/numpy/core/setup.py trunk/numpy/core/src/_signbit.c trunk/numpy/core/src/multiarraymodule.c trunk/numpy/core/src/numpyos.c trunk/numpy/core/src/umath_funcs.inc.src trunk/numpy/core/src/umath_loops.inc.src trunk/numpy/core/src/umathmodule.c.src trunk/numpy/distutils/command/build_clib.py trunk/numpy/distutils/command/config.py Log: Merge coremath. Modified: trunk/numpy/core/SConscript =================================================================== --- trunk/numpy/core/SConscript 2009-02-26 10:38:30 UTC (rev 6480) +++ trunk/numpy/core/SConscript 2009-02-26 10:41:32 UTC (rev 6481) @@ -131,7 +131,7 @@ #---------------------------------- # Function to check: mfuncs = ('expl', 'expf', 'log1p', 'expm1', 'asinh', 'atanhf', 'atanhl', - 'isnan', 'isinf', 'rint', 'trunc') + 'rint', 'trunc') # Set value to 1 for each defined function (in math lib) mfuncs_defined = dict([(f, 0) for f in mfuncs]) @@ -185,7 +185,11 @@ #include #include """ - config.CheckDeclaration(f, includes=includes) + st = config.CheckDeclaration(f, includes=includes) + if st: + numpyconfig_sym.append(('DEFINE_NPY_HAVE_DECL_%s' % f.upper(), + '#define NPY_HAVE_DECL_%s' % f.upper())) + #------------------------------------------------------- # Define the function PyOS_ascii_strod if not available @@ -256,7 +260,6 @@ # Generate generated code #------------------------ scalartypes_src = env.GenerateFromTemplate(pjoin('src', 'scalartypes.inc.src')) -umath_funcs_c99_src = env.GenerateFromTemplate(pjoin('src', 'umath_funcs_c99.inc.src')) umath_funcs_src = env.GenerateFromTemplate(pjoin('src', 'umath_funcs.inc.src')) umath_loops_src = env.GenerateFromTemplate(pjoin('src', 'umath_loops.inc.src')) arraytypes_src = env.GenerateFromTemplate(pjoin('src', 'arraytypes.inc.src')) @@ -276,6 +279,12 @@ env.Prepend(CPPPATH = ['include', '.']) +# npymath core lib +npymath_src = env.GenerateFromTemplate(pjoin('src', 'npy_math.c.src')) +env.DistutilsStaticExtLibrary("npymath", npymath_src) +env.Prepend(LIBS=["npymath"]) +env.Prepend(LIBPATH=["."]) + #----------------- # Build multiarray #----------------- Modified: trunk/numpy/core/code_generators/generate_umath.py =================================================================== --- trunk/numpy/core/code_generators/generate_umath.py 2009-02-26 10:38:30 UTC (rev 6480) +++ trunk/numpy/core/code_generators/generate_umath.py 2009-02-26 10:41:32 UTC (rev 6481) @@ -37,7 +37,7 @@ self.out = self.type * nout assert len(self.out) == nout -_fdata_map = dict(f='%sf', d='%s', g='%sl', +_fdata_map = dict(f='npy_%sf', d='npy_%s', g='npy_%sl', F='nc_%sf', D='nc_%s', G='nc_%sl') def build_func_data(types, f): func_data = [] Modified: trunk/numpy/core/include/numpy/ndarrayobject.h =================================================================== --- trunk/numpy/core/include/numpy/ndarrayobject.h 2009-02-26 10:38:30 UTC (rev 6480) +++ trunk/numpy/core/include/numpy/ndarrayobject.h 2009-02-26 10:41:32 UTC (rev 6481) @@ -56,73 +56,8 @@ */ #define NPY_FEATURE_VERSION 0x00000001 -/* Some platforms don't define bool, long long, or long double. - Handle that here. -*/ +#include "npy_common.h" -#define NPY_BYTE_FMT "hhd" -#define NPY_UBYTE_FMT "hhu" -#define NPY_SHORT_FMT "hd" -#define NPY_USHORT_FMT "hu" -#define NPY_INT_FMT "d" -#define NPY_UINT_FMT "u" -#define NPY_LONG_FMT "ld" -#define NPY_ULONG_FMT "lu" -#define NPY_FLOAT_FMT "g" -#define NPY_DOUBLE_FMT "g" - -#ifdef PY_LONG_LONG -typedef PY_LONG_LONG npy_longlong; -typedef unsigned PY_LONG_LONG npy_ulonglong; -# ifdef _MSC_VER -# define NPY_LONGLONG_FMT "I64d" -# define NPY_ULONGLONG_FMT "I64u" -# define NPY_LONGLONG_SUFFIX(x) (x##i64) -# define NPY_ULONGLONG_SUFFIX(x) (x##Ui64) -# else - /* #define LONGLONG_FMT "lld" Another possible variant - #define ULONGLONG_FMT "llu" - - #define LONGLONG_FMT "qd" -- BSD perhaps? - #define ULONGLONG_FMT "qu" - */ -# define NPY_LONGLONG_FMT "Ld" -# define NPY_ULONGLONG_FMT "Lu" -# define NPY_LONGLONG_SUFFIX(x) (x##LL) -# define NPY_ULONGLONG_SUFFIX(x) (x##ULL) -# endif -#else -typedef long npy_longlong; -typedef unsigned long npy_ulonglong; -# define NPY_LONGLONG_SUFFIX(x) (x##L) -# define NPY_ULONGLONG_SUFFIX(x) (x##UL) -#endif - - -typedef unsigned char npy_bool; -#define NPY_FALSE 0 -#define NPY_TRUE 1 - - -#if NPY_SIZEOF_LONGDOUBLE == NPY_SIZEOF_DOUBLE - typedef double npy_longdouble; - #define NPY_LONGDOUBLE_FMT "g" -#else - typedef long double npy_longdouble; - #define NPY_LONGDOUBLE_FMT "Lg" -#endif - -#ifndef Py_USING_UNICODE -#error Must use Python with unicode enabled. -#endif - - -typedef signed char npy_byte; -typedef unsigned char npy_ubyte; -typedef unsigned short npy_ushort; -typedef unsigned int npy_uint; -typedef unsigned long npy_ulong; - /* These are for completeness */ typedef float npy_float; typedef double npy_double; Added: trunk/numpy/core/include/numpy/npy_common.h =================================================================== --- trunk/numpy/core/include/numpy/npy_common.h 2009-02-26 10:38:30 UTC (rev 6480) +++ trunk/numpy/core/include/numpy/npy_common.h 2009-02-26 10:41:32 UTC (rev 6481) @@ -0,0 +1,74 @@ +#ifndef _NPY_COMMON_H_ +#define _NPY_COMMON_H_ + +/* This is auto-generated */ +#include "numpyconfig.h" + +/* Some platforms don't define bool, long long, or long double. + Handle that here. +*/ + +#define NPY_BYTE_FMT "hhd" +#define NPY_UBYTE_FMT "hhu" +#define NPY_SHORT_FMT "hd" +#define NPY_USHORT_FMT "hu" +#define NPY_INT_FMT "d" +#define NPY_UINT_FMT "u" +#define NPY_LONG_FMT "ld" +#define NPY_ULONG_FMT "lu" +#define NPY_FLOAT_FMT "g" +#define NPY_DOUBLE_FMT "g" + +#ifdef PY_LONG_LONG +typedef PY_LONG_LONG npy_longlong; +typedef unsigned PY_LONG_LONG npy_ulonglong; +# ifdef _MSC_VER +# define NPY_LONGLONG_FMT "I64d" +# define NPY_ULONGLONG_FMT "I64u" +# define NPY_LONGLONG_SUFFIX(x) (x##i64) +# define NPY_ULONGLONG_SUFFIX(x) (x##Ui64) +# else + /* #define LONGLONG_FMT "lld" Another possible variant + #define ULONGLONG_FMT "llu" + + #define LONGLONG_FMT "qd" -- BSD perhaps? + #define ULONGLONG_FMT "qu" + */ +# define NPY_LONGLONG_FMT "Ld" +# define NPY_ULONGLONG_FMT "Lu" +# define NPY_LONGLONG_SUFFIX(x) (x##LL) +# define NPY_ULONGLONG_SUFFIX(x) (x##ULL) +# endif +#else +typedef long npy_longlong; +typedef unsigned long npy_ulonglong; +# define NPY_LONGLONG_SUFFIX(x) (x##L) +# define NPY_ULONGLONG_SUFFIX(x) (x##UL) +#endif + + +typedef unsigned char npy_bool; +#define NPY_FALSE 0 +#define NPY_TRUE 1 + + +#if NPY_SIZEOF_LONGDOUBLE == NPY_SIZEOF_DOUBLE + typedef double npy_longdouble; + #define NPY_LONGDOUBLE_FMT "g" +#else + typedef long double npy_longdouble; + #define NPY_LONGDOUBLE_FMT "Lg" +#endif + +#ifndef Py_USING_UNICODE +#error Must use Python with unicode enabled. +#endif + + +typedef signed char npy_byte; +typedef unsigned char npy_ubyte; +typedef unsigned short npy_ushort; +typedef unsigned int npy_uint; +typedef unsigned long npy_ulong; + +#endif Added: trunk/numpy/core/include/numpy/npy_math.h =================================================================== --- trunk/numpy/core/include/numpy/npy_math.h 2009-02-26 10:38:30 UTC (rev 6480) +++ trunk/numpy/core/include/numpy/npy_math.h 2009-02-26 10:41:32 UTC (rev 6481) @@ -0,0 +1,217 @@ +#ifndef __NPY_MATH_C99_H_ +#define __NPY_MATH_C99_H_ + +#include +#include + +/* + * Useful constants + */ +#define NPY_E 2.7182818284590452353602874713526625 /* e */ +#define NPY_LOG2E 1.4426950408889634073599246810018921 /* log_2 e */ +#define NPY_LOG10E 0.4342944819032518276511289189166051 /* log_10 e */ +#define NPY_LOGE2 0.6931471805599453094172321214581766 /* log_e 2 */ +#define NPY_LOGE10 2.3025850929940456840179914546843642 /* log_e 10 */ +#define NPY_PI 3.1415926535897932384626433832795029 /* pi */ +#define NPY_PI_2 1.5707963267948966192313216916397514 /* pi/2 */ +#define NPY_PI_4 0.7853981633974483096156608458198757 /* pi/4 */ +#define NPY_1_PI 0.3183098861837906715377675267450287 /* 1/pi */ +#define NPY_2_PI 0.6366197723675813430755350534900574 /* 2/pi */ + +#define NPY_Ef 2.7182818284590452353602874713526625F /* e */ +#define NPY_LOG2Ef 1.4426950408889634073599246810018921F /* log_2 e */ +#define NPY_LOG10Ef 0.4342944819032518276511289189166051F /* log_10 e */ +#define NPY_LOGE2f 0.6931471805599453094172321214581766F /* log_e 2 */ +#define NPY_LOGE10f 2.3025850929940456840179914546843642F /* log_e 10 */ +#define NPY_PIf 3.1415926535897932384626433832795029F /* pi */ +#define NPY_PI_2f 1.5707963267948966192313216916397514F /* pi/2 */ +#define NPY_PI_4f 0.7853981633974483096156608458198757F /* pi/4 */ +#define NPY_1_PIf 0.3183098861837906715377675267450287F /* 1/pi */ +#define NPY_2_PIf 0.6366197723675813430755350534900574F /* 2/pi */ + +#define NPY_El 2.7182818284590452353602874713526625L /* e */ +#define NPY_LOG2El 1.4426950408889634073599246810018921L /* log_2 e */ +#define NPY_LOG10El 0.4342944819032518276511289189166051L /* log_10 e */ +#define NPY_LOGE2l 0.6931471805599453094172321214581766L /* log_e 2 */ +#define NPY_LOGE10l 2.3025850929940456840179914546843642L /* log_e 10 */ +#define NPY_PIl 3.1415926535897932384626433832795029L /* pi */ +#define NPY_PI_2l 1.5707963267948966192313216916397514L /* pi/2 */ +#define NPY_PI_4l 0.7853981633974483096156608458198757L /* pi/4 */ +#define NPY_1_PIl 0.3183098861837906715377675267450287L /* 1/pi */ +#define NPY_2_PIl 0.6366197723675813430755350534900574L /* 2/pi */ + +/* + * C99 double math funcs + */ +double npy_sin(double x); +double npy_cos(double x); +double npy_tan(double x); +double npy_sinh(double x); +double npy_cosh(double x); +double npy_tanh(double x); + +double npy_asin(double x); +double npy_acos(double x); +double npy_atan(double x); +double npy_aexp(double x); +double npy_alog(double x); +double npy_asqrt(double x); +double npy_afabs(double x); + +double npy_log(double x); +double npy_log10(double x); +double npy_exp(double x); +double npy_sqrt(double x); + +double npy_fabs(double x); +double npy_ceil(double x); +double npy_fmod(double x, double y); +double npy_floor(double x); + +double npy_expm1(double x); +double npy_log1p(double x); +double npy_hypot(double x, double y); +double npy_acosh(double x); +double npy_asinh(double xx); +double npy_atanh(double x); +double npy_rint(double x); +double npy_trunc(double x); +double npy_exp2(double x); +double npy_log2(double x); + +double npy_atan2(double x, double y); +double npy_pow(double x, double y); +/* + * IEEE 754 fpu handling. Those are guaranteed to be macros + */ +#ifndef NPY_HAVE_DECL_ISNAN + #define npy_isnan(x) ((x) != (x)) +#else + #define npy_isnan(x) isnan((x)) +#endif + +#ifndef NPY_HAVE_DECL_ISFINITE + #define npy_isfinite(x) !npy_isnan((x) + (-x)) +#else + #define npy_isfinite(x) isfinite((x)) +#endif + +#ifndef NPY_HAVE_DECL_ISINF + #define npy_isinf(x) (!npy_isfinite(x) && !npy_isnan(x)) +#else + #define npy_isinf(x) isinf((x)) +#endif + +#ifndef NPY_HAVE_DECL_SIGNBIT + int _npy_signbit_f(float x); + int _npy_signbit(double x); + int _npy_signbit_ld(npy_longdouble x); + #define npy_signbit(x) \ + (sizeof (x) == sizeof (long double) ? _npy_signbit_ld (x) \ + : sizeof (x) == sizeof (double) ? _npy_signbit_d (x) \ + : _npy_signbit_f (x)) +#else + #define npy_signbit(x) signbit((x)) +#endif + +/* + * float C99 math functions + */ + +float npy_sinf(float x); +float npy_cosf(float x); +float npy_tanf(float x); +float npy_sinhf(float x); +float npy_coshf(float x); +float npy_tanhf(float x); +float npy_fabsf(float x); +float npy_floorf(float x); +float npy_ceilf(float x); +float npy_rintf(float x); +float npy_truncf(float x); +float npy_sqrtf(float x); +float npy_log10f(float x); +float npy_logf(float x); +float npy_expf(float x); +float npy_expm1f(float x); +float npy_asinf(float x); +float npy_acosf(float x); +float npy_atanf(float x); +float npy_asinhf(float x); +float npy_acoshf(float x); +float npy_atanhf(float x); +float npy_log1pf(float x); +float npy_exp2f(float x); +float npy_log2f(float x); + +float npy_atan2f(float x, float y); +float npy_hypotf(float x, float y); +float npy_powf(float x, float y); +float npy_fmodf(float x, float y); + +float npy_modff(float x, float* y); + +/* + * float C99 math functions + */ + +npy_longdouble npy_sinl(npy_longdouble x); +npy_longdouble npy_cosl(npy_longdouble x); +npy_longdouble npy_tanl(npy_longdouble x); +npy_longdouble npy_sinhl(npy_longdouble x); +npy_longdouble npy_coshl(npy_longdouble x); +npy_longdouble npy_tanhl(npy_longdouble x); +npy_longdouble npy_fabsl(npy_longdouble x); +npy_longdouble npy_floorl(npy_longdouble x); +npy_longdouble npy_ceill(npy_longdouble x); +npy_longdouble npy_rintl(npy_longdouble x); +npy_longdouble npy_truncl(npy_longdouble x); +npy_longdouble npy_sqrtl(npy_longdouble x); +npy_longdouble npy_log10l(npy_longdouble x); +npy_longdouble npy_logl(npy_longdouble x); +npy_longdouble npy_expl(npy_longdouble x); +npy_longdouble npy_expm1l(npy_longdouble x); +npy_longdouble npy_asinl(npy_longdouble x); +npy_longdouble npy_acosl(npy_longdouble x); +npy_longdouble npy_atanl(npy_longdouble x); +npy_longdouble npy_asinhl(npy_longdouble x); +npy_longdouble npy_acoshl(npy_longdouble x); +npy_longdouble npy_atanhl(npy_longdouble x); +npy_longdouble npy_log1pl(npy_longdouble x); +npy_longdouble npy_exp2l(npy_longdouble x); +npy_longdouble npy_log2l(npy_longdouble x); + +npy_longdouble npy_atan2l(npy_longdouble x, npy_longdouble y); +npy_longdouble npy_hypotl(npy_longdouble x, npy_longdouble y); +npy_longdouble npy_powl(npy_longdouble x, npy_longdouble y); +npy_longdouble npy_fmodl(npy_longdouble x, npy_longdouble y); + +npy_longdouble npy_modfl(npy_longdouble x, npy_longdouble* y); + +/* + * Non standard functions + */ +double npy_deg2rad(double x); +double npy_rad2deg(double x); +double npy_logaddexp(double x, double y); +double npy_logaddexp2(double x, double y); + +float npy_deg2radf(float x); +float npy_rad2degf(float x); +float npy_logaddexpf(float x, float y); +float npy_logaddexp2f(float x, float y); + +npy_longdouble npy_deg2radl(npy_longdouble x); +npy_longdouble npy_rad2degl(npy_longdouble x); +npy_longdouble npy_logaddexpl(npy_longdouble x, npy_longdouble y); +npy_longdouble npy_logaddexp2l(npy_longdouble x, npy_longdouble y); + +#define npy_degrees npy_rad2deg +#define npy_degreesf npy_rad2degf +#define npy_degreesl npy_rad2degl + +#define npy_radians npy_deg2rad +#define npy_radiansf npy_deg2radf +#define npy_radiansl npy_deg2radl + +#endif Modified: trunk/numpy/core/include/numpy/numpyconfig.h.in =================================================================== --- trunk/numpy/core/include/numpy/numpyconfig.h.in 2009-02-26 10:38:30 UTC (rev 6480) +++ trunk/numpy/core/include/numpy/numpyconfig.h.in 2009-02-26 10:41:32 UTC (rev 6481) @@ -6,6 +6,11 @@ #define NPY_SIZEOF_LONGDOUBLE @SIZEOF_LONG_DOUBLE@ #define NPY_SIZEOF_PY_INTPTR_T @SIZEOF_PY_INTPTR_T@ + at DEFINE_NPY_HAVE_DECL_ISNAN@ + at DEFINE_NPY_HAVE_DECL_ISINF@ + at DEFINE_NPY_HAVE_DECL_ISFINITE@ + at DEFINE_NPY_HAVE_DECL_SIGNBIT@ + @DEFINE_NPY_NO_SIGNAL@ #define NPY_NO_SMP @NPY_NO_SMP@ Modified: trunk/numpy/core/setup.py =================================================================== --- trunk/numpy/core/setup.py 2009-02-26 10:38:30 UTC (rev 6480) +++ trunk/numpy/core/setup.py 2009-02-26 10:41:32 UTC (rev 6481) @@ -4,7 +4,15 @@ from os.path import join from numpy.distutils import log from distutils.dep_util import newer +from distutils.sysconfig import get_config_var +def pythonlib_dir(): + """return path where libpython* is.""" + if sys.platform == 'win32': + return os.path.join(sys.prefix, "libs") + else: + return get_config_var('LIBDIR') + def is_npy_no_signal(): """Return True if the NPY_NO_SIGNAL symbol must be defined in configuration header.""" @@ -35,6 +43,22 @@ nosmp = 0 return nosmp == 1 +def win32_checks(deflist): + from numpy.distutils.misc_util import get_build_architecture + a = get_build_architecture() + + # Distutils hack on AMD64 on windows + print 'BUILD_ARCHITECTURE: %r, os.name=%r, sys.platform=%r' % \ + (a, os.name, sys.platform) + if a == 'AMD64': + deflist.append('DISTUTILS_USE_SDK') + + # On win32, force long double format string to be 'g', not + # 'Lg', since the MS runtime does not support long double whose + # size is > sizeof(double) + if a =="Intel": + deflist.append('FORCE_NO_LONG_DOUBLE_FORMATTING') + def check_math_capabilities(config, moredefs, mathlibs): def check_func(func_name): return config.check_func(func_name, libraries=mathlibs, @@ -112,7 +136,70 @@ if st: moredefs.append(name_to_defsymb("decl_%s" % f)) +def check_types(config, ext, build_dir): + private_defines = [] + public_defines = [] + config_cmd = config.get_config_cmd() + + # Check we have the python header (-dev* packages on Linux) + result = config_cmd.check_header('Python.h') + if not result: + raise SystemError( + "Cannot compiler 'Python.h'. Perhaps you need to "\ + "install python-dev|python-devel.") + + # Check basic types sizes + for type in ('short', 'int', 'long', 'float', 'double', 'long double'): + res = config_cmd.check_type_size(type) + if res >= 0: + private_defines.append(('SIZEOF_%s' % sym2def(type), '%d' % res)) + else: + raise SystemError("Checking sizeof (%s) failed !" % type) + + for type in ('Py_intptr_t',): + res = config_cmd.check_type_size(type, headers=["Python.h"], + library_dirs=[pythonlib_dir()]) + if res >= 0: + private_defines.append(('SIZEOF_%s' % sym2def(type), '%d' % res)) + else: + raise SystemError("Checking sizeof (%s) failed !" % type) + + # We check declaration AND type because that's how distutils does it. + if config_cmd.check_decl('PY_LONG_LONG', headers=['Python.h']): + st = config_cmd.check_type_size('PY_LONG_LONG', headers=['Python.h'], library_dirs=[pythonlib_dir()]) + assert not st == 0 + private_defines.append(('SIZEOF_%s' % sym2def('PY_LONG_LONG'), '%d' % res)) + + if not config_cmd.check_decl('CHAR_BIT', headers=['Python.h']): + raise RuntimeError( + "Config wo CHAR_BIT is not supported"\ + ", please contact the maintainers") + + return private_defines, public_defines + +def sym2def(symbol): + define = symbol.replace(' ', '_') + return define.upper() + +def check_mathlib(config_cmd): + # Testing the C math library + mathlibs = [] + tc = testcode_mathlib() + mathlibs_choices = [[],['m'],['cpml']] + mathlib = os.environ.get('MATHLIB') + if mathlib: + mathlibs_choices.insert(0,mathlib.split(',')) + for libs in mathlibs_choices: + if config_cmd.try_run(tc,libraries=libs): + mathlibs = libs + break + else: + raise EnvironmentError("math library missing; rerun " + "setup.py after setting the " + "MATHLIB env variable") + return mathlibs + def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration,dot_join from numpy.distutils.system_info import get_info, default_lib_dirs @@ -131,70 +218,31 @@ def generate_config_h(ext, build_dir): target = join(build_dir,header_dir,'config.h') - dir = os.path.dirname(target) - if not os.path.exists(dir): - os.makedirs(dir) + d = os.path.dirname(target) + if not os.path.exists(d): + os.makedirs(d) if newer(__file__,target): config_cmd = config.get_config_cmd() log.info('Generating %s',target) - tc = generate_testcode(target) - from distutils import sysconfig - python_include = sysconfig.get_python_inc() - python_h = join(python_include, 'Python.h') - if not os.path.isfile(python_h): - raise SystemError,\ - "Non-existing %s. Perhaps you need to install"\ - " python-dev|python-devel." % (python_h) - result = config_cmd.try_run(tc,include_dirs=[python_include], - library_dirs = default_lib_dirs) - if not result: - raise SystemError,"Failed to test configuration. "\ - "See previous error messages for more information." - moredefs = [] - # - mathlibs = [] - tc = testcode_mathlib() - mathlibs_choices = [[],['m'],['cpml']] - mathlib = os.environ.get('MATHLIB') - if mathlib: - mathlibs_choices.insert(0,mathlib.split(',')) - for libs in mathlibs_choices: - if config_cmd.try_run(tc,libraries=libs): - mathlibs = libs - break - else: - raise EnvironmentError("math library missing; rerun " - "setup.py after setting the " - "MATHLIB env variable") - ext.libraries.extend(mathlibs) + # Check sizeof + moredefs, ignored = check_types(config, ext, build_dir) + + # Check math library and C99 math funcs availability + mathlibs = check_mathlib(config_cmd) moredefs.append(('MATHLIB',','.join(mathlibs))) check_math_capabilities(config_cmd, moredefs, mathlibs) + # Signal check if is_npy_no_signal(): moredefs.append('__NPY_PRIVATE_NO_SIGNAL') + # Windows checks if sys.platform=='win32' or os.name=='nt': - from numpy.distutils.misc_util import get_build_architecture - a = get_build_architecture() - print 'BUILD_ARCHITECTURE: %r, os.name=%r, sys.platform=%r' % (a, os.name, sys.platform) - if a == 'AMD64': - moredefs.append('DISTUTILS_USE_SDK') + win32_checks(moredefs) - if sys.version[:3] < '2.4': - if config_cmd.check_func('strtod', decl=False, - headers=['stdlib.h']): - moredefs.append(('PyOS_ascii_strtod', 'strtod')) - - if sys.platform == "win32": - from numpy.distutils.misc_util import get_build_architecture - # On win32, force long double format string to be 'g', not - # 'Lg', since the MS runtime does not support long double whose - # size is > sizeof(double) - if get_build_architecture()=="Intel": - moredefs.append('FORCE_NO_LONG_DOUBLE_FORMATTING') - + # Generate the config.h file from moredefs target_f = open(target,'a') for d in moredefs: if isinstance(d,str): @@ -202,16 +250,6 @@ else: target_f.write('#define %s %s\n' % (d[0],d[1])) - # Keep those for backward compatibility for now - target_f.write(""" -#ifdef HAVE_EXPL -#define HAVE_LONGDOUBLE_FUNCS -#endif - -#ifdef HAVE_EXPF -#define HAVE_FLOAT_FUNCS -#endif -""") target_f.close() print 'File:',target target_f = open(target) @@ -229,7 +267,11 @@ mathlibs.extend(value.split(',')) target_f.close() - ext.libraries.extend(mathlibs) + # Ugly: this can be called within a library and not an extension, + # in which case there is no libraries attributes (and none is + # needed). + if hasattr(ext, 'libraries'): + ext.libraries.extend(mathlibs) incl_dir = os.path.dirname(target) if incl_dir not in config.numpy_include_dirs: @@ -240,34 +282,35 @@ def generate_numpyconfig_h(ext, build_dir): """Depends on config.h: generate_config_h has to be called before !""" target = join(build_dir,header_dir,'numpyconfig.h') - dir = os.path.dirname(target) - if not os.path.exists(dir): - os.makedirs(dir) + d = os.path.dirname(target) + if not os.path.exists(d): + os.makedirs(d) if newer(__file__,target): config_cmd = config.get_config_cmd() log.info('Generating %s',target) testcode = generate_numpyconfig_code(target) - from distutils import sysconfig - python_include = sysconfig.get_python_inc() - python_h = join(python_include, 'Python.h') - if not os.path.isfile(python_h): - raise SystemError,\ - "Non-existing %s. Perhaps you need to install"\ - " python-dev|python-devel." % (python_h) - - config.numpy_include_dirs result = config_cmd.try_run(testcode, - include_dirs = [python_include] + \ - config.numpy_include_dirs, - library_dirs = default_lib_dirs) - + include_dirs=config.numpy_include_dirs, + library_dirs=default_lib_dirs) if not result: raise SystemError,"Failed to generate numpy configuration. "\ "See previous error messages for more information." moredefs = [] + # Normally, isnan and isinf are macro (C99), but some platforms + # only have func, or both func and macro version. Check for macro + # only, and define replacement ones if not found. + # Note: including Python.h is necessary because it modifies some + # math.h definitions + # XXX: we check those twice... should decouple tests from + # config.h/numpyconfig.h to avoid this + for f in ["isnan", "isinf", "signbit", "isfinite"]: + st = config_cmd.check_decl(f, headers = ["Python.h", "math.h"]) + if st: + moredefs.append('NPY_HAVE_DECL_%s' % f.upper()) + # Check wether we can use inttypes (C99) formats if config_cmd.check_decl('PRIdPTR', headers = ['inttypes.h']): moredefs.append(('NPY_USE_C99_FORMATS', 1)) @@ -353,6 +396,23 @@ if sys.platform == 'cygwin': config.add_data_dir('include/numpy/fenv') + config.add_extension('_sort', + sources=[join('src','_sortmodule.c.src'), + generate_config_h, + generate_numpyconfig_h, + generate_numpy_api, + ], + ) + + # npymath needs the config.h and numpyconfig.h files to be generated, but + # build_clib cannot handle generate_config_h and generate_numpyconfig_h + # (don't ask). Because clib are generated before extensions, we have to + # explicitly add an extension which has generate_config_h and + # generate_numpyconfig_h as sources *before* adding npymath. + config.add_library('npymath', + sources=[join('src', 'npy_math.c.src')], + depends=[]) + config.add_extension('multiarray', sources = [join('src','multiarraymodule.c'), generate_config_h, @@ -364,6 +424,7 @@ join('*.py') ], depends = deps, + libraries=['npymath'], ) config.add_extension('umath', @@ -374,7 +435,6 @@ generate_ufunc_api, join('src','scalartypes.inc.src'), join('src','arraytypes.inc.src'), - join('src','umath_funcs_c99.inc.src'), join('src','umath_funcs.inc.src'), join('src','umath_loops.inc.src'), ], @@ -382,16 +442,9 @@ generate_umath_py, join(codegen_dir,'generate_ufunc_api.py'), ]+deps, + libraries=['npymath'], ) - config.add_extension('_sort', - sources=[join('src','_sortmodule.c.src'), - generate_config_h, - generate_numpyconfig_h, - generate_numpy_api, - ], - ) - config.add_extension('scalarmath', sources=[join('src','scalarmathmodule.c.src'), generate_config_h, @@ -446,70 +499,6 @@ """ import sys -def generate_testcode(target): - if sys.platform == 'win32': - target = target.replace('\\','\\\\') - testcode = [r''' -#include -#include -#include - -int main(int argc, char **argv) -{ - - FILE *fp; - - fp = fopen("'''+target+'''","w"); - '''] - - c_size_test = r''' -#ifndef %(sz)s - fprintf(fp,"#define %(sz)s %%d\n", sizeof(%(type)s)); -#else - fprintf(fp,"/* #define %(sz)s %%d */\n", %(sz)s); -#endif -''' - for sz, t in [('SIZEOF_SHORT', 'short'), - ('SIZEOF_INT', 'int'), - ('SIZEOF_LONG', 'long'), - ('SIZEOF_FLOAT', 'float'), - ('SIZEOF_DOUBLE', 'double'), - ('SIZEOF_LONG_DOUBLE', 'long double'), - ('SIZEOF_PY_INTPTR_T', 'Py_intptr_t'), - ]: - testcode.append(c_size_test % {'sz' : sz, 'type' : t}) - - testcode.append('#ifdef PY_LONG_LONG') - testcode.append(c_size_test % {'sz' : 'SIZEOF_LONG_LONG', - 'type' : 'PY_LONG_LONG'}) - testcode.append(c_size_test % {'sz' : 'SIZEOF_PY_LONG_LONG', - 'type' : 'PY_LONG_LONG'}) - - - testcode.append(r''' -#else - fprintf(fp, "/* PY_LONG_LONG not defined */\n"); -#endif -#ifndef CHAR_BIT - { - unsigned char var = 2; - int i=0; - while (var >= 2) { - var = var << 1; - i++; - } - fprintf(fp,"#define CHAR_BIT %d\n", i+1); - } -#else - fprintf(fp, "/* #define CHAR_BIT %d */\n", CHAR_BIT); -#endif - fclose(fp); - return 0; -} -''') - testcode = '\n'.join(testcode) - return testcode - def generate_numpyconfig_code(target): """Return the source code as a string of the code to generate the numpyconfig header file.""" Modified: trunk/numpy/core/src/_signbit.c =================================================================== --- trunk/numpy/core/src/_signbit.c 2009-02-26 10:38:30 UTC (rev 6480) +++ trunk/numpy/core/src/_signbit.c 2009-02-26 10:41:32 UTC (rev 6481) @@ -1,7 +1,7 @@ /* Adapted from cephes */ -static int -signbit_d(double x) +int +_npy_signbit_d(double x) { union { Modified: trunk/numpy/core/src/multiarraymodule.c =================================================================== --- trunk/numpy/core/src/multiarraymodule.c 2009-02-26 10:38:30 UTC (rev 6480) +++ trunk/numpy/core/src/multiarraymodule.c 2009-02-26 10:41:32 UTC (rev 6481) @@ -89,13 +89,6 @@ } /* - * XXX: We include c99 compat math module here because it is needed for - * numpyos.c (included by arrayobject). This is bad - we should separate - * declaration/implementation and share this in a lib. - */ -#include "umath_funcs_c99.inc" - -/* * Including this file is the only way I know how to declare functions * static in each file, and store the pointers from functions in both * arrayobject.c and multiarraymodule.c for the C-API Copied: trunk/numpy/core/src/npy_math.c.src (from rev 6479, trunk/numpy/core/src/umath_funcs_c99.inc.src) =================================================================== --- trunk/numpy/core/src/umath_funcs_c99.inc.src 2009-02-26 08:46:34 UTC (rev 6479) +++ trunk/numpy/core/src/npy_math.c.src 2009-02-26 10:41:32 UTC (rev 6481) @@ -0,0 +1,423 @@ +/* + * vim:syntax=c + * A small module to implement missing C99 math capabilities required by numpy + * + * Please keep this independant of python ! Only basic types (npy_longdouble) + * can be used, otherwise, pure C, without any use of Python facilities + * + * How to add a function to this section + * ------------------------------------- + * + * Say you want to add `foo`, these are the steps and the reasons for them. + * + * 1) Add foo to the appropriate list in the configuration system. The + * lists can be found in numpy/core/setup.py lines 63-105. Read the + * comments that come with them, they are very helpful. + * + * 2) The configuration system will define a macro HAVE_FOO if your function + * can be linked from the math library. The result can depend on the + * optimization flags as well as the compiler, so can't be known ahead of + * time. If the function can't be linked, then either it is absent, defined + * as a macro, or is an intrinsic (hardware) function. + * + * i) Undefine any possible macros: + * + * #ifdef foo + * #undef foo + * #endif + * + * ii) Avoid as much as possible to declare any function here. Declaring + * functions is not portable: some platforms define some function inline + * with a non standard identifier, for example, or may put another + * idendifier which changes the calling convention of the function. If you + * really have to, ALWAYS declare it for the one platform you are dealing + * with: + * + * Not ok: + * double exp(double a); + * + * Ok: + * #ifdef SYMBOL_DEFINED_WEIRD_PLATFORM + * double exp(double); + * #endif + */ + +#include +#include + +#include "config.h" +#include "numpy/npy_math.h" + +/* + ***************************************************************************** + ** BASIC MATH FUNCTIONS ** + ***************************************************************************** + */ + +/* Original code by Konrad Hinsen. */ +#ifndef HAVE_EXPM1 +static double expm1(double x) +{ + double u = exp(x); + if (u == 1.0) { + return x; + } else if (u-1.0 == -1.0) { + return -1; + } else { + return (u-1.0) * x/log(u); + } +} +#endif + +#ifndef HAVE_LOG1P +static double log1p(double x) +{ + double u = 1. + x; + if (u == 1.0) { + return x; + } else { + return log(u) * x / (u - 1); + } +} +#endif + +#ifndef HAVE_HYPOT +static double hypot(double x, double y) +{ + double yx; + + x = fabs(x); + y = fabs(y); + if (x < y) { + double temp = x; + x = y; + y = temp; + } + if (x == 0.) + return 0.; + else { + yx = y/x; + return x*sqrt(1.+yx*yx); + } +} +#endif + +#ifndef HAVE_ACOSH +static double acosh(double x) +{ + return 2*log(sqrt((x+1.0)/2)+sqrt((x-1.0)/2)); +} +#endif + +#ifndef HAVE_ASINH +static double asinh(double xx) +{ + double x, d; + int sign; + if (xx < 0.0) { + sign = -1; + x = -xx; + } + else { + sign = 1; + x = xx; + } + if (x > 1e8) { + d = x; + } else { + d = sqrt(x*x + 1); + } + return sign*log1p(x*(1.0 + x/(d+1))); +} +#endif + +#ifndef HAVE_ATANH +static double atanh(double x) +{ + if (x > 0) { + return -0.5*log1p(-2.0*x/(1.0 + x)); + } + else { + return 0.5*log1p(2.0*x/(1.0 - x)); + } +} +#endif + +#ifndef HAVE_RINT +static double rint(double x) +{ + double y, r; + + y = floor(x); + r = x - y; + + if (r > 0.5) goto rndup; + + /* Round to nearest even */ + if (r==0.5) { + r = y - 2.0*floor(0.5*y); + if (r==1.0) { + rndup: + y+=1.0; + } + } + return y; +} +#endif + +#ifndef HAVE_TRUNC +static double trunc(double x) +{ + return x < 0 ? ceil(x) : floor(x); +} +#endif + +#ifndef HAVE_EXP2 +#define LOG2 0.69314718055994530943 +static double exp2(double x) +{ + return exp(LOG2*x); +} +#undef LOG2 +#endif + +#ifndef HAVE_LOG2 +#define INVLOG2 1.4426950408889634074 +static double log2(double x) +{ + return INVLOG2*log(x); +} +#undef INVLOG2 +#endif + +/* + ***************************************************************************** + ** IEEE 754 FPU HANDLING ** + ***************************************************************************** + */ +#if !defined(HAVE_DECL_SIGNBIT) +#include "_signbit.c" + +int _npy_signbit_f (float x) +{ + return _npy_signbit_d((double)x); +} + +int _npy_signbit_ld (long double x) +{ + return _npy_signbit_d((double)x); +} +#endif + +/* + * if C99 extensions not available then define dummy functions that use the + * double versions for + * + * sin, cos, tan + * sinh, cosh, tanh, + * fabs, floor, ceil, rint, trunc + * sqrt, log10, log, exp, expm1 + * asin, acos, atan, + * asinh, acosh, atanh + * + * hypot, atan2, pow, fmod, modf + * + * We assume the above are always available in their double versions. + * + * NOTE: some facilities may be available as macro only instead of functions. + * For simplicity, we define our own functions and undef the macros. We could + * instead test for the macro, but I am lazy to do that for now. + */ + +/**begin repeat + * #type = npy_longdouble, float# + * #TYPE = NPY_LONGDOUBLE, FLOAT# + * #c = l,f# + * #C = L,F# + */ + +/**begin repeat1 + * #kind = sin,cos,tan,sinh,cosh,tanh,fabs,floor,ceil,rint,trunc,sqrt,log10, + * log,exp,expm1,asin,acos,atan,asinh,acosh,atanh,log1p,exp2,log2# + * #KIND = SIN,COS,TAN,SINH,COSH,TANH,FABS,FLOOR,CEIL,RINT,TRUNC,SQRT,LOG10, + * LOG,EXP,EXPM1,ASIN,ACOS,ATAN,ASINH,ACOSH,ATANH,LOG1P,EXP2,LOG2# + */ + +#ifdef @kind@@c@ +#undef @kind@@c@ +#endif +#ifndef HAVE_ at KIND@@C@ +static @type@ @kind@@c@(@type@ x) +{ + return (@type@) @kind@((double)x); +} +#endif + +/**end repeat1**/ + +/**begin repeat1 + * #kind = atan2,hypot,pow,fmod# + * #KIND = ATAN2,HYPOT,POW,FMOD# + */ +#ifdef @kind@@c@ +#undef @kind@@c@ +#endif +#ifndef HAVE_ at KIND@@C@ +static @type@ @kind@@c@(@type@ x, @type@ y) +{ + return (@type@) @kind@((double)x, (double) y); +} +#endif +/**end repeat1**/ + +#ifdef modf at c@ +#undef modf at c@ +#endif +#ifndef HAVE_MODF at C@ +static @type@ modf at c@(@type@ x, @type@ *iptr) +{ + double niptr; + double y = modf((double)x, &niptr); + *iptr = (@type@) niptr; + return (@type@) y; +} +#endif + +/**end repeat**/ + +/* + * Useful constants in three precisions: + * XXX: those should really be in the header + */ + +/**begin repeat + * #c = f, ,l# + * #C = F, ,L# + */ +#define NPY_E at c@ 2.7182818284590452353602874713526625 at C@ /* e */ +#define NPY_LOG2E at c@ 1.4426950408889634073599246810018921 at C@ /* log_2 e */ +#define NPY_LOG10E at c@ 0.4342944819032518276511289189166051 at C@ /* log_10 e */ +#define NPY_LOGE2 at c@ 0.6931471805599453094172321214581766 at C@ /* log_e 2 */ +#define NPY_LOGE10 at c@ 2.3025850929940456840179914546843642 at C@ /* log_e 10 */ +#define NPY_PI at c@ 3.1415926535897932384626433832795029 at C@ /* pi */ +#define NPY_PI_2 at c@ 1.5707963267948966192313216916397514 at C@ /* pi/2 */ +#define NPY_PI_4 at c@ 0.7853981633974483096156608458198757 at C@ /* pi/4 */ +#define NPY_1_PI at c@ 0.3183098861837906715377675267450287 at C@ /* 1/pi */ +#define NPY_2_PI at c@ 0.6366197723675813430755350534900574 at C@ /* 2/pi */ +/**end repeat**/ + +/* + * Non standard functions + */ + +/**begin repeat + * #type = float, double, npy_longdouble# + * #c = f, ,l# + * #C = F, ,L# + */ + +#define LOGE2 NPY_LOGE2 at c@ +#define LOG2E NPY_LOG2E at c@ +#define RAD2DEG (180.0 at c@/NPY_PI at c@) +#define DEG2RAD (NPY_PI at c@/180.0 at c@) + +static @type@ rad2deg at c@(@type@ x) +{ + return x*RAD2DEG; +} + +static @type@ deg2rad at c@(@type@ x) +{ + return x*DEG2RAD; +} + +static @type@ log2_1p at c@(@type@ x) +{ + @type@ u = 1 + x; + if (u == 1) { + return LOG2E*x; + } else { + return npy_log2 at c@(u) * x / (u - 1); + } +} + +static @type@ exp2_1m at c@(@type@ x) +{ + @type@ u = exp at c@(x); + if (u == 1.0) { + return LOGE2*x; + } else if (u - 1 == -1) { + return -LOGE2; + } else { + return (u - 1) * x/npy_log2 at c@(u); + } +} + +static @type@ logaddexp at c@(@type@ x, @type@ y) +{ + const @type@ tmp = x - y; + if (tmp > 0) { + return x + npy_log1p at c@(npy_exp at c@(-tmp)); + } + else { + return y + npy_log1p at c@(npy_exp at c@(tmp)); + } +} + +static @type@ logaddexp2 at c@(@type@ x, @type@ y) +{ + const @type@ tmp = x - y; + if (tmp > 0) { + return x + log2_1p at c@(npy_exp2 at c@(-tmp)); + } + else { + return y + log2_1p at c@(npy_exp2 at c@(tmp)); + } +} + +#define degrees at c@ rad2deg at c@ +#define radians at c@ deg2rad at c@ + +#undef LOGE2 +#undef LOG2E +#undef RAD2DEG +#undef DEG2RAD + +/**end repeat**/ + +/* + * Decorate all the functions: those are the public ones + */ + +/**begin repeat + * #type = npy_longdouble,double,float# + * #c = l,,f# + */ +/**begin repeat1 + * #kind = sin,cos,tan,sinh,cosh,tanh,fabs,floor,ceil,rint,trunc,sqrt,log10, + * log,exp,expm1,asin,acos,atan,asinh,acosh,atanh,log1p,exp2,log2, + * rad2deg,deg2rad,exp2_1m# + */ + + at type@ npy_ at kind@@c@(@type@ x) +{ + return @kind@@c@(x); +} + +/**end repeat1**/ + +/**begin repeat1 + * #kind = atan2,hypot,pow,fmod,logaddexp,logaddexp2# + */ + at type@ npy_ at kind@@c@(@type@ x, @type@ y) +{ + return @kind@@c@(x, y); +} +/**end repeat1**/ + + at type@ npy_modf at c@(@type@ x, @type@ *iptr) +{ + return modf at c@(x, iptr); +} + +/**end repeat**/ Modified: trunk/numpy/core/src/numpyos.c =================================================================== --- trunk/numpy/core/src/numpyos.c 2009-02-26 10:38:30 UTC (rev 6480) +++ trunk/numpy/core/src/numpyos.c 2009-02-26 10:41:32 UTC (rev 6481) @@ -1,6 +1,8 @@ #include #include +#include "numpy/npy_math.h" + /* From the C99 standard, section 7.19.6: The exponent always contains at least two digits, and only as many more digits as necessary to represent the exponent. @@ -249,21 +251,21 @@ const char *format, \ type val, int decimal) \ { \ - if (isfinite(val)) { \ + if (npy_isfinite(val)) { \ if(_check_ascii_format(format)) { \ return NULL; \ } \ PyOS_snprintf(buffer, buf_size, format, (print_type)val); \ return _fix_ascii_format(buffer, buf_size, decimal); \ } \ - else if (isnan(val)){ \ + else if (npy_isnan(val)){ \ if (buf_size < 4) { \ return NULL; \ } \ strcpy(buffer, "nan"); \ } \ else { \ - if (signbit(val)) { \ + if (npy_signbit(val)) { \ if (buf_size < 5) { \ return NULL; \ } \ Modified: trunk/numpy/core/src/umath_funcs.inc.src =================================================================== --- trunk/numpy/core/src/umath_funcs.inc.src 2009-02-26 10:38:30 UTC (rev 6480) +++ trunk/numpy/core/src/umath_funcs.inc.src 2009-02-26 10:41:32 UTC (rev 6481) @@ -10,110 +10,7 @@ #define M_LOG10_E 0.434294481903251827651128918916605082294397 -/* Useful constants in three precisions.*/ - -/**begin repeat - * #c = f, ,l# - * #C = F, ,L# - */ -#define NPY_E at c@ 2.7182818284590452353602874713526625 at C@ /* e */ -#define NPY_LOG2E at c@ 1.4426950408889634073599246810018921 at C@ /* log_2 e */ -#define NPY_LOG10E at c@ 0.4342944819032518276511289189166051 at C@ /* log_10 e */ -#define NPY_LOGE2 at c@ 0.6931471805599453094172321214581766 at C@ /* log_e 2 */ -#define NPY_LOGE10 at c@ 2.3025850929940456840179914546843642 at C@ /* log_e 10 */ -#define NPY_PI at c@ 3.1415926535897932384626433832795029 at C@ /* pi */ -#define NPY_PI_2 at c@ 1.5707963267948966192313216916397514 at C@ /* pi/2 */ -#define NPY_PI_4 at c@ 0.7853981633974483096156608458198757 at C@ /* pi/4 */ -#define NPY_1_PI at c@ 0.3183098861837906715377675267450287 at C@ /* 1/pi */ -#define NPY_2_PI at c@ 0.6366197723675813430755350534900574 at C@ /* 2/pi */ -/**end repeat**/ - /* - ****************************************************************************** - ** FLOAT FUNCTIONS ** - ****************************************************************************** - */ - -/**begin repeat - * #type = float, double, longdouble# - * #c = f, ,l# - * #C = F, ,L# - */ - -#define LOGE2 NPY_LOGE2 at c@ -#define LOG2E NPY_LOG2E at c@ -#define RAD2DEG (180.0 at c@/NPY_PI at c@) -#define DEG2RAD (NPY_PI at c@/180.0 at c@) - -static @type@ -rad2deg at c@(@type@ x) { - return x*RAD2DEG; -} - -static @type@ -deg2rad at c@(@type@ x) { - return x*DEG2RAD; -} - -static @type@ -log2_1p at c@(@type@ x) -{ - @type@ u = 1 + x; - if (u == 1) { - return LOG2E*x; - } else { - return log2 at c@(u) * x / (u - 1); - } -} - -static @type@ -exp2_1m at c@(@type@ x) -{ - @type@ u = exp at c@(x); - if (u == 1.0) { - return LOGE2*x; - } else if (u - 1 == -1) { - return -LOGE2; - } else { - return (u - 1) * x/log2 at c@(u); - } -} - -static @type@ -logaddexp at c@(@type@ x, @type@ y) -{ - const @type@ tmp = x - y; - if (tmp > 0) { - return x + log1p at c@(exp at c@(-tmp)); - } - else { - return y + log1p at c@(exp at c@(tmp)); - } -} - -static @type@ -logaddexp2 at c@(@type@ x, @type@ y) -{ - const @type@ tmp = x - y; - if (tmp > 0) { - return x + log2_1p at c@(exp2 at c@(-tmp)); - } - else { - return y + log2_1p at c@(exp2 at c@(tmp)); - } -} - -#define degrees at c@ rad2deg at c@ -#define radians at c@ deg2rad at c@ - -#undef LOGE2 -#undef LOG2E -#undef RAD2DEG -#undef DEG2RAD - -/**end repeat**/ - -/* ***************************************************************************** ** PYTHON OBJECT FUNCTIONS ** ***************************************************************************** @@ -261,7 +158,7 @@ if (x->real == 0. && x->imag == 0.) *r = *x; else { - s = sqrt at c@((fabs at c@(x->real) + hypot at c@(x->real,x->imag))/2); + s = npy_sqrt at c@((npy_fabs at c@(x->real) + npy_hypot at c@(x->real,x->imag))/2); d = x->imag/(2*s); if (x->real > 0) { r->real = s; @@ -282,43 +179,43 @@ static void nc_rint at c@(c at typ@ *x, c at typ@ *r) { - r->real = rint at c@(x->real); - r->imag = rint at c@(x->imag); + r->real = npy_rint at c@(x->real); + r->imag = npy_rint at c@(x->imag); } static void nc_log at c@(c at typ@ *x, c at typ@ *r) { - @typ@ l = hypot at c@(x->real,x->imag); - r->imag = atan2 at c@(x->imag, x->real); - r->real = log at c@(l); + @typ@ l = npy_hypot at c@(x->real,x->imag); + r->imag = npy_atan2 at c@(x->imag, x->real); + r->real = npy_log at c@(l); return; } static void nc_log1p at c@(c at typ@ *x, c at typ@ *r) { - @typ@ l = hypot at c@(x->real + 1,x->imag); - r->imag = atan2 at c@(x->imag, x->real + 1); - r->real = log at c@(l); + @typ@ l = npy_hypot at c@(x->real + 1,x->imag); + r->imag = npy_atan2 at c@(x->imag, x->real + 1); + r->real = npy_log at c@(l); return; } static void nc_exp at c@(c at typ@ *x, c at typ@ *r) { - @typ@ a = exp at c@(x->real); - r->real = a*cos at c@(x->imag); - r->imag = a*sin at c@(x->imag); + @typ@ a = npy_exp at c@(x->real); + r->real = a*npy_cos at c@(x->imag); + r->imag = a*npy_sin at c@(x->imag); return; } static void nc_expm1 at c@(c at typ@ *x, c at typ@ *r) { - @typ@ a = exp at c@(x->real); - r->real = a*cos at c@(x->imag) - 1; - r->imag = a*sin at c@(x->imag); + @typ@ a = npy_exp at c@(x->real); + r->real = a*npy_cos at c@(x->imag) - 1; + r->imag = a*npy_sin at c@(x->imag); return; } @@ -483,8 +380,8 @@ nc_cos at c@(c at typ@ *x, c at typ@ *r) { @typ@ xr=x->real, xi=x->imag; - r->real = cos at c@(xr)*cosh at c@(xi); - r->imag = -sin at c@(xr)*sinh at c@(xi); + r->real = npy_cos at c@(xr)*npy_cosh at c@(xi); + r->imag = -npy_sin at c@(xr)*npy_sinh at c@(xi); return; } @@ -492,8 +389,8 @@ nc_cosh at c@(c at typ@ *x, c at typ@ *r) { @typ@ xr=x->real, xi=x->imag; - r->real = cos at c@(xi)*cosh at c@(xr); - r->imag = sin at c@(xi)*sinh at c@(xr); + r->real = npy_cos at c@(xi)*npy_cosh at c@(xr); + r->imag = npy_sin at c@(xi)*npy_sinh at c@(xr); return; } @@ -510,8 +407,8 @@ nc_sin at c@(c at typ@ *x, c at typ@ *r) { @typ@ xr=x->real, xi=x->imag; - r->real = sin at c@(xr)*cosh at c@(xi); - r->imag = cos at c@(xr)*sinh at c@(xi); + r->real = npy_sin at c@(xr)*npy_cosh at c@(xi); + r->imag = npy_cos at c@(xr)*npy_sinh at c@(xi); return; } @@ -519,8 +416,8 @@ nc_sinh at c@(c at typ@ *x, c at typ@ *r) { @typ@ xr=x->real, xi=x->imag; - r->real = cos at c@(xi)*sinh at c@(xr); - r->imag = sin at c@(xi)*cosh at c@(xr); + r->real = npy_cos at c@(xi)*npy_sinh at c@(xr); + r->imag = npy_sin at c@(xi)*npy_cosh at c@(xr); return; } @@ -531,10 +428,10 @@ @typ@ rs,is,rc,ic; @typ@ d; @typ@ xr=x->real, xi=x->imag; - sr = sin at c@(xr); - cr = cos at c@(xr); - shi = sinh at c@(xi); - chi = cosh at c@(xi); + sr = npy_sin at c@(xr); + cr = npy_cos at c@(xr); + shi = npy_sinh at c@(xi); + chi = npy_cosh at c@(xi); rs = sr*chi; is = cr*shi; rc = cr*chi; @@ -552,10 +449,10 @@ @typ@ rs,is,rc,ic; @typ@ d; @typ@ xr=x->real, xi=x->imag; - si = sin at c@(xi); - ci = cos at c@(xi); - shr = sinh at c@(xr); - chr = cosh at c@(xr); + si = npy_sin at c@(xi); + ci = npy_cos at c@(xi); + shr = npy_sinh at c@(xr); + chr = npy_cosh at c@(xr); rs = ci*shr; is = si*chr; rc = ci*chr; Deleted: trunk/numpy/core/src/umath_funcs_c99.inc.src =================================================================== --- trunk/numpy/core/src/umath_funcs_c99.inc.src 2009-02-26 10:38:30 UTC (rev 6480) +++ trunk/numpy/core/src/umath_funcs_c99.inc.src 2009-02-26 10:41:32 UTC (rev 6481) @@ -1,304 +0,0 @@ -/* - * vim:syntax=c - * A small module to implement missing C99 math capabilities required by numpy - * - * Please keep this independant of python ! - * - * How to add a function to this section - * ------------------------------------- - * - * Say you want to add `foo`, these are the steps and the reasons for them. - * - * 1) Add foo to the appropriate list in the configuration system. The - * lists can be found in numpy/core/setup.py lines 63-105. Read the - * comments that come with them, they are very helpful. - * - * 2) The configuration system will define a macro HAVE_FOO if your function - * can be linked from the math library. The result can depend on the - * optimization flags as well as the compiler, so can't be known ahead of - * time. If the function can't be linked, then either it is absent, defined - * as a macro, or is an intrinsic (hardware) function. - * - * i) Undefine any possible macros: - * - * #ifdef foo - * #undef foo - * #endif - * - * ii) Avoid as much as possible to declare any function here. Declaring - * functions is not portable: some platforms define some function inline - * with a non standard identifier, for example, or may put another - * idendifier which changes the calling convention of the function. If you - * really have to, ALWAYS declare it for the one platform you are dealing - * with: - * - * Not ok: - * double exp(double a); - * - * Ok: - * #ifdef SYMBOL_DEFINED_WEIRD_PLATFORM - * double exp(double); - * #endif - */ - -/* - ***************************************************************************** - ** DISTRO VOODOO ** - ***************************************************************************** - */ - - -/* - ***************************************************************************** - ** BASIC MATH FUNCTIONS ** - ***************************************************************************** - */ - -/* Original code by Konrad Hinsen. */ -#ifndef HAVE_EXPM1 -double expm1(double x) -{ - double u = exp(x); - if (u == 1.0) { - return x; - } else if (u-1.0 == -1.0) { - return -1; - } else { - return (u-1.0) * x/log(u); - } -} -#endif - -#ifndef HAVE_LOG1P -double log1p(double x) -{ - double u = 1. + x; - if (u == 1.0) { - return x; - } else { - return log(u) * x / (u - 1); - } -} -#endif - -#ifndef HAVE_HYPOT -double hypot(double x, double y) -{ - double yx; - - x = fabs(x); - y = fabs(y); - if (x < y) { - double temp = x; - x = y; - y = temp; - } - if (x == 0.) - return 0.; - else { - yx = y/x; - return x*sqrt(1.+yx*yx); - } -} -#endif - -#ifndef HAVE_ACOSH -double acosh(double x) -{ - return 2*log(sqrt((x+1.0)/2)+sqrt((x-1.0)/2)); -} -#endif - -#ifndef HAVE_ASINH -double asinh(double xx) -{ - double x, d; - int sign; - if (xx < 0.0) { - sign = -1; - x = -xx; - } - else { - sign = 1; - x = xx; - } - if (x > 1e8) { - d = x; - } else { - d = sqrt(x*x + 1); - } - return sign*log1p(x*(1.0 + x/(d+1))); -} -#endif - -#ifndef HAVE_ATANH -double atanh(double x) -{ - if (x > 0) { - return -0.5*log1p(-2.0*x/(1.0 + x)); - } - else { - return 0.5*log1p(2.0*x/(1.0 - x)); - } -} -#endif - -#ifndef HAVE_RINT -double rint(double x) -{ - double y, r; - - y = floor(x); - r = x - y; - - if (r > 0.5) goto rndup; - - /* Round to nearest even */ - if (r==0.5) { - r = y - 2.0*floor(0.5*y); - if (r==1.0) { - rndup: - y+=1.0; - } - } - return y; -} -#endif - -#ifndef HAVE_TRUNC -double trunc(double x) -{ - return x < 0 ? ceil(x) : floor(x); -} -#endif - -#ifndef HAVE_EXP2 -#define LOG2 0.69314718055994530943 -double exp2(double x) -{ - return exp(LOG2*x); -} -#undef LOG2 -#endif - -#ifndef HAVE_LOG2 -#define INVLOG2 1.4426950408889634074 -double log2(double x) -{ - return INVLOG2*log(x); -} -#undef INVLOG2 -#endif - -/* - ***************************************************************************** - ** IEEE 754 FPU HANDLING ** - ***************************************************************************** - */ -#if !defined(HAVE_DECL_ISNAN) - # define isnan(x) ((x) != (x)) -#endif - -/* VS 2003 with /Ox optimizes (x)-(x) to 0, which is not IEEE compliant. So we - * force (x) + (-x), which seems to work. */ -#if !defined(HAVE_DECL_ISFINITE) - # define isfinite(x) !isnan((x) + (-x)) -#endif - -#if !defined(HAVE_DECL_ISINF) -#define isinf(x) (!isfinite(x) && !isnan(x)) -#endif - -#if !defined(HAVE_DECL_SIGNBIT) - #include "_signbit.c" - # define signbit(x) \ - (sizeof (x) == sizeof (long double) ? signbit_ld (x) \ - : sizeof (x) == sizeof (double) ? signbit_d (x) \ - : signbit_f (x)) - -static int signbit_f (float x) -{ - return signbit_d((double)x); -} - -static int signbit_ld (long double x) -{ - return signbit_d((double)x); -} -#endif - -/* - * if C99 extensions not available then define dummy functions that use the - * double versions for - * - * sin, cos, tan - * sinh, cosh, tanh, - * fabs, floor, ceil, rint, trunc - * sqrt, log10, log, exp, expm1 - * asin, acos, atan, - * asinh, acosh, atanh - * - * hypot, atan2, pow, fmod, modf - * - * We assume the above are always available in their double versions. - * - * NOTE: some facilities may be available as macro only instead of functions. - * For simplicity, we define our own functions and undef the macros. We could - * instead test for the macro, but I am lazy to do that for now. - */ - -/**begin repeat - * #type = longdouble, float# - * #TYPE = LONGDOUBLE, FLOAT# - * #c = l,f# - * #C = L,F# - */ - -/**begin repeat1 - * #kind = sin,cos,tan,sinh,cosh,tanh,fabs,floor,ceil,rint,trunc,sqrt,log10, - * log,exp,expm1,asin,acos,atan,asinh,acosh,atanh,log1p,exp2,log2# - * #KIND = SIN,COS,TAN,SINH,COSH,TANH,FABS,FLOOR,CEIL,RINT,TRUNC,SQRT,LOG10, - * LOG,EXP,EXPM1,ASIN,ACOS,ATAN,ASINH,ACOSH,ATANH,LOG1P,EXP2,LOG2# - */ - -#ifdef @kind@@c@ -#undef @kind@@c@ -#endif -#ifndef HAVE_ at KIND@@C@ - at type@ @kind@@c@(@type@ x) -{ - return (@type@) @kind@((double)x); -} -#endif - -/**end repeat1**/ - -/**begin repeat1 - * #kind = atan2,hypot,pow,fmod# - * #KIND = ATAN2,HYPOT,POW,FMOD# - */ -#ifdef @kind@@c@ -#undef @kind@@c@ -#endif -#ifndef HAVE_ at KIND@@C@ - at type@ @kind@@c@(@type@ x, @type@ y) -{ - return (@type@) @kind@((double)x, (double) y); -} -#endif -/**end repeat1**/ - -#ifdef modf at c@ -#undef modf at c@ -#endif -#ifndef HAVE_MODF at C@ - at type@ modf at c@(@type@ x, @type@ *iptr) -{ - double niptr; - double y = modf((double)x, &niptr); - *iptr = (@type@) niptr; - return (@type@) y; -} -#endif - -/**end repeat**/ Modified: trunk/numpy/core/src/umath_loops.inc.src =================================================================== --- trunk/numpy/core/src/umath_loops.inc.src 2009-02-26 10:38:30 UTC (rev 6480) +++ trunk/numpy/core/src/umath_loops.inc.src 2009-02-26 10:41:32 UTC (rev 6481) @@ -860,7 +860,7 @@ /**begin repeat1 * #kind = isnan, isinf, isfinite, signbit# - * #func = isnan, isinf, isfinite, signbit# + * #func = npy_isnan, npy_isinf, npy_isfinite, npy_signbit# **/ static void @TYPE at _@kind@(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func)) @@ -883,7 +883,7 @@ BINARY_LOOP { const @type@ in1 = *(@type@ *)ip1; const @type@ in2 = *(@type@ *)ip2; - *((@type@ *)op1) = (in1 @OP@ in2 || isnan(in1)) ? in1 : in2; + *((@type@ *)op1) = (in1 @OP@ in2 || npy_isnan(in1)) ? in1 : in2; } } /**end repeat1**/ @@ -899,7 +899,7 @@ BINARY_LOOP { const @type@ in1 = *(@type@ *)ip1; const @type@ in2 = *(@type@ *)ip2; - *((@type@ *)op1) = (in1 @OP@ in2 || isnan(in2)) ? in1 : in2; + *((@type@ *)op1) = (in1 @OP@ in2 || npy_isnan(in2)) ? in1 : in2; } } /**end repeat1**/ @@ -1173,7 +1173,7 @@ /**begin repeat1 * #kind = isnan, isinf, isfinite# - * #func = isnan, isinf, isfinite# + * #func = npy_isnan, npy_isinf, npy_isfinite# * #OP = ||, ||, &&# **/ static void @@ -1272,7 +1272,7 @@ const @type@ in1i = ((@type@ *)ip1)[1]; const @type@ in2r = ((@type@ *)ip2)[0]; const @type@ in2i = ((@type@ *)ip2)[1]; - if (@OP@(in1r, in1i, in2r, in2i) || isnan(in1r) || isnan(in1i)) { + if (@OP@(in1r, in1i, in2r, in2i) || npy_isnan(in1r) || npy_isnan(in1i)) { ((@type@ *)op1)[0] = in1r; ((@type@ *)op1)[1] = in1i; } @@ -1296,7 +1296,7 @@ const @type@ in1i = ((@type@ *)ip1)[1]; const @type@ in2r = ((@type@ *)ip2)[0]; const @type@ in2i = ((@type@ *)ip2)[1]; - if (@OP@(in1r, in1i, in2r, in2i) || isnan(in2r) || isnan(in2i)) { + if (@OP@(in1r, in1i, in2r, in2i) || npy_isnan(in2r) || npy_isnan(in2i)) { ((@type@ *)op1)[0] = in1r; ((@type@ *)op1)[1] = in1i; } Modified: trunk/numpy/core/src/umathmodule.c.src =================================================================== --- trunk/numpy/core/src/umathmodule.c.src 2009-02-26 10:38:30 UTC (rev 6480) +++ trunk/numpy/core/src/umathmodule.c.src 2009-02-26 10:41:32 UTC (rev 6481) @@ -23,18 +23,13 @@ #include "abstract.h" #include "config.h" -/* - * Looks like some versions of Python.h do naughty things, so math.h needs - * to come after. - */ -#include +#include "numpy/npy_math.h" /* ***************************************************************************** ** INCLUDE GENERATED CODE ** ***************************************************************************** */ -#include "umath_funcs_c99.inc" #include "umath_funcs.inc" #include "umath_loops.inc" #include "umath_ufunc_object.inc" Modified: trunk/numpy/distutils/command/build_clib.py =================================================================== --- trunk/numpy/distutils/command/build_clib.py 2009-02-26 10:38:30 UTC (rev 6480) +++ trunk/numpy/distutils/command/build_clib.py 2009-02-26 10:41:32 UTC (rev 6481) @@ -10,7 +10,8 @@ from numpy.distutils import log from distutils.dep_util import newer_group from numpy.distutils.misc_util import filter_sources, has_f_sources,\ - has_cxx_sources, all_strings, get_lib_source_files, is_sequence + has_cxx_sources, all_strings, get_lib_source_files, is_sequence, \ + get_numpy_include_dirs # Fix Python distutils bug sf #1718574: _l = old_build_clib.user_options @@ -162,8 +163,11 @@ macros = build_info.get('macros') include_dirs = build_info.get('include_dirs') + if include_dirs is None: + include_dirs = [] extra_postargs = build_info.get('extra_compiler_args') or [] + include_dirs.extend(get_numpy_include_dirs()) # where compiled F90 module files are: module_dirs = build_info.get('module_dirs') or [] module_build_dir = os.path.dirname(lib_file) Modified: trunk/numpy/distutils/command/config.py =================================================================== --- trunk/numpy/distutils/command/config.py 2009-02-26 10:38:30 UTC (rev 6480) +++ trunk/numpy/distutils/command/config.py 2009-02-26 10:41:32 UTC (rev 6481) @@ -11,6 +11,7 @@ from distutils.command.config import LANG_EXT from distutils import log from distutils.file_util import copy_file +from distutils.ccompiler import CompileError, LinkError import distutils from numpy.distutils.exec_command import exec_command from numpy.distutils.mingw32ccompiler import generate_manifest @@ -143,6 +144,12 @@ (body, headers, include_dirs, libraries, library_dirs, lang)) + def check_header(self, header, include_dirs=None, library_dirs=None, lang='c'): + self._check_compiler() + return self.try_compile( + "/* we need a dummy line to make distutils happy */", + [header], include_dirs) + def check_decl(self, symbol, headers=None, include_dirs=None): self._check_compiler() @@ -158,6 +165,81 @@ return self.try_compile(body, headers, include_dirs) + def check_type_size(self, type_name, headers=None, include_dirs=None, library_dirs=None): + """Check size of a given type.""" + # XXX: should also implement the cross-compiling version (using binary + # search + array indexing, see AC_CHECK_SIZEOF). + self._check_compiler() + + # We declare the functions to avoid warnings with -Wstrict-prototypes + body = r""" +typedef %(type)s _dist_type_sizeof_; + +static long int longval (void) +{ + return (long int) (sizeof (_dist_type_sizeof_)); +} +static unsigned long int ulongval (void) +{ + return (long int) (sizeof (_dist_type_sizeof_)); +} + +#include +#include +int +main (void) +{ + + if (((long int) (sizeof (_dist_type_sizeof_))) < 0) { + long int i = longval (); + if (i != ((long int) (sizeof (_dist_type_sizeof_)))) + return 1; + printf("%%ld\n", i); + } else { + unsigned long int i = ulongval (); + if (i != ((long int) (sizeof (_dist_type_sizeof_)))) + return 1; + printf("%%lu\n", i); + } + + return 0; +} +""" % {'type': type_name} + + # XXX: this should be refactored (same code as get_output) + exitcode, output = 255, '' + size = None + try: + src, obj, exe = self._link(body, headers, include_dirs, + [], library_dirs, 'c') + #exe = os.path.join('.', exe) + exitstatus, output = exec_command(exe, execute_in='.') + if hasattr(os, 'WEXITSTATUS'): + exitcode = os.WEXITSTATUS(exitstatus) + if os.WIFSIGNALED(exitstatus): + sig = os.WTERMSIG(exitstatus) + log.error('subprocess exited with signal %d' % (sig,)) + if sig == signal.SIGINT: + # control-C + raise KeyboardInterrupt + else: + exitcode = exitstatus + log.info("success!") + + try: + size = int(output) + except ValueError: + log.error("Unexpected output %s" % output) + log.info("failure") + except (CompileError, LinkError): + log.info("failure.") + + self._clean() + if size is not None: + return size + else: + return -1 + def check_func(self, func, headers=None, include_dirs=None, libraries=None, library_dirs=None, From numpy-svn at scipy.org Thu Feb 26 05:46:58 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Thu, 26 Feb 2009 04:46:58 -0600 (CST) Subject: [Numpy-svn] r6482 - branches/coremath Message-ID: <20090226104658.EA993C7C1AD@scipy.org> Author: cdavid Date: 2009-02-26 04:46:56 -0600 (Thu, 26 Feb 2009) New Revision: 6482 Modified: branches/coremath/ Log: Removed merge tracking for "svnmerge" for http://svn.scipy.org/svn/numpy/trunk Property changes on: branches/coremath ___________________________________________________________________ Name: svnmerge-integrated - /branches/distutils-revamp:1-2752 /branches/dynamic_cpu_configuration:1-6101 /branches/fix_float_format:1-6359 /branches/multicore:1-3687 /branches/numpy-mingw-w64:1-6150 /branches/visualstudio_manifest:1-6077 /trunk:1-6479 + /branches/distutils-revamp:1-2752 /branches/dynamic_cpu_configuration:1-6101 /branches/fix_float_format:1-6359 /branches/multicore:1-3687 /branches/numpy-mingw-w64:1-6150 /branches/visualstudio_manifest:1-6077 From numpy-svn at scipy.org Thu Feb 26 05:47:32 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Thu, 26 Feb 2009 04:47:32 -0600 (CST) Subject: [Numpy-svn] r6483 - branches Message-ID: <20090226104732.7D2B7C7C1AD@scipy.org> Author: cdavid Date: 2009-02-26 04:47:29 -0600 (Thu, 26 Feb 2009) New Revision: 6483 Removed: branches/coremath/ Log: Remove coremath branch - integrated into the trunk. From numpy-svn at scipy.org Thu Feb 26 06:44:19 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Thu, 26 Feb 2009 05:44:19 -0600 (CST) Subject: [Numpy-svn] r6484 - trunk/numpy/core Message-ID: <20090226114419.A5C38C7C1AB@scipy.org> Author: cdavid Date: 2009-02-26 05:44:14 -0600 (Thu, 26 Feb 2009) New Revision: 6484 Modified: trunk/numpy/core/setup.py Log: Fix typo which caused PY_LONG_LONG sizeof to be defined with the wrong size. Modified: trunk/numpy/core/setup.py =================================================================== --- trunk/numpy/core/setup.py 2009-02-26 10:47:29 UTC (rev 6483) +++ trunk/numpy/core/setup.py 2009-02-26 11:44:14 UTC (rev 6484) @@ -167,9 +167,12 @@ # We check declaration AND type because that's how distutils does it. if config_cmd.check_decl('PY_LONG_LONG', headers=['Python.h']): - st = config_cmd.check_type_size('PY_LONG_LONG', headers=['Python.h'], library_dirs=[pythonlib_dir()]) - assert not st == 0 - private_defines.append(('SIZEOF_%s' % sym2def('PY_LONG_LONG'), '%d' % res)) + res = config_cmd.check_type_size('PY_LONG_LONG', headers=['Python.h'], + library_dirs=[pythonlib_dir()]) + if res >= 0: + private_defines.append(('SIZEOF_%s' % sym2def('PY_LONG_LONG'), '%d' % res)) + else: + raise SystemError("Checking sizeof (%s) failed !" % 'PY_LONG_LONG') if not config_cmd.check_decl('CHAR_BIT', headers=['Python.h']): raise RuntimeError( From numpy-svn at scipy.org Thu Feb 26 07:37:15 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Thu, 26 Feb 2009 06:37:15 -0600 (CST) Subject: [Numpy-svn] r6485 - trunk/numpy/distutils/command Message-ID: <20090226123715.3BAD5C7C026@scipy.org> Author: cdavid Date: 2009-02-26 06:37:07 -0600 (Thu, 26 Feb 2009) New Revision: 6485 Modified: trunk/numpy/distutils/command/config.py Log: Find sizeof wo running on the target platform. Modified: trunk/numpy/distutils/command/config.py =================================================================== --- trunk/numpy/distutils/command/config.py 2009-02-26 11:44:14 UTC (rev 6484) +++ trunk/numpy/distutils/command/config.py 2009-02-26 12:37:07 UTC (rev 6485) @@ -167,79 +167,66 @@ def check_type_size(self, type_name, headers=None, include_dirs=None, library_dirs=None): """Check size of a given type.""" - # XXX: should also implement the cross-compiling version (using binary - # search + array indexing, see AC_CHECK_SIZEOF). self._check_compiler() - # We declare the functions to avoid warnings with -Wstrict-prototypes + # First check the type can be compiled body = r""" -typedef %(type)s _dist_type_sizeof_; - -static long int longval (void) +typedef %(type)s npy_check_sizeof_type; +int main () { - return (long int) (sizeof (_dist_type_sizeof_)); + static int test_array [1 - 2 * !(((long) (sizeof (npy_check_sizeof_type))) >= 0)]; + test_array [0] = 0 + + ; + return 0; } -static unsigned long int ulongval (void) -{ - return (long int) (sizeof (_dist_type_sizeof_)); -} +""" + self._compile(body % {'type': type_name}, + headers, include_dirs, 'c') + self._clean() -#include -#include -int -main (void) + # this fails to *compile* if size > sizeof(type) + body = r""" +typedef %(type)s npy_check_sizeof_type; +int main () { + static int test_array [1 - 2 * !(((long) (sizeof (npy_check_sizeof_type))) <= %(size)s)]; + test_array [0] = 0 - if (((long int) (sizeof (_dist_type_sizeof_))) < 0) { - long int i = longval (); - if (i != ((long int) (sizeof (_dist_type_sizeof_)))) - return 1; - printf("%%ld\n", i); - } else { - unsigned long int i = ulongval (); - if (i != ((long int) (sizeof (_dist_type_sizeof_)))) - return 1; - printf("%%lu\n", i); - } - + ; return 0; } -""" % {'type': type_name} +""" - # XXX: this should be refactored (same code as get_output) - exitcode, output = 255, '' - size = None - try: - src, obj, exe = self._link(body, headers, include_dirs, - [], library_dirs, 'c') - #exe = os.path.join('.', exe) - exitstatus, output = exec_command(exe, execute_in='.') - if hasattr(os, 'WEXITSTATUS'): - exitcode = os.WEXITSTATUS(exitstatus) - if os.WIFSIGNALED(exitstatus): - sig = os.WTERMSIG(exitstatus) - log.error('subprocess exited with signal %d' % (sig,)) - if sig == signal.SIGINT: - # control-C - raise KeyboardInterrupt - else: - exitcode = exitstatus - log.info("success!") + # The principle is simple: we first find low and high bounds of size + # for the type, where low/high are looked up on a log scale. Then, we + # do a binary search to find the exact size between low and high + low = 0 + mid = 0 + while True: + try: + self._compile(body % {'type': type_name, 'size': mid}, + headers, include_dirs, 'c') + self._clean() + break + except CompileError: + #log.info("failure to test for bound %d" % mid) + low = mid + 1 + mid = 2 * mid + 1 + high = mid + # Binary search: + while low != high: + mid = (high - low) / 2 + low try: - size = int(output) - except ValueError: - log.error("Unexpected output %s" % output) - log.info("failure") - except (CompileError, LinkError): - log.info("failure.") + self._compile(body % {'type': type_name, 'size': mid}, + headers, include_dirs, 'c') + self._clean() + high = mid + except CompileError: + low = mid + 1 + return low - self._clean() - if size is not None: - return size - else: - return -1 - def check_func(self, func, headers=None, include_dirs=None, libraries=None, library_dirs=None, From numpy-svn at scipy.org Thu Feb 26 07:37:30 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Thu, 26 Feb 2009 06:37:30 -0600 (CST) Subject: [Numpy-svn] r6486 - trunk/numpy/core Message-ID: <20090226123730.1CD54C7C026@scipy.org> Author: cdavid Date: 2009-02-26 06:37:26 -0600 (Thu, 26 Feb 2009) New Revision: 6486 Modified: trunk/numpy/core/setup.py Log: Fill-up public defines in check_types. Modified: trunk/numpy/core/setup.py =================================================================== --- trunk/numpy/core/setup.py 2009-02-26 12:37:07 UTC (rev 6485) +++ trunk/numpy/core/setup.py 2009-02-26 12:37:26 UTC (rev 6486) @@ -154,6 +154,7 @@ res = config_cmd.check_type_size(type) if res >= 0: private_defines.append(('SIZEOF_%s' % sym2def(type), '%d' % res)) + public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), '%d' % res)) else: raise SystemError("Checking sizeof (%s) failed !" % type) @@ -162,6 +163,7 @@ library_dirs=[pythonlib_dir()]) if res >= 0: private_defines.append(('SIZEOF_%s' % sym2def(type), '%d' % res)) + public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), '%d' % res)) else: raise SystemError("Checking sizeof (%s) failed !" % type) @@ -171,6 +173,7 @@ library_dirs=[pythonlib_dir()]) if res >= 0: private_defines.append(('SIZEOF_%s' % sym2def('PY_LONG_LONG'), '%d' % res)) + public_defines.append(('NPY_SIZEOF_%s' % sym2def('PY_LONG_LONG'), '%d' % res)) else: raise SystemError("Checking sizeof (%s) failed !" % 'PY_LONG_LONG') From numpy-svn at scipy.org Thu Feb 26 07:37:43 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Thu, 26 Feb 2009 06:37:43 -0600 (CST) Subject: [Numpy-svn] r6487 - trunk/numpy/core Message-ID: <20090226123743.D26F6C7C026@scipy.org> Author: cdavid Date: 2009-02-26 06:37:39 -0600 (Thu, 26 Feb 2009) New Revision: 6487 Modified: trunk/numpy/core/setup.py Log: Do not use testcode anymore for numpyconfig.h generation. Modified: trunk/numpy/core/setup.py =================================================================== --- trunk/numpy/core/setup.py 2009-02-26 12:37:26 UTC (rev 6486) +++ trunk/numpy/core/setup.py 2009-02-26 12:37:39 UTC (rev 6487) @@ -294,17 +294,18 @@ if newer(__file__,target): config_cmd = config.get_config_cmd() log.info('Generating %s',target) - testcode = generate_numpyconfig_code(target) - result = config_cmd.try_run(testcode, - include_dirs=config.numpy_include_dirs, - library_dirs=default_lib_dirs) - if not result: - raise SystemError,"Failed to generate numpy configuration. "\ - "See previous error messages for more information." + # Check sizeof + ignored, moredefs = check_types(config, ext, build_dir) - moredefs = [] + if is_npy_no_signal(): + moredefs.append(('NPY_NO_SIGNAL', 1)) + if is_npy_no_smp(): + moredefs.append(('NPY_NO_SMP', 1)) + else: + moredefs.append(('NPY_NO_SMP', 0)) + # Normally, isnan and isinf are macro (C99), but some platforms # only have func, or both func and macro version. Check for macro # only, and define replacement ones if not found. From numpy-svn at scipy.org Thu Feb 26 07:38:01 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Thu, 26 Feb 2009 06:38:01 -0600 (CST) Subject: [Numpy-svn] r6488 - trunk/numpy/core Message-ID: <20090226123801.68270C7C026@scipy.org> Author: cdavid Date: 2009-02-26 06:37:57 -0600 (Thu, 26 Feb 2009) New Revision: 6488 Modified: trunk/numpy/core/setup.py Log: BUG: forgot to add sizeof check for long long type when PY_LONG_LONG is defined. Modified: trunk/numpy/core/setup.py =================================================================== --- trunk/numpy/core/setup.py 2009-02-26 12:37:39 UTC (rev 6487) +++ trunk/numpy/core/setup.py 2009-02-26 12:37:57 UTC (rev 6488) @@ -177,6 +177,13 @@ else: raise SystemError("Checking sizeof (%s) failed !" % 'PY_LONG_LONG') + res = config_cmd.check_type_size('long long') + if res >= 0: + private_defines.append(('SIZEOF_%s' % sym2def('long long'), '%d' % res)) + public_defines.append(('NPY_SIZEOF_%s' % 'LONGLONG', '%d' % res)) + else: + raise SystemError("Checking sizeof (%s) failed !" % 'long long') + if not config_cmd.check_decl('CHAR_BIT', headers=['Python.h']): raise RuntimeError( "Config wo CHAR_BIT is not supported"\ From numpy-svn at scipy.org Thu Feb 26 07:38:14 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Thu, 26 Feb 2009 06:38:14 -0600 (CST) Subject: [Numpy-svn] r6489 - trunk/numpy/core Message-ID: <20090226123814.A285BC7C026@scipy.org> Author: cdavid Date: 2009-02-26 06:38:10 -0600 (Thu, 26 Feb 2009) New Revision: 6489 Modified: trunk/numpy/core/setup.py Log: BUG: fix sym2def. Some of our SIZEOF_ defines were not consistent with the ones actually used in the code (SIZEOF_LONG_DOUBLE vs SIZEOF_LONGDOUBLE). Modified: trunk/numpy/core/setup.py =================================================================== --- trunk/numpy/core/setup.py 2009-02-26 12:37:57 UTC (rev 6488) +++ trunk/numpy/core/setup.py 2009-02-26 12:38:10 UTC (rev 6489) @@ -180,7 +180,7 @@ res = config_cmd.check_type_size('long long') if res >= 0: private_defines.append(('SIZEOF_%s' % sym2def('long long'), '%d' % res)) - public_defines.append(('NPY_SIZEOF_%s' % 'LONGLONG', '%d' % res)) + public_defines.append(('NPY_SIZEOF_%s' % sym2def('long long'), '%d' % res)) else: raise SystemError("Checking sizeof (%s) failed !" % 'long long') @@ -192,7 +192,7 @@ return private_defines, public_defines def sym2def(symbol): - define = symbol.replace(' ', '_') + define = symbol.replace(' ', '') return define.upper() def check_mathlib(config_cmd): From numpy-svn at scipy.org Thu Feb 26 07:38:27 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Thu, 26 Feb 2009 06:38:27 -0600 (CST) Subject: [Numpy-svn] r6490 - trunk/numpy/core Message-ID: <20090226123827.930DDC7C026@scipy.org> Author: cdavid Date: 2009-02-26 06:38:23 -0600 (Thu, 26 Feb 2009) New Revision: 6490 Modified: trunk/numpy/core/setup.py Log: SIZEOF_LONGDOUBLE and SIZEOF_LONGLONG are public, since they are defined in noprefix.h. So don't define those in config.h. Modified: trunk/numpy/core/setup.py =================================================================== --- trunk/numpy/core/setup.py 2009-02-26 12:38:10 UTC (rev 6489) +++ trunk/numpy/core/setup.py 2009-02-26 12:38:23 UTC (rev 6490) @@ -153,7 +153,8 @@ for type in ('short', 'int', 'long', 'float', 'double', 'long double'): res = config_cmd.check_type_size(type) if res >= 0: - private_defines.append(('SIZEOF_%s' % sym2def(type), '%d' % res)) + if not type == 'long double': + private_defines.append(('SIZEOF_%s' % sym2def(type), '%d' % res)) public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), '%d' % res)) else: raise SystemError("Checking sizeof (%s) failed !" % type) @@ -179,7 +180,7 @@ res = config_cmd.check_type_size('long long') if res >= 0: - private_defines.append(('SIZEOF_%s' % sym2def('long long'), '%d' % res)) + #private_defines.append(('SIZEOF_%s' % sym2def('long long'), '%d' % res)) public_defines.append(('NPY_SIZEOF_%s' % sym2def('long long'), '%d' % res)) else: raise SystemError("Checking sizeof (%s) failed !" % 'long long') From numpy-svn at scipy.org Thu Feb 26 08:21:38 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Thu, 26 Feb 2009 07:21:38 -0600 (CST) Subject: [Numpy-svn] r6491 - trunk/numpy/core Message-ID: <20090226132138.60EACC7C026@scipy.org> Author: cdavid Date: 2009-02-26 07:21:34 -0600 (Thu, 26 Feb 2009) New Revision: 6491 Modified: trunk/numpy/core/setup.py Log: Remote obsolete test code for sizeof defines. Modified: trunk/numpy/core/setup.py =================================================================== --- trunk/numpy/core/setup.py 2009-02-26 12:38:23 UTC (rev 6490) +++ trunk/numpy/core/setup.py 2009-02-26 13:21:34 UTC (rev 6491) @@ -513,86 +513,6 @@ } """ -import sys -def generate_numpyconfig_code(target): - """Return the source code as a string of the code to generate the - numpyconfig header file.""" - if sys.platform == 'win32': - target = target.replace('\\','\\\\') - # Config symbols to prepend - prepends = [('NPY_SIZEOF_SHORT', 'SIZEOF_SHORT'), - ('NPY_SIZEOF_INT', 'SIZEOF_INT'), - ('NPY_SIZEOF_LONG', 'SIZEOF_LONG'), - ('NPY_SIZEOF_FLOAT', 'SIZEOF_FLOAT'), - ('NPY_SIZEOF_DOUBLE', 'SIZEOF_DOUBLE'), - ('NPY_SIZEOF_LONGDOUBLE', 'SIZEOF_LONG_DOUBLE'), - ('NPY_SIZEOF_PY_INTPTR_T', 'SIZEOF_PY_INTPTR_T')] - - testcode = [""" -#include -#include "config.h" - -int main() -{ - FILE* f; - - f = fopen("%s", "w"); - if (f == NULL) { - return -1; - } -""" % target] - - testcode.append(r""" - fprintf(f, "/*\n * This file is generated by %s. DO NOT EDIT \n */\n"); -""" % __file__) - - # Prepend NPY_ to any SIZEOF defines - testcode.extend([r' fprintf(f, "#define ' + i + r' %%d \n", %s);' % j for i, j in prepends]) - - # Conditionally define NPY_NO_SIGNAL - if is_npy_no_signal(): - testcode.append(r' fprintf(f, "\n#define NPY_NO_SIGNAL\n");') - - # Define NPY_NOSMP to 1 if explicitely requested, or if we cannot - # support thread support reliably - if is_npy_no_smp(): - testcode.append(r' fprintf(f, "#define NPY_NO_SMP 1\n");') - else: - testcode.append(r' fprintf(f, "#define NPY_NO_SMP 0\n");') - - tmpcode = r""" - #ifdef PY_LONG_LONG - fprintf(f, "\n#define %s %%d \n", %s); - fprintf(f, "#define %s %%d \n", %s); - #else - fprintf(f, "/* PY_LONG_LONG not defined */ \n"); - #endif""" - testcode.append(tmpcode % ('NPY_SIZEOF_LONGLONG', 'SIZEOF_LONG_LONG', - 'NPY_SIZEOF_PY_LONG_LONG', 'SIZEOF_PY_LONG_LONG')) - - testcode.append(r""" -#ifndef CHAR_BIT - { - unsigned char var = 2; - int i = 0; - while (var >= 2) { - var = var << 1; - i++; - } - fprintf(f,"#define CHAR_BIT %d\n", i+1); - } -#else - fprintf(f, "/* #define CHAR_BIT %d */\n", CHAR_BIT); -#endif""") - - testcode.append(""" - fclose(f); - - return 0; -} -""") - return "\n".join(testcode) - if __name__=='__main__': from numpy.distutils.core import setup setup(configuration=configuration) From numpy-svn at scipy.org Thu Feb 26 08:21:53 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Thu, 26 Feb 2009 07:21:53 -0600 (CST) Subject: [Numpy-svn] r6492 - trunk/numpy/core Message-ID: <20090226132153.DAA8BC7C026@scipy.org> Author: cdavid Date: 2009-02-26 07:21:49 -0600 (Thu, 26 Feb 2009) New Revision: 6492 Modified: trunk/numpy/core/setup.py Log: Use config_cmd directly as an argument for check_types. Modified: trunk/numpy/core/setup.py =================================================================== --- trunk/numpy/core/setup.py 2009-02-26 13:21:34 UTC (rev 6491) +++ trunk/numpy/core/setup.py 2009-02-26 13:21:49 UTC (rev 6492) @@ -136,12 +136,10 @@ if st: moredefs.append(name_to_defsymb("decl_%s" % f)) -def check_types(config, ext, build_dir): +def check_types(config_cmd, ext, build_dir): private_defines = [] public_defines = [] - config_cmd = config.get_config_cmd() - # Check we have the python header (-dev* packages on Linux) result = config_cmd.check_header('Python.h') if not result: @@ -240,7 +238,7 @@ log.info('Generating %s',target) # Check sizeof - moredefs, ignored = check_types(config, ext, build_dir) + moredefs, ignored = check_types(config_cmd, ext, build_dir) # Check math library and C99 math funcs availability mathlibs = check_mathlib(config_cmd) @@ -304,7 +302,7 @@ log.info('Generating %s',target) # Check sizeof - ignored, moredefs = check_types(config, ext, build_dir) + ignored, moredefs = check_types(config_cmd, ext, build_dir) if is_npy_no_signal(): moredefs.append(('NPY_NO_SIGNAL', 1)) From numpy-svn at scipy.org Thu Feb 26 08:22:06 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Thu, 26 Feb 2009 07:22:06 -0600 (CST) Subject: [Numpy-svn] r6493 - trunk/numpy/core Message-ID: <20090226132206.C62CFC7C026@scipy.org> Author: cdavid Date: 2009-02-26 07:22:02 -0600 (Thu, 26 Feb 2009) New Revision: 6493 Modified: trunk/numpy/core/setup.py Log: Cache the sizeof computation to speed up configuration stage. Modified: trunk/numpy/core/setup.py =================================================================== --- trunk/numpy/core/setup.py 2009-02-26 13:21:49 UTC (rev 6492) +++ trunk/numpy/core/setup.py 2009-02-26 13:22:02 UTC (rev 6493) @@ -6,6 +6,29 @@ from distutils.dep_util import newer from distutils.sysconfig import get_config_var +# XXX: ugly, we use a class to avoid calling twice some expensive functions in +# config.h/numpyconfig.h. I don't see a better way because distutils force +# config.h generation inside an Extension class, and as such sharing +# configuration informations between extensions is not easy. +# Using a pickled-based memoize does not work because config_cmd is an instance +# method, which cPickle does not like. +try: + import cPickle as _pik +except ImportError: + import pickle as _pik + +class CallOnceOnly(object): + def __init__(self): + self._check_types = None + + def check_types(self, *a, **kw): + if self._check_types is None: + out = check_types(*a, **kw) + self._check_types = _pik.dumps(out) + else: + out = _pik.loads(self._check_types) + return out + def pythonlib_dir(): """return path where libpython* is.""" if sys.platform == 'win32': @@ -228,6 +251,8 @@ header_dir = 'include/numpy' # this is relative to config.path_in_package + cocache = CallOnceOnly() + def generate_config_h(ext, build_dir): target = join(build_dir,header_dir,'config.h') d = os.path.dirname(target) @@ -238,7 +263,7 @@ log.info('Generating %s',target) # Check sizeof - moredefs, ignored = check_types(config_cmd, ext, build_dir) + moredefs, ignored = cocache.check_types(config_cmd, ext, build_dir) # Check math library and C99 math funcs availability mathlibs = check_mathlib(config_cmd) @@ -302,7 +327,7 @@ log.info('Generating %s',target) # Check sizeof - ignored, moredefs = check_types(config_cmd, ext, build_dir) + ignored, moredefs = cocache.check_types(config_cmd, ext, build_dir) if is_npy_no_signal(): moredefs.append(('NPY_NO_SIGNAL', 1)) From numpy-svn at scipy.org Thu Feb 26 09:06:33 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Thu, 26 Feb 2009 08:06:33 -0600 (CST) Subject: [Numpy-svn] r6494 - trunk/numpy/core Message-ID: <20090226140633.936FACB404E@scipy.org> Author: cdavid Date: 2009-02-26 08:06:29 -0600 (Thu, 26 Feb 2009) New Revision: 6494 Modified: trunk/numpy/core/setup.py Log: Rename name_to_defsymb for consistency. Modified: trunk/numpy/core/setup.py =================================================================== --- trunk/numpy/core/setup.py 2009-02-26 13:22:02 UTC (rev 6493) +++ trunk/numpy/core/setup.py 2009-02-26 14:06:29 UTC (rev 6494) @@ -92,7 +92,7 @@ st = config.check_funcs_once(funcs_name, libraries=mathlibs, decl=decl, call=decl) if st: - moredefs.extend([name_to_defsymb(f) for f in funcs_name]) + moredefs.extend([fname2def(f) for f in funcs_name]) return st def check_funcs(funcs_name): @@ -102,14 +102,11 @@ # Global check failed, check func per func for f in funcs_name: if check_func(f): - moredefs.append(name_to_defsymb(f)) + moredefs.append(fname2def(f)) return 0 else: return 1 - def name_to_defsymb(name): - return "HAVE_%s" % name.upper() - #use_msvc = config.check_decl("_MSC_VER") # Mandatory functions: if not found, fail the build @@ -157,8 +154,11 @@ for f in ["isnan", "isinf", "signbit", "isfinite"]: st = config.check_decl(f, headers = ["Python.h", "math.h"]) if st: - moredefs.append(name_to_defsymb("decl_%s" % f)) + moredefs.append(fname2def("decl_%s" % f)) +def fname2def(name): + return "HAVE_%s" % name.upper() + def check_types(config_cmd, ext, build_dir): private_defines = [] public_defines = [] From numpy-svn at scipy.org Thu Feb 26 09:06:46 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Thu, 26 Feb 2009 08:06:46 -0600 (CST) Subject: [Numpy-svn] r6495 - trunk/numpy/core Message-ID: <20090226140646.7C2CDCB404E@scipy.org> Author: cdavid Date: 2009-02-26 08:06:42 -0600 (Thu, 26 Feb 2009) New Revision: 6495 Modified: trunk/numpy/core/setup.py Log: Make a deepcopy of cached output to avoid side-effects. Modified: trunk/numpy/core/setup.py =================================================================== --- trunk/numpy/core/setup.py 2009-02-26 14:06:29 UTC (rev 6494) +++ trunk/numpy/core/setup.py 2009-02-26 14:06:42 UTC (rev 6495) @@ -16,6 +16,7 @@ import cPickle as _pik except ImportError: import pickle as _pik +import copy class CallOnceOnly(object): def __init__(self): @@ -26,7 +27,7 @@ out = check_types(*a, **kw) self._check_types = _pik.dumps(out) else: - out = _pik.loads(self._check_types) + out = copy.deepcopy(_pik.loads(self._check_types)) return out def pythonlib_dir(): From numpy-svn at scipy.org Thu Feb 26 09:06:59 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Thu, 26 Feb 2009 08:06:59 -0600 (CST) Subject: [Numpy-svn] r6496 - trunk/numpy/core Message-ID: <20090226140659.DBC40CB404E@scipy.org> Author: cdavid Date: 2009-02-26 08:06:55 -0600 (Thu, 26 Feb 2009) New Revision: 6496 Modified: trunk/numpy/core/setup.py Log: Avoid checking twice the IEEE macros. Modified: trunk/numpy/core/setup.py =================================================================== --- trunk/numpy/core/setup.py 2009-02-26 14:06:42 UTC (rev 6495) +++ trunk/numpy/core/setup.py 2009-02-26 14:06:55 UTC (rev 6496) @@ -21,6 +21,7 @@ class CallOnceOnly(object): def __init__(self): self._check_types = None + self._check_ieee_macros = None def check_types(self, *a, **kw): if self._check_types is None: @@ -30,6 +31,14 @@ out = copy.deepcopy(_pik.loads(self._check_types)) return out + def check_ieee_macros(self, *a, **kw): + if self._check_ieee_macros is None: + out = check_ieee_macros(*a, **kw) + self._check_ieee_macros = _pik.dumps(out) + else: + out = copy.deepcopy(_pik.loads(self._check_ieee_macros)) + return out + def pythonlib_dir(): """return path where libpython* is.""" if sys.platform == 'win32': @@ -147,6 +156,13 @@ fns = [f + prec for f in c99_funcs] check_funcs(fns) +def fname2def(name): + return "HAVE_%s" % name.upper() + +def check_ieee_macros(config): + priv = [] + pub = [] + # Normally, isnan and isinf are macro (C99), but some platforms only have # func, or both func and macro version. Check for macro only, and define # replacement ones if not found. @@ -155,10 +171,10 @@ for f in ["isnan", "isinf", "signbit", "isfinite"]: st = config.check_decl(f, headers = ["Python.h", "math.h"]) if st: - moredefs.append(fname2def("decl_%s" % f)) + priv.append(fname2def("decl_%s" % f)) + pub.append('NPY_%s' % fname2def("decl_%s" % f)) -def fname2def(name): - return "HAVE_%s" % name.upper() + return priv, pub def check_types(config_cmd, ext, build_dir): private_defines = [] @@ -271,6 +287,7 @@ moredefs.append(('MATHLIB',','.join(mathlibs))) check_math_capabilities(config_cmd, moredefs, mathlibs) + moredefs.extend(cocache.check_ieee_macros(config_cmd)[0]) # Signal check if is_npy_no_signal(): @@ -338,17 +355,7 @@ else: moredefs.append(('NPY_NO_SMP', 0)) - # Normally, isnan and isinf are macro (C99), but some platforms - # only have func, or both func and macro version. Check for macro - # only, and define replacement ones if not found. - # Note: including Python.h is necessary because it modifies some - # math.h definitions - # XXX: we check those twice... should decouple tests from - # config.h/numpyconfig.h to avoid this - for f in ["isnan", "isinf", "signbit", "isfinite"]: - st = config_cmd.check_decl(f, headers = ["Python.h", "math.h"]) - if st: - moredefs.append('NPY_HAVE_DECL_%s' % f.upper()) + moredefs.extend(cocache.check_ieee_macros(config_cmd)[1]) # Check wether we can use inttypes (C99) formats if config_cmd.check_decl('PRIdPTR', headers = ['inttypes.h']): From numpy-svn at scipy.org Thu Feb 26 09:07:14 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Thu, 26 Feb 2009 08:07:14 -0600 (CST) Subject: [Numpy-svn] r6497 - trunk/numpy/distutils/command Message-ID: <20090226140714.C0BB0CB404E@scipy.org> Author: cdavid Date: 2009-02-26 08:07:09 -0600 (Thu, 26 Feb 2009) New Revision: 6497 Modified: trunk/numpy/distutils/command/config.py Log: Add an expected keyword for check_type_size check, to speed things up on common platforms. Modified: trunk/numpy/distutils/command/config.py =================================================================== --- trunk/numpy/distutils/command/config.py 2009-02-26 14:06:55 UTC (rev 6496) +++ trunk/numpy/distutils/command/config.py 2009-02-26 14:07:09 UTC (rev 6497) @@ -165,7 +165,7 @@ return self.try_compile(body, headers, include_dirs) - def check_type_size(self, type_name, headers=None, include_dirs=None, library_dirs=None): + def check_type_size(self, type_name, headers=None, include_dirs=None, library_dirs=None, expected=None): """Check size of a given type.""" self._check_compiler() @@ -185,6 +185,27 @@ headers, include_dirs, 'c') self._clean() + if expected: + body = r""" +typedef %(type)s npy_check_sizeof_type; +int main () +{ + static int test_array [1 - 2 * !(((long) (sizeof (npy_check_sizeof_type))) == %(size)s)]; + test_array [0] = 0 + + ; + return 0; +} +""" + for size in expected: + try: + self._compile(body % {'type': type_name, 'size': size}, + headers, include_dirs, 'c') + self._clean() + return size + except CompileError: + pass + # this fails to *compile* if size > sizeof(type) body = r""" typedef %(type)s npy_check_sizeof_type; From numpy-svn at scipy.org Thu Feb 26 09:07:28 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Thu, 26 Feb 2009 08:07:28 -0600 (CST) Subject: [Numpy-svn] r6498 - trunk/numpy/core Message-ID: <20090226140728.F3C8BCB405F@scipy.org> Author: cdavid Date: 2009-02-26 08:07:23 -0600 (Thu, 26 Feb 2009) New Revision: 6498 Modified: trunk/numpy/core/setup.py Log: Use hints for sizeof checks, to speed things up. Modified: trunk/numpy/core/setup.py =================================================================== --- trunk/numpy/core/setup.py 2009-02-26 14:07:09 UTC (rev 6497) +++ trunk/numpy/core/setup.py 2009-02-26 14:07:23 UTC (rev 6498) @@ -180,6 +180,20 @@ private_defines = [] public_defines = [] + # Expected size (in number of bytes) for each type. This is an + # optimization: those are only hints, and an exhaustive search for the size + # is done if the hints are wrong. + expected = {} + expected['short'] = [2] + expected['int'] = [4] + expected['long'] = [8, 4] + expected['float'] = [4] + expected['double'] = [8] + expected['long double'] = [8, 12, 16] + expected['Py_intptr_t'] = [4, 8] + expected['PY_LONG_LONG'] = [8] + expected['long long'] = [8] + # Check we have the python header (-dev* packages on Linux) result = config_cmd.check_header('Python.h') if not result: @@ -189,7 +203,7 @@ # Check basic types sizes for type in ('short', 'int', 'long', 'float', 'double', 'long double'): - res = config_cmd.check_type_size(type) + res = config_cmd.check_type_size(type, expected=expected[type]) if res >= 0: if not type == 'long double': private_defines.append(('SIZEOF_%s' % sym2def(type), '%d' % res)) @@ -199,7 +213,9 @@ for type in ('Py_intptr_t',): res = config_cmd.check_type_size(type, headers=["Python.h"], - library_dirs=[pythonlib_dir()]) + library_dirs=[pythonlib_dir()], + expected=expected[type]) + if res >= 0: private_defines.append(('SIZEOF_%s' % sym2def(type), '%d' % res)) public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), '%d' % res)) @@ -209,14 +225,16 @@ # We check declaration AND type because that's how distutils does it. if config_cmd.check_decl('PY_LONG_LONG', headers=['Python.h']): res = config_cmd.check_type_size('PY_LONG_LONG', headers=['Python.h'], - library_dirs=[pythonlib_dir()]) + library_dirs=[pythonlib_dir()], + expected=expected['PY_LONG_LONG']) if res >= 0: private_defines.append(('SIZEOF_%s' % sym2def('PY_LONG_LONG'), '%d' % res)) public_defines.append(('NPY_SIZEOF_%s' % sym2def('PY_LONG_LONG'), '%d' % res)) else: raise SystemError("Checking sizeof (%s) failed !" % 'PY_LONG_LONG') - res = config_cmd.check_type_size('long long') + res = config_cmd.check_type_size('long long', + expected=expected['long long']) if res >= 0: #private_defines.append(('SIZEOF_%s' % sym2def('long long'), '%d' % res)) public_defines.append(('NPY_SIZEOF_%s' % sym2def('long long'), '%d' % res)) From numpy-svn at scipy.org Thu Feb 26 09:41:40 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Thu, 26 Feb 2009 08:41:40 -0600 (CST) Subject: [Numpy-svn] r6499 - trunk/numpy/core Message-ID: <20090226144140.0D868C84133@scipy.org> Author: cdavid Date: 2009-02-26 08:41:19 -0600 (Thu, 26 Feb 2009) New Revision: 6499 Modified: trunk/numpy/core/setup.py Log: Do not unconditionally remove some function checks. Modified: trunk/numpy/core/setup.py =================================================================== --- trunk/numpy/core/setup.py 2009-02-26 14:07:23 UTC (rev 6498) +++ trunk/numpy/core/setup.py 2009-02-26 14:41:19 UTC (rev 6499) @@ -141,7 +141,8 @@ # hoping their own test are correct if sys.version_info[0] == 2 and sys.version_info[1] >= 6: for f in ["expm1", "log1p", "acosh", "atanh", "asinh"]: - optional_stdfuncs.remove(f) + if config.check_decl(fname2def(f), headers = ["Python.h", "math.h"]): + optional_stdfuncs.remove(f) check_funcs(optional_stdfuncs) From numpy-svn at scipy.org Thu Feb 26 23:50:32 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Thu, 26 Feb 2009 22:50:32 -0600 (CST) Subject: [Numpy-svn] r6500 - trunk/doc/release Message-ID: <20090227045032.5E3D3C7C018@scipy.org> Author: cdavid Date: 2009-02-26 22:50:13 -0600 (Thu, 26 Feb 2009) New Revision: 6500 Modified: trunk/doc/release/1.3.0-notes.rst Log: Update releases notes for 1.3.0. Modified: trunk/doc/release/1.3.0-notes.rst =================================================================== --- trunk/doc/release/1.3.0-notes.rst 2009-02-26 14:41:19 UTC (rev 6499) +++ trunk/doc/release/1.3.0-notes.rst 2009-02-27 04:50:13 UTC (rev 6500) @@ -4,28 +4,18 @@ This minor release comes almost four months after the 1.1.0 release. -Changes -------- +New features +============ -Generalized ufuncs +Python 2.6 support ~~~~~~~~~~~~~~~~~~ -http://projects.scipy.org/scipy/numpy/ticket/887 +http://www.python.org/dev/peps/pep-0361/ -Refactoring numpy.core math configuration -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -http://projects.scipy.org/scipy/numpy/browser/trunk/doc/neps/math_config_clean.txt - -Improvements to build warnings -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -http://projects.scipy.org/scipy/numpy/browser/trunk/doc/neps/warnfix.txt - -Python 2.6 support +Generalized ufuncs ~~~~~~~~~~~~~~~~~~ -http://www.python.org/dev/peps/pep-0361/ +http://projects.scipy.org/scipy/numpy/ticket/887 Histogram ~~~~~~~~~ @@ -39,3 +29,45 @@ The previous behavior is still accessible using `new=False`, but is scheduled to be deprecated in the next release (1.3). + +Masked arrays +~~~~~~~~~~~~~ + +TODO + +gfortran support on windows (32 and 64 bits) +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Gfortran can now be used as a fortran compiler for numpy, even when the C +compiler is Visual Studio. + +Documentation changes +===================== + +Internal changes +================ + +Refactoring numpy.core math configuration +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +This should make the porting to new platforms easier, and more robust. In +particular, the configuration stage does not need to execute any code on the +target platform, which is a first step toward cross-compilation. + +http://projects.scipy.org/scipy/numpy/browser/trunk/doc/neps/math_config_clean.txt + +Improvements to build warnings +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Numpy can now build with -W -Wall without warnings. + +http://projects.scipy.org/scipy/numpy/browser/trunk/doc/neps/warnfix.txt + +Separate core math library +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The core math functions (sin, cos, etc... for basic C types) have been put into +a separate library. The library includes platform-specific fixes for various +maths functions, such as using those versions should be more robust than using +your platform functions directly. The API for existing functions is exactly the +same as the C99 math functions API, except they are prefixed with npy_. From numpy-svn at scipy.org Fri Feb 27 02:21:14 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Fri, 27 Feb 2009 01:21:14 -0600 (CST) Subject: [Numpy-svn] r6501 - trunk/doc/release Message-ID: <20090227072114.5A80CC7C009@scipy.org> Author: cdavid Date: 2009-02-27 01:21:09 -0600 (Fri, 27 Feb 2009) New Revision: 6501 Added: trunk/doc/release/time_based_proposal.txt Log: Start the proposal for time-based release. Added: trunk/doc/release/time_based_proposal.txt =================================================================== --- trunk/doc/release/time_based_proposal.txt 2009-02-27 04:50:13 UTC (rev 6500) +++ trunk/doc/release/time_based_proposal.txt 2009-02-27 07:21:09 UTC (rev 6501) @@ -0,0 +1,96 @@ +.. vim:syntax=rst + +Introduction +============ + +This document proposes some enhancements for numpy and scipy releases. +Successive numpy and scipy releases are too far appart from a time point of +view - some people who are in the numpy release team feel that it cannot +improve without a bit more formal release process. The main proposal is to +follow a time-based release, with expected dates for code freeze, beta and rc. +The goal is two folds: make release more predictible, and move the code forward. + +Rationale +========= + +Right now, the release process of numpy is relatively organic. When some +features are there, we may decide to make a new release. Because there is not +fixed schedule, people don't really know when new features and bug fixes will +go into a release. More significantly, having an expected release schedule +helps to *coordinate* efforts: at the beginning of a cycle, everybody can jump +in and put new code, even break things if needed. But after some point, only +bug fixes are accepted: this makes beta and RC releases much easier; calming +things down toward the release date helps focusing on bugs and regressions + +Proposal +======== + +Time schedule +------------- + +The proposed schedule is to release numpy every 3 months - the exact period can +be tweaked if it ends up not working as expected. There will be several stages +for the cycle: + + * Development: anything can happen (by anything, we mean as currently + done). The focus is on new features, refactoring, etc... + * Beta: no new features. No bug fixing which requires heavy changes. + regression fixes which appear on supported platforms and were not + caught earlier. + * Polish/RC: only docstring changes and blocker regressions are allowed. + +The schedule would be as follows: + + +------+-----------------+-----------------+------------------+ + | Week | 1.3.0 | 1.4.0 | Release time | + +======+=================+=================+==================+ + | 1 | Development | - | | + | 2 | Development | - | | + | 3 | Development | - | | + | 4 | Development | - | | + | 5 | Development | - | | + | 6 | Development | - | | + | 7 | Beta | - | | + | 8 | Beta | - | | + | 9 | Beta | - | 1.3.0 released | + | 10 | Polish | Development | | + | 11 | Polish | Development | | + | 12 | Polish | Development | | + | 13 | Polish | Development | | + | 14 | | Development | | + | 15 | | Development | | + | 16 | | Beta | | + | 17 | | Beta | | + | 18 | | Beta | 1.4.0 released | + +------+-----------------+-----------------+------------------+ + +Each stage can be defined as follows: + + Development Beta Polish + Python Frozen: - slushy Y + Docstring Frozen: - slushy thicker slush + C code Frozen: - thicker slush thicker slush + +Terminology: + + * slushy: you can change it if you beg the release team and it's really + important and you coordinate with docs/translations; no "big" changes. + + * thicker slush: you can change it if it's an open bug marked + showstopper for the Polish release, you beg the release team, the + change is very very small yet very very important, and you feel + extremely guilty about your transgressions. + +The different frozen states are intended to be gradients. The exact meaning is +decided by the release manager: he has the last word on what's go in, what +doesn't. + +The proposed schedule means that there would be at most 4 months between +putting code into the source code repository and being released. + +Release team +------------ + +For every release, there would be at least one release manager. We propose to +rotate the release manager: rotation means it is not always the same person +doing the dirty job, and it should also keep the release manager honest. From numpy-svn at scipy.org Fri Feb 27 02:21:27 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Fri, 27 Feb 2009 01:21:27 -0600 (CST) Subject: [Numpy-svn] r6502 - trunk/doc/release Message-ID: <20090227072127.5AAF4C7C009@scipy.org> Author: cdavid Date: 2009-02-27 01:21:23 -0600 (Fri, 27 Feb 2009) New Revision: 6502 Modified: trunk/doc/release/time_based_proposal.txt Log: Add references for time based releases proposal. Modified: trunk/doc/release/time_based_proposal.txt =================================================================== --- trunk/doc/release/time_based_proposal.txt 2009-02-27 07:21:09 UTC (rev 6501) +++ trunk/doc/release/time_based_proposal.txt 2009-02-27 07:21:23 UTC (rev 6502) @@ -94,3 +94,13 @@ For every release, there would be at least one release manager. We propose to rotate the release manager: rotation means it is not always the same person doing the dirty job, and it should also keep the release manager honest. + +References +========== + + * Proposed schedule for Gnome from Havoc Pennington (one of the core + GTK and Gnome manager): + http://mail.gnome.org/archives/gnome-hackers/2002-June/msg00041.html + The proposed schedule is heavily based on this email + + * http://live.gnome.org/ReleasePlanning/Freezes From numpy-svn at scipy.org Fri Feb 27 02:23:08 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Fri, 27 Feb 2009 01:23:08 -0600 (CST) Subject: [Numpy-svn] r6503 - trunk/doc/release Message-ID: <20090227072308.213CDC7C009@scipy.org> Author: cdavid Date: 2009-02-27 01:23:01 -0600 (Fri, 27 Feb 2009) New Revision: 6503 Added: trunk/doc/release/time_based_proposal.rst Removed: trunk/doc/release/time_based_proposal.txt Log: Rename the time-based proposal such as it is recognized as REST by trac. Copied: trunk/doc/release/time_based_proposal.rst (from rev 6502, trunk/doc/release/time_based_proposal.txt) =================================================================== --- trunk/doc/release/time_based_proposal.txt 2009-02-27 07:21:23 UTC (rev 6502) +++ trunk/doc/release/time_based_proposal.rst 2009-02-27 07:23:01 UTC (rev 6503) @@ -0,0 +1,106 @@ +.. vim:syntax=rst + +Introduction +============ + +This document proposes some enhancements for numpy and scipy releases. +Successive numpy and scipy releases are too far appart from a time point of +view - some people who are in the numpy release team feel that it cannot +improve without a bit more formal release process. The main proposal is to +follow a time-based release, with expected dates for code freeze, beta and rc. +The goal is two folds: make release more predictible, and move the code forward. + +Rationale +========= + +Right now, the release process of numpy is relatively organic. When some +features are there, we may decide to make a new release. Because there is not +fixed schedule, people don't really know when new features and bug fixes will +go into a release. More significantly, having an expected release schedule +helps to *coordinate* efforts: at the beginning of a cycle, everybody can jump +in and put new code, even break things if needed. But after some point, only +bug fixes are accepted: this makes beta and RC releases much easier; calming +things down toward the release date helps focusing on bugs and regressions + +Proposal +======== + +Time schedule +------------- + +The proposed schedule is to release numpy every 3 months - the exact period can +be tweaked if it ends up not working as expected. There will be several stages +for the cycle: + + * Development: anything can happen (by anything, we mean as currently + done). The focus is on new features, refactoring, etc... + * Beta: no new features. No bug fixing which requires heavy changes. + regression fixes which appear on supported platforms and were not + caught earlier. + * Polish/RC: only docstring changes and blocker regressions are allowed. + +The schedule would be as follows: + + +------+-----------------+-----------------+------------------+ + | Week | 1.3.0 | 1.4.0 | Release time | + +======+=================+=================+==================+ + | 1 | Development | - | | + | 2 | Development | - | | + | 3 | Development | - | | + | 4 | Development | - | | + | 5 | Development | - | | + | 6 | Development | - | | + | 7 | Beta | - | | + | 8 | Beta | - | | + | 9 | Beta | - | 1.3.0 released | + | 10 | Polish | Development | | + | 11 | Polish | Development | | + | 12 | Polish | Development | | + | 13 | Polish | Development | | + | 14 | | Development | | + | 15 | | Development | | + | 16 | | Beta | | + | 17 | | Beta | | + | 18 | | Beta | 1.4.0 released | + +------+-----------------+-----------------+------------------+ + +Each stage can be defined as follows: + + Development Beta Polish + Python Frozen: - slushy Y + Docstring Frozen: - slushy thicker slush + C code Frozen: - thicker slush thicker slush + +Terminology: + + * slushy: you can change it if you beg the release team and it's really + important and you coordinate with docs/translations; no "big" changes. + + * thicker slush: you can change it if it's an open bug marked + showstopper for the Polish release, you beg the release team, the + change is very very small yet very very important, and you feel + extremely guilty about your transgressions. + +The different frozen states are intended to be gradients. The exact meaning is +decided by the release manager: he has the last word on what's go in, what +doesn't. + +The proposed schedule means that there would be at most 4 months between +putting code into the source code repository and being released. + +Release team +------------ + +For every release, there would be at least one release manager. We propose to +rotate the release manager: rotation means it is not always the same person +doing the dirty job, and it should also keep the release manager honest. + +References +========== + + * Proposed schedule for Gnome from Havoc Pennington (one of the core + GTK and Gnome manager): + http://mail.gnome.org/archives/gnome-hackers/2002-June/msg00041.html + The proposed schedule is heavily based on this email + + * http://live.gnome.org/ReleasePlanning/Freezes Deleted: trunk/doc/release/time_based_proposal.txt =================================================================== --- trunk/doc/release/time_based_proposal.txt 2009-02-27 07:21:23 UTC (rev 6502) +++ trunk/doc/release/time_based_proposal.txt 2009-02-27 07:23:01 UTC (rev 6503) @@ -1,106 +0,0 @@ -.. vim:syntax=rst - -Introduction -============ - -This document proposes some enhancements for numpy and scipy releases. -Successive numpy and scipy releases are too far appart from a time point of -view - some people who are in the numpy release team feel that it cannot -improve without a bit more formal release process. The main proposal is to -follow a time-based release, with expected dates for code freeze, beta and rc. -The goal is two folds: make release more predictible, and move the code forward. - -Rationale -========= - -Right now, the release process of numpy is relatively organic. When some -features are there, we may decide to make a new release. Because there is not -fixed schedule, people don't really know when new features and bug fixes will -go into a release. More significantly, having an expected release schedule -helps to *coordinate* efforts: at the beginning of a cycle, everybody can jump -in and put new code, even break things if needed. But after some point, only -bug fixes are accepted: this makes beta and RC releases much easier; calming -things down toward the release date helps focusing on bugs and regressions - -Proposal -======== - -Time schedule -------------- - -The proposed schedule is to release numpy every 3 months - the exact period can -be tweaked if it ends up not working as expected. There will be several stages -for the cycle: - - * Development: anything can happen (by anything, we mean as currently - done). The focus is on new features, refactoring, etc... - * Beta: no new features. No bug fixing which requires heavy changes. - regression fixes which appear on supported platforms and were not - caught earlier. - * Polish/RC: only docstring changes and blocker regressions are allowed. - -The schedule would be as follows: - - +------+-----------------+-----------------+------------------+ - | Week | 1.3.0 | 1.4.0 | Release time | - +======+=================+=================+==================+ - | 1 | Development | - | | - | 2 | Development | - | | - | 3 | Development | - | | - | 4 | Development | - | | - | 5 | Development | - | | - | 6 | Development | - | | - | 7 | Beta | - | | - | 8 | Beta | - | | - | 9 | Beta | - | 1.3.0 released | - | 10 | Polish | Development | | - | 11 | Polish | Development | | - | 12 | Polish | Development | | - | 13 | Polish | Development | | - | 14 | | Development | | - | 15 | | Development | | - | 16 | | Beta | | - | 17 | | Beta | | - | 18 | | Beta | 1.4.0 released | - +------+-----------------+-----------------+------------------+ - -Each stage can be defined as follows: - - Development Beta Polish - Python Frozen: - slushy Y - Docstring Frozen: - slushy thicker slush - C code Frozen: - thicker slush thicker slush - -Terminology: - - * slushy: you can change it if you beg the release team and it's really - important and you coordinate with docs/translations; no "big" changes. - - * thicker slush: you can change it if it's an open bug marked - showstopper for the Polish release, you beg the release team, the - change is very very small yet very very important, and you feel - extremely guilty about your transgressions. - -The different frozen states are intended to be gradients. The exact meaning is -decided by the release manager: he has the last word on what's go in, what -doesn't. - -The proposed schedule means that there would be at most 4 months between -putting code into the source code repository and being released. - -Release team ------------- - -For every release, there would be at least one release manager. We propose to -rotate the release manager: rotation means it is not always the same person -doing the dirty job, and it should also keep the release manager honest. - -References -========== - - * Proposed schedule for Gnome from Havoc Pennington (one of the core - GTK and Gnome manager): - http://mail.gnome.org/archives/gnome-hackers/2002-June/msg00041.html - The proposed schedule is heavily based on this email - - * http://live.gnome.org/ReleasePlanning/Freezes From numpy-svn at scipy.org Fri Feb 27 02:24:22 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Fri, 27 Feb 2009 01:24:22 -0600 (CST) Subject: [Numpy-svn] r6504 - trunk/doc/release Message-ID: <20090227072422.3F132C7C009@scipy.org> Author: cdavid Date: 2009-02-27 01:24:17 -0600 (Fri, 27 Feb 2009) New Revision: 6504 Modified: trunk/doc/release/time_based_proposal.rst Log: Spelling in time-based release doc. Modified: trunk/doc/release/time_based_proposal.rst =================================================================== --- trunk/doc/release/time_based_proposal.rst 2009-02-27 07:23:01 UTC (rev 6503) +++ trunk/doc/release/time_based_proposal.rst 2009-02-27 07:24:17 UTC (rev 6504) @@ -4,11 +4,11 @@ ============ This document proposes some enhancements for numpy and scipy releases. -Successive numpy and scipy releases are too far appart from a time point of +Successive numpy and scipy releases are too far apart from a time point of view - some people who are in the numpy release team feel that it cannot improve without a bit more formal release process. The main proposal is to follow a time-based release, with expected dates for code freeze, beta and rc. -The goal is two folds: make release more predictible, and move the code forward. +The goal is two folds: make release more predictable, and move the code forward. Rationale ========= From numpy-svn at scipy.org Fri Feb 27 02:29:46 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Fri, 27 Feb 2009 01:29:46 -0600 (CST) Subject: [Numpy-svn] r6505 - trunk/doc/release Message-ID: <20090227072946.0F98AC7C009@scipy.org> Author: cdavid Date: 2009-02-27 01:29:42 -0600 (Fri, 27 Feb 2009) New Revision: 6505 Modified: trunk/doc/release/time_based_proposal.rst Log: Fix tables and syntax issues for rest document. Modified: trunk/doc/release/time_based_proposal.rst =================================================================== --- trunk/doc/release/time_based_proposal.rst 2009-02-27 07:24:17 UTC (rev 6504) +++ trunk/doc/release/time_based_proposal.rst 2009-02-27 07:29:42 UTC (rev 6505) @@ -34,9 +34,11 @@ * Development: anything can happen (by anything, we mean as currently done). The focus is on new features, refactoring, etc... + * Beta: no new features. No bug fixing which requires heavy changes. regression fixes which appear on supported platforms and were not caught earlier. + * Polish/RC: only docstring changes and blocker regressions are allowed. The schedule would be as follows: @@ -45,31 +47,53 @@ | Week | 1.3.0 | 1.4.0 | Release time | +======+=================+=================+==================+ | 1 | Development | - | | + +------+-----------------+-----------------+------------------+ | 2 | Development | - | | + +------+-----------------+-----------------+------------------+ | 3 | Development | - | | + +------+-----------------+-----------------+------------------+ | 4 | Development | - | | + +------+-----------------+-----------------+------------------+ | 5 | Development | - | | + +------+-----------------+-----------------+------------------+ | 6 | Development | - | | + +------+-----------------+-----------------+------------------+ | 7 | Beta | - | | + +------+-----------------+-----------------+------------------+ | 8 | Beta | - | | + +------+-----------------+-----------------+------------------+ | 9 | Beta | - | 1.3.0 released | + +------+-----------------+-----------------+------------------+ | 10 | Polish | Development | | + +------+-----------------+-----------------+------------------+ | 11 | Polish | Development | | + +------+-----------------+-----------------+------------------+ | 12 | Polish | Development | | + +------+-----------------+-----------------+------------------+ | 13 | Polish | Development | | + +------+-----------------+-----------------+------------------+ | 14 | | Development | | + +------+-----------------+-----------------+------------------+ | 15 | | Development | | + +------+-----------------+-----------------+------------------+ | 16 | | Beta | | + +------+-----------------+-----------------+------------------+ | 17 | | Beta | | + +------+-----------------+-----------------+------------------+ | 18 | | Beta | 1.4.0 released | +------+-----------------+-----------------+------------------+ Each stage can be defined as follows: - Development Beta Polish - Python Frozen: - slushy Y - Docstring Frozen: - slushy thicker slush - C code Frozen: - thicker slush thicker slush + +------------------+-------------+----------------+----------------+ + | | Development | Beta | Polish | + +==================+=============+================+================+ + | Python Frozen | - | slushy | Y | + +------------------+-------------+----------------+----------------+ + | Docstring Frozen | - | slushy | thicker slush | + +------------------+-------------+----------------+----------------+ + | C code Frozen | - | thicker slush | thicker slush | + +------------------+-------------+----------------+----------------+ Terminology: @@ -83,11 +107,9 @@ The different frozen states are intended to be gradients. The exact meaning is decided by the release manager: he has the last word on what's go in, what -doesn't. +doesn't. The proposed schedule means that there would be at most 4 months +between putting code into the source code repository and being released. -The proposed schedule means that there would be at most 4 months between -putting code into the source code repository and being released. - Release team ------------ From numpy-svn at scipy.org Fri Feb 27 02:32:08 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Fri, 27 Feb 2009 01:32:08 -0600 (CST) Subject: [Numpy-svn] r6506 - trunk/doc/release Message-ID: <20090227073208.541DEC7C009@scipy.org> Author: cdavid Date: 2009-02-27 01:32:04 -0600 (Fri, 27 Feb 2009) New Revision: 6506 Modified: trunk/doc/release/time_based_proposal.rst Log: More esthetical changes in rest document. Modified: trunk/doc/release/time_based_proposal.rst =================================================================== --- trunk/doc/release/time_based_proposal.rst 2009-02-27 07:29:42 UTC (rev 6505) +++ trunk/doc/release/time_based_proposal.rst 2009-02-27 07:32:04 UTC (rev 6506) @@ -33,11 +33,11 @@ for the cycle: * Development: anything can happen (by anything, we mean as currently - done). The focus is on new features, refactoring, etc... + done). The focus is on new features, refactoring, etc... * Beta: no new features. No bug fixing which requires heavy changes. - regression fixes which appear on supported platforms and were not - caught earlier. + regression fixes which appear on supported platforms and were not + caught earlier. * Polish/RC: only docstring changes and blocker regressions are allowed. @@ -46,23 +46,23 @@ +------+-----------------+-----------------+------------------+ | Week | 1.3.0 | 1.4.0 | Release time | +======+=================+=================+==================+ - | 1 | Development | - | | + | 1 | Development | | | +------+-----------------+-----------------+------------------+ - | 2 | Development | - | | + | 2 | Development | | | +------+-----------------+-----------------+------------------+ - | 3 | Development | - | | + | 3 | Development | | | +------+-----------------+-----------------+------------------+ - | 4 | Development | - | | + | 4 | Development | | | +------+-----------------+-----------------+------------------+ - | 5 | Development | - | | + | 5 | Development | | | +------+-----------------+-----------------+------------------+ - | 6 | Development | - | | + | 6 | Development | | | +------+-----------------+-----------------+------------------+ - | 7 | Beta | - | | + | 7 | Beta | | | +------+-----------------+-----------------+------------------+ - | 8 | Beta | - | | + | 8 | Beta | | | +------+-----------------+-----------------+------------------+ - | 9 | Beta | - | 1.3.0 released | + | 9 | Beta | | 1.3.0 released | +------+-----------------+-----------------+------------------+ | 10 | Polish | Development | | +------+-----------------+-----------------+------------------+ @@ -88,22 +88,23 @@ +------------------+-------------+----------------+----------------+ | | Development | Beta | Polish | +==================+=============+================+================+ - | Python Frozen | - | slushy | Y | + | Python Frozen | | slushy | Y | +------------------+-------------+----------------+----------------+ - | Docstring Frozen | - | slushy | thicker slush | + | Docstring Frozen | | slushy | thicker slush | +------------------+-------------+----------------+----------------+ - | C code Frozen | - | thicker slush | thicker slush | + | C code Frozen | | thicker slush | thicker slush | +------------------+-------------+----------------+----------------+ Terminology: * slushy: you can change it if you beg the release team and it's really - important and you coordinate with docs/translations; no "big" changes. + important and you coordinate with docs/translations; no "big" + changes. * thicker slush: you can change it if it's an open bug marked - showstopper for the Polish release, you beg the release team, the - change is very very small yet very very important, and you feel - extremely guilty about your transgressions. + showstopper for the Polish release, you beg the release team, the + change is very very small yet very very important, and you feel + extremely guilty about your transgressions. The different frozen states are intended to be gradients. The exact meaning is decided by the release manager: he has the last word on what's go in, what From numpy-svn at scipy.org Fri Feb 27 03:30:00 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Fri, 27 Feb 2009 02:30:00 -0600 (CST) Subject: [Numpy-svn] r6507 - trunk/doc/neps Message-ID: <20090227083000.30CCCC7C009@scipy.org> Author: cdavid Date: 2009-02-27 02:29:55 -0600 (Fri, 27 Feb 2009) New Revision: 6507 Added: trunk/doc/neps/newbugtracker.rst Log: Start working on bug tracker limitations, scenario and possible solutions. Added: trunk/doc/neps/newbugtracker.rst =================================================================== --- trunk/doc/neps/newbugtracker.rst 2009-02-27 07:32:04 UTC (rev 6506) +++ trunk/doc/neps/newbugtracker.rst 2009-02-27 08:29:55 UTC (rev 6507) @@ -0,0 +1,155 @@ +Some release managers of both numpy and scipy are becoming more and more +disatisfied with the current development workflow, in particular for bug +tracking. This document is a tentative to explain some problematic scenario, +current trac limitations, and what can be done about it. + +Scenario +======== + +new release +----------- + +The workflow for a release is roughly as follows: + + * find all known regressions from last release, and fix them + + * get an idea of all bugs reported since last release + + * triage bugs in regressions/blocker issues/etc..., and assign them in + the according roadmap, subpackage and maintainers + + * pinging subpackage maintainers + +Most of those tasks are quite inefficient in the current trac as used on scipy: + + * it is hard to keep track of issues. In particular, everytime one goes + to trac, we don't really know what's new from what's not. If you + think of issues as emails, the current situation would be like not + having read/unread feature. + + * Batch handling of issues: changing characteristics of several issues + at the same time is difficult, because the only available UI is + web-based. Command-line based UI are much more efficient for this + kind of scenario + +More generally, making useful reports is very awkward with the currently +deployed trac. Trac 0.11 may solve of those problems, but it has to be much +better than the actually deployed version on scipy website. Finding issues with +patches, old patches, etc... and making reports has to be much more streamlined +that it is now. + +subcomponent maintainer +----------------------- + +Say you are the maintainer of scipy.foo, then you are mostly interested in +getting bugs concerning scipy.foo only. But it should be easy for the general +team to follow your work - it should also be easy for casual users (e.g. not +developers) to follow some new features development pace. + +Review, newcoming code +---------------------- + +The goal is simple: make the bar as low as possible, and make sure people know +what to do at every step to contribute to numpy or scipy: + + * Right now, patches languish for too long in trac. Of course, lack of + time is one big reason; but the process of following new contributes + could be made much simpler + + * It should be possible to be pinged only for reviews one a subset of + numpy/scipy. + + * It should be possible for people interested in the patches to follow + its progression. Comments, but also 'mini' timelines could be useful, + particularly for massive issues (massive from a coding POV). + +Current trac limitation +======================= + +Note: by trac, we mean the currently deployed one. Some more recent versions +may solve some of the issues. + + * Multi-project support: we have three trac instances, one for scipy, + one for numpy, one for scikits. Creating accounts, maintaining and + updating each of them is a maintainance burden. Nobody likes to do + this kind of work, so anything which can reduce the burden is a plus. + Also, it happens quite frequently that a bug against numpy is filled + on scipy trac and vice and versa. You have to handle this manually, + currently. + + * Clients not based on the web-ui. This can be made through the xmlrpc + plugin + some clients. In particular, something like + http://tracexplorer.devjavu.com/ can be interesting for people who + like IDE. At least one person expressed his desire to have as much + integration as possible with Eclipse. + + * Powerful queries: it should be possible to quickly find issues + between two releases, the new issues from a given date, issues with + patch, issues waiting for reviews, etc... The issues data have to be + customizable, because most bug-tracker do not support things like + review, etc... so we need to handle this ourselves (through tags, + etc...) + + * Marking issues as read/unread. It should also be possible for any + user to 'mask' issues to ignore them. + + * ticket dependency. This is quite helpful in my experience for big + features which can be split into several issues. Roadmap can only be + created by trac admin, and they are kind of heavy-weight. + +Possible candidates +=================== + +Updated trac + plugins +---------------------- + +Pros: + + * Same system + + * In python, so we can hack it if we want + +Cons: + + * Trac is aimed at being basic, and extended with plugins. But most + plugins are broken, or not up to date. The information on which + plugins are mature is not easily available. + + * At least the scipy.org trac was slow, and needed to be restarted + constantly. This is simply not acceptable. + +Redmine +------- + +Pros: + + * Support most features (except xmlrpc ?). Multi-project, etc... + + * (subjective): I (cdavid) find the out-of-the-box experience with + redmine much more enjoyable. More informations are available easily, + less clicks, more streamlined. See + http://www.redmine.org/wiki/redmine/TheyAreUsingRedmine for examples + + * Conversion scripts from trac (no experience with it yet for numpy/scipy). + + * Community seems friendly and gets a lof of features done + +Cons: + + * new system, less mature ? + + * in Ruby: since we are a python project, most of dev are familiar with + python. + + * Wiki integration, etc... ? + +Unknown: + + * xmlrpc API + * performances + * maintenance cost + +Roundup +------- + +TODO From numpy-svn at scipy.org Fri Feb 27 04:13:47 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Fri, 27 Feb 2009 03:13:47 -0600 (CST) Subject: [Numpy-svn] r6508 - trunk/doc/release Message-ID: <20090227091347.DF741C7C009@scipy.org> Author: cdavid Date: 2009-02-27 03:13:43 -0600 (Fri, 27 Feb 2009) New Revision: 6508 Modified: trunk/doc/release/time_based_proposal.rst Log: Try to hide that I can't compute how many weeks there are in a month... Modified: trunk/doc/release/time_based_proposal.rst =================================================================== --- trunk/doc/release/time_based_proposal.rst 2009-02-27 08:29:55 UTC (rev 6507) +++ trunk/doc/release/time_based_proposal.rst 2009-02-27 09:13:43 UTC (rev 6508) @@ -28,7 +28,7 @@ Time schedule ------------- -The proposed schedule is to release numpy every 3 months - the exact period can +The proposed schedule is to release numpy every 9 weeks - the exact period can be tweaked if it ends up not working as expected. There will be several stages for the cycle: @@ -108,7 +108,7 @@ The different frozen states are intended to be gradients. The exact meaning is decided by the release manager: he has the last word on what's go in, what -doesn't. The proposed schedule means that there would be at most 4 months +doesn't. The proposed schedule means that there would be at most 12 weeks between putting code into the source code repository and being released. Release team From numpy-svn at scipy.org Fri Feb 27 06:37:30 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Fri, 27 Feb 2009 05:37:30 -0600 (CST) Subject: [Numpy-svn] r6509 - trunk/tools/win32build/misc/msvcrt90 Message-ID: <20090227113730.62B5FC7C02E@scipy.org> Author: cdavid Date: 2009-02-27 05:37:26 -0600 (Fri, 27 Feb 2009) New Revision: 6509 Modified: trunk/tools/win32build/misc/msvcrt90/ Log: Ignore generated def file for msvcr90.def.in. Property changes on: trunk/tools/win32build/misc/msvcrt90 ___________________________________________________________________ Name: svn:ignore + msvcr90.def From numpy-svn at scipy.org Fri Feb 27 06:54:35 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Fri, 27 Feb 2009 05:54:35 -0600 (CST) Subject: [Numpy-svn] r6510 - trunk/numpy/core Message-ID: <20090227115435.169A1C7C1C8@scipy.org> Author: cdavid Date: 2009-02-27 05:54:31 -0600 (Fri, 27 Feb 2009) New Revision: 6510 Modified: trunk/numpy/core/setup.py Log: Force double convertion for long double on windows x64 as well. Modified: trunk/numpy/core/setup.py =================================================================== --- trunk/numpy/core/setup.py 2009-02-27 11:37:26 UTC (rev 6509) +++ trunk/numpy/core/setup.py 2009-02-27 11:54:31 UTC (rev 6510) @@ -89,7 +89,7 @@ # On win32, force long double format string to be 'g', not # 'Lg', since the MS runtime does not support long double whose # size is > sizeof(double) - if a =="Intel": + if a == "Intel" or a == "AMD64": deflist.append('FORCE_NO_LONG_DOUBLE_FORMATTING') def check_math_capabilities(config, moredefs, mathlibs): From numpy-svn at scipy.org Fri Feb 27 06:57:10 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Fri, 27 Feb 2009 05:57:10 -0600 (CST) Subject: [Numpy-svn] r6511 - trunk/numpy/core/tests Message-ID: <20090227115710.8301DC7C1C8@scipy.org> Author: cdavid Date: 2009-02-27 05:57:06 -0600 (Fri, 27 Feb 2009) New Revision: 6511 Modified: trunk/numpy/core/tests/test_unicode.py Log: All unicode test now pass with mingw64. Modified: trunk/numpy/core/tests/test_unicode.py =================================================================== --- trunk/numpy/core/tests/test_unicode.py 2009-02-27 11:54:31 UTC (rev 6510) +++ trunk/numpy/core/tests/test_unicode.py 2009-02-27 11:57:06 UTC (rev 6511) @@ -3,10 +3,6 @@ from numpy.testing import * from numpy.core import * -def iswin64(): - import platform - return platform.architecture()[0] == "64bit" and sys.platform == "win32" - # Guess the UCS length for this python interpreter if len(buffer(u'u')) == 4: ucs4 = True @@ -42,20 +38,17 @@ else: self.assert_(len(buffer(ua_scalar)) == 0) - @dec.knownfailureif(iswin64(), "Crash on win64") def test_zeros0D(self): """Check creation of 0-dimensional objects""" ua = zeros((), dtype='U%s' % self.ulen) self.content_check(ua, ua[()], 4*self.ulen) - @dec.knownfailureif(iswin64(), "Crash on win64") def test_zerosSD(self): """Check creation of single-dimensional objects""" ua = zeros((2,), dtype='U%s' % self.ulen) self.content_check(ua, ua[0], 4*self.ulen*2) self.content_check(ua, ua[1], 4*self.ulen*2) - @dec.knownfailureif(iswin64(), "Crash on win64") def test_zerosMD(self): """Check creation of multi-dimensional objects""" ua = zeros((2,3,4), dtype='U%s' % self.ulen) @@ -105,20 +98,17 @@ # regular 2-byte word self.assert_(len(buffer(ua_scalar)) == 2*self.ulen) - @dec.knownfailureif(iswin64(), "Crash on win64") def test_values0D(self): """Check creation of 0-dimensional objects with values""" ua = array(self.ucs_value*self.ulen, dtype='U%s' % self.ulen) self.content_check(ua, ua[()], 4*self.ulen) - @dec.knownfailureif(iswin64(), "Crash on win64") def test_valuesSD(self): """Check creation of single-dimensional objects with values""" ua = array([self.ucs_value*self.ulen]*2, dtype='U%s' % self.ulen) self.content_check(ua, ua[0], 4*self.ulen*2) self.content_check(ua, ua[1], 4*self.ulen*2) - @dec.knownfailureif(iswin64(), "Crash on win64") def test_valuesMD(self): """Check creation of multi-dimensional objects with values""" ua = array([[[self.ucs_value*self.ulen]*2]*3]*4, dtype='U%s' % self.ulen) From numpy-svn at scipy.org Fri Feb 27 06:59:29 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Fri, 27 Feb 2009 05:59:29 -0600 (CST) Subject: [Numpy-svn] r6512 - trunk/numpy/core/tests Message-ID: <20090227115929.A5285C7C1C8@scipy.org> Author: cdavid Date: 2009-02-27 05:59:26 -0600 (Fri, 27 Feb 2009) New Revision: 6512 Modified: trunk/numpy/core/tests/test_unicode.py Log: Screwed up the commit - effectively remove all unicode known failures on win64. Modified: trunk/numpy/core/tests/test_unicode.py =================================================================== --- trunk/numpy/core/tests/test_unicode.py 2009-02-27 11:57:06 UTC (rev 6511) +++ trunk/numpy/core/tests/test_unicode.py 2009-02-27 11:59:26 UTC (rev 6512) @@ -183,14 +183,12 @@ # regular 2-byte word self.assert_(len(buffer(ua_scalar)) == 2*self.ulen) - @dec.knownfailureif(iswin64(), "Crash on win64") def test_values0D(self): """Check assignment of 0-dimensional objects with values""" ua = zeros((), dtype='U%s' % self.ulen) ua[()] = self.ucs_value*self.ulen self.content_check(ua, ua[()], 4*self.ulen) - @dec.knownfailureif(iswin64(), "Crash on win64") def test_valuesSD(self): """Check assignment of single-dimensional objects with values""" ua = zeros((2,), dtype='U%s' % self.ulen) @@ -199,7 +197,6 @@ ua[1] = self.ucs_value*self.ulen self.content_check(ua, ua[1], 4*self.ulen*2) - @dec.knownfailureif(iswin64(), "Crash on win64") def test_valuesMD(self): """Check assignment of multi-dimensional objects with values""" ua = zeros((2,3,4), dtype='U%s' % self.ulen) @@ -253,7 +250,6 @@ class byteorder_values: """Check the byteorder of unicode arrays in round-trip conversions""" - @dec.knownfailureif(iswin64(), "Crash on win64") def test_values0D(self): """Check byteorder of 0-dimensional objects""" ua = array(self.ucs_value*self.ulen, dtype='U%s' % self.ulen) @@ -266,7 +262,6 @@ # Arrays must be equal after the round-trip assert_equal(ua, ua3) - @dec.knownfailureif(iswin64(), "Crash on win64") def test_valuesSD(self): """Check byteorder of single-dimensional objects""" ua = array([self.ucs_value*self.ulen]*2, dtype='U%s' % self.ulen) @@ -277,7 +272,6 @@ # Arrays must be equal after the round-trip assert_equal(ua, ua3) - @dec.knownfailureif(iswin64(), "Crash on win64") def test_valuesMD(self): """Check byteorder of multi-dimensional objects""" ua = array([[[self.ucs_value*self.ulen]*2]*3]*4, From numpy-svn at scipy.org Fri Feb 27 07:00:45 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Fri, 27 Feb 2009 06:00:45 -0600 (CST) Subject: [Numpy-svn] r6513 - trunk/numpy/core/tests Message-ID: <20090227120045.B11B9C7C02E@scipy.org> Author: cdavid Date: 2009-02-27 06:00:41 -0600 (Fri, 27 Feb 2009) New Revision: 6513 Modified: trunk/numpy/core/tests/test_scalarmath.py Log: Remove one more win64 known failure which now passes. Modified: trunk/numpy/core/tests/test_scalarmath.py =================================================================== --- trunk/numpy/core/tests/test_scalarmath.py 2009-02-27 11:59:26 UTC (rev 6512) +++ trunk/numpy/core/tests/test_scalarmath.py 2009-02-27 12:00:41 UTC (rev 6513) @@ -7,10 +7,6 @@ np.single, np.double, np.longdouble, np.csingle, np.cdouble, np.clongdouble] -def iswin64(): - import platform - return platform.architecture()[0] == "64bit" and sys.platform == "win32" - # This compares scalarmath against ufuncs. class TestTypes(TestCase): @@ -47,7 +43,6 @@ b = a ** 4 assert b == 81, "error with %r: got %r" % (t,b) - @dec.knownfailureif(iswin64(), "Crash on win64") def test_large_types(self): for t in [np.int32, np.int64, np.float32, np.float64, np.longdouble]: a = t(51) From numpy-svn at scipy.org Fri Feb 27 07:02:28 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Fri, 27 Feb 2009 06:02:28 -0600 (CST) Subject: [Numpy-svn] r6514 - trunk/numpy/core/tests Message-ID: <20090227120228.2F606CB4076@scipy.org> Author: cdavid Date: 2009-02-27 06:02:24 -0600 (Fri, 27 Feb 2009) New Revision: 6514 Modified: trunk/numpy/core/tests/test_regression.py Log: All regression tests now pass on win64. Modified: trunk/numpy/core/tests/test_regression.py =================================================================== --- trunk/numpy/core/tests/test_regression.py 2009-02-27 12:00:41 UTC (rev 6513) +++ trunk/numpy/core/tests/test_regression.py 2009-02-27 12:02:24 UTC (rev 6514) @@ -8,10 +8,6 @@ rlevel = 1 -def iswin64(): - import platform - return platform.architecture()[0] == "64bit" and sys.platform == "win32" - def assert_valid_refcount(op): a = np.arange(100 * 100) b = np.arange(100*100).reshape(100, 100) @@ -287,7 +283,6 @@ x[0].tolist() [i for i in x[0]] - @dec.knownfailureif(iswin64(), "Crash on win64") def test_unicode_string_comparison(self,level=rlevel): """Ticket #190""" a = np.array('hello',np.unicode_) @@ -758,7 +753,6 @@ x |= y self.failUnlessRaises(TypeError,rs) - @dec.knownfailureif(iswin64(), "Crash on win64") def test_unicode_scalar(self, level=rlevel): """Ticket #600""" import cPickle @@ -1170,7 +1164,6 @@ b = np.array(['1','2','3']) assert_equal(a,b) - @dec.knownfailureif(iswin64(), "Crash on win64") def test_unaligned_unicode_access(self, level=rlevel) : """Ticket #825""" for i in range(1,9) : From numpy-svn at scipy.org Fri Feb 27 07:04:26 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Fri, 27 Feb 2009 06:04:26 -0600 (CST) Subject: [Numpy-svn] r6515 - trunk/numpy/core/tests Message-ID: <20090227120426.885C2CB4076@scipy.org> Author: cdavid Date: 2009-02-27 06:04:23 -0600 (Fri, 27 Feb 2009) New Revision: 6515 Modified: trunk/numpy/core/tests/test_multiarray.py Log: More string comp test know pass on win64. Modified: trunk/numpy/core/tests/test_multiarray.py =================================================================== --- trunk/numpy/core/tests/test_multiarray.py 2009-02-27 12:02:24 UTC (rev 6514) +++ trunk/numpy/core/tests/test_multiarray.py 2009-02-27 12:04:23 UTC (rev 6515) @@ -560,7 +560,6 @@ class TestStringCompare(TestCase): - @dec.knownfailureif(iswin64(), "Crash on win64") def test_string(self): g1 = array(["This","is","example"]) g2 = array(["This","was","example"]) @@ -571,7 +570,6 @@ assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0,1,2]]) assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0,1,2]]) - @dec.knownfailureif(iswin64(), "Crash on win64") def test_mixed(self): g1 = array(["spam","spa","spammer","and eggs"]) g2 = "spam" @@ -583,7 +581,6 @@ assert_array_equal(g1 >= g2, [x >= g2 for x in g1]) - @dec.knownfailureif(iswin64(), "Crash on win64") def test_unicode(self): g1 = array([u"This",u"is",u"example"]) g2 = array([u"This",u"was",u"example"]) From numpy-svn at scipy.org Fri Feb 27 07:06:43 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Fri, 27 Feb 2009 06:06:43 -0600 (CST) Subject: [Numpy-svn] r6516 - trunk/numpy/core/tests Message-ID: <20090227120643.08F04C7C1C8@scipy.org> Author: cdavid Date: 2009-02-27 06:06:40 -0600 (Fri, 27 Feb 2009) New Revision: 6516 Modified: trunk/numpy/core/tests/test_multiarray.py Log: float format unit test now passes on win64. Modified: trunk/numpy/core/tests/test_multiarray.py =================================================================== --- trunk/numpy/core/tests/test_multiarray.py 2009-02-27 12:04:23 UTC (rev 6515) +++ trunk/numpy/core/tests/test_multiarray.py 2009-02-27 12:06:40 UTC (rev 6516) @@ -800,7 +800,6 @@ os.unlink(self.filename) #tmp_file.close() - @dec.knownfailureif(iswin64(), "Crash on win64") def test_roundtrip_file(self): f = open(self.filename, 'wb') self.x.tofile(f) @@ -812,13 +811,11 @@ assert_array_equal(y, self.x.flat) os.unlink(self.filename) - @dec.knownfailureif(iswin64(), "Crash on win64") def test_roundtrip_filename(self): self.x.tofile(self.filename) y = np.fromfile(self.filename, dtype=self.dtype) assert_array_equal(y, self.x.flat) - @dec.knownfailureif(iswin64(), "Crash on win64") def test_roundtrip_binary_str(self): s = self.x.tostring() y = np.fromstring(s, dtype=self.dtype) @@ -828,7 +825,6 @@ y = np.fromstring(s, dtype=self.dtype) assert_array_equal(y, self.x.flatten('F')) - @dec.knownfailureif(iswin64(), "Crash on win64") def test_roundtrip_str(self): x = self.x.real.ravel() s = "@".join(map(str, x)) @@ -838,14 +834,12 @@ assert_array_equal(x[nan_mask], y[nan_mask]) assert_array_almost_equal(x[~nan_mask], y[~nan_mask], decimal=5) - @dec.knownfailureif(iswin64(), "Crash on win64") def test_roundtrip_repr(self): x = self.x.real.ravel() s = "@".join(map(repr, x)) y = np.fromstring(s, sep="@") assert_array_equal(x, y) - @dec.knownfailureif(iswin64(), "Crash on win64") def _check_from(self, s, value, **kw): y = np.fromstring(s, **kw) assert_array_equal(y, value) @@ -856,66 +850,53 @@ y = np.fromfile(self.filename, **kw) assert_array_equal(y, value) - @dec.knownfailureif(iswin64(), "Crash on win64") def test_nan(self): self._check_from("nan +nan -nan NaN nan(foo) +NaN(BAR) -NAN(q_u_u_x_)", [nan, nan, nan, nan, nan, nan, nan], sep=' ') - @dec.knownfailureif(iswin64(), "Crash on win64") def test_inf(self): self._check_from("inf +inf -inf infinity -Infinity iNfInItY -inF", [inf, inf, -inf, inf, -inf, inf, -inf], sep=' ') - @dec.knownfailureif(iswin64(), "Crash on win64") def test_numbers(self): self._check_from("1.234 -1.234 .3 .3e55 -123133.1231e+133", [1.234, -1.234, .3, .3e55, -123133.1231e+133], sep=' ') - @dec.knownfailureif(iswin64(), "Crash on win64") def test_binary(self): self._check_from('\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@', array([1,2,3,4]), dtype=' Author: cdavid Date: 2009-02-27 06:07:22 -0600 (Fri, 27 Feb 2009) New Revision: 6517 Modified: trunk/numpy/core/tests/test_multiarray.py Log: Remove unused iswin64 in multiarray unit test. Modified: trunk/numpy/core/tests/test_multiarray.py =================================================================== --- trunk/numpy/core/tests/test_multiarray.py 2009-02-27 12:06:40 UTC (rev 6516) +++ trunk/numpy/core/tests/test_multiarray.py 2009-02-27 12:07:22 UTC (rev 6517) @@ -7,10 +7,6 @@ from test_print import in_foreign_locale -def iswin64(): - import platform - return platform.architecture()[0] == "64bit" and sys.platform == "win32" - class TestFlags(TestCase): def setUp(self): self.a = arange(10) From numpy-svn at scipy.org Fri Feb 27 13:34:51 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Fri, 27 Feb 2009 12:34:51 -0600 (CST) Subject: [Numpy-svn] r6518 - trunk Message-ID: <20090227183451.A2130C84143@scipy.org> Author: cdavid Date: 2009-02-27 12:34:42 -0600 (Fri, 27 Feb 2009) New Revision: 6518 Modified: trunk/TEST_COMMIT Log: Test commit. Modified: trunk/TEST_COMMIT =================================================================== --- trunk/TEST_COMMIT 2009-02-27 12:07:22 UTC (rev 6517) +++ trunk/TEST_COMMIT 2009-02-27 18:34:42 UTC (rev 6518) @@ -1,4 +1,5 @@ oliphant: yes + rkern: yes pearu: yes fperez: yes From numpy-svn at scipy.org Fri Feb 27 17:27:33 2009 From: numpy-svn at scipy.org (numpy-svn at scipy.org) Date: Fri, 27 Feb 2009 16:27:33 -0600 (CST) Subject: [Numpy-svn] r6519 - in trunk/doc/source: . _templates Message-ID: <20090227222733.1EAB6C7C02E@scipy.org> Author: ptvirtan Date: 2009-02-27 16:27:14 -0600 (Fri, 27 Feb 2009) New Revision: 6519 Modified: trunk/doc/source/_templates/layout.html trunk/doc/source/conf.py Log: docs: move edit link to the sidebar, and show it for all pages Modified: trunk/doc/source/_templates/layout.html =================================================================== --- trunk/doc/source/_templates/layout.html 2009-02-27 18:34:42 UTC (rev 6518) +++ trunk/doc/source/_templates/layout.html 2009-02-27 22:27:14 UTC (rev 6519) @@ -2,3 +2,16 @@ {% block rootrellink %}
  • {{ shorttitle }}{{ reldelim1 }}
  • {% endblock %} + +{% block sidebarsearch %} +{%- if sourcename %} + +{%- endif %} +{{ super() }} +{% endblock %} Modified: trunk/doc/source/conf.py =================================================================== --- trunk/doc/source/conf.py 2009-02-27 18:34:42 UTC (rev 6518) +++ trunk/doc/source/conf.py 2009-02-27 22:27:14 UTC (rev 6519) @@ -207,9 +207,6 @@ # If we want to do a phantom import from an XML file for all autodocs phantom_import_file = 'dump.xml' -# Edit links -numpydoc_edit_link = '`Edit
    `__' - # ----------------------------------------------------------------------------- # Coverage checker # -----------------------------------------------------------------------------