From solipsis at pitrou.net Tue Dec 1 03:42:59 2015 From: solipsis at pitrou.net (solipsis at pitrou.net) Date: Tue, 01 Dec 2015 08:42:59 +0000 Subject: [Python-checkins] Daily reference leaks (734247d5d0f9): sum=7 Message-ID: <20151201084259.22392.3682@psf.io> results for 734247d5d0f9 on branch "default" -------------------------------------------- test_asyncio leaked [0, 3, 0] memory blocks, sum=3 test_functools leaked [0, 2, 2] memory blocks, sum=4 Command line was: ['./python', '-m', 'test.regrtest', '-uall', '-R', '3:3:/home/psf-users/antoine/refleaks/refloghSfbUK', '--timeout', '7200'] From python-checkins at python.org Tue Dec 1 09:11:14 2015 From: python-checkins at python.org (steven.daprano) Date: Tue, 01 Dec 2015 14:11:14 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=283=2E4=29=3A_Fix_for_issue_?= =?utf-8?q?=2325177_with_the_mean_of_very_small_and_very_large_numbers=2E?= Message-ID: <20151201141109.13433.42167@psf.io> https://hg.python.org/cpython/rev/4bc9405c4f7b changeset: 99406:4bc9405c4f7b branch: 3.4 parent: 99400:65a23d24fd12 user: Steven D'Aprano date: Tue Dec 01 13:48:48 2015 +1100 summary: Fix for issue #25177 with the mean of very small and very large numbers. files: Lib/statistics.py | 181 +++++++---- Lib/test/test_statistics.py | 363 ++++++++++++++++++++--- Misc/NEWS | 4 + 3 files changed, 431 insertions(+), 117 deletions(-) diff --git a/Lib/statistics.py b/Lib/statistics.py --- a/Lib/statistics.py +++ b/Lib/statistics.py @@ -104,6 +104,8 @@ from fractions import Fraction from decimal import Decimal +from itertools import groupby + # === Exceptions === @@ -115,86 +117,102 @@ # === Private utilities === def _sum(data, start=0): - """_sum(data [, start]) -> value + """_sum(data [, start]) -> (type, sum, count) - Return a high-precision sum of the given numeric data. If optional - argument ``start`` is given, it is added to the total. If ``data`` is - empty, ``start`` (defaulting to 0) is returned. + Return a high-precision sum of the given numeric data as a fraction, + together with the type to be converted to and the count of items. + + If optional argument ``start`` is given, it is added to the total. + If ``data`` is empty, ``start`` (defaulting to 0) is returned. Examples -------- >>> _sum([3, 2.25, 4.5, -0.5, 1.0], 0.75) - 11.0 + (, Fraction(11, 1), 5) Some sources of round-off error will be avoided: >>> _sum([1e50, 1, -1e50] * 1000) # Built-in sum returns zero. - 1000.0 + (, Fraction(1000, 1), 3000) Fractions and Decimals are also supported: >>> from fractions import Fraction as F >>> _sum([F(2, 3), F(7, 5), F(1, 4), F(5, 6)]) - Fraction(63, 20) + (, Fraction(63, 20), 4) >>> from decimal import Decimal as D >>> data = [D("0.1375"), D("0.2108"), D("0.3061"), D("0.0419")] >>> _sum(data) - Decimal('0.6963') + (, Fraction(6963, 10000), 4) Mixed types are currently treated as an error, except that int is allowed. """ - # We fail as soon as we reach a value that is not an int or the type of - # the first value which is not an int. E.g. _sum([int, int, float, int]) - # is okay, but sum([int, int, float, Fraction]) is not. - allowed_types = set([int, type(start)]) + count = 0 n, d = _exact_ratio(start) - partials = {d: n} # map {denominator: sum of numerators} - # Micro-optimizations. - exact_ratio = _exact_ratio + partials = {d: n} partials_get = partials.get - # Add numerators for each denominator. - for x in data: - _check_type(type(x), allowed_types) - n, d = exact_ratio(x) - partials[d] = partials_get(d, 0) + n - # Find the expected result type. 
If allowed_types has only one item, it - # will be int; if it has two, use the one which isn't int. - assert len(allowed_types) in (1, 2) - if len(allowed_types) == 1: - assert allowed_types.pop() is int - T = int + T = _coerce(int, type(start)) + for typ, values in groupby(data, type): + T = _coerce(T, typ) # or raise TypeError + for n,d in map(_exact_ratio, values): + count += 1 + partials[d] = partials_get(d, 0) + n + if None in partials: + # The sum will be a NAN or INF. We can ignore all the finite + # partials, and just look at this special one. + total = partials[None] + assert not _isfinite(total) else: - T = (allowed_types - set([int])).pop() - if None in partials: - assert issubclass(T, (float, Decimal)) - assert not math.isfinite(partials[None]) - return T(partials[None]) - total = Fraction() - for d, n in sorted(partials.items()): - total += Fraction(n, d) - if issubclass(T, int): - assert total.denominator == 1 - return T(total.numerator) - if issubclass(T, Decimal): - return T(total.numerator)/total.denominator - return T(total) + # Sum all the partial sums using builtin sum. + # FIXME is this faster if we sum them in order of the denominator? + total = sum(Fraction(n, d) for d, n in sorted(partials.items())) + return (T, total, count) -def _check_type(T, allowed): - if T not in allowed: - if len(allowed) == 1: - allowed.add(T) - else: - types = ', '.join([t.__name__ for t in allowed] + [T.__name__]) - raise TypeError("unsupported mixed types: %s" % types) +def _isfinite(x): + try: + return x.is_finite() # Likely a Decimal. + except AttributeError: + return math.isfinite(x) # Coerces to float first. + + +def _coerce(T, S): + """Coerce types T and S to a common type, or raise TypeError. + + Coercion rules are currently an implementation detail. See the CoerceTest + test class in test_statistics for details. + """ + # See http://bugs.python.org/issue24068. + assert T is not bool, "initial type T is bool" + # If the types are the same, no need to coerce anything. Put this + # first, so that the usual case (no coercion needed) happens as soon + # as possible. + if T is S: return T + # Mixed int & other coerce to the other type. + if S is int or S is bool: return T + if T is int: return S + # If one is a (strict) subclass of the other, coerce to the subclass. + if issubclass(S, T): return S + if issubclass(T, S): return T + # Ints coerce to the other type. + if issubclass(T, int): return S + if issubclass(S, int): return T + # Mixed fraction & float coerces to float (or float subclass). + if issubclass(T, Fraction) and issubclass(S, float): + return S + if issubclass(T, float) and issubclass(S, Fraction): + return T + # Any other combination is disallowed. + msg = "don't know how to coerce %s and %s" + raise TypeError(msg % (T.__name__, S.__name__)) def _exact_ratio(x): - """Convert Real number x exactly to (numerator, denominator) pair. + """Return Real number x to exact (numerator, denominator) pair. >>> _exact_ratio(0.25) (1, 4) @@ -202,29 +220,31 @@ x is expected to be an int, Fraction, Decimal or float. """ try: + # Optimise the common case of floats. We expect that the most often + # used numeric type will be builtin floats, so try to make this as + # fast as possible. + if type(x) is float: + return x.as_integer_ratio() try: - # int, Fraction + # x may be an int, Fraction, or Integral ABC. return (x.numerator, x.denominator) except AttributeError: - # float try: + # x may be a float subclass. 
return x.as_integer_ratio() except AttributeError: - # Decimal try: + # x may be a Decimal. return _decimal_to_ratio(x) except AttributeError: - msg = "can't convert type '{}' to numerator/denominator" - raise TypeError(msg.format(type(x).__name__)) from None + # Just give up? + pass except (OverflowError, ValueError): - # INF or NAN - if __debug__: - # Decimal signalling NANs cannot be converted to float :-( - if isinstance(x, Decimal): - assert not x.is_finite() - else: - assert not math.isfinite(x) + # float NAN or INF. + assert not math.isfinite(x) return (x, None) + msg = "can't convert type '{}' to numerator/denominator" + raise TypeError(msg.format(type(x).__name__)) # FIXME This is faster than Fraction.from_decimal, but still too slow. @@ -239,7 +259,7 @@ sign, digits, exp = d.as_tuple() if exp in ('F', 'n', 'N'): # INF, NAN, sNAN assert not d.is_finite() - raise ValueError + return (d, None) num = 0 for digit in digits: num = num*10 + digit @@ -253,6 +273,24 @@ return (num, den) +def _convert(value, T): + """Convert value to given numeric type T.""" + if type(value) is T: + # This covers the cases where T is Fraction, or where value is + # a NAN or INF (Decimal or float). + return value + if issubclass(T, int) and value.denominator != 1: + T = float + try: + # FIXME: what do we do if this overflows? + return T(value) + except TypeError: + if issubclass(T, Decimal): + return T(value.numerator)/T(value.denominator) + else: + raise + + def _counts(data): # Generate a table of sorted (value, frequency) pairs. table = collections.Counter(iter(data)).most_common() @@ -290,7 +328,9 @@ n = len(data) if n < 1: raise StatisticsError('mean requires at least one data point') - return _sum(data)/n + T, total, count = _sum(data) + assert count == n + return _convert(total/n, T) # FIXME: investigate ways to calculate medians without sorting? Quickselect? @@ -460,12 +500,14 @@ """ if c is None: c = mean(data) - ss = _sum((x-c)**2 for x in data) + T, total, count = _sum((x-c)**2 for x in data) # The following sum should mathematically equal zero, but due to rounding # error may not. - ss -= _sum((x-c) for x in data)**2/len(data) - assert not ss < 0, 'negative sum of square deviations: %f' % ss - return ss + U, total2, count2 = _sum((x-c) for x in data) + assert T == U and count == count2 + total -= total2**2/len(data) + assert not total < 0, 'negative sum of square deviations: %f' % total + return (T, total) def variance(data, xbar=None): @@ -511,8 +553,8 @@ n = len(data) if n < 2: raise StatisticsError('variance requires at least two data points') - ss = _ss(data, xbar) - return ss/(n-1) + T, ss = _ss(data, xbar) + return _convert(ss/(n-1), T) def pvariance(data, mu=None): @@ -560,7 +602,8 @@ if n < 1: raise StatisticsError('pvariance requires at least one data point') ss = _ss(data, mu) - return ss/n + T, ss = _ss(data, mu) + return _convert(ss/n, T) def stdev(data, xbar=None): diff --git a/Lib/test/test_statistics.py b/Lib/test/test_statistics.py --- a/Lib/test/test_statistics.py +++ b/Lib/test/test_statistics.py @@ -21,6 +21,37 @@ # === Helper functions and class === +def _nan_equal(a, b): + """Return True if a and b are both the same kind of NAN. 
+ + >>> _nan_equal(Decimal('NAN'), Decimal('NAN')) + True + >>> _nan_equal(Decimal('sNAN'), Decimal('sNAN')) + True + >>> _nan_equal(Decimal('NAN'), Decimal('sNAN')) + False + >>> _nan_equal(Decimal(42), Decimal('NAN')) + False + + >>> _nan_equal(float('NAN'), float('NAN')) + True + >>> _nan_equal(float('NAN'), 0.5) + False + + >>> _nan_equal(float('NAN'), Decimal('NAN')) + False + + NAN payloads are not compared. + """ + if type(a) is not type(b): + return False + if isinstance(a, float): + return math.isnan(a) and math.isnan(b) + aexp = a.as_tuple()[2] + bexp = b.as_tuple()[2] + return (aexp == bexp) and (aexp in ('n', 'N')) # Both NAN or both sNAN. + + def _calc_errors(actual, expected): """Return the absolute and relative errors between two numbers. @@ -675,15 +706,60 @@ self.assertEqual(_exact_ratio(D("12.345")), (12345, 1000)) self.assertEqual(_exact_ratio(D("-1.98")), (-198, 100)) + def test_inf(self): + INF = float("INF") + class MyFloat(float): + pass + class MyDecimal(Decimal): + pass + for inf in (INF, -INF): + for type_ in (float, MyFloat, Decimal, MyDecimal): + x = type_(inf) + ratio = statistics._exact_ratio(x) + self.assertEqual(ratio, (x, None)) + self.assertEqual(type(ratio[0]), type_) + self.assertTrue(math.isinf(ratio[0])) + + def test_float_nan(self): + NAN = float("NAN") + class MyFloat(float): + pass + for nan in (NAN, MyFloat(NAN)): + ratio = statistics._exact_ratio(nan) + self.assertTrue(math.isnan(ratio[0])) + self.assertIs(ratio[1], None) + self.assertEqual(type(ratio[0]), type(nan)) + + def test_decimal_nan(self): + NAN = Decimal("NAN") + sNAN = Decimal("sNAN") + class MyDecimal(Decimal): + pass + for nan in (NAN, MyDecimal(NAN), sNAN, MyDecimal(sNAN)): + ratio = statistics._exact_ratio(nan) + self.assertTrue(_nan_equal(ratio[0], nan)) + self.assertIs(ratio[1], None) + self.assertEqual(type(ratio[0]), type(nan)) + class DecimalToRatioTest(unittest.TestCase): # Test _decimal_to_ratio private function. - def testSpecialsRaise(self): - # Test that NANs and INFs raise ValueError. - # Non-special values are covered by _exact_ratio above. - for d in (Decimal('NAN'), Decimal('sNAN'), Decimal('INF')): - self.assertRaises(ValueError, statistics._decimal_to_ratio, d) + def test_infinity(self): + # Test that INFs are handled correctly. + inf = Decimal('INF') + self.assertEqual(statistics._decimal_to_ratio(inf), (inf, None)) + self.assertEqual(statistics._decimal_to_ratio(-inf), (-inf, None)) + + def test_nan(self): + # Test that NANs are handled correctly. + for nan in (Decimal('NAN'), Decimal('sNAN')): + num, den = statistics._decimal_to_ratio(nan) + # Because NANs always compare non-equal, we cannot use assertEqual. + # Nor can we use an identity test, as we don't guarantee anything + # about the object identity. + self.assertTrue(_nan_equal(num, nan)) + self.assertIs(den, None) def test_sign(self): # Test sign is calculated correctly. @@ -718,25 +794,181 @@ self.assertEqual(t, (147000, 1)) -class CheckTypeTest(unittest.TestCase): - # Test _check_type private function. +class IsFiniteTest(unittest.TestCase): + # Test _isfinite private function. - def test_allowed(self): - # Test that a type which should be allowed is allowed. - allowed = set([int, float]) - statistics._check_type(int, allowed) - statistics._check_type(float, allowed) + def test_finite(self): + # Test that finite numbers are recognised as finite. 
+ for x in (5, Fraction(1, 3), 2.5, Decimal("5.5")): + self.assertTrue(statistics._isfinite(x)) - def test_not_allowed(self): - # Test that a type which should not be allowed raises. - allowed = set([int, float]) - self.assertRaises(TypeError, statistics._check_type, Decimal, allowed) + def test_infinity(self): + # Test that INFs are not recognised as finite. + for x in (float("inf"), Decimal("inf")): + self.assertFalse(statistics._isfinite(x)) - def test_add_to_allowed(self): - # Test that a second type will be added to the allowed set. - allowed = set([int]) - statistics._check_type(float, allowed) - self.assertEqual(allowed, set([int, float])) + def test_nan(self): + # Test that NANs are not recognised as finite. + for x in (float("nan"), Decimal("NAN"), Decimal("sNAN")): + self.assertFalse(statistics._isfinite(x)) + + +class CoerceTest(unittest.TestCase): + # Test that private function _coerce correctly deals with types. + + # The coercion rules are currently an implementation detail, although at + # some point that should change. The tests and comments here define the + # correct implementation. + + # Pre-conditions of _coerce: + # + # - The first time _sum calls _coerce, the + # - coerce(T, S) will never be called with bool as the first argument; + # this is a pre-condition, guarded with an assertion. + + # + # - coerce(T, T) will always return T; we assume T is a valid numeric + # type. Violate this assumption at your own risk. + # + # - Apart from as above, bool is treated as if it were actually int. + # + # - coerce(int, X) and coerce(X, int) return X. + # - + def test_bool(self): + # bool is somewhat special, due to the pre-condition that it is + # never given as the first argument to _coerce, and that it cannot + # be subclassed. So we test it specially. + for T in (int, float, Fraction, Decimal): + self.assertIs(statistics._coerce(T, bool), T) + class MyClass(T): pass + self.assertIs(statistics._coerce(MyClass, bool), MyClass) + + def assertCoerceTo(self, A, B): + """Assert that type A coerces to B.""" + self.assertIs(statistics._coerce(A, B), B) + self.assertIs(statistics._coerce(B, A), B) + + def check_coerce_to(self, A, B): + """Checks that type A coerces to B, including subclasses.""" + # Assert that type A is coerced to B. + self.assertCoerceTo(A, B) + # Subclasses of A are also coerced to B. + class SubclassOfA(A): pass + self.assertCoerceTo(SubclassOfA, B) + # A, and subclasses of A, are coerced to subclasses of B. + class SubclassOfB(B): pass + self.assertCoerceTo(A, SubclassOfB) + self.assertCoerceTo(SubclassOfA, SubclassOfB) + + def assertCoerceRaises(self, A, B): + """Assert that coercing A to B, or vice versa, raises TypeError.""" + self.assertRaises(TypeError, statistics._coerce, (A, B)) + self.assertRaises(TypeError, statistics._coerce, (B, A)) + + def check_type_coercions(self, T): + """Check that type T coerces correctly with subclasses of itself.""" + assert T is not bool + # Coercing a type with itself returns the same type. + self.assertIs(statistics._coerce(T, T), T) + # Coercing a type with a subclass of itself returns the subclass. + class U(T): pass + class V(T): pass + class W(U): pass + for typ in (U, V, W): + self.assertCoerceTo(T, typ) + self.assertCoerceTo(U, W) + # Coercing two subclasses that aren't parent/child is an error. + self.assertCoerceRaises(U, V) + self.assertCoerceRaises(V, W) + + def test_int(self): + # Check that int coerces correctly. 
+ self.check_type_coercions(int) + for typ in (float, Fraction, Decimal): + self.check_coerce_to(int, typ) + + def test_fraction(self): + # Check that Fraction coerces correctly. + self.check_type_coercions(Fraction) + self.check_coerce_to(Fraction, float) + + def test_decimal(self): + # Check that Decimal coerces correctly. + self.check_type_coercions(Decimal) + + def test_float(self): + # Check that float coerces correctly. + self.check_type_coercions(float) + + def test_non_numeric_types(self): + for bad_type in (str, list, type(None), tuple, dict): + for good_type in (int, float, Fraction, Decimal): + self.assertCoerceRaises(good_type, bad_type) + + def test_incompatible_types(self): + # Test that incompatible types raise. + for T in (float, Fraction): + class MySubclass(T): pass + self.assertCoerceRaises(T, Decimal) + self.assertCoerceRaises(MySubclass, Decimal) + + +class ConvertTest(unittest.TestCase): + # Test private _convert function. + + def check_exact_equal(self, x, y): + """Check that x equals y, and has the same type as well.""" + self.assertEqual(x, y) + self.assertIs(type(x), type(y)) + + def test_int(self): + # Test conversions to int. + x = statistics._convert(Fraction(71), int) + self.check_exact_equal(x, 71) + class MyInt(int): pass + x = statistics._convert(Fraction(17), MyInt) + self.check_exact_equal(x, MyInt(17)) + + def test_fraction(self): + # Test conversions to Fraction. + x = statistics._convert(Fraction(95, 99), Fraction) + self.check_exact_equal(x, Fraction(95, 99)) + class MyFraction(Fraction): + def __truediv__(self, other): + return self.__class__(super().__truediv__(other)) + x = statistics._convert(Fraction(71, 13), MyFraction) + self.check_exact_equal(x, MyFraction(71, 13)) + + def test_float(self): + # Test conversions to float. + x = statistics._convert(Fraction(-1, 2), float) + self.check_exact_equal(x, -0.5) + class MyFloat(float): + def __truediv__(self, other): + return self.__class__(super().__truediv__(other)) + x = statistics._convert(Fraction(9, 8), MyFloat) + self.check_exact_equal(x, MyFloat(1.125)) + + def test_decimal(self): + # Test conversions to Decimal. + x = statistics._convert(Fraction(1, 40), Decimal) + self.check_exact_equal(x, Decimal("0.025")) + class MyDecimal(Decimal): + def __truediv__(self, other): + return self.__class__(super().__truediv__(other)) + x = statistics._convert(Fraction(-15, 16), MyDecimal) + self.check_exact_equal(x, MyDecimal("-0.9375")) + + def test_inf(self): + for INF in (float('inf'), Decimal('inf')): + for inf in (INF, -INF): + x = statistics._convert(inf, type(inf)) + self.check_exact_equal(x, inf) + + def test_nan(self): + for nan in (float('nan'), Decimal('NAN'), Decimal('sNAN')): + x = statistics._convert(nan, type(nan)) + self.assertTrue(_nan_equal(x, nan)) # === Tests for public functions === @@ -874,52 +1106,71 @@ self.assertIs(type(result), kind) -class TestSum(NumericTestCase, UnivariateCommonMixin, UnivariateTypeMixin): +class TestSumCommon(UnivariateCommonMixin, UnivariateTypeMixin): + # Common test cases for statistics._sum() function. + + # This test suite looks only at the numeric value returned by _sum, + # after conversion to the appropriate type. + def setUp(self): + def simplified_sum(*args): + T, value, n = statistics._sum(*args) + return statistics._coerce(value, T) + self.func = simplified_sum + + +class TestSum(NumericTestCase): # Test cases for statistics._sum() function. + # These tests look at the entire three value tuple returned by _sum. 
+ def setUp(self): self.func = statistics._sum def test_empty_data(self): # Override test for empty data. for data in ([], (), iter([])): - self.assertEqual(self.func(data), 0) - self.assertEqual(self.func(data, 23), 23) - self.assertEqual(self.func(data, 2.3), 2.3) + self.assertEqual(self.func(data), (int, Fraction(0), 0)) + self.assertEqual(self.func(data, 23), (int, Fraction(23), 0)) + self.assertEqual(self.func(data, 2.3), (float, Fraction(2.3), 0)) def test_ints(self): - self.assertEqual(self.func([1, 5, 3, -4, -8, 20, 42, 1]), 60) - self.assertEqual(self.func([4, 2, 3, -8, 7], 1000), 1008) + self.assertEqual(self.func([1, 5, 3, -4, -8, 20, 42, 1]), + (int, Fraction(60), 8)) + self.assertEqual(self.func([4, 2, 3, -8, 7], 1000), + (int, Fraction(1008), 5)) def test_floats(self): - self.assertEqual(self.func([0.25]*20), 5.0) - self.assertEqual(self.func([0.125, 0.25, 0.5, 0.75], 1.5), 3.125) + self.assertEqual(self.func([0.25]*20), + (float, Fraction(5.0), 20)) + self.assertEqual(self.func([0.125, 0.25, 0.5, 0.75], 1.5), + (float, Fraction(3.125), 4)) def test_fractions(self): - F = Fraction - self.assertEqual(self.func([Fraction(1, 1000)]*500), Fraction(1, 2)) + self.assertEqual(self.func([Fraction(1, 1000)]*500), + (Fraction, Fraction(1, 2), 500)) def test_decimals(self): D = Decimal data = [D("0.001"), D("5.246"), D("1.702"), D("-0.025"), D("3.974"), D("2.328"), D("4.617"), D("2.843"), ] - self.assertEqual(self.func(data), Decimal("20.686")) + self.assertEqual(self.func(data), + (Decimal, Decimal("20.686"), 8)) def test_compare_with_math_fsum(self): # Compare with the math.fsum function. # Ideally we ought to get the exact same result, but sometimes # we differ by a very slight amount :-( data = [random.uniform(-100, 1000) for _ in range(1000)] - self.assertApproxEqual(self.func(data), math.fsum(data), rel=2e-16) + self.assertApproxEqual(float(self.func(data)[1]), math.fsum(data), rel=2e-16) def test_start_argument(self): # Test that the optional start argument works correctly. data = [random.uniform(1, 1000) for _ in range(100)] - t = self.func(data) - self.assertEqual(t+42, self.func(data, 42)) - self.assertEqual(t-23, self.func(data, -23)) - self.assertEqual(t+1e20, self.func(data, 1e20)) + t = self.func(data)[1] + self.assertEqual(t+42, self.func(data, 42)[1]) + self.assertEqual(t-23, self.func(data, -23)[1]) + self.assertEqual(t+Fraction(1e20), self.func(data, 1e20)[1]) def test_strings_fail(self): # Sum of strings should fail. @@ -934,7 +1185,7 @@ def test_mixed_sum(self): # Mixed input types are not (currently) allowed. # Check that mixed data types fail. - self.assertRaises(TypeError, self.func, [1, 2.0, Fraction(1, 2)]) + self.assertRaises(TypeError, self.func, [1, 2.0, Decimal(1)]) # And so does mixed start argument. self.assertRaises(TypeError, self.func, [1, 2.0], Decimal(1)) @@ -942,11 +1193,14 @@ class SumTortureTest(NumericTestCase): def test_torture(self): # Tim Peters' torture test for sum, and variants of same. 
- self.assertEqual(statistics._sum([1, 1e100, 1, -1e100]*10000), 20000.0) - self.assertEqual(statistics._sum([1e100, 1, 1, -1e100]*10000), 20000.0) - self.assertApproxEqual( - statistics._sum([1e-100, 1, 1e-100, -1]*10000), 2.0e-96, rel=5e-16 - ) + self.assertEqual(statistics._sum([1, 1e100, 1, -1e100]*10000), + (float, Fraction(20000.0), 40000)) + self.assertEqual(statistics._sum([1e100, 1, 1, -1e100]*10000), + (float, Fraction(20000.0), 40000)) + T, num, count = statistics._sum([1e-100, 1, 1e-100, -1]*10000) + self.assertIs(T, float) + self.assertEqual(count, 40000) + self.assertApproxEqual(float(num), 2.0e-96, rel=5e-16) class SumSpecialValues(NumericTestCase): @@ -955,7 +1209,7 @@ def test_nan(self): for type_ in (float, Decimal): nan = type_('nan') - result = statistics._sum([1, nan, 2]) + result = statistics._sum([1, nan, 2])[1] self.assertIs(type(result), type_) self.assertTrue(math.isnan(result)) @@ -968,10 +1222,10 @@ def do_test_inf(self, inf): # Adding a single infinity gives infinity. - result = statistics._sum([1, 2, inf, 3]) + result = statistics._sum([1, 2, inf, 3])[1] self.check_infinity(result, inf) # Adding two infinities of the same sign also gives infinity. - result = statistics._sum([1, 2, inf, 3, inf, 4]) + result = statistics._sum([1, 2, inf, 3, inf, 4])[1] self.check_infinity(result, inf) def test_float_inf(self): @@ -987,7 +1241,7 @@ def test_float_mismatched_infs(self): # Test that adding two infinities of opposite sign gives a NAN. inf = float('inf') - result = statistics._sum([1, 2, inf, 3, -inf, 4]) + result = statistics._sum([1, 2, inf, 3, -inf, 4])[1] self.assertTrue(math.isnan(result)) def test_decimal_extendedcontext_mismatched_infs_to_nan(self): @@ -995,7 +1249,7 @@ inf = Decimal('inf') data = [1, 2, inf, 3, -inf, 4] with decimal.localcontext(decimal.ExtendedContext): - self.assertTrue(math.isnan(statistics._sum(data))) + self.assertTrue(math.isnan(statistics._sum(data)[1])) def test_decimal_basiccontext_mismatched_infs_to_nan(self): # Test adding Decimal INFs with opposite sign raises InvalidOperation. @@ -1111,6 +1365,19 @@ d = Decimal('1e4') self.assertEqual(statistics.mean([d]), d) + def test_regression_25177(self): + # Regression test for issue 25177. + # Ensure very big and very small floats don't overflow. + # See http://bugs.python.org/issue25177. + self.assertEqual(statistics.mean( + [8.988465674311579e+307, 8.98846567431158e+307]), + 8.98846567431158e+307) + big = 8.98846567431158e+307 + tiny = 5e-324 + for n in (2, 3, 5, 200): + self.assertEqual(statistics.mean([big]*n), big) + self.assertEqual(statistics.mean([tiny]*n), tiny) + class TestMedian(NumericTestCase, AverageMixin): # Common tests for median and all median.* functions. diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -113,6 +113,10 @@ Library ------- +- Issue #25177: Fixed problem with the mean of very small and very large + numbers. As a side effect, statistics.mean and statistics.variance should + be significantly faster. + - Issue #25718: Fixed copying object with state with boolean value is false. - Issue #10131: Fixed deep copying of minidom documents. 
Based on patch -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Dec 1 09:11:16 2015 From: python-checkins at python.org (steven.daprano) Date: Tue, 01 Dec 2015 14:11:16 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Issue_=2325177=3A_Fixed_pr?= =?utf-8?q?oblem_with_the_mean_of_very_small_and_very_large_numbers=2E?= Message-ID: <20151201141110.81854.94143@psf.io> https://hg.python.org/cpython/rev/0eeb39fc8ff5 changeset: 99408:0eeb39fc8ff5 parent: 99405:734247d5d0f9 user: Steven D'Aprano date: Tue Dec 01 19:59:53 2015 +1100 summary: Issue #25177: Fixed problem with the mean of very small and very large numbers. files: Lib/statistics.py | 181 +++++++---- Lib/test/test_statistics.py | 363 ++++++++++++++++++++--- Misc/NEWS | 4 + 3 files changed, 431 insertions(+), 117 deletions(-) diff --git a/Lib/statistics.py b/Lib/statistics.py --- a/Lib/statistics.py +++ b/Lib/statistics.py @@ -104,6 +104,8 @@ from fractions import Fraction from decimal import Decimal +from itertools import groupby + # === Exceptions === @@ -115,86 +117,102 @@ # === Private utilities === def _sum(data, start=0): - """_sum(data [, start]) -> value + """_sum(data [, start]) -> (type, sum, count) - Return a high-precision sum of the given numeric data. If optional - argument ``start`` is given, it is added to the total. If ``data`` is - empty, ``start`` (defaulting to 0) is returned. + Return a high-precision sum of the given numeric data as a fraction, + together with the type to be converted to and the count of items. + + If optional argument ``start`` is given, it is added to the total. + If ``data`` is empty, ``start`` (defaulting to 0) is returned. Examples -------- >>> _sum([3, 2.25, 4.5, -0.5, 1.0], 0.75) - 11.0 + (, Fraction(11, 1), 5) Some sources of round-off error will be avoided: >>> _sum([1e50, 1, -1e50] * 1000) # Built-in sum returns zero. - 1000.0 + (, Fraction(1000, 1), 3000) Fractions and Decimals are also supported: >>> from fractions import Fraction as F >>> _sum([F(2, 3), F(7, 5), F(1, 4), F(5, 6)]) - Fraction(63, 20) + (, Fraction(63, 20), 4) >>> from decimal import Decimal as D >>> data = [D("0.1375"), D("0.2108"), D("0.3061"), D("0.0419")] >>> _sum(data) - Decimal('0.6963') + (, Fraction(6963, 10000), 4) Mixed types are currently treated as an error, except that int is allowed. """ - # We fail as soon as we reach a value that is not an int or the type of - # the first value which is not an int. E.g. _sum([int, int, float, int]) - # is okay, but sum([int, int, float, Fraction]) is not. - allowed_types = {int, type(start)} + count = 0 n, d = _exact_ratio(start) - partials = {d: n} # map {denominator: sum of numerators} - # Micro-optimizations. - exact_ratio = _exact_ratio + partials = {d: n} partials_get = partials.get - # Add numerators for each denominator. - for x in data: - _check_type(type(x), allowed_types) - n, d = exact_ratio(x) - partials[d] = partials_get(d, 0) + n - # Find the expected result type. If allowed_types has only one item, it - # will be int; if it has two, use the one which isn't int. - assert len(allowed_types) in (1, 2) - if len(allowed_types) == 1: - assert allowed_types.pop() is int - T = int + T = _coerce(int, type(start)) + for typ, values in groupby(data, type): + T = _coerce(T, typ) # or raise TypeError + for n,d in map(_exact_ratio, values): + count += 1 + partials[d] = partials_get(d, 0) + n + if None in partials: + # The sum will be a NAN or INF. We can ignore all the finite + # partials, and just look at this special one. 
+ total = partials[None] + assert not _isfinite(total) else: - T = (allowed_types - {int}).pop() - if None in partials: - assert issubclass(T, (float, Decimal)) - assert not math.isfinite(partials[None]) - return T(partials[None]) - total = Fraction() - for d, n in sorted(partials.items()): - total += Fraction(n, d) - if issubclass(T, int): - assert total.denominator == 1 - return T(total.numerator) - if issubclass(T, Decimal): - return T(total.numerator)/total.denominator - return T(total) + # Sum all the partial sums using builtin sum. + # FIXME is this faster if we sum them in order of the denominator? + total = sum(Fraction(n, d) for d, n in sorted(partials.items())) + return (T, total, count) -def _check_type(T, allowed): - if T not in allowed: - if len(allowed) == 1: - allowed.add(T) - else: - types = ', '.join([t.__name__ for t in allowed] + [T.__name__]) - raise TypeError("unsupported mixed types: %s" % types) +def _isfinite(x): + try: + return x.is_finite() # Likely a Decimal. + except AttributeError: + return math.isfinite(x) # Coerces to float first. + + +def _coerce(T, S): + """Coerce types T and S to a common type, or raise TypeError. + + Coercion rules are currently an implementation detail. See the CoerceTest + test class in test_statistics for details. + """ + # See http://bugs.python.org/issue24068. + assert T is not bool, "initial type T is bool" + # If the types are the same, no need to coerce anything. Put this + # first, so that the usual case (no coercion needed) happens as soon + # as possible. + if T is S: return T + # Mixed int & other coerce to the other type. + if S is int or S is bool: return T + if T is int: return S + # If one is a (strict) subclass of the other, coerce to the subclass. + if issubclass(S, T): return S + if issubclass(T, S): return T + # Ints coerce to the other type. + if issubclass(T, int): return S + if issubclass(S, int): return T + # Mixed fraction & float coerces to float (or float subclass). + if issubclass(T, Fraction) and issubclass(S, float): + return S + if issubclass(T, float) and issubclass(S, Fraction): + return T + # Any other combination is disallowed. + msg = "don't know how to coerce %s and %s" + raise TypeError(msg % (T.__name__, S.__name__)) def _exact_ratio(x): - """Convert Real number x exactly to (numerator, denominator) pair. + """Return Real number x to exact (numerator, denominator) pair. >>> _exact_ratio(0.25) (1, 4) @@ -202,29 +220,31 @@ x is expected to be an int, Fraction, Decimal or float. """ try: + # Optimise the common case of floats. We expect that the most often + # used numeric type will be builtin floats, so try to make this as + # fast as possible. + if type(x) is float: + return x.as_integer_ratio() try: - # int, Fraction + # x may be an int, Fraction, or Integral ABC. return (x.numerator, x.denominator) except AttributeError: - # float try: + # x may be a float subclass. return x.as_integer_ratio() except AttributeError: - # Decimal try: + # x may be a Decimal. return _decimal_to_ratio(x) except AttributeError: - msg = "can't convert type '{}' to numerator/denominator" - raise TypeError(msg.format(type(x).__name__)) from None + # Just give up? + pass except (OverflowError, ValueError): - # INF or NAN - if __debug__: - # Decimal signalling NANs cannot be converted to float :-( - if isinstance(x, Decimal): - assert not x.is_finite() - else: - assert not math.isfinite(x) + # float NAN or INF. 
+ assert not math.isfinite(x) return (x, None) + msg = "can't convert type '{}' to numerator/denominator" + raise TypeError(msg.format(type(x).__name__)) # FIXME This is faster than Fraction.from_decimal, but still too slow. @@ -239,7 +259,7 @@ sign, digits, exp = d.as_tuple() if exp in ('F', 'n', 'N'): # INF, NAN, sNAN assert not d.is_finite() - raise ValueError + return (d, None) num = 0 for digit in digits: num = num*10 + digit @@ -253,6 +273,24 @@ return (num, den) +def _convert(value, T): + """Convert value to given numeric type T.""" + if type(value) is T: + # This covers the cases where T is Fraction, or where value is + # a NAN or INF (Decimal or float). + return value + if issubclass(T, int) and value.denominator != 1: + T = float + try: + # FIXME: what do we do if this overflows? + return T(value) + except TypeError: + if issubclass(T, Decimal): + return T(value.numerator)/T(value.denominator) + else: + raise + + def _counts(data): # Generate a table of sorted (value, frequency) pairs. table = collections.Counter(iter(data)).most_common() @@ -290,7 +328,9 @@ n = len(data) if n < 1: raise StatisticsError('mean requires at least one data point') - return _sum(data)/n + T, total, count = _sum(data) + assert count == n + return _convert(total/n, T) # FIXME: investigate ways to calculate medians without sorting? Quickselect? @@ -460,12 +500,14 @@ """ if c is None: c = mean(data) - ss = _sum((x-c)**2 for x in data) + T, total, count = _sum((x-c)**2 for x in data) # The following sum should mathematically equal zero, but due to rounding # error may not. - ss -= _sum((x-c) for x in data)**2/len(data) - assert not ss < 0, 'negative sum of square deviations: %f' % ss - return ss + U, total2, count2 = _sum((x-c) for x in data) + assert T == U and count == count2 + total -= total2**2/len(data) + assert not total < 0, 'negative sum of square deviations: %f' % total + return (T, total) def variance(data, xbar=None): @@ -511,8 +553,8 @@ n = len(data) if n < 2: raise StatisticsError('variance requires at least two data points') - ss = _ss(data, xbar) - return ss/(n-1) + T, ss = _ss(data, xbar) + return _convert(ss/(n-1), T) def pvariance(data, mu=None): @@ -560,7 +602,8 @@ if n < 1: raise StatisticsError('pvariance requires at least one data point') ss = _ss(data, mu) - return ss/n + T, ss = _ss(data, mu) + return _convert(ss/n, T) def stdev(data, xbar=None): diff --git a/Lib/test/test_statistics.py b/Lib/test/test_statistics.py --- a/Lib/test/test_statistics.py +++ b/Lib/test/test_statistics.py @@ -21,6 +21,37 @@ # === Helper functions and class === +def _nan_equal(a, b): + """Return True if a and b are both the same kind of NAN. + + >>> _nan_equal(Decimal('NAN'), Decimal('NAN')) + True + >>> _nan_equal(Decimal('sNAN'), Decimal('sNAN')) + True + >>> _nan_equal(Decimal('NAN'), Decimal('sNAN')) + False + >>> _nan_equal(Decimal(42), Decimal('NAN')) + False + + >>> _nan_equal(float('NAN'), float('NAN')) + True + >>> _nan_equal(float('NAN'), 0.5) + False + + >>> _nan_equal(float('NAN'), Decimal('NAN')) + False + + NAN payloads are not compared. + """ + if type(a) is not type(b): + return False + if isinstance(a, float): + return math.isnan(a) and math.isnan(b) + aexp = a.as_tuple()[2] + bexp = b.as_tuple()[2] + return (aexp == bexp) and (aexp in ('n', 'N')) # Both NAN or both sNAN. + + def _calc_errors(actual, expected): """Return the absolute and relative errors between two numbers. 
@@ -675,15 +706,60 @@ self.assertEqual(_exact_ratio(D("12.345")), (12345, 1000)) self.assertEqual(_exact_ratio(D("-1.98")), (-198, 100)) + def test_inf(self): + INF = float("INF") + class MyFloat(float): + pass + class MyDecimal(Decimal): + pass + for inf in (INF, -INF): + for type_ in (float, MyFloat, Decimal, MyDecimal): + x = type_(inf) + ratio = statistics._exact_ratio(x) + self.assertEqual(ratio, (x, None)) + self.assertEqual(type(ratio[0]), type_) + self.assertTrue(math.isinf(ratio[0])) + + def test_float_nan(self): + NAN = float("NAN") + class MyFloat(float): + pass + for nan in (NAN, MyFloat(NAN)): + ratio = statistics._exact_ratio(nan) + self.assertTrue(math.isnan(ratio[0])) + self.assertIs(ratio[1], None) + self.assertEqual(type(ratio[0]), type(nan)) + + def test_decimal_nan(self): + NAN = Decimal("NAN") + sNAN = Decimal("sNAN") + class MyDecimal(Decimal): + pass + for nan in (NAN, MyDecimal(NAN), sNAN, MyDecimal(sNAN)): + ratio = statistics._exact_ratio(nan) + self.assertTrue(_nan_equal(ratio[0], nan)) + self.assertIs(ratio[1], None) + self.assertEqual(type(ratio[0]), type(nan)) + class DecimalToRatioTest(unittest.TestCase): # Test _decimal_to_ratio private function. - def testSpecialsRaise(self): - # Test that NANs and INFs raise ValueError. - # Non-special values are covered by _exact_ratio above. - for d in (Decimal('NAN'), Decimal('sNAN'), Decimal('INF')): - self.assertRaises(ValueError, statistics._decimal_to_ratio, d) + def test_infinity(self): + # Test that INFs are handled correctly. + inf = Decimal('INF') + self.assertEqual(statistics._decimal_to_ratio(inf), (inf, None)) + self.assertEqual(statistics._decimal_to_ratio(-inf), (-inf, None)) + + def test_nan(self): + # Test that NANs are handled correctly. + for nan in (Decimal('NAN'), Decimal('sNAN')): + num, den = statistics._decimal_to_ratio(nan) + # Because NANs always compare non-equal, we cannot use assertEqual. + # Nor can we use an identity test, as we don't guarantee anything + # about the object identity. + self.assertTrue(_nan_equal(num, nan)) + self.assertIs(den, None) def test_sign(self): # Test sign is calculated correctly. @@ -718,25 +794,181 @@ self.assertEqual(t, (147000, 1)) -class CheckTypeTest(unittest.TestCase): - # Test _check_type private function. +class IsFiniteTest(unittest.TestCase): + # Test _isfinite private function. - def test_allowed(self): - # Test that a type which should be allowed is allowed. - allowed = set([int, float]) - statistics._check_type(int, allowed) - statistics._check_type(float, allowed) + def test_finite(self): + # Test that finite numbers are recognised as finite. + for x in (5, Fraction(1, 3), 2.5, Decimal("5.5")): + self.assertTrue(statistics._isfinite(x)) - def test_not_allowed(self): - # Test that a type which should not be allowed raises. - allowed = set([int, float]) - self.assertRaises(TypeError, statistics._check_type, Decimal, allowed) + def test_infinity(self): + # Test that INFs are not recognised as finite. + for x in (float("inf"), Decimal("inf")): + self.assertFalse(statistics._isfinite(x)) - def test_add_to_allowed(self): - # Test that a second type will be added to the allowed set. - allowed = set([int]) - statistics._check_type(float, allowed) - self.assertEqual(allowed, set([int, float])) + def test_nan(self): + # Test that NANs are not recognised as finite. 
+ for x in (float("nan"), Decimal("NAN"), Decimal("sNAN")): + self.assertFalse(statistics._isfinite(x)) + + +class CoerceTest(unittest.TestCase): + # Test that private function _coerce correctly deals with types. + + # The coercion rules are currently an implementation detail, although at + # some point that should change. The tests and comments here define the + # correct implementation. + + # Pre-conditions of _coerce: + # + # - The first time _sum calls _coerce, the + # - coerce(T, S) will never be called with bool as the first argument; + # this is a pre-condition, guarded with an assertion. + + # + # - coerce(T, T) will always return T; we assume T is a valid numeric + # type. Violate this assumption at your own risk. + # + # - Apart from as above, bool is treated as if it were actually int. + # + # - coerce(int, X) and coerce(X, int) return X. + # - + def test_bool(self): + # bool is somewhat special, due to the pre-condition that it is + # never given as the first argument to _coerce, and that it cannot + # be subclassed. So we test it specially. + for T in (int, float, Fraction, Decimal): + self.assertIs(statistics._coerce(T, bool), T) + class MyClass(T): pass + self.assertIs(statistics._coerce(MyClass, bool), MyClass) + + def assertCoerceTo(self, A, B): + """Assert that type A coerces to B.""" + self.assertIs(statistics._coerce(A, B), B) + self.assertIs(statistics._coerce(B, A), B) + + def check_coerce_to(self, A, B): + """Checks that type A coerces to B, including subclasses.""" + # Assert that type A is coerced to B. + self.assertCoerceTo(A, B) + # Subclasses of A are also coerced to B. + class SubclassOfA(A): pass + self.assertCoerceTo(SubclassOfA, B) + # A, and subclasses of A, are coerced to subclasses of B. + class SubclassOfB(B): pass + self.assertCoerceTo(A, SubclassOfB) + self.assertCoerceTo(SubclassOfA, SubclassOfB) + + def assertCoerceRaises(self, A, B): + """Assert that coercing A to B, or vice versa, raises TypeError.""" + self.assertRaises(TypeError, statistics._coerce, (A, B)) + self.assertRaises(TypeError, statistics._coerce, (B, A)) + + def check_type_coercions(self, T): + """Check that type T coerces correctly with subclasses of itself.""" + assert T is not bool + # Coercing a type with itself returns the same type. + self.assertIs(statistics._coerce(T, T), T) + # Coercing a type with a subclass of itself returns the subclass. + class U(T): pass + class V(T): pass + class W(U): pass + for typ in (U, V, W): + self.assertCoerceTo(T, typ) + self.assertCoerceTo(U, W) + # Coercing two subclasses that aren't parent/child is an error. + self.assertCoerceRaises(U, V) + self.assertCoerceRaises(V, W) + + def test_int(self): + # Check that int coerces correctly. + self.check_type_coercions(int) + for typ in (float, Fraction, Decimal): + self.check_coerce_to(int, typ) + + def test_fraction(self): + # Check that Fraction coerces correctly. + self.check_type_coercions(Fraction) + self.check_coerce_to(Fraction, float) + + def test_decimal(self): + # Check that Decimal coerces correctly. + self.check_type_coercions(Decimal) + + def test_float(self): + # Check that float coerces correctly. + self.check_type_coercions(float) + + def test_non_numeric_types(self): + for bad_type in (str, list, type(None), tuple, dict): + for good_type in (int, float, Fraction, Decimal): + self.assertCoerceRaises(good_type, bad_type) + + def test_incompatible_types(self): + # Test that incompatible types raise. 
+ for T in (float, Fraction): + class MySubclass(T): pass + self.assertCoerceRaises(T, Decimal) + self.assertCoerceRaises(MySubclass, Decimal) + + +class ConvertTest(unittest.TestCase): + # Test private _convert function. + + def check_exact_equal(self, x, y): + """Check that x equals y, and has the same type as well.""" + self.assertEqual(x, y) + self.assertIs(type(x), type(y)) + + def test_int(self): + # Test conversions to int. + x = statistics._convert(Fraction(71), int) + self.check_exact_equal(x, 71) + class MyInt(int): pass + x = statistics._convert(Fraction(17), MyInt) + self.check_exact_equal(x, MyInt(17)) + + def test_fraction(self): + # Test conversions to Fraction. + x = statistics._convert(Fraction(95, 99), Fraction) + self.check_exact_equal(x, Fraction(95, 99)) + class MyFraction(Fraction): + def __truediv__(self, other): + return self.__class__(super().__truediv__(other)) + x = statistics._convert(Fraction(71, 13), MyFraction) + self.check_exact_equal(x, MyFraction(71, 13)) + + def test_float(self): + # Test conversions to float. + x = statistics._convert(Fraction(-1, 2), float) + self.check_exact_equal(x, -0.5) + class MyFloat(float): + def __truediv__(self, other): + return self.__class__(super().__truediv__(other)) + x = statistics._convert(Fraction(9, 8), MyFloat) + self.check_exact_equal(x, MyFloat(1.125)) + + def test_decimal(self): + # Test conversions to Decimal. + x = statistics._convert(Fraction(1, 40), Decimal) + self.check_exact_equal(x, Decimal("0.025")) + class MyDecimal(Decimal): + def __truediv__(self, other): + return self.__class__(super().__truediv__(other)) + x = statistics._convert(Fraction(-15, 16), MyDecimal) + self.check_exact_equal(x, MyDecimal("-0.9375")) + + def test_inf(self): + for INF in (float('inf'), Decimal('inf')): + for inf in (INF, -INF): + x = statistics._convert(inf, type(inf)) + self.check_exact_equal(x, inf) + + def test_nan(self): + for nan in (float('nan'), Decimal('NAN'), Decimal('sNAN')): + x = statistics._convert(nan, type(nan)) + self.assertTrue(_nan_equal(x, nan)) # === Tests for public functions === @@ -874,52 +1106,71 @@ self.assertIs(type(result), kind) -class TestSum(NumericTestCase, UnivariateCommonMixin, UnivariateTypeMixin): +class TestSumCommon(UnivariateCommonMixin, UnivariateTypeMixin): + # Common test cases for statistics._sum() function. + + # This test suite looks only at the numeric value returned by _sum, + # after conversion to the appropriate type. + def setUp(self): + def simplified_sum(*args): + T, value, n = statistics._sum(*args) + return statistics._coerce(value, T) + self.func = simplified_sum + + +class TestSum(NumericTestCase): # Test cases for statistics._sum() function. + # These tests look at the entire three value tuple returned by _sum. + def setUp(self): self.func = statistics._sum def test_empty_data(self): # Override test for empty data. 
for data in ([], (), iter([])): - self.assertEqual(self.func(data), 0) - self.assertEqual(self.func(data, 23), 23) - self.assertEqual(self.func(data, 2.3), 2.3) + self.assertEqual(self.func(data), (int, Fraction(0), 0)) + self.assertEqual(self.func(data, 23), (int, Fraction(23), 0)) + self.assertEqual(self.func(data, 2.3), (float, Fraction(2.3), 0)) def test_ints(self): - self.assertEqual(self.func([1, 5, 3, -4, -8, 20, 42, 1]), 60) - self.assertEqual(self.func([4, 2, 3, -8, 7], 1000), 1008) + self.assertEqual(self.func([1, 5, 3, -4, -8, 20, 42, 1]), + (int, Fraction(60), 8)) + self.assertEqual(self.func([4, 2, 3, -8, 7], 1000), + (int, Fraction(1008), 5)) def test_floats(self): - self.assertEqual(self.func([0.25]*20), 5.0) - self.assertEqual(self.func([0.125, 0.25, 0.5, 0.75], 1.5), 3.125) + self.assertEqual(self.func([0.25]*20), + (float, Fraction(5.0), 20)) + self.assertEqual(self.func([0.125, 0.25, 0.5, 0.75], 1.5), + (float, Fraction(3.125), 4)) def test_fractions(self): - F = Fraction - self.assertEqual(self.func([Fraction(1, 1000)]*500), Fraction(1, 2)) + self.assertEqual(self.func([Fraction(1, 1000)]*500), + (Fraction, Fraction(1, 2), 500)) def test_decimals(self): D = Decimal data = [D("0.001"), D("5.246"), D("1.702"), D("-0.025"), D("3.974"), D("2.328"), D("4.617"), D("2.843"), ] - self.assertEqual(self.func(data), Decimal("20.686")) + self.assertEqual(self.func(data), + (Decimal, Decimal("20.686"), 8)) def test_compare_with_math_fsum(self): # Compare with the math.fsum function. # Ideally we ought to get the exact same result, but sometimes # we differ by a very slight amount :-( data = [random.uniform(-100, 1000) for _ in range(1000)] - self.assertApproxEqual(self.func(data), math.fsum(data), rel=2e-16) + self.assertApproxEqual(float(self.func(data)[1]), math.fsum(data), rel=2e-16) def test_start_argument(self): # Test that the optional start argument works correctly. data = [random.uniform(1, 1000) for _ in range(100)] - t = self.func(data) - self.assertEqual(t+42, self.func(data, 42)) - self.assertEqual(t-23, self.func(data, -23)) - self.assertEqual(t+1e20, self.func(data, 1e20)) + t = self.func(data)[1] + self.assertEqual(t+42, self.func(data, 42)[1]) + self.assertEqual(t-23, self.func(data, -23)[1]) + self.assertEqual(t+Fraction(1e20), self.func(data, 1e20)[1]) def test_strings_fail(self): # Sum of strings should fail. @@ -934,7 +1185,7 @@ def test_mixed_sum(self): # Mixed input types are not (currently) allowed. # Check that mixed data types fail. - self.assertRaises(TypeError, self.func, [1, 2.0, Fraction(1, 2)]) + self.assertRaises(TypeError, self.func, [1, 2.0, Decimal(1)]) # And so does mixed start argument. self.assertRaises(TypeError, self.func, [1, 2.0], Decimal(1)) @@ -942,11 +1193,14 @@ class SumTortureTest(NumericTestCase): def test_torture(self): # Tim Peters' torture test for sum, and variants of same. 
- self.assertEqual(statistics._sum([1, 1e100, 1, -1e100]*10000), 20000.0) - self.assertEqual(statistics._sum([1e100, 1, 1, -1e100]*10000), 20000.0) - self.assertApproxEqual( - statistics._sum([1e-100, 1, 1e-100, -1]*10000), 2.0e-96, rel=5e-16 - ) + self.assertEqual(statistics._sum([1, 1e100, 1, -1e100]*10000), + (float, Fraction(20000.0), 40000)) + self.assertEqual(statistics._sum([1e100, 1, 1, -1e100]*10000), + (float, Fraction(20000.0), 40000)) + T, num, count = statistics._sum([1e-100, 1, 1e-100, -1]*10000) + self.assertIs(T, float) + self.assertEqual(count, 40000) + self.assertApproxEqual(float(num), 2.0e-96, rel=5e-16) class SumSpecialValues(NumericTestCase): @@ -955,7 +1209,7 @@ def test_nan(self): for type_ in (float, Decimal): nan = type_('nan') - result = statistics._sum([1, nan, 2]) + result = statistics._sum([1, nan, 2])[1] self.assertIs(type(result), type_) self.assertTrue(math.isnan(result)) @@ -968,10 +1222,10 @@ def do_test_inf(self, inf): # Adding a single infinity gives infinity. - result = statistics._sum([1, 2, inf, 3]) + result = statistics._sum([1, 2, inf, 3])[1] self.check_infinity(result, inf) # Adding two infinities of the same sign also gives infinity. - result = statistics._sum([1, 2, inf, 3, inf, 4]) + result = statistics._sum([1, 2, inf, 3, inf, 4])[1] self.check_infinity(result, inf) def test_float_inf(self): @@ -987,7 +1241,7 @@ def test_float_mismatched_infs(self): # Test that adding two infinities of opposite sign gives a NAN. inf = float('inf') - result = statistics._sum([1, 2, inf, 3, -inf, 4]) + result = statistics._sum([1, 2, inf, 3, -inf, 4])[1] self.assertTrue(math.isnan(result)) def test_decimal_extendedcontext_mismatched_infs_to_nan(self): @@ -995,7 +1249,7 @@ inf = Decimal('inf') data = [1, 2, inf, 3, -inf, 4] with decimal.localcontext(decimal.ExtendedContext): - self.assertTrue(math.isnan(statistics._sum(data))) + self.assertTrue(math.isnan(statistics._sum(data)[1])) def test_decimal_basiccontext_mismatched_infs_to_nan(self): # Test adding Decimal INFs with opposite sign raises InvalidOperation. @@ -1111,6 +1365,19 @@ d = Decimal('1e4') self.assertEqual(statistics.mean([d]), d) + def test_regression_25177(self): + # Regression test for issue 25177. + # Ensure very big and very small floats don't overflow. + # See http://bugs.python.org/issue25177. + self.assertEqual(statistics.mean( + [8.988465674311579e+307, 8.98846567431158e+307]), + 8.98846567431158e+307) + big = 8.98846567431158e+307 + tiny = 5e-324 + for n in (2, 3, 5, 200): + self.assertEqual(statistics.mean([big]*n), big) + self.assertEqual(statistics.mean([tiny]*n), tiny) + class TestMedian(NumericTestCase, AverageMixin): # Common tests for median and all median.* functions. diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -107,6 +107,10 @@ Library ------- +- Issue #25177: Fixed problem with the mean of very small and very large + numbers. As a side effect, statistics.mean and statistics.variance should + be significantly faster. + - Issue #25718: Fixed copying object with state with boolean value is false. - Issue #10131: Fixed deep copying of minidom documents. 
Based on patch -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Dec 1 09:12:02 2015 From: python-checkins at python.org (steven.daprano) Date: Tue, 01 Dec 2015 14:12:02 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAobWVyZ2UgMy40IC0+IDMuNCk6?= =?utf-8?q?_Merge_heads=2E?= Message-ID: <20151201141110.28224.16490@psf.io> https://hg.python.org/cpython/rev/7b5057b89a56 changeset: 99409:7b5057b89a56 branch: 3.4 parent: 99403:f475379bf22c parent: 99406:4bc9405c4f7b user: Steven D'Aprano date: Wed Dec 02 01:10:09 2015 +1100 summary: Merge heads. files: Lib/statistics.py | 181 +++++++---- Lib/test/test_statistics.py | 363 ++++++++++++++++++++--- Misc/NEWS | 4 + 3 files changed, 431 insertions(+), 117 deletions(-) diff --git a/Lib/statistics.py b/Lib/statistics.py --- a/Lib/statistics.py +++ b/Lib/statistics.py @@ -104,6 +104,8 @@ from fractions import Fraction from decimal import Decimal +from itertools import groupby + # === Exceptions === @@ -115,86 +117,102 @@ # === Private utilities === def _sum(data, start=0): - """_sum(data [, start]) -> value + """_sum(data [, start]) -> (type, sum, count) - Return a high-precision sum of the given numeric data. If optional - argument ``start`` is given, it is added to the total. If ``data`` is - empty, ``start`` (defaulting to 0) is returned. + Return a high-precision sum of the given numeric data as a fraction, + together with the type to be converted to and the count of items. + + If optional argument ``start`` is given, it is added to the total. + If ``data`` is empty, ``start`` (defaulting to 0) is returned. Examples -------- >>> _sum([3, 2.25, 4.5, -0.5, 1.0], 0.75) - 11.0 + (, Fraction(11, 1), 5) Some sources of round-off error will be avoided: >>> _sum([1e50, 1, -1e50] * 1000) # Built-in sum returns zero. - 1000.0 + (, Fraction(1000, 1), 3000) Fractions and Decimals are also supported: >>> from fractions import Fraction as F >>> _sum([F(2, 3), F(7, 5), F(1, 4), F(5, 6)]) - Fraction(63, 20) + (, Fraction(63, 20), 4) >>> from decimal import Decimal as D >>> data = [D("0.1375"), D("0.2108"), D("0.3061"), D("0.0419")] >>> _sum(data) - Decimal('0.6963') + (, Fraction(6963, 10000), 4) Mixed types are currently treated as an error, except that int is allowed. """ - # We fail as soon as we reach a value that is not an int or the type of - # the first value which is not an int. E.g. _sum([int, int, float, int]) - # is okay, but sum([int, int, float, Fraction]) is not. - allowed_types = set([int, type(start)]) + count = 0 n, d = _exact_ratio(start) - partials = {d: n} # map {denominator: sum of numerators} - # Micro-optimizations. - exact_ratio = _exact_ratio + partials = {d: n} partials_get = partials.get - # Add numerators for each denominator. - for x in data: - _check_type(type(x), allowed_types) - n, d = exact_ratio(x) - partials[d] = partials_get(d, 0) + n - # Find the expected result type. If allowed_types has only one item, it - # will be int; if it has two, use the one which isn't int. - assert len(allowed_types) in (1, 2) - if len(allowed_types) == 1: - assert allowed_types.pop() is int - T = int + T = _coerce(int, type(start)) + for typ, values in groupby(data, type): + T = _coerce(T, typ) # or raise TypeError + for n,d in map(_exact_ratio, values): + count += 1 + partials[d] = partials_get(d, 0) + n + if None in partials: + # The sum will be a NAN or INF. We can ignore all the finite + # partials, and just look at this special one. 
+ total = partials[None] + assert not _isfinite(total) else: - T = (allowed_types - set([int])).pop() - if None in partials: - assert issubclass(T, (float, Decimal)) - assert not math.isfinite(partials[None]) - return T(partials[None]) - total = Fraction() - for d, n in sorted(partials.items()): - total += Fraction(n, d) - if issubclass(T, int): - assert total.denominator == 1 - return T(total.numerator) - if issubclass(T, Decimal): - return T(total.numerator)/total.denominator - return T(total) + # Sum all the partial sums using builtin sum. + # FIXME is this faster if we sum them in order of the denominator? + total = sum(Fraction(n, d) for d, n in sorted(partials.items())) + return (T, total, count) -def _check_type(T, allowed): - if T not in allowed: - if len(allowed) == 1: - allowed.add(T) - else: - types = ', '.join([t.__name__ for t in allowed] + [T.__name__]) - raise TypeError("unsupported mixed types: %s" % types) +def _isfinite(x): + try: + return x.is_finite() # Likely a Decimal. + except AttributeError: + return math.isfinite(x) # Coerces to float first. + + +def _coerce(T, S): + """Coerce types T and S to a common type, or raise TypeError. + + Coercion rules are currently an implementation detail. See the CoerceTest + test class in test_statistics for details. + """ + # See http://bugs.python.org/issue24068. + assert T is not bool, "initial type T is bool" + # If the types are the same, no need to coerce anything. Put this + # first, so that the usual case (no coercion needed) happens as soon + # as possible. + if T is S: return T + # Mixed int & other coerce to the other type. + if S is int or S is bool: return T + if T is int: return S + # If one is a (strict) subclass of the other, coerce to the subclass. + if issubclass(S, T): return S + if issubclass(T, S): return T + # Ints coerce to the other type. + if issubclass(T, int): return S + if issubclass(S, int): return T + # Mixed fraction & float coerces to float (or float subclass). + if issubclass(T, Fraction) and issubclass(S, float): + return S + if issubclass(T, float) and issubclass(S, Fraction): + return T + # Any other combination is disallowed. + msg = "don't know how to coerce %s and %s" + raise TypeError(msg % (T.__name__, S.__name__)) def _exact_ratio(x): - """Convert Real number x exactly to (numerator, denominator) pair. + """Return Real number x to exact (numerator, denominator) pair. >>> _exact_ratio(0.25) (1, 4) @@ -202,29 +220,31 @@ x is expected to be an int, Fraction, Decimal or float. """ try: + # Optimise the common case of floats. We expect that the most often + # used numeric type will be builtin floats, so try to make this as + # fast as possible. + if type(x) is float: + return x.as_integer_ratio() try: - # int, Fraction + # x may be an int, Fraction, or Integral ABC. return (x.numerator, x.denominator) except AttributeError: - # float try: + # x may be a float subclass. return x.as_integer_ratio() except AttributeError: - # Decimal try: + # x may be a Decimal. return _decimal_to_ratio(x) except AttributeError: - msg = "can't convert type '{}' to numerator/denominator" - raise TypeError(msg.format(type(x).__name__)) from None + # Just give up? + pass except (OverflowError, ValueError): - # INF or NAN - if __debug__: - # Decimal signalling NANs cannot be converted to float :-( - if isinstance(x, Decimal): - assert not x.is_finite() - else: - assert not math.isfinite(x) + # float NAN or INF. 
+ assert not math.isfinite(x) return (x, None) + msg = "can't convert type '{}' to numerator/denominator" + raise TypeError(msg.format(type(x).__name__)) # FIXME This is faster than Fraction.from_decimal, but still too slow. @@ -239,7 +259,7 @@ sign, digits, exp = d.as_tuple() if exp in ('F', 'n', 'N'): # INF, NAN, sNAN assert not d.is_finite() - raise ValueError + return (d, None) num = 0 for digit in digits: num = num*10 + digit @@ -253,6 +273,24 @@ return (num, den) +def _convert(value, T): + """Convert value to given numeric type T.""" + if type(value) is T: + # This covers the cases where T is Fraction, or where value is + # a NAN or INF (Decimal or float). + return value + if issubclass(T, int) and value.denominator != 1: + T = float + try: + # FIXME: what do we do if this overflows? + return T(value) + except TypeError: + if issubclass(T, Decimal): + return T(value.numerator)/T(value.denominator) + else: + raise + + def _counts(data): # Generate a table of sorted (value, frequency) pairs. table = collections.Counter(iter(data)).most_common() @@ -290,7 +328,9 @@ n = len(data) if n < 1: raise StatisticsError('mean requires at least one data point') - return _sum(data)/n + T, total, count = _sum(data) + assert count == n + return _convert(total/n, T) # FIXME: investigate ways to calculate medians without sorting? Quickselect? @@ -460,12 +500,14 @@ """ if c is None: c = mean(data) - ss = _sum((x-c)**2 for x in data) + T, total, count = _sum((x-c)**2 for x in data) # The following sum should mathematically equal zero, but due to rounding # error may not. - ss -= _sum((x-c) for x in data)**2/len(data) - assert not ss < 0, 'negative sum of square deviations: %f' % ss - return ss + U, total2, count2 = _sum((x-c) for x in data) + assert T == U and count == count2 + total -= total2**2/len(data) + assert not total < 0, 'negative sum of square deviations: %f' % total + return (T, total) def variance(data, xbar=None): @@ -511,8 +553,8 @@ n = len(data) if n < 2: raise StatisticsError('variance requires at least two data points') - ss = _ss(data, xbar) - return ss/(n-1) + T, ss = _ss(data, xbar) + return _convert(ss/(n-1), T) def pvariance(data, mu=None): @@ -560,7 +602,8 @@ if n < 1: raise StatisticsError('pvariance requires at least one data point') ss = _ss(data, mu) - return ss/n + T, ss = _ss(data, mu) + return _convert(ss/n, T) def stdev(data, xbar=None): diff --git a/Lib/test/test_statistics.py b/Lib/test/test_statistics.py --- a/Lib/test/test_statistics.py +++ b/Lib/test/test_statistics.py @@ -21,6 +21,37 @@ # === Helper functions and class === +def _nan_equal(a, b): + """Return True if a and b are both the same kind of NAN. + + >>> _nan_equal(Decimal('NAN'), Decimal('NAN')) + True + >>> _nan_equal(Decimal('sNAN'), Decimal('sNAN')) + True + >>> _nan_equal(Decimal('NAN'), Decimal('sNAN')) + False + >>> _nan_equal(Decimal(42), Decimal('NAN')) + False + + >>> _nan_equal(float('NAN'), float('NAN')) + True + >>> _nan_equal(float('NAN'), 0.5) + False + + >>> _nan_equal(float('NAN'), Decimal('NAN')) + False + + NAN payloads are not compared. + """ + if type(a) is not type(b): + return False + if isinstance(a, float): + return math.isnan(a) and math.isnan(b) + aexp = a.as_tuple()[2] + bexp = b.as_tuple()[2] + return (aexp == bexp) and (aexp in ('n', 'N')) # Both NAN or both sNAN. + + def _calc_errors(actual, expected): """Return the absolute and relative errors between two numbers. 
@@ -675,15 +706,60 @@ self.assertEqual(_exact_ratio(D("12.345")), (12345, 1000)) self.assertEqual(_exact_ratio(D("-1.98")), (-198, 100)) + def test_inf(self): + INF = float("INF") + class MyFloat(float): + pass + class MyDecimal(Decimal): + pass + for inf in (INF, -INF): + for type_ in (float, MyFloat, Decimal, MyDecimal): + x = type_(inf) + ratio = statistics._exact_ratio(x) + self.assertEqual(ratio, (x, None)) + self.assertEqual(type(ratio[0]), type_) + self.assertTrue(math.isinf(ratio[0])) + + def test_float_nan(self): + NAN = float("NAN") + class MyFloat(float): + pass + for nan in (NAN, MyFloat(NAN)): + ratio = statistics._exact_ratio(nan) + self.assertTrue(math.isnan(ratio[0])) + self.assertIs(ratio[1], None) + self.assertEqual(type(ratio[0]), type(nan)) + + def test_decimal_nan(self): + NAN = Decimal("NAN") + sNAN = Decimal("sNAN") + class MyDecimal(Decimal): + pass + for nan in (NAN, MyDecimal(NAN), sNAN, MyDecimal(sNAN)): + ratio = statistics._exact_ratio(nan) + self.assertTrue(_nan_equal(ratio[0], nan)) + self.assertIs(ratio[1], None) + self.assertEqual(type(ratio[0]), type(nan)) + class DecimalToRatioTest(unittest.TestCase): # Test _decimal_to_ratio private function. - def testSpecialsRaise(self): - # Test that NANs and INFs raise ValueError. - # Non-special values are covered by _exact_ratio above. - for d in (Decimal('NAN'), Decimal('sNAN'), Decimal('INF')): - self.assertRaises(ValueError, statistics._decimal_to_ratio, d) + def test_infinity(self): + # Test that INFs are handled correctly. + inf = Decimal('INF') + self.assertEqual(statistics._decimal_to_ratio(inf), (inf, None)) + self.assertEqual(statistics._decimal_to_ratio(-inf), (-inf, None)) + + def test_nan(self): + # Test that NANs are handled correctly. + for nan in (Decimal('NAN'), Decimal('sNAN')): + num, den = statistics._decimal_to_ratio(nan) + # Because NANs always compare non-equal, we cannot use assertEqual. + # Nor can we use an identity test, as we don't guarantee anything + # about the object identity. + self.assertTrue(_nan_equal(num, nan)) + self.assertIs(den, None) def test_sign(self): # Test sign is calculated correctly. @@ -718,25 +794,181 @@ self.assertEqual(t, (147000, 1)) -class CheckTypeTest(unittest.TestCase): - # Test _check_type private function. +class IsFiniteTest(unittest.TestCase): + # Test _isfinite private function. - def test_allowed(self): - # Test that a type which should be allowed is allowed. - allowed = set([int, float]) - statistics._check_type(int, allowed) - statistics._check_type(float, allowed) + def test_finite(self): + # Test that finite numbers are recognised as finite. + for x in (5, Fraction(1, 3), 2.5, Decimal("5.5")): + self.assertTrue(statistics._isfinite(x)) - def test_not_allowed(self): - # Test that a type which should not be allowed raises. - allowed = set([int, float]) - self.assertRaises(TypeError, statistics._check_type, Decimal, allowed) + def test_infinity(self): + # Test that INFs are not recognised as finite. + for x in (float("inf"), Decimal("inf")): + self.assertFalse(statistics._isfinite(x)) - def test_add_to_allowed(self): - # Test that a second type will be added to the allowed set. - allowed = set([int]) - statistics._check_type(float, allowed) - self.assertEqual(allowed, set([int, float])) + def test_nan(self): + # Test that NANs are not recognised as finite. 
+ for x in (float("nan"), Decimal("NAN"), Decimal("sNAN")): + self.assertFalse(statistics._isfinite(x)) + + +class CoerceTest(unittest.TestCase): + # Test that private function _coerce correctly deals with types. + + # The coercion rules are currently an implementation detail, although at + # some point that should change. The tests and comments here define the + # correct implementation. + + # Pre-conditions of _coerce: + # + # - The first time _sum calls _coerce, the + # - coerce(T, S) will never be called with bool as the first argument; + # this is a pre-condition, guarded with an assertion. + + # + # - coerce(T, T) will always return T; we assume T is a valid numeric + # type. Violate this assumption at your own risk. + # + # - Apart from as above, bool is treated as if it were actually int. + # + # - coerce(int, X) and coerce(X, int) return X. + # - + def test_bool(self): + # bool is somewhat special, due to the pre-condition that it is + # never given as the first argument to _coerce, and that it cannot + # be subclassed. So we test it specially. + for T in (int, float, Fraction, Decimal): + self.assertIs(statistics._coerce(T, bool), T) + class MyClass(T): pass + self.assertIs(statistics._coerce(MyClass, bool), MyClass) + + def assertCoerceTo(self, A, B): + """Assert that type A coerces to B.""" + self.assertIs(statistics._coerce(A, B), B) + self.assertIs(statistics._coerce(B, A), B) + + def check_coerce_to(self, A, B): + """Checks that type A coerces to B, including subclasses.""" + # Assert that type A is coerced to B. + self.assertCoerceTo(A, B) + # Subclasses of A are also coerced to B. + class SubclassOfA(A): pass + self.assertCoerceTo(SubclassOfA, B) + # A, and subclasses of A, are coerced to subclasses of B. + class SubclassOfB(B): pass + self.assertCoerceTo(A, SubclassOfB) + self.assertCoerceTo(SubclassOfA, SubclassOfB) + + def assertCoerceRaises(self, A, B): + """Assert that coercing A to B, or vice versa, raises TypeError.""" + self.assertRaises(TypeError, statistics._coerce, (A, B)) + self.assertRaises(TypeError, statistics._coerce, (B, A)) + + def check_type_coercions(self, T): + """Check that type T coerces correctly with subclasses of itself.""" + assert T is not bool + # Coercing a type with itself returns the same type. + self.assertIs(statistics._coerce(T, T), T) + # Coercing a type with a subclass of itself returns the subclass. + class U(T): pass + class V(T): pass + class W(U): pass + for typ in (U, V, W): + self.assertCoerceTo(T, typ) + self.assertCoerceTo(U, W) + # Coercing two subclasses that aren't parent/child is an error. + self.assertCoerceRaises(U, V) + self.assertCoerceRaises(V, W) + + def test_int(self): + # Check that int coerces correctly. + self.check_type_coercions(int) + for typ in (float, Fraction, Decimal): + self.check_coerce_to(int, typ) + + def test_fraction(self): + # Check that Fraction coerces correctly. + self.check_type_coercions(Fraction) + self.check_coerce_to(Fraction, float) + + def test_decimal(self): + # Check that Decimal coerces correctly. + self.check_type_coercions(Decimal) + + def test_float(self): + # Check that float coerces correctly. + self.check_type_coercions(float) + + def test_non_numeric_types(self): + for bad_type in (str, list, type(None), tuple, dict): + for good_type in (int, float, Fraction, Decimal): + self.assertCoerceRaises(good_type, bad_type) + + def test_incompatible_types(self): + # Test that incompatible types raise. 
+ for T in (float, Fraction): + class MySubclass(T): pass + self.assertCoerceRaises(T, Decimal) + self.assertCoerceRaises(MySubclass, Decimal) + + +class ConvertTest(unittest.TestCase): + # Test private _convert function. + + def check_exact_equal(self, x, y): + """Check that x equals y, and has the same type as well.""" + self.assertEqual(x, y) + self.assertIs(type(x), type(y)) + + def test_int(self): + # Test conversions to int. + x = statistics._convert(Fraction(71), int) + self.check_exact_equal(x, 71) + class MyInt(int): pass + x = statistics._convert(Fraction(17), MyInt) + self.check_exact_equal(x, MyInt(17)) + + def test_fraction(self): + # Test conversions to Fraction. + x = statistics._convert(Fraction(95, 99), Fraction) + self.check_exact_equal(x, Fraction(95, 99)) + class MyFraction(Fraction): + def __truediv__(self, other): + return self.__class__(super().__truediv__(other)) + x = statistics._convert(Fraction(71, 13), MyFraction) + self.check_exact_equal(x, MyFraction(71, 13)) + + def test_float(self): + # Test conversions to float. + x = statistics._convert(Fraction(-1, 2), float) + self.check_exact_equal(x, -0.5) + class MyFloat(float): + def __truediv__(self, other): + return self.__class__(super().__truediv__(other)) + x = statistics._convert(Fraction(9, 8), MyFloat) + self.check_exact_equal(x, MyFloat(1.125)) + + def test_decimal(self): + # Test conversions to Decimal. + x = statistics._convert(Fraction(1, 40), Decimal) + self.check_exact_equal(x, Decimal("0.025")) + class MyDecimal(Decimal): + def __truediv__(self, other): + return self.__class__(super().__truediv__(other)) + x = statistics._convert(Fraction(-15, 16), MyDecimal) + self.check_exact_equal(x, MyDecimal("-0.9375")) + + def test_inf(self): + for INF in (float('inf'), Decimal('inf')): + for inf in (INF, -INF): + x = statistics._convert(inf, type(inf)) + self.check_exact_equal(x, inf) + + def test_nan(self): + for nan in (float('nan'), Decimal('NAN'), Decimal('sNAN')): + x = statistics._convert(nan, type(nan)) + self.assertTrue(_nan_equal(x, nan)) # === Tests for public functions === @@ -874,52 +1106,71 @@ self.assertIs(type(result), kind) -class TestSum(NumericTestCase, UnivariateCommonMixin, UnivariateTypeMixin): +class TestSumCommon(UnivariateCommonMixin, UnivariateTypeMixin): + # Common test cases for statistics._sum() function. + + # This test suite looks only at the numeric value returned by _sum, + # after conversion to the appropriate type. + def setUp(self): + def simplified_sum(*args): + T, value, n = statistics._sum(*args) + return statistics._coerce(value, T) + self.func = simplified_sum + + +class TestSum(NumericTestCase): # Test cases for statistics._sum() function. + # These tests look at the entire three value tuple returned by _sum. + def setUp(self): self.func = statistics._sum def test_empty_data(self): # Override test for empty data. 
for data in ([], (), iter([])): - self.assertEqual(self.func(data), 0) - self.assertEqual(self.func(data, 23), 23) - self.assertEqual(self.func(data, 2.3), 2.3) + self.assertEqual(self.func(data), (int, Fraction(0), 0)) + self.assertEqual(self.func(data, 23), (int, Fraction(23), 0)) + self.assertEqual(self.func(data, 2.3), (float, Fraction(2.3), 0)) def test_ints(self): - self.assertEqual(self.func([1, 5, 3, -4, -8, 20, 42, 1]), 60) - self.assertEqual(self.func([4, 2, 3, -8, 7], 1000), 1008) + self.assertEqual(self.func([1, 5, 3, -4, -8, 20, 42, 1]), + (int, Fraction(60), 8)) + self.assertEqual(self.func([4, 2, 3, -8, 7], 1000), + (int, Fraction(1008), 5)) def test_floats(self): - self.assertEqual(self.func([0.25]*20), 5.0) - self.assertEqual(self.func([0.125, 0.25, 0.5, 0.75], 1.5), 3.125) + self.assertEqual(self.func([0.25]*20), + (float, Fraction(5.0), 20)) + self.assertEqual(self.func([0.125, 0.25, 0.5, 0.75], 1.5), + (float, Fraction(3.125), 4)) def test_fractions(self): - F = Fraction - self.assertEqual(self.func([Fraction(1, 1000)]*500), Fraction(1, 2)) + self.assertEqual(self.func([Fraction(1, 1000)]*500), + (Fraction, Fraction(1, 2), 500)) def test_decimals(self): D = Decimal data = [D("0.001"), D("5.246"), D("1.702"), D("-0.025"), D("3.974"), D("2.328"), D("4.617"), D("2.843"), ] - self.assertEqual(self.func(data), Decimal("20.686")) + self.assertEqual(self.func(data), + (Decimal, Decimal("20.686"), 8)) def test_compare_with_math_fsum(self): # Compare with the math.fsum function. # Ideally we ought to get the exact same result, but sometimes # we differ by a very slight amount :-( data = [random.uniform(-100, 1000) for _ in range(1000)] - self.assertApproxEqual(self.func(data), math.fsum(data), rel=2e-16) + self.assertApproxEqual(float(self.func(data)[1]), math.fsum(data), rel=2e-16) def test_start_argument(self): # Test that the optional start argument works correctly. data = [random.uniform(1, 1000) for _ in range(100)] - t = self.func(data) - self.assertEqual(t+42, self.func(data, 42)) - self.assertEqual(t-23, self.func(data, -23)) - self.assertEqual(t+1e20, self.func(data, 1e20)) + t = self.func(data)[1] + self.assertEqual(t+42, self.func(data, 42)[1]) + self.assertEqual(t-23, self.func(data, -23)[1]) + self.assertEqual(t+Fraction(1e20), self.func(data, 1e20)[1]) def test_strings_fail(self): # Sum of strings should fail. @@ -934,7 +1185,7 @@ def test_mixed_sum(self): # Mixed input types are not (currently) allowed. # Check that mixed data types fail. - self.assertRaises(TypeError, self.func, [1, 2.0, Fraction(1, 2)]) + self.assertRaises(TypeError, self.func, [1, 2.0, Decimal(1)]) # And so does mixed start argument. self.assertRaises(TypeError, self.func, [1, 2.0], Decimal(1)) @@ -942,11 +1193,14 @@ class SumTortureTest(NumericTestCase): def test_torture(self): # Tim Peters' torture test for sum, and variants of same. 
- self.assertEqual(statistics._sum([1, 1e100, 1, -1e100]*10000), 20000.0) - self.assertEqual(statistics._sum([1e100, 1, 1, -1e100]*10000), 20000.0) - self.assertApproxEqual( - statistics._sum([1e-100, 1, 1e-100, -1]*10000), 2.0e-96, rel=5e-16 - ) + self.assertEqual(statistics._sum([1, 1e100, 1, -1e100]*10000), + (float, Fraction(20000.0), 40000)) + self.assertEqual(statistics._sum([1e100, 1, 1, -1e100]*10000), + (float, Fraction(20000.0), 40000)) + T, num, count = statistics._sum([1e-100, 1, 1e-100, -1]*10000) + self.assertIs(T, float) + self.assertEqual(count, 40000) + self.assertApproxEqual(float(num), 2.0e-96, rel=5e-16) class SumSpecialValues(NumericTestCase): @@ -955,7 +1209,7 @@ def test_nan(self): for type_ in (float, Decimal): nan = type_('nan') - result = statistics._sum([1, nan, 2]) + result = statistics._sum([1, nan, 2])[1] self.assertIs(type(result), type_) self.assertTrue(math.isnan(result)) @@ -968,10 +1222,10 @@ def do_test_inf(self, inf): # Adding a single infinity gives infinity. - result = statistics._sum([1, 2, inf, 3]) + result = statistics._sum([1, 2, inf, 3])[1] self.check_infinity(result, inf) # Adding two infinities of the same sign also gives infinity. - result = statistics._sum([1, 2, inf, 3, inf, 4]) + result = statistics._sum([1, 2, inf, 3, inf, 4])[1] self.check_infinity(result, inf) def test_float_inf(self): @@ -987,7 +1241,7 @@ def test_float_mismatched_infs(self): # Test that adding two infinities of opposite sign gives a NAN. inf = float('inf') - result = statistics._sum([1, 2, inf, 3, -inf, 4]) + result = statistics._sum([1, 2, inf, 3, -inf, 4])[1] self.assertTrue(math.isnan(result)) def test_decimal_extendedcontext_mismatched_infs_to_nan(self): @@ -995,7 +1249,7 @@ inf = Decimal('inf') data = [1, 2, inf, 3, -inf, 4] with decimal.localcontext(decimal.ExtendedContext): - self.assertTrue(math.isnan(statistics._sum(data))) + self.assertTrue(math.isnan(statistics._sum(data)[1])) def test_decimal_basiccontext_mismatched_infs_to_nan(self): # Test adding Decimal INFs with opposite sign raises InvalidOperation. @@ -1111,6 +1365,19 @@ d = Decimal('1e4') self.assertEqual(statistics.mean([d]), d) + def test_regression_25177(self): + # Regression test for issue 25177. + # Ensure very big and very small floats don't overflow. + # See http://bugs.python.org/issue25177. + self.assertEqual(statistics.mean( + [8.988465674311579e+307, 8.98846567431158e+307]), + 8.98846567431158e+307) + big = 8.98846567431158e+307 + tiny = 5e-324 + for n in (2, 3, 5, 200): + self.assertEqual(statistics.mean([big]*n), big) + self.assertEqual(statistics.mean([tiny]*n), tiny) + class TestMedian(NumericTestCase, AverageMixin): # Common tests for median and all median.* functions. diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -113,6 +113,10 @@ Library ------- +- Issue #25177: Fixed problem with the mean of very small and very large + numbers. As a side effect, statistics.mean and statistics.variance should + be significantly faster. + - Issue #25718: Fixed copying object with state with boolean value is false. - Issue #10131: Fixed deep copying of minidom documents. 
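For context, the sketch below is illustrative only and not part of the committed patch: it shows the failure mode behind issue #25177 at the level of the public API. Two floats whose exact sum exceeds the largest finite double cannot be averaged in plain float arithmetic, while the patched mean() accumulates an exact Fraction and only converts back to float after dividing by the count. The expected output follows the test_regression_25177 case added above; statistics.mean and the two float literals are the only names assumed.

import statistics

# Two floats whose exact sum just exceeds the largest finite double
# (about 1.7976931348623157e+308).
a = 8.988465674311579e+307
b = 8.98846567431158e+307

# Summing in float arithmetic overflows before the division can pull the
# result back into range:
print((a + b) / 2)               # inf

# The patched mean() sums exactly as a Fraction and divides by the count
# before converting, so the result stays finite:
print(statistics.mean([a, b]))   # 8.98846567431158e+307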
Based on patch -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Tue Dec 1 09:12:02 2015 From: python-checkins at python.org (steven.daprano) Date: Tue, 01 Dec 2015 14:12:02 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=283=2E5=29=3A_Fixed_issue_?= =?utf-8?q?=2325177=2C_problems_with_the_mean_of_very_small_and_very_large?= =?utf-8?q?_numbers=2E?= Message-ID: <20151201141109.654.6754@psf.io> https://hg.python.org/cpython/rev/ed45a09e5a69 changeset: 99407:ed45a09e5a69 branch: 3.5 parent: 99404:fee19d2d7713 user: Steven D'Aprano date: Tue Dec 01 17:04:32 2015 +1100 summary: Fixed issue #25177, problems with the mean of very small and very large numbers. files: Lib/statistics.py | 181 +++++++---- Lib/test/test_statistics.py | 363 ++++++++++++++++++++--- Misc/NEWS | 4 + 3 files changed, 431 insertions(+), 117 deletions(-) diff --git a/Lib/statistics.py b/Lib/statistics.py --- a/Lib/statistics.py +++ b/Lib/statistics.py @@ -104,6 +104,8 @@ from fractions import Fraction from decimal import Decimal +from itertools import groupby + # === Exceptions === @@ -115,86 +117,102 @@ # === Private utilities === def _sum(data, start=0): - """_sum(data [, start]) -> value + """_sum(data [, start]) -> (type, sum, count) - Return a high-precision sum of the given numeric data. If optional - argument ``start`` is given, it is added to the total. If ``data`` is - empty, ``start`` (defaulting to 0) is returned. + Return a high-precision sum of the given numeric data as a fraction, + together with the type to be converted to and the count of items. + + If optional argument ``start`` is given, it is added to the total. + If ``data`` is empty, ``start`` (defaulting to 0) is returned. Examples -------- >>> _sum([3, 2.25, 4.5, -0.5, 1.0], 0.75) - 11.0 + (, Fraction(11, 1), 5) Some sources of round-off error will be avoided: >>> _sum([1e50, 1, -1e50] * 1000) # Built-in sum returns zero. - 1000.0 + (, Fraction(1000, 1), 3000) Fractions and Decimals are also supported: >>> from fractions import Fraction as F >>> _sum([F(2, 3), F(7, 5), F(1, 4), F(5, 6)]) - Fraction(63, 20) + (, Fraction(63, 20), 4) >>> from decimal import Decimal as D >>> data = [D("0.1375"), D("0.2108"), D("0.3061"), D("0.0419")] >>> _sum(data) - Decimal('0.6963') + (, Fraction(6963, 10000), 4) Mixed types are currently treated as an error, except that int is allowed. """ - # We fail as soon as we reach a value that is not an int or the type of - # the first value which is not an int. E.g. _sum([int, int, float, int]) - # is okay, but sum([int, int, float, Fraction]) is not. - allowed_types = {int, type(start)} + count = 0 n, d = _exact_ratio(start) - partials = {d: n} # map {denominator: sum of numerators} - # Micro-optimizations. - exact_ratio = _exact_ratio + partials = {d: n} partials_get = partials.get - # Add numerators for each denominator. - for x in data: - _check_type(type(x), allowed_types) - n, d = exact_ratio(x) - partials[d] = partials_get(d, 0) + n - # Find the expected result type. If allowed_types has only one item, it - # will be int; if it has two, use the one which isn't int. - assert len(allowed_types) in (1, 2) - if len(allowed_types) == 1: - assert allowed_types.pop() is int - T = int + T = _coerce(int, type(start)) + for typ, values in groupby(data, type): + T = _coerce(T, typ) # or raise TypeError + for n,d in map(_exact_ratio, values): + count += 1 + partials[d] = partials_get(d, 0) + n + if None in partials: + # The sum will be a NAN or INF. 
We can ignore all the finite + # partials, and just look at this special one. + total = partials[None] + assert not _isfinite(total) else: - T = (allowed_types - {int}).pop() - if None in partials: - assert issubclass(T, (float, Decimal)) - assert not math.isfinite(partials[None]) - return T(partials[None]) - total = Fraction() - for d, n in sorted(partials.items()): - total += Fraction(n, d) - if issubclass(T, int): - assert total.denominator == 1 - return T(total.numerator) - if issubclass(T, Decimal): - return T(total.numerator)/total.denominator - return T(total) + # Sum all the partial sums using builtin sum. + # FIXME is this faster if we sum them in order of the denominator? + total = sum(Fraction(n, d) for d, n in sorted(partials.items())) + return (T, total, count) -def _check_type(T, allowed): - if T not in allowed: - if len(allowed) == 1: - allowed.add(T) - else: - types = ', '.join([t.__name__ for t in allowed] + [T.__name__]) - raise TypeError("unsupported mixed types: %s" % types) +def _isfinite(x): + try: + return x.is_finite() # Likely a Decimal. + except AttributeError: + return math.isfinite(x) # Coerces to float first. + + +def _coerce(T, S): + """Coerce types T and S to a common type, or raise TypeError. + + Coercion rules are currently an implementation detail. See the CoerceTest + test class in test_statistics for details. + """ + # See http://bugs.python.org/issue24068. + assert T is not bool, "initial type T is bool" + # If the types are the same, no need to coerce anything. Put this + # first, so that the usual case (no coercion needed) happens as soon + # as possible. + if T is S: return T + # Mixed int & other coerce to the other type. + if S is int or S is bool: return T + if T is int: return S + # If one is a (strict) subclass of the other, coerce to the subclass. + if issubclass(S, T): return S + if issubclass(T, S): return T + # Ints coerce to the other type. + if issubclass(T, int): return S + if issubclass(S, int): return T + # Mixed fraction & float coerces to float (or float subclass). + if issubclass(T, Fraction) and issubclass(S, float): + return S + if issubclass(T, float) and issubclass(S, Fraction): + return T + # Any other combination is disallowed. + msg = "don't know how to coerce %s and %s" + raise TypeError(msg % (T.__name__, S.__name__)) def _exact_ratio(x): - """Convert Real number x exactly to (numerator, denominator) pair. + """Return Real number x to exact (numerator, denominator) pair. >>> _exact_ratio(0.25) (1, 4) @@ -202,29 +220,31 @@ x is expected to be an int, Fraction, Decimal or float. """ try: + # Optimise the common case of floats. We expect that the most often + # used numeric type will be builtin floats, so try to make this as + # fast as possible. + if type(x) is float: + return x.as_integer_ratio() try: - # int, Fraction + # x may be an int, Fraction, or Integral ABC. return (x.numerator, x.denominator) except AttributeError: - # float try: + # x may be a float subclass. return x.as_integer_ratio() except AttributeError: - # Decimal try: + # x may be a Decimal. return _decimal_to_ratio(x) except AttributeError: - msg = "can't convert type '{}' to numerator/denominator" - raise TypeError(msg.format(type(x).__name__)) from None + # Just give up? + pass except (OverflowError, ValueError): - # INF or NAN - if __debug__: - # Decimal signalling NANs cannot be converted to float :-( - if isinstance(x, Decimal): - assert not x.is_finite() - else: - assert not math.isfinite(x) + # float NAN or INF. 
+ assert not math.isfinite(x) return (x, None) + msg = "can't convert type '{}' to numerator/denominator" + raise TypeError(msg.format(type(x).__name__)) # FIXME This is faster than Fraction.from_decimal, but still too slow. @@ -239,7 +259,7 @@ sign, digits, exp = d.as_tuple() if exp in ('F', 'n', 'N'): # INF, NAN, sNAN assert not d.is_finite() - raise ValueError + return (d, None) num = 0 for digit in digits: num = num*10 + digit @@ -253,6 +273,24 @@ return (num, den) +def _convert(value, T): + """Convert value to given numeric type T.""" + if type(value) is T: + # This covers the cases where T is Fraction, or where value is + # a NAN or INF (Decimal or float). + return value + if issubclass(T, int) and value.denominator != 1: + T = float + try: + # FIXME: what do we do if this overflows? + return T(value) + except TypeError: + if issubclass(T, Decimal): + return T(value.numerator)/T(value.denominator) + else: + raise + + def _counts(data): # Generate a table of sorted (value, frequency) pairs. table = collections.Counter(iter(data)).most_common() @@ -290,7 +328,9 @@ n = len(data) if n < 1: raise StatisticsError('mean requires at least one data point') - return _sum(data)/n + T, total, count = _sum(data) + assert count == n + return _convert(total/n, T) # FIXME: investigate ways to calculate medians without sorting? Quickselect? @@ -460,12 +500,14 @@ """ if c is None: c = mean(data) - ss = _sum((x-c)**2 for x in data) + T, total, count = _sum((x-c)**2 for x in data) # The following sum should mathematically equal zero, but due to rounding # error may not. - ss -= _sum((x-c) for x in data)**2/len(data) - assert not ss < 0, 'negative sum of square deviations: %f' % ss - return ss + U, total2, count2 = _sum((x-c) for x in data) + assert T == U and count == count2 + total -= total2**2/len(data) + assert not total < 0, 'negative sum of square deviations: %f' % total + return (T, total) def variance(data, xbar=None): @@ -511,8 +553,8 @@ n = len(data) if n < 2: raise StatisticsError('variance requires at least two data points') - ss = _ss(data, xbar) - return ss/(n-1) + T, ss = _ss(data, xbar) + return _convert(ss/(n-1), T) def pvariance(data, mu=None): @@ -560,7 +602,8 @@ if n < 1: raise StatisticsError('pvariance requires at least one data point') ss = _ss(data, mu) - return ss/n + T, ss = _ss(data, mu) + return _convert(ss/n, T) def stdev(data, xbar=None): diff --git a/Lib/test/test_statistics.py b/Lib/test/test_statistics.py --- a/Lib/test/test_statistics.py +++ b/Lib/test/test_statistics.py @@ -21,6 +21,37 @@ # === Helper functions and class === +def _nan_equal(a, b): + """Return True if a and b are both the same kind of NAN. + + >>> _nan_equal(Decimal('NAN'), Decimal('NAN')) + True + >>> _nan_equal(Decimal('sNAN'), Decimal('sNAN')) + True + >>> _nan_equal(Decimal('NAN'), Decimal('sNAN')) + False + >>> _nan_equal(Decimal(42), Decimal('NAN')) + False + + >>> _nan_equal(float('NAN'), float('NAN')) + True + >>> _nan_equal(float('NAN'), 0.5) + False + + >>> _nan_equal(float('NAN'), Decimal('NAN')) + False + + NAN payloads are not compared. + """ + if type(a) is not type(b): + return False + if isinstance(a, float): + return math.isnan(a) and math.isnan(b) + aexp = a.as_tuple()[2] + bexp = b.as_tuple()[2] + return (aexp == bexp) and (aexp in ('n', 'N')) # Both NAN or both sNAN. + + def _calc_errors(actual, expected): """Return the absolute and relative errors between two numbers. 
@@ -675,15 +706,60 @@ self.assertEqual(_exact_ratio(D("12.345")), (12345, 1000)) self.assertEqual(_exact_ratio(D("-1.98")), (-198, 100)) + def test_inf(self): + INF = float("INF") + class MyFloat(float): + pass + class MyDecimal(Decimal): + pass + for inf in (INF, -INF): + for type_ in (float, MyFloat, Decimal, MyDecimal): + x = type_(inf) + ratio = statistics._exact_ratio(x) + self.assertEqual(ratio, (x, None)) + self.assertEqual(type(ratio[0]), type_) + self.assertTrue(math.isinf(ratio[0])) + + def test_float_nan(self): + NAN = float("NAN") + class MyFloat(float): + pass + for nan in (NAN, MyFloat(NAN)): + ratio = statistics._exact_ratio(nan) + self.assertTrue(math.isnan(ratio[0])) + self.assertIs(ratio[1], None) + self.assertEqual(type(ratio[0]), type(nan)) + + def test_decimal_nan(self): + NAN = Decimal("NAN") + sNAN = Decimal("sNAN") + class MyDecimal(Decimal): + pass + for nan in (NAN, MyDecimal(NAN), sNAN, MyDecimal(sNAN)): + ratio = statistics._exact_ratio(nan) + self.assertTrue(_nan_equal(ratio[0], nan)) + self.assertIs(ratio[1], None) + self.assertEqual(type(ratio[0]), type(nan)) + class DecimalToRatioTest(unittest.TestCase): # Test _decimal_to_ratio private function. - def testSpecialsRaise(self): - # Test that NANs and INFs raise ValueError. - # Non-special values are covered by _exact_ratio above. - for d in (Decimal('NAN'), Decimal('sNAN'), Decimal('INF')): - self.assertRaises(ValueError, statistics._decimal_to_ratio, d) + def test_infinity(self): + # Test that INFs are handled correctly. + inf = Decimal('INF') + self.assertEqual(statistics._decimal_to_ratio(inf), (inf, None)) + self.assertEqual(statistics._decimal_to_ratio(-inf), (-inf, None)) + + def test_nan(self): + # Test that NANs are handled correctly. + for nan in (Decimal('NAN'), Decimal('sNAN')): + num, den = statistics._decimal_to_ratio(nan) + # Because NANs always compare non-equal, we cannot use assertEqual. + # Nor can we use an identity test, as we don't guarantee anything + # about the object identity. + self.assertTrue(_nan_equal(num, nan)) + self.assertIs(den, None) def test_sign(self): # Test sign is calculated correctly. @@ -718,25 +794,181 @@ self.assertEqual(t, (147000, 1)) -class CheckTypeTest(unittest.TestCase): - # Test _check_type private function. +class IsFiniteTest(unittest.TestCase): + # Test _isfinite private function. - def test_allowed(self): - # Test that a type which should be allowed is allowed. - allowed = set([int, float]) - statistics._check_type(int, allowed) - statistics._check_type(float, allowed) + def test_finite(self): + # Test that finite numbers are recognised as finite. + for x in (5, Fraction(1, 3), 2.5, Decimal("5.5")): + self.assertTrue(statistics._isfinite(x)) - def test_not_allowed(self): - # Test that a type which should not be allowed raises. - allowed = set([int, float]) - self.assertRaises(TypeError, statistics._check_type, Decimal, allowed) + def test_infinity(self): + # Test that INFs are not recognised as finite. + for x in (float("inf"), Decimal("inf")): + self.assertFalse(statistics._isfinite(x)) - def test_add_to_allowed(self): - # Test that a second type will be added to the allowed set. - allowed = set([int]) - statistics._check_type(float, allowed) - self.assertEqual(allowed, set([int, float])) + def test_nan(self): + # Test that NANs are not recognised as finite. 
+ for x in (float("nan"), Decimal("NAN"), Decimal("sNAN")): + self.assertFalse(statistics._isfinite(x)) + + +class CoerceTest(unittest.TestCase): + # Test that private function _coerce correctly deals with types. + + # The coercion rules are currently an implementation detail, although at + # some point that should change. The tests and comments here define the + # correct implementation. + + # Pre-conditions of _coerce: + # + # - The first time _sum calls _coerce, the + # - coerce(T, S) will never be called with bool as the first argument; + # this is a pre-condition, guarded with an assertion. + + # + # - coerce(T, T) will always return T; we assume T is a valid numeric + # type. Violate this assumption at your own risk. + # + # - Apart from as above, bool is treated as if it were actually int. + # + # - coerce(int, X) and coerce(X, int) return X. + # - + def test_bool(self): + # bool is somewhat special, due to the pre-condition that it is + # never given as the first argument to _coerce, and that it cannot + # be subclassed. So we test it specially. + for T in (int, float, Fraction, Decimal): + self.assertIs(statistics._coerce(T, bool), T) + class MyClass(T): pass + self.assertIs(statistics._coerce(MyClass, bool), MyClass) + + def assertCoerceTo(self, A, B): + """Assert that type A coerces to B.""" + self.assertIs(statistics._coerce(A, B), B) + self.assertIs(statistics._coerce(B, A), B) + + def check_coerce_to(self, A, B): + """Checks that type A coerces to B, including subclasses.""" + # Assert that type A is coerced to B. + self.assertCoerceTo(A, B) + # Subclasses of A are also coerced to B. + class SubclassOfA(A): pass + self.assertCoerceTo(SubclassOfA, B) + # A, and subclasses of A, are coerced to subclasses of B. + class SubclassOfB(B): pass + self.assertCoerceTo(A, SubclassOfB) + self.assertCoerceTo(SubclassOfA, SubclassOfB) + + def assertCoerceRaises(self, A, B): + """Assert that coercing A to B, or vice versa, raises TypeError.""" + self.assertRaises(TypeError, statistics._coerce, (A, B)) + self.assertRaises(TypeError, statistics._coerce, (B, A)) + + def check_type_coercions(self, T): + """Check that type T coerces correctly with subclasses of itself.""" + assert T is not bool + # Coercing a type with itself returns the same type. + self.assertIs(statistics._coerce(T, T), T) + # Coercing a type with a subclass of itself returns the subclass. + class U(T): pass + class V(T): pass + class W(U): pass + for typ in (U, V, W): + self.assertCoerceTo(T, typ) + self.assertCoerceTo(U, W) + # Coercing two subclasses that aren't parent/child is an error. + self.assertCoerceRaises(U, V) + self.assertCoerceRaises(V, W) + + def test_int(self): + # Check that int coerces correctly. + self.check_type_coercions(int) + for typ in (float, Fraction, Decimal): + self.check_coerce_to(int, typ) + + def test_fraction(self): + # Check that Fraction coerces correctly. + self.check_type_coercions(Fraction) + self.check_coerce_to(Fraction, float) + + def test_decimal(self): + # Check that Decimal coerces correctly. + self.check_type_coercions(Decimal) + + def test_float(self): + # Check that float coerces correctly. + self.check_type_coercions(float) + + def test_non_numeric_types(self): + for bad_type in (str, list, type(None), tuple, dict): + for good_type in (int, float, Fraction, Decimal): + self.assertCoerceRaises(good_type, bad_type) + + def test_incompatible_types(self): + # Test that incompatible types raise. 
+ for T in (float, Fraction): + class MySubclass(T): pass + self.assertCoerceRaises(T, Decimal) + self.assertCoerceRaises(MySubclass, Decimal) + + +class ConvertTest(unittest.TestCase): + # Test private _convert function. + + def check_exact_equal(self, x, y): + """Check that x equals y, and has the same type as well.""" + self.assertEqual(x, y) + self.assertIs(type(x), type(y)) + + def test_int(self): + # Test conversions to int. + x = statistics._convert(Fraction(71), int) + self.check_exact_equal(x, 71) + class MyInt(int): pass + x = statistics._convert(Fraction(17), MyInt) + self.check_exact_equal(x, MyInt(17)) + + def test_fraction(self): + # Test conversions to Fraction. + x = statistics._convert(Fraction(95, 99), Fraction) + self.check_exact_equal(x, Fraction(95, 99)) + class MyFraction(Fraction): + def __truediv__(self, other): + return self.__class__(super().__truediv__(other)) + x = statistics._convert(Fraction(71, 13), MyFraction) + self.check_exact_equal(x, MyFraction(71, 13)) + + def test_float(self): + # Test conversions to float. + x = statistics._convert(Fraction(-1, 2), float) + self.check_exact_equal(x, -0.5) + class MyFloat(float): + def __truediv__(self, other): + return self.__class__(super().__truediv__(other)) + x = statistics._convert(Fraction(9, 8), MyFloat) + self.check_exact_equal(x, MyFloat(1.125)) + + def test_decimal(self): + # Test conversions to Decimal. + x = statistics._convert(Fraction(1, 40), Decimal) + self.check_exact_equal(x, Decimal("0.025")) + class MyDecimal(Decimal): + def __truediv__(self, other): + return self.__class__(super().__truediv__(other)) + x = statistics._convert(Fraction(-15, 16), MyDecimal) + self.check_exact_equal(x, MyDecimal("-0.9375")) + + def test_inf(self): + for INF in (float('inf'), Decimal('inf')): + for inf in (INF, -INF): + x = statistics._convert(inf, type(inf)) + self.check_exact_equal(x, inf) + + def test_nan(self): + for nan in (float('nan'), Decimal('NAN'), Decimal('sNAN')): + x = statistics._convert(nan, type(nan)) + self.assertTrue(_nan_equal(x, nan)) # === Tests for public functions === @@ -874,52 +1106,71 @@ self.assertIs(type(result), kind) -class TestSum(NumericTestCase, UnivariateCommonMixin, UnivariateTypeMixin): +class TestSumCommon(UnivariateCommonMixin, UnivariateTypeMixin): + # Common test cases for statistics._sum() function. + + # This test suite looks only at the numeric value returned by _sum, + # after conversion to the appropriate type. + def setUp(self): + def simplified_sum(*args): + T, value, n = statistics._sum(*args) + return statistics._coerce(value, T) + self.func = simplified_sum + + +class TestSum(NumericTestCase): # Test cases for statistics._sum() function. + # These tests look at the entire three value tuple returned by _sum. + def setUp(self): self.func = statistics._sum def test_empty_data(self): # Override test for empty data. 
for data in ([], (), iter([])): - self.assertEqual(self.func(data), 0) - self.assertEqual(self.func(data, 23), 23) - self.assertEqual(self.func(data, 2.3), 2.3) + self.assertEqual(self.func(data), (int, Fraction(0), 0)) + self.assertEqual(self.func(data, 23), (int, Fraction(23), 0)) + self.assertEqual(self.func(data, 2.3), (float, Fraction(2.3), 0)) def test_ints(self): - self.assertEqual(self.func([1, 5, 3, -4, -8, 20, 42, 1]), 60) - self.assertEqual(self.func([4, 2, 3, -8, 7], 1000), 1008) + self.assertEqual(self.func([1, 5, 3, -4, -8, 20, 42, 1]), + (int, Fraction(60), 8)) + self.assertEqual(self.func([4, 2, 3, -8, 7], 1000), + (int, Fraction(1008), 5)) def test_floats(self): - self.assertEqual(self.func([0.25]*20), 5.0) - self.assertEqual(self.func([0.125, 0.25, 0.5, 0.75], 1.5), 3.125) + self.assertEqual(self.func([0.25]*20), + (float, Fraction(5.0), 20)) + self.assertEqual(self.func([0.125, 0.25, 0.5, 0.75], 1.5), + (float, Fraction(3.125), 4)) def test_fractions(self): - F = Fraction - self.assertEqual(self.func([Fraction(1, 1000)]*500), Fraction(1, 2)) + self.assertEqual(self.func([Fraction(1, 1000)]*500), + (Fraction, Fraction(1, 2), 500)) def test_decimals(self): D = Decimal data = [D("0.001"), D("5.246"), D("1.702"), D("-0.025"), D("3.974"), D("2.328"), D("4.617"), D("2.843"), ] - self.assertEqual(self.func(data), Decimal("20.686")) + self.assertEqual(self.func(data), + (Decimal, Decimal("20.686"), 8)) def test_compare_with_math_fsum(self): # Compare with the math.fsum function. # Ideally we ought to get the exact same result, but sometimes # we differ by a very slight amount :-( data = [random.uniform(-100, 1000) for _ in range(1000)] - self.assertApproxEqual(self.func(data), math.fsum(data), rel=2e-16) + self.assertApproxEqual(float(self.func(data)[1]), math.fsum(data), rel=2e-16) def test_start_argument(self): # Test that the optional start argument works correctly. data = [random.uniform(1, 1000) for _ in range(100)] - t = self.func(data) - self.assertEqual(t+42, self.func(data, 42)) - self.assertEqual(t-23, self.func(data, -23)) - self.assertEqual(t+1e20, self.func(data, 1e20)) + t = self.func(data)[1] + self.assertEqual(t+42, self.func(data, 42)[1]) + self.assertEqual(t-23, self.func(data, -23)[1]) + self.assertEqual(t+Fraction(1e20), self.func(data, 1e20)[1]) def test_strings_fail(self): # Sum of strings should fail. @@ -934,7 +1185,7 @@ def test_mixed_sum(self): # Mixed input types are not (currently) allowed. # Check that mixed data types fail. - self.assertRaises(TypeError, self.func, [1, 2.0, Fraction(1, 2)]) + self.assertRaises(TypeError, self.func, [1, 2.0, Decimal(1)]) # And so does mixed start argument. self.assertRaises(TypeError, self.func, [1, 2.0], Decimal(1)) @@ -942,11 +1193,14 @@ class SumTortureTest(NumericTestCase): def test_torture(self): # Tim Peters' torture test for sum, and variants of same. 
- self.assertEqual(statistics._sum([1, 1e100, 1, -1e100]*10000), 20000.0) - self.assertEqual(statistics._sum([1e100, 1, 1, -1e100]*10000), 20000.0) - self.assertApproxEqual( - statistics._sum([1e-100, 1, 1e-100, -1]*10000), 2.0e-96, rel=5e-16 - ) + self.assertEqual(statistics._sum([1, 1e100, 1, -1e100]*10000), + (float, Fraction(20000.0), 40000)) + self.assertEqual(statistics._sum([1e100, 1, 1, -1e100]*10000), + (float, Fraction(20000.0), 40000)) + T, num, count = statistics._sum([1e-100, 1, 1e-100, -1]*10000) + self.assertIs(T, float) + self.assertEqual(count, 40000) + self.assertApproxEqual(float(num), 2.0e-96, rel=5e-16) class SumSpecialValues(NumericTestCase): @@ -955,7 +1209,7 @@ def test_nan(self): for type_ in (float, Decimal): nan = type_('nan') - result = statistics._sum([1, nan, 2]) + result = statistics._sum([1, nan, 2])[1] self.assertIs(type(result), type_) self.assertTrue(math.isnan(result)) @@ -968,10 +1222,10 @@ def do_test_inf(self, inf): # Adding a single infinity gives infinity. - result = statistics._sum([1, 2, inf, 3]) + result = statistics._sum([1, 2, inf, 3])[1] self.check_infinity(result, inf) # Adding two infinities of the same sign also gives infinity. - result = statistics._sum([1, 2, inf, 3, inf, 4]) + result = statistics._sum([1, 2, inf, 3, inf, 4])[1] self.check_infinity(result, inf) def test_float_inf(self): @@ -987,7 +1241,7 @@ def test_float_mismatched_infs(self): # Test that adding two infinities of opposite sign gives a NAN. inf = float('inf') - result = statistics._sum([1, 2, inf, 3, -inf, 4]) + result = statistics._sum([1, 2, inf, 3, -inf, 4])[1] self.assertTrue(math.isnan(result)) def test_decimal_extendedcontext_mismatched_infs_to_nan(self): @@ -995,7 +1249,7 @@ inf = Decimal('inf') data = [1, 2, inf, 3, -inf, 4] with decimal.localcontext(decimal.ExtendedContext): - self.assertTrue(math.isnan(statistics._sum(data))) + self.assertTrue(math.isnan(statistics._sum(data)[1])) def test_decimal_basiccontext_mismatched_infs_to_nan(self): # Test adding Decimal INFs with opposite sign raises InvalidOperation. @@ -1111,6 +1365,19 @@ d = Decimal('1e4') self.assertEqual(statistics.mean([d]), d) + def test_regression_25177(self): + # Regression test for issue 25177. + # Ensure very big and very small floats don't overflow. + # See http://bugs.python.org/issue25177. + self.assertEqual(statistics.mean( + [8.988465674311579e+307, 8.98846567431158e+307]), + 8.98846567431158e+307) + big = 8.98846567431158e+307 + tiny = 5e-324 + for n in (2, 3, 5, 200): + self.assertEqual(statistics.mean([big]*n), big) + self.assertEqual(statistics.mean([tiny]*n), tiny) + class TestMedian(NumericTestCase, AverageMixin): # Common tests for median and all median.* functions. diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -20,6 +20,10 @@ Library ------- +- Issue #25177: Fixed problem with the mean of very small and very large + numbers. As a side effect, statistics.mean and statistics.variance should + be significantly faster. + - Issue #25718: Fixed copying object with state with boolean value is false. - Issue #10131: Fixed deep copying of minidom documents. 
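For quick reference, the coercion rules exercised by CoerceTest above can be seen by calling the private _coerce helper directly. The sketch below is illustrative only (the coercion rules are explicitly an implementation detail, not a stable API), and the expected results in the comments follow the comments and tests in this patch.

import statistics
from decimal import Decimal
from fractions import Fraction

# int defers to the other operand's type, and vice versa.
print(statistics._coerce(int, float))        # <class 'float'>
print(statistics._coerce(Fraction, int))     # <class 'fractions.Fraction'>

# Mixed Fraction and float coerce to float.
print(statistics._coerce(Fraction, float))   # <class 'float'>

# A (strict) subclass wins over its parent class.
class MyFloat(float):
    pass

print(statistics._coerce(float, MyFloat))    # <class '__main__.MyFloat'>

# Decimal mixes only with int; any other combination raises.
try:
    statistics._coerce(float, Decimal)
except TypeError as exc:
    print(exc)                               # don't know how to coerce float and Decimal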
Based on patch -- Repository URL: https://hg.python.org/cpython From solipsis at pitrou.net Wed Dec 2 03:41:55 2015 From: solipsis at pitrou.net (solipsis at pitrou.net) Date: Wed, 02 Dec 2015 08:41:55 +0000 Subject: [Python-checkins] Daily reference leaks (0eeb39fc8ff5): sum=4 Message-ID: <20151202084154.10714.96594@psf.io> results for 0eeb39fc8ff5 on branch "default" -------------------------------------------- test_functools leaked [0, 2, 2] memory blocks, sum=4 Command line was: ['./python', '-m', 'test.regrtest', '-uall', '-R', '3:3:/home/psf-users/antoine/refleaks/reflog7DOTE3', '--timeout', '7200'] From lp_benchmark_robot at intel.com Wed Dec 2 04:43:17 2015 From: lp_benchmark_robot at intel.com (lp_benchmark_robot at intel.com) Date: Wed, 2 Dec 2015 09:43:17 +0000 Subject: [Python-checkins] Benchmark Results for Python Default 2015-12-02 Message-ID: <956d227e-6b15-46d2-8a04-d4ea998d3501@irsmsx101.ger.corp.intel.com> Results for project Python default, build date 2015-12-02 04:02:23 +0000 commit: 0eeb39fc8ff5ac1f77f33cc6d4468eceb0812e0e revision date: 2015-12-01 08:59:53 +0000 environment: Haswell-EP cpu: Intel(R) Xeon(R) CPU E5-2699 v3 @ 2.30GHz 2x18 cores, stepping 2, LLC 45 MB mem: 128 GB os: CentOS 7.1 kernel: Linux 3.10.0-229.4.2.el7.x86_64 Baseline results were generated using release v3.4.3, with hash b4cbecbc0781e89a309d03b60a1f75f8499250e6 from 2015-02-25 12:15:33+00:00 ---------------------------------------------------------------------------------- benchmark relative change since change since current rev run std_dev* last run baseline with PGO ---------------------------------------------------------------------------------- :-) django_v2 0.24% -2.89% 7.47% 17.39% :-| pybench 0.18% -0.06% -1.91% 8.69% :-( regex_v8 3.58% -1.59% -5.60% 6.55% :-| nbody 0.20% -0.05% -0.50% 10.12% :-| json_dump_v2 0.29% 0.83% -0.88% 10.35% :-| normal_startup 0.78% 0.06% 0.26% 4.82% ---------------------------------------------------------------------------------- Note: Benchmark results are measured in seconds. * Relative Standard Deviation (Standard Deviation/Average) Our lab does a nightly source pull and build of the Python project and measures performance changes against the previous stable version and the previous nightly measurement. This is provided as a service to the community so that quality issues with current hardware can be identified quickly. Intel technologies' features and benefits depend on system configuration and may require enabled hardware, software or service activation. Performance varies depending on system configuration. From lp_benchmark_robot at intel.com Wed Dec 2 04:44:02 2015 From: lp_benchmark_robot at intel.com (lp_benchmark_robot at intel.com) Date: Wed, 2 Dec 2015 09:44:02 +0000 Subject: [Python-checkins] Benchmark Results for Python 2.7 2015-12-02 Message-ID: <1df6c6df-e395-4044-b85d-f7e0eadcb05e@irsmsx101.ger.corp.intel.com> No new revisions. 
Here are the previous results: Results for project Python 2.7, build date 2015-12-02 04:49:51 +0000 commit: 6a35865eded462da46b7ac2ea35b10a45db31bbc revision date: 2015-11-30 22:32:49 +0000 environment: Haswell-EP cpu: Intel(R) Xeon(R) CPU E5-2699 v3 @ 2.30GHz 2x18 cores, stepping 2, LLC 45 MB mem: 128 GB os: CentOS 7.1 kernel: Linux 3.10.0-229.4.2.el7.x86_64 Baseline results were generated using release v2.7.10, with hash 15c95b7d81dcf821daade360741e00714667653f from 2015-05-23 16:02:14+00:00 ---------------------------------------------------------------------------------- benchmark relative change since change since current rev run std_dev* last run baseline with PGO ---------------------------------------------------------------------------------- :-) django_v2 0.15% 0.67% 2.19% 11.65% :-) pybench 0.24% 0.15% 6.20% 7.33% :-( regex_v8 1.43% -0.01% -3.13% 9.08% :-) nbody 0.12% -0.12% 9.53% 5.31% :-) json_dump_v2 0.30% 0.28% 2.67% 14.79% :-| normal_startup 1.77% -0.59% -1.87% 2.93% :-| ssbench 0.37% -0.29% 0.35% 1.04% ---------------------------------------------------------------------------------- Note: Benchmark results for ssbench are measured in requests/second while all other are measured in seconds. * Relative Standard Deviation (Standard Deviation/Average) Our lab does a nightly source pull and build of the Python project and measures performance changes against the previous stable version and the previous nightly measurement. This is provided as a service to the community so that quality issues with current hardware can be identified quickly. Intel technologies' features and benefits depend on system configuration and may require enabled hardware, software or service activation. Performance varies depending on system configuration. From python-checkins at python.org Wed Dec 2 08:36:25 2015 From: python-checkins at python.org (victor.stinner) Date: Wed, 02 Dec 2015 13:36:25 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E5_-=3E_default?= =?utf-8?q?=29=3A_Null_merge_3=2E5=2C_patch_was_already_applied_to_default?= =?utf-8?b?IChpc3VzZSAjMjUxNzcp?= Message-ID: <20151202133624.1811.91129@psf.io> https://hg.python.org/cpython/rev/a7d2307055e7 changeset: 99410:a7d2307055e7 parent: 99408:0eeb39fc8ff5 parent: 99407:ed45a09e5a69 user: Victor Stinner date: Wed Dec 02 14:36:15 2015 +0100 summary: Null merge 3.5, patch was already applied to default (isuse #25177) files: -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Wed Dec 2 08:38:13 2015 From: python-checkins at python.org (victor.stinner) Date: Wed, 02 Dec 2015 13:38:13 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy41KTogRml4IHRlc3RfZG9j?= =?utf-8?q?test_in_verbose_mode?= Message-ID: <20151202133809.32297.99367@psf.io> https://hg.python.org/cpython/rev/f3019e3f67d6 changeset: 99411:f3019e3f67d6 branch: 3.5 parent: 99407:ed45a09e5a69 user: Victor Stinner date: Wed Dec 02 14:37:17 2015 +0100 summary: Fix test_doctest in verbose mode files: Lib/test/test_doctest.py | 4 ++-- 1 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Lib/test/test_doctest.py b/Lib/test/test_doctest.py --- a/Lib/test/test_doctest.py +++ b/Lib/test/test_doctest.py @@ -2647,7 +2647,7 @@ >>> with open(fn, 'wb') as f: ... f.write(b'Test:\r\n\r\n >>> x = 1 + 1\r\n\r\nDone.\r\n') 35 - >>> doctest.testfile(fn, False) + >>> doctest.testfile(fn, module_relative=False, verbose=False) TestResults(failed=0, attempted=1) >>> os.remove(fn) @@ -2657,7 +2657,7 @@ >>> with open(fn, 'wb') as f: ... 
f.write(b'Test:\n\n >>> x = 1 + 1\n\nDone.\n') 30 - >>> doctest.testfile(fn, False) + >>> doctest.testfile(fn, module_relative=False, verbose=False) TestResults(failed=0, attempted=1) >>> os.remove(fn) -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Wed Dec 2 08:38:41 2015 From: python-checkins at python.org (victor.stinner) Date: Wed, 02 Dec 2015 13:38:41 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E5_-=3E_default?= =?utf-8?q?=29=3A_=28Merge_3=2E5=29_Fix_test=5Fdoctest_in_verbose_mode?= Message-ID: <20151202133809.124518.1269@psf.io> https://hg.python.org/cpython/rev/1ff29b57628e changeset: 99412:1ff29b57628e parent: 99410:a7d2307055e7 parent: 99411:f3019e3f67d6 user: Victor Stinner date: Wed Dec 02 14:37:35 2015 +0100 summary: (Merge 3.5) Fix test_doctest in verbose mode files: Lib/test/test_doctest.py | 4 ++-- 1 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Lib/test/test_doctest.py b/Lib/test/test_doctest.py --- a/Lib/test/test_doctest.py +++ b/Lib/test/test_doctest.py @@ -2647,7 +2647,7 @@ >>> with open(fn, 'wb') as f: ... f.write(b'Test:\r\n\r\n >>> x = 1 + 1\r\n\r\nDone.\r\n') 35 - >>> doctest.testfile(fn, False) + >>> doctest.testfile(fn, module_relative=False, verbose=False) TestResults(failed=0, attempted=1) >>> os.remove(fn) @@ -2657,7 +2657,7 @@ >>> with open(fn, 'wb') as f: ... f.write(b'Test:\n\n >>> x = 1 + 1\n\nDone.\n') 30 - >>> doctest.testfile(fn, False) + >>> doctest.testfile(fn, module_relative=False, verbose=False) TestResults(failed=0, attempted=1) >>> os.remove(fn) -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Wed Dec 2 08:39:58 2015 From: python-checkins at python.org (victor.stinner) Date: Wed, 02 Dec 2015 13:39:58 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMi43KTogRml4IHRlc3RfZG9j?= =?utf-8?q?test_in_verbose_mode?= Message-ID: <20151202133954.57684.64984@psf.io> https://hg.python.org/cpython/rev/ff351607a90d changeset: 99413:ff351607a90d branch: 2.7 parent: 99399:6a35865eded4 user: Victor Stinner date: Wed Dec 02 14:39:37 2015 +0100 summary: Fix test_doctest in verbose mode files: Lib/test/test_doctest.py | 4 ++-- 1 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Lib/test/test_doctest.py b/Lib/test/test_doctest.py --- a/Lib/test/test_doctest.py +++ b/Lib/test/test_doctest.py @@ -2582,7 +2582,7 @@ >>> fn = tempfile.mktemp() >>> with open(fn, 'wb') as f: ... f.write('Test:\r\n\r\n >>> x = 1 + 1\r\n\r\nDone.\r\n') - >>> doctest.testfile(fn, False) + >>> doctest.testfile(fn, module_relative=False, verbose=False) TestResults(failed=0, attempted=1) >>> os.remove(fn) @@ -2591,7 +2591,7 @@ >>> fn = tempfile.mktemp() >>> with open(fn, 'wb') as f: ... 
f.write('Test:\n\n >>> x = 1 + 1\n\nDone.\n') - >>> doctest.testfile(fn, False) + >>> doctest.testfile(fn, module_relative=False, verbose=False) TestResults(failed=0, attempted=1) >>> os.remove(fn) -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Wed Dec 2 12:19:26 2015 From: python-checkins at python.org (steve.dower) Date: Wed, 02 Dec 2015 17:19:26 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy41KTogSXNzdWUgIzI1NzE1?= =?utf-8?q?=3A_Python_3=2E5=2E1_installer_shows_wrong_upgrade_path_and_inc?= =?utf-8?q?orrect?= Message-ID: <20151202171924.14118.75306@psf.io> https://hg.python.org/cpython/rev/8537ec50c254 changeset: 99414:8537ec50c254 branch: 3.5 parent: 99411:f3019e3f67d6 user: Steve Dower date: Wed Dec 02 08:28:51 2015 -0800 summary: Issue #25715: Python 3.5.1 installer shows wrong upgrade path and incorrect logic for launcher detection. files: Misc/NEWS | 5 + Tools/msi/bundle/Default.thm | 2 +- Tools/msi/bundle/Default.wxl | 4 +- Tools/msi/bundle/bootstrap/PythonBootstrapperApplication.cpp | 223 +++++---- Tools/msi/bundle/bundle.wxs | 4 +- Tools/msi/bundle/packagegroups/launcher.wxs | 4 +- 6 files changed, 138 insertions(+), 104 deletions(-) diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -62,6 +62,11 @@ Library ------- +Windows +------- + +- Issue #25715: Python 3.5.1 installer shows wrong upgrade path and incorrect + logic for launcher detection. What's New in Python 3.5.1 release candidate 1? =============================================== diff --git a/Tools/msi/bundle/Default.thm b/Tools/msi/bundle/Default.thm --- a/Tools/msi/bundle/Default.thm +++ b/Tools/msi/bundle/Default.thm @@ -66,7 +66,7 @@ #(loc.Include_launcherLabel) #(loc.InstallLauncherAllUsersLabel) - #(loc.Include_launcherHelpLabel) + diff --git a/Tools/msi/bundle/Default.wxl b/Tools/msi/bundle/Default.wxl --- a/Tools/msi/bundle/Default.wxl +++ b/Tools/msi/bundle/Default.wxl @@ -76,7 +76,9 @@ Python &test suite Installs the standard library test suite. py &launcher - Installs the global 'py' launcher to make it easier to start Python. + Installs the global 'py' launcher to make it easier to start Python. + Use Programs and Features to remove the 'py' launcher. + Upgrades the global 'py' launcher from the previous version. Associate &files with Python (requires the py launcher) Create shortcuts for installed applications diff --git a/Tools/msi/bundle/bootstrap/PythonBootstrapperApplication.cpp b/Tools/msi/bundle/bootstrap/PythonBootstrapperApplication.cpp --- a/Tools/msi/bundle/bootstrap/PythonBootstrapperApplication.cpp +++ b/Tools/msi/bundle/bootstrap/PythonBootstrapperApplication.cpp @@ -94,6 +94,7 @@ ID_CUSTOM_ASSOCIATE_FILES_CHECKBOX, ID_CUSTOM_INSTALL_ALL_USERS_CHECKBOX, ID_CUSTOM_INSTALL_LAUNCHER_ALL_USERS_CHECKBOX, + ID_CUSTOM_INCLUDE_LAUNCHER_HELP_LABEL, ID_CUSTOM_COMPILE_ALL_CHECKBOX, ID_CUSTOM_BROWSE_BUTTON, ID_CUSTOM_BROWSE_BUTTON_LABEL, @@ -158,6 +159,7 @@ { ID_CUSTOM_ASSOCIATE_FILES_CHECKBOX, L"AssociateFiles" }, { ID_CUSTOM_INSTALL_ALL_USERS_CHECKBOX, L"InstallAllUsers" }, { ID_CUSTOM_INSTALL_LAUNCHER_ALL_USERS_CHECKBOX, L"CustomInstallLauncherAllUsers" }, + { ID_CUSTOM_INCLUDE_LAUNCHER_HELP_LABEL, L"Include_launcherHelp" }, { ID_CUSTOM_COMPILE_ALL_CHECKBOX, L"CompileAll" }, { ID_CUSTOM_BROWSE_BUTTON, L"CustomBrowseButton" }, { ID_CUSTOM_BROWSE_BUTTON_LABEL, L"CustomBrowseButtonLabel" }, @@ -454,6 +456,20 @@ ThemeSendControlMessage(_theme, ID_CUSTOM_INSTALL_LAUNCHER_ALL_USERS_CHECKBOX, BM_SETCHECK, installLauncherAllUsers ? 
BST_CHECKED : BST_UNCHECKED, 0); + + LOC_STRING *pLocString = nullptr; + LPCWSTR locKey = L"#(loc.Include_launcherHelp)"; + LONGLONG detectedLauncher; + + if (SUCCEEDED(BalGetNumericVariable(L"DetectedLauncher", &detectedLauncher)) && detectedLauncher) { + locKey = L"#(loc.Include_launcherRemove)"; + } else if (SUCCEEDED(BalGetNumericVariable(L"DetectedOldLauncher", &detectedLauncher)) && detectedLauncher) { + locKey = L"#(loc.Include_launcherUpgrade)"; + } + + if (SUCCEEDED(LocGetString(_wixLoc, locKey, &pLocString)) && pLocString) { + ThemeSetTextControl(_theme, ID_CUSTOM_INCLUDE_LAUNCHER_HELP_LABEL, pLocString->wzText); + } } void Custom2Page_Show() { @@ -641,6 +657,29 @@ return nResult; } + virtual STDMETHODIMP_(int) OnDetectRelatedMsiPackage( + __in_z LPCWSTR wzPackageId, + __in_z LPCWSTR /*wzProductCode*/, + __in BOOL fPerMachine, + __in DWORD64 /*dw64Version*/, + __in BOOTSTRAPPER_RELATED_OPERATION operation + ) { + if (BOOTSTRAPPER_RELATED_OPERATION_MAJOR_UPGRADE == operation && + (CSTR_EQUAL == ::CompareStringW(LOCALE_NEUTRAL, 0, wzPackageId, -1, L"launcher_AllUsers", -1) || + CSTR_EQUAL == ::CompareStringW(LOCALE_NEUTRAL, 0, wzPackageId, -1, L"launcher_JustForMe", -1))) { + auto hr = LoadAssociateFilesStateFromKey(_engine, fPerMachine ? HKEY_LOCAL_MACHINE : HKEY_CURRENT_USER); + if (hr == S_OK) { + _engine->SetVariableNumeric(L"AssociateFiles", 1); + } else if (FAILED(hr)) { + BalLog(BOOTSTRAPPER_LOG_LEVEL_ERROR, "Failed to load AssociateFiles state: error code 0x%08X", hr); + } + + _engine->SetVariableNumeric(L"Include_launcher", 1); + _engine->SetVariableNumeric(L"DetectedOldLauncher", 1); + _engine->SetVariableNumeric(L"InstallLauncherAllUsers", fPerMachine ? 1 : 0); + } + return CheckCanceled() ? IDCANCEL : IDNOACTION; + } virtual STDMETHODIMP_(int) OnDetectRelatedBundle( __in LPCWSTR wzBundleId, @@ -656,24 +695,8 @@ if (BOOTSTRAPPER_RELATED_OPERATION_DOWNGRADE == operation) { _downgradingOtherVersion = TRUE; } else if (BOOTSTRAPPER_RELATED_OPERATION_MAJOR_UPGRADE == operation) { - _upgradingOldVersion = TRUE; - - // Assume we don't want the launcher or file associations, and if - // they have already been installed then loading the state will - // reactivate these settings. 
- _engine->SetVariableNumeric(L"Include_launcher", 0); - _engine->SetVariableNumeric(L"AssociateFiles", 0); - auto hr = LoadLauncherStateFromKey(_engine, HKEY_CURRENT_USER); - if (hr == S_FALSE) { - hr = LoadLauncherStateFromKey(_engine, HKEY_LOCAL_MACHINE); - } - if (FAILED(hr)) { - BalLog( - BOOTSTRAPPER_LOG_LEVEL_ERROR, - "Failed to load launcher state: error code 0x%08X", - hr - ); - } + BalLog(BOOTSTRAPPER_LOG_LEVEL_STANDARD, "Detected previous version - planning upgrade"); + _upgrading = TRUE; LoadOptionalFeatureStates(_engine); } else if (BOOTSTRAPPER_RELATED_OPERATION_NONE == operation) { @@ -699,9 +722,42 @@ virtual STDMETHODIMP_(void) OnDetectPackageComplete( __in LPCWSTR wzPackageId, - __in HRESULT /*hrStatus*/, + __in HRESULT hrStatus, __in BOOTSTRAPPER_PACKAGE_STATE state - ) { } + ) { + if (FAILED(hrStatus)) { + return; + } + + BOOL detectedLauncher = FALSE; + HKEY hkey = HKEY_LOCAL_MACHINE; + if (CSTR_EQUAL == ::CompareStringW(LOCALE_NEUTRAL, 0, wzPackageId, -1, L"launcher_AllUsers", -1)) { + if (BOOTSTRAPPER_PACKAGE_STATE_PRESENT == state || BOOTSTRAPPER_PACKAGE_STATE_OBSOLETE == state) { + detectedLauncher = TRUE; + _engine->SetVariableNumeric(L"InstallLauncherAllUsers", 1); + } + } else if (CSTR_EQUAL == ::CompareStringW(LOCALE_NEUTRAL, 0, wzPackageId, -1, L"launcher_JustForMe", -1)) { + if (BOOTSTRAPPER_PACKAGE_STATE_PRESENT == state || BOOTSTRAPPER_PACKAGE_STATE_OBSOLETE == state) { + detectedLauncher = TRUE; + _engine->SetVariableNumeric(L"InstallLauncherAllUsers", 0); + } + } + + if (detectedLauncher) { + /* When we detect the current version of the launcher. */ + _engine->SetVariableNumeric(L"Include_launcher", 1); + _engine->SetVariableNumeric(L"DetectedLauncher", 1); + _engine->SetVariableString(L"Include_launcherState", L"disable"); + _engine->SetVariableString(L"InstallLauncherAllUsersState", L"disable"); + + auto hr = LoadAssociateFilesStateFromKey(_engine, hkey); + if (hr == S_OK) { + _engine->SetVariableNumeric(L"AssociateFiles", 1); + } else if (FAILED(hr)) { + BalLog(BOOTSTRAPPER_LOG_LEVEL_ERROR, "Failed to load AssociateFiles state: error code 0x%08X", hr); + } + } + } virtual STDMETHODIMP_(void) OnDetectComplete(__in HRESULT hrStatus) { @@ -716,32 +772,7 @@ if (SUCCEEDED(hrStatus)) { // Ensure the default path has been set - LONGLONG installAll; - LPWSTR targetDir = nullptr; - LPWSTR defaultTargetDir = nullptr; - - hrStatus = BalGetStringVariable(L"TargetDir", &targetDir); - if (FAILED(hrStatus) || !targetDir || !targetDir[0]) { - ReleaseStr(targetDir); - targetDir = nullptr; - - if (FAILED(BalGetNumericVariable(L"InstallAllUsers", &installAll))) { - installAll = 0; - } - - hrStatus = BalGetStringVariable( - installAll ? 
L"DefaultAllUsersTargetDir" : L"DefaultJustForMeTargetDir", - &defaultTargetDir - ); - - if (SUCCEEDED(hrStatus) && defaultTargetDir) { - if (defaultTargetDir[0] && SUCCEEDED(BalFormatString(defaultTargetDir, &targetDir))) { - hrStatus = _engine->SetVariableString(L"TargetDir", targetDir); - ReleaseStr(targetDir); - } - ReleaseStr(defaultTargetDir); - } - } + hrStatus = EnsureTargetDir(); } SetState(PYBA_STATE_DETECTED, hrStatus); @@ -1396,9 +1427,14 @@ hr = LoadBootstrapperBAFunctions(); BalExitOnFailure(hr, "Failed to load bootstrapper functions."); + hr = UpdateUIStrings(_command.action); BalExitOnFailure(hr, "Failed to load UI strings."); + if (_command.action == BOOTSTRAPPER_ACTION_MODIFY) { + LoadOptionalFeatureStates(_engine); + } + GetBundleFileVersion(); // don't fail if we couldn't get the version info; best-effort only LExit: @@ -1906,27 +1942,6 @@ for (DWORD i = 0; i < _theme->cControls; ++i) { THEME_CONTROL* pControl = _theme->rgControls + i; LPWSTR text = nullptr; - LPWSTR name = nullptr; - LOC_STRING *locText = nullptr; - - // If a command link has a note, then add it. - if ((pControl->dwStyle & BS_TYPEMASK) == BS_COMMANDLINK || - (pControl->dwStyle & BS_TYPEMASK) == BS_DEFCOMMANDLINK) { - hr = StrAllocFormatted(&name, L"#(loc.%lsNote)", pControl->sczName); - if (SUCCEEDED(hr)) { - hr = LocGetString(_wixLoc, name, &locText); - ReleaseStr(name); - if (SUCCEEDED(hr) && locText && locText->wzText && locText->wzText[0]) { - hr = BalFormatString(locText->wzText, &text); - if (SUCCEEDED(hr) && text && text[0]) { - ThemeSendControlMessage(_theme, pControl->wId, BCM_SETNOTE, 0, (LPARAM)text); - ReleaseStr(text); - text = nullptr; - } - } - } - hr = S_OK; - } if (!pControl->wPageId && pControl->sczText && *pControl->sczText) { HRESULT hrFormat; @@ -2048,6 +2063,7 @@ return; } + HRESULT UpdateUIStrings(__in BOOTSTRAPPER_ACTION action) { HRESULT hr = S_OK; LPCWSTR likeInstalling = nullptr; @@ -2270,6 +2286,30 @@ StrFree(controlState); } StrFree(controlName); + controlName = nullptr; + + + // If a command link has a note, then add it. 
+ if ((pControl->dwStyle & BS_TYPEMASK) == BS_COMMANDLINK || + (pControl->dwStyle & BS_TYPEMASK) == BS_DEFCOMMANDLINK) { + hr = StrAllocFormatted(&controlName, L"#(loc.%lsNote)", pControl->sczName); + if (SUCCEEDED(hr)) { + LOC_STRING *locText = nullptr; + hr = LocGetString(_wixLoc, controlName, &locText); + if (SUCCEEDED(hr) && locText && locText->wzText && locText->wzText[0]) { + LPWSTR text = nullptr; + hr = BalFormatString(locText->wzText, &text); + if (SUCCEEDED(hr) && text && text[0]) { + ThemeSendControlMessage(_theme, pControl->wId, BCM_SETNOTE, 0, (LPARAM)text); + ReleaseStr(text); + text = nullptr; + } + } + ReleaseStr(controlName); + controlName = nullptr; + } + hr = S_OK; + } } ThemeControlEnable(_theme, pControl->wId, enableControl); @@ -2515,9 +2555,8 @@ if (_installPage == PAGE_LOADING) { switch (_command.action) { case BOOTSTRAPPER_ACTION_INSTALL: - if (_upgradingOldVersion) { + if (_upgrading) { _installPage = PAGE_UPGRADE; - _upgrading = TRUE; } else if (SUCCEEDED(BalGetNumericVariable(L"SimpleInstall", &simple)) && simple) { _installPage = PAGE_SIMPLE_INSTALL; } else { @@ -2560,11 +2599,9 @@ static BAL_CONDITION WILL_ELEVATE_CONDITION = { L"not WixBundleElevated and (" /*Elevate when installing for all users*/ - L"InstallAllUsers or" + L"InstallAllUsers or " /*Elevate when installing the launcher for all users and it was not detected*/ - L"(InstallLauncherAllUsers and Include_launcher and not DetectedLauncher) or" - /*Elevate when the launcher was installed for all users and it is being removed*/ - L"(DetectedLauncher and DetectedLauncherAllUsers and not Include_launcher)" + L"(Include_launcher and InstallLauncherAllUsers and not DetectedLauncher)" L")", L"" }; @@ -2867,19 +2904,16 @@ return HRESULT_FROM_WIN32(res); } - static HRESULT LoadLauncherStateFromKey( + static HRESULT LoadAssociateFilesStateFromKey( __in IBootstrapperEngine* pEngine, __in HKEY hkHive ) { const LPCWSTR subkey = L"Software\\Python\\PyLauncher"; HKEY hKey; LRESULT res; - - if (IsTargetPlatformx64(pEngine)) { - res = RegOpenKeyExW(hkHive, subkey, 0, KEY_READ | KEY_WOW64_64KEY, &hKey); - } else { - res = RegOpenKeyExW(hkHive, subkey, 0, KEY_READ | KEY_WOW64_32KEY, &hKey); - } + HRESULT hr; + + res = RegOpenKeyExW(hkHive, subkey, 0, KEY_READ | KEY_WOW64_32KEY, &hKey); if (res == ERROR_FILE_NOT_FOUND) { return S_FALSE; @@ -2888,26 +2922,17 @@ return HRESULT_FROM_WIN32(res); } - res = RegQueryValueExW(hKey, nullptr, nullptr, nullptr, nullptr, nullptr); - if (res == ERROR_FILE_NOT_FOUND) { - pEngine->SetVariableNumeric(L"Include_launcher", 0); - } else if (res == ERROR_SUCCESS) { - pEngine->SetVariableNumeric(L"Include_launcher", 1); - pEngine->SetVariableNumeric(L"DetectedLauncher", 1); - pEngine->SetVariableNumeric(L"InstallLauncherAllUsers", (hkHive == HKEY_LOCAL_MACHINE) ? 1 : 0); - pEngine->SetVariableNumeric(L"DetectedLauncherAllUsers", (hkHive == HKEY_LOCAL_MACHINE) ? 
1 : 0); - pEngine->SetVariableString(L"InstallLauncherAllUsersState", L"disable"); - } - res = RegQueryValueExW(hKey, L"AssociateFiles", nullptr, nullptr, nullptr, nullptr); if (res == ERROR_FILE_NOT_FOUND) { - pEngine->SetVariableNumeric(L"AssociateFiles", 0); + hr = S_FALSE; } else if (res == ERROR_SUCCESS) { - pEngine->SetVariableNumeric(L"AssociateFiles", 1); + hr = S_OK; + } else { + hr = HRESULT_FROM_WIN32(res); } RegCloseKey(hKey); - return S_OK; + return hr; } static void LoadOptionalFeatureStates(__in IBootstrapperEngine* pEngine) { @@ -2918,7 +2943,11 @@ HKEY hkHive; // The launcher installation is separate from the Python install, so we - // check its state later. This also checks the file association option. + // check its state later. For now, assume we don't want the launcher or + // file associations, and if they have already been installed then + // loading the state will reactivate these settings. + pEngine->SetVariableNumeric(L"Include_launcher", 0); + pEngine->SetVariableNumeric(L"AssociateFiles", 0); // Get the registry key from the bundle, to save having to duplicate it // in multiple places. @@ -3089,7 +3118,6 @@ _hrFinal = hrHostInitialization; _downgradingOtherVersion = FALSE; - _upgradingOldVersion = FALSE; _restartResult = BOOTSTRAPPER_APPLY_RESTART_NONE; _restartRequired = FALSE; _allowRestart = FALSE; @@ -3113,8 +3141,6 @@ _hBAFModule = nullptr; _baFunction = nullptr; - - EnsureTargetDir(); } @@ -3174,7 +3200,6 @@ DWORD _calculatedExecuteProgress; BOOL _downgradingOtherVersion; - BOOL _upgradingOldVersion; BOOTSTRAPPER_APPLY_RESTART _restartResult; BOOL _restartRequired; BOOL _allowRestart; diff --git a/Tools/msi/bundle/bundle.wxs b/Tools/msi/bundle/bundle.wxs --- a/Tools/msi/bundle/bundle.wxs +++ b/Tools/msi/bundle/bundle.wxs @@ -51,7 +51,7 @@ --> - + @@ -72,6 +72,7 @@ + @@ -81,6 +82,7 @@ + diff --git a/Tools/msi/bundle/packagegroups/launcher.wxs b/Tools/msi/bundle/packagegroups/launcher.wxs --- a/Tools/msi/bundle/packagegroups/launcher.wxs +++ b/Tools/msi/bundle/packagegroups/launcher.wxs @@ -11,7 +11,7 @@ EnableFeatureSelection="yes" Permanent="yes" Visible="yes" - InstallCondition="(InstallAllUsers or InstallLauncherAllUsers) and Include_launcher" /> + InstallCondition="(InstallAllUsers or InstallLauncherAllUsers) and Include_launcher and not DetectedLauncher" /> + InstallCondition="not (InstallAllUsers or InstallLauncherAllUsers) and Include_launcher and not DetectedLauncher" /> \ No newline at end of file -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Wed Dec 2 12:19:26 2015 From: python-checkins at python.org (steve.dower) Date: Wed, 02 Dec 2015 17:19:26 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E5_-=3E_default?= =?utf-8?q?=29=3A_Merge_from_3=2E5_=28including_all_NEWS_entries=29?= Message-ID: <20151202171924.14106.51279@psf.io> https://hg.python.org/cpython/rev/7dcc03b344b5 changeset: 99415:7dcc03b344b5 parent: 99412:1ff29b57628e parent: 99414:8537ec50c254 user: Steve Dower date: Wed Dec 02 09:19:07 2015 -0800 summary: Merge from 3.5 (including all NEWS entries) files: Misc/NEWS | 260 +++++++++- Tools/msi/bundle/Default.thm | 2 +- Tools/msi/bundle/Default.wxl | 4 +- Tools/msi/bundle/bootstrap/PythonBootstrapperApplication.cpp | 223 ++++--- Tools/msi/bundle/bundle.wxs | 4 +- Tools/msi/bundle/packagegroups/launcher.wxs | 4 +- 6 files changed, 389 insertions(+), 108 deletions(-) diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -476,21 +476,84 @@ `python3 -m venv`. 
+What's New in Python 3.5.1 final? +================================= + +Release date: 2015-12-06 + +Core and Builtins +----------------- + +Library +------- + +Windows +------- + +- Issue #25715: Python 3.5.1 installer shows wrong upgrade path and incorrect + logic for launcher detection. + What's New in Python 3.5.1 release candidate 1? =============================================== -Release date: TBA +Release date: 2015-11-22 Core and Builtins ----------------- +- Issue #25630: Fix a possible segfault during argument parsing in functions + that accept filesystem paths. + +- Issue #23564: Fixed a partially broken sanity check in the _posixsubprocess + internals regarding how fds_to_pass were passed to the child. The bug had + no actual impact as subprocess.py already avoided it. + +- Issue #25388: Fixed tokenizer crash when processing undecodable source code + with a null byte. + +- Issue #25462: The hash of the key now is calculated only once in most + operations in C implementation of OrderedDict. + +- Issue #22995: Default implementation of __reduce__ and __reduce_ex__ now + rejects builtin types with not defined __new__. + +- Issue #25555: Fix parser and AST: fill lineno and col_offset of "arg" node + when compiling AST from Python objects. + - Issue #24802: Avoid buffer overreads when int(), float(), compile(), exec() and eval() are passed bytes-like objects. These objects are not necessarily terminated by a null byte, but the functions assumed they were. +- Issue #24726: Fixed a crash and leaking NULL in repr() of OrderedDict that + was mutated by direct calls of dict methods. + +- Issue #25449: Iterating OrderedDict with keys with unstable hash now raises + KeyError in C implementations as well as in Python implementation. + +- Issue #25395: Fixed crash when highly nested OrderedDict structures were + garbage collected. + +- Issue #25274: sys.setrecursionlimit() now raises a RecursionError if the new + recursion limit is too low depending at the current recursion depth. Modify + also the "lower-water mark" formula to make it monotonic. This mark is used + to decide when the overflowed flag of the thread state is reset. + - Issue #24402: Fix input() to prompt to the redirected stdout when sys.stdout.fileno() fails. +- Issue #24806: Prevent builtin types that are not allowed to be subclassed from + being subclassed through multiple inheritance. + +- Issue #24848: Fixed a number of bugs in UTF-7 decoding of misformed data. + +- Issue #25280: Import trace messages emitted in verbose (-v) mode are no + longer formatted twice. + +- Issue #25003: On Solaris 11.3 or newer, os.urandom() now uses the + getrandom() function instead of the getentropy() function. The getentropy() + function is blocking to generate very good quality entropy, os.urandom() + doesn't need such high-quality entropy. + - Issue #25182: The stdprinter (used as sys.stderr before the io module is imported at startup) now uses the backslashreplace error handler. @@ -514,6 +577,12 @@ - Issue #25583: Avoid incorrect errors raised by os.makedirs(exist_ok=True) when the OS gives priority to errors such as EACCES over EEXIST. +- Issue #25593: Change semantics of EventLoop.stop() in asyncio. + +- Issue #6973: When we know a subprocess.Popen process has died, do + not allow the send_signal(), terminate(), or kill() methods to do + anything as they could potentially signal a different process. + - Issue #25590: In the Readline completer, only call getattr() once per attribute. @@ -521,6 +590,71 @@ by wrapping a memoryview. 
This was a regression made in 3.5a1. Based on patch by Eryksun. +- Issue #25584: Added "escape" to the __all__ list in the glob module. + +- Issue #25584: Fixed recursive glob() with patterns starting with '\*\*'. + +- Issue #25446: Fix regression in smtplib's AUTH LOGIN support. + +- Issue #18010: Fix the pydoc web server's module search function to handle + exceptions from importing packages. + +- Issue #25554: Got rid of circular references in regular expression parsing. + +- Issue #25510: fileinput.FileInput.readline() now returns b'' instead of '' + at the end if the FileInput was opened with binary mode. + Patch by Ryosuke Ito. + +- Issue #25503: Fixed inspect.getdoc() for inherited docstrings of properties. + Original patch by John Mark Vandenberg. + +- Issue #25515: Always use os.urandom as a source of randomness in uuid.uuid4. + +- Issue #21827: Fixed textwrap.dedent() for the case when largest common + whitespace is a substring of smallest leading whitespace. + Based on patch by Robert Li. + +- Issue #25447: The lru_cache() wrapper objects now can be copied and pickled + (by returning the original object unchanged). + +- Issue #25390: typing: Don't crash on Union[str, Pattern]. + +- Issue #25441: asyncio: Raise error from drain() when socket is closed. + +- Issue #25410: Cleaned up and fixed minor bugs in C implementation of + OrderedDict. + +- Issue #25411: Improved Unicode support in SMTPHandler through better use of + the email package. Thanks to user simon04 for the patch. + +- Issue #25407: Remove mentions of the formatter module being removed in + Python 3.6. + +- Issue #25406: Fixed a bug in C implementation of OrderedDict.move_to_end() + that caused segmentation fault or hang in iterating after moving several + items to the start of ordered dict. + +- Issue #25364: zipfile now works in threads disabled builds. + +- Issue #25328: smtpd's SMTPChannel now correctly raises a ValueError if both + decode_data and enable_SMTPUTF8 are set to true. + +- Issue #25316: distutils raises OSError instead of DistutilsPlatformError + when MSVC is not installed. + +- Issue #25380: Fixed protocol for the STACK_GLOBAL opcode in + pickletools.opcodes. + +- Issue #23972: Updates asyncio datagram create method allowing reuseport + and reuseaddr socket options to be set prior to binding the socket. + Mirroring the existing asyncio create_server method the reuseaddr option + for datagram sockets defaults to True if the O/S is 'posix' (except if the + platform is Cygwin). Patch by Chris Laws. + +- Issue #25304: Add asyncio.run_coroutine_threadsafe(). This lets you + submit a coroutine to a loop from another thread, returning a + concurrent.futures.Future. By Vincent Michel. + - Issue #25232: Fix CGIRequestHandler to split the query from the URL at the first question mark (?) rather than the last. Patch from Xiang Zhang. @@ -548,6 +682,9 @@ - Issue #25233: Rewrite the guts of asyncio.Queue and asyncio.Semaphore to be more understandable and correct. +- Issue #25203: Failed readline.set_completer_delims() no longer left the + module in inconsistent state. + - Issue #23600: Default implementation of tzinfo.fromutc() was returning wrong results in some cases. @@ -621,6 +758,19 @@ - Issue #24881: Fixed setting binary mode in Python implementation of FileIO on Windows and Cygwin. Patch from Akira Li. +- Issue #25578: Fix (another) memory leak in SSLSocket.getpeercer(). + +- Issue #25530: Disable the vulnerable SSLv3 protocol by default when creating + ssl.SSLContext. 
+ +- Issue #25569: Fix memory leak in SSLSocket.getpeercert(). + +- Issue #25471: Sockets returned from accept() shouldn't appear to be + nonblocking. + +- Issue #25319: When threading.Event is reinitialized, the underlying condition + should use a regular lock rather than a recursive lock. + - Issue #21112: Fix regression in unittest.expectedFailure on subclasses. Patch from Berker Peksag. @@ -642,8 +792,104 @@ - Issue #23572: Fixed functools.singledispatch on classes with falsy metaclasses. Patch by Ethan Furman. -- Issue #12006: Add ISO 8601 year, week, and day directives (%G, %V, %u) to - strptime. +- asyncio: ensure_future() now accepts awaitable objects. + +IDLE +---- + +- Issue 15348: Stop the debugger engine (normally in a user process) + before closing the debugger window (running in the IDLE process). + This prevents the RuntimeErrors that were being caught and ignored. + +- Issue #24455: Prevent IDLE from hanging when a) closing the shell while the + debugger is active (15347); b) closing the debugger with the [X] button + (15348); and c) activating the debugger when already active (24455). + The patch by Mark Roseman does this by making two changes. + 1. Suspend and resume the gui.interaction method with the tcl vwait + mechanism intended for this purpose (instead of root.mainloop & .quit). + 2. In gui.run, allow any existing interaction to terminate first. + +- Change 'The program' to 'Your program' in an IDLE 'kill program?' message + to make it clearer that the program referred to is the currently running + user program, not IDLE itself. + +- Issue #24750: Improve the appearance of the IDLE editor window status bar. + Patch by Mark Roseman. + +- Issue #25313: Change the handling of new built-in text color themes to better + address the compatibility problem introduced by the addition of IDLE Dark. + Consistently use the revised idleConf.CurrentTheme everywhere in idlelib. + +- Issue #24782: Extension configuration is now a tab in the IDLE Preferences + dialog rather than a separate dialog. The former tabs are now a sorted + list. Patch by Mark Roseman. + +- Issue #22726: Re-activate the config dialog help button with some content + about the other buttons and the new IDLE Dark theme. + +- Issue #24820: IDLE now has an 'IDLE Dark' built-in text color theme. + It is more or less IDLE Classic inverted, with a cobalt blue background. + Strings, comments, keywords, ... are still green, red, orange, ... . + To use it with IDLEs released before November 2015, hit the + 'Save as New Custom Theme' button and enter a new name, + such as 'Custom Dark'. The custom theme will work with any IDLE + release, and can be modified. + +- Issue #25224: README.txt is now an idlelib index for IDLE developers and + curious users. The previous user content is now in the IDLE doc chapter. + 'IDLE' now means 'Integrated Development and Learning Environment'. + +- Issue #24820: Users can now set breakpoint colors in + Settings -> Custom Highlighting. Original patch by Mark Roseman. + +- Issue #24972: Inactive selection background now matches active selection + background, as configured by users, on all systems. Found items are now + always highlighted on Windows. Initial patch by Mark Roseman. + +- Issue #24570: Idle: make calltip and completion boxes appear on Macs + affected by a tk regression. Initial patch by Mark Roseman. + +- Issue #24988: Idle ScrolledList context menus (used in debugger) + now work on Mac Aqua. Patch by Mark Roseman. 
+ +- Issue #24801: Make right-click for context menu work on Mac Aqua. + Patch by Mark Roseman. + +- Issue #25173: Associate tkinter messageboxes with a specific widget. + For Mac OSX, make them a 'sheet'. Patch by Mark Roseman. + +- Issue #25198: Enhance the initial html viewer now used for Idle Help. + * Properly indent fixed-pitch text (patch by Mark Roseman). + * Give code snippet a very Sphinx-like light blueish-gray background. + * Re-use initial width and height set by users for shell and editor. + * When the Table of Contents (TOC) menu is used, put the section header + at the top of the screen. + +- Issue #25225: Condense and rewrite Idle doc section on text colors. + +- Issue #21995: Explain some differences between IDLE and console Python. + +- Issue #22820: Explain need for *print* when running file from Idle editor. + +- Issue #25224: Doc: augment Idle feature list and no-subprocess section. + +- Issue #25219: Update doc for Idle command line options. + Some were missing and notes were not correct. + +- Issue #24861: Most of idlelib is private and subject to change. + Use idleib.idle.* to start Idle. See idlelib.__init__.__doc__. + +- Issue #25199: Idle: add synchronization comments for future maintainers. + +- Issue #16893: Replace help.txt with help.html for Idle doc display. + The new idlelib/help.html is rstripped Doc/build/html/library/idle.html. + It looks better than help.txt and will better document Idle as released. + The tkinter html viewer that works for this file was written by Mark Roseman. + The now unused EditorWindow.HelpDialog class and helt.txt file are deprecated. + +- Issue #24199: Deprecate unused idlelib.idlever with possible removal in 3.6. + +- Issue #24790: Remove extraneous code (which also create 2 & 3 conflicts). Documentation ------------- @@ -670,6 +916,8 @@ Tests ----- +- Issue #25449: Added tests for OrderedDict subclasses. + - Issue #25099: Make test_compileall not fail when an entry on sys.path cannot be written to (commonly seen in administrative installs on Windows). @@ -689,7 +937,6 @@ - Issue #24986: It is now possible to build Python on Windows without errors when external libraries are not available. - Windows ------- @@ -728,6 +975,11 @@ - Issue #25022: Removed very outdated PC/example_nt/ directory. +Tools/Demos +----------- + +- Issue #25440: Fix output of python-config --extension-suffix. + What's New in Python 3.5.0 final? ================================= diff --git a/Tools/msi/bundle/Default.thm b/Tools/msi/bundle/Default.thm --- a/Tools/msi/bundle/Default.thm +++ b/Tools/msi/bundle/Default.thm @@ -66,7 +66,7 @@ #(loc.Include_launcherLabel) #(loc.InstallLauncherAllUsersLabel) - #(loc.Include_launcherHelpLabel) + diff --git a/Tools/msi/bundle/Default.wxl b/Tools/msi/bundle/Default.wxl --- a/Tools/msi/bundle/Default.wxl +++ b/Tools/msi/bundle/Default.wxl @@ -76,7 +76,9 @@ Python &test suite Installs the standard library test suite. py &launcher - Installs the global 'py' launcher to make it easier to start Python. + Installs the global 'py' launcher to make it easier to start Python. + Use Programs and Features to remove the 'py' launcher. + Upgrades the global 'py' launcher from the previous version. 
Associate &files with Python (requires the py launcher) Create shortcuts for installed applications diff --git a/Tools/msi/bundle/bootstrap/PythonBootstrapperApplication.cpp b/Tools/msi/bundle/bootstrap/PythonBootstrapperApplication.cpp --- a/Tools/msi/bundle/bootstrap/PythonBootstrapperApplication.cpp +++ b/Tools/msi/bundle/bootstrap/PythonBootstrapperApplication.cpp @@ -94,6 +94,7 @@ ID_CUSTOM_ASSOCIATE_FILES_CHECKBOX, ID_CUSTOM_INSTALL_ALL_USERS_CHECKBOX, ID_CUSTOM_INSTALL_LAUNCHER_ALL_USERS_CHECKBOX, + ID_CUSTOM_INCLUDE_LAUNCHER_HELP_LABEL, ID_CUSTOM_COMPILE_ALL_CHECKBOX, ID_CUSTOM_BROWSE_BUTTON, ID_CUSTOM_BROWSE_BUTTON_LABEL, @@ -158,6 +159,7 @@ { ID_CUSTOM_ASSOCIATE_FILES_CHECKBOX, L"AssociateFiles" }, { ID_CUSTOM_INSTALL_ALL_USERS_CHECKBOX, L"InstallAllUsers" }, { ID_CUSTOM_INSTALL_LAUNCHER_ALL_USERS_CHECKBOX, L"CustomInstallLauncherAllUsers" }, + { ID_CUSTOM_INCLUDE_LAUNCHER_HELP_LABEL, L"Include_launcherHelp" }, { ID_CUSTOM_COMPILE_ALL_CHECKBOX, L"CompileAll" }, { ID_CUSTOM_BROWSE_BUTTON, L"CustomBrowseButton" }, { ID_CUSTOM_BROWSE_BUTTON_LABEL, L"CustomBrowseButtonLabel" }, @@ -454,6 +456,20 @@ ThemeSendControlMessage(_theme, ID_CUSTOM_INSTALL_LAUNCHER_ALL_USERS_CHECKBOX, BM_SETCHECK, installLauncherAllUsers ? BST_CHECKED : BST_UNCHECKED, 0); + + LOC_STRING *pLocString = nullptr; + LPCWSTR locKey = L"#(loc.Include_launcherHelp)"; + LONGLONG detectedLauncher; + + if (SUCCEEDED(BalGetNumericVariable(L"DetectedLauncher", &detectedLauncher)) && detectedLauncher) { + locKey = L"#(loc.Include_launcherRemove)"; + } else if (SUCCEEDED(BalGetNumericVariable(L"DetectedOldLauncher", &detectedLauncher)) && detectedLauncher) { + locKey = L"#(loc.Include_launcherUpgrade)"; + } + + if (SUCCEEDED(LocGetString(_wixLoc, locKey, &pLocString)) && pLocString) { + ThemeSetTextControl(_theme, ID_CUSTOM_INCLUDE_LAUNCHER_HELP_LABEL, pLocString->wzText); + } } void Custom2Page_Show() { @@ -641,6 +657,29 @@ return nResult; } + virtual STDMETHODIMP_(int) OnDetectRelatedMsiPackage( + __in_z LPCWSTR wzPackageId, + __in_z LPCWSTR /*wzProductCode*/, + __in BOOL fPerMachine, + __in DWORD64 /*dw64Version*/, + __in BOOTSTRAPPER_RELATED_OPERATION operation + ) { + if (BOOTSTRAPPER_RELATED_OPERATION_MAJOR_UPGRADE == operation && + (CSTR_EQUAL == ::CompareStringW(LOCALE_NEUTRAL, 0, wzPackageId, -1, L"launcher_AllUsers", -1) || + CSTR_EQUAL == ::CompareStringW(LOCALE_NEUTRAL, 0, wzPackageId, -1, L"launcher_JustForMe", -1))) { + auto hr = LoadAssociateFilesStateFromKey(_engine, fPerMachine ? HKEY_LOCAL_MACHINE : HKEY_CURRENT_USER); + if (hr == S_OK) { + _engine->SetVariableNumeric(L"AssociateFiles", 1); + } else if (FAILED(hr)) { + BalLog(BOOTSTRAPPER_LOG_LEVEL_ERROR, "Failed to load AssociateFiles state: error code 0x%08X", hr); + } + + _engine->SetVariableNumeric(L"Include_launcher", 1); + _engine->SetVariableNumeric(L"DetectedOldLauncher", 1); + _engine->SetVariableNumeric(L"InstallLauncherAllUsers", fPerMachine ? 1 : 0); + } + return CheckCanceled() ? IDCANCEL : IDNOACTION; + } virtual STDMETHODIMP_(int) OnDetectRelatedBundle( __in LPCWSTR wzBundleId, @@ -656,24 +695,8 @@ if (BOOTSTRAPPER_RELATED_OPERATION_DOWNGRADE == operation) { _downgradingOtherVersion = TRUE; } else if (BOOTSTRAPPER_RELATED_OPERATION_MAJOR_UPGRADE == operation) { - _upgradingOldVersion = TRUE; - - // Assume we don't want the launcher or file associations, and if - // they have already been installed then loading the state will - // reactivate these settings. 
- _engine->SetVariableNumeric(L"Include_launcher", 0); - _engine->SetVariableNumeric(L"AssociateFiles", 0); - auto hr = LoadLauncherStateFromKey(_engine, HKEY_CURRENT_USER); - if (hr == S_FALSE) { - hr = LoadLauncherStateFromKey(_engine, HKEY_LOCAL_MACHINE); - } - if (FAILED(hr)) { - BalLog( - BOOTSTRAPPER_LOG_LEVEL_ERROR, - "Failed to load launcher state: error code 0x%08X", - hr - ); - } + BalLog(BOOTSTRAPPER_LOG_LEVEL_STANDARD, "Detected previous version - planning upgrade"); + _upgrading = TRUE; LoadOptionalFeatureStates(_engine); } else if (BOOTSTRAPPER_RELATED_OPERATION_NONE == operation) { @@ -699,9 +722,42 @@ virtual STDMETHODIMP_(void) OnDetectPackageComplete( __in LPCWSTR wzPackageId, - __in HRESULT /*hrStatus*/, + __in HRESULT hrStatus, __in BOOTSTRAPPER_PACKAGE_STATE state - ) { } + ) { + if (FAILED(hrStatus)) { + return; + } + + BOOL detectedLauncher = FALSE; + HKEY hkey = HKEY_LOCAL_MACHINE; + if (CSTR_EQUAL == ::CompareStringW(LOCALE_NEUTRAL, 0, wzPackageId, -1, L"launcher_AllUsers", -1)) { + if (BOOTSTRAPPER_PACKAGE_STATE_PRESENT == state || BOOTSTRAPPER_PACKAGE_STATE_OBSOLETE == state) { + detectedLauncher = TRUE; + _engine->SetVariableNumeric(L"InstallLauncherAllUsers", 1); + } + } else if (CSTR_EQUAL == ::CompareStringW(LOCALE_NEUTRAL, 0, wzPackageId, -1, L"launcher_JustForMe", -1)) { + if (BOOTSTRAPPER_PACKAGE_STATE_PRESENT == state || BOOTSTRAPPER_PACKAGE_STATE_OBSOLETE == state) { + detectedLauncher = TRUE; + _engine->SetVariableNumeric(L"InstallLauncherAllUsers", 0); + } + } + + if (detectedLauncher) { + /* When we detect the current version of the launcher. */ + _engine->SetVariableNumeric(L"Include_launcher", 1); + _engine->SetVariableNumeric(L"DetectedLauncher", 1); + _engine->SetVariableString(L"Include_launcherState", L"disable"); + _engine->SetVariableString(L"InstallLauncherAllUsersState", L"disable"); + + auto hr = LoadAssociateFilesStateFromKey(_engine, hkey); + if (hr == S_OK) { + _engine->SetVariableNumeric(L"AssociateFiles", 1); + } else if (FAILED(hr)) { + BalLog(BOOTSTRAPPER_LOG_LEVEL_ERROR, "Failed to load AssociateFiles state: error code 0x%08X", hr); + } + } + } virtual STDMETHODIMP_(void) OnDetectComplete(__in HRESULT hrStatus) { @@ -716,32 +772,7 @@ if (SUCCEEDED(hrStatus)) { // Ensure the default path has been set - LONGLONG installAll; - LPWSTR targetDir = nullptr; - LPWSTR defaultTargetDir = nullptr; - - hrStatus = BalGetStringVariable(L"TargetDir", &targetDir); - if (FAILED(hrStatus) || !targetDir || !targetDir[0]) { - ReleaseStr(targetDir); - targetDir = nullptr; - - if (FAILED(BalGetNumericVariable(L"InstallAllUsers", &installAll))) { - installAll = 0; - } - - hrStatus = BalGetStringVariable( - installAll ? 
L"DefaultAllUsersTargetDir" : L"DefaultJustForMeTargetDir", - &defaultTargetDir - ); - - if (SUCCEEDED(hrStatus) && defaultTargetDir) { - if (defaultTargetDir[0] && SUCCEEDED(BalFormatString(defaultTargetDir, &targetDir))) { - hrStatus = _engine->SetVariableString(L"TargetDir", targetDir); - ReleaseStr(targetDir); - } - ReleaseStr(defaultTargetDir); - } - } + hrStatus = EnsureTargetDir(); } SetState(PYBA_STATE_DETECTED, hrStatus); @@ -1396,9 +1427,14 @@ hr = LoadBootstrapperBAFunctions(); BalExitOnFailure(hr, "Failed to load bootstrapper functions."); + hr = UpdateUIStrings(_command.action); BalExitOnFailure(hr, "Failed to load UI strings."); + if (_command.action == BOOTSTRAPPER_ACTION_MODIFY) { + LoadOptionalFeatureStates(_engine); + } + GetBundleFileVersion(); // don't fail if we couldn't get the version info; best-effort only LExit: @@ -1906,27 +1942,6 @@ for (DWORD i = 0; i < _theme->cControls; ++i) { THEME_CONTROL* pControl = _theme->rgControls + i; LPWSTR text = nullptr; - LPWSTR name = nullptr; - LOC_STRING *locText = nullptr; - - // If a command link has a note, then add it. - if ((pControl->dwStyle & BS_TYPEMASK) == BS_COMMANDLINK || - (pControl->dwStyle & BS_TYPEMASK) == BS_DEFCOMMANDLINK) { - hr = StrAllocFormatted(&name, L"#(loc.%lsNote)", pControl->sczName); - if (SUCCEEDED(hr)) { - hr = LocGetString(_wixLoc, name, &locText); - ReleaseStr(name); - if (SUCCEEDED(hr) && locText && locText->wzText && locText->wzText[0]) { - hr = BalFormatString(locText->wzText, &text); - if (SUCCEEDED(hr) && text && text[0]) { - ThemeSendControlMessage(_theme, pControl->wId, BCM_SETNOTE, 0, (LPARAM)text); - ReleaseStr(text); - text = nullptr; - } - } - } - hr = S_OK; - } if (!pControl->wPageId && pControl->sczText && *pControl->sczText) { HRESULT hrFormat; @@ -2048,6 +2063,7 @@ return; } + HRESULT UpdateUIStrings(__in BOOTSTRAPPER_ACTION action) { HRESULT hr = S_OK; LPCWSTR likeInstalling = nullptr; @@ -2270,6 +2286,30 @@ StrFree(controlState); } StrFree(controlName); + controlName = nullptr; + + + // If a command link has a note, then add it. 
+ if ((pControl->dwStyle & BS_TYPEMASK) == BS_COMMANDLINK || + (pControl->dwStyle & BS_TYPEMASK) == BS_DEFCOMMANDLINK) { + hr = StrAllocFormatted(&controlName, L"#(loc.%lsNote)", pControl->sczName); + if (SUCCEEDED(hr)) { + LOC_STRING *locText = nullptr; + hr = LocGetString(_wixLoc, controlName, &locText); + if (SUCCEEDED(hr) && locText && locText->wzText && locText->wzText[0]) { + LPWSTR text = nullptr; + hr = BalFormatString(locText->wzText, &text); + if (SUCCEEDED(hr) && text && text[0]) { + ThemeSendControlMessage(_theme, pControl->wId, BCM_SETNOTE, 0, (LPARAM)text); + ReleaseStr(text); + text = nullptr; + } + } + ReleaseStr(controlName); + controlName = nullptr; + } + hr = S_OK; + } } ThemeControlEnable(_theme, pControl->wId, enableControl); @@ -2515,9 +2555,8 @@ if (_installPage == PAGE_LOADING) { switch (_command.action) { case BOOTSTRAPPER_ACTION_INSTALL: - if (_upgradingOldVersion) { + if (_upgrading) { _installPage = PAGE_UPGRADE; - _upgrading = TRUE; } else if (SUCCEEDED(BalGetNumericVariable(L"SimpleInstall", &simple)) && simple) { _installPage = PAGE_SIMPLE_INSTALL; } else { @@ -2560,11 +2599,9 @@ static BAL_CONDITION WILL_ELEVATE_CONDITION = { L"not WixBundleElevated and (" /*Elevate when installing for all users*/ - L"InstallAllUsers or" + L"InstallAllUsers or " /*Elevate when installing the launcher for all users and it was not detected*/ - L"(InstallLauncherAllUsers and Include_launcher and not DetectedLauncher) or" - /*Elevate when the launcher was installed for all users and it is being removed*/ - L"(DetectedLauncher and DetectedLauncherAllUsers and not Include_launcher)" + L"(Include_launcher and InstallLauncherAllUsers and not DetectedLauncher)" L")", L"" }; @@ -2867,19 +2904,16 @@ return HRESULT_FROM_WIN32(res); } - static HRESULT LoadLauncherStateFromKey( + static HRESULT LoadAssociateFilesStateFromKey( __in IBootstrapperEngine* pEngine, __in HKEY hkHive ) { const LPCWSTR subkey = L"Software\\Python\\PyLauncher"; HKEY hKey; LRESULT res; - - if (IsTargetPlatformx64(pEngine)) { - res = RegOpenKeyExW(hkHive, subkey, 0, KEY_READ | KEY_WOW64_64KEY, &hKey); - } else { - res = RegOpenKeyExW(hkHive, subkey, 0, KEY_READ | KEY_WOW64_32KEY, &hKey); - } + HRESULT hr; + + res = RegOpenKeyExW(hkHive, subkey, 0, KEY_READ | KEY_WOW64_32KEY, &hKey); if (res == ERROR_FILE_NOT_FOUND) { return S_FALSE; @@ -2888,26 +2922,17 @@ return HRESULT_FROM_WIN32(res); } - res = RegQueryValueExW(hKey, nullptr, nullptr, nullptr, nullptr, nullptr); - if (res == ERROR_FILE_NOT_FOUND) { - pEngine->SetVariableNumeric(L"Include_launcher", 0); - } else if (res == ERROR_SUCCESS) { - pEngine->SetVariableNumeric(L"Include_launcher", 1); - pEngine->SetVariableNumeric(L"DetectedLauncher", 1); - pEngine->SetVariableNumeric(L"InstallLauncherAllUsers", (hkHive == HKEY_LOCAL_MACHINE) ? 1 : 0); - pEngine->SetVariableNumeric(L"DetectedLauncherAllUsers", (hkHive == HKEY_LOCAL_MACHINE) ? 
1 : 0); - pEngine->SetVariableString(L"InstallLauncherAllUsersState", L"disable"); - } - res = RegQueryValueExW(hKey, L"AssociateFiles", nullptr, nullptr, nullptr, nullptr); if (res == ERROR_FILE_NOT_FOUND) { - pEngine->SetVariableNumeric(L"AssociateFiles", 0); + hr = S_FALSE; } else if (res == ERROR_SUCCESS) { - pEngine->SetVariableNumeric(L"AssociateFiles", 1); + hr = S_OK; + } else { + hr = HRESULT_FROM_WIN32(res); } RegCloseKey(hKey); - return S_OK; + return hr; } static void LoadOptionalFeatureStates(__in IBootstrapperEngine* pEngine) { @@ -2918,7 +2943,11 @@ HKEY hkHive; // The launcher installation is separate from the Python install, so we - // check its state later. This also checks the file association option. + // check its state later. For now, assume we don't want the launcher or + // file associations, and if they have already been installed then + // loading the state will reactivate these settings. + pEngine->SetVariableNumeric(L"Include_launcher", 0); + pEngine->SetVariableNumeric(L"AssociateFiles", 0); // Get the registry key from the bundle, to save having to duplicate it // in multiple places. @@ -3089,7 +3118,6 @@ _hrFinal = hrHostInitialization; _downgradingOtherVersion = FALSE; - _upgradingOldVersion = FALSE; _restartResult = BOOTSTRAPPER_APPLY_RESTART_NONE; _restartRequired = FALSE; _allowRestart = FALSE; @@ -3113,8 +3141,6 @@ _hBAFModule = nullptr; _baFunction = nullptr; - - EnsureTargetDir(); } @@ -3174,7 +3200,6 @@ DWORD _calculatedExecuteProgress; BOOL _downgradingOtherVersion; - BOOL _upgradingOldVersion; BOOTSTRAPPER_APPLY_RESTART _restartResult; BOOL _restartRequired; BOOL _allowRestart; diff --git a/Tools/msi/bundle/bundle.wxs b/Tools/msi/bundle/bundle.wxs --- a/Tools/msi/bundle/bundle.wxs +++ b/Tools/msi/bundle/bundle.wxs @@ -51,7 +51,7 @@ --> - + @@ -72,6 +72,7 @@ + @@ -81,6 +82,7 @@ + diff --git a/Tools/msi/bundle/packagegroups/launcher.wxs b/Tools/msi/bundle/packagegroups/launcher.wxs --- a/Tools/msi/bundle/packagegroups/launcher.wxs +++ b/Tools/msi/bundle/packagegroups/launcher.wxs @@ -11,7 +11,7 @@ EnableFeatureSelection="yes" Permanent="yes" Visible="yes" - InstallCondition="(InstallAllUsers or InstallLauncherAllUsers) and Include_launcher" /> + InstallCondition="(InstallAllUsers or InstallLauncherAllUsers) and Include_launcher and not DetectedLauncher" /> + InstallCondition="not (InstallAllUsers or InstallLauncherAllUsers) and Include_launcher and not DetectedLauncher" /> \ No newline at end of file -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Wed Dec 2 18:06:25 2015 From: python-checkins at python.org (serhiy.storchaka) Date: Wed, 02 Dec 2015 23:06:25 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAobWVyZ2UgMy40IC0+IDMuNSk6?= =?utf-8?q?_Null_merge?= Message-ID: <20151202230625.4864.75826@psf.io> https://hg.python.org/cpython/rev/c0522985bd7c changeset: 99416:c0522985bd7c branch: 3.5 parent: 99414:8537ec50c254 parent: 99409:7b5057b89a56 user: Serhiy Storchaka date: Thu Dec 03 00:57:41 2015 +0200 summary: Null merge files: -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Wed Dec 2 18:06:26 2015 From: python-checkins at python.org (serhiy.storchaka) Date: Wed, 02 Dec 2015 23:06:26 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogSXNzdWUgIzI1NzA5?= =?utf-8?q?=3A_Fixed_problem_with_in-place_string_concatenation_and_utf-8_?= =?utf-8?q?cache=2E?= Message-ID: <20151202230625.21943.24373@psf.io> https://hg.python.org/cpython/rev/67718032badb changeset: 
99418:67718032badb branch: 3.4 parent: 99409:7b5057b89a56 user: Serhiy Storchaka date: Thu Dec 03 01:02:03 2015 +0200 summary: Issue #25709: Fixed problem with in-place string concatenation and utf-8 cache. files: Lib/test/test_unicode.py | 17 +++++++++++++++++ Misc/NEWS | 2 ++ Objects/unicodeobject.c | 5 +++++ 3 files changed, 24 insertions(+), 0 deletions(-) diff --git a/Lib/test/test_unicode.py b/Lib/test/test_unicode.py --- a/Lib/test/test_unicode.py +++ b/Lib/test/test_unicode.py @@ -2672,6 +2672,23 @@ self.assertTrue(astral >= bmp2) self.assertFalse(astral >= astral2) + @support.cpython_only + def test_pep393_utf8_caching_bug(self): + # Issue #25709: Problem with string concatenation and utf-8 cache + from _testcapi import getargs_s_hash + for k in 0x24, 0xa4, 0x20ac, 0x1f40d: + s = '' + for i in range(5): + # Due to CPython specific optimization the 's' string can be + # resized in-place. + s += chr(k) + # Parsing with the "s#" format code calls indirectly + # PyUnicode_AsUTF8AndSize() which creates the UTF-8 + # encoded string cached in the Unicode object. + self.assertEqual(getargs_s_hash(s), chr(k).encode() * (i + 1)) + # Check that the second call returns the same result + self.assertEqual(getargs_s_hash(s), chr(k).encode() * (i + 1)) + class StringModuleTest(unittest.TestCase): def test_formatter_parser(self): diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -10,6 +10,8 @@ Core and Builtins ----------------- +- Issue #25709: Fixed problem with in-place string concatenation and utf-8 cache. + - Issue #24097: Fixed crash in object.__reduce__() if slot name is freed inside __getattr__. diff --git a/Objects/unicodeobject.c b/Objects/unicodeobject.c --- a/Objects/unicodeobject.c +++ b/Objects/unicodeobject.c @@ -724,6 +724,11 @@ } new_size = (struct_size + (length + 1) * char_size); + if (_PyUnicode_HAS_UTF8_MEMORY(unicode)) { + PyObject_DEL(_PyUnicode_UTF8(unicode)); + _PyUnicode_UTF8(unicode) = NULL; + _PyUnicode_UTF8_LENGTH(unicode) = 0; + } _Py_DEC_REFTOTAL; _Py_ForgetReference(unicode); -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Wed Dec 2 18:06:26 2015 From: python-checkins at python.org (serhiy.storchaka) Date: Wed, 02 Dec 2015 23:06:26 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E5_-=3E_default?= =?utf-8?q?=29=3A_Issue_=2325709=3A_Fixed_problem_with_in-place_string_con?= =?utf-8?q?catenation_and_utf-8_cache=2E?= Message-ID: <20151202230626.28754.20609@psf.io> https://hg.python.org/cpython/rev/9e800b2aeeac changeset: 99420:9e800b2aeeac parent: 99417:a89f92ac7d19 parent: 99419:a0e2376768dc user: Serhiy Storchaka date: Thu Dec 03 01:05:52 2015 +0200 summary: Issue #25709: Fixed problem with in-place string concatenation and utf-8 cache. files: Lib/test/test_unicode.py | 17 +++++++++++++++++ Misc/NEWS | 2 ++ Objects/unicodeobject.c | 5 +++++ 3 files changed, 24 insertions(+), 0 deletions(-) diff --git a/Lib/test/test_unicode.py b/Lib/test/test_unicode.py --- a/Lib/test/test_unicode.py +++ b/Lib/test/test_unicode.py @@ -2702,6 +2702,23 @@ self.assertTrue(astral >= bmp2) self.assertFalse(astral >= astral2) + @support.cpython_only + def test_pep393_utf8_caching_bug(self): + # Issue #25709: Problem with string concatenation and utf-8 cache + from _testcapi import getargs_s_hash + for k in 0x24, 0xa4, 0x20ac, 0x1f40d: + s = '' + for i in range(5): + # Due to CPython specific optimization the 's' string can be + # resized in-place. 
+ s += chr(k) + # Parsing with the "s#" format code calls indirectly + # PyUnicode_AsUTF8AndSize() which creates the UTF-8 + # encoded string cached in the Unicode object. + self.assertEqual(getargs_s_hash(s), chr(k).encode() * (i + 1)) + # Check that the second call returns the same result + self.assertEqual(getargs_s_hash(s), chr(k).encode() * (i + 1)) + class StringModuleTest(unittest.TestCase): def test_formatter_parser(self): diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -10,6 +10,8 @@ Core and Builtins ----------------- +- Issue #25709: Fixed problem with in-place string concatenation and utf-8 cache. + - Issue #5319: New Py_FinalizeEx() API allowing Python to set an exit status of 120 on failure to flush buffered streams. diff --git a/Objects/unicodeobject.c b/Objects/unicodeobject.c --- a/Objects/unicodeobject.c +++ b/Objects/unicodeobject.c @@ -885,6 +885,11 @@ } new_size = (struct_size + (length + 1) * char_size); + if (_PyUnicode_HAS_UTF8_MEMORY(unicode)) { + PyObject_DEL(_PyUnicode_UTF8(unicode)); + _PyUnicode_UTF8(unicode) = NULL; + _PyUnicode_UTF8_LENGTH(unicode) = 0; + } _Py_DEC_REFTOTAL; _Py_ForgetReference(unicode); -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Wed Dec 2 18:06:29 2015 From: python-checkins at python.org (serhiy.storchaka) Date: Wed, 02 Dec 2015 23:06:29 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAobWVyZ2UgMy40IC0+IDMuNSk6?= =?utf-8?q?_Issue_=2325709=3A_Fixed_problem_with_in-place_string_concatena?= =?utf-8?q?tion_and_utf-8_cache=2E?= Message-ID: <20151202230625.57674.63601@psf.io> https://hg.python.org/cpython/rev/a0e2376768dc changeset: 99419:a0e2376768dc branch: 3.5 parent: 99416:c0522985bd7c parent: 99418:67718032badb user: Serhiy Storchaka date: Thu Dec 03 01:04:37 2015 +0200 summary: Issue #25709: Fixed problem with in-place string concatenation and utf-8 cache. files: Lib/test/test_unicode.py | 17 +++++++++++++++++ Misc/NEWS | 2 ++ Objects/unicodeobject.c | 5 +++++ 3 files changed, 24 insertions(+), 0 deletions(-) diff --git a/Lib/test/test_unicode.py b/Lib/test/test_unicode.py --- a/Lib/test/test_unicode.py +++ b/Lib/test/test_unicode.py @@ -2702,6 +2702,23 @@ self.assertTrue(astral >= bmp2) self.assertFalse(astral >= astral2) + @support.cpython_only + def test_pep393_utf8_caching_bug(self): + # Issue #25709: Problem with string concatenation and utf-8 cache + from _testcapi import getargs_s_hash + for k in 0x24, 0xa4, 0x20ac, 0x1f40d: + s = '' + for i in range(5): + # Due to CPython specific optimization the 's' string can be + # resized in-place. + s += chr(k) + # Parsing with the "s#" format code calls indirectly + # PyUnicode_AsUTF8AndSize() which creates the UTF-8 + # encoded string cached in the Unicode object. + self.assertEqual(getargs_s_hash(s), chr(k).encode() * (i + 1)) + # Check that the second call returns the same result + self.assertEqual(getargs_s_hash(s), chr(k).encode() * (i + 1)) + class StringModuleTest(unittest.TestCase): def test_formatter_parser(self): diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -10,6 +10,8 @@ Core and Builtins ----------------- +- Issue #25709: Fixed problem with in-place string concatenation and utf-8 cache. + - Issue #24097: Fixed crash in object.__reduce__() if slot name is freed inside __getattr__. 
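The new test above exercises the bug through the _testcapi extension; as a rough standalone sketch of the symptom (assuming a CPython source build where _testcapi is importable, as it is wherever the test suite runs, and noting that getargs_s_hash() simply parses its argument with the "s#" format code and returns the resulting bytes):

    # Sketch of the Issue #25709 symptom; not part of the patch itself.
    from _testcapi import getargs_s_hash

    def demo():
        s = ''
        for i in range(3):
            # CPython may grow 's' in place here (refcount-1 concatenation).
            s += '\u20ac'
            # The "s#" parse caches a UTF-8 copy on the string object; before
            # this fix the stale copy could survive the next in-place resize.
            assert getargs_s_hash(s) == ('\u20ac' * (i + 1)).encode('utf-8')

    demo()

Before the fix, the in-place resize left the previously cached UTF-8 buffer attached to the object, so a later "s#" parse could hand back stale bytes; the change to Objects/unicodeobject.c below simply frees and clears that cache before resizing.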
diff --git a/Objects/unicodeobject.c b/Objects/unicodeobject.c --- a/Objects/unicodeobject.c +++ b/Objects/unicodeobject.c @@ -722,6 +722,11 @@ } new_size = (struct_size + (length + 1) * char_size); + if (_PyUnicode_HAS_UTF8_MEMORY(unicode)) { + PyObject_DEL(_PyUnicode_UTF8(unicode)); + _PyUnicode_UTF8(unicode) = NULL; + _PyUnicode_UTF8_LENGTH(unicode) = 0; + } _Py_DEC_REFTOTAL; _Py_ForgetReference(unicode); -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Wed Dec 2 18:06:46 2015 From: python-checkins at python.org (serhiy.storchaka) Date: Wed, 02 Dec 2015 23:06:46 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E5_-=3E_default?= =?utf-8?q?=29=3A_Null_merge?= Message-ID: <20151202230625.57684.25615@psf.io> https://hg.python.org/cpython/rev/a89f92ac7d19 changeset: 99417:a89f92ac7d19 parent: 99415:7dcc03b344b5 parent: 99416:c0522985bd7c user: Serhiy Storchaka date: Thu Dec 03 00:58:13 2015 +0200 summary: Null merge files: -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Wed Dec 2 20:53:59 2015 From: python-checkins at python.org (martin.panter) Date: Thu, 03 Dec 2015 01:53:59 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy41KTogSXNzdWUgIzE0Mjg1?= =?utf-8?q?=3A_Do_not_catch_=5F=5Finit=5F=5F=2Epy_exceptions_in_runpy?= Message-ID: <20151203015359.13865.29868@psf.io> https://hg.python.org/cpython/rev/784a64a21fd0 changeset: 99421:784a64a21fd0 branch: 3.5 parent: 99419:a0e2376768dc user: Martin Panter date: Thu Dec 03 01:23:10 2015 +0000 summary: Issue #14285: Do not catch __init__.py exceptions in runpy Initialize package before calling find_spec() for __main__, so that we do not incorrectly handle exceptions from __init__.py. When runpy is used from the Python CLI, use an internal exception rather than ImportError, to avoid catching unexpected exceptions. Also remove exception message rewriting in _run_module_as_main(), because it seems to be redundant with the _get_main_module_details() function. 
files: Lib/runpy.py | 44 +++++------ Lib/test/test_cmd_line_script.py | 71 +++++++++++++++++-- Lib/test/test_runpy.py | 22 ++++++ Misc/NEWS | 5 + 4 files changed, 110 insertions(+), 32 deletions(-) diff --git a/Lib/runpy.py b/Lib/runpy.py --- a/Lib/runpy.py +++ b/Lib/runpy.py @@ -99,7 +99,7 @@ return mod_globals.copy() # Helper to get the loader, code and filename for a module -def _get_module_details(mod_name): +def _get_module_details(mod_name, error=ImportError): try: spec = importlib.util.find_spec(mod_name) except (ImportError, AttributeError, TypeError, ValueError) as ex: @@ -107,27 +107,34 @@ # importlib, where the latter raises other errors for cases where # pkgutil previously raised ImportError msg = "Error while finding spec for {!r} ({}: {})" - raise ImportError(msg.format(mod_name, type(ex), ex)) from ex + raise error(msg.format(mod_name, type(ex), ex)) from ex if spec is None: - raise ImportError("No module named %s" % mod_name) + raise error("No module named %s" % mod_name) if spec.submodule_search_locations is not None: if mod_name == "__main__" or mod_name.endswith(".__main__"): - raise ImportError("Cannot use package as __main__ module") + raise error("Cannot use package as __main__ module") + __import__(mod_name) # Do not catch exceptions initializing package try: pkg_main_name = mod_name + ".__main__" return _get_module_details(pkg_main_name) except ImportError as e: - raise ImportError(("%s; %r is a package and cannot " + + raise error(("%s; %r is a package and cannot " + "be directly executed") %(e, mod_name)) loader = spec.loader if loader is None: - raise ImportError("%r is a namespace package and cannot be executed" + raise error("%r is a namespace package and cannot be executed" % mod_name) - code = loader.get_code(mod_name) + try: + code = loader.get_code(mod_name) + except ImportError as e: + raise error(format(e)) from e if code is None: - raise ImportError("No code object available for %s" % mod_name) + raise error("No code object available for %s" % mod_name) return mod_name, spec, code +class _Error(Exception): + """Error that _run_module_as_main() should report without a traceback""" + # XXX ncoghlan: Should this be documented and made public? # (Current thoughts: don't repeat the mistake that lead to its # creation when run_module() no longer met the needs of @@ -148,20 +155,11 @@ """ try: if alter_argv or mod_name != "__main__": # i.e. -m switch - mod_name, mod_spec, code = _get_module_details(mod_name) + mod_name, mod_spec, code = _get_module_details(mod_name, _Error) else: # i.e. 
directory or zipfile execution - mod_name, mod_spec, code = _get_main_module_details() - except ImportError as exc: - # Try to provide a good error message - # for directories, zip files and the -m switch - if alter_argv: - # For -m switch, just display the exception - info = str(exc) - else: - # For directories/zipfiles, let the user - # know what the code was looking for - info = "can't find '__main__' module in %r" % sys.argv[0] - msg = "%s: %s" % (sys.executable, info) + mod_name, mod_spec, code = _get_main_module_details(_Error) + except _Error as exc: + msg = "%s: %s" % (sys.executable, exc) sys.exit(msg) main_globals = sys.modules["__main__"].__dict__ if alter_argv: @@ -184,7 +182,7 @@ # Leave the sys module alone return _run_code(code, {}, init_globals, run_name, mod_spec) -def _get_main_module_details(): +def _get_main_module_details(error=ImportError): # Helper that gives a nicer error message when attempting to # execute a zipfile or directory by invoking __main__.py # Also moves the standard __main__ out of the way so that the @@ -196,7 +194,7 @@ return _get_module_details(main_name) except ImportError as exc: if main_name in str(exc): - raise ImportError("can't find %r module in %r" % + raise error("can't find %r module in %r" % (main_name, sys.path[0])) from exc raise finally: diff --git a/Lib/test/test_cmd_line_script.py b/Lib/test/test_cmd_line_script.py --- a/Lib/test/test_cmd_line_script.py +++ b/Lib/test/test_cmd_line_script.py @@ -397,20 +397,73 @@ script_name, script_name, '', '', importlib.machinery.SourceFileLoader) + @contextlib.contextmanager + def setup_test_pkg(self, *args): + with support.temp_dir() as script_dir, \ + support.change_cwd(path=script_dir): + pkg_dir = os.path.join(script_dir, 'test_pkg') + make_pkg(pkg_dir, *args) + yield pkg_dir + + def check_dash_m_failure(self, *args): + rc, out, err = assert_python_failure('-m', *args, __isolated=False) + if verbose > 1: + print(repr(out)) + self.assertEqual(rc, 1) + return err + def test_dash_m_error_code_is_one(self): # If a module is invoked with the -m command line flag # and results in an error that the return code to the # shell is '1' - with support.temp_dir() as script_dir: - with support.change_cwd(path=script_dir): - pkg_dir = os.path.join(script_dir, 'test_pkg') - make_pkg(pkg_dir) - script_name = _make_test_script(pkg_dir, 'other', - "if __name__ == '__main__': raise ValueError") - rc, out, err = assert_python_failure('-m', 'test_pkg.other', *example_args) - if verbose > 1: - print(repr(out)) + with self.setup_test_pkg() as pkg_dir: + script_name = _make_test_script(pkg_dir, 'other', + "if __name__ == '__main__': raise ValueError") + err = self.check_dash_m_failure('test_pkg.other', *example_args) + self.assertIn(b'ValueError', err) + + def test_dash_m_errors(self): + # Exercise error reporting for various invalid package executions + tests = ( + ('builtins', br'No code object available'), + ('builtins.x', br'Error while finding spec.*AttributeError'), + ('builtins.x.y', br'Error while finding spec.*' + br'ImportError.*No module named.*not a package'), + ('os.path', br'loader.*cannot handle'), + ('importlib', br'No module named.*' + br'is a package and cannot be directly executed'), + ('importlib.nonexistant', br'No module named'), + ) + for name, regex in tests: + with self.subTest(name): + rc, _, err = assert_python_failure('-m', name) self.assertEqual(rc, 1) + self.assertRegex(err, regex) + self.assertNotIn(b'Traceback', err) + + def test_dash_m_init_traceback(self): + # These were wrapped in an 
ImportError and tracebacks were + # suppressed; see Issue 14285 + exceptions = (ImportError, AttributeError, TypeError, ValueError) + for exception in exceptions: + exception = exception.__name__ + init = "raise {0}('Exception in __init__.py')".format(exception) + with self.subTest(exception), \ + self.setup_test_pkg(init) as pkg_dir: + err = self.check_dash_m_failure('test_pkg') + self.assertIn(exception.encode('ascii'), err) + self.assertIn(b'Exception in __init__.py', err) + self.assertIn(b'Traceback', err) + + def test_dash_m_main_traceback(self): + # Ensure that an ImportError's traceback is reported + with self.setup_test_pkg() as pkg_dir: + main = "raise ImportError('Exception in __main__ module')" + _make_test_script(pkg_dir, '__main__', main) + err = self.check_dash_m_failure('test_pkg') + self.assertIn(b'ImportError', err) + self.assertIn(b'Exception in __main__ module', err) + self.assertIn(b'Traceback', err) def test_pep_409_verbiage(self): # Make sure PEP 409 syntax properly suppresses diff --git a/Lib/test/test_runpy.py b/Lib/test/test_runpy.py --- a/Lib/test/test_runpy.py +++ b/Lib/test/test_runpy.py @@ -439,6 +439,28 @@ if verbose > 1: print("Testing package depth:", depth) self._check_package(depth) + def test_run_package_init_exceptions(self): + # These were previously wrapped in an ImportError; see Issue 14285 + result = self._make_pkg("", 1, "__main__") + pkg_dir, _, mod_name, _ = result + mod_name = mod_name.replace(".__main__", "") + self.addCleanup(self._del_pkg, pkg_dir, 1, mod_name) + init = os.path.join(pkg_dir, "__runpy_pkg__", "__init__.py") + + exceptions = (ImportError, AttributeError, TypeError, ValueError) + for exception in exceptions: + name = exception.__name__ + with self.subTest(name): + source = "raise {0}('{0} in __init__.py.')".format(name) + with open(init, "wt", encoding="ascii") as mod_file: + mod_file.write(source) + try: + run_module(mod_name) + except exception as err: + self.assertNotIn("finding spec", format(err)) + else: + self.fail("Nothing raised; expected {}".format(name)) + def test_run_package_in_namespace_package(self): for depth in range(1, 4): if verbose > 1: print("Testing package depth:", depth) diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -22,6 +22,11 @@ Library ------- +- Issue #14285: When executing a package with the "python -m package" option, + and package initialization fails, a proper traceback is now reported. The + "runpy" module now lets exceptions from package initialization pass back to + the caller, rather than raising ImportError. + - Issue #25177: Fixed problem with the mean of very small and very large numbers. As a side effect, statistics.mean and statistics.variance should be significantly faster. 
-- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Wed Dec 2 20:54:00 2015 From: python-checkins at python.org (martin.panter) Date: Thu, 03 Dec 2015 01:54:00 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E5_-=3E_default?= =?utf-8?q?=29=3A_Issue_=2314285=3A_Merge_runpy_exception_fix_from_3=2E5?= Message-ID: <20151203015400.1997.3869@psf.io> https://hg.python.org/cpython/rev/01397c11ebb8 changeset: 99422:01397c11ebb8 parent: 99420:9e800b2aeeac parent: 99421:784a64a21fd0 user: Martin Panter date: Thu Dec 03 01:36:03 2015 +0000 summary: Issue #14285: Merge runpy exception fix from 3.5 files: Lib/runpy.py | 44 +++++------ Lib/test/test_cmd_line_script.py | 71 +++++++++++++++++-- Lib/test/test_runpy.py | 22 ++++++ Misc/NEWS | 5 + 4 files changed, 110 insertions(+), 32 deletions(-) diff --git a/Lib/runpy.py b/Lib/runpy.py --- a/Lib/runpy.py +++ b/Lib/runpy.py @@ -99,7 +99,7 @@ return mod_globals.copy() # Helper to get the loader, code and filename for a module -def _get_module_details(mod_name): +def _get_module_details(mod_name, error=ImportError): try: spec = importlib.util.find_spec(mod_name) except (ImportError, AttributeError, TypeError, ValueError) as ex: @@ -107,27 +107,34 @@ # importlib, where the latter raises other errors for cases where # pkgutil previously raised ImportError msg = "Error while finding spec for {!r} ({}: {})" - raise ImportError(msg.format(mod_name, type(ex), ex)) from ex + raise error(msg.format(mod_name, type(ex), ex)) from ex if spec is None: - raise ImportError("No module named %s" % mod_name) + raise error("No module named %s" % mod_name) if spec.submodule_search_locations is not None: if mod_name == "__main__" or mod_name.endswith(".__main__"): - raise ImportError("Cannot use package as __main__ module") + raise error("Cannot use package as __main__ module") + __import__(mod_name) # Do not catch exceptions initializing package try: pkg_main_name = mod_name + ".__main__" return _get_module_details(pkg_main_name) except ImportError as e: - raise ImportError(("%s; %r is a package and cannot " + + raise error(("%s; %r is a package and cannot " + "be directly executed") %(e, mod_name)) loader = spec.loader if loader is None: - raise ImportError("%r is a namespace package and cannot be executed" + raise error("%r is a namespace package and cannot be executed" % mod_name) - code = loader.get_code(mod_name) + try: + code = loader.get_code(mod_name) + except ImportError as e: + raise error(format(e)) from e if code is None: - raise ImportError("No code object available for %s" % mod_name) + raise error("No code object available for %s" % mod_name) return mod_name, spec, code +class _Error(Exception): + """Error that _run_module_as_main() should report without a traceback""" + # XXX ncoghlan: Should this be documented and made public? # (Current thoughts: don't repeat the mistake that lead to its # creation when run_module() no longer met the needs of @@ -148,20 +155,11 @@ """ try: if alter_argv or mod_name != "__main__": # i.e. -m switch - mod_name, mod_spec, code = _get_module_details(mod_name) + mod_name, mod_spec, code = _get_module_details(mod_name, _Error) else: # i.e. 
directory or zipfile execution - mod_name, mod_spec, code = _get_main_module_details() - except ImportError as exc: - # Try to provide a good error message - # for directories, zip files and the -m switch - if alter_argv: - # For -m switch, just display the exception - info = str(exc) - else: - # For directories/zipfiles, let the user - # know what the code was looking for - info = "can't find '__main__' module in %r" % sys.argv[0] - msg = "%s: %s" % (sys.executable, info) + mod_name, mod_spec, code = _get_main_module_details(_Error) + except _Error as exc: + msg = "%s: %s" % (sys.executable, exc) sys.exit(msg) main_globals = sys.modules["__main__"].__dict__ if alter_argv: @@ -184,7 +182,7 @@ # Leave the sys module alone return _run_code(code, {}, init_globals, run_name, mod_spec) -def _get_main_module_details(): +def _get_main_module_details(error=ImportError): # Helper that gives a nicer error message when attempting to # execute a zipfile or directory by invoking __main__.py # Also moves the standard __main__ out of the way so that the @@ -196,7 +194,7 @@ return _get_module_details(main_name) except ImportError as exc: if main_name in str(exc): - raise ImportError("can't find %r module in %r" % + raise error("can't find %r module in %r" % (main_name, sys.path[0])) from exc raise finally: diff --git a/Lib/test/test_cmd_line_script.py b/Lib/test/test_cmd_line_script.py --- a/Lib/test/test_cmd_line_script.py +++ b/Lib/test/test_cmd_line_script.py @@ -397,20 +397,73 @@ script_name, script_name, '', '', importlib.machinery.SourceFileLoader) + @contextlib.contextmanager + def setup_test_pkg(self, *args): + with support.temp_dir() as script_dir, \ + support.change_cwd(path=script_dir): + pkg_dir = os.path.join(script_dir, 'test_pkg') + make_pkg(pkg_dir, *args) + yield pkg_dir + + def check_dash_m_failure(self, *args): + rc, out, err = assert_python_failure('-m', *args, __isolated=False) + if verbose > 1: + print(repr(out)) + self.assertEqual(rc, 1) + return err + def test_dash_m_error_code_is_one(self): # If a module is invoked with the -m command line flag # and results in an error that the return code to the # shell is '1' - with support.temp_dir() as script_dir: - with support.change_cwd(path=script_dir): - pkg_dir = os.path.join(script_dir, 'test_pkg') - make_pkg(pkg_dir) - script_name = _make_test_script(pkg_dir, 'other', - "if __name__ == '__main__': raise ValueError") - rc, out, err = assert_python_failure('-m', 'test_pkg.other', *example_args) - if verbose > 1: - print(repr(out)) + with self.setup_test_pkg() as pkg_dir: + script_name = _make_test_script(pkg_dir, 'other', + "if __name__ == '__main__': raise ValueError") + err = self.check_dash_m_failure('test_pkg.other', *example_args) + self.assertIn(b'ValueError', err) + + def test_dash_m_errors(self): + # Exercise error reporting for various invalid package executions + tests = ( + ('builtins', br'No code object available'), + ('builtins.x', br'Error while finding spec.*AttributeError'), + ('builtins.x.y', br'Error while finding spec.*' + br'ImportError.*No module named.*not a package'), + ('os.path', br'loader.*cannot handle'), + ('importlib', br'No module named.*' + br'is a package and cannot be directly executed'), + ('importlib.nonexistant', br'No module named'), + ) + for name, regex in tests: + with self.subTest(name): + rc, _, err = assert_python_failure('-m', name) self.assertEqual(rc, 1) + self.assertRegex(err, regex) + self.assertNotIn(b'Traceback', err) + + def test_dash_m_init_traceback(self): + # These were wrapped in an 
ImportError and tracebacks were + # suppressed; see Issue 14285 + exceptions = (ImportError, AttributeError, TypeError, ValueError) + for exception in exceptions: + exception = exception.__name__ + init = "raise {0}('Exception in __init__.py')".format(exception) + with self.subTest(exception), \ + self.setup_test_pkg(init) as pkg_dir: + err = self.check_dash_m_failure('test_pkg') + self.assertIn(exception.encode('ascii'), err) + self.assertIn(b'Exception in __init__.py', err) + self.assertIn(b'Traceback', err) + + def test_dash_m_main_traceback(self): + # Ensure that an ImportError's traceback is reported + with self.setup_test_pkg() as pkg_dir: + main = "raise ImportError('Exception in __main__ module')" + _make_test_script(pkg_dir, '__main__', main) + err = self.check_dash_m_failure('test_pkg') + self.assertIn(b'ImportError', err) + self.assertIn(b'Exception in __main__ module', err) + self.assertIn(b'Traceback', err) def test_pep_409_verbiage(self): # Make sure PEP 409 syntax properly suppresses diff --git a/Lib/test/test_runpy.py b/Lib/test/test_runpy.py --- a/Lib/test/test_runpy.py +++ b/Lib/test/test_runpy.py @@ -439,6 +439,28 @@ if verbose > 1: print("Testing package depth:", depth) self._check_package(depth) + def test_run_package_init_exceptions(self): + # These were previously wrapped in an ImportError; see Issue 14285 + result = self._make_pkg("", 1, "__main__") + pkg_dir, _, mod_name, _ = result + mod_name = mod_name.replace(".__main__", "") + self.addCleanup(self._del_pkg, pkg_dir, 1, mod_name) + init = os.path.join(pkg_dir, "__runpy_pkg__", "__init__.py") + + exceptions = (ImportError, AttributeError, TypeError, ValueError) + for exception in exceptions: + name = exception.__name__ + with self.subTest(name): + source = "raise {0}('{0} in __init__.py.')".format(name) + with open(init, "wt", encoding="ascii") as mod_file: + mod_file.write(source) + try: + run_module(mod_name) + except exception as err: + self.assertNotIn("finding spec", format(err)) + else: + self.fail("Nothing raised; expected {}".format(name)) + def test_run_package_in_namespace_package(self): for depth in range(1, 4): if verbose > 1: print("Testing package depth:", depth) diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -109,6 +109,11 @@ Library ------- +- Issue #14285: When executing a package with the "python -m package" option, + and package initialization fails, a proper traceback is now reported. The + "runpy" module now lets exceptions from package initialization pass back to + the caller, rather than raising ImportError. + - Issue #25177: Fixed problem with the mean of very small and very large numbers. As a side effect, statistics.mean and statistics.variance should be significantly faster. 
-- Repository URL: https://hg.python.org/cpython From solipsis at pitrou.net Thu Dec 3 03:43:03 2015 From: solipsis at pitrou.net (solipsis at pitrou.net) Date: Thu, 03 Dec 2015 08:43:03 +0000 Subject: [Python-checkins] Daily reference leaks (01397c11ebb8): sum=4 Message-ID: <20151203084302.29749.81209@psf.io> results for 01397c11ebb8 on branch "default" -------------------------------------------- test_functools leaked [0, 2, 2] memory blocks, sum=4 Command line was: ['./python', '-m', 'test.regrtest', '-uall', '-R', '3:3:/home/psf-users/antoine/refleaks/reflog0MKx4_', '--timeout', '7200'] From lp_benchmark_robot at intel.com Thu Dec 3 12:00:10 2015 From: lp_benchmark_robot at intel.com (lp_benchmark_robot at intel.com) Date: Thu, 3 Dec 2015 17:00:10 +0000 Subject: [Python-checkins] Benchmark Results for Python Default 2015-12-03 Message-ID: <88843176-a722-4499-aab3-44238c688615@irsmsx151.ger.corp.intel.com> Results for project Python default, build date 2015-12-03 04:02:59 +0000 commit: 01397c11ebb809a7ccc0a1b4c873183e318dc9dc revision date: 2015-12-03 01:36:03 +0000 environment: Haswell-EP cpu: Intel(R) Xeon(R) CPU E5-2699 v3 @ 2.30GHz 2x18 cores, stepping 2, LLC 45 MB mem: 128 GB os: CentOS 7.1 kernel: Linux 3.10.0-229.4.2.el7.x86_64 Baseline results were generated using release v3.4.3, with hash b4cbecbc0781e89a309d03b60a1f75f8499250e6 from 2015-02-25 12:15:33+00:00 ---------------------------------------------------------------------------------- benchmark relative change since change since current rev run std_dev* last run baseline with PGO ---------------------------------------------------------------------------------- :-) django_v2 0.20% 2.73% 10.00% 15.45% :-( pybench 0.20% -0.14% -2.06% 9.28% :-( regex_v8 2.73% 1.82% -3.69% 4.90% :-( nbody 0.06% -2.57% -3.09% 12.18% :-| json_dump_v2 0.23% -0.21% -1.09% 11.34% :-| normal_startup 0.87% 0.09% 0.41% 4.48% ---------------------------------------------------------------------------------- Note: Benchmark results are measured in seconds. * Relative Standard Deviation (Standard Deviation/Average) Our lab does a nightly source pull and build of the Python project and measures performance changes against the previous stable version and the previous nightly measurement. This is provided as a service to the community so that quality issues with current hardware can be identified quickly. Intel technologies' features and benefits depend on system configuration and may require enabled hardware, software or service activation. Performance varies depending on system configuration. 
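For context on the Issue #14285 runpy change merged above, here is a minimal, hypothetical sketch of the new behaviour (not part of any archived patch; the demo_pkg name and temporary paths are invented for illustration). With the fix, an exception raised in a package's __init__.py propagates to the caller of runpy.run_module instead of being reported as an ImportError:

import os
import sys
import tempfile

import runpy

# Create a throwaway package whose __init__.py fails on import.
base = tempfile.mkdtemp()
pkg = os.path.join(base, "demo_pkg")
os.mkdir(pkg)
with open(os.path.join(pkg, "__init__.py"), "w") as f:
    f.write("raise ValueError('boom in __init__.py')\n")
with open(os.path.join(pkg, "__main__.py"), "w") as f:
    f.write("print('never reached')\n")

sys.path.insert(0, base)
try:
    runpy.run_module("demo_pkg", run_name="__main__")
except ValueError as exc:
    # With the fix the original ValueError arrives here with its traceback;
    # previously runpy re-raised it wrapped in an ImportError.
    print("package init failed as expected:", exc)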
From lp_benchmark_robot at intel.com Thu Dec 3 12:00:37 2015 From: lp_benchmark_robot at intel.com (lp_benchmark_robot at intel.com) Date: Thu, 3 Dec 2015 17:00:37 +0000 Subject: [Python-checkins] Benchmark Results for Python 2.7 2015-12-03 Message-ID: <6f47ad62-6d81-4f28-af36-9777a7406f82@irsmsx151.ger.corp.intel.com> Results for project Python 2.7, build date 2015-12-03 04:50:31 +0000 commit: ff351607a90d345d1335eef3518ab5f43f1ee118 revision date: 2015-12-02 13:39:37 +0000 environment: Haswell-EP cpu: Intel(R) Xeon(R) CPU E5-2699 v3 @ 2.30GHz 2x18 cores, stepping 2, LLC 45 MB mem: 128 GB os: CentOS 7.1 kernel: Linux 3.10.0-229.4.2.el7.x86_64 Baseline results were generated using release v2.7.10, with hash 15c95b7d81dcf821daade360741e00714667653f from 2015-05-23 16:02:14+00:00 ---------------------------------------------------------------------------------- benchmark relative change since change since current rev run std_dev* last run baseline with PGO ---------------------------------------------------------------------------------- :-| django_v2 0.18% -0.59% 1.62% 8.58% :-) pybench 0.19% -0.12% 6.09% 7.21% :-( regex_v8 1.40% -0.20% -3.33% 9.06% :-) nbody 0.13% -0.03% 9.51% 2.45% :-) json_dump_v2 0.32% 0.07% 2.74% 13.46% :-| normal_startup 1.94% 0.40% -1.47% 2.15% :-| ssbench 0.35% 0.01% 0.35% 2.39% ---------------------------------------------------------------------------------- Note: Benchmark results for ssbench are measured in requests/second while all other are measured in seconds. * Relative Standard Deviation (Standard Deviation/Average) Our lab does a nightly source pull and build of the Python project and measures performance changes against the previous stable version and the previous nightly measurement. This is provided as a service to the community so that quality issues with current hardware can be identified quickly. Intel technologies' features and benefits depend on system configuration and may require enabled hardware, software or service activation. Performance varies depending on system configuration. From python-checkins at python.org Thu Dec 3 13:48:12 2015 From: python-checkins at python.org (serhiy.storchaka) Date: Thu, 03 Dec 2015 18:48:12 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMi43KTogSXNzdWUgIzE5NTQz?= =?utf-8?q?=3A_Added_Py3k_warning_for_decoding_unicode=2E?= Message-ID: <20151203184812.21502.21478@psf.io> https://hg.python.org/cpython/rev/c89a0f24d5f6 changeset: 99423:c89a0f24d5f6 branch: 2.7 parent: 99413:ff351607a90d user: Serhiy Storchaka date: Thu Dec 03 20:47:48 2015 +0200 summary: Issue #19543: Added Py3k warning for decoding unicode. 
files: Lib/test/test_unicode.py | 10 ++++++---- Misc/NEWS | 2 ++ Objects/unicodeobject.c | 3 +++ 3 files changed, 11 insertions(+), 4 deletions(-) diff --git a/Lib/test/test_unicode.py b/Lib/test/test_unicode.py --- a/Lib/test/test_unicode.py +++ b/Lib/test/test_unicode.py @@ -1037,10 +1037,12 @@ self.assertEqual(unicode('Andr\202 x','ascii','ignore'), u"Andr x") self.assertEqual(unicode('Andr\202 x','ascii','replace'), u'Andr\uFFFD x') self.assertEqual(unicode('\202 x', 'ascii', 'replace'), u'\uFFFD x') - self.assertEqual(u'abcde'.decode('ascii', 'ignore'), - u'abcde'.decode('ascii', errors='ignore')) - self.assertEqual(u'abcde'.decode('ascii', 'replace'), - u'abcde'.decode(encoding='ascii', errors='replace')) + with test_support.check_py3k_warnings(): + self.assertEqual(u'abcde'.decode('ascii', 'ignore'), + u'abcde'.decode('ascii', errors='ignore')) + with test_support.check_py3k_warnings(): + self.assertEqual(u'abcde'.decode('ascii', 'replace'), + u'abcde'.decode(encoding='ascii', errors='replace')) # Error handling (unknown character names) self.assertEqual("\\N{foo}xx".decode("unicode-escape", "ignore"), u"xx") diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -10,6 +10,8 @@ Core and Builtins ----------------- +- Issue #19543: Added Py3k warning for decoding unicode. + - Issue #24097: Fixed crash in object.__reduce__() if slot name is freed inside __getattr__. diff --git a/Objects/unicodeobject.c b/Objects/unicodeobject.c --- a/Objects/unicodeobject.c +++ b/Objects/unicodeobject.c @@ -1288,6 +1288,9 @@ goto onError; } + if (PyErr_WarnPy3k("decoding Unicode is not supported in 3.x", 1) < 0) + goto onError; + if (encoding == NULL) encoding = PyUnicode_GetDefaultEncoding(); -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Thu Dec 3 15:28:11 2015 From: python-checkins at python.org (serhiy.storchaka) Date: Thu, 03 Dec 2015 20:28:11 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogSXNzdWUgIzY0Nzg6?= =?utf-8?q?_=5Fstrptime=27s_regexp_cache_now_is_reset_after_changing_timez?= =?utf-8?q?one?= Message-ID: <20151203202811.4074.80006@psf.io> https://hg.python.org/cpython/rev/4b0a4da1aa27 changeset: 99425:4b0a4da1aa27 branch: 3.4 parent: 99418:67718032badb user: Serhiy Storchaka date: Thu Dec 03 22:21:07 2015 +0200 summary: Issue #6478: _strptime's regexp cache now is reset after changing timezone with time.tzset(). files: Lib/_strptime.py | 21 +++++++++---- Lib/test/test_strptime.py | 40 ++++++++++++++++++++++---- Misc/NEWS | 3 ++ 3 files changed, 50 insertions(+), 14 deletions(-) diff --git a/Lib/_strptime.py b/Lib/_strptime.py --- a/Lib/_strptime.py +++ b/Lib/_strptime.py @@ -77,6 +77,8 @@ self.__calc_date_time() if _getlang() != self.lang: raise ValueError("locale changed during initialization") + if time.tzname != self.tzname or time.daylight != self.daylight: + raise ValueError("timezone changed during initialization") def __pad(self, seq, front): # Add '' to seq to either the front (is True), else the back. @@ -161,15 +163,17 @@ def __calc_timezone(self): # Set self.timezone by using time.tzname. - # Do not worry about possibility of time.tzname[0] == timetzname[1] - # and time.daylight; handle that in strptime . + # Do not worry about possibility of time.tzname[0] == time.tzname[1] + # and time.daylight; handle that in strptime. 
try: time.tzset() except AttributeError: pass - no_saving = frozenset(["utc", "gmt", time.tzname[0].lower()]) - if time.daylight: - has_saving = frozenset([time.tzname[1].lower()]) + self.tzname = time.tzname + self.daylight = time.daylight + no_saving = frozenset(["utc", "gmt", self.tzname[0].lower()]) + if self.daylight: + has_saving = frozenset([self.tzname[1].lower()]) else: has_saving = frozenset() self.timezone = (no_saving, has_saving) @@ -308,12 +312,15 @@ global _TimeRE_cache, _regex_cache with _cache_lock: - if _getlang() != _TimeRE_cache.locale_time.lang: + locale_time = _TimeRE_cache.locale_time + if (_getlang() != locale_time.lang or + time.tzname != locale_time.tzname or + time.daylight != locale_time.daylight): _TimeRE_cache = TimeRE() _regex_cache.clear() + locale_time = _TimeRE_cache.locale_time if len(_regex_cache) > _CACHE_MAX_SIZE: _regex_cache.clear() - locale_time = _TimeRE_cache.locale_time format_regex = _regex_cache.get(format) if not format_regex: try: diff --git a/Lib/test/test_strptime.py b/Lib/test/test_strptime.py --- a/Lib/test/test_strptime.py +++ b/Lib/test/test_strptime.py @@ -4,6 +4,7 @@ import time import locale import re +import os import sys from test import support from datetime import date as datetime_date @@ -324,9 +325,10 @@ tz_name = time.tzname[0] if tz_name.upper() in ("UTC", "GMT"): self.skipTest('need non-UTC/GMT timezone') - try: - original_tzname = time.tzname - original_daylight = time.daylight + + with support.swap_attr(time, 'tzname', (tz_name, tz_name)), \ + support.swap_attr(time, 'daylight', 1), \ + support.swap_attr(time, 'tzset', lambda: None): time.tzname = (tz_name, tz_name) time.daylight = 1 tz_value = _strptime._strptime_time(tz_name, "%Z")[8] @@ -334,9 +336,6 @@ "%s lead to a timezone value of %s instead of -1 when " "time.daylight set to %s and passing in %s" % (time.tzname, tz_value, time.daylight, tz_name)) - finally: - time.tzname = original_tzname - time.daylight = original_daylight def test_date_time(self): # Test %c directive @@ -548,7 +547,7 @@ _strptime._strptime_time("10", "%d") self.assertIsNot(locale_time_id, _strptime._TimeRE_cache.locale_time) - def test_TimeRE_recreation(self): + def test_TimeRE_recreation_locale(self): # The TimeRE instance should be recreated upon changing the locale. locale_info = locale.getlocale(locale.LC_TIME) try: @@ -577,6 +576,33 @@ finally: locale.setlocale(locale.LC_TIME, locale_info) + @support.run_with_tz('STD-1DST') + def test_TimeRE_recreation_timezone(self): + # The TimeRE instance should be recreated upon changing the timezone. + oldtzname = time.tzname + tm = _strptime._strptime_time(time.tzname[0], '%Z') + self.assertEqual(tm.tm_isdst, 0) + tm = _strptime._strptime_time(time.tzname[1], '%Z') + self.assertEqual(tm.tm_isdst, 1) + # Get id of current cache object. + first_time_re = _strptime._TimeRE_cache + # Change the timezone and force a recreation of the cache. + os.environ['TZ'] = 'EST+05EDT,M3.2.0,M11.1.0' + time.tzset() + tm = _strptime._strptime_time(time.tzname[0], '%Z') + self.assertEqual(tm.tm_isdst, 0) + tm = _strptime._strptime_time(time.tzname[1], '%Z') + self.assertEqual(tm.tm_isdst, 1) + # Get the new cache object's id. + second_time_re = _strptime._TimeRE_cache + # They should not be equal. + self.assertIsNot(first_time_re, second_time_re) + # Make sure old names no longer accepted. 
+ with self.assertRaises(ValueError): + _strptime._strptime_time(oldtzname[0], '%Z') + with self.assertRaises(ValueError): + _strptime._strptime_time(oldtzname[1], '%Z') + def test_main(): support.run_unittest( diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -115,6 +115,9 @@ Library ------- +- Issue #6478: _strptime's regexp cache now is reset after changing timezone + with time.tzset(). + - Issue #25177: Fixed problem with the mean of very small and very large numbers. As a side effect, statistics.mean and statistics.variance should be significantly faster. -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Thu Dec 3 15:28:14 2015 From: python-checkins at python.org (serhiy.storchaka) Date: Thu, 03 Dec 2015 20:28:14 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E5_-=3E_default?= =?utf-8?q?=29=3A_Issue_=236478=3A_=5Fstrptime=27s_regexp_cache_now_is_res?= =?utf-8?q?et_after_changing_timezone?= Message-ID: <20151203202811.105473.78656@psf.io> https://hg.python.org/cpython/rev/ea576db13827 changeset: 99427:ea576db13827 parent: 99422:01397c11ebb8 parent: 99426:5fa855d20624 user: Serhiy Storchaka date: Thu Dec 03 22:27:31 2015 +0200 summary: Issue #6478: _strptime's regexp cache now is reset after changing timezone with time.tzset(). files: Lib/_strptime.py | 22 +++++++++----- Lib/test/test_strptime.py | 40 ++++++++++++++++++++++---- Misc/NEWS | 3 ++ 3 files changed, 50 insertions(+), 15 deletions(-) diff --git a/Lib/_strptime.py b/Lib/_strptime.py --- a/Lib/_strptime.py +++ b/Lib/_strptime.py @@ -77,6 +77,8 @@ self.__calc_date_time() if _getlang() != self.lang: raise ValueError("locale changed during initialization") + if time.tzname != self.tzname or time.daylight != self.daylight: + raise ValueError("timezone changed during initialization") def __pad(self, seq, front): # Add '' to seq to either the front (is True), else the back. @@ -161,15 +163,17 @@ def __calc_timezone(self): # Set self.timezone by using time.tzname. - # Do not worry about possibility of time.tzname[0] == timetzname[1] - # and time.daylight; handle that in strptime . + # Do not worry about possibility of time.tzname[0] == time.tzname[1] + # and time.daylight; handle that in strptime. 
try: time.tzset() except AttributeError: pass - no_saving = frozenset({"utc", "gmt", time.tzname[0].lower()}) - if time.daylight: - has_saving = frozenset({time.tzname[1].lower()}) + self.tzname = time.tzname + self.daylight = time.daylight + no_saving = frozenset({"utc", "gmt", self.tzname[0].lower()}) + if self.daylight: + has_saving = frozenset({self.tzname[1].lower()}) else: has_saving = frozenset() self.timezone = (no_saving, has_saving) @@ -326,13 +330,15 @@ global _TimeRE_cache, _regex_cache with _cache_lock: - - if _getlang() != _TimeRE_cache.locale_time.lang: + locale_time = _TimeRE_cache.locale_time + if (_getlang() != locale_time.lang or + time.tzname != locale_time.tzname or + time.daylight != locale_time.daylight): _TimeRE_cache = TimeRE() _regex_cache.clear() + locale_time = _TimeRE_cache.locale_time if len(_regex_cache) > _CACHE_MAX_SIZE: _regex_cache.clear() - locale_time = _TimeRE_cache.locale_time format_regex = _regex_cache.get(format) if not format_regex: try: diff --git a/Lib/test/test_strptime.py b/Lib/test/test_strptime.py --- a/Lib/test/test_strptime.py +++ b/Lib/test/test_strptime.py @@ -4,6 +4,7 @@ import time import locale import re +import os import sys from test import support from datetime import date as datetime_date @@ -344,9 +345,10 @@ tz_name = time.tzname[0] if tz_name.upper() in ("UTC", "GMT"): self.skipTest('need non-UTC/GMT timezone') - try: - original_tzname = time.tzname - original_daylight = time.daylight + + with support.swap_attr(time, 'tzname', (tz_name, tz_name)), \ + support.swap_attr(time, 'daylight', 1), \ + support.swap_attr(time, 'tzset', lambda: None): time.tzname = (tz_name, tz_name) time.daylight = 1 tz_value = _strptime._strptime_time(tz_name, "%Z")[8] @@ -354,9 +356,6 @@ "%s lead to a timezone value of %s instead of -1 when " "time.daylight set to %s and passing in %s" % (time.tzname, tz_value, time.daylight, tz_name)) - finally: - time.tzname = original_tzname - time.daylight = original_daylight def test_date_time(self): # Test %c directive @@ -579,7 +578,7 @@ _strptime._strptime_time("10", "%d") self.assertIsNot(locale_time_id, _strptime._TimeRE_cache.locale_time) - def test_TimeRE_recreation(self): + def test_TimeRE_recreation_locale(self): # The TimeRE instance should be recreated upon changing the locale. locale_info = locale.getlocale(locale.LC_TIME) try: @@ -608,6 +607,33 @@ finally: locale.setlocale(locale.LC_TIME, locale_info) + @support.run_with_tz('STD-1DST') + def test_TimeRE_recreation_timezone(self): + # The TimeRE instance should be recreated upon changing the timezone. + oldtzname = time.tzname + tm = _strptime._strptime_time(time.tzname[0], '%Z') + self.assertEqual(tm.tm_isdst, 0) + tm = _strptime._strptime_time(time.tzname[1], '%Z') + self.assertEqual(tm.tm_isdst, 1) + # Get id of current cache object. + first_time_re = _strptime._TimeRE_cache + # Change the timezone and force a recreation of the cache. + os.environ['TZ'] = 'EST+05EDT,M3.2.0,M11.1.0' + time.tzset() + tm = _strptime._strptime_time(time.tzname[0], '%Z') + self.assertEqual(tm.tm_isdst, 0) + tm = _strptime._strptime_time(time.tzname[1], '%Z') + self.assertEqual(tm.tm_isdst, 1) + # Get the new cache object's id. + second_time_re = _strptime._TimeRE_cache + # They should not be equal. + self.assertIsNot(first_time_re, second_time_re) + # Make sure old names no longer accepted. 
+ with self.assertRaises(ValueError): + _strptime._strptime_time(oldtzname[0], '%Z') + with self.assertRaises(ValueError): + _strptime._strptime_time(oldtzname[1], '%Z') + if __name__ == '__main__': unittest.main() diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -109,6 +109,9 @@ Library ------- +- Issue #6478: _strptime's regexp cache now is reset after changing timezone + with time.tzset(). + - Issue #14285: When executing a package with the "python -m package" option, and package initialization fails, a proper traceback is now reported. The "runpy" module now lets exceptions from package initialization pass back to -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Thu Dec 3 15:28:17 2015 From: python-checkins at python.org (serhiy.storchaka) Date: Thu, 03 Dec 2015 20:28:17 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMi43KTogSXNzdWUgIzY0Nzg6?= =?utf-8?q?_=5Fstrptime=27s_regexp_cache_now_is_reset_after_changing_timez?= =?utf-8?q?one?= Message-ID: <20151203202811.33671.31291@psf.io> https://hg.python.org/cpython/rev/2ae5c51c5dea changeset: 99424:2ae5c51c5dea branch: 2.7 user: Serhiy Storchaka date: Thu Dec 03 22:20:45 2015 +0200 summary: Issue #6478: _strptime's regexp cache now is reset after changing timezone with time.tzset(). files: Lib/_strptime.py | 21 ++++++++---- Lib/test/test_strptime.py | 44 +++++++++++++++++++++----- Lib/test/test_support.py | 35 ++++++++++++++++++++- Misc/NEWS | 3 + 4 files changed, 86 insertions(+), 17 deletions(-) diff --git a/Lib/_strptime.py b/Lib/_strptime.py --- a/Lib/_strptime.py +++ b/Lib/_strptime.py @@ -75,6 +75,8 @@ self.__calc_date_time() if _getlang() != self.lang: raise ValueError("locale changed during initialization") + if time.tzname != self.tzname or time.daylight != self.daylight: + raise ValueError("timezone changed during initialization") def __pad(self, seq, front): # Add '' to seq to either the front (is True), else the back. @@ -159,15 +161,17 @@ def __calc_timezone(self): # Set self.timezone by using time.tzname. - # Do not worry about possibility of time.tzname[0] == timetzname[1] - # and time.daylight; handle that in strptime . + # Do not worry about possibility of time.tzname[0] == time.tzname[1] + # and time.daylight; handle that in strptime. 
try: time.tzset() except AttributeError: pass - no_saving = frozenset(["utc", "gmt", time.tzname[0].lower()]) - if time.daylight: - has_saving = frozenset([time.tzname[1].lower()]) + self.tzname = time.tzname + self.daylight = time.daylight + no_saving = frozenset(["utc", "gmt", self.tzname[0].lower()]) + if self.daylight: + has_saving = frozenset([self.tzname[1].lower()]) else: has_saving = frozenset() self.timezone = (no_saving, has_saving) @@ -296,12 +300,15 @@ """Return a time struct based on the input string and the format string.""" global _TimeRE_cache, _regex_cache with _cache_lock: - if _getlang() != _TimeRE_cache.locale_time.lang: + locale_time = _TimeRE_cache.locale_time + if (_getlang() != locale_time.lang or + time.tzname != locale_time.tzname or + time.daylight != locale_time.daylight): _TimeRE_cache = TimeRE() _regex_cache.clear() + locale_time = _TimeRE_cache.locale_time if len(_regex_cache) > _CACHE_MAX_SIZE: _regex_cache.clear() - locale_time = _TimeRE_cache.locale_time format_regex = _regex_cache.get(format) if not format_regex: try: diff --git a/Lib/test/test_strptime.py b/Lib/test/test_strptime.py --- a/Lib/test/test_strptime.py +++ b/Lib/test/test_strptime.py @@ -4,8 +4,9 @@ import time import locale import re +import os import sys -from test import test_support +from test import test_support as support from datetime import date as datetime_date import _strptime @@ -314,9 +315,10 @@ tz_name = time.tzname[0] if tz_name.upper() in ("UTC", "GMT"): self.skipTest('need non-UTC/GMT timezone') - try: - original_tzname = time.tzname - original_daylight = time.daylight + + with support.swap_attr(time, 'tzname', (tz_name, tz_name)), \ + support.swap_attr(time, 'daylight', 1), \ + support.swap_attr(time, 'tzset', lambda: None): time.tzname = (tz_name, tz_name) time.daylight = 1 tz_value = _strptime._strptime_time(tz_name, "%Z")[8] @@ -324,9 +326,6 @@ "%s lead to a timezone value of %s instead of -1 when " "time.daylight set to %s and passing in %s" % (time.tzname, tz_value, time.daylight, tz_name)) - finally: - time.tzname = original_tzname - time.daylight = original_daylight def test_date_time(self): # Test %c directive @@ -538,7 +537,7 @@ _strptime._strptime_time("10", "%d") self.assertIsNot(locale_time_id, _strptime._TimeRE_cache.locale_time) - def test_TimeRE_recreation(self): + def test_TimeRE_recreation_locale(self): # The TimeRE instance should be recreated upon changing the locale. locale_info = locale.getlocale(locale.LC_TIME) try: @@ -567,9 +566,36 @@ finally: locale.setlocale(locale.LC_TIME, locale_info) + @support.run_with_tz('STD-1DST') + def test_TimeRE_recreation_timezone(self): + # The TimeRE instance should be recreated upon changing the timezone. + oldtzname = time.tzname + tm = _strptime._strptime_time(time.tzname[0], '%Z') + self.assertEqual(tm.tm_isdst, 0) + tm = _strptime._strptime_time(time.tzname[1], '%Z') + self.assertEqual(tm.tm_isdst, 1) + # Get id of current cache object. + first_time_re = _strptime._TimeRE_cache + # Change the timezone and force a recreation of the cache. + os.environ['TZ'] = 'EST+05EDT,M3.2.0,M11.1.0' + time.tzset() + tm = _strptime._strptime_time(time.tzname[0], '%Z') + self.assertEqual(tm.tm_isdst, 0) + tm = _strptime._strptime_time(time.tzname[1], '%Z') + self.assertEqual(tm.tm_isdst, 1) + # Get the new cache object's id. + second_time_re = _strptime._TimeRE_cache + # They should not be equal. + self.assertIsNot(first_time_re, second_time_re) + # Make sure old names no longer accepted. 
+ with self.assertRaises(ValueError): + _strptime._strptime_time(oldtzname[0], '%Z') + with self.assertRaises(ValueError): + _strptime._strptime_time(oldtzname[1], '%Z') + def test_main(): - test_support.run_unittest( + support.run_unittest( getlang_Tests, LocaleTime_Tests, TimeRETests, diff --git a/Lib/test/test_support.py b/Lib/test/test_support.py --- a/Lib/test/test_support.py +++ b/Lib/test/test_support.py @@ -40,7 +40,7 @@ "threading_cleanup", "reap_threads", "start_threads", "cpython_only", "check_impl_detail", "get_attribute", "py3k_bytes", "import_fresh_module", "threading_cleanup", "reap_children", - "strip_python_stderr", "IPV6_ENABLED"] + "strip_python_stderr", "IPV6_ENABLED", "run_with_tz"] class Error(Exception): """Base class for regression test exceptions.""" @@ -1226,6 +1226,39 @@ return decorator #======================================================================= +# Decorator for running a function in a specific timezone, correctly +# resetting it afterwards. + +def run_with_tz(tz): + def decorator(func): + def inner(*args, **kwds): + try: + tzset = time.tzset + except AttributeError: + raise unittest.SkipTest("tzset required") + if 'TZ' in os.environ: + orig_tz = os.environ['TZ'] + else: + orig_tz = None + os.environ['TZ'] = tz + tzset() + + # now run the function, resetting the tz on exceptions + try: + return func(*args, **kwds) + finally: + if orig_tz is None: + del os.environ['TZ'] + else: + os.environ['TZ'] = orig_tz + time.tzset() + + inner.__name__ = func.__name__ + inner.__doc__ = func.__doc__ + return inner + return decorator + +#======================================================================= # Big-memory-test support. Separate from 'resources' because memory use should be configurable. # Some handy shorthands. Note that these are used for byte-limits as well diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -22,6 +22,9 @@ Library ------- +- Issue #6478: _strptime's regexp cache now is reset after changing timezone + with time.tzset(). + - Issue #25718: Fixed copying object with state with boolean value is false. - Issue #25742: :func:`locale.setlocale` now accepts a Unicode string for -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Thu Dec 3 15:28:18 2015 From: python-checkins at python.org (serhiy.storchaka) Date: Thu, 03 Dec 2015 20:28:18 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAobWVyZ2UgMy40IC0+IDMuNSk6?= =?utf-8?q?_Issue_=236478=3A_=5Fstrptime=27s_regexp_cache_now_is_reset_aft?= =?utf-8?q?er_changing_timezone?= Message-ID: <20151203202811.33671.93236@psf.io> https://hg.python.org/cpython/rev/5fa855d20624 changeset: 99426:5fa855d20624 branch: 3.5 parent: 99421:784a64a21fd0 parent: 99425:4b0a4da1aa27 user: Serhiy Storchaka date: Thu Dec 03 22:26:36 2015 +0200 summary: Issue #6478: _strptime's regexp cache now is reset after changing timezone with time.tzset(). files: Lib/_strptime.py | 22 +++++++++----- Lib/test/test_strptime.py | 40 ++++++++++++++++++++++---- Misc/NEWS | 3 ++ 3 files changed, 50 insertions(+), 15 deletions(-) diff --git a/Lib/_strptime.py b/Lib/_strptime.py --- a/Lib/_strptime.py +++ b/Lib/_strptime.py @@ -77,6 +77,8 @@ self.__calc_date_time() if _getlang() != self.lang: raise ValueError("locale changed during initialization") + if time.tzname != self.tzname or time.daylight != self.daylight: + raise ValueError("timezone changed during initialization") def __pad(self, seq, front): # Add '' to seq to either the front (is True), else the back. 
@@ -161,15 +163,17 @@ def __calc_timezone(self): # Set self.timezone by using time.tzname. - # Do not worry about possibility of time.tzname[0] == timetzname[1] - # and time.daylight; handle that in strptime . + # Do not worry about possibility of time.tzname[0] == time.tzname[1] + # and time.daylight; handle that in strptime. try: time.tzset() except AttributeError: pass - no_saving = frozenset({"utc", "gmt", time.tzname[0].lower()}) - if time.daylight: - has_saving = frozenset({time.tzname[1].lower()}) + self.tzname = time.tzname + self.daylight = time.daylight + no_saving = frozenset({"utc", "gmt", self.tzname[0].lower()}) + if self.daylight: + has_saving = frozenset({self.tzname[1].lower()}) else: has_saving = frozenset() self.timezone = (no_saving, has_saving) @@ -307,13 +311,15 @@ global _TimeRE_cache, _regex_cache with _cache_lock: - - if _getlang() != _TimeRE_cache.locale_time.lang: + locale_time = _TimeRE_cache.locale_time + if (_getlang() != locale_time.lang or + time.tzname != locale_time.tzname or + time.daylight != locale_time.daylight): _TimeRE_cache = TimeRE() _regex_cache.clear() + locale_time = _TimeRE_cache.locale_time if len(_regex_cache) > _CACHE_MAX_SIZE: _regex_cache.clear() - locale_time = _TimeRE_cache.locale_time format_regex = _regex_cache.get(format) if not format_regex: try: diff --git a/Lib/test/test_strptime.py b/Lib/test/test_strptime.py --- a/Lib/test/test_strptime.py +++ b/Lib/test/test_strptime.py @@ -4,6 +4,7 @@ import time import locale import re +import os import sys from test import support from datetime import date as datetime_date @@ -324,9 +325,10 @@ tz_name = time.tzname[0] if tz_name.upper() in ("UTC", "GMT"): self.skipTest('need non-UTC/GMT timezone') - try: - original_tzname = time.tzname - original_daylight = time.daylight + + with support.swap_attr(time, 'tzname', (tz_name, tz_name)), \ + support.swap_attr(time, 'daylight', 1), \ + support.swap_attr(time, 'tzset', lambda: None): time.tzname = (tz_name, tz_name) time.daylight = 1 tz_value = _strptime._strptime_time(tz_name, "%Z")[8] @@ -334,9 +336,6 @@ "%s lead to a timezone value of %s instead of -1 when " "time.daylight set to %s and passing in %s" % (time.tzname, tz_value, time.daylight, tz_name)) - finally: - time.tzname = original_tzname - time.daylight = original_daylight def test_date_time(self): # Test %c directive @@ -548,7 +547,7 @@ _strptime._strptime_time("10", "%d") self.assertIsNot(locale_time_id, _strptime._TimeRE_cache.locale_time) - def test_TimeRE_recreation(self): + def test_TimeRE_recreation_locale(self): # The TimeRE instance should be recreated upon changing the locale. locale_info = locale.getlocale(locale.LC_TIME) try: @@ -577,6 +576,33 @@ finally: locale.setlocale(locale.LC_TIME, locale_info) + @support.run_with_tz('STD-1DST') + def test_TimeRE_recreation_timezone(self): + # The TimeRE instance should be recreated upon changing the timezone. + oldtzname = time.tzname + tm = _strptime._strptime_time(time.tzname[0], '%Z') + self.assertEqual(tm.tm_isdst, 0) + tm = _strptime._strptime_time(time.tzname[1], '%Z') + self.assertEqual(tm.tm_isdst, 1) + # Get id of current cache object. + first_time_re = _strptime._TimeRE_cache + # Change the timezone and force a recreation of the cache. + os.environ['TZ'] = 'EST+05EDT,M3.2.0,M11.1.0' + time.tzset() + tm = _strptime._strptime_time(time.tzname[0], '%Z') + self.assertEqual(tm.tm_isdst, 0) + tm = _strptime._strptime_time(time.tzname[1], '%Z') + self.assertEqual(tm.tm_isdst, 1) + # Get the new cache object's id. 
+ second_time_re = _strptime._TimeRE_cache + # They should not be equal. + self.assertIsNot(first_time_re, second_time_re) + # Make sure old names no longer accepted. + with self.assertRaises(ValueError): + _strptime._strptime_time(oldtzname[0], '%Z') + with self.assertRaises(ValueError): + _strptime._strptime_time(oldtzname[1], '%Z') + if __name__ == '__main__': unittest.main() diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -22,6 +22,9 @@ Library ------- +- Issue #6478: _strptime's regexp cache now is reset after changing timezone + with time.tzset(). + - Issue #14285: When executing a package with the "python -m package" option, and package initialization fails, a proper traceback is now reported. The "runpy" module now lets exceptions from package initialization pass back to -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Thu Dec 3 18:27:55 2015 From: python-checkins at python.org (guido.van.rossum) Date: Thu, 03 Dec 2015 23:27:55 +0000 Subject: [Python-checkins] =?utf-8?q?peps=3A_Add_Awaitable=2C_AsyncIterabl?= =?utf-8?q?e=2C_AsyncIterator_to_PEP_484_=28to_be_released_3=2E5=2E2=29=2E?= Message-ID: <20151203232755.29619.83269@psf.io> https://hg.python.org/peps/rev/73dde33e2a4a changeset: 6134:73dde33e2a4a user: Guido van Rossum date: Thu Dec 03 15:27:36 2015 -0800 summary: Add Awaitable, AsyncIterable, AsyncIterator to PEP 484 (to be released 3.5.2). files: pep-0484.txt | 6 ++++++ 1 files changed, 6 insertions(+), 0 deletions(-) diff --git a/pep-0484.txt b/pep-0484.txt --- a/pep-0484.txt +++ b/pep-0484.txt @@ -1261,6 +1261,12 @@ Generic variants of container ABCs (and a few non-containers): +* Awaitable + +* AsyncIterable + +* AsyncIterator + * ByteString * Callable (see above, listed here for completeness) -- Repository URL: https://hg.python.org/peps From python-checkins at python.org Thu Dec 3 20:32:25 2015 From: python-checkins at python.org (martin.panter) Date: Fri, 04 Dec 2015 01:32:25 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMi43KTogSXNzdWUgIzE0Mjg1?= =?utf-8?q?=3A_Do_not_catch_ImportError_from_=5F=5Finit=5F=5F=2Epy_in_runp?= =?utf-8?q?y?= Message-ID: <20151204013225.60126.80921@psf.io> https://hg.python.org/cpython/rev/c4e950338e79 changeset: 99428:c4e950338e79 branch: 2.7 parent: 99424:2ae5c51c5dea user: Martin Panter date: Thu Dec 03 01:23:10 2015 +0000 summary: Issue #14285: Do not catch ImportError from __init__.py in runpy Initialize package before calling get_loader() for __main__, so that we do not incorrectly handle ImportError from __init__.py. When runpy is used from the Python CLI, use an internal exception rather than ImportError, to avoid catching an unexpected ImportError. Also simplify message formatting: str() is redundant with %s. Also fix test_dash_m_error_code_is_one() in test_cmd_line_script, which was failing because the test package was not in the current directlry, rather the desired ValueError. 
files: Lib/runpy.py | 42 ++++++++---- Lib/test/script_helper.py | 4 +- Lib/test/test_cmd_line_script.py | 64 ++++++++++++++++++- Lib/test/test_runpy.py | 24 +++++++ Misc/NEWS | 4 + 5 files changed, 115 insertions(+), 23 deletions(-) diff --git a/Lib/runpy.py b/Lib/runpy.py --- a/Lib/runpy.py +++ b/Lib/runpy.py @@ -97,27 +97,35 @@ return None # Helper to get the loader, code and filename for a module -def _get_module_details(mod_name): - loader = get_loader(mod_name) - if loader is None: - raise ImportError("No module named %s" % mod_name) - if loader.is_package(mod_name): +def _get_module_details(mod_name, error=ImportError): + try: + loader = get_loader(mod_name) + if loader is None: + raise error("No module named %s" % mod_name) + ispkg = loader.is_package(mod_name) + except ImportError as e: + raise error(format(e)) + if ispkg: if mod_name == "__main__" or mod_name.endswith(".__main__"): - raise ImportError("Cannot use package as __main__ module") + raise error("Cannot use package as __main__ module") + __import__(mod_name) # Do not catch exceptions initializing package try: pkg_main_name = mod_name + ".__main__" return _get_module_details(pkg_main_name) except ImportError, e: - raise ImportError(("%s; %r is a package and cannot " + + raise error(("%s; %r is a package and cannot " + "be directly executed") %(e, mod_name)) - code = loader.get_code(mod_name) + try: + code = loader.get_code(mod_name) + except ImportError as e: + raise error(format(e)) if code is None: - raise ImportError("No code object available for %s" % mod_name) + raise error("No code object available for %s" % mod_name) filename = _get_filename(loader, mod_name) return mod_name, loader, code, filename -def _get_main_module_details(): +def _get_main_module_details(error=ImportError): # Helper that gives a nicer error message when attempting to # execute a zipfile or directory by invoking __main__.py main_name = "__main__" @@ -125,10 +133,13 @@ return _get_module_details(main_name) except ImportError as exc: if main_name in str(exc): - raise ImportError("can't find %r module in %r" % + raise error("can't find %r module in %r" % (main_name, sys.path[0])) raise +class _Error(Exception): + """Error that _run_module_as_main() should report without a traceback""" + # This function is the actual implementation of the -m switch and direct # execution of zipfiles and directories and is deliberately kept private. # This avoids a repeat of the situation where run_module() no longer met the @@ -148,11 +159,12 @@ """ try: if alter_argv or mod_name != "__main__": # i.e. -m switch - mod_name, loader, code, fname = _get_module_details(mod_name) + mod_name, loader, code, fname = _get_module_details( + mod_name, _Error) else: # i.e. 
directory or zipfile execution - mod_name, loader, code, fname = _get_main_module_details() - except ImportError as exc: - msg = "%s: %s" % (sys.executable, str(exc)) + mod_name, loader, code, fname = _get_main_module_details(_Error) + except _Error as exc: + msg = "%s: %s" % (sys.executable, exc) sys.exit(msg) pkg_name = mod_name.rpartition('.')[0] main_globals = sys.modules["__main__"].__dict__ diff --git a/Lib/test/script_helper.py b/Lib/test/script_helper.py --- a/Lib/test/script_helper.py +++ b/Lib/test/script_helper.py @@ -134,9 +134,9 @@ # zip_file.close() return zip_name, os.path.join(zip_name, name_in_zip) -def make_pkg(pkg_dir): +def make_pkg(pkg_dir, init_source=''): os.mkdir(pkg_dir) - make_script(pkg_dir, '__init__', '') + make_script(pkg_dir, '__init__', init_source) def make_zip_pkg(zip_dir, zip_basename, pkg_name, script_basename, source, depth=1, compiled=False): diff --git a/Lib/test/test_cmd_line_script.py b/Lib/test/test_cmd_line_script.py --- a/Lib/test/test_cmd_line_script.py +++ b/Lib/test/test_cmd_line_script.py @@ -1,5 +1,6 @@ # Tests command line execution of scripts +import contextlib import unittest import os import os.path @@ -207,18 +208,69 @@ launch_name = _make_launch_script(script_dir, 'launch', 'test_pkg') self._check_import_error(launch_name, msg) + @contextlib.contextmanager + def setup_test_pkg(self, *args): + with temp_dir() as script_dir, \ + test.test_support.change_cwd(script_dir): + pkg_dir = os.path.join(script_dir, 'test_pkg') + make_pkg(pkg_dir, *args) + yield pkg_dir + + def check_dash_m_failure(self, *args): + rc, out, err = assert_python_failure('-m', *args) + if verbose > 1: + print(out) + self.assertEqual(rc, 1) + return err + def test_dash_m_error_code_is_one(self): # If a module is invoked with the -m command line flag # and results in an error that the return code to the # shell is '1' - with temp_dir() as script_dir: - pkg_dir = os.path.join(script_dir, 'test_pkg') - make_pkg(pkg_dir) + with self.setup_test_pkg() as pkg_dir: script_name = _make_test_script(pkg_dir, 'other', "if __name__ == '__main__': raise ValueError") - rc, out, err = assert_python_failure('-m', 'test_pkg.other', *example_args) - if verbose > 1: - print(out) + err = self.check_dash_m_failure('test_pkg.other', *example_args) + self.assertIn(b'ValueError', err) + + def test_dash_m_errors(self): + # Exercise error reporting for various invalid package executions + tests = ( + ('__builtin__', br'No code object available'), + ('__builtin__.x', br'No module named'), + ('__builtin__.x.y', br'No module named'), + ('os.path', br'Loader.*cannot handle'), + ('importlib', br'No module named.*' + br'is a package and cannot be directly executed'), + ('importlib.nonexistant', br'No module named'), + ) + for name, regex in tests: + rc, _, err = assert_python_failure('-m', name) self.assertEqual(rc, 1) + self.assertRegexpMatches(err, regex) + self.assertNotIn(b'Traceback', err) + + def test_dash_m_init_traceback(self): + # These were wrapped in an ImportError and tracebacks were + # suppressed; see Issue 14285 + exceptions = (ImportError, AttributeError, TypeError, ValueError) + for exception in exceptions: + exception = exception.__name__ + init = "raise {0}('Exception in __init__.py')".format(exception) + with self.setup_test_pkg(init) as pkg_dir: + err = self.check_dash_m_failure('test_pkg') + self.assertIn(exception.encode('ascii'), err) + self.assertIn(b'Exception in __init__.py', err) + self.assertIn(b'Traceback', err) + + def test_dash_m_main_traceback(self): + # Ensure that an 
ImportError's traceback is reported + with self.setup_test_pkg() as pkg_dir: + main = "raise ImportError('Exception in __main__ module')" + _make_test_script(pkg_dir, '__main__', main) + err = self.check_dash_m_failure('test_pkg') + self.assertIn(b'ImportError', err) + self.assertIn(b'Exception in __main__ module', err) + self.assertIn(b'Traceback', err) def test_main(): diff --git a/Lib/test/test_runpy.py b/Lib/test/test_runpy.py --- a/Lib/test/test_runpy.py +++ b/Lib/test/test_runpy.py @@ -270,6 +270,30 @@ if verbose: print "Testing package depth:", depth self._check_package(depth) + def test_run_package_init_exceptions(self): + # These were previously wrapped in an ImportError; see Issue 14285 + exceptions = (ImportError, AttributeError, TypeError, ValueError) + for exception in exceptions: + name = exception.__name__ + source = "raise {0}('{0} in __init__.py.')".format(name) + + result = self._make_pkg("", 1, "__main__") + pkg_dir, _, mod_name = result + mod_name = mod_name.replace(".__main__", "") + try: + init = os.path.join(pkg_dir, "__runpy_pkg__", "__init__.py") + with open(init, "wt") as mod_file: + mod_file.write(source) + try: + run_module(mod_name) + except exception as err: + msg = "cannot be directly executed" + self.assertNotIn(msg, format(err)) + else: + self.fail("Nothing raised; expected {}".format(name)) + finally: + self._del_pkg(pkg_dir, 1, mod_name) + def test_explicit_relative_import(self): for depth in range(2, 5): if verbose: print "Testing relative imports at depth:", depth diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -22,6 +22,10 @@ Library ------- +- Issue #14285: When executing a package with the "python -m package" option, + and package initialization raises ImportError, a proper traceback is now + reported. + - Issue #6478: _strptime's regexp cache now is reset after changing timezone with time.tzset(). -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Thu Dec 3 20:32:30 2015 From: python-checkins at python.org (guido.van.rossum) Date: Fri, 04 Dec 2015 01:32:30 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=283=2E5=29=3A_Add_Awaitable?= =?utf-8?q?=2C_AsyncIterable=2C_AsyncIterator_to_typing=2Epy=2E?= Message-ID: <20151204013230.29957.74324@psf.io> https://hg.python.org/cpython/rev/e9aeae1b2ea9 changeset: 99429:e9aeae1b2ea9 branch: 3.5 parent: 99426:5fa855d20624 user: Guido van Rossum date: Thu Dec 03 17:31:24 2015 -0800 summary: Add Awaitable, AsyncIterable, AsyncIterator to typing.py. 
files: Lib/test/test_typing.py | 61 +++++++++++++++++++++++++++++ Lib/typing.py | 15 +++++++ 2 files changed, 76 insertions(+), 0 deletions(-) diff --git a/Lib/test/test_typing.py b/Lib/test/test_typing.py --- a/Lib/test/test_typing.py +++ b/Lib/test/test_typing.py @@ -1,3 +1,4 @@ +import asyncio import pickle import re import sys @@ -960,6 +961,36 @@ pass +T_a = TypeVar('T') + + +class AwaitableWrapper(typing.Awaitable[T_a]): + + def __init__(self, value): + self.value = value + + def __await__(self) -> typing.Iterator[T_a]: + yield + return self.value + + +class AsyncIteratorWrapper(typing.AsyncIterator[T_a]): + + def __init__(self, value: typing.Iterable[T_a]): + self.value = value + + def __aiter__(self) -> typing.AsyncIterator[T_a]: + return self + + @asyncio.coroutine + def __anext__(self) -> T_a: + data = yield from self.value + if data: + return data + else: + raise StopAsyncIteration + + class CollectionsAbcTests(TestCase): def test_hashable(self): @@ -984,6 +1015,36 @@ assert isinstance(it, typing.Iterator[int]) assert not isinstance(42, typing.Iterator) + def test_awaitable(self): + async def foo() -> typing.Awaitable[int]: + return await AwaitableWrapper(42) + g = foo() + assert issubclass(type(g), typing.Awaitable[int]) + assert isinstance(g, typing.Awaitable) + assert not isinstance(foo, typing.Awaitable) + assert issubclass(typing.Awaitable[Manager], + typing.Awaitable[Employee]) + assert not issubclass(typing.Awaitable[Employee], + typing.Awaitable[Manager]) + g.send(None) # Run foo() till completion, to avoid warning. + + def test_async_iterable(self): + base_it = range(10) # type: Iterator[int] + it = AsyncIteratorWrapper(base_it) + assert isinstance(it, typing.AsyncIterable) + assert isinstance(it, typing.AsyncIterable) + assert issubclass(typing.AsyncIterable[Manager], + typing.AsyncIterable[Employee]) + assert not isinstance(42, typing.AsyncIterable) + + def test_async_iterator(self): + base_it = range(10) # type: Iterator[int] + it = AsyncIteratorWrapper(base_it) + assert isinstance(it, typing.AsyncIterator) + assert issubclass(typing.AsyncIterator[Manager], + typing.AsyncIterator[Employee]) + assert not isinstance(42, typing.AsyncIterator) + def test_sized(self): assert isinstance([], typing.Sized) assert not isinstance(42, typing.Sized) diff --git a/Lib/typing.py b/Lib/typing.py --- a/Lib/typing.py +++ b/Lib/typing.py @@ -28,6 +28,9 @@ # ABCs (from collections.abc). 'AbstractSet', # collections.abc.Set. + 'Awaitable', + 'AsyncIterator', + 'AsyncIterable', 'ByteString', 'Container', 'Hashable', @@ -1261,6 +1264,18 @@ Hashable = collections_abc.Hashable # Not generic. 
+class Awaitable(Generic[T_co], extra=collections_abc.Awaitable): + __slots__ = () + + +class AsyncIterable(Generic[T_co], extra=collections_abc.AsyncIterable): + __slots__ = () + + +class AsyncIterator(AsyncIterable[T_co], extra=collections_abc.AsyncIterator): + __slots__ = () + + class Iterable(Generic[T_co], extra=collections_abc.Iterable): __slots__ = () -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Thu Dec 3 20:32:30 2015 From: python-checkins at python.org (guido.van.rossum) Date: Fri, 04 Dec 2015 01:32:30 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E5_-=3E_default?= =?utf-8?q?=29=3A_Add_Awaitable=2C_AsyncIterable=2C_AsyncIterator_to_typin?= =?utf-8?b?Zy5weS4gKE1lcmdlIDMuNS0+My42KQ==?= Message-ID: <20151204013230.21508.38429@psf.io> https://hg.python.org/cpython/rev/ad855c779bf3 changeset: 99430:ad855c779bf3 parent: 99427:ea576db13827 parent: 99429:e9aeae1b2ea9 user: Guido van Rossum date: Thu Dec 03 17:32:05 2015 -0800 summary: Add Awaitable, AsyncIterable, AsyncIterator to typing.py. (Merge 3.5->3.6) files: Lib/test/test_typing.py | 61 +++++++++++++++++++++++++++++ Lib/typing.py | 15 +++++++ 2 files changed, 76 insertions(+), 0 deletions(-) diff --git a/Lib/test/test_typing.py b/Lib/test/test_typing.py --- a/Lib/test/test_typing.py +++ b/Lib/test/test_typing.py @@ -1,3 +1,4 @@ +import asyncio import pickle import re import sys @@ -960,6 +961,36 @@ pass +T_a = TypeVar('T') + + +class AwaitableWrapper(typing.Awaitable[T_a]): + + def __init__(self, value): + self.value = value + + def __await__(self) -> typing.Iterator[T_a]: + yield + return self.value + + +class AsyncIteratorWrapper(typing.AsyncIterator[T_a]): + + def __init__(self, value: typing.Iterable[T_a]): + self.value = value + + def __aiter__(self) -> typing.AsyncIterator[T_a]: + return self + + @asyncio.coroutine + def __anext__(self) -> T_a: + data = yield from self.value + if data: + return data + else: + raise StopAsyncIteration + + class CollectionsAbcTests(TestCase): def test_hashable(self): @@ -984,6 +1015,36 @@ assert isinstance(it, typing.Iterator[int]) assert not isinstance(42, typing.Iterator) + def test_awaitable(self): + async def foo() -> typing.Awaitable[int]: + return await AwaitableWrapper(42) + g = foo() + assert issubclass(type(g), typing.Awaitable[int]) + assert isinstance(g, typing.Awaitable) + assert not isinstance(foo, typing.Awaitable) + assert issubclass(typing.Awaitable[Manager], + typing.Awaitable[Employee]) + assert not issubclass(typing.Awaitable[Employee], + typing.Awaitable[Manager]) + g.send(None) # Run foo() till completion, to avoid warning. + + def test_async_iterable(self): + base_it = range(10) # type: Iterator[int] + it = AsyncIteratorWrapper(base_it) + assert isinstance(it, typing.AsyncIterable) + assert isinstance(it, typing.AsyncIterable) + assert issubclass(typing.AsyncIterable[Manager], + typing.AsyncIterable[Employee]) + assert not isinstance(42, typing.AsyncIterable) + + def test_async_iterator(self): + base_it = range(10) # type: Iterator[int] + it = AsyncIteratorWrapper(base_it) + assert isinstance(it, typing.AsyncIterator) + assert issubclass(typing.AsyncIterator[Manager], + typing.AsyncIterator[Employee]) + assert not isinstance(42, typing.AsyncIterator) + def test_sized(self): assert isinstance([], typing.Sized) assert not isinstance(42, typing.Sized) diff --git a/Lib/typing.py b/Lib/typing.py --- a/Lib/typing.py +++ b/Lib/typing.py @@ -28,6 +28,9 @@ # ABCs (from collections.abc). 'AbstractSet', # collections.abc.Set. 
+ 'Awaitable', + 'AsyncIterator', + 'AsyncIterable', 'ByteString', 'Container', 'Hashable', @@ -1261,6 +1264,18 @@ Hashable = collections_abc.Hashable # Not generic. +class Awaitable(Generic[T_co], extra=collections_abc.Awaitable): + __slots__ = () + + +class AsyncIterable(Generic[T_co], extra=collections_abc.AsyncIterable): + __slots__ = () + + +class AsyncIterator(AsyncIterable[T_co], extra=collections_abc.AsyncIterator): + __slots__ = () + + class Iterable(Generic[T_co], extra=collections_abc.Iterable): __slots__ = () -- Repository URL: https://hg.python.org/cpython From solipsis at pitrou.net Fri Dec 4 03:42:29 2015 From: solipsis at pitrou.net (solipsis at pitrou.net) Date: Fri, 04 Dec 2015 08:42:29 +0000 Subject: [Python-checkins] Daily reference leaks (ad855c779bf3): sum=4 Message-ID: <20151204084229.93628.73898@psf.io> results for ad855c779bf3 on branch "default" -------------------------------------------- test_functools leaked [0, 2, 2] memory blocks, sum=4 Command line was: ['./python', '-m', 'test.regrtest', '-uall', '-R', '3:3:/home/psf-users/antoine/refleaks/reflog8edhAb', '--timeout', '7200'] From lp_benchmark_robot at intel.com Fri Dec 4 09:32:45 2015 From: lp_benchmark_robot at intel.com (lp_benchmark_robot at intel.com) Date: Fri, 4 Dec 2015 14:32:45 +0000 Subject: [Python-checkins] Benchmark Results for Python Default 2015-12-04 Message-ID: <154b0371-5438-4d8c-8e21-2c3dd59fb1d9@irsmsx104.ger.corp.intel.com> Results for project Python default, build date 2015-12-04 04:02:09 +0000 commit: ad855c779bf33c59d176c23900ec0bc4fca3dfc6 revision date: 2015-12-04 01:32:05 +0000 environment: Haswell-EP cpu: Intel(R) Xeon(R) CPU E5-2699 v3 @ 2.30GHz 2x18 cores, stepping 2, LLC 45 MB mem: 128 GB os: CentOS 7.1 kernel: Linux 3.10.0-229.4.2.el7.x86_64 Baseline results were generated using release v3.4.3, with hash b4cbecbc0781e89a309d03b60a1f75f8499250e6 from 2015-02-25 12:15:33+00:00 ---------------------------------------------------------------------------------- benchmark relative change since change since current rev run std_dev* last run baseline with PGO ---------------------------------------------------------------------------------- :-) django_v2 0.35% -0.81% 9.28% 15.27% :-( pybench 0.19% -0.05% -2.11% 8.38% :-( regex_v8 2.72% 0.04% -3.64% 4.58% :-( nbody 0.10% -0.05% -3.14% 10.75% :-| json_dump_v2 0.20% -0.49% -1.58% 11.13% :-| normal_startup 1.01% -0.12% -0.14% 4.46% ---------------------------------------------------------------------------------- Note: Benchmark results are measured in seconds. * Relative Standard Deviation (Standard Deviation/Average) Our lab does a nightly source pull and build of the Python project and measures performance changes against the previous stable version and the previous nightly measurement. This is provided as a service to the community so that quality issues with current hardware can be identified quickly. Intel technologies' features and benefits depend on system configuration and may require enabled hardware, software or service activation. Performance varies depending on system configuration. 
From lp_benchmark_robot at intel.com Fri Dec 4 09:33:46 2015 From: lp_benchmark_robot at intel.com (lp_benchmark_robot at intel.com) Date: Fri, 4 Dec 2015 14:33:46 +0000 Subject: [Python-checkins] Benchmark Results for Python 2.7 2015-12-04 Message-ID: Results for project Python 2.7, build date 2015-12-04 04:49:17 +0000 commit: c4e950338e79f475278e4875e4a3a9d607ef7e10 revision date: 2015-12-03 01:23:10 +0000 environment: Haswell-EP cpu: Intel(R) Xeon(R) CPU E5-2699 v3 @ 2.30GHz 2x18 cores, stepping 2, LLC 45 MB mem: 128 GB os: CentOS 7.1 kernel: Linux 3.10.0-229.4.2.el7.x86_64 Baseline results were generated using release v2.7.10, with hash 15c95b7d81dcf821daade360741e00714667653f from 2015-05-23 16:02:14+00:00 ---------------------------------------------------------------------------------- benchmark relative change since change since current rev run std_dev* last run baseline with PGO ---------------------------------------------------------------------------------- :-) django_v2 0.34% 0.99% 2.59% 11.27% :-) pybench 0.21% 0.04% 6.12% 7.16% :-( regex_v8 1.39% -0.41% -3.76% 9.58% :-) nbody 0.79% -2.13% 7.58% 5.25% :-) json_dump_v2 0.29% -0.42% 2.33% 15.63% :-( normal_startup 2.01% -0.60% -2.07% 3.26% :-| ssbench 0.52% 0.07% 0.42% 1.96% ---------------------------------------------------------------------------------- Note: Benchmark results for ssbench are measured in requests/second while all other are measured in seconds. * Relative Standard Deviation (Standard Deviation/Average) Our lab does a nightly source pull and build of the Python project and measures performance changes against the previous stable version and the previous nightly measurement. This is provided as a service to the community so that quality issues with current hardware can be identified quickly. Intel technologies' features and benefits depend on system configuration and may require enabled hardware, software or service activation. Performance varies depending on system configuration. From python-checkins at python.org Fri Dec 4 17:52:13 2015 From: python-checkins at python.org (brett.cannon) Date: Fri, 04 Dec 2015 22:52:13 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E5_-=3E_default?= =?utf-8?q?=29=3A_Merge_for_issue_=2325500?= Message-ID: <20151204225213.21231.4683@psf.io> https://hg.python.org/cpython/rev/0259c2c555fb changeset: 99432:0259c2c555fb parent: 99430:ad855c779bf3 parent: 99431:567baf74ebad user: Brett Cannon date: Fri Dec 04 14:52:07 2015 -0800 summary: Merge for issue #25500 files: Doc/reference/import.rst | 9 ++++----- 1 files changed, 4 insertions(+), 5 deletions(-) diff --git a/Doc/reference/import.rst b/Doc/reference/import.rst --- a/Doc/reference/import.rst +++ b/Doc/reference/import.rst @@ -29,11 +29,10 @@ a name binding operation. When calling :func:`__import__` as part of an import statement, the -import system first checks the module global namespace for a function by -that name. If it is not found, then the standard builtin :func:`__import__` -is called. Other mechanisms for invoking the import system (such as -:func:`importlib.import_module`) do not perform this check and will always -use the standard import system. +standard builtin :func:`__import__` is called. Other mechanisms for +invoking the import system (such as :func:`importlib.import_module`) may +choose to subvert :func:`__import__` and use its own solution to +implement import semantics. 
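(Aside to make the distinction above concrete; a small sketch, not part of the patch. importlib.import_module() always goes through the standard import machinery, regardless of any __import__ binding a module may have shadowed in its globals.)

    import importlib

    # Uses the standard import system even if the caller rebinds __import__.
    json_mod = importlib.import_module('json')

    # Relative form: the name is resolved against "package" before importing.
    util = importlib.import_module('.util', package='importlib')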
When a module is first imported, Python searches for the module and if found, it creates a module object [#fnmo]_, initializing it. If the named module -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Fri Dec 4 17:52:15 2015 From: python-checkins at python.org (brett.cannon) Date: Fri, 04 Dec 2015 22:52:15 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy41KTogSXNzdWUgIzI1NTAw?= =?utf-8?q?=3A_Fix_the_language_reference_to_not_claim_that_import?= Message-ID: <20151204225213.60136.9699@psf.io> https://hg.python.org/cpython/rev/567baf74ebad changeset: 99431:567baf74ebad branch: 3.5 parent: 99429:e9aeae1b2ea9 user: Brett Cannon date: Fri Dec 04 14:51:26 2015 -0800 summary: Issue #25500: Fix the language reference to not claim that import statements search for __import__ in the global scope. Thanks to Sergei Lebedev for finding the documentation bug. files: Doc/reference/import.rst | 9 ++++----- Misc/NEWS | 6 ++++++ 2 files changed, 10 insertions(+), 5 deletions(-) diff --git a/Doc/reference/import.rst b/Doc/reference/import.rst --- a/Doc/reference/import.rst +++ b/Doc/reference/import.rst @@ -29,11 +29,10 @@ a name binding operation. When calling :func:`__import__` as part of an import statement, the -import system first checks the module global namespace for a function by -that name. If it is not found, then the standard builtin :func:`__import__` -is called. Other mechanisms for invoking the import system (such as -:func:`importlib.import_module`) do not perform this check and will always -use the standard import system. +standard builtin :func:`__import__` is called. Other mechanisms for +invoking the import system (such as :func:`importlib.import_module`) may +choose to subvert :func:`__import__` and use its own solution to +implement import semantics. When a module is first imported, Python searches for the module and if found, it creates a module object [#fnmo]_, initializing it. If the named module diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -54,6 +54,12 @@ - Issue #25624: ZipFile now always writes a ZIP_STORED header for directory entries. Patch by Dingyuan Wang. +Documentation +------------- + +- Issue #25500: Fix documentation to not claim that __import__ is searched for + in the global scope. + Tests ----- -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Fri Dec 4 18:20:04 2015 From: python-checkins at python.org (brett.cannon) Date: Fri, 04 Dec 2015 23:20:04 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Issue_=2325771=3A_Tweak_Va?= =?utf-8?q?lueError_message_when_package_isn=27t_specified?= Message-ID: <20151204231950.60142.38521@psf.io> https://hg.python.org/cpython/rev/b3a0765671d6 changeset: 99433:b3a0765671d6 user: Brett Cannon date: Fri Dec 04 15:19:42 2015 -0800 summary: Issue #25771: Tweak ValueError message when package isn't specified for importlib.util.resolve_name() but is needed. Thanks to Martin Panter for the bug report. 
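(Aside showing the call whose error message is being reworded; a hedged sketch with made-up package names, relying only on the documented behaviour of resolve_name().)

    from importlib.util import resolve_name

    resolve_name('json.decoder', package=None)    # absolute names pass through
    resolve_name('..helpers', package='pkg.sub')  # -> 'pkg.helpers'

    try:
        # No package given for a relative name: the case this message covers.
        resolve_name('.helpers', package=None)
    except ValueError as err:
        print(err)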
files: Lib/importlib/util.py | 4 ++-- Misc/NEWS | 3 +++ 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/Lib/importlib/util.py b/Lib/importlib/util.py --- a/Lib/importlib/util.py +++ b/Lib/importlib/util.py @@ -22,8 +22,8 @@ if not name.startswith('.'): return name elif not package: - raise ValueError('{!r} is not a relative name ' - '(no leading dot)'.format(name)) + raise ValueError(f'no package specified for {repr(name)} ' + '(required for relative module names)') level = 0 for character in name: if character != '.': diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -109,6 +109,9 @@ Library ------- +- Issue #25771: Tweak the exception message for importlib.util.resolve_name() + when 'package' isn't specified but necessary. + - Issue #6478: _strptime's regexp cache now is reset after changing timezone with time.tzset(). -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Fri Dec 4 18:46:48 2015 From: python-checkins at python.org (brett.cannon) Date: Fri, 04 Dec 2015 23:46:48 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E5_-=3E_default?= =?utf-8?q?=29=3A_Merge_for_issue_=2323936?= Message-ID: <20151204234648.29617.99973@psf.io> https://hg.python.org/cpython/rev/1b1900d2a537 changeset: 99435:1b1900d2a537 parent: 99433:b3a0765671d6 parent: 99434:88cee7d16ccb user: Brett Cannon date: Fri Dec 04 15:46:43 2015 -0800 summary: Merge for issue #23936 files: Doc/glossary.rst | 22 ++++++++++++++++------ Doc/library/sys.rst | 33 +++++++++++++++++++++++---------- 2 files changed, 39 insertions(+), 16 deletions(-) diff --git a/Doc/glossary.rst b/Doc/glossary.rst --- a/Doc/glossary.rst +++ b/Doc/glossary.rst @@ -308,10 +308,14 @@ A synonym for :term:`file object`. finder - An object that tries to find the :term:`loader` for a module. It must - implement either a method named :meth:`find_loader` or a method named - :meth:`find_module`. See :pep:`302` and :pep:`420` for details and - :class:`importlib.abc.Finder` for an :term:`abstract base class`. + An object that tries to find the :term:`loader` for a module that is + being imported. + + Since Python 3.3, there are two types of finder: :term:`meta path finders + ` for use with :data:`sys.meta_path`, and :term:`path + entry finders ` for use with :data:`sys.path_hooks`. + + See :pep:`302`, :pep:`420` and :pep:`451` for much more detail. floor division Mathematical division that rounds down to nearest integer. The floor @@ -593,10 +597,13 @@ :class:`collections.OrderedDict` and :class:`collections.Counter`. meta path finder - A finder returned by a search of :data:`sys.meta_path`. Meta path + A :term:`finder` returned by a search of :data:`sys.meta_path`. Meta path finders are related to, but different from :term:`path entry finders `. + See :class:`importlib.abc.MetaPathFinder` for the methods that meta path + finders implement. + metaclass The class of a class. Class definitions create a class name, a class dictionary, and a list of base classes. The metaclass is responsible for @@ -630,7 +637,7 @@ module spec A namespace containing the import-related information used to load a - module. + module. An instance of :class:`importlib.machinery.ModuleSpec`. MRO See :term:`method resolution order`. @@ -757,6 +764,9 @@ (i.e. a :term:`path entry hook`) which knows how to locate modules given a :term:`path entry`. + See :class:`importlib.abc.PathEntryFinder` for the methods that path entry + finders implement. 
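(Aside illustrating the glossary terms with a toy meta path finder; NoisyFinder is hypothetical and only observes imports, handling none of them.)

    import importlib.abc
    import sys

    class NoisyFinder(importlib.abc.MetaPathFinder):
        # find_spec() returns a ModuleSpec, or None to let later finders try.
        def find_spec(self, fullname, path, target=None):
            print('looking for', fullname, 'with path', path)
            return None

    sys.meta_path.insert(0, NoisyFinder())
    import email.message   # fires for any module not already imported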
+ path entry hook A callable on the :data:`sys.path_hook` list which returns a :term:`path entry finder` if it knows how to find modules on a specific :term:`path diff --git a/Doc/library/sys.rst b/Doc/library/sys.rst --- a/Doc/library/sys.rst +++ b/Doc/library/sys.rst @@ -779,19 +779,32 @@ .. data:: meta_path - A list of :term:`finder` objects that have their :meth:`find_module` - methods called to see if one of the objects can find the module to be - imported. The :meth:`find_module` method is called at least with the - absolute name of the module being imported. If the module to be imported is - contained in package then the parent package's :attr:`__path__` attribute - is passed in as a second argument. The method returns ``None`` if - the module cannot be found, else returns a :term:`loader`. + A list of :term:`meta path finder` objects that have their + :meth:`~importlib.abc.MetaPathFinder.find_spec` methods called to see if one + of the objects can find the module to be imported. The + :meth:`~importlib.abc.MetaPathFinder.find_spec` method is called with at + least the absolute name of the module being imported. If the module to be + imported is contained in a package, then the parent package's :attr:`__path__` + attribute is passed in as a second argument. The method returns a + :term:`module spec`, or ``None`` if the module cannot be found. - :data:`sys.meta_path` is searched before any implicit default finders or - :data:`sys.path`. + .. seealso:: - See :pep:`302` for the original specification. + :class:`importlib.abc.MetaPathFinder` + The abstract base class defining the interface of finder objects on + :data:`meta_path`. + :class:`importlib.machinery.ModuleSpec` + The concrete class which + :meth:`~importlib.abc.MetaPathFinder.find_spec` should return + instances of. + .. versionchanged:: 3.4 + + :term:`Module specs ` were introduced in Python 3.4, by + :pep:`451`. Earlier versions of Python looked for a method called + :meth:`~importlib.abc.MetaPathFinder.find_module`. + This is still called as a fallback if a :data:`meta_path` entry doesn't + have a :meth:`~importlib.abc.MetaPathFinder.find_spec` method. .. data:: modules -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Fri Dec 4 18:46:48 2015 From: python-checkins at python.org (brett.cannon) Date: Fri, 04 Dec 2015 23:46:48 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy41KTogSXNzdWUgIzIzOTM2?= =?utf-8?q?=3A_Clarify_what_finders_are=2E?= Message-ID: <20151204234647.29961.31679@psf.io> https://hg.python.org/cpython/rev/88cee7d16ccb changeset: 99434:88cee7d16ccb branch: 3.5 parent: 99431:567baf74ebad user: Brett Cannon date: Fri Dec 04 15:46:21 2015 -0800 summary: Issue #23936: Clarify what finders are. Thanks to Ra?l Cumplido for the bug report and Thomas Kluyver for the patch. files: Doc/glossary.rst | 22 ++++++++++++++++------ Doc/library/sys.rst | 33 +++++++++++++++++++++++---------- 2 files changed, 39 insertions(+), 16 deletions(-) diff --git a/Doc/glossary.rst b/Doc/glossary.rst --- a/Doc/glossary.rst +++ b/Doc/glossary.rst @@ -308,10 +308,14 @@ A synonym for :term:`file object`. finder - An object that tries to find the :term:`loader` for a module. It must - implement either a method named :meth:`find_loader` or a method named - :meth:`find_module`. See :pep:`302` and :pep:`420` for details and - :class:`importlib.abc.Finder` for an :term:`abstract base class`. + An object that tries to find the :term:`loader` for a module that is + being imported. 
+ + Since Python 3.3, there are two types of finder: :term:`meta path finders + ` for use with :data:`sys.meta_path`, and :term:`path + entry finders ` for use with :data:`sys.path_hooks`. + + See :pep:`302`, :pep:`420` and :pep:`451` for much more detail. floor division Mathematical division that rounds down to nearest integer. The floor @@ -593,10 +597,13 @@ :class:`collections.OrderedDict` and :class:`collections.Counter`. meta path finder - A finder returned by a search of :data:`sys.meta_path`. Meta path + A :term:`finder` returned by a search of :data:`sys.meta_path`. Meta path finders are related to, but different from :term:`path entry finders `. + See :class:`importlib.abc.MetaPathFinder` for the methods that meta path + finders implement. + metaclass The class of a class. Class definitions create a class name, a class dictionary, and a list of base classes. The metaclass is responsible for @@ -630,7 +637,7 @@ module spec A namespace containing the import-related information used to load a - module. + module. An instance of :class:`importlib.machinery.ModuleSpec`. MRO See :term:`method resolution order`. @@ -757,6 +764,9 @@ (i.e. a :term:`path entry hook`) which knows how to locate modules given a :term:`path entry`. + See :class:`importlib.abc.PathEntryFinder` for the methods that path entry + finders implement. + path entry hook A callable on the :data:`sys.path_hook` list which returns a :term:`path entry finder` if it knows how to find modules on a specific :term:`path diff --git a/Doc/library/sys.rst b/Doc/library/sys.rst --- a/Doc/library/sys.rst +++ b/Doc/library/sys.rst @@ -774,19 +774,32 @@ .. data:: meta_path - A list of :term:`finder` objects that have their :meth:`find_module` - methods called to see if one of the objects can find the module to be - imported. The :meth:`find_module` method is called at least with the - absolute name of the module being imported. If the module to be imported is - contained in package then the parent package's :attr:`__path__` attribute - is passed in as a second argument. The method returns ``None`` if - the module cannot be found, else returns a :term:`loader`. + A list of :term:`meta path finder` objects that have their + :meth:`~importlib.abc.MetaPathFinder.find_spec` methods called to see if one + of the objects can find the module to be imported. The + :meth:`~importlib.abc.MetaPathFinder.find_spec` method is called with at + least the absolute name of the module being imported. If the module to be + imported is contained in a package, then the parent package's :attr:`__path__` + attribute is passed in as a second argument. The method returns a + :term:`module spec`, or ``None`` if the module cannot be found. - :data:`sys.meta_path` is searched before any implicit default finders or - :data:`sys.path`. + .. seealso:: - See :pep:`302` for the original specification. + :class:`importlib.abc.MetaPathFinder` + The abstract base class defining the interface of finder objects on + :data:`meta_path`. + :class:`importlib.machinery.ModuleSpec` + The concrete class which + :meth:`~importlib.abc.MetaPathFinder.find_spec` should return + instances of. + .. versionchanged:: 3.4 + + :term:`Module specs ` were introduced in Python 3.4, by + :pep:`451`. Earlier versions of Python looked for a method called + :meth:`~importlib.abc.MetaPathFinder.find_module`. + This is still called as a fallback if a :data:`meta_path` entry doesn't + have a :meth:`~importlib.abc.MetaPathFinder.find_spec` method. .. 
data:: modules -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Fri Dec 4 21:35:35 2015 From: python-checkins at python.org (martin.panter) Date: Sat, 05 Dec 2015 02:35:35 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogSXNzdWUgIzI1NzY0?= =?utf-8?q?=3A_Preserve_subprocess_fork_exception_when_preexec=5Ffn_used?= Message-ID: <20151205023535.33655.40098@psf.io> https://hg.python.org/cpython/rev/4f4e2cbd2138 changeset: 99436:4f4e2cbd2138 branch: 3.4 parent: 99425:4b0a4da1aa27 user: Martin Panter date: Mon Nov 30 02:21:41 2015 +0000 summary: Issue #25764: Preserve subprocess fork exception when preexec_fn used Also fix handling of failure to release the import lock. files: Lib/test/test_subprocess.py | 16 +++++++++++ Misc/NEWS | 3 ++ Modules/_posixsubprocess.c | 35 ++++++++++++------------ 3 files changed, 37 insertions(+), 17 deletions(-) diff --git a/Lib/test/test_subprocess.py b/Lib/test/test_subprocess.py --- a/Lib/test/test_subprocess.py +++ b/Lib/test/test_subprocess.py @@ -1416,6 +1416,22 @@ if not enabled: gc.disable() + def test_preexec_fork_failure(self): + # The internal code did not preserve the previous exception when + # re-enabling garbage collection + try: + from resource import getrlimit, setrlimit, RLIMIT_NPROC + except ImportError as err: + self.skipTest(err) # RLIMIT_NPROC is specific to Linux and BSD + limits = getrlimit(RLIMIT_NPROC) + [_, hard] = limits + setrlimit(RLIMIT_NPROC, (0, hard)) + self.addCleanup(setrlimit, RLIMIT_NPROC, limits) + # Forking should raise EAGAIN, translated to BlockingIOError + with self.assertRaises(BlockingIOError): + subprocess.call([sys.executable, '-c', ''], + preexec_fn=lambda: None) + def test_args_string(self): # args is a string fd, fname = tempfile.mkstemp() diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -115,6 +115,9 @@ Library ------- +- Issue #25764: In the subprocess module, preserve any exception caused by + fork() failure when preexec_fn is used. + - Issue #6478: _strptime's regexp cache now is reset after changing timezone with time.tzset(). diff --git a/Modules/_posixsubprocess.c b/Modules/_posixsubprocess.c --- a/Modules/_posixsubprocess.c +++ b/Modules/_posixsubprocess.c @@ -47,17 +47,25 @@ #define POSIX_CALL(call) do { if ((call) == -1) goto error; } while (0) -/* Given the gc module call gc.enable() and return 0 on success. */ +/* If gc was disabled, call gc.enable(). Return 0 on success. */ static int -_enable_gc(PyObject *gc_module) +_enable_gc(int need_to_reenable_gc, PyObject *gc_module) { PyObject *result; _Py_IDENTIFIER(enable); + PyObject *exctype, *val, *tb; - result = _PyObject_CallMethodId(gc_module, &PyId_enable, NULL); - if (result == NULL) - return 1; - Py_DECREF(result); + if (need_to_reenable_gc) { + PyErr_Fetch(&exctype, &val, &tb); + result = _PyObject_CallMethodId(gc_module, &PyId_enable, NULL); + if (exctype != NULL) { + PyErr_Restore(exctype, val, tb); + } + if (result == NULL) { + return 1; + } + Py_DECREF(result); + } return 0; } @@ -691,6 +699,7 @@ _PyImport_ReleaseLock() < 0 && !PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "not holding the import lock"); + pid = -1; } import_lock_held = 0; @@ -702,9 +711,8 @@ _Py_FreeCharPArray(exec_array); /* Reenable gc in the parent process (or if fork failed). 
*/ - if (need_to_reenable_gc && _enable_gc(gc_module)) { - Py_XDECREF(gc_module); - return NULL; + if (_enable_gc(need_to_reenable_gc, gc_module)) { + pid = -1; } Py_XDECREF(preexec_fn_args_tuple); Py_XDECREF(gc_module); @@ -726,14 +734,7 @@ Py_XDECREF(converted_args); Py_XDECREF(fast_args); Py_XDECREF(preexec_fn_args_tuple); - - /* Reenable gc if it was disabled. */ - if (need_to_reenable_gc) { - PyObject *exctype, *val, *tb; - PyErr_Fetch(&exctype, &val, &tb); - _enable_gc(gc_module); - PyErr_Restore(exctype, val, tb); - } + _enable_gc(need_to_reenable_gc, gc_module); Py_XDECREF(gc_module); return NULL; } -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Fri Dec 4 21:35:35 2015 From: python-checkins at python.org (martin.panter) Date: Sat, 05 Dec 2015 02:35:35 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAobWVyZ2UgMy40IC0+IDMuNSk6?= =?utf-8?q?_Issue_=2325764=3A_Merge_subprocess_fix_from_3=2E4_into_3=2E5?= Message-ID: <20151205023535.61827.8920@psf.io> https://hg.python.org/cpython/rev/ae27ad306dbf changeset: 99437:ae27ad306dbf branch: 3.5 parent: 99434:88cee7d16ccb parent: 99436:4f4e2cbd2138 user: Martin Panter date: Sat Dec 05 02:03:42 2015 +0000 summary: Issue #25764: Merge subprocess fix from 3.4 into 3.5 files: Lib/test/test_subprocess.py | 16 +++++++++++ Misc/NEWS | 3 ++ Modules/_posixsubprocess.c | 35 ++++++++++++------------ 3 files changed, 37 insertions(+), 17 deletions(-) diff --git a/Lib/test/test_subprocess.py b/Lib/test/test_subprocess.py --- a/Lib/test/test_subprocess.py +++ b/Lib/test/test_subprocess.py @@ -1512,6 +1512,22 @@ if not enabled: gc.disable() + def test_preexec_fork_failure(self): + # The internal code did not preserve the previous exception when + # re-enabling garbage collection + try: + from resource import getrlimit, setrlimit, RLIMIT_NPROC + except ImportError as err: + self.skipTest(err) # RLIMIT_NPROC is specific to Linux and BSD + limits = getrlimit(RLIMIT_NPROC) + [_, hard] = limits + setrlimit(RLIMIT_NPROC, (0, hard)) + self.addCleanup(setrlimit, RLIMIT_NPROC, limits) + # Forking should raise EAGAIN, translated to BlockingIOError + with self.assertRaises(BlockingIOError): + subprocess.call([sys.executable, '-c', ''], + preexec_fn=lambda: None) + def test_args_string(self): # args is a string fd, fname = tempfile.mkstemp() diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -22,6 +22,9 @@ Library ------- +- Issue #25764: In the subprocess module, preserve any exception caused by + fork() failure when preexec_fn is used. + - Issue #6478: _strptime's regexp cache now is reset after changing timezone with time.tzset(). diff --git a/Modules/_posixsubprocess.c b/Modules/_posixsubprocess.c --- a/Modules/_posixsubprocess.c +++ b/Modules/_posixsubprocess.c @@ -47,17 +47,25 @@ #define POSIX_CALL(call) do { if ((call) == -1) goto error; } while (0) -/* Given the gc module call gc.enable() and return 0 on success. */ +/* If gc was disabled, call gc.enable(). Return 0 on success. 
*/ static int -_enable_gc(PyObject *gc_module) +_enable_gc(int need_to_reenable_gc, PyObject *gc_module) { PyObject *result; _Py_IDENTIFIER(enable); + PyObject *exctype, *val, *tb; - result = _PyObject_CallMethodId(gc_module, &PyId_enable, NULL); - if (result == NULL) - return 1; - Py_DECREF(result); + if (need_to_reenable_gc) { + PyErr_Fetch(&exctype, &val, &tb); + result = _PyObject_CallMethodId(gc_module, &PyId_enable, NULL); + if (exctype != NULL) { + PyErr_Restore(exctype, val, tb); + } + if (result == NULL) { + return 1; + } + Py_DECREF(result); + } return 0; } @@ -698,6 +706,7 @@ && _PyImport_ReleaseLock() < 0 && !PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "not holding the import lock"); + pid = -1; } import_lock_held = 0; #endif @@ -710,9 +719,8 @@ _Py_FreeCharPArray(exec_array); /* Reenable gc in the parent process (or if fork failed). */ - if (need_to_reenable_gc && _enable_gc(gc_module)) { - Py_XDECREF(gc_module); - return NULL; + if (_enable_gc(need_to_reenable_gc, gc_module)) { + pid = -1; } Py_XDECREF(preexec_fn_args_tuple); Py_XDECREF(gc_module); @@ -736,14 +744,7 @@ Py_XDECREF(converted_args); Py_XDECREF(fast_args); Py_XDECREF(preexec_fn_args_tuple); - - /* Reenable gc if it was disabled. */ - if (need_to_reenable_gc) { - PyObject *exctype, *val, *tb; - PyErr_Fetch(&exctype, &val, &tb); - _enable_gc(gc_module); - PyErr_Restore(exctype, val, tb); - } + _enable_gc(need_to_reenable_gc, gc_module); Py_XDECREF(gc_module); return NULL; } -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Fri Dec 4 21:35:35 2015 From: python-checkins at python.org (martin.panter) Date: Sat, 05 Dec 2015 02:35:35 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E5_-=3E_default?= =?utf-8?q?=29=3A_Issue_=2325764=3A_Merge_subprocess_fix_from_3=2E5?= Message-ID: <20151205023535.105477.14377@psf.io> https://hg.python.org/cpython/rev/b10c58a740b9 changeset: 99438:b10c58a740b9 parent: 99435:1b1900d2a537 parent: 99437:ae27ad306dbf user: Martin Panter date: Sat Dec 05 02:27:58 2015 +0000 summary: Issue #25764: Merge subprocess fix from 3.5 files: Lib/test/test_subprocess.py | 16 +++++++++++ Misc/NEWS | 3 ++ Modules/_posixsubprocess.c | 35 ++++++++++++------------ 3 files changed, 37 insertions(+), 17 deletions(-) diff --git a/Lib/test/test_subprocess.py b/Lib/test/test_subprocess.py --- a/Lib/test/test_subprocess.py +++ b/Lib/test/test_subprocess.py @@ -1512,6 +1512,22 @@ if not enabled: gc.disable() + def test_preexec_fork_failure(self): + # The internal code did not preserve the previous exception when + # re-enabling garbage collection + try: + from resource import getrlimit, setrlimit, RLIMIT_NPROC + except ImportError as err: + self.skipTest(err) # RLIMIT_NPROC is specific to Linux and BSD + limits = getrlimit(RLIMIT_NPROC) + [_, hard] = limits + setrlimit(RLIMIT_NPROC, (0, hard)) + self.addCleanup(setrlimit, RLIMIT_NPROC, limits) + # Forking should raise EAGAIN, translated to BlockingIOError + with self.assertRaises(BlockingIOError): + subprocess.call([sys.executable, '-c', ''], + preexec_fn=lambda: None) + def test_args_string(self): # args is a string fd, fname = tempfile.mkstemp() diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -109,6 +109,9 @@ Library ------- +- Issue #25764: In the subprocess module, preserve any exception caused by + fork() failure when preexec_fn is used. + - Issue #25771: Tweak the exception message for importlib.util.resolve_name() when 'package' isn't specified but necessary. 
diff --git a/Modules/_posixsubprocess.c b/Modules/_posixsubprocess.c --- a/Modules/_posixsubprocess.c +++ b/Modules/_posixsubprocess.c @@ -47,17 +47,25 @@ #define POSIX_CALL(call) do { if ((call) == -1) goto error; } while (0) -/* Given the gc module call gc.enable() and return 0 on success. */ +/* If gc was disabled, call gc.enable(). Return 0 on success. */ static int -_enable_gc(PyObject *gc_module) +_enable_gc(int need_to_reenable_gc, PyObject *gc_module) { PyObject *result; _Py_IDENTIFIER(enable); + PyObject *exctype, *val, *tb; - result = _PyObject_CallMethodId(gc_module, &PyId_enable, NULL); - if (result == NULL) - return 1; - Py_DECREF(result); + if (need_to_reenable_gc) { + PyErr_Fetch(&exctype, &val, &tb); + result = _PyObject_CallMethodId(gc_module, &PyId_enable, NULL); + if (exctype != NULL) { + PyErr_Restore(exctype, val, tb); + } + if (result == NULL) { + return 1; + } + Py_DECREF(result); + } return 0; } @@ -698,6 +706,7 @@ && _PyImport_ReleaseLock() < 0 && !PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "not holding the import lock"); + pid = -1; } import_lock_held = 0; #endif @@ -710,9 +719,8 @@ _Py_FreeCharPArray(exec_array); /* Reenable gc in the parent process (or if fork failed). */ - if (need_to_reenable_gc && _enable_gc(gc_module)) { - Py_XDECREF(gc_module); - return NULL; + if (_enable_gc(need_to_reenable_gc, gc_module)) { + pid = -1; } Py_XDECREF(preexec_fn_args_tuple); Py_XDECREF(gc_module); @@ -736,14 +744,7 @@ Py_XDECREF(converted_args); Py_XDECREF(fast_args); Py_XDECREF(preexec_fn_args_tuple); - - /* Reenable gc if it was disabled. */ - if (need_to_reenable_gc) { - PyObject *exctype, *val, *tb; - PyErr_Fetch(&exctype, &val, &tb); - _enable_gc(gc_module); - PyErr_Restore(exctype, val, tb); - } + _enable_gc(need_to_reenable_gc, gc_module); Py_XDECREF(gc_module); return NULL; } -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Fri Dec 4 23:05:45 2015 From: python-checkins at python.org (r.david.murray) Date: Sat, 05 Dec 2015 04:05:45 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAobWVyZ2UgMy40IC0+IDMuNSk6?= =?utf-8?q?_Merge=3A_=2324903=3A_Remove_misleading_error_message_to_fix_re?= =?utf-8?q?gression=2E?= Message-ID: <20151205040545.29603.55949@psf.io> https://hg.python.org/cpython/rev/c65e135df1dc changeset: 99440:c65e135df1dc branch: 3.5 parent: 99437:ae27ad306dbf parent: 99439:b63fd82a8528 user: R David Murray date: Fri Dec 04 23:04:37 2015 -0500 summary: Merge: #24903: Remove misleading error message to fix regression. 
files: Lib/compileall.py | 3 --- Lib/test/test_compileall.py | 8 -------- Misc/ACKS | 1 + Misc/NEWS | 4 ++++ 4 files changed, 5 insertions(+), 11 deletions(-) diff --git a/Lib/compileall.py b/Lib/compileall.py --- a/Lib/compileall.py +++ b/Lib/compileall.py @@ -238,9 +238,6 @@ args = parser.parse_args() compile_dests = args.compile_dest - if (args.ddir and (len(compile_dests) != 1 - or not os.path.isdir(compile_dests[0]))): - parser.exit('-d destdir requires exactly one directory argument') if args.rx: import re args.rx = re.compile(args.rx) diff --git a/Lib/test/test_compileall.py b/Lib/test/test_compileall.py --- a/Lib/test/test_compileall.py +++ b/Lib/test/test_compileall.py @@ -398,14 +398,6 @@ self.assertCompiled(init2fn) self.assertCompiled(bar2fn) - def test_d_takes_exactly_one_dir(self): - rc, out, err = self.assertRunNotOK('-d', 'foo') - self.assertEqual(out, b'') - self.assertRegex(err, b'-d') - rc, out, err = self.assertRunNotOK('-d', 'foo', 'bar') - self.assertEqual(out, b'') - self.assertRegex(err, b'-d') - def test_d_compile_error(self): script_helper.make_script(self.pkgdir, 'crunchyfrog', 'bad(syntax') rc, out, err = self.assertRunNotOK('-q', '-d', 'dinsdale', self.pkgdir) diff --git a/Misc/ACKS b/Misc/ACKS --- a/Misc/ACKS +++ b/Misc/ACKS @@ -477,6 +477,7 @@ Nitin Ganatra Fred Gansevles Lars Marius Garshol +Jake Garver Dan Gass Andrew Gaul Matthieu Gautier diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -22,6 +22,10 @@ Library ------- +- Issue #24903: Fix regression in number of arguments compileall accepts when + '-d' is specified. The check on the number of arguments has been dropped + completely as it never worked correctly anyway. + - Issue #25764: In the subprocess module, preserve any exception caused by fork() failure when preexec_fn is used. -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Fri Dec 4 23:05:46 2015 From: python-checkins at python.org (r.david.murray) Date: Sat, 05 Dec 2015 04:05:46 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E5_-=3E_default?= =?utf-8?q?=29=3A_Merge=3A_=2324903=3A_Remove_misleading_error_message_to_?= =?utf-8?q?fix_regression=2E?= Message-ID: <20151205040545.105493.61058@psf.io> https://hg.python.org/cpython/rev/1e5aacddb67d changeset: 99441:1e5aacddb67d parent: 99438:b10c58a740b9 parent: 99440:c65e135df1dc user: R David Murray date: Fri Dec 04 23:05:20 2015 -0500 summary: Merge: #24903: Remove misleading error message to fix regression. 
files: Lib/compileall.py | 3 --- Lib/test/test_compileall.py | 8 -------- Misc/ACKS | 1 + Misc/NEWS | 4 ++++ 4 files changed, 5 insertions(+), 11 deletions(-) diff --git a/Lib/compileall.py b/Lib/compileall.py --- a/Lib/compileall.py +++ b/Lib/compileall.py @@ -238,9 +238,6 @@ args = parser.parse_args() compile_dests = args.compile_dest - if (args.ddir and (len(compile_dests) != 1 - or not os.path.isdir(compile_dests[0]))): - parser.exit('-d destdir requires exactly one directory argument') if args.rx: import re args.rx = re.compile(args.rx) diff --git a/Lib/test/test_compileall.py b/Lib/test/test_compileall.py --- a/Lib/test/test_compileall.py +++ b/Lib/test/test_compileall.py @@ -398,14 +398,6 @@ self.assertCompiled(init2fn) self.assertCompiled(bar2fn) - def test_d_takes_exactly_one_dir(self): - rc, out, err = self.assertRunNotOK('-d', 'foo') - self.assertEqual(out, b'') - self.assertRegex(err, b'-d') - rc, out, err = self.assertRunNotOK('-d', 'foo', 'bar') - self.assertEqual(out, b'') - self.assertRegex(err, b'-d') - def test_d_compile_error(self): script_helper.make_script(self.pkgdir, 'crunchyfrog', 'bad(syntax') rc, out, err = self.assertRunNotOK('-q', '-d', 'dinsdale', self.pkgdir) diff --git a/Misc/ACKS b/Misc/ACKS --- a/Misc/ACKS +++ b/Misc/ACKS @@ -478,6 +478,7 @@ Nitin Ganatra Fred Gansevles Lars Marius Garshol +Jake Garver Dan Gass Andrew Gaul Matthieu Gautier diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -109,6 +109,10 @@ Library ------- +- Issue #24903: Fix regression in number of arguments compileall accepts when + '-d' is specified. The check on the number of arguments has been dropped + completely as it never worked correctly anyway. + - Issue #25764: In the subprocess module, preserve any exception caused by fork() failure when preexec_fn is used. -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Fri Dec 4 23:05:46 2015 From: python-checkins at python.org (r.david.murray) Date: Sat, 05 Dec 2015 04:05:46 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogIzI0OTAzOiBSZW1v?= =?utf-8?q?ve_misleading_error_message_to_fix_regression=2E?= Message-ID: <20151205040544.61833.24508@psf.io> https://hg.python.org/cpython/rev/b63fd82a8528 changeset: 99439:b63fd82a8528 branch: 3.4 parent: 99436:4f4e2cbd2138 user: R David Murray date: Fri Dec 04 22:54:38 2015 -0500 summary: #24903: Remove misleading error message to fix regression. Before the argparse conversion, compileall would (sometimes) accept multiple paths when -d was specified. Afterward, it does not. The corresponding check in the original code claimed to prevent multiple *directories* from being specified...but it didn't really work even to do that. So this patch fixes the regression by invoking the consenting adults rule: if you specify a combination of arguments to compileall that produces files with inconsistent destdirs (which you could do before), it is on you. Patch by Jake Garver. 
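(Aside: what the restored behaviour looks like in practice; the directory and script names below are hypothetical.)

    # Accepted again now that the extra argument check is gone; each target is
    # byte-compiled with "destdir" recorded as the directory shown in tracebacks:
    #
    #   python -m compileall -d destdir pkg_one pkg_two standalone.py
    #
    # The library-level equivalent of -d is the ddir parameter:
    import compileall

    compileall.compile_dir('pkg_one', ddir='destdir', quiet=True)
    compileall.compile_file('standalone.py', ddir='destdir', quiet=True)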
files: Lib/compileall.py | 3 --- Lib/test/test_compileall.py | 8 -------- Misc/ACKS | 1 + Misc/NEWS | 4 ++++ 4 files changed, 5 insertions(+), 11 deletions(-) diff --git a/Lib/compileall.py b/Lib/compileall.py --- a/Lib/compileall.py +++ b/Lib/compileall.py @@ -196,9 +196,6 @@ compile_dests = args.compile_dest - if (args.ddir and (len(compile_dests) != 1 - or not os.path.isdir(compile_dests[0]))): - parser.exit('-d destdir requires exactly one directory argument') if args.rx: import re args.rx = re.compile(args.rx) diff --git a/Lib/test/test_compileall.py b/Lib/test/test_compileall.py --- a/Lib/test/test_compileall.py +++ b/Lib/test/test_compileall.py @@ -323,14 +323,6 @@ self.assertCompiled(init2fn) self.assertCompiled(bar2fn) - def test_d_takes_exactly_one_dir(self): - rc, out, err = self.assertRunNotOK('-d', 'foo') - self.assertEqual(out, b'') - self.assertRegex(err, b'-d') - rc, out, err = self.assertRunNotOK('-d', 'foo', 'bar') - self.assertEqual(out, b'') - self.assertRegex(err, b'-d') - def test_d_compile_error(self): script_helper.make_script(self.pkgdir, 'crunchyfrog', 'bad(syntax') rc, out, err = self.assertRunNotOK('-q', '-d', 'dinsdale', self.pkgdir) diff --git a/Misc/ACKS b/Misc/ACKS --- a/Misc/ACKS +++ b/Misc/ACKS @@ -470,6 +470,7 @@ Nitin Ganatra Fred Gansevles Lars Marius Garshol +Jake Garver Dan Gass Andrew Gaul Matthieu Gautier diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -115,6 +115,10 @@ Library ------- +- Issue #24903: Fix regression in number of arguments compileall accepts when + '-d' is specified. The check on the number of arguments has been dropped + completely as it never worked correctly anyway. + - Issue #25764: In the subprocess module, preserve any exception caused by fork() failure when preexec_fn is used. 
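(Aside: the failure mode covered by that NEWS entry, reduced to a standalone snippet that mirrors the new regression test; Linux/BSD only, since RLIMIT_NPROC is not available everywhere, and note the follow-up commits below about OS X buildbots.)

    import resource
    import subprocess
    import sys

    soft, hard = resource.getrlimit(resource.RLIMIT_NPROC)
    resource.setrlimit(resource.RLIMIT_NPROC, (0, hard))   # make fork() fail
    try:
        subprocess.call([sys.executable, '-c', ''], preexec_fn=lambda: None)
    except BlockingIOError as err:
        # With the fix, the EAGAIN from fork() surfaces here instead of being
        # masked by the cleanup that re-enables garbage collection.
        print('fork failed as expected:', err)
    finally:
        resource.setrlimit(resource.RLIMIT_NPROC, (soft, hard))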
-- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Fri Dec 4 23:18:11 2015 From: python-checkins at python.org (martin.panter) Date: Sat, 05 Dec 2015 04:18:11 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Issue_=2325764=3A_Attempt_?= =?utf-8?q?to_debug_and_skip_OS_X_setrlimit=28=29_failure?= Message-ID: <20151205041810.61843.61976@psf.io> https://hg.python.org/cpython/rev/f53958873fae changeset: 99442:f53958873fae user: Martin Panter date: Sat Dec 05 04:16:45 2015 +0000 summary: Issue #25764: Attempt to debug and skip OS X setrlimit() failure files: Lib/test/test_subprocess.py | 8 +++++++- 1 files changed, 7 insertions(+), 1 deletions(-) diff --git a/Lib/test/test_subprocess.py b/Lib/test/test_subprocess.py --- a/Lib/test/test_subprocess.py +++ b/Lib/test/test_subprocess.py @@ -1516,10 +1516,16 @@ # The internal code did not preserve the previous exception when # re-enabling garbage collection try: - from resource import getrlimit, setrlimit, RLIMIT_NPROC + from resource import getrlimit, setrlimit, RLIMIT_NPROC, RLIM_INFINITY except ImportError as err: self.skipTest(err) # RLIMIT_NPROC is specific to Linux and BSD limits = getrlimit(RLIMIT_NPROC) + try: + setrlimit(RLIMIT_NPROC, limits) + except ValueError as err: + # Seems to happen on AMD64 Snow Leop and x86-64 Yosemite buildbots + print(f"Setting NPROC to {limits!r}: {err!r}, RLIM_INFINITY={RLIM_INFINITY!r}") + self.skipTest("Setting existing NPROC limit failed") [_, hard] = limits setrlimit(RLIMIT_NPROC, (0, hard)) self.addCleanup(setrlimit, RLIMIT_NPROC, limits) -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Dec 5 00:34:17 2015 From: python-checkins at python.org (zach.ware) Date: Sat, 05 Dec 2015 05:34:17 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy41KTogSXNzdWUgIzI1Nzk1?= =?utf-8?q?=3A_Fix_several_tests_to_run_independently=2E?= Message-ID: <20151205053417.29983.72213@psf.io> https://hg.python.org/cpython/rev/a54ee6a65f10 changeset: 99443:a54ee6a65f10 branch: 3.5 parent: 99440:c65e135df1dc user: Zachary Ware date: Fri Dec 04 23:32:23 2015 -0600 summary: Issue #25795: Fix several tests to run independently. These were broken in 3aec776fc796 when they were converted away from using support.run_unittest(). Oops :) Initial patch by Felippe da Motta Raposo. 
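(Aside: the pattern those missing imports broke, in miniature; DemoTest is a made-up example module.)

    import unittest

    class DemoTest(unittest.TestCase):
        def test_sanity(self):
            self.assertEqual(1 + 1, 2)

    if __name__ == '__main__':
        # Without the module-level "import unittest", running the file
        # directly fails with NameError before any test executes.
        unittest.main()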
files: Lib/test/test_fork1.py | 1 + Lib/test/test_list.py | 1 + Lib/test/test_pyclbr.py | 4 ++-- Lib/test/test_telnetlib.py | 10 +++++----- Lib/test/test_tuple.py | 1 + Lib/test/test_userdict.py | 1 + Lib/test/test_userlist.py | 1 + Lib/test/test_wait4.py | 1 + 8 files changed, 13 insertions(+), 7 deletions(-) diff --git a/Lib/test/test_fork1.py b/Lib/test/test_fork1.py --- a/Lib/test/test_fork1.py +++ b/Lib/test/test_fork1.py @@ -6,6 +6,7 @@ import signal import sys import time +import unittest from test.fork_wait import ForkWait from test.support import (reap_children, get_attribute, diff --git a/Lib/test/test_list.py b/Lib/test/test_list.py --- a/Lib/test/test_list.py +++ b/Lib/test/test_list.py @@ -1,6 +1,7 @@ import sys from test import support, list_tests import pickle +import unittest class ListTest(list_tests.CommonTest): type2test = list diff --git a/Lib/test/test_pyclbr.py b/Lib/test/test_pyclbr.py --- a/Lib/test/test_pyclbr.py +++ b/Lib/test/test_pyclbr.py @@ -5,7 +5,7 @@ import sys from types import FunctionType, MethodType, BuiltinFunctionType import pyclbr -from unittest import TestCase +from unittest import TestCase, main as unittest_main StaticMethodType = type(staticmethod(lambda: None)) ClassMethodType = type(classmethod(lambda c: None)) @@ -173,4 +173,4 @@ if __name__ == "__main__": - unittest.main() + unittest_main() diff --git a/Lib/test/test_telnetlib.py b/Lib/test/test_telnetlib.py --- a/Lib/test/test_telnetlib.py +++ b/Lib/test/test_telnetlib.py @@ -4,8 +4,8 @@ import time import contextlib -from unittest import TestCase from test import support +import unittest threading = support.import_module('threading') HOST = support.HOST @@ -21,7 +21,7 @@ finally: serv.close() -class GeneralTests(TestCase): +class GeneralTests(unittest.TestCase): def setUp(self): self.evt = threading.Event() @@ -165,7 +165,7 @@ telnet._messages = '' # debuglevel output return telnet -class ExpectAndReadTestCase(TestCase): +class ExpectAndReadTestCase(unittest.TestCase): def setUp(self): self.old_selector = telnetlib._TelnetSelector telnetlib._TelnetSelector = MockSelector @@ -284,7 +284,7 @@ tl = telnetlib -class WriteTests(TestCase): +class WriteTests(unittest.TestCase): '''The only thing that write does is replace each tl.IAC for tl.IAC+tl.IAC''' @@ -300,7 +300,7 @@ written = b''.join(telnet.sock.writes) self.assertEqual(data.replace(tl.IAC,tl.IAC+tl.IAC), written) -class OptionTests(TestCase): +class OptionTests(unittest.TestCase): # RFC 854 commands cmds = [tl.AO, tl.AYT, tl.BRK, tl.EC, tl.EL, tl.GA, tl.IP, tl.NOP] diff --git a/Lib/test/test_tuple.py b/Lib/test/test_tuple.py --- a/Lib/test/test_tuple.py +++ b/Lib/test/test_tuple.py @@ -1,4 +1,5 @@ from test import support, seq_tests +import unittest import gc import pickle diff --git a/Lib/test/test_userdict.py b/Lib/test/test_userdict.py --- a/Lib/test/test_userdict.py +++ b/Lib/test/test_userdict.py @@ -1,6 +1,7 @@ # Check every path through every method of UserDict from test import support, mapping_tests +import unittest import collections d0 = {} diff --git a/Lib/test/test_userlist.py b/Lib/test/test_userlist.py --- a/Lib/test/test_userlist.py +++ b/Lib/test/test_userlist.py @@ -2,6 +2,7 @@ from collections import UserList from test import support, list_tests +import unittest class UserListTest(list_tests.CommonTest): type2test = UserList diff --git a/Lib/test/test_wait4.py b/Lib/test/test_wait4.py --- a/Lib/test/test_wait4.py +++ b/Lib/test/test_wait4.py @@ -4,6 +4,7 @@ import os import time import sys +import unittest from 
test.fork_wait import ForkWait from test.support import reap_children, get_attribute -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Dec 5 00:34:17 2015 From: python-checkins at python.org (zach.ware) Date: Sat, 05 Dec 2015 05:34:17 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E5_-=3E_default?= =?utf-8?q?=29=3A_Closes_=2325795=3A_Merge_with_3=2E5?= Message-ID: <20151205053417.21233.69288@psf.io> https://hg.python.org/cpython/rev/401459dca320 changeset: 99444:401459dca320 parent: 99442:f53958873fae parent: 99443:a54ee6a65f10 user: Zachary Ware date: Fri Dec 04 23:33:59 2015 -0600 summary: Closes #25795: Merge with 3.5 files: Lib/test/test_fork1.py | 1 + Lib/test/test_list.py | 1 + Lib/test/test_pyclbr.py | 4 ++-- Lib/test/test_telnetlib.py | 10 +++++----- Lib/test/test_tuple.py | 1 + Lib/test/test_userdict.py | 1 + Lib/test/test_userlist.py | 1 + Lib/test/test_wait4.py | 1 + 8 files changed, 13 insertions(+), 7 deletions(-) diff --git a/Lib/test/test_fork1.py b/Lib/test/test_fork1.py --- a/Lib/test/test_fork1.py +++ b/Lib/test/test_fork1.py @@ -6,6 +6,7 @@ import signal import sys import time +import unittest from test.fork_wait import ForkWait from test.support import (reap_children, get_attribute, diff --git a/Lib/test/test_list.py b/Lib/test/test_list.py --- a/Lib/test/test_list.py +++ b/Lib/test/test_list.py @@ -1,6 +1,7 @@ import sys from test import support, list_tests import pickle +import unittest class ListTest(list_tests.CommonTest): type2test = list diff --git a/Lib/test/test_pyclbr.py b/Lib/test/test_pyclbr.py --- a/Lib/test/test_pyclbr.py +++ b/Lib/test/test_pyclbr.py @@ -5,7 +5,7 @@ import sys from types import FunctionType, MethodType, BuiltinFunctionType import pyclbr -from unittest import TestCase +from unittest import TestCase, main as unittest_main StaticMethodType = type(staticmethod(lambda: None)) ClassMethodType = type(classmethod(lambda c: None)) @@ -173,4 +173,4 @@ if __name__ == "__main__": - unittest.main() + unittest_main() diff --git a/Lib/test/test_telnetlib.py b/Lib/test/test_telnetlib.py --- a/Lib/test/test_telnetlib.py +++ b/Lib/test/test_telnetlib.py @@ -4,8 +4,8 @@ import time import contextlib -from unittest import TestCase from test import support +import unittest threading = support.import_module('threading') HOST = support.HOST @@ -21,7 +21,7 @@ finally: serv.close() -class GeneralTests(TestCase): +class GeneralTests(unittest.TestCase): def setUp(self): self.evt = threading.Event() @@ -170,7 +170,7 @@ telnet._messages = '' # debuglevel output return telnet -class ExpectAndReadTestCase(TestCase): +class ExpectAndReadTestCase(unittest.TestCase): def setUp(self): self.old_selector = telnetlib._TelnetSelector telnetlib._TelnetSelector = MockSelector @@ -289,7 +289,7 @@ tl = telnetlib -class WriteTests(TestCase): +class WriteTests(unittest.TestCase): '''The only thing that write does is replace each tl.IAC for tl.IAC+tl.IAC''' @@ -305,7 +305,7 @@ written = b''.join(telnet.sock.writes) self.assertEqual(data.replace(tl.IAC,tl.IAC+tl.IAC), written) -class OptionTests(TestCase): +class OptionTests(unittest.TestCase): # RFC 854 commands cmds = [tl.AO, tl.AYT, tl.BRK, tl.EC, tl.EL, tl.GA, tl.IP, tl.NOP] diff --git a/Lib/test/test_tuple.py b/Lib/test/test_tuple.py --- a/Lib/test/test_tuple.py +++ b/Lib/test/test_tuple.py @@ -1,4 +1,5 @@ from test import support, seq_tests +import unittest import gc import pickle diff --git a/Lib/test/test_userdict.py b/Lib/test/test_userdict.py --- 
a/Lib/test/test_userdict.py +++ b/Lib/test/test_userdict.py @@ -1,6 +1,7 @@ # Check every path through every method of UserDict from test import support, mapping_tests +import unittest import collections d0 = {} diff --git a/Lib/test/test_userlist.py b/Lib/test/test_userlist.py --- a/Lib/test/test_userlist.py +++ b/Lib/test/test_userlist.py @@ -2,6 +2,7 @@ from collections import UserList from test import support, list_tests +import unittest class UserListTest(list_tests.CommonTest): type2test = UserList diff --git a/Lib/test/test_wait4.py b/Lib/test/test_wait4.py --- a/Lib/test/test_wait4.py +++ b/Lib/test/test_wait4.py @@ -4,6 +4,7 @@ import os import time import sys +import unittest from test.fork_wait import ForkWait from test.support import reap_children, get_attribute -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Dec 5 00:48:03 2015 From: python-checkins at python.org (martin.panter) Date: Sat, 05 Dec 2015 05:48:03 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Issue_=2325764=3A_OS_X_now?= =?utf-8?q?_failing_on_the_second_setrlimit=28=29_call?= Message-ID: <20151205054803.4096.76217@psf.io> https://hg.python.org/cpython/rev/ccf42cdffc6d changeset: 99445:ccf42cdffc6d user: Martin Panter date: Sat Dec 05 05:42:18 2015 +0000 summary: Issue #25764: OS X now failing on the second setrlimit() call files: Lib/test/test_subprocess.py | 13 ++++++++----- 1 files changed, 8 insertions(+), 5 deletions(-) diff --git a/Lib/test/test_subprocess.py b/Lib/test/test_subprocess.py --- a/Lib/test/test_subprocess.py +++ b/Lib/test/test_subprocess.py @@ -1520,14 +1520,17 @@ except ImportError as err: self.skipTest(err) # RLIMIT_NPROC is specific to Linux and BSD limits = getrlimit(RLIMIT_NPROC) + [_, hard] = limits try: setrlimit(RLIMIT_NPROC, limits) + setrlimit(RLIMIT_NPROC, (0, hard)) except ValueError as err: - # Seems to happen on AMD64 Snow Leop and x86-64 Yosemite buildbots - print(f"Setting NPROC to {limits!r}: {err!r}, RLIM_INFINITY={RLIM_INFINITY!r}") - self.skipTest("Setting existing NPROC limit failed") - [_, hard] = limits - setrlimit(RLIMIT_NPROC, (0, hard)) + # Seems to happen on various OS X buildbots + print( + f"Setting NPROC failed: {err!r}, limits={limits!r}, " + f"RLIM_INFINITY={RLIM_INFINITY!r}, " + f"getrlimit() -> {getrlimit(RLIMIT_NPROC)!r}") + self.skipTest("Setting NPROC limit failed") self.addCleanup(setrlimit, RLIMIT_NPROC, limits) # Forking should raise EAGAIN, translated to BlockingIOError with self.assertRaises(BlockingIOError): -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Dec 5 01:19:35 2015 From: python-checkins at python.org (zach.ware) Date: Sat, 05 Dec 2015 06:19:35 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogSXNzdWUgIzI1ODAw?= =?utf-8?q?=3A_Fix_running_test=5Fcapi_directly?= Message-ID: <20151205061935.21247.24813@psf.io> https://hg.python.org/cpython/rev/fae5186562d0 changeset: 99446:fae5186562d0 branch: 3.4 parent: 99439:b63fd82a8528 user: Zachary Ware date: Sat Dec 05 00:16:55 2015 -0600 summary: Issue #25800: Fix running test_capi directly files: Lib/test/test_capi.py | 3 ++- 1 files changed, 2 insertions(+), 1 deletions(-) diff --git a/Lib/test/test_capi.py b/Lib/test/test_capi.py --- a/Lib/test/test_capi.py +++ b/Lib/test/test_capi.py @@ -257,7 +257,8 @@ class EmbeddingTests(unittest.TestCase): def setUp(self): - basepath = os.path.dirname(os.path.dirname(os.path.dirname(__file__))) + here = os.path.abspath(__file__) + basepath = 
os.path.dirname(os.path.dirname(os.path.dirname(here))) exename = "_testembed" if sys.platform.startswith("win"): ext = ("_d" if "_d" in sys.executable else "") + ".exe" -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Dec 5 01:19:36 2015 From: python-checkins at python.org (zach.ware) Date: Sat, 05 Dec 2015 06:19:36 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAobWVyZ2UgMy40IC0+IDMuNSk6?= =?utf-8?q?_Issue_=2325800=3A_Merge_with_3=2E4?= Message-ID: <20151205061935.105481.1654@psf.io> https://hg.python.org/cpython/rev/de4108db61f7 changeset: 99447:de4108db61f7 branch: 3.5 parent: 99443:a54ee6a65f10 parent: 99446:fae5186562d0 user: Zachary Ware date: Sat Dec 05 00:18:29 2015 -0600 summary: Issue #25800: Merge with 3.4 files: Lib/test/test_capi.py | 3 ++- 1 files changed, 2 insertions(+), 1 deletions(-) diff --git a/Lib/test/test_capi.py b/Lib/test/test_capi.py --- a/Lib/test/test_capi.py +++ b/Lib/test/test_capi.py @@ -343,7 +343,8 @@ class EmbeddingTests(unittest.TestCase): def setUp(self): - basepath = os.path.dirname(os.path.dirname(os.path.dirname(__file__))) + here = os.path.abspath(__file__) + basepath = os.path.dirname(os.path.dirname(os.path.dirname(here))) exename = "_testembed" if sys.platform.startswith("win"): ext = ("_d" if "_d" in sys.executable else "") + ".exe" -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Dec 5 01:19:37 2015 From: python-checkins at python.org (zach.ware) Date: Sat, 05 Dec 2015 06:19:37 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E5_-=3E_default?= =?utf-8?q?=29=3A_Closes_=2325800=3A_Merge_with_3=2E5?= Message-ID: <20151205061935.33661.28299@psf.io> https://hg.python.org/cpython/rev/bf3a7373e11a changeset: 99448:bf3a7373e11a parent: 99445:ccf42cdffc6d parent: 99447:de4108db61f7 user: Zachary Ware date: Sat Dec 05 00:19:08 2015 -0600 summary: Closes #25800: Merge with 3.5 files: Lib/test/test_capi.py | 3 ++- 1 files changed, 2 insertions(+), 1 deletions(-) diff --git a/Lib/test/test_capi.py b/Lib/test/test_capi.py --- a/Lib/test/test_capi.py +++ b/Lib/test/test_capi.py @@ -343,7 +343,8 @@ class EmbeddingTests(unittest.TestCase): def setUp(self): - basepath = os.path.dirname(os.path.dirname(os.path.dirname(__file__))) + here = os.path.abspath(__file__) + basepath = os.path.dirname(os.path.dirname(os.path.dirname(here))) exename = "_testembed" if sys.platform.startswith("win"): ext = ("_d" if "_d" in sys.executable else "") + ".exe" -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Dec 5 03:24:30 2015 From: python-checkins at python.org (benjamin.peterson) Date: Sat, 05 Dec 2015 08:24:30 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=282=2E7=29=3A_add_CVE_and_is?= =?utf-8?q?sue_number?= Message-ID: <20151205082430.60140.20073@psf.io> https://hg.python.org/cpython/rev/b2d64aff7225 changeset: 99449:b2d64aff7225 branch: 2.7 parent: 99292:43208b0f2535 user: Benjamin Peterson date: Sat Dec 05 00:17:57 2015 -0800 summary: add CVE and issue number files: Misc/NEWS | 5 +++-- 1 files changed, 3 insertions(+), 2 deletions(-) diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -1354,8 +1354,9 @@ - Issue #21349: Passing a memoryview to _winreg.SetValueEx now correctly raises a TypeError where it previously crashed the interpreter. Patch by Brian Kearns -- Fix arbitrary memory access in JSONDecoder.raw_decode with a negative second - parameter. Bug reported by Guido Vranken. 
+- Issue #21529 (CVE-2014-4616): Fix arbitrary memory access in + JSONDecoder.raw_decode with a negative second parameter. Bug reported by Guido + Vranken. - Issue #21172: isinstance check relaxed from dict to collections.Mapping. -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Dec 5 03:24:30 2015 From: python-checkins at python.org (benjamin.peterson) Date: Sat, 05 Dec 2015 08:24:30 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAobWVyZ2UgMy4yIC0+IDMuMyk6?= =?utf-8?q?_merge_3=2E2?= Message-ID: <20151205082430.21512.20541@psf.io> https://hg.python.org/cpython/rev/2932933afbe1 changeset: 99452:2932933afbe1 branch: 3.3 parent: 97944:8cc052c28910 parent: 99451:36be496963d1 user: Benjamin Peterson date: Sat Dec 05 00:21:12 2015 -0800 summary: merge 3.2 files: Misc/NEWS | 7 ++++--- 1 files changed, 4 insertions(+), 3 deletions(-) diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -47,6 +47,10 @@ - Issue #23365: Fixed possible integer overflow in itertools.combinations_with_replacement. +- Issue #21529 (CVE-2014-4616): Fix arbitrary memory access in + JSONDecoder.raw_decode with a negative second parameter. Bug reported by Guido + Vranken. + C API ----- @@ -97,9 +101,6 @@ - Issue #21766: Prevent a security hole in CGIHTTPServer by URL unquoting paths before checking for a CGI script at that path. -- Fix arbitrary memory access in JSONDecoder.raw_decode with a negative second - parameter. Bug reported by Guido Vranken. - - Issue #20633: Replace relative import by absolute import. - Issue #21082: In os.makedirs, do not set the process-wide umask. Note this -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Dec 5 03:24:30 2015 From: python-checkins at python.org (benjamin.peterson) Date: Sat, 05 Dec 2015 08:24:30 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAobWVyZ2UgMi43IC0+IDIuNyk6?= =?utf-8?q?_merge_2=2E7=2E11_branch?= Message-ID: <20151205082430.29597.44182@psf.io> https://hg.python.org/cpython/rev/a3aa22a55db3 changeset: 99450:a3aa22a55db3 branch: 2.7 parent: 99428:c4e950338e79 parent: 99449:b2d64aff7225 user: Benjamin Peterson date: Sat Dec 05 00:18:11 2015 -0800 summary: merge 2.7.11 branch files: Misc/NEWS | 5 +++-- 1 files changed, 3 insertions(+), 2 deletions(-) diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -1406,8 +1406,9 @@ - Issue #21349: Passing a memoryview to _winreg.SetValueEx now correctly raises a TypeError where it previously crashed the interpreter. Patch by Brian Kearns -- Fix arbitrary memory access in JSONDecoder.raw_decode with a negative second - parameter. Bug reported by Guido Vranken. +- Issue #21529 (CVE-2014-4616): Fix arbitrary memory access in + JSONDecoder.raw_decode with a negative second parameter. Bug reported by Guido + Vranken. - Issue #21172: isinstance check relaxed from dict to collections.Mapping. 
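For readers skimming these NEWS renames: the entries refer to json.JSONDecoder.raw_decode(s, idx=0), which decodes one JSON document starting at position idx and returns an (object, end_index) pair. A short illustrative sketch (Python 3; this snippet is not part of any patch in this batch):

    import json

    dec = json.JSONDecoder()
    # raw_decode() is what json.loads() builds on; the second argument says
    # where in the string to start scanning.
    obj, end = dec.raw_decode('{"spam": 1} trailing text', 0)
    print(obj, end)   # {'spam': 1} 11

    # CVE-2014-4616 / Issue #21529: before the fix, the C scanner accepted a
    # negative idx and indexed before the start of the string buffer, which
    # could expose adjacent process memory.  Patched interpreters reject a
    # negative starting index instead of reading out of bounds.

Note that the changesets above only touch the Misc/NEWS wording (adding the issue and CVE numbers); the behaviour change itself landed in earlier releases.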
-- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Dec 5 03:24:30 2015 From: python-checkins at python.org (benjamin.peterson) Date: Sat, 05 Dec 2015 08:24:30 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=283=2E2=29=3A_add_CVE_and_is?= =?utf-8?q?sue_number?= Message-ID: <20151205082430.33659.73182@psf.io> https://hg.python.org/cpython/rev/36be496963d1 changeset: 99451:36be496963d1 branch: 3.2 parent: 96289:0cb037bb0f9a user: Benjamin Peterson date: Sat Dec 05 00:17:57 2015 -0800 summary: add CVE and issue number files: Misc/NEWS | 5 +++-- 1 files changed, 3 insertions(+), 2 deletions(-) diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -99,8 +99,9 @@ - Issue #21766: Prevent a security hole in CGIHTTPServer by URL unquoting paths before checking for a CGI script at that path. -- Fix arbitrary memory access in JSONDecoder.raw_decode with a negative second - parameter. Bug reported by Guido Vranken. +- Issue #21529 (CVE-2014-4616): Fix arbitrary memory access in + JSONDecoder.raw_decode with a negative second parameter. Bug reported by Guido + Vranken. - Issue #21082: In os.makedirs, do not set the process-wide umask. Note this changes behavior of makedirs when exist_ok=True. -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Dec 5 03:24:30 2015 From: python-checkins at python.org (benjamin.peterson) Date: Sat, 05 Dec 2015 08:24:30 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAobWVyZ2UgMy4zIC0+IDMuNCk6?= =?utf-8?q?_merge_3=2E3?= Message-ID: <20151205082430.61845.41640@psf.io> https://hg.python.org/cpython/rev/693d0b030626 changeset: 99453:693d0b030626 branch: 3.4 parent: 99446:fae5186562d0 parent: 99452:2932933afbe1 user: Benjamin Peterson date: Sat Dec 05 00:23:11 2015 -0800 summary: merge 3.3 files: Misc/NEWS | 5 +++-- 1 files changed, 3 insertions(+), 2 deletions(-) diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -1841,8 +1841,9 @@ - Issue #20968: unittest.mock.MagicMock now supports division. Patch by Johannes Baiter. -- Fix arbitrary memory access in JSONDecoder.raw_decode with a negative second - parameter. Bug reported by Guido Vranken. +- Issue #21529 (CVE-2014-4616): Fix arbitrary memory access in + JSONDecoder.raw_decode with a negative second parameter. Bug reported by Guido + Vranken. - Issue #21169: getpass now handles non-ascii characters that the input stream encoding cannot encode by re-encoding using the -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Dec 5 03:24:31 2015 From: python-checkins at python.org (benjamin.peterson) Date: Sat, 05 Dec 2015 08:24:31 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAobWVyZ2UgMy40IC0+IDMuNSk6?= =?utf-8?q?_merge_3=2E4?= Message-ID: <20151205082431.93636.27407@psf.io> https://hg.python.org/cpython/rev/7984aea3a5e2 changeset: 99454:7984aea3a5e2 branch: 3.5 parent: 99447:de4108db61f7 parent: 99453:693d0b030626 user: Benjamin Peterson date: Sat Dec 05 00:24:01 2015 -0800 summary: merge 3.4 files: Misc/NEWS | 5 +++-- 1 files changed, 3 insertions(+), 2 deletions(-) diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -3062,8 +3062,9 @@ - Issue #20968: unittest.mock.MagicMock now supports division. Patch by Johannes Baiter. -- Fix arbitrary memory access in JSONDecoder.raw_decode with a negative second - parameter. Bug reported by Guido Vranken. 
+- Issue #21529 (CVE-2014-4616): Fix arbitrary memory access in + JSONDecoder.raw_decode with a negative second parameter. Bug reported by Guido + Vranken. - Issue #21169: getpass now handles non-ascii characters that the input stream encoding cannot encode by re-encoding using the -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Dec 5 03:24:31 2015 From: python-checkins at python.org (benjamin.peterson) Date: Sat, 05 Dec 2015 08:24:31 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E5_-=3E_default?= =?utf-8?b?KTogbWVyZ2UgMy41?= Message-ID: <20151205082431.21510.30972@psf.io> https://hg.python.org/cpython/rev/227c1082b6bf changeset: 99455:227c1082b6bf parent: 99448:bf3a7373e11a parent: 99454:7984aea3a5e2 user: Benjamin Peterson date: Sat Dec 05 00:24:10 2015 -0800 summary: merge 3.5 files: Misc/NEWS | 5 +++-- 1 files changed, 3 insertions(+), 2 deletions(-) diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -3483,8 +3483,9 @@ - Issue #20968: unittest.mock.MagicMock now supports division. Patch by Johannes Baiter. -- Fix arbitrary memory access in JSONDecoder.raw_decode with a negative second - parameter. Bug reported by Guido Vranken. +- Issue #21529 (CVE-2014-4616): Fix arbitrary memory access in + JSONDecoder.raw_decode with a negative second parameter. Bug reported by Guido + Vranken. - Issue #21169: getpass now handles non-ascii characters that the input stream encoding cannot encode by re-encoding using the -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Dec 5 03:27:51 2015 From: python-checkins at python.org (benjamin.peterson) Date: Sat, 05 Dec 2015 08:27:51 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAobWVyZ2UgMy40IC0+IDMuNSk6?= =?utf-8?q?_merge_3=2E4?= Message-ID: <20151205082751.21243.94369@psf.io> https://hg.python.org/cpython/rev/ec2aa1d6d955 changeset: 99458:ec2aa1d6d955 branch: 3.5 parent: 99454:7984aea3a5e2 parent: 99457:cf86a04d68e7 user: Benjamin Peterson date: Sat Dec 05 00:27:33 2015 -0800 summary: merge 3.4 files: -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Dec 5 03:27:51 2015 From: python-checkins at python.org (benjamin.peterson) Date: Sat, 05 Dec 2015 08:27:51 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAobWVyZ2UgMy4zIC0+IDMuNCk6?= =?utf-8?q?_merge_3=2E3?= Message-ID: <20151205082751.21504.70083@psf.io> https://hg.python.org/cpython/rev/cf86a04d68e7 changeset: 99457:cf86a04d68e7 branch: 3.4 parent: 99453:693d0b030626 parent: 99456:39a709f1ca3a user: Benjamin Peterson date: Sat Dec 05 00:27:23 2015 -0800 summary: merge 3.3 files: -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Dec 5 03:27:51 2015 From: python-checkins at python.org (benjamin.peterson) Date: Sat, 05 Dec 2015 08:27:51 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=283=2E3=29=3A_fix_reordering?= Message-ID: <20151205082751.93646.11388@psf.io> https://hg.python.org/cpython/rev/39a709f1ca3a changeset: 99456:39a709f1ca3a branch: 3.3 parent: 99452:2932933afbe1 user: Benjamin Peterson date: Sat Dec 05 00:27:11 2015 -0800 summary: fix reordering files: Misc/NEWS | 144 +++++++++++++++++++++--------------------- 1 files changed, 72 insertions(+), 72 deletions(-) diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -47,82 +47,82 @@ - Issue #23365: Fixed possible integer overflow in itertools.combinations_with_replacement. 
+C API +----- + +- Issue #23998: PyImport_ReInitLock() now checks for lock allocation error + + +What's New in Python 3.3.6? +=========================== + +*Release date: 11-Oct-2014* + +Core and Builtins +----------------- + +- Issue #22643: Fix integer overflow in Unicode case operations (upper, lower, + title, swapcase, casefold). + +- Issue #22518: Fixed integer overflow issues in "backslashreplace", + "xmlcharrefreplace", and "surrogatepass" error handlers. + +- Issue #22520: Fix overflow checking when generating the repr of a unicode + object. + +- Issue #22519: Fix overflow checking in PyBytes_Repr. + +- Issue #22518: Fix integer overflow issues in latin-1 encoding. + +- Issue #23165: Perform overflow checks before allocating memory in the + _Py_char2wchar function. + +Library +------- + +- Issue #16043: Add a default limit for the amount of data xmlrpclib.gzip_decode + will return. This resolves CVE-2013-1753. + +- Issue #22517: When a io.BufferedRWPair object is deallocated, clear its + weakrefs. + +- Issue #22419: Limit the length of incoming HTTP request in wsgiref server to + 65536 bytes and send a 414 error code for higher lengths. Patch contributed + by Devin Cook. + +- Lax cookie parsing in http.cookies could be a security issue when combined + with non-standard cookie handling in some Web browsers. Reported by + Sergey Bobrov. + +- Issue #21766: Prevent a security hole in CGIHTTPServer by URL unquoting paths + before checking for a CGI script at that path. + +- Issue #20633: Replace relative import by absolute import. + +- Issue #21082: In os.makedirs, do not set the process-wide umask. Note this + changes behavior of makedirs when exist_ok=True. + +- Issue #20875: Prevent possible gzip "'read' is not defined" NameError. + Patch by Claudiu Popa. + +- Issue #11599: When an external command (e.g. compiler) fails, distutils now + prints out the whole command line (instead of just the command name) if the + environment variable DISTUTILS_DEBUG is set. + +- Issue #4931: distutils should not produce unhelpful "error: None" messages + anymore. distutils.util.grok_environment_error is kept but doc-deprecated. + +- Issue #20283: RE pattern methods now accept the string keyword parameters + as documented. The pattern and source keyword parameters are left as + deprecated aliases. + +- Issue #21323: Fix http.server to again handle scripts in CGI subdirectories, + broken by the fix for security issue #19435. Patch by Zach Byrne. + - Issue #21529 (CVE-2014-4616): Fix arbitrary memory access in JSONDecoder.raw_decode with a negative second parameter. Bug reported by Guido Vranken. -C API ------ - -- Issue #23998: PyImport_ReInitLock() now checks for lock allocation error - - -What's New in Python 3.3.6? -=========================== - -*Release date: 11-Oct-2014* - -Core and Builtins ------------------ - -- Issue #22643: Fix integer overflow in Unicode case operations (upper, lower, - title, swapcase, casefold). - -- Issue #22518: Fixed integer overflow issues in "backslashreplace", - "xmlcharrefreplace", and "surrogatepass" error handlers. - -- Issue #22520: Fix overflow checking when generating the repr of a unicode - object. - -- Issue #22519: Fix overflow checking in PyBytes_Repr. - -- Issue #22518: Fix integer overflow issues in latin-1 encoding. - -- Issue #23165: Perform overflow checks before allocating memory in the - _Py_char2wchar function. - -Library -------- - -- Issue #16043: Add a default limit for the amount of data xmlrpclib.gzip_decode - will return. 
This resolves CVE-2013-1753. - -- Issue #22517: When a io.BufferedRWPair object is deallocated, clear its - weakrefs. - -- Issue #22419: Limit the length of incoming HTTP request in wsgiref server to - 65536 bytes and send a 414 error code for higher lengths. Patch contributed - by Devin Cook. - -- Lax cookie parsing in http.cookies could be a security issue when combined - with non-standard cookie handling in some Web browsers. Reported by - Sergey Bobrov. - -- Issue #21766: Prevent a security hole in CGIHTTPServer by URL unquoting paths - before checking for a CGI script at that path. - -- Issue #20633: Replace relative import by absolute import. - -- Issue #21082: In os.makedirs, do not set the process-wide umask. Note this - changes behavior of makedirs when exist_ok=True. - -- Issue #20875: Prevent possible gzip "'read' is not defined" NameError. - Patch by Claudiu Popa. - -- Issue #11599: When an external command (e.g. compiler) fails, distutils now - prints out the whole command line (instead of just the command name) if the - environment variable DISTUTILS_DEBUG is set. - -- Issue #4931: distutils should not produce unhelpful "error: None" messages - anymore. distutils.util.grok_environment_error is kept but doc-deprecated. - -- Issue #20283: RE pattern methods now accept the string keyword parameters - as documented. The pattern and source keyword parameters are left as - deprecated aliases. - -- Issue #21323: Fix http.server to again handle scripts in CGI subdirectories, - broken by the fix for security issue #19435. Patch by Zach Byrne. - Tests ----- -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Dec 5 03:27:51 2015 From: python-checkins at python.org (benjamin.peterson) Date: Sat, 05 Dec 2015 08:27:51 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E5_-=3E_default?= =?utf-8?b?KTogbWVyZ2UgMy41?= Message-ID: <20151205082751.33659.79355@psf.io> https://hg.python.org/cpython/rev/4c50ffbcde5c changeset: 99459:4c50ffbcde5c parent: 99455:227c1082b6bf parent: 99458:ec2aa1d6d955 user: Benjamin Peterson date: Sat Dec 05 00:27:40 2015 -0800 summary: merge 3.5 files: -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Dec 5 03:31:54 2015 From: python-checkins at python.org (benjamin.peterson) Date: Sat, 05 Dec 2015 08:31:54 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=282=2E7=29=3A_make_consultin?= =?utf-8?q?g_save=5Fmodules_O=281=29_rather_than_O=28n=29?= Message-ID: <20151205083154.93632.93146@psf.io> https://hg.python.org/cpython/rev/3192e2af7f52 changeset: 99460:3192e2af7f52 branch: 2.7 parent: 99450:a3aa22a55db3 user: Benjamin Peterson date: Sat Dec 05 00:29:56 2015 -0800 summary: make consulting save_modules O(1) rather than O(n) files: Lib/test/regrtest.py | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/Lib/test/regrtest.py b/Lib/test/regrtest.py --- a/Lib/test/regrtest.py +++ b/Lib/test/regrtest.py @@ -462,7 +462,7 @@ test_times = [] test_support.use_resources = use_resources - save_modules = sys.modules.keys() + save_modules = set(sys.modules.keys()) def accumulate_result(test, result): ok, test_time = result -- Repository URL: https://hg.python.org/cpython From solipsis at pitrou.net Sat Dec 5 03:43:34 2015 From: solipsis at pitrou.net (solipsis at pitrou.net) Date: Sat, 05 Dec 2015 08:43:34 +0000 Subject: [Python-checkins] Daily reference leaks (401459dca320): sum=4 Message-ID: <20151205084334.105501.47823@psf.io> results for 401459dca320 on branch "default" 
-------------------------------------------- test_functools leaked [0, 2, 2] memory blocks, sum=4 Command line was: ['./python', '-m', 'test.regrtest', '-uall', '-R', '3:3:/home/psf-users/antoine/refleaks/reflogwWkZgc', '--timeout', '7200'] From python-checkins at python.org Sat Dec 5 07:43:06 2015 From: python-checkins at python.org (martin.panter) Date: Sat, 05 Dec 2015 12:43:06 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Issue_=2325764=3A_Remove_t?= =?utf-8?q?est_debugging?= Message-ID: <20151205124306.29621.10831@psf.io> https://hg.python.org/cpython/rev/0c9095566f21 changeset: 99464:0c9095566f21 user: Martin Panter date: Sat Dec 05 10:18:25 2015 +0000 summary: Issue #25764: Remove test debugging files: Lib/test/test_subprocess.py | 13 ++----------- 1 files changed, 2 insertions(+), 11 deletions(-) diff --git a/Lib/test/test_subprocess.py b/Lib/test/test_subprocess.py --- a/Lib/test/test_subprocess.py +++ b/Lib/test/test_subprocess.py @@ -1518,21 +1518,12 @@ # The internal code did not preserve the previous exception when # re-enabling garbage collection try: - from resource import getrlimit, setrlimit, RLIMIT_NPROC, RLIM_INFINITY + from resource import getrlimit, setrlimit, RLIMIT_NPROC except ImportError as err: self.skipTest(err) # RLIMIT_NPROC is specific to Linux and BSD limits = getrlimit(RLIMIT_NPROC) [_, hard] = limits - try: - setrlimit(RLIMIT_NPROC, limits) - setrlimit(RLIMIT_NPROC, (0, hard)) - except ValueError as err: - # Seems to happen on various OS X buildbots - print( - f"Setting NPROC failed: {err!r}, limits={limits!r}, " - f"RLIM_INFINITY={RLIM_INFINITY!r}, " - f"getrlimit() -> {getrlimit(RLIMIT_NPROC)!r}") - self.skipTest("Setting NPROC limit failed") + setrlimit(RLIMIT_NPROC, (0, hard)) self.addCleanup(setrlimit, RLIMIT_NPROC, limits) # Forking should raise EAGAIN, translated to BlockingIOError with self.assertRaises(BlockingIOError): -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Dec 5 07:43:06 2015 From: python-checkins at python.org (martin.panter) Date: Sat, 05 Dec 2015 12:43:06 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAobWVyZ2UgMy40IC0+IDMuNSk6?= =?utf-8?q?_Issue_=2325764=3A_Merge_OS_X_test_skipping_from_3=2E4_into_3?= =?utf-8?q?=2E5?= Message-ID: <20151205124305.4092.96694@psf.io> https://hg.python.org/cpython/rev/6211c41106cc changeset: 99462:6211c41106cc branch: 3.5 parent: 99458:ec2aa1d6d955 parent: 99461:6f831de45f43 user: Martin Panter date: Sat Dec 05 12:41:29 2015 +0000 summary: Issue #25764: Merge OS X test skipping from 3.4 into 3.5 files: Lib/test/test_subprocess.py | 2 ++ 1 files changed, 2 insertions(+), 0 deletions(-) diff --git a/Lib/test/test_subprocess.py b/Lib/test/test_subprocess.py --- a/Lib/test/test_subprocess.py +++ b/Lib/test/test_subprocess.py @@ -1512,6 +1512,8 @@ if not enabled: gc.disable() + @unittest.skipIf( + sys.platform == 'darwin', 'setrlimit() seems to fail on OS X') def test_preexec_fork_failure(self): # The internal code did not preserve the previous exception when # re-enabling garbage collection -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Dec 5 07:43:06 2015 From: python-checkins at python.org (martin.panter) Date: Sat, 05 Dec 2015 12:43:06 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E5_-=3E_default?= =?utf-8?q?=29=3A_Issue_=2325764=3A_Merge_OS_X_test_skipping_from_3=2E5?= Message-ID: <20151205124306.61823.73652@psf.io> https://hg.python.org/cpython/rev/9a847520c40d changeset: 99463:9a847520c40d 
parent: 99459:4c50ffbcde5c parent: 99462:6211c41106cc user: Martin Panter date: Sat Dec 05 12:41:41 2015 +0000 summary: Issue #25764: Merge OS X test skipping from 3.5 files: Lib/test/test_subprocess.py | 2 ++ 1 files changed, 2 insertions(+), 0 deletions(-) diff --git a/Lib/test/test_subprocess.py b/Lib/test/test_subprocess.py --- a/Lib/test/test_subprocess.py +++ b/Lib/test/test_subprocess.py @@ -1512,6 +1512,8 @@ if not enabled: gc.disable() + @unittest.skipIf( + sys.platform == 'darwin', 'setrlimit() seems to fail on OS X') def test_preexec_fork_failure(self): # The internal code did not preserve the previous exception when # re-enabling garbage collection -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Dec 5 07:43:06 2015 From: python-checkins at python.org (martin.panter) Date: Sat, 05 Dec 2015 12:43:06 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogSXNzdWUgIzI1NzY0?= =?utf-8?q?=3A_Skip_the_test_on_OS_X?= Message-ID: <20151205124305.21486.5806@psf.io> https://hg.python.org/cpython/rev/6f831de45f43 changeset: 99461:6f831de45f43 branch: 3.4 parent: 99457:cf86a04d68e7 user: Martin Panter date: Sat Dec 05 09:51:52 2015 +0000 summary: Issue #25764: Skip the test on OS X The OS X buildbots were failing at the second setrlimit() call with EPERM, as if they were trying to raise the hard limit. The call should be keeping the hard limit the same and raising the soft limit back to its original value, so I don't understand the failure. files: Lib/test/test_subprocess.py | 2 ++ 1 files changed, 2 insertions(+), 0 deletions(-) diff --git a/Lib/test/test_subprocess.py b/Lib/test/test_subprocess.py --- a/Lib/test/test_subprocess.py +++ b/Lib/test/test_subprocess.py @@ -1416,6 +1416,8 @@ if not enabled: gc.disable() + @unittest.skipIf( + sys.platform == 'darwin', 'setrlimit() seems to fail on OS X') def test_preexec_fork_failure(self): # The internal code did not preserve the previous exception when # re-enabling garbage collection -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Dec 5 14:02:53 2015 From: python-checkins at python.org (benjamin.peterson) Date: Sat, 05 Dec 2015 19:02:53 +0000 Subject: [Python-checkins] =?utf-8?q?release=3A_update_sphinx_to_1=2E3=2E3?= Message-ID: <20151205190253.29971.35971@psf.io> https://hg.python.org/release/rev/96c83c0116b9 changeset: 102:96c83c0116b9 user: Benjamin Peterson date: Sat Dec 05 11:02:47 2015 -0800 summary: update sphinx to 1.3.3 files: release.py | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/release.py b/release.py --- a/release.py +++ b/release.py @@ -283,7 +283,7 @@ with tempfile.TemporaryDirectory() as venv: run_cmd(['python3', '-m', 'venv', venv]) pip = os.path.join(venv, 'bin', 'pip') - run_cmd([pip, 'install', 'Sphinx==1.2.3']) + run_cmd([pip, 'install', 'Sphinx==1.3.3']) # run_cmd([pip, 'install', 'Sphinx']) sphinx_build = os.path.join(venv, 'bin', 'sphinx-build') with changed_dir('Doc'): -- Repository URL: https://hg.python.org/release From python-checkins at python.org Sat Dec 5 14:45:54 2015 From: python-checkins at python.org (benjamin.peterson) Date: Sat, 05 Dec 2015 19:45:54 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMi43KTogMi43LjExIGZpbmFs?= Message-ID: <20151205194554.21490.69842@psf.io> https://hg.python.org/cpython/rev/6d1b6a68f775 changeset: 99465:6d1b6a68f775 branch: 2.7 tag: v2.7.11 parent: 99449:b2d64aff7225 user: Benjamin Peterson date: Sat Dec 05 11:45:17 2015 -0800 summary: 2.7.11 final files: 
Include/patchlevel.h | 6 +++--- Misc/NEWS | 3 --- 2 files changed, 3 insertions(+), 6 deletions(-) diff --git a/Include/patchlevel.h b/Include/patchlevel.h --- a/Include/patchlevel.h +++ b/Include/patchlevel.h @@ -23,11 +23,11 @@ #define PY_MAJOR_VERSION 2 #define PY_MINOR_VERSION 7 #define PY_MICRO_VERSION 11 -#define PY_RELEASE_LEVEL PY_RELEASE_LEVEL_GAMMA -#define PY_RELEASE_SERIAL 1 +#define PY_RELEASE_LEVEL PY_RELEASE_LEVEL_FINAL +#define PY_RELEASE_SERIAL 0 /* Version as a string */ -#define PY_VERSION "2.7.11rc1+" +#define PY_VERSION "2.7.11" /*--end constants--*/ /* Subversion Revision number of this file (not of the repository). Empty diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -7,9 +7,6 @@ *Release date: 2015-12-05* -Core and Builtins ------------------ - Library ------- -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Dec 5 14:45:54 2015 From: python-checkins at python.org (benjamin.peterson) Date: Sat, 05 Dec 2015 19:45:54 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=282=2E7=29=3A_Added_tag_v2?= =?utf-8?q?=2E7=2E11_for_changeset_6d1b6a68f775?= Message-ID: <20151205194554.4102.49276@psf.io> https://hg.python.org/cpython/rev/53d30ab403f1 changeset: 99466:53d30ab403f1 branch: 2.7 user: Benjamin Peterson date: Sat Dec 05 11:45:22 2015 -0800 summary: Added tag v2.7.11 for changeset 6d1b6a68f775 files: .hgtags | 1 + 1 files changed, 1 insertions(+), 0 deletions(-) diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -171,3 +171,4 @@ 80ccce248ba2657ed5da3ccf7999f35b78827f5e v2.7.10rc1 15c95b7d81dcf821daade360741e00714667653f v2.7.10 82dd9545bd93d6e7a9821e1dabc7b25508d0fa3a v2.7.11rc1 +6d1b6a68f775fada9877d295e62958bafa1ca11e v2.7.11 -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Dec 5 14:45:54 2015 From: python-checkins at python.org (benjamin.peterson) Date: Sat, 05 Dec 2015 19:45:54 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAobWVyZ2UgMi43IC0+IDIuNyk6?= =?utf-8?q?_merge_2=2E7=2E11_branch?= Message-ID: <20151205194554.61841.93223@psf.io> https://hg.python.org/cpython/rev/ec0289f3af28 changeset: 99467:ec0289f3af28 branch: 2.7 parent: 99460:3192e2af7f52 parent: 99466:53d30ab403f1 user: Benjamin Peterson date: Sat Dec 05 11:45:48 2015 -0800 summary: merge 2.7.11 branch files: .hgtags | 1 + Include/patchlevel.h | 6 +++--- Misc/NEWS | 3 --- 3 files changed, 4 insertions(+), 6 deletions(-) diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -171,3 +171,4 @@ 80ccce248ba2657ed5da3ccf7999f35b78827f5e v2.7.10rc1 15c95b7d81dcf821daade360741e00714667653f v2.7.10 82dd9545bd93d6e7a9821e1dabc7b25508d0fa3a v2.7.11rc1 +6d1b6a68f775fada9877d295e62958bafa1ca11e v2.7.11 diff --git a/Include/patchlevel.h b/Include/patchlevel.h --- a/Include/patchlevel.h +++ b/Include/patchlevel.h @@ -23,11 +23,11 @@ #define PY_MAJOR_VERSION 2 #define PY_MINOR_VERSION 7 #define PY_MICRO_VERSION 11 -#define PY_RELEASE_LEVEL PY_RELEASE_LEVEL_GAMMA -#define PY_RELEASE_SERIAL 1 +#define PY_RELEASE_LEVEL PY_RELEASE_LEVEL_FINAL +#define PY_RELEASE_SERIAL 0 /* Version as a string */ -#define PY_VERSION "2.7.11rc1+" +#define PY_VERSION "2.7.11" /*--end constants--*/ /* Subversion Revision number of this file (not of the repository). 
Empty diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -59,9 +59,6 @@ *Release date: 2015-12-05* -Core and Builtins ------------------ - Library ------- -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Dec 5 14:46:37 2015 From: python-checkins at python.org (benjamin.peterson) Date: Sat, 05 Dec 2015 19:46:37 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMi43KTogb2ZmIHRvIDIuNy4x?= =?utf-8?q?2_we_go?= Message-ID: <20151205194636.105481.56369@psf.io> https://hg.python.org/cpython/rev/2096109fe812 changeset: 99468:2096109fe812 branch: 2.7 user: Benjamin Peterson date: Sat Dec 05 11:46:21 2015 -0800 summary: off to 2.7.12 we go files: Include/patchlevel.h | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/Include/patchlevel.h b/Include/patchlevel.h --- a/Include/patchlevel.h +++ b/Include/patchlevel.h @@ -27,7 +27,7 @@ #define PY_RELEASE_SERIAL 0 /* Version as a string */ -#define PY_VERSION "2.7.11" +#define PY_VERSION "2.7.11+" /*--end constants--*/ /* Subversion Revision number of this file (not of the repository). Empty -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Dec 5 19:54:47 2015 From: python-checkins at python.org (larry.hastings) Date: Sun, 06 Dec 2015 00:54:47 +0000 Subject: [Python-checkins] =?utf-8?q?peps=3A_Updated_the_URL_for_the_build?= =?utf-8?q?bots=2E?= Message-ID: <20151206005447.60132.8687@psf.io> https://hg.python.org/peps/rev/d13b79fa56cc changeset: 6135:d13b79fa56cc user: Larry Hastings date: Sat Dec 05 16:54:44 2015 -0800 summary: Updated the URL for the buildbots. files: pep-0101.txt | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/pep-0101.txt b/pep-0101.txt --- a/pep-0101.txt +++ b/pep-0101.txt @@ -129,7 +129,7 @@ ___ Check the stable buildbots. - Go to http://www.python.org/dev/buildbot/stable/ + Go to http://buildbot.python.org/all/waterfall (the trailing slash is required). Look at the buildbots for the release you're making. Ignore any that are offline (or inform the community so -- Repository URL: https://hg.python.org/peps From python-checkins at python.org Sat Dec 5 20:04:18 2015 From: python-checkins at python.org (larry.hastings) Date: Sun, 06 Dec 2015 01:04:18 +0000 Subject: [Python-checkins] =?utf-8?q?peps=3A_Updated_another_path_that=27s?= =?utf-8?q?_changed_in_recent=2E=2E=2E_years=3F?= Message-ID: <20151206010418.105483.35903@psf.io> https://hg.python.org/peps/rev/7112c1e639a1 changeset: 6136:7112c1e639a1 user: Larry Hastings date: Sat Dec 05 17:04:15 2015 -0800 summary: Updated another path that's changed in recent... years? files: pep-0101.txt | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/pep-0101.txt b/pep-0101.txt --- a/pep-0101.txt +++ b/pep-0101.txt @@ -179,7 +179,7 @@ ___ Commit your changes to pydoc_topics.py (and any fixes you made in the docs). - ___ Make sure the SOURCE_URI in ``Doc/tools/pyspecific.py`` + ___ Make sure the SOURCE_URI in ``Doc/tools/extensions/pyspecific.py`` points to the right branch in the hg repository (or ``default`` for unstable releases of the default branch). 
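A side note on the patchlevel.h edits in the 2.7.11 release commits above: those C macros are what the interpreter reports at runtime, so the bump from the release candidate to the final release, and then to "2.7.11+" for the next development cycle, is directly observable. A rough mapping, as a small Python sketch (illustrative only; exact output depends on the interpreter it runs under):

    import sys
    import platform

    # PY_MAJOR/MINOR/MICRO_VERSION and PY_RELEASE_LEVEL/PY_RELEASE_SERIAL feed
    # sys.version_info; PY_VERSION is the leading part of sys.version.
    print(sys.version_info)
    # e.g. sys.version_info(major=2, minor=7, micro=11, releaselevel='final', serial=0)
    print(platform.python_version())   # e.g. '2.7.11'
    print(sys.version.split()[0])      # '2.7.11' on the tag, '2.7.11+' after the bump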
-- Repository URL: https://hg.python.org/peps From python-checkins at python.org Sat Dec 5 21:01:31 2015 From: python-checkins at python.org (ethan.furman) Date: Sun, 06 Dec 2015 02:01:31 +0000 Subject: [Python-checkins] =?utf-8?q?peps=3A_the_trailing_slash_is_no_lang?= =?utf-8?q?er_required?= Message-ID: <20151206020131.60140.95669@psf.io> https://hg.python.org/peps/rev/4769e5c2ef8f changeset: 6137:4769e5c2ef8f user: Ethan Furman date: Sat Dec 05 18:01:21 2015 -0800 summary: the trailing slash is no langer required files: pep-0101.txt | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/pep-0101.txt b/pep-0101.txt --- a/pep-0101.txt +++ b/pep-0101.txt @@ -131,7 +131,7 @@ Go to http://buildbot.python.org/all/waterfall - (the trailing slash is required). Look at the buildbots for the release + Look at the buildbots for the release you're making. Ignore any that are offline (or inform the community so they can be restarted). If what remains are (mostly) green buildbots, you're good to go. If you have non-offline red buildbots, you may want -- Repository URL: https://hg.python.org/peps From python-checkins at python.org Sat Dec 5 22:51:55 2015 From: python-checkins at python.org (martin.panter) Date: Sun, 06 Dec 2015 03:51:55 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy41KTogSXNzdWUgIzI1NzE3?= =?utf-8?q?=3A_Tolerate_fstat=28=29_failures_in_the_FileIO_constructor?= Message-ID: <20151206035155.60134.75685@psf.io> https://hg.python.org/cpython/rev/20ea12222b0e changeset: 99469:20ea12222b0e branch: 3.5 parent: 99462:6211c41106cc user: Martin Panter date: Sun Dec 06 03:15:05 2015 +0000 summary: Issue #25717: Tolerate fstat() failures in the FileIO constructor This restores 3.4 behaviour, which was removed by revision 3b5279b5bfd1. The fstat() call fails with ENOENT for a Virtual Box shared folder filesystem if the file entry has been unlinked, e.g. for a temporary file. files: Misc/NEWS | 4 +++ Modules/_io/fileio.c | 39 ++++++++++++++++++++++--------- 2 files changed, 31 insertions(+), 12 deletions(-) diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -22,6 +22,10 @@ Library ------- +- Issue #25717: Restore the previous behaviour of tolerating most fstat() + errors when opening files. This was a regression in 3.5a1, and stopped + anonymous temporary files from working in special cases. + - Issue #24903: Fix regression in number of arguments compileall accepts when '-d' is specified. The check on the number of arguments has been dropped completely as it never worked correctly anyway. diff --git a/Modules/_io/fileio.c b/Modules/_io/fileio.c --- a/Modules/_io/fileio.c +++ b/Modules/_io/fileio.c @@ -250,6 +250,7 @@ int *atomic_flag_works = NULL; #endif struct _Py_stat_struct fdfstat; + int fstat_result; int async_err = 0; assert(PyFileIO_Check(self)); @@ -438,22 +439,36 @@ } self->blksize = DEFAULT_BUFFER_SIZE; - if (_Py_fstat(self->fd, &fdfstat) < 0) - goto error; + Py_BEGIN_ALLOW_THREADS + fstat_result = _Py_fstat_noraise(self->fd, &fdfstat); + Py_END_ALLOW_THREADS + if (fstat_result < 0) { +#ifdef MS_WINDOWS + if (GetLastError() == ERROR_INVALID_HANDLE) { + PyErr_SetFromWindowsErr(0); +#else + if (errno == EBADF) { + PyErr_SetFromErrno(PyExc_OSError); +#endif + goto error; + } + } + else { #if defined(S_ISDIR) && defined(EISDIR) - /* On Unix, open will succeed for directories. - In Python, there should be no file objects referring to - directories, so we need a check. 
*/ - if (S_ISDIR(fdfstat.st_mode)) { - errno = EISDIR; - PyErr_SetFromErrnoWithFilenameObject(PyExc_IOError, nameobj); - goto error; - } + /* On Unix, open will succeed for directories. + In Python, there should be no file objects referring to + directories, so we need a check. */ + if (S_ISDIR(fdfstat.st_mode)) { + errno = EISDIR; + PyErr_SetFromErrnoWithFilenameObject(PyExc_IOError, nameobj); + goto error; + } #endif /* defined(S_ISDIR) */ #ifdef HAVE_STRUCT_STAT_ST_BLKSIZE - if (fdfstat.st_blksize > 1) - self->blksize = fdfstat.st_blksize; + if (fdfstat.st_blksize > 1) + self->blksize = fdfstat.st_blksize; #endif /* HAVE_STRUCT_STAT_ST_BLKSIZE */ + } #if defined(MS_WINDOWS) || defined(__CYGWIN__) /* don't translate newlines (\r\n <=> \n) */ -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Dec 5 22:51:56 2015 From: python-checkins at python.org (martin.panter) Date: Sun, 06 Dec 2015 03:51:56 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E5_-=3E_default?= =?utf-8?q?=29=3A_Issue_=2325717=3A_Merge_fstat=28=29_fix_from_3=2E5?= Message-ID: <20151206035156.93652.61835@psf.io> https://hg.python.org/cpython/rev/8c978cbe057c changeset: 99470:8c978cbe057c parent: 99464:0c9095566f21 parent: 99469:20ea12222b0e user: Martin Panter date: Sun Dec 06 03:29:54 2015 +0000 summary: Issue #25717: Merge fstat() fix from 3.5 files: Misc/NEWS | 4 +++ Modules/_io/fileio.c | 39 ++++++++++++++++++++++--------- 2 files changed, 31 insertions(+), 12 deletions(-) diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -109,6 +109,10 @@ Library ------- +- Issue #25717: Restore the previous behaviour of tolerating most fstat() + errors when opening files. This was a regression in 3.5a1, and stopped + anonymous temporary files from working in special cases. + - Issue #24903: Fix regression in number of arguments compileall accepts when '-d' is specified. The check on the number of arguments has been dropped completely as it never worked correctly anyway. diff --git a/Modules/_io/fileio.c b/Modules/_io/fileio.c --- a/Modules/_io/fileio.c +++ b/Modules/_io/fileio.c @@ -250,6 +250,7 @@ int *atomic_flag_works = NULL; #endif struct _Py_stat_struct fdfstat; + int fstat_result; int async_err = 0; assert(PyFileIO_Check(self)); @@ -438,22 +439,36 @@ } self->blksize = DEFAULT_BUFFER_SIZE; - if (_Py_fstat(self->fd, &fdfstat) < 0) - goto error; + Py_BEGIN_ALLOW_THREADS + fstat_result = _Py_fstat_noraise(self->fd, &fdfstat); + Py_END_ALLOW_THREADS + if (fstat_result < 0) { +#ifdef MS_WINDOWS + if (GetLastError() == ERROR_INVALID_HANDLE) { + PyErr_SetFromWindowsErr(0); +#else + if (errno == EBADF) { + PyErr_SetFromErrno(PyExc_OSError); +#endif + goto error; + } + } + else { #if defined(S_ISDIR) && defined(EISDIR) - /* On Unix, open will succeed for directories. - In Python, there should be no file objects referring to - directories, so we need a check. */ - if (S_ISDIR(fdfstat.st_mode)) { - errno = EISDIR; - PyErr_SetFromErrnoWithFilenameObject(PyExc_IOError, nameobj); - goto error; - } + /* On Unix, open will succeed for directories. + In Python, there should be no file objects referring to + directories, so we need a check. 
*/ + if (S_ISDIR(fdfstat.st_mode)) { + errno = EISDIR; + PyErr_SetFromErrnoWithFilenameObject(PyExc_IOError, nameobj); + goto error; + } #endif /* defined(S_ISDIR) */ #ifdef HAVE_STRUCT_STAT_ST_BLKSIZE - if (fdfstat.st_blksize > 1) - self->blksize = fdfstat.st_blksize; + if (fdfstat.st_blksize > 1) + self->blksize = fdfstat.st_blksize; #endif /* HAVE_STRUCT_STAT_ST_BLKSIZE */ + } #if defined(MS_WINDOWS) || defined(__CYGWIN__) /* don't translate newlines (\r\n <=> \n) */ -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sat Dec 5 23:57:42 2015 From: python-checkins at python.org (benjamin.peterson) Date: Sun, 06 Dec 2015 04:57:42 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=282=2E7=29=3A_remove_pointle?= =?utf-8?q?ss_keys=28=29_call?= Message-ID: <20151206045742.29601.8823@psf.io> https://hg.python.org/cpython/rev/0ad57f476e64 changeset: 99471:0ad57f476e64 branch: 2.7 parent: 99468:2096109fe812 user: Benjamin Peterson date: Sat Dec 05 20:52:43 2015 -0800 summary: remove pointless keys() call files: Lib/test/regrtest.py | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/Lib/test/regrtest.py b/Lib/test/regrtest.py --- a/Lib/test/regrtest.py +++ b/Lib/test/regrtest.py @@ -462,7 +462,7 @@ test_times = [] test_support.use_resources = use_resources - save_modules = set(sys.modules.keys()) + save_modules = set(sys.modules) def accumulate_result(test, result): ok, test_time = result -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sun Dec 6 00:02:04 2015 From: python-checkins at python.org (ned.deily) Date: Sun, 06 Dec 2015 05:02:04 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAobWVyZ2UgMy40IC0+IDMuNSk6?= =?utf-8?q?_Issue_=2325798=3A_merge_from_3=2E4?= Message-ID: <20151206050203.29979.90063@psf.io> https://hg.python.org/cpython/rev/02d2127fda6c changeset: 99474:02d2127fda6c branch: 3.5 parent: 99469:20ea12222b0e parent: 99473:51a0dd6f7c73 user: Ned Deily date: Sat Dec 05 23:55:33 2015 -0500 summary: Issue #25798: merge from 3.4 files: Mac/BuildScript/build-installer.py | 6 +- Mac/BuildScript/openssl_sdk_makedepend.patch | 18 +++++---- Misc/NEWS | 5 ++ 3 files changed, 18 insertions(+), 11 deletions(-) diff --git a/Mac/BuildScript/build-installer.py b/Mac/BuildScript/build-installer.py --- a/Mac/BuildScript/build-installer.py +++ b/Mac/BuildScript/build-installer.py @@ -237,9 +237,9 @@ result.extend([ dict( - name="OpenSSL 1.0.2d", - url="https://www.openssl.org/source/openssl-1.0.2d.tar.gz", - checksum='38dd619b2e77cbac69b99f52a053d25a', + name="OpenSSL 1.0.2e", + url="https://www.openssl.org/source/openssl-1.0.2e.tar.gz", + checksum='5262bfa25b60ed9de9f28d5d52d77fc5', patches=[ "openssl_sdk_makedepend.patch", ], diff --git a/Mac/BuildScript/openssl_sdk_makedepend.patch b/Mac/BuildScript/openssl_sdk_makedepend.patch --- a/Mac/BuildScript/openssl_sdk_makedepend.patch +++ b/Mac/BuildScript/openssl_sdk_makedepend.patch @@ -1,8 +1,8 @@ # HG changeset patch -# Parent 25a9af415e8c3faf591c360d5f0e361d049b2b43 +# Parent ff8a7557607cffd626997e57ed31c1012a3018aa # openssl_sdk_makedepend.patch # -# using openssl 1.0.2d +# using openssl 1.0.2e # # - support building with an OS X SDK # - allow "make depend" to use compilers with names other than "gcc" @@ -12,7 +12,7 @@ diff --git a/Configure b/Configure --- a/Configure +++ b/Configure -@@ -617,12 +617,12 @@ +@@ -635,12 +635,12 @@ ##### MacOS X (a.k.a. 
Rhapsody or Darwin) setup "rhapsody-ppc-cc","cc:-O3 -DB_ENDIAN::(unknown):MACOSX_RHAPSODY::BN_LLONG RC4_CHAR RC4_CHUNK DES_UNROLL BF_PTR:${no_asm}::", @@ -31,24 +31,26 @@ "debug-darwin-ppc-cc","cc:-DBN_DEBUG -DREF_CHECK -DCONF_DEBUG -DCRYPTO_MDEBUG -DB_ENDIAN -g -Wall -O::-D_REENTRANT:MACOSX::BN_LLONG RC4_CHAR RC4_CHUNK DES_UNROLL BF_PTR:${ppc32_asm}:osx32:dlfcn:darwin-shared:-fPIC:-dynamiclib:.\$(SHLIB_MAJOR).\$(SHLIB_MINOR).dylib", # iPhoneOS/iOS "iphoneos-cross","llvm-gcc:-O3 -isysroot \$(CROSS_TOP)/SDKs/\$(CROSS_SDK) -fomit-frame-pointer -fno-common::-D_REENTRANT:iOS:-Wl,-search_paths_first%:BN_LLONG RC4_CHAR RC4_CHUNK DES_UNROLL BF_PTR:${no_asm}:dlfcn:darwin-shared:-fPIC -fno-common:-dynamiclib:.\$(SHLIB_MAJOR).\$(SHLIB_MINOR).dylib", -@@ -1685,7 +1685,7 @@ +@@ -1714,8 +1714,7 @@ s/^CC=.*$/CC= $cc/; s/^AR=\s*ar/AR= $ar/; s/^RANLIB=.*/RANLIB= $ranlib/; - s/^MAKEDEPPROG=.*$/MAKEDEPPROG= $cc/ if $cc eq "gcc"; -+ s/^MAKEDEPPROG=.*$/MAKEDEPPROG= $cc/; +- s/^MAKEDEPPROG=.*$/MAKEDEPPROG= $cc/ if $ecc eq "gcc" || $ecc eq "clang"; ++ s/^MAKEDEPPROG=.*$/MAKEDEPPROG= $cc/ } s/^CFLAG=.*$/CFLAG= $cflags/; s/^DEPFLAG=.*$/DEPFLAG=$depflags/; diff --git a/util/domd b/util/domd --- a/util/domd +++ b/util/domd -@@ -14,7 +14,7 @@ +@@ -14,8 +14,7 @@ cp Makefile Makefile.save # fake the presence of Kerberos touch $TOP/krb5.h --if expr "$MAKEDEPEND" : '.*gcc$' > /dev/null; then -+if true ; then # was: if expr "$MAKEDEPEND" : '.*gcc$' > /dev/null; then +-if ${MAKEDEPEND} --version 2>&1 | grep -q "clang" || +- echo $MAKEDEPEND | grep -q "gcc"; then ++if true ; then args="" while [ $# -gt 0 ]; do if [ "$1" != "--" ]; then args="$args $1"; fi diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -77,6 +77,11 @@ - Issue #25616: Tests for OrderedDict are extracted from test_collections into separate file test_ordered_dict. +Build +----- + +- Issue #25798: Update OS X 10.5 installer to use OpenSSL 1.0.2e. + What's New in Python 3.5.1 final? ================================= -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sun Dec 6 00:02:04 2015 From: python-checkins at python.org (ned.deily) Date: Sun, 06 Dec 2015 05:02:04 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogSXNzdWUgIzI1Nzk4?= =?utf-8?q?=3A_Update_OS_X_10=2E5+_32-bit-only_installer_to_build?= Message-ID: <20151206050203.105473.80671@psf.io> https://hg.python.org/cpython/rev/51a0dd6f7c73 changeset: 99473:51a0dd6f7c73 branch: 3.4 parent: 99461:6f831de45f43 user: Ned Deily date: Sat Dec 05 23:51:23 2015 -0500 summary: Issue #25798: Update OS X 10.5+ 32-bit-only installer to build and link with OpenSSL 1.0.2e. 
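Each third-party entry in build-installer.py pairs a source tarball URL with a hex checksum that the build script verifies before unpacking, which is why the OpenSSL bump above changes both the url and checksum fields. A stand-alone sketch of such a check follows (assuming the 32-hex-digit values are MD5 digests, as their length suggests; checksum_matches is an invented helper name, not part of the script):

    import hashlib

    def checksum_matches(path, expected, algorithm="md5"):
        # Stream the file so large tarballs do not have to fit in memory.
        digest = hashlib.new(algorithm)
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 16), b""):
                digest.update(chunk)
        return digest.hexdigest() == expected.lower()

    # Hypothetical usage against the values from the diff above:
    # checksum_matches("openssl-1.0.2e.tar.gz", "5262bfa25b60ed9de9f28d5d52d77fc5")

Pinning both the version and the checksum means a re-released or tampered tarball fails the build loudly rather than being compiled in silently.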
files: Mac/BuildScript/build-installer.py | 6 +- Mac/BuildScript/openssl_sdk_makedepend.patch | 18 +++++---- Misc/NEWS | 4 +- 3 files changed, 15 insertions(+), 13 deletions(-) diff --git a/Mac/BuildScript/build-installer.py b/Mac/BuildScript/build-installer.py --- a/Mac/BuildScript/build-installer.py +++ b/Mac/BuildScript/build-installer.py @@ -237,9 +237,9 @@ result.extend([ dict( - name="OpenSSL 1.0.2d", - url="https://www.openssl.org/source/openssl-1.0.2d.tar.gz", - checksum='38dd619b2e77cbac69b99f52a053d25a', + name="OpenSSL 1.0.2e", + url="https://www.openssl.org/source/openssl-1.0.2e.tar.gz", + checksum='5262bfa25b60ed9de9f28d5d52d77fc5', patches=[ "openssl_sdk_makedepend.patch", ], diff --git a/Mac/BuildScript/openssl_sdk_makedepend.patch b/Mac/BuildScript/openssl_sdk_makedepend.patch --- a/Mac/BuildScript/openssl_sdk_makedepend.patch +++ b/Mac/BuildScript/openssl_sdk_makedepend.patch @@ -1,8 +1,8 @@ # HG changeset patch -# Parent 25a9af415e8c3faf591c360d5f0e361d049b2b43 +# Parent ff8a7557607cffd626997e57ed31c1012a3018aa # openssl_sdk_makedepend.patch # -# using openssl 1.0.2d +# using openssl 1.0.2e # # - support building with an OS X SDK # - allow "make depend" to use compilers with names other than "gcc" @@ -12,7 +12,7 @@ diff --git a/Configure b/Configure --- a/Configure +++ b/Configure -@@ -617,12 +617,12 @@ +@@ -635,12 +635,12 @@ ##### MacOS X (a.k.a. Rhapsody or Darwin) setup "rhapsody-ppc-cc","cc:-O3 -DB_ENDIAN::(unknown):MACOSX_RHAPSODY::BN_LLONG RC4_CHAR RC4_CHUNK DES_UNROLL BF_PTR:${no_asm}::", @@ -31,24 +31,26 @@ "debug-darwin-ppc-cc","cc:-DBN_DEBUG -DREF_CHECK -DCONF_DEBUG -DCRYPTO_MDEBUG -DB_ENDIAN -g -Wall -O::-D_REENTRANT:MACOSX::BN_LLONG RC4_CHAR RC4_CHUNK DES_UNROLL BF_PTR:${ppc32_asm}:osx32:dlfcn:darwin-shared:-fPIC:-dynamiclib:.\$(SHLIB_MAJOR).\$(SHLIB_MINOR).dylib", # iPhoneOS/iOS "iphoneos-cross","llvm-gcc:-O3 -isysroot \$(CROSS_TOP)/SDKs/\$(CROSS_SDK) -fomit-frame-pointer -fno-common::-D_REENTRANT:iOS:-Wl,-search_paths_first%:BN_LLONG RC4_CHAR RC4_CHUNK DES_UNROLL BF_PTR:${no_asm}:dlfcn:darwin-shared:-fPIC -fno-common:-dynamiclib:.\$(SHLIB_MAJOR).\$(SHLIB_MINOR).dylib", -@@ -1685,7 +1685,7 @@ +@@ -1714,8 +1714,7 @@ s/^CC=.*$/CC= $cc/; s/^AR=\s*ar/AR= $ar/; s/^RANLIB=.*/RANLIB= $ranlib/; - s/^MAKEDEPPROG=.*$/MAKEDEPPROG= $cc/ if $cc eq "gcc"; -+ s/^MAKEDEPPROG=.*$/MAKEDEPPROG= $cc/; +- s/^MAKEDEPPROG=.*$/MAKEDEPPROG= $cc/ if $ecc eq "gcc" || $ecc eq "clang"; ++ s/^MAKEDEPPROG=.*$/MAKEDEPPROG= $cc/ } s/^CFLAG=.*$/CFLAG= $cflags/; s/^DEPFLAG=.*$/DEPFLAG=$depflags/; diff --git a/util/domd b/util/domd --- a/util/domd +++ b/util/domd -@@ -14,7 +14,7 @@ +@@ -14,8 +14,7 @@ cp Makefile Makefile.save # fake the presence of Kerberos touch $TOP/krb5.h --if expr "$MAKEDEPEND" : '.*gcc$' > /dev/null; then -+if true ; then # was: if expr "$MAKEDEPEND" : '.*gcc$' > /dev/null; then +-if ${MAKEDEPEND} --version 2>&1 | grep -q "clang" || +- echo $MAKEDEPEND | grep -q "gcc"; then ++if true ; then args="" while [ $# -gt 0 ]; do if [ "$1" != "--" ]; then args="$args $1"; fi diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -745,8 +745,8 @@ - Issue #23445: pydebug builds now use "gcc -Og" where possible, to make the resulting executable faster. -- Issue #24603: Update Windows builds and OS X 10.5 installer to use OpenSSL - 1.0.2d. +- Issue #24603: Update Windows builds to use OpenSSL1.0.2d + and OS X 10.5 installer to use OpenSSL 1.0.2e. 
C API ----- -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sun Dec 6 00:02:03 2015 From: python-checkins at python.org (ned.deily) Date: Sun, 06 Dec 2015 05:02:03 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMi43KTogSXNzdWUgIzI1Nzk4?= =?utf-8?q?=3A_Update_OS_X_10=2E5+_32-bit-only_installer_to_build?= Message-ID: <20151206050203.61845.10446@psf.io> https://hg.python.org/cpython/rev/f9a0ac60f876 changeset: 99472:f9a0ac60f876 branch: 2.7 parent: 99468:2096109fe812 user: Ned Deily date: Sat Dec 05 23:47:34 2015 -0500 summary: Issue #25798: Update OS X 10.5+ 32-bit-only installer to build and link with OpenSSL 1.0.2e. files: Mac/BuildScript/build-installer.py | 6 +- Mac/BuildScript/openssl_sdk_makedepend.patch | 18 +++++---- Misc/NEWS | 6 +++ 3 files changed, 19 insertions(+), 11 deletions(-) diff --git a/Mac/BuildScript/build-installer.py b/Mac/BuildScript/build-installer.py --- a/Mac/BuildScript/build-installer.py +++ b/Mac/BuildScript/build-installer.py @@ -237,9 +237,9 @@ result.extend([ dict( - name="OpenSSL 1.0.2d", - url="https://www.openssl.org/source/openssl-1.0.2d.tar.gz", - checksum='38dd619b2e77cbac69b99f52a053d25a', + name="OpenSSL 1.0.2e", + url="https://www.openssl.org/source/openssl-1.0.2e.tar.gz", + checksum='5262bfa25b60ed9de9f28d5d52d77fc5', patches=[ "openssl_sdk_makedepend.patch", ], diff --git a/Mac/BuildScript/openssl_sdk_makedepend.patch b/Mac/BuildScript/openssl_sdk_makedepend.patch --- a/Mac/BuildScript/openssl_sdk_makedepend.patch +++ b/Mac/BuildScript/openssl_sdk_makedepend.patch @@ -1,8 +1,8 @@ # HG changeset patch -# Parent 25a9af415e8c3faf591c360d5f0e361d049b2b43 +# Parent ff8a7557607cffd626997e57ed31c1012a3018aa # openssl_sdk_makedepend.patch # -# using openssl 1.0.2d +# using openssl 1.0.2e # # - support building with an OS X SDK # - allow "make depend" to use compilers with names other than "gcc" @@ -12,7 +12,7 @@ diff --git a/Configure b/Configure --- a/Configure +++ b/Configure -@@ -617,12 +617,12 @@ +@@ -635,12 +635,12 @@ ##### MacOS X (a.k.a. 
Rhapsody or Darwin) setup "rhapsody-ppc-cc","cc:-O3 -DB_ENDIAN::(unknown):MACOSX_RHAPSODY::BN_LLONG RC4_CHAR RC4_CHUNK DES_UNROLL BF_PTR:${no_asm}::", @@ -31,24 +31,26 @@ "debug-darwin-ppc-cc","cc:-DBN_DEBUG -DREF_CHECK -DCONF_DEBUG -DCRYPTO_MDEBUG -DB_ENDIAN -g -Wall -O::-D_REENTRANT:MACOSX::BN_LLONG RC4_CHAR RC4_CHUNK DES_UNROLL BF_PTR:${ppc32_asm}:osx32:dlfcn:darwin-shared:-fPIC:-dynamiclib:.\$(SHLIB_MAJOR).\$(SHLIB_MINOR).dylib", # iPhoneOS/iOS "iphoneos-cross","llvm-gcc:-O3 -isysroot \$(CROSS_TOP)/SDKs/\$(CROSS_SDK) -fomit-frame-pointer -fno-common::-D_REENTRANT:iOS:-Wl,-search_paths_first%:BN_LLONG RC4_CHAR RC4_CHUNK DES_UNROLL BF_PTR:${no_asm}:dlfcn:darwin-shared:-fPIC -fno-common:-dynamiclib:.\$(SHLIB_MAJOR).\$(SHLIB_MINOR).dylib", -@@ -1685,7 +1685,7 @@ +@@ -1714,8 +1714,7 @@ s/^CC=.*$/CC= $cc/; s/^AR=\s*ar/AR= $ar/; s/^RANLIB=.*/RANLIB= $ranlib/; - s/^MAKEDEPPROG=.*$/MAKEDEPPROG= $cc/ if $cc eq "gcc"; -+ s/^MAKEDEPPROG=.*$/MAKEDEPPROG= $cc/; +- s/^MAKEDEPPROG=.*$/MAKEDEPPROG= $cc/ if $ecc eq "gcc" || $ecc eq "clang"; ++ s/^MAKEDEPPROG=.*$/MAKEDEPPROG= $cc/ } s/^CFLAG=.*$/CFLAG= $cflags/; s/^DEPFLAG=.*$/DEPFLAG=$depflags/; diff --git a/util/domd b/util/domd --- a/util/domd +++ b/util/domd -@@ -14,7 +14,7 @@ +@@ -14,8 +14,7 @@ cp Makefile Makefile.save # fake the presence of Kerberos touch $TOP/krb5.h --if expr "$MAKEDEPEND" : '.*gcc$' > /dev/null; then -+if true ; then # was: if expr "$MAKEDEPEND" : '.*gcc$' > /dev/null; then +-if ${MAKEDEPEND} --version 2>&1 | grep -q "clang" || +- echo $MAKEDEPEND | grep -q "gcc"; then ++if true ; then args="" while [ $# -gt 0 ]; do if [ "$1" != "--" ]; then args="$args $1"; fi diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -53,6 +53,12 @@ - Issue #25616: Tests for OrderedDict are extracted from test_collections into separate file test_ordered_dict. +Build +----- + +- Issue #25798: Update OS X 10.5+ 32-bit-only installer to build + and link with OpenSSL 1.0.2e. + What's New in Python 2.7.11? 
============================ -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sun Dec 6 00:02:04 2015 From: python-checkins at python.org (ned.deily) Date: Sun, 06 Dec 2015 05:02:04 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E5_-=3E_default?= =?utf-8?q?=29=3A_Issue_=2325798=3A_merge_from_3=2E5?= Message-ID: <20151206050204.4078.56438@psf.io> https://hg.python.org/cpython/rev/50a99be891bc changeset: 99475:50a99be891bc parent: 99470:8c978cbe057c parent: 99474:02d2127fda6c user: Ned Deily date: Sat Dec 05 23:57:55 2015 -0500 summary: Issue #25798: merge from 3.5 files: Mac/BuildScript/build-installer.py | 6 +- Mac/BuildScript/openssl_sdk_makedepend.patch | 18 +++++---- Misc/NEWS | 2 + 3 files changed, 15 insertions(+), 11 deletions(-) diff --git a/Mac/BuildScript/build-installer.py b/Mac/BuildScript/build-installer.py --- a/Mac/BuildScript/build-installer.py +++ b/Mac/BuildScript/build-installer.py @@ -237,9 +237,9 @@ result.extend([ dict( - name="OpenSSL 1.0.2d", - url="https://www.openssl.org/source/openssl-1.0.2d.tar.gz", - checksum='38dd619b2e77cbac69b99f52a053d25a', + name="OpenSSL 1.0.2e", + url="https://www.openssl.org/source/openssl-1.0.2e.tar.gz", + checksum='5262bfa25b60ed9de9f28d5d52d77fc5', patches=[ "openssl_sdk_makedepend.patch", ], diff --git a/Mac/BuildScript/openssl_sdk_makedepend.patch b/Mac/BuildScript/openssl_sdk_makedepend.patch --- a/Mac/BuildScript/openssl_sdk_makedepend.patch +++ b/Mac/BuildScript/openssl_sdk_makedepend.patch @@ -1,8 +1,8 @@ # HG changeset patch -# Parent 25a9af415e8c3faf591c360d5f0e361d049b2b43 +# Parent ff8a7557607cffd626997e57ed31c1012a3018aa # openssl_sdk_makedepend.patch # -# using openssl 1.0.2d +# using openssl 1.0.2e # # - support building with an OS X SDK # - allow "make depend" to use compilers with names other than "gcc" @@ -12,7 +12,7 @@ diff --git a/Configure b/Configure --- a/Configure +++ b/Configure -@@ -617,12 +617,12 @@ +@@ -635,12 +635,12 @@ ##### MacOS X (a.k.a. 
Rhapsody or Darwin) setup "rhapsody-ppc-cc","cc:-O3 -DB_ENDIAN::(unknown):MACOSX_RHAPSODY::BN_LLONG RC4_CHAR RC4_CHUNK DES_UNROLL BF_PTR:${no_asm}::", @@ -31,24 +31,26 @@ "debug-darwin-ppc-cc","cc:-DBN_DEBUG -DREF_CHECK -DCONF_DEBUG -DCRYPTO_MDEBUG -DB_ENDIAN -g -Wall -O::-D_REENTRANT:MACOSX::BN_LLONG RC4_CHAR RC4_CHUNK DES_UNROLL BF_PTR:${ppc32_asm}:osx32:dlfcn:darwin-shared:-fPIC:-dynamiclib:.\$(SHLIB_MAJOR).\$(SHLIB_MINOR).dylib", # iPhoneOS/iOS "iphoneos-cross","llvm-gcc:-O3 -isysroot \$(CROSS_TOP)/SDKs/\$(CROSS_SDK) -fomit-frame-pointer -fno-common::-D_REENTRANT:iOS:-Wl,-search_paths_first%:BN_LLONG RC4_CHAR RC4_CHUNK DES_UNROLL BF_PTR:${no_asm}:dlfcn:darwin-shared:-fPIC -fno-common:-dynamiclib:.\$(SHLIB_MAJOR).\$(SHLIB_MINOR).dylib", -@@ -1685,7 +1685,7 @@ +@@ -1714,8 +1714,7 @@ s/^CC=.*$/CC= $cc/; s/^AR=\s*ar/AR= $ar/; s/^RANLIB=.*/RANLIB= $ranlib/; - s/^MAKEDEPPROG=.*$/MAKEDEPPROG= $cc/ if $cc eq "gcc"; -+ s/^MAKEDEPPROG=.*$/MAKEDEPPROG= $cc/; +- s/^MAKEDEPPROG=.*$/MAKEDEPPROG= $cc/ if $ecc eq "gcc" || $ecc eq "clang"; ++ s/^MAKEDEPPROG=.*$/MAKEDEPPROG= $cc/ } s/^CFLAG=.*$/CFLAG= $cflags/; s/^DEPFLAG=.*$/DEPFLAG=$depflags/; diff --git a/util/domd b/util/domd --- a/util/domd +++ b/util/domd -@@ -14,7 +14,7 @@ +@@ -14,8 +14,7 @@ cp Makefile Makefile.save # fake the presence of Kerberos touch $TOP/krb5.h --if expr "$MAKEDEPEND" : '.*gcc$' > /dev/null; then -+if true ; then # was: if expr "$MAKEDEPEND" : '.*gcc$' > /dev/null; then +-if ${MAKEDEPEND} --version 2>&1 | grep -q "clang" || +- echo $MAKEDEPEND | grep -q "gcc"; then ++if true ; then args="" while [ $# -gt 0 ]; do if [ "$1" != "--" ]; then args="$args $1"; fi diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -486,6 +486,8 @@ - Issue #24986: It is now possible to build Python on Windows without errors when external libraries are not available. +- Issue #25798: Update OS X 10.5 installer to use OpenSSL 1.0.2e. 
+ Windows ------- -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sun Dec 6 00:02:09 2015 From: python-checkins at python.org (ned.deily) Date: Sun, 06 Dec 2015 05:02:09 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAobWVyZ2UgMi43IC0+IDIuNyk6?= =?utf-8?q?_merge_heads?= Message-ID: <20151206050209.105501.45341@psf.io> https://hg.python.org/cpython/rev/8ddac065cd3b changeset: 99476:8ddac065cd3b branch: 2.7 parent: 99472:f9a0ac60f876 parent: 99471:0ad57f476e64 user: Ned Deily date: Sun Dec 06 00:00:57 2015 -0500 summary: merge heads files: Lib/test/regrtest.py | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/Lib/test/regrtest.py b/Lib/test/regrtest.py --- a/Lib/test/regrtest.py +++ b/Lib/test/regrtest.py @@ -462,7 +462,7 @@ test_times = [] test_support.use_resources = use_resources - save_modules = set(sys.modules.keys()) + save_modules = set(sys.modules) def accumulate_result(test, result): ok, test_time = result -- Repository URL: https://hg.python.org/cpython From solipsis at pitrou.net Sun Dec 6 03:42:36 2015 From: solipsis at pitrou.net (solipsis at pitrou.net) Date: Sun, 06 Dec 2015 08:42:36 +0000 Subject: [Python-checkins] Daily reference leaks (50a99be891bc): sum=4 Message-ID: <20151206084235.21237.92601@psf.io> results for 50a99be891bc on branch "default" -------------------------------------------- test_functools leaked [0, 2, 2] memory blocks, sum=4 Command line was: ['./python', '-m', 'test.regrtest', '-uall', '-R', '3:3:/home/psf-users/antoine/refleaks/reflogF9JtOe', '--timeout', '7200'] From python-checkins at python.org Sun Dec 6 06:20:32 2015 From: python-checkins at python.org (martin.panter) Date: Sun, 06 Dec 2015 11:20:32 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy41KTogSXNzdWUgIzI1NzE3?= =?utf-8?q?=3A_Add_comment_explaining_why_errors_are_ignored?= Message-ID: <20151206112032.29968.40133@psf.io> https://hg.python.org/cpython/rev/e9bf5803b716 changeset: 99477:e9bf5803b716 branch: 3.5 parent: 99474:02d2127fda6c user: Martin Panter date: Sun Dec 06 11:12:15 2015 +0000 summary: Issue #25717: Add comment explaining why errors are ignored files: Modules/_io/fileio.c | 3 +++ 1 files changed, 3 insertions(+), 0 deletions(-) diff --git a/Modules/_io/fileio.c b/Modules/_io/fileio.c --- a/Modules/_io/fileio.c +++ b/Modules/_io/fileio.c @@ -443,6 +443,9 @@ fstat_result = _Py_fstat_noraise(self->fd, &fdfstat); Py_END_ALLOW_THREADS if (fstat_result < 0) { + /* Tolerate fstat() errors other than EBADF. See Issue #25717, where + an anonymous file on a Virtual Box shared folder filesystem would + raise ENOENT. 
*/ #ifdef MS_WINDOWS if (GetLastError() == ERROR_INVALID_HANDLE) { PyErr_SetFromWindowsErr(0); -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sun Dec 6 06:20:33 2015 From: python-checkins at python.org (martin.panter) Date: Sun, 06 Dec 2015 11:20:33 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E5_-=3E_default?= =?utf-8?q?=29=3A_Issue_=2325717=3A_Merge_comment_from_3=2E5?= Message-ID: <20151206112032.105477.57785@psf.io> https://hg.python.org/cpython/rev/8bf69413ec01 changeset: 99478:8bf69413ec01 parent: 99475:50a99be891bc parent: 99477:e9bf5803b716 user: Martin Panter date: Sun Dec 06 11:19:31 2015 +0000 summary: Issue #25717: Merge comment from 3.5 files: Modules/_io/fileio.c | 3 +++ 1 files changed, 3 insertions(+), 0 deletions(-) diff --git a/Modules/_io/fileio.c b/Modules/_io/fileio.c --- a/Modules/_io/fileio.c +++ b/Modules/_io/fileio.c @@ -443,6 +443,9 @@ fstat_result = _Py_fstat_noraise(self->fd, &fdfstat); Py_END_ALLOW_THREADS if (fstat_result < 0) { + /* Tolerate fstat() errors other than EBADF. See Issue #25717, where + an anonymous file on a Virtual Box shared folder filesystem would + raise ENOENT. */ #ifdef MS_WINDOWS if (GetLastError() == ERROR_INVALID_HANDLE) { PyErr_SetFromWindowsErr(0); -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sun Dec 6 15:02:12 2015 From: python-checkins at python.org (serhiy.storchaka) Date: Sun, 06 Dec 2015 20:02:12 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Issue_=2325761=3A_Improved?= =?utf-8?q?_detecting_errors_in_broken_pickle_data=2E?= Message-ID: <20151206200212.33665.61859@psf.io> https://hg.python.org/cpython/rev/5c670af0100f changeset: 99479:5c670af0100f user: Serhiy Storchaka date: Sun Dec 06 22:01:35 2015 +0200 summary: Issue #25761: Improved detecting errors in broken pickle data. files: Lib/pickle.py | 88 ++++++++------------ Lib/test/pickletester.py | 17 +-- Lib/test/test_pickle.py | 5 - Misc/NEWS | 2 + Modules/_pickle.c | 113 ++++++++++++++++---------- 5 files changed, 114 insertions(+), 111 deletions(-) diff --git a/Lib/pickle.py b/Lib/pickle.py --- a/Lib/pickle.py +++ b/Lib/pickle.py @@ -1031,7 +1031,7 @@ self._unframer = _Unframer(self._file_read, self._file_readline) self.read = self._unframer.read self.readline = self._unframer.readline - self.mark = object() # any new unique object + self.metastack = [] self.stack = [] self.append = self.stack.append self.proto = 0 @@ -1047,20 +1047,12 @@ except _Stop as stopinst: return stopinst.value - # Return largest index k such that self.stack[k] is self.mark. - # If the stack doesn't contain a mark, eventually raises IndexError. - # This could be sped by maintaining another stack, of indices at which - # the mark appears. For that matter, the latter stack would suffice, - # and we wouldn't need to push mark objects on self.stack at all. - # Doing so is probably a good thing, though, since if the pickle is - # corrupt (or hostile) we may get a clue from finding self.mark embedded - # in unpickled objects. - def marker(self): - stack = self.stack - mark = self.mark - k = len(stack)-1 - while stack[k] is not mark: k = k-1 - return k + # Return a list of items pushed in the stack after last MARK instruction. 
+ def pop_mark(self): + items = self.stack + self.stack = self.metastack.pop() + self.append = self.stack.append + return items def persistent_load(self, pid): raise UnpicklingError("unsupported persistent id encountered") @@ -1237,8 +1229,8 @@ dispatch[SHORT_BINUNICODE[0]] = load_short_binunicode def load_tuple(self): - k = self.marker() - self.stack[k:] = [tuple(self.stack[k+1:])] + items = self.pop_mark() + self.append(tuple(items)) dispatch[TUPLE[0]] = load_tuple def load_empty_tuple(self): @@ -1270,21 +1262,20 @@ dispatch[EMPTY_SET[0]] = load_empty_set def load_frozenset(self): - k = self.marker() - self.stack[k:] = [frozenset(self.stack[k+1:])] + items = self.pop_mark() + self.append(frozenset(items)) dispatch[FROZENSET[0]] = load_frozenset def load_list(self): - k = self.marker() - self.stack[k:] = [self.stack[k+1:]] + items = self.pop_mark() + self.append(items) dispatch[LIST[0]] = load_list def load_dict(self): - k = self.marker() - items = self.stack[k+1:] + items = self.pop_mark() d = {items[i]: items[i+1] for i in range(0, len(items), 2)} - self.stack[k:] = [d] + self.append(d) dispatch[DICT[0]] = load_dict # INST and OBJ differ only in how they get a class object. It's not @@ -1292,9 +1283,7 @@ # previously diverged and grew different bugs. # klass is the class to instantiate, and k points to the topmost mark # object, following which are the arguments for klass.__init__. - def _instantiate(self, klass, k): - args = tuple(self.stack[k+1:]) - del self.stack[k:] + def _instantiate(self, klass, args): if (args or not isinstance(klass, type) or hasattr(klass, "__getinitargs__")): try: @@ -1310,14 +1299,14 @@ module = self.readline()[:-1].decode("ascii") name = self.readline()[:-1].decode("ascii") klass = self.find_class(module, name) - self._instantiate(klass, self.marker()) + self._instantiate(klass, self.pop_mark()) dispatch[INST[0]] = load_inst def load_obj(self): # Stack is ... markobject classobject arg1 arg2 ... 
- k = self.marker() - klass = self.stack.pop(k+1) - self._instantiate(klass, k) + args = self.pop_mark() + cls = args.pop(0) + self._instantiate(cls, args) dispatch[OBJ[0]] = load_obj def load_newobj(self): @@ -1402,12 +1391,14 @@ dispatch[REDUCE[0]] = load_reduce def load_pop(self): - del self.stack[-1] + if self.stack: + del self.stack[-1] + else: + self.pop_mark() dispatch[POP[0]] = load_pop def load_pop_mark(self): - k = self.marker() - del self.stack[k:] + self.pop_mark() dispatch[POP_MARK[0]] = load_pop_mark def load_dup(self): @@ -1463,17 +1454,14 @@ dispatch[APPEND[0]] = load_append def load_appends(self): - stack = self.stack - mark = self.marker() - list_obj = stack[mark - 1] - items = stack[mark + 1:] + items = self.pop_mark() + list_obj = self.stack[-1] if isinstance(list_obj, list): list_obj.extend(items) else: append = list_obj.append for item in items: append(item) - del stack[mark:] dispatch[APPENDS[0]] = load_appends def load_setitem(self): @@ -1485,27 +1473,21 @@ dispatch[SETITEM[0]] = load_setitem def load_setitems(self): - stack = self.stack - mark = self.marker() - dict = stack[mark - 1] - for i in range(mark + 1, len(stack), 2): - dict[stack[i]] = stack[i + 1] - - del stack[mark:] + items = self.pop_mark() + dict = self.stack[-1] + for i in range(0, len(items), 2): + dict[items[i]] = items[i + 1] dispatch[SETITEMS[0]] = load_setitems def load_additems(self): - stack = self.stack - mark = self.marker() - set_obj = stack[mark - 1] - items = stack[mark + 1:] + items = self.pop_mark() + set_obj = self.stack[-1] if isinstance(set_obj, set): set_obj.update(items) else: add = set_obj.add for item in items: add(item) - del stack[mark:] dispatch[ADDITEMS[0]] = load_additems def load_build(self): @@ -1533,7 +1515,9 @@ dispatch[BUILD[0]] = load_build def load_mark(self): - self.append(self.mark) + self.metastack.append(self.stack) + self.stack = [] + self.append = self.stack.append dispatch[MARK[0]] = load_mark def load_stop(self): diff --git a/Lib/test/pickletester.py b/Lib/test/pickletester.py --- a/Lib/test/pickletester.py +++ b/Lib/test/pickletester.py @@ -1000,7 +1000,7 @@ b'0', # POP b'1', # POP_MARK b'2', # DUP - # b'(2', # PyUnpickler doesn't raise + b'(2', b'R', # REDUCE b')R', b'a', # APPEND @@ -1009,7 +1009,7 @@ b'Nb', b'd', # DICT b'e', # APPENDS - # b'(e', # PyUnpickler raises AttributeError + b'(e', b'ibuiltins\nlist\n', # INST b'l', # LIST b'o', # OBJ @@ -1022,7 +1022,7 @@ b'NNs', b't', # TUPLE b'u', # SETITEMS - # b'(u', # PyUnpickler doesn't raise + b'(u', b'}(Nu', b'\x81', # NEWOBJ b')\x81', @@ -1033,7 +1033,7 @@ b'N\x87', b'NN\x87', b'\x90', # ADDITEMS - # b'(\x90', # PyUnpickler raises AttributeError + b'(\x90', b'\x91', # FROZENSET b'\x92', # NEWOBJ_EX b')}\x92', @@ -1046,7 +1046,7 @@ def test_bad_mark(self): badpickles = [ - # b'N(.', # STOP + b'N(.', # STOP b'N(2', # DUP b'cbuiltins\nlist\n)(R', # REDUCE b'cbuiltins\nlist\n()R', @@ -1081,7 +1081,7 @@ b'N(\x94', # MEMOIZE ] for p in badpickles: - self.check_unpickling_error(self.bad_mark_errors, p) + self.check_unpickling_error(self.bad_stack_errors, p) def test_truncated_data(self): self.check_unpickling_error(EOFError, b'') @@ -2581,11 +2581,6 @@ self.assertRaises(pickle.PicklingError, BadPickler().dump, 0) self.assertRaises(pickle.UnpicklingError, BadUnpickler().load) - def test_bad_input(self): - # Test issue4298 - s = bytes([0x58, 0, 0, 0, 0x54]) - self.assertRaises(EOFError, pickle.loads, s) - class AbstractPersistentPicklerTests(unittest.TestCase): diff --git a/Lib/test/test_pickle.py 
b/Lib/test/test_pickle.py --- a/Lib/test/test_pickle.py +++ b/Lib/test/test_pickle.py @@ -33,8 +33,6 @@ unpickler = pickle._Unpickler bad_stack_errors = (IndexError,) - bad_mark_errors = (IndexError, pickle.UnpicklingError, - TypeError, AttributeError, EOFError) truncated_errors = (pickle.UnpicklingError, EOFError, AttributeError, ValueError, struct.error, IndexError, ImportError) @@ -69,8 +67,6 @@ pickler = pickle._Pickler unpickler = pickle._Unpickler bad_stack_errors = (pickle.UnpicklingError, IndexError) - bad_mark_errors = (pickle.UnpicklingError, IndexError, - TypeError, AttributeError, EOFError) truncated_errors = (pickle.UnpicklingError, EOFError, AttributeError, ValueError, struct.error, IndexError, ImportError) @@ -132,7 +128,6 @@ class CUnpicklerTests(PyUnpicklerTests): unpickler = _pickle.Unpickler bad_stack_errors = (pickle.UnpicklingError,) - bad_mark_errors = (EOFError,) truncated_errors = (pickle.UnpicklingError, EOFError, AttributeError, ValueError) diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -109,6 +109,8 @@ Library ------- +- Issue #25761: Improved detecting errors in broken pickle data. + - Issue #25717: Restore the previous behaviour of tolerating most fstat() errors when opening files. This was a regression in 3.5a1, and stopped anonymous temporary files from working in special cases. diff --git a/Modules/_pickle.c b/Modules/_pickle.c --- a/Modules/_pickle.c +++ b/Modules/_pickle.c @@ -370,18 +370,12 @@ /*************************************************************************/ -static int -stack_underflow(void) -{ - PickleState *st = _Pickle_GetGlobalState(); - PyErr_SetString(st->UnpicklingError, "unpickling stack underflow"); - return -1; -} - /* Internal data type used as the unpickling stack. */ typedef struct { PyObject_VAR_HEAD PyObject **data; + int mark_set; /* is MARK set? */ + Py_ssize_t fence; /* position of top MARK or 0 */ Py_ssize_t allocated; /* number of slots in data allocated */ } Pdata; @@ -412,6 +406,8 @@ if (!(self = PyObject_New(Pdata, &Pdata_Type))) return NULL; Py_SIZE(self) = 0; + self->mark_set = 0; + self->fence = 0; self->allocated = 8; self->data = PyMem_MALLOC(self->allocated * sizeof(PyObject *)); if (self->data) @@ -429,8 +425,7 @@ { Py_ssize_t i = Py_SIZE(self); - if (clearto < 0) - return stack_underflow(); + assert(clearto >= self->fence); if (clearto >= i) return 0; @@ -466,6 +461,17 @@ return -1; } +static int +Pdata_stack_underflow(Pdata *self) +{ + PickleState *st = _Pickle_GetGlobalState(); + PyErr_SetString(st->UnpicklingError, + self->mark_set ? + "unexpected MARK found" : + "unpickling stack underflow"); + return -1; +} + /* D is a Pdata*. Pop the topmost element and store it into V, which * must be an lvalue holding PyObject*. On stack underflow, UnpicklingError * is raised and V is set to NULL. 
@@ -473,9 +479,8 @@ static PyObject * Pdata_pop(Pdata *self) { - if (Py_SIZE(self) == 0) { - PickleState *st = _Pickle_GetGlobalState(); - PyErr_SetString(st->UnpicklingError, "bad pickle data"); + if (Py_SIZE(self) <= self->fence) { + Pdata_stack_underflow(self); return NULL; } return self->data[--Py_SIZE(self)]; @@ -507,6 +512,10 @@ PyObject *tuple; Py_ssize_t len, i, j; + if (start < self->fence) { + Pdata_stack_underflow(self); + return NULL; + } len = Py_SIZE(self) - start; tuple = PyTuple_New(len); if (tuple == NULL) @@ -4585,13 +4594,19 @@ static Py_ssize_t marker(UnpicklerObject *self) { - PickleState *st = _Pickle_GetGlobalState(); + Py_ssize_t mark; + if (self->num_marks < 1) { + PickleState *st = _Pickle_GetGlobalState(); PyErr_SetString(st->UnpicklingError, "could not find MARK"); return -1; } - return self->marks[--self->num_marks]; + mark = self->marks[--self->num_marks]; + self->stack->mark_set = self->num_marks != 0; + self->stack->fence = self->num_marks ? + self->marks[self->num_marks - 1] : 0; + return mark; } static int @@ -5052,7 +5067,7 @@ PyObject *tuple; if (Py_SIZE(self->stack) < len) - return stack_underflow(); + return Pdata_stack_underflow(self->stack); tuple = Pdata_poptuple(self->stack, Py_SIZE(self->stack) - len); if (tuple == NULL) @@ -5134,6 +5149,12 @@ if ((dict = PyDict_New()) == NULL) return -1; + if ((j - i) % 2 != 0) { + PickleState *st = _Pickle_GetGlobalState(); + PyErr_SetString(st->UnpicklingError, "odd number of items for DICT"); + return -1; + } + for (k = i + 1; k < j; k += 2) { key = self->stack->data[k - 1]; value = self->stack->data[k]; @@ -5201,7 +5222,7 @@ return -1; if (Py_SIZE(self->stack) - i < 1) - return stack_underflow(); + return Pdata_stack_underflow(self->stack); args = Pdata_poptuple(self->stack, i + 1); if (args == NULL) @@ -5518,12 +5539,15 @@ */ if (self->num_marks > 0 && self->marks[self->num_marks - 1] == len) { self->num_marks--; - } else if (len > 0) { + self->stack->mark_set = self->num_marks != 0; + self->stack->fence = self->num_marks ? 
+ self->marks[self->num_marks - 1] : 0; + } else if (len <= self->stack->fence) + return Pdata_stack_underflow(self->stack); + else { len--; Py_DECREF(self->stack->data[len]); Py_SIZE(self->stack) = len; - } else { - return stack_underflow(); } return 0; } @@ -5545,10 +5569,10 @@ load_dup(UnpicklerObject *self) { PyObject *last; - Py_ssize_t len; - - if ((len = Py_SIZE(self->stack)) <= 0) - return stack_underflow(); + Py_ssize_t len = Py_SIZE(self->stack); + + if (len <= self->stack->fence) + return Pdata_stack_underflow(self->stack); last = self->stack->data[len - 1]; PDATA_APPEND(self->stack, last, -1); return 0; @@ -5731,8 +5755,8 @@ return -1; if (len < 2) return bad_readline(); - if (Py_SIZE(self->stack) <= 0) - return stack_underflow(); + if (Py_SIZE(self->stack) <= self->stack->fence) + return Pdata_stack_underflow(self->stack); value = self->stack->data[Py_SIZE(self->stack) - 1]; key = PyLong_FromString(s, NULL, 10); @@ -5760,8 +5784,8 @@ if (_Unpickler_Read(self, &s, 1) < 0) return -1; - if (Py_SIZE(self->stack) <= 0) - return stack_underflow(); + if (Py_SIZE(self->stack) <= self->stack->fence) + return Pdata_stack_underflow(self->stack); value = self->stack->data[Py_SIZE(self->stack) - 1]; idx = Py_CHARMASK(s[0]); @@ -5779,8 +5803,8 @@ if (_Unpickler_Read(self, &s, 4) < 0) return -1; - if (Py_SIZE(self->stack) <= 0) - return stack_underflow(); + if (Py_SIZE(self->stack) <= self->stack->fence) + return Pdata_stack_underflow(self->stack); value = self->stack->data[Py_SIZE(self->stack) - 1]; idx = calc_binsize(s, 4); @@ -5798,8 +5822,8 @@ { PyObject *value; - if (Py_SIZE(self->stack) <= 0) - return stack_underflow(); + if (Py_SIZE(self->stack) <= self->stack->fence) + return Pdata_stack_underflow(self->stack); value = self->stack->data[Py_SIZE(self->stack) - 1]; return _Unpickler_MemoPut(self, self->memo_len, value); @@ -5813,8 +5837,8 @@ Py_ssize_t len, i; len = Py_SIZE(self->stack); - if (x > len || x <= 0) - return stack_underflow(); + if (x > len || x <= self->stack->fence) + return Pdata_stack_underflow(self->stack); if (len == x) /* nothing to do */ return 0; @@ -5863,8 +5887,8 @@ static int load_append(UnpicklerObject *self) { - if (Py_SIZE(self->stack) - 1 <= 0) - return stack_underflow(); + if (Py_SIZE(self->stack) - 1 <= self->stack->fence) + return Pdata_stack_underflow(self->stack); return do_append(self, Py_SIZE(self->stack) - 1); } @@ -5886,8 +5910,8 @@ int status = 0; len = Py_SIZE(self->stack); - if (x > len || x <= 0) - return stack_underflow(); + if (x > len || x <= self->stack->fence) + return Pdata_stack_underflow(self->stack); if (len == x) /* nothing to do */ return 0; if ((len - x) % 2 != 0) { @@ -5940,8 +5964,8 @@ if (mark < 0) return -1; len = Py_SIZE(self->stack); - if (mark > len || mark <= 0) - return stack_underflow(); + if (mark > len || mark <= self->stack->fence) + return Pdata_stack_underflow(self->stack); if (len == mark) /* nothing to do */ return 0; @@ -5996,8 +6020,8 @@ /* Stack is ... instance, state. We want to leave instance at * the stack top, possibly mutated via instance.__setstate__(state). 
*/ - if (Py_SIZE(self->stack) < 2) - return stack_underflow(); + if (Py_SIZE(self->stack) - 2 < self->stack->fence) + return Pdata_stack_underflow(self->stack); PDATA_POP(self->stack, state); if (state == NULL) @@ -6133,7 +6157,8 @@ self->marks_size = (Py_ssize_t)alloc; } - self->marks[self->num_marks++] = Py_SIZE(self->stack); + self->stack->mark_set = 1; + self->marks[self->num_marks++] = self->stack->fence = Py_SIZE(self->stack); return 0; } @@ -6216,6 +6241,8 @@ char *s = NULL; self->num_marks = 0; + self->stack->mark_set = 0; + self->stack->fence = 0; self->proto = 0; if (Py_SIZE(self->stack)) Pdata_clear(self->stack, 0); -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sun Dec 6 17:13:46 2015 From: python-checkins at python.org (serhiy.storchaka) Date: Sun, 06 Dec 2015 22:13:46 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAobWVyZ2UgMy40IC0+IDMuNSk6?= =?utf-8?q?_Issue25814=3A_Propagate_all_errors_from_custom_XML_parser_hand?= =?utf-8?q?lers?= Message-ID: <20151206221345.21235.78839@psf.io> https://hg.python.org/cpython/rev/0d1bbfe8fd09 changeset: 99482:0d1bbfe8fd09 branch: 3.5 parent: 99477:e9bf5803b716 parent: 99480:2df330606cd0 user: Serhiy Storchaka date: Sun Dec 06 23:54:28 2015 +0200 summary: Issue25814: Propagate all errors from custom XML parser handlers in ElementTree.iterparse(). files: Modules/_elementtree.c | 127 ++++++++-------------------- 1 files changed, 39 insertions(+), 88 deletions(-) diff --git a/Modules/_elementtree.c b/Modules/_elementtree.c --- a/Modules/_elementtree.c +++ b/Modules/_elementtree.c @@ -2452,6 +2452,23 @@ } } +LOCAL(int) +treebuilder_append_event(TreeBuilderObject *self, PyObject *action, + PyObject *node) +{ + if (action != NULL) { + PyObject *res = PyTuple_Pack(2, action, node); + if (res == NULL) + return -1; + if (PyList_Append(self->events, res) < 0) { + Py_DECREF(res); + return -1; + } + Py_DECREF(res); + } + return 0; +} + /* -------------------------------------------------------------------- */ /* handlers */ @@ -2519,16 +2536,8 @@ Py_INCREF(node); self->last = node; - if (self->start_event_obj) { - PyObject* res; - PyObject* action = self->start_event_obj; - res = PyTuple_Pack(2, action, node); - if (res) { - PyList_Append(self->events, res); - Py_DECREF(res); - } else - PyErr_Clear(); /* FIXME: propagate error */ - } + if (treebuilder_append_event(self, self->start_event_obj, node) < 0) + goto error; return node; @@ -2608,65 +2617,13 @@ self->last = self->this; self->this = item; - if (self->end_event_obj) { - PyObject* res; - PyObject* action = self->end_event_obj; - PyObject* node = (PyObject*) self->last; - res = PyTuple_Pack(2, action, node); - if (res) { - PyList_Append(self->events, res); - Py_DECREF(res); - } else - PyErr_Clear(); /* FIXME: propagate error */ - } + if (treebuilder_append_event(self, self->end_event_obj, self->last) < 0) + return NULL; Py_INCREF(self->last); return (PyObject*) self->last; } -LOCAL(void) -treebuilder_handle_namespace(TreeBuilderObject* self, int start, - PyObject *prefix, PyObject *uri) -{ - PyObject* res; - PyObject* action; - PyObject* parcel; - - if (!self->events) - return; - - if (start) { - if (!self->start_ns_event_obj) - return; - action = self->start_ns_event_obj; - parcel = Py_BuildValue("OO", prefix, uri); - if (!parcel) - return; - Py_INCREF(action); - } else { - if (!self->end_ns_event_obj) - return; - action = self->end_ns_event_obj; - Py_INCREF(action); - parcel = Py_None; - Py_INCREF(parcel); - } - - res = PyTuple_New(2); - - if (res) { - 
PyTuple_SET_ITEM(res, 0, action); - PyTuple_SET_ITEM(res, 1, parcel); - PyList_Append(self->events, res); - Py_DECREF(res); - } - else { - Py_DECREF(action); - Py_DECREF(parcel); - PyErr_Clear(); /* FIXME: propagate error */ - } -} - /* -------------------------------------------------------------------- */ /* methods (in alphabetical order) */ @@ -3078,45 +3035,39 @@ expat_start_ns_handler(XMLParserObject* self, const XML_Char* prefix, const XML_Char *uri) { - PyObject* sprefix = NULL; - PyObject* suri = NULL; + TreeBuilderObject *target = (TreeBuilderObject*) self->target; + PyObject *parcel; if (PyErr_Occurred()) return; - if (uri) - suri = PyUnicode_DecodeUTF8(uri, strlen(uri), "strict"); - else - suri = PyUnicode_FromString(""); - if (!suri) + if (!target->events || !target->start_ns_event_obj) return; - if (prefix) - sprefix = PyUnicode_DecodeUTF8(prefix, strlen(prefix), "strict"); - else - sprefix = PyUnicode_FromString(""); - if (!sprefix) { - Py_DECREF(suri); + if (!uri) + uri = ""; + if (!prefix) + prefix = ""; + + parcel = Py_BuildValue("ss", prefix, uri); + if (!parcel) return; - } - - treebuilder_handle_namespace( - (TreeBuilderObject*) self->target, 1, sprefix, suri - ); - - Py_DECREF(sprefix); - Py_DECREF(suri); + treebuilder_append_event(target, target->start_ns_event_obj, parcel); + Py_DECREF(parcel); } static void expat_end_ns_handler(XMLParserObject* self, const XML_Char* prefix_in) { + TreeBuilderObject *target = (TreeBuilderObject*) self->target; + if (PyErr_Occurred()) return; - treebuilder_handle_namespace( - (TreeBuilderObject*) self->target, 0, NULL, NULL - ); + if (!target->events) + return; + + treebuilder_append_event(target, target->end_ns_event_obj, Py_None); } static void -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sun Dec 6 17:13:46 2015 From: python-checkins at python.org (serhiy.storchaka) Date: Sun, 06 Dec 2015 22:13:46 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy40KTogSXNzdWUyNTgxNDog?= =?utf-8?q?Propagate_all_errors_from_custom_XML_parser_handlers?= Message-ID: <20151206221345.33673.61414@psf.io> https://hg.python.org/cpython/rev/2df330606cd0 changeset: 99480:2df330606cd0 branch: 3.4 parent: 99473:51a0dd6f7c73 user: Serhiy Storchaka date: Sun Dec 06 23:51:44 2015 +0200 summary: Issue25814: Propagate all errors from custom XML parser handlers in ElementTree.iterparse(). 
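For context, the handlers touched in this changeset feed the namespace events that iterparse() exposes. A minimal sketch of that event stream (not part of the patch; the tag and namespace names are invented for illustration):

    import io
    import xml.etree.ElementTree as ET

    data = b'<root xmlns:h="http://example.com/ns"><h:item>text</h:item></root>'
    # "start-ns" events carry a (prefix, uri) pair; "end-ns" events carry None.
    for event, payload in ET.iterparse(io.BytesIO(data),
                                       events=("start-ns", "end-ns", "end")):
        print(event, payload)

With this fix, an error raised while queuing one of these events propagates to the caller instead of being discarded by the old PyErr_Clear() /* FIXME: propagate error */ path.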
files: Modules/_elementtree.c | 127 ++++++++-------------------- 1 files changed, 39 insertions(+), 88 deletions(-) diff --git a/Modules/_elementtree.c b/Modules/_elementtree.c --- a/Modules/_elementtree.c +++ b/Modules/_elementtree.c @@ -2448,6 +2448,23 @@ } } +LOCAL(int) +treebuilder_append_event(TreeBuilderObject *self, PyObject *action, + PyObject *node) +{ + if (action != NULL) { + PyObject *res = PyTuple_Pack(2, action, node); + if (res == NULL) + return -1; + if (PyList_Append(self->events, res) < 0) { + Py_DECREF(res); + return -1; + } + Py_DECREF(res); + } + return 0; +} + /* -------------------------------------------------------------------- */ /* handlers */ @@ -2515,16 +2532,8 @@ Py_INCREF(node); self->last = node; - if (self->start_event_obj) { - PyObject* res; - PyObject* action = self->start_event_obj; - res = PyTuple_Pack(2, action, node); - if (res) { - PyList_Append(self->events, res); - Py_DECREF(res); - } else - PyErr_Clear(); /* FIXME: propagate error */ - } + if (treebuilder_append_event(self, self->start_event_obj, node) < 0) + goto error; return node; @@ -2604,65 +2613,13 @@ self->last = self->this; self->this = item; - if (self->end_event_obj) { - PyObject* res; - PyObject* action = self->end_event_obj; - PyObject* node = (PyObject*) self->last; - res = PyTuple_Pack(2, action, node); - if (res) { - PyList_Append(self->events, res); - Py_DECREF(res); - } else - PyErr_Clear(); /* FIXME: propagate error */ - } + if (treebuilder_append_event(self, self->end_event_obj, self->last) < 0) + return NULL; Py_INCREF(self->last); return (PyObject*) self->last; } -LOCAL(void) -treebuilder_handle_namespace(TreeBuilderObject* self, int start, - PyObject *prefix, PyObject *uri) -{ - PyObject* res; - PyObject* action; - PyObject* parcel; - - if (!self->events) - return; - - if (start) { - if (!self->start_ns_event_obj) - return; - action = self->start_ns_event_obj; - parcel = Py_BuildValue("OO", prefix, uri); - if (!parcel) - return; - Py_INCREF(action); - } else { - if (!self->end_ns_event_obj) - return; - action = self->end_ns_event_obj; - Py_INCREF(action); - parcel = Py_None; - Py_INCREF(parcel); - } - - res = PyTuple_New(2); - - if (res) { - PyTuple_SET_ITEM(res, 0, action); - PyTuple_SET_ITEM(res, 1, parcel); - PyList_Append(self->events, res); - Py_DECREF(res); - } - else { - Py_DECREF(action); - Py_DECREF(parcel); - PyErr_Clear(); /* FIXME: propagate error */ - } -} - /* -------------------------------------------------------------------- */ /* methods (in alphabetical order) */ @@ -3100,45 +3057,39 @@ expat_start_ns_handler(XMLParserObject* self, const XML_Char* prefix, const XML_Char *uri) { - PyObject* sprefix = NULL; - PyObject* suri = NULL; + TreeBuilderObject *target = (TreeBuilderObject*) self->target; + PyObject *parcel; if (PyErr_Occurred()) return; - if (uri) - suri = PyUnicode_DecodeUTF8(uri, strlen(uri), "strict"); - else - suri = PyUnicode_FromString(""); - if (!suri) + if (!target->events || !target->start_ns_event_obj) return; - if (prefix) - sprefix = PyUnicode_DecodeUTF8(prefix, strlen(prefix), "strict"); - else - sprefix = PyUnicode_FromString(""); - if (!sprefix) { - Py_DECREF(suri); + if (!uri) + uri = ""; + if (!prefix) + prefix = ""; + + parcel = Py_BuildValue("ss", prefix, uri); + if (!parcel) return; - } - - treebuilder_handle_namespace( - (TreeBuilderObject*) self->target, 1, sprefix, suri - ); - - Py_DECREF(sprefix); - Py_DECREF(suri); + treebuilder_append_event(target, target->start_ns_event_obj, parcel); + Py_DECREF(parcel); } static void 
expat_end_ns_handler(XMLParserObject* self, const XML_Char* prefix_in) { + TreeBuilderObject *target = (TreeBuilderObject*) self->target; + if (PyErr_Occurred()) return; - treebuilder_handle_namespace( - (TreeBuilderObject*) self->target, 0, NULL, NULL - ); + if (!target->events) + return; + + treebuilder_append_event(target, target->end_ns_event_obj, Py_None); } static void -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sun Dec 6 17:13:46 2015 From: python-checkins at python.org (serhiy.storchaka) Date: Sun, 06 Dec 2015 22:13:46 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMi43KTogSXNzdWUyNTgxNDog?= =?utf-8?q?Propagate_all_errors_from_custom_XML_parser_handlers?= Message-ID: <20151206221345.21233.43634@psf.io> https://hg.python.org/cpython/rev/de9ef294a3ef changeset: 99481:de9ef294a3ef branch: 2.7 parent: 99476:8ddac065cd3b user: Serhiy Storchaka date: Sun Dec 06 23:51:53 2015 +0200 summary: Issue25814: Propagate all errors from custom XML parser handlers in ElementTree.iterparse(). files: Modules/_elementtree.c | 118 +++++++++++----------------- 1 files changed, 45 insertions(+), 73 deletions(-) diff --git a/Modules/_elementtree.c b/Modules/_elementtree.c --- a/Modules/_elementtree.c +++ b/Modules/_elementtree.c @@ -1720,6 +1720,23 @@ PyObject_Del(self); } +LOCAL(int) +treebuilder_append_event(TreeBuilderObject *self, PyObject *action, + PyObject *node) +{ + if (action != NULL) { + PyObject *res = PyTuple_Pack(2, action, node); + if (res == NULL) + return -1; + if (PyList_Append(self->events, res) < 0) { + Py_DECREF(res); + return -1; + } + Py_DECREF(res); + } + return 0; +} + /* -------------------------------------------------------------------- */ /* handlers */ @@ -1791,18 +1808,8 @@ Py_INCREF(node); self->last = (ElementObject*) node; - if (self->start_event_obj) { - PyObject* res; - PyObject* action = self->start_event_obj; - res = PyTuple_New(2); - if (res) { - Py_INCREF(action); PyTuple_SET_ITEM(res, 0, (PyObject*) action); - Py_INCREF(node); PyTuple_SET_ITEM(res, 1, (PyObject*) node); - PyList_Append(self->events, res); - Py_DECREF(res); - } else - PyErr_Clear(); /* FIXME: propagate error */ - } + if (treebuilder_append_event(self, self->start_event_obj, node) < 0) + goto error; return node; @@ -1885,63 +1892,13 @@ self->last = (ElementObject*) self->this; self->this = (ElementObject*) item; - if (self->end_event_obj) { - PyObject* res; - PyObject* action = self->end_event_obj; - PyObject* node = (PyObject*) self->last; - res = PyTuple_New(2); - if (res) { - Py_INCREF(action); PyTuple_SET_ITEM(res, 0, (PyObject*) action); - Py_INCREF(node); PyTuple_SET_ITEM(res, 1, (PyObject*) node); - PyList_Append(self->events, res); - Py_DECREF(res); - } else - PyErr_Clear(); /* FIXME: propagate error */ - } + if (treebuilder_append_event(self, self->end_event_obj, (PyObject*)self->last) < 0) + return NULL; Py_INCREF(self->last); return (PyObject*) self->last; } -LOCAL(void) -treebuilder_handle_namespace(TreeBuilderObject* self, int start, - PyObject *prefix, PyObject *uri) -{ - PyObject* res; - PyObject* action; - PyObject* parcel; - - if (!self->events) - return; - - if (start) { - if (!self->start_ns_event_obj) - return; - action = self->start_ns_event_obj; - parcel = Py_BuildValue("OO", prefix, uri); - if (!parcel) - return; - Py_INCREF(action); - } else { - if (!self->end_ns_event_obj) - return; - action = self->end_ns_event_obj; - Py_INCREF(action); - parcel = Py_None; - Py_INCREF(parcel); - } - - res = PyTuple_New(2); - - if (res) { - 
PyTuple_SET_ITEM(res, 0, action); - PyTuple_SET_ITEM(res, 1, parcel); - PyList_Append(self->events, res); - Py_DECREF(res); - } else - PyErr_Clear(); /* FIXME: propagate error */ -} - /* -------------------------------------------------------------------- */ /* methods (in alphabetical order) */ @@ -2367,8 +2324,16 @@ expat_start_ns_handler(XMLParserObject* self, const XML_Char* prefix, const XML_Char *uri) { - PyObject* sprefix = NULL; - PyObject* suri = NULL; + TreeBuilderObject *target = (TreeBuilderObject*) self->target; + PyObject *parcel; + PyObject *sprefix = NULL; + PyObject *suri = NULL; + + if (PyErr_Occurred()) + return; + + if (!target->events || !target->start_ns_event_obj) + return; if (uri) suri = makestring(uri, strlen(uri)); @@ -2386,20 +2351,27 @@ return; } - treebuilder_handle_namespace( - (TreeBuilderObject*) self->target, 1, sprefix, suri - ); - + parcel = PyTuple_Pack(2, sprefix, suri); Py_DECREF(sprefix); Py_DECREF(suri); + if (!parcel) + return; + treebuilder_append_event(target, target->start_ns_event_obj, parcel); + Py_DECREF(parcel); } static void expat_end_ns_handler(XMLParserObject* self, const XML_Char* prefix_in) { - treebuilder_handle_namespace( - (TreeBuilderObject*) self->target, 0, NULL, NULL - ); + TreeBuilderObject *target = (TreeBuilderObject*) self->target; + + if (PyErr_Occurred()) + return; + + if (!target->events) + return; + + treebuilder_append_event(target, target->end_ns_event_obj, Py_None); } static void -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sun Dec 6 17:13:46 2015 From: python-checkins at python.org (serhiy.storchaka) Date: Sun, 06 Dec 2015 22:13:46 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E5_-=3E_default?= =?utf-8?q?=29=3A_Issue25814=3A_Propagate_all_errors_from_custom_XML_parse?= =?utf-8?q?r_handlers?= Message-ID: <20151206221345.21239.49753@psf.io> https://hg.python.org/cpython/rev/2cf16918b632 changeset: 99483:2cf16918b632 parent: 99479:5c670af0100f parent: 99482:0d1bbfe8fd09 user: Serhiy Storchaka date: Sun Dec 06 23:55:05 2015 +0200 summary: Issue25814: Propagate all errors from custom XML parser handlers in ElementTree.iterparse(). 
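The same event plumbing also backs the incremental XMLPullParser API that the pure-Python iterparse() drives. A small usage sketch (illustrative only; the chunk boundaries and tag names are arbitrary):

    import xml.etree.ElementTree as ET

    parser = ET.XMLPullParser(events=("start", "end"))
    parser.feed('<log><entry>first</entry>')
    for event, elem in parser.read_events():
        print(event, elem.tag)   # start log, start entry, end entry
    parser.feed('</log>')
    for event, elem in parser.read_events():
        print(event, elem.tag)   # end log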
files: Modules/_elementtree.c | 127 ++++++++-------------------- 1 files changed, 39 insertions(+), 88 deletions(-) diff --git a/Modules/_elementtree.c b/Modules/_elementtree.c --- a/Modules/_elementtree.c +++ b/Modules/_elementtree.c @@ -2450,6 +2450,23 @@ } } +LOCAL(int) +treebuilder_append_event(TreeBuilderObject *self, PyObject *action, + PyObject *node) +{ + if (action != NULL) { + PyObject *res = PyTuple_Pack(2, action, node); + if (res == NULL) + return -1; + if (PyList_Append(self->events, res) < 0) { + Py_DECREF(res); + return -1; + } + Py_DECREF(res); + } + return 0; +} + /* -------------------------------------------------------------------- */ /* handlers */ @@ -2517,16 +2534,8 @@ Py_INCREF(node); self->last = node; - if (self->start_event_obj) { - PyObject* res; - PyObject* action = self->start_event_obj; - res = PyTuple_Pack(2, action, node); - if (res) { - PyList_Append(self->events, res); - Py_DECREF(res); - } else - PyErr_Clear(); /* FIXME: propagate error */ - } + if (treebuilder_append_event(self, self->start_event_obj, node) < 0) + goto error; return node; @@ -2606,65 +2615,13 @@ self->last = self->this; self->this = item; - if (self->end_event_obj) { - PyObject* res; - PyObject* action = self->end_event_obj; - PyObject* node = (PyObject*) self->last; - res = PyTuple_Pack(2, action, node); - if (res) { - PyList_Append(self->events, res); - Py_DECREF(res); - } else - PyErr_Clear(); /* FIXME: propagate error */ - } + if (treebuilder_append_event(self, self->end_event_obj, self->last) < 0) + return NULL; Py_INCREF(self->last); return (PyObject*) self->last; } -LOCAL(void) -treebuilder_handle_namespace(TreeBuilderObject* self, int start, - PyObject *prefix, PyObject *uri) -{ - PyObject* res; - PyObject* action; - PyObject* parcel; - - if (!self->events) - return; - - if (start) { - if (!self->start_ns_event_obj) - return; - action = self->start_ns_event_obj; - parcel = Py_BuildValue("OO", prefix, uri); - if (!parcel) - return; - Py_INCREF(action); - } else { - if (!self->end_ns_event_obj) - return; - action = self->end_ns_event_obj; - Py_INCREF(action); - parcel = Py_None; - Py_INCREF(parcel); - } - - res = PyTuple_New(2); - - if (res) { - PyTuple_SET_ITEM(res, 0, action); - PyTuple_SET_ITEM(res, 1, parcel); - PyList_Append(self->events, res); - Py_DECREF(res); - } - else { - Py_DECREF(action); - Py_DECREF(parcel); - PyErr_Clear(); /* FIXME: propagate error */ - } -} - /* -------------------------------------------------------------------- */ /* methods (in alphabetical order) */ @@ -3076,45 +3033,39 @@ expat_start_ns_handler(XMLParserObject* self, const XML_Char* prefix, const XML_Char *uri) { - PyObject* sprefix = NULL; - PyObject* suri = NULL; + TreeBuilderObject *target = (TreeBuilderObject*) self->target; + PyObject *parcel; if (PyErr_Occurred()) return; - if (uri) - suri = PyUnicode_DecodeUTF8(uri, strlen(uri), "strict"); - else - suri = PyUnicode_FromString(""); - if (!suri) + if (!target->events || !target->start_ns_event_obj) return; - if (prefix) - sprefix = PyUnicode_DecodeUTF8(prefix, strlen(prefix), "strict"); - else - sprefix = PyUnicode_FromString(""); - if (!sprefix) { - Py_DECREF(suri); + if (!uri) + uri = ""; + if (!prefix) + prefix = ""; + + parcel = Py_BuildValue("ss", prefix, uri); + if (!parcel) return; - } - - treebuilder_handle_namespace( - (TreeBuilderObject*) self->target, 1, sprefix, suri - ); - - Py_DECREF(sprefix); - Py_DECREF(suri); + treebuilder_append_event(target, target->start_ns_event_obj, parcel); + Py_DECREF(parcel); } static void 
expat_end_ns_handler(XMLParserObject* self, const XML_Char* prefix_in) { + TreeBuilderObject *target = (TreeBuilderObject*) self->target; + if (PyErr_Occurred()) return; - treebuilder_handle_namespace( - (TreeBuilderObject*) self->target, 0, NULL, NULL - ); + if (!target->events) + return; + + treebuilder_append_event(target, target->end_ns_event_obj, Py_None); } static void -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Sun Dec 6 19:31:36 2015 From: python-checkins at python.org (serhiy.storchaka) Date: Mon, 07 Dec 2015 00:31:36 +0000 Subject: [Python-checkins] =?utf-8?q?cpython=3A_Issue_=2325638=3A_Optimize?= =?utf-8?q?d_ElementTree=2Eiterparse=28=29=3B_it_is_now_2x_faster=2E?= Message-ID: <20151207003135.33677.80229@psf.io> https://hg.python.org/cpython/rev/dd67c8c53aea changeset: 99484:dd67c8c53aea user: Serhiy Storchaka date: Mon Dec 07 02:31:11 2015 +0200 summary: Issue #25638: Optimized ElementTree.iterparse(); it is now 2x faster. ElementTree.XMLParser._setevents now accepts any objects with the append method, not just a list. files: Lib/xml/etree/ElementTree.py | 92 +++++++------------- Misc/NEWS | 2 + Modules/_elementtree.c | 35 ++++--- Modules/clinic/_elementtree.c.h | 7 +- 4 files changed, 56 insertions(+), 80 deletions(-) diff --git a/Lib/xml/etree/ElementTree.py b/Lib/xml/etree/ElementTree.py --- a/Lib/xml/etree/ElementTree.py +++ b/Lib/xml/etree/ElementTree.py @@ -95,6 +95,7 @@ import re import warnings import io +import collections import contextlib from . import ElementPath @@ -1198,16 +1199,37 @@ Returns an iterator providing (event, elem) pairs. """ + # Use the internal, undocumented _parser argument for now; When the + # parser argument of iterparse is removed, this can be killed. + pullparser = XMLPullParser(events=events, _parser=parser) + def iterator(): + try: + while True: + yield from pullparser.read_events() + # load event buffer + data = source.read(16 * 1024) + if not data: + break + pullparser.feed(data) + root = pullparser._close_and_return_root() + yield from pullparser.read_events() + it.root = root + finally: + if close_source: + source.close() + + class IterParseIterator(collections.Iterator): + __next__ = iterator().__next__ + it = IterParseIterator() + it.root = None + del iterator, IterParseIterator + close_source = False if not hasattr(source, "read"): source = open(source, "rb") close_source = True - try: - return _IterParseIterator(source, events, parser, close_source) - except: - if close_source: - source.close() - raise + + return it class XMLPullParser: @@ -1217,9 +1239,7 @@ # upon in user code. It will be removed in a future release. # See http://bugs.python.org/issue17741 for more details. - # _elementtree.c expects a list, not a deque - self._events_queue = [] - self._index = 0 + self._events_queue = collections.deque() self._parser = _parser or XMLParser(target=TreeBuilder()) # wire up the parser for event reporting if events is None: @@ -1257,64 +1277,14 @@ retrieved from the iterator. 
""" events = self._events_queue - while True: - index = self._index - try: - event = events[self._index] - # Avoid retaining references to past events - events[self._index] = None - except IndexError: - break - index += 1 - # Compact the list in a O(1) amortized fashion - # As noted above, _elementree.c needs a list, not a deque - if index * 2 >= len(events): - events[:index] = [] - self._index = 0 - else: - self._index = index + while events: + event = events.popleft() if isinstance(event, Exception): raise event else: yield event -class _IterParseIterator: - - def __init__(self, source, events, parser, close_source=False): - # Use the internal, undocumented _parser argument for now; When the - # parser argument of iterparse is removed, this can be killed. - self._parser = XMLPullParser(events=events, _parser=parser) - self._file = source - self._close_file = close_source - self.root = self._root = None - - def __next__(self): - try: - while 1: - for event in self._parser.read_events(): - return event - if self._parser._parser is None: - break - # load event buffer - data = self._file.read(16 * 1024) - if data: - self._parser.feed(data) - else: - self._root = self._parser._close_and_return_root() - self.root = self._root - except: - if self._close_file: - self._file.close() - raise - if self._close_file: - self._file.close() - raise StopIteration - - def __iter__(self): - return self - - def XML(text, parser=None): """Parse XML document from string constant. diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -109,6 +109,8 @@ Library ------- +- Issue #25638: Optimized ElementTree.iterparse(); it is now 2x faster. + - Issue #25761: Improved detecting errors in broken pickle data. - Issue #25717: Restore the previous behaviour of tolerating most fstat() diff --git a/Modules/_elementtree.c b/Modules/_elementtree.c --- a/Modules/_elementtree.c +++ b/Modules/_elementtree.c @@ -2289,7 +2289,7 @@ PyObject *element_factory; /* element tracing */ - PyObject *events; /* list of events, or NULL if not collecting */ + PyObject *events_append; /* the append method of the list of events, or NULL */ PyObject *start_event_obj; /* event objects (NULL to ignore) */ PyObject *end_event_obj; PyObject *start_ns_event_obj; @@ -2324,7 +2324,7 @@ } t->index = 0; - t->events = NULL; + t->events_append = NULL; t->start_event_obj = t->end_event_obj = NULL; t->start_ns_event_obj = t->end_ns_event_obj = NULL; } @@ -2374,7 +2374,7 @@ Py_CLEAR(self->start_ns_event_obj); Py_CLEAR(self->end_event_obj); Py_CLEAR(self->start_event_obj); - Py_CLEAR(self->events); + Py_CLEAR(self->events_append); Py_CLEAR(self->stack); Py_CLEAR(self->data); Py_CLEAR(self->last); @@ -2455,13 +2455,14 @@ PyObject *node) { if (action != NULL) { - PyObject *res = PyTuple_Pack(2, action, node); + PyObject *res; + PyObject *event = PyTuple_Pack(2, action, node); + if (event == NULL) + return -1; + res = PyObject_CallFunctionObjArgs(self->events_append, event, NULL); + Py_DECREF(event); if (res == NULL) return -1; - if (PyList_Append(self->events, res) < 0) { - Py_DECREF(res); - return -1; - } Py_DECREF(res); } return 0; @@ -3039,7 +3040,7 @@ if (PyErr_Occurred()) return; - if (!target->events || !target->start_ns_event_obj) + if (!target->events_append || !target->start_ns_event_obj) return; if (!uri) @@ -3062,7 +3063,7 @@ if (PyErr_Occurred()) return; - if (!target->events) + if (!target->events_append) return; treebuilder_append_event(target, target->end_ns_event_obj, Py_None); @@ -3551,7 +3552,7 @@ /*[clinic input] 
_elementtree.XMLParser._setevents - events_queue: object(subclass_of='&PyList_Type') + events_queue: object events_to_report: object = None / @@ -3561,12 +3562,12 @@ _elementtree_XMLParser__setevents_impl(XMLParserObject *self, PyObject *events_queue, PyObject *events_to_report) -/*[clinic end generated code: output=1440092922b13ed1 input=59db9742910c6174]*/ +/*[clinic end generated code: output=1440092922b13ed1 input=abf90830a1c3b0fc]*/ { /* activate element event reporting */ Py_ssize_t i, seqlen; TreeBuilderObject *target; - PyObject *events_seq; + PyObject *events_append, *events_seq; if (!TreeBuilder_CheckExact(self->target)) { PyErr_SetString( @@ -3579,9 +3580,11 @@ target = (TreeBuilderObject*) self->target; - Py_INCREF(events_queue); - Py_XDECREF(target->events); - target->events = events_queue; + events_append = PyObject_GetAttrString(events_queue, "append"); + if (events_append == NULL) + return NULL; + Py_XDECREF(target->events_append); + target->events_append = events_append; /* clear out existing events */ Py_CLEAR(target->start_event_obj); diff --git a/Modules/clinic/_elementtree.c.h b/Modules/clinic/_elementtree.c.h --- a/Modules/clinic/_elementtree.c.h +++ b/Modules/clinic/_elementtree.c.h @@ -668,12 +668,13 @@ PyObject *events_queue; PyObject *events_to_report = Py_None; - if (!PyArg_ParseTuple(args, "O!|O:_setevents", - &PyList_Type, &events_queue, &events_to_report)) + if (!PyArg_UnpackTuple(args, "_setevents", + 1, 2, + &events_queue, &events_to_report)) goto exit; return_value = _elementtree_XMLParser__setevents_impl(self, events_queue, events_to_report); exit: return return_value; } -/*[clinic end generated code: output=25b8bf7e7f2151ca input=a9049054013a1b77]*/ +/*[clinic end generated code: output=19d94e2d2726d3aa input=a9049054013a1b77]*/ -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Mon Dec 7 01:16:04 2015 From: python-checkins at python.org (larry.hastings) Date: Mon, 07 Dec 2015 06:16:04 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=283=2E5=29=3A_Version_bump_f?= =?utf-8?b?b3IgMy41LjEgZmluYWwu?= Message-ID: <20151207061604.21231.74565@psf.io> https://hg.python.org/cpython/rev/37a07cee5969 changeset: 99489:37a07cee5969 branch: 3.5 tag: v3.5.1 user: Larry Hastings date: Sat Dec 05 17:05:23 2015 -0800 summary: Version bump for 3.5.1 final. files: Include/patchlevel.h | 6 +++--- Misc/NEWS | 3 ++- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/Include/patchlevel.h b/Include/patchlevel.h --- a/Include/patchlevel.h +++ b/Include/patchlevel.h @@ -19,11 +19,11 @@ #define PY_MAJOR_VERSION 3 #define PY_MINOR_VERSION 5 #define PY_MICRO_VERSION 1 -#define PY_RELEASE_LEVEL PY_RELEASE_LEVEL_GAMMA -#define PY_RELEASE_SERIAL 1 +#define PY_RELEASE_LEVEL PY_RELEASE_LEVEL_FINAL +#define PY_RELEASE_SERIAL 0 /* Version as a string */ -#define PY_VERSION "3.5.1rc1+" +#define PY_VERSION "3.5.1" /*--end constants--*/ /* Version as a single 4-byte hex number, e.g. 0x010502B2 == 1.5.2b2. diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -10,7 +10,8 @@ Core and Builtins ----------------- -- Issue #25709: Fixed problem with in-place string concatenation and utf-8 cache. +- Issue #25709: Fixed problem with in-place string concatenation and + utf-8 cache. 
Windows ------- -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Mon Dec 7 01:15:59 2015 From: python-checkins at python.org (larry.hastings) Date: Mon, 07 Dec 2015 06:15:59 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAoMy41KTogSXNzdWUgIzI1NzA5?= =?utf-8?q?=3A_Fixed_problem_with_in-place_string_concatenation_and_utf-8_?= =?utf-8?q?cache=2E?= Message-ID: <20151207061559.29621.15717@psf.io> https://hg.python.org/cpython/rev/376b100107ba changeset: 99487:376b100107ba branch: 3.5 user: Serhiy Storchaka date: Thu Dec 03 01:02:03 2015 +0200 summary: Issue #25709: Fixed problem with in-place string concatenation and utf-8 cache. files: Lib/test/test_unicode.py | 17 +++++++++++++++++ Misc/NEWS | 7 ++++++- Objects/unicodeobject.c | 5 +++++ 3 files changed, 28 insertions(+), 1 deletions(-) diff --git a/Lib/test/test_unicode.py b/Lib/test/test_unicode.py --- a/Lib/test/test_unicode.py +++ b/Lib/test/test_unicode.py @@ -2699,6 +2699,23 @@ self.assertTrue(astral >= bmp2) self.assertFalse(astral >= astral2) + @support.cpython_only + def test_pep393_utf8_caching_bug(self): + # Issue #25709: Problem with string concatenation and utf-8 cache + from _testcapi import getargs_s_hash + for k in 0x24, 0xa4, 0x20ac, 0x1f40d: + s = '' + for i in range(5): + # Due to CPython specific optimization the 's' string can be + # resized in-place. + s += chr(k) + # Parsing with the "s#" format code calls indirectly + # PyUnicode_AsUTF8AndSize() which creates the UTF-8 + # encoded string cached in the Unicode object. + self.assertEqual(getargs_s_hash(s), chr(k).encode() * (i + 1)) + # Check that the second call returns the same result + self.assertEqual(getargs_s_hash(s), chr(k).encode() * (i + 1)) + class StringModuleTest(unittest.TestCase): def test_formatter_parser(self): diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -10,9 +10,14 @@ Core and Builtins ----------------- -Library +- Issue #25709: Fixed problem with in-place string concatenation and utf-8 cache. + +Windows ------- +- Issue #25715: Python 3.5.1 installer shows wrong upgrade path and incorrect + logic for launcher detection. + What's New in Python 3.5.1 release candidate 1? =============================================== diff --git a/Objects/unicodeobject.c b/Objects/unicodeobject.c --- a/Objects/unicodeobject.c +++ b/Objects/unicodeobject.c @@ -722,6 +722,11 @@ } new_size = (struct_size + (length + 1) * char_size); + if (_PyUnicode_HAS_UTF8_MEMORY(unicode)) { + PyObject_DEL(_PyUnicode_UTF8(unicode)); + _PyUnicode_UTF8(unicode) = NULL; + _PyUnicode_UTF8_LENGTH(unicode) = 0; + } _Py_DEC_REFTOTAL; _Py_ForgetReference(unicode); -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Mon Dec 7 01:16:05 2015 From: python-checkins at python.org (larry.hastings) Date: Mon, 07 Dec 2015 06:16:05 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=283=2E5=29=3A_Post-release_f?= =?utf-8?q?ixups_for_Python_3=2E5=2E1=2E?= Message-ID: <20151207061605.29597.15993@psf.io> https://hg.python.org/cpython/rev/536f5be43262 changeset: 99491:536f5be43262 branch: 3.5 user: Larry Hastings date: Sun Dec 06 21:53:27 2015 -0800 summary: Post-release fixups for Python 3.5.1. 
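The patchlevel bump below is what surfaces in the interpreter's version strings at runtime; a quick illustrative check:

    import sys
    print(sys.version)        # begins with the PY_VERSION string, e.g. "3.5.1+"
    print(sys.version_info)   # sys.version_info(major=3, minor=5, micro=1, releaselevel='final', serial=0)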
files: Include/patchlevel.h | 2 +- Misc/NEWS | 12 ++++++++++++ 2 files changed, 13 insertions(+), 1 deletions(-) diff --git a/Include/patchlevel.h b/Include/patchlevel.h --- a/Include/patchlevel.h +++ b/Include/patchlevel.h @@ -23,7 +23,7 @@ #define PY_RELEASE_SERIAL 0 /* Version as a string */ -#define PY_VERSION "3.5.1" +#define PY_VERSION "3.5.1+" /*--end constants--*/ /* Version as a single 4-byte hex number, e.g. 0x010502B2 == 1.5.2b2. diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -2,6 +2,18 @@ Python News +++++++++++ +What's New in Python 3.5.2 release candidate 1? +=============================================== + +Release date: tba + +Core and Builtins +----------------- + +Library +------- + + What's New in Python 3.5.1 final? ================================= -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Mon Dec 7 01:16:00 2015 From: python-checkins at python.org (larry.hastings) Date: Mon, 07 Dec 2015 06:16:00 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAobWVyZ2UgMy41IC0+IDMuNSk6?= =?utf-8?q?_Merge=2E?= Message-ID: <20151207061559.33647.27959@psf.io> https://hg.python.org/cpython/rev/f668f41b378c changeset: 99486:f668f41b378c branch: 3.5 parent: 99302:afc0ad583808 parent: 99485:f278d374da80 user: Larry Hastings date: Sat Dec 05 16:49:19 2015 -0800 summary: Merge. files: Tools/msi/bundle/bundle.targets | 2 +- Tools/msi/uploadrelease.proj | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Tools/msi/bundle/bundle.targets b/Tools/msi/bundle/bundle.targets --- a/Tools/msi/bundle/bundle.targets +++ b/Tools/msi/bundle/bundle.targets @@ -18,7 +18,7 @@ $(DownloadUrlBase.TrimEnd(`/`))/{version}/{arch}{releasename}/{msi} - $(DefineConstants);DownloadUrl=$(DownloadUrl.Replace(`{version}`, `$(MajorVersionNumber).$(MinorVersionNumber).$(MicroVersionNumber)`).Replace(`{arch}`, `$(ArchName)`).Replace(`{releasename}`, `$(ReleaseName)`).Replace(`{msi}`, `{2}`)) + $(DefineConstants);DownloadUrl=$(DownloadUrl.Replace(`{version}`, `$(MajorVersionNumber).$(MinorVersionNumber).$(MicroVersionNumber)`).Replace(`{arch}`, `$(ArchName)`).Replace(`{releasename}`, `$(ReleaseLevelName)`).Replace(`{msi}`, `{2}`)) $(DefineConstants);DownloadUrl={2} diff --git a/Tools/msi/uploadrelease.proj b/Tools/msi/uploadrelease.proj --- a/Tools/msi/uploadrelease.proj +++ b/Tools/msi/uploadrelease.proj @@ -16,7 +16,7 @@ $(DownloadUrlBase.TrimEnd(`/`))/$(MajorVersionNumber).$(MinorVersionNumber).$(MicroVersionNumber) - $(DownloadUrl.TrimEnd(`/`)) + $(DownloadUrl.Replace(`{version}`, `$(MajorVersionNumber).$(MinorVersionNumber).$(MicroVersionNumber)`).Replace(`{arch}`, `$(ArchName)`).Replace(`{releasename}`, `$(ReleaseLevelName)`).Replace(`{msi}`, ``).TrimEnd(`/`)) -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Mon Dec 7 01:16:05 2015 From: python-checkins at python.org (larry.hastings) Date: Mon, 07 Dec 2015 06:16:05 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=283=2E5=29=3A_Added_tag_v3?= =?utf-8?q?=2E5=2E1_for_changeset_37a07cee5969?= Message-ID: <20151207061604.105493.98177@psf.io> https://hg.python.org/cpython/rev/0b1c47f78157 changeset: 99490:0b1c47f78157 branch: 3.5 user: Larry Hastings date: Sat Dec 05 17:05:33 2015 -0800 summary: Added tag v3.5.1 for changeset 37a07cee5969 files: .hgtags | 1 + 1 files changed, 1 insertions(+), 0 deletions(-) diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -158,3 +158,4 @@ 2d033fedfa7f1e325fd14ccdaa9cb42155da206f v3.5.0rc4 
374f501f4567b7595f2ad7798aa09afa2456bb28 v3.5.0 948ef16a69513ba1ff15c9d7d0b012b949df4c80 v3.5.1rc1 +37a07cee5969e6d3672583187a73cf636ff28e1b v3.5.1 -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Mon Dec 7 01:16:00 2015 From: python-checkins at python.org (larry.hastings) Date: Mon, 07 Dec 2015 06:16:00 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=283=2E5=29=3A_Fixes_upload_d?= =?utf-8?q?irectories_for_Windows_installer=2E?= Message-ID: <20151207061559.61849.29760@psf.io> https://hg.python.org/cpython/rev/f278d374da80 changeset: 99485:f278d374da80 branch: 3.5 parent: 99301:da245b9641d9 user: Steve Dower date: Sun Nov 22 18:20:11 2015 -0800 summary: Fixes upload directories for Windows installer. files: Tools/msi/bundle/bundle.targets | 2 +- Tools/msi/uploadrelease.proj | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Tools/msi/bundle/bundle.targets b/Tools/msi/bundle/bundle.targets --- a/Tools/msi/bundle/bundle.targets +++ b/Tools/msi/bundle/bundle.targets @@ -18,7 +18,7 @@ $(DownloadUrlBase.TrimEnd(`/`))/{version}/{arch}{releasename}/{msi} - $(DefineConstants);DownloadUrl=$(DownloadUrl.Replace(`{version}`, `$(MajorVersionNumber).$(MinorVersionNumber).$(MicroVersionNumber)`).Replace(`{arch}`, `$(ArchName)`).Replace(`{releasename}`, `$(ReleaseName)`).Replace(`{msi}`, `{2}`)) + $(DefineConstants);DownloadUrl=$(DownloadUrl.Replace(`{version}`, `$(MajorVersionNumber).$(MinorVersionNumber).$(MicroVersionNumber)`).Replace(`{arch}`, `$(ArchName)`).Replace(`{releasename}`, `$(ReleaseLevelName)`).Replace(`{msi}`, `{2}`)) $(DefineConstants);DownloadUrl={2} diff --git a/Tools/msi/uploadrelease.proj b/Tools/msi/uploadrelease.proj --- a/Tools/msi/uploadrelease.proj +++ b/Tools/msi/uploadrelease.proj @@ -16,7 +16,7 @@ $(DownloadUrlBase.TrimEnd(`/`))/$(MajorVersionNumber).$(MinorVersionNumber).$(MicroVersionNumber) - $(DownloadUrl.TrimEnd(`/`)) + $(DownloadUrl.Replace(`{version}`, `$(MajorVersionNumber).$(MinorVersionNumber).$(MicroVersionNumber)`).Replace(`{arch}`, `$(ArchName)`).Replace(`{releasename}`, `$(ReleaseLevelName)`).Replace(`{msi}`, ``).TrimEnd(`/`)) -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Mon Dec 7 01:16:00 2015 From: python-checkins at python.org (larry.hastings) Date: Mon, 07 Dec 2015 06:16:00 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=283=2E5=29=3A_Updated_pydoc_?= =?utf-8?q?topics_for_3=2E5=2E1_final=2E?= Message-ID: <20151207061559.33653.88105@psf.io> https://hg.python.org/cpython/rev/5410a923c835 changeset: 99488:5410a923c835 branch: 3.5 user: Larry Hastings date: Sat Dec 05 17:03:20 2015 -0800 summary: Updated pydoc topics for 3.5.1 final. 
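The regenerated topics file below is what interactive help() consults for keyword and topic entries; for example (illustrative only):

    >>> help('assert')       # shows the 'assert' entry from Lib/pydoc_data/topics.py
    >>> help('ASSIGNMENT')   # shows the assignment-statement topic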
files: Lib/pydoc_data/topics.py | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/Lib/pydoc_data/topics.py b/Lib/pydoc_data/topics.py --- a/Lib/pydoc_data/topics.py +++ b/Lib/pydoc_data/topics.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Autogenerated by Sphinx on Sat Nov 21 23:47:52 2015 +# Autogenerated by Sphinx on Sat Dec 5 17:02:49 2015 topics = {'assert': u'\nThe "assert" statement\n**********************\n\nAssert statements are a convenient way to insert debugging assertions\ninto a program:\n\n assert_stmt ::= "assert" expression ["," expression]\n\nThe simple form, "assert expression", is equivalent to\n\n if __debug__:\n if not expression: raise AssertionError\n\nThe extended form, "assert expression1, expression2", is equivalent to\n\n if __debug__:\n if not expression1: raise AssertionError(expression2)\n\nThese equivalences assume that "__debug__" and "AssertionError" refer\nto the built-in variables with those names. In the current\nimplementation, the built-in variable "__debug__" is "True" under\nnormal circumstances, "False" when optimization is requested (command\nline option -O). The current code generator emits no code for an\nassert statement when optimization is requested at compile time. Note\nthat it is unnecessary to include the source code for the expression\nthat failed in the error message; it will be displayed as part of the\nstack trace.\n\nAssignments to "__debug__" are illegal. The value for the built-in\nvariable is determined when the interpreter starts.\n', 'assignment': u'\nAssignment statements\n*********************\n\nAssignment statements are used to (re)bind names to values and to\nmodify attributes or items of mutable objects:\n\n assignment_stmt ::= (target_list "=")+ (expression_list | yield_expression)\n target_list ::= target ("," target)* [","]\n target ::= identifier\n | "(" target_list ")"\n | "[" target_list "]"\n | attributeref\n | subscription\n | slicing\n | "*" target\n\n(See section *Primaries* for the syntax definitions for\n*attributeref*, *subscription*, and *slicing*.)\n\nAn assignment statement evaluates the expression list (remember that\nthis can be a single expression or a comma-separated list, the latter\nyielding a tuple) and assigns the single resulting object to each of\nthe target lists, from left to right.\n\nAssignment is defined recursively depending on the form of the target\n(list). When a target is part of a mutable object (an attribute\nreference, subscription or slicing), the mutable object must\nultimately perform the assignment and decide about its validity, and\nmay raise an exception if the assignment is unacceptable. The rules\nobserved by various types and the exceptions raised are given with the\ndefinition of the object types (see section *The standard type\nhierarchy*).\n\nAssignment of an object to a target list, optionally enclosed in\nparentheses or square brackets, is recursively defined as follows.\n\n* If the target list is a single target: The object is assigned to\n that target.\n\n* If the target list is a comma-separated list of targets: The\n object must be an iterable with the same number of items as there\n are targets in the target list, and the items are assigned, from\n left to right, to the corresponding targets.\n\n * If the target list contains one target prefixed with an\n asterisk, called a "starred" target: The object must be a sequence\n with at least as many items as there are targets in the target\n list, minus one. 
The first items of the sequence are assigned,\n from left to right, to the targets before the starred target. The\n final items of the sequence are assigned to the targets after the\n starred target. A list of the remaining items in the sequence is\n then assigned to the starred target (the list can be empty).\n\n * Else: The object must be a sequence with the same number of\n items as there are targets in the target list, and the items are\n assigned, from left to right, to the corresponding targets.\n\nAssignment of an object to a single target is recursively defined as\nfollows.\n\n* If the target is an identifier (name):\n\n * If the name does not occur in a "global" or "nonlocal" statement\n in the current code block: the name is bound to the object in the\n current local namespace.\n\n * Otherwise: the name is bound to the object in the global\n namespace or the outer namespace determined by "nonlocal",\n respectively.\n\n The name is rebound if it was already bound. This may cause the\n reference count for the object previously bound to the name to reach\n zero, causing the object to be deallocated and its destructor (if it\n has one) to be called.\n\n* If the target is a target list enclosed in parentheses or in\n square brackets: The object must be an iterable with the same number\n of items as there are targets in the target list, and its items are\n assigned, from left to right, to the corresponding targets.\n\n* If the target is an attribute reference: The primary expression in\n the reference is evaluated. It should yield an object with\n assignable attributes; if this is not the case, "TypeError" is\n raised. That object is then asked to assign the assigned object to\n the given attribute; if it cannot perform the assignment, it raises\n an exception (usually but not necessarily "AttributeError").\n\n Note: If the object is a class instance and the attribute reference\n occurs on both sides of the assignment operator, the RHS expression,\n "a.x" can access either an instance attribute or (if no instance\n attribute exists) a class attribute. The LHS target "a.x" is always\n set as an instance attribute, creating it if necessary. Thus, the\n two occurrences of "a.x" do not necessarily refer to the same\n attribute: if the RHS expression refers to a class attribute, the\n LHS creates a new instance attribute as the target of the\n assignment:\n\n class Cls:\n x = 3 # class variable\n inst = Cls()\n inst.x = inst.x + 1 # writes inst.x as 4 leaving Cls.x as 3\n\n This description does not necessarily apply to descriptor\n attributes, such as properties created with "property()".\n\n* If the target is a subscription: The primary expression in the\n reference is evaluated. It should yield either a mutable sequence\n object (such as a list) or a mapping object (such as a dictionary).\n Next, the subscript expression is evaluated.\n\n If the primary is a mutable sequence object (such as a list), the\n subscript must yield an integer. If it is negative, the sequence\'s\n length is added to it. The resulting value must be a nonnegative\n integer less than the sequence\'s length, and the sequence is asked\n to assign the assigned object to its item with that index. 
If the\n index is out of range, "IndexError" is raised (assignment to a\n subscripted sequence cannot add new items to a list).\n\n If the primary is a mapping object (such as a dictionary), the\n subscript must have a type compatible with the mapping\'s key type,\n and the mapping is then asked to create a key/datum pair which maps\n the subscript to the assigned object. This can either replace an\n existing key/value pair with the same key value, or insert a new\n key/value pair (if no key with the same value existed).\n\n For user-defined objects, the "__setitem__()" method is called with\n appropriate arguments.\n\n* If the target is a slicing: The primary expression in the\n reference is evaluated. It should yield a mutable sequence object\n (such as a list). The assigned object should be a sequence object\n of the same type. Next, the lower and upper bound expressions are\n evaluated, insofar they are present; defaults are zero and the\n sequence\'s length. The bounds should evaluate to integers. If\n either bound is negative, the sequence\'s length is added to it. The\n resulting bounds are clipped to lie between zero and the sequence\'s\n length, inclusive. Finally, the sequence object is asked to replace\n the slice with the items of the assigned sequence. The length of\n the slice may be different from the length of the assigned sequence,\n thus changing the length of the target sequence, if the target\n sequence allows it.\n\n**CPython implementation detail:** In the current implementation, the\nsyntax for targets is taken to be the same as for expressions, and\ninvalid syntax is rejected during the code generation phase, causing\nless detailed error messages.\n\nAlthough the definition of assignment implies that overlaps between\nthe left-hand side and the right-hand side are \'simultanenous\' (for\nexample "a, b = b, a" swaps two variables), overlaps *within* the\ncollection of assigned-to variables occur left-to-right, sometimes\nresulting in confusion. For instance, the following program prints\n"[0, 2]":\n\n x = [0, 1]\n i = 0\n i, x[i] = 1, 2 # i is updated, then x[i] is updated\n print(x)\n\nSee also: **PEP 3132** - Extended Iterable Unpacking\n\n The specification for the "*target" feature.\n\n\nAugmented assignment statements\n===============================\n\nAugmented assignment is the combination, in a single statement, of a\nbinary operation and an assignment statement:\n\n augmented_assignment_stmt ::= augtarget augop (expression_list | yield_expression)\n augtarget ::= identifier | attributeref | subscription | slicing\n augop ::= "+=" | "-=" | "*=" | "@=" | "/=" | "//=" | "%=" | "**="\n | ">>=" | "<<=" | "&=" | "^=" | "|="\n\n(See section *Primaries* for the syntax definitions of the last three\nsymbols.)\n\nAn augmented assignment evaluates the target (which, unlike normal\nassignment statements, cannot be an unpacking) and the expression\nlist, performs the binary operation specific to the type of assignment\non the two operands, and assigns the result to the original target.\nThe target is only evaluated once.\n\nAn augmented assignment expression like "x += 1" can be rewritten as\n"x = x + 1" to achieve a similar, but not exactly equal effect. In the\naugmented version, "x" is only evaluated once. 
Also, when possible,\nthe actual operation is performed *in-place*, meaning that rather than\ncreating a new object and assigning that to the target, the old object\nis modified instead.\n\nUnlike normal assignments, augmented assignments evaluate the left-\nhand side *before* evaluating the right-hand side. For example, "a[i]\n+= f(x)" first looks-up "a[i]", then it evaluates "f(x)" and performs\nthe addition, and lastly, it writes the result back to "a[i]".\n\nWith the exception of assigning to tuples and multiple targets in a\nsingle statement, the assignment done by augmented assignment\nstatements is handled the same way as normal assignments. Similarly,\nwith the exception of the possible *in-place* behavior, the binary\noperation performed by augmented assignment is the same as the normal\nbinary operations.\n\nFor targets which are attribute references, the same *caveat about\nclass and instance attributes* applies as for regular assignments.\n', 'atom-identifiers': u'\nIdentifiers (Names)\n*******************\n\nAn identifier occurring as an atom is a name. See section\n*Identifiers and keywords* for lexical definition and section *Naming\nand binding* for documentation of naming and binding.\n\nWhen the name is bound to an object, evaluation of the atom yields\nthat object. When a name is not bound, an attempt to evaluate it\nraises a "NameError" exception.\n\n**Private name mangling:** When an identifier that textually occurs in\na class definition begins with two or more underscore characters and\ndoes not end in two or more underscores, it is considered a *private\nname* of that class. Private names are transformed to a longer form\nbefore code is generated for them. The transformation inserts the\nclass name, with leading underscores removed and a single underscore\ninserted, in front of the name. For example, the identifier "__spam"\noccurring in a class named "Ham" will be transformed to "_Ham__spam".\nThis transformation is independent of the syntactical context in which\nthe identifier is used. If the transformed name is extremely long\n(longer than 255 characters), implementation defined truncation may\nhappen. If the class name consists only of underscores, no\ntransformation is done.\n', -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Mon Dec 7 01:16:08 2015 From: python-checkins at python.org (larry.hastings) Date: Mon, 07 Dec 2015 06:16:08 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=283=2E4=29=3A_Version_bump_f?= =?utf-8?b?b3IgMy40LjRyYzEu?= Message-ID: <20151207061608.21245.80111@psf.io> https://hg.python.org/cpython/rev/04f3f725896c changeset: 99493:04f3f725896c branch: 3.4 tag: v3.4.4rc1 user: Larry Hastings date: Sun Dec 06 05:53:35 2015 -0800 summary: Version bump for 3.4.4rc1. files: Include/patchlevel.h | 8 ++++---- Misc/NEWS | 5 +++-- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/Include/patchlevel.h b/Include/patchlevel.h --- a/Include/patchlevel.h +++ b/Include/patchlevel.h @@ -18,12 +18,12 @@ /*--start constants--*/ #define PY_MAJOR_VERSION 3 #define PY_MINOR_VERSION 4 -#define PY_MICRO_VERSION 3 -#define PY_RELEASE_LEVEL PY_RELEASE_LEVEL_FINAL -#define PY_RELEASE_SERIAL 0 +#define PY_MICRO_VERSION 4 +#define PY_RELEASE_LEVEL PY_RELEASE_LEVEL_GAMMA +#define PY_RELEASE_SERIAL 1 /* Version as a string */ -#define PY_VERSION "3.4.3+" +#define PY_VERSION "3.4.4rc1" /*--end constants--*/ /* Version as a single 4-byte hex number, e.g. 0x010502B2 == 1.5.2b2. 
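The patchlevel.h hunk above ends on the note that the version is also exposed as a single 4-byte hex number (e.g. 0x010502B2 == 1.5.2b2). A minimal Python sketch of that packing follows, assuming the usual PY_VERSION_HEX layout from patchlevel.h (one byte each for major, minor and micro, a 4-bit release level where 0xA/0xB/0xC/0xF mean alpha/beta/release candidate/final, and a 4-bit serial). The helper names version_hex and describe are illustrative only; they are not part of the commit.

# Hypothetical helpers mirroring the PY_VERSION_HEX packing documented
# in Include/patchlevel.h; not taken from the diff above.
RELEASE_LEVELS = {0xA: "a", 0xB: "b", 0xC: "rc", 0xF: ""}

def version_hex(major, minor, micro, level, serial):
    # Pack the five fields into one 4-byte number, high byte first.
    return (major << 24) | (minor << 16) | (micro << 8) | (level << 4) | serial

def describe(hexversion):
    # Unpack a 4-byte hex version back into a human-readable string.
    major = (hexversion >> 24) & 0xFF
    minor = (hexversion >> 16) & 0xFF
    micro = (hexversion >> 8) & 0xFF
    level = (hexversion >> 4) & 0xF
    serial = hexversion & 0xF
    suffix = RELEASE_LEVELS.get(level, "?")
    return "%d.%d.%d%s%s" % (major, minor, micro, suffix,
                             serial if suffix else "")

# The worked example from the patchlevel.h comment:
assert describe(0x010502B2) == "1.5.2b2"
# Under the same layout, the 3.4.4rc1 bump in this commit (level GAMMA
# == 0xC, serial 1) would correspond to:
assert version_hex(3, 4, 4, 0xC, 1) == 0x030404C1
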
diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -5,12 +5,13 @@ What's New in Python 3.4.4rc1? ============================== -Release date: tba +Release date: 2015/12/06 Core and Builtins ----------------- -- Issue #25709: Fixed problem with in-place string concatenation and utf-8 cache. +- Issue #25709: Fixed problem with in-place string concatenation and utf-8 + cache. - Issue #24097: Fixed crash in object.__reduce__() if slot name is freed inside __getattr__. -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Mon Dec 7 01:16:08 2015 From: python-checkins at python.org (larry.hastings) Date: Mon, 07 Dec 2015 06:16:08 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=283=2E4=29=3A_Added_tag_v3?= =?utf-8?q?=2E4=2E4rc1_for_changeset_04f3f725896c?= Message-ID: <20151207061608.21247.53070@psf.io> https://hg.python.org/cpython/rev/981fc41d5bbd changeset: 99494:981fc41d5bbd branch: 3.4 user: Larry Hastings date: Sun Dec 06 05:53:49 2015 -0800 summary: Added tag v3.4.4rc1 for changeset 04f3f725896c files: .hgtags | 1 + 1 files changed, 1 insertions(+), 0 deletions(-) diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -144,3 +144,4 @@ ab2c023a9432f16652e89c404bbc84aa91bf55af v3.4.2 69dd528ca6255a66c37cc5cf680e8357d108b036 v3.4.3rc1 b4cbecbc0781e89a309d03b60a1f75f8499250e6 v3.4.3 +04f3f725896c6961212c3a12e8ac25be6958f4fa v3.4.4rc1 -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Mon Dec 7 01:16:08 2015 From: python-checkins at python.org (larry.hastings) Date: Mon, 07 Dec 2015 06:16:08 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=283=2E4=29=3A_Post-release_f?= =?utf-8?q?ixups_for_Python_3=2E4=2E4rc1=2E?= Message-ID: <20151207061608.105477.29655@psf.io> https://hg.python.org/cpython/rev/04a1f9fd0802 changeset: 99495:04a1f9fd0802 branch: 3.4 user: Larry Hastings date: Sun Dec 06 21:54:29 2015 -0800 summary: Post-release fixups for Python 3.4.4rc1. files: Include/patchlevel.h | 2 +- Misc/NEWS | 12 ++++++++++++ 2 files changed, 13 insertions(+), 1 deletions(-) diff --git a/Include/patchlevel.h b/Include/patchlevel.h --- a/Include/patchlevel.h +++ b/Include/patchlevel.h @@ -23,7 +23,7 @@ #define PY_RELEASE_SERIAL 1 /* Version as a string */ -#define PY_VERSION "3.4.4rc1" +#define PY_VERSION "3.4.4rc1+" /*--end constants--*/ /* Version as a single 4-byte hex number, e.g. 0x010502B2 == 1.5.2b2. diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -2,6 +2,18 @@ Python News +++++++++++ +What's New in Python 3.4.4? +=========================== + +Release date: 2015/12/20 + +Core and Builtins +----------------- + +Library +------- + + What's New in Python 3.4.4rc1? ============================== -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Mon Dec 7 01:16:09 2015 From: python-checkins at python.org (larry.hastings) Date: Mon, 07 Dec 2015 06:16:09 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAobWVyZ2UgMy40IC0+IDMuNSk6?= =?utf-8?q?_Mostly-null_merge_from_3=2E4=2E__=28Only_added_the_tag_for_3?= =?utf-8?b?LjQuNHJjMS4p?= Message-ID: <20151207061609.4092.21735@psf.io> https://hg.python.org/cpython/rev/179cbf430ba7 changeset: 99498:179cbf430ba7 branch: 3.5 parent: 99497:786608c702dc parent: 99496:3b94245f9cf1 user: Larry Hastings date: Sun Dec 06 22:11:21 2015 -0800 summary: Mostly-null merge from 3.4. (Only added the tag for 3.4.4rc1.) 
files: .hgtags | 1 + 1 files changed, 1 insertions(+), 0 deletions(-) diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -144,6 +144,7 @@ ab2c023a9432f16652e89c404bbc84aa91bf55af v3.4.2 69dd528ca6255a66c37cc5cf680e8357d108b036 v3.4.3rc1 b4cbecbc0781e89a309d03b60a1f75f8499250e6 v3.4.3 +04f3f725896c6961212c3a12e8ac25be6958f4fa v3.4.4rc1 5d4b6a57d5fd7564bf73f3db0e46fe5eeb00bcd8 v3.5.0a1 0337bd7ebcb6559d69679bc7025059ad1ce4f432 v3.5.0a2 82656e28b5e5c4ae48d8dd8b5f0d7968908a82b6 v3.5.0a3 -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Mon Dec 7 01:16:09 2015 From: python-checkins at python.org (larry.hastings) Date: Mon, 07 Dec 2015 06:16:09 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=28merge_3=2E5_-=3E_default?= =?utf-8?q?=29=3A_Null_merge_from_3=2E5=2E?= Message-ID: <20151207061609.4080.4416@psf.io> https://hg.python.org/cpython/rev/b4aeb35ab7e1 changeset: 99499:b4aeb35ab7e1 parent: 99484:dd67c8c53aea parent: 99498:179cbf430ba7 user: Larry Hastings date: Sun Dec 06 22:14:58 2015 -0800 summary: Null merge from 3.5. files: .hgtags | 2 ++ Misc/NEWS | 7 ++++--- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -144,6 +144,7 @@ ab2c023a9432f16652e89c404bbc84aa91bf55af v3.4.2 69dd528ca6255a66c37cc5cf680e8357d108b036 v3.4.3rc1 b4cbecbc0781e89a309d03b60a1f75f8499250e6 v3.4.3 +04f3f725896c6961212c3a12e8ac25be6958f4fa v3.4.4rc1 5d4b6a57d5fd7564bf73f3db0e46fe5eeb00bcd8 v3.5.0a1 0337bd7ebcb6559d69679bc7025059ad1ce4f432 v3.5.0a2 82656e28b5e5c4ae48d8dd8b5f0d7968908a82b6 v3.5.0a3 @@ -158,3 +159,4 @@ 2d033fedfa7f1e325fd14ccdaa9cb42155da206f v3.5.0rc4 374f501f4567b7595f2ad7798aa09afa2456bb28 v3.5.0 948ef16a69513ba1ff15c9d7d0b012b949df4c80 v3.5.1rc1 +37a07cee5969e6d3672583187a73cf636ff28e1b v3.5.1 diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -5,7 +5,7 @@ What's New in Python 3.6.0 alpha 1? =================================== -Release date: XXXX-XX-XX +Release date: tba Core and Builtins ----------------- @@ -514,8 +514,8 @@ Core and Builtins ----------------- -Library -------- +- Issue #25709: Fixed problem with in-place string concatenation and + utf-8 cache. Windows ------- @@ -523,6 +523,7 @@ - Issue #25715: Python 3.5.1 installer shows wrong upgrade path and incorrect logic for launcher detection. + What's New in Python 3.5.1 release candidate 1? =============================================== -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Mon Dec 7 01:16:09 2015 From: python-checkins at python.org (larry.hastings) Date: Mon, 07 Dec 2015 06:16:09 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAobWVyZ2UgMy41IC0+IDMuNSk6?= =?utf-8?q?_Merge=2E?= Message-ID: <20151207061609.105503.27153@psf.io> https://hg.python.org/cpython/rev/786608c702dc changeset: 99497:786608c702dc branch: 3.5 parent: 99482:0d1bbfe8fd09 parent: 99491:536f5be43262 user: Larry Hastings date: Sun Dec 06 22:00:57 2015 -0800 summary: Merge. 
files: .hgtags | 1 + Include/patchlevel.h | 6 +++--- Lib/pydoc_data/topics.py | 2 +- Misc/NEWS | 7 ++++--- 4 files changed, 9 insertions(+), 7 deletions(-) diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -158,3 +158,4 @@ 2d033fedfa7f1e325fd14ccdaa9cb42155da206f v3.5.0rc4 374f501f4567b7595f2ad7798aa09afa2456bb28 v3.5.0 948ef16a69513ba1ff15c9d7d0b012b949df4c80 v3.5.1rc1 +37a07cee5969e6d3672583187a73cf636ff28e1b v3.5.1 diff --git a/Include/patchlevel.h b/Include/patchlevel.h --- a/Include/patchlevel.h +++ b/Include/patchlevel.h @@ -19,11 +19,11 @@ #define PY_MAJOR_VERSION 3 #define PY_MINOR_VERSION 5 #define PY_MICRO_VERSION 1 -#define PY_RELEASE_LEVEL PY_RELEASE_LEVEL_GAMMA -#define PY_RELEASE_SERIAL 1 +#define PY_RELEASE_LEVEL PY_RELEASE_LEVEL_FINAL +#define PY_RELEASE_SERIAL 0 /* Version as a string */ -#define PY_VERSION "3.5.1rc1+" +#define PY_VERSION "3.5.1+" /*--end constants--*/ /* Version as a single 4-byte hex number, e.g. 0x010502B2 == 1.5.2b2. diff --git a/Lib/pydoc_data/topics.py b/Lib/pydoc_data/topics.py --- a/Lib/pydoc_data/topics.py +++ b/Lib/pydoc_data/topics.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Autogenerated by Sphinx on Sat Nov 21 23:47:52 2015 +# Autogenerated by Sphinx on Sat Dec 5 17:02:49 2015 topics = {'assert': u'\nThe "assert" statement\n**********************\n\nAssert statements are a convenient way to insert debugging assertions\ninto a program:\n\n assert_stmt ::= "assert" expression ["," expression]\n\nThe simple form, "assert expression", is equivalent to\n\n if __debug__:\n if not expression: raise AssertionError\n\nThe extended form, "assert expression1, expression2", is equivalent to\n\n if __debug__:\n if not expression1: raise AssertionError(expression2)\n\nThese equivalences assume that "__debug__" and "AssertionError" refer\nto the built-in variables with those names. In the current\nimplementation, the built-in variable "__debug__" is "True" under\nnormal circumstances, "False" when optimization is requested (command\nline option -O). The current code generator emits no code for an\nassert statement when optimization is requested at compile time. Note\nthat it is unnecessary to include the source code for the expression\nthat failed in the error message; it will be displayed as part of the\nstack trace.\n\nAssignments to "__debug__" are illegal. The value for the built-in\nvariable is determined when the interpreter starts.\n', 'assignment': u'\nAssignment statements\n*********************\n\nAssignment statements are used to (re)bind names to values and to\nmodify attributes or items of mutable objects:\n\n assignment_stmt ::= (target_list "=")+ (expression_list | yield_expression)\n target_list ::= target ("," target)* [","]\n target ::= identifier\n | "(" target_list ")"\n | "[" target_list "]"\n | attributeref\n | subscription\n | slicing\n | "*" target\n\n(See section *Primaries* for the syntax definitions for\n*attributeref*, *subscription*, and *slicing*.)\n\nAn assignment statement evaluates the expression list (remember that\nthis can be a single expression or a comma-separated list, the latter\nyielding a tuple) and assigns the single resulting object to each of\nthe target lists, from left to right.\n\nAssignment is defined recursively depending on the form of the target\n(list). 
When a target is part of a mutable object (an attribute\nreference, subscription or slicing), the mutable object must\nultimately perform the assignment and decide about its validity, and\nmay raise an exception if the assignment is unacceptable. The rules\nobserved by various types and the exceptions raised are given with the\ndefinition of the object types (see section *The standard type\nhierarchy*).\n\nAssignment of an object to a target list, optionally enclosed in\nparentheses or square brackets, is recursively defined as follows.\n\n* If the target list is a single target: The object is assigned to\n that target.\n\n* If the target list is a comma-separated list of targets: The\n object must be an iterable with the same number of items as there\n are targets in the target list, and the items are assigned, from\n left to right, to the corresponding targets.\n\n * If the target list contains one target prefixed with an\n asterisk, called a "starred" target: The object must be a sequence\n with at least as many items as there are targets in the target\n list, minus one. The first items of the sequence are assigned,\n from left to right, to the targets before the starred target. The\n final items of the sequence are assigned to the targets after the\n starred target. A list of the remaining items in the sequence is\n then assigned to the starred target (the list can be empty).\n\n * Else: The object must be a sequence with the same number of\n items as there are targets in the target list, and the items are\n assigned, from left to right, to the corresponding targets.\n\nAssignment of an object to a single target is recursively defined as\nfollows.\n\n* If the target is an identifier (name):\n\n * If the name does not occur in a "global" or "nonlocal" statement\n in the current code block: the name is bound to the object in the\n current local namespace.\n\n * Otherwise: the name is bound to the object in the global\n namespace or the outer namespace determined by "nonlocal",\n respectively.\n\n The name is rebound if it was already bound. This may cause the\n reference count for the object previously bound to the name to reach\n zero, causing the object to be deallocated and its destructor (if it\n has one) to be called.\n\n* If the target is a target list enclosed in parentheses or in\n square brackets: The object must be an iterable with the same number\n of items as there are targets in the target list, and its items are\n assigned, from left to right, to the corresponding targets.\n\n* If the target is an attribute reference: The primary expression in\n the reference is evaluated. It should yield an object with\n assignable attributes; if this is not the case, "TypeError" is\n raised. That object is then asked to assign the assigned object to\n the given attribute; if it cannot perform the assignment, it raises\n an exception (usually but not necessarily "AttributeError").\n\n Note: If the object is a class instance and the attribute reference\n occurs on both sides of the assignment operator, the RHS expression,\n "a.x" can access either an instance attribute or (if no instance\n attribute exists) a class attribute. The LHS target "a.x" is always\n set as an instance attribute, creating it if necessary. 
Thus, the\n two occurrences of "a.x" do not necessarily refer to the same\n attribute: if the RHS expression refers to a class attribute, the\n LHS creates a new instance attribute as the target of the\n assignment:\n\n class Cls:\n x = 3 # class variable\n inst = Cls()\n inst.x = inst.x + 1 # writes inst.x as 4 leaving Cls.x as 3\n\n This description does not necessarily apply to descriptor\n attributes, such as properties created with "property()".\n\n* If the target is a subscription: The primary expression in the\n reference is evaluated. It should yield either a mutable sequence\n object (such as a list) or a mapping object (such as a dictionary).\n Next, the subscript expression is evaluated.\n\n If the primary is a mutable sequence object (such as a list), the\n subscript must yield an integer. If it is negative, the sequence\'s\n length is added to it. The resulting value must be a nonnegative\n integer less than the sequence\'s length, and the sequence is asked\n to assign the assigned object to its item with that index. If the\n index is out of range, "IndexError" is raised (assignment to a\n subscripted sequence cannot add new items to a list).\n\n If the primary is a mapping object (such as a dictionary), the\n subscript must have a type compatible with the mapping\'s key type,\n and the mapping is then asked to create a key/datum pair which maps\n the subscript to the assigned object. This can either replace an\n existing key/value pair with the same key value, or insert a new\n key/value pair (if no key with the same value existed).\n\n For user-defined objects, the "__setitem__()" method is called with\n appropriate arguments.\n\n* If the target is a slicing: The primary expression in the\n reference is evaluated. It should yield a mutable sequence object\n (such as a list). The assigned object should be a sequence object\n of the same type. Next, the lower and upper bound expressions are\n evaluated, insofar they are present; defaults are zero and the\n sequence\'s length. The bounds should evaluate to integers. If\n either bound is negative, the sequence\'s length is added to it. The\n resulting bounds are clipped to lie between zero and the sequence\'s\n length, inclusive. Finally, the sequence object is asked to replace\n the slice with the items of the assigned sequence. The length of\n the slice may be different from the length of the assigned sequence,\n thus changing the length of the target sequence, if the target\n sequence allows it.\n\n**CPython implementation detail:** In the current implementation, the\nsyntax for targets is taken to be the same as for expressions, and\ninvalid syntax is rejected during the code generation phase, causing\nless detailed error messages.\n\nAlthough the definition of assignment implies that overlaps between\nthe left-hand side and the right-hand side are \'simultanenous\' (for\nexample "a, b = b, a" swaps two variables), overlaps *within* the\ncollection of assigned-to variables occur left-to-right, sometimes\nresulting in confusion. 
For instance, the following program prints\n"[0, 2]":\n\n x = [0, 1]\n i = 0\n i, x[i] = 1, 2 # i is updated, then x[i] is updated\n print(x)\n\nSee also: **PEP 3132** - Extended Iterable Unpacking\n\n The specification for the "*target" feature.\n\n\nAugmented assignment statements\n===============================\n\nAugmented assignment is the combination, in a single statement, of a\nbinary operation and an assignment statement:\n\n augmented_assignment_stmt ::= augtarget augop (expression_list | yield_expression)\n augtarget ::= identifier | attributeref | subscription | slicing\n augop ::= "+=" | "-=" | "*=" | "@=" | "/=" | "//=" | "%=" | "**="\n | ">>=" | "<<=" | "&=" | "^=" | "|="\n\n(See section *Primaries* for the syntax definitions of the last three\nsymbols.)\n\nAn augmented assignment evaluates the target (which, unlike normal\nassignment statements, cannot be an unpacking) and the expression\nlist, performs the binary operation specific to the type of assignment\non the two operands, and assigns the result to the original target.\nThe target is only evaluated once.\n\nAn augmented assignment expression like "x += 1" can be rewritten as\n"x = x + 1" to achieve a similar, but not exactly equal effect. In the\naugmented version, "x" is only evaluated once. Also, when possible,\nthe actual operation is performed *in-place*, meaning that rather than\ncreating a new object and assigning that to the target, the old object\nis modified instead.\n\nUnlike normal assignments, augmented assignments evaluate the left-\nhand side *before* evaluating the right-hand side. For example, "a[i]\n+= f(x)" first looks-up "a[i]", then it evaluates "f(x)" and performs\nthe addition, and lastly, it writes the result back to "a[i]".\n\nWith the exception of assigning to tuples and multiple targets in a\nsingle statement, the assignment done by augmented assignment\nstatements is handled the same way as normal assignments. Similarly,\nwith the exception of the possible *in-place* behavior, the binary\noperation performed by augmented assignment is the same as the normal\nbinary operations.\n\nFor targets which are attribute references, the same *caveat about\nclass and instance attributes* applies as for regular assignments.\n', 'atom-identifiers': u'\nIdentifiers (Names)\n*******************\n\nAn identifier occurring as an atom is a name. See section\n*Identifiers and keywords* for lexical definition and section *Naming\nand binding* for documentation of naming and binding.\n\nWhen the name is bound to an object, evaluation of the atom yields\nthat object. When a name is not bound, an attempt to evaluate it\nraises a "NameError" exception.\n\n**Private name mangling:** When an identifier that textually occurs in\na class definition begins with two or more underscore characters and\ndoes not end in two or more underscores, it is considered a *private\nname* of that class. Private names are transformed to a longer form\nbefore code is generated for them. The transformation inserts the\nclass name, with leading underscores removed and a single underscore\ninserted, in front of the name. For example, the identifier "__spam"\noccurring in a class named "Ham" will be transformed to "_Ham__spam".\nThis transformation is independent of the syntactical context in which\nthe identifier is used. If the transformed name is extremely long\n(longer than 255 characters), implementation defined truncation may\nhappen. 
If the class name consists only of underscores, no\ntransformation is done.\n', diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -5,7 +5,7 @@ What's New in Python 3.5.2 release candidate 1? =============================================== -Release date: XXXX-XX-XX +Release date: tba Core and Builtins ----------------- @@ -91,8 +91,8 @@ Core and Builtins ----------------- -Library -------- +- Issue #25709: Fixed problem with in-place string concatenation and + utf-8 cache. Windows ------- @@ -100,6 +100,7 @@ - Issue #25715: Python 3.5.1 installer shows wrong upgrade path and incorrect logic for launcher detection. + What's New in Python 3.5.1 release candidate 1? =============================================== -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Mon Dec 7 01:16:08 2015 From: python-checkins at python.org (larry.hastings) Date: Mon, 07 Dec 2015 06:16:08 +0000 Subject: [Python-checkins] =?utf-8?q?cpython_=283=2E4=29=3A_Rebuilt_pydoc_?= =?utf-8?q?topics_for_3=2E4=2E4rc1=2E?= Message-ID: <20151207061605.29611.16696@psf.io> https://hg.python.org/cpython/rev/933c9f9000ee changeset: 99492:933c9f9000ee branch: 3.4 parent: 99473:51a0dd6f7c73 user: Larry Hastings date: Sun Dec 06 05:51:56 2015 -0800 summary: Rebuilt pydoc topics for 3.4.4rc1. files: Lib/pydoc_data/topics.py | 30 ++++++++++++++-------------- 1 files changed, 15 insertions(+), 15 deletions(-) diff --git a/Lib/pydoc_data/topics.py b/Lib/pydoc_data/topics.py --- a/Lib/pydoc_data/topics.py +++ b/Lib/pydoc_data/topics.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Autogenerated by Sphinx on Sun Feb 22 23:52:05 2015 +# Autogenerated by Sphinx on Sun Dec 6 05:51:21 2015 topics = {'assert': u'\nThe "assert" statement\n**********************\n\nAssert statements are a convenient way to insert debugging assertions\ninto a program:\n\n assert_stmt ::= "assert" expression ["," expression]\n\nThe simple form, "assert expression", is equivalent to\n\n if __debug__:\n if not expression: raise AssertionError\n\nThe extended form, "assert expression1, expression2", is equivalent to\n\n if __debug__:\n if not expression1: raise AssertionError(expression2)\n\nThese equivalences assume that "__debug__" and "AssertionError" refer\nto the built-in variables with those names. In the current\nimplementation, the built-in variable "__debug__" is "True" under\nnormal circumstances, "False" when optimization is requested (command\nline option -O). The current code generator emits no code for an\nassert statement when optimization is requested at compile time. Note\nthat it is unnecessary to include the source code for the expression\nthat failed in the error message; it will be displayed as part of the\nstack trace.\n\nAssignments to "__debug__" are illegal. 
The value for the built-in\nvariable is determined when the interpreter starts.\n', 'assignment': u'\nAssignment statements\n*********************\n\nAssignment statements are used to (re)bind names to values and to\nmodify attributes or items of mutable objects:\n\n assignment_stmt ::= (target_list "=")+ (expression_list | yield_expression)\n target_list ::= target ("," target)* [","]\n target ::= identifier\n | "(" target_list ")"\n | "[" target_list "]"\n | attributeref\n | subscription\n | slicing\n | "*" target\n\n(See section *Primaries* for the syntax definitions for\n*attributeref*, *subscription*, and *slicing*.)\n\nAn assignment statement evaluates the expression list (remember that\nthis can be a single expression or a comma-separated list, the latter\nyielding a tuple) and assigns the single resulting object to each of\nthe target lists, from left to right.\n\nAssignment is defined recursively depending on the form of the target\n(list). When a target is part of a mutable object (an attribute\nreference, subscription or slicing), the mutable object must\nultimately perform the assignment and decide about its validity, and\nmay raise an exception if the assignment is unacceptable. The rules\nobserved by various types and the exceptions raised are given with the\ndefinition of the object types (see section *The standard type\nhierarchy*).\n\nAssignment of an object to a target list, optionally enclosed in\nparentheses or square brackets, is recursively defined as follows.\n\n* If the target list is a single target: The object is assigned to\n that target.\n\n* If the target list is a comma-separated list of targets: The\n object must be an iterable with the same number of items as there\n are targets in the target list, and the items are assigned, from\n left to right, to the corresponding targets.\n\n * If the target list contains one target prefixed with an\n asterisk, called a "starred" target: The object must be a sequence\n with at least as many items as there are targets in the target\n list, minus one. The first items of the sequence are assigned,\n from left to right, to the targets before the starred target. The\n final items of the sequence are assigned to the targets after the\n starred target. A list of the remaining items in the sequence is\n then assigned to the starred target (the list can be empty).\n\n * Else: The object must be a sequence with the same number of\n items as there are targets in the target list, and the items are\n assigned, from left to right, to the corresponding targets.\n\nAssignment of an object to a single target is recursively defined as\nfollows.\n\n* If the target is an identifier (name):\n\n * If the name does not occur in a "global" or "nonlocal" statement\n in the current code block: the name is bound to the object in the\n current local namespace.\n\n * Otherwise: the name is bound to the object in the global\n namespace or the outer namespace determined by "nonlocal",\n respectively.\n\n The name is rebound if it was already bound. 
This may cause the\n reference count for the object previously bound to the name to reach\n zero, causing the object to be deallocated and its destructor (if it\n has one) to be called.\n\n* If the target is a target list enclosed in parentheses or in\n square brackets: The object must be an iterable with the same number\n of items as there are targets in the target list, and its items are\n assigned, from left to right, to the corresponding targets.\n\n* If the target is an attribute reference: The primary expression in\n the reference is evaluated. It should yield an object with\n assignable attributes; if this is not the case, "TypeError" is\n raised. That object is then asked to assign the assigned object to\n the given attribute; if it cannot perform the assignment, it raises\n an exception (usually but not necessarily "AttributeError").\n\n Note: If the object is a class instance and the attribute reference\n occurs on both sides of the assignment operator, the RHS expression,\n "a.x" can access either an instance attribute or (if no instance\n attribute exists) a class attribute. The LHS target "a.x" is always\n set as an instance attribute, creating it if necessary. Thus, the\n two occurrences of "a.x" do not necessarily refer to the same\n attribute: if the RHS expression refers to a class attribute, the\n LHS creates a new instance attribute as the target of the\n assignment:\n\n class Cls:\n x = 3 # class variable\n inst = Cls()\n inst.x = inst.x + 1 # writes inst.x as 4 leaving Cls.x as 3\n\n This description does not necessarily apply to descriptor\n attributes, such as properties created with "property()".\n\n* If the target is a subscription: The primary expression in the\n reference is evaluated. It should yield either a mutable sequence\n object (such as a list) or a mapping object (such as a dictionary).\n Next, the subscript expression is evaluated.\n\n If the primary is a mutable sequence object (such as a list), the\n subscript must yield an integer. If it is negative, the sequence\'s\n length is added to it. The resulting value must be a nonnegative\n integer less than the sequence\'s length, and the sequence is asked\n to assign the assigned object to its item with that index. If the\n index is out of range, "IndexError" is raised (assignment to a\n subscripted sequence cannot add new items to a list).\n\n If the primary is a mapping object (such as a dictionary), the\n subscript must have a type compatible with the mapping\'s key type,\n and the mapping is then asked to create a key/datum pair which maps\n the subscript to the assigned object. This can either replace an\n existing key/value pair with the same key value, or insert a new\n key/value pair (if no key with the same value existed).\n\n For user-defined objects, the "__setitem__()" method is called with\n appropriate arguments.\n\n* If the target is a slicing: The primary expression in the\n reference is evaluated. It should yield a mutable sequence object\n (such as a list). The assigned object should be a sequence object\n of the same type. Next, the lower and upper bound expressions are\n evaluated, insofar they are present; defaults are zero and the\n sequence\'s length. The bounds should evaluate to integers. If\n either bound is negative, the sequence\'s length is added to it. The\n resulting bounds are clipped to lie between zero and the sequence\'s\n length, inclusive. Finally, the sequence object is asked to replace\n the slice with the items of the assigned sequence. 
The length of\n the slice may be different from the length of the assigned sequence,\n thus changing the length of the target sequence, if the target\n sequence allows it.\n\n**CPython implementation detail:** In the current implementation, the\nsyntax for targets is taken to be the same as for expressions, and\ninvalid syntax is rejected during the code generation phase, causing\nless detailed error messages.\n\nAlthough the definition of assignment implies that overlaps between\nthe left-hand side and the right-hand side are \'simultanenous\' (for\nexample "a, b = b, a" swaps two variables), overlaps *within* the\ncollection of assigned-to variables occur left-to-right, sometimes\nresulting in confusion. For instance, the following program prints\n"[0, 2]":\n\n x = [0, 1]\n i = 0\n i, x[i] = 1, 2 # i is updated, then x[i] is updated\n print(x)\n\nSee also: **PEP 3132** - Extended Iterable Unpacking\n\n The specification for the "*target" feature.\n\n\nAugmented assignment statements\n===============================\n\nAugmented assignment is the combination, in a single statement, of a\nbinary operation and an assignment statement:\n\n augmented_assignment_stmt ::= augtarget augop (expression_list | yield_expression)\n augtarget ::= identifier | attributeref | subscription | slicing\n augop ::= "+=" | "-=" | "*=" | "/=" | "//=" | "%=" | "**="\n | ">>=" | "<<=" | "&=" | "^=" | "|="\n\n(See section *Primaries* for the syntax definitions of the last three\nsymbols.)\n\nAn augmented assignment evaluates the target (which, unlike normal\nassignment statements, cannot be an unpacking) and the expression\nlist, performs the binary operation specific to the type of assignment\non the two operands, and assigns the result to the original target.\nThe target is only evaluated once.\n\nAn augmented assignment expression like "x += 1" can be rewritten as\n"x = x + 1" to achieve a similar, but not exactly equal effect. In the\naugmented version, "x" is only evaluated once. Also, when possible,\nthe actual operation is performed *in-place*, meaning that rather than\ncreating a new object and assigning that to the target, the old object\nis modified instead.\n\nUnlike normal assignments, augmented assignments evaluate the left-\nhand side *before* evaluating the right-hand side. For example, "a[i]\n+= f(x)" first looks-up "a[i]", then it evaluates "f(x)" and performs\nthe addition, and lastly, it writes the result back to "a[i]".\n\nWith the exception of assigning to tuples and multiple targets in a\nsingle statement, the assignment done by augmented assignment\nstatements is handled the same way as normal assignments. Similarly,\nwith the exception of the possible *in-place* behavior, the binary\noperation performed by augmented assignment is the same as the normal\nbinary operations.\n\nFor targets which are attribute references, the same *caveat about\nclass and instance attributes* applies as for regular assignments.\n', 'atom-identifiers': u'\nIdentifiers (Names)\n*******************\n\nAn identifier occurring as an atom is a name. See section\n*Identifiers and keywords* for lexical definition and section *Naming\nand binding* for documentation of naming and binding.\n\nWhen the name is bound to an object, evaluation of the atom yields\nthat object. 
When a name is not bound, an attempt to evaluate it\nraises a "NameError" exception.\n\n**Private name mangling:** When an identifier that textually occurs in\na class definition begins with two or more underscore characters and\ndoes not end in two or more underscores, it is considered a *private\nname* of that class. Private names are transformed to a longer form\nbefore code is generated for them. The transformation inserts the\nclass name, with leading underscores removed and a single underscore\ninserted, in front of the name. For example, the identifier "__spam"\noccurring in a class named "Ham" will be transformed to "_Ham__spam".\nThis transformation is independent of the syntactical context in which\nthe identifier is used. If the transformed name is extremely long\n(longer than 255 characters), implementation defined truncation may\nhappen. If the class name consists only of underscores, no\ntransformation is done.\n', @@ -18,19 +18,19 @@ 'callable-types': u'\nEmulating callable objects\n**************************\n\nobject.__call__(self[, args...])\n\n Called when the instance is "called" as a function; if this method\n is defined, "x(arg1, arg2, ...)" is a shorthand for\n "x.__call__(arg1, arg2, ...)".\n', 'calls': u'\nCalls\n*****\n\nA call calls a callable object (e.g., a *function*) with a possibly\nempty series of *arguments*:\n\n call ::= primary "(" [argument_list [","] | comprehension] ")"\n argument_list ::= positional_arguments ["," keyword_arguments]\n ["," "*" expression] ["," keyword_arguments]\n ["," "**" expression]\n | keyword_arguments ["," "*" expression]\n ["," keyword_arguments] ["," "**" expression]\n | "*" expression ["," keyword_arguments] ["," "**" expression]\n | "**" expression\n positional_arguments ::= expression ("," expression)*\n keyword_arguments ::= keyword_item ("," keyword_item)*\n keyword_item ::= identifier "=" expression\n\nAn optional trailing comma may be present after the positional and\nkeyword arguments but does not affect the semantics.\n\nThe primary must evaluate to a callable object (user-defined\nfunctions, built-in functions, methods of built-in objects, class\nobjects, methods of class instances, and all objects having a\n"__call__()" method are callable). All argument expressions are\nevaluated before the call is attempted. Please refer to section\n*Function definitions* for the syntax of formal *parameter* lists.\n\nIf keyword arguments are present, they are first converted to\npositional arguments, as follows. First, a list of unfilled slots is\ncreated for the formal parameters. If there are N positional\narguments, they are placed in the first N slots. Next, for each\nkeyword argument, the identifier is used to determine the\ncorresponding slot (if the identifier is the same as the first formal\nparameter name, the first slot is used, and so on). If the slot is\nalready filled, a "TypeError" exception is raised. Otherwise, the\nvalue of the argument is placed in the slot, filling it (even if the\nexpression is "None", it fills the slot). When all arguments have\nbeen processed, the slots that are still unfilled are filled with the\ncorresponding default value from the function definition. (Default\nvalues are calculated, once, when the function is defined; thus, a\nmutable object such as a list or dictionary used as default value will\nbe shared by all calls that don\'t specify an argument value for the\ncorresponding slot; this should usually be avoided.) 
If there are any\nunfilled slots for which no default value is specified, a "TypeError"\nexception is raised. Otherwise, the list of filled slots is used as\nthe argument list for the call.\n\n**CPython implementation detail:** An implementation may provide\nbuilt-in functions whose positional parameters do not have names, even\nif they are \'named\' for the purpose of documentation, and which\ntherefore cannot be supplied by keyword. In CPython, this is the case\nfor functions implemented in C that use "PyArg_ParseTuple()" to parse\ntheir arguments.\n\nIf there are more positional arguments than there are formal parameter\nslots, a "TypeError" exception is raised, unless a formal parameter\nusing the syntax "*identifier" is present; in this case, that formal\nparameter receives a tuple containing the excess positional arguments\n(or an empty tuple if there were no excess positional arguments).\n\nIf any keyword argument does not correspond to a formal parameter\nname, a "TypeError" exception is raised, unless a formal parameter\nusing the syntax "**identifier" is present; in this case, that formal\nparameter receives a dictionary containing the excess keyword\narguments (using the keywords as keys and the argument values as\ncorresponding values), or a (new) empty dictionary if there were no\nexcess keyword arguments.\n\nIf the syntax "*expression" appears in the function call, "expression"\nmust evaluate to an iterable. Elements from this iterable are treated\nas if they were additional positional arguments; if there are\npositional arguments *x1*, ..., *xN*, and "expression" evaluates to a\nsequence *y1*, ..., *yM*, this is equivalent to a call with M+N\npositional arguments *x1*, ..., *xN*, *y1*, ..., *yM*.\n\nA consequence of this is that although the "*expression" syntax may\nappear *after* some keyword arguments, it is processed *before* the\nkeyword arguments (and the "**expression" argument, if any -- see\nbelow). So:\n\n >>> def f(a, b):\n ... print(a, b)\n ...\n >>> f(b=1, *(2,))\n 2 1\n >>> f(a=1, *(2,))\n Traceback (most recent call last):\n File "", line 1, in ?\n TypeError: f() got multiple values for keyword argument \'a\'\n >>> f(1, *(2,))\n 1 2\n\nIt is unusual for both keyword arguments and the "*expression" syntax\nto be used in the same call, so in practice this confusion does not\narise.\n\nIf the syntax "**expression" appears in the function call,\n"expression" must evaluate to a mapping, the contents of which are\ntreated as additional keyword arguments. In the case of a keyword\nappearing in both "expression" and as an explicit keyword argument, a\n"TypeError" exception is raised.\n\nFormal parameters using the syntax "*identifier" or "**identifier"\ncannot be used as positional argument slots or as keyword argument\nnames.\n\nA call always returns some value, possibly "None", unless it raises an\nexception. How this value is computed depends on the type of the\ncallable object.\n\nIf it is---\n\na user-defined function:\n The code block for the function is executed, passing it the\n argument list. The first thing the code block will do is bind the\n formal parameters to the arguments; this is described in section\n *Function definitions*. 
When the code block executes a "return"\n statement, this specifies the return value of the function call.\n\na built-in function or method:\n The result is up to the interpreter; see *Built-in Functions* for\n the descriptions of built-in functions and methods.\n\na class object:\n A new instance of that class is returned.\n\na class instance method:\n The corresponding user-defined function is called, with an argument\n list that is one longer than the argument list of the call: the\n instance becomes the first argument.\n\na class instance:\n The class must define a "__call__()" method; the effect is then the\n same as if that method was called.\n', 'class': u'\nClass definitions\n*****************\n\nA class definition defines a class object (see section *The standard\ntype hierarchy*):\n\n classdef ::= [decorators] "class" classname [inheritance] ":" suite\n inheritance ::= "(" [parameter_list] ")"\n classname ::= identifier\n\nA class definition is an executable statement. The inheritance list\nusually gives a list of base classes (see *Customizing class creation*\nfor more advanced uses), so each item in the list should evaluate to a\nclass object which allows subclassing. Classes without an inheritance\nlist inherit, by default, from the base class "object"; hence,\n\n class Foo:\n pass\n\nis equivalent to\n\n class Foo(object):\n pass\n\nThe class\'s suite is then executed in a new execution frame (see\n*Naming and binding*), using a newly created local namespace and the\noriginal global namespace. (Usually, the suite contains mostly\nfunction definitions.) When the class\'s suite finishes execution, its\nexecution frame is discarded but its local namespace is saved. [4] A\nclass object is then created using the inheritance list for the base\nclasses and the saved local namespace for the attribute dictionary.\nThe class name is bound to this class object in the original local\nnamespace.\n\nClass creation can be customized heavily using *metaclasses*.\n\nClasses can also be decorated: just like when decorating functions,\n\n @f1(arg)\n @f2\n class Foo: pass\n\nis equivalent to\n\n class Foo: pass\n Foo = f1(arg)(f2(Foo))\n\nThe evaluation rules for the decorator expressions are the same as for\nfunction decorators. The result must be a class object, which is then\nbound to the class name.\n\n**Programmer\'s note:** Variables defined in the class definition are\nclass attributes; they are shared by instances. Instance attributes\ncan be set in a method with "self.name = value". Both class and\ninstance attributes are accessible through the notation ""self.name"",\nand an instance attribute hides a class attribute with the same name\nwhen accessed in this way. Class attributes can be used as defaults\nfor instance attributes, but using mutable values there can lead to\nunexpected results. *Descriptors* can be used to create instance\nvariables with different implementation details.\n\nSee also: **PEP 3115** - Metaclasses in Python 3 **PEP 3129** -\n Class Decorators\n\n-[ Footnotes ]-\n\n[1] The exception is propagated to the invocation stack unless\n there is a "finally" clause which happens to raise another\n exception. 
That new exception causes the old one to be lost.\n\n[2] Currently, control "flows off the end" except in the case of\n an exception or the execution of a "return", "continue", or\n "break" statement.\n\n[3] A string literal appearing as the first statement in the\n function body is transformed into the function\'s "__doc__"\n attribute and therefore the function\'s *docstring*.\n\n[4] A string literal appearing as the first statement in the class\n body is transformed into the namespace\'s "__doc__" item and\n therefore the class\'s *docstring*.\n', - 'comparisons': u'\nComparisons\n***********\n\nUnlike C, all comparison operations in Python have the same priority,\nwhich is lower than that of any arithmetic, shifting or bitwise\noperation. Also unlike C, expressions like "a < b < c" have the\ninterpretation that is conventional in mathematics:\n\n comparison ::= or_expr ( comp_operator or_expr )*\n comp_operator ::= "<" | ">" | "==" | ">=" | "<=" | "!="\n | "is" ["not"] | ["not"] "in"\n\nComparisons yield boolean values: "True" or "False".\n\nComparisons can be chained arbitrarily, e.g., "x < y <= z" is\nequivalent to "x < y and y <= z", except that "y" is evaluated only\nonce (but in both cases "z" is not evaluated at all when "x < y" is\nfound to be false).\n\nFormally, if *a*, *b*, *c*, ..., *y*, *z* are expressions and *op1*,\n*op2*, ..., *opN* are comparison operators, then "a op1 b op2 c ... y\nopN z" is equivalent to "a op1 b and b op2 c and ... y opN z", except\nthat each expression is evaluated at most once.\n\nNote that "a op1 b op2 c" doesn\'t imply any kind of comparison between\n*a* and *c*, so that, e.g., "x < y > z" is perfectly legal (though\nperhaps not pretty).\n\nThe operators "<", ">", "==", ">=", "<=", and "!=" compare the values\nof two objects. The objects need not have the same type. If both are\nnumbers, they are converted to a common type. Otherwise, the "==" and\n"!=" operators *always* consider objects of different types to be\nunequal, while the "<", ">", ">=" and "<=" operators raise a\n"TypeError" when comparing objects of different types that do not\nimplement these operators for the given pair of types. You can\ncontrol comparison behavior of objects of non-built-in types by\ndefining rich comparison methods like "__gt__()", described in section\n*Basic customization*.\n\nComparison of objects of the same type depends on the type:\n\n* Numbers are compared arithmetically.\n\n* The values "float(\'NaN\')" and "Decimal(\'NaN\')" are special. The\n are identical to themselves, "x is x" but are not equal to\n themselves, "x != x". Additionally, comparing any value to a\n not-a-number value will return "False". For example, both "3 <\n float(\'NaN\')" and "float(\'NaN\') < 3" will return "False".\n\n* Bytes objects are compared lexicographically using the numeric\n values of their elements.\n\n* Strings are compared lexicographically using the numeric\n equivalents (the result of the built-in function "ord()") of their\n characters. [3] String and bytes object can\'t be compared!\n\n* Tuples and lists are compared lexicographically using comparison\n of corresponding elements. This means that to compare equal, each\n element must compare equal and the two sequences must be of the same\n type and have the same length.\n\n If not equal, the sequences are ordered the same as their first\n differing elements. For example, "[1,2,x] <= [1,2,y]" has the same\n value as "x <= y". 
If the corresponding element does not exist, the\n shorter sequence is ordered first (for example, "[1,2] < [1,2,3]").\n\n* Mappings (dictionaries) compare equal if and only if they have the\n same "(key, value)" pairs. Order comparisons "(\'<\', \'<=\', \'>=\',\n \'>\')" raise "TypeError".\n\n* Sets and frozensets define comparison operators to mean subset and\n superset tests. Those relations do not define total orderings (the\n two sets "{1,2}" and {2,3} are not equal, nor subsets of one\n another, nor supersets of one another). Accordingly, sets are not\n appropriate arguments for functions which depend on total ordering.\n For example, "min()", "max()", and "sorted()" produce undefined\n results given a list of sets as inputs.\n\n* Most other objects of built-in types compare unequal unless they\n are the same object; the choice whether one object is considered\n smaller or larger than another one is made arbitrarily but\n consistently within one execution of a program.\n\nComparison of objects of differing types depends on whether either of\nthe types provide explicit support for the comparison. Most numeric\ntypes can be compared with one another. When cross-type comparison is\nnot supported, the comparison method returns "NotImplemented".\n\nThe operators "in" and "not in" test for membership. "x in s"\nevaluates to true if *x* is a member of *s*, and false otherwise. "x\nnot in s" returns the negation of "x in s". All built-in sequences\nand set types support this as well as dictionary, for which "in" tests\nwhether the dictionary has a given key. For container types such as\nlist, tuple, set, frozenset, dict, or collections.deque, the\nexpression "x in y" is equivalent to "any(x is e or x == e for e in\ny)".\n\nFor the string and bytes types, "x in y" is true if and only if *x* is\na substring of *y*. An equivalent test is "y.find(x) != -1". Empty\nstrings are always considered to be a substring of any other string,\nso """ in "abc"" will return "True".\n\nFor user-defined classes which define the "__contains__()" method, "x\nin y" is true if and only if "y.__contains__(x)" is true.\n\nFor user-defined classes which do not define "__contains__()" but do\ndefine "__iter__()", "x in y" is true if some value "z" with "x == z"\nis produced while iterating over "y". If an exception is raised\nduring the iteration, it is as if "in" raised that exception.\n\nLastly, the old-style iteration protocol is tried: if a class defines\n"__getitem__()", "x in y" is true if and only if there is a non-\nnegative integer index *i* such that "x == y[i]", and all lower\ninteger indices do not raise "IndexError" exception. (If any other\nexception is raised, it is as if "in" raised that exception).\n\nThe operator "not in" is defined to have the inverse true value of\n"in".\n\nThe operators "is" and "is not" test for object identity: "x is y" is\ntrue if and only if *x* and *y* are the same object. "x is not y"\nyields the inverse truth value. [4]\n', + 'comparisons': u'\nComparisons\n***********\n\nUnlike C, all comparison operations in Python have the same priority,\nwhich is lower than that of any arithmetic, shifting or bitwise\noperation. 
Also unlike C, expressions like "a < b < c" have the\ninterpretation that is conventional in mathematics:\n\n comparison ::= or_expr ( comp_operator or_expr )*\n comp_operator ::= "<" | ">" | "==" | ">=" | "<=" | "!="\n | "is" ["not"] | ["not"] "in"\n\nComparisons yield boolean values: "True" or "False".\n\nComparisons can be chained arbitrarily, e.g., "x < y <= z" is\nequivalent to "x < y and y <= z", except that "y" is evaluated only\nonce (but in both cases "z" is not evaluated at all when "x < y" is\nfound to be false).\n\nFormally, if *a*, *b*, *c*, ..., *y*, *z* are expressions and *op1*,\n*op2*, ..., *opN* are comparison operators, then "a op1 b op2 c ... y\nopN z" is equivalent to "a op1 b and b op2 c and ... y opN z", except\nthat each expression is evaluated at most once.\n\nNote that "a op1 b op2 c" doesn\'t imply any kind of comparison between\n*a* and *c*, so that, e.g., "x < y > z" is perfectly legal (though\nperhaps not pretty).\n\n\nValue comparisons\n=================\n\nThe operators "<", ">", "==", ">=", "<=", and "!=" compare the values\nof two objects. The objects do not need to have the same type.\n\nChapter *Objects, values and types* states that objects have a value\n(in addition to type and identity). The value of an object is a\nrather abstract notion in Python: For example, there is no canonical\naccess method for an object\'s value. Also, there is no requirement\nthat the value of an object should be constructed in a particular way,\ne.g. comprised of all its data attributes. Comparison operators\nimplement a particular notion of what the value of an object is. One\ncan think of them as defining the value of an object indirectly, by\nmeans of their comparison implementation.\n\nBecause all types are (direct or indirect) subtypes of "object", they\ninherit the default comparison behavior from "object". Types can\ncustomize their comparison behavior by implementing *rich comparison\nmethods* like "__lt__()", described in *Basic customization*.\n\nThe default behavior for equality comparison ("==" and "!=") is based\non the identity of the objects. Hence, equality comparison of\ninstances with the same identity results in equality, and equality\ncomparison of instances with different identities results in\ninequality. A motivation for this default behavior is the desire that\nall objects should be reflexive (i.e. "x is y" implies "x == y").\n\nA default order comparison ("<", ">", "<=", and ">=") is not provided;\nan attempt raises "TypeError". A motivation for this default behavior\nis the lack of a similar invariant as for equality.\n\nThe behavior of the default equality comparison, that instances with\ndifferent identities are always unequal, may be in contrast to what\ntypes will need that have a sensible definition of object value and\nvalue-based equality. Such types will need to customize their\ncomparison behavior, and in fact, a number of built-in types have done\nthat.\n\nThe following list describes the comparison behavior of the most\nimportant built-in types.\n\n* Numbers of built-in numeric types (*Numeric Types --- int, float,\n complex*) and of the standard library types "fractions.Fraction" and\n "decimal.Decimal" can be compared within and across their types,\n with the restriction that complex numbers do not support order\n comparison. Within the limits of the types involved, they compare\n mathematically (algorithmically) correct without loss of precision.\n\n The not-a-number values "float(\'NaN\')" and "Decimal(\'NaN\')" are\n special. 
They are identical to themselves ("x is x" is true) but\n are not equal to themselves ("x == x" is false). Additionally,\n comparing any number to a not-a-number value will return "False".\n For example, both "3 < float(\'NaN\')" and "float(\'NaN\') < 3" will\n return "False".\n\n* Binary sequences (instances of "bytes" or "bytearray") can be\n compared within and across their types. They compare\n lexicographically using the numeric values of their elements.\n\n* Strings (instances of "str") compare lexicographically using the\n numerical Unicode code points (the result of the built-in function\n "ord()") of their characters. [3]\n\n Strings and binary sequences cannot be directly compared.\n\n* Sequences (instances of "tuple", "list", or "range") can be\n compared only within each of their types, with the restriction that\n ranges do not support order comparison. Equality comparison across\n these types results in unequality, and ordering comparison across\n these types raises "TypeError".\n\n Sequences compare lexicographically using comparison of\n corresponding elements, whereby reflexivity of the elements is\n enforced.\n\n In enforcing reflexivity of elements, the comparison of collections\n assumes that for a collection element "x", "x == x" is always true.\n Based on that assumption, element identity is compared first, and\n element comparison is performed only for distinct elements. This\n approach yields the same result as a strict element comparison\n would, if the compared elements are reflexive. For non-reflexive\n elements, the result is different than for strict element\n comparison, and may be surprising: The non-reflexive not-a-number\n values for example result in the following comparison behavior when\n used in a list:\n\n >>> nan = float(\'NaN\')\n >>> nan is nan\n True\n >>> nan == nan\n False <-- the defined non-reflexive behavior of NaN\n >>> [nan] == [nan]\n True <-- list enforces reflexivity and tests identity first\n\n Lexicographical comparison between built-in collections works as\n follows:\n\n * For two collections to compare equal, they must be of the same\n type, have the same length, and each pair of corresponding\n elements must compare equal (for example, "[1,2] == (1,2)" is\n false because the type is not the same).\n\n * Collections that support order comparison are ordered the same\n as their first unequal elements (for example, "[1,2,x] <= [1,2,y]"\n has the same value as "x <= y"). If a corresponding element does\n not exist, the shorter collection is ordered first (for example,\n "[1,2] < [1,2,3]" is true).\n\n* Mappings (instances of "dict") compare equal if and only if they\n have equal *(key, value)* pairs. Equality comparison of the keys and\n elements enforces reflexivity.\n\n Order comparisons ("<", ">", "<=", and ">=") raise "TypeError".\n\n* Sets (instances of "set" or "frozenset") can be compared within\n and across their types.\n\n They define order comparison operators to mean subset and superset\n tests. Those relations do not define total orderings (for example,\n the two sets "{1,2}" and "{2,3}" are not equal, nor subsets of one\n another, nor supersets of one another). 
Accordingly, sets are not\n appropriate arguments for functions which depend on total ordering\n (for example, "min()", "max()", and "sorted()" produce undefined\n results given a list of sets as inputs).\n\n Comparison of sets enforces reflexivity of its elements.\n\n* Most other built-in types have no comparison methods implemented,\n so they inherit the default comparison behavior.\n\nUser-defined classes that customize their comparison behavior should\nfollow some consistency rules, if possible:\n\n* Equality comparison should be reflexive. In other words, identical\n objects should compare equal:\n\n "x is y" implies "x == y"\n\n* Comparison should be symmetric. In other words, the following\n expressions should have the same result:\n\n "x == y" and "y == x"\n\n "x != y" and "y != x"\n\n "x < y" and "y > x"\n\n "x <= y" and "y >= x"\n\n* Comparison should be transitive. The following (non-exhaustive)\n examples illustrate that:\n\n "x > y and y > z" implies "x > z"\n\n "x < y and y <= z" implies "x < z"\n\n* Inverse comparison should result in the boolean negation. In other\n words, the following expressions should have the same result:\n\n "x == y" and "not x != y"\n\n "x < y" and "not x >= y" (for total ordering)\n\n "x > y" and "not x <= y" (for total ordering)\n\n The last two expressions apply to totally ordered collections (e.g.\n to sequences, but not to sets or mappings). See also the\n "total_ordering()" decorator.\n\nPython does not enforce these consistency rules. In fact, the\nnot-a-number values are an example for not following these rules.\n\n\nMembership test operations\n==========================\n\nThe operators "in" and "not in" test for membership. "x in s"\nevaluates to true if *x* is a member of *s*, and false otherwise. "x\nnot in s" returns the negation of "x in s". All built-in sequences\nand set types support this as well as dictionary, for which "in" tests\nwhether the dictionary has a given key. For container types such as\nlist, tuple, set, frozenset, dict, or collections.deque, the\nexpression "x in y" is equivalent to "any(x is e or x == e for e in\ny)".\n\nFor the string and bytes types, "x in y" is true if and only if *x* is\na substring of *y*. An equivalent test is "y.find(x) != -1". Empty\nstrings are always considered to be a substring of any other string,\nso """ in "abc"" will return "True".\n\nFor user-defined classes which define the "__contains__()" method, "x\nin y" is true if and only if "y.__contains__(x)" is true.\n\nFor user-defined classes which do not define "__contains__()" but do\ndefine "__iter__()", "x in y" is true if some value "z" with "x == z"\nis produced while iterating over "y". If an exception is raised\nduring the iteration, it is as if "in" raised that exception.\n\nLastly, the old-style iteration protocol is tried: if a class defines\n"__getitem__()", "x in y" is true if and only if there is a non-\nnegative integer index *i* such that "x == y[i]", and all lower\ninteger indices do not raise "IndexError" exception. (If any other\nexception is raised, it is as if "in" raised that exception).\n\nThe operator "not in" is defined to have the inverse true value of\n"in".\n\n\nIdentity comparisons\n====================\n\nThe operators "is" and "is not" test for object identity: "x is y" is\ntrue if and only if *x* and *y* are the same object. "x is not y"\nyields the inverse truth value. 
[4]\n', 'compound': u'\nCompound statements\n*******************\n\nCompound statements contain (groups of) other statements; they affect\nor control the execution of those other statements in some way. In\ngeneral, compound statements span multiple lines, although in simple\nincarnations a whole compound statement may be contained in one line.\n\nThe "if", "while" and "for" statements implement traditional control\nflow constructs. "try" specifies exception handlers and/or cleanup\ncode for a group of statements, while the "with" statement allows the\nexecution of initialization and finalization code around a block of\ncode. Function and class definitions are also syntactically compound\nstatements.\n\nA compound statement consists of one or more \'clauses.\' A clause\nconsists of a header and a \'suite.\' The clause headers of a\nparticular compound statement are all at the same indentation level.\nEach clause header begins with a uniquely identifying keyword and ends\nwith a colon. A suite is a group of statements controlled by a\nclause. A suite can be one or more semicolon-separated simple\nstatements on the same line as the header, following the header\'s\ncolon, or it can be one or more indented statements on subsequent\nlines. Only the latter form of a suite can contain nested compound\nstatements; the following is illegal, mostly because it wouldn\'t be\nclear to which "if" clause a following "else" clause would belong:\n\n if test1: if test2: print(x)\n\nAlso note that the semicolon binds tighter than the colon in this\ncontext, so that in the following example, either all or none of the\n"print()" calls are executed:\n\n if x < y < z: print(x); print(y); print(z)\n\nSummarizing:\n\n compound_stmt ::= if_stmt\n | while_stmt\n | for_stmt\n | try_stmt\n | with_stmt\n | funcdef\n | classdef\n suite ::= stmt_list NEWLINE | NEWLINE INDENT statement+ DEDENT\n statement ::= stmt_list NEWLINE | compound_stmt\n stmt_list ::= simple_stmt (";" simple_stmt)* [";"]\n\nNote that statements always end in a "NEWLINE" possibly followed by a\n"DEDENT". 
Also note that optional continuation clauses always begin\nwith a keyword that cannot start a statement, thus there are no\nambiguities (the \'dangling "else"\' problem is solved in Python by\nrequiring nested "if" statements to be indented).\n\nThe formatting of the grammar rules in the following sections places\neach clause on a separate line for clarity.\n\n\nThe "if" statement\n==================\n\nThe "if" statement is used for conditional execution:\n\n if_stmt ::= "if" expression ":" suite\n ( "elif" expression ":" suite )*\n ["else" ":" suite]\n\nIt selects exactly one of the suites by evaluating the expressions one\nby one until one is found to be true (see section *Boolean operations*\nfor the definition of true and false); then that suite is executed\n(and no other part of the "if" statement is executed or evaluated).\nIf all expressions are false, the suite of the "else" clause, if\npresent, is executed.\n\n\nThe "while" statement\n=====================\n\nThe "while" statement is used for repeated execution as long as an\nexpression is true:\n\n while_stmt ::= "while" expression ":" suite\n ["else" ":" suite]\n\nThis repeatedly tests the expression and, if it is true, executes the\nfirst suite; if the expression is false (which may be the first time\nit is tested) the suite of the "else" clause, if present, is executed\nand the loop terminates.\n\nA "break" statement executed in the first suite terminates the loop\nwithout executing the "else" clause\'s suite. A "continue" statement\nexecuted in the first suite skips the rest of the suite and goes back\nto testing the expression.\n\n\nThe "for" statement\n===================\n\nThe "for" statement is used to iterate over the elements of a sequence\n(such as a string, tuple or list) or other iterable object:\n\n for_stmt ::= "for" target_list "in" expression_list ":" suite\n ["else" ":" suite]\n\nThe expression list is evaluated once; it should yield an iterable\nobject. An iterator is created for the result of the\n"expression_list". The suite is then executed once for each item\nprovided by the iterator, in the order returned by the iterator. Each\nitem in turn is assigned to the target list using the standard rules\nfor assignments (see *Assignment statements*), and then the suite is\nexecuted. When the items are exhausted (which is immediately when the\nsequence is empty or an iterator raises a "StopIteration" exception),\nthe suite in the "else" clause, if present, is executed, and the loop\nterminates.\n\nA "break" statement executed in the first suite terminates the loop\nwithout executing the "else" clause\'s suite. A "continue" statement\nexecuted in the first suite skips the rest of the suite and continues\nwith the next item, or with the "else" clause if there is no next\nitem.\n\nThe for-loop makes assignments to the variables(s) in the target list.\nThis overwrites all previous assignments to those variables including\nthose made in the suite of the for-loop:\n\n for i in range(10):\n print(i)\n i = 5 # this will not affect the for-loop\n # because i will be overwritten with the next\n # index in the range\n\nNames in the target list are not deleted when the loop is finished,\nbut if the sequence is empty, they will not have been assigned to at\nall by the loop. 
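A minimal sketch of the "break"/"else" interplay described above (the prime search is only an illustration):

   for n in range(2, 10):
       for x in range(2, n):
           if n % x == 0:
               break          # leaves the inner loop; its "else" is skipped
       else:
           # runs only when the inner loop was not ended by "break"
           print(n, "is prime")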
Hint: the built-in function "range()" returns an\niterator of integers suitable to emulate the effect of Pascal\'s "for i\n:= a to b do"; e.g., "list(range(3))" returns the list "[0, 1, 2]".\n\nNote: There is a subtlety when the sequence is being modified by the\n loop (this can only occur for mutable sequences, i.e. lists). An\n internal counter is used to keep track of which item is used next,\n and this is incremented on each iteration. When this counter has\n reached the length of the sequence the loop terminates. This means\n that if the suite deletes the current (or a previous) item from the\n sequence, the next item will be skipped (since it gets the index of\n the current item which has already been treated). Likewise, if the\n suite inserts an item in the sequence before the current item, the\n current item will be treated again the next time through the loop.\n This can lead to nasty bugs that can be avoided by making a\n temporary copy using a slice of the whole sequence, e.g.,\n\n for x in a[:]:\n if x < 0: a.remove(x)\n\n\nThe "try" statement\n===================\n\nThe "try" statement specifies exception handlers and/or cleanup code\nfor a group of statements:\n\n try_stmt ::= try1_stmt | try2_stmt\n try1_stmt ::= "try" ":" suite\n ("except" [expression ["as" identifier]] ":" suite)+\n ["else" ":" suite]\n ["finally" ":" suite]\n try2_stmt ::= "try" ":" suite\n "finally" ":" suite\n\nThe "except" clause(s) specify one or more exception handlers. When no\nexception occurs in the "try" clause, no exception handler is\nexecuted. When an exception occurs in the "try" suite, a search for an\nexception handler is started. This search inspects the except clauses\nin turn until one is found that matches the exception. An expression-\nless except clause, if present, must be last; it matches any\nexception. For an except clause with an expression, that expression\nis evaluated, and the clause matches the exception if the resulting\nobject is "compatible" with the exception. An object is compatible\nwith an exception if it is the class or a base class of the exception\nobject or a tuple containing an item compatible with the exception.\n\nIf no except clause matches the exception, the search for an exception\nhandler continues in the surrounding code and on the invocation stack.\n[1]\n\nIf the evaluation of an expression in the header of an except clause\nraises an exception, the original search for a handler is canceled and\na search starts for the new exception in the surrounding code and on\nthe call stack (it is treated as if the entire "try" statement raised\nthe exception).\n\nWhen a matching except clause is found, the exception is assigned to\nthe target specified after the "as" keyword in that except clause, if\npresent, and the except clause\'s suite is executed. All except\nclauses must have an executable block. When the end of this block is\nreached, execution continues normally after the entire try statement.\n(This means that if two nested handlers exist for the same exception,\nand the exception occurs in the try clause of the inner handler, the\nouter handler will not handle the exception.)\n\nWhen an exception has been assigned using "as target", it is cleared\nat the end of the except clause. This is as if\n\n except E as N:\n foo\n\nwas translated to\n\n except E as N:\n try:\n foo\n finally:\n del N\n\nThis means the exception must be assigned to a different name to be\nable to refer to it after the except clause. 
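For instance, a minimal sketch of keeping a handled exception reachable by binding it to another name ("exc" and "saved" are just illustrative names):

   try:
       1 / 0
   except ZeroDivisionError as exc:
       saved = exc            # "exc" itself is unbound when the clause ends
   print(repr(saved))         # the exception object is still reachable here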
Exceptions are cleared\nbecause with the traceback attached to them, they form a reference\ncycle with the stack frame, keeping all locals in that frame alive\nuntil the next garbage collection occurs.\n\nBefore an except clause\'s suite is executed, details about the\nexception are stored in the "sys" module and can be accessed via\n"sys.exc_info()". "sys.exc_info()" returns a 3-tuple consisting of the\nexception class, the exception instance and a traceback object (see\nsection *The standard type hierarchy*) identifying the point in the\nprogram where the exception occurred. "sys.exc_info()" values are\nrestored to their previous values (before the call) when returning\nfrom a function that handled an exception.\n\nThe optional "else" clause is executed if and when control flows off\nthe end of the "try" clause. [2] Exceptions in the "else" clause are\nnot handled by the preceding "except" clauses.\n\nIf "finally" is present, it specifies a \'cleanup\' handler. The "try"\nclause is executed, including any "except" and "else" clauses. If an\nexception occurs in any of the clauses and is not handled, the\nexception is temporarily saved. The "finally" clause is executed. If\nthere is a saved exception it is re-raised at the end of the "finally"\nclause. If the "finally" clause raises another exception, the saved\nexception is set as the context of the new exception. If the "finally"\nclause executes a "return" or "break" statement, the saved exception\nis discarded:\n\n >>> def f():\n ... try:\n ... 1/0\n ... finally:\n ... return 42\n ...\n >>> f()\n 42\n\nThe exception information is not available to the program during\nexecution of the "finally" clause.\n\nWhen a "return", "break" or "continue" statement is executed in the\n"try" suite of a "try"..."finally" statement, the "finally" clause is\nalso executed \'on the way out.\' A "continue" statement is illegal in\nthe "finally" clause. (The reason is a problem with the current\nimplementation --- this restriction may be lifted in the future).\n\nThe return value of a function is determined by the last "return"\nstatement executed. Since the "finally" clause always executes, a\n"return" statement executed in the "finally" clause will always be the\nlast one executed:\n\n >>> def foo():\n ... try:\n ... return \'try\'\n ... finally:\n ... return \'finally\'\n ...\n >>> foo()\n \'finally\'\n\nAdditional information on exceptions can be found in section\n*Exceptions*, and information on using the "raise" statement to\ngenerate exceptions may be found in section *The raise statement*.\n\n\nThe "with" statement\n====================\n\nThe "with" statement is used to wrap the execution of a block with\nmethods defined by a context manager (see section *With Statement\nContext Managers*). This allows common "try"..."except"..."finally"\nusage patterns to be encapsulated for convenient reuse.\n\n with_stmt ::= "with" with_item ("," with_item)* ":" suite\n with_item ::= expression ["as" target]\n\nThe execution of the "with" statement with one "item" proceeds as\nfollows:\n\n1. The context expression (the expression given in the "with_item")\n is evaluated to obtain a context manager.\n\n2. The context manager\'s "__exit__()" is loaded for later use.\n\n3. The context manager\'s "__enter__()" method is invoked.\n\n4. 
If a target was included in the "with" statement, the return\n value from "__enter__()" is assigned to it.\n\n Note: The "with" statement guarantees that if the "__enter__()"\n method returns without an error, then "__exit__()" will always be\n called. Thus, if an error occurs during the assignment to the\n target list, it will be treated the same as an error occurring\n within the suite would be. See step 6 below.\n\n5. The suite is executed.\n\n6. The context manager\'s "__exit__()" method is invoked. If an\n exception caused the suite to be exited, its type, value, and\n traceback are passed as arguments to "__exit__()". Otherwise, three\n "None" arguments are supplied.\n\n If the suite was exited due to an exception, and the return value\n from the "__exit__()" method was false, the exception is reraised.\n If the return value was true, the exception is suppressed, and\n execution continues with the statement following the "with"\n statement.\n\n If the suite was exited for any reason other than an exception, the\n return value from "__exit__()" is ignored, and execution proceeds\n at the normal location for the kind of exit that was taken.\n\nWith more than one item, the context managers are processed as if\nmultiple "with" statements were nested:\n\n with A() as a, B() as b:\n suite\n\nis equivalent to\n\n with A() as a:\n with B() as b:\n suite\n\nChanged in version 3.1: Support for multiple context expressions.\n\nSee also: **PEP 0343** - The "with" statement\n\n The specification, background, and examples for the Python "with"\n statement.\n\n\nFunction definitions\n====================\n\nA function definition defines a user-defined function object (see\nsection *The standard type hierarchy*):\n\n funcdef ::= [decorators] "def" funcname "(" [parameter_list] ")" ["->" expression] ":" suite\n decorators ::= decorator+\n decorator ::= "@" dotted_name ["(" [parameter_list [","]] ")"] NEWLINE\n dotted_name ::= identifier ("." identifier)*\n parameter_list ::= (defparameter ",")*\n | "*" [parameter] ("," defparameter)* ["," "**" parameter]\n | "**" parameter\n | defparameter [","] )\n parameter ::= identifier [":" expression]\n defparameter ::= parameter ["=" expression]\n funcname ::= identifier\n\nA function definition is an executable statement. Its execution binds\nthe function name in the current local namespace to a function object\n(a wrapper around the executable code for the function). This\nfunction object contains a reference to the current global namespace\nas the global namespace to be used when the function is called.\n\nThe function definition does not execute the function body; this gets\nexecuted only when the function is called. [3]\n\nA function definition may be wrapped by one or more *decorator*\nexpressions. Decorator expressions are evaluated when the function is\ndefined, in the scope that contains the function definition. The\nresult must be a callable, which is invoked with the function object\nas the only argument. The returned value is bound to the function name\ninstead of the function object. Multiple decorators are applied in\nnested fashion. 
For example, the following code\n\n @f1(arg)\n @f2\n def func(): pass\n\nis equivalent to\n\n def func(): pass\n func = f1(arg)(f2(func))\n\nWhen one or more *parameters* have the form *parameter* "="\n*expression*, the function is said to have "default parameter values."\nFor a parameter with a default value, the corresponding *argument* may\nbe omitted from a call, in which case the parameter\'s default value is\nsubstituted. If a parameter has a default value, all following\nparameters up until the ""*"" must also have a default value --- this\nis a syntactic restriction that is not expressed by the grammar.\n\n**Default parameter values are evaluated from left to right when the\nfunction definition is executed.** This means that the expression is\nevaluated once, when the function is defined, and that the same "pre-\ncomputed" value is used for each call. This is especially important\nto understand when a default parameter is a mutable object, such as a\nlist or a dictionary: if the function modifies the object (e.g. by\nappending an item to a list), the default value is in effect modified.\nThis is generally not what was intended. A way around this is to use\n"None" as the default, and explicitly test for it in the body of the\nfunction, e.g.:\n\n def whats_on_the_telly(penguin=None):\n if penguin is None:\n penguin = []\n penguin.append("property of the zoo")\n return penguin\n\nFunction call semantics are described in more detail in section\n*Calls*. A function call always assigns values to all parameters\nmentioned in the parameter list, either from position arguments, from\nkeyword arguments, or from default values. If the form\n""*identifier"" is present, it is initialized to a tuple receiving any\nexcess positional parameters, defaulting to the empty tuple. If the\nform ""**identifier"" is present, it is initialized to a new\ndictionary receiving any excess keyword arguments, defaulting to a new\nempty dictionary. Parameters after ""*"" or ""*identifier"" are\nkeyword-only parameters and may only be passed used keyword arguments.\n\nParameters may have annotations of the form "": expression"" following\nthe parameter name. Any parameter may have an annotation even those\nof the form "*identifier" or "**identifier". Functions may have\n"return" annotation of the form ""-> expression"" after the parameter\nlist. These annotations can be any valid Python expression and are\nevaluated when the function definition is executed. Annotations may\nbe evaluated in a different order than they appear in the source code.\nThe presence of annotations does not change the semantics of a\nfunction. The annotation values are available as values of a\ndictionary keyed by the parameters\' names in the "__annotations__"\nattribute of the function object.\n\nIt is also possible to create anonymous functions (functions not bound\nto a name), for immediate use in expressions. This uses lambda\nexpressions, described in section *Lambdas*. Note that the lambda\nexpression is merely a shorthand for a simplified function definition;\na function defined in a ""def"" statement can be passed around or\nassigned to another name just like a function defined by a lambda\nexpression. The ""def"" form is actually more powerful since it\nallows the execution of multiple statements and annotations.\n\n**Programmer\'s note:** Functions are first-class objects. A ""def""\nstatement executed inside a function definition defines a local\nfunction that can be returned or passed around. 
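A minimal sketch of such a locally defined function being returned and called later ("make_adder" is just an illustrative name):

   def make_adder(n):
       def add(x):
           return x + n       # "n" is a free variable of the nested function
       return add

   add_five = make_adder(5)
   print(add_five(3))         # 8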
Free variables used\nin the nested function can access the local variables of the function\ncontaining the def. See section *Naming and binding* for details.\n\nSee also: **PEP 3107** - Function Annotations\n\n The original specification for function annotations.\n\n\nClass definitions\n=================\n\nA class definition defines a class object (see section *The standard\ntype hierarchy*):\n\n classdef ::= [decorators] "class" classname [inheritance] ":" suite\n inheritance ::= "(" [parameter_list] ")"\n classname ::= identifier\n\nA class definition is an executable statement. The inheritance list\nusually gives a list of base classes (see *Customizing class creation*\nfor more advanced uses), so each item in the list should evaluate to a\nclass object which allows subclassing. Classes without an inheritance\nlist inherit, by default, from the base class "object"; hence,\n\n class Foo:\n pass\n\nis equivalent to\n\n class Foo(object):\n pass\n\nThe class\'s suite is then executed in a new execution frame (see\n*Naming and binding*), using a newly created local namespace and the\noriginal global namespace. (Usually, the suite contains mostly\nfunction definitions.) When the class\'s suite finishes execution, its\nexecution frame is discarded but its local namespace is saved. [4] A\nclass object is then created using the inheritance list for the base\nclasses and the saved local namespace for the attribute dictionary.\nThe class name is bound to this class object in the original local\nnamespace.\n\nClass creation can be customized heavily using *metaclasses*.\n\nClasses can also be decorated: just like when decorating functions,\n\n @f1(arg)\n @f2\n class Foo: pass\n\nis equivalent to\n\n class Foo: pass\n Foo = f1(arg)(f2(Foo))\n\nThe evaluation rules for the decorator expressions are the same as for\nfunction decorators. The result must be a class object, which is then\nbound to the class name.\n\n**Programmer\'s note:** Variables defined in the class definition are\nclass attributes; they are shared by instances. Instance attributes\ncan be set in a method with "self.name = value". Both class and\ninstance attributes are accessible through the notation ""self.name"",\nand an instance attribute hides a class attribute with the same name\nwhen accessed in this way. Class attributes can be used as defaults\nfor instance attributes, but using mutable values there can lead to\nunexpected results. *Descriptors* can be used to create instance\nvariables with different implementation details.\n\nSee also: **PEP 3115** - Metaclasses in Python 3 **PEP 3129** -\n Class Decorators\n\n-[ Footnotes ]-\n\n[1] The exception is propagated to the invocation stack unless\n there is a "finally" clause which happens to raise another\n exception. That new exception causes the old one to be lost.\n\n[2] Currently, control "flows off the end" except in the case of\n an exception or the execution of a "return", "continue", or\n "break" statement.\n\n[3] A string literal appearing as the first statement in the\n function body is transformed into the function\'s "__doc__"\n attribute and therefore the function\'s *docstring*.\n\n[4] A string literal appearing as the first statement in the class\n body is transformed into the namespace\'s "__doc__" item and\n therefore the class\'s *docstring*.\n', 'context-managers': u'\nWith Statement Context Managers\n*******************************\n\nA *context manager* is an object that defines the runtime context to\nbe established when executing a "with" statement. 
The context manager\nhandles the entry into, and the exit from, the desired runtime context\nfor the execution of the block of code. Context managers are normally\ninvoked using the "with" statement (described in section *The with\nstatement*), but can also be used by directly invoking their methods.\n\nTypical uses of context managers include saving and restoring various\nkinds of global state, locking and unlocking resources, closing opened\nfiles, etc.\n\nFor more information on context managers, see *Context Manager Types*.\n\nobject.__enter__(self)\n\n Enter the runtime context related to this object. The "with"\n statement will bind this method\'s return value to the target(s)\n specified in the "as" clause of the statement, if any.\n\nobject.__exit__(self, exc_type, exc_value, traceback)\n\n Exit the runtime context related to this object. The parameters\n describe the exception that caused the context to be exited. If the\n context was exited without an exception, all three arguments will\n be "None".\n\n If an exception is supplied, and the method wishes to suppress the\n exception (i.e., prevent it from being propagated), it should\n return a true value. Otherwise, the exception will be processed\n normally upon exit from this method.\n\n Note that "__exit__()" methods should not reraise the passed-in\n exception; this is the caller\'s responsibility.\n\nSee also: **PEP 0343** - The "with" statement\n\n The specification, background, and examples for the Python "with"\n statement.\n', 'continue': u'\nThe "continue" statement\n************************\n\n continue_stmt ::= "continue"\n\n"continue" may only occur syntactically nested in a "for" or "while"\nloop, but not nested in a function or class definition or "finally"\nclause within that loop. It continues with the next cycle of the\nnearest enclosing loop.\n\nWhen "continue" passes control out of a "try" statement with a\n"finally" clause, that "finally" clause is executed before really\nstarting the next loop cycle.\n', 'conversions': u'\nArithmetic conversions\n**********************\n\nWhen a description of an arithmetic operator below uses the phrase\n"the numeric arguments are converted to a common type," this means\nthat the operator implementation for built-in types works as follows:\n\n* If either argument is a complex number, the other is converted to\n complex;\n\n* otherwise, if either argument is a floating point number, the\n other is converted to floating point;\n\n* otherwise, both must be integers and no conversion is necessary.\n\nSome additional rules apply for certain operators (e.g., a string as a\nleft argument to the \'%\' operator). Extensions must define their own\nconversion behavior.\n', - 'customization': u'\nBasic customization\n*******************\n\nobject.__new__(cls[, ...])\n\n Called to create a new instance of class *cls*. "__new__()" is a\n static method (special-cased so you need not declare it as such)\n that takes the class of which an instance was requested as its\n first argument. The remaining arguments are those passed to the\n object constructor expression (the call to the class). 
The return\n value of "__new__()" should be the new object instance (usually an\n instance of *cls*).\n\n Typical implementations create a new instance of the class by\n invoking the superclass\'s "__new__()" method using\n "super(currentclass, cls).__new__(cls[, ...])" with appropriate\n arguments and then modifying the newly-created instance as\n necessary before returning it.\n\n If "__new__()" returns an instance of *cls*, then the new\n instance\'s "__init__()" method will be invoked like\n "__init__(self[, ...])", where *self* is the new instance and the\n remaining arguments are the same as were passed to "__new__()".\n\n If "__new__()" does not return an instance of *cls*, then the new\n instance\'s "__init__()" method will not be invoked.\n\n "__new__()" is intended mainly to allow subclasses of immutable\n types (like int, str, or tuple) to customize instance creation. It\n is also commonly overridden in custom metaclasses in order to\n customize class creation.\n\nobject.__init__(self[, ...])\n\n Called after the instance has been created (by "__new__()"), but\n before it is returned to the caller. The arguments are those\n passed to the class constructor expression. If a base class has an\n "__init__()" method, the derived class\'s "__init__()" method, if\n any, must explicitly call it to ensure proper initialization of the\n base class part of the instance; for example:\n "BaseClass.__init__(self, [args...])".\n\n Because "__new__()" and "__init__()" work together in constructing\n objects ("__new__()" to create it, and "__init__()" to customise\n it), no non-"None" value may be returned by "__init__()"; doing so\n will cause a "TypeError" to be raised at runtime.\n\nobject.__del__(self)\n\n Called when the instance is about to be destroyed. This is also\n called a destructor. If a base class has a "__del__()" method, the\n derived class\'s "__del__()" method, if any, must explicitly call it\n to ensure proper deletion of the base class part of the instance.\n Note that it is possible (though not recommended!) for the\n "__del__()" method to postpone destruction of the instance by\n creating a new reference to it. It may then be called at a later\n time when this new reference is deleted. It is not guaranteed that\n "__del__()" methods are called for objects that still exist when\n the interpreter exits.\n\n Note: "del x" doesn\'t directly call "x.__del__()" --- the former\n decrements the reference count for "x" by one, and the latter is\n only called when "x"\'s reference count reaches zero. Some common\n situations that may prevent the reference count of an object from\n going to zero include: circular references between objects (e.g.,\n a doubly-linked list or a tree data structure with parent and\n child pointers); a reference to the object on the stack frame of\n a function that caught an exception (the traceback stored in\n "sys.exc_info()[2]" keeps the stack frame alive); or a reference\n to the object on the stack frame that raised an unhandled\n exception in interactive mode (the traceback stored in\n "sys.last_traceback" keeps the stack frame alive). The first\n situation can only be remedied by explicitly breaking the cycles;\n the second can be resolved by freeing the reference to the\n traceback object when it is no longer useful, and the third can\n be resolved by storing "None" in "sys.last_traceback". Circular\n references which are garbage are detected and cleaned up when the\n cyclic garbage collector is enabled (it\'s on by default). 
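Returning to the "__new__()"/"__init__()" interplay described earlier in this section, a minimal sketch of the immutable-subclass use case ("Celsius" is purely illustrative):

   class Celsius(float):
       def __new__(cls, degrees):
           # float is immutable, so the value must be chosen in __new__()
           return super().__new__(cls, degrees)

       def __init__(self, degrees):
           # still runs, because __new__() returned an instance of cls
           self.unit = "C"

   t = Celsius(21.5)
   print(t, t.unit)            # 21.5 C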
Refer\n to the documentation for the "gc" module for more information\n about this topic.\n\n Warning: Due to the precarious circumstances under which\n "__del__()" methods are invoked, exceptions that occur during\n their execution are ignored, and a warning is printed to\n "sys.stderr" instead. Also, when "__del__()" is invoked in\n response to a module being deleted (e.g., when execution of the\n program is done), other globals referenced by the "__del__()"\n method may already have been deleted or in the process of being\n torn down (e.g. the import machinery shutting down). For this\n reason, "__del__()" methods should do the absolute minimum needed\n to maintain external invariants. Starting with version 1.5,\n Python guarantees that globals whose name begins with a single\n underscore are deleted from their module before other globals are\n deleted; if no other references to such globals exist, this may\n help in assuring that imported modules are still available at the\n time when the "__del__()" method is called.\n\nobject.__repr__(self)\n\n Called by the "repr()" built-in function to compute the "official"\n string representation of an object. If at all possible, this\n should look like a valid Python expression that could be used to\n recreate an object with the same value (given an appropriate\n environment). If this is not possible, a string of the form\n "<...some useful description...>" should be returned. The return\n value must be a string object. If a class defines "__repr__()" but\n not "__str__()", then "__repr__()" is also used when an "informal"\n string representation of instances of that class is required.\n\n This is typically used for debugging, so it is important that the\n representation is information-rich and unambiguous.\n\nobject.__str__(self)\n\n Called by "str(object)" and the built-in functions "format()" and\n "print()" to compute the "informal" or nicely printable string\n representation of an object. The return value must be a *string*\n object.\n\n This method differs from "object.__repr__()" in that there is no\n expectation that "__str__()" return a valid Python expression: a\n more convenient or concise representation can be used.\n\n The default implementation defined by the built-in type "object"\n calls "object.__repr__()".\n\nobject.__bytes__(self)\n\n Called by "bytes()" to compute a byte-string representation of an\n object. This should return a "bytes" object.\n\nobject.__format__(self, format_spec)\n\n Called by the "format()" built-in function (and by extension, the\n "str.format()" method of class "str") to produce a "formatted"\n string representation of an object. The "format_spec" argument is a\n string that contains a description of the formatting options\n desired. The interpretation of the "format_spec" argument is up to\n the type implementing "__format__()", however most classes will\n either delegate formatting to one of the built-in types, or use a\n similar formatting option syntax.\n\n See *Format Specification Mini-Language* for a description of the\n standard formatting syntax.\n\n The return value must be a string object.\n\n Changed in version 3.4: The __format__ method of "object" itself\n raises a "TypeError" if passed any non-empty string.\n\nobject.__lt__(self, other)\nobject.__le__(self, other)\nobject.__eq__(self, other)\nobject.__ne__(self, other)\nobject.__gt__(self, other)\nobject.__ge__(self, other)\n\n These are the so-called "rich comparison" methods. 
The\n correspondence between operator symbols and method names is as\n follows: "x<y" calls "x.__lt__(y)", "x<=y" calls "x.__le__(y)", "x==y"\n calls "x.__eq__(y)", "x!=y" calls "x.__ne__(y)", "x>y" calls\n "x.__gt__(y)", and "x>=y" calls "x.__ge__(y)".\n\n A rich comparison method may return the singleton "NotImplemented"\n if it does not implement the operation for a given pair of\n arguments. By convention, "False" and "True" are returned for a\n successful comparison. However, these methods can return any value,\n so if the comparison operator is used in a Boolean context (e.g.,\n in the condition of an "if" statement), Python will call "bool()"\n on the value to determine if the result is true or false.\n\n There are no implied relationships among the comparison operators.\n The truth of "x==y" does not imply that "x!=y" is false.\n Accordingly, when defining "__eq__()", one should also define\n "__ne__()" so that the operators will behave as expected. See the\n paragraph on "__hash__()" for some important notes on creating\n *hashable* objects which support custom comparison operations and\n are usable as dictionary keys.\n\n There are no swapped-argument versions of these methods (to be used\n when the left argument does not support the operation but the right\n argument does); rather, "__lt__()" and "__gt__()" are each other\'s\n reflection, "__le__()" and "__ge__()" are each other\'s reflection,\n and "__eq__()" and "__ne__()" are their own reflection.\n\n Arguments to rich comparison methods are never coerced.\n\n To automatically generate ordering operations from a single root\n operation, see "functools.total_ordering()".\n\nobject.__hash__(self)\n\n Called by built-in function "hash()" and for operations on members\n of hashed collections including "set", "frozenset", and "dict".\n "__hash__()" should return an integer. The only required property\n is that objects which compare equal have the same hash value; it is\n advised to somehow mix together (e.g. using exclusive or) the hash\n values for the components of the object that also play a part in\n comparison of objects.\n\n Note: "hash()" truncates the value returned from an object\'s\n custom "__hash__()" method to the size of a "Py_ssize_t". This\n is typically 8 bytes on 64-bit builds and 4 bytes on 32-bit\n builds. If an object\'s "__hash__()" must interoperate on builds\n of different bit sizes, be sure to check the width on all\n supported builds. An easy way to do this is with "python -c\n "import sys; print(sys.hash_info.width)""\n\n If a class does not define an "__eq__()" method it should not\n define a "__hash__()" operation either; if it defines "__eq__()"\n but not "__hash__()", its instances will not be usable as items in\n hashable collections. If a class defines mutable objects and\n implements an "__eq__()" method, it should not implement\n "__hash__()", since the implementation of hashable collections\n requires that a key\'s hash value is immutable (if the object\'s hash\n value changes, it will be in the wrong hash bucket).\n\n User-defined classes have "__eq__()" and "__hash__()" methods by\n default; with them, all objects compare unequal (except with\n themselves) and "x.__hash__()" returns an appropriate value such\n that "x == y" implies both that "x is y" and "hash(x) == hash(y)".\n\n A class that overrides "__eq__()" and does not define "__hash__()"\n will have its "__hash__()" implicitly set to "None". 
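A minimal sketch of that "__eq__()"/"__hash__()" interaction ("Point" is just an illustrative class):

   class Point:
       def __init__(self, x, y):
           self.x, self.y = x, y

       def __eq__(self, other):
           if not isinstance(other, Point):
               return NotImplemented
           return (self.x, self.y) == (other.x, other.y)

       # without this, defining __eq__() would leave __hash__ set to None
       def __hash__(self):
           return hash((self.x, self.y))

   print(Point(1, 2) == Point(1, 2))        # True
   print(len({Point(1, 2), Point(1, 2)}))   # 1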
When the\n "__hash__()" method of a class is "None", instances of the class\n will raise an appropriate "TypeError" when a program attempts to\n retrieve their hash value, and will also be correctly identified as\n unhashable when checking "isinstance(obj, collections.Hashable)".\n\n If a class that overrides "__eq__()" needs to retain the\n implementation of "__hash__()" from a parent class, the interpreter\n must be told this explicitly by setting "__hash__ =\n <ParentClass>.__hash__".\n\n If a class that does not override "__eq__()" wishes to suppress\n hash support, it should include "__hash__ = None" in the class\n definition. A class which defines its own "__hash__()" that\n explicitly raises a "TypeError" would be incorrectly identified as\n hashable by an "isinstance(obj, collections.Hashable)" call.\n\n Note: By default, the "__hash__()" values of str, bytes and\n datetime objects are "salted" with an unpredictable random value.\n Although they remain constant within an individual Python\n process, they are not predictable between repeated invocations of\n Python. This is intended to provide protection against a denial-\n of-service caused by carefully-chosen inputs that exploit the\n worst case performance of a dict insertion, O(n^2) complexity.\n See http://www.ocert.org/advisories/ocert-2011-003.html for\n details. Changing hash values affects the iteration order of\n dicts, sets and other mappings. Python has never made guarantees\n about this ordering (and it typically varies between 32-bit and\n 64-bit builds). See also "PYTHONHASHSEED".\n\n Changed in version 3.3: Hash randomization is enabled by default.\n\nobject.__bool__(self)\n\n Called to implement truth value testing and the built-in operation\n "bool()"; should return "False" or "True". When this method is not\n defined, "__len__()" is called, if it is defined, and the object is\n considered true if its result is nonzero. If a class defines\n neither "__len__()" nor "__bool__()", all its instances are\n considered true.\n',
 'debugger': u'\n"pdb" --- The Python Debugger\n*****************************\n\n**Source code:** Lib/pdb.py\n\n======================================================================\n\nThe module "pdb" defines an interactive source code debugger for\nPython programs. It supports setting (conditional) breakpoints and\nsingle stepping at the source line level, inspection of stack frames,\nsource code listing, and evaluation of arbitrary Python code in the\ncontext of any stack frame. It also supports post-mortem debugging\nand can be called under program control.\n\nThe debugger is extensible -- it is actually defined as the class\n"Pdb". This is currently undocumented but easily understood by reading\nthe source. The extension interface uses the modules "bdb" and "cmd".\n\nThe debugger\'s prompt is "(Pdb)". Typical usage to run a program under\ncontrol of the debugger is:\n\n >>> import pdb\n >>> import mymodule\n >>> pdb.run(\'mymodule.test()\')\n > <string>(0)?()\n (Pdb) continue\n > <string>(1)?()\n (Pdb) continue\n NameError: \'spam\'\n > <string>(1)?()\n (Pdb)\n\nChanged in version 3.3: Tab-completion via the "readline" module is\navailable for commands and command arguments, e.g. the current global\nand local names are offered as arguments of the "p" command.\n\n"pdb.py" can also be invoked as a script to debug other scripts. For\nexample:\n\n python3 -m pdb myscript.py\n\nWhen invoked as a script, pdb will automatically enter post-mortem\ndebugging if the program being debugged exits abnormally. 
After post-\nmortem debugging (or after normal exit of the program), pdb will\nrestart the program. Automatic restarting preserves pdb\'s state (such\nas breakpoints) and in most cases is more useful than quitting the\ndebugger upon program\'s exit.\n\nNew in version 3.2: "pdb.py" now accepts a "-c" option that executes\ncommands as if given in a ".pdbrc" file, see *Debugger Commands*.\n\nThe typical usage to break into the debugger from a running program is\nto insert\n\n import pdb; pdb.set_trace()\n\nat the location you want to break into the debugger. You can then\nstep through the code following this statement, and continue running\nwithout the debugger using the "continue" command.\n\nThe typical usage to inspect a crashed program is:\n\n >>> import pdb\n >>> import mymodule\n >>> mymodule.test()\n Traceback (most recent call last):\n File "", line 1, in ?\n File "./mymodule.py", line 4, in test\n test2()\n File "./mymodule.py", line 3, in test2\n print(spam)\n NameError: spam\n >>> pdb.pm()\n > ./mymodule.py(3)test2()\n -> print(spam)\n (Pdb)\n\nThe module defines the following functions; each enters the debugger\nin a slightly different way:\n\npdb.run(statement, globals=None, locals=None)\n\n Execute the *statement* (given as a string or a code object) under\n debugger control. The debugger prompt appears before any code is\n executed; you can set breakpoints and type "continue", or you can\n step through the statement using "step" or "next" (all these\n commands are explained below). The optional *globals* and *locals*\n arguments specify the environment in which the code is executed; by\n default the dictionary of the module "__main__" is used. (See the\n explanation of the built-in "exec()" or "eval()" functions.)\n\npdb.runeval(expression, globals=None, locals=None)\n\n Evaluate the *expression* (given as a string or a code object)\n under debugger control. When "runeval()" returns, it returns the\n value of the expression. Otherwise this function is similar to\n "run()".\n\npdb.runcall(function, *args, **kwds)\n\n Call the *function* (a function or method object, not a string)\n with the given arguments. When "runcall()" returns, it returns\n whatever the function call returned. The debugger prompt appears\n as soon as the function is entered.\n\npdb.set_trace()\n\n Enter the debugger at the calling stack frame. This is useful to\n hard-code a breakpoint at a given point in a program, even if the\n code is not otherwise being debugged (e.g. when an assertion\n fails).\n\npdb.post_mortem(traceback=None)\n\n Enter post-mortem debugging of the given *traceback* object. If no\n *traceback* is given, it uses the one of the exception that is\n currently being handled (an exception must be being handled if the\n default is to be used).\n\npdb.pm()\n\n Enter post-mortem debugging of the traceback found in\n "sys.last_traceback".\n\nThe "run*" functions and "set_trace()" are aliases for instantiating\nthe "Pdb" class and calling the method of the same name. If you want\nto access further features, you have to do this yourself:\n\nclass class pdb.Pdb(completekey=\'tab\', stdin=None, stdout=None, skip=None, nosigint=False)\n\n "Pdb" is the debugger class.\n\n The *completekey*, *stdin* and *stdout* arguments are passed to the\n underlying "cmd.Cmd" class; see the description there.\n\n The *skip* argument, if given, must be an iterable of glob-style\n module name patterns. The debugger will not step into frames that\n originate in a module that matches one of these patterns. 
[1]\n\n By default, Pdb sets a handler for the SIGINT signal (which is sent\n when the user presses Ctrl-C on the console) when you give a\n "continue" command. This allows you to break into the debugger\n again by pressing Ctrl-C. If you want Pdb not to touch the SIGINT\n handler, set *nosigint* tot true.\n\n Example call to enable tracing with *skip*:\n\n import pdb; pdb.Pdb(skip=[\'django.*\']).set_trace()\n\n New in version 3.1: The *skip* argument.\n\n New in version 3.2: The *nosigint* argument. Previously, a SIGINT\n handler was never set by Pdb.\n\n run(statement, globals=None, locals=None)\n runeval(expression, globals=None, locals=None)\n runcall(function, *args, **kwds)\n set_trace()\n\n See the documentation for the functions explained above.\n\n\nDebugger Commands\n=================\n\nThe commands recognized by the debugger are listed below. Most\ncommands can be abbreviated to one or two letters as indicated; e.g.\n"h(elp)" means that either "h" or "help" can be used to enter the help\ncommand (but not "he" or "hel", nor "H" or "Help" or "HELP").\nArguments to commands must be separated by whitespace (spaces or\ntabs). Optional arguments are enclosed in square brackets ("[]") in\nthe command syntax; the square brackets must not be typed.\nAlternatives in the command syntax are separated by a vertical bar\n("|").\n\nEntering a blank line repeats the last command entered. Exception: if\nthe last command was a "list" command, the next 11 lines are listed.\n\nCommands that the debugger doesn\'t recognize are assumed to be Python\nstatements and are executed in the context of the program being\ndebugged. Python statements can also be prefixed with an exclamation\npoint ("!"). This is a powerful way to inspect the program being\ndebugged; it is even possible to change a variable or call a function.\nWhen an exception occurs in such a statement, the exception name is\nprinted but the debugger\'s state is not changed.\n\nThe debugger supports *aliases*. Aliases can have parameters which\nallows one a certain level of adaptability to the context under\nexamination.\n\nMultiple commands may be entered on a single line, separated by ";;".\n(A single ";" is not used as it is the separator for multiple commands\nin a line that is passed to the Python parser.) No intelligence is\napplied to separating the commands; the input is split at the first\n";;" pair, even if it is in the middle of a quoted string.\n\nIf a file ".pdbrc" exists in the user\'s home directory or in the\ncurrent directory, it is read in and executed as if it had been typed\nat the debugger prompt. This is particularly useful for aliases. If\nboth files exist, the one in the home directory is read first and\naliases defined there can be overridden by the local file.\n\nChanged in version 3.2: ".pdbrc" can now contain commands that\ncontinue debugging, such as "continue" or "next". Previously, these\ncommands had no effect.\n\nh(elp) [command]\n\n Without argument, print the list of available commands. With a\n *command* as argument, print help about that command. "help pdb"\n displays the full documentation (the docstring of the "pdb"\n module). Since the *command* argument must be an identifier, "help\n exec" must be entered to get help on the "!" command.\n\nw(here)\n\n Print a stack trace, with the most recent frame at the bottom. 
An\n arrow indicates the current frame, which determines the context of\n most commands.\n\nd(own) [count]\n\n Move the current frame *count* (default one) levels down in the\n stack trace (to a newer frame).\n\nu(p) [count]\n\n Move the current frame *count* (default one) levels up in the stack\n trace (to an older frame).\n\nb(reak) [([filename:]lineno | function) [, condition]]\n\n With a *lineno* argument, set a break there in the current file.\n With a *function* argument, set a break at the first executable\n statement within that function. The line number may be prefixed\n with a filename and a colon, to specify a breakpoint in another\n file (probably one that hasn\'t been loaded yet). The file is\n searched on "sys.path". Note that each breakpoint is assigned a\n number to which all the other breakpoint commands refer.\n\n If a second argument is present, it is an expression which must\n evaluate to true before the breakpoint is honored.\n\n Without argument, list all breaks, including for each breakpoint,\n the number of times that breakpoint has been hit, the current\n ignore count, and the associated condition if any.\n\ntbreak [([filename:]lineno | function) [, condition]]\n\n Temporary breakpoint, which is removed automatically when it is\n first hit. The arguments are the same as for "break".\n\ncl(ear) [filename:lineno | bpnumber [bpnumber ...]]\n\n With a *filename:lineno* argument, clear all the breakpoints at\n this line. With a space separated list of breakpoint numbers, clear\n those breakpoints. Without argument, clear all breaks (but first\n ask confirmation).\n\ndisable [bpnumber [bpnumber ...]]\n\n Disable the breakpoints given as a space separated list of\n breakpoint numbers. Disabling a breakpoint means it cannot cause\n the program to stop execution, but unlike clearing a breakpoint, it\n remains in the list of breakpoints and can be (re-)enabled.\n\nenable [bpnumber [bpnumber ...]]\n\n Enable the breakpoints specified.\n\nignore bpnumber [count]\n\n Set the ignore count for the given breakpoint number. If count is\n omitted, the ignore count is set to 0. A breakpoint becomes active\n when the ignore count is zero. When non-zero, the count is\n decremented each time the breakpoint is reached and the breakpoint\n is not disabled and any associated condition evaluates to true.\n\ncondition bpnumber [condition]\n\n Set a new *condition* for the breakpoint, an expression which must\n evaluate to true before the breakpoint is honored. If *condition*\n is absent, any existing condition is removed; i.e., the breakpoint\n is made unconditional.\n\ncommands [bpnumber]\n\n Specify a list of commands for breakpoint number *bpnumber*. The\n commands themselves appear on the following lines. Type a line\n containing just "end" to terminate the commands. An example:\n\n (Pdb) commands 1\n (com) p some_variable\n (com) end\n (Pdb)\n\n To remove all commands from a breakpoint, type commands and follow\n it immediately with "end"; that is, give no commands.\n\n With no *bpnumber* argument, commands refers to the last breakpoint\n set.\n\n You can use breakpoint commands to start your program up again.\n Simply use the continue command, or step, or any other command that\n resumes execution.\n\n Specifying any command resuming execution (currently continue,\n step, next, return, jump, quit and their abbreviations) terminates\n the command list (as if that command was immediately followed by\n end). 
This is because any time you resume execution (even with a\n simple next or step), you may encounter another breakpoint--which\n could have its own command list, leading to ambiguities about which\n list to execute.\n\n If you use the \'silent\' command in the command list, the usual\n message about stopping at a breakpoint is not printed. This may be\n desirable for breakpoints that are to print a specific message and\n then continue. If none of the other commands print anything, you\n see no sign that the breakpoint was reached.\n\ns(tep)\n\n Execute the current line, stop at the first possible occasion\n (either in a function that is called or on the next line in the\n current function).\n\nn(ext)\n\n Continue execution until the next line in the current function is\n reached or it returns. (The difference between "next" and "step"\n is that "step" stops inside a called function, while "next"\n executes called functions at (nearly) full speed, only stopping at\n the next line in the current function.)\n\nunt(il) [lineno]\n\n Without argument, continue execution until the line with a number\n greater than the current one is reached.\n\n With a line number, continue execution until a line with a number\n greater or equal to that is reached. In both cases, also stop when\n the current frame returns.\n\n Changed in version 3.2: Allow giving an explicit line number.\n\nr(eturn)\n\n Continue execution until the current function returns.\n\nc(ont(inue))\n\n Continue execution, only stop when a breakpoint is encountered.\n\nj(ump) lineno\n\n Set the next line that will be executed. Only available in the\n bottom-most frame. This lets you jump back and execute code again,\n or jump forward to skip code that you don\'t want to run.\n\n It should be noted that not all jumps are allowed -- for instance\n it is not possible to jump into the middle of a "for" loop or out\n of a "finally" clause.\n\nl(ist) [first[, last]]\n\n List source code for the current file. Without arguments, list 11\n lines around the current line or continue the previous listing.\n With "." as argument, list 11 lines around the current line. With\n one argument, list 11 lines around at that line. With two\n arguments, list the given range; if the second argument is less\n than the first, it is interpreted as a count.\n\n The current line in the current frame is indicated by "->". 
If an\n exception is being debugged, the line where the exception was\n originally raised or propagated is indicated by ">>", if it differs\n from the current line.\n\n New in version 3.2: The ">>" marker.\n\nll | longlist\n\n List all source code for the current function or frame.\n Interesting lines are marked as for "list".\n\n New in version 3.2.\n\na(rgs)\n\n Print the argument list of the current function.\n\np expression\n\n Evaluate the *expression* in the current context and print its\n value.\n\n Note: "print()" can also be used, but is not a debugger command\n --- this executes the Python "print()" function.\n\npp expression\n\n Like the "p" command, except the value of the expression is pretty-\n printed using the "pprint" module.\n\nwhatis expression\n\n Print the type of the *expression*.\n\nsource expression\n\n Try to get source code for the given object and display it.\n\n New in version 3.2.\n\ndisplay [expression]\n\n Display the value of the expression if it changed, each time\n execution stops in the current frame.\n\n Without expression, list all display expressions for the current\n frame.\n\n New in version 3.2.\n\nundisplay [expression]\n\n Do not display the expression any more in the current frame.\n Without expression, clear all display expressions for the current\n frame.\n\n New in version 3.2.\n\ninteract\n\n Start an interative interpreter (using the "code" module) whose\n global namespace contains all the (global and local) names found in\n the current scope.\n\n New in version 3.2.\n\nalias [name [command]]\n\n Create an alias called *name* that executes *command*. The command\n must *not* be enclosed in quotes. Replaceable parameters can be\n indicated by "%1", "%2", and so on, while "%*" is replaced by all\n the parameters. If no command is given, the current alias for\n *name* is shown. If no arguments are given, all aliases are listed.\n\n Aliases may be nested and can contain anything that can be legally\n typed at the pdb prompt. Note that internal pdb commands *can* be\n overridden by aliases. Such a command is then hidden until the\n alias is removed. Aliasing is recursively applied to the first\n word of the command line; all other words in the line are left\n alone.\n\n As an example, here are two useful aliases (especially when placed\n in the ".pdbrc" file):\n\n # Print instance variables (usage "pi classInst")\n alias pi for k in %1.__dict__.keys(): print("%1.",k,"=",%1.__dict__[k])\n # Print instance variables in self\n alias ps pi self\n\nunalias name\n\n Delete the specified alias.\n\n! statement\n\n Execute the (one-line) *statement* in the context of the current\n stack frame. The exclamation point can be omitted unless the first\n word of the statement resembles a debugger command. To set a\n global variable, you can prefix the assignment command with a\n "global" statement on the same line, e.g.:\n\n (Pdb) global list_options; list_options = [\'-l\']\n (Pdb)\n\nrun [args ...]\nrestart [args ...]\n\n Restart the debugged Python program. If an argument is supplied,\n it is split with "shlex" and the result is used as the new\n "sys.argv". History, breakpoints, actions and debugger options are\n preserved. "restart" is an alias for "run".\n\nq(uit)\n\n Quit from the debugger. 
The program being executed is aborted.\n\n-[ Footnotes ]-\n\n[1] Whether a frame is considered to originate in a certain module\n is determined by the "__name__" in the frame globals.\n', + 'customization': u'\nBasic customization\n*******************\n\nobject.__new__(cls[, ...])\n\n Called to create a new instance of class *cls*. "__new__()" is a\n static method (special-cased so you need not declare it as such)\n that takes the class of which an instance was requested as its\n first argument. The remaining arguments are those passed to the\n object constructor expression (the call to the class). The return\n value of "__new__()" should be the new object instance (usually an\n instance of *cls*).\n\n Typical implementations create a new instance of the class by\n invoking the superclass\'s "__new__()" method using\n "super(currentclass, cls).__new__(cls[, ...])" with appropriate\n arguments and then modifying the newly-created instance as\n necessary before returning it.\n\n If "__new__()" returns an instance of *cls*, then the new\n instance\'s "__init__()" method will be invoked like\n "__init__(self[, ...])", where *self* is the new instance and the\n remaining arguments are the same as were passed to "__new__()".\n\n If "__new__()" does not return an instance of *cls*, then the new\n instance\'s "__init__()" method will not be invoked.\n\n "__new__()" is intended mainly to allow subclasses of immutable\n types (like int, str, or tuple) to customize instance creation. It\n is also commonly overridden in custom metaclasses in order to\n customize class creation.\n\nobject.__init__(self[, ...])\n\n Called after the instance has been created (by "__new__()"), but\n before it is returned to the caller. The arguments are those\n passed to the class constructor expression. If a base class has an\n "__init__()" method, the derived class\'s "__init__()" method, if\n any, must explicitly call it to ensure proper initialization of the\n base class part of the instance; for example:\n "BaseClass.__init__(self, [args...])".\n\n Because "__new__()" and "__init__()" work together in constructing\n objects ("__new__()" to create it, and "__init__()" to customise\n it), no non-"None" value may be returned by "__init__()"; doing so\n will cause a "TypeError" to be raised at runtime.\n\nobject.__del__(self)\n\n Called when the instance is about to be destroyed. This is also\n called a destructor. If a base class has a "__del__()" method, the\n derived class\'s "__del__()" method, if any, must explicitly call it\n to ensure proper deletion of the base class part of the instance.\n Note that it is possible (though not recommended!) for the\n "__del__()" method to postpone destruction of the instance by\n creating a new reference to it. It may then be called at a later\n time when this new reference is deleted. It is not guaranteed that\n "__del__()" methods are called for objects that still exist when\n the interpreter exits.\n\n Note: "del x" doesn\'t directly call "x.__del__()" --- the former\n decrements the reference count for "x" by one, and the latter is\n only called when "x"\'s reference count reaches zero. 
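The __new__/__init__ split documented above is easiest to see with a small sketch; the Point class below is purely illustrative (it does not come from the quoted documentation) and shows the usual pattern for customising creation of an immutable type:

    class Point(tuple):
        "Immutable 2-D point; the values must be fixed in __new__."

        def __new__(cls, x, y):
            # __new__ creates the instance; because tuple is immutable,
            # the contents cannot be filled in later by __init__.
            return super().__new__(cls, (x, y))

        @property
        def x(self):
            return self[0]

        @property
        def y(self):
            return self[1]

    p = Point(2, 3)
    print(p, p.x, p.y)      # (2, 3) 2 3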
Some common\n situations that may prevent the reference count of an object from\n going to zero include: circular references between objects (e.g.,\n a doubly-linked list or a tree data structure with parent and\n child pointers); a reference to the object on the stack frame of\n a function that caught an exception (the traceback stored in\n "sys.exc_info()[2]" keeps the stack frame alive); or a reference\n to the object on the stack frame that raised an unhandled\n exception in interactive mode (the traceback stored in\n "sys.last_traceback" keeps the stack frame alive). The first\n situation can only be remedied by explicitly breaking the cycles;\n the second can be resolved by freeing the reference to the\n traceback object when it is no longer useful, and the third can\n be resolved by storing "None" in "sys.last_traceback". Circular\n references which are garbage are detected and cleaned up when the\n cyclic garbage collector is enabled (it\'s on by default). Refer\n to the documentation for the "gc" module for more information\n about this topic.\n\n Warning: Due to the precarious circumstances under which\n "__del__()" methods are invoked, exceptions that occur during\n their execution are ignored, and a warning is printed to\n "sys.stderr" instead. Also, when "__del__()" is invoked in\n response to a module being deleted (e.g., when execution of the\n program is done), other globals referenced by the "__del__()"\n method may already have been deleted or in the process of being\n torn down (e.g. the import machinery shutting down). For this\n reason, "__del__()" methods should do the absolute minimum needed\n to maintain external invariants. Starting with version 1.5,\n Python guarantees that globals whose name begins with a single\n underscore are deleted from their module before other globals are\n deleted; if no other references to such globals exist, this may\n help in assuring that imported modules are still available at the\n time when the "__del__()" method is called.\n\nobject.__repr__(self)\n\n Called by the "repr()" built-in function to compute the "official"\n string representation of an object. If at all possible, this\n should look like a valid Python expression that could be used to\n recreate an object with the same value (given an appropriate\n environment). If this is not possible, a string of the form\n "<...some useful description...>" should be returned. The return\n value must be a string object. If a class defines "__repr__()" but\n not "__str__()", then "__repr__()" is also used when an "informal"\n string representation of instances of that class is required.\n\n This is typically used for debugging, so it is important that the\n representation is information-rich and unambiguous.\n\nobject.__str__(self)\n\n Called by "str(object)" and the built-in functions "format()" and\n "print()" to compute the "informal" or nicely printable string\n representation of an object. The return value must be a *string*\n object.\n\n This method differs from "object.__repr__()" in that there is no\n expectation that "__str__()" return a valid Python expression: a\n more convenient or concise representation can be used.\n\n The default implementation defined by the built-in type "object"\n calls "object.__repr__()".\n\nobject.__bytes__(self)\n\n Called by "bytes()" to compute a byte-string representation of an\n object. 
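A minimal sketch contrasting __repr__, __str__ and __bytes__; the Temperature class is an invented example, not part of the quoted text:

    class Temperature:
        def __init__(self, celsius):
            self.celsius = celsius

        def __repr__(self):
            # Unambiguous, ideally eval()-able form, used for debugging.
            return "Temperature(celsius=%r)" % self.celsius

        def __str__(self):
            # Informal, human-readable form.
            return "%g degrees Celsius" % self.celsius

        def __bytes__(self):
            # Byte-string form; the encoding is the class's own choice.
            return str(self).encode("ascii")

    t = Temperature(21.5)
    print(repr(t))      # Temperature(celsius=21.5)
    print(str(t))       # 21.5 degrees Celsius
    print(bytes(t))     # b'21.5 degrees Celsius'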
This should return a "bytes" object.\n\nobject.__format__(self, format_spec)\n\n Called by the "format()" built-in function (and by extension, the\n "str.format()" method of class "str") to produce a "formatted"\n string representation of an object. The "format_spec" argument is a\n string that contains a description of the formatting options\n desired. The interpretation of the "format_spec" argument is up to\n the type implementing "__format__()", however most classes will\n either delegate formatting to one of the built-in types, or use a\n similar formatting option syntax.\n\n See *Format Specification Mini-Language* for a description of the\n standard formatting syntax.\n\n The return value must be a string object.\n\n Changed in version 3.4: The __format__ method of "object" itself\n raises a "TypeError" if passed any non-empty string.\n\nobject.__lt__(self, other)\nobject.__le__(self, other)\nobject.__eq__(self, other)\nobject.__ne__(self, other)\nobject.__gt__(self, other)\nobject.__ge__(self, other)\n\n These are the so-called "rich comparison" methods. The\n correspondence between operator symbols and method names is as\n follows: "xy" calls\n "x.__gt__(y)", and "x>=y" calls "x.__ge__(y)".\n\n A rich comparison method may return the singleton "NotImplemented"\n if it does not implement the operation for a given pair of\n arguments. By convention, "False" and "True" are returned for a\n successful comparison. However, these methods can return any value,\n so if the comparison operator is used in a Boolean context (e.g.,\n in the condition of an "if" statement), Python will call "bool()"\n on the value to determine if the result is true or false.\n\n By default, "__ne__()" delegates to "__eq__()" and inverts the\n result unless it is "NotImplemented". There are no other implied\n relationships among the comparison operators, for example, the\n truth of "(x.__hash__".\n\n If a class that does not override "__eq__()" wishes to suppress\n hash support, it should include "__hash__ = None" in the class\n definition. A class which defines its own "__hash__()" that\n explicitly raises a "TypeError" would be incorrectly identified as\n hashable by an "isinstance(obj, collections.Hashable)" call.\n\n Note: By default, the "__hash__()" values of str, bytes and\n datetime objects are "salted" with an unpredictable random value.\n Although they remain constant within an individual Python\n process, they are not predictable between repeated invocations of\n Python.This is intended to provide protection against a denial-\n of-service caused by carefully-chosen inputs that exploit the\n worst case performance of a dict insertion, O(n^2) complexity.\n See http://www.ocert.org/advisories/ocert-2011-003.html for\n details.Changing hash values affects the iteration order of\n dicts, sets and other mappings. Python has never made guarantees\n about this ordering (and it typically varies between 32-bit and\n 64-bit builds).See also "PYTHONHASHSEED".\n\n Changed in version 3.3: Hash randomization is enabled by default.\n\nobject.__bool__(self)\n\n Called to implement truth value testing and the built-in operation\n "bool()"; should return "False" or "True". When this method is not\n defined, "__len__()" is called, if it is defined, and the object is\n considered true if its result is nonzero. 
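Pulling the comparison and hashing rules together, a hypothetical Version class might look like the sketch below: it returns "NotImplemented" for unsupported operands, keeps "__hash__()" consistent with "__eq__()", and lets "functools.total_ordering" derive the remaining comparison methods:

    import functools

    @functools.total_ordering
    class Version:
        def __init__(self, major, minor):
            self.major = major
            self.minor = minor

        def __eq__(self, other):
            if not isinstance(other, Version):
                return NotImplemented   # let Python try the other operand
            return (self.major, self.minor) == (other.major, other.minor)

        def __lt__(self, other):
            if not isinstance(other, Version):
                return NotImplemented
            return (self.major, self.minor) < (other.major, other.minor)

        def __hash__(self):
            # Defining __eq__ alone would set __hash__ to None, so restore
            # hashability; equal versions must hash equal.
            return hash((self.major, self.minor))

    assert Version(3, 4) < Version(3, 5)
    assert Version(3, 4) == Version(3, 4)
    assert len({Version(3, 4), Version(3, 5), Version(3, 4)}) == 2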
If a class defines\n neither "__len__()" nor "__bool__()", all its instances are\n considered true.\n', + 'debugger': u'\n"pdb" --- The Python Debugger\n*****************************\n\n**Source code:** Lib/pdb.py\n\n======================================================================\n\nThe module "pdb" defines an interactive source code debugger for\nPython programs. It supports setting (conditional) breakpoints and\nsingle stepping at the source line level, inspection of stack frames,\nsource code listing, and evaluation of arbitrary Python code in the\ncontext of any stack frame. It also supports post-mortem debugging\nand can be called under program control.\n\nThe debugger is extensible -- it is actually defined as the class\n"Pdb". This is currently undocumented but easily understood by reading\nthe source. The extension interface uses the modules "bdb" and "cmd".\n\nThe debugger\'s prompt is "(Pdb)". Typical usage to run a program under\ncontrol of the debugger is:\n\n >>> import pdb\n >>> import mymodule\n >>> pdb.run(\'mymodule.test()\')\n > (0)?()\n (Pdb) continue\n > (1)?()\n (Pdb) continue\n NameError: \'spam\'\n > (1)?()\n (Pdb)\n\nChanged in version 3.3: Tab-completion via the "readline" module is\navailable for commands and command arguments, e.g. the current global\nand local names are offered as arguments of the "p" command.\n\n"pdb.py" can also be invoked as a script to debug other scripts. For\nexample:\n\n python3 -m pdb myscript.py\n\nWhen invoked as a script, pdb will automatically enter post-mortem\ndebugging if the program being debugged exits abnormally. After post-\nmortem debugging (or after normal exit of the program), pdb will\nrestart the program. Automatic restarting preserves pdb\'s state (such\nas breakpoints) and in most cases is more useful than quitting the\ndebugger upon program\'s exit.\n\nNew in version 3.2: "pdb.py" now accepts a "-c" option that executes\ncommands as if given in a ".pdbrc" file, see *Debugger Commands*.\n\nThe typical usage to break into the debugger from a running program is\nto insert\n\n import pdb; pdb.set_trace()\n\nat the location you want to break into the debugger. You can then\nstep through the code following this statement, and continue running\nwithout the debugger using the "continue" command.\n\nThe typical usage to inspect a crashed program is:\n\n >>> import pdb\n >>> import mymodule\n >>> mymodule.test()\n Traceback (most recent call last):\n File "", line 1, in ?\n File "./mymodule.py", line 4, in test\n test2()\n File "./mymodule.py", line 3, in test2\n print(spam)\n NameError: spam\n >>> pdb.pm()\n > ./mymodule.py(3)test2()\n -> print(spam)\n (Pdb)\n\nThe module defines the following functions; each enters the debugger\nin a slightly different way:\n\npdb.run(statement, globals=None, locals=None)\n\n Execute the *statement* (given as a string or a code object) under\n debugger control. The debugger prompt appears before any code is\n executed; you can set breakpoints and type "continue", or you can\n step through the statement using "step" or "next" (all these\n commands are explained below). The optional *globals* and *locals*\n arguments specify the environment in which the code is executed; by\n default the dictionary of the module "__main__" is used. (See the\n explanation of the built-in "exec()" or "eval()" functions.)\n\npdb.runeval(expression, globals=None, locals=None)\n\n Evaluate the *expression* (given as a string or a code object)\n under debugger control. 
When "runeval()" returns, it returns the\n value of the expression. Otherwise this function is similar to\n "run()".\n\npdb.runcall(function, *args, **kwds)\n\n Call the *function* (a function or method object, not a string)\n with the given arguments. When "runcall()" returns, it returns\n whatever the function call returned. The debugger prompt appears\n as soon as the function is entered.\n\npdb.set_trace()\n\n Enter the debugger at the calling stack frame. This is useful to\n hard-code a breakpoint at a given point in a program, even if the\n code is not otherwise being debugged (e.g. when an assertion\n fails).\n\npdb.post_mortem(traceback=None)\n\n Enter post-mortem debugging of the given *traceback* object. If no\n *traceback* is given, it uses the one of the exception that is\n currently being handled (an exception must be being handled if the\n default is to be used).\n\npdb.pm()\n\n Enter post-mortem debugging of the traceback found in\n "sys.last_traceback".\n\nThe "run*" functions and "set_trace()" are aliases for instantiating\nthe "Pdb" class and calling the method of the same name. If you want\nto access further features, you have to do this yourself:\n\nclass class pdb.Pdb(completekey=\'tab\', stdin=None, stdout=None, skip=None, nosigint=False)\n\n "Pdb" is the debugger class.\n\n The *completekey*, *stdin* and *stdout* arguments are passed to the\n underlying "cmd.Cmd" class; see the description there.\n\n The *skip* argument, if given, must be an iterable of glob-style\n module name patterns. The debugger will not step into frames that\n originate in a module that matches one of these patterns. [1]\n\n By default, Pdb sets a handler for the SIGINT signal (which is sent\n when the user presses "Ctrl-C" on the console) when you give a\n "continue" command. This allows you to break into the debugger\n again by pressing "Ctrl-C". If you want Pdb not to touch the\n SIGINT handler, set *nosigint* tot true.\n\n Example call to enable tracing with *skip*:\n\n import pdb; pdb.Pdb(skip=[\'django.*\']).set_trace()\n\n New in version 3.1: The *skip* argument.\n\n New in version 3.2: The *nosigint* argument. Previously, a SIGINT\n handler was never set by Pdb.\n\n run(statement, globals=None, locals=None)\n runeval(expression, globals=None, locals=None)\n runcall(function, *args, **kwds)\n set_trace()\n\n See the documentation for the functions explained above.\n\n\nDebugger Commands\n=================\n\nThe commands recognized by the debugger are listed below. Most\ncommands can be abbreviated to one or two letters as indicated; e.g.\n"h(elp)" means that either "h" or "help" can be used to enter the help\ncommand (but not "he" or "hel", nor "H" or "Help" or "HELP").\nArguments to commands must be separated by whitespace (spaces or\ntabs). Optional arguments are enclosed in square brackets ("[]") in\nthe command syntax; the square brackets must not be typed.\nAlternatives in the command syntax are separated by a vertical bar\n("|").\n\nEntering a blank line repeats the last command entered. Exception: if\nthe last command was a "list" command, the next 11 lines are listed.\n\nCommands that the debugger doesn\'t recognize are assumed to be Python\nstatements and are executed in the context of the program being\ndebugged. Python statements can also be prefixed with an exclamation\npoint ("!"). 
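As a sketch of how the functions above are typically combined -- the buggy() function and its ZeroDivisionError are invented for illustration, and the snippet is meant to be run from an interactive session:

    import pdb

    def buggy(n):
        total = sum(range(n))
        return total / 0            # deliberate ZeroDivisionError

    # Run a single call under debugger control; the (Pdb) prompt appears
    # as soon as buggy() is entered.
    #     pdb.runcall(buggy, 5)

    # Or inspect a failure after the fact with post-mortem debugging:
    try:
        buggy(5)
    except ZeroDivisionError:
        pdb.post_mortem()           # uses the exception being handled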
This is a powerful way to inspect the program being\ndebugged; it is even possible to change a variable or call a function.\nWhen an exception occurs in such a statement, the exception name is\nprinted but the debugger\'s state is not changed.\n\nThe debugger supports *aliases*. Aliases can have parameters which\nallows one a certain level of adaptability to the context under\nexamination.\n\nMultiple commands may be entered on a single line, separated by ";;".\n(A single ";" is not used as it is the separator for multiple commands\nin a line that is passed to the Python parser.) No intelligence is\napplied to separating the commands; the input is split at the first\n";;" pair, even if it is in the middle of a quoted string.\n\nIf a file ".pdbrc" exists in the user\'s home directory or in the\ncurrent directory, it is read in and executed as if it had been typed\nat the debugger prompt. This is particularly useful for aliases. If\nboth files exist, the one in the home directory is read first and\naliases defined there can be overridden by the local file.\n\nChanged in version 3.2: ".pdbrc" can now contain commands that\ncontinue debugging, such as "continue" or "next". Previously, these\ncommands had no effect.\n\nh(elp) [command]\n\n Without argument, print the list of available commands. With a\n *command* as argument, print help about that command. "help pdb"\n displays the full documentation (the docstring of the "pdb"\n module). Since the *command* argument must be an identifier, "help\n exec" must be entered to get help on the "!" command.\n\nw(here)\n\n Print a stack trace, with the most recent frame at the bottom. An\n arrow indicates the current frame, which determines the context of\n most commands.\n\nd(own) [count]\n\n Move the current frame *count* (default one) levels down in the\n stack trace (to a newer frame).\n\nu(p) [count]\n\n Move the current frame *count* (default one) levels up in the stack\n trace (to an older frame).\n\nb(reak) [([filename:]lineno | function) [, condition]]\n\n With a *lineno* argument, set a break there in the current file.\n With a *function* argument, set a break at the first executable\n statement within that function. The line number may be prefixed\n with a filename and a colon, to specify a breakpoint in another\n file (probably one that hasn\'t been loaded yet). The file is\n searched on "sys.path". Note that each breakpoint is assigned a\n number to which all the other breakpoint commands refer.\n\n If a second argument is present, it is an expression which must\n evaluate to true before the breakpoint is honored.\n\n Without argument, list all breaks, including for each breakpoint,\n the number of times that breakpoint has been hit, the current\n ignore count, and the associated condition if any.\n\ntbreak [([filename:]lineno | function) [, condition]]\n\n Temporary breakpoint, which is removed automatically when it is\n first hit. The arguments are the same as for "break".\n\ncl(ear) [filename:lineno | bpnumber [bpnumber ...]]\n\n With a *filename:lineno* argument, clear all the breakpoints at\n this line. With a space separated list of breakpoint numbers, clear\n those breakpoints. Without argument, clear all breaks (but first\n ask confirmation).\n\ndisable [bpnumber [bpnumber ...]]\n\n Disable the breakpoints given as a space separated list of\n breakpoint numbers. 
Disabling a breakpoint means it cannot cause\n the program to stop execution, but unlike clearing a breakpoint, it\n remains in the list of breakpoints and can be (re-)enabled.\n\nenable [bpnumber [bpnumber ...]]\n\n Enable the breakpoints specified.\n\nignore bpnumber [count]\n\n Set the ignore count for the given breakpoint number. If count is\n omitted, the ignore count is set to 0. A breakpoint becomes active\n when the ignore count is zero. When non-zero, the count is\n decremented each time the breakpoint is reached and the breakpoint\n is not disabled and any associated condition evaluates to true.\n\ncondition bpnumber [condition]\n\n Set a new *condition* for the breakpoint, an expression which must\n evaluate to true before the breakpoint is honored. If *condition*\n is absent, any existing condition is removed; i.e., the breakpoint\n is made unconditional.\n\ncommands [bpnumber]\n\n Specify a list of commands for breakpoint number *bpnumber*. The\n commands themselves appear on the following lines. Type a line\n containing just "end" to terminate the commands. An example:\n\n (Pdb) commands 1\n (com) p some_variable\n (com) end\n (Pdb)\n\n To remove all commands from a breakpoint, type commands and follow\n it immediately with "end"; that is, give no commands.\n\n With no *bpnumber* argument, commands refers to the last breakpoint\n set.\n\n You can use breakpoint commands to start your program up again.\n Simply use the continue command, or step, or any other command that\n resumes execution.\n\n Specifying any command resuming execution (currently continue,\n step, next, return, jump, quit and their abbreviations) terminates\n the command list (as if that command was immediately followed by\n end). This is because any time you resume execution (even with a\n simple next or step), you may encounter another breakpoint--which\n could have its own command list, leading to ambiguities about which\n list to execute.\n\n If you use the \'silent\' command in the command list, the usual\n message about stopping at a breakpoint is not printed. This may be\n desirable for breakpoints that are to print a specific message and\n then continue. If none of the other commands print anything, you\n see no sign that the breakpoint was reached.\n\ns(tep)\n\n Execute the current line, stop at the first possible occasion\n (either in a function that is called or on the next line in the\n current function).\n\nn(ext)\n\n Continue execution until the next line in the current function is\n reached or it returns. (The difference between "next" and "step"\n is that "step" stops inside a called function, while "next"\n executes called functions at (nearly) full speed, only stopping at\n the next line in the current function.)\n\nunt(il) [lineno]\n\n Without argument, continue execution until the line with a number\n greater than the current one is reached.\n\n With a line number, continue execution until a line with a number\n greater or equal to that is reached. In both cases, also stop when\n the current frame returns.\n\n Changed in version 3.2: Allow giving an explicit line number.\n\nr(eturn)\n\n Continue execution until the current function returns.\n\nc(ont(inue))\n\n Continue execution, only stop when a breakpoint is encountered.\n\nj(ump) lineno\n\n Set the next line that will be executed. Only available in the\n bottom-most frame. 
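For instance, a session along the following lines combines a conditional breakpoint with "ignore" and a "commands" list that uses "silent"; the file name, line number and the "count" variable are hypothetical, and breakpoint number 1 assumes this is the first breakpoint set:

    (Pdb) break myscript.py:42, count > 100
    (Pdb) ignore 1 5
    (Pdb) commands 1
    (com) silent
    (com) p count
    (com) end
    (Pdb) continue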
This lets you jump back and execute code again,\n or jump forward to skip code that you don\'t want to run.\n\n It should be noted that not all jumps are allowed -- for instance\n it is not possible to jump into the middle of a "for" loop or out\n of a "finally" clause.\n\nl(ist) [first[, last]]\n\n List source code for the current file. Without arguments, list 11\n lines around the current line or continue the previous listing.\n With "." as argument, list 11 lines around the current line. With\n one argument, list 11 lines around at that line. With two\n arguments, list the given range; if the second argument is less\n than the first, it is interpreted as a count.\n\n The current line in the current frame is indicated by "->". If an\n exception is being debugged, the line where the exception was\n originally raised or propagated is indicated by ">>", if it differs\n from the current line.\n\n New in version 3.2: The ">>" marker.\n\nll | longlist\n\n List all source code for the current function or frame.\n Interesting lines are marked as for "list".\n\n New in version 3.2.\n\na(rgs)\n\n Print the argument list of the current function.\n\np expression\n\n Evaluate the *expression* in the current context and print its\n value.\n\n Note: "print()" can also be used, but is not a debugger command\n --- this executes the Python "print()" function.\n\npp expression\n\n Like the "p" command, except the value of the expression is pretty-\n printed using the "pprint" module.\n\nwhatis expression\n\n Print the type of the *expression*.\n\nsource expression\n\n Try to get source code for the given object and display it.\n\n New in version 3.2.\n\ndisplay [expression]\n\n Display the value of the expression if it changed, each time\n execution stops in the current frame.\n\n Without expression, list all display expressions for the current\n frame.\n\n New in version 3.2.\n\nundisplay [expression]\n\n Do not display the expression any more in the current frame.\n Without expression, clear all display expressions for the current\n frame.\n\n New in version 3.2.\n\ninteract\n\n Start an interative interpreter (using the "code" module) whose\n global namespace contains all the (global and local) names found in\n the current scope.\n\n New in version 3.2.\n\nalias [name [command]]\n\n Create an alias called *name* that executes *command*. The command\n must *not* be enclosed in quotes. Replaceable parameters can be\n indicated by "%1", "%2", and so on, while "%*" is replaced by all\n the parameters. If no command is given, the current alias for\n *name* is shown. If no arguments are given, all aliases are listed.\n\n Aliases may be nested and can contain anything that can be legally\n typed at the pdb prompt. Note that internal pdb commands *can* be\n overridden by aliases. Such a command is then hidden until the\n alias is removed. Aliasing is recursively applied to the first\n word of the command line; all other words in the line are left\n alone.\n\n As an example, here are two useful aliases (especially when placed\n in the ".pdbrc" file):\n\n # Print instance variables (usage "pi classInst")\n alias pi for k in %1.__dict__.keys(): print("%1.",k,"=",%1.__dict__[k])\n # Print instance variables in self\n alias ps pi self\n\nunalias name\n\n Delete the specified alias.\n\n! statement\n\n Execute the (one-line) *statement* in the context of the current\n stack frame. The exclamation point can be omitted unless the first\n word of the statement resembles a debugger command. 
To set a\n global variable, you can prefix the assignment command with a\n "global" statement on the same line, e.g.:\n\n (Pdb) global list_options; list_options = [\'-l\']\n (Pdb)\n\nrun [args ...]\nrestart [args ...]\n\n Restart the debugged Python program. If an argument is supplied,\n it is split with "shlex" and the result is used as the new\n "sys.argv". History, breakpoints, actions and debugger options are\n preserved. "restart" is an alias for "run".\n\nq(uit)\n\n Quit from the debugger. The program being executed is aborted.\n\n-[ Footnotes ]-\n\n[1] Whether a frame is considered to originate in a certain module\n is determined by the "__name__" in the frame globals.\n', 'del': u'\nThe "del" statement\n*******************\n\n del_stmt ::= "del" target_list\n\nDeletion is recursively defined very similar to the way assignment is\ndefined. Rather than spelling it out in full details, here are some\nhints.\n\nDeletion of a target list recursively deletes each target, from left\nto right.\n\nDeletion of a name removes the binding of that name from the local or\nglobal namespace, depending on whether the name occurs in a "global"\nstatement in the same code block. If the name is unbound, a\n"NameError" exception will be raised.\n\nDeletion of attribute references, subscriptions and slicings is passed\nto the primary object involved; deletion of a slicing is in general\nequivalent to assignment of an empty slice of the right type (but even\nthis is determined by the sliced object).\n\nChanged in version 3.2: Previously it was illegal to delete a name\nfrom the local namespace if it occurs as a free variable in a nested\nblock.\n', 'dict': u'\nDictionary displays\n*******************\n\nA dictionary display is a possibly empty series of key/datum pairs\nenclosed in curly braces:\n\n dict_display ::= "{" [key_datum_list | dict_comprehension] "}"\n key_datum_list ::= key_datum ("," key_datum)* [","]\n key_datum ::= expression ":" expression\n dict_comprehension ::= expression ":" expression comp_for\n\nA dictionary display yields a new dictionary object.\n\nIf a comma-separated sequence of key/datum pairs is given, they are\nevaluated from left to right to define the entries of the dictionary:\neach key object is used as a key into the dictionary to store the\ncorresponding datum. This means that you can specify the same key\nmultiple times in the key/datum list, and the final dictionary\'s value\nfor that key will be the last one given.\n\nA dict comprehension, in contrast to list and set comprehensions,\nneeds two expressions separated with a colon followed by the usual\n"for" and "if" clauses. When the comprehension is run, the resulting\nkey and value elements are inserted in the new dictionary in the order\nthey are produced.\n\nRestrictions on the types of the key values are listed earlier in\nsection *The standard type hierarchy*. (To summarize, the key type\nshould be *hashable*, which excludes all mutable objects.) Clashes\nbetween duplicate keys are not detected; the last datum (textually\nrightmost in the display) stored for a given key value prevails.\n', - 'dynamic-features': u'\nInteraction with dynamic features\n*********************************\n\nThere are several cases where Python statements are illegal when used\nin conjunction with nested scopes that contain free variables.\n\nIf a variable is referenced in an enclosing scope, it is illegal to\ndelete the name. 
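Illustrating the dictionary display rules quoted above (duplicate keys and a small comprehension); key ordering is not significant:

    # The last datum for a duplicate key wins: 'a' ends up mapped to 3.
    d = {'a': 1, 'b': 2, 'a': 3}
    assert d['a'] == 3 and len(d) == 2

    # A dict comprehension pairs "key: value" with the usual for clause.
    squares = {n: n * n for n in range(5)}
    assert squares[4] == 16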
An error will be reported at compile time.\n\nIf the wild card form of import --- "import *" --- is used in a\nfunction and the function contains or is a nested block with free\nvariables, the compiler will raise a "SyntaxError".\n\nThe "eval()" and "exec()" functions do not have access to the full\nenvironment for resolving names. Names may be resolved in the local\nand global namespaces of the caller. Free variables are not resolved\nin the nearest enclosing namespace, but in the global namespace. [1]\nThe "exec()" and "eval()" functions have optional arguments to\noverride the global and local namespace. If only one namespace is\nspecified, it is used for both.\n', + 'dynamic-features': u'\nInteraction with dynamic features\n*********************************\n\nName resolution of free variables occurs at runtime, not at compile\ntime. This means that the following code will print 42:\n\n i = 10\n def f():\n print(i)\n i = 42\n f()\n\nThere are several cases where Python statements are illegal when used\nin conjunction with nested scopes that contain free variables.\n\nIf a variable is referenced in an enclosing scope, it is illegal to\ndelete the name. An error will be reported at compile time.\n\nThe "eval()" and "exec()" functions do not have access to the full\nenvironment for resolving names. Names may be resolved in the local\nand global namespaces of the caller. Free variables are not resolved\nin the nearest enclosing namespace, but in the global namespace. [1]\nThe "exec()" and "eval()" functions have optional arguments to\noverride the global and local namespace. If only one namespace is\nspecified, it is used for both.\n', 'else': u'\nThe "if" statement\n******************\n\nThe "if" statement is used for conditional execution:\n\n if_stmt ::= "if" expression ":" suite\n ( "elif" expression ":" suite )*\n ["else" ":" suite]\n\nIt selects exactly one of the suites by evaluating the expressions one\nby one until one is found to be true (see section *Boolean operations*\nfor the definition of true and false); then that suite is executed\n(and no other part of the "if" statement is executed or evaluated).\nIf all expressions are false, the suite of the "else" clause, if\npresent, is executed.\n', 'exceptions': u'\nExceptions\n**********\n\nExceptions are a means of breaking out of the normal flow of control\nof a code block in order to handle errors or other exceptional\nconditions. An exception is *raised* at the point where the error is\ndetected; it may be *handled* by the surrounding code block or by any\ncode block that directly or indirectly invoked the code block where\nthe error occurred.\n\nThe Python interpreter raises an exception when it detects a run-time\nerror (such as division by zero). A Python program can also\nexplicitly raise an exception with the "raise" statement. Exception\nhandlers are specified with the "try" ... "except" statement. The\n"finally" clause of such a statement can be used to specify cleanup\ncode which does not handle the exception, but is executed whether an\nexception occurred or not in the preceding code.\n\nPython uses the "termination" model of error handling: an exception\nhandler can find out what happened and continue execution at an outer\nlevel, but it cannot repair the cause of the error and retry the\nfailing operation (except by re-entering the offending piece of code\nfrom the top).\n\nWhen an exception is not handled at all, the interpreter terminates\nexecution of the program, or returns to its interactive main loop. 
In\neither case, it prints a stack backtrace, except when the exception is\n"SystemExit".\n\nExceptions are identified by class instances. The "except" clause is\nselected depending on the class of the instance: it must reference the\nclass of the instance or a base class thereof. The instance can be\nreceived by the handler and can carry additional information about the\nexceptional condition.\n\nNote: Exception messages are not part of the Python API. Their\n contents may change from one version of Python to the next without\n warning and should not be relied on by code which will run under\n multiple versions of the interpreter.\n\nSee also the description of the "try" statement in section *The try\nstatement* and "raise" statement in section *The raise statement*.\n\n-[ Footnotes ]-\n\n[1] This limitation occurs because the code that is executed by\n these operations is not available at the time the module is\n compiled.\n', - 'execmodel': u'\nExecution model\n***************\n\n\nNaming and binding\n==================\n\n*Names* refer to objects. Names are introduced by name binding\noperations. Each occurrence of a name in the program text refers to\nthe *binding* of that name established in the innermost function block\ncontaining the use.\n\nA *block* is a piece of Python program text that is executed as a\nunit. The following are blocks: a module, a function body, and a class\ndefinition. Each command typed interactively is a block. A script\nfile (a file given as standard input to the interpreter or specified\nas a command line argument to the interpreter) is a code block. A\nscript command (a command specified on the interpreter command line\nwith the \'**-c**\' option) is a code block. The string argument passed\nto the built-in functions "eval()" and "exec()" is a code block.\n\nA code block is executed in an *execution frame*. A frame contains\nsome administrative information (used for debugging) and determines\nwhere and how execution continues after the code block\'s execution has\ncompleted.\n\nA *scope* defines the visibility of a name within a block. If a local\nvariable is defined in a block, its scope includes that block. If the\ndefinition occurs in a function block, the scope extends to any blocks\ncontained within the defining one, unless a contained block introduces\na different binding for the name. The scope of names defined in a\nclass block is limited to the class block; it does not extend to the\ncode blocks of methods -- this includes comprehensions and generator\nexpressions since they are implemented using a function scope. This\nmeans that the following will fail:\n\n class A:\n a = 42\n b = list(a + i for i in range(10))\n\nWhen a name is used in a code block, it is resolved using the nearest\nenclosing scope. The set of all such scopes visible to a code block\nis called the block\'s *environment*.\n\nIf a name is bound in a block, it is a local variable of that block,\nunless declared as "nonlocal". If a name is bound at the module\nlevel, it is a global variable. (The variables of the module code\nblock are local and global.) If a variable is used in a code block\nbut not defined there, it is a *free variable*.\n\nWhen a name is not found at all, a "NameError" exception is raised.\nIf the name refers to a local variable that has not been bound, an\n"UnboundLocalError" exception is raised. 
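The local-versus-global rule described here is easiest to see in a short sketch (f, g and x are invented names):

    x = 10

    def f():
        # 'x' is assigned later in this block, so every use of 'x' inside
        # f() is local; reading it before that assignment raises
        # UnboundLocalError when f() is called.
        print(x)
        x = 20

    def g():
        global x        # refer to the module-level binding instead
        print(x)        # prints 10
        x = 20          # rebinds the global

    g()
    print(x)            # 20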
"UnboundLocalError" is a\nsubclass of "NameError".\n\nThe following constructs bind names: formal parameters to functions,\n"import" statements, class and function definitions (these bind the\nclass or function name in the defining block), and targets that are\nidentifiers if occurring in an assignment, "for" loop header, or after\n"as" in a "with" statement or "except" clause. The "import" statement\nof the form "from ... import *" binds all names defined in the\nimported module, except those beginning with an underscore. This form\nmay only be used at the module level.\n\nA target occurring in a "del" statement is also considered bound for\nthis purpose (though the actual semantics are to unbind the name).\n\nEach assignment or import statement occurs within a block defined by a\nclass or function definition or at the module level (the top-level\ncode block).\n\nIf a name binding operation occurs anywhere within a code block, all\nuses of the name within the block are treated as references to the\ncurrent block. This can lead to errors when a name is used within a\nblock before it is bound. This rule is subtle. Python lacks\ndeclarations and allows name binding operations to occur anywhere\nwithin a code block. The local variables of a code block can be\ndetermined by scanning the entire text of the block for name binding\noperations.\n\nIf the "global" statement occurs within a block, all uses of the name\nspecified in the statement refer to the binding of that name in the\ntop-level namespace. Names are resolved in the top-level namespace by\nsearching the global namespace, i.e. the namespace of the module\ncontaining the code block, and the builtins namespace, the namespace\nof the module "builtins". The global namespace is searched first. If\nthe name is not found there, the builtins namespace is searched. The\n"global" statement must precede all uses of the name.\n\nThe builtins namespace associated with the execution of a code block\nis actually found by looking up the name "__builtins__" in its global\nnamespace; this should be a dictionary or a module (in the latter case\nthe module\'s dictionary is used). By default, when in the "__main__"\nmodule, "__builtins__" is the built-in module "builtins"; when in any\nother module, "__builtins__" is an alias for the dictionary of the\n"builtins" module itself. "__builtins__" can be set to a user-created\ndictionary to create a weak form of restricted execution.\n\n**CPython implementation detail:** Users should not touch\n"__builtins__"; it is strictly an implementation detail. Users\nwanting to override values in the builtins namespace should "import"\nthe "builtins" module and modify its attributes appropriately.\n\nThe namespace for a module is automatically created the first time a\nmodule is imported. The main module for a script is always called\n"__main__".\n\nThe "global" statement has the same scope as a name binding operation\nin the same block. If the nearest enclosing scope for a free variable\ncontains a global statement, the free variable is treated as a global.\n\nA class definition is an executable statement that may use and define\nnames. These references follow the normal rules for name resolution.\nThe namespace of the class definition becomes the attribute dictionary\nof the class. 
Names defined at the class scope are not visible in\nmethods.\n\n\nInteraction with dynamic features\n---------------------------------\n\nThere are several cases where Python statements are illegal when used\nin conjunction with nested scopes that contain free variables.\n\nIf a variable is referenced in an enclosing scope, it is illegal to\ndelete the name. An error will be reported at compile time.\n\nIf the wild card form of import --- "import *" --- is used in a\nfunction and the function contains or is a nested block with free\nvariables, the compiler will raise a "SyntaxError".\n\nThe "eval()" and "exec()" functions do not have access to the full\nenvironment for resolving names. Names may be resolved in the local\nand global namespaces of the caller. Free variables are not resolved\nin the nearest enclosing namespace, but in the global namespace. [1]\nThe "exec()" and "eval()" functions have optional arguments to\noverride the global and local namespace. If only one namespace is\nspecified, it is used for both.\n\n\nExceptions\n==========\n\nExceptions are a means of breaking out of the normal flow of control\nof a code block in order to handle errors or other exceptional\nconditions. An exception is *raised* at the point where the error is\ndetected; it may be *handled* by the surrounding code block or by any\ncode block that directly or indirectly invoked the code block where\nthe error occurred.\n\nThe Python interpreter raises an exception when it detects a run-time\nerror (such as division by zero). A Python program can also\nexplicitly raise an exception with the "raise" statement. Exception\nhandlers are specified with the "try" ... "except" statement. The\n"finally" clause of such a statement can be used to specify cleanup\ncode which does not handle the exception, but is executed whether an\nexception occurred or not in the preceding code.\n\nPython uses the "termination" model of error handling: an exception\nhandler can find out what happened and continue execution at an outer\nlevel, but it cannot repair the cause of the error and retry the\nfailing operation (except by re-entering the offending piece of code\nfrom the top).\n\nWhen an exception is not handled at all, the interpreter terminates\nexecution of the program, or returns to its interactive main loop. In\neither case, it prints a stack backtrace, except when the exception is\n"SystemExit".\n\nExceptions are identified by class instances. The "except" clause is\nselected depending on the class of the instance: it must reference the\nclass of the instance or a base class thereof. The instance can be\nreceived by the handler and can carry additional information about the\nexceptional condition.\n\nNote: Exception messages are not part of the Python API. Their\n contents may change from one version of Python to the next without\n warning and should not be relied on by code which will run under\n multiple versions of the interpreter.\n\nSee also the description of the "try" statement in section *The try\nstatement* and "raise" statement in section *The raise statement*.\n\n-[ Footnotes ]-\n\n[1] This limitation occurs because the code that is executed by\n these operations is not available at the time the module is\n compiled.\n', + 'execmodel': u'\nExecution model\n***************\n\n\nStructure of a program\n======================\n\nA Python program is constructed from code blocks. A *block* is a piece\nof Python program text that is executed as a unit. 
The following are\nblocks: a module, a function body, and a class definition. Each\ncommand typed interactively is a block. A script file (a file given\nas standard input to the interpreter or specified as a command line\nargument to the interpreter) is a code block. A script command (a\ncommand specified on the interpreter command line with the \'**-c**\'\noption) is a code block. The string argument passed to the built-in\nfunctions "eval()" and "exec()" is a code block.\n\nA code block is executed in an *execution frame*. A frame contains\nsome administrative information (used for debugging) and determines\nwhere and how execution continues after the code block\'s execution has\ncompleted.\n\n\nNaming and binding\n==================\n\n\nBinding of names\n----------------\n\n*Names* refer to objects. Names are introduced by name binding\noperations.\n\nThe following constructs bind names: formal parameters to functions,\n"import" statements, class and function definitions (these bind the\nclass or function name in the defining block), and targets that are\nidentifiers if occurring in an assignment, "for" loop header, or after\n"as" in a "with" statement or "except" clause. The "import" statement\nof the form "from ... import *" binds all names defined in the\nimported module, except those beginning with an underscore. This form\nmay only be used at the module level.\n\nA target occurring in a "del" statement is also considered bound for\nthis purpose (though the actual semantics are to unbind the name).\n\nEach assignment or import statement occurs within a block defined by a\nclass or function definition or at the module level (the top-level\ncode block).\n\nIf a name is bound in a block, it is a local variable of that block,\nunless declared as "nonlocal" or "global". If a name is bound at the\nmodule level, it is a global variable. (The variables of the module\ncode block are local and global.) If a variable is used in a code\nblock but not defined there, it is a *free variable*.\n\nEach occurrence of a name in the program text refers to the *binding*\nof that name established by the following name resolution rules.\n\n\nResolution of names\n-------------------\n\nA *scope* defines the visibility of a name within a block. If a local\nvariable is defined in a block, its scope includes that block. If the\ndefinition occurs in a function block, the scope extends to any blocks\ncontained within the defining one, unless a contained block introduces\na different binding for the name.\n\nWhen a name is used in a code block, it is resolved using the nearest\nenclosing scope. The set of all such scopes visible to a code block\nis called the block\'s *environment*.\n\nWhen a name is not found at all, a "NameError" exception is raised. If\nthe current scope is a function scope, and the name refers to a local\nvariable that has not yet been bound to a value at the point where the\nname is used, an "UnboundLocalError" exception is raised.\n"UnboundLocalError" is a subclass of "NameError".\n\nIf a name binding operation occurs anywhere within a code block, all\nuses of the name within the block are treated as references to the\ncurrent block. This can lead to errors when a name is used within a\nblock before it is bound. This rule is subtle. Python lacks\ndeclarations and allows name binding operations to occur anywhere\nwithin a code block. 
The local variables of a code block can be\ndetermined by scanning the entire text of the block for name binding\noperations.\n\nIf the "global" statement occurs within a block, all uses of the name\nspecified in the statement refer to the binding of that name in the\ntop-level namespace. Names are resolved in the top-level namespace by\nsearching the global namespace, i.e. the namespace of the module\ncontaining the code block, and the builtins namespace, the namespace\nof the module "builtins". The global namespace is searched first. If\nthe name is not found there, the builtins namespace is searched. The\n"global" statement must precede all uses of the name.\n\nThe "global" statement has the same scope as a name binding operation\nin the same block. If the nearest enclosing scope for a free variable\ncontains a global statement, the free variable is treated as a global.\n\nThe "nonlocal" statement causes corresponding names to refer to\npreviously bound variables in the nearest enclosing function scope.\n"SyntaxError" is raised at compile time if the given name does not\nexist in any enclosing function scope.\n\nThe namespace for a module is automatically created the first time a\nmodule is imported. The main module for a script is always called\n"__main__".\n\nClass definition blocks and arguments to "exec()" and "eval()" are\nspecial in the context of name resolution. A class definition is an\nexecutable statement that may use and define names. These references\nfollow the normal rules for name resolution with an exception that\nunbound local variables are looked up in the global namespace. The\nnamespace of the class definition becomes the attribute dictionary of\nthe class. The scope of names defined in a class block is limited to\nthe class block; it does not extend to the code blocks of methods --\nthis includes comprehensions and generator expressions since they are\nimplemented using a function scope. This means that the following\nwill fail:\n\n class A:\n a = 42\n b = list(a + i for i in range(10))\n\n\nBuiltins and restricted execution\n---------------------------------\n\nThe builtins namespace associated with the execution of a code block\nis actually found by looking up the name "__builtins__" in its global\nnamespace; this should be a dictionary or a module (in the latter case\nthe module\'s dictionary is used). By default, when in the "__main__"\nmodule, "__builtins__" is the built-in module "builtins"; when in any\nother module, "__builtins__" is an alias for the dictionary of the\n"builtins" module itself. "__builtins__" can be set to a user-created\ndictionary to create a weak form of restricted execution.\n\n**CPython implementation detail:** Users should not touch\n"__builtins__"; it is strictly an implementation detail. Users\nwanting to override values in the builtins namespace should "import"\nthe "builtins" module and modify its attributes appropriately.\n\n\nInteraction with dynamic features\n---------------------------------\n\nName resolution of free variables occurs at runtime, not at compile\ntime. This means that the following code will print 42:\n\n i = 10\n def f():\n print(i)\n i = 42\n f()\n\nThere are several cases where Python statements are illegal when used\nin conjunction with nested scopes that contain free variables.\n\nIf a variable is referenced in an enclosing scope, it is illegal to\ndelete the name. 
An error will be reported at compile time.\n\nThe "eval()" and "exec()" functions do not have access to the full\nenvironment for resolving names. Names may be resolved in the local\nand global namespaces of the caller. Free variables are not resolved\nin the nearest enclosing namespace, but in the global namespace. [1]\nThe "exec()" and "eval()" functions have optional arguments to\noverride the global and local namespace. If only one namespace is\nspecified, it is used for both.\n\n\nExceptions\n==========\n\nExceptions are a means of breaking out of the normal flow of control\nof a code block in order to handle errors or other exceptional\nconditions. An exception is *raised* at the point where the error is\ndetected; it may be *handled* by the surrounding code block or by any\ncode block that directly or indirectly invoked the code block where\nthe error occurred.\n\nThe Python interpreter raises an exception when it detects a run-time\nerror (such as division by zero). A Python program can also\nexplicitly raise an exception with the "raise" statement. Exception\nhandlers are specified with the "try" ... "except" statement. The\n"finally" clause of such a statement can be used to specify cleanup\ncode which does not handle the exception, but is executed whether an\nexception occurred or not in the preceding code.\n\nPython uses the "termination" model of error handling: an exception\nhandler can find out what happened and continue execution at an outer\nlevel, but it cannot repair the cause of the error and retry the\nfailing operation (except by re-entering the offending piece of code\nfrom the top).\n\nWhen an exception is not handled at all, the interpreter terminates\nexecution of the program, or returns to its interactive main loop. In\neither case, it prints a stack backtrace, except when the exception is\n"SystemExit".\n\nExceptions are identified by class instances. The "except" clause is\nselected depending on the class of the instance: it must reference the\nclass of the instance or a base class thereof. The instance can be\nreceived by the handler and can carry additional information about the\nexceptional condition.\n\nNote: Exception messages are not part of the Python API. Their\n contents may change from one version of Python to the next without\n warning and should not be relied on by code which will run under\n multiple versions of the interpreter.\n\nSee also the description of the "try" statement in section *The try\nstatement* and "raise" statement in section *The raise statement*.\n\n-[ Footnotes ]-\n\n[1] This limitation occurs because the code that is executed by\n these operations is not available at the time the module is\n compiled.\n', 'exprlists': u'\nExpression lists\n****************\n\n expression_list ::= expression ( "," expression )* [","]\n\nAn expression list containing at least one comma yields a tuple. The\nlength of the tuple is the number of expressions in the list. The\nexpressions are evaluated from left to right.\n\nThe trailing comma is required only to create a single tuple (a.k.a. a\n*singleton*); it is optional in all other cases. A single expression\nwithout a trailing comma doesn\'t create a tuple, but rather yields the\nvalue of that expression. 
(To create an empty tuple, use an empty pair\nof parentheses: "()".)\n', 'floating': u'\nFloating point literals\n***********************\n\nFloating point literals are described by the following lexical\ndefinitions:\n\n floatnumber ::= pointfloat | exponentfloat\n pointfloat ::= [intpart] fraction | intpart "."\n exponentfloat ::= (intpart | pointfloat) exponent\n intpart ::= digit+\n fraction ::= "." digit+\n exponent ::= ("e" | "E") ["+" | "-"] digit+\n\nNote that the integer and exponent parts are always interpreted using\nradix 10. For example, "077e010" is legal, and denotes the same number\nas "77e10". The allowed range of floating point literals is\nimplementation-dependent. Some examples of floating point literals:\n\n 3.14 10. .001 1e100 3.14e-10 0e0\n\nNote that numeric literals do not include a sign; a phrase like "-1"\nis actually an expression composed of the unary operator "-" and the\nliteral "1".\n', 'for': u'\nThe "for" statement\n*******************\n\nThe "for" statement is used to iterate over the elements of a sequence\n(such as a string, tuple or list) or other iterable object:\n\n for_stmt ::= "for" target_list "in" expression_list ":" suite\n ["else" ":" suite]\n\nThe expression list is evaluated once; it should yield an iterable\nobject. An iterator is created for the result of the\n"expression_list". The suite is then executed once for each item\nprovided by the iterator, in the order returned by the iterator. Each\nitem in turn is assigned to the target list using the standard rules\nfor assignments (see *Assignment statements*), and then the suite is\nexecuted. When the items are exhausted (which is immediately when the\nsequence is empty or an iterator raises a "StopIteration" exception),\nthe suite in the "else" clause, if present, is executed, and the loop\nterminates.\n\nA "break" statement executed in the first suite terminates the loop\nwithout executing the "else" clause\'s suite. A "continue" statement\nexecuted in the first suite skips the rest of the suite and continues\nwith the next item, or with the "else" clause if there is no next\nitem.\n\nThe for-loop makes assignments to the variables(s) in the target list.\nThis overwrites all previous assignments to those variables including\nthose made in the suite of the for-loop:\n\n for i in range(10):\n print(i)\n i = 5 # this will not affect the for-loop\n # because i will be overwritten with the next\n # index in the range\n\nNames in the target list are not deleted when the loop is finished,\nbut if the sequence is empty, they will not have been assigned to at\nall by the loop. Hint: the built-in function "range()" returns an\niterator of integers suitable to emulate the effect of Pascal\'s "for i\n:= a to b do"; e.g., "list(range(3))" returns the list "[0, 1, 2]".\n\nNote: There is a subtlety when the sequence is being modified by the\n loop (this can only occur for mutable sequences, i.e. lists). An\n internal counter is used to keep track of which item is used next,\n and this is incremented on each iteration. When this counter has\n reached the length of the sequence the loop terminates. This means\n that if the suite deletes the current (or a previous) item from the\n sequence, the next item will be skipped (since it gets the index of\n the current item which has already been treated). 
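A concrete sketch of the skipping behaviour just described, using a throwaway list:

    a = [1, 2, 3, 4]
    seen = []
    for x in a:
        seen.append(x)
        if x == 2:
            a.remove(x)    # shrink the list while iterating over it
    print(seen)            # [1, 2, 4] -- 3 was skipped
    print(a)               # [1, 3, 4]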
Likewise, if the\n suite inserts an item in the sequence before the current item, the\n current item will be treated again the next time through the loop.\n This can lead to nasty bugs that can be avoided by making a\n temporary copy using a slice of the whole sequence, e.g.,\n\n for x in a[:]:\n if x < 0: a.remove(x)\n', @@ -42,16 +42,16 @@ 'if': u'\nThe "if" statement\n******************\n\nThe "if" statement is used for conditional execution:\n\n if_stmt ::= "if" expression ":" suite\n ( "elif" expression ":" suite )*\n ["else" ":" suite]\n\nIt selects exactly one of the suites by evaluating the expressions one\nby one until one is found to be true (see section *Boolean operations*\nfor the definition of true and false); then that suite is executed\n(and no other part of the "if" statement is executed or evaluated).\nIf all expressions are false, the suite of the "else" clause, if\npresent, is executed.\n', 'imaginary': u'\nImaginary literals\n******************\n\nImaginary literals are described by the following lexical definitions:\n\n imagnumber ::= (floatnumber | intpart) ("j" | "J")\n\nAn imaginary literal yields a complex number with a real part of 0.0.\nComplex numbers are represented as a pair of floating point numbers\nand have the same restrictions on their range. To create a complex\nnumber with a nonzero real part, add a floating point number to it,\ne.g., "(3+4j)". Some examples of imaginary literals:\n\n 3.14j 10.j 10j .001j 1e100j 3.14e-10j\n', 'import': u'\nThe "import" statement\n**********************\n\n import_stmt ::= "import" module ["as" name] ( "," module ["as" name] )*\n | "from" relative_module "import" identifier ["as" name]\n ( "," identifier ["as" name] )*\n | "from" relative_module "import" "(" identifier ["as" name]\n ( "," identifier ["as" name] )* [","] ")"\n | "from" module "import" "*"\n module ::= (identifier ".")* identifier\n relative_module ::= "."* module | "."+\n name ::= identifier\n\nThe basic import statement (no "from" clause) is executed in two\nsteps:\n\n1. find a module, loading and initializing it if necessary\n\n2. define a name or names in the local namespace for the scope\n where the "import" statement occurs.\n\nWhen the statement contains multiple clauses (separated by commas) the\ntwo steps are carried out separately for each clause, just as though\nthe clauses had been separated out into individiual import statements.\n\nThe details of the first step, finding and loading modules are\ndescribed in greater detail in the section on the *import system*,\nwhich also describes the various types of packages and modules that\ncan be imported, as well as all the hooks that can be used to\ncustomize the import system. Note that failures in this step may\nindicate either that the module could not be located, *or* that an\nerror occurred while initializing the module, which includes execution\nof the module\'s code.\n\nIf the requested module is retrieved successfully, it will be made\navailable in the local namespace in one of three ways:\n\n* If the module name is followed by "as", then the name following\n "as" is bound directly to the imported module.\n\n* If no other name is specified, and the module being imported is a\n top level module, the module\'s name is bound in the local namespace\n as a reference to the imported module\n\n* If the module being imported is *not* a top level module, then the\n name of the top level package that contains the module is bound in\n the local namespace as a reference to the top level package. 
The\n imported module must be accessed using its full qualified name\n rather than directly\n\nThe "from" form uses a slightly more complex process:\n\n1. find the module specified in the "from" clause, loading and\n initializing it if necessary;\n\n2. for each of the identifiers specified in the "import" clauses:\n\n 1. check if the imported module has an attribute by that name\n\n 2. if not, attempt to import a submodule with that name and then\n check the imported module again for that attribute\n\n 3. if the attribute is not found, "ImportError" is raised.\n\n 4. otherwise, a reference to that value is stored in the local\n namespace, using the name in the "as" clause if it is present,\n otherwise using the attribute name\n\nExamples:\n\n import foo # foo imported and bound locally\n import foo.bar.baz # foo.bar.baz imported, foo bound locally\n import foo.bar.baz as fbb # foo.bar.baz imported and bound as fbb\n from foo.bar import baz # foo.bar.baz imported and bound as baz\n from foo import attr # foo imported and foo.attr bound as attr\n\nIf the list of identifiers is replaced by a star ("\'*\'"), all public\nnames defined in the module are bound in the local namespace for the\nscope where the "import" statement occurs.\n\nThe *public names* defined by a module are determined by checking the\nmodule\'s namespace for a variable named "__all__"; if defined, it must\nbe a sequence of strings which are names defined or imported by that\nmodule. The names given in "__all__" are all considered public and\nare required to exist. If "__all__" is not defined, the set of public\nnames includes all names found in the module\'s namespace which do not\nbegin with an underscore character ("\'_\'"). "__all__" should contain\nthe entire public API. It is intended to avoid accidentally exporting\nitems that are not part of the API (such as library modules which were\nimported and used within the module).\n\nThe wild card form of import --- "from module import *" --- is only\nallowed at the module level. Attempting to use it in class or\nfunction definitions will raise a "SyntaxError".\n\nWhen specifying what module to import you do not have to specify the\nabsolute name of the module. When a module or package is contained\nwithin another package it is possible to make a relative import within\nthe same top package without having to mention the package name. By\nusing leading dots in the specified module or package after "from" you\ncan specify how high to traverse up the current package hierarchy\nwithout specifying exact names. One leading dot means the current\npackage where the module making the import exists. Two dots means up\none package level. Three dots is up two levels, etc. So if you execute\n"from . import mod" from a module in the "pkg" package then you will\nend up importing "pkg.mod". If you execute "from ..subpkg2 import mod"\nfrom within "pkg.subpkg1" you will import "pkg.subpkg2.mod". The\nspecification for relative imports is contained within **PEP 328**.\n\n"importlib.import_module()" is provided to support applications that\ndetermine dynamically the modules to be loaded.\n\n\nFuture statements\n=================\n\nA *future statement* is a directive to the compiler that a particular\nmodule should be compiled using syntax or semantics that will be\navailable in a specified future release of Python where the feature\nbecomes standard.\n\nThe future statement is intended to ease migration to future versions\nof Python that introduce incompatible changes to the language. 
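As a brief sketch of the dynamic-import hook mentioned above (the module name here is arbitrary):

    import importlib

    name = "json"                        # decided at run time in real code
    mod = importlib.import_module(name)
    print(mod.dumps({"ok": True}))       # {"ok": true}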
It\nallows use of the new features on a per-module basis before the\nrelease in which the feature becomes standard.\n\n future_statement ::= "from" "__future__" "import" feature ["as" name]\n ("," feature ["as" name])*\n | "from" "__future__" "import" "(" feature ["as" name]\n ("," feature ["as" name])* [","] ")"\n feature ::= identifier\n name ::= identifier\n\nA future statement must appear near the top of the module. The only\nlines that can appear before a future statement are:\n\n* the module docstring (if any),\n\n* comments,\n\n* blank lines, and\n\n* other future statements.\n\nThe features recognized by Python 3.0 are "absolute_import",\n"division", "generators", "unicode_literals", "print_function",\n"nested_scopes" and "with_statement". They are all redundant because\nthey are always enabled, and only kept for backwards compatibility.\n\nA future statement is recognized and treated specially at compile\ntime: Changes to the semantics of core constructs are often\nimplemented by generating different code. It may even be the case\nthat a new feature introduces new incompatible syntax (such as a new\nreserved word), in which case the compiler may need to parse the\nmodule differently. Such decisions cannot be pushed off until\nruntime.\n\nFor any given release, the compiler knows which feature names have\nbeen defined, and raises a compile-time error if a future statement\ncontains a feature not known to it.\n\nThe direct runtime semantics are the same as for any import statement:\nthere is a standard module "__future__", described later, and it will\nbe imported in the usual way at the time the future statement is\nexecuted.\n\nThe interesting runtime semantics depend on the specific feature\nenabled by the future statement.\n\nNote that there is nothing special about the statement:\n\n import __future__ [as name]\n\nThat is not a future statement; it\'s an ordinary import statement with\nno special semantics or syntax restrictions.\n\nCode compiled by calls to the built-in functions "exec()" and\n"compile()" that occur in a module "M" containing a future statement\nwill, by default, use the new syntax or semantics associated with the\nfuture statement. This can be controlled by optional arguments to\n"compile()" --- see the documentation of that function for details.\n\nA future statement typed at an interactive interpreter prompt will\ntake effect for the rest of the interpreter session. If an\ninterpreter is started with the *-i* option, is passed a script name\nto execute, and the script includes a future statement, it will be in\neffect in the interactive session started after the script is\nexecuted.\n\nSee also: **PEP 236** - Back to the __future__\n\n The original proposal for the __future__ mechanism.\n', - 'in': u'\nComparisons\n***********\n\nUnlike C, all comparison operations in Python have the same priority,\nwhich is lower than that of any arithmetic, shifting or bitwise\noperation. 
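A couple of quick illustrations of that relative priority:

    print(1 + 2 < 2 * 3)   # parsed as (1 + 2) < (2 * 3), i.e. 3 < 6 -> True
    print(5 & 1 == 1)      # "&" binds tighter than "==" (the reverse of C) -> True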
Also unlike C, expressions like "a < b < c" have the\ninterpretation that is conventional in mathematics:\n\n comparison ::= or_expr ( comp_operator or_expr )*\n comp_operator ::= "<" | ">" | "==" | ">=" | "<=" | "!="\n | "is" ["not"] | ["not"] "in"\n\nComparisons yield boolean values: "True" or "False".\n\nComparisons can be chained arbitrarily, e.g., "x < y <= z" is\nequivalent to "x < y and y <= z", except that "y" is evaluated only\nonce (but in both cases "z" is not evaluated at all when "x < y" is\nfound to be false).\n\nFormally, if *a*, *b*, *c*, ..., *y*, *z* are expressions and *op1*,\n*op2*, ..., *opN* are comparison operators, then "a op1 b op2 c ... y\nopN z" is equivalent to "a op1 b and b op2 c and ... y opN z", except\nthat each expression is evaluated at most once.\n\nNote that "a op1 b op2 c" doesn\'t imply any kind of comparison between\n*a* and *c*, so that, e.g., "x < y > z" is perfectly legal (though\nperhaps not pretty).\n\nThe operators "<", ">", "==", ">=", "<=", and "!=" compare the values\nof two objects. The objects need not have the same type. If both are\nnumbers, they are converted to a common type. Otherwise, the "==" and\n"!=" operators *always* consider objects of different types to be\nunequal, while the "<", ">", ">=" and "<=" operators raise a\n"TypeError" when comparing objects of different types that do not\nimplement these operators for the given pair of types. You can\ncontrol comparison behavior of objects of non-built-in types by\ndefining rich comparison methods like "__gt__()", described in section\n*Basic customization*.\n\nComparison of objects of the same type depends on the type:\n\n* Numbers are compared arithmetically.\n\n* The values "float(\'NaN\')" and "Decimal(\'NaN\')" are special. The\n are identical to themselves, "x is x" but are not equal to\n themselves, "x != x". Additionally, comparing any value to a\n not-a-number value will return "False". For example, both "3 <\n float(\'NaN\')" and "float(\'NaN\') < 3" will return "False".\n\n* Bytes objects are compared lexicographically using the numeric\n values of their elements.\n\n* Strings are compared lexicographically using the numeric\n equivalents (the result of the built-in function "ord()") of their\n characters. [3] String and bytes object can\'t be compared!\n\n* Tuples and lists are compared lexicographically using comparison\n of corresponding elements. This means that to compare equal, each\n element must compare equal and the two sequences must be of the same\n type and have the same length.\n\n If not equal, the sequences are ordered the same as their first\n differing elements. For example, "[1,2,x] <= [1,2,y]" has the same\n value as "x <= y". If the corresponding element does not exist, the\n shorter sequence is ordered first (for example, "[1,2] < [1,2,3]").\n\n* Mappings (dictionaries) compare equal if and only if they have the\n same "(key, value)" pairs. Order comparisons "(\'<\', \'<=\', \'>=\',\n \'>\')" raise "TypeError".\n\n* Sets and frozensets define comparison operators to mean subset and\n superset tests. Those relations do not define total orderings (the\n two sets "{1,2}" and {2,3} are not equal, nor subsets of one\n another, nor supersets of one another). 
Accordingly, sets are not\n appropriate arguments for functions which depend on total ordering.\n For example, "min()", "max()", and "sorted()" produce undefined\n results given a list of sets as inputs.\n\n* Most other objects of built-in types compare unequal unless they\n are the same object; the choice whether one object is considered\n smaller or larger than another one is made arbitrarily but\n consistently within one execution of a program.\n\nComparison of objects of differing types depends on whether either of\nthe types provide explicit support for the comparison. Most numeric\ntypes can be compared with one another. When cross-type comparison is\nnot supported, the comparison method returns "NotImplemented".\n\nThe operators "in" and "not in" test for membership. "x in s"\nevaluates to true if *x* is a member of *s*, and false otherwise. "x\nnot in s" returns the negation of "x in s". All built-in sequences\nand set types support this as well as dictionary, for which "in" tests\nwhether the dictionary has a given key. For container types such as\nlist, tuple, set, frozenset, dict, or collections.deque, the\nexpression "x in y" is equivalent to "any(x is e or x == e for e in\ny)".\n\nFor the string and bytes types, "x in y" is true if and only if *x* is\na substring of *y*. An equivalent test is "y.find(x) != -1". Empty\nstrings are always considered to be a substring of any other string,\nso """ in "abc"" will return "True".\n\nFor user-defined classes which define the "__contains__()" method, "x\nin y" is true if and only if "y.__contains__(x)" is true.\n\nFor user-defined classes which do not define "__contains__()" but do\ndefine "__iter__()", "x in y" is true if some value "z" with "x == z"\nis produced while iterating over "y". If an exception is raised\nduring the iteration, it is as if "in" raised that exception.\n\nLastly, the old-style iteration protocol is tried: if a class defines\n"__getitem__()", "x in y" is true if and only if there is a non-\nnegative integer index *i* such that "x == y[i]", and all lower\ninteger indices do not raise "IndexError" exception. (If any other\nexception is raised, it is as if "in" raised that exception).\n\nThe operator "not in" is defined to have the inverse true value of\n"in".\n\nThe operators "is" and "is not" test for object identity: "x is y" is\ntrue if and only if *x* and *y* are the same object. "x is not y"\nyields the inverse truth value. [4]\n', - 'integers': u'\nInteger literals\n****************\n\nInteger literals are described by the following lexical definitions:\n\n integer ::= decimalinteger | octinteger | hexinteger | bininteger\n decimalinteger ::= nonzerodigit digit* | "0"+\n nonzerodigit ::= "1"..."9"\n digit ::= "0"..."9"\n octinteger ::= "0" ("o" | "O") octdigit+\n hexinteger ::= "0" ("x" | "X") hexdigit+\n bininteger ::= "0" ("b" | "B") bindigit+\n octdigit ::= "0"..."7"\n hexdigit ::= digit | "a"..."f" | "A"..."F"\n bindigit ::= "0" | "1"\n\nThere is no limit for the length of integer literals apart from what\ncan be stored in available memory.\n\nNote that leading zeros in a non-zero decimal number are not allowed.\nThis is for disambiguation with C-style octal literals, which Python\nused before version 3.0.\n\nSome examples of integer literals:\n\n 7 2147483647 0o177 0b100110111\n 3 79228162514264337593543950336 0o377 0x100000000\n 79228162514264337593543950336 0xdeadbeef\n', + 'in': u'\nMembership test operations\n**************************\n\nThe operators "in" and "not in" test for membership. 
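A few quick membership tests, as a first flavour of the rules spelled out below:

    print(3 in [1, 2, 3])         # True
    print("d" not in "abc")       # True
    print("key" in {"key": 1})    # True: for a dict, "in" tests the keys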
"x in s"\nevaluates to true if *x* is a member of *s*, and false otherwise. "x\nnot in s" returns the negation of "x in s". All built-in sequences\nand set types support this as well as dictionary, for which "in" tests\nwhether the dictionary has a given key. For container types such as\nlist, tuple, set, frozenset, dict, or collections.deque, the\nexpression "x in y" is equivalent to "any(x is e or x == e for e in\ny)".\n\nFor the string and bytes types, "x in y" is true if and only if *x* is\na substring of *y*. An equivalent test is "y.find(x) != -1". Empty\nstrings are always considered to be a substring of any other string,\nso """ in "abc"" will return "True".\n\nFor user-defined classes which define the "__contains__()" method, "x\nin y" is true if and only if "y.__contains__(x)" is true.\n\nFor user-defined classes which do not define "__contains__()" but do\ndefine "__iter__()", "x in y" is true if some value "z" with "x == z"\nis produced while iterating over "y". If an exception is raised\nduring the iteration, it is as if "in" raised that exception.\n\nLastly, the old-style iteration protocol is tried: if a class defines\n"__getitem__()", "x in y" is true if and only if there is a non-\nnegative integer index *i* such that "x == y[i]", and all lower\ninteger indices do not raise "IndexError" exception. (If any other\nexception is raised, it is as if "in" raised that exception).\n\nThe operator "not in" is defined to have the inverse true value of\n"in".\n', + 'integers': u'\nInteger literals\n****************\n\nInteger literals are described by the following lexical definitions:\n\n integer ::= decimalinteger | octinteger | hexinteger | bininteger\n decimalinteger ::= nonzerodigit digit* | "0"+\n nonzerodigit ::= "1"..."9"\n digit ::= "0"..."9"\n octinteger ::= "0" ("o" | "O") octdigit+\n hexinteger ::= "0" ("x" | "X") hexdigit+\n bininteger ::= "0" ("b" | "B") bindigit+\n octdigit ::= "0"..."7"\n hexdigit ::= digit | "a"..."f" | "A"..."F"\n bindigit ::= "0" | "1"\n\nThere is no limit for the length of integer literals apart from what\ncan be stored in available memory.\n\nNote that leading zeros in a non-zero decimal number are not allowed.\nThis is for disambiguation with C-style octal literals, which Python\nused before version 3.0.\n\nSome examples of integer literals:\n\n 7 2147483647 0o177 0b100110111\n 3 79228162514264337593543950336 0o377 0xdeadbeef\n', 'lambda': u'\nLambdas\n*******\n\n lambda_expr ::= "lambda" [parameter_list]: expression\n lambda_expr_nocond ::= "lambda" [parameter_list]: expression_nocond\n\nLambda expressions (sometimes called lambda forms) are used to create\nanonymous functions. The expression "lambda arguments: expression"\nyields a function object. The unnamed object behaves like a function\nobject defined with\n\n def (arguments):\n return expression\n\nSee section *Function definitions* for the syntax of parameter lists.\nNote that functions created with lambda expressions cannot contain\nstatements or annotations.\n', 'lists': u'\nList displays\n*************\n\nA list display is a possibly empty series of expressions enclosed in\nsquare brackets:\n\n list_display ::= "[" [expression_list | comprehension] "]"\n\nA list display yields a new list object, the contents being specified\nby either a list of expressions or a comprehension. 
When a comma-\nseparated list of expressions is supplied, its elements are evaluated\nfrom left to right and placed into the list object in that order.\nWhen a comprehension is supplied, the list is constructed from the\nelements resulting from the comprehension.\n', - 'naming': u'\nNaming and binding\n******************\n\n*Names* refer to objects. Names are introduced by name binding\noperations. Each occurrence of a name in the program text refers to\nthe *binding* of that name established in the innermost function block\ncontaining the use.\n\nA *block* is a piece of Python program text that is executed as a\nunit. The following are blocks: a module, a function body, and a class\ndefinition. Each command typed interactively is a block. A script\nfile (a file given as standard input to the interpreter or specified\nas a command line argument to the interpreter) is a code block. A\nscript command (a command specified on the interpreter command line\nwith the \'**-c**\' option) is a code block. The string argument passed\nto the built-in functions "eval()" and "exec()" is a code block.\n\nA code block is executed in an *execution frame*. A frame contains\nsome administrative information (used for debugging) and determines\nwhere and how execution continues after the code block\'s execution has\ncompleted.\n\nA *scope* defines the visibility of a name within a block. If a local\nvariable is defined in a block, its scope includes that block. If the\ndefinition occurs in a function block, the scope extends to any blocks\ncontained within the defining one, unless a contained block introduces\na different binding for the name. The scope of names defined in a\nclass block is limited to the class block; it does not extend to the\ncode blocks of methods -- this includes comprehensions and generator\nexpressions since they are implemented using a function scope. This\nmeans that the following will fail:\n\n class A:\n a = 42\n b = list(a + i for i in range(10))\n\nWhen a name is used in a code block, it is resolved using the nearest\nenclosing scope. The set of all such scopes visible to a code block\nis called the block\'s *environment*.\n\nIf a name is bound in a block, it is a local variable of that block,\nunless declared as "nonlocal". If a name is bound at the module\nlevel, it is a global variable. (The variables of the module code\nblock are local and global.) If a variable is used in a code block\nbut not defined there, it is a *free variable*.\n\nWhen a name is not found at all, a "NameError" exception is raised.\nIf the name refers to a local variable that has not been bound, an\n"UnboundLocalError" exception is raised. "UnboundLocalError" is a\nsubclass of "NameError".\n\nThe following constructs bind names: formal parameters to functions,\n"import" statements, class and function definitions (these bind the\nclass or function name in the defining block), and targets that are\nidentifiers if occurring in an assignment, "for" loop header, or after\n"as" in a "with" statement or "except" clause. The "import" statement\nof the form "from ... import *" binds all names defined in the\nimported module, except those beginning with an underscore. 
This form\nmay only be used at the module level.\n\nA target occurring in a "del" statement is also considered bound for\nthis purpose (though the actual semantics are to unbind the name).\n\nEach assignment or import statement occurs within a block defined by a\nclass or function definition or at the module level (the top-level\ncode block).\n\nIf a name binding operation occurs anywhere within a code block, all\nuses of the name within the block are treated as references to the\ncurrent block. This can lead to errors when a name is used within a\nblock before it is bound. This rule is subtle. Python lacks\ndeclarations and allows name binding operations to occur anywhere\nwithin a code block. The local variables of a code block can be\ndetermined by scanning the entire text of the block for name binding\noperations.\n\nIf the "global" statement occurs within a block, all uses of the name\nspecified in the statement refer to the binding of that name in the\ntop-level namespace. Names are resolved in the top-level namespace by\nsearching the global namespace, i.e. the namespace of the module\ncontaining the code block, and the builtins namespace, the namespace\nof the module "builtins". The global namespace is searched first. If\nthe name is not found there, the builtins namespace is searched. The\n"global" statement must precede all uses of the name.\n\nThe builtins namespace associated with the execution of a code block\nis actually found by looking up the name "__builtins__" in its global\nnamespace; this should be a dictionary or a module (in the latter case\nthe module\'s dictionary is used). By default, when in the "__main__"\nmodule, "__builtins__" is the built-in module "builtins"; when in any\nother module, "__builtins__" is an alias for the dictionary of the\n"builtins" module itself. "__builtins__" can be set to a user-created\ndictionary to create a weak form of restricted execution.\n\n**CPython implementation detail:** Users should not touch\n"__builtins__"; it is strictly an implementation detail. Users\nwanting to override values in the builtins namespace should "import"\nthe "builtins" module and modify its attributes appropriately.\n\nThe namespace for a module is automatically created the first time a\nmodule is imported. The main module for a script is always called\n"__main__".\n\nThe "global" statement has the same scope as a name binding operation\nin the same block. If the nearest enclosing scope for a free variable\ncontains a global statement, the free variable is treated as a global.\n\nA class definition is an executable statement that may use and define\nnames. These references follow the normal rules for name resolution.\nThe namespace of the class definition becomes the attribute dictionary\nof the class. Names defined at the class scope are not visible in\nmethods.\n\n\nInteraction with dynamic features\n=================================\n\nThere are several cases where Python statements are illegal when used\nin conjunction with nested scopes that contain free variables.\n\nIf a variable is referenced in an enclosing scope, it is illegal to\ndelete the name. An error will be reported at compile time.\n\nIf the wild card form of import --- "import *" --- is used in a\nfunction and the function contains or is a nested block with free\nvariables, the compiler will raise a "SyntaxError".\n\nThe "eval()" and "exec()" functions do not have access to the full\nenvironment for resolving names. Names may be resolved in the local\nand global namespaces of the caller. 
Free variables are not resolved\nin the nearest enclosing namespace, but in the global namespace. [1]\nThe "exec()" and "eval()" functions have optional arguments to\noverride the global and local namespace. If only one namespace is\nspecified, it is used for both.\n', + 'naming': u'\nNaming and binding\n******************\n\n\nBinding of names\n================\n\n*Names* refer to objects. Names are introduced by name binding\noperations.\n\nThe following constructs bind names: formal parameters to functions,\n"import" statements, class and function definitions (these bind the\nclass or function name in the defining block), and targets that are\nidentifiers if occurring in an assignment, "for" loop header, or after\n"as" in a "with" statement or "except" clause. The "import" statement\nof the form "from ... import *" binds all names defined in the\nimported module, except those beginning with an underscore. This form\nmay only be used at the module level.\n\nA target occurring in a "del" statement is also considered bound for\nthis purpose (though the actual semantics are to unbind the name).\n\nEach assignment or import statement occurs within a block defined by a\nclass or function definition or at the module level (the top-level\ncode block).\n\nIf a name is bound in a block, it is a local variable of that block,\nunless declared as "nonlocal" or "global". If a name is bound at the\nmodule level, it is a global variable. (The variables of the module\ncode block are local and global.) If a variable is used in a code\nblock but not defined there, it is a *free variable*.\n\nEach occurrence of a name in the program text refers to the *binding*\nof that name established by the following name resolution rules.\n\n\nResolution of names\n===================\n\nA *scope* defines the visibility of a name within a block. If a local\nvariable is defined in a block, its scope includes that block. If the\ndefinition occurs in a function block, the scope extends to any blocks\ncontained within the defining one, unless a contained block introduces\na different binding for the name.\n\nWhen a name is used in a code block, it is resolved using the nearest\nenclosing scope. The set of all such scopes visible to a code block\nis called the block\'s *environment*.\n\nWhen a name is not found at all, a "NameError" exception is raised. If\nthe current scope is a function scope, and the name refers to a local\nvariable that has not yet been bound to a value at the point where the\nname is used, an "UnboundLocalError" exception is raised.\n"UnboundLocalError" is a subclass of "NameError".\n\nIf a name binding operation occurs anywhere within a code block, all\nuses of the name within the block are treated as references to the\ncurrent block. This can lead to errors when a name is used within a\nblock before it is bound. This rule is subtle. Python lacks\ndeclarations and allows name binding operations to occur anywhere\nwithin a code block. The local variables of a code block can be\ndetermined by scanning the entire text of the block for name binding\noperations.\n\nIf the "global" statement occurs within a block, all uses of the name\nspecified in the statement refer to the binding of that name in the\ntop-level namespace. Names are resolved in the top-level namespace by\nsearching the global namespace, i.e. the namespace of the module\ncontaining the code block, and the builtins namespace, the namespace\nof the module "builtins". The global namespace is searched first. 
If\nthe name is not found there, the builtins namespace is searched. The\n"global" statement must precede all uses of the name.\n\nThe "global" statement has the same scope as a name binding operation\nin the same block. If the nearest enclosing scope for a free variable\ncontains a global statement, the free variable is treated as a global.\n\nThe "nonlocal" statement causes corresponding names to refer to\npreviously bound variables in the nearest enclosing function scope.\n"SyntaxError" is raised at compile time if the given name does not\nexist in any enclosing function scope.\n\nThe namespace for a module is automatically created the first time a\nmodule is imported. The main module for a script is always called\n"__main__".\n\nClass definition blocks and arguments to "exec()" and "eval()" are\nspecial in the context of name resolution. A class definition is an\nexecutable statement that may use and define names. These references\nfollow the normal rules for name resolution with an exception that\nunbound local variables are looked up in the global namespace. The\nnamespace of the class definition becomes the attribute dictionary of\nthe class. The scope of names defined in a class block is limited to\nthe class block; it does not extend to the code blocks of methods --\nthis includes comprehensions and generator expressions since they are\nimplemented using a function scope. This means that the following\nwill fail:\n\n class A:\n a = 42\n b = list(a + i for i in range(10))\n\n\nBuiltins and restricted execution\n=================================\n\nThe builtins namespace associated with the execution of a code block\nis actually found by looking up the name "__builtins__" in its global\nnamespace; this should be a dictionary or a module (in the latter case\nthe module\'s dictionary is used). By default, when in the "__main__"\nmodule, "__builtins__" is the built-in module "builtins"; when in any\nother module, "__builtins__" is an alias for the dictionary of the\n"builtins" module itself. "__builtins__" can be set to a user-created\ndictionary to create a weak form of restricted execution.\n\n**CPython implementation detail:** Users should not touch\n"__builtins__"; it is strictly an implementation detail. Users\nwanting to override values in the builtins namespace should "import"\nthe "builtins" module and modify its attributes appropriately.\n\n\nInteraction with dynamic features\n=================================\n\nName resolution of free variables occurs at runtime, not at compile\ntime. This means that the following code will print 42:\n\n i = 10\n def f():\n print(i)\n i = 42\n f()\n\nThere are several cases where Python statements are illegal when used\nin conjunction with nested scopes that contain free variables.\n\nIf a variable is referenced in an enclosing scope, it is illegal to\ndelete the name. An error will be reported at compile time.\n\nThe "eval()" and "exec()" functions do not have access to the full\nenvironment for resolving names. Names may be resolved in the local\nand global namespaces of the caller. Free variables are not resolved\nin the nearest enclosing namespace, but in the global namespace. [1]\nThe "exec()" and "eval()" functions have optional arguments to\noverride the global and local namespace. 
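A minimal sketch of handing "exec()" and "eval()" an explicit namespace (an ordinary dict here):

    ns = {}                       # a single mapping serves as both namespaces
    exec("x = 1 + 1", ns)
    print(ns["x"])                # 2
    print(eval("x * 10", ns))     # 20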
If only one namespace is\nspecified, it is used for both.\n', 'nonlocal': u'\nThe "nonlocal" statement\n************************\n\n nonlocal_stmt ::= "nonlocal" identifier ("," identifier)*\n\nThe "nonlocal" statement causes the listed identifiers to refer to\npreviously bound variables in the nearest enclosing scope excluding\nglobals. This is important because the default behavior for binding is\nto search the local namespace first. The statement allows\nencapsulated code to rebind variables outside of the local scope\nbesides the global (module) scope.\n\nNames listed in a "nonlocal" statement, unlike those listed in a\n"global" statement, must refer to pre-existing bindings in an\nenclosing scope (the scope in which a new binding should be created\ncannot be determined unambiguously).\n\nNames listed in a "nonlocal" statement must not collide with pre-\nexisting bindings in the local scope.\n\nSee also: **PEP 3104** - Access to Names in Outer Scopes\n\n The specification for the "nonlocal" statement.\n', 'numbers': u'\nNumeric literals\n****************\n\nThere are three types of numeric literals: integers, floating point\nnumbers, and imaginary numbers. There are no complex literals\n(complex numbers can be formed by adding a real number and an\nimaginary number).\n\nNote that numeric literals do not include a sign; a phrase like "-1"\nis actually an expression composed of the unary operator \'"-"\' and the\nliteral "1".\n', 'numeric-types': u'\nEmulating numeric types\n***********************\n\nThe following methods can be defined to emulate numeric objects.\nMethods corresponding to operations that are not supported by the\nparticular kind of number implemented (e.g., bitwise operations for\nnon-integral numbers) should be left undefined.\n\nobject.__add__(self, other)\nobject.__sub__(self, other)\nobject.__mul__(self, other)\nobject.__truediv__(self, other)\nobject.__floordiv__(self, other)\nobject.__mod__(self, other)\nobject.__divmod__(self, other)\nobject.__pow__(self, other[, modulo])\nobject.__lshift__(self, other)\nobject.__rshift__(self, other)\nobject.__and__(self, other)\nobject.__xor__(self, other)\nobject.__or__(self, other)\n\n These methods are called to implement the binary arithmetic\n operations ("+", "-", "*", "/", "//", "%", "divmod()", "pow()",\n "**", "<<", ">>", "&", "^", "|"). For instance, to evaluate the\n expression "x + y", where *x* is an instance of a class that has an\n "__add__()" method, "x.__add__(y)" is called. The "__divmod__()"\n method should be the equivalent to using "__floordiv__()" and\n "__mod__()"; it should not be related to "__truediv__()". 
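By way of illustration of the binary and reflected arithmetic hooks discussed here, a minimal sketch using a made-up Meters class:

    class Meters:
        def __init__(self, n):
            self.n = n
        def __add__(self, other):
            if isinstance(other, (int, float)):
                return Meters(self.n + other)
            return NotImplemented      # let the other operand have a try
        __radd__ = __add__             # reflected form, used for "3 + Meters(2)"

    print((Meters(2) + 3).n)   # 5
    print((3 + Meters(2)).n)   # 5, via __radd__ after int.__add__ declines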
Note\n that "__pow__()" should be defined to accept an optional third\n argument if the ternary version of the built-in "pow()" function is\n to be supported.\n\n If one of those methods does not support the operation with the\n supplied arguments, it should return "NotImplemented".\n\nobject.__radd__(self, other)\nobject.__rsub__(self, other)\nobject.__rmul__(self, other)\nobject.__rtruediv__(self, other)\nobject.__rfloordiv__(self, other)\nobject.__rmod__(self, other)\nobject.__rdivmod__(self, other)\nobject.__rpow__(self, other)\nobject.__rlshift__(self, other)\nobject.__rrshift__(self, other)\nobject.__rand__(self, other)\nobject.__rxor__(self, other)\nobject.__ror__(self, other)\n\n These methods are called to implement the binary arithmetic\n operations ("+", "-", "*", "/", "//", "%", "divmod()", "pow()",\n "**", "<<", ">>", "&", "^", "|") with reflected (swapped) operands.\n These functions are only called if the left operand does not\n support the corresponding operation and the operands are of\n different types. [2] For instance, to evaluate the expression "x -\n y", where *y* is an instance of a class that has an "__rsub__()"\n method, "y.__rsub__(x)" is called if "x.__sub__(y)" returns\n *NotImplemented*.\n\n Note that ternary "pow()" will not try calling "__rpow__()" (the\n coercion rules would become too complicated).\n\n Note: If the right operand\'s type is a subclass of the left\n operand\'s type and that subclass provides the reflected method\n for the operation, this method will be called before the left\n operand\'s non-reflected method. This behavior allows subclasses\n to override their ancestors\' operations.\n\nobject.__iadd__(self, other)\nobject.__isub__(self, other)\nobject.__imul__(self, other)\nobject.__itruediv__(self, other)\nobject.__ifloordiv__(self, other)\nobject.__imod__(self, other)\nobject.__ipow__(self, other[, modulo])\nobject.__ilshift__(self, other)\nobject.__irshift__(self, other)\nobject.__iand__(self, other)\nobject.__ixor__(self, other)\nobject.__ior__(self, other)\n\n These methods are called to implement the augmented arithmetic\n assignments ("+=", "-=", "*=", "/=", "//=", "%=", "**=", "<<=",\n ">>=", "&=", "^=", "|="). These methods should attempt to do the\n operation in-place (modifying *self*) and return the result (which\n could be, but does not have to be, *self*). If a specific method\n is not defined, the augmented assignment falls back to the normal\n methods. For instance, if *x* is an instance of a class with an\n "__iadd__()" method, "x += y" is equivalent to "x = x.__iadd__(y)"\n . Otherwise, "x.__add__(y)" and "y.__radd__(x)" are considered, as\n with the evaluation of "x + y". In certain situations, augmented\n assignment can result in unexpected errors (see *Why does\n a_tuple[i] += [\'item\'] raise an exception when the addition\n works?*), but this behavior is in fact part of the data model.\n\nobject.__neg__(self)\nobject.__pos__(self)\nobject.__abs__(self)\nobject.__invert__(self)\n\n Called to implement the unary arithmetic operations ("-", "+",\n "abs()" and "~").\n\nobject.__complex__(self)\nobject.__int__(self)\nobject.__float__(self)\nobject.__round__(self[, n])\n\n Called to implement the built-in functions "complex()", "int()",\n "float()" and "round()". 
Should return a value of the appropriate\n type.\n\nobject.__index__(self)\n\n Called to implement "operator.index()", and whenever Python needs\n to losslessly convert the numeric object to an integer object (such\n as in slicing, or in the built-in "bin()", "hex()" and "oct()"\n functions). Presence of this method indicates that the numeric\n object is an integer type. Must return an integer.\n\n Note: In order to have a coherent integer type class, when\n "__index__()" is defined "__int__()" should also be defined, and\n both should return the same value.\n', 'objects': u'\nObjects, values and types\n*************************\n\n*Objects* are Python\'s abstraction for data. All data in a Python\nprogram is represented by objects or by relations between objects. (In\na sense, and in conformance to Von Neumann\'s model of a "stored\nprogram computer," code is also represented by objects.)\n\nEvery object has an identity, a type and a value. An object\'s\n*identity* never changes once it has been created; you may think of it\nas the object\'s address in memory. The \'"is"\' operator compares the\nidentity of two objects; the "id()" function returns an integer\nrepresenting its identity.\n\n**CPython implementation detail:** For CPython, "id(x)" is the memory\naddress where "x" is stored.\n\nAn object\'s type determines the operations that the object supports\n(e.g., "does it have a length?") and also defines the possible values\nfor objects of that type. The "type()" function returns an object\'s\ntype (which is an object itself). Like its identity, an object\'s\n*type* is also unchangeable. [1]\n\nThe *value* of some objects can change. Objects whose value can\nchange are said to be *mutable*; objects whose value is unchangeable\nonce they are created are called *immutable*. (The value of an\nimmutable container object that contains a reference to a mutable\nobject can change when the latter\'s value is changed; however the\ncontainer is still considered immutable, because the collection of\nobjects it contains cannot be changed. So, immutability is not\nstrictly the same as having an unchangeable value, it is more subtle.)\nAn object\'s mutability is determined by its type; for instance,\nnumbers, strings and tuples are immutable, while dictionaries and\nlists are mutable.\n\nObjects are never explicitly destroyed; however, when they become\nunreachable they may be garbage-collected. An implementation is\nallowed to postpone garbage collection or omit it altogether --- it is\na matter of implementation quality how garbage collection is\nimplemented, as long as no objects are collected that are still\nreachable.\n\n**CPython implementation detail:** CPython currently uses a reference-\ncounting scheme with (optional) delayed detection of cyclically linked\ngarbage, which collects most objects as soon as they become\nunreachable, but is not guaranteed to collect garbage containing\ncircular references. See the documentation of the "gc" module for\ninformation on controlling the collection of cyclic garbage. Other\nimplementations act differently and CPython may change. 
Do not depend\non immediate finalization of objects when they become unreachable (so\nyou should always close files explicitly).\n\nNote that the use of the implementation\'s tracing or debugging\nfacilities may keep objects alive that would normally be collectable.\nAlso note that catching an exception with a \'"try"..."except"\'\nstatement may keep objects alive.\n\nSome objects contain references to "external" resources such as open\nfiles or windows. It is understood that these resources are freed\nwhen the object is garbage-collected, but since garbage collection is\nnot guaranteed to happen, such objects also provide an explicit way to\nrelease the external resource, usually a "close()" method. Programs\nare strongly recommended to explicitly close such objects. The\n\'"try"..."finally"\' statement and the \'"with"\' statement provide\nconvenient ways to do this.\n\nSome objects contain references to other objects; these are called\n*containers*. Examples of containers are tuples, lists and\ndictionaries. The references are part of a container\'s value. In\nmost cases, when we talk about the value of a container, we imply the\nvalues, not the identities of the contained objects; however, when we\ntalk about the mutability of a container, only the identities of the\nimmediately contained objects are implied. So, if an immutable\ncontainer (like a tuple) contains a reference to a mutable object, its\nvalue changes if that mutable object is changed.\n\nTypes affect almost all aspects of object behavior. Even the\nimportance of object identity is affected in some sense: for immutable\ntypes, operations that compute new values may actually return a\nreference to any existing object with the same type and value, while\nfor mutable objects this is not allowed. E.g., after "a = 1; b = 1",\n"a" and "b" may or may not refer to the same object with the value\none, depending on the implementation, but after "c = []; d = []", "c"\nand "d" are guaranteed to refer to two different, unique, newly\ncreated empty lists. (Note that "c = d = []" assigns the same object\nto both "c" and "d".)\n', - 'operator-summary': u'\nOperator precedence\n*******************\n\nThe following table summarizes the operator precedence in Python, from\nlowest precedence (least binding) to highest precedence (most\nbinding). Operators in the same box have the same precedence. Unless\nthe syntax is explicitly given, operators are binary. 
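As a quick sketch of the grouping and precedence rules summarized below:

    print(2 ** 3 ** 2)   # exponentiation groups right to left: 2 ** (3 ** 2) == 512
    print(-1 ** 2)       # "**" binds tighter than the unary minus on its left: -1
    print(2 ** -1)       # but less tightly than a unary operator on its right: 0.5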
Operators in\nthe same box group left to right (except for exponentiation, which\ngroups from right to left).\n\nNote that comparisons, membership tests, and identity tests, all have\nthe same precedence and have a left-to-right chaining feature as\ndescribed in the *Comparisons* section.\n\n+-------------------------------------------------+---------------------------------------+\n| Operator | Description |\n+=================================================+=======================================+\n| "lambda" | Lambda expression |\n+-------------------------------------------------+---------------------------------------+\n| "if" -- "else" | Conditional expression |\n+-------------------------------------------------+---------------------------------------+\n| "or" | Boolean OR |\n+-------------------------------------------------+---------------------------------------+\n| "and" | Boolean AND |\n+-------------------------------------------------+---------------------------------------+\n| "not" "x" | Boolean NOT |\n+-------------------------------------------------+---------------------------------------+\n| "in", "not in", "is", "is not", "<", "<=", ">", | Comparisons, including membership |\n| ">=", "!=", "==" | tests and identity tests |\n+-------------------------------------------------+---------------------------------------+\n| "|" | Bitwise OR |\n+-------------------------------------------------+---------------------------------------+\n| "^" | Bitwise XOR |\n+-------------------------------------------------+---------------------------------------+\n| "&" | Bitwise AND |\n+-------------------------------------------------+---------------------------------------+\n| "<<", ">>" | Shifts |\n+-------------------------------------------------+---------------------------------------+\n| "+", "-" | Addition and subtraction |\n+-------------------------------------------------+---------------------------------------+\n| "*", "/", "//", "%" | Multiplication, division, remainder |\n| | [5] |\n+-------------------------------------------------+---------------------------------------+\n| "+x", "-x", "~x" | Positive, negative, bitwise NOT |\n+-------------------------------------------------+---------------------------------------+\n| "**" | Exponentiation [6] |\n+-------------------------------------------------+---------------------------------------+\n| "x[index]", "x[index:index]", | Subscription, slicing, call, |\n| "x(arguments...)", "x.attribute" | attribute reference |\n+-------------------------------------------------+---------------------------------------+\n| "(expressions...)", "[expressions...]", "{key: | Binding or tuple display, list |\n| value...}", "{expressions...}" | display, dictionary display, set |\n| | display |\n+-------------------------------------------------+---------------------------------------+\n\n-[ Footnotes ]-\n\n[1] While "abs(x%y) < abs(y)" is true mathematically, for floats\n it may not be true numerically due to roundoff. For example, and\n assuming a platform on which a Python float is an IEEE 754 double-\n precision number, in order that "-1e-100 % 1e100" have the same\n sign as "1e100", the computed result is "-1e-100 + 1e100", which\n is numerically exactly equal to "1e100". 
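That footnote can be checked directly, together with the "math.fmod()" contrast described next:

    import math
    print(-1e-100 % 1e100)            # 1e+100: "%" follows the sign of the divisor
    print(math.fmod(-1e-100, 1e100))  # -1e-100: fmod follows the first argument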
The function\n "math.fmod()" returns a result whose sign matches the sign of the\n first argument instead, and so returns "-1e-100" in this case.\n Which approach is more appropriate depends on the application.\n\n[2] If x is very close to an exact integer multiple of y, it\'s\n possible for "x//y" to be one larger than "(x-x%y)//y" due to\n rounding. In such cases, Python returns the latter result, in\n order to preserve that "divmod(x,y)[0] * y + x % y" be very close\n to "x".\n\n[3] While comparisons between strings make sense at the byte\n level, they may be counter-intuitive to users. For example, the\n strings ""\\u00C7"" and ""\\u0327\\u0043"" compare differently, even\n though they both represent the same unicode character (LATIN\n CAPITAL LETTER C WITH CEDILLA). To compare strings in a human\n recognizable way, compare using "unicodedata.normalize()".\n\n[4] Due to automatic garbage-collection, free lists, and the\n dynamic nature of descriptors, you may notice seemingly unusual\n behaviour in certain uses of the "is" operator, like those\n involving comparisons between instance methods, or constants.\n Check their documentation for more info.\n\n[5] The "%" operator is also used for string formatting; the same\n precedence applies.\n\n[6] The power operator "**" binds less tightly than an arithmetic\n or bitwise unary operator on its right, that is, "2**-1" is "0.5".\n', + 'operator-summary': u'\nOperator precedence\n*******************\n\nThe following table summarizes the operator precedence in Python, from\nlowest precedence (least binding) to highest precedence (most\nbinding). Operators in the same box have the same precedence. Unless\nthe syntax is explicitly given, operators are binary. Operators in\nthe same box group left to right (except for exponentiation, which\ngroups from right to left).\n\nNote that comparisons, membership tests, and identity tests, all have\nthe same precedence and have a left-to-right chaining feature as\ndescribed in the *Comparisons* section.\n\n+-------------------------------------------------+---------------------------------------+\n| Operator | Description |\n+=================================================+=======================================+\n| "lambda" | Lambda expression |\n+-------------------------------------------------+---------------------------------------+\n| "if" -- "else" | Conditional expression |\n+-------------------------------------------------+---------------------------------------+\n| "or" | Boolean OR |\n+-------------------------------------------------+---------------------------------------+\n| "and" | Boolean AND |\n+-------------------------------------------------+---------------------------------------+\n| "not" "x" | Boolean NOT |\n+-------------------------------------------------+---------------------------------------+\n| "in", "not in", "is", "is not", "<", "<=", ">", | Comparisons, including membership |\n| ">=", "!=", "==" | tests and identity tests |\n+-------------------------------------------------+---------------------------------------+\n| "|" | Bitwise OR |\n+-------------------------------------------------+---------------------------------------+\n| "^" | Bitwise XOR |\n+-------------------------------------------------+---------------------------------------+\n| "&" | Bitwise AND |\n+-------------------------------------------------+---------------------------------------+\n| "<<", ">>" | Shifts 
|\n+-------------------------------------------------+---------------------------------------+\n| "+", "-" | Addition and subtraction |\n+-------------------------------------------------+---------------------------------------+\n| "*", "/", "//", "%" | Multiplication, division, remainder |\n| | [5] |\n+-------------------------------------------------+---------------------------------------+\n| "+x", "-x", "~x" | Positive, negative, bitwise NOT |\n+-------------------------------------------------+---------------------------------------+\n| "**" | Exponentiation [6] |\n+-------------------------------------------------+---------------------------------------+\n| "x[index]", "x[index:index]", | Subscription, slicing, call, |\n| "x(arguments...)", "x.attribute" | attribute reference |\n+-------------------------------------------------+---------------------------------------+\n| "(expressions...)", "[expressions...]", "{key: | Binding or tuple display, list |\n| value...}", "{expressions...}" | display, dictionary display, set |\n| | display |\n+-------------------------------------------------+---------------------------------------+\n\n-[ Footnotes ]-\n\n[1] While "abs(x%y) < abs(y)" is true mathematically, for floats\n it may not be true numerically due to roundoff. For example, and\n assuming a platform on which a Python float is an IEEE 754 double-\n precision number, in order that "-1e-100 % 1e100" have the same\n sign as "1e100", the computed result is "-1e-100 + 1e100", which\n is numerically exactly equal to "1e100". The function\n "math.fmod()" returns a result whose sign matches the sign of the\n first argument instead, and so returns "-1e-100" in this case.\n Which approach is more appropriate depends on the application.\n\n[2] If x is very close to an exact integer multiple of y, it\'s\n possible for "x//y" to be one larger than "(x-x%y)//y" due to\n rounding. In such cases, Python returns the latter result, in\n order to preserve that "divmod(x,y)[0] * y + x % y" be very close\n to "x".\n\n[3] The Unicode standard distinguishes between *code points* (e.g.\n U+0041) and *abstract characters* (e.g. "LATIN CAPITAL LETTER A").\n While most abstract characters in Unicode are only represented\n using one code point, there is a number of abstract characters\n that can in addition be represented using a sequence of more than\n one code point. For example, the abstract character "LATIN\n CAPITAL LETTER C WITH CEDILLA" can be represented as a single\n *precomposed character* at code position U+00C7, or as a sequence\n of a *base character* at code position U+0043 (LATIN CAPITAL\n LETTER C), followed by a *combining character* at code position\n U+0327 (COMBINING CEDILLA).\n\n The comparison operators on strings compare at the level of\n Unicode code points. This may be counter-intuitive to humans. 
For\n example, ""\\u00C7" == "\\u0043\\u0327"" is "False", even though both\n strings represent the same abstract character "LATIN CAPITAL\n LETTER C WITH CEDILLA".\n\n To compare strings at the level of abstract characters (that is,\n in a way intuitive to humans), use "unicodedata.normalize()".\n\n[4] Due to automatic garbage-collection, free lists, and the\n dynamic nature of descriptors, you may notice seemingly unusual\n behaviour in certain uses of the "is" operator, like those\n involving comparisons between instance methods, or constants.\n Check their documentation for more info.\n\n[5] The "%" operator is also used for string formatting; the same\n precedence applies.\n\n[6] The power operator "**" binds less tightly than an arithmetic\n or bitwise unary operator on its right, that is, "2**-1" is "0.5".\n', 'pass': u'\nThe "pass" statement\n********************\n\n pass_stmt ::= "pass"\n\n"pass" is a null operation --- when it is executed, nothing happens.\nIt is useful as a placeholder when a statement is required\nsyntactically, but no code needs to be executed, for example:\n\n def f(arg): pass # a function that does nothing (yet)\n\n class C: pass # a class with no methods (yet)\n', 'power': u'\nThe power operator\n******************\n\nThe power operator binds more tightly than unary operators on its\nleft; it binds less tightly than unary operators on its right. The\nsyntax is:\n\n power ::= primary ["**" u_expr]\n\nThus, in an unparenthesized sequence of power and unary operators, the\noperators are evaluated from right to left (this does not constrain\nthe evaluation order for the operands): "-1**2" results in "-1".\n\nThe power operator has the same semantics as the built-in "pow()"\nfunction, when called with two arguments: it yields its left argument\nraised to the power of its right argument. The numeric arguments are\nfirst converted to a common type, and the result is of that type.\n\nFor int operands, the result has the same type as the operands unless\nthe second argument is negative; in that case, all arguments are\nconverted to float and a float result is delivered. For example,\n"10**2" returns "100", but "10**-2" returns "0.01".\n\nRaising "0.0" to a negative power results in a "ZeroDivisionError".\nRaising a negative number to a fractional power results in a "complex"\nnumber. (In earlier versions it raised a "ValueError".)\n', 'raise': u'\nThe "raise" statement\n*********************\n\n raise_stmt ::= "raise" [expression ["from" expression]]\n\nIf no expressions are present, "raise" re-raises the last exception\nthat was active in the current scope. If no exception is active in\nthe current scope, a "RuntimeError" exception is raised indicating\nthat this is an error.\n\nOtherwise, "raise" evaluates the first expression as the exception\nobject. It must be either a subclass or an instance of\n"BaseException". If it is a class, the exception instance will be\nobtained when needed by instantiating the class with no arguments.\n\nThe *type* of the exception is the exception instance\'s class, the\n*value* is the instance itself.\n\nA traceback object is normally created automatically when an exception\nis raised and attached to it as the "__traceback__" attribute, which\nis writable. 
You can create an exception and set your own traceback in\none step using the "with_traceback()" exception method (which returns\nthe same exception instance, with its traceback set to its argument),\nlike so:\n\n raise Exception("foo occurred").with_traceback(tracebackobj)\n\nThe "from" clause is used for exception chaining: if given, the second\n*expression* must be another exception class or instance, which will\nthen be attached to the raised exception as the "__cause__" attribute\n(which is writable). If the raised exception is not handled, both\nexceptions will be printed:\n\n >>> try:\n ... print(1 / 0)\n ... except Exception as exc:\n ... raise RuntimeError("Something bad happened") from exc\n ...\n Traceback (most recent call last):\n File "", line 2, in \n ZeroDivisionError: int division or modulo by zero\n\n The above exception was the direct cause of the following exception:\n\n Traceback (most recent call last):\n File "", line 4, in \n RuntimeError: Something bad happened\n\nA similar mechanism works implicitly if an exception is raised inside\nan exception handler or a "finally" clause: the previous exception is\nthen attached as the new exception\'s "__context__" attribute:\n\n >>> try:\n ... print(1 / 0)\n ... except:\n ... raise RuntimeError("Something bad happened")\n ...\n Traceback (most recent call last):\n File "", line 2, in \n ZeroDivisionError: int division or modulo by zero\n\n During handling of the above exception, another exception occurred:\n\n Traceback (most recent call last):\n File "", line 4, in \n RuntimeError: Something bad happened\n\nAdditional information on exceptions can be found in section\n*Exceptions*, and information about handling exceptions is in section\n*The try statement*.\n', @@ -60,19 +60,19 @@ 'shifting': u'\nShifting operations\n*******************\n\nThe shifting operations have lower priority than the arithmetic\noperations:\n\n shift_expr ::= a_expr | shift_expr ( "<<" | ">>" ) a_expr\n\nThese operators accept integers as arguments. They shift the first\nargument to the left or right by the number of bits given by the\nsecond argument.\n\nA right shift by *n* bits is defined as floor division by "pow(2,n)".\nA left shift by *n* bits is defined as multiplication with "pow(2,n)".\n\nNote: In the current implementation, the right-hand operand is\n required to be at most "sys.maxsize". If the right-hand operand is\n larger than "sys.maxsize" an "OverflowError" exception is raised.\n', 'slicings': u'\nSlicings\n********\n\nA slicing selects a range of items in a sequence object (e.g., a\nstring, tuple or list). Slicings may be used as expressions or as\ntargets in assignment or "del" statements. The syntax for a slicing:\n\n slicing ::= primary "[" slice_list "]"\n slice_list ::= slice_item ("," slice_item)* [","]\n slice_item ::= expression | proper_slice\n proper_slice ::= [lower_bound] ":" [upper_bound] [ ":" [stride] ]\n lower_bound ::= expression\n upper_bound ::= expression\n stride ::= expression\n\nThere is ambiguity in the formal syntax here: anything that looks like\nan expression list also looks like a slice list, so any subscription\ncan be interpreted as a slicing. Rather than further complicating the\nsyntax, this is disambiguated by defining that in this case the\ninterpretation as a subscription takes priority over the\ninterpretation as a slicing (this is the case if the slice list\ncontains no proper slice).\n\nThe semantics for a slicing are as follows. 
The primary is indexed\n(using the same "__getitem__()" method as normal subscription) with a\nkey that is constructed from the slice list, as follows. If the slice\nlist contains at least one comma, the key is a tuple containing the\nconversion of the slice items; otherwise, the conversion of the lone\nslice item is the key. The conversion of a slice item that is an\nexpression is that expression. The conversion of a proper slice is a\nslice object (see section *The standard type hierarchy*) whose\n"start", "stop" and "step" attributes are the values of the\nexpressions given as lower bound, upper bound and stride,\nrespectively, substituting "None" for missing expressions.\n', 'specialattrs': u'\nSpecial Attributes\n******************\n\nThe implementation adds a few special read-only attributes to several\nobject types, where they are relevant. Some of these are not reported\nby the "dir()" built-in function.\n\nobject.__dict__\n\n A dictionary or other mapping object used to store an object\'s\n (writable) attributes.\n\ninstance.__class__\n\n The class to which a class instance belongs.\n\nclass.__bases__\n\n The tuple of base classes of a class object.\n\nclass.__name__\n\n The name of the class or type.\n\nclass.__qualname__\n\n The *qualified name* of the class or type.\n\n New in version 3.3.\n\nclass.__mro__\n\n This attribute is a tuple of classes that are considered when\n looking for base classes during method resolution.\n\nclass.mro()\n\n This method can be overridden by a metaclass to customize the\n method resolution order for its instances. It is called at class\n instantiation, and its result is stored in "__mro__".\n\nclass.__subclasses__()\n\n Each class keeps a list of weak references to its immediate\n subclasses. This method returns a list of all those references\n still alive. Example:\n\n >>> int.__subclasses__()\n []\n\n-[ Footnotes ]-\n\n[1] Additional information on these special methods may be found\n in the Python Reference Manual (*Basic customization*).\n\n[2] As a consequence, the list "[1, 2]" is considered equal to\n "[1.0, 2.0]", and similarly for tuples.\n\n[3] They must have since the parser can\'t tell the type of the\n operands.\n\n[4] Cased characters are those with general category property\n being one of "Lu" (Letter, uppercase), "Ll" (Letter, lowercase),\n or "Lt" (Letter, titlecase).\n\n[5] To format only a tuple you should therefore provide a\n singleton tuple whose only element is the tuple to be formatted.\n', - 'specialnames': u'\nSpecial method names\n********************\n\nA class can implement certain operations that are invoked by special\nsyntax (such as arithmetic operations or subscripting and slicing) by\ndefining methods with special names. This is Python\'s approach to\n*operator overloading*, allowing classes to define their own behavior\nwith respect to language operators. For instance, if a class defines\na method named "__getitem__()", and "x" is an instance of this class,\nthen "x[i]" is roughly equivalent to "type(x).__getitem__(x, i)".\nExcept where mentioned, attempts to execute an operation raise an\nexception when no appropriate method is defined (typically\n"AttributeError" or "TypeError").\n\nWhen implementing a class that emulates any built-in type, it is\nimportant that the emulation only be implemented to the degree that it\nmakes sense for the object being modelled. For example, some\nsequences may work well with retrieval of individual elements, but\nextracting a slice may not make sense. 
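As an aside on the slicing semantics described above, a short sketch showing the key that actually reaches "__getitem__()"; the class name "ShowKey" is hypothetical:

    class ShowKey:
        def __getitem__(self, key):
            return key                 # echo the key constructed from the slice list

    k = ShowKey()
    print(k[1:2])      # slice(1, 2, None)
    print(k[1:2, 3])   # (slice(1, 2, None), 3): a comma makes the key a tuple
    print(k[::2])      # slice(None, None, 2)

Returning to the point that emulation of a built-in type need only go as far as makes sense for the object being modelled: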
(One example of this is the\n"NodeList" interface in the W3C\'s Document Object Model.)\n\n\nBasic customization\n===================\n\nobject.__new__(cls[, ...])\n\n Called to create a new instance of class *cls*. "__new__()" is a\n static method (special-cased so you need not declare it as such)\n that takes the class of which an instance was requested as its\n first argument. The remaining arguments are those passed to the\n object constructor expression (the call to the class). The return\n value of "__new__()" should be the new object instance (usually an\n instance of *cls*).\n\n Typical implementations create a new instance of the class by\n invoking the superclass\'s "__new__()" method using\n "super(currentclass, cls).__new__(cls[, ...])" with appropriate\n arguments and then modifying the newly-created instance as\n necessary before returning it.\n\n If "__new__()" returns an instance of *cls*, then the new\n instance\'s "__init__()" method will be invoked like\n "__init__(self[, ...])", where *self* is the new instance and the\n remaining arguments are the same as were passed to "__new__()".\n\n If "__new__()" does not return an instance of *cls*, then the new\n instance\'s "__init__()" method will not be invoked.\n\n "__new__()" is intended mainly to allow subclasses of immutable\n types (like int, str, or tuple) to customize instance creation. It\n is also commonly overridden in custom metaclasses in order to\n customize class creation.\n\nobject.__init__(self[, ...])\n\n Called after the instance has been created (by "__new__()"), but\n before it is returned to the caller. The arguments are those\n passed to the class constructor expression. If a base class has an\n "__init__()" method, the derived class\'s "__init__()" method, if\n any, must explicitly call it to ensure proper initialization of the\n base class part of the instance; for example:\n "BaseClass.__init__(self, [args...])".\n\n Because "__new__()" and "__init__()" work together in constructing\n objects ("__new__()" to create it, and "__init__()" to customise\n it), no non-"None" value may be returned by "__init__()"; doing so\n will cause a "TypeError" to be raised at runtime.\n\nobject.__del__(self)\n\n Called when the instance is about to be destroyed. This is also\n called a destructor. If a base class has a "__del__()" method, the\n derived class\'s "__del__()" method, if any, must explicitly call it\n to ensure proper deletion of the base class part of the instance.\n Note that it is possible (though not recommended!) for the\n "__del__()" method to postpone destruction of the instance by\n creating a new reference to it. It may then be called at a later\n time when this new reference is deleted. It is not guaranteed that\n "__del__()" methods are called for objects that still exist when\n the interpreter exits.\n\n Note: "del x" doesn\'t directly call "x.__del__()" --- the former\n decrements the reference count for "x" by one, and the latter is\n only called when "x"\'s reference count reaches zero. 
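A small illustration of this note, assuming CPython's reference-counting behaviour in a script; the class name is invented:

    class Noisy:
        def __del__(self):
            print("finalized")

    a = Noisy()
    b = a        # a second reference to the same object
    del a        # nothing is printed; the reference count is still nonzero
    del b        # "finalized" is printed here (CPython reference counting)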
Some common\n situations that may prevent the reference count of an object from\n going to zero include: circular references between objects (e.g.,\n a doubly-linked list or a tree data structure with parent and\n child pointers); a reference to the object on the stack frame of\n a function that caught an exception (the traceback stored in\n "sys.exc_info()[2]" keeps the stack frame alive); or a reference\n to the object on the stack frame that raised an unhandled\n exception in interactive mode (the traceback stored in\n "sys.last_traceback" keeps the stack frame alive). The first\n situation can only be remedied by explicitly breaking the cycles;\n the second can be resolved by freeing the reference to the\n traceback object when it is no longer useful, and the third can\n be resolved by storing "None" in "sys.last_traceback". Circular\n references which are garbage are detected and cleaned up when the\n cyclic garbage collector is enabled (it\'s on by default). Refer\n to the documentation for the "gc" module for more information\n about this topic.\n\n Warning: Due to the precarious circumstances under which\n "__del__()" methods are invoked, exceptions that occur during\n their execution are ignored, and a warning is printed to\n "sys.stderr" instead. Also, when "__del__()" is invoked in\n response to a module being deleted (e.g., when execution of the\n program is done), other globals referenced by the "__del__()"\n method may already have been deleted or in the process of being\n torn down (e.g. the import machinery shutting down). For this\n reason, "__del__()" methods should do the absolute minimum needed\n to maintain external invariants. Starting with version 1.5,\n Python guarantees that globals whose name begins with a single\n underscore are deleted from their module before other globals are\n deleted; if no other references to such globals exist, this may\n help in assuring that imported modules are still available at the\n time when the "__del__()" method is called.\n\nobject.__repr__(self)\n\n Called by the "repr()" built-in function to compute the "official"\n string representation of an object. If at all possible, this\n should look like a valid Python expression that could be used to\n recreate an object with the same value (given an appropriate\n environment). If this is not possible, a string of the form\n "<...some useful description...>" should be returned. The return\n value must be a string object. If a class defines "__repr__()" but\n not "__str__()", then "__repr__()" is also used when an "informal"\n string representation of instances of that class is required.\n\n This is typically used for debugging, so it is important that the\n representation is information-rich and unambiguous.\n\nobject.__str__(self)\n\n Called by "str(object)" and the built-in functions "format()" and\n "print()" to compute the "informal" or nicely printable string\n representation of an object. The return value must be a *string*\n object.\n\n This method differs from "object.__repr__()" in that there is no\n expectation that "__str__()" return a valid Python expression: a\n more convenient or concise representation can be used.\n\n The default implementation defined by the built-in type "object"\n calls "object.__repr__()".\n\nobject.__bytes__(self)\n\n Called by "bytes()" to compute a byte-string representation of an\n object. 
This should return a "bytes" object.\n\nobject.__format__(self, format_spec)\n\n Called by the "format()" built-in function (and by extension, the\n "str.format()" method of class "str") to produce a "formatted"\n string representation of an object. The "format_spec" argument is a\n string that contains a description of the formatting options\n desired. The interpretation of the "format_spec" argument is up to\n the type implementing "__format__()", however most classes will\n either delegate formatting to one of the built-in types, or use a\n similar formatting option syntax.\n\n See *Format Specification Mini-Language* for a description of the\n standard formatting syntax.\n\n The return value must be a string object.\n\n Changed in version 3.4: The __format__ method of "object" itself\n raises a "TypeError" if passed any non-empty string.\n\nobject.__lt__(self, other)\nobject.__le__(self, other)\nobject.__eq__(self, other)\nobject.__ne__(self, other)\nobject.__gt__(self, other)\nobject.__ge__(self, other)\n\n These are the so-called "rich comparison" methods. The\n correspondence between operator symbols and method names is as\n follows: "xy" calls\n "x.__gt__(y)", and "x>=y" calls "x.__ge__(y)".\n\n A rich comparison method may return the singleton "NotImplemented"\n if it does not implement the operation for a given pair of\n arguments. By convention, "False" and "True" are returned for a\n successful comparison. However, these methods can return any value,\n so if the comparison operator is used in a Boolean context (e.g.,\n in the condition of an "if" statement), Python will call "bool()"\n on the value to determine if the result is true or false.\n\n There are no implied relationships among the comparison operators.\n The truth of "x==y" does not imply that "x!=y" is false.\n Accordingly, when defining "__eq__()", one should also define\n "__ne__()" so that the operators will behave as expected. See the\n paragraph on "__hash__()" for some important notes on creating\n *hashable* objects which support custom comparison operations and\n are usable as dictionary keys.\n\n There are no swapped-argument versions of these methods (to be used\n when the left argument does not support the operation but the right\n argument does); rather, "__lt__()" and "__gt__()" are each other\'s\n reflection, "__le__()" and "__ge__()" are each other\'s reflection,\n and "__eq__()" and "__ne__()" are their own reflection.\n\n Arguments to rich comparison methods are never coerced.\n\n To automatically generate ordering operations from a single root\n operation, see "functools.total_ordering()".\n\nobject.__hash__(self)\n\n Called by built-in function "hash()" and for operations on members\n of hashed collections including "set", "frozenset", and "dict".\n "__hash__()" should return an integer. The only required property\n is that objects which compare equal have the same hash value; it is\n advised to somehow mix together (e.g. using exclusive or) the hash\n values for the components of the object that also play a part in\n comparison of objects.\n\n Note: "hash()" truncates the value returned from an object\'s\n custom "__hash__()" method to the size of a "Py_ssize_t". This\n is typically 8 bytes on 64-bit builds and 4 bytes on 32-bit\n builds. If an object\'s "__hash__()" must interoperate on builds\n of different bit sizes, be sure to check the width on all\n supported builds. 
An easy way to do this is with "python -c\n "import sys; print(sys.hash_info.width)""\n\n If a class does not define an "__eq__()" method it should not\n define a "__hash__()" operation either; if it defines "__eq__()"\n but not "__hash__()", its instances will not be usable as items in\n hashable collections. If a class defines mutable objects and\n implements an "__eq__()" method, it should not implement\n "__hash__()", since the implementation of hashable collections\n requires that a key\'s hash value is immutable (if the object\'s hash\n value changes, it will be in the wrong hash bucket).\n\n User-defined classes have "__eq__()" and "__hash__()" methods by\n default; with them, all objects compare unequal (except with\n themselves) and "x.__hash__()" returns an appropriate value such\n that "x == y" implies both that "x is y" and "hash(x) == hash(y)".\n\n A class that overrides "__eq__()" and does not define "__hash__()"\n will have its "__hash__()" implicitly set to "None". When the\n "__hash__()" method of a class is "None", instances of the class\n will raise an appropriate "TypeError" when a program attempts to\n retrieve their hash value, and will also be correctly identified as\n unhashable when checking "isinstance(obj, collections.Hashable").\n\n If a class that overrides "__eq__()" needs to retain the\n implementation of "__hash__()" from a parent class, the interpreter\n must be told this explicitly by setting "__hash__ =\n .__hash__".\n\n If a class that does not override "__eq__()" wishes to suppress\n hash support, it should include "__hash__ = None" in the class\n definition. A class which defines its own "__hash__()" that\n explicitly raises a "TypeError" would be incorrectly identified as\n hashable by an "isinstance(obj, collections.Hashable)" call.\n\n Note: By default, the "__hash__()" values of str, bytes and\n datetime objects are "salted" with an unpredictable random value.\n Although they remain constant within an individual Python\n process, they are not predictable between repeated invocations of\n Python.This is intended to provide protection against a denial-\n of-service caused by carefully-chosen inputs that exploit the\n worst case performance of a dict insertion, O(n^2) complexity.\n See http://www.ocert.org/advisories/ocert-2011-003.html for\n details.Changing hash values affects the iteration order of\n dicts, sets and other mappings. Python has never made guarantees\n about this ordering (and it typically varies between 32-bit and\n 64-bit builds).See also "PYTHONHASHSEED".\n\n Changed in version 3.3: Hash randomization is enabled by default.\n\nobject.__bool__(self)\n\n Called to implement truth value testing and the built-in operation\n "bool()"; should return "False" or "True". When this method is not\n defined, "__len__()" is called, if it is defined, and the object is\n considered true if its result is nonzero. If a class defines\n neither "__len__()" nor "__bool__()", all its instances are\n considered true.\n\n\nCustomizing attribute access\n============================\n\nThe following methods can be defined to customize the meaning of\nattribute access (use of, assignment to, or deletion of "x.name") for\nclass instances.\n\nobject.__getattr__(self, name)\n\n Called when an attribute lookup has not found the attribute in the\n usual places (i.e. it is not an instance attribute nor is it found\n in the class tree for "self"). "name" is the attribute name. 
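A minimal sketch of "__getattr__()" as a fallback lookup; the "Lazy" class and its "answer" attribute are invented for illustration:

    class Lazy:
        def __getattr__(self, name):
            # invoked only when normal attribute lookup fails
            if name == "answer":
                self.answer = 42        # cache in the instance dictionary
                return 42
            raise AttributeError(name)

    obj = Lazy()
    print(obj.answer)   # 42, computed by __getattr__()
    print(obj.answer)   # 42, now found without calling __getattr__()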
This\n method should return the (computed) attribute value or raise an\n "AttributeError" exception.\n\n Note that if the attribute is found through the normal mechanism,\n "__getattr__()" is not called. (This is an intentional asymmetry\n between "__getattr__()" and "__setattr__()".) This is done both for\n efficiency reasons and because otherwise "__getattr__()" would have\n no way to access other attributes of the instance. Note that at\n least for instance variables, you can fake total control by not\n inserting any values in the instance attribute dictionary (but\n instead inserting them in another object). See the\n "__getattribute__()" method below for a way to actually get total\n control over attribute access.\n\nobject.__getattribute__(self, name)\n\n Called unconditionally to implement attribute accesses for\n instances of the class. If the class also defines "__getattr__()",\n the latter will not be called unless "__getattribute__()" either\n calls it explicitly or raises an "AttributeError". This method\n should return the (computed) attribute value or raise an\n "AttributeError" exception. In order to avoid infinite recursion in\n this method, its implementation should always call the base class\n method with the same name to access any attributes it needs, for\n example, "object.__getattribute__(self, name)".\n\n Note: This method may still be bypassed when looking up special\n methods as the result of implicit invocation via language syntax\n or built-in functions. See *Special method lookup*.\n\nobject.__setattr__(self, name, value)\n\n Called when an attribute assignment is attempted. This is called\n instead of the normal mechanism (i.e. store the value in the\n instance dictionary). *name* is the attribute name, *value* is the\n value to be assigned to it.\n\n If "__setattr__()" wants to assign to an instance attribute, it\n should call the base class method with the same name, for example,\n "object.__setattr__(self, name, value)".\n\nobject.__delattr__(self, name)\n\n Like "__setattr__()" but for attribute deletion instead of\n assignment. This should only be implemented if "del obj.name" is\n meaningful for the object.\n\nobject.__dir__(self)\n\n Called when "dir()" is called on the object. A sequence must be\n returned. "dir()" converts the returned sequence to a list and\n sorts it.\n\n\nImplementing Descriptors\n------------------------\n\nThe following methods only apply when an instance of the class\ncontaining the method (a so-called *descriptor* class) appears in an\n*owner* class (the descriptor must be in either the owner\'s class\ndictionary or in the class dictionary for one of its parents). In the\nexamples below, "the attribute" refers to the attribute whose name is\nthe key of the property in the owner class\' "__dict__".\n\nobject.__get__(self, instance, owner)\n\n Called to get the attribute of the owner class (class attribute\n access) or of an instance of that class (instance attribute\n access). *owner* is always the owner class, while *instance* is the\n instance that the attribute was accessed through, or "None" when\n the attribute is accessed through the *owner*. 
This method should\n return the (computed) attribute value or raise an "AttributeError"\n exception.\n\nobject.__set__(self, instance, value)\n\n Called to set the attribute on an instance *instance* of the owner\n class to a new value, *value*.\n\nobject.__delete__(self, instance)\n\n Called to delete the attribute on an instance *instance* of the\n owner class.\n\nThe attribute "__objclass__" is interpreted by the "inspect" module as\nspecifying the class where this object was defined (setting this\nappropriately can assist in runtime introspection of dynamic class\nattributes). For callables, it may indicate that an instance of the\ngiven type (or a subclass) is expected or required as the first\npositional argument (for example, CPython sets this attribute for\nunbound methods that are implemented in C).\n\n\nInvoking Descriptors\n--------------------\n\nIn general, a descriptor is an object attribute with "binding\nbehavior", one whose attribute access has been overridden by methods\nin the descriptor protocol: "__get__()", "__set__()", and\n"__delete__()". If any of those methods are defined for an object, it\nis said to be a descriptor.\n\nThe default behavior for attribute access is to get, set, or delete\nthe attribute from an object\'s dictionary. For instance, "a.x" has a\nlookup chain starting with "a.__dict__[\'x\']", then\n"type(a).__dict__[\'x\']", and continuing through the base classes of\n"type(a)" excluding metaclasses.\n\nHowever, if the looked-up value is an object defining one of the\ndescriptor methods, then Python may override the default behavior and\ninvoke the descriptor method instead. Where this occurs in the\nprecedence chain depends on which descriptor methods were defined and\nhow they were called.\n\nThe starting point for descriptor invocation is a binding, "a.x". How\nthe arguments are assembled depends on "a":\n\nDirect Call\n The simplest and least common call is when user code directly\n invokes a descriptor method: "x.__get__(a)".\n\nInstance Binding\n If binding to an object instance, "a.x" is transformed into the\n call: "type(a).__dict__[\'x\'].__get__(a, type(a))".\n\nClass Binding\n If binding to a class, "A.x" is transformed into the call:\n "A.__dict__[\'x\'].__get__(None, A)".\n\nSuper Binding\n If "a" is an instance of "super", then the binding "super(B,\n obj).m()" searches "obj.__class__.__mro__" for the base class "A"\n immediately preceding "B" and then invokes the descriptor with the\n call: "A.__dict__[\'m\'].__get__(obj, obj.__class__)".\n\nFor instance bindings, the precedence of descriptor invocation depends\non the which descriptor methods are defined. A descriptor can define\nany combination of "__get__()", "__set__()" and "__delete__()". If it\ndoes not define "__get__()", then accessing the attribute will return\nthe descriptor object itself unless there is a value in the object\'s\ninstance dictionary. If the descriptor defines "__set__()" and/or\n"__delete__()", it is a data descriptor; if it defines neither, it is\na non-data descriptor. Normally, data descriptors define both\n"__get__()" and "__set__()", while non-data descriptors have just the\n"__get__()" method. Data descriptors with "__set__()" and "__get__()"\ndefined always override a redefinition in an instance dictionary. In\ncontrast, non-data descriptors can be overridden by instances.\n\nPython methods (including "staticmethod()" and "classmethod()") are\nimplemented as non-data descriptors. Accordingly, instances can\nredefine and override methods. 
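A compact sketch of a data descriptor (it defines both "__get__()" and "__set__()", so it takes precedence over the instance dictionary); the names "Typed" and "Account" are invented:

    class Typed:
        "A data descriptor enforcing a type on one attribute."
        def __init__(self, name, kind):
            self.name, self.kind = name, kind
        def __get__(self, instance, owner):
            if instance is None:
                return self
            return instance.__dict__[self.name]
        def __set__(self, instance, value):
            if not isinstance(value, self.kind):
                raise TypeError("expected %s" % self.kind.__name__)
            instance.__dict__[self.name] = value

    class Account:
        balance = Typed("balance", int)

    acct = Account()
    acct.balance = 10        # routed through Typed.__set__
    print(acct.balance)      # 10, via Typed.__get__

Ordinary methods, by contrast, are non-data descriptors and can be shadowed by instance attributes.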
This allows individual instances to\nacquire behaviors that differ from other instances of the same class.\n\nThe "property()" function is implemented as a data descriptor.\nAccordingly, instances cannot override the behavior of a property.\n\n\n__slots__\n---------\n\nBy default, instances of classes have a dictionary for attribute\nstorage. This wastes space for objects having very few instance\nvariables. The space consumption can become acute when creating large\nnumbers of instances.\n\nThe default can be overridden by defining *__slots__* in a class\ndefinition. The *__slots__* declaration takes a sequence of instance\nvariables and reserves just enough space in each instance to hold a\nvalue for each variable. Space is saved because *__dict__* is not\ncreated for each instance.\n\nobject.__slots__\n\n This class variable can be assigned a string, iterable, or sequence\n of strings with variable names used by instances. *__slots__*\n reserves space for the declared variables and prevents the\n automatic creation of *__dict__* and *__weakref__* for each\n instance.\n\n\nNotes on using *__slots__*\n~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n* When inheriting from a class without *__slots__*, the *__dict__*\n attribute of that class will always be accessible, so a *__slots__*\n definition in the subclass is meaningless.\n\n* Without a *__dict__* variable, instances cannot be assigned new\n variables not listed in the *__slots__* definition. Attempts to\n assign to an unlisted variable name raises "AttributeError". If\n dynamic assignment of new variables is desired, then add\n "\'__dict__\'" to the sequence of strings in the *__slots__*\n declaration.\n\n* Without a *__weakref__* variable for each instance, classes\n defining *__slots__* do not support weak references to its\n instances. If weak reference support is needed, then add\n "\'__weakref__\'" to the sequence of strings in the *__slots__*\n declaration.\n\n* *__slots__* are implemented at the class level by creating\n descriptors (*Implementing Descriptors*) for each variable name. As\n a result, class attributes cannot be used to set default values for\n instance variables defined by *__slots__*; otherwise, the class\n attribute would overwrite the descriptor assignment.\n\n* The action of a *__slots__* declaration is limited to the class\n where it is defined. As a result, subclasses will have a *__dict__*\n unless they also define *__slots__* (which must only contain names\n of any *additional* slots).\n\n* If a class defines a slot also defined in a base class, the\n instance variable defined by the base class slot is inaccessible\n (except by retrieving its descriptor directly from the base class).\n This renders the meaning of the program undefined. In the future, a\n check may be added to prevent this.\n\n* Nonempty *__slots__* does not work for classes derived from\n "variable-length" built-in types such as "int", "bytes" and "tuple".\n\n* Any non-string iterable may be assigned to *__slots__*. Mappings\n may also be used; however, in the future, special meaning may be\n assigned to the values corresponding to each key.\n\n* *__class__* assignment works only if both classes have the same\n *__slots__*.\n\n\nCustomizing class creation\n==========================\n\nBy default, classes are constructed using "type()". 
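A quick sketch of the *__slots__* behaviour summarised in the notes above; the class name is invented:

    class Slotted:
        __slots__ = ("x", "y")     # no per-instance __dict__ is created

    s = Slotted()
    s.x = 1                        # fine: "x" is a declared slot
    try:
        s.z = 1                    # "z" is not listed in __slots__
    except AttributeError as exc:
        print("AttributeError:", exc)

Returning to class creation: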
The class body is\nexecuted in a new namespace and the class name is bound locally to the\nresult of "type(name, bases, namespace)".\n\nThe class creation process can be customised by passing the\n"metaclass" keyword argument in the class definition line, or by\ninheriting from an existing class that included such an argument. In\nthe following example, both "MyClass" and "MySubclass" are instances\nof "Meta":\n\n class Meta(type):\n pass\n\n class MyClass(metaclass=Meta):\n pass\n\n class MySubclass(MyClass):\n pass\n\nAny other keyword arguments that are specified in the class definition\nare passed through to all metaclass operations described below.\n\nWhen a class definition is executed, the following steps occur:\n\n* the appropriate metaclass is determined\n\n* the class namespace is prepared\n\n* the class body is executed\n\n* the class object is created\n\n\nDetermining the appropriate metaclass\n-------------------------------------\n\nThe appropriate metaclass for a class definition is determined as\nfollows:\n\n* if no bases and no explicit metaclass are given, then "type()" is\n used\n\n* if an explicit metaclass is given and it is *not* an instance of\n "type()", then it is used directly as the metaclass\n\n* if an instance of "type()" is given as the explicit metaclass, or\n bases are defined, then the most derived metaclass is used\n\nThe most derived metaclass is selected from the explicitly specified\nmetaclass (if any) and the metaclasses (i.e. "type(cls)") of all\nspecified base classes. The most derived metaclass is one which is a\nsubtype of *all* of these candidate metaclasses. If none of the\ncandidate metaclasses meets that criterion, then the class definition\nwill fail with "TypeError".\n\n\nPreparing the class namespace\n-----------------------------\n\nOnce the appropriate metaclass has been identified, then the class\nnamespace is prepared. If the metaclass has a "__prepare__" attribute,\nit is called as "namespace = metaclass.__prepare__(name, bases,\n**kwds)" (where the additional keyword arguments, if any, come from\nthe class definition).\n\nIf the metaclass has no "__prepare__" attribute, then the class\nnamespace is initialised as an empty "dict()" instance.\n\nSee also: **PEP 3115** - Metaclasses in Python 3000\n\n Introduced the "__prepare__" namespace hook\n\n\nExecuting the class body\n------------------------\n\nThe class body is executed (approximately) as "exec(body, globals(),\nnamespace)". The key difference from a normal call to "exec()" is that\nlexical scoping allows the class body (including any methods) to\nreference names from the current and outer scopes when the class\ndefinition occurs inside a function.\n\nHowever, even when the class definition occurs inside the function,\nmethods defined inside the class still cannot see names defined at the\nclass scope. Class variables must be accessed through the first\nparameter of instance or class methods, and cannot be accessed at all\nfrom static methods.\n\n\nCreating the class object\n-------------------------\n\nOnce the class namespace has been populated by executing the class\nbody, the class object is created by calling "metaclass(name, bases,\nnamespace, **kwds)" (the additional keywords passed here are the same\nas those passed to "__prepare__").\n\nThis class object is the one that will be referenced by the zero-\nargument form of "super()". "__class__" is an implicit closure\nreference created by the compiler if any methods in a class body refer\nto either "__class__" or "super". 
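A small sketch of the zero-argument form of "super()" relying on that implicit "__class__" closure; the classes are invented:

    class Base:
        def greet(self):
            return "Base"

    class Child(Base):
        def greet(self):
            # super() with no arguments uses the implicit __class__ closure
            return super().greet() + " -> Child"

    print(Child().greet())   # Base -> Child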
This allows the zero argument form\nof "super()" to correctly identify the class being defined based on\nlexical scoping, while the class or instance that was used to make the\ncurrent call is identified based on the first argument passed to the\nmethod.\n\nAfter the class object is created, it is passed to the class\ndecorators included in the class definition (if any) and the resulting\nobject is bound in the local namespace as the defined class.\n\nSee also: **PEP 3135** - New super\n\n Describes the implicit "__class__" closure reference\n\n\nMetaclass example\n-----------------\n\nThe potential uses for metaclasses are boundless. Some ideas that have\nbeen explored include logging, interface checking, automatic\ndelegation, automatic property creation, proxies, frameworks, and\nautomatic resource locking/synchronization.\n\nHere is an example of a metaclass that uses an\n"collections.OrderedDict" to remember the order that class variables\nare defined:\n\n class OrderedClass(type):\n\n @classmethod\n def __prepare__(metacls, name, bases, **kwds):\n return collections.OrderedDict()\n\n def __new__(cls, name, bases, namespace, **kwds):\n result = type.__new__(cls, name, bases, dict(namespace))\n result.members = tuple(namespace)\n return result\n\n class A(metaclass=OrderedClass):\n def one(self): pass\n def two(self): pass\n def three(self): pass\n def four(self): pass\n\n >>> A.members\n (\'__module__\', \'one\', \'two\', \'three\', \'four\')\n\nWhen the class definition for *A* gets executed, the process begins\nwith calling the metaclass\'s "__prepare__()" method which returns an\nempty "collections.OrderedDict". That mapping records the methods and\nattributes of *A* as they are defined within the body of the class\nstatement. Once those definitions are executed, the ordered dictionary\nis fully populated and the metaclass\'s "__new__()" method gets\ninvoked. That method builds the new type and it saves the ordered\ndictionary keys in an attribute called "members".\n\n\nCustomizing instance and subclass checks\n========================================\n\nThe following methods are used to override the default behavior of the\n"isinstance()" and "issubclass()" built-in functions.\n\nIn particular, the metaclass "abc.ABCMeta" implements these methods in\norder to allow the addition of Abstract Base Classes (ABCs) as\n"virtual base classes" to any class or type (including built-in\ntypes), including other ABCs.\n\nclass.__instancecheck__(self, instance)\n\n Return true if *instance* should be considered a (direct or\n indirect) instance of *class*. If defined, called to implement\n "isinstance(instance, class)".\n\nclass.__subclasscheck__(self, subclass)\n\n Return true if *subclass* should be considered a (direct or\n indirect) subclass of *class*. If defined, called to implement\n "issubclass(subclass, class)".\n\nNote that these methods are looked up on the type (metaclass) of a\nclass. 
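A short sketch using "abc.ABC" (whose metaclass is "abc.ABCMeta") to register a virtual base class, which is what drives "__instancecheck__()" and "__subclasscheck__()" here; the example classes are invented:

    import abc

    class Drawable(abc.ABC):
        @abc.abstractmethod
        def draw(self):
            raise NotImplementedError

    class Circle:
        def draw(self):
            return "circle"

    Drawable.register(Circle)               # Circle becomes a "virtual" subclass
    print(isinstance(Circle(), Drawable))   # True, via ABCMeta.__instancecheck__()
    print(issubclass(Circle, Drawable))     # True, via ABCMeta.__subclasscheck__()

Note again that these hooks live on the metaclass, "ABCMeta" in this sketch;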
They cannot be defined as class methods in the actual class.\nThis is consistent with the lookup of special methods that are called\non instances, only in this case the instance is itself a class.\n\nSee also: **PEP 3119** - Introducing Abstract Base Classes\n\n Includes the specification for customizing "isinstance()" and\n "issubclass()" behavior through "__instancecheck__()" and\n "__subclasscheck__()", with motivation for this functionality in\n the context of adding Abstract Base Classes (see the "abc"\n module) to the language.\n\n\nEmulating callable objects\n==========================\n\nobject.__call__(self[, args...])\n\n Called when the instance is "called" as a function; if this method\n is defined, "x(arg1, arg2, ...)" is a shorthand for\n "x.__call__(arg1, arg2, ...)".\n\n\nEmulating container types\n=========================\n\nThe following methods can be defined to implement container objects.\nContainers usually are sequences (such as lists or tuples) or mappings\n(like dictionaries), but can represent other containers as well. The\nfirst set of methods is used either to emulate a sequence or to\nemulate a mapping; the difference is that for a sequence, the\nallowable keys should be the integers *k* for which "0 <= k < N" where\n*N* is the length of the sequence, or slice objects, which define a\nrange of items. It is also recommended that mappings provide the\nmethods "keys()", "values()", "items()", "get()", "clear()",\n"setdefault()", "pop()", "popitem()", "copy()", and "update()"\nbehaving similar to those for Python\'s standard dictionary objects.\nThe "collections" module provides a "MutableMapping" abstract base\nclass to help create those methods from a base set of "__getitem__()",\n"__setitem__()", "__delitem__()", and "keys()". Mutable sequences\nshould provide methods "append()", "count()", "index()", "extend()",\n"insert()", "pop()", "remove()", "reverse()" and "sort()", like Python\nstandard list objects. Finally, sequence types should implement\naddition (meaning concatenation) and multiplication (meaning\nrepetition) by defining the methods "__add__()", "__radd__()",\n"__iadd__()", "__mul__()", "__rmul__()" and "__imul__()" described\nbelow; they should not define other numerical operators. It is\nrecommended that both mappings and sequences implement the\n"__contains__()" method to allow efficient use of the "in" operator;\nfor mappings, "in" should search the mapping\'s keys; for sequences, it\nshould search through the values. It is further recommended that both\nmappings and sequences implement the "__iter__()" method to allow\nefficient iteration through the container; for mappings, "__iter__()"\nshould be the same as "keys()"; for sequences, it should iterate\nthrough the values.\n\nobject.__len__(self)\n\n Called to implement the built-in function "len()". Should return\n the length of the object, an integer ">=" 0. Also, an object that\n doesn\'t define a "__bool__()" method and whose "__len__()" method\n returns zero is considered to be false in a Boolean context.\n\nobject.__length_hint__(self)\n\n Called to implement "operator.length_hint()". Should return an\n estimated length for the object (which may be greater or less than\n the actual length). The length must be an integer ">=" 0. 
This\n method is purely an optimization and is never required for\n correctness.\n\n New in version 3.4.\n\nNote: Slicing is done exclusively with the following three methods.\n A call like\n\n a[1:2] = b\n\n is translated to\n\n a[slice(1, 2, None)] = b\n\n and so forth. Missing slice items are always filled in with "None".\n\nobject.__getitem__(self, key)\n\n Called to implement evaluation of "self[key]". For sequence types,\n the accepted keys should be integers and slice objects. Note that\n the special interpretation of negative indexes (if the class wishes\n to emulate a sequence type) is up to the "__getitem__()" method. If\n *key* is of an inappropriate type, "TypeError" may be raised; if of\n a value outside the set of indexes for the sequence (after any\n special interpretation of negative values), "IndexError" should be\n raised. For mapping types, if *key* is missing (not in the\n container), "KeyError" should be raised.\n\n Note: "for" loops expect that an "IndexError" will be raised for\n illegal indexes to allow proper detection of the end of the\n sequence.\n\nobject.__missing__(self, key)\n\n Called by "dict"."__getitem__()" to implement "self[key]" for dict\n subclasses when key is not in the dictionary.\n\nobject.__setitem__(self, key, value)\n\n Called to implement assignment to "self[key]". Same note as for\n "__getitem__()". This should only be implemented for mappings if\n the objects support changes to the values for keys, or if new keys\n can be added, or for sequences if elements can be replaced. The\n same exceptions should be raised for improper *key* values as for\n the "__getitem__()" method.\n\nobject.__delitem__(self, key)\n\n Called to implement deletion of "self[key]". Same note as for\n "__getitem__()". This should only be implemented for mappings if\n the objects support removal of keys, or for sequences if elements\n can be removed from the sequence. The same exceptions should be\n raised for improper *key* values as for the "__getitem__()" method.\n\nobject.__iter__(self)\n\n This method is called when an iterator is required for a container.\n This method should return a new iterator object that can iterate\n over all the objects in the container. For mappings, it should\n iterate over the keys of the container.\n\n Iterator objects also need to implement this method; they are\n required to return themselves. For more information on iterator\n objects, see *Iterator Types*.\n\nobject.__reversed__(self)\n\n Called (if present) by the "reversed()" built-in to implement\n reverse iteration. It should return a new iterator object that\n iterates over all the objects in the container in reverse order.\n\n If the "__reversed__()" method is not provided, the "reversed()"\n built-in will fall back to using the sequence protocol ("__len__()"\n and "__getitem__()"). Objects that support the sequence protocol\n should only provide "__reversed__()" if they can provide an\n implementation that is more efficient than the one provided by\n "reversed()".\n\nThe membership test operators ("in" and "not in") are normally\nimplemented as an iteration through a sequence. However, container\nobjects can supply the following special method with a more efficient\nimplementation, which also does not require the object be a sequence.\n\nobject.__contains__(self, item)\n\n Called to implement membership test operators. Should return true\n if *item* is in *self*, false otherwise. 
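A minimal sketch of "__contains__()"; the class is invented and deliberately not a sequence:

    class EvenNumbers:
        def __contains__(self, item):
            return isinstance(item, int) and item % 2 == 0

    print(4 in EvenNumbers())       # True
    print(3 not in EvenNumbers())   # True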
For mapping objects, this\n should consider the keys of the mapping rather than the values or\n the key-item pairs.\n\n For objects that don\'t define "__contains__()", the membership test\n first tries iteration via "__iter__()", then the old sequence\n iteration protocol via "__getitem__()", see *this section in the\n language reference*.\n\n\nEmulating numeric types\n=======================\n\nThe following methods can be defined to emulate numeric objects.\nMethods corresponding to operations that are not supported by the\nparticular kind of number implemented (e.g., bitwise operations for\nnon-integral numbers) should be left undefined.\n\nobject.__add__(self, other)\nobject.__sub__(self, other)\nobject.__mul__(self, other)\nobject.__truediv__(self, other)\nobject.__floordiv__(self, other)\nobject.__mod__(self, other)\nobject.__divmod__(self, other)\nobject.__pow__(self, other[, modulo])\nobject.__lshift__(self, other)\nobject.__rshift__(self, other)\nobject.__and__(self, other)\nobject.__xor__(self, other)\nobject.__or__(self, other)\n\n These methods are called to implement the binary arithmetic\n operations ("+", "-", "*", "/", "//", "%", "divmod()", "pow()",\n "**", "<<", ">>", "&", "^", "|"). For instance, to evaluate the\n expression "x + y", where *x* is an instance of a class that has an\n "__add__()" method, "x.__add__(y)" is called. The "__divmod__()"\n method should be the equivalent to using "__floordiv__()" and\n "__mod__()"; it should not be related to "__truediv__()". Note\n that "__pow__()" should be defined to accept an optional third\n argument if the ternary version of the built-in "pow()" function is\n to be supported.\n\n If one of those methods does not support the operation with the\n supplied arguments, it should return "NotImplemented".\n\nobject.__radd__(self, other)\nobject.__rsub__(self, other)\nobject.__rmul__(self, other)\nobject.__rtruediv__(self, other)\nobject.__rfloordiv__(self, other)\nobject.__rmod__(self, other)\nobject.__rdivmod__(self, other)\nobject.__rpow__(self, other)\nobject.__rlshift__(self, other)\nobject.__rrshift__(self, other)\nobject.__rand__(self, other)\nobject.__rxor__(self, other)\nobject.__ror__(self, other)\n\n These methods are called to implement the binary arithmetic\n operations ("+", "-", "*", "/", "//", "%", "divmod()", "pow()",\n "**", "<<", ">>", "&", "^", "|") with reflected (swapped) operands.\n These functions are only called if the left operand does not\n support the corresponding operation and the operands are of\n different types. [2] For instance, to evaluate the expression "x -\n y", where *y* is an instance of a class that has an "__rsub__()"\n method, "y.__rsub__(x)" is called if "x.__sub__(y)" returns\n *NotImplemented*.\n\n Note that ternary "pow()" will not try calling "__rpow__()" (the\n coercion rules would become too complicated).\n\n Note: If the right operand\'s type is a subclass of the left\n operand\'s type and that subclass provides the reflected method\n for the operation, this method will be called before the left\n operand\'s non-reflected method. 
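A brief sketch of the "__add__()"/"__radd__()"/"NotImplemented" interplay described above; the "Meters" class is invented:

    class Meters:
        def __init__(self, value):
            self.value = value
        def __add__(self, other):
            if isinstance(other, Meters):
                return Meters(self.value + other.value)
            return NotImplemented      # give the other operand a chance (__radd__)
        def __radd__(self, other):
            if other == 0:             # lets sum() start from its default 0
                return Meters(self.value)
            return NotImplemented

    total = sum([Meters(1), Meters(2)])
    print(total.value)   # 3

And, per the note just above, when the right operand's type is a subclass of the left operand's type, its reflected method is tried first.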
This behavior allows subclasses\n to override their ancestors\' operations.\n\nobject.__iadd__(self, other)\nobject.__isub__(self, other)\nobject.__imul__(self, other)\nobject.__itruediv__(self, other)\nobject.__ifloordiv__(self, other)\nobject.__imod__(self, other)\nobject.__ipow__(self, other[, modulo])\nobject.__ilshift__(self, other)\nobject.__irshift__(self, other)\nobject.__iand__(self, other)\nobject.__ixor__(self, other)\nobject.__ior__(self, other)\n\n These methods are called to implement the augmented arithmetic\n assignments ("+=", "-=", "*=", "/=", "//=", "%=", "**=", "<<=",\n ">>=", "&=", "^=", "|="). These methods should attempt to do the\n operation in-place (modifying *self*) and return the result (which\n could be, but does not have to be, *self*). If a specific method\n is not defined, the augmented assignment falls back to the normal\n methods. For instance, if *x* is an instance of a class with an\n "__iadd__()" method, "x += y" is equivalent to "x = x.__iadd__(y)"\n . Otherwise, "x.__add__(y)" and "y.__radd__(x)" are considered, as\n with the evaluation of "x + y". In certain situations, augmented\n assignment can result in unexpected errors (see *Why does\n a_tuple[i] += [\'item\'] raise an exception when the addition\n works?*), but this behavior is in fact part of the data model.\n\nobject.__neg__(self)\nobject.__pos__(self)\nobject.__abs__(self)\nobject.__invert__(self)\n\n Called to implement the unary arithmetic operations ("-", "+",\n "abs()" and "~").\n\nobject.__complex__(self)\nobject.__int__(self)\nobject.__float__(self)\nobject.__round__(self[, n])\n\n Called to implement the built-in functions "complex()", "int()",\n "float()" and "round()". Should return a value of the appropriate\n type.\n\nobject.__index__(self)\n\n Called to implement "operator.index()", and whenever Python needs\n to losslessly convert the numeric object to an integer object (such\n as in slicing, or in the built-in "bin()", "hex()" and "oct()"\n functions). Presence of this method indicates that the numeric\n object is an integer type. Must return an integer.\n\n Note: In order to have a coherent integer type class, when\n "__index__()" is defined "__int__()" should also be defined, and\n both should return the same value.\n\n\nWith Statement Context Managers\n===============================\n\nA *context manager* is an object that defines the runtime context to\nbe established when executing a "with" statement. The context manager\nhandles the entry into, and the exit from, the desired runtime context\nfor the execution of the block of code. Context managers are normally\ninvoked using the "with" statement (described in section *The with\nstatement*), but can also be used by directly invoking their methods.\n\nTypical uses of context managers include saving and restoring various\nkinds of global state, locking and unlocking resources, closing opened\nfiles, etc.\n\nFor more information on context managers, see *Context Manager Types*.\n\nobject.__enter__(self)\n\n Enter the runtime context related to this object. The "with"\n statement will bind this method\'s return value to the target(s)\n specified in the "as" clause of the statement, if any.\n\nobject.__exit__(self, exc_type, exc_value, traceback)\n\n Exit the runtime context related to this object. The parameters\n describe the exception that caused the context to be exited. 
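A small sketch of the "__enter__()"/"__exit__()" protocol; the "timed" class is invented:

    import time

    class timed:
        "Measures how long the body of the with statement takes."
        def __enter__(self):
            self.start = time.perf_counter()
            return self                      # bound to the "as" target, if any
        def __exit__(self, exc_type, exc_value, traceback):
            self.elapsed = time.perf_counter() - self.start
            return False                     # do not suppress exceptions

    with timed() as t:
        sum(range(100000))
    print(t.elapsed >= 0)    # True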
If the\n context was exited without an exception, all three arguments will\n be "None".\n\n If an exception is supplied, and the method wishes to suppress the\n exception (i.e., prevent it from being propagated), it should\n return a true value. Otherwise, the exception will be processed\n normally upon exit from this method.\n\n Note that "__exit__()" methods should not reraise the passed-in\n exception; this is the caller\'s responsibility.\n\nSee also: **PEP 0343** - The "with" statement\n\n The specification, background, and examples for the Python "with"\n statement.\n\n\nSpecial method lookup\n=====================\n\nFor custom classes, implicit invocations of special methods are only\nguaranteed to work correctly if defined on an object\'s type, not in\nthe object\'s instance dictionary. That behaviour is the reason why\nthe following code raises an exception:\n\n >>> class C:\n ... pass\n ...\n >>> c = C()\n >>> c.__len__ = lambda: 5\n >>> len(c)\n Traceback (most recent call last):\n File "", line 1, in \n TypeError: object of type \'C\' has no len()\n\nThe rationale behind this behaviour lies with a number of special\nmethods such as "__hash__()" and "__repr__()" that are implemented by\nall objects, including type objects. If the implicit lookup of these\nmethods used the conventional lookup process, they would fail when\ninvoked on the type object itself:\n\n >>> 1 .__hash__() == hash(1)\n True\n >>> int.__hash__() == hash(int)\n Traceback (most recent call last):\n File "", line 1, in \n TypeError: descriptor \'__hash__\' of \'int\' object needs an argument\n\nIncorrectly attempting to invoke an unbound method of a class in this\nway is sometimes referred to as \'metaclass confusion\', and is avoided\nby bypassing the instance when looking up special methods:\n\n >>> type(1).__hash__(1) == hash(1)\n True\n >>> type(int).__hash__(int) == hash(int)\n True\n\nIn addition to bypassing any instance attributes in the interest of\ncorrectness, implicit special method lookup generally also bypasses\nthe "__getattribute__()" method even of the object\'s metaclass:\n\n >>> class Meta(type):\n ... def __getattribute__(*args):\n ... print("Metaclass getattribute invoked")\n ... return type.__getattribute__(*args)\n ...\n >>> class C(object, metaclass=Meta):\n ... def __len__(self):\n ... return 10\n ... def __getattribute__(*args):\n ... print("Class getattribute invoked")\n ... return object.__getattribute__(*args)\n ...\n >>> c = C()\n >>> c.__len__() # Explicit lookup via instance\n Class getattribute invoked\n 10\n >>> type(c).__len__(c) # Explicit lookup via type\n Metaclass getattribute invoked\n 10\n >>> len(c) # Implicit lookup\n 10\n\nBypassing the "__getattribute__()" machinery in this fashion provides\nsignificant scope for speed optimisations within the interpreter, at\nthe cost of some flexibility in the handling of special methods (the\nspecial method *must* be set on the class object itself in order to be\nconsistently invoked by the interpreter).\n\n-[ Footnotes ]-\n\n[1] It *is* possible in some cases to change an object\'s type,\n under certain controlled conditions. 
It generally isn\'t a good\n idea though, since it can lead to some very strange behaviour if\n it is handled incorrectly.\n\n[2] For operands of the same type, it is assumed that if the non-\n reflected method (such as "__add__()") fails the operation is not\n supported, which is why the reflected method is not called.\n', - 'string-methods': u'\nString Methods\n**************\n\nStrings implement all of the *common* sequence operations, along with\nthe additional methods described below.\n\nStrings also support two styles of string formatting, one providing a\nlarge degree of flexibility and customization (see "str.format()",\n*Format String Syntax* and *String Formatting*) and the other based on\nC "printf" style formatting that handles a narrower range of types and\nis slightly harder to use correctly, but is often faster for the cases\nit can handle (*printf-style String Formatting*).\n\nThe *Text Processing Services* section of the standard library covers\na number of other modules that provide various text related utilities\n(including regular expression support in the "re" module).\n\nstr.capitalize()\n\n Return a copy of the string with its first character capitalized\n and the rest lowercased.\n\nstr.casefold()\n\n Return a casefolded copy of the string. Casefolded strings may be\n used for caseless matching.\n\n Casefolding is similar to lowercasing but more aggressive because\n it is intended to remove all case distinctions in a string. For\n example, the German lowercase letter "\'\xdf\'" is equivalent to ""ss"".\n Since it is already lowercase, "lower()" would do nothing to "\'\xdf\'";\n "casefold()" converts it to ""ss"".\n\n The casefolding algorithm is described in section 3.13 of the\n Unicode Standard.\n\n New in version 3.3.\n\nstr.center(width[, fillchar])\n\n Return centered in a string of length *width*. Padding is done\n using the specified *fillchar* (default is an ASCII space). The\n original string is returned if *width* is less than or equal to\n "len(s)".\n\nstr.count(sub[, start[, end]])\n\n Return the number of non-overlapping occurrences of substring *sub*\n in the range [*start*, *end*]. Optional arguments *start* and\n *end* are interpreted as in slice notation.\n\nstr.encode(encoding="utf-8", errors="strict")\n\n Return an encoded version of the string as a bytes object. Default\n encoding is "\'utf-8\'". *errors* may be given to set a different\n error handling scheme. The default for *errors* is "\'strict\'",\n meaning that encoding errors raise a "UnicodeError". Other possible\n values are "\'ignore\'", "\'replace\'", "\'xmlcharrefreplace\'",\n "\'backslashreplace\'" and any other name registered via\n "codecs.register_error()", see section *Error Handlers*. For a list\n of possible encodings, see section *Standard Encodings*.\n\n Changed in version 3.1: Support for keyword arguments added.\n\nstr.endswith(suffix[, start[, end]])\n\n Return "True" if the string ends with the specified *suffix*,\n otherwise return "False". *suffix* can also be a tuple of suffixes\n to look for. With optional *start*, test beginning at that\n position. With optional *end*, stop comparing at that position.\n\nstr.expandtabs(tabsize=8)\n\n Return a copy of the string where all tab characters are replaced\n by one or more spaces, depending on the current column and the\n given tab size. 
Tab positions occur every *tabsize* characters\n (default is 8, giving tab positions at columns 0, 8, 16 and so on).\n To expand the string, the current column is set to zero and the\n string is examined character by character. If the character is a\n tab ("\\t"), one or more space characters are inserted in the result\n until the current column is equal to the next tab position. (The\n tab character itself is not copied.) If the character is a newline\n ("\\n") or return ("\\r"), it is copied and the current column is\n reset to zero. Any other character is copied unchanged and the\n current column is incremented by one regardless of how the\n character is represented when printed.\n\n >>> \'01\\t012\\t0123\\t01234\'.expandtabs()\n \'01 012 0123 01234\'\n >>> \'01\\t012\\t0123\\t01234\'.expandtabs(4)\n \'01 012 0123 01234\'\n\nstr.find(sub[, start[, end]])\n\n Return the lowest index in the string where substring *sub* is\n found, such that *sub* is contained in the slice "s[start:end]".\n Optional arguments *start* and *end* are interpreted as in slice\n notation. Return "-1" if *sub* is not found.\n\n Note: The "find()" method should be used only if you need to know\n the position of *sub*. To check if *sub* is a substring or not,\n use the "in" operator:\n\n >>> \'Py\' in \'Python\'\n True\n\nstr.format(*args, **kwargs)\n\n Perform a string formatting operation. The string on which this\n method is called can contain literal text or replacement fields\n delimited by braces "{}". Each replacement field contains either\n the numeric index of a positional argument, or the name of a\n keyword argument. Returns a copy of the string where each\n replacement field is replaced with the string value of the\n corresponding argument.\n\n >>> "The sum of 1 + 2 is {0}".format(1+2)\n \'The sum of 1 + 2 is 3\'\n\n See *Format String Syntax* for a description of the various\n formatting options that can be specified in format strings.\n\nstr.format_map(mapping)\n\n Similar to "str.format(**mapping)", except that "mapping" is used\n directly and not copied to a "dict". This is useful if for example\n "mapping" is a dict subclass:\n\n >>> class Default(dict):\n ... def __missing__(self, key):\n ... return key\n ...\n >>> \'{name} was born in {country}\'.format_map(Default(name=\'Guido\'))\n \'Guido was born in country\'\n\n New in version 3.2.\n\nstr.index(sub[, start[, end]])\n\n Like "find()", but raise "ValueError" when the substring is not\n found.\n\nstr.isalnum()\n\n Return true if all characters in the string are alphanumeric and\n there is at least one character, false otherwise. A character "c"\n is alphanumeric if one of the following returns "True":\n "c.isalpha()", "c.isdecimal()", "c.isdigit()", or "c.isnumeric()".\n\nstr.isalpha()\n\n Return true if all characters in the string are alphabetic and\n there is at least one character, false otherwise. Alphabetic\n characters are those characters defined in the Unicode character\n database as "Letter", i.e., those with general category property\n being one of "Lm", "Lt", "Lu", "Ll", or "Lo". Note that this is\n different from the "Alphabetic" property defined in the Unicode\n Standard.\n\nstr.isdecimal()\n\n Return true if all characters in the string are decimal characters\n and there is at least one character, false otherwise. Decimal\n characters are those from general category "Nd". This category\n includes digit characters, and all characters that can be used to\n form decimal-radix numbers, e.g. 
U+0660, ARABIC-INDIC DIGIT ZERO.\n\nstr.isdigit()\n\n Return true if all characters in the string are digits and there is\n at least one character, false otherwise. Digits include decimal\n characters and digits that need special handling, such as the\n compatibility superscript digits. Formally, a digit is a character\n that has the property value Numeric_Type=Digit or\n Numeric_Type=Decimal.\n\nstr.isidentifier()\n\n Return true if the string is a valid identifier according to the\n language definition, section *Identifiers and keywords*.\n\n Use "keyword.iskeyword()" to test for reserved identifiers such as\n "def" and "class".\n\nstr.islower()\n\n Return true if all cased characters [4] in the string are lowercase\n and there is at least one cased character, false otherwise.\n\nstr.isnumeric()\n\n Return true if all characters in the string are numeric characters,\n and there is at least one character, false otherwise. Numeric\n characters include digit characters, and all characters that have\n the Unicode numeric value property, e.g. U+2155, VULGAR FRACTION\n ONE FIFTH. Formally, numeric characters are those with the\n property value Numeric_Type=Digit, Numeric_Type=Decimal or\n Numeric_Type=Numeric.\n\nstr.isprintable()\n\n Return true if all characters in the string are printable or the\n string is empty, false otherwise. Nonprintable characters are\n those characters defined in the Unicode character database as\n "Other" or "Separator", excepting the ASCII space (0x20) which is\n considered printable. (Note that printable characters in this\n context are those which should not be escaped when "repr()" is\n invoked on a string. It has no bearing on the handling of strings\n written to "sys.stdout" or "sys.stderr".)\n\nstr.isspace()\n\n Return true if there are only whitespace characters in the string\n and there is at least one character, false otherwise. Whitespace\n characters are those characters defined in the Unicode character\n database as "Other" or "Separator" and those with bidirectional\n property being one of "WS", "B", or "S".\n\nstr.istitle()\n\n Return true if the string is a titlecased string and there is at\n least one character, for example uppercase characters may only\n follow uncased characters and lowercase characters only cased ones.\n Return false otherwise.\n\nstr.isupper()\n\n Return true if all cased characters [4] in the string are uppercase\n and there is at least one cased character, false otherwise.\n\nstr.join(iterable)\n\n Return a string which is the concatenation of the strings in the\n *iterable* *iterable*. A "TypeError" will be raised if there are\n any non-string values in *iterable*, including "bytes" objects.\n The separator between elements is the string providing this method.\n\nstr.ljust(width[, fillchar])\n\n Return the string left justified in a string of length *width*.\n Padding is done using the specified *fillchar* (default is an ASCII\n space). The original string is returned if *width* is less than or\n equal to "len(s)".\n\nstr.lower()\n\n Return a copy of the string with all the cased characters [4]\n converted to lowercase.\n\n The lowercasing algorithm used is described in section 3.13 of the\n Unicode Standard.\n\nstr.lstrip([chars])\n\n Return a copy of the string with leading characters removed. The\n *chars* argument is a string specifying the set of characters to be\n removed. If omitted or "None", the *chars* argument defaults to\n removing whitespace. 
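The classification methods and "str.join()" described above can be exercised interactively; the session below is an illustrative sketch, not part of the reference text:

   >>> '123'.isdecimal(), '½'.isdecimal(), '½'.isnumeric()
   (True, False, True)
   >>> 'lambda'.isidentifier()          # keywords are still valid identifiers
   True
   >>> import keyword; keyword.iskeyword('lambda')
   True
   >>> ', '.join(['red', 'green', 'blue'])   # the separator provides the method
   'red, green, blue'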
The *chars* argument is not a prefix; rather,\n all combinations of its values are stripped:\n\n >>> \' spacious \'.lstrip()\n \'spacious \'\n >>> \'www.example.com\'.lstrip(\'cmowz.\')\n \'example.com\'\n\nstatic str.maketrans(x[, y[, z]])\n\n This static method returns a translation table usable for\n "str.translate()".\n\n If there is only one argument, it must be a dictionary mapping\n Unicode ordinals (integers) or characters (strings of length 1) to\n Unicode ordinals, strings (of arbitrary lengths) or None.\n Character keys will then be converted to ordinals.\n\n If there are two arguments, they must be strings of equal length,\n and in the resulting dictionary, each character in x will be mapped\n to the character at the same position in y. If there is a third\n argument, it must be a string, whose characters will be mapped to\n None in the result.\n\nstr.partition(sep)\n\n Split the string at the first occurrence of *sep*, and return a\n 3-tuple containing the part before the separator, the separator\n itself, and the part after the separator. If the separator is not\n found, return a 3-tuple containing the string itself, followed by\n two empty strings.\n\nstr.replace(old, new[, count])\n\n Return a copy of the string with all occurrences of substring *old*\n replaced by *new*. If the optional argument *count* is given, only\n the first *count* occurrences are replaced.\n\nstr.rfind(sub[, start[, end]])\n\n Return the highest index in the string where substring *sub* is\n found, such that *sub* is contained within "s[start:end]".\n Optional arguments *start* and *end* are interpreted as in slice\n notation. Return "-1" on failure.\n\nstr.rindex(sub[, start[, end]])\n\n Like "rfind()" but raises "ValueError" when the substring *sub* is\n not found.\n\nstr.rjust(width[, fillchar])\n\n Return the string right justified in a string of length *width*.\n Padding is done using the specified *fillchar* (default is an ASCII\n space). The original string is returned if *width* is less than or\n equal to "len(s)".\n\nstr.rpartition(sep)\n\n Split the string at the last occurrence of *sep*, and return a\n 3-tuple containing the part before the separator, the separator\n itself, and the part after the separator. If the separator is not\n found, return a 3-tuple containing two empty strings, followed by\n the string itself.\n\nstr.rsplit(sep=None, maxsplit=-1)\n\n Return a list of the words in the string, using *sep* as the\n delimiter string. If *maxsplit* is given, at most *maxsplit* splits\n are done, the *rightmost* ones. If *sep* is not specified or\n "None", any whitespace string is a separator. Except for splitting\n from the right, "rsplit()" behaves like "split()" which is\n described in detail below.\n\nstr.rstrip([chars])\n\n Return a copy of the string with trailing characters removed. The\n *chars* argument is a string specifying the set of characters to be\n removed. If omitted or "None", the *chars* argument defaults to\n removing whitespace. The *chars* argument is not a suffix; rather,\n all combinations of its values are stripped:\n\n >>> \' spacious \'.rstrip()\n \' spacious\'\n >>> \'mississippi\'.rstrip(\'ipz\')\n \'mississ\'\n\nstr.split(sep=None, maxsplit=-1)\n\n Return a list of the words in the string, using *sep* as the\n delimiter string. If *maxsplit* is given, at most *maxsplit*\n splits are done (thus, the list will have at most "maxsplit+1"\n elements). 
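A small illustrative session (not from the reference text) showing "partition()", "rpartition()" and "replace()" side by side:

   >>> 'key=value=more'.partition('=')      # split at the first '='
   ('key', '=', 'value=more')
   >>> 'key=value=more'.rpartition('=')     # split at the last '='
   ('key=value', '=', 'more')
   >>> 'no separator'.partition('=')        # separator not found
   ('no separator', '', '')
   >>> 'aaaa'.replace('a', 'b', 2)          # replace only the first two occurrences
   'bbaa'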
If *maxsplit* is not specified or "-1", then there is\n no limit on the number of splits (all possible splits are made).\n\n If *sep* is given, consecutive delimiters are not grouped together\n and are deemed to delimit empty strings (for example,\n "\'1,,2\'.split(\',\')" returns "[\'1\', \'\', \'2\']"). The *sep* argument\n may consist of multiple characters (for example,\n "\'1<>2<>3\'.split(\'<>\')" returns "[\'1\', \'2\', \'3\']"). Splitting an\n empty string with a specified separator returns "[\'\']".\n\n For example:\n\n >>> \'1,2,3\'.split(\',\')\n [\'1\', \'2\', \'3\']\n >>> \'1,2,3\'.split(\',\', maxsplit=1)\n [\'1\', \'2,3\']\n >>> \'1,2,,3,\'.split(\',\')\n [\'1\', \'2\', \'\', \'3\', \'\']\n\n If *sep* is not specified or is "None", a different splitting\n algorithm is applied: runs of consecutive whitespace are regarded\n as a single separator, and the result will contain no empty strings\n at the start or end if the string has leading or trailing\n whitespace. Consequently, splitting an empty string or a string\n consisting of just whitespace with a "None" separator returns "[]".\n\n For example:\n\n >>> \'1 2 3\'.split()\n [\'1\', \'2\', \'3\']\n >>> \'1 2 3\'.split(maxsplit=1)\n [\'1\', \'2 3\']\n >>> \' 1 2 3 \'.split()\n [\'1\', \'2\', \'3\']\n\nstr.splitlines([keepends])\n\n Return a list of the lines in the string, breaking at line\n boundaries. This method uses the *universal newlines* approach to\n splitting lines. Line breaks are not included in the resulting list\n unless *keepends* is given and true.\n\n For example:\n\n >>> \'ab c\\n\\nde fg\\rkl\\r\\n\'.splitlines()\n [\'ab c\', \'\', \'de fg\', \'kl\']\n >>> \'ab c\\n\\nde fg\\rkl\\r\\n\'.splitlines(keepends=True)\n [\'ab c\\n\', \'\\n\', \'de fg\\r\', \'kl\\r\\n\']\n\n Unlike "split()" when a delimiter string *sep* is given, this\n method returns an empty list for the empty string, and a terminal\n line break does not result in an extra line:\n\n >>> "".splitlines()\n []\n >>> "One line\\n".splitlines()\n [\'One line\']\n\n For comparison, "split(\'\\n\')" gives:\n\n >>> \'\'.split(\'\\n\')\n [\'\']\n >>> \'Two lines\\n\'.split(\'\\n\')\n [\'Two lines\', \'\']\n\nstr.startswith(prefix[, start[, end]])\n\n Return "True" if string starts with the *prefix*, otherwise return\n "False". *prefix* can also be a tuple of prefixes to look for.\n With optional *start*, test string beginning at that position.\n With optional *end*, stop comparing string at that position.\n\nstr.strip([chars])\n\n Return a copy of the string with the leading and trailing\n characters removed. The *chars* argument is a string specifying the\n set of characters to be removed. If omitted or "None", the *chars*\n argument defaults to removing whitespace. The *chars* argument is\n not a prefix or suffix; rather, all combinations of its values are\n stripped:\n\n >>> \' spacious \'.strip()\n \'spacious\'\n >>> \'www.example.com\'.strip(\'cmowz.\')\n \'example\'\n\nstr.swapcase()\n\n Return a copy of the string with uppercase characters converted to\n lowercase and vice versa. Note that it is not necessarily true that\n "s.swapcase().swapcase() == s".\n\nstr.title()\n\n Return a titlecased version of the string where words start with an\n uppercase character and the remaining characters are lowercase.\n\n For example:\n\n >>> \'Hello world\'.title()\n \'Hello World\'\n\n The algorithm uses a simple language-independent definition of a\n word as groups of consecutive letters. 
The definition works in\n many contexts but it means that apostrophes in contractions and\n possessives form word boundaries, which may not be the desired\n result:\n\n >>> "they\'re bill\'s friends from the UK".title()\n "They\'Re Bill\'S Friends From The Uk"\n\n A workaround for apostrophes can be constructed using regular\n expressions:\n\n >>> import re\n >>> def titlecase(s):\n ... return re.sub(r"[A-Za-z]+(\'[A-Za-z]+)?",\n ... lambda mo: mo.group(0)[0].upper() +\n ... mo.group(0)[1:].lower(),\n ... s)\n ...\n >>> titlecase("they\'re bill\'s friends.")\n "They\'re Bill\'s Friends."\n\nstr.translate(map)\n\n Return a copy of the *s* where all characters have been mapped\n through the *map* which must be a dictionary of Unicode ordinals\n (integers) to Unicode ordinals, strings or "None". Unmapped\n characters are left untouched. Characters mapped to "None" are\n deleted.\n\n You can use "str.maketrans()" to create a translation map from\n character-to-character mappings in different formats.\n\n Note: An even more flexible approach is to create a custom\n character mapping codec using the "codecs" module (see\n "encodings.cp1251" for an example).\n\nstr.upper()\n\n Return a copy of the string with all the cased characters [4]\n converted to uppercase. Note that "str.upper().isupper()" might be\n "False" if "s" contains uncased characters or if the Unicode\n category of the resulting character(s) is not "Lu" (Letter,\n uppercase), but e.g. "Lt" (Letter, titlecase).\n\n The uppercasing algorithm used is described in section 3.13 of the\n Unicode Standard.\n\nstr.zfill(width)\n\n Return a copy of the string left filled with ASCII "\'0\'" digits to\n make a string of length *width*. A leading sign prefix ("\'+\'"/"\'-\'"\n is handled by inserting the padding *after* the sign character\n rather than before. The original string is returned if *width* is\n less than or equal to "len(s)".\n\n For example:\n\n >>> "42".zfill(5)\n \'00042\'\n >>> "-42".zfill(5)\n \'-0042\'\n', + 'specialnames': u'\nSpecial method names\n********************\n\nA class can implement certain operations that are invoked by special\nsyntax (such as arithmetic operations or subscripting and slicing) by\ndefining methods with special names. This is Python\'s approach to\n*operator overloading*, allowing classes to define their own behavior\nwith respect to language operators. For instance, if a class defines\na method named "__getitem__()", and "x" is an instance of this class,\nthen "x[i]" is roughly equivalent to "type(x).__getitem__(x, i)".\nExcept where mentioned, attempts to execute an operation raise an\nexception when no appropriate method is defined (typically\n"AttributeError" or "TypeError").\n\nWhen implementing a class that emulates any built-in type, it is\nimportant that the emulation only be implemented to the degree that it\nmakes sense for the object being modelled. For example, some\nsequences may work well with retrieval of individual elements, but\nextracting a slice may not make sense. (One example of this is the\n"NodeList" interface in the W3C\'s Document Object Model.)\n\n\nBasic customization\n===================\n\nobject.__new__(cls[, ...])\n\n Called to create a new instance of class *cls*. "__new__()" is a\n static method (special-cased so you need not declare it as such)\n that takes the class of which an instance was requested as its\n first argument. The remaining arguments are those passed to the\n object constructor expression (the call to the class). 
The return\n value of "__new__()" should be the new object instance (usually an\n instance of *cls*).\n\n Typical implementations create a new instance of the class by\n invoking the superclass\'s "__new__()" method using\n "super(currentclass, cls).__new__(cls[, ...])" with appropriate\n arguments and then modifying the newly-created instance as\n necessary before returning it.\n\n If "__new__()" returns an instance of *cls*, then the new\n instance\'s "__init__()" method will be invoked like\n "__init__(self[, ...])", where *self* is the new instance and the\n remaining arguments are the same as were passed to "__new__()".\n\n If "__new__()" does not return an instance of *cls*, then the new\n instance\'s "__init__()" method will not be invoked.\n\n "__new__()" is intended mainly to allow subclasses of immutable\n types (like int, str, or tuple) to customize instance creation. It\n is also commonly overridden in custom metaclasses in order to\n customize class creation.\n\nobject.__init__(self[, ...])\n\n Called after the instance has been created (by "__new__()"), but\n before it is returned to the caller. The arguments are those\n passed to the class constructor expression. If a base class has an\n "__init__()" method, the derived class\'s "__init__()" method, if\n any, must explicitly call it to ensure proper initialization of the\n base class part of the instance; for example:\n "BaseClass.__init__(self, [args...])".\n\n Because "__new__()" and "__init__()" work together in constructing\n objects ("__new__()" to create it, and "__init__()" to customise\n it), no non-"None" value may be returned by "__init__()"; doing so\n will cause a "TypeError" to be raised at runtime.\n\nobject.__del__(self)\n\n Called when the instance is about to be destroyed. This is also\n called a destructor. If a base class has a "__del__()" method, the\n derived class\'s "__del__()" method, if any, must explicitly call it\n to ensure proper deletion of the base class part of the instance.\n Note that it is possible (though not recommended!) for the\n "__del__()" method to postpone destruction of the instance by\n creating a new reference to it. It may then be called at a later\n time when this new reference is deleted. It is not guaranteed that\n "__del__()" methods are called for objects that still exist when\n the interpreter exits.\n\n Note: "del x" doesn\'t directly call "x.__del__()" --- the former\n decrements the reference count for "x" by one, and the latter is\n only called when "x"\'s reference count reaches zero. Some common\n situations that may prevent the reference count of an object from\n going to zero include: circular references between objects (e.g.,\n a doubly-linked list or a tree data structure with parent and\n child pointers); a reference to the object on the stack frame of\n a function that caught an exception (the traceback stored in\n "sys.exc_info()[2]" keeps the stack frame alive); or a reference\n to the object on the stack frame that raised an unhandled\n exception in interactive mode (the traceback stored in\n "sys.last_traceback" keeps the stack frame alive). The first\n situation can only be remedied by explicitly breaking the cycles;\n the second can be resolved by freeing the reference to the\n traceback object when it is no longer useful, and the third can\n be resolved by storing "None" in "sys.last_traceback". Circular\n references which are garbage are detected and cleaned up when the\n cyclic garbage collector is enabled (it\'s on by default). 
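A minimal sketch of the "__new__()"/"__init__()" interplay described earlier in this section; the "Celsius" class below is hypothetical and only meant to show why the value of an immutable instance must be fixed in "__new__()":

   class Celsius(float):
       """A float constructed from a Fahrenheit reading."""

       def __new__(cls, fahrenheit):
           # The instance is immutable, so its value must be chosen here.
           return super().__new__(cls, (fahrenheit - 32) * 5 / 9)

       def __init__(self, fahrenheit):
           # __init__ only customises the already-created instance.
           self.fahrenheit = fahrenheit

   >>> c = Celsius(212)
   >>> float(c), c.fahrenheit
   (100.0, 212)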
Refer\n to the documentation for the "gc" module for more information\n about this topic.\n\n Warning: Due to the precarious circumstances under which\n "__del__()" methods are invoked, exceptions that occur during\n their execution are ignored, and a warning is printed to\n "sys.stderr" instead. Also, when "__del__()" is invoked in\n response to a module being deleted (e.g., when execution of the\n program is done), other globals referenced by the "__del__()"\n method may already have been deleted or in the process of being\n torn down (e.g. the import machinery shutting down). For this\n reason, "__del__()" methods should do the absolute minimum needed\n to maintain external invariants. Starting with version 1.5,\n Python guarantees that globals whose name begins with a single\n underscore are deleted from their module before other globals are\n deleted; if no other references to such globals exist, this may\n help in assuring that imported modules are still available at the\n time when the "__del__()" method is called.\n\nobject.__repr__(self)\n\n Called by the "repr()" built-in function to compute the "official"\n string representation of an object. If at all possible, this\n should look like a valid Python expression that could be used to\n recreate an object with the same value (given an appropriate\n environment). If this is not possible, a string of the form\n "<...some useful description...>" should be returned. The return\n value must be a string object. If a class defines "__repr__()" but\n not "__str__()", then "__repr__()" is also used when an "informal"\n string representation of instances of that class is required.\n\n This is typically used for debugging, so it is important that the\n representation is information-rich and unambiguous.\n\nobject.__str__(self)\n\n Called by "str(object)" and the built-in functions "format()" and\n "print()" to compute the "informal" or nicely printable string\n representation of an object. The return value must be a *string*\n object.\n\n This method differs from "object.__repr__()" in that there is no\n expectation that "__str__()" return a valid Python expression: a\n more convenient or concise representation can be used.\n\n The default implementation defined by the built-in type "object"\n calls "object.__repr__()".\n\nobject.__bytes__(self)\n\n Called by "bytes()" to compute a byte-string representation of an\n object. This should return a "bytes" object.\n\nobject.__format__(self, format_spec)\n\n Called by the "format()" built-in function (and by extension, the\n "str.format()" method of class "str") to produce a "formatted"\n string representation of an object. The "format_spec" argument is a\n string that contains a description of the formatting options\n desired. The interpretation of the "format_spec" argument is up to\n the type implementing "__format__()", however most classes will\n either delegate formatting to one of the built-in types, or use a\n similar formatting option syntax.\n\n See *Format Specification Mini-Language* for a description of the\n standard formatting syntax.\n\n The return value must be a string object.\n\n Changed in version 3.4: The __format__ method of "object" itself\n raises a "TypeError" if passed any non-empty string.\n\nobject.__lt__(self, other)\nobject.__le__(self, other)\nobject.__eq__(self, other)\nobject.__ne__(self, other)\nobject.__gt__(self, other)\nobject.__ge__(self, other)\n\n These are the so-called "rich comparison" methods. 
The\n   correspondence between operator symbols and method names is as\n   follows: "x<y" calls "x.__lt__(y)", "x<=y" calls "x.__le__(y)",\n   "x==y" calls "x.__eq__(y)", "x!=y" calls "x.__ne__(y)", "x>y" calls\n   "x.__gt__(y)", and "x>=y" calls "x.__ge__(y)".\n\n   A rich comparison method may return the singleton "NotImplemented"\n   if it does not implement the operation for a given pair of\n   arguments. By convention, "False" and "True" are returned for a\n   successful comparison. However, these methods can return any value,\n   so if the comparison operator is used in a Boolean context (e.g.,\n   in the condition of an "if" statement), Python will call "bool()"\n   on the value to determine if the result is true or false.\n\n   By default, "__ne__()" delegates to "__eq__()" and inverts the\n   result unless it is "NotImplemented". There are no other implied\n   relationships among the comparison operators, for example, the\n   truth of "(x<y or x==y)" does not imply "x<=y". To automatically\n   generate ordering operations from a single root operation, see\n   "functools.total_ordering()".\n\n   See the paragraph on "__hash__()" for some important notes on\n   creating *hashable* objects which support custom comparison\n   operations and are usable as dictionary keys.\n\n   There are no swapped-argument versions of these methods (to be used\n   when the left argument does not support the operation but the right\n   argument does); rather, "__lt__()" and "__gt__()" are each other\'s\n   reflection, and so are "__le__()" and "__ge__()".\n\n   Arguments to rich comparison methods are never coerced.\n\nobject.__hash__(self)\n\n   Called by built-in function "hash()" and for operations on members\n   of hashed collections including "set", "frozenset", and "dict".\n   "__hash__()" should return an integer. The only required property\n   is that objects which compare equal have the same hash value; it is\n   advised to somehow mix together (e.g. using exclusive or) the hash\n   values for the components of the object that also play a part in\n   comparison of objects.\n\n   If a class does not define an "__eq__()" method it should not\n   define a "__hash__()" operation either; if it defines "__eq__()"\n   but not "__hash__()", its instances will not be usable as items in\n   hashable collections. If a class that overrides "__eq__()" needs to\n   retain the hash implementation from a parent class, the interpreter\n   must be told this explicitly by setting "__hash__ =\n   <ParentClass>.__hash__".\n\n   If a class that does not override "__eq__()" wishes to suppress\n   hash support, it should include "__hash__ = None" in the class\n   definition. A class which defines its own "__hash__()" that\n   explicitly raises a "TypeError" would be incorrectly identified as\n   hashable by an "isinstance(obj, collections.Hashable)" call.\n\n   Note: By default, the "__hash__()" values of str, bytes and\n   datetime objects are "salted" with an unpredictable random value.\n   Although they remain constant within an individual Python\n   process, they are not predictable between repeated invocations of\n   Python. This is intended to provide protection against a denial-\n   of-service caused by carefully-chosen inputs that exploit the\n   worst case performance of a dict insertion, O(n^2) complexity.\n   See http://www.ocert.org/advisories/ocert-2011-003.html for\n   details. Changing hash values affects the iteration order of\n   dicts, sets and other mappings. Python has never made guarantees\n   about this ordering (and it typically varies between 32-bit and\n   64-bit builds). See also "PYTHONHASHSEED".\n\n   Changed in version 3.3: Hash randomization is enabled by default.\n\nobject.__bool__(self)\n\n   Called to implement truth value testing and the built-in operation\n   "bool()"; should return "False" or "True". When this method is not\n   defined, "__len__()" is called, if it is defined, and the object is\n   considered true if its result is nonzero. If a class defines\n   neither "__len__()" nor "__bool__()", all its instances are\n   considered true.\n\n\nCustomizing attribute access\n============================\n\nThe following methods can be defined to customize the meaning of\nattribute access (use of, assignment to, or deletion of "x.name") for\nclass instances.\n\nobject.__getattr__(self, name)\n\n   Called when an attribute lookup has not found the attribute in the\n   usual places (i.e. it is not an instance attribute nor is it found\n   in the class tree for "self"). "name" is the attribute name. This\n   method should return the (computed) attribute value or raise an\n   "AttributeError" exception.\n\n   Note that if the attribute is found through the normal mechanism,\n   "__getattr__()" is not called. (This is an intentional asymmetry\n   between "__getattr__()" and "__setattr__()".) This is done both for\n   efficiency reasons and because otherwise "__getattr__()" would have\n   no way to access other attributes of the instance. Note that at\n   least for instance variables, you can fake total control by not\n   inserting any values in the instance attribute dictionary (but\n   instead inserting them in another object). 
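The "fake total control" technique mentioned above can be sketched with a small delegating wrapper; the class name is made up for illustration:

   class LoudProxy:
       """Forward unknown attribute lookups to a wrapped object."""

       def __init__(self, target):
           self._target = target   # found by normal lookup, so no recursion

       def __getattr__(self, name):
           # Called only when normal lookup fails on the proxy itself.
           print('fetching %r' % name)
           return getattr(self._target, name)

   >>> p = LoudProxy([1, 2, 3])
   >>> p.append(4)
   fetching 'append'
   >>> p._target
   [1, 2, 3, 4]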
See the\n "__getattribute__()" method below for a way to actually get total\n control over attribute access.\n\nobject.__getattribute__(self, name)\n\n Called unconditionally to implement attribute accesses for\n instances of the class. If the class also defines "__getattr__()",\n the latter will not be called unless "__getattribute__()" either\n calls it explicitly or raises an "AttributeError". This method\n should return the (computed) attribute value or raise an\n "AttributeError" exception. In order to avoid infinite recursion in\n this method, its implementation should always call the base class\n method with the same name to access any attributes it needs, for\n example, "object.__getattribute__(self, name)".\n\n Note: This method may still be bypassed when looking up special\n methods as the result of implicit invocation via language syntax\n or built-in functions. See *Special method lookup*.\n\nobject.__setattr__(self, name, value)\n\n Called when an attribute assignment is attempted. This is called\n instead of the normal mechanism (i.e. store the value in the\n instance dictionary). *name* is the attribute name, *value* is the\n value to be assigned to it.\n\n If "__setattr__()" wants to assign to an instance attribute, it\n should call the base class method with the same name, for example,\n "object.__setattr__(self, name, value)".\n\nobject.__delattr__(self, name)\n\n Like "__setattr__()" but for attribute deletion instead of\n assignment. This should only be implemented if "del obj.name" is\n meaningful for the object.\n\nobject.__dir__(self)\n\n Called when "dir()" is called on the object. A sequence must be\n returned. "dir()" converts the returned sequence to a list and\n sorts it.\n\n\nImplementing Descriptors\n------------------------\n\nThe following methods only apply when an instance of the class\ncontaining the method (a so-called *descriptor* class) appears in an\n*owner* class (the descriptor must be in either the owner\'s class\ndictionary or in the class dictionary for one of its parents). In the\nexamples below, "the attribute" refers to the attribute whose name is\nthe key of the property in the owner class\' "__dict__".\n\nobject.__get__(self, instance, owner)\n\n Called to get the attribute of the owner class (class attribute\n access) or of an instance of that class (instance attribute\n access). *owner* is always the owner class, while *instance* is the\n instance that the attribute was accessed through, or "None" when\n the attribute is accessed through the *owner*. This method should\n return the (computed) attribute value or raise an "AttributeError"\n exception.\n\nobject.__set__(self, instance, value)\n\n Called to set the attribute on an instance *instance* of the owner\n class to a new value, *value*.\n\nobject.__delete__(self, instance)\n\n Called to delete the attribute on an instance *instance* of the\n owner class.\n\nThe attribute "__objclass__" is interpreted by the "inspect" module as\nspecifying the class where this object was defined (setting this\nappropriately can assist in runtime introspection of dynamic class\nattributes). 
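A compact sketch of the descriptor protocol described above; "Typed" and "Account" are invented names, and the example assumes the attribute value is kept in the instance dictionary under the same name:

   class Typed:
       """Data descriptor that only accepts values of a fixed type."""

       def __init__(self, name, kind):
           self.name, self.kind = name, kind

       def __get__(self, instance, owner):
           if instance is None:          # accessed on the class itself
               return self
           return instance.__dict__[self.name]

       def __set__(self, instance, value):
           if not isinstance(value, self.kind):
               raise TypeError('expected %s' % self.kind.__name__)
           instance.__dict__[self.name] = value

   class Account:
       balance = Typed('balance', int)

   >>> a = Account()
   >>> a.balance = 10
   >>> a.balance
   10
   >>> a.balance = 'lots'
   Traceback (most recent call last):
     ...
   TypeError: expected int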
For callables, it may indicate that an instance of the\ngiven type (or a subclass) is expected or required as the first\npositional argument (for example, CPython sets this attribute for\nunbound methods that are implemented in C).\n\n\nInvoking Descriptors\n--------------------\n\nIn general, a descriptor is an object attribute with "binding\nbehavior", one whose attribute access has been overridden by methods\nin the descriptor protocol: "__get__()", "__set__()", and\n"__delete__()". If any of those methods are defined for an object, it\nis said to be a descriptor.\n\nThe default behavior for attribute access is to get, set, or delete\nthe attribute from an object\'s dictionary. For instance, "a.x" has a\nlookup chain starting with "a.__dict__[\'x\']", then\n"type(a).__dict__[\'x\']", and continuing through the base classes of\n"type(a)" excluding metaclasses.\n\nHowever, if the looked-up value is an object defining one of the\ndescriptor methods, then Python may override the default behavior and\ninvoke the descriptor method instead. Where this occurs in the\nprecedence chain depends on which descriptor methods were defined and\nhow they were called.\n\nThe starting point for descriptor invocation is a binding, "a.x". How\nthe arguments are assembled depends on "a":\n\nDirect Call\n The simplest and least common call is when user code directly\n invokes a descriptor method: "x.__get__(a)".\n\nInstance Binding\n If binding to an object instance, "a.x" is transformed into the\n call: "type(a).__dict__[\'x\'].__get__(a, type(a))".\n\nClass Binding\n If binding to a class, "A.x" is transformed into the call:\n "A.__dict__[\'x\'].__get__(None, A)".\n\nSuper Binding\n If "a" is an instance of "super", then the binding "super(B,\n obj).m()" searches "obj.__class__.__mro__" for the base class "A"\n immediately preceding "B" and then invokes the descriptor with the\n call: "A.__dict__[\'m\'].__get__(obj, obj.__class__)".\n\nFor instance bindings, the precedence of descriptor invocation depends\non the which descriptor methods are defined. A descriptor can define\nany combination of "__get__()", "__set__()" and "__delete__()". If it\ndoes not define "__get__()", then accessing the attribute will return\nthe descriptor object itself unless there is a value in the object\'s\ninstance dictionary. If the descriptor defines "__set__()" and/or\n"__delete__()", it is a data descriptor; if it defines neither, it is\na non-data descriptor. Normally, data descriptors define both\n"__get__()" and "__set__()", while non-data descriptors have just the\n"__get__()" method. Data descriptors with "__set__()" and "__get__()"\ndefined always override a redefinition in an instance dictionary. In\ncontrast, non-data descriptors can be overridden by instances.\n\nPython methods (including "staticmethod()" and "classmethod()") are\nimplemented as non-data descriptors. Accordingly, instances can\nredefine and override methods. This allows individual instances to\nacquire behaviors that differ from other instances of the same class.\n\nThe "property()" function is implemented as a data descriptor.\nAccordingly, instances cannot override the behavior of a property.\n\n\n__slots__\n---------\n\nBy default, instances of classes have a dictionary for attribute\nstorage. This wastes space for objects having very few instance\nvariables. The space consumption can become acute when creating large\nnumbers of instances.\n\nThe default can be overridden by defining *__slots__* in a class\ndefinition. 
The *__slots__* declaration takes a sequence of instance\nvariables and reserves just enough space in each instance to hold a\nvalue for each variable. Space is saved because *__dict__* is not\ncreated for each instance.\n\nobject.__slots__\n\n This class variable can be assigned a string, iterable, or sequence\n of strings with variable names used by instances. *__slots__*\n reserves space for the declared variables and prevents the\n automatic creation of *__dict__* and *__weakref__* for each\n instance.\n\n\nNotes on using *__slots__*\n~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n* When inheriting from a class without *__slots__*, the *__dict__*\n attribute of that class will always be accessible, so a *__slots__*\n definition in the subclass is meaningless.\n\n* Without a *__dict__* variable, instances cannot be assigned new\n variables not listed in the *__slots__* definition. Attempts to\n assign to an unlisted variable name raises "AttributeError". If\n dynamic assignment of new variables is desired, then add\n "\'__dict__\'" to the sequence of strings in the *__slots__*\n declaration.\n\n* Without a *__weakref__* variable for each instance, classes\n defining *__slots__* do not support weak references to its\n instances. If weak reference support is needed, then add\n "\'__weakref__\'" to the sequence of strings in the *__slots__*\n declaration.\n\n* *__slots__* are implemented at the class level by creating\n descriptors (*Implementing Descriptors*) for each variable name. As\n a result, class attributes cannot be used to set default values for\n instance variables defined by *__slots__*; otherwise, the class\n attribute would overwrite the descriptor assignment.\n\n* The action of a *__slots__* declaration is limited to the class\n where it is defined. As a result, subclasses will have a *__dict__*\n unless they also define *__slots__* (which must only contain names\n of any *additional* slots).\n\n* If a class defines a slot also defined in a base class, the\n instance variable defined by the base class slot is inaccessible\n (except by retrieving its descriptor directly from the base class).\n This renders the meaning of the program undefined. In the future, a\n check may be added to prevent this.\n\n* Nonempty *__slots__* does not work for classes derived from\n "variable-length" built-in types such as "int", "bytes" and "tuple".\n\n* Any non-string iterable may be assigned to *__slots__*. Mappings\n may also be used; however, in the future, special meaning may be\n assigned to the values corresponding to each key.\n\n* *__class__* assignment works only if both classes have the same\n *__slots__*.\n\n\nCustomizing class creation\n==========================\n\nBy default, classes are constructed using "type()". The class body is\nexecuted in a new namespace and the class name is bound locally to the\nresult of "type(name, bases, namespace)".\n\nThe class creation process can be customised by passing the\n"metaclass" keyword argument in the class definition line, or by\ninheriting from an existing class that included such an argument. 
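A short sketch of *__slots__* in action (the class name is illustrative only):

   class PointXY:
       __slots__ = ('x', 'y')            # no per-instance __dict__ is created

       def __init__(self, x, y):
           self.x, self.y = x, y

   >>> p = PointXY(1, 2)
   >>> p.z = 3                           # not listed in __slots__
   Traceback (most recent call last):
     ...
   AttributeError: 'PointXY' object has no attribute 'z'
   >>> hasattr(p, '__dict__')
   False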
In\nthe following example, both "MyClass" and "MySubclass" are instances\nof "Meta":\n\n class Meta(type):\n pass\n\n class MyClass(metaclass=Meta):\n pass\n\n class MySubclass(MyClass):\n pass\n\nAny other keyword arguments that are specified in the class definition\nare passed through to all metaclass operations described below.\n\nWhen a class definition is executed, the following steps occur:\n\n* the appropriate metaclass is determined\n\n* the class namespace is prepared\n\n* the class body is executed\n\n* the class object is created\n\n\nDetermining the appropriate metaclass\n-------------------------------------\n\nThe appropriate metaclass for a class definition is determined as\nfollows:\n\n* if no bases and no explicit metaclass are given, then "type()" is\n used\n\n* if an explicit metaclass is given and it is *not* an instance of\n "type()", then it is used directly as the metaclass\n\n* if an instance of "type()" is given as the explicit metaclass, or\n bases are defined, then the most derived metaclass is used\n\nThe most derived metaclass is selected from the explicitly specified\nmetaclass (if any) and the metaclasses (i.e. "type(cls)") of all\nspecified base classes. The most derived metaclass is one which is a\nsubtype of *all* of these candidate metaclasses. If none of the\ncandidate metaclasses meets that criterion, then the class definition\nwill fail with "TypeError".\n\n\nPreparing the class namespace\n-----------------------------\n\nOnce the appropriate metaclass has been identified, then the class\nnamespace is prepared. If the metaclass has a "__prepare__" attribute,\nit is called as "namespace = metaclass.__prepare__(name, bases,\n**kwds)" (where the additional keyword arguments, if any, come from\nthe class definition).\n\nIf the metaclass has no "__prepare__" attribute, then the class\nnamespace is initialised as an empty "dict()" instance.\n\nSee also: **PEP 3115** - Metaclasses in Python 3000\n\n Introduced the "__prepare__" namespace hook\n\n\nExecuting the class body\n------------------------\n\nThe class body is executed (approximately) as "exec(body, globals(),\nnamespace)". The key difference from a normal call to "exec()" is that\nlexical scoping allows the class body (including any methods) to\nreference names from the current and outer scopes when the class\ndefinition occurs inside a function.\n\nHowever, even when the class definition occurs inside the function,\nmethods defined inside the class still cannot see names defined at the\nclass scope. Class variables must be accessed through the first\nparameter of instance or class methods, and cannot be accessed at all\nfrom static methods.\n\n\nCreating the class object\n-------------------------\n\nOnce the class namespace has been populated by executing the class\nbody, the class object is created by calling "metaclass(name, bases,\nnamespace, **kwds)" (the additional keywords passed here are the same\nas those passed to "__prepare__").\n\nThis class object is the one that will be referenced by the zero-\nargument form of "super()". "__class__" is an implicit closure\nreference created by the compiler if any methods in a class body refer\nto either "__class__" or "super". 
This allows the zero argument form\nof "super()" to correctly identify the class being defined based on\nlexical scoping, while the class or instance that was used to make the\ncurrent call is identified based on the first argument passed to the\nmethod.\n\nAfter the class object is created, it is passed to the class\ndecorators included in the class definition (if any) and the resulting\nobject is bound in the local namespace as the defined class.\n\nSee also: **PEP 3135** - New super\n\n Describes the implicit "__class__" closure reference\n\n\nMetaclass example\n-----------------\n\nThe potential uses for metaclasses are boundless. Some ideas that have\nbeen explored include logging, interface checking, automatic\ndelegation, automatic property creation, proxies, frameworks, and\nautomatic resource locking/synchronization.\n\nHere is an example of a metaclass that uses an\n"collections.OrderedDict" to remember the order that class variables\nare defined:\n\n class OrderedClass(type):\n\n @classmethod\n def __prepare__(metacls, name, bases, **kwds):\n return collections.OrderedDict()\n\n def __new__(cls, name, bases, namespace, **kwds):\n result = type.__new__(cls, name, bases, dict(namespace))\n result.members = tuple(namespace)\n return result\n\n class A(metaclass=OrderedClass):\n def one(self): pass\n def two(self): pass\n def three(self): pass\n def four(self): pass\n\n >>> A.members\n (\'__module__\', \'one\', \'two\', \'three\', \'four\')\n\nWhen the class definition for *A* gets executed, the process begins\nwith calling the metaclass\'s "__prepare__()" method which returns an\nempty "collections.OrderedDict". That mapping records the methods and\nattributes of *A* as they are defined within the body of the class\nstatement. Once those definitions are executed, the ordered dictionary\nis fully populated and the metaclass\'s "__new__()" method gets\ninvoked. That method builds the new type and it saves the ordered\ndictionary keys in an attribute called "members".\n\n\nCustomizing instance and subclass checks\n========================================\n\nThe following methods are used to override the default behavior of the\n"isinstance()" and "issubclass()" built-in functions.\n\nIn particular, the metaclass "abc.ABCMeta" implements these methods in\norder to allow the addition of Abstract Base Classes (ABCs) as\n"virtual base classes" to any class or type (including built-in\ntypes), including other ABCs.\n\nclass.__instancecheck__(self, instance)\n\n Return true if *instance* should be considered a (direct or\n indirect) instance of *class*. If defined, called to implement\n "isinstance(instance, class)".\n\nclass.__subclasscheck__(self, subclass)\n\n Return true if *subclass* should be considered a (direct or\n indirect) subclass of *class*. If defined, called to implement\n "issubclass(subclass, class)".\n\nNote that these methods are looked up on the type (metaclass) of a\nclass. 
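For illustration only, a deliberately small metaclass (all names invented) that customises "isinstance()" through "__instancecheck__()":

   class DuckMeta(type):
       def __instancecheck__(cls, instance):
           # isinstance(obj, Duck) consults the metaclass, not Duck itself.
           return callable(getattr(instance, 'quack', None))

   class Duck(metaclass=DuckMeta):
       pass

   class Mallard:
       def quack(self):
           return 'quack'

   >>> isinstance(Mallard(), Duck)
   True
   >>> isinstance(42, Duck)
   False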
They cannot be defined as class methods in the actual class.\nThis is consistent with the lookup of special methods that are called\non instances, only in this case the instance is itself a class.\n\nSee also: **PEP 3119** - Introducing Abstract Base Classes\n\n Includes the specification for customizing "isinstance()" and\n "issubclass()" behavior through "__instancecheck__()" and\n "__subclasscheck__()", with motivation for this functionality in\n the context of adding Abstract Base Classes (see the "abc"\n module) to the language.\n\n\nEmulating callable objects\n==========================\n\nobject.__call__(self[, args...])\n\n Called when the instance is "called" as a function; if this method\n is defined, "x(arg1, arg2, ...)" is a shorthand for\n "x.__call__(arg1, arg2, ...)".\n\n\nEmulating container types\n=========================\n\nThe following methods can be defined to implement container objects.\nContainers usually are sequences (such as lists or tuples) or mappings\n(like dictionaries), but can represent other containers as well. The\nfirst set of methods is used either to emulate a sequence or to\nemulate a mapping; the difference is that for a sequence, the\nallowable keys should be the integers *k* for which "0 <= k < N" where\n*N* is the length of the sequence, or slice objects, which define a\nrange of items. It is also recommended that mappings provide the\nmethods "keys()", "values()", "items()", "get()", "clear()",\n"setdefault()", "pop()", "popitem()", "copy()", and "update()"\nbehaving similar to those for Python\'s standard dictionary objects.\nThe "collections" module provides a "MutableMapping" abstract base\nclass to help create those methods from a base set of "__getitem__()",\n"__setitem__()", "__delitem__()", and "keys()". Mutable sequences\nshould provide methods "append()", "count()", "index()", "extend()",\n"insert()", "pop()", "remove()", "reverse()" and "sort()", like Python\nstandard list objects. Finally, sequence types should implement\naddition (meaning concatenation) and multiplication (meaning\nrepetition) by defining the methods "__add__()", "__radd__()",\n"__iadd__()", "__mul__()", "__rmul__()" and "__imul__()" described\nbelow; they should not define other numerical operators. It is\nrecommended that both mappings and sequences implement the\n"__contains__()" method to allow efficient use of the "in" operator;\nfor mappings, "in" should search the mapping\'s keys; for sequences, it\nshould search through the values. It is further recommended that both\nmappings and sequences implement the "__iter__()" method to allow\nefficient iteration through the container; for mappings, "__iter__()"\nshould be the same as "keys()"; for sequences, it should iterate\nthrough the values.\n\nobject.__len__(self)\n\n Called to implement the built-in function "len()". Should return\n the length of the object, an integer ">=" 0. Also, an object that\n doesn\'t define a "__bool__()" method and whose "__len__()" method\n returns zero is considered to be false in a Boolean context.\n\nobject.__length_hint__(self)\n\n Called to implement "operator.length_hint()". Should return an\n estimated length for the object (which may be greater or less than\n the actual length). The length must be an integer ">=" 0. 
This\n method is purely an optimization and is never required for\n correctness.\n\n New in version 3.4.\n\nNote: Slicing is done exclusively with the following three methods.\n A call like\n\n a[1:2] = b\n\n is translated to\n\n a[slice(1, 2, None)] = b\n\n and so forth. Missing slice items are always filled in with "None".\n\nobject.__getitem__(self, key)\n\n Called to implement evaluation of "self[key]". For sequence types,\n the accepted keys should be integers and slice objects. Note that\n the special interpretation of negative indexes (if the class wishes\n to emulate a sequence type) is up to the "__getitem__()" method. If\n *key* is of an inappropriate type, "TypeError" may be raised; if of\n a value outside the set of indexes for the sequence (after any\n special interpretation of negative values), "IndexError" should be\n raised. For mapping types, if *key* is missing (not in the\n container), "KeyError" should be raised.\n\n Note: "for" loops expect that an "IndexError" will be raised for\n illegal indexes to allow proper detection of the end of the\n sequence.\n\nobject.__missing__(self, key)\n\n Called by "dict"."__getitem__()" to implement "self[key]" for dict\n subclasses when key is not in the dictionary.\n\nobject.__setitem__(self, key, value)\n\n Called to implement assignment to "self[key]". Same note as for\n "__getitem__()". This should only be implemented for mappings if\n the objects support changes to the values for keys, or if new keys\n can be added, or for sequences if elements can be replaced. The\n same exceptions should be raised for improper *key* values as for\n the "__getitem__()" method.\n\nobject.__delitem__(self, key)\n\n Called to implement deletion of "self[key]". Same note as for\n "__getitem__()". This should only be implemented for mappings if\n the objects support removal of keys, or for sequences if elements\n can be removed from the sequence. The same exceptions should be\n raised for improper *key* values as for the "__getitem__()" method.\n\nobject.__iter__(self)\n\n This method is called when an iterator is required for a container.\n This method should return a new iterator object that can iterate\n over all the objects in the container. For mappings, it should\n iterate over the keys of the container.\n\n Iterator objects also need to implement this method; they are\n required to return themselves. For more information on iterator\n objects, see *Iterator Types*.\n\nobject.__reversed__(self)\n\n Called (if present) by the "reversed()" built-in to implement\n reverse iteration. It should return a new iterator object that\n iterates over all the objects in the container in reverse order.\n\n If the "__reversed__()" method is not provided, the "reversed()"\n built-in will fall back to using the sequence protocol ("__len__()"\n and "__getitem__()"). Objects that support the sequence protocol\n should only provide "__reversed__()" if they can provide an\n implementation that is more efficient than the one provided by\n "reversed()".\n\nThe membership test operators ("in" and "not in") are normally\nimplemented as an iteration through a sequence. However, container\nobjects can supply the following special method with a more efficient\nimplementation, which also does not require the object be a sequence.\n\nobject.__contains__(self, item)\n\n Called to implement membership test operators. Should return true\n if *item* is in *self*, false otherwise. 
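A small read-only sequence illustrating several of the methods above ("Countdown" is a made-up example, not part of the reference text); note how raising "IndexError" lets iteration and "reversed()" fall back to the sequence protocol:

   class Countdown:
       """Read-only sequence counting down from n to 1."""

       def __init__(self, n):
           self.n = n

       def __len__(self):
           return self.n

       def __getitem__(self, index):
           if not 0 <= index < self.n:
               raise IndexError(index)   # ends 'for' loops and reversed()
           return self.n - index

       def __contains__(self, item):
           return isinstance(item, int) and 1 <= item <= self.n

   >>> c = Countdown(3)
   >>> len(c), list(c), list(reversed(c)), 2 in c
   (3, [3, 2, 1], [1, 2, 3], True)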
For mapping objects, this\n should consider the keys of the mapping rather than the values or\n the key-item pairs.\n\n For objects that don\'t define "__contains__()", the membership test\n first tries iteration via "__iter__()", then the old sequence\n iteration protocol via "__getitem__()", see *this section in the\n language reference*.\n\n\nEmulating numeric types\n=======================\n\nThe following methods can be defined to emulate numeric objects.\nMethods corresponding to operations that are not supported by the\nparticular kind of number implemented (e.g., bitwise operations for\nnon-integral numbers) should be left undefined.\n\nobject.__add__(self, other)\nobject.__sub__(self, other)\nobject.__mul__(self, other)\nobject.__truediv__(self, other)\nobject.__floordiv__(self, other)\nobject.__mod__(self, other)\nobject.__divmod__(self, other)\nobject.__pow__(self, other[, modulo])\nobject.__lshift__(self, other)\nobject.__rshift__(self, other)\nobject.__and__(self, other)\nobject.__xor__(self, other)\nobject.__or__(self, other)\n\n These methods are called to implement the binary arithmetic\n operations ("+", "-", "*", "/", "//", "%", "divmod()", "pow()",\n "**", "<<", ">>", "&", "^", "|"). For instance, to evaluate the\n expression "x + y", where *x* is an instance of a class that has an\n "__add__()" method, "x.__add__(y)" is called. The "__divmod__()"\n method should be the equivalent to using "__floordiv__()" and\n "__mod__()"; it should not be related to "__truediv__()". Note\n that "__pow__()" should be defined to accept an optional third\n argument if the ternary version of the built-in "pow()" function is\n to be supported.\n\n If one of those methods does not support the operation with the\n supplied arguments, it should return "NotImplemented".\n\nobject.__radd__(self, other)\nobject.__rsub__(self, other)\nobject.__rmul__(self, other)\nobject.__rtruediv__(self, other)\nobject.__rfloordiv__(self, other)\nobject.__rmod__(self, other)\nobject.__rdivmod__(self, other)\nobject.__rpow__(self, other)\nobject.__rlshift__(self, other)\nobject.__rrshift__(self, other)\nobject.__rand__(self, other)\nobject.__rxor__(self, other)\nobject.__ror__(self, other)\n\n These methods are called to implement the binary arithmetic\n operations ("+", "-", "*", "/", "//", "%", "divmod()", "pow()",\n "**", "<<", ">>", "&", "^", "|") with reflected (swapped) operands.\n These functions are only called if the left operand does not\n support the corresponding operation and the operands are of\n different types. [2] For instance, to evaluate the expression "x -\n y", where *y* is an instance of a class that has an "__rsub__()"\n method, "y.__rsub__(x)" is called if "x.__sub__(y)" returns\n *NotImplemented*.\n\n Note that ternary "pow()" will not try calling "__rpow__()" (the\n coercion rules would become too complicated).\n\n Note: If the right operand\'s type is a subclass of the left\n operand\'s type and that subclass provides the reflected method\n for the operation, this method will be called before the left\n operand\'s non-reflected method. 
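A sketch of the reflected-operand protocol in the surrounding entry, using an invented "Money" class:

   class Money:
       def __init__(self, cents):
           self.cents = cents

       def __add__(self, other):
           if isinstance(other, Money):
               return Money(self.cents + other.cents)
           if isinstance(other, int):    # allow Money + int (cents)
               return Money(self.cents + other)
           return NotImplemented         # let the other operand try

       def __radd__(self, other):
           # Used for int + Money once int.__add__ returns NotImplemented.
           return self.__add__(other)

       def __repr__(self):
           return 'Money(%d)' % self.cents

   >>> Money(100) + 50
   Money(150)
   >>> 25 + Money(100)
   Money(125)
   >>> Money(100) + 'x'
   Traceback (most recent call last):
     ...
   TypeError: unsupported operand type(s) for +: 'Money' and 'str'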
This behavior allows subclasses\n to override their ancestors\' operations.\n\nobject.__iadd__(self, other)\nobject.__isub__(self, other)\nobject.__imul__(self, other)\nobject.__itruediv__(self, other)\nobject.__ifloordiv__(self, other)\nobject.__imod__(self, other)\nobject.__ipow__(self, other[, modulo])\nobject.__ilshift__(self, other)\nobject.__irshift__(self, other)\nobject.__iand__(self, other)\nobject.__ixor__(self, other)\nobject.__ior__(self, other)\n\n These methods are called to implement the augmented arithmetic\n assignments ("+=", "-=", "*=", "/=", "//=", "%=", "**=", "<<=",\n ">>=", "&=", "^=", "|="). These methods should attempt to do the\n operation in-place (modifying *self*) and return the result (which\n could be, but does not have to be, *self*). If a specific method\n is not defined, the augmented assignment falls back to the normal\n methods. For instance, if *x* is an instance of a class with an\n "__iadd__()" method, "x += y" is equivalent to "x = x.__iadd__(y)"\n . Otherwise, "x.__add__(y)" and "y.__radd__(x)" are considered, as\n with the evaluation of "x + y". In certain situations, augmented\n assignment can result in unexpected errors (see *Why does\n a_tuple[i] += [\'item\'] raise an exception when the addition\n works?*), but this behavior is in fact part of the data model.\n\nobject.__neg__(self)\nobject.__pos__(self)\nobject.__abs__(self)\nobject.__invert__(self)\n\n Called to implement the unary arithmetic operations ("-", "+",\n "abs()" and "~").\n\nobject.__complex__(self)\nobject.__int__(self)\nobject.__float__(self)\nobject.__round__(self[, n])\n\n Called to implement the built-in functions "complex()", "int()",\n "float()" and "round()". Should return a value of the appropriate\n type.\n\nobject.__index__(self)\n\n Called to implement "operator.index()", and whenever Python needs\n to losslessly convert the numeric object to an integer object (such\n as in slicing, or in the built-in "bin()", "hex()" and "oct()"\n functions). Presence of this method indicates that the numeric\n object is an integer type. Must return an integer.\n\n Note: In order to have a coherent integer type class, when\n "__index__()" is defined "__int__()" should also be defined, and\n both should return the same value.\n\n\nWith Statement Context Managers\n===============================\n\nA *context manager* is an object that defines the runtime context to\nbe established when executing a "with" statement. The context manager\nhandles the entry into, and the exit from, the desired runtime context\nfor the execution of the block of code. Context managers are normally\ninvoked using the "with" statement (described in section *The with\nstatement*), but can also be used by directly invoking their methods.\n\nTypical uses of context managers include saving and restoring various\nkinds of global state, locking and unlocking resources, closing opened\nfiles, etc.\n\nFor more information on context managers, see *Context Manager Types*.\n\nobject.__enter__(self)\n\n Enter the runtime context related to this object. The "with"\n statement will bind this method\'s return value to the target(s)\n specified in the "as" clause of the statement, if any.\n\nobject.__exit__(self, exc_type, exc_value, traceback)\n\n Exit the runtime context related to this object. The parameters\n describe the exception that caused the context to be exited. 
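A minimal context manager following the "__enter__()"/"__exit__()" protocol described here; the "Timer" class is a hypothetical example:

   import time

   class Timer:
       """Measure the wall-clock time spent inside a 'with' block."""

       def __enter__(self):
           self.start = time.time()
           return self                   # bound to the 'as' target, if any

       def __exit__(self, exc_type, exc_value, traceback):
           self.elapsed = time.time() - self.start
           return False                  # never suppress exceptions

   with Timer() as t:
       sum(range(100000))
   print('took %.6f seconds' % t.elapsed)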
If the\n context was exited without an exception, all three arguments will\n be "None".\n\n If an exception is supplied, and the method wishes to suppress the\n exception (i.e., prevent it from being propagated), it should\n return a true value. Otherwise, the exception will be processed\n normally upon exit from this method.\n\n Note that "__exit__()" methods should not reraise the passed-in\n exception; this is the caller\'s responsibility.\n\nSee also: **PEP 0343** - The "with" statement\n\n The specification, background, and examples for the Python "with"\n statement.\n\n\nSpecial method lookup\n=====================\n\nFor custom classes, implicit invocations of special methods are only\nguaranteed to work correctly if defined on an object\'s type, not in\nthe object\'s instance dictionary. That behaviour is the reason why\nthe following code raises an exception:\n\n >>> class C:\n ... pass\n ...\n >>> c = C()\n >>> c.__len__ = lambda: 5\n >>> len(c)\n Traceback (most recent call last):\n File "", line 1, in \n TypeError: object of type \'C\' has no len()\n\nThe rationale behind this behaviour lies with a number of special\nmethods such as "__hash__()" and "__repr__()" that are implemented by\nall objects, including type objects. If the implicit lookup of these\nmethods used the conventional lookup process, they would fail when\ninvoked on the type object itself:\n\n >>> 1 .__hash__() == hash(1)\n True\n >>> int.__hash__() == hash(int)\n Traceback (most recent call last):\n File "", line 1, in \n TypeError: descriptor \'__hash__\' of \'int\' object needs an argument\n\nIncorrectly attempting to invoke an unbound method of a class in this\nway is sometimes referred to as \'metaclass confusion\', and is avoided\nby bypassing the instance when looking up special methods:\n\n >>> type(1).__hash__(1) == hash(1)\n True\n >>> type(int).__hash__(int) == hash(int)\n True\n\nIn addition to bypassing any instance attributes in the interest of\ncorrectness, implicit special method lookup generally also bypasses\nthe "__getattribute__()" method even of the object\'s metaclass:\n\n >>> class Meta(type):\n ... def __getattribute__(*args):\n ... print("Metaclass getattribute invoked")\n ... return type.__getattribute__(*args)\n ...\n >>> class C(object, metaclass=Meta):\n ... def __len__(self):\n ... return 10\n ... def __getattribute__(*args):\n ... print("Class getattribute invoked")\n ... return object.__getattribute__(*args)\n ...\n >>> c = C()\n >>> c.__len__() # Explicit lookup via instance\n Class getattribute invoked\n 10\n >>> type(c).__len__(c) # Explicit lookup via type\n Metaclass getattribute invoked\n 10\n >>> len(c) # Implicit lookup\n 10\n\nBypassing the "__getattribute__()" machinery in this fashion provides\nsignificant scope for speed optimisations within the interpreter, at\nthe cost of some flexibility in the handling of special methods (the\nspecial method *must* be set on the class object itself in order to be\nconsistently invoked by the interpreter).\n\n-[ Footnotes ]-\n\n[1] It *is* possible in some cases to change an object\'s type,\n under certain controlled conditions. 
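A minimal context manager sketch, tying together the "__enter__()" and "__exit__()" behaviour described in the section above; the class name suppressing is invented for illustration (the standard library offers similar helpers, but this is not one of them).

    class suppressing:
        """Swallow one exception type inside a "with" block."""

        def __init__(self, exc_type):
            self.exc_type = exc_type

        def __enter__(self):
            # The return value is what "with ... as name" binds.
            return self

        def __exit__(self, exc_type, exc_value, traceback):
            # A true return value suppresses the exception; None or False
            # lets it propagate normally.
            return exc_type is not None and issubclass(exc_type, self.exc_type)

    with suppressing(ZeroDivisionError):
        1 / 0
    print("execution continues after the with block")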
It generally isn\'t a good\n idea though, since it can lead to some very strange behaviour if\n it is handled incorrectly.\n\n[2] For operands of the same type, it is assumed that if the non-\n reflected method (such as "__add__()") fails the operation is not\n supported, which is why the reflected method is not called.\n', + 'string-methods': u'\nString Methods\n**************\n\nStrings implement all of the *common* sequence operations, along with\nthe additional methods described below.\n\nStrings also support two styles of string formatting, one providing a\nlarge degree of flexibility and customization (see "str.format()",\n*Format String Syntax* and *String Formatting*) and the other based on\nC "printf" style formatting that handles a narrower range of types and\nis slightly harder to use correctly, but is often faster for the cases\nit can handle (*printf-style String Formatting*).\n\nThe *Text Processing Services* section of the standard library covers\na number of other modules that provide various text related utilities\n(including regular expression support in the "re" module).\n\nstr.capitalize()\n\n Return a copy of the string with its first character capitalized\n and the rest lowercased.\n\nstr.casefold()\n\n Return a casefolded copy of the string. Casefolded strings may be\n used for caseless matching.\n\n Casefolding is similar to lowercasing but more aggressive because\n it is intended to remove all case distinctions in a string. For\n example, the German lowercase letter "\'\xdf\'" is equivalent to ""ss"".\n Since it is already lowercase, "lower()" would do nothing to "\'\xdf\'";\n "casefold()" converts it to ""ss"".\n\n The casefolding algorithm is described in section 3.13 of the\n Unicode Standard.\n\n New in version 3.3.\n\nstr.center(width[, fillchar])\n\n Return centered in a string of length *width*. Padding is done\n using the specified *fillchar* (default is an ASCII space). The\n original string is returned if *width* is less than or equal to\n "len(s)".\n\nstr.count(sub[, start[, end]])\n\n Return the number of non-overlapping occurrences of substring *sub*\n in the range [*start*, *end*]. Optional arguments *start* and\n *end* are interpreted as in slice notation.\n\nstr.encode(encoding="utf-8", errors="strict")\n\n Return an encoded version of the string as a bytes object. Default\n encoding is "\'utf-8\'". *errors* may be given to set a different\n error handling scheme. The default for *errors* is "\'strict\'",\n meaning that encoding errors raise a "UnicodeError". Other possible\n values are "\'ignore\'", "\'replace\'", "\'xmlcharrefreplace\'",\n "\'backslashreplace\'" and any other name registered via\n "codecs.register_error()", see section *Error Handlers*. For a list\n of possible encodings, see section *Standard Encodings*.\n\n Changed in version 3.1: Support for keyword arguments added.\n\nstr.endswith(suffix[, start[, end]])\n\n Return "True" if the string ends with the specified *suffix*,\n otherwise return "False". *suffix* can also be a tuple of suffixes\n to look for. With optional *start*, test beginning at that\n position. With optional *end*, stop comparing at that position.\n\nstr.expandtabs(tabsize=8)\n\n Return a copy of the string where all tab characters are replaced\n by one or more spaces, depending on the current column and the\n given tab size. 
Tab positions occur every *tabsize* characters\n (default is 8, giving tab positions at columns 0, 8, 16 and so on).\n To expand the string, the current column is set to zero and the\n string is examined character by character. If the character is a\n tab ("\\t"), one or more space characters are inserted in the result\n until the current column is equal to the next tab position. (The\n tab character itself is not copied.) If the character is a newline\n ("\\n") or return ("\\r"), it is copied and the current column is\n reset to zero. Any other character is copied unchanged and the\n current column is incremented by one regardless of how the\n character is represented when printed.\n\n >>> \'01\\t012\\t0123\\t01234\'.expandtabs()\n \'01 012 0123 01234\'\n >>> \'01\\t012\\t0123\\t01234\'.expandtabs(4)\n \'01 012 0123 01234\'\n\nstr.find(sub[, start[, end]])\n\n Return the lowest index in the string where substring *sub* is\n found, such that *sub* is contained in the slice "s[start:end]".\n Optional arguments *start* and *end* are interpreted as in slice\n notation. Return "-1" if *sub* is not found.\n\n Note: The "find()" method should be used only if you need to know\n the position of *sub*. To check if *sub* is a substring or not,\n use the "in" operator:\n\n >>> \'Py\' in \'Python\'\n True\n\nstr.format(*args, **kwargs)\n\n Perform a string formatting operation. The string on which this\n method is called can contain literal text or replacement fields\n delimited by braces "{}". Each replacement field contains either\n the numeric index of a positional argument, or the name of a\n keyword argument. Returns a copy of the string where each\n replacement field is replaced with the string value of the\n corresponding argument.\n\n >>> "The sum of 1 + 2 is {0}".format(1+2)\n \'The sum of 1 + 2 is 3\'\n\n See *Format String Syntax* for a description of the various\n formatting options that can be specified in format strings.\n\nstr.format_map(mapping)\n\n Similar to "str.format(**mapping)", except that "mapping" is used\n directly and not copied to a "dict". This is useful if for example\n "mapping" is a dict subclass:\n\n >>> class Default(dict):\n ... def __missing__(self, key):\n ... return key\n ...\n >>> \'{name} was born in {country}\'.format_map(Default(name=\'Guido\'))\n \'Guido was born in country\'\n\n New in version 3.2.\n\nstr.index(sub[, start[, end]])\n\n Like "find()", but raise "ValueError" when the substring is not\n found.\n\nstr.isalnum()\n\n Return true if all characters in the string are alphanumeric and\n there is at least one character, false otherwise. A character "c"\n is alphanumeric if one of the following returns "True":\n "c.isalpha()", "c.isdecimal()", "c.isdigit()", or "c.isnumeric()".\n\nstr.isalpha()\n\n Return true if all characters in the string are alphabetic and\n there is at least one character, false otherwise. Alphabetic\n characters are those characters defined in the Unicode character\n database as "Letter", i.e., those with general category property\n being one of "Lm", "Lt", "Lu", "Ll", or "Lo". Note that this is\n different from the "Alphabetic" property defined in the Unicode\n Standard.\n\nstr.isdecimal()\n\n Return true if all characters in the string are decimal characters\n and there is at least one character, false otherwise. Decimal\n characters are those from general category "Nd". This category\n includes digit characters, and all characters that can be used to\n form decimal-radix numbers, e.g. 
U+0660, ARABIC-INDIC DIGIT ZERO.\n\nstr.isdigit()\n\n Return true if all characters in the string are digits and there is\n at least one character, false otherwise. Digits include decimal\n characters and digits that need special handling, such as the\n compatibility superscript digits. Formally, a digit is a character\n that has the property value Numeric_Type=Digit or\n Numeric_Type=Decimal.\n\nstr.isidentifier()\n\n Return true if the string is a valid identifier according to the\n language definition, section *Identifiers and keywords*.\n\n Use "keyword.iskeyword()" to test for reserved identifiers such as\n "def" and "class".\n\nstr.islower()\n\n Return true if all cased characters [4] in the string are lowercase\n and there is at least one cased character, false otherwise.\n\nstr.isnumeric()\n\n Return true if all characters in the string are numeric characters,\n and there is at least one character, false otherwise. Numeric\n characters include digit characters, and all characters that have\n the Unicode numeric value property, e.g. U+2155, VULGAR FRACTION\n ONE FIFTH. Formally, numeric characters are those with the\n property value Numeric_Type=Digit, Numeric_Type=Decimal or\n Numeric_Type=Numeric.\n\nstr.isprintable()\n\n Return true if all characters in the string are printable or the\n string is empty, false otherwise. Nonprintable characters are\n those characters defined in the Unicode character database as\n "Other" or "Separator", excepting the ASCII space (0x20) which is\n considered printable. (Note that printable characters in this\n context are those which should not be escaped when "repr()" is\n invoked on a string. It has no bearing on the handling of strings\n written to "sys.stdout" or "sys.stderr".)\n\nstr.isspace()\n\n Return true if there are only whitespace characters in the string\n and there is at least one character, false otherwise. Whitespace\n characters are those characters defined in the Unicode character\n database as "Other" or "Separator" and those with bidirectional\n property being one of "WS", "B", or "S".\n\nstr.istitle()\n\n Return true if the string is a titlecased string and there is at\n least one character, for example uppercase characters may only\n follow uncased characters and lowercase characters only cased ones.\n Return false otherwise.\n\nstr.isupper()\n\n Return true if all cased characters [4] in the string are uppercase\n and there is at least one cased character, false otherwise.\n\nstr.join(iterable)\n\n Return a string which is the concatenation of the strings in the\n *iterable* *iterable*. A "TypeError" will be raised if there are\n any non-string values in *iterable*, including "bytes" objects.\n The separator between elements is the string providing this method.\n\nstr.ljust(width[, fillchar])\n\n Return the string left justified in a string of length *width*.\n Padding is done using the specified *fillchar* (default is an ASCII\n space). The original string is returned if *width* is less than or\n equal to "len(s)".\n\nstr.lower()\n\n Return a copy of the string with all the cased characters [4]\n converted to lowercase.\n\n The lowercasing algorithm used is described in section 3.13 of the\n Unicode Standard.\n\nstr.lstrip([chars])\n\n Return a copy of the string with leading characters removed. The\n *chars* argument is a string specifying the set of characters to be\n removed. If omitted or "None", the *chars* argument defaults to\n removing whitespace. 
The *chars* argument is not a prefix; rather,\n all combinations of its values are stripped:\n\n >>> \' spacious \'.lstrip()\n \'spacious \'\n >>> \'www.example.com\'.lstrip(\'cmowz.\')\n \'example.com\'\n\nstatic str.maketrans(x[, y[, z]])\n\n This static method returns a translation table usable for\n "str.translate()".\n\n If there is only one argument, it must be a dictionary mapping\n Unicode ordinals (integers) or characters (strings of length 1) to\n Unicode ordinals, strings (of arbitrary lengths) or None.\n Character keys will then be converted to ordinals.\n\n If there are two arguments, they must be strings of equal length,\n and in the resulting dictionary, each character in x will be mapped\n to the character at the same position in y. If there is a third\n argument, it must be a string, whose characters will be mapped to\n None in the result.\n\nstr.partition(sep)\n\n Split the string at the first occurrence of *sep*, and return a\n 3-tuple containing the part before the separator, the separator\n itself, and the part after the separator. If the separator is not\n found, return a 3-tuple containing the string itself, followed by\n two empty strings.\n\nstr.replace(old, new[, count])\n\n Return a copy of the string with all occurrences of substring *old*\n replaced by *new*. If the optional argument *count* is given, only\n the first *count* occurrences are replaced.\n\nstr.rfind(sub[, start[, end]])\n\n Return the highest index in the string where substring *sub* is\n found, such that *sub* is contained within "s[start:end]".\n Optional arguments *start* and *end* are interpreted as in slice\n notation. Return "-1" on failure.\n\nstr.rindex(sub[, start[, end]])\n\n Like "rfind()" but raises "ValueError" when the substring *sub* is\n not found.\n\nstr.rjust(width[, fillchar])\n\n Return the string right justified in a string of length *width*.\n Padding is done using the specified *fillchar* (default is an ASCII\n space). The original string is returned if *width* is less than or\n equal to "len(s)".\n\nstr.rpartition(sep)\n\n Split the string at the last occurrence of *sep*, and return a\n 3-tuple containing the part before the separator, the separator\n itself, and the part after the separator. If the separator is not\n found, return a 3-tuple containing two empty strings, followed by\n the string itself.\n\nstr.rsplit(sep=None, maxsplit=-1)\n\n Return a list of the words in the string, using *sep* as the\n delimiter string. If *maxsplit* is given, at most *maxsplit* splits\n are done, the *rightmost* ones. If *sep* is not specified or\n "None", any whitespace string is a separator. Except for splitting\n from the right, "rsplit()" behaves like "split()" which is\n described in detail below.\n\nstr.rstrip([chars])\n\n Return a copy of the string with trailing characters removed. The\n *chars* argument is a string specifying the set of characters to be\n removed. If omitted or "None", the *chars* argument defaults to\n removing whitespace. The *chars* argument is not a suffix; rather,\n all combinations of its values are stripped:\n\n >>> \' spacious \'.rstrip()\n \' spacious\'\n >>> \'mississippi\'.rstrip(\'ipz\')\n \'mississ\'\n\nstr.split(sep=None, maxsplit=-1)\n\n Return a list of the words in the string, using *sep* as the\n delimiter string. If *maxsplit* is given, at most *maxsplit*\n splits are done (thus, the list will have at most "maxsplit+1"\n elements). 
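To make the "str.maketrans()" forms above concrete, here is a short illustrative session (the sample values are chosen arbitrarily); it also previews "str.translate()", which is documented further below.

    >>> table = str.maketrans({'a': '4', 'e': None})   # one-argument (mapping) form
    >>> 'leather'.translate(table)
    'l4thr'
    >>> table = str.maketrans('abc', 'xyz', 'q')       # two strings plus deletions
    >>> 'aqbqc'.translate(table)
    'xyz'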
If *maxsplit* is not specified or "-1", then there is\n no limit on the number of splits (all possible splits are made).\n\n If *sep* is given, consecutive delimiters are not grouped together\n and are deemed to delimit empty strings (for example,\n "\'1,,2\'.split(\',\')" returns "[\'1\', \'\', \'2\']"). The *sep* argument\n may consist of multiple characters (for example,\n "\'1<>2<>3\'.split(\'<>\')" returns "[\'1\', \'2\', \'3\']"). Splitting an\n empty string with a specified separator returns "[\'\']".\n\n For example:\n\n >>> \'1,2,3\'.split(\',\')\n [\'1\', \'2\', \'3\']\n >>> \'1,2,3\'.split(\',\', maxsplit=1)\n [\'1\', \'2,3\']\n >>> \'1,2,,3,\'.split(\',\')\n [\'1\', \'2\', \'\', \'3\', \'\']\n\n If *sep* is not specified or is "None", a different splitting\n algorithm is applied: runs of consecutive whitespace are regarded\n as a single separator, and the result will contain no empty strings\n at the start or end if the string has leading or trailing\n whitespace. Consequently, splitting an empty string or a string\n consisting of just whitespace with a "None" separator returns "[]".\n\n For example:\n\n >>> \'1 2 3\'.split()\n [\'1\', \'2\', \'3\']\n >>> \'1 2 3\'.split(maxsplit=1)\n [\'1\', \'2 3\']\n >>> \' 1 2 3 \'.split()\n [\'1\', \'2\', \'3\']\n\nstr.splitlines([keepends])\n\n Return a list of the lines in the string, breaking at line\n boundaries. Line breaks are not included in the resulting list\n unless *keepends* is given and true.\n\n This method splits on the following line boundaries. In\n particular, the boundaries are a superset of *universal newlines*.\n\n +-------------------------+-------------------------------+\n | Representation | Description |\n +=========================+===============================+\n | "\\n" | Line Feed |\n +-------------------------+-------------------------------+\n | "\\r" | Carriage Return |\n +-------------------------+-------------------------------+\n | "\\r\\n" | Carriage Return + Line Feed |\n +-------------------------+-------------------------------+\n | "\\v" or "\\x0b" | Line Tabulation |\n +-------------------------+-------------------------------+\n | "\\f" or "\\x0c" | Form Feed |\n +-------------------------+-------------------------------+\n | "\\x1c" | File Separator |\n +-------------------------+-------------------------------+\n | "\\x1d" | Group Separator |\n +-------------------------+-------------------------------+\n | "\\x1e" | Record Separator |\n +-------------------------+-------------------------------+\n | "\\x85" | Next Line (C1 Control Code) |\n +-------------------------+-------------------------------+\n | "\\u2028" | Line Separator |\n +-------------------------+-------------------------------+\n | "\\u2029" | Paragraph Separator |\n +-------------------------+-------------------------------+\n\n Changed in version 3.2: "\\v" and "\\f" added to list of line\n boundaries.\n\n For example:\n\n >>> \'ab c\\n\\nde fg\\rkl\\r\\n\'.splitlines()\n [\'ab c\', \'\', \'de fg\', \'kl\']\n >>> \'ab c\\n\\nde fg\\rkl\\r\\n\'.splitlines(keepends=True)\n [\'ab c\\n\', \'\\n\', \'de fg\\r\', \'kl\\r\\n\']\n\n Unlike "split()" when a delimiter string *sep* is given, this\n method returns an empty list for the empty string, and a terminal\n line break does not result in an extra line:\n\n >>> "".splitlines()\n []\n >>> "One line\\n".splitlines()\n [\'One line\']\n\n For comparison, "split(\'\\n\')" gives:\n\n >>> \'\'.split(\'\\n\')\n [\'\']\n >>> \'Two lines\\n\'.split(\'\\n\')\n [\'Two lines\', 
\'\']\n\nstr.startswith(prefix[, start[, end]])\n\n Return "True" if string starts with the *prefix*, otherwise return\n "False". *prefix* can also be a tuple of prefixes to look for.\n With optional *start*, test string beginning at that position.\n With optional *end*, stop comparing string at that position.\n\nstr.strip([chars])\n\n Return a copy of the string with the leading and trailing\n characters removed. The *chars* argument is a string specifying the\n set of characters to be removed. If omitted or "None", the *chars*\n argument defaults to removing whitespace. The *chars* argument is\n not a prefix or suffix; rather, all combinations of its values are\n stripped:\n\n >>> \' spacious \'.strip()\n \'spacious\'\n >>> \'www.example.com\'.strip(\'cmowz.\')\n \'example\'\n\nstr.swapcase()\n\n Return a copy of the string with uppercase characters converted to\n lowercase and vice versa. Note that it is not necessarily true that\n "s.swapcase().swapcase() == s".\n\nstr.title()\n\n Return a titlecased version of the string where words start with an\n uppercase character and the remaining characters are lowercase.\n\n For example:\n\n >>> \'Hello world\'.title()\n \'Hello World\'\n\n The algorithm uses a simple language-independent definition of a\n word as groups of consecutive letters. The definition works in\n many contexts but it means that apostrophes in contractions and\n possessives form word boundaries, which may not be the desired\n result:\n\n >>> "they\'re bill\'s friends from the UK".title()\n "They\'Re Bill\'S Friends From The Uk"\n\n A workaround for apostrophes can be constructed using regular\n expressions:\n\n >>> import re\n >>> def titlecase(s):\n ... return re.sub(r"[A-Za-z]+(\'[A-Za-z]+)?",\n ... lambda mo: mo.group(0)[0].upper() +\n ... mo.group(0)[1:].lower(),\n ... s)\n ...\n >>> titlecase("they\'re bill\'s friends.")\n "They\'re Bill\'s Friends."\n\nstr.translate(table)\n\n Return a copy of the string in which each character has been mapped\n through the given translation table. The table must be an object\n that implements indexing via "__getitem__()", typically a *mapping*\n or *sequence*. When indexed by a Unicode ordinal (an integer), the\n table object can do any of the following: return a Unicode ordinal\n or a string, to map the character to one or more other characters;\n return "None", to delete the character from the return string; or\n raise a "LookupError" exception, to map the character to itself.\n\n You can use "str.maketrans()" to create a translation map from\n character-to-character mappings in different formats.\n\n See also the "codecs" module for a more flexible approach to custom\n character mappings.\n\nstr.upper()\n\n Return a copy of the string with all the cased characters [4]\n converted to uppercase. Note that "str.upper().isupper()" might be\n "False" if "s" contains uncased characters or if the Unicode\n category of the resulting character(s) is not "Lu" (Letter,\n uppercase), but e.g. "Lt" (Letter, titlecase).\n\n The uppercasing algorithm used is described in section 3.13 of the\n Unicode Standard.\n\nstr.zfill(width)\n\n Return a copy of the string left filled with ASCII "\'0\'" digits to\n make a string of length *width*. A leading sign prefix\n ("\'+\'"/"\'-\'") is handled by inserting the padding *after* the sign\n character rather than before. 
The original string is returned if\n *width* is less than or equal to "len(s)".\n\n For example:\n\n >>> "42".zfill(5)\n \'00042\'\n >>> "-42".zfill(5)\n \'-0042\'\n', 'strings': u'\nString and Bytes literals\n*************************\n\nString literals are described by the following lexical definitions:\n\n stringliteral ::= [stringprefix](shortstring | longstring)\n stringprefix ::= "r" | "u" | "R" | "U"\n shortstring ::= "\'" shortstringitem* "\'" | \'"\' shortstringitem* \'"\'\n longstring ::= "\'\'\'" longstringitem* "\'\'\'" | \'"""\' longstringitem* \'"""\'\n shortstringitem ::= shortstringchar | stringescapeseq\n longstringitem ::= longstringchar | stringescapeseq\n shortstringchar ::= \n longstringchar ::= \n stringescapeseq ::= "\\" \n\n bytesliteral ::= bytesprefix(shortbytes | longbytes)\n bytesprefix ::= "b" | "B" | "br" | "Br" | "bR" | "BR" | "rb" | "rB" | "Rb" | "RB"\n shortbytes ::= "\'" shortbytesitem* "\'" | \'"\' shortbytesitem* \'"\'\n longbytes ::= "\'\'\'" longbytesitem* "\'\'\'" | \'"""\' longbytesitem* \'"""\'\n shortbytesitem ::= shortbyteschar | bytesescapeseq\n longbytesitem ::= longbyteschar | bytesescapeseq\n shortbyteschar ::= \n longbyteschar ::= \n bytesescapeseq ::= "\\" \n\nOne syntactic restriction not indicated by these productions is that\nwhitespace is not allowed between the "stringprefix" or "bytesprefix"\nand the rest of the literal. The source character set is defined by\nthe encoding declaration; it is UTF-8 if no encoding declaration is\ngiven in the source file; see section *Encoding declarations*.\n\nIn plain English: Both types of literals can be enclosed in matching\nsingle quotes ("\'") or double quotes ("""). They can also be enclosed\nin matching groups of three single or double quotes (these are\ngenerally referred to as *triple-quoted strings*). The backslash\n("\\") character is used to escape characters that otherwise have a\nspecial meaning, such as newline, backslash itself, or the quote\ncharacter.\n\nBytes literals are always prefixed with "\'b\'" or "\'B\'"; they produce\nan instance of the "bytes" type instead of the "str" type. They may\nonly contain ASCII characters; bytes with a numeric value of 128 or\ngreater must be expressed with escapes.\n\nAs of Python 3.3 it is possible again to prefix string literals with a\n"u" prefix to simplify maintenance of dual 2.x and 3.x codebases.\n\nBoth string and bytes literals may optionally be prefixed with a\nletter "\'r\'" or "\'R\'"; such strings are called *raw strings* and treat\nbackslashes as literal characters. As a result, in string literals,\n"\'\\U\'" and "\'\\u\'" escapes in raw strings are not treated specially.\nGiven that Python 2.x\'s raw unicode literals behave differently than\nPython 3.x\'s the "\'ur\'" syntax is not supported.\n\nNew in version 3.3: The "\'rb\'" prefix of raw bytes literals has been\nadded as a synonym of "\'br\'".\n\nNew in version 3.3: Support for the unicode legacy literal\n("u\'value\'") was reintroduced to simplify the maintenance of dual\nPython 2.x and 3.x codebases. See **PEP 414** for more information.\n\nIn triple-quoted literals, unescaped newlines and quotes are allowed\n(and are retained), except that three unescaped quotes in a row\nterminate the literal. (A "quote" is the character used to open the\nliteral, i.e. either "\'" or """.)\n\nUnless an "\'r\'" or "\'R\'" prefix is present, escape sequences in string\nand bytes literals are interpreted according to rules similar to those\nused by Standard C. 
The recognized escape sequences are:\n\n+-------------------+-----------------------------------+---------+\n| Escape Sequence | Meaning | Notes |\n+===================+===================================+=========+\n| "\\newline" | Backslash and newline ignored | |\n+-------------------+-----------------------------------+---------+\n| "\\\\" | Backslash ("\\") | |\n+-------------------+-----------------------------------+---------+\n| "\\\'" | Single quote ("\'") | |\n+-------------------+-----------------------------------+---------+\n| "\\"" | Double quote (""") | |\n+-------------------+-----------------------------------+---------+\n| "\\a" | ASCII Bell (BEL) | |\n+-------------------+-----------------------------------+---------+\n| "\\b" | ASCII Backspace (BS) | |\n+-------------------+-----------------------------------+---------+\n| "\\f" | ASCII Formfeed (FF) | |\n+-------------------+-----------------------------------+---------+\n| "\\n" | ASCII Linefeed (LF) | |\n+-------------------+-----------------------------------+---------+\n| "\\r" | ASCII Carriage Return (CR) | |\n+-------------------+-----------------------------------+---------+\n| "\\t" | ASCII Horizontal Tab (TAB) | |\n+-------------------+-----------------------------------+---------+\n| "\\v" | ASCII Vertical Tab (VT) | |\n+-------------------+-----------------------------------+---------+\n| "\\ooo" | Character with octal value *ooo* | (1,3) |\n+-------------------+-----------------------------------+---------+\n| "\\xhh" | Character with hex value *hh* | (2,3) |\n+-------------------+-----------------------------------+---------+\n\nEscape sequences only recognized in string literals are:\n\n+-------------------+-----------------------------------+---------+\n| Escape Sequence | Meaning | Notes |\n+===================+===================================+=========+\n| "\\N{name}" | Character named *name* in the | (4) |\n| | Unicode database | |\n+-------------------+-----------------------------------+---------+\n| "\\uxxxx" | Character with 16-bit hex value | (5) |\n| | *xxxx* | |\n+-------------------+-----------------------------------+---------+\n| "\\Uxxxxxxxx" | Character with 32-bit hex value | (6) |\n| | *xxxxxxxx* | |\n+-------------------+-----------------------------------+---------+\n\nNotes:\n\n1. As in Standard C, up to three octal digits are accepted.\n\n2. Unlike in Standard C, exactly two hex digits are required.\n\n3. In a bytes literal, hexadecimal and octal escapes denote the\n byte with the given value. In a string literal, these escapes\n denote a Unicode character with the given value.\n\n4. Changed in version 3.3: Support for name aliases [1] has been\n added.\n\n5. Individual code units which form parts of a surrogate pair can\n be encoded using this escape sequence. Exactly four hex digits are\n required.\n\n6. Any Unicode character can be encoded this way. Exactly eight\n hex digits are required.\n\nUnlike Standard C, all unrecognized escape sequences are left in the\nstring unchanged, i.e., *the backslash is left in the result*. (This\nbehavior is useful when debugging: if an escape sequence is mistyped,\nthe resulting output is more easily recognized as broken.) 
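A brief interactive illustration of the escape-sequence rules above, including the point that unrecognized escapes keep their backslash (the examples are chosen arbitrarily):

    >>> len('\n'), len(r'\n')      # raw strings keep the backslash
    (1, 2)
    >>> '\x41\u0042'               # hex and 16-bit escapes
    'AB'
    >>> '\q'                       # unrecognized escape: backslash is left in place
    '\\q'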
It is also\nimportant to note that the escape sequences only recognized in string\nliterals fall into the category of unrecognized escapes for bytes\nliterals.\n\nEven in a raw literal, quotes can be escaped with a backslash, but the\nbackslash remains in the result; for example, "r"\\""" is a valid\nstring literal consisting of two characters: a backslash and a double\nquote; "r"\\"" is not a valid string literal (even a raw string cannot\nend in an odd number of backslashes). Specifically, *a raw literal\ncannot end in a single backslash* (since the backslash would escape\nthe following quote character). Note also that a single backslash\nfollowed by a newline is interpreted as those two characters as part\nof the literal, *not* as a line continuation.\n', 'subscriptions': u'\nSubscriptions\n*************\n\nA subscription selects an item of a sequence (string, tuple or list)\nor mapping (dictionary) object:\n\n subscription ::= primary "[" expression_list "]"\n\nThe primary must evaluate to an object that supports subscription\n(lists or dictionaries for example). User-defined objects can support\nsubscription by defining a "__getitem__()" method.\n\nFor built-in objects, there are two types of objects that support\nsubscription:\n\nIf the primary is a mapping, the expression list must evaluate to an\nobject whose value is one of the keys of the mapping, and the\nsubscription selects the value in the mapping that corresponds to that\nkey. (The expression list is a tuple except if it has exactly one\nitem.)\n\nIf the primary is a sequence, the expression (list) must evaluate to\nan integer or a slice (as discussed in the following section).\n\nThe formal syntax makes no special provision for negative indices in\nsequences; however, built-in sequences all provide a "__getitem__()"\nmethod that interprets negative indices by adding the length of the\nsequence to the index (so that "x[-1]" selects the last item of "x").\nThe resulting value must be a nonnegative integer less than the number\nof items in the sequence, and the subscription selects the item whose\nindex is that value (counting from zero). Since the support for\nnegative indices and slicing occurs in the object\'s "__getitem__()"\nmethod, subclasses overriding this method will need to explicitly add\nthat support.\n\nA string\'s items are characters. A character is not a separate data\ntype but a string of exactly one character.\n', 'truth': u'\nTruth Value Testing\n*******************\n\nAny object can be tested for truth value, for use in an "if" or\n"while" condition or as operand of the Boolean operations below. The\nfollowing values are considered false:\n\n* "None"\n\n* "False"\n\n* zero of any numeric type, for example, "0", "0.0", "0j".\n\n* any empty sequence, for example, "\'\'", "()", "[]".\n\n* any empty mapping, for example, "{}".\n\n* instances of user-defined classes, if the class defines a\n "__bool__()" or "__len__()" method, when that method returns the\n integer zero or "bool" value "False". [1]\n\nAll other values are considered true --- so objects of many types are\nalways true.\n\nOperations and built-in functions that have a Boolean result always\nreturn "0" or "False" for false and "1" or "True" for true, unless\notherwise stated. 
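A small sketch of the truth-testing rules above for user-defined classes; the class name Box is invented for the example.

    class Box:
        """Truth value follows __len__() when __bool__() is not defined."""

        def __init__(self, items):
            self.items = list(items)

        def __len__(self):
            return len(self.items)

    print(bool(Box([])))       # False: __len__() returned zero
    print(bool(Box([1, 2])))   # True: non-zero length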
(Important exception: the Boolean operations "or"\nand "and" always return one of their operands.)\n', 'try': u'\nThe "try" statement\n*******************\n\nThe "try" statement specifies exception handlers and/or cleanup code\nfor a group of statements:\n\n try_stmt ::= try1_stmt | try2_stmt\n try1_stmt ::= "try" ":" suite\n ("except" [expression ["as" identifier]] ":" suite)+\n ["else" ":" suite]\n ["finally" ":" suite]\n try2_stmt ::= "try" ":" suite\n "finally" ":" suite\n\nThe "except" clause(s) specify one or more exception handlers. When no\nexception occurs in the "try" clause, no exception handler is\nexecuted. When an exception occurs in the "try" suite, a search for an\nexception handler is started. This search inspects the except clauses\nin turn until one is found that matches the exception. An expression-\nless except clause, if present, must be last; it matches any\nexception. For an except clause with an expression, that expression\nis evaluated, and the clause matches the exception if the resulting\nobject is "compatible" with the exception. An object is compatible\nwith an exception if it is the class or a base class of the exception\nobject or a tuple containing an item compatible with the exception.\n\nIf no except clause matches the exception, the search for an exception\nhandler continues in the surrounding code and on the invocation stack.\n[1]\n\nIf the evaluation of an expression in the header of an except clause\nraises an exception, the original search for a handler is canceled and\na search starts for the new exception in the surrounding code and on\nthe call stack (it is treated as if the entire "try" statement raised\nthe exception).\n\nWhen a matching except clause is found, the exception is assigned to\nthe target specified after the "as" keyword in that except clause, if\npresent, and the except clause\'s suite is executed. All except\nclauses must have an executable block. When the end of this block is\nreached, execution continues normally after the entire try statement.\n(This means that if two nested handlers exist for the same exception,\nand the exception occurs in the try clause of the inner handler, the\nouter handler will not handle the exception.)\n\nWhen an exception has been assigned using "as target", it is cleared\nat the end of the except clause. This is as if\n\n except E as N:\n foo\n\nwas translated to\n\n except E as N:\n try:\n foo\n finally:\n del N\n\nThis means the exception must be assigned to a different name to be\nable to refer to it after the except clause. Exceptions are cleared\nbecause with the traceback attached to them, they form a reference\ncycle with the stack frame, keeping all locals in that frame alive\nuntil the next garbage collection occurs.\n\nBefore an except clause\'s suite is executed, details about the\nexception are stored in the "sys" module and can be accessed via\n"sys.exc_info()". "sys.exc_info()" returns a 3-tuple consisting of the\nexception class, the exception instance and a traceback object (see\nsection *The standard type hierarchy*) identifying the point in the\nprogram where the exception occurred. "sys.exc_info()" values are\nrestored to their previous values (before the call) when returning\nfrom a function that handled an exception.\n\nThe optional "else" clause is executed if and when control flows off\nthe end of the "try" clause. [2] Exceptions in the "else" clause are\nnot handled by the preceding "except" clauses.\n\nIf "finally" is present, it specifies a \'cleanup\' handler. 
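The clearing of the "as" name at the end of an except clause, described above, can be seen in a small sketch; the helper name grab() is invented for the example.

    def grab():
        try:
            {}['missing']
        except KeyError as exc:
            caught = exc        # rebind to keep a reference past the clause
        # "exc" is deleted when the except clause ends, but "caught" survives.
        return caught

    print(type(grab()).__name__)   # KeyError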
The "try"\nclause is executed, including any "except" and "else" clauses. If an\nexception occurs in any of the clauses and is not handled, the\nexception is temporarily saved. The "finally" clause is executed. If\nthere is a saved exception it is re-raised at the end of the "finally"\nclause. If the "finally" clause raises another exception, the saved\nexception is set as the context of the new exception. If the "finally"\nclause executes a "return" or "break" statement, the saved exception\nis discarded:\n\n >>> def f():\n ... try:\n ... 1/0\n ... finally:\n ... return 42\n ...\n >>> f()\n 42\n\nThe exception information is not available to the program during\nexecution of the "finally" clause.\n\nWhen a "return", "break" or "continue" statement is executed in the\n"try" suite of a "try"..."finally" statement, the "finally" clause is\nalso executed \'on the way out.\' A "continue" statement is illegal in\nthe "finally" clause. (The reason is a problem with the current\nimplementation --- this restriction may be lifted in the future).\n\nThe return value of a function is determined by the last "return"\nstatement executed. Since the "finally" clause always executes, a\n"return" statement executed in the "finally" clause will always be the\nlast one executed:\n\n >>> def foo():\n ... try:\n ... return \'try\'\n ... finally:\n ... return \'finally\'\n ...\n >>> foo()\n \'finally\'\n\nAdditional information on exceptions can be found in section\n*Exceptions*, and information on using the "raise" statement to\ngenerate exceptions may be found in section *The raise statement*.\n', 'types': u'\nThe standard type hierarchy\n***************************\n\nBelow is a list of the types that are built into Python. Extension\nmodules (written in C, Java, or other languages, depending on the\nimplementation) can define additional types. Future versions of\nPython may add types to the type hierarchy (e.g., rational numbers,\nefficiently stored arrays of integers, etc.), although such additions\nwill often be provided via the standard library instead.\n\nSome of the type descriptions below contain a paragraph listing\n\'special attributes.\' These are attributes that provide access to the\nimplementation and are not intended for general use. Their definition\nmay change in the future.\n\nNone\n This type has a single value. There is a single object with this\n value. This object is accessed through the built-in name "None". It\n is used to signify the absence of a value in many situations, e.g.,\n it is returned from functions that don\'t explicitly return\n anything. Its truth value is false.\n\nNotImplemented\n This type has a single value. There is a single object with this\n value. This object is accessed through the built-in name\n "NotImplemented". Numeric methods and rich comparison methods\n should return this value if they do not implement the operation for\n the operands provided. (The interpreter will then try the\n reflected operation, or some other fallback, depending on the\n operator.) Its truth value is true.\n\n See *Implementing the arithmetic operations* for more details.\n\nEllipsis\n This type has a single value. There is a single object with this\n value. This object is accessed through the literal "..." or the\n built-in name "Ellipsis". Its truth value is true.\n\n"numbers.Number"\n These are created by numeric literals and returned as results by\n arithmetic operators and arithmetic built-in functions. 
Numeric\n objects are immutable; once created their value never changes.\n Python numbers are of course strongly related to mathematical\n numbers, but subject to the limitations of numerical representation\n in computers.\n\n Python distinguishes between integers, floating point numbers, and\n complex numbers:\n\n "numbers.Integral"\n These represent elements from the mathematical set of integers\n (positive and negative).\n\n There are two types of integers:\n\n Integers ("int")\n\n These represent numbers in an unlimited range, subject to\n available (virtual) memory only. For the purpose of shift\n and mask operations, a binary representation is assumed, and\n negative numbers are represented in a variant of 2\'s\n complement which gives the illusion of an infinite string of\n sign bits extending to the left.\n\n Booleans ("bool")\n These represent the truth values False and True. The two\n objects representing the values "False" and "True" are the\n only Boolean objects. The Boolean type is a subtype of the\n integer type, and Boolean values behave like the values 0 and\n 1, respectively, in almost all contexts, the exception being\n that when converted to a string, the strings ""False"" or\n ""True"" are returned, respectively.\n\n The rules for integer representation are intended to give the\n most meaningful interpretation of shift and mask operations\n involving negative integers.\n\n "numbers.Real" ("float")\n These represent machine-level double precision floating point\n numbers. You are at the mercy of the underlying machine\n architecture (and C or Java implementation) for the accepted\n range and handling of overflow. Python does not support single-\n precision floating point numbers; the savings in processor and\n memory usage that are usually the reason for using these are\n dwarfed by the overhead of using objects in Python, so there is\n no reason to complicate the language with two kinds of floating\n point numbers.\n\n "numbers.Complex" ("complex")\n These represent complex numbers as a pair of machine-level\n double precision floating point numbers. The same caveats apply\n as for floating point numbers. The real and imaginary parts of a\n complex number "z" can be retrieved through the read-only\n attributes "z.real" and "z.imag".\n\nSequences\n These represent finite ordered sets indexed by non-negative\n numbers. The built-in function "len()" returns the number of items\n of a sequence. When the length of a sequence is *n*, the index set\n contains the numbers 0, 1, ..., *n*-1. Item *i* of sequence *a* is\n selected by "a[i]".\n\n Sequences also support slicing: "a[i:j]" selects all items with\n index *k* such that *i* "<=" *k* "<" *j*. When used as an\n expression, a slice is a sequence of the same type. This implies\n that the index set is renumbered so that it starts at 0.\n\n Some sequences also support "extended slicing" with a third "step"\n parameter: "a[i:j:k]" selects all items of *a* with index *x* where\n "x = i + n*k", *n* ">=" "0" and *i* "<=" *x* "<" *j*.\n\n Sequences are distinguished according to their mutability:\n\n Immutable sequences\n An object of an immutable sequence type cannot change once it is\n created. 
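For instance, the indexing and (extended) slicing behaviour just described, in a short interactive session:

    >>> a = ['p', 'y', 't', 'h', 'o', 'n']
    >>> a[1:4]          # items with index k where 1 <= k < 4
    ['y', 't', 'h']
    >>> a[::2]          # extended slicing with a step of 2
    ['p', 't', 'o']
    >>> a[-1]           # negative index: len(a) + (-1)
    'n'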
(If the object contains references to other objects,\n these other objects may be mutable and may be changed; however,\n the collection of objects directly referenced by an immutable\n object cannot change.)\n\n The following types are immutable sequences:\n\n Strings\n A string is a sequence of values that represent Unicode code\n points. All the code points in the range "U+0000 - U+10FFFF"\n can be represented in a string. Python doesn\'t have a "char"\n type; instead, every code point in the string is represented\n as a string object with length "1". The built-in function\n "ord()" converts a code point from its string form to an\n integer in the range "0 - 10FFFF"; "chr()" converts an\n integer in the range "0 - 10FFFF" to the corresponding length\n "1" string object. "str.encode()" can be used to convert a\n "str" to "bytes" using the given text encoding, and\n "bytes.decode()" can be used to achieve the opposite.\n\n Tuples\n The items of a tuple are arbitrary Python objects. Tuples of\n two or more items are formed by comma-separated lists of\n expressions. A tuple of one item (a \'singleton\') can be\n formed by affixing a comma to an expression (an expression by\n itself does not create a tuple, since parentheses must be\n usable for grouping of expressions). An empty tuple can be\n formed by an empty pair of parentheses.\n\n Bytes\n A bytes object is an immutable array. The items are 8-bit\n bytes, represented by integers in the range 0 <= x < 256.\n Bytes literals (like "b\'abc\'") and the built-in function\n "bytes()" can be used to construct bytes objects. Also,\n bytes objects can be decoded to strings via the "decode()"\n method.\n\n Mutable sequences\n Mutable sequences can be changed after they are created. The\n subscription and slicing notations can be used as the target of\n assignment and "del" (delete) statements.\n\n There are currently two intrinsic mutable sequence types:\n\n Lists\n The items of a list are arbitrary Python objects. Lists are\n formed by placing a comma-separated list of expressions in\n square brackets. (Note that there are no special cases needed\n to form lists of length 0 or 1.)\n\n Byte Arrays\n A bytearray object is a mutable array. They are created by\n the built-in "bytearray()" constructor. Aside from being\n mutable (and hence unhashable), byte arrays otherwise provide\n the same interface and functionality as immutable bytes\n objects.\n\n The extension module "array" provides an additional example of a\n mutable sequence type, as does the "collections" module.\n\nSet types\n These represent unordered, finite sets of unique, immutable\n objects. As such, they cannot be indexed by any subscript. However,\n they can be iterated over, and the built-in function "len()"\n returns the number of items in a set. Common uses for sets are fast\n membership testing, removing duplicates from a sequence, and\n computing mathematical operations such as intersection, union,\n difference, and symmetric difference.\n\n For set elements, the same immutability rules apply as for\n dictionary keys. Note that numeric types obey the normal rules for\n numeric comparison: if two numbers compare equal (e.g., "1" and\n "1.0"), only one of them can be contained in a set.\n\n There are currently two intrinsic set types:\n\n Sets\n These represent a mutable set. They are created by the built-in\n "set()" constructor and can be modified afterwards by several\n methods, such as "add()".\n\n Frozen sets\n These represent an immutable set. 
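Two of the points above in a brief illustrative session: byte arrays are the mutable counterpart of bytes objects, and numbers that compare equal occupy a single slot in a set.

    >>> ba = bytearray(b'spam')
    >>> ba[0] = ord(b'S')        # mutable in place, unlike bytes
    >>> bytes(ba)
    b'Spam'
    >>> {1, 1.0, True}           # equal numbers collapse to one element
    {1}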
They are created by the\n built-in "frozenset()" constructor. As a frozenset is immutable\n and *hashable*, it can be used again as an element of another\n set, or as a dictionary key.\n\nMappings\n These represent finite sets of objects indexed by arbitrary index\n sets. The subscript notation "a[k]" selects the item indexed by "k"\n from the mapping "a"; this can be used in expressions and as the\n target of assignments or "del" statements. The built-in function\n "len()" returns the number of items in a mapping.\n\n There is currently a single intrinsic mapping type:\n\n Dictionaries\n These represent finite sets of objects indexed by nearly\n arbitrary values. The only types of values not acceptable as\n keys are values containing lists or dictionaries or other\n mutable types that are compared by value rather than by object\n identity, the reason being that the efficient implementation of\n dictionaries requires a key\'s hash value to remain constant.\n Numeric types used for keys obey the normal rules for numeric\n comparison: if two numbers compare equal (e.g., "1" and "1.0")\n then they can be used interchangeably to index the same\n dictionary entry.\n\n Dictionaries are mutable; they can be created by the "{...}"\n notation (see section *Dictionary displays*).\n\n The extension modules "dbm.ndbm" and "dbm.gnu" provide\n additional examples of mapping types, as does the "collections"\n module.\n\nCallable types\n These are the types to which the function call operation (see\n section *Calls*) can be applied:\n\n User-defined functions\n A user-defined function object is created by a function\n definition (see section *Function definitions*). It should be\n called with an argument list containing the same number of items\n as the function\'s formal parameter list.\n\n Special attributes:\n\n +---------------------------+---------------------------------+-------------+\n | Attribute | Meaning | |\n +===========================+=================================+=============+\n | "__doc__" | The function\'s documentation | Writable |\n | | string, or "None" if | |\n | | unavailable; not inherited by | |\n | | subclasses | |\n +---------------------------+---------------------------------+-------------+\n | "__name__" | The function\'s name | Writable |\n +---------------------------+---------------------------------+-------------+\n | "__qualname__" | The function\'s *qualified name* | Writable |\n | | New in version 3.3. | |\n +---------------------------+---------------------------------+-------------+\n | "__module__" | The name of the module the | Writable |\n | | function was defined in, or | |\n | | "None" if unavailable. | |\n +---------------------------+---------------------------------+-------------+\n | "__defaults__" | A tuple containing default | Writable |\n | | argument values for those | |\n | | arguments that have defaults, | |\n | | or "None" if no arguments have | |\n | | a default value | |\n +---------------------------+---------------------------------+-------------+\n | "__code__" | The code object representing | Writable |\n | | the compiled function body. | |\n +---------------------------+---------------------------------+-------------+\n | "__globals__" | A reference to the dictionary | Read-only |\n | | that holds the function\'s | |\n | | global variables --- the global | |\n | | namespace of the module in | |\n | | which the function was defined. 
| |\n +---------------------------+---------------------------------+-------------+\n | "__dict__" | The namespace supporting | Writable |\n | | arbitrary function attributes. | |\n +---------------------------+---------------------------------+-------------+\n | "__closure__" | "None" or a tuple of cells that | Read-only |\n | | contain bindings for the | |\n | | function\'s free variables. | |\n +---------------------------+---------------------------------+-------------+\n | "__annotations__" | A dict containing annotations | Writable |\n | | of parameters. The keys of the | |\n | | dict are the parameter names, | |\n | | and "\'return\'" for the return | |\n | | annotation, if provided. | |\n +---------------------------+---------------------------------+-------------+\n | "__kwdefaults__" | A dict containing defaults for | Writable |\n | | keyword-only parameters. | |\n +---------------------------+---------------------------------+-------------+\n\n Most of the attributes labelled "Writable" check the type of the\n assigned value.\n\n Function objects also support getting and setting arbitrary\n attributes, which can be used, for example, to attach metadata\n to functions. Regular attribute dot-notation is used to get and\n set such attributes. *Note that the current implementation only\n supports function attributes on user-defined functions. Function\n attributes on built-in functions may be supported in the\n future.*\n\n Additional information about a function\'s definition can be\n retrieved from its code object; see the description of internal\n types below.\n\n Instance methods\n An instance method object combines a class, a class instance and\n any callable object (normally a user-defined function).\n\n Special read-only attributes: "__self__" is the class instance\n object, "__func__" is the function object; "__doc__" is the\n method\'s documentation (same as "__func__.__doc__"); "__name__"\n is the method name (same as "__func__.__name__"); "__module__"\n is the name of the module the method was defined in, or "None"\n if unavailable.\n\n Methods also support accessing (but not setting) the arbitrary\n function attributes on the underlying function object.\n\n User-defined method objects may be created when getting an\n attribute of a class (perhaps via an instance of that class), if\n that attribute is a user-defined function object or a class\n method object.\n\n When an instance method object is created by retrieving a user-\n defined function object from a class via one of its instances,\n its "__self__" attribute is the instance, and the method object\n is said to be bound. The new method\'s "__func__" attribute is\n the original function object.\n\n When a user-defined method object is created by retrieving\n another method object from a class or instance, the behaviour is\n the same as for a function object, except that the "__func__"\n attribute of the new instance is not the original method object\n but its "__func__" attribute.\n\n When an instance method object is created by retrieving a class\n method object from a class or instance, its "__self__" attribute\n is the class itself, and its "__func__" attribute is the\n function object underlying the class method.\n\n When an instance method object is called, the underlying\n function ("__func__") is called, inserting the class instance\n ("__self__") in front of the argument list. 
For instance, when\n "C" is a class which contains a definition for a function "f()",\n and "x" is an instance of "C", calling "x.f(1)" is equivalent to\n calling "C.f(x, 1)".\n\n When an instance method object is derived from a class method\n object, the "class instance" stored in "__self__" will actually\n be the class itself, so that calling either "x.f(1)" or "C.f(1)"\n is equivalent to calling "f(C,1)" where "f" is the underlying\n function.\n\n Note that the transformation from function object to instance\n method object happens each time the attribute is retrieved from\n the instance. In some cases, a fruitful optimization is to\n assign the attribute to a local variable and call that local\n variable. Also notice that this transformation only happens for\n user-defined functions; other callable objects (and all non-\n callable objects) are retrieved without transformation. It is\n also important to note that user-defined functions which are\n attributes of a class instance are not converted to bound\n methods; this *only* happens when the function is an attribute\n of the class.\n\n Generator functions\n A function or method which uses the "yield" statement (see\n section *The yield statement*) is called a *generator function*.\n Such a function, when called, always returns an iterator object\n which can be used to execute the body of the function: calling\n the iterator\'s "iterator.__next__()" method will cause the\n function to execute until it provides a value using the "yield"\n statement. When the function executes a "return" statement or\n falls off the end, a "StopIteration" exception is raised and the\n iterator will have reached the end of the set of values to be\n returned.\n\n Built-in functions\n A built-in function object is a wrapper around a C function.\n Examples of built-in functions are "len()" and "math.sin()"\n ("math" is a standard built-in module). The number and type of\n the arguments are determined by the C function. Special read-\n only attributes: "__doc__" is the function\'s documentation\n string, or "None" if unavailable; "__name__" is the function\'s\n name; "__self__" is set to "None" (but see the next item);\n "__module__" is the name of the module the function was defined\n in or "None" if unavailable.\n\n Built-in methods\n This is really a different disguise of a built-in function, this\n time containing an object passed to the C function as an\n implicit extra argument. An example of a built-in method is\n "alist.append()", assuming *alist* is a list object. In this\n case, the special read-only attribute "__self__" is set to the\n object denoted by *alist*.\n\n Classes\n Classes are callable. These objects normally act as factories\n for new instances of themselves, but variations are possible for\n class types that override "__new__()". The arguments of the\n call are passed to "__new__()" and, in the typical case, to\n "__init__()" to initialize the new instance.\n\n Class Instances\n Instances of arbitrary classes can be made callable by defining\n a "__call__()" method in their class.\n\nModules\n Modules are a basic organizational unit of Python code, and are\n created by the *import system* as invoked either by the "import"\n statement (see "import"), or by calling functions such as\n "importlib.import_module()" and built-in "__import__()". A module\n object has a namespace implemented by a dictionary object (this is\n the dictionary referenced by the "__globals__" attribute of\n functions defined in the module). 
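A minimal sketch of the bound-method machinery described above; the names C, f and x are simply the ones used in the surrounding text.

    class C:
        def f(self, arg):
            return arg * 2

    x = C()
    m = x.f                       # retrieving the attribute creates a bound method
    print(m.__self__ is x)        # True: the instance is stored on the method
    print(m.__func__ is C.f)      # True: the original function object
    print(m(1) == C.f(x, 1))      # True: x.f(1) is equivalent to C.f(x, 1)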
Attribute references are\n translated to lookups in this dictionary, e.g., "m.x" is equivalent\n to "m.__dict__["x"]". A module object does not contain the code\n object used to initialize the module (since it isn\'t needed once\n the initialization is done).\n\n Attribute assignment updates the module\'s namespace dictionary,\n e.g., "m.x = 1" is equivalent to "m.__dict__["x"] = 1".\n\n Special read-only attribute: "__dict__" is the module\'s namespace\n as a dictionary object.\n\n **CPython implementation detail:** Because of the way CPython\n clears module dictionaries, the module dictionary will be cleared\n when the module falls out of scope even if the dictionary still has\n live references. To avoid this, copy the dictionary or keep the\n module around while using its dictionary directly.\n\n Predefined (writable) attributes: "__name__" is the module\'s name;\n "__doc__" is the module\'s documentation string, or "None" if\n unavailable; "__file__" is the pathname of the file from which the\n module was loaded, if it was loaded from a file. The "__file__"\n attribute may be missing for certain types of modules, such as C\n modules that are statically linked into the interpreter; for\n extension modules loaded dynamically from a shared library, it is\n the pathname of the shared library file.\n\nCustom classes\n Custom class types are typically created by class definitions (see\n section *Class definitions*). A class has a namespace implemented\n by a dictionary object. Class attribute references are translated\n to lookups in this dictionary, e.g., "C.x" is translated to\n "C.__dict__["x"]" (although there are a number of hooks which allow\n for other means of locating attributes). When the attribute name is\n not found there, the attribute search continues in the base\n classes. This search of the base classes uses the C3 method\n resolution order which behaves correctly even in the presence of\n \'diamond\' inheritance structures where there are multiple\n inheritance paths leading back to a common ancestor. Additional\n details on the C3 MRO used by Python can be found in the\n documentation accompanying the 2.3 release at\n https://www.python.org/download/releases/2.3/mro/.\n\n When a class attribute reference (for class "C", say) would yield a\n class method object, it is transformed into an instance method\n object whose "__self__" attributes is "C". When it would yield a\n static method object, it is transformed into the object wrapped by\n the static method object. 
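   A short illustrative sketch of both transformations (the class "C"
   and its methods are hypothetical):

      >>> class C:
      ...     @classmethod
      ...     def cm(cls):
      ...         return cls
      ...     @staticmethod
      ...     def sm():
      ...         return 42
      ...
      >>> C.cm.__self__ is C                    # class method bound to the class
      True
      >>> C.sm is C.__dict__['sm'].__func__     # the wrapped function itself
      True
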
See section *Implementing Descriptors*\n for another way in which attributes retrieved from a class may\n differ from those actually contained in its "__dict__".\n\n Class attribute assignments update the class\'s dictionary, never\n the dictionary of a base class.\n\n A class object can be called (see above) to yield a class instance\n (see below).\n\n Special attributes: "__name__" is the class name; "__module__" is\n the module name in which the class was defined; "__dict__" is the\n dictionary containing the class\'s namespace; "__bases__" is a tuple\n (possibly empty or a singleton) containing the base classes, in the\n order of their occurrence in the base class list; "__doc__" is the\n class\'s documentation string, or None if undefined.\n\nClass instances\n A class instance is created by calling a class object (see above).\n A class instance has a namespace implemented as a dictionary which\n is the first place in which attribute references are searched.\n When an attribute is not found there, and the instance\'s class has\n an attribute by that name, the search continues with the class\n attributes. If a class attribute is found that is a user-defined\n function object, it is transformed into an instance method object\n whose "__self__" attribute is the instance. Static method and\n class method objects are also transformed; see above under\n "Classes". See section *Implementing Descriptors* for another way\n in which attributes of a class retrieved via its instances may\n differ from the objects actually stored in the class\'s "__dict__".\n If no class attribute is found, and the object\'s class has a\n "__getattr__()" method, that is called to satisfy the lookup.\n\n Attribute assignments and deletions update the instance\'s\n dictionary, never a class\'s dictionary. If the class has a\n "__setattr__()" or "__delattr__()" method, this is called instead\n of updating the instance dictionary directly.\n\n Class instances can pretend to be numbers, sequences, or mappings\n if they have methods with certain special names. See section\n *Special method names*.\n\n Special attributes: "__dict__" is the attribute dictionary;\n "__class__" is the instance\'s class.\n\nI/O objects (also known as file objects)\n A *file object* represents an open file. Various shortcuts are\n available to create file objects: the "open()" built-in function,\n and also "os.popen()", "os.fdopen()", and the "makefile()" method\n of socket objects (and perhaps by other functions or methods\n provided by extension modules).\n\n The objects "sys.stdin", "sys.stdout" and "sys.stderr" are\n initialized to file objects corresponding to the interpreter\'s\n standard input, output and error streams; they are all open in text\n mode and therefore follow the interface defined by the\n "io.TextIOBase" abstract class.\n\nInternal types\n A few types used internally by the interpreter are exposed to the\n user. Their definitions may change with future versions of the\n interpreter, but they are mentioned here for completeness.\n\n Code objects\n Code objects represent *byte-compiled* executable Python code,\n or *bytecode*. The difference between a code object and a\n function object is that the function object contains an explicit\n reference to the function\'s globals (the module in which it was\n defined), while a code object contains no context; also the\n default argument values are stored in the function object, not\n in the code object (because they represent values calculated at\n run-time). 
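      A small sketch of this division of labour (the function "f" is
      hypothetical):

         >>> def f(a, b=10):
         ...     return a + b
         ...
         >>> f.__defaults__            # stored on the function object
         (10,)
         >>> f.__code__.co_varnames    # the code object only records names
         ('a', 'b')
         >>> f.__code__.co_argcount
         2
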
Unlike function objects, code objects are immutable\n and contain no references (directly or indirectly) to mutable\n objects.\n\n Special read-only attributes: "co_name" gives the function name;\n "co_argcount" is the number of positional arguments (including\n arguments with default values); "co_nlocals" is the number of\n local variables used by the function (including arguments);\n "co_varnames" is a tuple containing the names of the local\n variables (starting with the argument names); "co_cellvars" is a\n tuple containing the names of local variables that are\n referenced by nested functions; "co_freevars" is a tuple\n containing the names of free variables; "co_code" is a string\n representing the sequence of bytecode instructions; "co_consts"\n is a tuple containing the literals used by the bytecode;\n "co_names" is a tuple containing the names used by the bytecode;\n "co_filename" is the filename from which the code was compiled;\n "co_firstlineno" is the first line number of the function;\n "co_lnotab" is a string encoding the mapping from bytecode\n offsets to line numbers (for details see the source code of the\n interpreter); "co_stacksize" is the required stack size\n (including local variables); "co_flags" is an integer encoding a\n number of flags for the interpreter.\n\n The following flag bits are defined for "co_flags": bit "0x04"\n is set if the function uses the "*arguments" syntax to accept an\n arbitrary number of positional arguments; bit "0x08" is set if\n the function uses the "**keywords" syntax to accept arbitrary\n keyword arguments; bit "0x20" is set if the function is a\n generator.\n\n Future feature declarations ("from __future__ import division")\n also use bits in "co_flags" to indicate whether a code object\n was compiled with a particular feature enabled: bit "0x2000" is\n set if the function was compiled with future division enabled;\n bits "0x10" and "0x1000" were used in earlier versions of\n Python.\n\n Other bits in "co_flags" are reserved for internal use.\n\n If a code object represents a function, the first item in\n "co_consts" is the documentation string of the function, or\n "None" if undefined.\n\n Frame objects\n Frame objects represent execution frames. They may occur in\n traceback objects (see below).\n\n Special read-only attributes: "f_back" is to the previous stack\n frame (towards the caller), or "None" if this is the bottom\n stack frame; "f_code" is the code object being executed in this\n frame; "f_locals" is the dictionary used to look up local\n variables; "f_globals" is used for global variables;\n "f_builtins" is used for built-in (intrinsic) names; "f_lasti"\n gives the precise instruction (this is an index into the\n bytecode string of the code object).\n\n Special writable attributes: "f_trace", if not "None", is a\n function called at the start of each source code line (this is\n used by the debugger); "f_lineno" is the current line number of\n the frame --- writing to this from within a trace function jumps\n to the given line (only for the bottom-most frame). A debugger\n can implement a Jump command (aka Set Next Statement) by writing\n to f_lineno.\n\n Frame objects support one method:\n\n frame.clear()\n\n This method clears all references to local variables held by\n the frame. Also, if the frame belonged to a generator, the\n generator is finalized. 
This helps break reference cycles\n involving frame objects (for example when catching an\n exception and storing its traceback for later use).\n\n "RuntimeError" is raised if the frame is currently executing.\n\n New in version 3.4.\n\n Traceback objects\n Traceback objects represent a stack trace of an exception. A\n traceback object is created when an exception occurs. When the\n search for an exception handler unwinds the execution stack, at\n each unwound level a traceback object is inserted in front of\n the current traceback. When an exception handler is entered,\n the stack trace is made available to the program. (See section\n *The try statement*.) It is accessible as the third item of the\n tuple returned by "sys.exc_info()". When the program contains no\n suitable handler, the stack trace is written (nicely formatted)\n to the standard error stream; if the interpreter is interactive,\n it is also made available to the user as "sys.last_traceback".\n\n Special read-only attributes: "tb_next" is the next level in the\n stack trace (towards the frame where the exception occurred), or\n "None" if there is no next level; "tb_frame" points to the\n execution frame of the current level; "tb_lineno" gives the line\n number where the exception occurred; "tb_lasti" indicates the\n precise instruction. The line number and last instruction in\n the traceback may differ from the line number of its frame\n object if the exception occurred in a "try" statement with no\n matching except clause or with a finally clause.\n\n Slice objects\n Slice objects are used to represent slices for "__getitem__()"\n methods. They are also created by the built-in "slice()"\n function.\n\n Special read-only attributes: "start" is the lower bound; "stop"\n is the upper bound; "step" is the step value; each is "None" if\n omitted. These attributes can have any type.\n\n Slice objects support one method:\n\n slice.indices(self, length)\n\n This method takes a single integer argument *length* and\n computes information about the slice that the slice object\n would describe if applied to a sequence of *length* items.\n It returns a tuple of three integers; respectively these are\n the *start* and *stop* indices and the *step* or stride\n length of the slice. Missing or out-of-bounds indices are\n handled in a manner consistent with regular slices.\n\n Static method objects\n Static method objects provide a way of defeating the\n transformation of function objects to method objects described\n above. A static method object is a wrapper around any other\n object, usually a user-defined method object. When a static\n method object is retrieved from a class or a class instance, the\n object actually returned is the wrapped object, which is not\n subject to any further transformation. Static method objects are\n not themselves callable, although the objects they wrap usually\n are. Static method objects are created by the built-in\n "staticmethod()" constructor.\n\n Class method objects\n A class method object, like a static method object, is a wrapper\n around another object that alters the way in which that object\n is retrieved from classes and class instances. The behaviour of\n class method objects upon such retrieval is described above,\n under "User-defined methods". Class method objects are created\n by the built-in "classmethod()" constructor.\n', 'typesfunctions': u'\nFunctions\n*********\n\nFunction objects are created by function definitions. 
The only\noperation on a function object is to call it: "func(argument-list)".\n\nThere are really two flavors of function objects: built-in functions\nand user-defined functions. Both support the same operation (to call\nthe function), but the implementation is different, hence the\ndifferent object types.\n\nSee *Function definitions* for more information.\n', - 'typesmapping': u'\nMapping Types --- "dict"\n************************\n\nA *mapping* object maps *hashable* values to arbitrary objects.\nMappings are mutable objects. There is currently only one standard\nmapping type, the *dictionary*. (For other containers see the built-\nin "list", "set", and "tuple" classes, and the "collections" module.)\n\nA dictionary\'s keys are *almost* arbitrary values. Values that are\nnot *hashable*, that is, values containing lists, dictionaries or\nother mutable types (that are compared by value rather than by object\nidentity) may not be used as keys. Numeric types used for keys obey\nthe normal rules for numeric comparison: if two numbers compare equal\n(such as "1" and "1.0") then they can be used interchangeably to index\nthe same dictionary entry. (Note however, that since computers store\nfloating-point numbers as approximations it is usually unwise to use\nthem as dictionary keys.)\n\nDictionaries can be created by placing a comma-separated list of "key:\nvalue" pairs within braces, for example: "{\'jack\': 4098, \'sjoerd\':\n4127}" or "{4098: \'jack\', 4127: \'sjoerd\'}", or by the "dict"\nconstructor.\n\nclass class dict(**kwarg)\nclass class dict(mapping, **kwarg)\nclass class dict(iterable, **kwarg)\n\n Return a new dictionary initialized from an optional positional\n argument and a possibly empty set of keyword arguments.\n\n If no positional argument is given, an empty dictionary is created.\n If a positional argument is given and it is a mapping object, a\n dictionary is created with the same key-value pairs as the mapping\n object. Otherwise, the positional argument must be an *iterable*\n object. Each item in the iterable must itself be an iterable with\n exactly two objects. The first object of each item becomes a key\n in the new dictionary, and the second object the corresponding\n value. If a key occurs more than once, the last value for that key\n becomes the corresponding value in the new dictionary.\n\n If keyword arguments are given, the keyword arguments and their\n values are added to the dictionary created from the positional\n argument. If a key being added is already present, the value from\n the keyword argument replaces the value from the positional\n argument.\n\n To illustrate, the following examples all return a dictionary equal\n to "{"one": 1, "two": 2, "three": 3}":\n\n >>> a = dict(one=1, two=2, three=3)\n >>> b = {\'one\': 1, \'two\': 2, \'three\': 3}\n >>> c = dict(zip([\'one\', \'two\', \'three\'], [1, 2, 3]))\n >>> d = dict([(\'two\', 2), (\'one\', 1), (\'three\', 3)])\n >>> e = dict({\'three\': 3, \'one\': 1, \'two\': 2})\n >>> a == b == c == d == e\n True\n\n Providing keyword arguments as in the first example only works for\n keys that are valid Python identifiers. Otherwise, any valid keys\n can be used.\n\n These are the operations that dictionaries support (and therefore,\n custom mapping types should support too):\n\n len(d)\n\n Return the number of items in the dictionary *d*.\n\n d[key]\n\n Return the item of *d* with key *key*. 
Raises a "KeyError" if\n *key* is not in the map.\n\n If a subclass of dict defines a method "__missing__()" and *key*\n is not present, the "d[key]" operation calls that method with\n the key *key* as argument. The "d[key]" operation then returns\n or raises whatever is returned or raised by the\n "__missing__(key)" call. No other operations or methods invoke\n "__missing__()". If "__missing__()" is not defined, "KeyError"\n is raised. "__missing__()" must be a method; it cannot be an\n instance variable:\n\n >>> class Counter(dict):\n ... def __missing__(self, key):\n ... return 0\n >>> c = Counter()\n >>> c[\'red\']\n 0\n >>> c[\'red\'] += 1\n >>> c[\'red\']\n 1\n\n The example above shows part of the implementation of\n "collections.Counter". A different "__missing__" method is used\n by "collections.defaultdict".\n\n d[key] = value\n\n Set "d[key]" to *value*.\n\n del d[key]\n\n Remove "d[key]" from *d*. Raises a "KeyError" if *key* is not\n in the map.\n\n key in d\n\n Return "True" if *d* has a key *key*, else "False".\n\n key not in d\n\n Equivalent to "not key in d".\n\n iter(d)\n\n Return an iterator over the keys of the dictionary. This is a\n shortcut for "iter(d.keys())".\n\n clear()\n\n Remove all items from the dictionary.\n\n copy()\n\n Return a shallow copy of the dictionary.\n\n classmethod fromkeys(seq[, value])\n\n Create a new dictionary with keys from *seq* and values set to\n *value*.\n\n "fromkeys()" is a class method that returns a new dictionary.\n *value* defaults to "None".\n\n get(key[, default])\n\n Return the value for *key* if *key* is in the dictionary, else\n *default*. If *default* is not given, it defaults to "None", so\n that this method never raises a "KeyError".\n\n items()\n\n Return a new view of the dictionary\'s items ("(key, value)"\n pairs). See the *documentation of view objects*.\n\n keys()\n\n Return a new view of the dictionary\'s keys. See the\n *documentation of view objects*.\n\n pop(key[, default])\n\n If *key* is in the dictionary, remove it and return its value,\n else return *default*. If *default* is not given and *key* is\n not in the dictionary, a "KeyError" is raised.\n\n popitem()\n\n Remove and return an arbitrary "(key, value)" pair from the\n dictionary.\n\n "popitem()" is useful to destructively iterate over a\n dictionary, as often used in set algorithms. If the dictionary\n is empty, calling "popitem()" raises a "KeyError".\n\n setdefault(key[, default])\n\n If *key* is in the dictionary, return its value. If not, insert\n *key* with a value of *default* and return *default*. *default*\n defaults to "None".\n\n update([other])\n\n Update the dictionary with the key/value pairs from *other*,\n overwriting existing keys. Return "None".\n\n "update()" accepts either another dictionary object or an\n iterable of key/value pairs (as tuples or other iterables of\n length two). If keyword arguments are specified, the dictionary\n is then updated with those key/value pairs: "d.update(red=1,\n blue=2)".\n\n values()\n\n Return a new view of the dictionary\'s values. See the\n *documentation of view objects*.\n\nSee also: "types.MappingProxyType" can be used to create a read-only\n view of a "dict".\n\n\nDictionary view objects\n=======================\n\nThe objects returned by "dict.keys()", "dict.values()" and\n"dict.items()" are *view objects*. 
They provide a dynamic view on the\ndictionary\'s entries, which means that when the dictionary changes,\nthe view reflects these changes.\n\nDictionary views can be iterated over to yield their respective data,\nand support membership tests:\n\nlen(dictview)\n\n Return the number of entries in the dictionary.\n\niter(dictview)\n\n Return an iterator over the keys, values or items (represented as\n tuples of "(key, value)") in the dictionary.\n\n Keys and values are iterated over in an arbitrary order which is\n non-random, varies across Python implementations, and depends on\n the dictionary\'s history of insertions and deletions. If keys,\n values and items views are iterated over with no intervening\n modifications to the dictionary, the order of items will directly\n correspond. This allows the creation of "(value, key)" pairs using\n "zip()": "pairs = zip(d.values(), d.keys())". Another way to\n create the same list is "pairs = [(v, k) for (k, v) in d.items()]".\n\n Iterating views while adding or deleting entries in the dictionary\n may raise a "RuntimeError" or fail to iterate over all entries.\n\nx in dictview\n\n Return "True" if *x* is in the underlying dictionary\'s keys, values\n or items (in the latter case, *x* should be a "(key, value)"\n tuple).\n\nKeys views are set-like since their entries are unique and hashable.\nIf all values are hashable, so that "(key, value)" pairs are unique\nand hashable, then the items view is also set-like. (Values views are\nnot treated as set-like since the entries are generally not unique.)\nFor set-like views, all of the operations defined for the abstract\nbase class "collections.abc.Set" are available (for example, "==",\n"<", or "^").\n\nAn example of dictionary view usage:\n\n >>> dishes = {\'eggs\': 2, \'sausage\': 1, \'bacon\': 1, \'spam\': 500}\n >>> keys = dishes.keys()\n >>> values = dishes.values()\n\n >>> # iteration\n >>> n = 0\n >>> for val in values:\n ... n += val\n >>> print(n)\n 504\n\n >>> # keys and values are iterated over in the same order\n >>> list(keys)\n [\'eggs\', \'bacon\', \'sausage\', \'spam\']\n >>> list(values)\n [2, 1, 1, 500]\n\n >>> # view objects are dynamic and reflect dict changes\n >>> del dishes[\'eggs\']\n >>> del dishes[\'sausage\']\n >>> list(keys)\n [\'spam\', \'bacon\']\n\n >>> # set operations\n >>> keys & {\'eggs\', \'bacon\', \'salad\'}\n {\'bacon\'}\n >>> keys ^ {\'sausage\', \'juice\'}\n {\'juice\', \'sausage\', \'bacon\', \'spam\'}\n', + 'typesmapping': u'\nMapping Types --- "dict"\n************************\n\nA *mapping* object maps *hashable* values to arbitrary objects.\nMappings are mutable objects. There is currently only one standard\nmapping type, the *dictionary*. (For other containers see the built-\nin "list", "set", and "tuple" classes, and the "collections" module.)\n\nA dictionary\'s keys are *almost* arbitrary values. Values that are\nnot *hashable*, that is, values containing lists, dictionaries or\nother mutable types (that are compared by value rather than by object\nidentity) may not be used as keys. Numeric types used for keys obey\nthe normal rules for numeric comparison: if two numbers compare equal\n(such as "1" and "1.0") then they can be used interchangeably to index\nthe same dictionary entry. 
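For example, since "1 == 1.0" is true, either form of the key selects
the same entry (a small illustrative sketch):

   >>> d = {1: 'one'}
   >>> d[1.0]
   'one'
   >>> d[1.0] = 'unity'     # replaces the value stored under the key 1
   >>> d
   {1: 'unity'}
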
(Note however, that since computers store\nfloating-point numbers as approximations it is usually unwise to use\nthem as dictionary keys.)\n\nDictionaries can be created by placing a comma-separated list of "key:\nvalue" pairs within braces, for example: "{\'jack\': 4098, \'sjoerd\':\n4127}" or "{4098: \'jack\', 4127: \'sjoerd\'}", or by the "dict"\nconstructor.\n\nclass class dict(**kwarg)\nclass class dict(mapping, **kwarg)\nclass class dict(iterable, **kwarg)\n\n Return a new dictionary initialized from an optional positional\n argument and a possibly empty set of keyword arguments.\n\n If no positional argument is given, an empty dictionary is created.\n If a positional argument is given and it is a mapping object, a\n dictionary is created with the same key-value pairs as the mapping\n object. Otherwise, the positional argument must be an *iterable*\n object. Each item in the iterable must itself be an iterable with\n exactly two objects. The first object of each item becomes a key\n in the new dictionary, and the second object the corresponding\n value. If a key occurs more than once, the last value for that key\n becomes the corresponding value in the new dictionary.\n\n If keyword arguments are given, the keyword arguments and their\n values are added to the dictionary created from the positional\n argument. If a key being added is already present, the value from\n the keyword argument replaces the value from the positional\n argument.\n\n To illustrate, the following examples all return a dictionary equal\n to "{"one": 1, "two": 2, "three": 3}":\n\n >>> a = dict(one=1, two=2, three=3)\n >>> b = {\'one\': 1, \'two\': 2, \'three\': 3}\n >>> c = dict(zip([\'one\', \'two\', \'three\'], [1, 2, 3]))\n >>> d = dict([(\'two\', 2), (\'one\', 1), (\'three\', 3)])\n >>> e = dict({\'three\': 3, \'one\': 1, \'two\': 2})\n >>> a == b == c == d == e\n True\n\n Providing keyword arguments as in the first example only works for\n keys that are valid Python identifiers. Otherwise, any valid keys\n can be used.\n\n These are the operations that dictionaries support (and therefore,\n custom mapping types should support too):\n\n len(d)\n\n Return the number of items in the dictionary *d*.\n\n d[key]\n\n Return the item of *d* with key *key*. Raises a "KeyError" if\n *key* is not in the map.\n\n If a subclass of dict defines a method "__missing__()" and *key*\n is not present, the "d[key]" operation calls that method with\n the key *key* as argument. The "d[key]" operation then returns\n or raises whatever is returned or raised by the\n "__missing__(key)" call. No other operations or methods invoke\n "__missing__()". If "__missing__()" is not defined, "KeyError"\n is raised. "__missing__()" must be a method; it cannot be an\n instance variable:\n\n >>> class Counter(dict):\n ... def __missing__(self, key):\n ... return 0\n >>> c = Counter()\n >>> c[\'red\']\n 0\n >>> c[\'red\'] += 1\n >>> c[\'red\']\n 1\n\n The example above shows part of the implementation of\n "collections.Counter". A different "__missing__" method is used\n by "collections.defaultdict".\n\n d[key] = value\n\n Set "d[key]" to *value*.\n\n del d[key]\n\n Remove "d[key]" from *d*. Raises a "KeyError" if *key* is not\n in the map.\n\n key in d\n\n Return "True" if *d* has a key *key*, else "False".\n\n key not in d\n\n Equivalent to "not key in d".\n\n iter(d)\n\n Return an iterator over the keys of the dictionary. 
This is a\n shortcut for "iter(d.keys())".\n\n clear()\n\n Remove all items from the dictionary.\n\n copy()\n\n Return a shallow copy of the dictionary.\n\n classmethod fromkeys(seq[, value])\n\n Create a new dictionary with keys from *seq* and values set to\n *value*.\n\n "fromkeys()" is a class method that returns a new dictionary.\n *value* defaults to "None".\n\n get(key[, default])\n\n Return the value for *key* if *key* is in the dictionary, else\n *default*. If *default* is not given, it defaults to "None", so\n that this method never raises a "KeyError".\n\n items()\n\n Return a new view of the dictionary\'s items ("(key, value)"\n pairs). See the *documentation of view objects*.\n\n keys()\n\n Return a new view of the dictionary\'s keys. See the\n *documentation of view objects*.\n\n pop(key[, default])\n\n If *key* is in the dictionary, remove it and return its value,\n else return *default*. If *default* is not given and *key* is\n not in the dictionary, a "KeyError" is raised.\n\n popitem()\n\n Remove and return an arbitrary "(key, value)" pair from the\n dictionary.\n\n "popitem()" is useful to destructively iterate over a\n dictionary, as often used in set algorithms. If the dictionary\n is empty, calling "popitem()" raises a "KeyError".\n\n setdefault(key[, default])\n\n If *key* is in the dictionary, return its value. If not, insert\n *key* with a value of *default* and return *default*. *default*\n defaults to "None".\n\n update([other])\n\n Update the dictionary with the key/value pairs from *other*,\n overwriting existing keys. Return "None".\n\n "update()" accepts either another dictionary object or an\n iterable of key/value pairs (as tuples or other iterables of\n length two). If keyword arguments are specified, the dictionary\n is then updated with those key/value pairs: "d.update(red=1,\n blue=2)".\n\n values()\n\n Return a new view of the dictionary\'s values. See the\n *documentation of view objects*.\n\n Dictionaries compare equal if and only if they have the same "(key,\n value)" pairs. Order comparisons (\'<\', \'<=\', \'>=\', \'>\') raise\n "TypeError".\n\nSee also: "types.MappingProxyType" can be used to create a read-only\n view of a "dict".\n\n\nDictionary view objects\n=======================\n\nThe objects returned by "dict.keys()", "dict.values()" and\n"dict.items()" are *view objects*. They provide a dynamic view on the\ndictionary\'s entries, which means that when the dictionary changes,\nthe view reflects these changes.\n\nDictionary views can be iterated over to yield their respective data,\nand support membership tests:\n\nlen(dictview)\n\n Return the number of entries in the dictionary.\n\niter(dictview)\n\n Return an iterator over the keys, values or items (represented as\n tuples of "(key, value)") in the dictionary.\n\n Keys and values are iterated over in an arbitrary order which is\n non-random, varies across Python implementations, and depends on\n the dictionary\'s history of insertions and deletions. If keys,\n values and items views are iterated over with no intervening\n modifications to the dictionary, the order of items will directly\n correspond. This allows the creation of "(value, key)" pairs using\n "zip()": "pairs = zip(d.values(), d.keys())". 
Another way to\n create the same list is "pairs = [(v, k) for (k, v) in d.items()]".\n\n Iterating views while adding or deleting entries in the dictionary\n may raise a "RuntimeError" or fail to iterate over all entries.\n\nx in dictview\n\n Return "True" if *x* is in the underlying dictionary\'s keys, values\n or items (in the latter case, *x* should be a "(key, value)"\n tuple).\n\nKeys views are set-like since their entries are unique and hashable.\nIf all values are hashable, so that "(key, value)" pairs are unique\nand hashable, then the items view is also set-like. (Values views are\nnot treated as set-like since the entries are generally not unique.)\nFor set-like views, all of the operations defined for the abstract\nbase class "collections.abc.Set" are available (for example, "==",\n"<", or "^").\n\nAn example of dictionary view usage:\n\n >>> dishes = {\'eggs\': 2, \'sausage\': 1, \'bacon\': 1, \'spam\': 500}\n >>> keys = dishes.keys()\n >>> values = dishes.values()\n\n >>> # iteration\n >>> n = 0\n >>> for val in values:\n ... n += val\n >>> print(n)\n 504\n\n >>> # keys and values are iterated over in the same order\n >>> list(keys)\n [\'eggs\', \'bacon\', \'sausage\', \'spam\']\n >>> list(values)\n [2, 1, 1, 500]\n\n >>> # view objects are dynamic and reflect dict changes\n >>> del dishes[\'eggs\']\n >>> del dishes[\'sausage\']\n >>> list(keys)\n [\'spam\', \'bacon\']\n\n >>> # set operations\n >>> keys & {\'eggs\', \'bacon\', \'salad\'}\n {\'bacon\'}\n >>> keys ^ {\'sausage\', \'juice\'}\n {\'juice\', \'sausage\', \'bacon\', \'spam\'}\n', 'typesmethods': u'\nMethods\n*******\n\nMethods are functions that are called using the attribute notation.\nThere are two flavors: built-in methods (such as "append()" on lists)\nand class instance methods. Built-in methods are described with the\ntypes that support them.\n\nIf you access a method (a function defined in a class namespace)\nthrough an instance, you get a special object: a *bound method* (also\ncalled *instance method*) object. When called, it will add the "self"\nargument to the argument list. Bound methods have two special read-\nonly attributes: "m.__self__" is the object on which the method\noperates, and "m.__func__" is the function implementing the method.\nCalling "m(arg-1, arg-2, ..., arg-n)" is completely equivalent to\ncalling "m.__func__(m.__self__, arg-1, arg-2, ..., arg-n)".\n\nLike function objects, bound method objects support getting arbitrary\nattributes. However, since method attributes are actually stored on\nthe underlying function object ("meth.__func__"), setting method\nattributes on bound methods is disallowed. Attempting to set an\nattribute on a method results in an "AttributeError" being raised. In\norder to set a method attribute, you need to explicitly set it on the\nunderlying function object:\n\n >>> class C:\n ... def method(self):\n ... pass\n ...\n >>> c = C()\n >>> c.method.whoami = \'my name is method\' # can\'t set on the method\n Traceback (most recent call last):\n File "", line 1, in \n AttributeError: \'method\' object has no attribute \'whoami\'\n >>> c.method.__func__.whoami = \'my name is method\'\n >>> c.method.whoami\n \'my name is method\'\n\nSee *The standard type hierarchy* for more information.\n', 'typesmodules': u'\nModules\n*******\n\nThe only special operation on a module is attribute access: "m.name",\nwhere *m* is a module and *name* accesses a name defined in *m*\'s\nsymbol table. Module attributes can be assigned to. 
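For instance (an illustrative sketch; "types.ModuleType" is used here
only to create a throw-away module object):

   >>> import types
   >>> m = types.ModuleType('demo')
   >>> m.answer = 42          # attribute assignment
   >>> m.answer               # attribute access
   42
   >>> m.__dict__['answer']
   42
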
(Note that the\n"import" statement is not, strictly speaking, an operation on a module\nobject; "import foo" does not require a module object named *foo* to\nexist, rather it requires an (external) *definition* for a module\nnamed *foo* somewhere.)\n\nA special attribute of every module is "__dict__". This is the\ndictionary containing the module\'s symbol table. Modifying this\ndictionary will actually change the module\'s symbol table, but direct\nassignment to the "__dict__" attribute is not possible (you can write\n"m.__dict__[\'a\'] = 1", which defines "m.a" to be "1", but you can\'t\nwrite "m.__dict__ = {}"). Modifying "__dict__" directly is not\nrecommended.\n\nModules built into the interpreter are written like this: "". If loaded from a file, they are written as\n"".\n', - 'typesseq': u'\nSequence Types --- "list", "tuple", "range"\n*******************************************\n\nThere are three basic sequence types: lists, tuples, and range\nobjects. Additional sequence types tailored for processing of *binary\ndata* and *text strings* are described in dedicated sections.\n\n\nCommon Sequence Operations\n==========================\n\nThe operations in the following table are supported by most sequence\ntypes, both mutable and immutable. The "collections.abc.Sequence" ABC\nis provided to make it easier to correctly implement these operations\non custom sequence types.\n\nThis table lists the sequence operations sorted in ascending priority.\nIn the table, *s* and *t* are sequences of the same type, *n*, *i*,\n*j* and *k* are integers and *x* is an arbitrary object that meets any\ntype and value restrictions imposed by *s*.\n\nThe "in" and "not in" operations have the same priorities as the\ncomparison operations. The "+" (concatenation) and "*" (repetition)\noperations have the same priority as the corresponding numeric\noperations.\n\n+----------------------------+----------------------------------+------------+\n| Operation | Result | Notes |\n+============================+==================================+============+\n| "x in s" | "True" if an item of *s* is | (1) |\n| | equal to *x*, else "False" | |\n+----------------------------+----------------------------------+------------+\n| "x not in s" | "False" if an item of *s* is | (1) |\n| | equal to *x*, else "True" | |\n+----------------------------+----------------------------------+------------+\n| "s + t" | the concatenation of *s* and *t* | (6)(7) |\n+----------------------------+----------------------------------+------------+\n| "s * n" or "n * s" | *n* shallow copies of *s* | (2)(7) |\n| | concatenated | |\n+----------------------------+----------------------------------+------------+\n| "s[i]" | *i*th item of *s*, origin 0 | (3) |\n+----------------------------+----------------------------------+------------+\n| "s[i:j]" | slice of *s* from *i* to *j* | (3)(4) |\n+----------------------------+----------------------------------+------------+\n| "s[i:j:k]" | slice of *s* from *i* to *j* | (3)(5) |\n| | with step *k* | |\n+----------------------------+----------------------------------+------------+\n| "len(s)" | length of *s* | |\n+----------------------------+----------------------------------+------------+\n| "min(s)" | smallest item of *s* | |\n+----------------------------+----------------------------------+------------+\n| "max(s)" | largest item of *s* | |\n+----------------------------+----------------------------------+------------+\n| "s.index(x[, i[, j]])" | index of the first occurrence of | (8) |\n| | *x* in 
*s* (at or after index | |\n| | *i* and before index *j*) | |\n+----------------------------+----------------------------------+------------+\n| "s.count(x)" | total number of occurrences of | |\n| | *x* in *s* | |\n+----------------------------+----------------------------------+------------+\n\nSequences of the same type also support comparisons. In particular,\ntuples and lists are compared lexicographically by comparing\ncorresponding elements. This means that to compare equal, every\nelement must compare equal and the two sequences must be of the same\ntype and have the same length. (For full details see *Comparisons* in\nthe language reference.)\n\nNotes:\n\n1. While the "in" and "not in" operations are used only for simple\n containment testing in the general case, some specialised sequences\n (such as "str", "bytes" and "bytearray") also use them for\n subsequence testing:\n\n >>> "gg" in "eggs"\n True\n\n2. Values of *n* less than "0" are treated as "0" (which yields an\n empty sequence of the same type as *s*). Note also that the copies\n are shallow; nested structures are not copied. This often haunts\n new Python programmers; consider:\n\n >>> lists = [[]] * 3\n >>> lists\n [[], [], []]\n >>> lists[0].append(3)\n >>> lists\n [[3], [3], [3]]\n\n What has happened is that "[[]]" is a one-element list containing\n an empty list, so all three elements of "[[]] * 3" are (pointers\n to) this single empty list. Modifying any of the elements of\n "lists" modifies this single list. You can create a list of\n different lists this way:\n\n >>> lists = [[] for i in range(3)]\n >>> lists[0].append(3)\n >>> lists[1].append(5)\n >>> lists[2].append(7)\n >>> lists\n [[3], [5], [7]]\n\n3. If *i* or *j* is negative, the index is relative to the end of\n the string: "len(s) + i" or "len(s) + j" is substituted. But note\n that "-0" is still "0".\n\n4. The slice of *s* from *i* to *j* is defined as the sequence of\n items with index *k* such that "i <= k < j". If *i* or *j* is\n greater than "len(s)", use "len(s)". If *i* is omitted or "None",\n use "0". If *j* is omitted or "None", use "len(s)". If *i* is\n greater than or equal to *j*, the slice is empty.\n\n5. The slice of *s* from *i* to *j* with step *k* is defined as the\n sequence of items with index "x = i + n*k" such that "0 <= n <\n (j-i)/k". In other words, the indices are "i", "i+k", "i+2*k",\n "i+3*k" and so on, stopping when *j* is reached (but never\n including *j*). If *i* or *j* is greater than "len(s)", use\n "len(s)". If *i* or *j* are omitted or "None", they become "end"\n values (which end depends on the sign of *k*). Note, *k* cannot be\n zero. If *k* is "None", it is treated like "1".\n\n6. Concatenating immutable sequences always results in a new\n object. This means that building up a sequence by repeated\n concatenation will have a quadratic runtime cost in the total\n sequence length. To get a linear runtime cost, you must switch to\n one of the alternatives below:\n\n * if concatenating "str" objects, you can build a list and use\n "str.join()" at the end or else write to a "io.StringIO" instance\n and retrieve its value when complete\n\n * if concatenating "bytes" objects, you can similarly use\n "bytes.join()" or "io.BytesIO", or you can do in-place\n concatenation with a "bytearray" object. "bytearray" objects are\n mutable and have an efficient overallocation mechanism\n\n * if concatenating "tuple" objects, extend a "list" instead\n\n * for other types, investigate the relevant class documentation\n\n7. 
Some sequence types (such as "range") only support item\n sequences that follow specific patterns, and hence don\'t support\n sequence concatenation or repetition.\n\n8. "index" raises "ValueError" when *x* is not found in *s*. When\n supported, the additional arguments to the index method allow\n efficient searching of subsections of the sequence. Passing the\n extra arguments is roughly equivalent to using "s[i:j].index(x)",\n only without copying any data and with the returned index being\n relative to the start of the sequence rather than the start of the\n slice.\n\n\nImmutable Sequence Types\n========================\n\nThe only operation that immutable sequence types generally implement\nthat is not also implemented by mutable sequence types is support for\nthe "hash()" built-in.\n\nThis support allows immutable sequences, such as "tuple" instances, to\nbe used as "dict" keys and stored in "set" and "frozenset" instances.\n\nAttempting to hash an immutable sequence that contains unhashable\nvalues will result in "TypeError".\n\n\nMutable Sequence Types\n======================\n\nThe operations in the following table are defined on mutable sequence\ntypes. The "collections.abc.MutableSequence" ABC is provided to make\nit easier to correctly implement these operations on custom sequence\ntypes.\n\nIn the table *s* is an instance of a mutable sequence type, *t* is any\niterable object and *x* is an arbitrary object that meets any type and\nvalue restrictions imposed by *s* (for example, "bytearray" only\naccepts integers that meet the value restriction "0 <= x <= 255").\n\n+--------------------------------+----------------------------------+-----------------------+\n| Operation | Result | Notes |\n+================================+==================================+=======================+\n| "s[i] = x" | item *i* of *s* is replaced by | |\n| | *x* | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s[i:j] = t" | slice of *s* from *i* to *j* is | |\n| | replaced by the contents of the | |\n| | iterable *t* | |\n+--------------------------------+----------------------------------+-----------------------+\n| "del s[i:j]" | same as "s[i:j] = []" | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s[i:j:k] = t" | the elements of "s[i:j:k]" are | (1) |\n| | replaced by those of *t* | |\n+--------------------------------+----------------------------------+-----------------------+\n| "del s[i:j:k]" | removes the elements of | |\n| | "s[i:j:k]" from the list | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.append(x)" | appends *x* to the end of the | |\n| | sequence (same as | |\n| | "s[len(s):len(s)] = [x]") | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.clear()" | removes all items from "s" (same | (5) |\n| | as "del s[:]") | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.copy()" | creates a shallow copy of "s" | (5) |\n| | (same as "s[:]") | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.extend(t)" | extends *s* with the contents of | |\n| | *t* (same as "s[len(s):len(s)] = | |\n| | t") | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.insert(i, x)" | inserts *x* into *s* at the | |\n| | index given by *i* (same as 
| |\n| | "s[i:i] = [x]") | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.pop([i])" | retrieves the item at *i* and | (2) |\n| | also removes it from *s* | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.remove(x)" | remove the first item from *s* | (3) |\n| | where "s[i] == x" | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.reverse()" | reverses the items of *s* in | (4) |\n| | place | |\n+--------------------------------+----------------------------------+-----------------------+\n\nNotes:\n\n1. *t* must have the same length as the slice it is replacing.\n\n2. The optional argument *i* defaults to "-1", so that by default\n the last item is removed and returned.\n\n3. "remove" raises "ValueError" when *x* is not found in *s*.\n\n4. The "reverse()" method modifies the sequence in place for\n economy of space when reversing a large sequence. To remind users\n that it operates by side effect, it does not return the reversed\n sequence.\n\n5. "clear()" and "copy()" are included for consistency with the\n interfaces of mutable containers that don\'t support slicing\n operations (such as "dict" and "set")\n\n New in version 3.3: "clear()" and "copy()" methods.\n\n\nLists\n=====\n\nLists are mutable sequences, typically used to store collections of\nhomogeneous items (where the precise degree of similarity will vary by\napplication).\n\nclass class list([iterable])\n\n Lists may be constructed in several ways:\n\n * Using a pair of square brackets to denote the empty list: "[]"\n\n * Using square brackets, separating items with commas: "[a]",\n "[a, b, c]"\n\n * Using a list comprehension: "[x for x in iterable]"\n\n * Using the type constructor: "list()" or "list(iterable)"\n\n The constructor builds a list whose items are the same and in the\n same order as *iterable*\'s items. *iterable* may be either a\n sequence, a container that supports iteration, or an iterator\n object. If *iterable* is already a list, a copy is made and\n returned, similar to "iterable[:]". For example, "list(\'abc\')"\n returns "[\'a\', \'b\', \'c\']" and "list( (1, 2, 3) )" returns "[1, 2,\n 3]". If no argument is given, the constructor creates a new empty\n list, "[]".\n\n Many other operations also produce lists, including the "sorted()"\n built-in.\n\n Lists implement all of the *common* and *mutable* sequence\n operations. Lists also provide the following additional method:\n\n sort(*, key=None, reverse=None)\n\n This method sorts the list in place, using only "<" comparisons\n between items. Exceptions are not suppressed - if any comparison\n operations fail, the entire sort operation will fail (and the\n list will likely be left in a partially modified state).\n\n "sort()" accepts two arguments that can only be passed by\n keyword (*keyword-only arguments*):\n\n *key* specifies a function of one argument that is used to\n extract a comparison key from each list element (for example,\n "key=str.lower"). The key corresponding to each item in the list\n is calculated once and then used for the entire sorting process.\n The default value of "None" means that list items are sorted\n directly without calculating a separate key value.\n\n The "functools.cmp_to_key()" utility is available to convert a\n 2.x style *cmp* function to a *key* function.\n\n *reverse* is a boolean value. 
If set to "True", then the list\n elements are sorted as if each comparison were reversed.\n\n This method modifies the sequence in place for economy of space\n when sorting a large sequence. To remind users that it operates\n by side effect, it does not return the sorted sequence (use\n "sorted()" to explicitly request a new sorted list instance).\n\n The "sort()" method is guaranteed to be stable. A sort is\n stable if it guarantees not to change the relative order of\n elements that compare equal --- this is helpful for sorting in\n multiple passes (for example, sort by department, then by salary\n grade).\n\n **CPython implementation detail:** While a list is being sorted,\n the effect of attempting to mutate, or even inspect, the list is\n undefined. The C implementation of Python makes the list appear\n empty for the duration, and raises "ValueError" if it can detect\n that the list has been mutated during a sort.\n\n\nTuples\n======\n\nTuples are immutable sequences, typically used to store collections of\nheterogeneous data (such as the 2-tuples produced by the "enumerate()"\nbuilt-in). Tuples are also used for cases where an immutable sequence\nof homogeneous data is needed (such as allowing storage in a "set" or\n"dict" instance).\n\nclass class tuple([iterable])\n\n Tuples may be constructed in a number of ways:\n\n * Using a pair of parentheses to denote the empty tuple: "()"\n\n * Using a trailing comma for a singleton tuple: "a," or "(a,)"\n\n * Separating items with commas: "a, b, c" or "(a, b, c)"\n\n * Using the "tuple()" built-in: "tuple()" or "tuple(iterable)"\n\n The constructor builds a tuple whose items are the same and in the\n same order as *iterable*\'s items. *iterable* may be either a\n sequence, a container that supports iteration, or an iterator\n object. If *iterable* is already a tuple, it is returned\n unchanged. For example, "tuple(\'abc\')" returns "(\'a\', \'b\', \'c\')"\n and "tuple( [1, 2, 3] )" returns "(1, 2, 3)". If no argument is\n given, the constructor creates a new empty tuple, "()".\n\n Note that it is actually the comma which makes a tuple, not the\n parentheses. The parentheses are optional, except in the empty\n tuple case, or when they are needed to avoid syntactic ambiguity.\n For example, "f(a, b, c)" is a function call with three arguments,\n while "f((a, b, c))" is a function call with a 3-tuple as the sole\n argument.\n\n Tuples implement all of the *common* sequence operations.\n\nFor heterogeneous collections of data where access by name is clearer\nthan access by index, "collections.namedtuple()" may be a more\nappropriate choice than a simple tuple object.\n\n\nRanges\n======\n\nThe "range" type represents an immutable sequence of numbers and is\ncommonly used for looping a specific number of times in "for" loops.\n\nclass class range(stop)\nclass class range(start, stop[, step])\n\n The arguments to the range constructor must be integers (either\n built-in "int" or any object that implements the "__index__"\n special method). If the *step* argument is omitted, it defaults to\n "1". If the *start* argument is omitted, it defaults to "0". 
If\n *step* is zero, "ValueError" is raised.\n\n For a positive *step*, the contents of a range "r" are determined\n by the formula "r[i] = start + step*i" where "i >= 0" and "r[i] <\n stop".\n\n For a negative *step*, the contents of the range are still\n determined by the formula "r[i] = start + step*i", but the\n constraints are "i >= 0" and "r[i] > stop".\n\n A range object will be empty if "r[0]" does not meet the value\n constraint. Ranges do support negative indices, but these are\n interpreted as indexing from the end of the sequence determined by\n the positive indices.\n\n Ranges containing absolute values larger than "sys.maxsize" are\n permitted but some features (such as "len()") may raise\n "OverflowError".\n\n Range examples:\n\n >>> list(range(10))\n [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n >>> list(range(1, 11))\n [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n >>> list(range(0, 30, 5))\n [0, 5, 10, 15, 20, 25]\n >>> list(range(0, 10, 3))\n [0, 3, 6, 9]\n >>> list(range(0, -10, -1))\n [0, -1, -2, -3, -4, -5, -6, -7, -8, -9]\n >>> list(range(0))\n []\n >>> list(range(1, 0))\n []\n\n Ranges implement all of the *common* sequence operations except\n concatenation and repetition (due to the fact that range objects\n can only represent sequences that follow a strict pattern and\n repetition and concatenation will usually violate that pattern).\n\nThe advantage of the "range" type over a regular "list" or "tuple" is\nthat a "range" object will always take the same (small) amount of\nmemory, no matter the size of the range it represents (as it only\nstores the "start", "stop" and "step" values, calculating individual\nitems and subranges as needed).\n\nRange objects implement the "collections.abc.Sequence" ABC, and\nprovide features such as containment tests, element index lookup,\nslicing and support for negative indices (see *Sequence Types ---\nlist, tuple, range*):\n\n>>> r = range(0, 20, 2)\n>>> r\nrange(0, 20, 2)\n>>> 11 in r\nFalse\n>>> 10 in r\nTrue\n>>> r.index(10)\n5\n>>> r[5]\n10\n>>> r[:5]\nrange(0, 10, 2)\n>>> r[-1]\n18\n\nTesting range objects for equality with "==" and "!=" compares them as\nsequences. That is, two range objects are considered equal if they\nrepresent the same sequence of values. (Note that two range objects\nthat compare equal might have different "start", "stop" and "step"\nattributes, for example "range(0) == range(2, 1, 3)" or "range(0, 3,\n2) == range(0, 4, 2)".)\n\nChanged in version 3.2: Implement the Sequence ABC. Support slicing\nand negative indices. Test "int" objects for membership in constant\ntime instead of iterating through all items.\n\nChanged in version 3.3: Define \'==\' and \'!=\' to compare range objects\nbased on the sequence of values they define (instead of comparing\nbased on object identity).\n\nNew in version 3.3: The "start", "stop" and "step" attributes.\n', - 'typesseq-mutable': u'\nMutable Sequence Types\n**********************\n\nThe operations in the following table are defined on mutable sequence\ntypes. 
The "collections.abc.MutableSequence" ABC is provided to make\nit easier to correctly implement these operations on custom sequence\ntypes.\n\nIn the table *s* is an instance of a mutable sequence type, *t* is any\niterable object and *x* is an arbitrary object that meets any type and\nvalue restrictions imposed by *s* (for example, "bytearray" only\naccepts integers that meet the value restriction "0 <= x <= 255").\n\n+--------------------------------+----------------------------------+-----------------------+\n| Operation | Result | Notes |\n+================================+==================================+=======================+\n| "s[i] = x" | item *i* of *s* is replaced by | |\n| | *x* | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s[i:j] = t" | slice of *s* from *i* to *j* is | |\n| | replaced by the contents of the | |\n| | iterable *t* | |\n+--------------------------------+----------------------------------+-----------------------+\n| "del s[i:j]" | same as "s[i:j] = []" | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s[i:j:k] = t" | the elements of "s[i:j:k]" are | (1) |\n| | replaced by those of *t* | |\n+--------------------------------+----------------------------------+-----------------------+\n| "del s[i:j:k]" | removes the elements of | |\n| | "s[i:j:k]" from the list | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.append(x)" | appends *x* to the end of the | |\n| | sequence (same as | |\n| | "s[len(s):len(s)] = [x]") | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.clear()" | removes all items from "s" (same | (5) |\n| | as "del s[:]") | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.copy()" | creates a shallow copy of "s" | (5) |\n| | (same as "s[:]") | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.extend(t)" | extends *s* with the contents of | |\n| | *t* (same as "s[len(s):len(s)] = | |\n| | t") | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.insert(i, x)" | inserts *x* into *s* at the | |\n| | index given by *i* (same as | |\n| | "s[i:i] = [x]") | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.pop([i])" | retrieves the item at *i* and | (2) |\n| | also removes it from *s* | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.remove(x)" | remove the first item from *s* | (3) |\n| | where "s[i] == x" | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.reverse()" | reverses the items of *s* in | (4) |\n| | place | |\n+--------------------------------+----------------------------------+-----------------------+\n\nNotes:\n\n1. *t* must have the same length as the slice it is replacing.\n\n2. The optional argument *i* defaults to "-1", so that by default\n the last item is removed and returned.\n\n3. "remove" raises "ValueError" when *x* is not found in *s*.\n\n4. The "reverse()" method modifies the sequence in place for\n economy of space when reversing a large sequence. To remind users\n that it operates by side effect, it does not return the reversed\n sequence.\n\n5. 
"clear()" and "copy()" are included for consistency with the\n interfaces of mutable containers that don\'t support slicing\n operations (such as "dict" and "set")\n\n New in version 3.3: "clear()" and "copy()" methods.\n', + 'typesseq': u'\nSequence Types --- "list", "tuple", "range"\n*******************************************\n\nThere are three basic sequence types: lists, tuples, and range\nobjects. Additional sequence types tailored for processing of *binary\ndata* and *text strings* are described in dedicated sections.\n\n\nCommon Sequence Operations\n==========================\n\nThe operations in the following table are supported by most sequence\ntypes, both mutable and immutable. The "collections.abc.Sequence" ABC\nis provided to make it easier to correctly implement these operations\non custom sequence types.\n\nThis table lists the sequence operations sorted in ascending priority.\nIn the table, *s* and *t* are sequences of the same type, *n*, *i*,\n*j* and *k* are integers and *x* is an arbitrary object that meets any\ntype and value restrictions imposed by *s*.\n\nThe "in" and "not in" operations have the same priorities as the\ncomparison operations. The "+" (concatenation) and "*" (repetition)\noperations have the same priority as the corresponding numeric\noperations.\n\n+----------------------------+----------------------------------+------------+\n| Operation | Result | Notes |\n+============================+==================================+============+\n| "x in s" | "True" if an item of *s* is | (1) |\n| | equal to *x*, else "False" | |\n+----------------------------+----------------------------------+------------+\n| "x not in s" | "False" if an item of *s* is | (1) |\n| | equal to *x*, else "True" | |\n+----------------------------+----------------------------------+------------+\n| "s + t" | the concatenation of *s* and *t* | (6)(7) |\n+----------------------------+----------------------------------+------------+\n| "s * n" or "n * s" | equivalent to adding *s* to | (2)(7) |\n| | itself *n* times | |\n+----------------------------+----------------------------------+------------+\n| "s[i]" | *i*th item of *s*, origin 0 | (3) |\n+----------------------------+----------------------------------+------------+\n| "s[i:j]" | slice of *s* from *i* to *j* | (3)(4) |\n+----------------------------+----------------------------------+------------+\n| "s[i:j:k]" | slice of *s* from *i* to *j* | (3)(5) |\n| | with step *k* | |\n+----------------------------+----------------------------------+------------+\n| "len(s)" | length of *s* | |\n+----------------------------+----------------------------------+------------+\n| "min(s)" | smallest item of *s* | |\n+----------------------------+----------------------------------+------------+\n| "max(s)" | largest item of *s* | |\n+----------------------------+----------------------------------+------------+\n| "s.index(x[, i[, j]])" | index of the first occurrence of | (8) |\n| | *x* in *s* (at or after index | |\n| | *i* and before index *j*) | |\n+----------------------------+----------------------------------+------------+\n| "s.count(x)" | total number of occurrences of | |\n| | *x* in *s* | |\n+----------------------------+----------------------------------+------------+\n\nSequences of the same type also support comparisons. In particular,\ntuples and lists are compared lexicographically by comparing\ncorresponding elements. 
This means that to compare equal, every\nelement must compare equal and the two sequences must be of the same\ntype and have the same length. (For full details see *Comparisons* in\nthe language reference.)\n\nNotes:\n\n1. While the "in" and "not in" operations are used only for simple\n containment testing in the general case, some specialised sequences\n (such as "str", "bytes" and "bytearray") also use them for\n subsequence testing:\n\n >>> "gg" in "eggs"\n True\n\n2. Values of *n* less than "0" are treated as "0" (which yields an\n empty sequence of the same type as *s*). Note that items in the\n sequence *s* are not copied; they are referenced multiple times.\n This often haunts new Python programmers; consider:\n\n >>> lists = [[]] * 3\n >>> lists\n [[], [], []]\n >>> lists[0].append(3)\n >>> lists\n [[3], [3], [3]]\n\n What has happened is that "[[]]" is a one-element list containing\n an empty list, so all three elements of "[[]] * 3" are references\n to this single empty list. Modifying any of the elements of\n "lists" modifies this single list. You can create a list of\n different lists this way:\n\n >>> lists = [[] for i in range(3)]\n >>> lists[0].append(3)\n >>> lists[1].append(5)\n >>> lists[2].append(7)\n >>> lists\n [[3], [5], [7]]\n\n Further explanation is available in the FAQ entry *How do I create\n a multidimensional list?*.\n\n3. If *i* or *j* is negative, the index is relative to the end of\n the string: "len(s) + i" or "len(s) + j" is substituted. But note\n that "-0" is still "0".\n\n4. The slice of *s* from *i* to *j* is defined as the sequence of\n items with index *k* such that "i <= k < j". If *i* or *j* is\n greater than "len(s)", use "len(s)". If *i* is omitted or "None",\n use "0". If *j* is omitted or "None", use "len(s)". If *i* is\n greater than or equal to *j*, the slice is empty.\n\n5. The slice of *s* from *i* to *j* with step *k* is defined as the\n sequence of items with index "x = i + n*k" such that "0 <= n <\n (j-i)/k". In other words, the indices are "i", "i+k", "i+2*k",\n "i+3*k" and so on, stopping when *j* is reached (but never\n including *j*). If *i* or *j* is greater than "len(s)", use\n "len(s)". If *i* or *j* are omitted or "None", they become "end"\n values (which end depends on the sign of *k*). Note, *k* cannot be\n zero. If *k* is "None", it is treated like "1".\n\n6. Concatenating immutable sequences always results in a new\n object. This means that building up a sequence by repeated\n concatenation will have a quadratic runtime cost in the total\n sequence length. To get a linear runtime cost, you must switch to\n one of the alternatives below:\n\n * if concatenating "str" objects, you can build a list and use\n "str.join()" at the end or else write to an "io.StringIO"\n instance and retrieve its value when complete\n\n * if concatenating "bytes" objects, you can similarly use\n "bytes.join()" or "io.BytesIO", or you can do in-place\n concatenation with a "bytearray" object. "bytearray" objects are\n mutable and have an efficient overallocation mechanism\n\n * if concatenating "tuple" objects, extend a "list" instead\n\n * for other types, investigate the relevant class documentation\n\n7. Some sequence types (such as "range") only support item\n sequences that follow specific patterns, and hence don\'t support\n sequence concatenation or repetition.\n\n8. "index" raises "ValueError" when *x* is not found in *s*. 
When\n supported, the additional arguments to the index method allow\n efficient searching of subsections of the sequence. Passing the\n extra arguments is roughly equivalent to using "s[i:j].index(x)",\n only without copying any data and with the returned index being\n relative to the start of the sequence rather than the start of the\n slice.\n\n\nImmutable Sequence Types\n========================\n\nThe only operation that immutable sequence types generally implement\nthat is not also implemented by mutable sequence types is support for\nthe "hash()" built-in.\n\nThis support allows immutable sequences, such as "tuple" instances, to\nbe used as "dict" keys and stored in "set" and "frozenset" instances.\n\nAttempting to hash an immutable sequence that contains unhashable\nvalues will result in "TypeError".\n\n\nMutable Sequence Types\n======================\n\nThe operations in the following table are defined on mutable sequence\ntypes. The "collections.abc.MutableSequence" ABC is provided to make\nit easier to correctly implement these operations on custom sequence\ntypes.\n\nIn the table *s* is an instance of a mutable sequence type, *t* is any\niterable object and *x* is an arbitrary object that meets any type and\nvalue restrictions imposed by *s* (for example, "bytearray" only\naccepts integers that meet the value restriction "0 <= x <= 255").\n\n+--------------------------------+----------------------------------+-----------------------+\n| Operation | Result | Notes |\n+================================+==================================+=======================+\n| "s[i] = x" | item *i* of *s* is replaced by | |\n| | *x* | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s[i:j] = t" | slice of *s* from *i* to *j* is | |\n| | replaced by the contents of the | |\n| | iterable *t* | |\n+--------------------------------+----------------------------------+-----------------------+\n| "del s[i:j]" | same as "s[i:j] = []" | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s[i:j:k] = t" | the elements of "s[i:j:k]" are | (1) |\n| | replaced by those of *t* | |\n+--------------------------------+----------------------------------+-----------------------+\n| "del s[i:j:k]" | removes the elements of | |\n| | "s[i:j:k]" from the list | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.append(x)" | appends *x* to the end of the | |\n| | sequence (same as | |\n| | "s[len(s):len(s)] = [x]") | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.clear()" | removes all items from "s" (same | (5) |\n| | as "del s[:]") | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.copy()" | creates a shallow copy of "s" | (5) |\n| | (same as "s[:]") | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.extend(t)" or "s += t" | extends *s* with the contents of | |\n| | *t* (for the most part the same | |\n| | as "s[len(s):len(s)] = t") | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s *= n" | updates *s* with its contents | (6) |\n| | repeated *n* times | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.insert(i, x)" | inserts *x* into *s* at the | |\n| | index given by *i* (same as | |\n| | "s[i:i] = 
[x]") | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.pop([i])" | retrieves the item at *i* and | (2) |\n| | also removes it from *s* | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.remove(x)" | remove the first item from *s* | (3) |\n| | where "s[i] == x" | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.reverse()" | reverses the items of *s* in | (4) |\n| | place | |\n+--------------------------------+----------------------------------+-----------------------+\n\nNotes:\n\n1. *t* must have the same length as the slice it is replacing.\n\n2. The optional argument *i* defaults to "-1", so that by default\n the last item is removed and returned.\n\n3. "remove" raises "ValueError" when *x* is not found in *s*.\n\n4. The "reverse()" method modifies the sequence in place for\n economy of space when reversing a large sequence. To remind users\n that it operates by side effect, it does not return the reversed\n sequence.\n\n5. "clear()" and "copy()" are included for consistency with the\n interfaces of mutable containers that don\'t support slicing\n operations (such as "dict" and "set")\n\n New in version 3.3: "clear()" and "copy()" methods.\n\n6. The value *n* is an integer, or an object implementing\n "__index__()". Zero and negative values of *n* clear the sequence.\n Items in the sequence are not copied; they are referenced multiple\n times, as explained for "s * n" under *Common Sequence Operations*.\n\n\nLists\n=====\n\nLists are mutable sequences, typically used to store collections of\nhomogeneous items (where the precise degree of similarity will vary by\napplication).\n\nclass class list([iterable])\n\n Lists may be constructed in several ways:\n\n * Using a pair of square brackets to denote the empty list: "[]"\n\n * Using square brackets, separating items with commas: "[a]",\n "[a, b, c]"\n\n * Using a list comprehension: "[x for x in iterable]"\n\n * Using the type constructor: "list()" or "list(iterable)"\n\n The constructor builds a list whose items are the same and in the\n same order as *iterable*\'s items. *iterable* may be either a\n sequence, a container that supports iteration, or an iterator\n object. If *iterable* is already a list, a copy is made and\n returned, similar to "iterable[:]". For example, "list(\'abc\')"\n returns "[\'a\', \'b\', \'c\']" and "list( (1, 2, 3) )" returns "[1, 2,\n 3]". If no argument is given, the constructor creates a new empty\n list, "[]".\n\n Many other operations also produce lists, including the "sorted()"\n built-in.\n\n Lists implement all of the *common* and *mutable* sequence\n operations. Lists also provide the following additional method:\n\n sort(*, key=None, reverse=None)\n\n This method sorts the list in place, using only "<" comparisons\n between items. Exceptions are not suppressed - if any comparison\n operations fail, the entire sort operation will fail (and the\n list will likely be left in a partially modified state).\n\n "sort()" accepts two arguments that can only be passed by\n keyword (*keyword-only arguments*):\n\n *key* specifies a function of one argument that is used to\n extract a comparison key from each list element (for example,\n "key=str.lower"). 
The key corresponding to each item in the list\n is calculated once and then used for the entire sorting process.\n The default value of "None" means that list items are sorted\n directly without calculating a separate key value.\n\n The "functools.cmp_to_key()" utility is available to convert a\n 2.x style *cmp* function to a *key* function.\n\n *reverse* is a boolean value. If set to "True", then the list\n elements are sorted as if each comparison were reversed.\n\n This method modifies the sequence in place for economy of space\n when sorting a large sequence. To remind users that it operates\n by side effect, it does not return the sorted sequence (use\n "sorted()" to explicitly request a new sorted list instance).\n\n The "sort()" method is guaranteed to be stable. A sort is\n stable if it guarantees not to change the relative order of\n elements that compare equal --- this is helpful for sorting in\n multiple passes (for example, sort by department, then by salary\n grade).\n\n **CPython implementation detail:** While a list is being sorted,\n the effect of attempting to mutate, or even inspect, the list is\n undefined. The C implementation of Python makes the list appear\n empty for the duration, and raises "ValueError" if it can detect\n that the list has been mutated during a sort.\n\n\nTuples\n======\n\nTuples are immutable sequences, typically used to store collections of\nheterogeneous data (such as the 2-tuples produced by the "enumerate()"\nbuilt-in). Tuples are also used for cases where an immutable sequence\nof homogeneous data is needed (such as allowing storage in a "set" or\n"dict" instance).\n\nclass class tuple([iterable])\n\n Tuples may be constructed in a number of ways:\n\n * Using a pair of parentheses to denote the empty tuple: "()"\n\n * Using a trailing comma for a singleton tuple: "a," or "(a,)"\n\n * Separating items with commas: "a, b, c" or "(a, b, c)"\n\n * Using the "tuple()" built-in: "tuple()" or "tuple(iterable)"\n\n The constructor builds a tuple whose items are the same and in the\n same order as *iterable*\'s items. *iterable* may be either a\n sequence, a container that supports iteration, or an iterator\n object. If *iterable* is already a tuple, it is returned\n unchanged. For example, "tuple(\'abc\')" returns "(\'a\', \'b\', \'c\')"\n and "tuple( [1, 2, 3] )" returns "(1, 2, 3)". If no argument is\n given, the constructor creates a new empty tuple, "()".\n\n Note that it is actually the comma which makes a tuple, not the\n parentheses. The parentheses are optional, except in the empty\n tuple case, or when they are needed to avoid syntactic ambiguity.\n For example, "f(a, b, c)" is a function call with three arguments,\n while "f((a, b, c))" is a function call with a 3-tuple as the sole\n argument.\n\n Tuples implement all of the *common* sequence operations.\n\nFor heterogeneous collections of data where access by name is clearer\nthan access by index, "collections.namedtuple()" may be a more\nappropriate choice than a simple tuple object.\n\n\nRanges\n======\n\nThe "range" type represents an immutable sequence of numbers and is\ncommonly used for looping a specific number of times in "for" loops.\n\nclass class range(stop)\nclass class range(start, stop[, step])\n\n The arguments to the range constructor must be integers (either\n built-in "int" or any object that implements the "__index__"\n special method). If the *step* argument is omitted, it defaults to\n "1". If the *start* argument is omitted, it defaults to "0". 
If\n *step* is zero, "ValueError" is raised.\n\n For a positive *step*, the contents of a range "r" are determined\n by the formula "r[i] = start + step*i" where "i >= 0" and "r[i] <\n stop".\n\n For a negative *step*, the contents of the range are still\n determined by the formula "r[i] = start + step*i", but the\n constraints are "i >= 0" and "r[i] > stop".\n\n A range object will be empty if "r[0]" does not meet the value\n constraint. Ranges do support negative indices, but these are\n interpreted as indexing from the end of the sequence determined by\n the positive indices.\n\n Ranges containing absolute values larger than "sys.maxsize" are\n permitted but some features (such as "len()") may raise\n "OverflowError".\n\n Range examples:\n\n >>> list(range(10))\n [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n >>> list(range(1, 11))\n [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n >>> list(range(0, 30, 5))\n [0, 5, 10, 15, 20, 25]\n >>> list(range(0, 10, 3))\n [0, 3, 6, 9]\n >>> list(range(0, -10, -1))\n [0, -1, -2, -3, -4, -5, -6, -7, -8, -9]\n >>> list(range(0))\n []\n >>> list(range(1, 0))\n []\n\n Ranges implement all of the *common* sequence operations except\n concatenation and repetition (due to the fact that range objects\n can only represent sequences that follow a strict pattern and\n repetition and concatenation will usually violate that pattern).\n\nThe advantage of the "range" type over a regular "list" or "tuple" is\nthat a "range" object will always take the same (small) amount of\nmemory, no matter the size of the range it represents (as it only\nstores the "start", "stop" and "step" values, calculating individual\nitems and subranges as needed).\n\nRange objects implement the "collections.abc.Sequence" ABC, and\nprovide features such as containment tests, element index lookup,\nslicing and support for negative indices (see *Sequence Types ---\nlist, tuple, range*):\n\n>>> r = range(0, 20, 2)\n>>> r\nrange(0, 20, 2)\n>>> 11 in r\nFalse\n>>> 10 in r\nTrue\n>>> r.index(10)\n5\n>>> r[5]\n10\n>>> r[:5]\nrange(0, 10, 2)\n>>> r[-1]\n18\n\nTesting range objects for equality with "==" and "!=" compares them as\nsequences. That is, two range objects are considered equal if they\nrepresent the same sequence of values. (Note that two range objects\nthat compare equal might have different "start", "stop" and "step"\nattributes, for example "range(0) == range(2, 1, 3)" or "range(0, 3,\n2) == range(0, 4, 2)".)\n\nChanged in version 3.2: Implement the Sequence ABC. Support slicing\nand negative indices. Test "int" objects for membership in constant\ntime instead of iterating through all items.\n\nChanged in version 3.3: Define \'==\' and \'!=\' to compare range objects\nbased on the sequence of values they define (instead of comparing\nbased on object identity).\n\nNew in version 3.3: The "start", "stop" and "step" attributes.\n', + 'typesseq-mutable': u'\nMutable Sequence Types\n**********************\n\nThe operations in the following table are defined on mutable sequence\ntypes. 
The "collections.abc.MutableSequence" ABC is provided to make\nit easier to correctly implement these operations on custom sequence\ntypes.\n\nIn the table *s* is an instance of a mutable sequence type, *t* is any\niterable object and *x* is an arbitrary object that meets any type and\nvalue restrictions imposed by *s* (for example, "bytearray" only\naccepts integers that meet the value restriction "0 <= x <= 255").\n\n+--------------------------------+----------------------------------+-----------------------+\n| Operation | Result | Notes |\n+================================+==================================+=======================+\n| "s[i] = x" | item *i* of *s* is replaced by | |\n| | *x* | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s[i:j] = t" | slice of *s* from *i* to *j* is | |\n| | replaced by the contents of the | |\n| | iterable *t* | |\n+--------------------------------+----------------------------------+-----------------------+\n| "del s[i:j]" | same as "s[i:j] = []" | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s[i:j:k] = t" | the elements of "s[i:j:k]" are | (1) |\n| | replaced by those of *t* | |\n+--------------------------------+----------------------------------+-----------------------+\n| "del s[i:j:k]" | removes the elements of | |\n| | "s[i:j:k]" from the list | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.append(x)" | appends *x* to the end of the | |\n| | sequence (same as | |\n| | "s[len(s):len(s)] = [x]") | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.clear()" | removes all items from "s" (same | (5) |\n| | as "del s[:]") | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.copy()" | creates a shallow copy of "s" | (5) |\n| | (same as "s[:]") | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.extend(t)" or "s += t" | extends *s* with the contents of | |\n| | *t* (for the most part the same | |\n| | as "s[len(s):len(s)] = t") | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s *= n" | updates *s* with its contents | (6) |\n| | repeated *n* times | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.insert(i, x)" | inserts *x* into *s* at the | |\n| | index given by *i* (same as | |\n| | "s[i:i] = [x]") | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.pop([i])" | retrieves the item at *i* and | (2) |\n| | also removes it from *s* | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.remove(x)" | remove the first item from *s* | (3) |\n| | where "s[i] == x" | |\n+--------------------------------+----------------------------------+-----------------------+\n| "s.reverse()" | reverses the items of *s* in | (4) |\n| | place | |\n+--------------------------------+----------------------------------+-----------------------+\n\nNotes:\n\n1. *t* must have the same length as the slice it is replacing.\n\n2. The optional argument *i* defaults to "-1", so that by default\n the last item is removed and returned.\n\n3. "remove" raises "ValueError" when *x* is not found in *s*.\n\n4. 
The "reverse()" method modifies the sequence in place for\n economy of space when reversing a large sequence. To remind users\n that it operates by side effect, it does not return the reversed\n sequence.\n\n5. "clear()" and "copy()" are included for consistency with the\n interfaces of mutable containers that don\'t support slicing\n operations (such as "dict" and "set")\n\n New in version 3.3: "clear()" and "copy()" methods.\n\n6. The value *n* is an integer, or an object implementing\n "__index__()". Zero and negative values of *n* clear the sequence.\n Items in the sequence are not copied; they are referenced multiple\n times, as explained for "s * n" under *Common Sequence Operations*.\n', 'unary': u'\nUnary arithmetic and bitwise operations\n***************************************\n\nAll unary arithmetic and bitwise operations have the same priority:\n\n u_expr ::= power | "-" u_expr | "+" u_expr | "~" u_expr\n\nThe unary "-" (minus) operator yields the negation of its numeric\nargument.\n\nThe unary "+" (plus) operator yields its numeric argument unchanged.\n\nThe unary "~" (invert) operator yields the bitwise inversion of its\ninteger argument. The bitwise inversion of "x" is defined as\n"-(x+1)". It only applies to integral numbers.\n\nIn all three cases, if the argument does not have the proper type, a\n"TypeError" exception is raised.\n', 'while': u'\nThe "while" statement\n*********************\n\nThe "while" statement is used for repeated execution as long as an\nexpression is true:\n\n while_stmt ::= "while" expression ":" suite\n ["else" ":" suite]\n\nThis repeatedly tests the expression and, if it is true, executes the\nfirst suite; if the expression is false (which may be the first time\nit is tested) the suite of the "else" clause, if present, is executed\nand the loop terminates.\n\nA "break" statement executed in the first suite terminates the loop\nwithout executing the "else" clause\'s suite. A "continue" statement\nexecuted in the first suite skips the rest of the suite and goes back\nto testing the expression.\n', 'with': u'\nThe "with" statement\n********************\n\nThe "with" statement is used to wrap the execution of a block with\nmethods defined by a context manager (see section *With Statement\nContext Managers*). This allows common "try"..."except"..."finally"\nusage patterns to be encapsulated for convenient reuse.\n\n with_stmt ::= "with" with_item ("," with_item)* ":" suite\n with_item ::= expression ["as" target]\n\nThe execution of the "with" statement with one "item" proceeds as\nfollows:\n\n1. The context expression (the expression given in the "with_item")\n is evaluated to obtain a context manager.\n\n2. The context manager\'s "__exit__()" is loaded for later use.\n\n3. The context manager\'s "__enter__()" method is invoked.\n\n4. If a target was included in the "with" statement, the return\n value from "__enter__()" is assigned to it.\n\n Note: The "with" statement guarantees that if the "__enter__()"\n method returns without an error, then "__exit__()" will always be\n called. Thus, if an error occurs during the assignment to the\n target list, it will be treated the same as an error occurring\n within the suite would be. See step 6 below.\n\n5. The suite is executed.\n\n6. The context manager\'s "__exit__()" method is invoked. If an\n exception caused the suite to be exited, its type, value, and\n traceback are passed as arguments to "__exit__()". 
Otherwise, three\n "None" arguments are supplied.\n\n If the suite was exited due to an exception, and the return value\n from the "__exit__()" method was false, the exception is reraised.\n If the return value was true, the exception is suppressed, and\n execution continues with the statement following the "with"\n statement.\n\n If the suite was exited for any reason other than an exception, the\n return value from "__exit__()" is ignored, and execution proceeds\n at the normal location for the kind of exit that was taken.\n\nWith more than one item, the context managers are processed as if\nmultiple "with" statements were nested:\n\n with A() as a, B() as b:\n suite\n\nis equivalent to\n\n with A() as a:\n with B() as b:\n suite\n\nChanged in version 3.1: Support for multiple context expressions.\n\nSee also: **PEP 0343** - The "with" statement\n\n The specification, background, and examples for the Python "with"\n statement.\n', -- Repository URL: https://hg.python.org/cpython From python-checkins at python.org Mon Dec 7 01:16:09 2015 From: python-checkins at python.org (larry.hastings) Date: Mon, 07 Dec 2015 06:16:09 +0000 Subject: [Python-checkins] =?utf-8?b?Y3B5dGhvbiAobWVyZ2UgMy40IC0+IDMuNCk6?= =?utf-8?q?_Merge=2E?= Message-ID: <20151207061609.33677.68544@psf.io> https://hg.python.org/cpython/rev/3b94245f9cf1 changeset: 99496:3b94245f9cf1 branch: 3.4 parent: 99480:2df330606cd0 parent: 99495:04a1f9fd0802 user: Larry Hastings date: Sun Dec 06 21:58:18 2015 -0800 summary: Merge. files: .hgtags | 1 + Include/patchlevel.h | 8 +++--- Lib/pydoc_data/topics.py | 30 ++++++++++++++-------------- Misc/NEWS | 17 ++++++++++++++- 4 files changed, 35 insertions(+), 21 deletions(-) diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -144,3 +144,4 @@ ab2c023a9432f16652e89c404bbc84aa91bf55af v3.4.2 69dd528ca6255a66c37cc5cf680e8357d108b036 v3.4.3rc1 b4cbecbc0781e89a309d03b60a1f75f8499250e6 v3.4.3 +04f3f725896c6961212c3a12e8ac25be6958f4fa v3.4.4rc1 diff --git a/Include/patchlevel.h b/Include/patchlevel.h --- a/Include/patchlevel.h +++ b/Include/patchlevel.h @@ -18,12 +18,12 @@ /*--start constants--*/ #define PY_MAJOR_VERSION 3 #define PY_MINOR_VERSION 4 -#define PY_MICRO_VERSION 3 -#define PY_RELEASE_LEVEL PY_RELEASE_LEVEL_FINAL -#define PY_RELEASE_SERIAL 0 +#define PY_MICRO_VERSION 4 +#define PY_RELEASE_LEVEL PY_RELEASE_LEVEL_GAMMA +#define PY_RELEASE_SERIAL 1 /* Version as a string */ -#define PY_VERSION "3.4.3+" +#define PY_VERSION "3.4.4rc1+" /*--end constants--*/ /* Version as a single 4-byte hex number, e.g. 0x010502B2 == 1.5.2b2. diff --git a/Lib/pydoc_data/topics.py b/Lib/pydoc_data/topics.py --- a/Lib/pydoc_data/topics.py +++ b/Lib/pydoc_data/topics.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Autogenerated by Sphinx on Sun Feb 22 23:52:05 2015 +# Autogenerated by Sphinx on Sun Dec 6 05:51:21 2015 topics = {'assert': u'\nThe "assert" statement\n**********************\n\nAssert statements are a convenient way to insert debugging assertions\ninto a program:\n\n assert_stmt ::= "assert" expression ["," expression]\n\nThe simple form, "assert expression", is equivalent to\n\n if __debug__:\n if not expression: raise AssertionError\n\nThe extended form, "assert expression1, expression2", is equivalent to\n\n if __debug__:\n if not expression1: raise AssertionError(expression2)\n\nThese equivalences assume that "__debug__" and "AssertionError" refer\nto the built-in variables with those names. 
In the current\nimplementation, the built-in variable "__debug__" is "True" under\nnormal circumstances, "False" when optimization is requested (command\nline option -O). The current code generator emits no code for an\nassert statement when optimization is requested at compile time. Note\nthat it is unnecessary to include the source code for the expression\nthat failed in the error message; it will be displayed as part of the\nstack trace.\n\nAssignments to "__debug__" are illegal. The value for the built-in\nvariable is determined when the interpreter starts.\n', 'assignment': u'\nAssignment statements\n*********************\n\nAssignment statements are used to (re)bind names to values and to\nmodify attributes or items of mutable objects:\n\n assignment_stmt ::= (target_list "=")+ (expression_list | yield_expression)\n target_list ::= target ("," target)* [","]\n target ::= identifier\n | "(" target_list ")"\n | "[" target_list "]"\n | attributeref\n | subscription\n | slicing\n | "*" target\n\n(See section *Primaries* for the syntax definitions for\n*attributeref*, *subscription*, and *slicing*.)\n\nAn assignment statement evaluates the expression list (remember that\nthis can be a single expression or a comma-separated list, the latter\nyielding a tuple) and assigns the single resulting object to each of\nthe target lists, from left to right.\n\nAssignment is defined recursively depending on the form of the target\n(list). When a target is part of a mutable object (an attribute\nreference, subscription or slicing), the mutable object must\nultimately perform the assignment and decide about its validity, and\nmay raise an exception if the assignment is unacceptable. The rules\nobserved by various types and the exceptions raised are given with the\ndefinition of the object types (see section *The standard type\nhierarchy*).\n\nAssignment of an object to a target list, optionally enclosed in\nparentheses or square brackets, is recursively defined as follows.\n\n* If the target list is a single target: The object is assigned to\n that target.\n\n* If the target list is a comma-separated list of targets: The\n object must be an iterable with the same number of items as there\n are targets in the target list, and the items are assigned, from\n left to right, to the corresponding targets.\n\n * If the target list contains one target prefixed with an\n asterisk, called a "starred" target: The object must be a sequence\n with at least as many items as there are targets in the target\n list, minus one. The first items of the sequence are assigned,\n from left to right, to the targets before the starred target. The\n final items of the sequence are assigned to the targets after the\n starred target. A list of the remaining items in the sequence is\n then assigned to the starred target (the list can be empty).\n\n * Else: The object must be a sequence with the same number of\n items as there are targets in the target list, and the items are\n assigned, from left to right, to the corresponding targets.\n\nAssignment of an object to a single target is recursively defined as\nfollows.\n\n* If the target is an identifier (name):\n\n * If the name does not occur in a "global" or "nonlocal" statement\n in the current code block: the name is bound to the object in the\n current local namespace.\n\n * Otherwise: the name is bound to the object in the global\n namespace or the outer namespace determined by "nonlocal",\n respectively.\n\n The name is rebound if it was already bound. 
This may cause the\n reference count for the object previously bound to the name to reach\n zero, causing the object to be deallocated and its destructor (if it\n has one) to be called.\n\n* If the target is a target list enclosed in parentheses or in\n square brackets: The object must be an iterable with the same number\n of items as there are targets in the target list, and its items are\n assigned, from left to right, to the corresponding targets.\n\n* If the target is an attribute reference: The primary expression in\n the reference is evaluated. It should yield an object with\n assignable attributes; if this is not the case, "TypeError" is\n raised. That object is then asked to assign the assigned object to\n the given attribute; if it cannot perform the assignment, it raises\n an exception (usually but not necessarily "AttributeError").\n\n Note: If the object is a class instance and the attribute reference\n occurs on both sides of the assignment operator, the RHS expression,\n "a.x" can access either an instance attribute or (if no instance\n attribute exists) a class attribute. The LHS target "a.x" is always\n set as an instance attribute, creating it if necessary. Thus, the\n two occurrences of "a.x" do not necessarily refer to the same\n attribute: if the RHS expression refers to a class attribute, the\n LHS creates a new instance attribute as the target of the\n assignment:\n\n class Cls:\n x = 3 # class variable\n inst = Cls()\n inst.x = inst.x + 1 # writes inst.x as 4 leaving Cls.x as 3\n\n This description does not necessarily apply to descriptor\n attributes, such as properties created with "property()".\n\n* If the target is a subscription: The primary expression in the\n reference is evaluated. It should yield either a mutable sequence\n object (such as a list) or a mapping object (such as a dictionary).\n Next, the subscript expression is evaluated.\n\n If the primary is a mutable sequence object (such as a list), the\n subscript must yield an integer. If it is negative, the sequence\'s\n length is added to it. The resulting value must be a nonnegative\n integer less than the sequence\'s length, and the sequence is asked\n to assign the assigned object to its item with that index. If the\n index is out of range, "IndexError" is raised (assignment to a\n subscripted sequence cannot add new items to a list).\n\n If the primary is a mapping object (such as a dictionary), the\n subscript must have a type compatible with the mapping\'s key type,\n and the mapping is then asked to create a key/datum pair which maps\n the subscript to the assigned object. This can either replace an\n existing key/value pair with the same key value, or insert a new\n key/value pair (if no key with the same value existed).\n\n For user-defined objects, the "__setitem__()" method is called with\n appropriate arguments.\n\n* If the target is a slicing: The primary expression in the\n reference is evaluated. It should yield a mutable sequence object\n (such as a list). The assigned object should be a sequence object\n of the same type. Next, the lower and upper bound expressions are\n evaluated, insofar they are present; defaults are zero and the\n sequence\'s length. The bounds should evaluate to integers. If\n either bound is negative, the sequence\'s length is added to it. The\n resulting bounds are clipped to lie between zero and the sequence\'s\n length, inclusive. Finally, the sequence object is asked to replace\n the slice with the items of the assigned sequence. 
The length of\n the slice may be different from the length of the assigned sequence,\n thus changing the length of the target sequence, if the target\n sequence allows it.\n\n**CPython implementation detail:** In the current implementation, the\nsyntax for targets is taken to be the same as for expressions, and\ninvalid syntax is rejected during the code generation phase, causing\nless detailed error messages.\n\nAlthough the definition of assignment implies that overlaps between\nthe left-hand side and the right-hand side are \'simultanenous\' (for\nexample "a, b = b, a" swaps two variables), overlaps *within* the\ncollection of assigned-to variables occur left-to-right, sometimes\nresulting in confusion. For instance, the following program prints\n"[0, 2]":\n\n x = [0, 1]\n i = 0\n i, x[i] = 1, 2 # i is updated, then x[i] is updated\n print(x)\n\nSee also: **PEP 3132** - Extended Iterable Unpacking\n\n The specification for the "*target" feature.\n\n\nAugmented assignment statements\n===============================\n\nAugmented assignment is the combination, in a single statement, of a\nbinary operation and an assignment statement:\n\n augmented_assignment_stmt ::= augtarget augop (expression_list | yield_expression)\n augtarget ::= identifier | attributeref | subscription | slicing\n augop ::= "+=" | "-=" | "*=" | "/=" | "//=" | "%=" | "**="\n | ">>=" | "<<=" | "&=" | "^=" | "|="\n\n(See section *Primaries* for the syntax definitions of the last three\nsymbols.)\n\nAn augmented assignment evaluates the target (which, unlike normal\nassignment statements, cannot be an unpacking) and the expression\nlist, performs the binary operation specific to the type of assignment\non the two operands, and assigns the result to the original target.\nThe target is only evaluated once.\n\nAn augmented assignment expression like "x += 1" can be rewritten as\n"x = x + 1" to achieve a similar, but not exactly equal effect. In the\naugmented version, "x" is only evaluated once. Also, when possible,\nthe actual operation is performed *in-place*, meaning that rather than\ncreating a new object and assigning that to the target, the old object\nis modified instead.\n\nUnlike normal assignments, augmented assignments evaluate the left-\nhand side *before* evaluating the right-hand side. For example, "a[i]\n+= f(x)" first looks-up "a[i]", then it evaluates "f(x)" and performs\nthe addition, and lastly, it writes the result back to "a[i]".\n\nWith the exception of assigning to tuples and multiple targets in a\nsingle statement, the assignment done by augmented assignment\nstatements is handled the same way as normal assignments. Similarly,\nwith the exception of the possible *in-place* behavior, the binary\noperation performed by augmented assignment is the same as the normal\nbinary operations.\n\nFor targets which are attribute references, the same *caveat about\nclass and instance attributes* applies as for regular assignments.\n', 'atom-identifiers': u'\nIdentifiers (Names)\n*******************\n\nAn identifier occurring as an atom is a name. See section\n*Identifiers and keywords* for lexical definition and section *Naming\nand binding* for documentation of naming and binding.\n\nWhen the name is bound to an object, evaluation of the atom yields\nthat object. 
When a name is not bound, an attempt to evaluate it\nraises a "NameError" exception.\n\n**Private name mangling:** When an identifier that textually occurs in\na class definition begins with two or more underscore characters and\ndoes not end in two or more underscores, it is considered a *private\nname* of that class. Private names are transformed to a longer form\nbefore code is generated for them. The transformation inserts the\nclass name, with leading underscores removed and a single underscore\ninserted, in front of the name. For example, the identifier "__spam"\noccurring in a class named "Ham" will be transformed to "_Ham__spam".\nThis transformation is independent of the syntactical context in which\nthe identifier is used. If the transformed name is extremely long\n(longer than 255 characters), implementation defined truncation may\nhappen. If the class name consists only of underscores, no\ntransformation is done.\n', @@ -18,19 +18,19 @@ 'callable-types': u'\nEmulating callable objects\n**************************\n\nobject.__call__(self[, args...])\n\n Called when the instance is "called" as a function; if this method\n is defined, "x(arg1, arg2, ...)" is a shorthand for\n "x.__call__(arg1, arg2, ...)".\n', 'calls': u'\nCalls\n*****\n\nA call calls a callable object (e.g., a *function*) with a possibly\nempty series of *arguments*:\n\n call ::= primary "(" [argument_list [","] | comprehension] ")"\n argument_list ::= positional_arguments ["," keyword_arguments]\n ["," "*" expression] ["," keyword_arguments]\n ["," "**" expression]\n | keyword_arguments ["," "*" expression]\n ["," keyword_arguments] ["," "**" expression]\n | "*" expression ["," keyword_arguments] ["," "**" expression]\n | "**" expression\n positional_arguments ::= expression ("," expression)*\n keyword_arguments ::= keyword_item ("," keyword_item)*\n keyword_item ::= identifier "=" expression\n\nAn optional trailing comma may be present after the positional and\nkeyword arguments but does not affect the semantics.\n\nThe primary must evaluate to a callable object (user-defined\nfunctions, built-in functions, methods of built-in objects, class\nobjects, methods of class instances, and all objects having a\n"__call__()" method are callable). All argument expressions are\nevaluated before the call is attempted. Please refer to section\n*Function definitions* for the syntax of formal *parameter* lists.\n\nIf keyword arguments are present, they are first converted to\npositional arguments, as follows. First, a list of unfilled slots is\ncreated for the formal parameters. If there are N positional\narguments, they are placed in the first N slots. Next, for each\nkeyword argument, the identifier is used to determine the\ncorresponding slot (if the identifier is the same as the first formal\nparameter name, the first slot is used, and so on). If the slot is\nalready filled, a "TypeError" exception is raised. Otherwise, the\nvalue of the argument is placed in the slot, filling it (even if the\nexpression is "None", it fills the slot). When all arguments have\nbeen processed, the slots that are still unfilled are filled with the\ncorresponding default value from the function definition. (Default\nvalues are calculated, once, when the function is defined; thus, a\nmutable object such as a list or dictionary used as default value will\nbe shared by all calls that don\'t specify an argument value for the\ncorresponding slot; this should usually be avoided.) 
If there are any\nunfilled slots for which no default value is specified, a "TypeError"\nexception is raised. Otherwise, the list of filled slots is used as\nthe argument list for the call.\n\n**CPython implementation detail:** An implementation may provide\nbuilt-in functions whose positional parameters do not have names, even\nif they are \'named\' for the purpose of documentation, and which\ntherefore cannot be supplied by keyword. In CPython, this is the case\nfor functions implemented in C that use "PyArg_ParseTuple()" to parse\ntheir arguments.\n\nIf there are more positional arguments than there are formal parameter\nslots, a "TypeError" exception is raised, unless a formal parameter\nusing the syntax "*identifier" is present; in this case, that formal\nparameter receives a tuple containing the excess positional arguments\n(or an empty tuple if there were no excess positional arguments).\n\nIf any keyword argument does not correspond to a formal parameter\nname, a "TypeError" exception is raised, unless a formal parameter\nusing the syntax "**identifier" is present; in this case, that formal\nparameter receives a dictionary containing the excess keyword\narguments (using the keywords as keys and the argument values as\ncorresponding values), or a (new) empty dictionary if there were no\nexcess keyword arguments.\n\nIf the syntax "*expression" appears in the function call, "expression"\nmust evaluate to an iterable. Elements from this iterable are treated\nas if they were additional positional arguments; if there are\npositional arguments *x1*, ..., *xN*, and "expression" evaluates to a\nsequence *y1*, ..., *yM*, this is equivalent to a call with M+N\npositional arguments *x1*, ..., *xN*, *y1*, ..., *yM*.\n\nA consequence of this is that although the "*expression" syntax may\nappear *after* some keyword arguments, it is processed *before* the\nkeyword arguments (and the "**expression" argument, if any -- see\nbelow). So:\n\n >>> def f(a, b):\n ... print(a, b)\n ...\n >>> f(b=1, *(2,))\n 2 1\n >>> f(a=1, *(2,))\n Traceback (most recent call last):\n File "", line 1, in ?\n TypeError: f() got multiple values for keyword argument \'a\'\n >>> f(1, *(2,))\n 1 2\n\nIt is unusual for both keyword arguments and the "*expression" syntax\nto be used in the same call, so in practice this confusion does not\narise.\n\nIf the syntax "**expression" appears in the function call,\n"expression" must evaluate to a mapping, the contents of which are\ntreated as additional keyword arguments. In the case of a keyword\nappearing in both "expression" and as an explicit keyword argument, a\n"TypeError" exception is raised.\n\nFormal parameters using the syntax "*identifier" or "**identifier"\ncannot be used as positional argument slots or as keyword argument\nnames.\n\nA call always returns some value, possibly "None", unless it raises an\nexception. How this value is computed depends on the type of the\ncallable object.\n\nIf it is---\n\na user-defined function:\n The code block for the function is executed, passing it the\n argument list. The first thing the code block will do is bind the\n formal parameters to the arguments; this is described in section\n *Function definitions*. 
When the code block executes a "return"\n statement, this specifies the return value of the function call.\n\na built-in function or method:\n The result is up to the interpreter; see *Built-in Functions* for\n the descriptions of built-in functions and methods.\n\na class object:\n A new instance of that class is returned.\n\na class instance method:\n The corresponding user-defined function is called, with an argument\n list that is one longer than the argument list of the call: the\n instance becomes the first argument.\n\na class instance:\n The class must define a "__call__()" method; the effect is then the\n same as if that method was called.\n', 'class': u'\nClass definitions\n*****************\n\nA class definition defines a class object (see section *The standard\ntype hierarchy*):\n\n classdef ::= [decorators] "class" classname [inheritance] ":" suite\n inheritance ::= "(" [parameter_list] ")"\n classname ::= identifier\n\nA class definition is an executable statement. The inheritance list\nusually gives a list of base classes (see *Customizing class creation*\nfor more advanced uses), so each item in the list should evaluate to a\nclass object which allows subclassing. Classes without an inheritance\nlist inherit, by default, from the base class "object"; hence,\n\n class Foo:\n pass\n\nis equivalent to\n\n class Foo(object):\n pass\n\nThe class\'s suite is then executed in a new execution frame (see\n*Naming and binding*), using a newly created local namespace and the\noriginal global namespace. (Usually, the suite contains mostly\nfunction definitions.) When the class\'s suite finishes execution, its\nexecution frame is discarded but its local namespace is saved. [4] A\nclass object is then created using the inheritance list for the base\nclasses and the saved local namespace for the attribute dictionary.\nThe class name is bound to this class object in the original local\nnamespace.\n\nClass creation can be customized heavily using *metaclasses*.\n\nClasses can also be decorated: just like when decorating functions,\n\n @f1(arg)\n @f2\n class Foo: pass\n\nis equivalent to\n\n class Foo: pass\n Foo = f1(arg)(f2(Foo))\n\nThe evaluation rules for the decorator expressions are the same as for\nfunction decorators. The result must be a class object, which is then\nbound to the class name.\n\n**Programmer\'s note:** Variables defined in the class definition are\nclass attributes; they are shared by instances. Instance attributes\ncan be set in a method with "self.name = value". Both class and\ninstance attributes are accessible through the notation ""self.name"",\nand an instance attribute hides a class attribute with the same name\nwhen accessed in this way. Class attributes can be used as defaults\nfor instance attributes, but using mutable values there can lead to\nunexpected results. *Descriptors* can be used to create instance\nvariables with different implementation details.\n\nSee also: **PEP 3115** - Metaclasses in Python 3 **PEP 3129** -\n Class Decorators\n\n-[ Footnotes ]-\n\n[1] The exception is propagated to the invocation stack unless\n there is a "finally" clause which happens to raise another\n exception. 
That new exception causes the old one to be lost.\n\n[2] Currently, control "flows off the end" except in the case of\n an exception or the execution of a "return", "continue", or\n "break" statement.\n\n[3] A string literal appearing as the first statement in the\n function body is transformed into the function\'s "__doc__"\n attribute and therefore the function\'s *docstring*.\n\n[4] A string literal appearing as the first statement in the class\n body is transformed into the namespace\'s "__doc__" item and\n therefore the class\'s *docstring*.\n', - 'comparisons': u'\nComparisons\n***********\n\nUnlike C, all comparison operations in Python have the same priority,\nwhich is lower than that of any arithmetic, shifting or bitwise\noperation. Also unlike C, expressions like "a < b < c" have the\ninterpretation that is conventional in mathematics:\n\n comparison ::= or_expr ( comp_operator or_expr )*\n comp_operator ::= "<" | ">" | "==" | ">=" | "<=" | "!="\n | "is" ["not"] | ["not"] "in"\n\nComparisons yield boolean values: "True" or "False".\n\nComparisons can be chained arbitrarily, e.g., "x < y <= z" is\nequivalent to "x < y and y <= z", except that "y" is evaluated only\nonce (but in both cases "z" is not evaluated at all when "x < y" is\nfound to be false).\n\nFormally, if *a*, *b*, *c*, ..., *y*, *z* are expressions and *op1*,\n*op2*, ..., *opN* are comparison operators, then "a op1 b op2 c ... y\nopN z" is equivalent to "a op1 b and b op2 c and ... y opN z", except\nthat each expression is evaluated at most once.\n\nNote that "a op1 b op2 c" doesn\'t imply any kind of comparison between\n*a* and *c*, so that, e.g., "x < y > z" is perfectly legal (though\nperhaps not pretty).\n\nThe operators "<", ">", "==", ">=", "<=", and "!=" compare the values\nof two objects. The objects need not have the same type. If both are\nnumbers, they are converted to a common type. Otherwise, the "==" and\n"!=" operators *always* consider objects of different types to be\nunequal, while the "<", ">", ">=" and "<=" operators raise a\n"TypeError" when comparing objects of different types that do not\nimplement these operators for the given pair of types. You can\ncontrol comparison behavior of objects of non-built-in types by\ndefining rich comparison methods like "__gt__()", described in section\n*Basic customization*.\n\nComparison of objects of the same type depends on the type:\n\n* Numbers are compared arithmetically.\n\n* The values "float(\'NaN\')" and "Decimal(\'NaN\')" are special. The\n are identical to themselves, "x is x" but are not equal to\n themselves, "x != x". Additionally, comparing any value to a\n not-a-number value will return "False". For example, both "3 <\n float(\'NaN\')" and "float(\'NaN\') < 3" will return "False".\n\n* Bytes objects are compared lexicographically using the numeric\n values of their elements.\n\n* Strings are compared lexicographically using the numeric\n equivalents (the result of the built-in function "ord()") of their\n characters. [3] String and bytes object can\'t be compared!\n\n* Tuples and lists are compared lexicographically using comparison\n of corresponding elements. This means that to compare equal, each\n element must compare equal and the two sequences must be of the same\n type and have the same length.\n\n If not equal, the sequences are ordered the same as their first\n differing elements. For example, "[1,2,x] <= [1,2,y]" has the same\n value as "x <= y". 
If the corresponding element does not exist, the\n shorter sequence is ordered first (for example, "[1,2] < [1,2,3]").\n\n* Mappings (dictionaries) compare equal if and only if they have the\n same "(key, value)" pairs. Order comparisons "(\'<\', \'<=\', \'>=\',\n \'>\')" raise "TypeError".\n\n* Sets and frozensets define comparison operators to mean subset and\n superset tests. Those relations do not define total orderings (the\n two sets "{1,2}" and {2,3} are not equal, nor subsets of one\n another, nor supersets of one another). Accordingly, sets are not\n appropriate arguments for functions which depend on total ordering.\n For example, "min()", "max()", and "sorted()" produce undefined\n results given a list of sets as inputs.\n\n* Most other objects of built-in types compare unequal unless they\n are the same object; the choice whether one object is considered\n smaller or larger than another one is made arbitrarily but\n consistently within one execution of a program.\n\nComparison of objects of differing types depends on whether either of\nthe types provide explicit support for the comparison. Most numeric\ntypes can be compared with one another. When cross-type comparison is\nnot supported, the comparison method returns "NotImplemented".\n\nThe operators "in" and "not in" test for membership. "x in s"\nevaluates to true if *x* is a member of *s*, and false otherwise. "x\nnot in s" returns the negation of "x in s". All built-in sequences\nand set types support this as well as dictionary, for which "in" tests\nwhether the dictionary has a given key. For container types such as\nlist, tuple, set, frozenset, dict, or collections.deque, the\nexpression "x in y" is equivalent to "any(x is e or x == e for e in\ny)".\n\nFor the string and bytes types, "x in y" is true if and only if *x* is\na substring of *y*. An equivalent test is "y.find(x) != -1". Empty\nstrings are always considered to be a substring of any other string,\nso """ in "abc"" will return "True".\n\nFor user-defined classes which define the "__contains__()" method, "x\nin y" is true if and only if "y.__contains__(x)" is true.\n\nFor user-defined classes which do not define "__contains__()" but do\ndefine "__iter__()", "x in y" is true if some value "z" with "x == z"\nis produced while iterating over "y". If an exception is raised\nduring the iteration, it is as if "in" raised that exception.\n\nLastly, the old-style iteration protocol is tried: if a class defines\n"__getitem__()", "x in y" is true if and only if there is a non-\nnegative integer index *i* such that "x == y[i]", and all lower\ninteger indices do not raise "IndexError" exception. (If any other\nexception is raised, it is as if "in" raised that exception).\n\nThe operator "not in" is defined to have the inverse true value of\n"in".\n\nThe operators "is" and "is not" test for object identity: "x is y" is\ntrue if and only if *x* and *y* are the same object. "x is not y"\nyields the inverse truth value. [4]\n', + 'comparisons': u'\nComparisons\n***********\n\nUnlike C, all comparison operations in Python have the same priority,\nwhich is lower than that of any arithmetic, shifting or bitwise\noperation. 
Also unlike C, expressions like "a < b < c" have the\ninterpretation that is conventional in mathematics:\n\n comparison ::= or_expr ( comp_operator or_expr )*\n comp_operator ::= "<" | ">" | "==" | ">=" | "<=" | "!="\n | "is" ["not"] | ["not"] "in"\n\nComparisons yield boolean values: "True" or "False".\n\nComparisons can be chained arbitrarily, e.g., "x < y <= z" is\nequivalent to "x < y and y <= z", except that "y" is evaluated only\nonce (but in both cases "z" is not evaluated at all when "x < y" is\nfound to be false).\n\nFormally, if *a*, *b*, *c*, ..., *y*, *z* are expressions and *op1*,\n*op2*, ..., *opN* are comparison operators, then "a op1 b op2 c ... y\nopN z" is equivalent to "a op1 b and b op2 c and ... y opN z", except\nthat each expression is evaluated at most once.\n\nNote that "a op1 b op2 c" doesn\'t imply any kind of comparison between\n*a* and *c*, so that, e.g., "x < y > z" is perfectly legal (though\nperhaps not pretty).\n\n\nValue comparisons\n=================\n\nThe operators "<", ">", "==", ">=", "<=", and "!=" compare the values\nof two objects. The objects do not need to have the same type.\n\nChapter *Objects, values and types* states that objects have a value\n(in addition to type and identity). The value of an object is a\nrather abstract notion in Python: For example, there is no canonical\naccess method for an object\'s value. Also, there is no requirement\nthat the value of an object should be constructed in a particular way,\ne.g. comprised of all its data attributes. Comparison operators\nimplement a particular notion of what the value of an object is. One\ncan think of them as defining the value of an object indirectly, by\nmeans of their comparison implementation.\n\nBecause all types are (direct or indirect) subtypes of "object", they\ninherit the default comparison behavior from "object". Types can\ncustomize their comparison behavior by implementing *rich comparison\nmethods* like "__lt__()", described in *Basic customization*.\n\nThe default behavior for equality comparison ("==" and "!=") is based\non the identity of the objects. Hence, equality comparison of\ninstances with the same identity results in equality, and equality\ncomparison of instances with different identities results in\ninequality. A motivation for this default behavior is the desire that\nall objects should be reflexive (i.e. "x is y" implies "x == y").\n\nA default order comparison ("<", ">", "<=", and ">=") is not provided;\nan attempt raises "TypeError". A motivation for this default behavior\nis the lack of a similar invariant as for equality.\n\nThe behavior of the default equality comparison, that instances with\ndifferent identities are always unequal, may be in contrast to what\ntypes will need that have a sensible definition of object value and\nvalue-based equality. Such types will need to customize their\ncomparison behavior, and in fact, a number of built-in types have done\nthat.\n\nThe following list describes the comparison behavior of the most\nimportant built-in types.\n\n* Numbers of built-in numeric types (*Numeric Types --- int, float,\n complex*) and of the standard library types "fractions.Fraction" and\n "decimal.Decimal" can be compared within and across their types,\n with the restriction that complex numbers do not support order\n comparison. Within the limits of the types involved, they compare\n mathematically (algorithmically) correct without loss of precision.\n\n The not-a-number values "float(\'NaN\')" and "Decimal(\'NaN\')" are\n special. 
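The identity-based defaults described above can be sketched with a plain class ("Point" is a made-up example; a "try/except" is used rather than quoting an exact "TypeError" message, since that message differs between Python versions):

    >>> class Point:
    ...     def __init__(self, x, y):
    ...         self.x, self.y = x, y
    ...
    >>> Point(1, 2) == Point(1, 2)    # no __eq__(), so equality falls back to identity
    False
    >>> p = Point(1, 2)
    >>> p == p                        # the same object, hence equal
    True
    >>> try:
    ...     Point(1, 2) < Point(3, 4) # no default order comparison is provided
    ... except TypeError:
    ...     print('unorderable')
    ...
    unorderable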
They are identical to themselves ("x is x" is true) but\n are not equal to themselves ("x == x" is false). Additionally,\n comparing any number to a not-a-number value will return "False".\n For example, both "3 < float(\'NaN\')" and "float(\'NaN\') < 3" will\n return "False".\n\n* Binary sequences (instances of "bytes" or "bytearray") can be\n compared within and across their types. They compare\n lexicographically using the numeric values of their elements.\n\n* Strings (instances of "str") compare lexicographically using the\n numerical Unicode code points (the result of the built-in function\n "ord()") of their characters. [3]\n\n Strings and binary sequences cannot be directly compared.\n\n* Sequences (instances of "tuple", "list", or "range") can be\n compared only within each of their types, with the restriction that\n ranges do not support order comparison. Equality comparison across\n these types results in inequality, and ordering comparison across\n these types raises "TypeError".\n\n Sequences compare lexicographically using comparison of\n corresponding elements, whereby reflexivity of the elements is\n enforced.\n\n In enforcing reflexivity of elements, the comparison of collections\n assumes that for a collection element "x", "x == x" is always true.\n Based on that assumption, element identity is compared first, and\n element comparison is performed only for distinct elements. This\n approach yields the same result as a strict element comparison\n would, if the compared elements are reflexive. For non-reflexive\n elements, the result is different than for strict element\n comparison, and may be surprising: The non-reflexive not-a-number\n values for example result in the following comparison behavior when\n used in a list:\n\n >>> nan = float(\'NaN\')\n >>> nan is nan\n True\n >>> nan == nan\n False <-- the defined non-reflexive behavior of NaN\n >>> [nan] == [nan]\n True <-- list enforces reflexivity and tests identity first\n\n Lexicographical comparison between built-in collections works as\n follows:\n\n * For two collections to compare equal, they must be of the same\n type, have the same length, and each pair of corresponding\n elements must compare equal (for example, "[1,2] == (1,2)" is\n false because the type is not t