From python-checkins at python.org Sun Apr 1 00:34:09 2012 From: python-checkins at python.org (sandro.tosi) Date: Sun, 01 Apr 2012 00:34:09 +0200 Subject: [Python-checkins] =?utf8?q?devguide=3A_Issue_=2314316=3A_fix_brok?= =?utf8?q?en_link=3B_fix_by_=C3=89ric_Araujo?= Message-ID: http://hg.python.org/devguide/rev/b038bfc67e3d changeset: 498:b038bfc67e3d user: Sandro Tosi date: Sun Apr 01 00:31:55 2012 +0200 summary: Issue #14316: fix broken link; fix by ?ric Araujo files: grammar.rst | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/grammar.rst b/grammar.rst --- a/grammar.rst +++ b/grammar.rst @@ -20,7 +20,7 @@ --------- People are getting this wrong all the time; it took well over a -year before someone `noticed `_ +year before someone `noticed `_ that adding the floor division operator (//) broke the parser module. -- Repository URL: http://hg.python.org/devguide From python-checkins at python.org Sun Apr 1 01:08:01 2012 From: python-checkins at python.org (antoine.pitrou) Date: Sun, 01 Apr 2012 01:08:01 +0200 Subject: [Python-checkins] =?utf8?b?Y3B5dGhvbiAoMy4yKTogSXNzdWUgIzEzODcy?= =?utf8?q?=3A_socket=2Edetach=28=29_now_marks_the_socket_closed_=28as_mirr?= =?utf8?q?ored_in_the?= Message-ID: http://hg.python.org/cpython/rev/3a220feafa15 changeset: 76033:3a220feafa15 branch: 3.2 parent: 76030:f96b603278cc user: Antoine Pitrou date: Sun Apr 01 01:00:17 2012 +0200 summary: Issue #13872: socket.detach() now marks the socket closed (as mirrored in the socket repr()). Patch by Matt Joiner. files: Lib/socket.py | 11 +++++++++++ Lib/test/test_socket.py | 1 + Misc/NEWS | 3 +++ 3 files changed, 15 insertions(+), 0 deletions(-) diff --git a/Lib/socket.py b/Lib/socket.py --- a/Lib/socket.py +++ b/Lib/socket.py @@ -197,6 +197,17 @@ if self._io_refs <= 0: self._real_close() + def detach(self): + """detach() -> file descriptor + + Close the socket object without closing the underlying file descriptor. + The object cannot be used after this call, but the file descriptor + can be reused for other purposes. The file descriptor is returned. + """ + self._closed = True + return super().detach() + + def fromfd(fd, family, type, proto=0): """ fromfd(fd, family, type[, proto]) -> socket object diff --git a/Lib/test/test_socket.py b/Lib/test/test_socket.py --- a/Lib/test/test_socket.py +++ b/Lib/test/test_socket.py @@ -951,6 +951,7 @@ f = self.cli_conn.detach() self.assertEqual(f, fileno) # cli_conn cannot be used anymore... + self.assertTrue(self.cli_conn._closed) self.assertRaises(socket.error, self.cli_conn.recv, 1024) self.cli_conn.close() # ...but we can create another socket using the (still open) diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -31,6 +31,9 @@ Library ------- +- Issue #13872: socket.detach() now marks the socket closed (as mirrored + in the socket repr()). Patch by Matt Joiner. + - Issue #14406: Fix a race condition when using ``concurrent.futures.wait( return_when=ALL_COMPLETED)``. Patch by Matt Joiner. 
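The detach() change above only adds the ``_closed`` marker; the descriptor hand-over described in the new docstring is unchanged. A minimal sketch of that pattern, using a throwaway local connection purely for illustration (the assert on the private ``_closed`` attribute mirrors the new test):

    import os
    import socket

    # Throwaway connected pair, only so detach() has something to work on.
    listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    listener.bind(('127.0.0.1', 0))
    listener.listen(1)
    client = socket.create_connection(listener.getsockname())
    conn, _ = listener.accept()

    fd = conn.detach()        # the socket object is now unusable...
    assert conn._closed       # ...and, with this patch, it says so
    # ...but the file descriptor stays open and can back a new socket object.
    peer = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
    peer.sendall(b'ping')
    print(client.recv(4))     # b'ping'

    os.close(fd)              # fromfd() duplicated it, so close the original too
    for s in (client, peer, listener):
        s.close()
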
-- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Sun Apr 1 01:08:02 2012 From: python-checkins at python.org (antoine.pitrou) Date: Sun, 01 Apr 2012 01:08:02 +0200 Subject: [Python-checkins] =?utf8?q?cpython_=28merge_3=2E2_-=3E_default=29?= =?utf8?q?=3A_Issue_=2313872=3A_socket=2Edetach=28=29_now_marks_the_socket?= =?utf8?q?_closed_=28as_mirrored_in_the?= Message-ID: http://hg.python.org/cpython/rev/d2f0c3eb1eed changeset: 76034:d2f0c3eb1eed parent: 76031:9a5ae3f37d06 parent: 76033:3a220feafa15 user: Antoine Pitrou date: Sun Apr 01 01:00:55 2012 +0200 summary: Issue #13872: socket.detach() now marks the socket closed (as mirrored in the socket repr()). Patch by Matt Joiner. files: Lib/socket.py | 11 +++++++++++ Lib/test/test_socket.py | 1 + Misc/NEWS | 3 +++ 3 files changed, 15 insertions(+), 0 deletions(-) diff --git a/Lib/socket.py b/Lib/socket.py --- a/Lib/socket.py +++ b/Lib/socket.py @@ -199,6 +199,17 @@ if self._io_refs <= 0: self._real_close() + def detach(self): + """detach() -> file descriptor + + Close the socket object without closing the underlying file descriptor. + The object cannot be used after this call, but the file descriptor + can be reused for other purposes. The file descriptor is returned. + """ + self._closed = True + return super().detach() + + def fromfd(fd, family, type, proto=0): """ fromfd(fd, family, type[, proto]) -> socket object diff --git a/Lib/test/test_socket.py b/Lib/test/test_socket.py --- a/Lib/test/test_socket.py +++ b/Lib/test/test_socket.py @@ -1574,6 +1574,7 @@ f = self.cli_conn.detach() self.assertEqual(f, fileno) # cli_conn cannot be used anymore... + self.assertTrue(self.cli_conn._closed) self.assertRaises(socket.error, self.cli_conn.recv, 1024) self.cli_conn.close() # ...but we can create another socket using the (still open) diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -37,6 +37,9 @@ Library ------- +- Issue #13872: socket.detach() now marks the socket closed (as mirrored + in the socket repr()). Patch by Matt Joiner. + - Issue #14406: Fix a race condition when using ``concurrent.futures.wait( return_when=ALL_COMPLETED)``. Patch by Matt Joiner. -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Sun Apr 1 01:25:21 2012 From: python-checkins at python.org (antoine.pitrou) Date: Sun, 01 Apr 2012 01:25:21 +0200 Subject: [Python-checkins] =?utf8?q?cpython=3A_Issue_=2314300=3A_Under_Win?= =?utf8?q?dows=2C_sockets_created_using_socket=2Edup=28=29_now_allow?= Message-ID: http://hg.python.org/cpython/rev/5be4d8fc9c44 changeset: 76035:5be4d8fc9c44 user: Antoine Pitrou date: Sun Apr 01 01:14:39 2012 +0200 summary: Issue #14300: Under Windows, sockets created using socket.dup() now allow overlapped I/O. Patch by sbt. files: Misc/NEWS | 3 +++ Modules/socketmodule.c | 2 +- 2 files changed, 4 insertions(+), 1 deletions(-) diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -37,6 +37,9 @@ Library ------- +- Issue #14300: Under Windows, sockets created using socket.dup() now allow + overlapped I/O. Patch by sbt. + - Issue #13872: socket.detach() now marks the socket closed (as mirrored in the socket repr()). Patch by Matt Joiner. 
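At the Python level the #14300 change is invisible: socket.dup() is called exactly as before and returns a duplicate socket object, whose Windows handle now additionally allows overlapped I/O (the one-line C change follows below). A minimal sketch of dup() itself, with no Windows-specific behaviour shown:

    import socket

    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s2 = s.dup()                       # same family/type/proto, new OS-level handle
    assert s2.fileno() != s.fileno()
    s.close()
    s2.close()                         # the duplicate must be closed separately
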
diff --git a/Modules/socketmodule.c b/Modules/socketmodule.c --- a/Modules/socketmodule.c +++ b/Modules/socketmodule.c @@ -381,7 +381,7 @@ return INVALID_SOCKET; return WSASocket(FROM_PROTOCOL_INFO, FROM_PROTOCOL_INFO, - FROM_PROTOCOL_INFO, &info, 0, 0); + FROM_PROTOCOL_INFO, &info, 0, WSA_FLAG_OVERLAPPED); } #define SOCKETCLOSE closesocket #else -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Sun Apr 1 01:52:28 2012 From: python-checkins at python.org (sandro.tosi) Date: Sun, 01 Apr 2012 01:52:28 +0200 Subject: [Python-checkins] =?utf8?q?cpython_=282=2E7=29=3A_fix_typo=3B_tha?= =?utf8?q?nks_to_Robert_Bardos_from_docs=40?= Message-ID: http://hg.python.org/cpython/rev/3623c3e6c049 changeset: 76036:3623c3e6c049 branch: 2.7 parent: 76032:6f8dd543d80a user: Sandro Tosi date: Sun Apr 01 01:49:46 2012 +0200 summary: fix typo; thanks to Robert Bardos from docs@ files: Doc/glossary.rst | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/Doc/glossary.rst b/Doc/glossary.rst --- a/Doc/glossary.rst +++ b/Doc/glossary.rst @@ -406,7 +406,7 @@ :meth:`str.lower` method can serve as a key function for case insensitive sorts. Alternatively, an ad-hoc key function can be built from a :keyword:`lambda` expression such as ``lambda r: (r[0], r[2])``. Also, - the :mod:`operator` module provides three key function constuctors: + the :mod:`operator` module provides three key function constructors: :func:`~operator.attrgetter`, :func:`~operator.itemgetter`, and :func:`~operator.methodcaller`. See the :ref:`Sorting HOW TO ` for examples of how to create and use key functions. -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Sun Apr 1 01:52:29 2012 From: python-checkins at python.org (sandro.tosi) Date: Sun, 01 Apr 2012 01:52:29 +0200 Subject: [Python-checkins] =?utf8?q?cpython_=283=2E2=29=3A_fix_typo=3B_tha?= =?utf8?q?nks_to_Robert_Bardos_from_docs=40?= Message-ID: http://hg.python.org/cpython/rev/0a4a6f98bd8e changeset: 76037:0a4a6f98bd8e branch: 3.2 parent: 76033:3a220feafa15 user: Sandro Tosi date: Sun Apr 01 01:50:00 2012 +0200 summary: fix typo; thanks to Robert Bardos from docs@ files: Doc/glossary.rst | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/Doc/glossary.rst b/Doc/glossary.rst --- a/Doc/glossary.rst +++ b/Doc/glossary.rst @@ -385,7 +385,7 @@ :meth:`str.lower` method can serve as a key function for case insensitive sorts. Alternatively, an ad-hoc key function can be built from a :keyword:`lambda` expression such as ``lambda r: (r[0], r[2])``. Also, - the :mod:`operator` module provides three key function constuctors: + the :mod:`operator` module provides three key function constructors: :func:`~operator.attrgetter`, :func:`~operator.itemgetter`, and :func:`~operator.methodcaller`. See the :ref:`Sorting HOW TO ` for examples of how to create and use key functions. 
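The glossary text being corrected above (in both the 2.7 and 3.2 branches) describes key functions; a short illustration of the three operator constructors it names, using made-up sample data:

    from operator import attrgetter, itemgetter, methodcaller

    rows = [('b', 2, 'x'), ('a', 1, 'y')]
    print(sorted(rows, key=itemgetter(0, 2)))        # like lambda r: (r[0], r[2])

    words = ['Banana', 'apple', 'Cherry']
    print(sorted(words, key=methodcaller('lower')))  # same effect as key=str.lower

    # attrgetter('name') works the same way for attribute access on objects.
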
-- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Sun Apr 1 01:52:30 2012 From: python-checkins at python.org (sandro.tosi) Date: Sun, 01 Apr 2012 01:52:30 +0200 Subject: [Python-checkins] =?utf8?q?cpython_=28merge_3=2E2_-=3E_default=29?= =?utf8?q?=3A_merge_with_3=2E2?= Message-ID: http://hg.python.org/cpython/rev/51b5f2b90df2 changeset: 76038:51b5f2b90df2 parent: 76035:5be4d8fc9c44 parent: 76037:0a4a6f98bd8e user: Sandro Tosi date: Sun Apr 01 01:50:22 2012 +0200 summary: merge with 3.2 files: Doc/glossary.rst | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/Doc/glossary.rst b/Doc/glossary.rst --- a/Doc/glossary.rst +++ b/Doc/glossary.rst @@ -385,7 +385,7 @@ :meth:`str.lower` method can serve as a key function for case insensitive sorts. Alternatively, an ad-hoc key function can be built from a :keyword:`lambda` expression such as ``lambda r: (r[0], r[2])``. Also, - the :mod:`operator` module provides three key function constuctors: + the :mod:`operator` module provides three key function constructors: :func:`~operator.attrgetter`, :func:`~operator.itemgetter`, and :func:`~operator.methodcaller`. See the :ref:`Sorting HOW TO ` for examples of how to create and use key functions. -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Sun Apr 1 04:24:04 2012 From: python-checkins at python.org (victor.stinner) Date: Sun, 01 Apr 2012 04:24:04 +0200 Subject: [Python-checkins] =?utf8?q?peps=3A_PEP_418=3A_Add_time=2Eget=5Fcl?= =?utf8?q?ock=5Finfo=28=29?= Message-ID: http://hg.python.org/peps/rev/7375d3178b6a changeset: 4183:7375d3178b6a user: Victor Stinner date: Sun Apr 01 04:11:08 2012 +0200 summary: PEP 418: Add time.get_clock_info() files: pep-0418.txt | 23 +++++++++++++++++++++-- 1 files changed, 21 insertions(+), 2 deletions(-) diff --git a/pep-0418.txt b/pep-0418.txt --- a/pep-0418.txt +++ b/pep-0418.txt @@ -13,8 +13,8 @@ Abstract ======== -Add time.monotonic(fallback=True) and time.highres() functions to -Python 3.3. +Add time.monotonic(fallback=True), time.highres(), time.get_clock_info(name) +functions to Python 3.3. Rationale @@ -40,6 +40,7 @@ clock is available, falls back to system clock by default, or raises an OSError if *fallback* is False. time.monotonic(fallback=True) cannot go backward. +* time.get_clock_info(name): get information on the specified time function time.time() @@ -208,6 +209,24 @@ highres.use_performance_counter = (os.name == 'nt') highres.use_monotonic = hasattr(time, 'monotonic') +time.get_clock_info(name) +------------------------- + +Get information on the specified time function as a dictionary. 
Only the following +names are accepted: + + * "clock": time.clock() + * "highres": time.highres() + * "monotonic": time.monotonic() + * "time": time.time() + +The following keys are always present: + + * "function" (str): name of the underlying operating system function (ex: + "QueryPerformanceCounter()" or "clock_gettime(CLOCK_REALTIME)") + * "resolution" (float): resolution in seconds of the function + * "monotonic" (bool): True if the clock is monotonic + Hardware clocks =============== -- Repository URL: http://hg.python.org/peps From python-checkins at python.org Sun Apr 1 04:24:05 2012 From: python-checkins at python.org (victor.stinner) Date: Sun, 01 Apr 2012 04:24:05 +0200 Subject: [Python-checkins] =?utf8?q?peps=3A_PEP_418=3A_Drop_fallback_param?= =?utf8?q?eter_of_time=2Emonotonic=28=29?= Message-ID: http://hg.python.org/peps/rev/e6e799e92983 changeset: 4184:e6e799e92983 user: Victor Stinner date: Sun Apr 01 04:23:32 2012 +0200 summary: PEP 418: Drop fallback parameter of time.monotonic() time.monotonic() now always falls back and time.get_clock_info() can be used to check if the clock is monotonic. files: pep-0418.txt | 65 +++++++++++++++++++++------------------ 1 files changed, 35 insertions(+), 30 deletions(-) diff --git a/pep-0418.txt b/pep-0418.txt --- a/pep-0418.txt +++ b/pep-0418.txt @@ -13,8 +13,8 @@ Abstract ======== -Add time.monotonic(fallback=True), time.highres(), time.get_clock_info(name) -functions to Python 3.3. +Add time.monotonic(), time.highres(), time.get_clock_info(name) functions to +Python 3.3. Rationale @@ -25,8 +25,8 @@ * Display the current time to a human (e.g. display a calendar or draw a wall clock): use system clock, i.e. time.time() or datetime.datetime.now(). -* Benchmark, profiling, timeout: time.highres(). -* Event scheduler: time.monotonic(), or time.monotonic(fallback=False). +* Benchmark, profiling: time.highres(). +* Event scheduler, timeout: time.monotonic(). Functions @@ -36,10 +36,8 @@ * time.time(): system clock, "wall clock". * time.highres(): clock with the best accuracy. -* time.monotonic(fallback=True): monotonic clock. If no monotonic - clock is available, falls back to system clock by default, or raises - an OSError if *fallback* is False. time.monotonic(fallback=True) - cannot go backward. +* time.monotonic(): monotonic clock, or system clock if no monotonic + clock is available * time.get_clock_info(name): get information on the specified time function @@ -81,19 +79,16 @@ return _time.time() -time.monotonic(fallback=True) ------------------------------ +time.monotonic() +---------------- -Clock that cannot go backward, its rate is as steady as possible. Its -rate may be adjusted by NTP. The reference point of the returned -value is undefined so only the difference of consecutive calls is -valid. +Monotonic clock, or system clock if the platform does not provide a monotonic +clock (e.g. on GNU/Hurd). Its rate is as steady as possible. Its rate may be +adjusted by NTP. The reference point of the returned value is undefined so +only the difference of consecutive calls is valid. -By default, it falls back to the system clock if no monotonic clock is -available or if the monotonic clock failed, and so it cannot fail. If -fallback is False, it raises OSError if the monotonic clock failed and -NotImplementedError if the platform does not provide a monotonic clock -(e.g. on GNU/Hurd). +Use time.get_clock_info('monotonic')['monotonic'] to check if the clock +monotonic or not. 
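The PEP text above is still a draft proposal for 3.3, so none of these names can be assumed to exist yet. A minimal sketch of the "prefer a monotonic clock, fall back to the system clock" idea it discusses, probing for the proposed functions with hasattr()/getattr() so it also runs on interpreters that lack them:

    import time

    # Proposed name from the PEP draft; probe for it rather than assume it.
    clock = getattr(time, 'monotonic', time.time)

    start = clock()
    # ... the code being timed or scheduled goes here ...
    elapsed = clock() - start
    print(elapsed)

    if hasattr(time, 'get_clock_info'):
        # Per this draft, the result describes the underlying OS function,
        # its resolution, and whether the clock is monotonic.
        print(time.get_clock_info('monotonic'))
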
The elapsed time may or may not include time the system spends in sleep or hibernation; this depends on the operating system. @@ -103,10 +98,10 @@ if os.name == 'nt': # GetTickCount64() requires Windows Vista, Server 2008 or later if hasattr(time, '_GetTickCount64'): - def monotonic(fallback=True): + def monotonic(): return _time.GetTickCount64() else: - def monotonic(fallback=True): + def monotonic(): ticks = _time.GetTickCount() if ticks < monotonic.last: # Integer overflow detected @@ -117,7 +112,7 @@ monotonic.delta = 0 elif os.name == 'mac': - def monotonic(fallback=True): + def monotonic(): if monotonic.factor is None: factor = _time.mach_timebase_info() monotonic.factor = timebase[0] / timebase[1] @@ -125,7 +120,7 @@ monotonic.factor = None elif os.name.startswith('sunos'): - def monotonic(fallback=True): + def monotonic(): if monotonic.use_clock_highres: try: time.clock_gettime(time.CLOCK_HIGHRES) @@ -135,8 +130,6 @@ try: return time.gethrtime() except OSError: - if not fallback: - raise monotonic.use_gethrtime = False return time.time() monotonic.use_clock_highres = (hasattr(time, 'clock_gettime') @@ -144,15 +137,13 @@ monotonic.use_gethrtime = True elif hasattr(time, "clock_gettime"): - def monotonic(fallback=True): + def monotonic(): while monotonic.clocks: try: clk_id = monotonic.clocks[0] return time.clock_gettime(clk_id) except OSError: # CLOCK_MONOTONIC_RAW requires a Linux kernel >= 2.6.28 - if len(monotonic.clocks) == 1 and not fallback: - raise del monotonic.clocks[0] return time.time() monotonic.clocks = [] @@ -163,9 +154,7 @@ monotonic.clocks.append(time.CLOCK_MONOTONIC) else: - def monotonic(fallback=True): - if not fallback: - raise NotImplementedError("you platform does not provide any monotonic clock") + def monotonic(): return time.time() On Windows, QueryPerformanceCounter() is not used even though it has a @@ -679,6 +668,21 @@ a monotonic clock with an unspecified starting point +One function with a flag: time.monotonic(fallback=True) +------------------------------------------------------- + + * time.monotonic(fallback=True) falls back to the system clock if no monotonic + clock is available or if the monotonic clock failed. + * time.monotonic(fallback=False) raises OSError if monotonic clock fails and + NotImplementedError if the system does not provide a monotonic clock + +"A keyword argument that gets passed as a constant in the caller is usually +poor API." + +Raising NotImplementedError for a function is something uncommon in Python and +should be avoided. 
+ + One function, no flag --------------------- -- Repository URL: http://hg.python.org/peps From python-checkins at python.org Sun Apr 1 04:24:04 2012 From: python-checkins at python.org (victor.stinner) Date: Sun, 01 Apr 2012 04:24:04 +0200 Subject: [Python-checkins] =?utf8?q?peps=3A_PEP_418=3A_Add_a_=22Hardware_c?= =?utf8?q?locks=22_section?= Message-ID: http://hg.python.org/peps/rev/7010ff8e845d changeset: 4182:7010ff8e845d user: Victor Stinner date: Sun Apr 01 04:03:15 2012 +0200 summary: PEP 418: Add a "Hardware clocks" section * Mention the QueryPerformanceCounter() issue on CPU with variable frequency files: pep-0418.txt | 46 +++++++++++++++++++++++++++++++++++---- 1 files changed, 41 insertions(+), 5 deletions(-) diff --git a/pep-0418.txt b/pep-0418.txt --- a/pep-0418.txt +++ b/pep-0418.txt @@ -209,8 +209,30 @@ highres.use_monotonic = hasattr(time, 'monotonic') -Clocks -====== +Hardware clocks +=============== + +* HPET: An HPET chip consists of a 64-bit up-counter (main counter) + counting at least at 10 MHz and a set of up to 256 comparators (at + least 3). Each HPET can have up to 32 timers. +* TSC (Time Stamp Counter): Historically, the TSC increased with every internal + processor clock cycle, but now the rate is usually constant (even if the + processor changes frequency) and usually equals the maximum processor + frequency. The instructor RDTSC can be used to read this counter. +* ACPI PMTMR (power management timer): ACPI 24-bit timer with a frequency + of 3.5 MHz (3,579,545 Hz). HPET can cause around 3 seconds of drift per day. +* Cyclone: The Cyclone timer uses a 32-bit counter on IBM Extended + X-Architecture (EXA) chipsets which include computers that use the + IBM "Summit" series chipsets (ex: x440). This is available in IA32 + and IA64 architectures. +* PIT (programmable interrupt timer): Intel 8253/8254 chipsets with a + configurable frequency in range 18.2 Hz - 1.2 MHz. It is a 16-bit counter. +* RTC (Real-time clock). Most RTCs use a crystal oscillator with a frequency of + 32,768 Hz + + +Operating system clocks +======================= Monotonic clocks ---------------- @@ -363,6 +385,8 @@ * Windows XP had a bug (see `KB896256`_): on a multiprocessor computer, QueryPerformanceCounter() returned a different value for each processor. The bug was fixed in Windows XP SP2. +* Issues with processor with variable frequency: the frequency is changed + depending on the workload to reduce memory consumption. .. _KB896256: http://support.microsoft.com/?id=896256 .. _KB274323: http://support.microsoft.com/?id=274323 @@ -390,6 +414,11 @@ There are applications using this undocumented function, example: `Timer Resolution `_. +WaitForSingleObject() use the same timer than GetTickCount() with the same +resolution. + +GetTickCount() has an accuracy of 55 ms on Windows 9x. + Windows: timeGetTime ^^^^^^^^^^^^^^^^^^^^ @@ -610,19 +639,25 @@ Alternatives: API design ======================== -Name of the "monotonic or fallback" function name -------------------------------------------------- +Other names for new functions +----------------------------- -Other names were proposed: +time.highres(): * time.hires(): "hires" can be read as "to hire" as in "he hires a car to go on holiday", rather than a "HIgh-RESolution clock". +* time.timer(): "it would be too easy to confuse with (or misspell as) + time.time()" + +time.monotonic(): + * time.steady(): no OS provides a clock advancing at a steady rate, so "steady" should be avoided. 
* time.try_monotonic(): it is a clear and obvious solution for the use-case of "I prefer the monotonic clock, if it is available, otherwise I'll take my chances with a best-effect clock." -* time.wallclock() +* time.wallclock(): it is not the system time aka the "wall clock", but + a monotonic clock with an unspecified starting point One function, no flag -- Repository URL: http://hg.python.org/peps From python-checkins at python.org Sun Apr 1 04:30:19 2012 From: python-checkins at python.org (victor.stinner) Date: Sun, 01 Apr 2012 04:30:19 +0200 Subject: [Python-checkins] =?utf8?q?peps=3A_PEP_418=3A_time=2Eget=5Fclock?= =?utf8?q?=5Finfo=28=29=2C_rename_=22monotonic=22_option_to_=22is=5Fmonoto?= =?utf8?q?nic=22?= Message-ID: http://hg.python.org/peps/rev/4b3e9b3e37ba changeset: 4185:4b3e9b3e37ba user: Victor Stinner date: Sun Apr 01 04:30:13 2012 +0200 summary: PEP 418: time.get_clock_info(), rename "monotonic" option to "is_monotonic" files: pep-0418.txt | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/pep-0418.txt b/pep-0418.txt --- a/pep-0418.txt +++ b/pep-0418.txt @@ -214,7 +214,7 @@ * "function" (str): name of the underlying operating system function (ex: "QueryPerformanceCounter()" or "clock_gettime(CLOCK_REALTIME)") * "resolution" (float): resolution in seconds of the function - * "monotonic" (bool): True if the clock is monotonic + * "is_monotonic" (bool): True if the clock is monotonic Hardware clocks -- Repository URL: http://hg.python.org/peps From ncoghlan at gmail.com Sun Apr 1 05:33:36 2012 From: ncoghlan at gmail.com (Nick Coghlan) Date: Sun, 1 Apr 2012 13:33:36 +1000 Subject: [Python-checkins] cpython: Issue #14435: Add Misc/NEWS and Misc/ACKS In-Reply-To: References: Message-ID: On Sat, Mar 31, 2012 at 11:10 PM, kristjan.jonsson wrote: > diff --git a/Misc/ACKS b/Misc/ACKS > --- a/Misc/ACKS > +++ b/Misc/ACKS > @@ -507,6 +507,7 @@ > ?Richard Jones > ?Irmen de Jong > ?Lucas de Jonge > +Kristj?n Valur J?nsson > ?Jens B. Jorgensen > ?John Jorgensen > ?Sijin Joseph *blinks* This must have been one of those cases where everyone assumed your name was already there and never thought to check... Cheers, Nick. -- Nick Coghlan?? |?? ncoghlan at gmail.com?? |?? Brisbane, Australia From solipsis at pitrou.net Sun Apr 1 05:35:02 2012 From: solipsis at pitrou.net (solipsis at pitrou.net) Date: Sun, 01 Apr 2012 05:35:02 +0200 Subject: [Python-checkins] Daily reference leaks (51b5f2b90df2): sum=-6 Message-ID: results for 51b5f2b90df2 on branch "default" -------------------------------------------- test_xml_etree_c leaked [-2, -2, -2] references, sum=-6 Command line was: ['./python', '-m', 'test.regrtest', '-uall', '-R', '3:3:/home/antoine/cpython/refleaks/reflogovYOZv', '-x'] From python-checkins at python.org Sun Apr 1 09:20:09 2012 From: python-checkins at python.org (ned.deily) Date: Sun, 01 Apr 2012 09:20:09 +0200 Subject: [Python-checkins] =?utf8?q?cpython=3A_Issue_=2313507=3A_OS_X_inst?= =?utf8?q?aller_builds_now_build_liblzma_for_the_new?= Message-ID: http://hg.python.org/cpython/rev/0e1177499762 changeset: 76039:0e1177499762 user: Ned Deily date: Sun Apr 01 00:17:33 2012 -0700 summary: Issue #13507: OS X installer builds now build liblzma for the new lzma module. 
(Patch by Nicholas Riley) files: Mac/BuildScript/build-installer.py | 11 +++++++++++ 1 files changed, 11 insertions(+), 0 deletions(-) diff --git a/Mac/BuildScript/build-installer.py b/Mac/BuildScript/build-installer.py --- a/Mac/BuildScript/build-installer.py +++ b/Mac/BuildScript/build-installer.py @@ -143,6 +143,17 @@ def library_recipes(): result = [] + result.extend([ + dict( + name="XZ 5.0.3", + url="http://tukaani.org/xz/xz-5.0.3.tar.gz", + checksum='fefe52f9ecd521de2a8ce38c21a27574', + configure_pre=[ + '--disable-dependency-tracking', + ] + ) + ]) + if DEPTARGET < '10.5': result.extend([ dict( -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Sun Apr 1 11:31:41 2012 From: python-checkins at python.org (ned.deily) Date: Sun, 01 Apr 2012 11:31:41 +0200 Subject: [Python-checkins] =?utf8?q?cpython=3A_Issue_=2314463=3A_Prevent_?= =?utf8?q?=5Fdecimal=2Eso_compile_failures_in_OS_X_installer_builds=2E?= Message-ID: http://hg.python.org/cpython/rev/ac60138522fc changeset: 76040:ac60138522fc user: Ned Deily date: Sun Apr 01 02:30:46 2012 -0700 summary: Issue #14463: Prevent _decimal.so compile failures in OS X installer builds. files: setup.py | 6 +++++- 1 files changed, 5 insertions(+), 1 deletions(-) diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -1804,7 +1804,11 @@ sources = ['_decimal/_decimal.c'] depends = ['_decimal/docstrings.h'] else: - include_dirs = ['./Modules/_decimal/libmpdec'] + srcdir = sysconfig.get_config_var('srcdir') + include_dirs = [os.path.abspath(os.path.join(srcdir, + 'Modules', + '_decimal', + 'libmpdec'))] libraries = [] sources = [ '_decimal/_decimal.c', -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Sun Apr 1 13:08:48 2012 From: python-checkins at python.org (stefan.krah) Date: Sun, 01 Apr 2012 13:08:48 +0200 Subject: [Python-checkins] =?utf8?q?cpython=3A_Issue_=2314394=3A_Use_elabo?= =?utf8?q?rate_phrases_that_boil_down_to_=22one_to_two_orders?= Message-ID: http://hg.python.org/cpython/rev/6ba569924986 changeset: 76041:6ba569924986 user: Stefan Krah date: Sun Apr 01 13:07:24 2012 +0200 summary: Issue #14394: Use elaborate phrases that boil down to "one to two orders of magnitude". Provide link to the benchmarks. files: Doc/whatsnew/3.3.rst | 14 ++++++++++---- 1 files changed, 10 insertions(+), 4 deletions(-) diff --git a/Doc/whatsnew/3.3.rst b/Doc/whatsnew/3.3.rst --- a/Doc/whatsnew/3.3.rst +++ b/Doc/whatsnew/3.3.rst @@ -603,11 +603,17 @@ C-module and libmpdec written by Stefan Krah. The new C version of the decimal module integrates the high speed libmpdec -library for arbitrary precision correctly-rounded decimal arithmetic. -libmpdec conforms to IBM's General Decimal Arithmetic Specification. +library for arbitrary precision correctly-rounded decimal floating point +arithmetic. libmpdec conforms to IBM's General Decimal Arithmetic Specification. -Performance gains range from 12x for database applications to 80x for -numerically intensive applications: +Performance gains range from 10x for database applications to 80x for +numerically intensive applications. These numbers are expected gains +for standard precisions used in decimal floating point arithmetic. Since +the precision is user configurable, the exact figures may vary. For example, +in integer bignum arithmetic the differences can be significantly higher. + +The following table is meant as an illustration. Benchmarks are available +at (http://www.bytereef.org/mpdecimal/quickstart.html). 
+---------+-------------+--------------+-------------+ | | decimal.py | _decimal | speedup | -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Sun Apr 1 13:10:52 2012 From: python-checkins at python.org (georg.brandl) Date: Sun, 01 Apr 2012 13:10:52 +0200 Subject: [Python-checkins] =?utf8?q?cpython=3A_Remove_parens_around_link?= =?utf8?q?=2E?= Message-ID: http://hg.python.org/cpython/rev/61f33da3185b changeset: 76042:61f33da3185b user: Georg Brandl date: Sun Apr 01 13:10:58 2012 +0200 summary: Remove parens around link. files: Doc/whatsnew/3.3.rst | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/Doc/whatsnew/3.3.rst b/Doc/whatsnew/3.3.rst --- a/Doc/whatsnew/3.3.rst +++ b/Doc/whatsnew/3.3.rst @@ -613,7 +613,7 @@ in integer bignum arithmetic the differences can be significantly higher. The following table is meant as an illustration. Benchmarks are available -at (http://www.bytereef.org/mpdecimal/quickstart.html). +at http://www.bytereef.org/mpdecimal/quickstart.html. +---------+-------------+--------------+-------------+ | | decimal.py | _decimal | speedup | -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Sun Apr 1 13:48:28 2012 From: python-checkins at python.org (georg.brandl) Date: Sun, 01 Apr 2012 13:48:28 +0200 Subject: [Python-checkins] =?utf8?q?cpython=3A_Update_pydoc_topics=2E?= Message-ID: http://hg.python.org/cpython/rev/707514c704e6 changeset: 76043:707514c704e6 user: Georg Brandl date: Sun Apr 01 13:46:44 2012 +0200 summary: Update pydoc topics. files: Lib/pydoc_data/topics.py | 12 ++++++------ 1 files changed, 6 insertions(+), 6 deletions(-) diff --git a/Lib/pydoc_data/topics.py b/Lib/pydoc_data/topics.py --- a/Lib/pydoc_data/topics.py +++ b/Lib/pydoc_data/topics.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Autogenerated by Sphinx on Sun Mar 4 16:11:27 2012 +# Autogenerated by Sphinx on Sun Apr 1 13:46:17 2012 topics = {'assert': '\nThe ``assert`` statement\n************************\n\nAssert statements are a convenient way to insert debugging assertions\ninto a program:\n\n assert_stmt ::= "assert" expression ["," expression]\n\nThe simple form, ``assert expression``, is equivalent to\n\n if __debug__:\n if not expression: raise AssertionError\n\nThe extended form, ``assert expression1, expression2``, is equivalent\nto\n\n if __debug__:\n if not expression1: raise AssertionError(expression2)\n\nThese equivalences assume that ``__debug__`` and ``AssertionError``\nrefer to the built-in variables with those names. In the current\nimplementation, the built-in variable ``__debug__`` is ``True`` under\nnormal circumstances, ``False`` when optimization is requested\n(command line option -O). The current code generator emits no code\nfor an assert statement when optimization is requested at compile\ntime. Note that it is unnecessary to include the source code for the\nexpression that failed in the error message; it will be displayed as\npart of the stack trace.\n\nAssignments to ``__debug__`` are illegal. 
The value for the built-in\nvariable is determined when the interpreter starts.\n', 'assignment': '\nAssignment statements\n*********************\n\nAssignment statements are used to (re)bind names to values and to\nmodify attributes or items of mutable objects:\n\n assignment_stmt ::= (target_list "=")+ (expression_list | yield_expression)\n target_list ::= target ("," target)* [","]\n target ::= identifier\n | "(" target_list ")"\n | "[" target_list "]"\n | attributeref\n | subscription\n | slicing\n | "*" target\n\n(See section *Primaries* for the syntax definitions for the last three\nsymbols.)\n\nAn assignment statement evaluates the expression list (remember that\nthis can be a single expression or a comma-separated list, the latter\nyielding a tuple) and assigns the single resulting object to each of\nthe target lists, from left to right.\n\nAssignment is defined recursively depending on the form of the target\n(list). When a target is part of a mutable object (an attribute\nreference, subscription or slicing), the mutable object must\nultimately perform the assignment and decide about its validity, and\nmay raise an exception if the assignment is unacceptable. The rules\nobserved by various types and the exceptions raised are given with the\ndefinition of the object types (see section *The standard type\nhierarchy*).\n\nAssignment of an object to a target list, optionally enclosed in\nparentheses or square brackets, is recursively defined as follows.\n\n* If the target list is a single target: The object is assigned to\n that target.\n\n* If the target list is a comma-separated list of targets: The object\n must be an iterable with the same number of items as there are\n targets in the target list, and the items are assigned, from left to\n right, to the corresponding targets.\n\n * If the target list contains one target prefixed with an asterisk,\n called a "starred" target: The object must be a sequence with at\n least as many items as there are targets in the target list, minus\n one. The first items of the sequence are assigned, from left to\n right, to the targets before the starred target. The final items\n of the sequence are assigned to the targets after the starred\n target. A list of the remaining items in the sequence is then\n assigned to the starred target (the list can be empty).\n\n * Else: The object must be a sequence with the same number of items\n as there are targets in the target list, and the items are\n assigned, from left to right, to the corresponding targets.\n\nAssignment of an object to a single target is recursively defined as\nfollows.\n\n* If the target is an identifier (name):\n\n * If the name does not occur in a ``global`` or ``nonlocal``\n statement in the current code block: the name is bound to the\n object in the current local namespace.\n\n * Otherwise: the name is bound to the object in the global namespace\n or the outer namespace determined by ``nonlocal``, respectively.\n\n The name is rebound if it was already bound. 
This may cause the\n reference count for the object previously bound to the name to reach\n zero, causing the object to be deallocated and its destructor (if it\n has one) to be called.\n\n* If the target is a target list enclosed in parentheses or in square\n brackets: The object must be an iterable with the same number of\n items as there are targets in the target list, and its items are\n assigned, from left to right, to the corresponding targets.\n\n* If the target is an attribute reference: The primary expression in\n the reference is evaluated. It should yield an object with\n assignable attributes; if this is not the case, ``TypeError`` is\n raised. That object is then asked to assign the assigned object to\n the given attribute; if it cannot perform the assignment, it raises\n an exception (usually but not necessarily ``AttributeError``).\n\n Note: If the object is a class instance and the attribute reference\n occurs on both sides of the assignment operator, the RHS expression,\n ``a.x`` can access either an instance attribute or (if no instance\n attribute exists) a class attribute. The LHS target ``a.x`` is\n always set as an instance attribute, creating it if necessary.\n Thus, the two occurrences of ``a.x`` do not necessarily refer to the\n same attribute: if the RHS expression refers to a class attribute,\n the LHS creates a new instance attribute as the target of the\n assignment:\n\n class Cls:\n x = 3 # class variable\n inst = Cls()\n inst.x = inst.x + 1 # writes inst.x as 4 leaving Cls.x as 3\n\n This description does not necessarily apply to descriptor\n attributes, such as properties created with ``property()``.\n\n* If the target is a subscription: The primary expression in the\n reference is evaluated. It should yield either a mutable sequence\n object (such as a list) or a mapping object (such as a dictionary).\n Next, the subscript expression is evaluated.\n\n If the primary is a mutable sequence object (such as a list), the\n subscript must yield an integer. If it is negative, the sequence\'s\n length is added to it. The resulting value must be a nonnegative\n integer less than the sequence\'s length, and the sequence is asked\n to assign the assigned object to its item with that index. If the\n index is out of range, ``IndexError`` is raised (assignment to a\n subscripted sequence cannot add new items to a list).\n\n If the primary is a mapping object (such as a dictionary), the\n subscript must have a type compatible with the mapping\'s key type,\n and the mapping is then asked to create a key/datum pair which maps\n the subscript to the assigned object. This can either replace an\n existing key/value pair with the same key value, or insert a new\n key/value pair (if no key with the same value existed).\n\n For user-defined objects, the ``__setitem__()`` method is called\n with appropriate arguments.\n\n* If the target is a slicing: The primary expression in the reference\n is evaluated. It should yield a mutable sequence object (such as a\n list). The assigned object should be a sequence object of the same\n type. Next, the lower and upper bound expressions are evaluated,\n insofar they are present; defaults are zero and the sequence\'s\n length. The bounds should evaluate to integers. If either bound is\n negative, the sequence\'s length is added to it. The resulting\n bounds are clipped to lie between zero and the sequence\'s length,\n inclusive. Finally, the sequence object is asked to replace the\n slice with the items of the assigned sequence. 
The length of the\n slice may be different from the length of the assigned sequence,\n thus changing the length of the target sequence, if the object\n allows it.\n\n**CPython implementation detail:** In the current implementation, the\nsyntax for targets is taken to be the same as for expressions, and\ninvalid syntax is rejected during the code generation phase, causing\nless detailed error messages.\n\nWARNING: Although the definition of assignment implies that overlaps\nbetween the left-hand side and the right-hand side are \'safe\' (for\nexample ``a, b = b, a`` swaps two variables), overlaps *within* the\ncollection of assigned-to variables are not safe! For instance, the\nfollowing program prints ``[0, 2]``:\n\n x = [0, 1]\n i = 0\n i, x[i] = 1, 2\n print(x)\n\nSee also:\n\n **PEP 3132** - Extended Iterable Unpacking\n The specification for the ``*target`` feature.\n\n\nAugmented assignment statements\n===============================\n\nAugmented assignment is the combination, in a single statement, of a\nbinary operation and an assignment statement:\n\n augmented_assignment_stmt ::= augtarget augop (expression_list | yield_expression)\n augtarget ::= identifier | attributeref | subscription | slicing\n augop ::= "+=" | "-=" | "*=" | "/=" | "//=" | "%=" | "**="\n | ">>=" | "<<=" | "&=" | "^=" | "|="\n\n(See section *Primaries* for the syntax definitions for the last three\nsymbols.)\n\nAn augmented assignment evaluates the target (which, unlike normal\nassignment statements, cannot be an unpacking) and the expression\nlist, performs the binary operation specific to the type of assignment\non the two operands, and assigns the result to the original target.\nThe target is only evaluated once.\n\nAn augmented assignment expression like ``x += 1`` can be rewritten as\n``x = x + 1`` to achieve a similar, but not exactly equal effect. In\nthe augmented version, ``x`` is only evaluated once. Also, when\npossible, the actual operation is performed *in-place*, meaning that\nrather than creating a new object and assigning that to the target,\nthe old object is modified instead.\n\nWith the exception of assigning to tuples and multiple targets in a\nsingle statement, the assignment done by augmented assignment\nstatements is handled the same way as normal assignments. Similarly,\nwith the exception of the possible *in-place* behavior, the binary\noperation performed by augmented assignment is the same as the normal\nbinary operations.\n\nFor targets which are attribute references, the same *caveat about\nclass and instance attributes* applies as for regular assignments.\n', 'atom-identifiers': '\nIdentifiers (Names)\n*******************\n\nAn identifier occurring as an atom is a name. See section\n*Identifiers and keywords* for lexical definition and section *Naming\nand binding* for documentation of naming and binding.\n\nWhen the name is bound to an object, evaluation of the atom yields\nthat object. When a name is not bound, an attempt to evaluate it\nraises a ``NameError`` exception.\n\n**Private name mangling:** When an identifier that textually occurs in\na class definition begins with two or more underscore characters and\ndoes not end in two or more underscores, it is considered a *private\nname* of that class. Private names are transformed to a longer form\nbefore code is generated for them. The transformation inserts the\nclass name in front of the name, with leading underscores removed, and\na single underscore inserted in front of the class name. 
For example,\nthe identifier ``__spam`` occurring in a class named ``Ham`` will be\ntransformed to ``_Ham__spam``. This transformation is independent of\nthe syntactical context in which the identifier is used. If the\ntransformed name is extremely long (longer than 255 characters),\nimplementation defined truncation may happen. If the class name\nconsists only of underscores, no transformation is done.\n', @@ -19,12 +19,12 @@ 'calls': '\nCalls\n*****\n\nA call calls a callable object (e.g., a function) with a possibly\nempty series of arguments:\n\n call ::= primary "(" [argument_list [","] | comprehension] ")"\n argument_list ::= positional_arguments ["," keyword_arguments]\n ["," "*" expression] ["," keyword_arguments]\n ["," "**" expression]\n | keyword_arguments ["," "*" expression]\n ["," keyword_arguments] ["," "**" expression]\n | "*" expression ["," keyword_arguments] ["," "**" expression]\n | "**" expression\n positional_arguments ::= expression ("," expression)*\n keyword_arguments ::= keyword_item ("," keyword_item)*\n keyword_item ::= identifier "=" expression\n\nA trailing comma may be present after the positional and keyword\narguments but does not affect the semantics.\n\nThe primary must evaluate to a callable object (user-defined\nfunctions, built-in functions, methods of built-in objects, class\nobjects, methods of class instances, and all objects having a\n``__call__()`` method are callable). All argument expressions are\nevaluated before the call is attempted. Please refer to section\n*Function definitions* for the syntax of formal parameter lists.\n\nIf keyword arguments are present, they are first converted to\npositional arguments, as follows. First, a list of unfilled slots is\ncreated for the formal parameters. If there are N positional\narguments, they are placed in the first N slots. Next, for each\nkeyword argument, the identifier is used to determine the\ncorresponding slot (if the identifier is the same as the first formal\nparameter name, the first slot is used, and so on). If the slot is\nalready filled, a ``TypeError`` exception is raised. Otherwise, the\nvalue of the argument is placed in the slot, filling it (even if the\nexpression is ``None``, it fills the slot). When all arguments have\nbeen processed, the slots that are still unfilled are filled with the\ncorresponding default value from the function definition. (Default\nvalues are calculated, once, when the function is defined; thus, a\nmutable object such as a list or dictionary used as default value will\nbe shared by all calls that don\'t specify an argument value for the\ncorresponding slot; this should usually be avoided.) If there are any\nunfilled slots for which no default value is specified, a\n``TypeError`` exception is raised. Otherwise, the list of filled\nslots is used as the argument list for the call.\n\n**CPython implementation detail:** An implementation may provide\nbuilt-in functions whose positional parameters do not have names, even\nif they are \'named\' for the purpose of documentation, and which\ntherefore cannot be supplied by keyword. 
In CPython, this is the case\nfor functions implemented in C that use ``PyArg_ParseTuple()`` to\nparse their arguments.\n\nIf there are more positional arguments than there are formal parameter\nslots, a ``TypeError`` exception is raised, unless a formal parameter\nusing the syntax ``*identifier`` is present; in this case, that formal\nparameter receives a tuple containing the excess positional arguments\n(or an empty tuple if there were no excess positional arguments).\n\nIf any keyword argument does not correspond to a formal parameter\nname, a ``TypeError`` exception is raised, unless a formal parameter\nusing the syntax ``**identifier`` is present; in this case, that\nformal parameter receives a dictionary containing the excess keyword\narguments (using the keywords as keys and the argument values as\ncorresponding values), or a (new) empty dictionary if there were no\nexcess keyword arguments.\n\nIf the syntax ``*expression`` appears in the function call,\n``expression`` must evaluate to an iterable. Elements from this\niterable are treated as if they were additional positional arguments;\nif there are positional arguments *x1*, ..., *xN*, and ``expression``\nevaluates to a sequence *y1*, ..., *yM*, this is equivalent to a call\nwith M+N positional arguments *x1*, ..., *xN*, *y1*, ..., *yM*.\n\nA consequence of this is that although the ``*expression`` syntax may\nappear *after* some keyword arguments, it is processed *before* the\nkeyword arguments (and the ``**expression`` argument, if any -- see\nbelow). So:\n\n >>> def f(a, b):\n ... print(a, b)\n ...\n >>> f(b=1, *(2,))\n 2 1\n >>> f(a=1, *(2,))\n Traceback (most recent call last):\n File "", line 1, in ?\n TypeError: f() got multiple values for keyword argument \'a\'\n >>> f(1, *(2,))\n 1 2\n\nIt is unusual for both keyword arguments and the ``*expression``\nsyntax to be used in the same call, so in practice this confusion does\nnot arise.\n\nIf the syntax ``**expression`` appears in the function call,\n``expression`` must evaluate to a mapping, the contents of which are\ntreated as additional keyword arguments. In the case of a keyword\nappearing in both ``expression`` and as an explicit keyword argument,\na ``TypeError`` exception is raised.\n\nFormal parameters using the syntax ``*identifier`` or ``**identifier``\ncannot be used as positional argument slots or as keyword argument\nnames.\n\nA call always returns some value, possibly ``None``, unless it raises\nan exception. How this value is computed depends on the type of the\ncallable object.\n\nIf it is---\n\na user-defined function:\n The code block for the function is executed, passing it the\n argument list. The first thing the code block will do is bind the\n formal parameters to the arguments; this is described in section\n *Function definitions*. 
When the code block executes a ``return``\n statement, this specifies the return value of the function call.\n\na built-in function or method:\n The result is up to the interpreter; see *Built-in Functions* for\n the descriptions of built-in functions and methods.\n\na class object:\n A new instance of that class is returned.\n\na class instance method:\n The corresponding user-defined function is called, with an argument\n list that is one longer than the argument list of the call: the\n instance becomes the first argument.\n\na class instance:\n The class must define a ``__call__()`` method; the effect is then\n the same as if that method was called.\n', 'class': '\nClass definitions\n*****************\n\nA class definition defines a class object (see section *The standard\ntype hierarchy*):\n\n classdef ::= [decorators] "class" classname [inheritance] ":" suite\n inheritance ::= "(" [parameter_list] ")"\n classname ::= identifier\n\nA class definition is an executable statement. The inheritance list\nusually gives a list of base classes (see *Customizing class creation*\nfor more advanced uses), so each item in the list should evaluate to a\nclass object which allows subclassing. Classes without an inheritance\nlist inherit, by default, from the base class ``object``; hence,\n\n class Foo:\n pass\n\nis equivalent to\n\n class Foo(object):\n pass\n\nThe class\'s suite is then executed in a new execution frame (see\n*Naming and binding*), using a newly created local namespace and the\noriginal global namespace. (Usually, the suite contains mostly\nfunction definitions.) When the class\'s suite finishes execution, its\nexecution frame is discarded but its local namespace is saved. [4] A\nclass object is then created using the inheritance list for the base\nclasses and the saved local namespace for the attribute dictionary.\nThe class name is bound to this class object in the original local\nnamespace.\n\nClass creation can be customized heavily using *metaclasses*.\n\nClasses can also be decorated: just like when decorating functions,\n\n @f1(arg)\n @f2\n class Foo: pass\n\nis equivalent to\n\n class Foo: pass\n Foo = f1(arg)(f2(Foo))\n\nThe evaluation rules for the decorator expressions are the same as for\nfunction decorators. The result must be a class object, which is then\nbound to the class name.\n\n**Programmer\'s note:** Variables defined in the class definition are\nclass attributes; they are shared by instances. Instance attributes\ncan be set in a method with ``self.name = value``. Both class and\ninstance attributes are accessible through the notation\n"``self.name``", and an instance attribute hides a class attribute\nwith the same name when accessed in this way. Class attributes can be\nused as defaults for instance attributes, but using mutable values\nthere can lead to unexpected results. 
*Descriptors* can be used to\ncreate instance variables with different implementation details.\n\nSee also:\n\n **PEP 3115** - Metaclasses in Python 3 **PEP 3129** - Class\n Decorators\n\n-[ Footnotes ]-\n\n[1] The exception is propagated to the invocation stack unless there\n is a ``finally`` clause which happens to raise another exception.\n That new exception causes the old one to be lost.\n\n[2] Currently, control "flows off the end" except in the case of an\n exception or the execution of a ``return``, ``continue``, or\n ``break`` statement.\n\n[3] A string literal appearing as the first statement in the function\n body is transformed into the function\'s ``__doc__`` attribute and\n therefore the function\'s *docstring*.\n\n[4] A string literal appearing as the first statement in the class\n body is transformed into the namespace\'s ``__doc__`` item and\n therefore the class\'s *docstring*.\n', 'comparisons': '\nComparisons\n***********\n\nUnlike C, all comparison operations in Python have the same priority,\nwhich is lower than that of any arithmetic, shifting or bitwise\noperation. Also unlike C, expressions like ``a < b < c`` have the\ninterpretation that is conventional in mathematics:\n\n comparison ::= or_expr ( comp_operator or_expr )*\n comp_operator ::= "<" | ">" | "==" | ">=" | "<=" | "!="\n | "is" ["not"] | ["not"] "in"\n\nComparisons yield boolean values: ``True`` or ``False``.\n\nComparisons can be chained arbitrarily, e.g., ``x < y <= z`` is\nequivalent to ``x < y and y <= z``, except that ``y`` is evaluated\nonly once (but in both cases ``z`` is not evaluated at all when ``x <\ny`` is found to be false).\n\nFormally, if *a*, *b*, *c*, ..., *y*, *z* are expressions and *op1*,\n*op2*, ..., *opN* are comparison operators, then ``a op1 b op2 c ... y\nopN z`` is equivalent to ``a op1 b and b op2 c and ... y opN z``,\nexcept that each expression is evaluated at most once.\n\nNote that ``a op1 b op2 c`` doesn\'t imply any kind of comparison\nbetween *a* and *c*, so that, e.g., ``x < y > z`` is perfectly legal\n(though perhaps not pretty).\n\nThe operators ``<``, ``>``, ``==``, ``>=``, ``<=``, and ``!=`` compare\nthe values of two objects. The objects need not have the same type.\nIf both are numbers, they are converted to a common type. Otherwise,\nthe ``==`` and ``!=`` operators *always* consider objects of different\ntypes to be unequal, while the ``<``, ``>``, ``>=`` and ``<=``\noperators raise a ``TypeError`` when comparing objects of different\ntypes that do not implement these operators for the given pair of\ntypes. You can control comparison behavior of objects of non-built-in\ntypes by defining rich comparison methods like ``__gt__()``, described\nin section *Basic customization*.\n\nComparison of objects of the same type depends on the type:\n\n* Numbers are compared arithmetically.\n\n* The values ``float(\'NaN\')`` and ``Decimal(\'NaN\')`` are special. The\n are identical to themselves, ``x is x`` but are not equal to\n themselves, ``x != x``. Additionally, comparing any value to a\n not-a-number value will return ``False``. 
For example, both ``3 <\n float(\'NaN\')`` and ``float(\'NaN\') < 3`` will return ``False``.\n\n* Bytes objects are compared lexicographically using the numeric\n values of their elements.\n\n* Strings are compared lexicographically using the numeric equivalents\n (the result of the built-in function ``ord()``) of their characters.\n [3] String and bytes object can\'t be compared!\n\n* Tuples and lists are compared lexicographically using comparison of\n corresponding elements. This means that to compare equal, each\n element must compare equal and the two sequences must be of the same\n type and have the same length.\n\n If not equal, the sequences are ordered the same as their first\n differing elements. For example, ``[1,2,x] <= [1,2,y]`` has the\n same value as ``x <= y``. If the corresponding element does not\n exist, the shorter sequence is ordered first (for example, ``[1,2] <\n [1,2,3]``).\n\n* Mappings (dictionaries) compare equal if and only if they have the\n same ``(key, value)`` pairs. Order comparisons ``(\'<\', \'<=\', \'>=\',\n \'>\')`` raise ``TypeError``.\n\n* Sets and frozensets define comparison operators to mean subset and\n superset tests. Those relations do not define total orderings (the\n two sets ``{1,2}`` and {2,3} are not equal, nor subsets of one\n another, nor supersets of one another). Accordingly, sets are not\n appropriate arguments for functions which depend on total ordering.\n For example, ``min()``, ``max()``, and ``sorted()`` produce\n undefined results given a list of sets as inputs.\n\n* Most other objects of built-in types compare unequal unless they are\n the same object; the choice whether one object is considered smaller\n or larger than another one is made arbitrarily but consistently\n within one execution of a program.\n\nComparison of objects of the differing types depends on whether either\nof the types provide explicit support for the comparison. Most\nnumeric types can be compared with one another, but comparisons of\n``float`` and ``Decimal`` are not supported to avoid the inevitable\nconfusion arising from representation issues such as ``float(\'1.1\')``\nbeing inexactly represented and therefore not exactly equal to\n``Decimal(\'1.1\')`` which is. When cross-type comparison is not\nsupported, the comparison method returns ``NotImplemented``. This can\ncreate the illusion of non-transitivity between supported cross-type\ncomparisons and unsupported comparisons. For example, ``Decimal(2) ==\n2`` and ``2 == float(2)`` but ``Decimal(2) != float(2)``.\n\nThe operators ``in`` and ``not in`` test for membership. ``x in s``\nevaluates to true if *x* is a member of *s*, and false otherwise. ``x\nnot in s`` returns the negation of ``x in s``. All built-in sequences\nand set types support this as well as dictionary, for which ``in``\ntests whether a the dictionary has a given key. For container types\nsuch as list, tuple, set, frozenset, dict, or collections.deque, the\nexpression ``x in y`` is equivalent to ``any(x is e or x == e for e in\ny)``.\n\nFor the string and bytes types, ``x in y`` is true if and only if *x*\nis a substring of *y*. 
An equivalent test is ``y.find(x) != -1``.\nEmpty strings are always considered to be a substring of any other\nstring, so ``"" in "abc"`` will return ``True``.\n\nFor user-defined classes which define the ``__contains__()`` method,\n``x in y`` is true if and only if ``y.__contains__(x)`` is true.\n\nFor user-defined classes which do not define ``__contains__()`` but do\ndefine ``__iter__()``, ``x in y`` is true if some value ``z`` with ``x\n== z`` is produced while iterating over ``y``. If an exception is\nraised during the iteration, it is as if ``in`` raised that exception.\n\nLastly, the old-style iteration protocol is tried: if a class defines\n``__getitem__()``, ``x in y`` is true if and only if there is a non-\nnegative integer index *i* such that ``x == y[i]``, and all lower\ninteger indices do not raise ``IndexError`` exception. (If any other\nexception is raised, it is as if ``in`` raised that exception).\n\nThe operator ``not in`` is defined to have the inverse true value of\n``in``.\n\nThe operators ``is`` and ``is not`` test for object identity: ``x is\ny`` is true if and only if *x* and *y* are the same object. ``x is\nnot y`` yields the inverse truth value. [4]\n', - 'compound': '\nCompound statements\n*******************\n\nCompound statements contain (groups of) other statements; they affect\nor control the execution of those other statements in some way. In\ngeneral, compound statements span multiple lines, although in simple\nincarnations a whole compound statement may be contained in one line.\n\nThe ``if``, ``while`` and ``for`` statements implement traditional\ncontrol flow constructs. ``try`` specifies exception handlers and/or\ncleanup code for a group of statements, while the ``with`` statement\nallows the execution of initialization and finalization code around a\nblock of code. Function and class definitions are also syntactically\ncompound statements.\n\nCompound statements consist of one or more \'clauses.\' A clause\nconsists of a header and a \'suite.\' The clause headers of a\nparticular compound statement are all at the same indentation level.\nEach clause header begins with a uniquely identifying keyword and ends\nwith a colon. A suite is a group of statements controlled by a\nclause. A suite can be one or more semicolon-separated simple\nstatements on the same line as the header, following the header\'s\ncolon, or it can be one or more indented statements on subsequent\nlines. Only the latter form of suite can contain nested compound\nstatements; the following is illegal, mostly because it wouldn\'t be\nclear to which ``if`` clause a following ``else`` clause would belong:\n\n if test1: if test2: print(x)\n\nAlso note that the semicolon binds tighter than the colon in this\ncontext, so that in the following example, either all or none of the\n``print()`` calls are executed:\n\n if x < y < z: print(x); print(y); print(z)\n\nSummarizing:\n\n compound_stmt ::= if_stmt\n | while_stmt\n | for_stmt\n | try_stmt\n | with_stmt\n | funcdef\n | classdef\n suite ::= stmt_list NEWLINE | NEWLINE INDENT statement+ DEDENT\n statement ::= stmt_list NEWLINE | compound_stmt\n stmt_list ::= simple_stmt (";" simple_stmt)* [";"]\n\nNote that statements always end in a ``NEWLINE`` possibly followed by\na ``DEDENT``. 
Also note that optional continuation clauses always\nbegin with a keyword that cannot start a statement, thus there are no\nambiguities (the \'dangling ``else``\' problem is solved in Python by\nrequiring nested ``if`` statements to be indented).\n\nThe formatting of the grammar rules in the following sections places\neach clause on a separate line for clarity.\n\n\nThe ``if`` statement\n====================\n\nThe ``if`` statement is used for conditional execution:\n\n if_stmt ::= "if" expression ":" suite\n ( "elif" expression ":" suite )*\n ["else" ":" suite]\n\nIt selects exactly one of the suites by evaluating the expressions one\nby one until one is found to be true (see section *Boolean operations*\nfor the definition of true and false); then that suite is executed\n(and no other part of the ``if`` statement is executed or evaluated).\nIf all expressions are false, the suite of the ``else`` clause, if\npresent, is executed.\n\n\nThe ``while`` statement\n=======================\n\nThe ``while`` statement is used for repeated execution as long as an\nexpression is true:\n\n while_stmt ::= "while" expression ":" suite\n ["else" ":" suite]\n\nThis repeatedly tests the expression and, if it is true, executes the\nfirst suite; if the expression is false (which may be the first time\nit is tested) the suite of the ``else`` clause, if present, is\nexecuted and the loop terminates.\n\nA ``break`` statement executed in the first suite terminates the loop\nwithout executing the ``else`` clause\'s suite. A ``continue``\nstatement executed in the first suite skips the rest of the suite and\ngoes back to testing the expression.\n\n\nThe ``for`` statement\n=====================\n\nThe ``for`` statement is used to iterate over the elements of a\nsequence (such as a string, tuple or list) or other iterable object:\n\n for_stmt ::= "for" target_list "in" expression_list ":" suite\n ["else" ":" suite]\n\nThe expression list is evaluated once; it should yield an iterable\nobject. An iterator is created for the result of the\n``expression_list``. The suite is then executed once for each item\nprovided by the iterator, in the order of ascending indices. Each\nitem in turn is assigned to the target list using the standard rules\nfor assignments (see *Assignment statements*), and then the suite is\nexecuted. When the items are exhausted (which is immediately when the\nsequence is empty or an iterator raises a ``StopIteration``\nexception), the suite in the ``else`` clause, if present, is executed,\nand the loop terminates.\n\nA ``break`` statement executed in the first suite terminates the loop\nwithout executing the ``else`` clause\'s suite. A ``continue``\nstatement executed in the first suite skips the rest of the suite and\ncontinues with the next item, or with the ``else`` clause if there was\nno next item.\n\nThe suite may assign to the variable(s) in the target list; this does\nnot affect the next item assigned to it.\n\nNames in the target list are not deleted when the loop is finished,\nbut if the sequence is empty, it will not have been assigned to at all\nby the loop. Hint: the built-in function ``range()`` returns an\niterator of integers suitable to emulate the effect of Pascal\'s ``for\ni := a to b do``; e.g., ``list(range(3))`` returns the list ``[0, 1,\n2]``.\n\nNote: There is a subtlety when the sequence is being modified by the loop\n (this can only occur for mutable sequences, i.e. lists). 
An\n internal counter is used to keep track of which item is used next,\n and this is incremented on each iteration. When this counter has\n reached the length of the sequence the loop terminates. This means\n that if the suite deletes the current (or a previous) item from the\n sequence, the next item will be skipped (since it gets the index of\n the current item which has already been treated). Likewise, if the\n suite inserts an item in the sequence before the current item, the\n current item will be treated again the next time through the loop.\n This can lead to nasty bugs that can be avoided by making a\n temporary copy using a slice of the whole sequence, e.g.,\n\n for x in a[:]:\n if x < 0: a.remove(x)\n\n\nThe ``try`` statement\n=====================\n\nThe ``try`` statement specifies exception handlers and/or cleanup code\nfor a group of statements:\n\n try_stmt ::= try1_stmt | try2_stmt\n try1_stmt ::= "try" ":" suite\n ("except" [expression ["as" target]] ":" suite)+\n ["else" ":" suite]\n ["finally" ":" suite]\n try2_stmt ::= "try" ":" suite\n "finally" ":" suite\n\nThe ``except`` clause(s) specify one or more exception handlers. When\nno exception occurs in the ``try`` clause, no exception handler is\nexecuted. When an exception occurs in the ``try`` suite, a search for\nan exception handler is started. This search inspects the except\nclauses in turn until one is found that matches the exception. An\nexpression-less except clause, if present, must be last; it matches\nany exception. For an except clause with an expression, that\nexpression is evaluated, and the clause matches the exception if the\nresulting object is "compatible" with the exception. An object is\ncompatible with an exception if it is the class or a base class of the\nexception object or a tuple containing an item compatible with the\nexception.\n\nIf no except clause matches the exception, the search for an exception\nhandler continues in the surrounding code and on the invocation stack.\n[1]\n\nIf the evaluation of an expression in the header of an except clause\nraises an exception, the original search for a handler is canceled and\na search starts for the new exception in the surrounding code and on\nthe call stack (it is treated as if the entire ``try`` statement\nraised the exception).\n\nWhen a matching except clause is found, the exception is assigned to\nthe target specified after the ``as`` keyword in that except clause,\nif present, and the except clause\'s suite is executed. All except\nclauses must have an executable block. When the end of this block is\nreached, execution continues normally after the entire try statement.\n(This means that if two nested handlers exist for the same exception,\nand the exception occurs in the try clause of the inner handler, the\nouter handler will not handle the exception.)\n\nWhen an exception has been assigned using ``as target``, it is cleared\nat the end of the except clause. This is as if\n\n except E as N:\n foo\n\nwas translated to\n\n except E as N:\n try:\n foo\n finally:\n del N\n\nThis means the exception must be assigned to a different name to be\nable to refer to it after the except clause. Exceptions are cleared\nbecause with the traceback attached to them, they form a reference\ncycle with the stack frame, keeping all locals in that frame alive\nuntil the next garbage collection occurs.\n\nBefore an except clause\'s suite is executed, details about the\nexception are stored in the ``sys`` module and can be access via\n``sys.exc_info()``. 
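A minimal sketch of these handler rules (``spam`` is deliberately an undefined name):

   import sys

   try:
       spam                       # raises NameError
   except NameError as err:
       print("handled:", err)     # 'err' is bound only within this clause
       print(sys.exc_info()[0])   # the exception class, while it is being handled
   # referring to 'err' here would fail: the name is deleted at the end of
   # the except clause, as described above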
``sys.exc_info()`` returns a 3-tuple consisting of\nthe exception class, the exception instance and a traceback object\n(see section *The standard type hierarchy*) identifying the point in\nthe program where the exception occurred. ``sys.exc_info()`` values\nare restored to their previous values (before the call) when returning\nfrom a function that handled an exception.\n\nThe optional ``else`` clause is executed if and when control flows off\nthe end of the ``try`` clause. [2] Exceptions in the ``else`` clause\nare not handled by the preceding ``except`` clauses.\n\nIf ``finally`` is present, it specifies a \'cleanup\' handler. The\n``try`` clause is executed, including any ``except`` and ``else``\nclauses. If an exception occurs in any of the clauses and is not\nhandled, the exception is temporarily saved. The ``finally`` clause is\nexecuted. If there is a saved exception, it is re-raised at the end\nof the ``finally`` clause. If the ``finally`` clause raises another\nexception or executes a ``return`` or ``break`` statement, the saved\nexception is set as the context of the new exception. The exception\ninformation is not available to the program during execution of the\n``finally`` clause.\n\nWhen a ``return``, ``break`` or ``continue`` statement is executed in\nthe ``try`` suite of a ``try``...``finally`` statement, the\n``finally`` clause is also executed \'on the way out.\' A ``continue``\nstatement is illegal in the ``finally`` clause. (The reason is a\nproblem with the current implementation --- this restriction may be\nlifted in the future).\n\nAdditional information on exceptions can be found in section\n*Exceptions*, and information on using the ``raise`` statement to\ngenerate exceptions may be found in section *The raise statement*.\n\n\nThe ``with`` statement\n======================\n\nThe ``with`` statement is used to wrap the execution of a block with\nmethods defined by a context manager (see section *With Statement\nContext Managers*). This allows common\n``try``...``except``...``finally`` usage patterns to be encapsulated\nfor convenient reuse.\n\n with_stmt ::= "with" with_item ("," with_item)* ":" suite\n with_item ::= expression ["as" target]\n\nThe execution of the ``with`` statement with one "item" proceeds as\nfollows:\n\n1. The context expression (the expression given in the ``with_item``)\n is evaluated to obtain a context manager.\n\n2. The context manager\'s ``__exit__()`` is loaded for later use.\n\n3. The context manager\'s ``__enter__()`` method is invoked.\n\n4. If a target was included in the ``with`` statement, the return\n value from ``__enter__()`` is assigned to it.\n\n Note: The ``with`` statement guarantees that if the ``__enter__()``\n method returns without an error, then ``__exit__()`` will always\n be called. Thus, if an error occurs during the assignment to the\n target list, it will be treated the same as an error occurring\n within the suite would be. See step 6 below.\n\n5. The suite is executed.\n\n6. The context manager\'s ``__exit__()`` method is invoked. If an\n exception caused the suite to be exited, its type, value, and\n traceback are passed as arguments to ``__exit__()``. Otherwise,\n three ``None`` arguments are supplied.\n\n If the suite was exited due to an exception, and the return value\n from the ``__exit__()`` method was false, the exception is\n reraised. 
If the return value was true, the exception is\n suppressed, and execution continues with the statement following\n the ``with`` statement.\n\n If the suite was exited for any reason other than an exception, the\n return value from ``__exit__()`` is ignored, and execution proceeds\n at the normal location for the kind of exit that was taken.\n\nWith more than one item, the context managers are processed as if\nmultiple ``with`` statements were nested:\n\n with A() as a, B() as b:\n suite\n\nis equivalent to\n\n with A() as a:\n with B() as b:\n suite\n\nChanged in version 3.1: Support for multiple context expressions.\n\nSee also:\n\n **PEP 0343** - The "with" statement\n The specification, background, and examples for the Python\n ``with`` statement.\n\n\nFunction definitions\n====================\n\nA function definition defines a user-defined function object (see\nsection *The standard type hierarchy*):\n\n funcdef ::= [decorators] "def" funcname "(" [parameter_list] ")" ["->" expression] ":" suite\n decorators ::= decorator+\n decorator ::= "@" dotted_name ["(" [parameter_list [","]] ")"] NEWLINE\n dotted_name ::= identifier ("." identifier)*\n parameter_list ::= (defparameter ",")*\n ( "*" [parameter] ("," defparameter)*\n [, "**" parameter]\n | "**" parameter\n | defparameter [","] )\n parameter ::= identifier [":" expression]\n defparameter ::= parameter ["=" expression]\n funcname ::= identifier\n\nA function definition is an executable statement. Its execution binds\nthe function name in the current local namespace to a function object\n(a wrapper around the executable code for the function). This\nfunction object contains a reference to the current global namespace\nas the global namespace to be used when the function is called.\n\nThe function definition does not execute the function body; this gets\nexecuted only when the function is called. [3]\n\nA function definition may be wrapped by one or more *decorator*\nexpressions. Decorator expressions are evaluated when the function is\ndefined, in the scope that contains the function definition. The\nresult must be a callable, which is invoked with the function object\nas the only argument. The returned value is bound to the function name\ninstead of the function object. Multiple decorators are applied in\nnested fashion. For example, the following code\n\n @f1(arg)\n @f2\n def func(): pass\n\nis equivalent to\n\n def func(): pass\n func = f1(arg)(f2(func))\n\nWhen one or more parameters have the form *parameter* ``=``\n*expression*, the function is said to have "default parameter values."\nFor a parameter with a default value, the corresponding argument may\nbe omitted from a call, in which case the parameter\'s default value is\nsubstituted. If a parameter has a default value, all following\nparameters up until the "``*``" must also have a default value ---\nthis is a syntactic restriction that is not expressed by the grammar.\n\n**Default parameter values are evaluated when the function definition\nis executed.** This means that the expression is evaluated once, when\nthe function is defined, and that the same "pre-computed" value is\nused for each call. This is especially important to understand when a\ndefault parameter is a mutable object, such as a list or a dictionary:\nif the function modifies the object (e.g. by appending an item to a\nlist), the default value is in effect modified. This is generally not\nwhat was intended. 
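A short sketch of the pitfall itself (``append_to`` is an invented name):

   def append_to(item, bucket=[]):    # the list is created once, when 'def' executes
       bucket.append(item)
       return bucket

   print(append_to(1))    # [1]
   print(append_to(2))    # [1, 2] -- the same list object was reused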
A way around this is to use ``None`` as the\ndefault, and explicitly test for it in the body of the function, e.g.:\n\n def whats_on_the_telly(penguin=None):\n if penguin is None:\n penguin = []\n penguin.append("property of the zoo")\n return penguin\n\nFunction call semantics are described in more detail in section\n*Calls*. A function call always assigns values to all parameters\nmentioned in the parameter list, either from position arguments, from\nkeyword arguments, or from default values. If the form\n"``*identifier``" is present, it is initialized to a tuple receiving\nany excess positional parameters, defaulting to the empty tuple. If\nthe form "``**identifier``" is present, it is initialized to a new\ndictionary receiving any excess keyword arguments, defaulting to a new\nempty dictionary. Parameters after "``*``" or "``*identifier``" are\nkeyword-only parameters and may only be passed used keyword arguments.\n\nParameters may have annotations of the form "``: expression``"\nfollowing the parameter name. Any parameter may have an annotation\neven those of the form ``*identifier`` or ``**identifier``. Functions\nmay have "return" annotation of the form "``-> expression``" after the\nparameter list. These annotations can be any valid Python expression\nand are evaluated when the function definition is executed.\nAnnotations may be evaluated in a different order than they appear in\nthe source code. The presence of annotations does not change the\nsemantics of a function. The annotation values are available as\nvalues of a dictionary keyed by the parameters\' names in the\n``__annotations__`` attribute of the function object.\n\nIt is also possible to create anonymous functions (functions not bound\nto a name), for immediate use in expressions. This uses lambda forms,\ndescribed in section *Lambdas*. Note that the lambda form is merely a\nshorthand for a simplified function definition; a function defined in\na "``def``" statement can be passed around or assigned to another name\njust like a function defined by a lambda form. The "``def``" form is\nactually more powerful since it allows the execution of multiple\nstatements and annotations.\n\n**Programmer\'s note:** Functions are first-class objects. A "``def``"\nform executed inside a function definition defines a local function\nthat can be returned or passed around. Free variables used in the\nnested function can access the local variables of the function\ncontaining the def. See section *Naming and binding* for details.\n\n\nClass definitions\n=================\n\nA class definition defines a class object (see section *The standard\ntype hierarchy*):\n\n classdef ::= [decorators] "class" classname [inheritance] ":" suite\n inheritance ::= "(" [parameter_list] ")"\n classname ::= identifier\n\nA class definition is an executable statement. The inheritance list\nusually gives a list of base classes (see *Customizing class creation*\nfor more advanced uses), so each item in the list should evaluate to a\nclass object which allows subclassing. Classes without an inheritance\nlist inherit, by default, from the base class ``object``; hence,\n\n class Foo:\n pass\n\nis equivalent to\n\n class Foo(object):\n pass\n\nThe class\'s suite is then executed in a new execution frame (see\n*Naming and binding*), using a newly created local namespace and the\noriginal global namespace. (Usually, the suite contains mostly\nfunction definitions.) 
When the class\'s suite finishes execution, its\nexecution frame is discarded but its local namespace is saved. [4] A\nclass object is then created using the inheritance list for the base\nclasses and the saved local namespace for the attribute dictionary.\nThe class name is bound to this class object in the original local\nnamespace.\n\nClass creation can be customized heavily using *metaclasses*.\n\nClasses can also be decorated: just like when decorating functions,\n\n @f1(arg)\n @f2\n class Foo: pass\n\nis equivalent to\n\n class Foo: pass\n Foo = f1(arg)(f2(Foo))\n\nThe evaluation rules for the decorator expressions are the same as for\nfunction decorators. The result must be a class object, which is then\nbound to the class name.\n\n**Programmer\'s note:** Variables defined in the class definition are\nclass attributes; they are shared by instances. Instance attributes\ncan be set in a method with ``self.name = value``. Both class and\ninstance attributes are accessible through the notation\n"``self.name``", and an instance attribute hides a class attribute\nwith the same name when accessed in this way. Class attributes can be\nused as defaults for instance attributes, but using mutable values\nthere can lead to unexpected results. *Descriptors* can be used to\ncreate instance variables with different implementation details.\n\nSee also:\n\n **PEP 3115** - Metaclasses in Python 3 **PEP 3129** - Class\n Decorators\n\n-[ Footnotes ]-\n\n[1] The exception is propagated to the invocation stack unless there\n is a ``finally`` clause which happens to raise another exception.\n That new exception causes the old one to be lost.\n\n[2] Currently, control "flows off the end" except in the case of an\n exception or the execution of a ``return``, ``continue``, or\n ``break`` statement.\n\n[3] A string literal appearing as the first statement in the function\n body is transformed into the function\'s ``__doc__`` attribute and\n therefore the function\'s *docstring*.\n\n[4] A string literal appearing as the first statement in the class\n body is transformed into the namespace\'s ``__doc__`` item and\n therefore the class\'s *docstring*.\n', + 'compound': '\nCompound statements\n*******************\n\nCompound statements contain (groups of) other statements; they affect\nor control the execution of those other statements in some way. In\ngeneral, compound statements span multiple lines, although in simple\nincarnations a whole compound statement may be contained in one line.\n\nThe ``if``, ``while`` and ``for`` statements implement traditional\ncontrol flow constructs. ``try`` specifies exception handlers and/or\ncleanup code for a group of statements, while the ``with`` statement\nallows the execution of initialization and finalization code around a\nblock of code. Function and class definitions are also syntactically\ncompound statements.\n\nCompound statements consist of one or more \'clauses.\' A clause\nconsists of a header and a \'suite.\' The clause headers of a\nparticular compound statement are all at the same indentation level.\nEach clause header begins with a uniquely identifying keyword and ends\nwith a colon. A suite is a group of statements controlled by a\nclause. A suite can be one or more semicolon-separated simple\nstatements on the same line as the header, following the header\'s\ncolon, or it can be one or more indented statements on subsequent\nlines. 
Only the latter form of suite can contain nested compound\nstatements; the following is illegal, mostly because it wouldn\'t be\nclear to which ``if`` clause a following ``else`` clause would belong:\n\n if test1: if test2: print(x)\n\nAlso note that the semicolon binds tighter than the colon in this\ncontext, so that in the following example, either all or none of the\n``print()`` calls are executed:\n\n if x < y < z: print(x); print(y); print(z)\n\nSummarizing:\n\n compound_stmt ::= if_stmt\n | while_stmt\n | for_stmt\n | try_stmt\n | with_stmt\n | funcdef\n | classdef\n suite ::= stmt_list NEWLINE | NEWLINE INDENT statement+ DEDENT\n statement ::= stmt_list NEWLINE | compound_stmt\n stmt_list ::= simple_stmt (";" simple_stmt)* [";"]\n\nNote that statements always end in a ``NEWLINE`` possibly followed by\na ``DEDENT``. Also note that optional continuation clauses always\nbegin with a keyword that cannot start a statement, thus there are no\nambiguities (the \'dangling ``else``\' problem is solved in Python by\nrequiring nested ``if`` statements to be indented).\n\nThe formatting of the grammar rules in the following sections places\neach clause on a separate line for clarity.\n\n\nThe ``if`` statement\n====================\n\nThe ``if`` statement is used for conditional execution:\n\n if_stmt ::= "if" expression ":" suite\n ( "elif" expression ":" suite )*\n ["else" ":" suite]\n\nIt selects exactly one of the suites by evaluating the expressions one\nby one until one is found to be true (see section *Boolean operations*\nfor the definition of true and false); then that suite is executed\n(and no other part of the ``if`` statement is executed or evaluated).\nIf all expressions are false, the suite of the ``else`` clause, if\npresent, is executed.\n\n\nThe ``while`` statement\n=======================\n\nThe ``while`` statement is used for repeated execution as long as an\nexpression is true:\n\n while_stmt ::= "while" expression ":" suite\n ["else" ":" suite]\n\nThis repeatedly tests the expression and, if it is true, executes the\nfirst suite; if the expression is false (which may be the first time\nit is tested) the suite of the ``else`` clause, if present, is\nexecuted and the loop terminates.\n\nA ``break`` statement executed in the first suite terminates the loop\nwithout executing the ``else`` clause\'s suite. A ``continue``\nstatement executed in the first suite skips the rest of the suite and\ngoes back to testing the expression.\n\n\nThe ``for`` statement\n=====================\n\nThe ``for`` statement is used to iterate over the elements of a\nsequence (such as a string, tuple or list) or other iterable object:\n\n for_stmt ::= "for" target_list "in" expression_list ":" suite\n ["else" ":" suite]\n\nThe expression list is evaluated once; it should yield an iterable\nobject. An iterator is created for the result of the\n``expression_list``. The suite is then executed once for each item\nprovided by the iterator, in the order of ascending indices. Each\nitem in turn is assigned to the target list using the standard rules\nfor assignments (see *Assignment statements*), and then the suite is\nexecuted. When the items are exhausted (which is immediately when the\nsequence is empty or an iterator raises a ``StopIteration``\nexception), the suite in the ``else`` clause, if present, is executed,\nand the loop terminates.\n\nA ``break`` statement executed in the first suite terminates the loop\nwithout executing the ``else`` clause\'s suite. 
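For instance, a small search loop that relies on this ``break``/``else`` behaviour (the names are illustrative only):

   numbers = [3, 5, 9]
   for n in numbers:
       if n % 2 == 0:
           print("found an even number:", n)
           break
   else:
       print("no even number found")    # runs only when the loop was not broken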
A ``continue``\nstatement executed in the first suite skips the rest of the suite and\ncontinues with the next item, or with the ``else`` clause if there was\nno next item.\n\nThe suite may assign to the variable(s) in the target list; this does\nnot affect the next item assigned to it.\n\nNames in the target list are not deleted when the loop is finished,\nbut if the sequence is empty, it will not have been assigned to at all\nby the loop. Hint: the built-in function ``range()`` returns an\niterator of integers suitable to emulate the effect of Pascal\'s ``for\ni := a to b do``; e.g., ``list(range(3))`` returns the list ``[0, 1,\n2]``.\n\nNote: There is a subtlety when the sequence is being modified by the loop\n (this can only occur for mutable sequences, i.e. lists). An\n internal counter is used to keep track of which item is used next,\n and this is incremented on each iteration. When this counter has\n reached the length of the sequence the loop terminates. This means\n that if the suite deletes the current (or a previous) item from the\n sequence, the next item will be skipped (since it gets the index of\n the current item which has already been treated). Likewise, if the\n suite inserts an item in the sequence before the current item, the\n current item will be treated again the next time through the loop.\n This can lead to nasty bugs that can be avoided by making a\n temporary copy using a slice of the whole sequence, e.g.,\n\n for x in a[:]:\n if x < 0: a.remove(x)\n\n\nThe ``try`` statement\n=====================\n\nThe ``try`` statement specifies exception handlers and/or cleanup code\nfor a group of statements:\n\n try_stmt ::= try1_stmt | try2_stmt\n try1_stmt ::= "try" ":" suite\n ("except" [expression ["as" target]] ":" suite)+\n ["else" ":" suite]\n ["finally" ":" suite]\n try2_stmt ::= "try" ":" suite\n "finally" ":" suite\n\nThe ``except`` clause(s) specify one or more exception handlers. When\nno exception occurs in the ``try`` clause, no exception handler is\nexecuted. When an exception occurs in the ``try`` suite, a search for\nan exception handler is started. This search inspects the except\nclauses in turn until one is found that matches the exception. An\nexpression-less except clause, if present, must be last; it matches\nany exception. For an except clause with an expression, that\nexpression is evaluated, and the clause matches the exception if the\nresulting object is "compatible" with the exception. An object is\ncompatible with an exception if it is the class or a base class of the\nexception object or a tuple containing an item compatible with the\nexception.\n\nIf no except clause matches the exception, the search for an exception\nhandler continues in the surrounding code and on the invocation stack.\n[1]\n\nIf the evaluation of an expression in the header of an except clause\nraises an exception, the original search for a handler is canceled and\na search starts for the new exception in the surrounding code and on\nthe call stack (it is treated as if the entire ``try`` statement\nraised the exception).\n\nWhen a matching except clause is found, the exception is assigned to\nthe target specified after the ``as`` keyword in that except clause,\nif present, and the except clause\'s suite is executed. All except\nclauses must have an executable block. 
When the end of this block is\nreached, execution continues normally after the entire try statement.\n(This means that if two nested handlers exist for the same exception,\nand the exception occurs in the try clause of the inner handler, the\nouter handler will not handle the exception.)\n\nWhen an exception has been assigned using ``as target``, it is cleared\nat the end of the except clause. This is as if\n\n except E as N:\n foo\n\nwas translated to\n\n except E as N:\n try:\n foo\n finally:\n del N\n\nThis means the exception must be assigned to a different name to be\nable to refer to it after the except clause. Exceptions are cleared\nbecause with the traceback attached to them, they form a reference\ncycle with the stack frame, keeping all locals in that frame alive\nuntil the next garbage collection occurs.\n\nBefore an except clause\'s suite is executed, details about the\nexception are stored in the ``sys`` module and can be access via\n``sys.exc_info()``. ``sys.exc_info()`` returns a 3-tuple consisting of\nthe exception class, the exception instance and a traceback object\n(see section *The standard type hierarchy*) identifying the point in\nthe program where the exception occurred. ``sys.exc_info()`` values\nare restored to their previous values (before the call) when returning\nfrom a function that handled an exception.\n\nThe optional ``else`` clause is executed if and when control flows off\nthe end of the ``try`` clause. [2] Exceptions in the ``else`` clause\nare not handled by the preceding ``except`` clauses.\n\nIf ``finally`` is present, it specifies a \'cleanup\' handler. The\n``try`` clause is executed, including any ``except`` and ``else``\nclauses. If an exception occurs in any of the clauses and is not\nhandled, the exception is temporarily saved. The ``finally`` clause is\nexecuted. If there is a saved exception, it is re-raised at the end\nof the ``finally`` clause. If the ``finally`` clause raises another\nexception or executes a ``return`` or ``break`` statement, the saved\nexception is set as the context of the new exception. The exception\ninformation is not available to the program during execution of the\n``finally`` clause.\n\nWhen a ``return``, ``break`` or ``continue`` statement is executed in\nthe ``try`` suite of a ``try``...``finally`` statement, the\n``finally`` clause is also executed \'on the way out.\' A ``continue``\nstatement is illegal in the ``finally`` clause. (The reason is a\nproblem with the current implementation --- this restriction may be\nlifted in the future).\n\nAdditional information on exceptions can be found in section\n*Exceptions*, and information on using the ``raise`` statement to\ngenerate exceptions may be found in section *The raise statement*.\n\n\nThe ``with`` statement\n======================\n\nThe ``with`` statement is used to wrap the execution of a block with\nmethods defined by a context manager (see section *With Statement\nContext Managers*). This allows common\n``try``...``except``...``finally`` usage patterns to be encapsulated\nfor convenient reuse.\n\n with_stmt ::= "with" with_item ("," with_item)* ":" suite\n with_item ::= expression ["as" target]\n\nThe execution of the ``with`` statement with one "item" proceeds as\nfollows:\n\n1. The context expression (the expression given in the ``with_item``)\n is evaluated to obtain a context manager.\n\n2. The context manager\'s ``__exit__()`` is loaded for later use.\n\n3. The context manager\'s ``__enter__()`` method is invoked.\n\n4. 
If a target was included in the ``with`` statement, the return\n value from ``__enter__()`` is assigned to it.\n\n Note: The ``with`` statement guarantees that if the ``__enter__()``\n method returns without an error, then ``__exit__()`` will always\n be called. Thus, if an error occurs during the assignment to the\n target list, it will be treated the same as an error occurring\n within the suite would be. See step 6 below.\n\n5. The suite is executed.\n\n6. The context manager\'s ``__exit__()`` method is invoked. If an\n exception caused the suite to be exited, its type, value, and\n traceback are passed as arguments to ``__exit__()``. Otherwise,\n three ``None`` arguments are supplied.\n\n If the suite was exited due to an exception, and the return value\n from the ``__exit__()`` method was false, the exception is\n reraised. If the return value was true, the exception is\n suppressed, and execution continues with the statement following\n the ``with`` statement.\n\n If the suite was exited for any reason other than an exception, the\n return value from ``__exit__()`` is ignored, and execution proceeds\n at the normal location for the kind of exit that was taken.\n\nWith more than one item, the context managers are processed as if\nmultiple ``with`` statements were nested:\n\n with A() as a, B() as b:\n suite\n\nis equivalent to\n\n with A() as a:\n with B() as b:\n suite\n\nChanged in version 3.1: Support for multiple context expressions.\n\nSee also:\n\n **PEP 0343** - The "with" statement\n The specification, background, and examples for the Python\n ``with`` statement.\n\n\nFunction definitions\n====================\n\nA function definition defines a user-defined function object (see\nsection *The standard type hierarchy*):\n\n funcdef ::= [decorators] "def" funcname "(" [parameter_list] ")" ["->" expression] ":" suite\n decorators ::= decorator+\n decorator ::= "@" dotted_name ["(" [parameter_list [","]] ")"] NEWLINE\n dotted_name ::= identifier ("." identifier)*\n parameter_list ::= (defparameter ",")*\n ( "*" [parameter] ("," defparameter)*\n [, "**" parameter]\n | "**" parameter\n | defparameter [","] )\n parameter ::= identifier [":" expression]\n defparameter ::= parameter ["=" expression]\n funcname ::= identifier\n\nA function definition is an executable statement. Its execution binds\nthe function name in the current local namespace to a function object\n(a wrapper around the executable code for the function). This\nfunction object contains a reference to the current global namespace\nas the global namespace to be used when the function is called.\n\nThe function definition does not execute the function body; this gets\nexecuted only when the function is called. [3]\n\nA function definition may be wrapped by one or more *decorator*\nexpressions. Decorator expressions are evaluated when the function is\ndefined, in the scope that contains the function definition. The\nresult must be a callable, which is invoked with the function object\nas the only argument. The returned value is bound to the function name\ninstead of the function object. Multiple decorators are applied in\nnested fashion. 
For example, the following code\n\n @f1(arg)\n @f2\n def func(): pass\n\nis equivalent to\n\n def func(): pass\n func = f1(arg)(f2(func))\n\nWhen one or more parameters have the form *parameter* ``=``\n*expression*, the function is said to have "default parameter values."\nFor a parameter with a default value, the corresponding argument may\nbe omitted from a call, in which case the parameter\'s default value is\nsubstituted. If a parameter has a default value, all following\nparameters up until the "``*``" must also have a default value ---\nthis is a syntactic restriction that is not expressed by the grammar.\n\n**Default parameter values are evaluated when the function definition\nis executed.** This means that the expression is evaluated once, when\nthe function is defined, and that the same "pre-computed" value is\nused for each call. This is especially important to understand when a\ndefault parameter is a mutable object, such as a list or a dictionary:\nif the function modifies the object (e.g. by appending an item to a\nlist), the default value is in effect modified. This is generally not\nwhat was intended. A way around this is to use ``None`` as the\ndefault, and explicitly test for it in the body of the function, e.g.:\n\n def whats_on_the_telly(penguin=None):\n if penguin is None:\n penguin = []\n penguin.append("property of the zoo")\n return penguin\n\nFunction call semantics are described in more detail in section\n*Calls*. A function call always assigns values to all parameters\nmentioned in the parameter list, either from position arguments, from\nkeyword arguments, or from default values. If the form\n"``*identifier``" is present, it is initialized to a tuple receiving\nany excess positional parameters, defaulting to the empty tuple. If\nthe form "``**identifier``" is present, it is initialized to a new\ndictionary receiving any excess keyword arguments, defaulting to a new\nempty dictionary. Parameters after "``*``" or "``*identifier``" are\nkeyword-only parameters and may only be passed used keyword arguments.\n\nParameters may have annotations of the form "``: expression``"\nfollowing the parameter name. Any parameter may have an annotation\neven those of the form ``*identifier`` or ``**identifier``. Functions\nmay have "return" annotation of the form "``-> expression``" after the\nparameter list. These annotations can be any valid Python expression\nand are evaluated when the function definition is executed.\nAnnotations may be evaluated in a different order than they appear in\nthe source code. The presence of annotations does not change the\nsemantics of a function. The annotation values are available as\nvalues of a dictionary keyed by the parameters\' names in the\n``__annotations__`` attribute of the function object.\n\nIt is also possible to create anonymous functions (functions not bound\nto a name), for immediate use in expressions. This uses lambda forms,\ndescribed in section *Lambdas*. Note that the lambda form is merely a\nshorthand for a simplified function definition; a function defined in\na "``def``" statement can be passed around or assigned to another name\njust like a function defined by a lambda form. The "``def``" form is\nactually more powerful since it allows the execution of multiple\nstatements and annotations.\n\n**Programmer\'s note:** Functions are first-class objects. A "``def``"\nform executed inside a function definition defines a local function\nthat can be returned or passed around. 
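A minimal sketch of such a local function (``make_adder`` is an invented name):

   def make_adder(n):
       def add(x):
           return x + n      # 'n' is a free variable from the enclosing function
       return add

   add_two = make_adder(2)
   print(add_two(40))        # 42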
Free variables used in the\nnested function can access the local variables of the function\ncontaining the def. See section *Naming and binding* for details.\n\nSee also:\n\n **PEP 3107** - Function Annotations\n The original specification for function annotations.\n\n\nClass definitions\n=================\n\nA class definition defines a class object (see section *The standard\ntype hierarchy*):\n\n classdef ::= [decorators] "class" classname [inheritance] ":" suite\n inheritance ::= "(" [parameter_list] ")"\n classname ::= identifier\n\nA class definition is an executable statement. The inheritance list\nusually gives a list of base classes (see *Customizing class creation*\nfor more advanced uses), so each item in the list should evaluate to a\nclass object which allows subclassing. Classes without an inheritance\nlist inherit, by default, from the base class ``object``; hence,\n\n class Foo:\n pass\n\nis equivalent to\n\n class Foo(object):\n pass\n\nThe class\'s suite is then executed in a new execution frame (see\n*Naming and binding*), using a newly created local namespace and the\noriginal global namespace. (Usually, the suite contains mostly\nfunction definitions.) When the class\'s suite finishes execution, its\nexecution frame is discarded but its local namespace is saved. [4] A\nclass object is then created using the inheritance list for the base\nclasses and the saved local namespace for the attribute dictionary.\nThe class name is bound to this class object in the original local\nnamespace.\n\nClass creation can be customized heavily using *metaclasses*.\n\nClasses can also be decorated: just like when decorating functions,\n\n @f1(arg)\n @f2\n class Foo: pass\n\nis equivalent to\n\n class Foo: pass\n Foo = f1(arg)(f2(Foo))\n\nThe evaluation rules for the decorator expressions are the same as for\nfunction decorators. The result must be a class object, which is then\nbound to the class name.\n\n**Programmer\'s note:** Variables defined in the class definition are\nclass attributes; they are shared by instances. Instance attributes\ncan be set in a method with ``self.name = value``. Both class and\ninstance attributes are accessible through the notation\n"``self.name``", and an instance attribute hides a class attribute\nwith the same name when accessed in this way. Class attributes can be\nused as defaults for instance attributes, but using mutable values\nthere can lead to unexpected results. 
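A small sketch of that caveat (``Dog`` is an invented class):

   class Dog:
       tricks = []                        # mutable class attribute, shared by instances

       def add_trick(self, trick):
           self.tricks.append(trick)      # mutates the shared list

   a = Dog()
   b = Dog()
   a.add_trick("roll over")
   print(b.tricks)                        # ['roll over'] -- shared, probably not intended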
*Descriptors* can be used to\ncreate instance variables with different implementation details.\n\nSee also:\n\n **PEP 3115** - Metaclasses in Python 3 **PEP 3129** - Class\n Decorators\n\n-[ Footnotes ]-\n\n[1] The exception is propagated to the invocation stack unless there\n is a ``finally`` clause which happens to raise another exception.\n That new exception causes the old one to be lost.\n\n[2] Currently, control "flows off the end" except in the case of an\n exception or the execution of a ``return``, ``continue``, or\n ``break`` statement.\n\n[3] A string literal appearing as the first statement in the function\n body is transformed into the function\'s ``__doc__`` attribute and\n therefore the function\'s *docstring*.\n\n[4] A string literal appearing as the first statement in the class\n body is transformed into the namespace\'s ``__doc__`` item and\n therefore the class\'s *docstring*.\n', 'context-managers': '\nWith Statement Context Managers\n*******************************\n\nA *context manager* is an object that defines the runtime context to\nbe established when executing a ``with`` statement. The context\nmanager handles the entry into, and the exit from, the desired runtime\ncontext for the execution of the block of code. Context managers are\nnormally invoked using the ``with`` statement (described in section\n*The with statement*), but can also be used by directly invoking their\nmethods.\n\nTypical uses of context managers include saving and restoring various\nkinds of global state, locking and unlocking resources, closing opened\nfiles, etc.\n\nFor more information on context managers, see *Context Manager Types*.\n\nobject.__enter__(self)\n\n Enter the runtime context related to this object. The ``with``\n statement will bind this method\'s return value to the target(s)\n specified in the ``as`` clause of the statement, if any.\n\nobject.__exit__(self, exc_type, exc_value, traceback)\n\n Exit the runtime context related to this object. The parameters\n describe the exception that caused the context to be exited. If the\n context was exited without an exception, all three arguments will\n be ``None``.\n\n If an exception is supplied, and the method wishes to suppress the\n exception (i.e., prevent it from being propagated), it should\n return a true value. Otherwise, the exception will be processed\n normally upon exit from this method.\n\n Note that ``__exit__()`` methods should not reraise the passed-in\n exception; this is the caller\'s responsibility.\n\nSee also:\n\n **PEP 0343** - The "with" statement\n The specification, background, and examples for the Python\n ``with`` statement.\n', 'continue': '\nThe ``continue`` statement\n**************************\n\n continue_stmt ::= "continue"\n\n``continue`` may only occur syntactically nested in a ``for`` or\n``while`` loop, but not nested in a function or class definition or\n``finally`` clause within that loop. 
It continues with the next cycle\nof the nearest enclosing loop.\n\nWhen ``continue`` passes control out of a ``try`` statement with a\n``finally`` clause, that ``finally`` clause is executed before really\nstarting the next loop cycle.\n', 'conversions': '\nArithmetic conversions\n**********************\n\nWhen a description of an arithmetic operator below uses the phrase\n"the numeric arguments are converted to a common type," this means\nthat the operator implementation for built-in types works that way:\n\n* If either argument is a complex number, the other is converted to\n complex;\n\n* otherwise, if either argument is a floating point number, the other\n is converted to floating point;\n\n* otherwise, both must be integers and no conversion is necessary.\n\nSome additional rules apply for certain operators (e.g., a string left\nargument to the \'%\' operator). Extensions must define their own\nconversion behavior.\n', 'customization': '\nBasic customization\n*******************\n\nobject.__new__(cls[, ...])\n\n Called to create a new instance of class *cls*. ``__new__()`` is a\n static method (special-cased so you need not declare it as such)\n that takes the class of which an instance was requested as its\n first argument. The remaining arguments are those passed to the\n object constructor expression (the call to the class). The return\n value of ``__new__()`` should be the new object instance (usually\n an instance of *cls*).\n\n Typical implementations create a new instance of the class by\n invoking the superclass\'s ``__new__()`` method using\n ``super(currentclass, cls).__new__(cls[, ...])`` with appropriate\n arguments and then modifying the newly-created instance as\n necessary before returning it.\n\n If ``__new__()`` returns an instance of *cls*, then the new\n instance\'s ``__init__()`` method will be invoked like\n ``__init__(self[, ...])``, where *self* is the new instance and the\n remaining arguments are the same as were passed to ``__new__()``.\n\n If ``__new__()`` does not return an instance of *cls*, then the new\n instance\'s ``__init__()`` method will not be invoked.\n\n ``__new__()`` is intended mainly to allow subclasses of immutable\n types (like int, str, or tuple) to customize instance creation. It\n is also commonly overridden in custom metaclasses in order to\n customize class creation.\n\nobject.__init__(self[, ...])\n\n Called when the instance is created. The arguments are those\n passed to the class constructor expression. If a base class has an\n ``__init__()`` method, the derived class\'s ``__init__()`` method,\n if any, must explicitly call it to ensure proper initialization of\n the base class part of the instance; for example:\n ``BaseClass.__init__(self, [args...])``. As a special constraint\n on constructors, no value may be returned; doing so will cause a\n ``TypeError`` to be raised at runtime.\n\nobject.__del__(self)\n\n Called when the instance is about to be destroyed. This is also\n called a destructor. If a base class has a ``__del__()`` method,\n the derived class\'s ``__del__()`` method, if any, must explicitly\n call it to ensure proper deletion of the base class part of the\n instance. Note that it is possible (though not recommended!) for\n the ``__del__()`` method to postpone destruction of the instance by\n creating a new reference to it. It may then be called at a later\n time when this new reference is deleted. 
It is not guaranteed that\n ``__del__()`` methods are called for objects that still exist when\n the interpreter exits.\n\n Note: ``del x`` doesn\'t directly call ``x.__del__()`` --- the former\n decrements the reference count for ``x`` by one, and the latter\n is only called when ``x``\'s reference count reaches zero. Some\n common situations that may prevent the reference count of an\n object from going to zero include: circular references between\n objects (e.g., a doubly-linked list or a tree data structure with\n parent and child pointers); a reference to the object on the\n stack frame of a function that caught an exception (the traceback\n stored in ``sys.exc_info()[2]`` keeps the stack frame alive); or\n a reference to the object on the stack frame that raised an\n unhandled exception in interactive mode (the traceback stored in\n ``sys.last_traceback`` keeps the stack frame alive). The first\n situation can only be remedied by explicitly breaking the cycles;\n the latter two situations can be resolved by storing ``None`` in\n ``sys.last_traceback``. Circular references which are garbage are\n detected when the option cycle detector is enabled (it\'s on by\n default), but can only be cleaned up if there are no Python-\n level ``__del__()`` methods involved. Refer to the documentation\n for the ``gc`` module for more information about how\n ``__del__()`` methods are handled by the cycle detector,\n particularly the description of the ``garbage`` value.\n\n Warning: Due to the precarious circumstances under which ``__del__()``\n methods are invoked, exceptions that occur during their execution\n are ignored, and a warning is printed to ``sys.stderr`` instead.\n Also, when ``__del__()`` is invoked in response to a module being\n deleted (e.g., when execution of the program is done), other\n globals referenced by the ``__del__()`` method may already have\n been deleted or in the process of being torn down (e.g. the\n import machinery shutting down). For this reason, ``__del__()``\n methods should do the absolute minimum needed to maintain\n external invariants. Starting with version 1.5, Python\n guarantees that globals whose name begins with a single\n underscore are deleted from their module before other globals are\n deleted; if no other references to such globals exist, this may\n help in assuring that imported modules are still available at the\n time when the ``__del__()`` method is called.\n\nobject.__repr__(self)\n\n Called by the ``repr()`` built-in function to compute the\n "official" string representation of an object. If at all possible,\n this should look like a valid Python expression that could be used\n to recreate an object with the same value (given an appropriate\n environment). If this is not possible, a string of the form\n ``<...some useful description...>`` should be returned. The return\n value must be a string object. If a class defines ``__repr__()``\n but not ``__str__()``, then ``__repr__()`` is also used when an\n "informal" string representation of instances of that class is\n required.\n\n This is typically used for debugging, so it is important that the\n representation is information-rich and unambiguous.\n\nobject.__str__(self)\n\n Called by the ``str()`` built-in function and by the ``print()``\n function to compute the "informal" string representation of an\n object. This differs from ``__repr__()`` in that it does not have\n to be a valid Python expression: a more convenient or concise\n representation may be used instead. 
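A tiny sketch of the two methods side by side (``Point`` is an invented class):

   class Point:
       def __init__(self, x, y):
           self.x = x
           self.y = y

       def __repr__(self):
           return "Point({!r}, {!r})".format(self.x, self.y)   # unambiguous, for debugging

       def __str__(self):
           return "({}, {})".format(self.x, self.y)            # concise, for display

   p = Point(1, 2)
   print(repr(p))    # Point(1, 2)
   print(str(p))     # (1, 2)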
The return value must be a\n string object.\n\nobject.__bytes__(self)\n\n Called by ``bytes()`` to compute a byte-string representation of an\n object. This should return a ``bytes`` object.\n\nobject.__format__(self, format_spec)\n\n Called by the ``format()`` built-in function (and by extension, the\n ``format()`` method of class ``str``) to produce a "formatted"\n string representation of an object. The ``format_spec`` argument is\n a string that contains a description of the formatting options\n desired. The interpretation of the ``format_spec`` argument is up\n to the type implementing ``__format__()``, however most classes\n will either delegate formatting to one of the built-in types, or\n use a similar formatting option syntax.\n\n See *Format Specification Mini-Language* for a description of the\n standard formatting syntax.\n\n The return value must be a string object.\n\nobject.__lt__(self, other)\nobject.__le__(self, other)\nobject.__eq__(self, other)\nobject.__ne__(self, other)\nobject.__gt__(self, other)\nobject.__ge__(self, other)\n\n These are the so-called "rich comparison" methods. The\n correspondence between operator symbols and method names is as\n follows: ``xy`` calls ``x.__gt__(y)``, and ``x>=y`` calls\n ``x.__ge__(y)``.\n\n A rich comparison method may return the singleton\n ``NotImplemented`` if it does not implement the operation for a\n given pair of arguments. By convention, ``False`` and ``True`` are\n returned for a successful comparison. However, these methods can\n return any value, so if the comparison operator is used in a\n Boolean context (e.g., in the condition of an ``if`` statement),\n Python will call ``bool()`` on the value to determine if the result\n is true or false.\n\n There are no implied relationships among the comparison operators.\n The truth of ``x==y`` does not imply that ``x!=y`` is false.\n Accordingly, when defining ``__eq__()``, one should also define\n ``__ne__()`` so that the operators will behave as expected. See\n the paragraph on ``__hash__()`` for some important notes on\n creating *hashable* objects which support custom comparison\n operations and are usable as dictionary keys.\n\n There are no swapped-argument versions of these methods (to be used\n when the left argument does not support the operation but the right\n argument does); rather, ``__lt__()`` and ``__gt__()`` are each\n other\'s reflection, ``__le__()`` and ``__ge__()`` are each other\'s\n reflection, and ``__eq__()`` and ``__ne__()`` are their own\n reflection.\n\n Arguments to rich comparison methods are never coerced.\n\n To automatically generate ordering operations from a single root\n operation, see ``functools.total_ordering()``.\n\nobject.__hash__(self)\n\n Called by built-in function ``hash()`` and for operations on\n members of hashed collections including ``set``, ``frozenset``, and\n ``dict``. ``__hash__()`` should return an integer. The only\n required property is that objects which compare equal have the same\n hash value; it is advised to somehow mix together (e.g. using\n exclusive or) the hash values for the components of the object that\n also play a part in comparison of objects.\n\n If a class does not define an ``__eq__()`` method it should not\n define a ``__hash__()`` operation either; if it defines\n ``__eq__()`` but not ``__hash__()``, its instances will not be\n usable as items in hashable collections. 
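A common pattern for value-based equality on effectively immutable objects (``Point`` is again only an illustration):

   class Point:
       def __init__(self, x, y):
           self.x = x
           self.y = y

       def __eq__(self, other):
           if not isinstance(other, Point):
               return NotImplemented
           return (self.x, self.y) == (other.x, other.y)

       def __hash__(self):
           return hash((self.x, self.y))    # mixes the components used by __eq__

   print(Point(1, 2) == Point(1, 2))        # True
   print(len({Point(1, 2), Point(1, 2)}))   # 1 -- equal objects hash alike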
If a class defines\n mutable objects and implements an ``__eq__()`` method, it should\n not implement ``__hash__()``, since the implementation of hashable\n collections requires that a key\'s hash value is immutable (if the\n object\'s hash value changes, it will be in the wrong hash bucket).\n\n User-defined classes have ``__eq__()`` and ``__hash__()`` methods\n by default; with them, all objects compare unequal (except with\n themselves) and ``x.__hash__()`` returns ``id(x)``.\n\n Classes which inherit a ``__hash__()`` method from a parent class\n but change the meaning of ``__eq__()`` such that the hash value\n returned is no longer appropriate (e.g. by switching to a value-\n based concept of equality instead of the default identity based\n equality) can explicitly flag themselves as being unhashable by\n setting ``__hash__ = None`` in the class definition. Doing so means\n that not only will instances of the class raise an appropriate\n ``TypeError`` when a program attempts to retrieve their hash value,\n but they will also be correctly identified as unhashable when\n checking ``isinstance(obj, collections.Hashable)`` (unlike classes\n which define their own ``__hash__()`` to explicitly raise\n ``TypeError``).\n\n If a class that overrides ``__eq__()`` needs to retain the\n implementation of ``__hash__()`` from a parent class, the\n interpreter must be told this explicitly by setting ``__hash__ =\n .__hash__``. Otherwise the inheritance of\n ``__hash__()`` will be blocked, just as if ``__hash__`` had been\n explicitly set to ``None``.\n\n Note: Note by default the ``__hash__()`` values of str, bytes and\n datetime objects are "salted" with an unpredictable random value.\n Although they remain constant within an individual Python\n process, they are not predictable between repeated invocations of\n Python.This is intended to provide protection against a denial-\n of-service caused by carefully-chosen inputs that exploit the\n worst case performance of a dict insertion, O(n^2) complexity.\n See http://www.ocert.org/advisories/ocert-2011-003.html for\n details.Changing hash values affects the order in which keys are\n retrieved from a dict. Note Python has never made guarantees\n about this ordering (and it typically varies between 32-bit and\n 64-bit builds).See also ``PYTHONHASHSEED``.\n\n Changed in version 3.3: Hash randomization is enabled by default.\n\nobject.__bool__(self)\n\n Called to implement truth value testing and the built-in operation\n ``bool()``; should return ``False`` or ``True``. When this method\n is not defined, ``__len__()`` is called, if it is defined, and the\n object is considered true if its result is nonzero. If a class\n defines neither ``__len__()`` nor ``__bool__()``, all its instances\n are considered true.\n', - 'debugger': '\n``pdb`` --- The Python Debugger\n*******************************\n\nThe module ``pdb`` defines an interactive source code debugger for\nPython programs. It supports setting (conditional) breakpoints and\nsingle stepping at the source line level, inspection of stack frames,\nsource code listing, and evaluation of arbitrary Python code in the\ncontext of any stack frame. It also supports post-mortem debugging\nand can be called under program control.\n\nThe debugger is extensible -- it is actually defined as the class\n``Pdb``. This is currently undocumented but easily understood by\nreading the source. The extension interface uses the modules ``bdb``\nand ``cmd``.\n\nThe debugger\'s prompt is ``(Pdb)``. 
Typical usage to run a program\nunder control of the debugger is:\n\n >>> import pdb\n >>> import mymodule\n >>> pdb.run(\'mymodule.test()\')\n > (0)?()\n (Pdb) continue\n > (1)?()\n (Pdb) continue\n NameError: \'spam\'\n > (1)?()\n (Pdb)\n\n``pdb.py`` can also be invoked as a script to debug other scripts.\nFor example:\n\n python3 -m pdb myscript.py\n\nWhen invoked as a script, pdb will automatically enter post-mortem\ndebugging if the program being debugged exits abnormally. After post-\nmortem debugging (or after normal exit of the program), pdb will\nrestart the program. Automatic restarting preserves pdb\'s state (such\nas breakpoints) and in most cases is more useful than quitting the\ndebugger upon program\'s exit.\n\nNew in version 3.2: ``pdb.py`` now accepts a ``-c`` option that\nexecutes commands as if given in a ``.pdbrc`` file, see *Debugger\nCommands*.\n\nThe typical usage to break into the debugger from a running program is\nto insert\n\n import pdb; pdb.set_trace()\n\nat the location you want to break into the debugger. You can then\nstep through the code following this statement, and continue running\nwithout the debugger using the ``continue`` command.\n\nThe typical usage to inspect a crashed program is:\n\n >>> import pdb\n >>> import mymodule\n >>> mymodule.test()\n Traceback (most recent call last):\n File "", line 1, in ?\n File "./mymodule.py", line 4, in test\n test2()\n File "./mymodule.py", line 3, in test2\n print(spam)\n NameError: spam\n >>> pdb.pm()\n > ./mymodule.py(3)test2()\n -> print(spam)\n (Pdb)\n\nThe module defines the following functions; each enters the debugger\nin a slightly different way:\n\npdb.run(statement, globals=None, locals=None)\n\n Execute the *statement* (given as a string or a code object) under\n debugger control. The debugger prompt appears before any code is\n executed; you can set breakpoints and type ``continue``, or you can\n step through the statement using ``step`` or ``next`` (all these\n commands are explained below). The optional *globals* and *locals*\n arguments specify the environment in which the code is executed; by\n default the dictionary of the module ``__main__`` is used. (See\n the explanation of the built-in ``exec()`` or ``eval()``\n functions.)\n\npdb.runeval(expression, globals=None, locals=None)\n\n Evaluate the *expression* (given as a string or a code object)\n under debugger control. When ``runeval()`` returns, it returns the\n value of the expression. Otherwise this function is similar to\n ``run()``.\n\npdb.runcall(function, *args, **kwds)\n\n Call the *function* (a function or method object, not a string)\n with the given arguments. When ``runcall()`` returns, it returns\n whatever the function call returned. The debugger prompt appears\n as soon as the function is entered.\n\npdb.set_trace()\n\n Enter the debugger at the calling stack frame. This is useful to\n hard-code a breakpoint at a given point in a program, even if the\n code is not otherwise being debugged (e.g. when an assertion\n fails).\n\npdb.post_mortem(traceback=None)\n\n Enter post-mortem debugging of the given *traceback* object. If no\n *traceback* is given, it uses the one of the exception that is\n currently being handled (an exception must be being handled if the\n default is to be used).\n\npdb.pm()\n\n Enter post-mortem debugging of the traceback found in\n ``sys.last_traceback``.\n\nThe ``run*`` functions and ``set_trace()`` are aliases for\ninstantiating the ``Pdb`` class and calling the method of the same\nname. 
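As a small illustration of ``runcall()`` as described above (the ``divide()`` function is invented for the example):

   import pdb

   def divide(a, b):
       return a / b

   # The debugger prompt appears as soon as divide() is entered; once the
   # call finishes, runcall() returns whatever divide() returned.
   result = pdb.runcall(divide, 42, 7)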
If you want to access further features, you have to do this\nyourself:\n\nclass class pdb.Pdb(completekey=\'tab\', stdin=None, stdout=None, skip=None, nosigint=False)\n\n ``Pdb`` is the debugger class.\n\n The *completekey*, *stdin* and *stdout* arguments are passed to the\n underlying ``cmd.Cmd`` class; see the description there.\n\n The *skip* argument, if given, must be an iterable of glob-style\n module name patterns. The debugger will not step into frames that\n originate in a module that matches one of these patterns. [1]\n\n By default, Pdb sets a handler for the SIGINT signal (which is sent\n when the user presses Ctrl-C on the console) when you give a\n ``continue`` command. This allows you to break into the debugger\n again by pressing Ctrl-C. If you want Pdb not to touch the SIGINT\n handler, set *nosigint* tot true.\n\n Example call to enable tracing with *skip*:\n\n import pdb; pdb.Pdb(skip=[\'django.*\']).set_trace()\n\n New in version 3.1: The *skip* argument.\n\n New in version 3.2: The *nosigint* argument. Previously, a SIGINT\n handler was never set by Pdb.\n\n run(statement, globals=None, locals=None)\n runeval(expression, globals=None, locals=None)\n runcall(function, *args, **kwds)\n set_trace()\n\n See the documentation for the functions explained above.\n\n\nDebugger Commands\n=================\n\nThe commands recognized by the debugger are listed below. Most\ncommands can be abbreviated to one or two letters as indicated; e.g.\n``h(elp)`` means that either ``h`` or ``help`` can be used to enter\nthe help command (but not ``he`` or ``hel``, nor ``H`` or ``Help`` or\n``HELP``). Arguments to commands must be separated by whitespace\n(spaces or tabs). Optional arguments are enclosed in square brackets\n(``[]``) in the command syntax; the square brackets must not be typed.\nAlternatives in the command syntax are separated by a vertical bar\n(``|``).\n\nEntering a blank line repeats the last command entered. Exception: if\nthe last command was a ``list`` command, the next 11 lines are listed.\n\nCommands that the debugger doesn\'t recognize are assumed to be Python\nstatements and are executed in the context of the program being\ndebugged. Python statements can also be prefixed with an exclamation\npoint (``!``). This is a powerful way to inspect the program being\ndebugged; it is even possible to change a variable or call a function.\nWhen an exception occurs in such a statement, the exception name is\nprinted but the debugger\'s state is not changed.\n\nThe debugger supports *aliases*. Aliases can have parameters which\nallows one a certain level of adaptability to the context under\nexamination.\n\nMultiple commands may be entered on a single line, separated by\n``;;``. (A single ``;`` is not used as it is the separator for\nmultiple commands in a line that is passed to the Python parser.) No\nintelligence is applied to separating the commands; the input is split\nat the first ``;;`` pair, even if it is in the middle of a quoted\nstring.\n\nIf a file ``.pdbrc`` exists in the user\'s home directory or in the\ncurrent directory, it is read in and executed as if it had been typed\nat the debugger prompt. This is particularly useful for aliases. If\nboth files exist, the one in the home directory is read first and\naliases defined there can be overridden by the local file.\n\nChanged in version 3.2: ``.pdbrc`` can now contain commands that\ncontinue debugging, such as ``continue`` or ``next``. 
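A hedged sketch of what such a ``.pdbrc`` might look like; the contents below are invented for illustration, and each line is executed as if typed at the debugger prompt:

   # .pdbrc (hypothetical example)
   alias pl pp locals()
   # Since 3.2, commands that resume execution are honoured here as well:
   continue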
Previously,\nthese commands had no effect.\n\nh(elp) [command]\n\n Without argument, print the list of available commands. With a\n *command* as argument, print help about that command. ``help pdb``\n displays the full documentation (the docstring of the ``pdb``\n module). Since the *command* argument must be an identifier,\n ``help exec`` must be entered to get help on the ``!`` command.\n\nw(here)\n\n Print a stack trace, with the most recent frame at the bottom. An\n arrow indicates the current frame, which determines the context of\n most commands.\n\nd(own) [count]\n\n Move the current frame *count* (default one) levels down in the\n stack trace (to a newer frame).\n\nu(p) [count]\n\n Move the current frame *count* (default one) levels up in the stack\n trace (to an older frame).\n\nb(reak) [([filename:]lineno | function) [, condition]]\n\n With a *lineno* argument, set a break there in the current file.\n With a *function* argument, set a break at the first executable\n statement within that function. The line number may be prefixed\n with a filename and a colon, to specify a breakpoint in another\n file (probably one that hasn\'t been loaded yet). The file is\n searched on ``sys.path``. Note that each breakpoint is assigned a\n number to which all the other breakpoint commands refer.\n\n If a second argument is present, it is an expression which must\n evaluate to true before the breakpoint is honored.\n\n Without argument, list all breaks, including for each breakpoint,\n the number of times that breakpoint has been hit, the current\n ignore count, and the associated condition if any.\n\ntbreak [([filename:]lineno | function) [, condition]]\n\n Temporary breakpoint, which is removed automatically when it is\n first hit. The arguments are the same as for ``break``.\n\ncl(ear) [filename:lineno | bpnumber [bpnumber ...]]\n\n With a *filename:lineno* argument, clear all the breakpoints at\n this line. With a space separated list of breakpoint numbers, clear\n those breakpoints. Without argument, clear all breaks (but first\n ask confirmation).\n\ndisable [bpnumber [bpnumber ...]]\n\n Disable the breakpoints given as a space separated list of\n breakpoint numbers. Disabling a breakpoint means it cannot cause\n the program to stop execution, but unlike clearing a breakpoint, it\n remains in the list of breakpoints and can be (re-)enabled.\n\nenable [bpnumber [bpnumber ...]]\n\n Enable the breakpoints specified.\n\nignore bpnumber [count]\n\n Set the ignore count for the given breakpoint number. If count is\n omitted, the ignore count is set to 0. A breakpoint becomes active\n when the ignore count is zero. When non-zero, the count is\n decremented each time the breakpoint is reached and the breakpoint\n is not disabled and any associated condition evaluates to true.\n\ncondition bpnumber [condition]\n\n Set a new *condition* for the breakpoint, an expression which must\n evaluate to true before the breakpoint is honored. If *condition*\n is absent, any existing condition is removed; i.e., the breakpoint\n is made unconditional.\n\ncommands [bpnumber]\n\n Specify a list of commands for breakpoint number *bpnumber*. The\n commands themselves appear on the following lines. Type a line\n containing just ``end`` to terminate the commands. 
An example:\n\n (Pdb) commands 1\n (com) print some_variable\n (com) end\n (Pdb)\n\n To remove all commands from a breakpoint, type commands and follow\n it immediately with ``end``; that is, give no commands.\n\n With no *bpnumber* argument, commands refers to the last breakpoint\n set.\n\n You can use breakpoint commands to start your program up again.\n Simply use the continue command, or step, or any other command that\n resumes execution.\n\n Specifying any command resuming execution (currently continue,\n step, next, return, jump, quit and their abbreviations) terminates\n the command list (as if that command was immediately followed by\n end). This is because any time you resume execution (even with a\n simple next or step), you may encounter another breakpoint--which\n could have its own command list, leading to ambiguities about which\n list to execute.\n\n If you use the \'silent\' command in the command list, the usual\n message about stopping at a breakpoint is not printed. This may be\n desirable for breakpoints that are to print a specific message and\n then continue. If none of the other commands print anything, you\n see no sign that the breakpoint was reached.\n\ns(tep)\n\n Execute the current line, stop at the first possible occasion\n (either in a function that is called or on the next line in the\n current function).\n\nn(ext)\n\n Continue execution until the next line in the current function is\n reached or it returns. (The difference between ``next`` and\n ``step`` is that ``step`` stops inside a called function, while\n ``next`` executes called functions at (nearly) full speed, only\n stopping at the next line in the current function.)\n\nunt(il) [lineno]\n\n Without argument, continue execution until the line with a number\n greater than the current one is reached.\n\n With a line number, continue execution until a line with a number\n greater or equal to that is reached. In both cases, also stop when\n the current frame returns.\n\n Changed in version 3.2: Allow giving an explicit line number.\n\nr(eturn)\n\n Continue execution until the current function returns.\n\nc(ont(inue))\n\n Continue execution, only stop when a breakpoint is encountered.\n\nj(ump) lineno\n\n Set the next line that will be executed. Only available in the\n bottom-most frame. This lets you jump back and execute code again,\n or jump forward to skip code that you don\'t want to run.\n\n It should be noted that not all jumps are allowed -- for instance\n it is not possible to jump into the middle of a ``for`` loop or out\n of a ``finally`` clause.\n\nl(ist) [first[, last]]\n\n List source code for the current file. Without arguments, list 11\n lines around the current line or continue the previous listing.\n With ``.`` as argument, list 11 lines around the current line.\n With one argument, list 11 lines around at that line. With two\n arguments, list the given range; if the second argument is less\n than the first, it is interpreted as a count.\n\n The current line in the current frame is indicated by ``->``. 
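A hedged sketch of how the breakpoint commands above might be combined in one session; the file name, line number, the ``count`` variable and the breakpoint number are all assumed, and the debugger's own responses are omitted:

   (Pdb) break mymodule.py:14, count > 5
   (Pdb) ignore 1 2
   (Pdb) commands 1
   (com) silent
   (com) print count
   (com) end
   (Pdb) continue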
If\n an exception is being debugged, the line where the exception was\n originally raised or propagated is indicated by ``>>``, if it\n differs from the current line.\n\n New in version 3.2: The ``>>`` marker.\n\nll | longlist\n\n List all source code for the current function or frame.\n Interesting lines are marked as for ``list``.\n\n New in version 3.2.\n\na(rgs)\n\n Print the argument list of the current function.\n\np(rint) expression\n\n Evaluate the *expression* in the current context and print its\n value.\n\npp expression\n\n Like the ``print`` command, except the value of the expression is\n pretty-printed using the ``pprint`` module.\n\nwhatis expression\n\n Print the type of the *expression*.\n\nsource expression\n\n Try to get source code for the given object and display it.\n\n New in version 3.2.\n\ndisplay [expression]\n\n Display the value of the expression if it changed, each time\n execution stops in the current frame.\n\n Without expression, list all display expressions for the current\n frame.\n\n New in version 3.2.\n\nundisplay [expression]\n\n Do not display the expression any more in the current frame.\n Without expression, clear all display expressions for the current\n frame.\n\n New in version 3.2.\n\ninteract\n\n Start an interative interpreter (using the ``code`` module) whose\n global namespace contains all the (global and local) names found in\n the current scope.\n\n New in version 3.2.\n\nalias [name [command]]\n\n Create an alias called *name* that executes *command*. The command\n must *not* be enclosed in quotes. Replaceable parameters can be\n indicated by ``%1``, ``%2``, and so on, while ``%*`` is replaced by\n all the parameters. If no command is given, the current alias for\n *name* is shown. If no arguments are given, all aliases are listed.\n\n Aliases may be nested and can contain anything that can be legally\n typed at the pdb prompt. Note that internal pdb commands *can* be\n overridden by aliases. Such a command is then hidden until the\n alias is removed. Aliasing is recursively applied to the first\n word of the command line; all other words in the line are left\n alone.\n\n As an example, here are two useful aliases (especially when placed\n in the ``.pdbrc`` file):\n\n # Print instance variables (usage "pi classInst")\n alias pi for k in %1.__dict__.keys(): print("%1.",k,"=",%1.__dict__[k])\n # Print instance variables in self\n alias ps pi self\n\nunalias name\n\n Delete the specified alias.\n\n! statement\n\n Execute the (one-line) *statement* in the context of the current\n stack frame. The exclamation point can be omitted unless the first\n word of the statement resembles a debugger command. To set a\n global variable, you can prefix the assignment command with a\n ``global`` statement on the same line, e.g.:\n\n (Pdb) global list_options; list_options = [\'-l\']\n (Pdb)\n\nrun [args ...]\nrestart [args ...]\n\n Restart the debugged Python program. If an argument is supplied,\n it is split with ``shlex`` and the result is used as the new\n ``sys.argv``. History, breakpoints, actions and debugger options\n are preserved. ``restart`` is an alias for ``run``.\n\nq(uit)\n\n Quit from the debugger. 
The program being executed is aborted.\n\n-[ Footnotes ]-\n\n[1] Whether a frame is considered to originate in a certain module is\n determined by the ``__name__`` in the frame globals.\n', + 'debugger': '\n``pdb`` --- The Python Debugger\n*******************************\n\nThe module ``pdb`` defines an interactive source code debugger for\nPython programs. It supports setting (conditional) breakpoints and\nsingle stepping at the source line level, inspection of stack frames,\nsource code listing, and evaluation of arbitrary Python code in the\ncontext of any stack frame. It also supports post-mortem debugging\nand can be called under program control.\n\nThe debugger is extensible -- it is actually defined as the class\n``Pdb``. This is currently undocumented but easily understood by\nreading the source. The extension interface uses the modules ``bdb``\nand ``cmd``.\n\nThe debugger\'s prompt is ``(Pdb)``. Typical usage to run a program\nunder control of the debugger is:\n\n >>> import pdb\n >>> import mymodule\n >>> pdb.run(\'mymodule.test()\')\n > (0)?()\n (Pdb) continue\n > (1)?()\n (Pdb) continue\n NameError: \'spam\'\n > (1)?()\n (Pdb)\n\nChanged in version 3.3: Tab-completion via the ``readline`` module is\navailable for commands and command arguments, e.g. the current global\nand local names are offered as arguments of the ``print`` command.\n\n``pdb.py`` can also be invoked as a script to debug other scripts.\nFor example:\n\n python3 -m pdb myscript.py\n\nWhen invoked as a script, pdb will automatically enter post-mortem\ndebugging if the program being debugged exits abnormally. After post-\nmortem debugging (or after normal exit of the program), pdb will\nrestart the program. Automatic restarting preserves pdb\'s state (such\nas breakpoints) and in most cases is more useful than quitting the\ndebugger upon program\'s exit.\n\nNew in version 3.2: ``pdb.py`` now accepts a ``-c`` option that\nexecutes commands as if given in a ``.pdbrc`` file, see *Debugger\nCommands*.\n\nThe typical usage to break into the debugger from a running program is\nto insert\n\n import pdb; pdb.set_trace()\n\nat the location you want to break into the debugger. You can then\nstep through the code following this statement, and continue running\nwithout the debugger using the ``continue`` command.\n\nThe typical usage to inspect a crashed program is:\n\n >>> import pdb\n >>> import mymodule\n >>> mymodule.test()\n Traceback (most recent call last):\n File "", line 1, in ?\n File "./mymodule.py", line 4, in test\n test2()\n File "./mymodule.py", line 3, in test2\n print(spam)\n NameError: spam\n >>> pdb.pm()\n > ./mymodule.py(3)test2()\n -> print(spam)\n (Pdb)\n\nThe module defines the following functions; each enters the debugger\nin a slightly different way:\n\npdb.run(statement, globals=None, locals=None)\n\n Execute the *statement* (given as a string or a code object) under\n debugger control. The debugger prompt appears before any code is\n executed; you can set breakpoints and type ``continue``, or you can\n step through the statement using ``step`` or ``next`` (all these\n commands are explained below). The optional *globals* and *locals*\n arguments specify the environment in which the code is executed; by\n default the dictionary of the module ``__main__`` is used. (See\n the explanation of the built-in ``exec()`` or ``eval()``\n functions.)\n\npdb.runeval(expression, globals=None, locals=None)\n\n Evaluate the *expression* (given as a string or a code object)\n under debugger control. 
When ``runeval()`` returns, it returns the\n value of the expression. Otherwise this function is similar to\n ``run()``.\n\npdb.runcall(function, *args, **kwds)\n\n Call the *function* (a function or method object, not a string)\n with the given arguments. When ``runcall()`` returns, it returns\n whatever the function call returned. The debugger prompt appears\n as soon as the function is entered.\n\npdb.set_trace()\n\n Enter the debugger at the calling stack frame. This is useful to\n hard-code a breakpoint at a given point in a program, even if the\n code is not otherwise being debugged (e.g. when an assertion\n fails).\n\npdb.post_mortem(traceback=None)\n\n Enter post-mortem debugging of the given *traceback* object. If no\n *traceback* is given, it uses the one of the exception that is\n currently being handled (an exception must be being handled if the\n default is to be used).\n\npdb.pm()\n\n Enter post-mortem debugging of the traceback found in\n ``sys.last_traceback``.\n\nThe ``run*`` functions and ``set_trace()`` are aliases for\ninstantiating the ``Pdb`` class and calling the method of the same\nname. If you want to access further features, you have to do this\nyourself:\n\nclass class pdb.Pdb(completekey=\'tab\', stdin=None, stdout=None, skip=None, nosigint=False)\n\n ``Pdb`` is the debugger class.\n\n The *completekey*, *stdin* and *stdout* arguments are passed to the\n underlying ``cmd.Cmd`` class; see the description there.\n\n The *skip* argument, if given, must be an iterable of glob-style\n module name patterns. The debugger will not step into frames that\n originate in a module that matches one of these patterns. [1]\n\n By default, Pdb sets a handler for the SIGINT signal (which is sent\n when the user presses Ctrl-C on the console) when you give a\n ``continue`` command. This allows you to break into the debugger\n again by pressing Ctrl-C. If you want Pdb not to touch the SIGINT\n handler, set *nosigint* tot true.\n\n Example call to enable tracing with *skip*:\n\n import pdb; pdb.Pdb(skip=[\'django.*\']).set_trace()\n\n New in version 3.1: The *skip* argument.\n\n New in version 3.2: The *nosigint* argument. Previously, a SIGINT\n handler was never set by Pdb.\n\n run(statement, globals=None, locals=None)\n runeval(expression, globals=None, locals=None)\n runcall(function, *args, **kwds)\n set_trace()\n\n See the documentation for the functions explained above.\n\n\nDebugger Commands\n=================\n\nThe commands recognized by the debugger are listed below. Most\ncommands can be abbreviated to one or two letters as indicated; e.g.\n``h(elp)`` means that either ``h`` or ``help`` can be used to enter\nthe help command (but not ``he`` or ``hel``, nor ``H`` or ``Help`` or\n``HELP``). Arguments to commands must be separated by whitespace\n(spaces or tabs). Optional arguments are enclosed in square brackets\n(``[]``) in the command syntax; the square brackets must not be typed.\nAlternatives in the command syntax are separated by a vertical bar\n(``|``).\n\nEntering a blank line repeats the last command entered. Exception: if\nthe last command was a ``list`` command, the next 11 lines are listed.\n\nCommands that the debugger doesn\'t recognize are assumed to be Python\nstatements and are executed in the context of the program being\ndebugged. Python statements can also be prefixed with an exclamation\npoint (``!``). 
This is a powerful way to inspect the program being\ndebugged; it is even possible to change a variable or call a function.\nWhen an exception occurs in such a statement, the exception name is\nprinted but the debugger\'s state is not changed.\n\nThe debugger supports *aliases*. Aliases can have parameters which\nallows one a certain level of adaptability to the context under\nexamination.\n\nMultiple commands may be entered on a single line, separated by\n``;;``. (A single ``;`` is not used as it is the separator for\nmultiple commands in a line that is passed to the Python parser.) No\nintelligence is applied to separating the commands; the input is split\nat the first ``;;`` pair, even if it is in the middle of a quoted\nstring.\n\nIf a file ``.pdbrc`` exists in the user\'s home directory or in the\ncurrent directory, it is read in and executed as if it had been typed\nat the debugger prompt. This is particularly useful for aliases. If\nboth files exist, the one in the home directory is read first and\naliases defined there can be overridden by the local file.\n\nChanged in version 3.2: ``.pdbrc`` can now contain commands that\ncontinue debugging, such as ``continue`` or ``next``. Previously,\nthese commands had no effect.\n\nh(elp) [command]\n\n Without argument, print the list of available commands. With a\n *command* as argument, print help about that command. ``help pdb``\n displays the full documentation (the docstring of the ``pdb``\n module). Since the *command* argument must be an identifier,\n ``help exec`` must be entered to get help on the ``!`` command.\n\nw(here)\n\n Print a stack trace, with the most recent frame at the bottom. An\n arrow indicates the current frame, which determines the context of\n most commands.\n\nd(own) [count]\n\n Move the current frame *count* (default one) levels down in the\n stack trace (to a newer frame).\n\nu(p) [count]\n\n Move the current frame *count* (default one) levels up in the stack\n trace (to an older frame).\n\nb(reak) [([filename:]lineno | function) [, condition]]\n\n With a *lineno* argument, set a break there in the current file.\n With a *function* argument, set a break at the first executable\n statement within that function. The line number may be prefixed\n with a filename and a colon, to specify a breakpoint in another\n file (probably one that hasn\'t been loaded yet). The file is\n searched on ``sys.path``. Note that each breakpoint is assigned a\n number to which all the other breakpoint commands refer.\n\n If a second argument is present, it is an expression which must\n evaluate to true before the breakpoint is honored.\n\n Without argument, list all breaks, including for each breakpoint,\n the number of times that breakpoint has been hit, the current\n ignore count, and the associated condition if any.\n\ntbreak [([filename:]lineno | function) [, condition]]\n\n Temporary breakpoint, which is removed automatically when it is\n first hit. The arguments are the same as for ``break``.\n\ncl(ear) [filename:lineno | bpnumber [bpnumber ...]]\n\n With a *filename:lineno* argument, clear all the breakpoints at\n this line. With a space separated list of breakpoint numbers, clear\n those breakpoints. Without argument, clear all breaks (but first\n ask confirmation).\n\ndisable [bpnumber [bpnumber ...]]\n\n Disable the breakpoints given as a space separated list of\n breakpoint numbers. 
Disabling a breakpoint means it cannot cause\n the program to stop execution, but unlike clearing a breakpoint, it\n remains in the list of breakpoints and can be (re-)enabled.\n\nenable [bpnumber [bpnumber ...]]\n\n Enable the breakpoints specified.\n\nignore bpnumber [count]\n\n Set the ignore count for the given breakpoint number. If count is\n omitted, the ignore count is set to 0. A breakpoint becomes active\n when the ignore count is zero. When non-zero, the count is\n decremented each time the breakpoint is reached and the breakpoint\n is not disabled and any associated condition evaluates to true.\n\ncondition bpnumber [condition]\n\n Set a new *condition* for the breakpoint, an expression which must\n evaluate to true before the breakpoint is honored. If *condition*\n is absent, any existing condition is removed; i.e., the breakpoint\n is made unconditional.\n\ncommands [bpnumber]\n\n Specify a list of commands for breakpoint number *bpnumber*. The\n commands themselves appear on the following lines. Type a line\n containing just ``end`` to terminate the commands. An example:\n\n (Pdb) commands 1\n (com) print some_variable\n (com) end\n (Pdb)\n\n To remove all commands from a breakpoint, type commands and follow\n it immediately with ``end``; that is, give no commands.\n\n With no *bpnumber* argument, commands refers to the last breakpoint\n set.\n\n You can use breakpoint commands to start your program up again.\n Simply use the continue command, or step, or any other command that\n resumes execution.\n\n Specifying any command resuming execution (currently continue,\n step, next, return, jump, quit and their abbreviations) terminates\n the command list (as if that command was immediately followed by\n end). This is because any time you resume execution (even with a\n simple next or step), you may encounter another breakpoint--which\n could have its own command list, leading to ambiguities about which\n list to execute.\n\n If you use the \'silent\' command in the command list, the usual\n message about stopping at a breakpoint is not printed. This may be\n desirable for breakpoints that are to print a specific message and\n then continue. If none of the other commands print anything, you\n see no sign that the breakpoint was reached.\n\ns(tep)\n\n Execute the current line, stop at the first possible occasion\n (either in a function that is called or on the next line in the\n current function).\n\nn(ext)\n\n Continue execution until the next line in the current function is\n reached or it returns. (The difference between ``next`` and\n ``step`` is that ``step`` stops inside a called function, while\n ``next`` executes called functions at (nearly) full speed, only\n stopping at the next line in the current function.)\n\nunt(il) [lineno]\n\n Without argument, continue execution until the line with a number\n greater than the current one is reached.\n\n With a line number, continue execution until a line with a number\n greater or equal to that is reached. In both cases, also stop when\n the current frame returns.\n\n Changed in version 3.2: Allow giving an explicit line number.\n\nr(eturn)\n\n Continue execution until the current function returns.\n\nc(ont(inue))\n\n Continue execution, only stop when a breakpoint is encountered.\n\nj(ump) lineno\n\n Set the next line that will be executed. Only available in the\n bottom-most frame. 
This lets you jump back and execute code again,\n or jump forward to skip code that you don\'t want to run.\n\n It should be noted that not all jumps are allowed -- for instance\n it is not possible to jump into the middle of a ``for`` loop or out\n of a ``finally`` clause.\n\nl(ist) [first[, last]]\n\n List source code for the current file. Without arguments, list 11\n lines around the current line or continue the previous listing.\n With ``.`` as argument, list 11 lines around the current line.\n With one argument, list 11 lines around at that line. With two\n arguments, list the given range; if the second argument is less\n than the first, it is interpreted as a count.\n\n The current line in the current frame is indicated by ``->``. If\n an exception is being debugged, the line where the exception was\n originally raised or propagated is indicated by ``>>``, if it\n differs from the current line.\n\n New in version 3.2: The ``>>`` marker.\n\nll | longlist\n\n List all source code for the current function or frame.\n Interesting lines are marked as for ``list``.\n\n New in version 3.2.\n\na(rgs)\n\n Print the argument list of the current function.\n\np(rint) expression\n\n Evaluate the *expression* in the current context and print its\n value.\n\npp expression\n\n Like the ``print`` command, except the value of the expression is\n pretty-printed using the ``pprint`` module.\n\nwhatis expression\n\n Print the type of the *expression*.\n\nsource expression\n\n Try to get source code for the given object and display it.\n\n New in version 3.2.\n\ndisplay [expression]\n\n Display the value of the expression if it changed, each time\n execution stops in the current frame.\n\n Without expression, list all display expressions for the current\n frame.\n\n New in version 3.2.\n\nundisplay [expression]\n\n Do not display the expression any more in the current frame.\n Without expression, clear all display expressions for the current\n frame.\n\n New in version 3.2.\n\ninteract\n\n Start an interative interpreter (using the ``code`` module) whose\n global namespace contains all the (global and local) names found in\n the current scope.\n\n New in version 3.2.\n\nalias [name [command]]\n\n Create an alias called *name* that executes *command*. The command\n must *not* be enclosed in quotes. Replaceable parameters can be\n indicated by ``%1``, ``%2``, and so on, while ``%*`` is replaced by\n all the parameters. If no command is given, the current alias for\n *name* is shown. If no arguments are given, all aliases are listed.\n\n Aliases may be nested and can contain anything that can be legally\n typed at the pdb prompt. Note that internal pdb commands *can* be\n overridden by aliases. Such a command is then hidden until the\n alias is removed. Aliasing is recursively applied to the first\n word of the command line; all other words in the line are left\n alone.\n\n As an example, here are two useful aliases (especially when placed\n in the ``.pdbrc`` file):\n\n # Print instance variables (usage "pi classInst")\n alias pi for k in %1.__dict__.keys(): print("%1.",k,"=",%1.__dict__[k])\n # Print instance variables in self\n alias ps pi self\n\nunalias name\n\n Delete the specified alias.\n\n! statement\n\n Execute the (one-line) *statement* in the context of the current\n stack frame. The exclamation point can be omitted unless the first\n word of the statement resembles a debugger command. 
To set a\n global variable, you can prefix the assignment command with a\n ``global`` statement on the same line, e.g.:\n\n (Pdb) global list_options; list_options = [\'-l\']\n (Pdb)\n\nrun [args ...]\nrestart [args ...]\n\n Restart the debugged Python program. If an argument is supplied,\n it is split with ``shlex`` and the result is used as the new\n ``sys.argv``. History, breakpoints, actions and debugger options\n are preserved. ``restart`` is an alias for ``run``.\n\nq(uit)\n\n Quit from the debugger. The program being executed is aborted.\n\n-[ Footnotes ]-\n\n[1] Whether a frame is considered to originate in a certain module is\n determined by the ``__name__`` in the frame globals.\n', 'del': '\nThe ``del`` statement\n*********************\n\n del_stmt ::= "del" target_list\n\nDeletion is recursively defined very similar to the way assignment is\ndefined. Rather than spelling it out in full details, here are some\nhints.\n\nDeletion of a target list recursively deletes each target, from left\nto right.\n\nDeletion of a name removes the binding of that name from the local or\nglobal namespace, depending on whether the name occurs in a ``global``\nstatement in the same code block. If the name is unbound, a\n``NameError`` exception will be raised.\n\nDeletion of attribute references, subscriptions and slicings is passed\nto the primary object involved; deletion of a slicing is in general\nequivalent to assignment of an empty slice of the right type (but even\nthis is determined by the sliced object).\n\nChanged in version 3.2.\n', 'dict': '\nDictionary displays\n*******************\n\nA dictionary display is a possibly empty series of key/datum pairs\nenclosed in curly braces:\n\n dict_display ::= "{" [key_datum_list | dict_comprehension] "}"\n key_datum_list ::= key_datum ("," key_datum)* [","]\n key_datum ::= expression ":" expression\n dict_comprehension ::= expression ":" expression comp_for\n\nA dictionary display yields a new dictionary object.\n\nIf a comma-separated sequence of key/datum pairs is given, they are\nevaluated from left to right to define the entries of the dictionary:\neach key object is used as a key into the dictionary to store the\ncorresponding datum. This means that you can specify the same key\nmultiple times in the key/datum list, and the final dictionary\'s value\nfor that key will be the last one given.\n\nA dict comprehension, in contrast to list and set comprehensions,\nneeds two expressions separated with a colon followed by the usual\n"for" and "if" clauses. When the comprehension is run, the resulting\nkey and value elements are inserted in the new dictionary in the order\nthey are produced.\n\nRestrictions on the types of the key values are listed earlier in\nsection *The standard type hierarchy*. (To summarize, the key type\nshould be *hashable*, which excludes all mutable objects.) Clashes\nbetween duplicate keys are not detected; the last datum (textually\nrightmost in the display) stored for a given key value prevails.\n', 'dynamic-features': '\nInteraction with dynamic features\n*********************************\n\nThere are several cases where Python statements are illegal when used\nin conjunction with nested scopes that contain free variables.\n\nIf a variable is referenced in an enclosing scope, it is illegal to\ndelete the name. 
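To illustrate the dictionary display rules described earlier (duplicate keys and comprehensions), two quick interactive examples with arbitrary values:

   >>> {'a': 1, 'a': 2}                # the last datum for a duplicate key wins
   {'a': 2}
   >>> {x: x ** 2 for x in range(4)}   # dict comprehension
   {0: 0, 1: 1, 2: 4, 3: 9}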
An error will be reported at compile time.\n\nIf the wild card form of import --- ``import *`` --- is used in a\nfunction and the function contains or is a nested block with free\nvariables, the compiler will raise a ``SyntaxError``.\n\nThe ``eval()`` and ``exec()`` functions do not have access to the full\nenvironment for resolving names. Names may be resolved in the local\nand global namespaces of the caller. Free variables are not resolved\nin the nearest enclosing namespace, but in the global namespace. [1]\nThe ``exec()`` and ``eval()`` functions have optional arguments to\noverride the global and local namespace. If only one namespace is\nspecified, it is used for both.\n', @@ -35,7 +35,7 @@ 'floating': '\nFloating point literals\n***********************\n\nFloating point literals are described by the following lexical\ndefinitions:\n\n floatnumber ::= pointfloat | exponentfloat\n pointfloat ::= [intpart] fraction | intpart "."\n exponentfloat ::= (intpart | pointfloat) exponent\n intpart ::= digit+\n fraction ::= "." digit+\n exponent ::= ("e" | "E") ["+" | "-"] digit+\n\nNote that the integer and exponent parts are always interpreted using\nradix 10. For example, ``077e010`` is legal, and denotes the same\nnumber as ``77e10``. The allowed range of floating point literals is\nimplementation-dependent. Some examples of floating point literals:\n\n 3.14 10. .001 1e100 3.14e-10 0e0\n\nNote that numeric literals do not include a sign; a phrase like ``-1``\nis actually an expression composed of the unary operator ``-`` and the\nliteral ``1``.\n', 'for': '\nThe ``for`` statement\n*********************\n\nThe ``for`` statement is used to iterate over the elements of a\nsequence (such as a string, tuple or list) or other iterable object:\n\n for_stmt ::= "for" target_list "in" expression_list ":" suite\n ["else" ":" suite]\n\nThe expression list is evaluated once; it should yield an iterable\nobject. An iterator is created for the result of the\n``expression_list``. The suite is then executed once for each item\nprovided by the iterator, in the order of ascending indices. Each\nitem in turn is assigned to the target list using the standard rules\nfor assignments (see *Assignment statements*), and then the suite is\nexecuted. When the items are exhausted (which is immediately when the\nsequence is empty or an iterator raises a ``StopIteration``\nexception), the suite in the ``else`` clause, if present, is executed,\nand the loop terminates.\n\nA ``break`` statement executed in the first suite terminates the loop\nwithout executing the ``else`` clause\'s suite. A ``continue``\nstatement executed in the first suite skips the rest of the suite and\ncontinues with the next item, or with the ``else`` clause if there was\nno next item.\n\nThe suite may assign to the variable(s) in the target list; this does\nnot affect the next item assigned to it.\n\nNames in the target list are not deleted when the loop is finished,\nbut if the sequence is empty, it will not have been assigned to at all\nby the loop. Hint: the built-in function ``range()`` returns an\niterator of integers suitable to emulate the effect of Pascal\'s ``for\ni := a to b do``; e.g., ``list(range(3))`` returns the list ``[0, 1,\n2]``.\n\nNote: There is a subtlety when the sequence is being modified by the loop\n (this can only occur for mutable sequences, i.e. lists). An\n internal counter is used to keep track of which item is used next,\n and this is incremented on each iteration. 
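Stepping back to the ``for`` statement semantics above: a ``break`` in the first suite skips the ``else`` clause, which the classic sketch below relies on (the prime-printing loop is illustrative only):

   >>> for n in range(2, 6):
   ...     for x in range(2, n):
   ...         if n % x == 0:
   ...             break
   ...     else:
   ...         print(n, "is prime")
   ...
   2 is prime
   3 is prime
   5 is prime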
When this counter has\n reached the length of the sequence the loop terminates. This means\n that if the suite deletes the current (or a previous) item from the\n sequence, the next item will be skipped (since it gets the index of\n the current item which has already been treated). Likewise, if the\n suite inserts an item in the sequence before the current item, the\n current item will be treated again the next time through the loop.\n This can lead to nasty bugs that can be avoided by making a\n temporary copy using a slice of the whole sequence, e.g.,\n\n for x in a[:]:\n if x < 0: a.remove(x)\n', 'formatstrings': '\nFormat String Syntax\n********************\n\nThe ``str.format()`` method and the ``Formatter`` class share the same\nsyntax for format strings (although in the case of ``Formatter``,\nsubclasses can define their own format string syntax).\n\nFormat strings contain "replacement fields" surrounded by curly braces\n``{}``. Anything that is not contained in braces is considered literal\ntext, which is copied unchanged to the output. If you need to include\na brace character in the literal text, it can be escaped by doubling:\n``{{`` and ``}}``.\n\nThe grammar for a replacement field is as follows:\n\n replacement_field ::= "{" [field_name] ["!" conversion] [":" format_spec] "}"\n field_name ::= arg_name ("." attribute_name | "[" element_index "]")*\n arg_name ::= [identifier | integer]\n attribute_name ::= identifier\n element_index ::= integer | index_string\n index_string ::= +\n conversion ::= "r" | "s" | "a"\n format_spec ::= \n\nIn less formal terms, the replacement field can start with a\n*field_name* that specifies the object whose value is to be formatted\nand inserted into the output instead of the replacement field. The\n*field_name* is optionally followed by a *conversion* field, which is\npreceded by an exclamation point ``\'!\'``, and a *format_spec*, which\nis preceded by a colon ``\':\'``. These specify a non-default format\nfor the replacement value.\n\nSee also the *Format Specification Mini-Language* section.\n\nThe *field_name* itself begins with an *arg_name* that is either a\nnumber or a keyword. If it\'s a number, it refers to a positional\nargument, and if it\'s a keyword, it refers to a named keyword\nargument. If the numerical arg_names in a format string are 0, 1, 2,\n... in sequence, they can all be omitted (not just some) and the\nnumbers 0, 1, 2, ... will be automatically inserted in that order.\nBecause *arg_name* is not quote-delimited, it is not possible to\nspecify arbitrary dictionary keys (e.g., the strings ``\'10\'`` or\n``\':-]\'``) within a format string. The *arg_name* can be followed by\nany number of index or attribute expressions. 
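A quick check of the brace-escaping and auto-numbering rules described above:

   >>> 'literal braces {{}} around {}'.format('spam')
   'literal braces {} around spam'
   >>> 'from {} to {}'.format(1, 10)    # equivalent to 'from {0} to {1}'
   'from 1 to 10'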
An expression of the\nform ``\'.name\'`` selects the named attribute using ``getattr()``,\nwhile an expression of the form ``\'[index]\'`` does an index lookup\nusing ``__getitem__()``.\n\nChanged in version 3.1: The positional argument specifiers can be\nomitted, so ``\'{} {}\'`` is equivalent to ``\'{0} {1}\'``.\n\nSome simple format string examples:\n\n "First, thou shalt count to {0}" # References first positional argument\n "Bring me a {}" # Implicitly references the first positional argument\n "From {} to {}" # Same as "From {0} to {1}"\n "My quest is {name}" # References keyword argument \'name\'\n "Weight in tons {0.weight}" # \'weight\' attribute of first positional arg\n "Units destroyed: {players[0]}" # First element of keyword argument \'players\'.\n\nThe *conversion* field causes a type coercion before formatting.\nNormally, the job of formatting a value is done by the\n``__format__()`` method of the value itself. However, in some cases\nit is desirable to force a type to be formatted as a string,\noverriding its own definition of formatting. By converting the value\nto a string before calling ``__format__()``, the normal formatting\nlogic is bypassed.\n\nThree conversion flags are currently supported: ``\'!s\'`` which calls\n``str()`` on the value, ``\'!r\'`` which calls ``repr()`` and ``\'!a\'``\nwhich calls ``ascii()``.\n\nSome examples:\n\n "Harold\'s a clever {0!s}" # Calls str() on the argument first\n "Bring out the holy {name!r}" # Calls repr() on the argument first\n "More {!a}" # Calls ascii() on the argument first\n\nThe *format_spec* field contains a specification of how the value\nshould be presented, including such details as field width, alignment,\npadding, decimal precision and so on. Each value type can define its\nown "formatting mini-language" or interpretation of the *format_spec*.\n\nMost built-in types support a common formatting mini-language, which\nis described in the next section.\n\nA *format_spec* field can also include nested replacement fields\nwithin it. These nested replacement fields can contain only a field\nname; conversion flags and format specifications are not allowed. The\nreplacement fields within the format_spec are substituted before the\n*format_spec* string is interpreted. This allows the formatting of a\nvalue to be dynamically specified.\n\nSee the *Format examples* section for some examples.\n\n\nFormat Specification Mini-Language\n==================================\n\n"Format specifications" are used within replacement fields contained\nwithin a format string to define how individual values are presented\n(see *Format String Syntax*). They can also be passed directly to the\nbuilt-in ``format()`` function. Each formattable type may define how\nthe format specification is to be interpreted.\n\nMost built-in types implement the following options for format\nspecifications, although some of the formatting options are only\nsupported by the numeric types.\n\nA general convention is that an empty format string (``""``) produces\nthe same result as if you had called ``str()`` on the value. 
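Two small examples of the points made just above: nested replacement fields make a format dynamic, and an empty *format_spec* behaves like ``str()``:

   >>> '{:.{prec}f}'.format(3.14159, prec=3)
   '3.142'
   >>> format(3.14159, '')              # empty format spec: same as str()
   '3.14159'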
A non-\nempty format string typically modifies the result.\n\nThe general form of a *standard format specifier* is:\n\n format_spec ::= [[fill]align][sign][#][0][width][,][.precision][type]\n fill ::= \n align ::= "<" | ">" | "=" | "^"\n sign ::= "+" | "-" | " "\n width ::= integer\n precision ::= integer\n type ::= "b" | "c" | "d" | "e" | "E" | "f" | "F" | "g" | "G" | "n" | "o" | "s" | "x" | "X" | "%"\n\nThe *fill* character can be any character other than \'{\' or \'}\'. The\npresence of a fill character is signaled by the character following\nit, which must be one of the alignment options. If the second\ncharacter of *format_spec* is not a valid alignment option, then it is\nassumed that both the fill character and the alignment option are\nabsent.\n\nThe meaning of the various alignment options is as follows:\n\n +-----------+------------------------------------------------------------+\n | Option | Meaning |\n +===========+============================================================+\n | ``\'<\'`` | Forces the field to be left-aligned within the available |\n | | space (this is the default for most objects). |\n +-----------+------------------------------------------------------------+\n | ``\'>\'`` | Forces the field to be right-aligned within the available |\n | | space (this is the default for numbers). |\n +-----------+------------------------------------------------------------+\n | ``\'=\'`` | Forces the padding to be placed after the sign (if any) |\n | | but before the digits. This is used for printing fields |\n | | in the form \'+000000120\'. This alignment option is only |\n | | valid for numeric types. |\n +-----------+------------------------------------------------------------+\n | ``\'^\'`` | Forces the field to be centered within the available |\n | | space. |\n +-----------+------------------------------------------------------------+\n\nNote that unless a minimum field width is defined, the field width\nwill always be the same size as the data to fill it, so that the\nalignment option has no meaning in this case.\n\nThe *sign* option is only valid for number types, and can be one of\nthe following:\n\n +-----------+------------------------------------------------------------+\n | Option | Meaning |\n +===========+============================================================+\n | ``\'+\'`` | indicates that a sign should be used for both positive as |\n | | well as negative numbers. |\n +-----------+------------------------------------------------------------+\n | ``\'-\'`` | indicates that a sign should be used only for negative |\n | | numbers (this is the default behavior). |\n +-----------+------------------------------------------------------------+\n | space | indicates that a leading space should be used on positive |\n | | numbers, and a minus sign on negative numbers. |\n +-----------+------------------------------------------------------------+\n\nThe ``\'#\'`` option causes the "alternate form" to be used for the\nconversion. The alternate form is defined differently for different\ntypes. This option is only valid for integer, float, complex and\nDecimal types. For integers, when binary, octal, or hexadecimal output\nis used, this option adds the prefix respective ``\'0b\'``, ``\'0o\'``, or\n``\'0x\'`` to the output value. For floats, complex and Decimal the\nalternate form causes the result of the conversion to always contain a\ndecimal-point character, even if no digits follow it. 
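A short illustration of the ``'='`` alignment and the ``'#'`` alternate form described above:

   >>> format(255, '#08x')              # '#' adds the '0x' prefix, '0' pads to width 8
   '0x0000ff'
   >>> '{:0=+9.2f}'.format(3.5)         # '=' places the padding after the sign
   '+00003.50'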
Normally, a\ndecimal-point character appears in the result of these conversions\nonly if a digit follows it. In addition, for ``\'g\'`` and ``\'G\'``\nconversions, trailing zeros are not removed from the result.\n\nThe ``\',\'`` option signals the use of a comma for a thousands\nseparator. For a locale aware separator, use the ``\'n\'`` integer\npresentation type instead.\n\nChanged in version 3.1: Added the ``\',\'`` option (see also **PEP\n378**).\n\n*width* is a decimal integer defining the minimum field width. If not\nspecified, then the field width will be determined by the content.\n\nIf the *width* field is preceded by a zero (``\'0\'``) character, this\nenables zero-padding. This is equivalent to an *alignment* type of\n``\'=\'`` and a *fill* character of ``\'0\'``.\n\nThe *precision* is a decimal number indicating how many digits should\nbe displayed after the decimal point for a floating point value\nformatted with ``\'f\'`` and ``\'F\'``, or before and after the decimal\npoint for a floating point value formatted with ``\'g\'`` or ``\'G\'``.\nFor non-number types the field indicates the maximum field size - in\nother words, how many characters will be used from the field content.\nThe *precision* is not allowed for integer values.\n\nFinally, the *type* determines how the data should be presented.\n\nThe available string presentation types are:\n\n +-----------+------------------------------------------------------------+\n | Type | Meaning |\n +===========+============================================================+\n | ``\'s\'`` | String format. This is the default type for strings and |\n | | may be omitted. |\n +-----------+------------------------------------------------------------+\n | None | The same as ``\'s\'``. |\n +-----------+------------------------------------------------------------+\n\nThe available integer presentation types are:\n\n +-----------+------------------------------------------------------------+\n | Type | Meaning |\n +===========+============================================================+\n | ``\'b\'`` | Binary format. Outputs the number in base 2. |\n +-----------+------------------------------------------------------------+\n | ``\'c\'`` | Character. Converts the integer to the corresponding |\n | | unicode character before printing. |\n +-----------+------------------------------------------------------------+\n | ``\'d\'`` | Decimal Integer. Outputs the number in base 10. |\n +-----------+------------------------------------------------------------+\n | ``\'o\'`` | Octal format. Outputs the number in base 8. |\n +-----------+------------------------------------------------------------+\n | ``\'x\'`` | Hex format. Outputs the number in base 16, using lower- |\n | | case letters for the digits above 9. |\n +-----------+------------------------------------------------------------+\n | ``\'X\'`` | Hex format. Outputs the number in base 16, using upper- |\n | | case letters for the digits above 9. |\n +-----------+------------------------------------------------------------+\n | ``\'n\'`` | Number. This is the same as ``\'d\'``, except that it uses |\n | | the current locale setting to insert the appropriate |\n | | number separator characters. |\n +-----------+------------------------------------------------------------+\n | None | The same as ``\'d\'``. 
|\n +-----------+------------------------------------------------------------+\n\nIn addition to the above presentation types, integers can be formatted\nwith the floating point presentation types listed below (except\n``\'n\'`` and None). When doing so, ``float()`` is used to convert the\ninteger to a floating point number before formatting.\n\nThe available presentation types for floating point and decimal values\nare:\n\n +-----------+------------------------------------------------------------+\n | Type | Meaning |\n +===========+============================================================+\n | ``\'e\'`` | Exponent notation. Prints the number in scientific |\n | | notation using the letter \'e\' to indicate the exponent. |\n +-----------+------------------------------------------------------------+\n | ``\'E\'`` | Exponent notation. Same as ``\'e\'`` except it uses an upper |\n | | case \'E\' as the separator character. |\n +-----------+------------------------------------------------------------+\n | ``\'f\'`` | Fixed point. Displays the number as a fixed-point number. |\n +-----------+------------------------------------------------------------+\n | ``\'F\'`` | Fixed point. Same as ``\'f\'``, but converts ``nan`` to |\n | | ``NAN`` and ``inf`` to ``INF``. |\n +-----------+------------------------------------------------------------+\n | ``\'g\'`` | General format. For a given precision ``p >= 1``, this |\n | | rounds the number to ``p`` significant digits and then |\n | | formats the result in either fixed-point format or in |\n | | scientific notation, depending on its magnitude. The |\n | | precise rules are as follows: suppose that the result |\n | | formatted with presentation type ``\'e\'`` and precision |\n | | ``p-1`` would have exponent ``exp``. Then if ``-4 <= exp |\n | | < p``, the number is formatted with presentation type |\n | | ``\'f\'`` and precision ``p-1-exp``. Otherwise, the number |\n | | is formatted with presentation type ``\'e\'`` and precision |\n | | ``p-1``. In both cases insignificant trailing zeros are |\n | | removed from the significand, and the decimal point is |\n | | also removed if there are no remaining digits following |\n | | it. Positive and negative infinity, positive and negative |\n | | zero, and nans, are formatted as ``inf``, ``-inf``, ``0``, |\n | | ``-0`` and ``nan`` respectively, regardless of the |\n | | precision. A precision of ``0`` is treated as equivalent |\n | | to a precision of ``1``. |\n +-----------+------------------------------------------------------------+\n | ``\'G\'`` | General format. Same as ``\'g\'`` except switches to ``\'E\'`` |\n | | if the number gets too large. The representations of |\n | | infinity and NaN are uppercased, too. |\n +-----------+------------------------------------------------------------+\n | ``\'n\'`` | Number. This is the same as ``\'g\'``, except that it uses |\n | | the current locale setting to insert the appropriate |\n | | number separator characters. |\n +-----------+------------------------------------------------------------+\n | ``\'%\'`` | Percentage. Multiplies the number by 100 and displays in |\n | | fixed (``\'f\'``) format, followed by a percent sign. |\n +-----------+------------------------------------------------------------+\n | None | Similar to ``\'g\'``, except with at least one digit past |\n | | the decimal point and a default precision of 12. This is |\n | | intended to match ``str()``, except you can add the other |\n | | format modifiers. 
|\n +-----------+------------------------------------------------------------+\n\n\nFormat examples\n===============\n\nThis section contains examples of the new format syntax and comparison\nwith the old ``%``-formatting.\n\nIn most of the cases the syntax is similar to the old\n``%``-formatting, with the addition of the ``{}`` and with ``:`` used\ninstead of ``%``. For example, ``\'%03.2f\'`` can be translated to\n``\'{:03.2f}\'``.\n\nThe new format syntax also supports new and different options, shown\nin the follow examples.\n\nAccessing arguments by position:\n\n >>> \'{0}, {1}, {2}\'.format(\'a\', \'b\', \'c\')\n \'a, b, c\'\n >>> \'{}, {}, {}\'.format(\'a\', \'b\', \'c\') # 3.1+ only\n \'a, b, c\'\n >>> \'{2}, {1}, {0}\'.format(\'a\', \'b\', \'c\')\n \'c, b, a\'\n >>> \'{2}, {1}, {0}\'.format(*\'abc\') # unpacking argument sequence\n \'c, b, a\'\n >>> \'{0}{1}{0}\'.format(\'abra\', \'cad\') # arguments\' indices can be repeated\n \'abracadabra\'\n\nAccessing arguments by name:\n\n >>> \'Coordinates: {latitude}, {longitude}\'.format(latitude=\'37.24N\', longitude=\'-115.81W\')\n \'Coordinates: 37.24N, -115.81W\'\n >>> coord = {\'latitude\': \'37.24N\', \'longitude\': \'-115.81W\'}\n >>> \'Coordinates: {latitude}, {longitude}\'.format(**coord)\n \'Coordinates: 37.24N, -115.81W\'\n\nAccessing arguments\' attributes:\n\n >>> c = 3-5j\n >>> (\'The complex number {0} is formed from the real part {0.real} \'\n ... \'and the imaginary part {0.imag}.\').format(c)\n \'The complex number (3-5j) is formed from the real part 3.0 and the imaginary part -5.0.\'\n >>> class Point:\n ... def __init__(self, x, y):\n ... self.x, self.y = x, y\n ... def __str__(self):\n ... return \'Point({self.x}, {self.y})\'.format(self=self)\n ...\n >>> str(Point(4, 2))\n \'Point(4, 2)\'\n\nAccessing arguments\' items:\n\n >>> coord = (3, 5)\n >>> \'X: {0[0]}; Y: {0[1]}\'.format(coord)\n \'X: 3; Y: 5\'\n\nReplacing ``%s`` and ``%r``:\n\n >>> "repr() shows quotes: {!r}; str() doesn\'t: {!s}".format(\'test1\', \'test2\')\n "repr() shows quotes: \'test1\'; str() doesn\'t: test2"\n\nAligning the text and specifying a width:\n\n >>> \'{:<30}\'.format(\'left aligned\')\n \'left aligned \'\n >>> \'{:>30}\'.format(\'right aligned\')\n \' right aligned\'\n >>> \'{:^30}\'.format(\'centered\')\n \' centered \'\n >>> \'{:*^30}\'.format(\'centered\') # use \'*\' as a fill char\n \'***********centered***********\'\n\nReplacing ``%+f``, ``%-f``, and ``% f`` and specifying a sign:\n\n >>> \'{:+f}; {:+f}\'.format(3.14, -3.14) # show it always\n \'+3.140000; -3.140000\'\n >>> \'{: f}; {: f}\'.format(3.14, -3.14) # show a space for positive numbers\n \' 3.140000; -3.140000\'\n >>> \'{:-f}; {:-f}\'.format(3.14, -3.14) # show only the minus -- same as \'{:f}; {:f}\'\n \'3.140000; -3.140000\'\n\nReplacing ``%x`` and ``%o`` and converting the value to different\nbases:\n\n >>> # format also supports binary numbers\n >>> "int: {0:d}; hex: {0:x}; oct: {0:o}; bin: {0:b}".format(42)\n \'int: 42; hex: 2a; oct: 52; bin: 101010\'\n >>> # with 0x, 0o, or 0b as prefix:\n >>> "int: {0:d}; hex: {0:#x}; oct: {0:#o}; bin: {0:#b}".format(42)\n \'int: 42; hex: 0x2a; oct: 0o52; bin: 0b101010\'\n\nUsing the comma as a thousands separator:\n\n >>> \'{:,}\'.format(1234567890)\n \'1,234,567,890\'\n\nExpressing a percentage:\n\n >>> points = 19\n >>> total = 22\n >>> \'Correct answers: {:.2%}\'.format(points/total)\n \'Correct answers: 86.36%\'\n\nUsing type-specific formatting:\n\n >>> import datetime\n >>> d = datetime.datetime(2010, 7, 4, 12, 15, 58)\n >>> 
\'{:%Y-%m-%d %H:%M:%S}\'.format(d)\n \'2010-07-04 12:15:58\'\n\nNesting arguments and more complex examples:\n\n >>> for align, text in zip(\'<^>\', [\'left\', \'center\', \'right\']):\n ... \'{0:{fill}{align}16}\'.format(text, fill=align, align=align)\n ...\n \'left<<<<<<<<<<<<\'\n \'^^^^^center^^^^^\'\n \'>>>>>>>>>>>right\'\n >>>\n >>> octets = [192, 168, 0, 1]\n >>> \'{:02X}{:02X}{:02X}{:02X}\'.format(*octets)\n \'C0A80001\'\n >>> int(_, 16)\n 3232235521\n >>>\n >>> width = 5\n >>> for num in range(5,12):\n ... for base in \'dXob\':\n ... print(\'{0:{width}{base}}\'.format(num, base=base, width=width), end=\' \')\n ... print()\n ...\n 5 5 5 101\n 6 6 6 110\n 7 7 7 111\n 8 8 10 1000\n 9 9 11 1001\n 10 A 12 1010\n 11 B 13 1011\n', - 'function': '\nFunction definitions\n********************\n\nA function definition defines a user-defined function object (see\nsection *The standard type hierarchy*):\n\n funcdef ::= [decorators] "def" funcname "(" [parameter_list] ")" ["->" expression] ":" suite\n decorators ::= decorator+\n decorator ::= "@" dotted_name ["(" [parameter_list [","]] ")"] NEWLINE\n dotted_name ::= identifier ("." identifier)*\n parameter_list ::= (defparameter ",")*\n ( "*" [parameter] ("," defparameter)*\n [, "**" parameter]\n | "**" parameter\n | defparameter [","] )\n parameter ::= identifier [":" expression]\n defparameter ::= parameter ["=" expression]\n funcname ::= identifier\n\nA function definition is an executable statement. Its execution binds\nthe function name in the current local namespace to a function object\n(a wrapper around the executable code for the function). This\nfunction object contains a reference to the current global namespace\nas the global namespace to be used when the function is called.\n\nThe function definition does not execute the function body; this gets\nexecuted only when the function is called. [3]\n\nA function definition may be wrapped by one or more *decorator*\nexpressions. Decorator expressions are evaluated when the function is\ndefined, in the scope that contains the function definition. The\nresult must be a callable, which is invoked with the function object\nas the only argument. The returned value is bound to the function name\ninstead of the function object. Multiple decorators are applied in\nnested fashion. For example, the following code\n\n @f1(arg)\n @f2\n def func(): pass\n\nis equivalent to\n\n def func(): pass\n func = f1(arg)(f2(func))\n\nWhen one or more parameters have the form *parameter* ``=``\n*expression*, the function is said to have "default parameter values."\nFor a parameter with a default value, the corresponding argument may\nbe omitted from a call, in which case the parameter\'s default value is\nsubstituted. If a parameter has a default value, all following\nparameters up until the "``*``" must also have a default value ---\nthis is a syntactic restriction that is not expressed by the grammar.\n\n**Default parameter values are evaluated when the function definition\nis executed.** This means that the expression is evaluated once, when\nthe function is defined, and that the same "pre-computed" value is\nused for each call. This is especially important to understand when a\ndefault parameter is a mutable object, such as a list or a dictionary:\nif the function modifies the object (e.g. by appending an item to a\nlist), the default value is in effect modified. This is generally not\nwhat was intended. 
A way around this is to use ``None`` as the\ndefault, and explicitly test for it in the body of the function, e.g.:\n\n def whats_on_the_telly(penguin=None):\n if penguin is None:\n penguin = []\n penguin.append("property of the zoo")\n return penguin\n\nFunction call semantics are described in more detail in section\n*Calls*. A function call always assigns values to all parameters\nmentioned in the parameter list, either from position arguments, from\nkeyword arguments, or from default values. If the form\n"``*identifier``" is present, it is initialized to a tuple receiving\nany excess positional parameters, defaulting to the empty tuple. If\nthe form "``**identifier``" is present, it is initialized to a new\ndictionary receiving any excess keyword arguments, defaulting to a new\nempty dictionary. Parameters after "``*``" or "``*identifier``" are\nkeyword-only parameters and may only be passed used keyword arguments.\n\nParameters may have annotations of the form "``: expression``"\nfollowing the parameter name. Any parameter may have an annotation\neven those of the form ``*identifier`` or ``**identifier``. Functions\nmay have "return" annotation of the form "``-> expression``" after the\nparameter list. These annotations can be any valid Python expression\nand are evaluated when the function definition is executed.\nAnnotations may be evaluated in a different order than they appear in\nthe source code. The presence of annotations does not change the\nsemantics of a function. The annotation values are available as\nvalues of a dictionary keyed by the parameters\' names in the\n``__annotations__`` attribute of the function object.\n\nIt is also possible to create anonymous functions (functions not bound\nto a name), for immediate use in expressions. This uses lambda forms,\ndescribed in section *Lambdas*. Note that the lambda form is merely a\nshorthand for a simplified function definition; a function defined in\na "``def``" statement can be passed around or assigned to another name\njust like a function defined by a lambda form. The "``def``" form is\nactually more powerful since it allows the execution of multiple\nstatements and annotations.\n\n**Programmer\'s note:** Functions are first-class objects. A "``def``"\nform executed inside a function definition defines a local function\nthat can be returned or passed around. Free variables used in the\nnested function can access the local variables of the function\ncontaining the def. See section *Naming and binding* for details.\n', + 'function': '\nFunction definitions\n********************\n\nA function definition defines a user-defined function object (see\nsection *The standard type hierarchy*):\n\n funcdef ::= [decorators] "def" funcname "(" [parameter_list] ")" ["->" expression] ":" suite\n decorators ::= decorator+\n decorator ::= "@" dotted_name ["(" [parameter_list [","]] ")"] NEWLINE\n dotted_name ::= identifier ("." identifier)*\n parameter_list ::= (defparameter ",")*\n ( "*" [parameter] ("," defparameter)*\n [, "**" parameter]\n | "**" parameter\n | defparameter [","] )\n parameter ::= identifier [":" expression]\n defparameter ::= parameter ["=" expression]\n funcname ::= identifier\n\nA function definition is an executable statement. Its execution binds\nthe function name in the current local namespace to a function object\n(a wrapper around the executable code for the function). 
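As a small illustration of the binding behaviour just described (a hedged sketch; the names ``greet`` and ``shout`` are hypothetical, not from the reference text), executing a ``def`` statement simply binds a name to a new function object, and that name can be re-bound like any other:

   >>> def greet():
   ...     return 'hello'
   ...
   >>> greet.__name__
   'greet'
   >>> shout = greet        # the name is only a binding; the function object is unchanged
   >>> shout()
   'hello'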
This\nfunction object contains a reference to the current global namespace\nas the global namespace to be used when the function is called.\n\nThe function definition does not execute the function body; this gets\nexecuted only when the function is called. [3]\n\nA function definition may be wrapped by one or more *decorator*\nexpressions. Decorator expressions are evaluated when the function is\ndefined, in the scope that contains the function definition. The\nresult must be a callable, which is invoked with the function object\nas the only argument. The returned value is bound to the function name\ninstead of the function object. Multiple decorators are applied in\nnested fashion. For example, the following code\n\n @f1(arg)\n @f2\n def func(): pass\n\nis equivalent to\n\n def func(): pass\n func = f1(arg)(f2(func))\n\nWhen one or more parameters have the form *parameter* ``=``\n*expression*, the function is said to have "default parameter values."\nFor a parameter with a default value, the corresponding argument may\nbe omitted from a call, in which case the parameter\'s default value is\nsubstituted. If a parameter has a default value, all following\nparameters up until the "``*``" must also have a default value ---\nthis is a syntactic restriction that is not expressed by the grammar.\n\n**Default parameter values are evaluated when the function definition\nis executed.** This means that the expression is evaluated once, when\nthe function is defined, and that the same "pre-computed" value is\nused for each call. This is especially important to understand when a\ndefault parameter is a mutable object, such as a list or a dictionary:\nif the function modifies the object (e.g. by appending an item to a\nlist), the default value is in effect modified. This is generally not\nwhat was intended. A way around this is to use ``None`` as the\ndefault, and explicitly test for it in the body of the function, e.g.:\n\n def whats_on_the_telly(penguin=None):\n if penguin is None:\n penguin = []\n penguin.append("property of the zoo")\n return penguin\n\nFunction call semantics are described in more detail in section\n*Calls*. A function call always assigns values to all parameters\nmentioned in the parameter list, either from position arguments, from\nkeyword arguments, or from default values. If the form\n"``*identifier``" is present, it is initialized to a tuple receiving\nany excess positional parameters, defaulting to the empty tuple. If\nthe form "``**identifier``" is present, it is initialized to a new\ndictionary receiving any excess keyword arguments, defaulting to a new\nempty dictionary. Parameters after "``*``" or "``*identifier``" are\nkeyword-only parameters and may only be passed used keyword arguments.\n\nParameters may have annotations of the form "``: expression``"\nfollowing the parameter name. Any parameter may have an annotation\neven those of the form ``*identifier`` or ``**identifier``. Functions\nmay have "return" annotation of the form "``-> expression``" after the\nparameter list. These annotations can be any valid Python expression\nand are evaluated when the function definition is executed.\nAnnotations may be evaluated in a different order than they appear in\nthe source code. The presence of annotations does not change the\nsemantics of a function. 
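To make the annotation and keyword-only parameter rules above concrete, here is a minimal doctest sketch (the ``tag`` function and its parameters are illustrative, not part of the reference text):

   >>> def tag(text: str, *, name: str = 'div') -> str:
   ...     return '<{0}>{1}</{0}>'.format(name, text)
   ...
   >>> tag('hi', name='p')          # 'name' can only be passed as a keyword
   '<p>hi</p>'
   >>> sorted(tag.__annotations__)  # parameter names plus 'return'
   ['name', 'return', 'text']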
The annotation values are available as\nvalues of a dictionary keyed by the parameters\' names in the\n``__annotations__`` attribute of the function object.\n\nIt is also possible to create anonymous functions (functions not bound\nto a name), for immediate use in expressions. This uses lambda forms,\ndescribed in section *Lambdas*. Note that the lambda form is merely a\nshorthand for a simplified function definition; a function defined in\na "``def``" statement can be passed around or assigned to another name\njust like a function defined by a lambda form. The "``def``" form is\nactually more powerful since it allows the execution of multiple\nstatements and annotations.\n\n**Programmer\'s note:** Functions are first-class objects. A "``def``"\nform executed inside a function definition defines a local function\nthat can be returned or passed around. Free variables used in the\nnested function can access the local variables of the function\ncontaining the def. See section *Naming and binding* for details.\n\nSee also:\n\n **PEP 3107** - Function Annotations\n The original specification for function annotations.\n', 'global': '\nThe ``global`` statement\n************************\n\n global_stmt ::= "global" identifier ("," identifier)*\n\nThe ``global`` statement is a declaration which holds for the entire\ncurrent code block. It means that the listed identifiers are to be\ninterpreted as globals. It would be impossible to assign to a global\nvariable without ``global``, although free variables may refer to\nglobals without being declared global.\n\nNames listed in a ``global`` statement must not be used in the same\ncode block textually preceding that ``global`` statement.\n\nNames listed in a ``global`` statement must not be defined as formal\nparameters or in a ``for`` loop control target, ``class`` definition,\nfunction definition, or ``import`` statement.\n\n**CPython implementation detail:** The current implementation does not\nenforce the latter two restrictions, but programs should not abuse\nthis freedom, as future implementations may enforce them or silently\nchange the meaning of the program.\n\n**Programmer\'s note:** the ``global`` is a directive to the parser.\nIt applies only to code parsed at the same time as the ``global``\nstatement. In particular, a ``global`` statement contained in a string\nor code object supplied to the built-in ``exec()`` function does not\naffect the code block *containing* the function call, and code\ncontained in such a string is unaffected by ``global`` statements in\nthe code containing the function call. The same applies to the\n``eval()`` and ``compile()`` functions.\n', 'id-classes': '\nReserved classes of identifiers\n*******************************\n\nCertain classes of identifiers (besides keywords) have special\nmeanings. These classes are identified by the patterns of leading and\ntrailing underscore characters:\n\n``_*``\n Not imported by ``from module import *``. The special identifier\n ``_`` is used in the interactive interpreter to store the result of\n the last evaluation; it is stored in the ``builtins`` module. When\n not in interactive mode, ``_`` has no special meaning and is not\n defined. See section *The import statement*.\n\n Note: The name ``_`` is often used in conjunction with\n internationalization; refer to the documentation for the\n ``gettext`` module for more information on this convention.\n\n``__*__``\n System-defined names. 
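A minimal doctest sketch of the ``global`` statement described above (the names ``counter`` and ``bump`` are illustrative): without the declaration, the assignment inside ``bump()`` would create a new local variable instead of rebinding the module-level name.

   >>> counter = 0
   >>> def bump():
   ...     global counter
   ...     counter += 1
   ...
   >>> bump(); bump()
   >>> counter
   2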
These names are defined by the interpreter\n and its implementation (including the standard library). Current\n system names are discussed in the *Special method names* section\n and elsewhere. More will likely be defined in future versions of\n Python. *Any* use of ``__*__`` names, in any context, that does\n not follow explicitly documented use, is subject to breakage\n without warning.\n\n``__*``\n Class-private names. Names in this category, when used within the\n context of a class definition, are re-written to use a mangled form\n to help avoid name clashes between "private" attributes of base and\n derived classes. See section *Identifiers (Names)*.\n', 'identifiers': '\nIdentifiers and keywords\n************************\n\nIdentifiers (also referred to as *names*) are described by the\nfollowing lexical definitions.\n\nThe syntax of identifiers in Python is based on the Unicode standard\nannex UAX-31, with elaboration and changes as defined below; see also\n**PEP 3131** for further details.\n\nWithin the ASCII range (U+0001..U+007F), the valid characters for\nidentifiers are the same as in Python 2.x: the uppercase and lowercase\nletters ``A`` through ``Z``, the underscore ``_`` and, except for the\nfirst character, the digits ``0`` through ``9``.\n\nPython 3.0 introduces additional characters from outside the ASCII\nrange (see **PEP 3131**). For these characters, the classification\nuses the version of the Unicode Character Database as included in the\n``unicodedata`` module.\n\nIdentifiers are unlimited in length. Case is significant.\n\n identifier ::= xid_start xid_continue*\n id_start ::= \n id_continue ::= \n xid_start ::= \n xid_continue ::= \n\nThe Unicode category codes mentioned above stand for:\n\n* *Lu* - uppercase letters\n\n* *Ll* - lowercase letters\n\n* *Lt* - titlecase letters\n\n* *Lm* - modifier letters\n\n* *Lo* - other letters\n\n* *Nl* - letter numbers\n\n* *Mn* - nonspacing marks\n\n* *Mc* - spacing combining marks\n\n* *Nd* - decimal numbers\n\n* *Pc* - connector punctuations\n\n* *Other_ID_Start* - explicit list of characters in PropList.txt to\n support backwards compatibility\n\n* *Other_ID_Continue* - likewise\n\nAll identifiers are converted into the normal form NFKC while parsing;\ncomparison of identifiers is based on NFKC.\n\nA non-normative HTML file listing all valid identifier characters for\nUnicode 4.1 can be found at http://www.dcl.hpi.uni-\npotsdam.de/home/loewis/table-3131.html.\n\n\nKeywords\n========\n\nThe following identifiers are used as reserved words, or *keywords* of\nthe language, and cannot be used as ordinary identifiers. They must\nbe spelled exactly as written here:\n\n False class finally is return\n None continue for lambda try\n True def from nonlocal while\n and del global not with\n as elif if or yield\n assert else import pass\n break except in raise\n\n\nReserved classes of identifiers\n===============================\n\nCertain classes of identifiers (besides keywords) have special\nmeanings. These classes are identified by the patterns of leading and\ntrailing underscore characters:\n\n``_*``\n Not imported by ``from module import *``. The special identifier\n ``_`` is used in the interactive interpreter to store the result of\n the last evaluation; it is stored in the ``builtins`` module. When\n not in interactive mode, ``_`` has no special meaning and is not\n defined. 
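The class-private name mangling mentioned under ``__*`` can be seen directly; a short sketch with a hypothetical ``Account`` class:

   >>> class Account:
   ...     def __init__(self):
   ...         self.__balance = 0    # rewritten to _Account__balance inside the class body
   ...
   >>> a = Account()
   >>> '_Account__balance' in vars(a)
   True
   >>> a._Account__balance
   0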
See section *The import statement*.\n\n Note: The name ``_`` is often used in conjunction with\n internationalization; refer to the documentation for the\n ``gettext`` module for more information on this convention.\n\n``__*__``\n System-defined names. These names are defined by the interpreter\n and its implementation (including the standard library). Current\n system names are discussed in the *Special method names* section\n and elsewhere. More will likely be defined in future versions of\n Python. *Any* use of ``__*__`` names, in any context, that does\n not follow explicitly documented use, is subject to breakage\n without warning.\n\n``__*``\n Class-private names. Names in this category, when used within the\n context of a class definition, are re-written to use a mangled form\n to help avoid name clashes between "private" attributes of base and\n derived classes. See section *Identifiers (Names)*.\n', @@ -68,10 +68,10 @@ 'try': '\nThe ``try`` statement\n*********************\n\nThe ``try`` statement specifies exception handlers and/or cleanup code\nfor a group of statements:\n\n try_stmt ::= try1_stmt | try2_stmt\n try1_stmt ::= "try" ":" suite\n ("except" [expression ["as" target]] ":" suite)+\n ["else" ":" suite]\n ["finally" ":" suite]\n try2_stmt ::= "try" ":" suite\n "finally" ":" suite\n\nThe ``except`` clause(s) specify one or more exception handlers. When\nno exception occurs in the ``try`` clause, no exception handler is\nexecuted. When an exception occurs in the ``try`` suite, a search for\nan exception handler is started. This search inspects the except\nclauses in turn until one is found that matches the exception. An\nexpression-less except clause, if present, must be last; it matches\nany exception. For an except clause with an expression, that\nexpression is evaluated, and the clause matches the exception if the\nresulting object is "compatible" with the exception. An object is\ncompatible with an exception if it is the class or a base class of the\nexception object or a tuple containing an item compatible with the\nexception.\n\nIf no except clause matches the exception, the search for an exception\nhandler continues in the surrounding code and on the invocation stack.\n[1]\n\nIf the evaluation of an expression in the header of an except clause\nraises an exception, the original search for a handler is canceled and\na search starts for the new exception in the surrounding code and on\nthe call stack (it is treated as if the entire ``try`` statement\nraised the exception).\n\nWhen a matching except clause is found, the exception is assigned to\nthe target specified after the ``as`` keyword in that except clause,\nif present, and the except clause\'s suite is executed. All except\nclauses must have an executable block. When the end of this block is\nreached, execution continues normally after the entire try statement.\n(This means that if two nested handlers exist for the same exception,\nand the exception occurs in the try clause of the inner handler, the\nouter handler will not handle the exception.)\n\nWhen an exception has been assigned using ``as target``, it is cleared\nat the end of the except clause. This is as if\n\n except E as N:\n foo\n\nwas translated to\n\n except E as N:\n try:\n foo\n finally:\n del N\n\nThis means the exception must be assigned to a different name to be\nable to refer to it after the except clause. 
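A small doctest sketch of the clearing behaviour just described (the names ``saved`` and ``exc`` are illustrative): the exception object survives only under the extra name, while the ``as`` target is deleted when the clause ends.

   >>> try:
   ...     1 / 0
   ... except ZeroDivisionError as exc:
   ...     saved = exc
   ...
   >>> saved.args
   ('division by zero',)
   >>> exc
   Traceback (most recent call last):
     ...
   NameError: name 'exc' is not defined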
Exceptions are cleared\nbecause with the traceback attached to them, they form a reference\ncycle with the stack frame, keeping all locals in that frame alive\nuntil the next garbage collection occurs.\n\nBefore an except clause\'s suite is executed, details about the\nexception are stored in the ``sys`` module and can be access via\n``sys.exc_info()``. ``sys.exc_info()`` returns a 3-tuple consisting of\nthe exception class, the exception instance and a traceback object\n(see section *The standard type hierarchy*) identifying the point in\nthe program where the exception occurred. ``sys.exc_info()`` values\nare restored to their previous values (before the call) when returning\nfrom a function that handled an exception.\n\nThe optional ``else`` clause is executed if and when control flows off\nthe end of the ``try`` clause. [2] Exceptions in the ``else`` clause\nare not handled by the preceding ``except`` clauses.\n\nIf ``finally`` is present, it specifies a \'cleanup\' handler. The\n``try`` clause is executed, including any ``except`` and ``else``\nclauses. If an exception occurs in any of the clauses and is not\nhandled, the exception is temporarily saved. The ``finally`` clause is\nexecuted. If there is a saved exception, it is re-raised at the end\nof the ``finally`` clause. If the ``finally`` clause raises another\nexception or executes a ``return`` or ``break`` statement, the saved\nexception is set as the context of the new exception. The exception\ninformation is not available to the program during execution of the\n``finally`` clause.\n\nWhen a ``return``, ``break`` or ``continue`` statement is executed in\nthe ``try`` suite of a ``try``...``finally`` statement, the\n``finally`` clause is also executed \'on the way out.\' A ``continue``\nstatement is illegal in the ``finally`` clause. (The reason is a\nproblem with the current implementation --- this restriction may be\nlifted in the future).\n\nAdditional information on exceptions can be found in section\n*Exceptions*, and information on using the ``raise`` statement to\ngenerate exceptions may be found in section *The raise statement*.\n', 'types': '\nThe standard type hierarchy\n***************************\n\nBelow is a list of the types that are built into Python. Extension\nmodules (written in C, Java, or other languages, depending on the\nimplementation) can define additional types. Future versions of\nPython may add types to the type hierarchy (e.g., rational numbers,\nefficiently stored arrays of integers, etc.), although such additions\nwill often be provided via the standard library instead.\n\nSome of the type descriptions below contain a paragraph listing\n\'special attributes.\' These are attributes that provide access to the\nimplementation and are not intended for general use. Their definition\nmay change in the future.\n\nNone\n This type has a single value. There is a single object with this\n value. This object is accessed through the built-in name ``None``.\n It is used to signify the absence of a value in many situations,\n e.g., it is returned from functions that don\'t explicitly return\n anything. Its truth value is false.\n\nNotImplemented\n This type has a single value. There is a single object with this\n value. This object is accessed through the built-in name\n ``NotImplemented``. Numeric methods and rich comparison methods may\n return this value if they do not implement the operation for the\n operands provided. 
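The interaction of ``except``, ``else`` and ``finally`` described above can be sketched with a hypothetical ``divide()`` helper; the ``finally`` suite runs on the way out of every ``return``:

   >>> def divide(a, b):
   ...     try:
   ...         result = a / b
   ...     except ZeroDivisionError:
   ...         return 'cannot divide'
   ...     else:
   ...         return result
   ...     finally:
   ...         print('cleaning up')
   ...
   >>> divide(4, 2)
   cleaning up
   2.0
   >>> divide(4, 0)
   cleaning up
   'cannot divide'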
(The interpreter will then try the reflected\n operation, or some other fallback, depending on the operator.) Its\n truth value is true.\n\nEllipsis\n This type has a single value. There is a single object with this\n value. This object is accessed through the literal ``...`` or the\n built-in name ``Ellipsis``. Its truth value is true.\n\n``numbers.Number``\n These are created by numeric literals and returned as results by\n arithmetic operators and arithmetic built-in functions. Numeric\n objects are immutable; once created their value never changes.\n Python numbers are of course strongly related to mathematical\n numbers, but subject to the limitations of numerical representation\n in computers.\n\n Python distinguishes between integers, floating point numbers, and\n complex numbers:\n\n ``numbers.Integral``\n These represent elements from the mathematical set of integers\n (positive and negative).\n\n There are two types of integers:\n\n Integers (``int``)\n\n These represent numbers in an unlimited range, subject to\n available (virtual) memory only. For the purpose of shift\n and mask operations, a binary representation is assumed, and\n negative numbers are represented in a variant of 2\'s\n complement which gives the illusion of an infinite string of\n sign bits extending to the left.\n\n Booleans (``bool``)\n These represent the truth values False and True. The two\n objects representing the values False and True are the only\n Boolean objects. The Boolean type is a subtype of the integer\n type, and Boolean values behave like the values 0 and 1,\n respectively, in almost all contexts, the exception being\n that when converted to a string, the strings ``"False"`` or\n ``"True"`` are returned, respectively.\n\n The rules for integer representation are intended to give the\n most meaningful interpretation of shift and mask operations\n involving negative integers.\n\n ``numbers.Real`` (``float``)\n These represent machine-level double precision floating point\n numbers. You are at the mercy of the underlying machine\n architecture (and C or Java implementation) for the accepted\n range and handling of overflow. Python does not support single-\n precision floating point numbers; the savings in processor and\n memory usage that are usually the reason for using these is\n dwarfed by the overhead of using objects in Python, so there is\n no reason to complicate the language with two kinds of floating\n point numbers.\n\n ``numbers.Complex`` (``complex``)\n These represent complex numbers as a pair of machine-level\n double precision floating point numbers. The same caveats apply\n as for floating point numbers. The real and imaginary parts of a\n complex number ``z`` can be retrieved through the read-only\n attributes ``z.real`` and ``z.imag``.\n\nSequences\n These represent finite ordered sets indexed by non-negative\n numbers. The built-in function ``len()`` returns the number of\n items of a sequence. When the length of a sequence is *n*, the\n index set contains the numbers 0, 1, ..., *n*-1. Item *i* of\n sequence *a* is selected by ``a[i]``.\n\n Sequences also support slicing: ``a[i:j]`` selects all items with\n index *k* such that *i* ``<=`` *k* ``<`` *j*. When used as an\n expression, a slice is a sequence of the same type. 
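A brief doctest sketch of the sequence operations being described (the list ``a`` is illustrative): indexing starts at 0, a slice is a new sequence of the same type, and extended slicing adds a step.

   >>> a = ['p', 'y', 't', 'h', 'o', 'n']
   >>> len(a)
   6
   >>> a[0]
   'p'
   >>> a[2:5]
   ['t', 'h', 'o']
   >>> a[0:6:2]
   ['p', 't', 'o']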
This implies\n that the index set is renumbered so that it starts at 0.\n\n Some sequences also support "extended slicing" with a third "step"\n parameter: ``a[i:j:k]`` selects all items of *a* with index *x*\n where ``x = i + n*k``, *n* ``>=`` ``0`` and *i* ``<=`` *x* ``<``\n *j*.\n\n Sequences are distinguished according to their mutability:\n\n Immutable sequences\n An object of an immutable sequence type cannot change once it is\n created. (If the object contains references to other objects,\n these other objects may be mutable and may be changed; however,\n the collection of objects directly referenced by an immutable\n object cannot change.)\n\n The following types are immutable sequences:\n\n Strings\n A string is a sequence of values that represent Unicode\n codepoints. All the codepoints in range ``U+0000 - U+10FFFF``\n can be represented in a string. Python doesn\'t have a\n ``chr`` type, and every character in the string is\n represented as a string object with length ``1``. The built-\n in function ``ord()`` converts a character to its codepoint\n (as an integer); ``chr()`` converts an integer in range ``0 -\n 10FFFF`` to the corresponding character. ``str.encode()`` can\n be used to convert a ``str`` to ``bytes`` using the given\n encoding, and ``bytes.decode()`` can be used to achieve the\n opposite.\n\n Tuples\n The items of a tuple are arbitrary Python objects. Tuples of\n two or more items are formed by comma-separated lists of\n expressions. A tuple of one item (a \'singleton\') can be\n formed by affixing a comma to an expression (an expression by\n itself does not create a tuple, since parentheses must be\n usable for grouping of expressions). An empty tuple can be\n formed by an empty pair of parentheses.\n\n Bytes\n A bytes object is an immutable array. The items are 8-bit\n bytes, represented by integers in the range 0 <= x < 256.\n Bytes literals (like ``b\'abc\'`` and the built-in function\n ``bytes()`` can be used to construct bytes objects. Also,\n bytes objects can be decoded to strings via the ``decode()``\n method.\n\n Mutable sequences\n Mutable sequences can be changed after they are created. The\n subscription and slicing notations can be used as the target of\n assignment and ``del`` (delete) statements.\n\n There are currently two intrinsic mutable sequence types:\n\n Lists\n The items of a list are arbitrary Python objects. Lists are\n formed by placing a comma-separated list of expressions in\n square brackets. (Note that there are no special cases needed\n to form lists of length 0 or 1.)\n\n Byte Arrays\n A bytearray object is a mutable array. They are created by\n the built-in ``bytearray()`` constructor. Aside from being\n mutable (and hence unhashable), byte arrays otherwise provide\n the same interface and functionality as immutable bytes\n objects.\n\n The extension module ``array`` provides an additional example of\n a mutable sequence type, as does the ``collections`` module.\n\nSet types\n These represent unordered, finite sets of unique, immutable\n objects. As such, they cannot be indexed by any subscript. However,\n they can be iterated over, and the built-in function ``len()``\n returns the number of items in a set. Common uses for sets are fast\n membership testing, removing duplicates from a sequence, and\n computing mathematical operations such as intersection, union,\n difference, and symmetric difference.\n\n For set elements, the same immutability rules apply as for\n dictionary keys. 
Note that numeric types obey the normal rules for\n numeric comparison: if two numbers compare equal (e.g., ``1`` and\n ``1.0``), only one of them can be contained in a set.\n\n There are currently two intrinsic set types:\n\n Sets\n These represent a mutable set. They are created by the built-in\n ``set()`` constructor and can be modified afterwards by several\n methods, such as ``add()``.\n\n Frozen sets\n These represent an immutable set. They are created by the\n built-in ``frozenset()`` constructor. As a frozenset is\n immutable and *hashable*, it can be used again as an element of\n another set, or as a dictionary key.\n\nMappings\n These represent finite sets of objects indexed by arbitrary index\n sets. The subscript notation ``a[k]`` selects the item indexed by\n ``k`` from the mapping ``a``; this can be used in expressions and\n as the target of assignments or ``del`` statements. The built-in\n function ``len()`` returns the number of items in a mapping.\n\n There is currently a single intrinsic mapping type:\n\n Dictionaries\n These represent finite sets of objects indexed by nearly\n arbitrary values. The only types of values not acceptable as\n keys are values containing lists or dictionaries or other\n mutable types that are compared by value rather than by object\n identity, the reason being that the efficient implementation of\n dictionaries requires a key\'s hash value to remain constant.\n Numeric types used for keys obey the normal rules for numeric\n comparison: if two numbers compare equal (e.g., ``1`` and\n ``1.0``) then they can be used interchangeably to index the same\n dictionary entry.\n\n Dictionaries are mutable; they can be created by the ``{...}``\n notation (see section *Dictionary displays*).\n\n The extension modules ``dbm.ndbm`` and ``dbm.gnu`` provide\n additional examples of mapping types, as does the\n ``collections`` module.\n\nCallable types\n These are the types to which the function call operation (see\n section *Calls*) can be applied:\n\n User-defined functions\n A user-defined function object is created by a function\n definition (see section *Function definitions*). It should be\n called with an argument list containing the same number of items\n as the function\'s formal parameter list.\n\n Special attributes:\n\n +---------------------------+---------------------------------+-------------+\n | Attribute | Meaning | |\n +===========================+=================================+=============+\n | ``__doc__`` | The function\'s documentation | Writable |\n | | string, or ``None`` if | |\n | | unavailable | |\n +---------------------------+---------------------------------+-------------+\n | ``__name__`` | The function\'s name | Writable |\n +---------------------------+---------------------------------+-------------+\n | ``__qualname__`` | The function\'s *qualified name* | Writable |\n | | New in version 3.3. | |\n +---------------------------+---------------------------------+-------------+\n | ``__module__`` | The name of the module the | Writable |\n | | function was defined in, or | |\n | | ``None`` if unavailable. 
| |\n +---------------------------+---------------------------------+-------------+\n | ``__defaults__`` | A tuple containing default | Writable |\n | | argument values for those | |\n | | arguments that have defaults, | |\n | | or ``None`` if no arguments | |\n | | have a default value | |\n +---------------------------+---------------------------------+-------------+\n | ``__code__`` | The code object representing | Writable |\n | | the compiled function body. | |\n +---------------------------+---------------------------------+-------------+\n | ``__globals__`` | A reference to the dictionary | Read-only |\n | | that holds the function\'s | |\n | | global variables --- the global | |\n | | namespace of the module in | |\n | | which the function was defined. | |\n +---------------------------+---------------------------------+-------------+\n | ``__dict__`` | The namespace supporting | Writable |\n | | arbitrary function attributes. | |\n +---------------------------+---------------------------------+-------------+\n | ``__closure__`` | ``None`` or a tuple of cells | Read-only |\n | | that contain bindings for the | |\n | | function\'s free variables. | |\n +---------------------------+---------------------------------+-------------+\n | ``__annotations__`` | A dict containing annotations | Writable |\n | | of parameters. The keys of the | |\n | | dict are the parameter names, | |\n | | or ``\'return\'`` for the return | |\n | | annotation, if provided. | |\n +---------------------------+---------------------------------+-------------+\n | ``__kwdefaults__`` | A dict containing defaults for | Writable |\n | | keyword-only parameters. | |\n +---------------------------+---------------------------------+-------------+\n\n Most of the attributes labelled "Writable" check the type of the\n assigned value.\n\n Function objects also support getting and setting arbitrary\n attributes, which can be used, for example, to attach metadata\n to functions. Regular attribute dot-notation is used to get and\n set such attributes. *Note that the current implementation only\n supports function attributes on user-defined functions. Function\n attributes on built-in functions may be supported in the\n future.*\n\n Additional information about a function\'s definition can be\n retrieved from its code object; see the description of internal\n types below.\n\n Instance methods\n An instance method object combines a class, a class instance and\n any callable object (normally a user-defined function).\n\n Special read-only attributes: ``__self__`` is the class instance\n object, ``__func__`` is the function object; ``__doc__`` is the\n method\'s documentation (same as ``__func__.__doc__``);\n ``__name__`` is the method name (same as ``__func__.__name__``);\n ``__module__`` is the name of the module the method was defined\n in, or ``None`` if unavailable.\n\n Methods also support accessing (but not setting) the arbitrary\n function attributes on the underlying function object.\n\n User-defined method objects may be created when getting an\n attribute of a class (perhaps via an instance of that class), if\n that attribute is a user-defined function object or a class\n method object.\n\n When an instance method object is created by retrieving a user-\n defined function object from a class via one of its instances,\n its ``__self__`` attribute is the instance, and the method\n object is said to be bound. 
The new method\'s ``__func__``\n attribute is the original function object.\n\n When a user-defined method object is created by retrieving\n another method object from a class or instance, the behaviour is\n the same as for a function object, except that the ``__func__``\n attribute of the new instance is not the original method object\n but its ``__func__`` attribute.\n\n When an instance method object is created by retrieving a class\n method object from a class or instance, its ``__self__``\n attribute is the class itself, and its ``__func__`` attribute is\n the function object underlying the class method.\n\n When an instance method object is called, the underlying\n function (``__func__``) is called, inserting the class instance\n (``__self__``) in front of the argument list. For instance,\n when ``C`` is a class which contains a definition for a function\n ``f()``, and ``x`` is an instance of ``C``, calling ``x.f(1)``\n is equivalent to calling ``C.f(x, 1)``.\n\n When an instance method object is derived from a class method\n object, the "class instance" stored in ``__self__`` will\n actually be the class itself, so that calling either ``x.f(1)``\n or ``C.f(1)`` is equivalent to calling ``f(C,1)`` where ``f`` is\n the underlying function.\n\n Note that the transformation from function object to instance\n method object happens each time the attribute is retrieved from\n the instance. In some cases, a fruitful optimization is to\n assign the attribute to a local variable and call that local\n variable. Also notice that this transformation only happens for\n user-defined functions; other callable objects (and all non-\n callable objects) are retrieved without transformation. It is\n also important to note that user-defined functions which are\n attributes of a class instance are not converted to bound\n methods; this *only* happens when the function is an attribute\n of the class.\n\n Generator functions\n A function or method which uses the ``yield`` statement (see\n section *The yield statement*) is called a *generator function*.\n Such a function, when called, always returns an iterator object\n which can be used to execute the body of the function: calling\n the iterator\'s ``__next__()`` method will cause the function to\n execute until it provides a value using the ``yield`` statement.\n When the function executes a ``return`` statement or falls off\n the end, a ``StopIteration`` exception is raised and the\n iterator will have reached the end of the set of values to be\n returned.\n\n Built-in functions\n A built-in function object is a wrapper around a C function.\n Examples of built-in functions are ``len()`` and ``math.sin()``\n (``math`` is a standard built-in module). The number and type of\n the arguments are determined by the C function. Special read-\n only attributes: ``__doc__`` is the function\'s documentation\n string, or ``None`` if unavailable; ``__name__`` is the\n function\'s name; ``__self__`` is set to ``None`` (but see the\n next item); ``__module__`` is the name of the module the\n function was defined in or ``None`` if unavailable.\n\n Built-in methods\n This is really a different disguise of a built-in function, this\n time containing an object passed to the C function as an\n implicit extra argument. An example of a built-in method is\n ``alist.append()``, assuming *alist* is a list object. In this\n case, the special read-only attribute ``__self__`` is set to the\n object denoted by *alist*.\n\n Classes\n Classes are callable. 
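A short sketch of the instance-method transformation described above (the class ``C`` and method ``f`` are hypothetical): retrieving the attribute through an instance produces a bound method whose ``__self__`` is the instance and whose ``__func__`` is the original function.

   >>> class C:
   ...     def f(self, x):
   ...         return x * 2
   ...
   >>> obj = C()
   >>> m = obj.f
   >>> m.__self__ is obj
   True
   >>> m.__func__ is C.f
   True
   >>> m(3) == C.f(obj, 3)
   True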
These objects normally act as factories\n for new instances of themselves, but variations are possible for\n class types that override ``__new__()``. The arguments of the\n call are passed to ``__new__()`` and, in the typical case, to\n ``__init__()`` to initialize the new instance.\n\n Class Instances\n Instances of arbitrary classes can be made callable by defining\n a ``__call__()`` method in their class.\n\nModules\n Modules are imported by the ``import`` statement (see section *The\n import statement*). A module object has a namespace implemented by\n a dictionary object (this is the dictionary referenced by the\n __globals__ attribute of functions defined in the module).\n Attribute references are translated to lookups in this dictionary,\n e.g., ``m.x`` is equivalent to ``m.__dict__["x"]``. A module object\n does not contain the code object used to initialize the module\n (since it isn\'t needed once the initialization is done).\n\n Attribute assignment updates the module\'s namespace dictionary,\n e.g., ``m.x = 1`` is equivalent to ``m.__dict__["x"] = 1``.\n\n Special read-only attribute: ``__dict__`` is the module\'s namespace\n as a dictionary object.\n\n **CPython implementation detail:** Because of the way CPython\n clears module dictionaries, the module dictionary will be cleared\n when the module falls out of scope even if the dictionary still has\n live references. To avoid this, copy the dictionary or keep the\n module around while using its dictionary directly.\n\n Predefined (writable) attributes: ``__name__`` is the module\'s\n name; ``__doc__`` is the module\'s documentation string, or ``None``\n if unavailable; ``__file__`` is the pathname of the file from which\n the module was loaded, if it was loaded from a file. The\n ``__file__`` attribute is not present for C modules that are\n statically linked into the interpreter; for extension modules\n loaded dynamically from a shared library, it is the pathname of the\n shared library file.\n\nCustom classes\n Custom class types are typically created by class definitions (see\n section *Class definitions*). A class has a namespace implemented\n by a dictionary object. Class attribute references are translated\n to lookups in this dictionary, e.g., ``C.x`` is translated to\n ``C.__dict__["x"]`` (although there are a number of hooks which\n allow for other means of locating attributes). When the attribute\n name is not found there, the attribute search continues in the base\n classes. This search of the base classes uses the C3 method\n resolution order which behaves correctly even in the presence of\n \'diamond\' inheritance structures where there are multiple\n inheritance paths leading back to a common ancestor. Additional\n details on the C3 MRO used by Python can be found in the\n documentation accompanying the 2.3 release at\n http://www.python.org/download/releases/2.3/mro/.\n\n When a class attribute reference (for class ``C``, say) would yield\n a class method object, it is transformed into an instance method\n object whose ``__self__`` attributes is ``C``. When it would yield\n a static method object, it is transformed into the object wrapped\n by the static method object. 
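The static method and class method retrieval rules above can be sketched as follows (the class ``C`` is hypothetical): the wrapped function is returned unchanged for a static method, while a class method binds to the class itself.

   >>> class C:
   ...     @staticmethod
   ...     def s():
   ...         return 'plain result'
   ...     @classmethod
   ...     def c(cls):
   ...         return cls.__name__
   ...
   >>> C.s()
   'plain result'
   >>> C.c.__self__ is C
   True
   >>> C.c()
   'C'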
See section *Implementing Descriptors*\n for another way in which attributes retrieved from a class may\n differ from those actually contained in its ``__dict__``.\n\n Class attribute assignments update the class\'s dictionary, never\n the dictionary of a base class.\n\n A class object can be called (see above) to yield a class instance\n (see below).\n\n Special attributes: ``__name__`` is the class name; ``__module__``\n is the module name in which the class was defined; ``__dict__`` is\n the dictionary containing the class\'s namespace; ``__bases__`` is a\n tuple (possibly empty or a singleton) containing the base classes,\n in the order of their occurrence in the base class list;\n ``__doc__`` is the class\'s documentation string, or None if\n undefined.\n\nClass instances\n A class instance is created by calling a class object (see above).\n A class instance has a namespace implemented as a dictionary which\n is the first place in which attribute references are searched.\n When an attribute is not found there, and the instance\'s class has\n an attribute by that name, the search continues with the class\n attributes. If a class attribute is found that is a user-defined\n function object, it is transformed into an instance method object\n whose ``__self__`` attribute is the instance. Static method and\n class method objects are also transformed; see above under\n "Classes". See section *Implementing Descriptors* for another way\n in which attributes of a class retrieved via its instances may\n differ from the objects actually stored in the class\'s\n ``__dict__``. If no class attribute is found, and the object\'s\n class has a ``__getattr__()`` method, that is called to satisfy the\n lookup.\n\n Attribute assignments and deletions update the instance\'s\n dictionary, never a class\'s dictionary. If the class has a\n ``__setattr__()`` or ``__delattr__()`` method, this is called\n instead of updating the instance dictionary directly.\n\n Class instances can pretend to be numbers, sequences, or mappings\n if they have methods with certain special names. See section\n *Special method names*.\n\n Special attributes: ``__dict__`` is the attribute dictionary;\n ``__class__`` is the instance\'s class.\n\nI/O objects (also known as file objects)\n A *file object* represents an open file. Various shortcuts are\n available to create file objects: the ``open()`` built-in function,\n and also ``os.popen()``, ``os.fdopen()``, and the ``makefile()``\n method of socket objects (and perhaps by other functions or methods\n provided by extension modules).\n\n The objects ``sys.stdin``, ``sys.stdout`` and ``sys.stderr`` are\n initialized to file objects corresponding to the interpreter\'s\n standard input, output and error streams; they are all open in text\n mode and therefore follow the interface defined by the\n ``io.TextIOBase`` abstract class.\n\nInternal types\n A few types used internally by the interpreter are exposed to the\n user. Their definitions may change with future versions of the\n interpreter, but they are mentioned here for completeness.\n\n Code objects\n Code objects represent *byte-compiled* executable Python code,\n or *bytecode*. 
The difference between a code object and a\n function object is that the function object contains an explicit\n reference to the function\'s globals (the module in which it was\n defined), while a code object contains no context; also the\n default argument values are stored in the function object, not\n in the code object (because they represent values calculated at\n run-time). Unlike function objects, code objects are immutable\n and contain no references (directly or indirectly) to mutable\n objects.\n\n Special read-only attributes: ``co_name`` gives the function\n name; ``co_argcount`` is the number of positional arguments\n (including arguments with default values); ``co_nlocals`` is the\n number of local variables used by the function (including\n arguments); ``co_varnames`` is a tuple containing the names of\n the local variables (starting with the argument names);\n ``co_cellvars`` is a tuple containing the names of local\n variables that are referenced by nested functions;\n ``co_freevars`` is a tuple containing the names of free\n variables; ``co_code`` is a string representing the sequence of\n bytecode instructions; ``co_consts`` is a tuple containing the\n literals used by the bytecode; ``co_names`` is a tuple\n containing the names used by the bytecode; ``co_filename`` is\n the filename from which the code was compiled;\n ``co_firstlineno`` is the first line number of the function;\n ``co_lnotab`` is a string encoding the mapping from bytecode\n offsets to line numbers (for details see the source code of the\n interpreter); ``co_stacksize`` is the required stack size\n (including local variables); ``co_flags`` is an integer encoding\n a number of flags for the interpreter.\n\n The following flag bits are defined for ``co_flags``: bit\n ``0x04`` is set if the function uses the ``*arguments`` syntax\n to accept an arbitrary number of positional arguments; bit\n ``0x08`` is set if the function uses the ``**keywords`` syntax\n to accept arbitrary keyword arguments; bit ``0x20`` is set if\n the function is a generator.\n\n Future feature declarations (``from __future__ import\n division``) also use bits in ``co_flags`` to indicate whether a\n code object was compiled with a particular feature enabled: bit\n ``0x2000`` is set if the function was compiled with future\n division enabled; bits ``0x10`` and ``0x1000`` were used in\n earlier versions of Python.\n\n Other bits in ``co_flags`` are reserved for internal use.\n\n If a code object represents a function, the first item in\n ``co_consts`` is the documentation string of the function, or\n ``None`` if undefined.\n\n Frame objects\n Frame objects represent execution frames. They may occur in\n traceback objects (see below).\n\n Special read-only attributes: ``f_back`` is to the previous\n stack frame (towards the caller), or ``None`` if this is the\n bottom stack frame; ``f_code`` is the code object being executed\n in this frame; ``f_locals`` is the dictionary used to look up\n local variables; ``f_globals`` is used for global variables;\n ``f_builtins`` is used for built-in (intrinsic) names;\n ``f_lasti`` gives the precise instruction (this is an index into\n the bytecode string of the code object).\n\n Special writable attributes: ``f_trace``, if not ``None``, is a\n function called at the start of each source code line (this is\n used by the debugger); ``f_lineno`` is the current line number\n of the frame --- writing to this from within a trace function\n jumps to the given line (only for the bottom-most frame). 
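A minimal sketch of the code-object attributes listed above (the function ``f`` is illustrative); note that default values live on the function object rather than on its code object.

   >>> def f(a, b=1):
   ...     c = a + b
   ...     return c
   ...
   >>> f.__code__.co_name
   'f'
   >>> f.__code__.co_argcount
   2
   >>> f.__code__.co_varnames
   ('a', 'b', 'c')
   >>> f.__defaults__
   (1,)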
A\n debugger can implement a Jump command (aka Set Next Statement)\n by writing to f_lineno.\n\n Traceback objects\n Traceback objects represent a stack trace of an exception. A\n traceback object is created when an exception occurs. When the\n search for an exception handler unwinds the execution stack, at\n each unwound level a traceback object is inserted in front of\n the current traceback. When an exception handler is entered,\n the stack trace is made available to the program. (See section\n *The try statement*.) It is accessible as the third item of the\n tuple returned by ``sys.exc_info()``. When the program contains\n no suitable handler, the stack trace is written (nicely\n formatted) to the standard error stream; if the interpreter is\n interactive, it is also made available to the user as\n ``sys.last_traceback``.\n\n Special read-only attributes: ``tb_next`` is the next level in\n the stack trace (towards the frame where the exception\n occurred), or ``None`` if there is no next level; ``tb_frame``\n points to the execution frame of the current level;\n ``tb_lineno`` gives the line number where the exception\n occurred; ``tb_lasti`` indicates the precise instruction. The\n line number and last instruction in the traceback may differ\n from the line number of its frame object if the exception\n occurred in a ``try`` statement with no matching except clause\n or with a finally clause.\n\n Slice objects\n Slice objects are used to represent slices for ``__getitem__()``\n methods. They are also created by the built-in ``slice()``\n function.\n\n Special read-only attributes: ``start`` is the lower bound;\n ``stop`` is the upper bound; ``step`` is the step value; each is\n ``None`` if omitted. These attributes can have any type.\n\n Slice objects support one method:\n\n slice.indices(self, length)\n\n This method takes a single integer argument *length* and\n computes information about the slice that the slice object\n would describe if applied to a sequence of *length* items.\n It returns a tuple of three integers; respectively these are\n the *start* and *stop* indices and the *step* or stride\n length of the slice. Missing or out-of-bounds indices are\n handled in a manner consistent with regular slices.\n\n Static method objects\n Static method objects provide a way of defeating the\n transformation of function objects to method objects described\n above. A static method object is a wrapper around any other\n object, usually a user-defined method object. When a static\n method object is retrieved from a class or a class instance, the\n object actually returned is the wrapped object, which is not\n subject to any further transformation. Static method objects are\n not themselves callable, although the objects they wrap usually\n are. Static method objects are created by the built-in\n ``staticmethod()`` constructor.\n\n Class method objects\n A class method object, like a static method object, is a wrapper\n around another object that alters the way in which that object\n is retrieved from classes and class instances. The behaviour of\n class method objects upon such retrieval is described above,\n under "User-defined methods". Class method objects are created\n by the built-in ``classmethod()`` constructor.\n', 'typesfunctions': '\nFunctions\n*********\n\nFunction objects are created by function definitions. 
The only\noperation on a function object is to call it: ``func(argument-list)``.\n\nThere are really two flavors of function objects: built-in functions\nand user-defined functions. Both support the same operation (to call\nthe function), but the implementation is different, hence the\ndifferent object types.\n\nSee *Function definitions* for more information.\n', - 'typesmapping': '\nMapping Types --- ``dict``\n**************************\n\nA *mapping* object maps *hashable* values to arbitrary objects.\nMappings are mutable objects. There is currently only one standard\nmapping type, the *dictionary*. (For other containers see the built\nin ``list``, ``set``, and ``tuple`` classes, and the ``collections``\nmodule.)\n\nA dictionary\'s keys are *almost* arbitrary values. Values that are\nnot *hashable*, that is, values containing lists, dictionaries or\nother mutable types (that are compared by value rather than by object\nidentity) may not be used as keys. Numeric types used for keys obey\nthe normal rules for numeric comparison: if two numbers compare equal\n(such as ``1`` and ``1.0``) then they can be used interchangeably to\nindex the same dictionary entry. (Note however, that since computers\nstore floating-point numbers as approximations it is usually unwise to\nuse them as dictionary keys.)\n\nDictionaries can be created by placing a comma-separated list of\n``key: value`` pairs within braces, for example: ``{\'jack\': 4098,\n\'sjoerd\': 4127}`` or ``{4098: \'jack\', 4127: \'sjoerd\'}``, or by the\n``dict`` constructor.\n\nclass class dict([arg])\n\n Return a new dictionary initialized from an optional positional\n argument or from a set of keyword arguments. If no arguments are\n given, return a new empty dictionary. If the positional argument\n *arg* is a mapping object, return a dictionary mapping the same\n keys to the same values as does the mapping object. Otherwise the\n positional argument must be a sequence, a container that supports\n iteration, or an iterator object. The elements of the argument\n must each also be of one of those kinds, and each must in turn\n contain exactly two objects. The first is used as a key in the new\n dictionary, and the second as the key\'s value. If a given key is\n seen more than once, the last value associated with it is retained\n in the new dictionary.\n\n If keyword arguments are given, the keywords themselves with their\n associated values are added as items to the dictionary. If a key\n is specified both in the positional argument and as a keyword\n argument, the value associated with the keyword is retained in the\n dictionary. For example, these all return a dictionary equal to\n ``{"one": 1, "two": 2}``:\n\n * ``dict(one=1, two=2)``\n\n * ``dict({\'one\': 1, \'two\': 2})``\n\n * ``dict(zip((\'one\', \'two\'), (1, 2)))``\n\n * ``dict([[\'two\', 2], [\'one\', 1]])``\n\n The first example only works for keys that are valid Python\n identifiers; the others work with any valid keys.\n\n These are the operations that dictionaries support (and therefore,\n custom mapping types should support too):\n\n len(d)\n\n Return the number of items in the dictionary *d*.\n\n d[key]\n\n Return the item of *d* with key *key*. Raises a ``KeyError`` if\n *key* is not in the map.\n\n If a subclass of dict defines a method ``__missing__()``, if the\n key *key* is not present, the ``d[key]`` operation calls that\n method with the key *key* as argument. 
The ``d[key]`` operation\n then returns or raises whatever is returned or raised by the\n ``__missing__(key)`` call if the key is not present. No other\n operations or methods invoke ``__missing__()``. If\n ``__missing__()`` is not defined, ``KeyError`` is raised.\n ``__missing__()`` must be a method; it cannot be an instance\n variable:\n\n >>> class Counter(dict):\n ... def __missing__(self, key):\n ... return 0\n >>> c = Counter()\n >>> c[\'red\']\n 0\n >>> c[\'red\'] += 1\n >>> c[\'red\']\n 1\n\n See ``collections.Counter`` for a complete implementation\n including other methods helpful for accumulating and managing\n tallies.\n\n d[key] = value\n\n Set ``d[key]`` to *value*.\n\n del d[key]\n\n Remove ``d[key]`` from *d*. Raises a ``KeyError`` if *key* is\n not in the map.\n\n key in d\n\n Return ``True`` if *d* has a key *key*, else ``False``.\n\n key not in d\n\n Equivalent to ``not key in d``.\n\n iter(d)\n\n Return an iterator over the keys of the dictionary. This is a\n shortcut for ``iter(d.keys())``.\n\n clear()\n\n Remove all items from the dictionary.\n\n copy()\n\n Return a shallow copy of the dictionary.\n\n classmethod fromkeys(seq[, value])\n\n Create a new dictionary with keys from *seq* and values set to\n *value*.\n\n ``fromkeys()`` is a class method that returns a new dictionary.\n *value* defaults to ``None``.\n\n get(key[, default])\n\n Return the value for *key* if *key* is in the dictionary, else\n *default*. If *default* is not given, it defaults to ``None``,\n so that this method never raises a ``KeyError``.\n\n items()\n\n Return a new view of the dictionary\'s items (``(key, value)``\n pairs). See below for documentation of view objects.\n\n keys()\n\n Return a new view of the dictionary\'s keys. See below for\n documentation of view objects.\n\n pop(key[, default])\n\n If *key* is in the dictionary, remove it and return its value,\n else return *default*. If *default* is not given and *key* is\n not in the dictionary, a ``KeyError`` is raised.\n\n popitem()\n\n Remove and return an arbitrary ``(key, value)`` pair from the\n dictionary.\n\n ``popitem()`` is useful to destructively iterate over a\n dictionary, as often used in set algorithms. If the dictionary\n is empty, calling ``popitem()`` raises a ``KeyError``.\n\n setdefault(key[, default])\n\n If *key* is in the dictionary, return its value. If not, insert\n *key* with a value of *default* and return *default*. *default*\n defaults to ``None``.\n\n update([other])\n\n Update the dictionary with the key/value pairs from *other*,\n overwriting existing keys. Return ``None``.\n\n ``update()`` accepts either another dictionary object or an\n iterable of key/value pairs (as tuples or other iterables of\n length two). If keyword arguments are specified, the dictionary\n is then updated with those key/value pairs: ``d.update(red=1,\n blue=2)``.\n\n values()\n\n Return a new view of the dictionary\'s values. See below for\n documentation of view objects.\n\n\nDictionary view objects\n=======================\n\nThe objects returned by ``dict.keys()``, ``dict.values()`` and\n``dict.items()`` are *view objects*. 
They provide a dynamic view on\nthe dictionary\'s entries, which means that when the dictionary\nchanges, the view reflects these changes.\n\nDictionary views can be iterated over to yield their respective data,\nand support membership tests:\n\nlen(dictview)\n\n Return the number of entries in the dictionary.\n\niter(dictview)\n\n Return an iterator over the keys, values or items (represented as\n tuples of ``(key, value)``) in the dictionary.\n\n Keys and values are iterated over in an arbitrary order which is\n non-random, varies across Python implementations, and depends on\n the dictionary\'s history of insertions and deletions. If keys,\n values and items views are iterated over with no intervening\n modifications to the dictionary, the order of items will directly\n correspond. This allows the creation of ``(value, key)`` pairs\n using ``zip()``: ``pairs = zip(d.values(), d.keys())``. Another\n way to create the same list is ``pairs = [(v, k) for (k, v) in\n d.items()]``.\n\n Iterating views while adding or deleting entries in the dictionary\n may raise a ``RuntimeError`` or fail to iterate over all entries.\n\nx in dictview\n\n Return ``True`` if *x* is in the underlying dictionary\'s keys,\n values or items (in the latter case, *x* should be a ``(key,\n value)`` tuple).\n\nKeys views are set-like since their entries are unique and hashable.\nIf all values are hashable, so that ``(key, value)`` pairs are unique\nand hashable, then the items view is also set-like. (Values views are\nnot treated as set-like since the entries are generally not unique.)\nFor set-like views, all of the operations defined for the abstract\nbase class ``collections.Set`` are available (for example, ``==``,\n``<``, or ``^``).\n\nAn example of dictionary view usage:\n\n >>> dishes = {\'eggs\': 2, \'sausage\': 1, \'bacon\': 1, \'spam\': 500}\n >>> keys = dishes.keys()\n >>> values = dishes.values()\n\n >>> # iteration\n >>> n = 0\n >>> for val in values:\n ... n += val\n >>> print(n)\n 504\n\n >>> # keys and values are iterated over in the same order\n >>> list(keys)\n [\'eggs\', \'bacon\', \'sausage\', \'spam\']\n >>> list(values)\n [2, 1, 1, 500]\n\n >>> # view objects are dynamic and reflect dict changes\n >>> del dishes[\'eggs\']\n >>> del dishes[\'sausage\']\n >>> list(keys)\n [\'spam\', \'bacon\']\n\n >>> # set operations\n >>> keys & {\'eggs\', \'bacon\', \'salad\'}\n {\'bacon\'}\n >>> keys ^ {\'sausage\', \'juice\'}\n {\'juice\', \'sausage\', \'bacon\', \'spam\'}\n', + 'typesmapping': '\nMapping Types --- ``dict``\n**************************\n\nA *mapping* object maps *hashable* values to arbitrary objects.\nMappings are mutable objects. There is currently only one standard\nmapping type, the *dictionary*. (For other containers see the built\nin ``list``, ``set``, and ``tuple`` classes, and the ``collections``\nmodule.)\n\nA dictionary\'s keys are *almost* arbitrary values. Values that are\nnot *hashable*, that is, values containing lists, dictionaries or\nother mutable types (that are compared by value rather than by object\nidentity) may not be used as keys. Numeric types used for keys obey\nthe normal rules for numeric comparison: if two numbers compare equal\n(such as ``1`` and ``1.0``) then they can be used interchangeably to\nindex the same dictionary entry. 
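A minimal illustration of the numeric-key behaviour just described, assuming an interactive session:

   >>> d = {1: 'one'}
   >>> d[1.0]                        # 1 == 1.0, so both index the same entry
   'one'
   >>> d[2.0] = 'two'
   >>> d[2]
   'two'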
(Note however, that since computers\nstore floating-point numbers as approximations it is usually unwise to\nuse them as dictionary keys.)\n\nDictionaries can be created by placing a comma-separated list of\n``key: value`` pairs within braces, for example: ``{\'jack\': 4098,\n\'sjoerd\': 4127}`` or ``{4098: \'jack\', 4127: \'sjoerd\'}``, or by the\n``dict`` constructor.\n\nclass class dict([arg])\n\n Return a new dictionary initialized from an optional positional\n argument or from a set of keyword arguments. If no arguments are\n given, return a new empty dictionary. If the positional argument\n *arg* is a mapping object, return a dictionary mapping the same\n keys to the same values as does the mapping object. Otherwise the\n positional argument must be a sequence, a container that supports\n iteration, or an iterator object. The elements of the argument\n must each also be of one of those kinds, and each must in turn\n contain exactly two objects. The first is used as a key in the new\n dictionary, and the second as the key\'s value. If a given key is\n seen more than once, the last value associated with it is retained\n in the new dictionary.\n\n If keyword arguments are given, the keywords themselves with their\n associated values are added as items to the dictionary. If a key\n is specified both in the positional argument and as a keyword\n argument, the value associated with the keyword is retained in the\n dictionary. For example, these all return a dictionary equal to\n ``{"one": 1, "two": 2}``:\n\n * ``dict(one=1, two=2)``\n\n * ``dict({\'one\': 1, \'two\': 2})``\n\n * ``dict(zip((\'one\', \'two\'), (1, 2)))``\n\n * ``dict([[\'two\', 2], [\'one\', 1]])``\n\n The first example only works for keys that are valid Python\n identifiers; the others work with any valid keys.\n\n These are the operations that dictionaries support (and therefore,\n custom mapping types should support too):\n\n len(d)\n\n Return the number of items in the dictionary *d*.\n\n d[key]\n\n Return the item of *d* with key *key*. Raises a ``KeyError`` if\n *key* is not in the map.\n\n If a subclass of dict defines a method ``__missing__()``, if the\n key *key* is not present, the ``d[key]`` operation calls that\n method with the key *key* as argument. The ``d[key]`` operation\n then returns or raises whatever is returned or raised by the\n ``__missing__(key)`` call if the key is not present. No other\n operations or methods invoke ``__missing__()``. If\n ``__missing__()`` is not defined, ``KeyError`` is raised.\n ``__missing__()`` must be a method; it cannot be an instance\n variable:\n\n >>> class Counter(dict):\n ... def __missing__(self, key):\n ... return 0\n >>> c = Counter()\n >>> c[\'red\']\n 0\n >>> c[\'red\'] += 1\n >>> c[\'red\']\n 1\n\n See ``collections.Counter`` for a complete implementation\n including other methods helpful for accumulating and managing\n tallies.\n\n Changed in version 3.3: If the dict is modified during the\n lookup, a ``RuntimeError`` exception is now raised.\n\n d[key] = value\n\n Set ``d[key]`` to *value*.\n\n del d[key]\n\n Remove ``d[key]`` from *d*. Raises a ``KeyError`` if *key* is\n not in the map.\n\n key in d\n\n Return ``True`` if *d* has a key *key*, else ``False``.\n\n key not in d\n\n Equivalent to ``not key in d``.\n\n iter(d)\n\n Return an iterator over the keys of the dictionary. 
This is a\n shortcut for ``iter(d.keys())``.\n\n clear()\n\n Remove all items from the dictionary.\n\n copy()\n\n Return a shallow copy of the dictionary.\n\n classmethod fromkeys(seq[, value])\n\n Create a new dictionary with keys from *seq* and values set to\n *value*.\n\n ``fromkeys()`` is a class method that returns a new dictionary.\n *value* defaults to ``None``.\n\n get(key[, default])\n\n Return the value for *key* if *key* is in the dictionary, else\n *default*. If *default* is not given, it defaults to ``None``,\n so that this method never raises a ``KeyError``.\n\n items()\n\n Return a new view of the dictionary\'s items (``(key, value)``\n pairs). See below for documentation of view objects.\n\n keys()\n\n Return a new view of the dictionary\'s keys. See below for\n documentation of view objects.\n\n pop(key[, default])\n\n If *key* is in the dictionary, remove it and return its value,\n else return *default*. If *default* is not given and *key* is\n not in the dictionary, a ``KeyError`` is raised.\n\n popitem()\n\n Remove and return an arbitrary ``(key, value)`` pair from the\n dictionary.\n\n ``popitem()`` is useful to destructively iterate over a\n dictionary, as often used in set algorithms. If the dictionary\n is empty, calling ``popitem()`` raises a ``KeyError``.\n\n setdefault(key[, default])\n\n If *key* is in the dictionary, return its value. If not, insert\n *key* with a value of *default* and return *default*. *default*\n defaults to ``None``.\n\n update([other])\n\n Update the dictionary with the key/value pairs from *other*,\n overwriting existing keys. Return ``None``.\n\n ``update()`` accepts either another dictionary object or an\n iterable of key/value pairs (as tuples or other iterables of\n length two). If keyword arguments are specified, the dictionary\n is then updated with those key/value pairs: ``d.update(red=1,\n blue=2)``.\n\n values()\n\n Return a new view of the dictionary\'s values. See below for\n documentation of view objects.\n\n\nDictionary view objects\n=======================\n\nThe objects returned by ``dict.keys()``, ``dict.values()`` and\n``dict.items()`` are *view objects*. They provide a dynamic view on\nthe dictionary\'s entries, which means that when the dictionary\nchanges, the view reflects these changes.\n\nDictionary views can be iterated over to yield their respective data,\nand support membership tests:\n\nlen(dictview)\n\n Return the number of entries in the dictionary.\n\niter(dictview)\n\n Return an iterator over the keys, values or items (represented as\n tuples of ``(key, value)``) in the dictionary.\n\n Keys and values are iterated over in an arbitrary order which is\n non-random, varies across Python implementations, and depends on\n the dictionary\'s history of insertions and deletions. If keys,\n values and items views are iterated over with no intervening\n modifications to the dictionary, the order of items will directly\n correspond. This allows the creation of ``(value, key)`` pairs\n using ``zip()``: ``pairs = zip(d.values(), d.keys())``. 
Another\n way to create the same list is ``pairs = [(v, k) for (k, v) in\n d.items()]``.\n\n Iterating views while adding or deleting entries in the dictionary\n may raise a ``RuntimeError`` or fail to iterate over all entries.\n\nx in dictview\n\n Return ``True`` if *x* is in the underlying dictionary\'s keys,\n values or items (in the latter case, *x* should be a ``(key,\n value)`` tuple).\n\nKeys views are set-like since their entries are unique and hashable.\nIf all values are hashable, so that ``(key, value)`` pairs are unique\nand hashable, then the items view is also set-like. (Values views are\nnot treated as set-like since the entries are generally not unique.)\nFor set-like views, all of the operations defined for the abstract\nbase class ``collections.Set`` are available (for example, ``==``,\n``<``, or ``^``).\n\nAn example of dictionary view usage:\n\n >>> dishes = {\'eggs\': 2, \'sausage\': 1, \'bacon\': 1, \'spam\': 500}\n >>> keys = dishes.keys()\n >>> values = dishes.values()\n\n >>> # iteration\n >>> n = 0\n >>> for val in values:\n ... n += val\n >>> print(n)\n 504\n\n >>> # keys and values are iterated over in the same order\n >>> list(keys)\n [\'eggs\', \'bacon\', \'sausage\', \'spam\']\n >>> list(values)\n [2, 1, 1, 500]\n\n >>> # view objects are dynamic and reflect dict changes\n >>> del dishes[\'eggs\']\n >>> del dishes[\'sausage\']\n >>> list(keys)\n [\'spam\', \'bacon\']\n\n >>> # set operations\n >>> keys & {\'eggs\', \'bacon\', \'salad\'}\n {\'bacon\'}\n >>> keys ^ {\'sausage\', \'juice\'}\n {\'juice\', \'sausage\', \'bacon\', \'spam\'}\n', 'typesmethods': "\nMethods\n*******\n\nMethods are functions that are called using the attribute notation.\nThere are two flavors: built-in methods (such as ``append()`` on\nlists) and class instance methods. Built-in methods are described\nwith the types that support them.\n\nIf you access a method (a function defined in a class namespace)\nthrough an instance, you get a special object: a *bound method* (also\ncalled *instance method*) object. When called, it will add the\n``self`` argument to the argument list. Bound methods have two\nspecial read-only attributes: ``m.__self__`` is the object on which\nthe method operates, and ``m.__func__`` is the function implementing\nthe method. Calling ``m(arg-1, arg-2, ..., arg-n)`` is completely\nequivalent to calling ``m.__func__(m.__self__, arg-1, arg-2, ...,\narg-n)``.\n\nLike function objects, bound method objects support getting arbitrary\nattributes. However, since method attributes are actually stored on\nthe underlying function object (``meth.__func__``), setting method\nattributes on bound methods is disallowed. Attempting to set a method\nattribute results in a ``TypeError`` being raised. In order to set a\nmethod attribute, you need to explicitly set it on the underlying\nfunction object:\n\n class C:\n def method(self):\n pass\n\n c = C()\n c.method.__func__.whoami = 'my name is c'\n\nSee *The standard type hierarchy* for more information.\n", 'typesmodules': "\nModules\n*******\n\nThe only special operation on a module is attribute access:\n``m.name``, where *m* is a module and *name* accesses a name defined\nin *m*'s symbol table. Module attributes can be assigned to. 
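A brief sketch of module attribute assignment and ``__dict__``; the module name and attribute here are made up for illustration:

   >>> import types
   >>> m = types.ModuleType('example')   # a throwaway module object
   >>> m.answer = 42                     # attribute assignment works as described
   >>> m.__dict__['answer']
   42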
(Note\nthat the ``import`` statement is not, strictly speaking, an operation\non a module object; ``import foo`` does not require a module object\nnamed *foo* to exist, rather it requires an (external) *definition*\nfor a module named *foo* somewhere.)\n\nA special attribute of every module is ``__dict__``. This is the\ndictionary containing the module's symbol table. Modifying this\ndictionary will actually change the module's symbol table, but direct\nassignment to the ``__dict__`` attribute is not possible (you can\nwrite ``m.__dict__['a'] = 1``, which defines ``m.a`` to be ``1``, but\nyou can't write ``m.__dict__ = {}``). Modifying ``__dict__`` directly\nis not recommended.\n\nModules built into the interpreter are written like this: ````. If loaded from a file, they are written as\n````.\n", - 'typesseq': '\nSequence Types --- ``str``, ``bytes``, ``bytearray``, ``list``, ``tuple``, ``range``\n************************************************************************************\n\nThere are six sequence types: strings, byte sequences (``bytes``\nobjects), byte arrays (``bytearray`` objects), lists, tuples, and\nrange objects. For other containers see the built in ``dict`` and\n``set`` classes, and the ``collections`` module.\n\nStrings contain Unicode characters. Their literals are written in\nsingle or double quotes: ``\'xyzzy\'``, ``"frobozz"``. See *String and\nBytes literals* for more about string literals. In addition to the\nfunctionality described here, there are also string-specific methods\ndescribed in the *String Methods* section.\n\nBytes and bytearray objects contain single bytes -- the former is\nimmutable while the latter is a mutable sequence. Bytes objects can\nbe constructed the constructor, ``bytes()``, and from literals; use a\n``b`` prefix with normal string syntax: ``b\'xyzzy\'``. To construct\nbyte arrays, use the ``bytearray()`` function.\n\nWhile string objects are sequences of characters (represented by\nstrings of length 1), bytes and bytearray objects are sequences of\n*integers* (between 0 and 255), representing the ASCII value of single\nbytes. That means that for a bytes or bytearray object *b*, ``b[0]``\nwill be an integer, while ``b[0:1]`` will be a bytes or bytearray\nobject of length 1. The representation of bytes objects uses the\nliteral format (``b\'...\'``) since it is generally more useful than\ne.g. ``bytes([50, 19, 100])``. You can always convert a bytes object\ninto a list of integers using ``list(b)``.\n\nAlso, while in previous Python versions, byte strings and Unicode\nstrings could be exchanged for each other rather freely (barring\nencoding issues), strings and bytes are now completely separate\nconcepts. There\'s no implicit en-/decoding if you pass an object of\nthe wrong type. A string always compares unequal to a bytes or\nbytearray object.\n\nLists are constructed with square brackets, separating items with\ncommas: ``[a, b, c]``. Tuples are constructed by the comma operator\n(not within square brackets), with or without enclosing parentheses,\nbut an empty tuple must have the enclosing parentheses, such as ``a,\nb, c`` or ``()``. A single item tuple must have a trailing comma,\nsuch as ``(d,)``.\n\nObjects of type range are created using the ``range()`` function.\nThey don\'t support concatenation or repetition, and using ``min()`` or\n``max()`` on them is inefficient.\n\nMost sequence types support the following operations. The ``in`` and\n``not in`` operations have the same priorities as the comparison\noperations. 
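A quick demonstration of the indexing behaviour described above for bytes objects (the integers are the ASCII values of the bytes):

   >>> b = b'xyzzy'
   >>> b[0]                          # a single item is an integer
   120
   >>> b[0:1]                        # a slice is a bytes object of length 1
   b'x'
   >>> list(b)
   [120, 121, 122, 122, 121]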
The ``+`` and ``*`` operations have the same priority as\nthe corresponding numeric operations. [3] Additional methods are\nprovided for *Mutable Sequence Types*.\n\nThis table lists the sequence operations sorted in ascending priority\n(operations in the same box have the same priority). In the table,\n*s* and *t* are sequences of the same type; *n*, *i*, *j* and *k* are\nintegers.\n\n+--------------------+----------------------------------+------------+\n| Operation | Result | Notes |\n+====================+==================================+============+\n| ``x in s`` | ``True`` if an item of *s* is | (1) |\n| | equal to *x*, else ``False`` | |\n+--------------------+----------------------------------+------------+\n| ``x not in s`` | ``False`` if an item of *s* is | (1) |\n| | equal to *x*, else ``True`` | |\n+--------------------+----------------------------------+------------+\n| ``s + t`` | the concatenation of *s* and *t* | (6) |\n+--------------------+----------------------------------+------------+\n| ``s * n, n * s`` | *n* shallow copies of *s* | (2) |\n| | concatenated | |\n+--------------------+----------------------------------+------------+\n| ``s[i]`` | *i*th item of *s*, origin 0 | (3) |\n+--------------------+----------------------------------+------------+\n| ``s[i:j]`` | slice of *s* from *i* to *j* | (3)(4) |\n+--------------------+----------------------------------+------------+\n| ``s[i:j:k]`` | slice of *s* from *i* to *j* | (3)(5) |\n| | with step *k* | |\n+--------------------+----------------------------------+------------+\n| ``len(s)`` | length of *s* | |\n+--------------------+----------------------------------+------------+\n| ``min(s)`` | smallest item of *s* | |\n+--------------------+----------------------------------+------------+\n| ``max(s)`` | largest item of *s* | |\n+--------------------+----------------------------------+------------+\n| ``s.index(i)`` | index of the first occurence of | |\n| | *i* in *s* | |\n+--------------------+----------------------------------+------------+\n| ``s.count(i)`` | total number of occurences of | |\n| | *i* in *s* | |\n+--------------------+----------------------------------+------------+\n\nSequence types also support comparisons. In particular, tuples and\nlists are compared lexicographically by comparing corresponding\nelements. This means that to compare equal, every element must\ncompare equal and the two sequences must be of the same type and have\nthe same length. (For full details see *Comparisons* in the language\nreference.)\n\nNotes:\n\n1. When *s* is a string object, the ``in`` and ``not in`` operations\n act like a substring test.\n\n2. Values of *n* less than ``0`` are treated as ``0`` (which yields an\n empty sequence of the same type as *s*). Note also that the copies\n are shallow; nested structures are not copied. This often haunts\n new Python programmers; consider:\n\n >>> lists = [[]] * 3\n >>> lists\n [[], [], []]\n >>> lists[0].append(3)\n >>> lists\n [[3], [3], [3]]\n\n What has happened is that ``[[]]`` is a one-element list containing\n an empty list, so all three elements of ``[[]] * 3`` are (pointers\n to) this single empty list. Modifying any of the elements of\n ``lists`` modifies this single list. You can create a list of\n different lists this way:\n\n >>> lists = [[] for i in range(3)]\n >>> lists[0].append(3)\n >>> lists[1].append(5)\n >>> lists[2].append(7)\n >>> lists\n [[3], [5], [7]]\n\n3. 
If *i* or *j* is negative, the index is relative to the end of the\n string: ``len(s) + i`` or ``len(s) + j`` is substituted. But note\n that ``-0`` is still ``0``.\n\n4. The slice of *s* from *i* to *j* is defined as the sequence of\n items with index *k* such that ``i <= k < j``. If *i* or *j* is\n greater than ``len(s)``, use ``len(s)``. If *i* is omitted or\n ``None``, use ``0``. If *j* is omitted or ``None``, use\n ``len(s)``. If *i* is greater than or equal to *j*, the slice is\n empty.\n\n5. The slice of *s* from *i* to *j* with step *k* is defined as the\n sequence of items with index ``x = i + n*k`` such that ``0 <= n <\n (j-i)/k``. In other words, the indices are ``i``, ``i+k``,\n ``i+2*k``, ``i+3*k`` and so on, stopping when *j* is reached (but\n never including *j*). If *i* or *j* is greater than ``len(s)``,\n use ``len(s)``. If *i* or *j* are omitted or ``None``, they become\n "end" values (which end depends on the sign of *k*). Note, *k*\n cannot be zero. If *k* is ``None``, it is treated like ``1``.\n\n6. Concatenating immutable strings always results in a new object.\n This means that building up a string by repeated concatenation will\n have a quadratic runtime cost in the total string length. To get a\n linear runtime cost, you must switch to one of the alternatives\n below:\n\n * if concatenating ``str`` objects, you can build a list and use\n ``str.join()`` at the end;\n\n * if concatenating ``bytes`` objects, you can similarly use\n ``bytes.join()``, or you can do in-place concatenation with a\n ``bytearray`` object. ``bytearray`` objects are mutable and have\n an efficient overallocation mechanism.\n\n\nString Methods\n==============\n\nString objects support the methods listed below.\n\nIn addition, Python\'s strings support the sequence type methods\ndescribed in the *Sequence Types --- str, bytes, bytearray, list,\ntuple, range* section. To output formatted strings, see the *String\nFormatting* section. Also, see the ``re`` module for string functions\nbased on regular expressions.\n\nstr.capitalize()\n\n Return a copy of the string with its first character capitalized\n and the rest lowercased.\n\nstr.casefold()\n\n Return a casefolded copy of the string. Casefolded strings may be\n used for caseless matching.\n\n Casefolding is similar to lowercasing but more aggressive because\n it is intended to remove all case distinctions in a string. For\n example, the German lowercase letter ``\'\xc3\x9f\'`` is equivalent to\n ``"ss"``. Since it is already lowercase, ``lower()`` would do\n nothing to ``\'\xc3\x9f\'``; ``casefold()`` converts it to ``"ss"``.\n\n The casefolding algorithm is described in section 3.13 of the\n Unicode Standard.\n\n New in version 3.3.\n\nstr.center(width[, fillchar])\n\n Return centered in a string of length *width*. Padding is done\n using the specified *fillchar* (default is a space).\n\nstr.count(sub[, start[, end]])\n\n Return the number of non-overlapping occurrences of substring *sub*\n in the range [*start*, *end*]. Optional arguments *start* and\n *end* are interpreted as in slice notation.\n\nstr.encode(encoding="utf-8", errors="strict")\n\n Return an encoded version of the string as a bytes object. Default\n encoding is ``\'utf-8\'``. *errors* may be given to set a different\n error handling scheme. The default for *errors* is ``\'strict\'``,\n meaning that encoding errors raise a ``UnicodeError``. 
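Two small examples of points made above -- the linear-time ``str.join()`` idiom from note 6, and the UTF-8 default of ``str.encode()``:

   >>> parts = []
   >>> for word in ('spam', 'eggs', 'ham'):
   ...     parts.append(word)
   ...
   >>> ', '.join(parts)
   'spam, eggs, ham'
   >>> 'café'.encode()               # equivalent to encode('utf-8', 'strict')
   b'caf\xc3\xa9'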
Other\n possible values are ``\'ignore\'``, ``\'replace\'``,\n ``\'xmlcharrefreplace\'``, ``\'backslashreplace\'`` and any other name\n registered via ``codecs.register_error()``, see section *Codec Base\n Classes*. For a list of possible encodings, see section *Standard\n Encodings*.\n\n Changed in version 3.1: Support for keyword arguments added.\n\nstr.endswith(suffix[, start[, end]])\n\n Return ``True`` if the string ends with the specified *suffix*,\n otherwise return ``False``. *suffix* can also be a tuple of\n suffixes to look for. With optional *start*, test beginning at\n that position. With optional *end*, stop comparing at that\n position.\n\nstr.expandtabs([tabsize])\n\n Return a copy of the string where all tab characters are replaced\n by zero or more spaces, depending on the current column and the\n given tab size. The column number is reset to zero after each\n newline occurring in the string. If *tabsize* is not given, a tab\n size of ``8`` characters is assumed. This doesn\'t understand other\n non-printing characters or escape sequences.\n\nstr.find(sub[, start[, end]])\n\n Return the lowest index in the string where substring *sub* is\n found, such that *sub* is contained in the slice ``s[start:end]``.\n Optional arguments *start* and *end* are interpreted as in slice\n notation. Return ``-1`` if *sub* is not found.\n\n Note: The ``find()`` method should be used only if you need to know the\n position of *sub*. To check if *sub* is a substring or not, use\n the ``in`` operator:\n\n >>> \'Py\' in \'Python\'\n True\n\nstr.format(*args, **kwargs)\n\n Perform a string formatting operation. The string on which this\n method is called can contain literal text or replacement fields\n delimited by braces ``{}``. Each replacement field contains either\n the numeric index of a positional argument, or the name of a\n keyword argument. Returns a copy of the string where each\n replacement field is replaced with the string value of the\n corresponding argument.\n\n >>> "The sum of 1 + 2 is {0}".format(1+2)\n \'The sum of 1 + 2 is 3\'\n\n See *Format String Syntax* for a description of the various\n formatting options that can be specified in format strings.\n\nstr.format_map(mapping)\n\n Similar to ``str.format(**mapping)``, except that ``mapping`` is\n used directly and not copied to a ``dict`` . This is useful if for\n example ``mapping`` is a dict subclass:\n\n >>> class Default(dict):\n ... def __missing__(self, key):\n ... return key\n ...\n >>> \'{name} was born in {country}\'.format_map(Default(name=\'Guido\'))\n \'Guido was born in country\'\n\n New in version 3.2.\n\nstr.index(sub[, start[, end]])\n\n Like ``find()``, but raise ``ValueError`` when the substring is not\n found.\n\nstr.isalnum()\n\n Return true if all characters in the string are alphanumeric and\n there is at least one character, false otherwise. A character\n ``c`` is alphanumeric if one of the following returns ``True``:\n ``c.isalpha()``, ``c.isdecimal()``, ``c.isdigit()``, or\n ``c.isnumeric()``.\n\nstr.isalpha()\n\n Return true if all characters in the string are alphabetic and\n there is at least one character, false otherwise. Alphabetic\n characters are those characters defined in the Unicode character\n database as "Letter", i.e., those with general category property\n being one of "Lm", "Lt", "Lu", "Ll", or "Lo". 
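A compact sketch of ``format()`` and ``find()`` as described above (arguments chosen arbitrarily):

   >>> '{0} and {other}'.format('spam', other='eggs')
   'spam and eggs'
   >>> 'Python'.find('th')
   2
   >>> 'Python'.find('java')
   -1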
Note that this is\n different from the "Alphabetic" property defined in the Unicode\n Standard.\n\nstr.isdecimal()\n\n Return true if all characters in the string are decimal characters\n and there is at least one character, false otherwise. Decimal\n characters are those from general category "Nd". This category\n includes digit characters, and all characters that can be used to\n form decimal-radix numbers, e.g. U+0660, ARABIC-INDIC DIGIT ZERO.\n\nstr.isdigit()\n\n Return true if all characters in the string are digits and there is\n at least one character, false otherwise. Digits include decimal\n characters and digits that need special handling, such as the\n compatibility superscript digits. Formally, a digit is a character\n that has the property value Numeric_Type=Digit or\n Numeric_Type=Decimal.\n\nstr.isidentifier()\n\n Return true if the string is a valid identifier according to the\n language definition, section *Identifiers and keywords*.\n\nstr.islower()\n\n Return true if all cased characters [4] in the string are lowercase\n and there is at least one cased character, false otherwise.\n\nstr.isnumeric()\n\n Return true if all characters in the string are numeric characters,\n and there is at least one character, false otherwise. Numeric\n characters include digit characters, and all characters that have\n the Unicode numeric value property, e.g. U+2155, VULGAR FRACTION\n ONE FIFTH. Formally, numeric characters are those with the\n property value Numeric_Type=Digit, Numeric_Type=Decimal or\n Numeric_Type=Numeric.\n\nstr.isprintable()\n\n Return true if all characters in the string are printable or the\n string is empty, false otherwise. Nonprintable characters are\n those characters defined in the Unicode character database as\n "Other" or "Separator", excepting the ASCII space (0x20) which is\n considered printable. (Note that printable characters in this\n context are those which should not be escaped when ``repr()`` is\n invoked on a string. It has no bearing on the handling of strings\n written to ``sys.stdout`` or ``sys.stderr``.)\n\nstr.isspace()\n\n Return true if there are only whitespace characters in the string\n and there is at least one character, false otherwise. Whitespace\n characters are those characters defined in the Unicode character\n database as "Other" or "Separator" and those with bidirectional\n property being one of "WS", "B", or "S".\n\nstr.istitle()\n\n Return true if the string is a titlecased string and there is at\n least one character, for example uppercase characters may only\n follow uncased characters and lowercase characters only cased ones.\n Return false otherwise.\n\nstr.isupper()\n\n Return true if all cased characters [4] in the string are uppercase\n and there is at least one cased character, false otherwise.\n\nstr.join(iterable)\n\n Return a string which is the concatenation of the strings in the\n *iterable* *iterable*. A ``TypeError`` will be raised if there are\n any non-string values in *iterable*, including ``bytes`` objects.\n The separator between elements is the string providing this method.\n\nstr.ljust(width[, fillchar])\n\n Return the string left justified in a string of length *width*.\n Padding is done using the specified *fillchar* (default is a\n space). 
The original string is returned if *width* is less than or\n equal to ``len(s)``.\n\nstr.lower()\n\n Return a copy of the string with all the cased characters [4]\n converted to lowercase.\n\n The lowercasing algorithm used is described in section 3.13 of the\n Unicode Standard.\n\nstr.lstrip([chars])\n\n Return a copy of the string with leading characters removed. The\n *chars* argument is a string specifying the set of characters to be\n removed. If omitted or ``None``, the *chars* argument defaults to\n removing whitespace. The *chars* argument is not a prefix; rather,\n all combinations of its values are stripped:\n\n >>> \' spacious \'.lstrip()\n \'spacious \'\n >>> \'www.example.com\'.lstrip(\'cmowz.\')\n \'example.com\'\n\nstatic str.maketrans(x[, y[, z]])\n\n This static method returns a translation table usable for\n ``str.translate()``.\n\n If there is only one argument, it must be a dictionary mapping\n Unicode ordinals (integers) or characters (strings of length 1) to\n Unicode ordinals, strings (of arbitrary lengths) or None.\n Character keys will then be converted to ordinals.\n\n If there are two arguments, they must be strings of equal length,\n and in the resulting dictionary, each character in x will be mapped\n to the character at the same position in y. If there is a third\n argument, it must be a string, whose characters will be mapped to\n None in the result.\n\nstr.partition(sep)\n\n Split the string at the first occurrence of *sep*, and return a\n 3-tuple containing the part before the separator, the separator\n itself, and the part after the separator. If the separator is not\n found, return a 3-tuple containing the string itself, followed by\n two empty strings.\n\nstr.replace(old, new[, count])\n\n Return a copy of the string with all occurrences of substring *old*\n replaced by *new*. If the optional argument *count* is given, only\n the first *count* occurrences are replaced.\n\nstr.rfind(sub[, start[, end]])\n\n Return the highest index in the string where substring *sub* is\n found, such that *sub* is contained within ``s[start:end]``.\n Optional arguments *start* and *end* are interpreted as in slice\n notation. Return ``-1`` on failure.\n\nstr.rindex(sub[, start[, end]])\n\n Like ``rfind()`` but raises ``ValueError`` when the substring *sub*\n is not found.\n\nstr.rjust(width[, fillchar])\n\n Return the string right justified in a string of length *width*.\n Padding is done using the specified *fillchar* (default is a\n space). The original string is returned if *width* is less than or\n equal to ``len(s)``.\n\nstr.rpartition(sep)\n\n Split the string at the last occurrence of *sep*, and return a\n 3-tuple containing the part before the separator, the separator\n itself, and the part after the separator. If the separator is not\n found, return a 3-tuple containing two empty strings, followed by\n the string itself.\n\nstr.rsplit(sep=None, maxsplit=-1)\n\n Return a list of the words in the string, using *sep* as the\n delimiter string. If *maxsplit* is given, at most *maxsplit* splits\n are done, the *rightmost* ones. If *sep* is not specified or\n ``None``, any whitespace string is a separator. Except for\n splitting from the right, ``rsplit()`` behaves like ``split()``\n which is described in detail below.\n\nstr.rstrip([chars])\n\n Return a copy of the string with trailing characters removed. The\n *chars* argument is a string specifying the set of characters to be\n removed. If omitted or ``None``, the *chars* argument defaults to\n removing whitespace. 
The *chars* argument is not a suffix; rather,\n all combinations of its values are stripped:\n\n >>> \' spacious \'.rstrip()\n \' spacious\'\n >>> \'mississippi\'.rstrip(\'ipz\')\n \'mississ\'\n\nstr.split(sep=None, maxsplit=-1)\n\n Return a list of the words in the string, using *sep* as the\n delimiter string. If *maxsplit* is given, at most *maxsplit*\n splits are done (thus, the list will have at most ``maxsplit+1``\n elements). If *maxsplit* is not specified, then there is no limit\n on the number of splits (all possible splits are made).\n\n If *sep* is given, consecutive delimiters are not grouped together\n and are deemed to delimit empty strings (for example,\n ``\'1,,2\'.split(\',\')`` returns ``[\'1\', \'\', \'2\']``). The *sep*\n argument may consist of multiple characters (for example,\n ``\'1<>2<>3\'.split(\'<>\')`` returns ``[\'1\', \'2\', \'3\']``). Splitting\n an empty string with a specified separator returns ``[\'\']``.\n\n If *sep* is not specified or is ``None``, a different splitting\n algorithm is applied: runs of consecutive whitespace are regarded\n as a single separator, and the result will contain no empty strings\n at the start or end if the string has leading or trailing\n whitespace. Consequently, splitting an empty string or a string\n consisting of just whitespace with a ``None`` separator returns\n ``[]``.\n\n For example, ``\' 1 2 3 \'.split()`` returns ``[\'1\', \'2\', \'3\']``,\n and ``\' 1 2 3 \'.split(None, 1)`` returns ``[\'1\', \'2 3 \']``.\n\nstr.splitlines([keepends])\n\n Return a list of the lines in the string, breaking at line\n boundaries. Line breaks are not included in the resulting list\n unless *keepends* is given and true.\n\nstr.startswith(prefix[, start[, end]])\n\n Return ``True`` if string starts with the *prefix*, otherwise\n return ``False``. *prefix* can also be a tuple of prefixes to look\n for. With optional *start*, test string beginning at that\n position. With optional *end*, stop comparing string at that\n position.\n\nstr.strip([chars])\n\n Return a copy of the string with the leading and trailing\n characters removed. The *chars* argument is a string specifying the\n set of characters to be removed. If omitted or ``None``, the\n *chars* argument defaults to removing whitespace. The *chars*\n argument is not a prefix or suffix; rather, all combinations of its\n values are stripped:\n\n >>> \' spacious \'.strip()\n \'spacious\'\n >>> \'www.example.com\'.strip(\'cmowz.\')\n \'example\'\n\nstr.swapcase()\n\n Return a copy of the string with uppercase characters converted to\n lowercase and vice versa. Note that it is not necessarily true that\n ``s.swapcase().swapcase() == s``.\n\nstr.title()\n\n Return a titlecased version of the string where words start with an\n uppercase character and the remaining characters are lowercase.\n\n The algorithm uses a simple language-independent definition of a\n word as groups of consecutive letters. 
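The splitting rules described above, shown at the interpreter:

   >>> '1,,2'.split(',')             # an explicit separator keeps empty strings
   ['1', '', '2']
   >>> ' 1  2   3  '.split()         # sep=None collapses whitespace runs
   ['1', '2', '3']
   >>> ' 1  2   3  '.split(None, 1)
   ['1', '2   3  ']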
The definition works in\n many contexts but it means that apostrophes in contractions and\n possessives form word boundaries, which may not be the desired\n result:\n\n >>> "they\'re bill\'s friends from the UK".title()\n "They\'Re Bill\'S Friends From The Uk"\n\n A workaround for apostrophes can be constructed using regular\n expressions:\n\n >>> import re\n >>> def titlecase(s):\n return re.sub(r"[A-Za-z]+(\'[A-Za-z]+)?",\n lambda mo: mo.group(0)[0].upper() +\n mo.group(0)[1:].lower(),\n s)\n\n >>> titlecase("they\'re bill\'s friends.")\n "They\'re Bill\'s Friends."\n\nstr.translate(map)\n\n Return a copy of the *s* where all characters have been mapped\n through the *map* which must be a dictionary of Unicode ordinals\n (integers) to Unicode ordinals, strings or ``None``. Unmapped\n characters are left untouched. Characters mapped to ``None`` are\n deleted.\n\n You can use ``str.maketrans()`` to create a translation map from\n character-to-character mappings in different formats.\n\n Note: An even more flexible approach is to create a custom character\n mapping codec using the ``codecs`` module (see\n ``encodings.cp1251`` for an example).\n\nstr.upper()\n\n Return a copy of the string with all the cased characters [4]\n converted to uppercase. Note that ``str.upper().isupper()`` might\n be ``False`` if ``s`` contains uncased characters or if the Unicode\n category of the resulting character(s) is not "Lu" (Letter,\n uppercase), but e.g. "Lt" (Letter, titlecase).\n\n The uppercasing algorithm used is described in section 3.13 of the\n Unicode Standard.\n\nstr.zfill(width)\n\n Return the numeric string left filled with zeros in a string of\n length *width*. A sign prefix is handled correctly. The original\n string is returned if *width* is less than or equal to ``len(s)``.\n\n\nOld String Formatting Operations\n================================\n\nNote: The formatting operations described here are modelled on C\'s\n printf() syntax. They only support formatting of certain builtin\n types. The use of a binary operator means that care may be needed\n in order to format tuples and dictionaries correctly. As the new\n *String Formatting* syntax is more flexible and handles tuples and\n dictionaries naturally, it is recommended for new code. However,\n there are no current plans to deprecate printf-style formatting.\n\nString objects have one unique built-in operation: the ``%`` operator\n(modulo). This is also known as the string *formatting* or\n*interpolation* operator. Given ``format % values`` (where *format* is\na string), ``%`` conversion specifications in *format* are replaced\nwith zero or more elements of *values*. The effect is similar to the\nusing ``sprintf()`` in the C language.\n\nIf *format* requires a single argument, *values* may be a single non-\ntuple object. [5] Otherwise, *values* must be a tuple with exactly\nthe number of items specified by the format string, or a single\nmapping object (for example, a dictionary).\n\nA conversion specifier contains two or more characters and has the\nfollowing components, which must occur in this order:\n\n1. The ``\'%\'`` character, which marks the start of the specifier.\n\n2. Mapping key (optional), consisting of a parenthesised sequence of\n characters (for example, ``(somename)``).\n\n3. Conversion flags (optional), which affect the result of some\n conversion types.\n\n4. Minimum field width (optional). 
If specified as an ``\'*\'``\n (asterisk), the actual width is read from the next element of the\n tuple in *values*, and the object to convert comes after the\n minimum field width and optional precision.\n\n5. Precision (optional), given as a ``\'.\'`` (dot) followed by the\n precision. If specified as ``\'*\'`` (an asterisk), the actual\n precision is read from the next element of the tuple in *values*,\n and the value to convert comes after the precision.\n\n6. Length modifier (optional).\n\n7. Conversion type.\n\nWhen the right argument is a dictionary (or other mapping type), then\nthe formats in the string *must* include a parenthesised mapping key\ninto that dictionary inserted immediately after the ``\'%\'`` character.\nThe mapping key selects the value to be formatted from the mapping.\nFor example:\n\n>>> print(\'%(language)s has %(number)03d quote types.\' %\n... {\'language\': "Python", "number": 2})\nPython has 002 quote types.\n\nIn this case no ``*`` specifiers may occur in a format (since they\nrequire a sequential parameter list).\n\nThe conversion flag characters are:\n\n+-----------+-----------------------------------------------------------------------+\n| Flag | Meaning |\n+===========+=======================================================================+\n| ``\'#\'`` | The value conversion will use the "alternate form" (where defined |\n| | below). |\n+-----------+-----------------------------------------------------------------------+\n| ``\'0\'`` | The conversion will be zero padded for numeric values. |\n+-----------+-----------------------------------------------------------------------+\n| ``\'-\'`` | The converted value is left adjusted (overrides the ``\'0\'`` |\n| | conversion if both are given). |\n+-----------+-----------------------------------------------------------------------+\n| ``\' \'`` | (a space) A blank should be left before a positive number (or empty |\n| | string) produced by a signed conversion. |\n+-----------+-----------------------------------------------------------------------+\n| ``\'+\'`` | A sign character (``\'+\'`` or ``\'-\'``) will precede the conversion |\n| | (overrides a "space" flag). |\n+-----------+-----------------------------------------------------------------------+\n\nA length modifier (``h``, ``l``, or ``L``) may be present, but is\nignored as it is not necessary for Python -- so e.g. ``%ld`` is\nidentical to ``%d``.\n\nThe conversion types are:\n\n+--------------+-------------------------------------------------------+---------+\n| Conversion | Meaning | Notes |\n+==============+=======================================================+=========+\n| ``\'d\'`` | Signed integer decimal. | |\n+--------------+-------------------------------------------------------+---------+\n| ``\'i\'`` | Signed integer decimal. | |\n+--------------+-------------------------------------------------------+---------+\n| ``\'o\'`` | Signed octal value. | (1) |\n+--------------+-------------------------------------------------------+---------+\n| ``\'u\'`` | Obsolete type -- it is identical to ``\'d\'``. | (7) |\n+--------------+-------------------------------------------------------+---------+\n| ``\'x\'`` | Signed hexadecimal (lowercase). | (2) |\n+--------------+-------------------------------------------------------+---------+\n| ``\'X\'`` | Signed hexadecimal (uppercase). | (2) |\n+--------------+-------------------------------------------------------+---------+\n| ``\'e\'`` | Floating point exponential format (lowercase). 
| (3) |\n+--------------+-------------------------------------------------------+---------+\n| ``\'E\'`` | Floating point exponential format (uppercase). | (3) |\n+--------------+-------------------------------------------------------+---------+\n| ``\'f\'`` | Floating point decimal format. | (3) |\n+--------------+-------------------------------------------------------+---------+\n| ``\'F\'`` | Floating point decimal format. | (3) |\n+--------------+-------------------------------------------------------+---------+\n| ``\'g\'`` | Floating point format. Uses lowercase exponential | (4) |\n| | format if exponent is less than -4 or not less than | |\n| | precision, decimal format otherwise. | |\n+--------------+-------------------------------------------------------+---------+\n| ``\'G\'`` | Floating point format. Uses uppercase exponential | (4) |\n| | format if exponent is less than -4 or not less than | |\n| | precision, decimal format otherwise. | |\n+--------------+-------------------------------------------------------+---------+\n| ``\'c\'`` | Single character (accepts integer or single character | |\n| | string). | |\n+--------------+-------------------------------------------------------+---------+\n| ``\'r\'`` | String (converts any Python object using ``repr()``). | (5) |\n+--------------+-------------------------------------------------------+---------+\n| ``\'s\'`` | String (converts any Python object using ``str()``). | (5) |\n+--------------+-------------------------------------------------------+---------+\n| ``\'a\'`` | String (converts any Python object using | (5) |\n| | ``ascii()``). | |\n+--------------+-------------------------------------------------------+---------+\n| ``\'%\'`` | No argument is converted, results in a ``\'%\'`` | |\n| | character in the result. | |\n+--------------+-------------------------------------------------------+---------+\n\nNotes:\n\n1. The alternate form causes a leading zero (``\'0\'``) to be inserted\n between left-hand padding and the formatting of the number if the\n leading character of the result is not already a zero.\n\n2. The alternate form causes a leading ``\'0x\'`` or ``\'0X\'`` (depending\n on whether the ``\'x\'`` or ``\'X\'`` format was used) to be inserted\n between left-hand padding and the formatting of the number if the\n leading character of the result is not already a zero.\n\n3. The alternate form causes the result to always contain a decimal\n point, even if no digits follow it.\n\n The precision determines the number of digits after the decimal\n point and defaults to 6.\n\n4. The alternate form causes the result to always contain a decimal\n point, and trailing zeroes are not removed as they would otherwise\n be.\n\n The precision determines the number of significant digits before\n and after the decimal point and defaults to 6.\n\n5. If precision is ``N``, the output is truncated to ``N`` characters.\n\n1. See **PEP 237**.\n\nSince Python strings have an explicit length, ``%s`` conversions do\nnot assume that ``\'\\0\'`` is the end of the string.\n\nChanged in version 3.1: ``%f`` conversions for numbers whose absolute\nvalue is over 1e50 are no longer replaced by ``%g`` conversions.\n\nAdditional string operations are defined in standard modules\n``string`` and ``re``.\n\n\nRange Type\n==========\n\nThe ``range`` type is an immutable sequence which is commonly used for\nlooping. 
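Returning to the printf-style formatting described above, a minimal sketch (the values are arbitrary):

   >>> '%d %s cost %.2f' % (3, 'eggs', 1.05)
   '3 eggs cost 1.05'
   >>> '%(language)s has %(number)03d quote types.' % {'language': 'Python', 'number': 2}
   'Python has 002 quote types.'
   >>> '%#x' % 255                   # the alternate form adds the 0x prefix
   '0xff'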
The advantage of the ``range`` type is that an ``range``\nobject will always take the same amount of memory, no matter the size\nof the range it represents.\n\nRange objects have relatively little behavior: they support indexing,\ncontains, iteration, the ``len()`` function, and the following\nmethods:\n\nrange.count(x)\n\n Return the number of *i*\'s for which ``s[i] == x``.\n\n New in version 3.2.\n\nrange.index(x)\n\n Return the smallest *i* such that ``s[i] == x``. Raises\n ``ValueError`` when *x* is not in the range.\n\n New in version 3.2.\n\n\nMutable Sequence Types\n======================\n\nList and bytearray objects support additional operations that allow\nin-place modification of the object. Other mutable sequence types\n(when added to the language) should also support these operations.\nStrings and tuples are immutable sequence types: such objects cannot\nbe modified once created. The following operations are defined on\nmutable sequence types (where *x* is an arbitrary object).\n\nNote that while lists allow their items to be of any type, bytearray\nobject "items" are all integers in the range 0 <= x < 256.\n\n+--------------------------------+----------------------------------+-----------------------+\n| Operation | Result | Notes |\n+================================+==================================+=======================+\n| ``s[i] = x`` | item *i* of *s* is replaced by | |\n| | *x* | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s[i:j] = t`` | slice of *s* from *i* to *j* is | |\n| | replaced by the contents of the | |\n| | iterable *t* | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``del s[i:j]`` | same as ``s[i:j] = []`` | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s[i:j:k] = t`` | the elements of ``s[i:j:k]`` are | (1) |\n| | replaced by those of *t* | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``del s[i:j:k]`` | removes the elements of | |\n| | ``s[i:j:k]`` from the list | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.append(x)`` | same as ``s[len(s):len(s)] = | |\n| | [x]`` | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.extend(x)`` | same as ``s[len(s):len(s)] = x`` | (2) |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.clear()`` | remove all items from ``s`` | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.copy()`` | return a shallow copy of ``s`` | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.count(x)`` | return number of *i*\'s for which | |\n| | ``s[i] == x`` | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.index(x[, i[, j]])`` | return smallest *k* such that | (3) |\n| | ``s[k] == x`` and ``i <= k < j`` | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.insert(i, x)`` | same as ``s[i:i] = [x]`` | (4) |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.pop([i])`` | same as ``x = s[i]; del s[i]; | (5) |\n| | return x`` | 
|\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.remove(x)`` | same as ``del s[s.index(x)]`` | (3) |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.reverse()`` | reverses the items of *s* in | (6) |\n| | place | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.sort([key[, reverse]])`` | sort the items of *s* in place | (6), (7), (8) |\n+--------------------------------+----------------------------------+-----------------------+\n\nNotes:\n\n1. *t* must have the same length as the slice it is replacing.\n\n2. *x* can be any iterable object.\n\n3. Raises ``ValueError`` when *x* is not found in *s*. When a negative\n index is passed as the second or third parameter to the ``index()``\n method, the sequence length is added, as for slice indices. If it\n is still negative, it is truncated to zero, as for slice indices.\n\n4. When a negative index is passed as the first parameter to the\n ``insert()`` method, the sequence length is added, as for slice\n indices. If it is still negative, it is truncated to zero, as for\n slice indices.\n\n5. The optional argument *i* defaults to ``-1``, so that by default\n the last item is removed and returned.\n\n6. The ``sort()`` and ``reverse()`` methods modify the sequence in\n place for economy of space when sorting or reversing a large\n sequence. To remind you that they operate by side effect, they\n don\'t return the sorted or reversed sequence.\n\n7. The ``sort()`` method takes optional arguments for controlling the\n comparisons. Each must be specified as a keyword argument.\n\n *key* specifies a function of one argument that is used to extract\n a comparison key from each list element: ``key=str.lower``. The\n default value is ``None``. Use ``functools.cmp_to_key()`` to\n convert an old-style *cmp* function to a *key* function.\n\n *reverse* is a boolean value. If set to ``True``, then the list\n elements are sorted as if each comparison were reversed.\n\n The ``sort()`` method is guaranteed to be stable. A sort is stable\n if it guarantees not to change the relative order of elements that\n compare equal --- this is helpful for sorting in multiple passes\n (for example, sort by department, then by salary grade).\n\n **CPython implementation detail:** While a list is being sorted,\n the effect of attempting to mutate, or even inspect, the list is\n undefined. The C implementation of Python makes the list appear\n empty for the duration, and raises ``ValueError`` if it can detect\n that the list has been mutated during a sort.\n\n8. ``sort()`` is not supported by ``bytearray`` objects.\n\n New in version 3.3: ``clear()`` and ``copy()`` methods.\n\n\nBytes and Byte Array Methods\n============================\n\nBytes and bytearray objects, being "strings of bytes", have all\nmethods found on strings, with the exception of ``encode()``,\n``format()`` and ``isidentifier()``, which do not make sense with\nthese types. For converting the objects to strings, they have a\n``decode()`` method.\n\nWherever one of these methods needs to interpret the bytes as\ncharacters (e.g. 
the ``is...()`` methods), the ASCII character set is\nassumed.\n\nNew in version 3.3: The functions ``count()``, ``find()``,\n``index()``, ``rfind()`` and ``rindex()`` have additional semantics\ncompared to the corresponding string functions: They also accept an\ninteger in range 0 to 255 (a byte) as their first argument.\n\nNote: The methods on bytes and bytearray objects don\'t accept strings as\n their arguments, just as the methods on strings don\'t accept bytes\n as their arguments. For example, you have to write\n\n a = "abc"\n b = a.replace("a", "f")\n\n and\n\n a = b"abc"\n b = a.replace(b"a", b"f")\n\nbytes.decode(encoding="utf-8", errors="strict")\nbytearray.decode(encoding="utf-8", errors="strict")\n\n Return a string decoded from the given bytes. Default encoding is\n ``\'utf-8\'``. *errors* may be given to set a different error\n handling scheme. The default for *errors* is ``\'strict\'``, meaning\n that encoding errors raise a ``UnicodeError``. Other possible\n values are ``\'ignore\'``, ``\'replace\'`` and any other name\n registered via ``codecs.register_error()``, see section *Codec Base\n Classes*. For a list of possible encodings, see section *Standard\n Encodings*.\n\n Changed in version 3.1: Added support for keyword arguments.\n\nThe bytes and bytearray types have an additional class method:\n\nclassmethod bytes.fromhex(string)\nclassmethod bytearray.fromhex(string)\n\n This ``bytes`` class method returns a bytes or bytearray object,\n decoding the given string object. The string must contain two\n hexadecimal digits per byte, spaces are ignored.\n\n >>> bytes.fromhex(\'f0 f1f2 \')\n b\'\\xf0\\xf1\\xf2\'\n\nThe maketrans and translate methods differ in semantics from the\nversions available on strings:\n\nbytes.translate(table[, delete])\nbytearray.translate(table[, delete])\n\n Return a copy of the bytes or bytearray object where all bytes\n occurring in the optional argument *delete* are removed, and the\n remaining bytes have been mapped through the given translation\n table, which must be a bytes object of length 256.\n\n You can use the ``bytes.maketrans()`` method to create a\n translation table.\n\n Set the *table* argument to ``None`` for translations that only\n delete characters:\n\n >>> b\'read this short text\'.translate(None, b\'aeiou\')\n b\'rd ths shrt txt\'\n\nstatic bytes.maketrans(from, to)\nstatic bytearray.maketrans(from, to)\n\n This static method returns a translation table usable for\n ``bytes.translate()`` that will map each character in *from* into\n the character at the same position in *to*; *from* and *to* must be\n bytes objects and have the same length.\n\n New in version 3.1.\n', + 'typesseq': '\nSequence Types --- ``str``, ``bytes``, ``bytearray``, ``list``, ``tuple``, ``range``\n************************************************************************************\n\nThere are six sequence types: strings, byte sequences (``bytes``\nobjects), byte arrays (``bytearray`` objects), lists, tuples, and\nrange objects. For other containers see the built in ``dict`` and\n``set`` classes, and the ``collections`` module.\n\nStrings contain Unicode characters. Their literals are written in\nsingle or double quotes: ``\'xyzzy\'``, ``"frobozz"``. See *String and\nBytes literals* for more about string literals. 
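Two quick examples of points covered above -- the ``sort()`` *key* argument from the mutable-sequence notes, and ``bytes.fromhex()``:

   >>> words = ['banana', 'Apple', 'cherry']
   >>> words.sort(key=str.lower)
   >>> words
   ['Apple', 'banana', 'cherry']
   >>> bytes.fromhex('f0 f1f2')      # spaces between bytes are ignored
   b'\xf0\xf1\xf2'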
In addition to the\nfunctionality described here, there are also string-specific methods\ndescribed in the *String Methods* section.\n\nBytes and bytearray objects contain single bytes -- the former is\nimmutable while the latter is a mutable sequence. Bytes objects can\nbe constructed the constructor, ``bytes()``, and from literals; use a\n``b`` prefix with normal string syntax: ``b\'xyzzy\'``. To construct\nbyte arrays, use the ``bytearray()`` function.\n\nWhile string objects are sequences of characters (represented by\nstrings of length 1), bytes and bytearray objects are sequences of\n*integers* (between 0 and 255), representing the ASCII value of single\nbytes. That means that for a bytes or bytearray object *b*, ``b[0]``\nwill be an integer, while ``b[0:1]`` will be a bytes or bytearray\nobject of length 1. The representation of bytes objects uses the\nliteral format (``b\'...\'``) since it is generally more useful than\ne.g. ``bytes([50, 19, 100])``. You can always convert a bytes object\ninto a list of integers using ``list(b)``.\n\nAlso, while in previous Python versions, byte strings and Unicode\nstrings could be exchanged for each other rather freely (barring\nencoding issues), strings and bytes are now completely separate\nconcepts. There\'s no implicit en-/decoding if you pass an object of\nthe wrong type. A string always compares unequal to a bytes or\nbytearray object.\n\nLists are constructed with square brackets, separating items with\ncommas: ``[a, b, c]``. Tuples are constructed by the comma operator\n(not within square brackets), with or without enclosing parentheses,\nbut an empty tuple must have the enclosing parentheses, such as ``a,\nb, c`` or ``()``. A single item tuple must have a trailing comma,\nsuch as ``(d,)``.\n\nObjects of type range are created using the ``range()`` function.\nThey don\'t support concatenation or repetition, and using ``min()`` or\n``max()`` on them is inefficient.\n\nMost sequence types support the following operations. The ``in`` and\n``not in`` operations have the same priorities as the comparison\noperations. The ``+`` and ``*`` operations have the same priority as\nthe corresponding numeric operations. [3] Additional methods are\nprovided for *Mutable Sequence Types*.\n\nThis table lists the sequence operations sorted in ascending priority\n(operations in the same box have the same priority). 
In the table,\n*s* and *t* are sequences of the same type; *n*, *i*, *j* and *k* are\nintegers.\n\n+--------------------+----------------------------------+------------+\n| Operation | Result | Notes |\n+====================+==================================+============+\n| ``x in s`` | ``True`` if an item of *s* is | (1) |\n| | equal to *x*, else ``False`` | |\n+--------------------+----------------------------------+------------+\n| ``x not in s`` | ``False`` if an item of *s* is | (1) |\n| | equal to *x*, else ``True`` | |\n+--------------------+----------------------------------+------------+\n| ``s + t`` | the concatenation of *s* and *t* | (6) |\n+--------------------+----------------------------------+------------+\n| ``s * n, n * s`` | *n* shallow copies of *s* | (2) |\n| | concatenated | |\n+--------------------+----------------------------------+------------+\n| ``s[i]`` | *i*th item of *s*, origin 0 | (3) |\n+--------------------+----------------------------------+------------+\n| ``s[i:j]`` | slice of *s* from *i* to *j* | (3)(4) |\n+--------------------+----------------------------------+------------+\n| ``s[i:j:k]`` | slice of *s* from *i* to *j* | (3)(5) |\n| | with step *k* | |\n+--------------------+----------------------------------+------------+\n| ``len(s)`` | length of *s* | |\n+--------------------+----------------------------------+------------+\n| ``min(s)`` | smallest item of *s* | |\n+--------------------+----------------------------------+------------+\n| ``max(s)`` | largest item of *s* | |\n+--------------------+----------------------------------+------------+\n| ``s.index(i)`` | index of the first occurence of | |\n| | *i* in *s* | |\n+--------------------+----------------------------------+------------+\n| ``s.count(i)`` | total number of occurences of | |\n| | *i* in *s* | |\n+--------------------+----------------------------------+------------+\n\nSequence types also support comparisons. In particular, tuples and\nlists are compared lexicographically by comparing corresponding\nelements. This means that to compare equal, every element must\ncompare equal and the two sequences must be of the same type and have\nthe same length. (For full details see *Comparisons* in the language\nreference.)\n\nNotes:\n\n1. When *s* is a string object, the ``in`` and ``not in`` operations\n act like a substring test.\n\n2. Values of *n* less than ``0`` are treated as ``0`` (which yields an\n empty sequence of the same type as *s*). Note also that the copies\n are shallow; nested structures are not copied. This often haunts\n new Python programmers; consider:\n\n >>> lists = [[]] * 3\n >>> lists\n [[], [], []]\n >>> lists[0].append(3)\n >>> lists\n [[3], [3], [3]]\n\n What has happened is that ``[[]]`` is a one-element list containing\n an empty list, so all three elements of ``[[]] * 3`` are (pointers\n to) this single empty list. Modifying any of the elements of\n ``lists`` modifies this single list. You can create a list of\n different lists this way:\n\n >>> lists = [[] for i in range(3)]\n >>> lists[0].append(3)\n >>> lists[1].append(5)\n >>> lists[2].append(7)\n >>> lists\n [[3], [5], [7]]\n\n3. If *i* or *j* is negative, the index is relative to the end of the\n string: ``len(s) + i`` or ``len(s) + j`` is substituted. But note\n that ``-0`` is still ``0``.\n\n4. The slice of *s* from *i* to *j* is defined as the sequence of\n items with index *k* such that ``i <= k < j``. If *i* or *j* is\n greater than ``len(s)``, use ``len(s)``. 
If *i* is omitted or\n ``None``, use ``0``. If *j* is omitted or ``None``, use\n ``len(s)``. If *i* is greater than or equal to *j*, the slice is\n empty.\n\n5. The slice of *s* from *i* to *j* with step *k* is defined as the\n sequence of items with index ``x = i + n*k`` such that ``0 <= n <\n (j-i)/k``. In other words, the indices are ``i``, ``i+k``,\n ``i+2*k``, ``i+3*k`` and so on, stopping when *j* is reached (but\n never including *j*). If *i* or *j* is greater than ``len(s)``,\n use ``len(s)``. If *i* or *j* are omitted or ``None``, they become\n "end" values (which end depends on the sign of *k*). Note, *k*\n cannot be zero. If *k* is ``None``, it is treated like ``1``.\n\n6. Concatenating immutable strings always results in a new object.\n This means that building up a string by repeated concatenation will\n have a quadratic runtime cost in the total string length. To get a\n linear runtime cost, you must switch to one of the alternatives\n below:\n\n * if concatenating ``str`` objects, you can build a list and use\n ``str.join()`` at the end;\n\n * if concatenating ``bytes`` objects, you can similarly use\n ``bytes.join()``, or you can do in-place concatenation with a\n ``bytearray`` object. ``bytearray`` objects are mutable and have\n an efficient overallocation mechanism.\n\n\nString Methods\n==============\n\nString objects support the methods listed below.\n\nIn addition, Python\'s strings support the sequence type methods\ndescribed in the *Sequence Types --- str, bytes, bytearray, list,\ntuple, range* section. To output formatted strings, see the *String\nFormatting* section. Also, see the ``re`` module for string functions\nbased on regular expressions.\n\nstr.capitalize()\n\n Return a copy of the string with its first character capitalized\n and the rest lowercased.\n\nstr.casefold()\n\n Return a casefolded copy of the string. Casefolded strings may be\n used for caseless matching.\n\n Casefolding is similar to lowercasing but more aggressive because\n it is intended to remove all case distinctions in a string. For\n example, the German lowercase letter ``\'\xc3\x9f\'`` is equivalent to\n ``"ss"``. Since it is already lowercase, ``lower()`` would do\n nothing to ``\'\xc3\x9f\'``; ``casefold()`` converts it to ``"ss"``.\n\n The casefolding algorithm is described in section 3.13 of the\n Unicode Standard.\n\n New in version 3.3.\n\nstr.center(width[, fillchar])\n\n Return centered in a string of length *width*. Padding is done\n using the specified *fillchar* (default is a space).\n\nstr.count(sub[, start[, end]])\n\n Return the number of non-overlapping occurrences of substring *sub*\n in the range [*start*, *end*]. Optional arguments *start* and\n *end* are interpreted as in slice notation.\n\nstr.encode(encoding="utf-8", errors="strict")\n\n Return an encoded version of the string as a bytes object. Default\n encoding is ``\'utf-8\'``. *errors* may be given to set a different\n error handling scheme. The default for *errors* is ``\'strict\'``,\n meaning that encoding errors raise a ``UnicodeError``. Other\n possible values are ``\'ignore\'``, ``\'replace\'``,\n ``\'xmlcharrefreplace\'``, ``\'backslashreplace\'`` and any other name\n registered via ``codecs.register_error()``, see section *Codec Base\n Classes*. 
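Editor's illustration of ``str.encode()`` as described just above (this example is added here and is not part of the quoted documentation); a round trip between ``str`` and ``bytes``, plus the ``'replace'`` error handler, looks like this:

   >>> 'café'.encode('utf-8')
   b'caf\xc3\xa9'
   >>> b'caf\xc3\xa9'.decode('utf-8')
   'café'
   >>> 'café'.encode('ascii', errors='replace')
   b'caf?'
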
For a list of possible encodings, see section *Standard\n Encodings*.\n\n Changed in version 3.1: Support for keyword arguments added.\n\nstr.endswith(suffix[, start[, end]])\n\n Return ``True`` if the string ends with the specified *suffix*,\n otherwise return ``False``. *suffix* can also be a tuple of\n suffixes to look for. With optional *start*, test beginning at\n that position. With optional *end*, stop comparing at that\n position.\n\nstr.expandtabs([tabsize])\n\n Return a copy of the string where all tab characters are replaced\n by zero or more spaces, depending on the current column and the\n given tab size. The column number is reset to zero after each\n newline occurring in the string. If *tabsize* is not given, a tab\n size of ``8`` characters is assumed. This doesn\'t understand other\n non-printing characters or escape sequences.\n\nstr.find(sub[, start[, end]])\n\n Return the lowest index in the string where substring *sub* is\n found, such that *sub* is contained in the slice ``s[start:end]``.\n Optional arguments *start* and *end* are interpreted as in slice\n notation. Return ``-1`` if *sub* is not found.\n\n Note: The ``find()`` method should be used only if you need to know the\n position of *sub*. To check if *sub* is a substring or not, use\n the ``in`` operator:\n\n >>> \'Py\' in \'Python\'\n True\n\nstr.format(*args, **kwargs)\n\n Perform a string formatting operation. The string on which this\n method is called can contain literal text or replacement fields\n delimited by braces ``{}``. Each replacement field contains either\n the numeric index of a positional argument, or the name of a\n keyword argument. Returns a copy of the string where each\n replacement field is replaced with the string value of the\n corresponding argument.\n\n >>> "The sum of 1 + 2 is {0}".format(1+2)\n \'The sum of 1 + 2 is 3\'\n\n See *Format String Syntax* for a description of the various\n formatting options that can be specified in format strings.\n\nstr.format_map(mapping)\n\n Similar to ``str.format(**mapping)``, except that ``mapping`` is\n used directly and not copied to a ``dict`` . This is useful if for\n example ``mapping`` is a dict subclass:\n\n >>> class Default(dict):\n ... def __missing__(self, key):\n ... return key\n ...\n >>> \'{name} was born in {country}\'.format_map(Default(name=\'Guido\'))\n \'Guido was born in country\'\n\n New in version 3.2.\n\nstr.index(sub[, start[, end]])\n\n Like ``find()``, but raise ``ValueError`` when the substring is not\n found.\n\nstr.isalnum()\n\n Return true if all characters in the string are alphanumeric and\n there is at least one character, false otherwise. A character\n ``c`` is alphanumeric if one of the following returns ``True``:\n ``c.isalpha()``, ``c.isdecimal()``, ``c.isdigit()``, or\n ``c.isnumeric()``.\n\nstr.isalpha()\n\n Return true if all characters in the string are alphabetic and\n there is at least one character, false otherwise. Alphabetic\n characters are those characters defined in the Unicode character\n database as "Letter", i.e., those with general category property\n being one of "Lm", "Lt", "Lu", "Ll", or "Lo". Note that this is\n different from the "Alphabetic" property defined in the Unicode\n Standard.\n\nstr.isdecimal()\n\n Return true if all characters in the string are decimal characters\n and there is at least one character, false otherwise. Decimal\n characters are those from general category "Nd". 
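A small editor's sketch of the distinction drawn above for ``str.isdecimal()`` (illustrative only, not part of the quoted text): only characters usable to form base-10 numbers qualify, so a superscript digit fails the test even though it is a digit:

   >>> '123'.isdecimal()
   True
   >>> '\u0660'.isdecimal()    # ARABIC-INDIC DIGIT ZERO, category "Nd"
   True
   >>> '\u00b2'.isdecimal()    # SUPERSCRIPT TWO is a digit, but not decimal
   False
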
This category\n includes digit characters, and all characters that can be used to\n form decimal-radix numbers, e.g. U+0660, ARABIC-INDIC DIGIT ZERO.\n\nstr.isdigit()\n\n Return true if all characters in the string are digits and there is\n at least one character, false otherwise. Digits include decimal\n characters and digits that need special handling, such as the\n compatibility superscript digits. Formally, a digit is a character\n that has the property value Numeric_Type=Digit or\n Numeric_Type=Decimal.\n\nstr.isidentifier()\n\n Return true if the string is a valid identifier according to the\n language definition, section *Identifiers and keywords*.\n\nstr.islower()\n\n Return true if all cased characters [4] in the string are lowercase\n and there is at least one cased character, false otherwise.\n\nstr.isnumeric()\n\n Return true if all characters in the string are numeric characters,\n and there is at least one character, false otherwise. Numeric\n characters include digit characters, and all characters that have\n the Unicode numeric value property, e.g. U+2155, VULGAR FRACTION\n ONE FIFTH. Formally, numeric characters are those with the\n property value Numeric_Type=Digit, Numeric_Type=Decimal or\n Numeric_Type=Numeric.\n\nstr.isprintable()\n\n Return true if all characters in the string are printable or the\n string is empty, false otherwise. Nonprintable characters are\n those characters defined in the Unicode character database as\n "Other" or "Separator", excepting the ASCII space (0x20) which is\n considered printable. (Note that printable characters in this\n context are those which should not be escaped when ``repr()`` is\n invoked on a string. It has no bearing on the handling of strings\n written to ``sys.stdout`` or ``sys.stderr``.)\n\nstr.isspace()\n\n Return true if there are only whitespace characters in the string\n and there is at least one character, false otherwise. Whitespace\n characters are those characters defined in the Unicode character\n database as "Other" or "Separator" and those with bidirectional\n property being one of "WS", "B", or "S".\n\nstr.istitle()\n\n Return true if the string is a titlecased string and there is at\n least one character, for example uppercase characters may only\n follow uncased characters and lowercase characters only cased ones.\n Return false otherwise.\n\nstr.isupper()\n\n Return true if all cased characters [4] in the string are uppercase\n and there is at least one cased character, false otherwise.\n\nstr.join(iterable)\n\n Return a string which is the concatenation of the strings in the\n *iterable* *iterable*. A ``TypeError`` will be raised if there are\n any non-string values in *iterable*, including ``bytes`` objects.\n The separator between elements is the string providing this method.\n\nstr.ljust(width[, fillchar])\n\n Return the string left justified in a string of length *width*.\n Padding is done using the specified *fillchar* (default is a\n space). The original string is returned if *width* is less than or\n equal to ``len(s)``.\n\nstr.lower()\n\n Return a copy of the string with all the cased characters [4]\n converted to lowercase.\n\n The lowercasing algorithm used is described in section 3.13 of the\n Unicode Standard.\n\nstr.lstrip([chars])\n\n Return a copy of the string with leading characters removed. The\n *chars* argument is a string specifying the set of characters to be\n removed. If omitted or ``None``, the *chars* argument defaults to\n removing whitespace. 
The *chars* argument is not a prefix; rather,\n all combinations of its values are stripped:\n\n >>> \' spacious \'.lstrip()\n \'spacious \'\n >>> \'www.example.com\'.lstrip(\'cmowz.\')\n \'example.com\'\n\nstatic str.maketrans(x[, y[, z]])\n\n This static method returns a translation table usable for\n ``str.translate()``.\n\n If there is only one argument, it must be a dictionary mapping\n Unicode ordinals (integers) or characters (strings of length 1) to\n Unicode ordinals, strings (of arbitrary lengths) or None.\n Character keys will then be converted to ordinals.\n\n If there are two arguments, they must be strings of equal length,\n and in the resulting dictionary, each character in x will be mapped\n to the character at the same position in y. If there is a third\n argument, it must be a string, whose characters will be mapped to\n None in the result.\n\nstr.partition(sep)\n\n Split the string at the first occurrence of *sep*, and return a\n 3-tuple containing the part before the separator, the separator\n itself, and the part after the separator. If the separator is not\n found, return a 3-tuple containing the string itself, followed by\n two empty strings.\n\nstr.replace(old, new[, count])\n\n Return a copy of the string with all occurrences of substring *old*\n replaced by *new*. If the optional argument *count* is given, only\n the first *count* occurrences are replaced.\n\nstr.rfind(sub[, start[, end]])\n\n Return the highest index in the string where substring *sub* is\n found, such that *sub* is contained within ``s[start:end]``.\n Optional arguments *start* and *end* are interpreted as in slice\n notation. Return ``-1`` on failure.\n\nstr.rindex(sub[, start[, end]])\n\n Like ``rfind()`` but raises ``ValueError`` when the substring *sub*\n is not found.\n\nstr.rjust(width[, fillchar])\n\n Return the string right justified in a string of length *width*.\n Padding is done using the specified *fillchar* (default is a\n space). The original string is returned if *width* is less than or\n equal to ``len(s)``.\n\nstr.rpartition(sep)\n\n Split the string at the last occurrence of *sep*, and return a\n 3-tuple containing the part before the separator, the separator\n itself, and the part after the separator. If the separator is not\n found, return a 3-tuple containing two empty strings, followed by\n the string itself.\n\nstr.rsplit(sep=None, maxsplit=-1)\n\n Return a list of the words in the string, using *sep* as the\n delimiter string. If *maxsplit* is given, at most *maxsplit* splits\n are done, the *rightmost* ones. If *sep* is not specified or\n ``None``, any whitespace string is a separator. Except for\n splitting from the right, ``rsplit()`` behaves like ``split()``\n which is described in detail below.\n\nstr.rstrip([chars])\n\n Return a copy of the string with trailing characters removed. The\n *chars* argument is a string specifying the set of characters to be\n removed. If omitted or ``None``, the *chars* argument defaults to\n removing whitespace. The *chars* argument is not a suffix; rather,\n all combinations of its values are stripped:\n\n >>> \' spacious \'.rstrip()\n \' spacious\'\n >>> \'mississippi\'.rstrip(\'ipz\')\n \'mississ\'\n\nstr.split(sep=None, maxsplit=-1)\n\n Return a list of the words in the string, using *sep* as the\n delimiter string. If *maxsplit* is given, at most *maxsplit*\n splits are done (thus, the list will have at most ``maxsplit+1``\n elements). 
If *maxsplit* is not specified, then there is no limit\n on the number of splits (all possible splits are made).\n\n If *sep* is given, consecutive delimiters are not grouped together\n and are deemed to delimit empty strings (for example,\n ``\'1,,2\'.split(\',\')`` returns ``[\'1\', \'\', \'2\']``). The *sep*\n argument may consist of multiple characters (for example,\n ``\'1<>2<>3\'.split(\'<>\')`` returns ``[\'1\', \'2\', \'3\']``). Splitting\n an empty string with a specified separator returns ``[\'\']``.\n\n If *sep* is not specified or is ``None``, a different splitting\n algorithm is applied: runs of consecutive whitespace are regarded\n as a single separator, and the result will contain no empty strings\n at the start or end if the string has leading or trailing\n whitespace. Consequently, splitting an empty string or a string\n consisting of just whitespace with a ``None`` separator returns\n ``[]``.\n\n For example, ``\' 1 2 3 \'.split()`` returns ``[\'1\', \'2\', \'3\']``,\n and ``\' 1 2 3 \'.split(None, 1)`` returns ``[\'1\', \'2 3 \']``.\n\nstr.splitlines([keepends])\n\n Return a list of the lines in the string, breaking at line\n boundaries. Line breaks are not included in the resulting list\n unless *keepends* is given and true.\n\nstr.startswith(prefix[, start[, end]])\n\n Return ``True`` if string starts with the *prefix*, otherwise\n return ``False``. *prefix* can also be a tuple of prefixes to look\n for. With optional *start*, test string beginning at that\n position. With optional *end*, stop comparing string at that\n position.\n\nstr.strip([chars])\n\n Return a copy of the string with the leading and trailing\n characters removed. The *chars* argument is a string specifying the\n set of characters to be removed. If omitted or ``None``, the\n *chars* argument defaults to removing whitespace. The *chars*\n argument is not a prefix or suffix; rather, all combinations of its\n values are stripped:\n\n >>> \' spacious \'.strip()\n \'spacious\'\n >>> \'www.example.com\'.strip(\'cmowz.\')\n \'example\'\n\nstr.swapcase()\n\n Return a copy of the string with uppercase characters converted to\n lowercase and vice versa. Note that it is not necessarily true that\n ``s.swapcase().swapcase() == s``.\n\nstr.title()\n\n Return a titlecased version of the string where words start with an\n uppercase character and the remaining characters are lowercase.\n\n The algorithm uses a simple language-independent definition of a\n word as groups of consecutive letters. The definition works in\n many contexts but it means that apostrophes in contractions and\n possessives form word boundaries, which may not be the desired\n result:\n\n >>> "they\'re bill\'s friends from the UK".title()\n "They\'Re Bill\'S Friends From The Uk"\n\n A workaround for apostrophes can be constructed using regular\n expressions:\n\n >>> import re\n >>> def titlecase(s):\n return re.sub(r"[A-Za-z]+(\'[A-Za-z]+)?",\n lambda mo: mo.group(0)[0].upper() +\n mo.group(0)[1:].lower(),\n s)\n\n >>> titlecase("they\'re bill\'s friends.")\n "They\'re Bill\'s Friends."\n\nstr.translate(map)\n\n Return a copy of the *s* where all characters have been mapped\n through the *map* which must be a dictionary of Unicode ordinals\n (integers) to Unicode ordinals, strings or ``None``. Unmapped\n characters are left untouched. 
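Editor's sketch of ``str.maketrans()`` and ``str.translate()`` as described in the surrounding entries (added for illustration, not part of the quoted documentation):

   >>> table = str.maketrans('abc', 'xyz')
   >>> 'abcabc-def'.translate(table)
   'xyzxyz-def'
   >>> 'abcabc-def'.translate(str.maketrans({'a': None}))   # map to None to delete
   'bcbc-def'
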
Characters mapped to ``None`` are\n deleted.\n\n You can use ``str.maketrans()`` to create a translation map from\n character-to-character mappings in different formats.\n\n Note: An even more flexible approach is to create a custom character\n mapping codec using the ``codecs`` module (see\n ``encodings.cp1251`` for an example).\n\nstr.upper()\n\n Return a copy of the string with all the cased characters [4]\n converted to uppercase. Note that ``str.upper().isupper()`` might\n be ``False`` if ``s`` contains uncased characters or if the Unicode\n category of the resulting character(s) is not "Lu" (Letter,\n uppercase), but e.g. "Lt" (Letter, titlecase).\n\n The uppercasing algorithm used is described in section 3.13 of the\n Unicode Standard.\n\nstr.zfill(width)\n\n Return the numeric string left filled with zeros in a string of\n length *width*. A sign prefix is handled correctly. The original\n string is returned if *width* is less than or equal to ``len(s)``.\n\n\nOld String Formatting Operations\n================================\n\nNote: The formatting operations described here are modelled on C\'s\n printf() syntax. They only support formatting of certain builtin\n types. The use of a binary operator means that care may be needed\n in order to format tuples and dictionaries correctly. As the new\n *String Formatting* syntax is more flexible and handles tuples and\n dictionaries naturally, it is recommended for new code. However,\n there are no current plans to deprecate printf-style formatting.\n\nString objects have one unique built-in operation: the ``%`` operator\n(modulo). This is also known as the string *formatting* or\n*interpolation* operator. Given ``format % values`` (where *format* is\na string), ``%`` conversion specifications in *format* are replaced\nwith zero or more elements of *values*. The effect is similar to the\nusing ``sprintf()`` in the C language.\n\nIf *format* requires a single argument, *values* may be a single non-\ntuple object. [5] Otherwise, *values* must be a tuple with exactly\nthe number of items specified by the format string, or a single\nmapping object (for example, a dictionary).\n\nA conversion specifier contains two or more characters and has the\nfollowing components, which must occur in this order:\n\n1. The ``\'%\'`` character, which marks the start of the specifier.\n\n2. Mapping key (optional), consisting of a parenthesised sequence of\n characters (for example, ``(somename)``).\n\n3. Conversion flags (optional), which affect the result of some\n conversion types.\n\n4. Minimum field width (optional). If specified as an ``\'*\'``\n (asterisk), the actual width is read from the next element of the\n tuple in *values*, and the object to convert comes after the\n minimum field width and optional precision.\n\n5. Precision (optional), given as a ``\'.\'`` (dot) followed by the\n precision. If specified as ``\'*\'`` (an asterisk), the actual\n precision is read from the next element of the tuple in *values*,\n and the value to convert comes after the precision.\n\n6. Length modifier (optional).\n\n7. Conversion type.\n\nWhen the right argument is a dictionary (or other mapping type), then\nthe formats in the string *must* include a parenthesised mapping key\ninto that dictionary inserted immediately after the ``\'%\'`` character.\nThe mapping key selects the value to be formatted from the mapping.\nFor example:\n\n>>> print(\'%(language)s has %(number)03d quote types.\' %\n... 
{\'language\': "Python", "number": 2})\nPython has 002 quote types.\n\nIn this case no ``*`` specifiers may occur in a format (since they\nrequire a sequential parameter list).\n\nThe conversion flag characters are:\n\n+-----------+-----------------------------------------------------------------------+\n| Flag | Meaning |\n+===========+=======================================================================+\n| ``\'#\'`` | The value conversion will use the "alternate form" (where defined |\n| | below). |\n+-----------+-----------------------------------------------------------------------+\n| ``\'0\'`` | The conversion will be zero padded for numeric values. |\n+-----------+-----------------------------------------------------------------------+\n| ``\'-\'`` | The converted value is left adjusted (overrides the ``\'0\'`` |\n| | conversion if both are given). |\n+-----------+-----------------------------------------------------------------------+\n| ``\' \'`` | (a space) A blank should be left before a positive number (or empty |\n| | string) produced by a signed conversion. |\n+-----------+-----------------------------------------------------------------------+\n| ``\'+\'`` | A sign character (``\'+\'`` or ``\'-\'``) will precede the conversion |\n| | (overrides a "space" flag). |\n+-----------+-----------------------------------------------------------------------+\n\nA length modifier (``h``, ``l``, or ``L``) may be present, but is\nignored as it is not necessary for Python -- so e.g. ``%ld`` is\nidentical to ``%d``.\n\nThe conversion types are:\n\n+--------------+-------------------------------------------------------+---------+\n| Conversion | Meaning | Notes |\n+==============+=======================================================+=========+\n| ``\'d\'`` | Signed integer decimal. | |\n+--------------+-------------------------------------------------------+---------+\n| ``\'i\'`` | Signed integer decimal. | |\n+--------------+-------------------------------------------------------+---------+\n| ``\'o\'`` | Signed octal value. | (1) |\n+--------------+-------------------------------------------------------+---------+\n| ``\'u\'`` | Obsolete type -- it is identical to ``\'d\'``. | (7) |\n+--------------+-------------------------------------------------------+---------+\n| ``\'x\'`` | Signed hexadecimal (lowercase). | (2) |\n+--------------+-------------------------------------------------------+---------+\n| ``\'X\'`` | Signed hexadecimal (uppercase). | (2) |\n+--------------+-------------------------------------------------------+---------+\n| ``\'e\'`` | Floating point exponential format (lowercase). | (3) |\n+--------------+-------------------------------------------------------+---------+\n| ``\'E\'`` | Floating point exponential format (uppercase). | (3) |\n+--------------+-------------------------------------------------------+---------+\n| ``\'f\'`` | Floating point decimal format. | (3) |\n+--------------+-------------------------------------------------------+---------+\n| ``\'F\'`` | Floating point decimal format. | (3) |\n+--------------+-------------------------------------------------------+---------+\n| ``\'g\'`` | Floating point format. Uses lowercase exponential | (4) |\n| | format if exponent is less than -4 or not less than | |\n| | precision, decimal format otherwise. | |\n+--------------+-------------------------------------------------------+---------+\n| ``\'G\'`` | Floating point format. 
Uses uppercase exponential | (4) |\n| | format if exponent is less than -4 or not less than | |\n| | precision, decimal format otherwise. | |\n+--------------+-------------------------------------------------------+---------+\n| ``\'c\'`` | Single character (accepts integer or single character | |\n| | string). | |\n+--------------+-------------------------------------------------------+---------+\n| ``\'r\'`` | String (converts any Python object using ``repr()``). | (5) |\n+--------------+-------------------------------------------------------+---------+\n| ``\'s\'`` | String (converts any Python object using ``str()``). | (5) |\n+--------------+-------------------------------------------------------+---------+\n| ``\'a\'`` | String (converts any Python object using | (5) |\n| | ``ascii()``). | |\n+--------------+-------------------------------------------------------+---------+\n| ``\'%\'`` | No argument is converted, results in a ``\'%\'`` | |\n| | character in the result. | |\n+--------------+-------------------------------------------------------+---------+\n\nNotes:\n\n1. The alternate form causes a leading zero (``\'0\'``) to be inserted\n between left-hand padding and the formatting of the number if the\n leading character of the result is not already a zero.\n\n2. The alternate form causes a leading ``\'0x\'`` or ``\'0X\'`` (depending\n on whether the ``\'x\'`` or ``\'X\'`` format was used) to be inserted\n between left-hand padding and the formatting of the number if the\n leading character of the result is not already a zero.\n\n3. The alternate form causes the result to always contain a decimal\n point, even if no digits follow it.\n\n The precision determines the number of digits after the decimal\n point and defaults to 6.\n\n4. The alternate form causes the result to always contain a decimal\n point, and trailing zeroes are not removed as they would otherwise\n be.\n\n The precision determines the number of significant digits before\n and after the decimal point and defaults to 6.\n\n5. If precision is ``N``, the output is truncated to ``N`` characters.\n\n1. See **PEP 237**.\n\nSince Python strings have an explicit length, ``%s`` conversions do\nnot assume that ``\'\\0\'`` is the end of the string.\n\nChanged in version 3.1: ``%f`` conversions for numbers whose absolute\nvalue is over 1e50 are no longer replaced by ``%g`` conversions.\n\nAdditional string operations are defined in standard modules\n``string`` and ``re``.\n\n\nRange Type\n==========\n\nThe ``range`` type is an immutable sequence which is commonly used for\nlooping. The advantage of the ``range`` type is that an ``range``\nobject will always take the same amount of memory, no matter the size\nof the range it represents.\n\nRange objects have relatively little behavior: they support indexing,\ncontains, iteration, the ``len()`` function, and the following\nmethods:\n\nrange.count(x)\n\n Return the number of *i*\'s for which ``s[i] == x``.\n\n New in version 3.2.\n\nrange.index(x)\n\n Return the smallest *i* such that ``s[i] == x``. Raises\n ``ValueError`` when *x* is not in the range.\n\n New in version 3.2.\n\n\nMutable Sequence Types\n======================\n\nList and bytearray objects support additional operations that allow\nin-place modification of the object. Other mutable sequence types\n(when added to the language) should also support these operations.\nStrings and tuples are immutable sequence types: such objects cannot\nbe modified once created. 
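A brief editor's illustration of the immutability contrast stated above (not part of the quoted documentation): assigning to an item of a tuple fails, while the same operation on a list succeeds in place:

   >>> t = (1, 2, 3)
   >>> t[0] = 9
   Traceback (most recent call last):
     ...
   TypeError: 'tuple' object does not support item assignment
   >>> lst = [1, 2, 3]
   >>> lst[0] = 9
   >>> lst
   [9, 2, 3]
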
The following operations are defined on\nmutable sequence types (where *x* is an arbitrary object).\n\nNote that while lists allow their items to be of any type, bytearray\nobject "items" are all integers in the range 0 <= x < 256.\n\n+--------------------------------+----------------------------------+-----------------------+\n| Operation | Result | Notes |\n+================================+==================================+=======================+\n| ``s[i] = x`` | item *i* of *s* is replaced by | |\n| | *x* | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s[i:j] = t`` | slice of *s* from *i* to *j* is | |\n| | replaced by the contents of the | |\n| | iterable *t* | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``del s[i:j]`` | same as ``s[i:j] = []`` | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s[i:j:k] = t`` | the elements of ``s[i:j:k]`` are | (1) |\n| | replaced by those of *t* | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``del s[i:j:k]`` | removes the elements of | |\n| | ``s[i:j:k]`` from the list | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.append(x)`` | same as ``s[len(s):len(s)] = | |\n| | [x]`` | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.extend(x)`` | same as ``s[len(s):len(s)] = x`` | (2) |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.clear()`` | remove all items from ``s`` | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.copy()`` | return a shallow copy of ``s`` | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.count(x)`` | return number of *i*\'s for which | |\n| | ``s[i] == x`` | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.index(x[, i[, j]])`` | return smallest *k* such that | (3) |\n| | ``s[k] == x`` and ``i <= k < j`` | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.insert(i, x)`` | same as ``s[i:i] = [x]`` | (4) |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.pop([i])`` | same as ``x = s[i]; del s[i]; | (5) |\n| | return x`` | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.remove(x)`` | same as ``del s[s.index(x)]`` | (3) |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.reverse()`` | reverses the items of *s* in | (6) |\n| | place | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.sort([key[, reverse]])`` | sort the items of *s* in place | (6), (7), (8) |\n+--------------------------------+----------------------------------+-----------------------+\n\nNotes:\n\n1. *t* must have the same length as the slice it is replacing.\n\n2. *x* can be any iterable object.\n\n3. Raises ``ValueError`` when *x* is not found in *s*. When a negative\n index is passed as the second or third parameter to the ``index()``\n method, the sequence length is added, as for slice indices. 
If it\n is still negative, it is truncated to zero, as for slice indices.\n\n4. When a negative index is passed as the first parameter to the\n ``insert()`` method, the sequence length is added, as for slice\n indices. If it is still negative, it is truncated to zero, as for\n slice indices.\n\n5. The optional argument *i* defaults to ``-1``, so that by default\n the last item is removed and returned.\n\n6. The ``sort()`` and ``reverse()`` methods modify the sequence in\n place for economy of space when sorting or reversing a large\n sequence. To remind you that they operate by side effect, they\n don\'t return the sorted or reversed sequence.\n\n7. The ``sort()`` method takes optional arguments for controlling the\n comparisons. Each must be specified as a keyword argument.\n\n *key* specifies a function of one argument that is used to extract\n a comparison key from each list element: ``key=str.lower``. The\n default value is ``None``. Use ``functools.cmp_to_key()`` to\n convert an old-style *cmp* function to a *key* function.\n\n *reverse* is a boolean value. If set to ``True``, then the list\n elements are sorted as if each comparison were reversed.\n\n The ``sort()`` method is guaranteed to be stable. A sort is stable\n if it guarantees not to change the relative order of elements that\n compare equal --- this is helpful for sorting in multiple passes\n (for example, sort by department, then by salary grade).\n\n **CPython implementation detail:** While a list is being sorted,\n the effect of attempting to mutate, or even inspect, the list is\n undefined. The C implementation of Python makes the list appear\n empty for the duration, and raises ``ValueError`` if it can detect\n that the list has been mutated during a sort.\n\n8. ``sort()`` is not supported by ``bytearray`` objects.\n\n New in version 3.3: ``clear()`` and ``copy()`` methods.\n\n\nBytes and Byte Array Methods\n============================\n\nBytes and bytearray objects, being "strings of bytes", have all\nmethods found on strings, with the exception of ``encode()``,\n``format()`` and ``isidentifier()``, which do not make sense with\nthese types. For converting the objects to strings, they have a\n``decode()`` method.\n\nWherever one of these methods needs to interpret the bytes as\ncharacters (e.g. the ``is...()`` methods), the ASCII character set is\nassumed.\n\nNew in version 3.3: The functions ``count()``, ``find()``,\n``index()``, ``rfind()`` and ``rindex()`` have additional semantics\ncompared to the corresponding string functions: They also accept an\ninteger in range 0 to 255 (a byte) as their first argument.\n\nNote: The methods on bytes and bytearray objects don\'t accept strings as\n their arguments, just as the methods on strings don\'t accept bytes\n as their arguments. For example, you have to write\n\n a = "abc"\n b = a.replace("a", "f")\n\n and\n\n a = b"abc"\n b = a.replace(b"a", b"f")\n\nbytes.decode(encoding="utf-8", errors="strict")\nbytearray.decode(encoding="utf-8", errors="strict")\n\n Return a string decoded from the given bytes. Default encoding is\n ``\'utf-8\'``. *errors* may be given to set a different error\n handling scheme. The default for *errors* is ``\'strict\'``, meaning\n that encoding errors raise a ``UnicodeError``. Other possible\n values are ``\'ignore\'``, ``\'replace\'`` and any other name\n registered via ``codecs.register_error()``, see section *Codec Base\n Classes*. 
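Editor's sketch of the *key* and *reverse* sort options covered in note (7) above (illustrative only; not part of the quoted text). The second sort also shows stability: items with equal keys keep their relative order:

   >>> words = ['banana', 'Apple', 'cherry']
   >>> words.sort(key=str.lower)
   >>> words
   ['Apple', 'banana', 'cherry']
   >>> words.sort(key=len, reverse=True)
   >>> words
   ['banana', 'cherry', 'Apple']
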
For a list of possible encodings, see section *Standard\n Encodings*.\n\n Changed in version 3.1: Added support for keyword arguments.\n\nThe bytes and bytearray types have an additional class method:\n\nclassmethod bytes.fromhex(string)\nclassmethod bytearray.fromhex(string)\n\n This ``bytes`` class method returns a bytes or bytearray object,\n decoding the given string object. The string must contain two\n hexadecimal digits per byte, spaces are ignored.\n\n >>> bytes.fromhex(\'f0 f1f2 \')\n b\'\\xf0\\xf1\\xf2\'\n\nThe maketrans and translate methods differ in semantics from the\nversions available on strings:\n\nbytes.translate(table[, delete])\nbytearray.translate(table[, delete])\n\n Return a copy of the bytes or bytearray object where all bytes\n occurring in the optional argument *delete* are removed, and the\n remaining bytes have been mapped through the given translation\n table, which must be a bytes object of length 256.\n\n You can use the ``bytes.maketrans()`` method to create a\n translation table.\n\n Set the *table* argument to ``None`` for translations that only\n delete characters:\n\n >>> b\'read this short text\'.translate(None, b\'aeiou\')\n b\'rd ths shrt txt\'\n\nstatic bytes.maketrans(from, to)\nstatic bytearray.maketrans(from, to)\n\n This static method returns a translation table usable for\n ``bytes.translate()`` that will map each character in *from* into\n the character at the same position in *to*; *from* and *to* must be\n bytes objects and have the same length.\n\n New in version 3.1.\n', 'typesseq-mutable': '\nMutable Sequence Types\n**********************\n\nList and bytearray objects support additional operations that allow\nin-place modification of the object. Other mutable sequence types\n(when added to the language) should also support these operations.\nStrings and tuples are immutable sequence types: such objects cannot\nbe modified once created. 
The following operations are defined on\nmutable sequence types (where *x* is an arbitrary object).\n\nNote that while lists allow their items to be of any type, bytearray\nobject "items" are all integers in the range 0 <= x < 256.\n\n+--------------------------------+----------------------------------+-----------------------+\n| Operation | Result | Notes |\n+================================+==================================+=======================+\n| ``s[i] = x`` | item *i* of *s* is replaced by | |\n| | *x* | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s[i:j] = t`` | slice of *s* from *i* to *j* is | |\n| | replaced by the contents of the | |\n| | iterable *t* | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``del s[i:j]`` | same as ``s[i:j] = []`` | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s[i:j:k] = t`` | the elements of ``s[i:j:k]`` are | (1) |\n| | replaced by those of *t* | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``del s[i:j:k]`` | removes the elements of | |\n| | ``s[i:j:k]`` from the list | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.append(x)`` | same as ``s[len(s):len(s)] = | |\n| | [x]`` | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.extend(x)`` | same as ``s[len(s):len(s)] = x`` | (2) |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.clear()`` | remove all items from ``s`` | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.copy()`` | return a shallow copy of ``s`` | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.count(x)`` | return number of *i*\'s for which | |\n| | ``s[i] == x`` | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.index(x[, i[, j]])`` | return smallest *k* such that | (3) |\n| | ``s[k] == x`` and ``i <= k < j`` | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.insert(i, x)`` | same as ``s[i:i] = [x]`` | (4) |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.pop([i])`` | same as ``x = s[i]; del s[i]; | (5) |\n| | return x`` | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.remove(x)`` | same as ``del s[s.index(x)]`` | (3) |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.reverse()`` | reverses the items of *s* in | (6) |\n| | place | |\n+--------------------------------+----------------------------------+-----------------------+\n| ``s.sort([key[, reverse]])`` | sort the items of *s* in place | (6), (7), (8) |\n+--------------------------------+----------------------------------+-----------------------+\n\nNotes:\n\n1. *t* must have the same length as the slice it is replacing.\n\n2. *x* can be any iterable object.\n\n3. Raises ``ValueError`` when *x* is not found in *s*. When a negative\n index is passed as the second or third parameter to the ``index()``\n method, the sequence length is added, as for slice indices. 
If it\n is still negative, it is truncated to zero, as for slice indices.\n\n4. When a negative index is passed as the first parameter to the\n ``insert()`` method, the sequence length is added, as for slice\n indices. If it is still negative, it is truncated to zero, as for\n slice indices.\n\n5. The optional argument *i* defaults to ``-1``, so that by default\n the last item is removed and returned.\n\n6. The ``sort()`` and ``reverse()`` methods modify the sequence in\n place for economy of space when sorting or reversing a large\n sequence. To remind you that they operate by side effect, they\n don\'t return the sorted or reversed sequence.\n\n7. The ``sort()`` method takes optional arguments for controlling the\n comparisons. Each must be specified as a keyword argument.\n\n *key* specifies a function of one argument that is used to extract\n a comparison key from each list element: ``key=str.lower``. The\n default value is ``None``. Use ``functools.cmp_to_key()`` to\n convert an old-style *cmp* function to a *key* function.\n\n *reverse* is a boolean value. If set to ``True``, then the list\n elements are sorted as if each comparison were reversed.\n\n The ``sort()`` method is guaranteed to be stable. A sort is stable\n if it guarantees not to change the relative order of elements that\n compare equal --- this is helpful for sorting in multiple passes\n (for example, sort by department, then by salary grade).\n\n **CPython implementation detail:** While a list is being sorted,\n the effect of attempting to mutate, or even inspect, the list is\n undefined. The C implementation of Python makes the list appear\n empty for the duration, and raises ``ValueError`` if it can detect\n that the list has been mutated during a sort.\n\n8. ``sort()`` is not supported by ``bytearray`` objects.\n\n New in version 3.3: ``clear()`` and ``copy()`` methods.\n', 'unary': '\nUnary arithmetic and bitwise operations\n***************************************\n\nAll unary arithmetic and bitwise operations have the same priority:\n\n u_expr ::= power | "-" u_expr | "+" u_expr | "~" u_expr\n\nThe unary ``-`` (minus) operator yields the negation of its numeric\nargument.\n\nThe unary ``+`` (plus) operator yields its numeric argument unchanged.\n\nThe unary ``~`` (invert) operator yields the bitwise inversion of its\ninteger argument. The bitwise inversion of ``x`` is defined as\n``-(x+1)``. It only applies to integral numbers.\n\nIn all three cases, if the argument does not have the proper type, a\n``TypeError`` exception is raised.\n', 'while': '\nThe ``while`` statement\n***********************\n\nThe ``while`` statement is used for repeated execution as long as an\nexpression is true:\n\n while_stmt ::= "while" expression ":" suite\n ["else" ":" suite]\n\nThis repeatedly tests the expression and, if it is true, executes the\nfirst suite; if the expression is false (which may be the first time\nit is tested) the suite of the ``else`` clause, if present, is\nexecuted and the loop terminates.\n\nA ``break`` statement executed in the first suite terminates the loop\nwithout executing the ``else`` clause\'s suite. 
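A minimal editor's sketch of the ``while``/``else`` interaction with ``break`` described above (added for illustration only):

   n = 3
   while n > 0:
       n -= 1
       if n == 1:
           break          # leaving the loop this way skips the else clause
   else:
       print('loop finished without break')   # not reached in this run

   # Removing the break (or never triggering it) lets the else suite run
   # once the while condition becomes false.
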
A ``continue``\nstatement executed in the first suite skips the rest of the suite and\ngoes back to testing the expression.\n', -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Sun Apr 1 13:48:28 2012 From: python-checkins at python.org (georg.brandl) Date: Sun, 01 Apr 2012 13:48:28 +0200 Subject: [Python-checkins] =?utf8?q?cpython=3A_Fix_markup_errors_found_by_?= =?utf8?q?=22make_suspicious=22=2E?= Message-ID: http://hg.python.org/cpython/rev/6fce4022f86b changeset: 76044:6fce4022f86b user: Georg Brandl date: Sun Apr 01 13:48:26 2012 +0200 summary: Fix markup errors found by "make suspicious". files: Doc/library/unittest.mock-examples.rst | 2 +- Doc/library/unittest.mock.rst | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Doc/library/unittest.mock-examples.rst b/Doc/library/unittest.mock-examples.rst --- a/Doc/library/unittest.mock-examples.rst +++ b/Doc/library/unittest.mock-examples.rst @@ -39,7 +39,7 @@ Once the mock has been called its :attr:`~Mock.called` attribute is set to `True`. More importantly we can use the :meth:`~Mock.assert_called_with` or -:meth`~Mock.assert_called_once_with` method to check that it was called with +:meth:`~Mock.assert_called_once_with` method to check that it was called with the correct arguments. This example tests that calling `ProductionClass().method` results in a call to diff --git a/Doc/library/unittest.mock.rst b/Doc/library/unittest.mock.rst --- a/Doc/library/unittest.mock.rst +++ b/Doc/library/unittest.mock.rst @@ -1712,9 +1712,9 @@ .. function:: call(*args, **kwargs) - `call` is a helper object for making simpler assertions, for comparing - with :attr:`~Mock.call_args`, :attr:`~Mock.call_args_list`, - :attr:`~Mock.mock_calls` and :attr: `~Mock.method_calls`. `call` can also be + `call` is a helper object for making simpler assertions, for comparing with + :attr:`~Mock.call_args`, :attr:`~Mock.call_args_list`, + :attr:`~Mock.mock_calls` and :attr:`~Mock.method_calls`. `call` can also be used with :meth:`~Mock.assert_has_calls`. >>> m = MagicMock(return_value=None) -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Sun Apr 1 13:49:38 2012 From: python-checkins at python.org (georg.brandl) Date: Sun, 01 Apr 2012 13:49:38 +0200 Subject: [Python-checkins] =?utf8?q?cpython=3A_Bump_to_3=2E3=2E0a2=2E?= Message-ID: http://hg.python.org/cpython/rev/2f69db52d6de changeset: 76045:2f69db52d6de tag: v3.3.0a2 user: Georg Brandl date: Sun Apr 01 13:49:21 2012 +0200 summary: Bump to 3.3.0a2. files: Include/patchlevel.h | 4 ++-- Lib/distutils/__init__.py | 2 +- Lib/idlelib/idlever.py | 2 +- Misc/NEWS | 2 +- Misc/RPM/python-3.3.spec | 2 +- README | 2 +- 6 files changed, 7 insertions(+), 7 deletions(-) diff --git a/Include/patchlevel.h b/Include/patchlevel.h --- a/Include/patchlevel.h +++ b/Include/patchlevel.h @@ -20,10 +20,10 @@ #define PY_MINOR_VERSION 3 #define PY_MICRO_VERSION 0 #define PY_RELEASE_LEVEL PY_RELEASE_LEVEL_ALPHA -#define PY_RELEASE_SERIAL 1 +#define PY_RELEASE_SERIAL 2 /* Version as a string */ -#define PY_VERSION "3.3.0a1+" +#define PY_VERSION "3.3.0a2" /*--end constants--*/ /* Version as a single 4-byte hex number, e.g. 0x010502B2 == 1.5.2b2. diff --git a/Lib/distutils/__init__.py b/Lib/distutils/__init__.py --- a/Lib/distutils/__init__.py +++ b/Lib/distutils/__init__.py @@ -13,5 +13,5 @@ # Updated automatically by the Python release process. 
# #--start constants-- -__version__ = "3.3.0a1" +__version__ = "3.3.0a2" #--end constants-- diff --git a/Lib/idlelib/idlever.py b/Lib/idlelib/idlever.py --- a/Lib/idlelib/idlever.py +++ b/Lib/idlelib/idlever.py @@ -1,1 +1,1 @@ -IDLE_VERSION = "3.3.0a1" +IDLE_VERSION = "3.3.0a2" diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -5,7 +5,7 @@ What's New in Python 3.3.0 Alpha 2? =================================== -*Release date: XXXX-XX-XX* +*Release date: 01-Apr-2012* Core and Builtins ----------------- diff --git a/Misc/RPM/python-3.3.spec b/Misc/RPM/python-3.3.spec --- a/Misc/RPM/python-3.3.spec +++ b/Misc/RPM/python-3.3.spec @@ -39,7 +39,7 @@ %define name python #--start constants-- -%define version 3.3.0a1 +%define version 3.3.0a2 %define libvers 3.3 #--end constants-- %define release 1pydotorg diff --git a/README b/README --- a/README +++ b/README @@ -1,4 +1,4 @@ -This is Python version 3.3.0 alpha 1 +This is Python version 3.3.0 alpha 2 ==================================== Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Sun Apr 1 13:49:39 2012 From: python-checkins at python.org (georg.brandl) Date: Sun, 01 Apr 2012 13:49:39 +0200 Subject: [Python-checkins] =?utf8?q?cpython=3A_Added_tag_v3=2E3=2E0a2_for_?= =?utf8?q?changeset_2f69db52d6de?= Message-ID: http://hg.python.org/cpython/rev/90bc4b367fcb changeset: 76046:90bc4b367fcb user: Georg Brandl date: Sun Apr 01 13:49:38 2012 +0200 summary: Added tag v3.3.0a2 for changeset 2f69db52d6de files: .hgtags | 1 + 1 files changed, 1 insertions(+), 0 deletions(-) diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -97,3 +97,4 @@ 7085403daf439adb3f9e70ef13f6bedb1c447376 v3.2.3rc1 428f05cb7277e1d42bb9dd8d1af6b6270ebc6112 v3.2.3rc2 f1a9a6505731714f0e157453ff850e3b71615c45 v3.3.0a1 +2f69db52d6de306cdaef0a0cc00cc823fb350b01 v3.3.0a2 -- Repository URL: http://hg.python.org/cpython From kristjan at ccpgames.com Sun Apr 1 14:31:57 2012 From: kristjan at ccpgames.com (=?iso-8859-1?Q?Kristj=E1n_Valur_J=F3nsson?=) Date: Sun, 1 Apr 2012 12:31:57 +0000 Subject: [Python-checkins] cpython: Issue #14435: Add Misc/NEWS and Misc/ACKS In-Reply-To: References: , Message-ID: Wishing to cause minimal disruption, I actually read http://docs.python.org/devguide/committing.html where this file is mentioned as part of the commit checklist. Never knew it existed before. K ________________________________________ Fr?: python-checkins-bounces+kristjan=ccpgames.com at python.org [python-checkins-bounces+kristjan=ccpgames.com at python.org] fyrir hönd Nick Coghlan [ncoghlan at gmail.com] Sent: 1. apr?l 2012 03:33 To: python-dev at python.org Cc: python-checkins at python.org Efni: Re: [Python-checkins] cpython: Issue #14435: Add Misc/NEWS and Misc/ACKS On Sat, Mar 31, 2012 at 11:10 PM, kristjan.jonsson wrote: > diff --git a/Misc/ACKS b/Misc/ACKS > --- a/Misc/ACKS > +++ b/Misc/ACKS > @@ -507,6 +507,7 @@ > Richard Jones > Irmen de Jong > Lucas de Jonge > +Kristj?n Valur J?nsson > Jens B. Jorgensen > John Jorgensen > Sijin Joseph *blinks* This must have been one of those cases where everyone assumed your name was already there and never thought to check... Cheers, Nick. 
-- Nick Coghlan | ncoghlan at gmail.com | Brisbane, Australia _______________________________________________ Python-checkins mailing list Python-checkins at python.org http://mail.python.org/mailman/listinfo/python-checkins From python-checkins at python.org Sun Apr 1 16:14:42 2012 From: python-checkins at python.org (antoine.pitrou) Date: Sun, 01 Apr 2012 16:14:42 +0200 Subject: [Python-checkins] =?utf8?b?Y3B5dGhvbiAoMy4yKTogSXNzdWUgIzEzMDE5?= =?utf8?q?=3A_Fix_potential_reference_leaks_in_bytearray=2Eextend=28=29=2E?= Message-ID: http://hg.python.org/cpython/rev/03396c9ffe8c changeset: 76047:03396c9ffe8c branch: 3.2 parent: 76037:0a4a6f98bd8e user: Antoine Pitrou date: Sun Apr 01 16:05:46 2012 +0200 summary: Issue #13019: Fix potential reference leaks in bytearray.extend(). Patch by Suman Saha. files: Misc/NEWS | 3 +++ Objects/bytearrayobject.c | 8 ++++++-- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -10,6 +10,9 @@ Core and Builtins ----------------- +- Issue #13019: Fix potential reference leaks in bytearray.extend(). Patch + by Suman Saha. + - Issue #14378: Fix compiling ast.ImportFrom nodes with a "__future__" string as the module name that was not interned. diff --git a/Objects/bytearrayobject.c b/Objects/bytearrayobject.c --- a/Objects/bytearrayobject.c +++ b/Objects/bytearrayobject.c @@ -2234,8 +2234,10 @@ } bytearray_obj = PyByteArray_FromStringAndSize(NULL, buf_size); - if (bytearray_obj == NULL) + if (bytearray_obj == NULL) { + Py_DECREF(it); return NULL; + } buf = PyByteArray_AS_STRING(bytearray_obj); while ((item = PyIter_Next(it)) != NULL) { @@ -2268,8 +2270,10 @@ return NULL; } - if (bytearray_setslice(self, Py_SIZE(self), Py_SIZE(self), bytearray_obj) == -1) + if (bytearray_setslice(self, Py_SIZE(self), Py_SIZE(self), bytearray_obj) == -1) { + Py_DECREF(bytearray_obj); return NULL; + } Py_DECREF(bytearray_obj); Py_RETURN_NONE; -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Sun Apr 1 16:14:43 2012 From: python-checkins at python.org (antoine.pitrou) Date: Sun, 01 Apr 2012 16:14:43 +0200 Subject: [Python-checkins] =?utf8?q?cpython_=28merge_3=2E2_-=3E_default=29?= =?utf8?q?=3A_Issue_=2313019=3A_Fix_potential_reference_leaks_in_bytearray?= =?utf8?b?LmV4dGVuZCgpLg==?= Message-ID: http://hg.python.org/cpython/rev/015c546615ca changeset: 76048:015c546615ca parent: 76046:90bc4b367fcb parent: 76047:03396c9ffe8c user: Antoine Pitrou date: Sun Apr 01 16:08:11 2012 +0200 summary: Issue #13019: Fix potential reference leaks in bytearray.extend(). Patch by Suman Saha. files: Misc/NEWS | 3 +++ Objects/bytearrayobject.c | 8 ++++++-- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -10,6 +10,9 @@ Core and Builtins ----------------- +- Issue #13019: Fix potential reference leaks in bytearray.extend(). Patch + by Suman Saha. + - Issue #1683368: object.__new__ and object.__init__ raise a TypeError if they are passed arguments and their complementary method is not overridden. 
diff --git a/Objects/bytearrayobject.c b/Objects/bytearrayobject.c --- a/Objects/bytearrayobject.c +++ b/Objects/bytearrayobject.c @@ -2289,8 +2289,10 @@ } bytearray_obj = PyByteArray_FromStringAndSize(NULL, buf_size); - if (bytearray_obj == NULL) + if (bytearray_obj == NULL) { + Py_DECREF(it); return NULL; + } buf = PyByteArray_AS_STRING(bytearray_obj); while ((item = PyIter_Next(it)) != NULL) { @@ -2323,8 +2325,10 @@ return NULL; } - if (bytearray_setslice(self, Py_SIZE(self), Py_SIZE(self), bytearray_obj) == -1) + if (bytearray_setslice(self, Py_SIZE(self), Py_SIZE(self), bytearray_obj) == -1) { + Py_DECREF(bytearray_obj); return NULL; + } Py_DECREF(bytearray_obj); Py_RETURN_NONE; -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Sun Apr 1 16:17:11 2012 From: python-checkins at python.org (antoine.pitrou) Date: Sun, 01 Apr 2012 16:17:11 +0200 Subject: [Python-checkins] =?utf8?b?Y3B5dGhvbiAoMi43KTogSXNzdWUgIzEzMDE5?= =?utf8?q?=3A_Fix_potential_reference_leaks_in_bytearray=2Eextend=28=29=2E?= Message-ID: http://hg.python.org/cpython/rev/d3a82a26c705 changeset: 76049:d3a82a26c705 branch: 2.7 parent: 76036:3623c3e6c049 user: Antoine Pitrou date: Sun Apr 01 16:05:46 2012 +0200 summary: Issue #13019: Fix potential reference leaks in bytearray.extend(). Patch by Suman Saha. files: Misc/NEWS | 3 +++ Objects/bytearrayobject.c | 8 ++++++-- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -9,6 +9,9 @@ Core and Builtins ----------------- +- Issue #13019: Fix potential reference leaks in bytearray.extend(). Patch + by Suman Saha. + - Issue #14378: Fix compiling ast.ImportFrom nodes with a "__future__" string as the module name that was not interned. diff --git a/Objects/bytearrayobject.c b/Objects/bytearrayobject.c --- a/Objects/bytearrayobject.c +++ b/Objects/bytearrayobject.c @@ -2296,8 +2296,10 @@ } bytearray_obj = PyByteArray_FromStringAndSize(NULL, buf_size); - if (bytearray_obj == NULL) + if (bytearray_obj == NULL) { + Py_DECREF(it); return NULL; + } buf = PyByteArray_AS_STRING(bytearray_obj); while ((item = PyIter_Next(it)) != NULL) { @@ -2330,8 +2332,10 @@ return NULL; } - if (bytearray_setslice(self, Py_SIZE(self), Py_SIZE(self), bytearray_obj) == -1) + if (bytearray_setslice(self, Py_SIZE(self), Py_SIZE(self), bytearray_obj) == -1) { + Py_DECREF(bytearray_obj); return NULL; + } Py_DECREF(bytearray_obj); Py_RETURN_NONE; -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Sun Apr 1 16:41:47 2012 From: python-checkins at python.org (eli.bendersky) Date: Sun, 01 Apr 2012 16:41:47 +0200 Subject: [Python-checkins] =?utf8?q?cpython=3A_Removing_the_test_of_Elemen?= =?utf8?q?t_that_causes_ref-leak_in_GC_=28issue_=2314464=29=2E?= Message-ID: http://hg.python.org/cpython/rev/c5cf48752d81 changeset: 76050:c5cf48752d81 parent: 76048:015c546615ca user: Eli Bendersky date: Sun Apr 01 17:40:17 2012 +0300 summary: Removing the test of Element that causes ref-leak in GC (issue #14464). I will now continue investigating the cause of the ref-leak, but I wanted to remove the test so that the refcount test in the buildbots could be clean. The whole change (adding GC to Element) is not reverted because it improved the situation (GC works for immediate cycles) and didn't cause regressions (the test is new and was added together with the fix). 
files: Lib/test/test_xml_etree.py | 10 ---------- 1 files changed, 0 insertions(+), 10 deletions(-) diff --git a/Lib/test/test_xml_etree.py b/Lib/test/test_xml_etree.py --- a/Lib/test/test_xml_etree.py +++ b/Lib/test/test_xml_etree.py @@ -1859,16 +1859,6 @@ gc_collect() self.assertIsNone(wref()) - # A longer cycle: d->e->e2->d - e = ET.Element('joe') - d = Dummy() - d.dummyref = e - wref = weakref.ref(d) - e2 = ET.SubElement(e, 'foo', attr=d) - del d, e, e2 - gc_collect() - self.assertIsNone(wref()) - class ElementTreeTest(unittest.TestCase): def test_istype(self): -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Sun Apr 1 17:30:58 2012 From: python-checkins at python.org (antoine.pitrou) Date: Sun, 01 Apr 2012 17:30:58 +0200 Subject: [Python-checkins] =?utf8?b?Y3B5dGhvbiAoMy4yKTogSXNzdWUgIzE0MTUx?= =?utf8?q?=3A_Raise_a_ValueError=2C_not_a_NameError=2C_when_trying_to_crea?= =?utf8?q?te?= Message-ID: http://hg.python.org/cpython/rev/273d7502ced1 changeset: 76051:273d7502ced1 branch: 3.2 parent: 76047:03396c9ffe8c user: Antoine Pitrou date: Sun Apr 01 17:19:09 2012 +0200 summary: Issue #14151: Raise a ValueError, not a NameError, when trying to create a multiprocessing Client or Listener with an AF_PIPE type address under non-Windows platforms. Patch by Popa Claudiu. files: Lib/multiprocessing/connection.py | 9 +++++++++ Lib/test/test_multiprocessing.py | 14 +++++++++++++- Misc/NEWS | 4 ++++ 3 files changed, 26 insertions(+), 1 deletions(-) diff --git a/Lib/multiprocessing/connection.py b/Lib/multiprocessing/connection.py --- a/Lib/multiprocessing/connection.py +++ b/Lib/multiprocessing/connection.py @@ -94,6 +94,13 @@ else: raise ValueError('unrecognized family') +def _validate_family(family): + ''' + Checks if the family is valid for the current environment. + ''' + if sys.platform != 'win32' and family == 'AF_PIPE': + raise ValueError('Family %s is not recognized.' % family) + def address_type(address): ''' @@ -126,6 +133,7 @@ or default_family address = address or arbitrary_address(family) + _validate_family(family) if family == 'AF_PIPE': self._listener = PipeListener(address, backlog) else: @@ -163,6 +171,7 @@ Returns a connection to the address of a `Listener` ''' family = family or address_type(address) + _validate_family(family) if family == 'AF_PIPE': c = PipeClient(address) else: diff --git a/Lib/test/test_multiprocessing.py b/Lib/test/test_multiprocessing.py --- a/Lib/test/test_multiprocessing.py +++ b/Lib/test/test_multiprocessing.py @@ -2319,8 +2319,20 @@ flike.flush() assert sio.getvalue() == 'foo' + +# +# Issue 14151: Test invalid family on invalid environment +# + +class TestInvalidFamily(unittest.TestCase): + + @unittest.skipIf(WIN32, "skipped on Windows") + def test_invalid_family(self): + with self.assertRaises(ValueError): + multiprocessing.connection.Listener(r'\\.\test') + testcases_other = [OtherTest, TestInvalidHandle, TestInitializers, - TestStdinBadfiledescriptor] + TestStdinBadfiledescriptor, TestInvalidFamily] # # diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -34,6 +34,10 @@ Library ------- +- Issue #14151: Raise a ValueError, not a NameError, when trying to create + a multiprocessing Client or Listener with an AF_PIPE type address under + non-Windows platforms. Patch by Popa Claudiu. + - Issue #13872: socket.detach() now marks the socket closed (as mirrored in the socket repr()). Patch by Matt Joiner. 
-- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Sun Apr 1 17:30:59 2012 From: python-checkins at python.org (antoine.pitrou) Date: Sun, 01 Apr 2012 17:30:59 +0200 Subject: [Python-checkins] =?utf8?q?cpython_=28merge_3=2E2_-=3E_default=29?= =?utf8?q?=3A_Issue_=2314151=3A_Raise_a_ValueError=2C_not_a_NameError=2C_w?= =?utf8?q?hen_trying_to_create?= Message-ID: http://hg.python.org/cpython/rev/42b29aea1c98 changeset: 76052:42b29aea1c98 parent: 76050:c5cf48752d81 parent: 76051:273d7502ced1 user: Antoine Pitrou date: Sun Apr 01 17:25:49 2012 +0200 summary: Issue #14151: Raise a ValueError, not a NameError, when trying to create a multiprocessing Client or Listener with an AF_PIPE type address under non-Windows platforms. Patch by Popa Claudiu. files: Lib/multiprocessing/connection.py | 9 +++++++++ Lib/test/test_multiprocessing.py | 14 +++++++++++++- Misc/NEWS | 4 ++++ 3 files changed, 26 insertions(+), 1 deletions(-) diff --git a/Lib/multiprocessing/connection.py b/Lib/multiprocessing/connection.py --- a/Lib/multiprocessing/connection.py +++ b/Lib/multiprocessing/connection.py @@ -104,6 +104,13 @@ else: raise ValueError('unrecognized family') +def _validate_family(family): + ''' + Checks if the family is valid for the current environment. + ''' + if sys.platform != 'win32' and family == 'AF_PIPE': + raise ValueError('Family %s is not recognized.' % family) + def address_type(address): ''' @@ -436,6 +443,7 @@ or default_family address = address or arbitrary_address(family) + _validate_family(family) if family == 'AF_PIPE': self._listener = PipeListener(address, backlog) else: @@ -473,6 +481,7 @@ Returns a connection to the address of a `Listener` ''' family = family or address_type(address) + _validate_family(family) if family == 'AF_PIPE': c = PipeClient(address) else: diff --git a/Lib/test/test_multiprocessing.py b/Lib/test/test_multiprocessing.py --- a/Lib/test/test_multiprocessing.py +++ b/Lib/test/test_multiprocessing.py @@ -2638,8 +2638,20 @@ p.join() +# +# Issue 14151: Test invalid family on invalid environment +# + +class TestInvalidFamily(unittest.TestCase): + + @unittest.skipIf(WIN32, "skipped on Windows") + def test_invalid_family(self): + with self.assertRaises(ValueError): + multiprocessing.connection.Listener(r'\\.\test') + + testcases_other = [OtherTest, TestInvalidHandle, TestInitializers, - TestStdinBadfiledescriptor, TestWait] + TestStdinBadfiledescriptor, TestWait, TestInvalidFamily] # # diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -40,6 +40,10 @@ Library ------- +- Issue #14151: Raise a ValueError, not a NameError, when trying to create + a multiprocessing Client or Listener with an AF_PIPE type address under + non-Windows platforms. Patch by Popa Claudiu. + - Issue #14300: Under Windows, sockets created using socket.dup() now allow overlapped I/O. Patch by sbt. -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Sun Apr 1 18:32:09 2012 From: python-checkins at python.org (ross.lagerwall) Date: Sun, 01 Apr 2012 18:32:09 +0200 Subject: [Python-checkins] =?utf8?q?devguide=3A_Issue_14467=3A_Simplify_Au?= =?utf8?q?toconf_section_and_move_it_to_FAQ=2E?= Message-ID: http://hg.python.org/devguide/rev/27be97280cff changeset: 499:27be97280cff user: Ross Lagerwall date: Sun Apr 01 18:31:12 2012 +0200 summary: Issue 14467: Simplify Autoconf section and move it to FAQ. 
files: faq.rst | 32 ++++++++++++++++++++++++ patch.rst | 56 ------------------------------------------- 2 files changed, 32 insertions(+), 56 deletions(-) diff --git a/faq.rst b/faq.rst --- a/faq.rst +++ b/faq.rst @@ -853,3 +853,35 @@ on which your keys are stored or the machine on which your keys are stored, be sure to report this to pydotorg at python.org at the same time that you change your keys. + + +General +======= + +How do I regenerate configure? +------------------------------ + +If a change is made to Python which relies on some POSIX system-specific +functionality (such as using a new system call), it is necessary to update the +``configure`` script to test for availability of the functionality. + +Python's ``configure`` script is generated from ``configure.ac`` using Autoconf. +Instead of editing ``configure``, edit ``configure.ac`` and then run +``autoreconf`` to regenerate ``configure`` and a number of other files (such as +``pyconfig.h``. + +When submitting a patch with changes made to ``configure.ac``, it is preferred +to leave out the generated files as differences between Autoconf versions +frequently results in many spurious changes cluttering the patch. Instead, +remind any potential reviewers on the tracker to run ``autoreconf``. + +Note that running ``autoreconf`` is not the same as running ``autoconf``. For +example, ``autoconf`` by itself will not regenerate ``pyconfig.h.in``. +``autoreconf`` runs ``autoconf`` and a number of other tools repeatedly as is +appropriate. + +Python's ``configure.ac`` script typically requires a specific version of +Autoconf. At the moment, this reads: ``version_required(2.65)`` + +If the system copy of Autoconf does not match this version, you will need to +install your own copy of Autoconf. diff --git a/patch.rst b/patch.rst --- a/patch.rst +++ b/patch.rst @@ -137,62 +137,6 @@ Also, please make sure your patch is whitespace normalized. ``patchcheck`` will check this for you. -Autoconf -'''''''' - -If a change is made to Python which relies on some POSIX system-specific -functionality (such as using a new system call), it is necessary to update the -``configure`` script to test for availability of the functionality. - -Python's ``configure`` script is generated from ``configure.ac`` using Autoconf. -Instead of editing ``configure``, edit ``configure.ac`` and then run -``autoreconf`` to regenerate ``configure`` and a number of other files (such as -``pyconfig.h``. - -When submitting a patch with changes made to ``configure.ac``, it is preferred -to leave out the generated files as differences between Autoconf versions -frequently results in many spurious changes cluttering the patch. Instead, -remind any potential reviewers on the tracker to run ``autoreconf``. - -Note that running ``autoreconf`` is not the same as running ``autoconf``. For -example, ``autoconf`` by itself will not regenerate ``pyconfig.h.in``. -``autoreconf`` runs ``autoconf`` and a number of other tools repeatedly as is -appropriate. - -Python's ``configure.ac`` script typically requires a specific version of -Autoconf. At the moment, this reads: ``version_required(2.65)`` - -If the system copy of Autoconf does not match this version, you will need to -install your own copy of Autoconf: - -1. Go to http://ftp.gnu.org/gnu/autoconf/ and download the version of Autoconf - matching the one in ``configure.ac``:: - - wget http://ftp.gnu.org/gnu/autoconf/autoconf-2.65.tar.bz2 - -2. Unpack the tarball:: - - tar -jxf autoconf-2.65.tar.bz2 - -3. 
Build the specified version of Autoconf and install it to a writable location - (e.g. within your home directory):: - - pushd autoconf-2.65.tar.bz2 - ./configure --prefix=$HOME/autoconf-2.65 - make - make install - - This installs a copy of the appropriate version of Autoconf into - ~/autoconf-2.65. - -4. Go back to the Python source and rerun ``autoreconf``, pointing ``PATH`` at - the newly installed copy of Autoconf:: - - popd - PATH=~/autoconf-2.65/bin:$PATH autoreconf - -5. Autoconf should now have updated your local copy of ``configure`` to reflect - your changes. Submitting ---------- -- Repository URL: http://hg.python.org/devguide
From python-checkins at python.org Sun Apr 1 19:55:28 2012 From: python-checkins at python.org (martin.v.loewis) Date: Sun, 01 Apr 2012 19:55:28 +0200 Subject: [Python-checkins] =?utf8?q?cpython=3A_Add_MASM_define_to_PGI_and_?= =?utf8?q?PGO_builds?= Message-ID: http://hg.python.org/cpython/rev/a8cacedf2ffc changeset: 76053:a8cacedf2ffc user: Martin v. Löwis date: Sun Apr 01 19:54:33 2012 +0200 summary: Add MASM define to PGI and PGO builds files: PCbuild/_decimal.vcproj | 1486 +++++++++++++------------- 1 files changed, 743 insertions(+), 743 deletions(-) diff --git a/PCbuild/_decimal.vcproj b/PCbuild/_decimal.vcproj --- a/PCbuild/_decimal.vcproj +++ b/PCbuild/_decimal.vcproj @@ -1,743 +1,743 @@ [vcproj XML content not preserved in this archive] -- Repository URL: http://hg.python.org/cpython
From python-checkins at python.org Sun Apr 1 19:56:21 2012 From: python-checkins at python.org (martin.v.loewis) Date: Sun, 01 Apr 2012 19:56:21 +0200 Subject: [Python-checkins] =?utf8?q?cpython=3A_Add_=5Fdecimal_and_=5Ftestb?= =?utf8?q?uffer_modules=2E?= Message-ID: http://hg.python.org/cpython/rev/7ec7623b27ec changeset: 76054:7ec7623b27ec user: Martin v. Löwis date: Sun Apr 01 19:55:48 2012 +0200 summary: Add _decimal and _testbuffer modules.
files: Tools/msi/msi.py | 4 +++- 1 files changed, 3 insertions(+), 1 deletions(-) diff --git a/Tools/msi/msi.py b/Tools/msi/msi.py --- a/Tools/msi/msi.py +++ b/Tools/msi/msi.py @@ -97,7 +97,9 @@ '_sqlite3.pyd', '_hashlib.pyd', '_multiprocessing.pyd', - '_lzma.pyd' + '_lzma.pyd', + '_decimal.pyd', + '_testbuffer.pyd' ] # Well-known component UUIDs -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Sun Apr 1 23:27:04 2012 From: python-checkins at python.org (stefan.krah) Date: Sun, 01 Apr 2012 23:27:04 +0200 Subject: [Python-checkins] =?utf8?q?cpython=3A_Fix_Overflow_exception_in_t?= =?utf8?q?he_bignum_factorial_benchmark_that_is_due_to?= Message-ID: http://hg.python.org/cpython/rev/1f5701ffa077 changeset: 76055:1f5701ffa077 user: Stefan Krah date: Sun Apr 01 23:25:34 2012 +0200 summary: Fix Overflow exception in the bignum factorial benchmark that is due to the recent change of the default value for context.Emax. files: Modules/_decimal/tests/bench.py | 5 ++++- 1 files changed, 4 insertions(+), 1 deletions(-) diff --git a/Modules/_decimal/tests/bench.py b/Modules/_decimal/tests/bench.py --- a/Modules/_decimal/tests/bench.py +++ b/Modules/_decimal/tests/bench.py @@ -84,7 +84,10 @@ print("# Factorial") print("# ======================================================================\n") -C.getcontext().prec = C.MAX_PREC +c = C.getcontext() +c.prec = C.MAX_PREC +c.Emax = C.MAX_EMAX +c.Emin = C.MIN_EMIN for n in [100000, 1000000]: -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Mon Apr 2 00:50:51 2012 From: python-checkins at python.org (benjamin.peterson) Date: Mon, 02 Apr 2012 00:50:51 +0200 Subject: [Python-checkins] =?utf8?q?cpython_=283=2E2=29=3A_be_consistent_w?= =?utf8?q?ith_rest_of_function?= Message-ID: http://hg.python.org/cpython/rev/5bb0acb69921 changeset: 76056:5bb0acb69921 branch: 3.2 parent: 75941:66117d4bb586 user: Benjamin Peterson date: Sun Apr 01 18:48:02 2012 -0400 summary: be consistent with rest of function files: Objects/typeobject.c | 3 +-- 1 files changed, 1 insertions(+), 2 deletions(-) diff --git a/Objects/typeobject.c b/Objects/typeobject.c --- a/Objects/typeobject.c +++ b/Objects/typeobject.c @@ -475,9 +475,8 @@ new_base = best_base(value); - if (!new_base) { + if (!new_base) return -1; - } if (!compatible_for_assignment(type->tp_base, new_base, "__bases__")) return -1; -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Mon Apr 2 00:50:52 2012 From: python-checkins at python.org (benjamin.peterson) Date: Mon, 02 Apr 2012 00:50:52 +0200 Subject: [Python-checkins] =?utf8?b?Y3B5dGhvbiAobWVyZ2UgMy4yIC0+IDMuMik6?= =?utf8?q?_merge_heads?= Message-ID: http://hg.python.org/cpython/rev/6b3713113ec9 changeset: 76057:6b3713113ec9 branch: 3.2 parent: 76056:5bb0acb69921 parent: 76051:273d7502ced1 user: Benjamin Peterson date: Sun Apr 01 18:48:11 2012 -0400 summary: merge heads files: Doc/glossary.rst | 2 +- Doc/library/argparse.rst | 9 +- Doc/library/logging.handlers.rst | 2 +- Doc/library/signal.rst | 78 +++++++++------- Doc/library/syslog.rst | 3 +- Doc/library/unittest.rst | 2 +- Doc/library/webbrowser.rst | 2 + Lib/concurrent/futures/_base.py | 8 +- Lib/idlelib/NEWS.txt | 5 + Lib/idlelib/configHandler.py | 2 +- Lib/logging/handlers.py | 15 ++- Lib/multiprocessing/connection.py | 9 + Lib/pydoc.py | 4 +- Lib/rlcompleter.py | 36 +++---- Lib/socket.py | 11 ++ Lib/test/test_concurrent_futures.py | 18 +++- Lib/test/test_multiprocessing.py | 14 ++- Lib/test/test_socket.py | 1 + 
Misc/NEWS | 20 ++++ Modules/_io/_iomodule.h | 2 +- Modules/python.c | 6 +- Objects/bytearrayobject.c | 8 +- 22 files changed, 178 insertions(+), 79 deletions(-) diff --git a/Doc/glossary.rst b/Doc/glossary.rst --- a/Doc/glossary.rst +++ b/Doc/glossary.rst @@ -385,7 +385,7 @@ :meth:`str.lower` method can serve as a key function for case insensitive sorts. Alternatively, an ad-hoc key function can be built from a :keyword:`lambda` expression such as ``lambda r: (r[0], r[2])``. Also, - the :mod:`operator` module provides three key function constuctors: + the :mod:`operator` module provides three key function constructors: :func:`~operator.attrgetter`, :func:`~operator.itemgetter`, and :func:`~operator.methodcaller`. See the :ref:`Sorting HOW TO ` for examples of how to create and use key functions. diff --git a/Doc/library/argparse.rst b/Doc/library/argparse.rst --- a/Doc/library/argparse.rst +++ b/Doc/library/argparse.rst @@ -1642,8 +1642,8 @@ --bar BAR bar help - Note that any arguments not your user defined groups will end up back in the - usual "positional arguments" and "optional arguments" sections. + Note that any arguments not in your user-defined groups will end up back + in the usual "positional arguments" and "optional arguments" sections. Mutual exclusion @@ -1833,9 +1833,10 @@ * Replace all :meth:`optparse.OptionParser.add_option` calls with :meth:`ArgumentParser.add_argument` calls. -* Replace ``options, args = parser.parse_args()`` with ``args = +* Replace ``(options, args) = parser.parse_args()`` with ``args = parser.parse_args()`` and add additional :meth:`ArgumentParser.add_argument` - calls for the positional arguments. + calls for the positional arguments. Keep in mind that what was previously + called ``options``, now in :mod:`argparse` context is called ``args``. * Replace callback actions and the ``callback_*`` keyword arguments with ``type`` or ``action`` arguments. diff --git a/Doc/library/logging.handlers.rst b/Doc/library/logging.handlers.rst --- a/Doc/library/logging.handlers.rst +++ b/Doc/library/logging.handlers.rst @@ -654,7 +654,7 @@ :class:`BufferingHandler`, which is an abstract class. This buffers logging records in memory. Whenever each record is added to the buffer, a check is made by calling :meth:`shouldFlush` to see if the buffer should be flushed. If it -should, then :meth:`flush` is expected to do the needful. +should, then :meth:`flush` is expected to do the flushing. .. class:: BufferingHandler(capacity) diff --git a/Doc/library/signal.rst b/Doc/library/signal.rst --- a/Doc/library/signal.rst +++ b/Doc/library/signal.rst @@ -5,46 +5,58 @@ :synopsis: Set handlers for asynchronous events. -This module provides mechanisms to use signal handlers in Python. Some general -rules for working with signals and their handlers: +This module provides mechanisms to use signal handlers in Python. -* A handler for a particular signal, once set, remains installed until it is - explicitly reset (Python emulates the BSD style interface regardless of the - underlying implementation), with the exception of the handler for - :const:`SIGCHLD`, which follows the underlying implementation. -* There is no way to "block" signals temporarily from critical sections (since - this is not supported by all Unix flavors). +General rules +------------- -* Although Python signal handlers are called asynchronously as far as the Python - user is concerned, they can only occur between the "atomic" instructions of the - Python interpreter. 
This means that signals arriving during long calculations - implemented purely in C (such as regular expression matches on large bodies of - text) may be delayed for an arbitrary amount of time. +The :func:`signal.signal` function allows to define custom handlers to be +executed when a signal is received. A small number of default handlers are +installed: :const:`SIGPIPE` is ignored (so write errors on pipes and sockets +can be reported as ordinary Python exceptions) and :const:`SIGINT` is +translated into a :exc:`KeyboardInterrupt` exception. -* When a signal arrives during an I/O operation, it is possible that the I/O - operation raises an exception after the signal handler returns. This is - dependent on the underlying Unix system's semantics regarding interrupted system - calls. +A handler for a particular signal, once set, remains installed until it is +explicitly reset (Python emulates the BSD style interface regardless of the +underlying implementation), with the exception of the handler for +:const:`SIGCHLD`, which follows the underlying implementation. -* Because the C signal handler always returns, it makes little sense to catch - synchronous errors like :const:`SIGFPE` or :const:`SIGSEGV`. +There is no way to "block" signals temporarily from critical sections (since +this is not supported by all Unix flavors). -* Python installs a small number of signal handlers by default: :const:`SIGPIPE` - is ignored (so write errors on pipes and sockets can be reported as ordinary - Python exceptions) and :const:`SIGINT` is translated into a - :exc:`KeyboardInterrupt` exception. All of these can be overridden. -* Some care must be taken if both signals and threads are used in the same - program. The fundamental thing to remember in using signals and threads - simultaneously is: always perform :func:`signal` operations in the main thread - of execution. Any thread can perform an :func:`alarm`, :func:`getsignal`, - :func:`pause`, :func:`setitimer` or :func:`getitimer`; only the main thread - can set a new signal handler, and the main thread will be the only one to - receive signals (this is enforced by the Python :mod:`signal` module, even - if the underlying thread implementation supports sending signals to - individual threads). This means that signals can't be used as a means of - inter-thread communication. Use locks instead. +Execution of Python signal handlers +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +A Python signal handler does not get executed inside the low-level (C) signal +handler. Instead, the low-level signal handler sets a flag which tells the +:term:`virtual machine` to execute the corresponding Python signal handler +at a later point(for example at the next :term:`bytecode` instruction). +This has consequences: + +* It makes little sense to catch synchronous errors like :const:`SIGFPE` or + :const:`SIGSEGV`. + +* A long-running calculation implemented purely in C (such as regular + expression matching on a large body of text) may run uninterrupted for an + arbitrary amount of time, regardless of any signals received. The Python + signal handlers will be called when the calculation finishes. + + +Signals and threads +^^^^^^^^^^^^^^^^^^^ + +Python signal handlers are always executed in the main Python thread, +even if the signal was received in another thread. This means that signals +can't be used as a means of inter-thread communication. You can use +the synchronization primitives from the :mod:`threading` module instead. 
+ +Besides, only the main thread is allowed to set a new signal handler. + + +Module contents +--------------- The variables defined in the :mod:`signal` module are: diff --git a/Doc/library/syslog.rst b/Doc/library/syslog.rst --- a/Doc/library/syslog.rst +++ b/Doc/library/syslog.rst @@ -78,7 +78,8 @@ Facilities: :const:`LOG_KERN`, :const:`LOG_USER`, :const:`LOG_MAIL`, :const:`LOG_DAEMON`, :const:`LOG_AUTH`, :const:`LOG_LPR`, :const:`LOG_NEWS`, :const:`LOG_UUCP`, - :const:`LOG_CRON` and :const:`LOG_LOCAL0` to :const:`LOG_LOCAL7`. + :const:`LOG_CRON`, :const:`LOG_SYSLOG` and :const:`LOG_LOCAL0` to + :const:`LOG_LOCAL7`. Log options: :const:`LOG_PID`, :const:`LOG_CONS`, :const:`LOG_NDELAY`, :const:`LOG_NOWAIT` diff --git a/Doc/library/unittest.rst b/Doc/library/unittest.rst --- a/Doc/library/unittest.rst +++ b/Doc/library/unittest.rst @@ -640,7 +640,7 @@ Classes can be skipped just like methods: :: - @skip("showing class skipping") + @unittest.skip("showing class skipping") class MySkippedTestCase(unittest.TestCase): def test_not_run(self): pass diff --git a/Doc/library/webbrowser.rst b/Doc/library/webbrowser.rst --- a/Doc/library/webbrowser.rst +++ b/Doc/library/webbrowser.rst @@ -137,6 +137,8 @@ +-----------------------+-----------------------------------------+-------+ | ``'macosx'`` | :class:`MacOSX('default')` | \(4) | +-----------------------+-----------------------------------------+-------+ +| ``'safari'`` | :class:`MacOSX('safari')` | \(4) | ++-----------------------+-----------------------------------------+-------+ Notes: diff --git a/Lib/concurrent/futures/_base.py b/Lib/concurrent/futures/_base.py --- a/Lib/concurrent/futures/_base.py +++ b/Lib/concurrent/futures/_base.py @@ -112,12 +112,14 @@ def __init__(self, num_pending_calls, stop_on_exception): self.num_pending_calls = num_pending_calls self.stop_on_exception = stop_on_exception + self.lock = threading.Lock() super().__init__() def _decrement_pending_calls(self): - self.num_pending_calls -= 1 - if not self.num_pending_calls: - self.event.set() + with self.lock: + self.num_pending_calls -= 1 + if not self.num_pending_calls: + self.event.set() def add_result(self, future): super().add_result(future) diff --git a/Lib/idlelib/NEWS.txt b/Lib/idlelib/NEWS.txt --- a/Lib/idlelib/NEWS.txt +++ b/Lib/idlelib/NEWS.txt @@ -1,6 +1,11 @@ What's New in IDLE 3.2.3? ========================= +- Issue #14409: IDLE now properly executes commands in the Shell window + when it cannot read the normal config files on startup and + has to use the built-in default key bindings. + There was previously a bug in one of the defaults. + - Issue #3573: IDLE hangs when passing invalid command line args (directory(ies) instead of file(s)). diff --git a/Lib/idlelib/configHandler.py b/Lib/idlelib/configHandler.py --- a/Lib/idlelib/configHandler.py +++ b/Lib/idlelib/configHandler.py @@ -595,7 +595,7 @@ '<>': [''], '<>': [''], '<>': [''], - '<>': [' '], + '<>': ['', ''], '<>': [''], '<>': [''], '<>': [''], diff --git a/Lib/logging/handlers.py b/Lib/logging/handlers.py --- a/Lib/logging/handlers.py +++ b/Lib/logging/handlers.py @@ -519,11 +519,16 @@ """ ei = record.exc_info if ei: - dummy = self.format(record) # just to get traceback text into record.exc_text - record.exc_info = None # to avoid Unpickleable error - s = pickle.dumps(record.__dict__, 1) - if ei: - record.exc_info = ei # for next handler + # just to get traceback text into record.exc_text ... 
+ dummy = self.format(record) + # See issue #14436: If msg or args are objects, they may not be + # available on the receiving end. So we convert the msg % args + # to a string, save it as msg and zap the args. + d = dict(record.__dict__) + d['msg'] = record.getMessage() + d['args'] = None + d['exc_info'] = None + s = pickle.dumps(d, 1) slen = struct.pack(">L", len(s)) return slen + s diff --git a/Lib/multiprocessing/connection.py b/Lib/multiprocessing/connection.py --- a/Lib/multiprocessing/connection.py +++ b/Lib/multiprocessing/connection.py @@ -94,6 +94,13 @@ else: raise ValueError('unrecognized family') +def _validate_family(family): + ''' + Checks if the family is valid for the current environment. + ''' + if sys.platform != 'win32' and family == 'AF_PIPE': + raise ValueError('Family %s is not recognized.' % family) + def address_type(address): ''' @@ -126,6 +133,7 @@ or default_family address = address or arbitrary_address(family) + _validate_family(family) if family == 'AF_PIPE': self._listener = PipeListener(address, backlog) else: @@ -163,6 +171,7 @@ Returns a connection to the address of a `Listener` ''' family = family or address_type(address) + _validate_family(family) if family == 'AF_PIPE': c = PipeClient(address) else: diff --git a/Lib/pydoc.py b/Lib/pydoc.py --- a/Lib/pydoc.py +++ b/Lib/pydoc.py @@ -1829,7 +1829,7 @@ Welcome to Python %s! This is the online help utility. If this is your first time using Python, you should definitely check out -the tutorial on the Internet at http://docs.python.org/tutorial/. +the tutorial on the Internet at http://docs.python.org/%s/tutorial/. Enter the name of any module, keyword, or topic to get help on writing Python programs and using Python modules. To quit this help utility and @@ -1839,7 +1839,7 @@ "keywords", or "topics". Each module also comes with a one-line summary of what it does; to list the modules whose summaries contain a given word such as "spam", type "modules spam". -''' % sys.version[:3]) +''' % tuple([sys.version[:3]]*2)) def list(self, items, columns=4, width=80): items = list(sorted(items)) diff --git a/Lib/rlcompleter.py b/Lib/rlcompleter.py --- a/Lib/rlcompleter.py +++ b/Lib/rlcompleter.py @@ -1,13 +1,11 @@ -"""Word completion for GNU readline 2.0. +"""Word completion for GNU readline. -This requires the latest extension to the readline module. The completer -completes keywords, built-ins and globals in a selectable namespace (which -defaults to __main__); when completing NAME.NAME..., it evaluates (!) the -expression up to the last dot and completes its attributes. +The completer completes keywords, built-ins and globals in a selectable +namespace (which defaults to __main__); when completing NAME.NAME..., it +evaluates (!) the expression up to the last dot and completes its attributes. -It's very cool to do "import sys" type "sys.", hit the -completion key (twice), and see the list of names defined by the -sys module! +It's very cool to do "import sys" type "sys.", hit the completion key (twice), +and see the list of names defined by the sys module! Tip: to use the tab key as the completion key, call @@ -15,21 +13,19 @@ Notes: -- Exceptions raised by the completer function are *ignored* (and -generally cause the completion to fail). This is a feature -- since -readline sets the tty device in raw (or cbreak) mode, printing a -traceback wouldn't work well without some complicated hoopla to save, -reset and restore the tty state. 
+- Exceptions raised by the completer function are *ignored* (and generally cause + the completion to fail). This is a feature -- since readline sets the tty + device in raw (or cbreak) mode, printing a traceback wouldn't work well + without some complicated hoopla to save, reset and restore the tty state. -- The evaluation of the NAME.NAME... form may cause arbitrary -application defined code to be executed if an object with a -__getattr__ hook is found. Since it is the responsibility of the -application (or the user) to enable this feature, I consider this an -acceptable risk. More complicated expressions (e.g. function calls or -indexing operations) are *not* evaluated. +- The evaluation of the NAME.NAME... form may cause arbitrary application + defined code to be executed if an object with a __getattr__ hook is found. + Since it is the responsibility of the application (or the user) to enable this + feature, I consider this an acceptable risk. More complicated expressions + (e.g. function calls or indexing operations) are *not* evaluated. - When the original stdin is not a tty device, GNU readline is never -used, and this module (and the readline module) are silently inactive. + used, and this module (and the readline module) are silently inactive. """ diff --git a/Lib/socket.py b/Lib/socket.py --- a/Lib/socket.py +++ b/Lib/socket.py @@ -197,6 +197,17 @@ if self._io_refs <= 0: self._real_close() + def detach(self): + """detach() -> file descriptor + + Close the socket object without closing the underlying file descriptor. + The object cannot be used after this call, but the file descriptor + can be reused for other purposes. The file descriptor is returned. + """ + self._closed = True + return super().detach() + + def fromfd(fd, family, type, proto=0): """ fromfd(fd, family, type[, proto]) -> socket object diff --git a/Lib/test/test_concurrent_futures.py b/Lib/test/test_concurrent_futures.py --- a/Lib/test/test_concurrent_futures.py +++ b/Lib/test/test_concurrent_futures.py @@ -183,7 +183,9 @@ for p in processes: p.join() + class WaitTests(unittest.TestCase): + def test_first_completed(self): future1 = self.executor.submit(mul, 21, 2) future2 = self.executor.submit(time.sleep, 1.5) @@ -284,7 +286,21 @@ class ThreadPoolWaitTests(ThreadPoolMixin, WaitTests): - pass + + def test_pending_calls_race(self): + # Issue #14406: multi-threaded race condition when waiting on all + # futures. 
+ event = threading.Event() + def future_func(): + event.wait() + oldswitchinterval = sys.getswitchinterval() + sys.setswitchinterval(1e-6) + try: + fs = {self.executor.submit(future_func) for i in range(100)} + event.set() + futures.wait(fs, return_when=futures.ALL_COMPLETED) + finally: + sys.setswitchinterval(oldswitchinterval) class ProcessPoolWaitTests(ProcessPoolMixin, WaitTests): diff --git a/Lib/test/test_multiprocessing.py b/Lib/test/test_multiprocessing.py --- a/Lib/test/test_multiprocessing.py +++ b/Lib/test/test_multiprocessing.py @@ -2319,8 +2319,20 @@ flike.flush() assert sio.getvalue() == 'foo' + +# +# Issue 14151: Test invalid family on invalid environment +# + +class TestInvalidFamily(unittest.TestCase): + + @unittest.skipIf(WIN32, "skipped on Windows") + def test_invalid_family(self): + with self.assertRaises(ValueError): + multiprocessing.connection.Listener(r'\\.\test') + testcases_other = [OtherTest, TestInvalidHandle, TestInitializers, - TestStdinBadfiledescriptor] + TestStdinBadfiledescriptor, TestInvalidFamily] # # diff --git a/Lib/test/test_socket.py b/Lib/test/test_socket.py --- a/Lib/test/test_socket.py +++ b/Lib/test/test_socket.py @@ -951,6 +951,7 @@ f = self.cli_conn.detach() self.assertEqual(f, fileno) # cli_conn cannot be used anymore... + self.assertTrue(self.cli_conn._closed) self.assertRaises(socket.error, self.cli_conn.recv, 1024) self.cli_conn.close() # ...but we can create another socket using the (still open) diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -10,6 +10,9 @@ Core and Builtins ----------------- +- Issue #13019: Fix potential reference leaks in bytearray.extend(). Patch + by Suman Saha. + - Issue #14378: Fix compiling ast.ImportFrom nodes with a "__future__" string as the module name that was not interned. @@ -31,6 +34,21 @@ Library ------- +- Issue #14151: Raise a ValueError, not a NameError, when trying to create + a multiprocessing Client or Listener with an AF_PIPE type address under + non-Windows platforms. Patch by Popa Claudiu. + +- Issue #13872: socket.detach() now marks the socket closed (as mirrored + in the socket repr()). Patch by Matt Joiner. + +- Issue #14406: Fix a race condition when using ``concurrent.futures.wait( + return_when=ALL_COMPLETED)``. Patch by Matt Joiner. + +- Issue #14409: IDLE now properly executes commands in the Shell window + when it cannot read the normal config files on startup and + has to use the built-in default key bindings. + There was previously a bug in one of the defaults. + - Issue #10340: asyncore - properly handle EINVAL in dispatcher constructor on OSX; avoid to call handle_connect in case of a disconnected socket which was not meant to connect. @@ -97,6 +115,8 @@ Build ----- +- Issue #14437: Fix building the _io module under Cygwin. + - Issue #14387: Do not include accu.h from Python.h. - Issue #14359: Only use O_CLOEXEC in _posixmodule.c if it is defined. diff --git a/Modules/_io/_iomodule.h b/Modules/_io/_iomodule.h --- a/Modules/_io/_iomodule.h +++ b/Modules/_io/_iomodule.h @@ -67,7 +67,7 @@ PyObject *filename; /* Not used, but part of the IOError object */ Py_ssize_t written; } PyBlockingIOErrorObject; -PyAPI_DATA(PyObject *) PyExc_BlockingIOError; +extern PyObject *PyExc_BlockingIOError; /* * Offset type for positioning. 
diff --git a/Modules/python.c b/Modules/python.c --- a/Modules/python.c +++ b/Modules/python.c @@ -22,9 +22,9 @@ int main(int argc, char **argv) { - wchar_t **argv_copy = (wchar_t **)PyMem_Malloc(sizeof(wchar_t*)*argc); + wchar_t **argv_copy = (wchar_t **)PyMem_Malloc(sizeof(wchar_t*)*(argc+1)); /* We need a second copies, as Python might modify the first one. */ - wchar_t **argv_copy2 = (wchar_t **)PyMem_Malloc(sizeof(wchar_t*)*argc); + wchar_t **argv_copy2 = (wchar_t **)PyMem_Malloc(sizeof(wchar_t*)*(argc+1)); int i, res; char *oldloc; /* 754 requires that FP exceptions run in "no stop" mode by default, @@ -58,6 +58,8 @@ } argv_copy2[i] = argv_copy[i]; } + argv_copy2[argc] = argv_copy[argc] = NULL; + setlocale(LC_ALL, oldloc); free(oldloc); res = Py_Main(argc, argv_copy); diff --git a/Objects/bytearrayobject.c b/Objects/bytearrayobject.c --- a/Objects/bytearrayobject.c +++ b/Objects/bytearrayobject.c @@ -2234,8 +2234,10 @@ } bytearray_obj = PyByteArray_FromStringAndSize(NULL, buf_size); - if (bytearray_obj == NULL) + if (bytearray_obj == NULL) { + Py_DECREF(it); return NULL; + } buf = PyByteArray_AS_STRING(bytearray_obj); while ((item = PyIter_Next(it)) != NULL) { @@ -2268,8 +2270,10 @@ return NULL; } - if (bytearray_setslice(self, Py_SIZE(self), Py_SIZE(self), bytearray_obj) == -1) + if (bytearray_setslice(self, Py_SIZE(self), Py_SIZE(self), bytearray_obj) == -1) { + Py_DECREF(bytearray_obj); return NULL; + } Py_DECREF(bytearray_obj); Py_RETURN_NONE; -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Mon Apr 2 00:50:53 2012 From: python-checkins at python.org (benjamin.peterson) Date: Mon, 02 Apr 2012 00:50:53 +0200 Subject: [Python-checkins] =?utf8?q?cpython_=283=2E2=29=3A_remove_extraneo?= =?utf8?q?us_condition?= Message-ID: http://hg.python.org/cpython/rev/c1fff85711a6 changeset: 76058:c1fff85711a6 branch: 3.2 user: Benjamin Peterson date: Sun Apr 01 18:48:40 2012 -0400 summary: remove extraneous condition files: Objects/typeobject.c | 10 ++++------ 1 files changed, 4 insertions(+), 6 deletions(-) diff --git a/Objects/typeobject.c b/Objects/typeobject.c --- a/Objects/typeobject.c +++ b/Objects/typeobject.c @@ -464,12 +464,10 @@ type->tp_name, Py_TYPE(ob)->tp_name); return -1; } - if (PyType_Check(ob)) { - if (PyType_IsSubtype((PyTypeObject*)ob, type)) { - PyErr_SetString(PyExc_TypeError, - "a __bases__ item causes an inheritance cycle"); - return -1; - } + if (PyType_IsSubtype((PyTypeObject*)ob, type)) { + PyErr_SetString(PyExc_TypeError, + "a __bases__ item causes an inheritance cycle"); + return -1; } } -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Mon Apr 2 00:50:54 2012 From: python-checkins at python.org (benjamin.peterson) Date: Mon, 02 Apr 2012 00:50:54 +0200 Subject: [Python-checkins] =?utf8?q?cpython_=283=2E2=29=3A_adjust_formatti?= =?utf8?q?ng?= Message-ID: http://hg.python.org/cpython/rev/d746ffc34e0f changeset: 76059:d746ffc34e0f branch: 3.2 user: Benjamin Peterson date: Sun Apr 01 18:49:54 2012 -0400 summary: adjust formatting files: Objects/typeobject.c | 10 +++++----- 1 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Objects/typeobject.c b/Objects/typeobject.c --- a/Objects/typeobject.c +++ b/Objects/typeobject.c @@ -458,11 +458,11 @@ for (i = 0; i < PyTuple_GET_SIZE(value); i++) { ob = PyTuple_GET_ITEM(value, i); if (!PyType_Check(ob)) { - PyErr_Format( - PyExc_TypeError, - "%s.__bases__ must be tuple of old- or new-style classes, not '%s'", - type->tp_name, Py_TYPE(ob)->tp_name); - 
return -1; + PyErr_Format(PyExc_TypeError, + "%s.__bases__ must be tuple of old- or " + "new-style classes, not '%s'", + type->tp_name, Py_TYPE(ob)->tp_name); + return -1; } if (PyType_IsSubtype((PyTypeObject*)ob, type)) { PyErr_SetString(PyExc_TypeError, -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Mon Apr 2 00:51:44 2012 From: python-checkins at python.org (benjamin.peterson) Date: Mon, 02 Apr 2012 00:51:44 +0200 Subject: [Python-checkins] =?utf8?q?cpython_=28merge_3=2E2_-=3E_default=29?= =?utf8?q?=3A_merge_3=2E2?= Message-ID: http://hg.python.org/cpython/rev/1c80a7bcbd6b changeset: 76060:1c80a7bcbd6b parent: 76055:1f5701ffa077 parent: 76059:d746ffc34e0f user: Benjamin Peterson date: Sun Apr 01 18:51:37 2012 -0400 summary: merge 3.2 files: Objects/typeobject.c | 22 +++++++++------------- 1 files changed, 9 insertions(+), 13 deletions(-) diff --git a/Objects/typeobject.c b/Objects/typeobject.c --- a/Objects/typeobject.c +++ b/Objects/typeobject.c @@ -490,26 +490,22 @@ for (i = 0; i < PyTuple_GET_SIZE(value); i++) { ob = PyTuple_GET_ITEM(value, i); if (!PyType_Check(ob)) { - PyErr_Format( - PyExc_TypeError, - "%s.__bases__ must be tuple of classes, not '%s'", - type->tp_name, Py_TYPE(ob)->tp_name); - return -1; + PyErr_Format(PyExc_TypeError, + "%s.__bases__ must be tuple of classes, not '%s'", + type->tp_name, Py_TYPE(ob)->tp_name); + return -1; } - if (PyType_Check(ob)) { - if (PyType_IsSubtype((PyTypeObject*)ob, type)) { - PyErr_SetString(PyExc_TypeError, - "a __bases__ item causes an inheritance cycle"); - return -1; - } + if (PyType_IsSubtype((PyTypeObject*)ob, type)) { + PyErr_SetString(PyExc_TypeError, + "a __bases__ item causes an inheritance cycle"); + return -1; } } new_base = best_base(value); - if (!new_base) { + if (!new_base) return -1; - } if (!compatible_for_assignment(type->tp_base, new_base, "__bases__")) return -1; -- Repository URL: http://hg.python.org/cpython From solipsis at pitrou.net Mon Apr 2 05:35:20 2012 From: solipsis at pitrou.net (solipsis at pitrou.net) Date: Mon, 02 Apr 2012 05:35:20 +0200 Subject: [Python-checkins] Daily reference leaks (1c80a7bcbd6b): sum=0 Message-ID: results for 1c80a7bcbd6b on branch "default" -------------------------------------------- Command line was: ['./python', '-m', 'test.regrtest', '-uall', '-R', '3:3:/home/antoine/cpython/refleaks/reflogWSPbp7', '-x'] From python-checkins at python.org Mon Apr 2 07:52:24 2012 From: python-checkins at python.org (georg.brandl) Date: Mon, 02 Apr 2012 07:52:24 +0200 Subject: [Python-checkins] =?utf8?q?cpython=3A_Post-release_update=2E?= Message-ID: http://hg.python.org/cpython/rev/985cc819a70f changeset: 76061:985cc819a70f parent: 76046:90bc4b367fcb user: Georg Brandl date: Mon Apr 02 07:51:45 2012 +0200 summary: Post-release update. files: Include/patchlevel.h | 2 +- Misc/NEWS | 12 ++++++++++++ 2 files changed, 13 insertions(+), 1 deletions(-) diff --git a/Include/patchlevel.h b/Include/patchlevel.h --- a/Include/patchlevel.h +++ b/Include/patchlevel.h @@ -23,7 +23,7 @@ #define PY_RELEASE_SERIAL 2 /* Version as a string */ -#define PY_VERSION "3.3.0a2" +#define PY_VERSION "3.3.0a2+" /*--end constants--*/ /* Version as a single 4-byte hex number, e.g. 0x010502B2 == 1.5.2b2. diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -2,6 +2,18 @@ Python News +++++++++++ +What's New in Python 3.3.0 Alpha 3? 
+=================================== + +*Release date: XXXX-XX-XX* + +Core and Builtins +----------------- + +Library +------- + + What's New in Python 3.3.0 Alpha 2? =================================== -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Mon Apr 2 07:52:24 2012 From: python-checkins at python.org (georg.brandl) Date: Mon, 02 Apr 2012 07:52:24 +0200 Subject: [Python-checkins] =?utf8?q?cpython_=28merge_default_-=3E_default?= =?utf8?b?KTogTWVyZ2Uu?= Message-ID: http://hg.python.org/cpython/rev/9fcb2676696c changeset: 76062:9fcb2676696c parent: 76061:985cc819a70f parent: 76060:1c80a7bcbd6b user: Georg Brandl date: Mon Apr 02 07:52:29 2012 +0200 summary: Merge. files: Lib/multiprocessing/connection.py | 9 + Lib/test/test_multiprocessing.py | 14 +- Lib/test/test_xml_etree.py | 10 - Misc/NEWS | 7 + Modules/_decimal/tests/bench.py | 5 +- Objects/bytearrayobject.c | 8 +- Objects/typeobject.c | 22 +- PCbuild/_decimal.vcproj | 1486 ++++++++-------- Tools/msi/msi.py | 4 +- 9 files changed, 794 insertions(+), 771 deletions(-) diff --git a/Lib/multiprocessing/connection.py b/Lib/multiprocessing/connection.py --- a/Lib/multiprocessing/connection.py +++ b/Lib/multiprocessing/connection.py @@ -104,6 +104,13 @@ else: raise ValueError('unrecognized family') +def _validate_family(family): + ''' + Checks if the family is valid for the current environment. + ''' + if sys.platform != 'win32' and family == 'AF_PIPE': + raise ValueError('Family %s is not recognized.' % family) + def address_type(address): ''' @@ -436,6 +443,7 @@ or default_family address = address or arbitrary_address(family) + _validate_family(family) if family == 'AF_PIPE': self._listener = PipeListener(address, backlog) else: @@ -473,6 +481,7 @@ Returns a connection to the address of a `Listener` ''' family = family or address_type(address) + _validate_family(family) if family == 'AF_PIPE': c = PipeClient(address) else: diff --git a/Lib/test/test_multiprocessing.py b/Lib/test/test_multiprocessing.py --- a/Lib/test/test_multiprocessing.py +++ b/Lib/test/test_multiprocessing.py @@ -2638,8 +2638,20 @@ p.join() +# +# Issue 14151: Test invalid family on invalid environment +# + +class TestInvalidFamily(unittest.TestCase): + + @unittest.skipIf(WIN32, "skipped on Windows") + def test_invalid_family(self): + with self.assertRaises(ValueError): + multiprocessing.connection.Listener(r'\\.\test') + + testcases_other = [OtherTest, TestInvalidHandle, TestInitializers, - TestStdinBadfiledescriptor, TestWait] + TestStdinBadfiledescriptor, TestWait, TestInvalidFamily] # # diff --git a/Lib/test/test_xml_etree.py b/Lib/test/test_xml_etree.py --- a/Lib/test/test_xml_etree.py +++ b/Lib/test/test_xml_etree.py @@ -1859,16 +1859,6 @@ gc_collect() self.assertIsNone(wref()) - # A longer cycle: d->e->e2->d - e = ET.Element('joe') - d = Dummy() - d.dummyref = e - wref = weakref.ref(d) - e2 = ET.SubElement(e, 'foo', attr=d) - del d, e, e2 - gc_collect() - self.assertIsNone(wref()) - class ElementTreeTest(unittest.TestCase): def test_istype(self): diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -10,9 +10,16 @@ Core and Builtins ----------------- +- Issue #13019: Fix potential reference leaks in bytearray.extend(). Patch + by Suman Saha. + Library ------- +- Issue #14151: Raise a ValueError, not a NameError, when trying to create + a multiprocessing Client or Listener with an AF_PIPE type address under + non-Windows platforms. Patch by Popa Claudiu. + What's New in Python 3.3.0 Alpha 2? 
=================================== diff --git a/Modules/_decimal/tests/bench.py b/Modules/_decimal/tests/bench.py --- a/Modules/_decimal/tests/bench.py +++ b/Modules/_decimal/tests/bench.py @@ -84,7 +84,10 @@ print("# Factorial") print("# ======================================================================\n") -C.getcontext().prec = C.MAX_PREC +c = C.getcontext() +c.prec = C.MAX_PREC +c.Emax = C.MAX_EMAX +c.Emin = C.MIN_EMIN for n in [100000, 1000000]: diff --git a/Objects/bytearrayobject.c b/Objects/bytearrayobject.c --- a/Objects/bytearrayobject.c +++ b/Objects/bytearrayobject.c @@ -2289,8 +2289,10 @@ } bytearray_obj = PyByteArray_FromStringAndSize(NULL, buf_size); - if (bytearray_obj == NULL) + if (bytearray_obj == NULL) { + Py_DECREF(it); return NULL; + } buf = PyByteArray_AS_STRING(bytearray_obj); while ((item = PyIter_Next(it)) != NULL) { @@ -2323,8 +2325,10 @@ return NULL; } - if (bytearray_setslice(self, Py_SIZE(self), Py_SIZE(self), bytearray_obj) == -1) + if (bytearray_setslice(self, Py_SIZE(self), Py_SIZE(self), bytearray_obj) == -1) { + Py_DECREF(bytearray_obj); return NULL; + } Py_DECREF(bytearray_obj); Py_RETURN_NONE; diff --git a/Objects/typeobject.c b/Objects/typeobject.c --- a/Objects/typeobject.c +++ b/Objects/typeobject.c @@ -490,26 +490,22 @@ for (i = 0; i < PyTuple_GET_SIZE(value); i++) { ob = PyTuple_GET_ITEM(value, i); if (!PyType_Check(ob)) { - PyErr_Format( - PyExc_TypeError, - "%s.__bases__ must be tuple of classes, not '%s'", - type->tp_name, Py_TYPE(ob)->tp_name); - return -1; + PyErr_Format(PyExc_TypeError, + "%s.__bases__ must be tuple of classes, not '%s'", + type->tp_name, Py_TYPE(ob)->tp_name); + return -1; } - if (PyType_Check(ob)) { - if (PyType_IsSubtype((PyTypeObject*)ob, type)) { - PyErr_SetString(PyExc_TypeError, - "a __bases__ item causes an inheritance cycle"); - return -1; - } + if (PyType_IsSubtype((PyTypeObject*)ob, type)) { + PyErr_SetString(PyExc_TypeError, + "a __bases__ item causes an inheritance cycle"); + return -1; } } new_base = best_base(value); - if (!new_base) { + if (!new_base) return -1; - } if (!compatible_for_assignment(type->tp_base, new_base, "__bases__")) return -1; diff --git a/PCbuild/_decimal.vcproj b/PCbuild/_decimal.vcproj --- a/PCbuild/_decimal.vcproj +++ b/PCbuild/_decimal.vcproj @@ -1,743 +1,743 @@ [vcproj XML content not preserved in this archive] diff --git a/Tools/msi/msi.py b/Tools/msi/msi.py --- a/Tools/msi/msi.py +++ b/Tools/msi/msi.py @@ -97,7 +97,9 @@ '_sqlite3.pyd', '_hashlib.pyd', '_multiprocessing.pyd', - '_lzma.pyd' +
'_lzma.pyd', + '_decimal.pyd', + '_testbuffer.pyd' ] # Well-known component UUIDs -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Mon Apr 2 15:13:18 2012 From: python-checkins at python.org (stefan.krah) Date: Mon, 02 Apr 2012 15:13:18 +0200 Subject: [Python-checkins] =?utf8?q?cpython=3A_Clear_the_context_flags_if_?= =?utf8?q?a_context_is_initialized_from_the_DefaultContext=2E?= Message-ID: http://hg.python.org/cpython/rev/2345a042c08a changeset: 76063:2345a042c08a parent: 76055:1f5701ffa077 user: Stefan Krah date: Mon Apr 02 15:02:21 2012 +0200 summary: Clear the context flags if a context is initialized from the DefaultContext. files: Lib/test/test_decimal.py | 70 +++++++++++++++++++++++++ Modules/_decimal/_decimal.c | 8 ++ 2 files changed, 78 insertions(+), 0 deletions(-) diff --git a/Lib/test/test_decimal.py b/Lib/test/test_decimal.py --- a/Lib/test/test_decimal.py +++ b/Lib/test/test_decimal.py @@ -56,6 +56,76 @@ fractions = {C:cfractions, P:pfractions} sys.modules['decimal'] = orig_sys_decimal +############ RunFirst ############ +class RunFirst(unittest.TestCase): + + def setUp(self): + self.save_default = self.decimal.DefaultContext.copy() + + def tearDown(self): + DefaultContext = self.decimal.DefaultContext + + DefaultContext.prec = self.save_default.prec + DefaultContext.rounding = self.save_default.rounding + DefaultContext.Emax = self.save_default.Emax + DefaultContext.Emin = self.save_default.Emin + DefaultContext.capitals = self.save_default.capitals + DefaultContext.clamp = self.save_default.clamp + DefaultContext.flags = self.save_default.flags + DefaultContext.traps = self.save_default.traps + + self.decimal.setcontext(self.decimal.DefaultContext) + + def test_00default_context(self): + # The test depends on the fact that getcontext() is called + # for the first time. 
+ DefaultContext = self.decimal.DefaultContext + ROUND_05UP = self.decimal.ROUND_05UP + Clamped = self.decimal.Clamped + InvalidOperation = self.decimal.InvalidOperation + + DefaultContext.prec = 5001 + DefaultContext.rounding = ROUND_05UP + DefaultContext.Emax = 10025 + DefaultContext.Emin = -10025 + DefaultContext.capitals = 0 + DefaultContext.clamp = 1 + DefaultContext.flags[InvalidOperation] = True + DefaultContext.clear_traps() + DefaultContext.traps[Clamped] = True + + # implicit initialization on first access + c = self.decimal.getcontext() + + self.assertEqual(c.prec, 5001) + self.assertEqual(c.rounding, ROUND_05UP) + self.assertEqual(c.Emax, 10025) + self.assertEqual(c.Emin, -10025) + self.assertEqual(c.capitals, 0) + self.assertEqual(c.clamp, 1) + for k in c.flags: + self.assertFalse(c.flags[k]) + for k in c.traps: + if k is Clamped: + self.assertTrue(c.traps[k]) + else: + self.assertFalse(c.traps[k]) + + # explicit initialization + self.decimal.setcontext(DefaultContext) + c = self.decimal.getcontext() + for k in c.flags: + self.assertFalse(c.flags[k]) + +class CRunFirst(RunFirst): + decimal = C +class PyRunFirst(RunFirst): + decimal = P +if C: + run_unittest(CRunFirst, PyRunFirst) +else: + run_unittest(PyRunFirst) +############ END RunFirst ############ # Useful Test Constant Signals = { diff --git a/Modules/_decimal/_decimal.c b/Modules/_decimal/_decimal.c --- a/Modules/_decimal/_decimal.c +++ b/Modules/_decimal/_decimal.c @@ -1494,6 +1494,10 @@ } *CTX(module_context) = *CTX(default_context_template); + CTX(module_context)->status = 0; + CTX(module_context)->newtrap = 0; + CtxCaps(module_context) = CtxCaps(default_context_template); + module_context_set = 1; return module_context; } @@ -1533,6 +1537,7 @@ if (v == NULL) { return NULL; } + CTX(v)->status = 0; } else { Py_INCREF(v); @@ -1581,6 +1586,8 @@ if (tl_context == NULL) { return NULL; } + CTX(tl_context)->status = 0; + if (PyDict_SetItem(dict, tls_context_key, tl_context) < 0) { Py_DECREF(tl_context); return NULL; @@ -1646,6 +1653,7 @@ if (v == NULL) { return NULL; } + CTX(v)->status = 0; } else { Py_INCREF(v); -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Mon Apr 2 15:13:19 2012 From: python-checkins at python.org (stefan.krah) Date: Mon, 02 Apr 2012 15:13:19 +0200 Subject: [Python-checkins] =?utf8?q?cpython_=28merge_default_-=3E_default?= =?utf8?b?KTogTWVyZ2Uu?= Message-ID: http://hg.python.org/cpython/rev/f00fae41f95d changeset: 76064:f00fae41f95d parent: 76063:2345a042c08a parent: 76062:9fcb2676696c user: Stefan Krah date: Mon Apr 02 15:04:14 2012 +0200 summary: Merge. files: Include/patchlevel.h | 2 +- Misc/NEWS | 64 +++++++++++++++++++------------- Objects/typeobject.c | 22 ++++------ 3 files changed, 48 insertions(+), 40 deletions(-) diff --git a/Include/patchlevel.h b/Include/patchlevel.h --- a/Include/patchlevel.h +++ b/Include/patchlevel.h @@ -23,7 +23,7 @@ #define PY_RELEASE_SERIAL 2 /* Version as a string */ -#define PY_VERSION "3.3.0a2" +#define PY_VERSION "3.3.0a2+" /*--end constants--*/ /* Version as a single 4-byte hex number, e.g. 0x010502B2 == 1.5.2b2. diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -2,10 +2,10 @@ Python News +++++++++++ -What's New in Python 3.3.0 Alpha 2? +What's New in Python 3.3.0 Alpha 3? =================================== -*Release date: 01-Apr-2012* +*Release date: XXXX-XX-XX* Core and Builtins ----------------- @@ -13,30 +13,6 @@ - Issue #13019: Fix potential reference leaks in bytearray.extend(). Patch by Suman Saha. 
-- Issue #1683368: object.__new__ and object.__init__ raise a TypeError if they - are passed arguments and their complementary method is not overridden. - -- Issue #14378: Fix compiling ast.ImportFrom nodes with a "__future__" string as - the module name that was not interned. - -- Issue #14331: Use significantly less stack space when importing modules by - allocating path buffers on the heap instead of the stack. - -- Issue #14334: Prevent in a segfault in type.__getattribute__ when it was not - passed strings. - -- Issue #1469629: Allow cycles through an object's __dict__ slot to be - collected. (For example if ``x.__dict__ is x``). - -- Issue #14205: dict lookup raises a RuntimeError if the dict is modified - during a lookup. - -- Issue #14220: When a generator is delegating to another iterator with the - yield from syntax, it needs to have its ``gi_running`` flag set to True. - -- Issue #14435: Remove dedicated block allocator from floatobject.c and rely - on the PyObject_Malloc() api like all other objects. - Library ------- @@ -44,6 +20,42 @@ a multiprocessing Client or Listener with an AF_PIPE type address under non-Windows platforms. Patch by Popa Claudiu. + +What's New in Python 3.3.0 Alpha 2? +=================================== + +*Release date: 01-Apr-2012* + +Core and Builtins +----------------- + +- Issue #1683368: object.__new__ and object.__init__ raise a TypeError if they + are passed arguments and their complementary method is not overridden. + +- Issue #14378: Fix compiling ast.ImportFrom nodes with a "__future__" string as + the module name that was not interned. + +- Issue #14331: Use significantly less stack space when importing modules by + allocating path buffers on the heap instead of the stack. + +- Issue #14334: Prevent in a segfault in type.__getattribute__ when it was not + passed strings. + +- Issue #1469629: Allow cycles through an object's __dict__ slot to be + collected. (For example if ``x.__dict__ is x``). + +- Issue #14205: dict lookup raises a RuntimeError if the dict is modified + during a lookup. + +- Issue #14220: When a generator is delegating to another iterator with the + yield from syntax, it needs to have its ``gi_running`` flag set to True. + +- Issue #14435: Remove dedicated block allocator from floatobject.c and rely + on the PyObject_Malloc() api like all other objects. + +Library +------- + - Issue #14300: Under Windows, sockets created using socket.dup() now allow overlapped I/O. Patch by sbt. 
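The decimal behaviour pinned down by changeset 76063 above (clear the flags when a context is initialized from DefaultContext) can be seen from plain Python code. A minimal sketch, assuming only the stdlib decimal module (either the C or the pure-Python version): a context installed from DefaultContext starts with its flags cleared, even if DefaultContext itself carries raised flags::

    import decimal

    decimal.DefaultContext.prec = 50
    decimal.DefaultContext.flags[decimal.InvalidOperation] = True

    # setcontext() installs a fresh copy of the template context; with the
    # fix above, the copy's flags are cleared rather than inherited.
    decimal.setcontext(decimal.DefaultContext)
    c = decimal.getcontext()
    print(c.prec)                                # 50
    print(any(c.flags[k] for k in c.flags))      # False

(In real code, save DefaultContext with copy() beforehand and restore it afterwards, as the tearDown() in the test above does.)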
diff --git a/Objects/typeobject.c b/Objects/typeobject.c --- a/Objects/typeobject.c +++ b/Objects/typeobject.c @@ -490,26 +490,22 @@ for (i = 0; i < PyTuple_GET_SIZE(value); i++) { ob = PyTuple_GET_ITEM(value, i); if (!PyType_Check(ob)) { - PyErr_Format( - PyExc_TypeError, - "%s.__bases__ must be tuple of classes, not '%s'", - type->tp_name, Py_TYPE(ob)->tp_name); - return -1; + PyErr_Format(PyExc_TypeError, + "%s.__bases__ must be tuple of classes, not '%s'", + type->tp_name, Py_TYPE(ob)->tp_name); + return -1; } - if (PyType_Check(ob)) { - if (PyType_IsSubtype((PyTypeObject*)ob, type)) { - PyErr_SetString(PyExc_TypeError, - "a __bases__ item causes an inheritance cycle"); - return -1; - } + if (PyType_IsSubtype((PyTypeObject*)ob, type)) { + PyErr_SetString(PyExc_TypeError, + "a __bases__ item causes an inheritance cycle"); + return -1; } } new_base = best_base(value); - if (!new_base) { + if (!new_base) return -1; - } if (!compatible_for_assignment(type->tp_base, new_base, "__bases__")) return -1; -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Mon Apr 2 15:13:19 2012 From: python-checkins at python.org (stefan.krah) Date: Mon, 02 Apr 2012 15:13:19 +0200 Subject: [Python-checkins] =?utf8?q?cpython=3A_Whitespace=2E?= Message-ID: http://hg.python.org/cpython/rev/883689757a2e changeset: 76065:883689757a2e user: Stefan Krah date: Mon Apr 02 15:10:36 2012 +0200 summary: Whitespace. files: Lib/test/test_decimal.py | 114 +++++++++++++------------- 1 files changed, 57 insertions(+), 57 deletions(-) diff --git a/Lib/test/test_decimal.py b/Lib/test/test_decimal.py --- a/Lib/test/test_decimal.py +++ b/Lib/test/test_decimal.py @@ -59,63 +59,63 @@ ############ RunFirst ############ class RunFirst(unittest.TestCase): - def setUp(self): - self.save_default = self.decimal.DefaultContext.copy() - - def tearDown(self): - DefaultContext = self.decimal.DefaultContext - - DefaultContext.prec = self.save_default.prec - DefaultContext.rounding = self.save_default.rounding - DefaultContext.Emax = self.save_default.Emax - DefaultContext.Emin = self.save_default.Emin - DefaultContext.capitals = self.save_default.capitals - DefaultContext.clamp = self.save_default.clamp - DefaultContext.flags = self.save_default.flags - DefaultContext.traps = self.save_default.traps - - self.decimal.setcontext(self.decimal.DefaultContext) - - def test_00default_context(self): - # The test depends on the fact that getcontext() is called - # for the first time. 
- DefaultContext = self.decimal.DefaultContext - ROUND_05UP = self.decimal.ROUND_05UP - Clamped = self.decimal.Clamped - InvalidOperation = self.decimal.InvalidOperation - - DefaultContext.prec = 5001 - DefaultContext.rounding = ROUND_05UP - DefaultContext.Emax = 10025 - DefaultContext.Emin = -10025 - DefaultContext.capitals = 0 - DefaultContext.clamp = 1 - DefaultContext.flags[InvalidOperation] = True - DefaultContext.clear_traps() - DefaultContext.traps[Clamped] = True - - # implicit initialization on first access - c = self.decimal.getcontext() - - self.assertEqual(c.prec, 5001) - self.assertEqual(c.rounding, ROUND_05UP) - self.assertEqual(c.Emax, 10025) - self.assertEqual(c.Emin, -10025) - self.assertEqual(c.capitals, 0) - self.assertEqual(c.clamp, 1) - for k in c.flags: - self.assertFalse(c.flags[k]) - for k in c.traps: - if k is Clamped: - self.assertTrue(c.traps[k]) - else: - self.assertFalse(c.traps[k]) - - # explicit initialization - self.decimal.setcontext(DefaultContext) - c = self.decimal.getcontext() - for k in c.flags: - self.assertFalse(c.flags[k]) + def setUp(self): + self.save_default = self.decimal.DefaultContext.copy() + + def tearDown(self): + DefaultContext = self.decimal.DefaultContext + + DefaultContext.prec = self.save_default.prec + DefaultContext.rounding = self.save_default.rounding + DefaultContext.Emax = self.save_default.Emax + DefaultContext.Emin = self.save_default.Emin + DefaultContext.capitals = self.save_default.capitals + DefaultContext.clamp = self.save_default.clamp + DefaultContext.flags = self.save_default.flags + DefaultContext.traps = self.save_default.traps + + self.decimal.setcontext(self.decimal.DefaultContext) + + def test_00default_context(self): + # The test depends on the fact that getcontext() is called + # for the first time. 
+ DefaultContext = self.decimal.DefaultContext + ROUND_05UP = self.decimal.ROUND_05UP + Clamped = self.decimal.Clamped + InvalidOperation = self.decimal.InvalidOperation + + DefaultContext.prec = 5001 + DefaultContext.rounding = ROUND_05UP + DefaultContext.Emax = 10025 + DefaultContext.Emin = -10025 + DefaultContext.capitals = 0 + DefaultContext.clamp = 1 + DefaultContext.flags[InvalidOperation] = True + DefaultContext.clear_traps() + DefaultContext.traps[Clamped] = True + + # implicit initialization on first access + c = self.decimal.getcontext() + + self.assertEqual(c.prec, 5001) + self.assertEqual(c.rounding, ROUND_05UP) + self.assertEqual(c.Emax, 10025) + self.assertEqual(c.Emin, -10025) + self.assertEqual(c.capitals, 0) + self.assertEqual(c.clamp, 1) + for k in c.flags: + self.assertFalse(c.flags[k]) + for k in c.traps: + if k is Clamped: + self.assertTrue(c.traps[k]) + else: + self.assertFalse(c.traps[k]) + + # explicit initialization + self.decimal.setcontext(DefaultContext) + c = self.decimal.getcontext() + for k in c.flags: + self.assertFalse(c.flags[k]) class CRunFirst(RunFirst): decimal = C -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Mon Apr 2 17:15:24 2012 From: python-checkins at python.org (benjamin.peterson) Date: Mon, 02 Apr 2012 17:15:24 +0200 Subject: [Python-checkins] =?utf8?q?cpython_=282=2E7=29=3A_prevent_writing?= =?utf8?q?_to_stderr_from_messing_up_the_exception_state_=28closes_=231447?= =?utf8?q?4=29?= Message-ID: http://hg.python.org/cpython/rev/8609d7fcdcc7 changeset: 76066:8609d7fcdcc7 branch: 2.7 parent: 76049:d3a82a26c705 user: Benjamin Peterson date: Mon Apr 02 11:15:17 2012 -0400 summary: prevent writing to stderr from messing up the exception state (closes #14474) files: Lib/test/test_thread.py | 24 ++++++++++++++++++++++++ Misc/NEWS | 3 +++ Modules/threadmodule.c | 3 +++ 3 files changed, 30 insertions(+), 0 deletions(-) diff --git a/Lib/test/test_thread.py b/Lib/test/test_thread.py --- a/Lib/test/test_thread.py +++ b/Lib/test/test_thread.py @@ -130,6 +130,30 @@ time.sleep(0.01) self.assertEqual(thread._count(), orig) + def test_save_exception_state_on_error(self): + # See issue #14474 + def task(): + started.release() + sys.stderr = stderr + raise SyntaxError + def mywrite(self, *args): + try: + raise ValueError + except ValueError: + pass + real_write(self, *args) + c = thread._count() + started = thread.allocate_lock() + with test_support.captured_output("stderr") as stderr: + real_write = stderr.write + stderr.write = mywrite + started.acquire() + thread.start_new_thread(task, ()) + started.acquire() + while thread._count() > c: + pass + self.assertIn("Traceback", stderr.getvalue()) + class Barrier: def __init__(self, num_threads): diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -9,6 +9,9 @@ Core and Builtins ----------------- +- Issue #14474: Save and restore exception state in thread.start_new_thread() + while writing error message if the thread leaves a unhandled exception. + - Issue #13019: Fix potential reference leaks in bytearray.extend(). Patch by Suman Saha. 
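The scenario exercised by test_save_exception_state_on_error above can be reproduced outside the test suite. A hedged sketch in Python 3 spelling, where the low-level module is _thread (the 2.7 branch calls it thread): a sys.stderr whose write() raises and handles an exception internally must not wipe out the traceback of the unhandled exception being reported::

    import io
    import sys
    import _thread

    class NoisyStderr(io.StringIO):
        def write(self, s):
            # Raise and handle an internal exception, like mywrite() in the
            # test; before the fix this clobbered the pending exception of
            # the crashing thread, so its traceback was never printed.
            try:
                raise ValueError
            except ValueError:
                pass
            return super().write(s)

    def task():
        raise SyntaxError("unhandled on purpose")

    count = _thread._count()
    old_stderr, sys.stderr = sys.stderr, NoisyStderr()
    try:
        _thread.start_new_thread(task, ())
        while _thread._count() > count:      # busy-wait, as the test does
            pass
    finally:
        captured, sys.stderr = sys.stderr.getvalue(), old_stderr

    print("Traceback" in captured)           # True once the fix is applied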
diff --git a/Modules/threadmodule.c b/Modules/threadmodule.c --- a/Modules/threadmodule.c +++ b/Modules/threadmodule.c @@ -618,6 +618,8 @@ PyErr_Clear(); else { PyObject *file; + PyObject *exc, *value, *tb; + PyErr_Fetch(&exc, &value, &tb); PySys_WriteStderr( "Unhandled exception in thread started by "); file = PySys_GetObject("stderr"); @@ -625,6 +627,7 @@ PyFile_WriteObject(boot->func, file, 0); else PyObject_Print(boot->func, stderr, 0); + PyErr_Restore(exc, value, tb); PySys_WriteStderr("\n"); PyErr_PrintEx(0); } -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Mon Apr 2 17:20:05 2012 From: python-checkins at python.org (martin.v.loewis) Date: Mon, 2 Apr 2012 17:20:05 +0200 (CEST) Subject: [Python-checkins] r88934 - tracker/instances/spambayes_integration/extensions/spambayes.py Message-ID: <3VLxv91LfQzLrZ@mail.python.org> Author: martin.v.loewis Date: Mon Apr 2 17:20:04 2012 New Revision: 88934 Log: Issue 409: support spam classification without spambayes. Modified: tracker/instances/spambayes_integration/extensions/spambayes.py Modified: tracker/instances/spambayes_integration/extensions/spambayes.py ============================================================================== --- tracker/instances/spambayes_integration/extensions/spambayes.py (original) +++ tracker/instances/spambayes_integration/extensions/spambayes.py Mon Apr 2 17:20:04 2012 @@ -24,6 +24,9 @@ return (content, tokens) def train_spambayes(db, content, tokens, is_spam): + # spambayes training is now disabled; only leave + # spam classification UI + return True, None spambayes_uri = db.config.detectors['SPAMBAYES_URI'] server = xmlrpclib.ServerProxy(spambayes_uri, verbose=False) From python-checkins at python.org Mon Apr 2 17:28:55 2012 From: python-checkins at python.org (benjamin.peterson) Date: Mon, 02 Apr 2012 17:28:55 +0200 Subject: [Python-checkins] =?utf8?q?cpython_=282=2E7=29=3A_protect_this_ca?= =?utf8?q?ll=2C_too?= Message-ID: http://hg.python.org/cpython/rev/d552016fc07c changeset: 76067:d552016fc07c branch: 2.7 user: Benjamin Peterson date: Mon Apr 02 11:18:18 2012 -0400 summary: protect this call, too files: Modules/threadmodule.c | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/Modules/threadmodule.c b/Modules/threadmodule.c --- a/Modules/threadmodule.c +++ b/Modules/threadmodule.c @@ -627,8 +627,8 @@ PyFile_WriteObject(boot->func, file, 0); else PyObject_Print(boot->func, stderr, 0); + PySys_WriteStderr("\n"); PyErr_Restore(exc, value, tb); - PySys_WriteStderr("\n"); PyErr_PrintEx(0); } } -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Mon Apr 2 17:28:56 2012 From: python-checkins at python.org (benjamin.peterson) Date: Mon, 02 Apr 2012 17:28:56 +0200 Subject: [Python-checkins] =?utf8?q?cpython_=283=2E2=29=3A_prevent_writing?= =?utf8?q?_to_stderr_from_messing_up_the_exception_state_=28closes_=231447?= =?utf8?q?4=29?= Message-ID: http://hg.python.org/cpython/rev/60ad83716733 changeset: 76068:60ad83716733 branch: 3.2 parent: 76059:d746ffc34e0f user: Benjamin Peterson date: Mon Apr 02 11:15:17 2012 -0400 summary: prevent writing to stderr from messing up the exception state (closes #14474) files: Lib/test/test_thread.py | 24 ++++++++++++++++++++++++ Misc/NEWS | 3 +++ Modules/_threadmodule.c | 3 +++ 3 files changed, 30 insertions(+), 0 deletions(-) diff --git a/Lib/test/test_thread.py b/Lib/test/test_thread.py --- a/Lib/test/test_thread.py +++ b/Lib/test/test_thread.py @@ -128,6 +128,30 @@ time.sleep(0.01) 
self.assertEqual(thread._count(), orig) + def test_save_exception_state_on_error(self): + # See issue #14474 + def task(): + started.release() + sys.stderr = stderr + raise SyntaxError + def mywrite(self, *args): + try: + raise ValueError + except ValueError: + pass + real_write(self, *args) + c = thread._count() + started = thread.allocate_lock() + with support.captured_output("stderr") as stderr: + real_write = stderr.write + stderr.write = mywrite + started.acquire() + thread.start_new_thread(task, ()) + started.acquire() + while thread._count() > c: + pass + self.assertIn("Traceback", stderr.getvalue()) + class Barrier: def __init__(self, num_threads): diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -10,6 +10,9 @@ Core and Builtins ----------------- +- Issue #14474: Save and restore exception state in thread.start_new_thread() + while writing error message if the thread leaves a unhandled exception. + - Issue #13019: Fix potential reference leaks in bytearray.extend(). Patch by Suman Saha. diff --git a/Modules/_threadmodule.c b/Modules/_threadmodule.c --- a/Modules/_threadmodule.c +++ b/Modules/_threadmodule.c @@ -994,14 +994,17 @@ PyErr_Clear(); else { PyObject *file; + PyObject *exc, *value, *tb; PySys_WriteStderr( "Unhandled exception in thread started by "); + PyErr_Fetch(&exc, &value, &tb); file = PySys_GetObject("stderr"); if (file != NULL && file != Py_None) PyFile_WriteObject(boot->func, file, 0); else PyObject_Print(boot->func, stderr, 0); PySys_WriteStderr("\n"); + PyErr_Restore(exc, value, tb); PyErr_PrintEx(0); } } -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Mon Apr 2 17:28:58 2012 From: python-checkins at python.org (benjamin.peterson) Date: Mon, 02 Apr 2012 17:28:58 +0200 Subject: [Python-checkins] =?utf8?q?cpython_=28merge_3=2E2_-=3E_default=29?= =?utf8?q?=3A_merge_3=2E2?= Message-ID: http://hg.python.org/cpython/rev/b92e34c1e07c changeset: 76069:b92e34c1e07c parent: 76065:883689757a2e parent: 76068:60ad83716733 user: Benjamin Peterson date: Mon Apr 02 11:28:49 2012 -0400 summary: merge 3.2 files: Lib/test/test_thread.py | 24 ++++++++++++++++++++++++ Misc/NEWS | 3 +++ Modules/_threadmodule.c | 3 +++ 3 files changed, 30 insertions(+), 0 deletions(-) diff --git a/Lib/test/test_thread.py b/Lib/test/test_thread.py --- a/Lib/test/test_thread.py +++ b/Lib/test/test_thread.py @@ -128,6 +128,30 @@ time.sleep(0.01) self.assertEqual(thread._count(), orig) + def test_save_exception_state_on_error(self): + # See issue #14474 + def task(): + started.release() + sys.stderr = stderr + raise SyntaxError + def mywrite(self, *args): + try: + raise ValueError + except ValueError: + pass + real_write(self, *args) + c = thread._count() + started = thread.allocate_lock() + with support.captured_output("stderr") as stderr: + real_write = stderr.write + stderr.write = mywrite + started.acquire() + thread.start_new_thread(task, ()) + started.acquire() + while thread._count() > c: + pass + self.assertIn("Traceback", stderr.getvalue()) + class Barrier: def __init__(self, num_threads): diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -10,6 +10,9 @@ Core and Builtins ----------------- +- Issue #14474: Save and restore exception state in thread.start_new_thread() + while writing error message if the thread leaves a unhandled exception. + - Issue #13019: Fix potential reference leaks in bytearray.extend(). Patch by Suman Saha. 
diff --git a/Modules/_threadmodule.c b/Modules/_threadmodule.c --- a/Modules/_threadmodule.c +++ b/Modules/_threadmodule.c @@ -1001,14 +1001,17 @@ PyErr_Clear(); else { PyObject *file; + PyObject *exc, *value, *tb; PySys_WriteStderr( "Unhandled exception in thread started by "); + PyErr_Fetch(&exc, &value, &tb); file = PySys_GetObject("stderr"); if (file != NULL && file != Py_None) PyFile_WriteObject(boot->func, file, 0); else PyObject_Print(boot->func, stderr, 0); PySys_WriteStderr("\n"); + PyErr_Restore(exc, value, tb); PyErr_PrintEx(0); } } -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Mon Apr 2 17:41:33 2012 From: python-checkins at python.org (kristjan.jonsson) Date: Mon, 02 Apr 2012 17:41:33 +0200 Subject: [Python-checkins] =?utf8?b?Y3B5dGhvbiAoMy4yKTogSXNzdWUgIzE0NDcx?= =?utf8?q?=3A_Fix_a_possible_buffer_overrun_in_the_winreg_module=2E?= Message-ID: http://hg.python.org/cpython/rev/b3639f6aaa2b changeset: 76070:b3639f6aaa2b branch: 3.2 parent: 76068:60ad83716733 user: Kristján Valur Jónsson date: Mon Apr 02 15:23:29 2012 +0000 summary: Issue #14471: Fix a possible buffer overrun in the winreg module. files: Misc/NEWS | 2 ++ PC/winreg.c | 2 +- 2 files changed, 3 insertions(+), 1 deletions(-) diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -34,6 +34,8 @@ - Issue #13521: dict.setdefault() now does only one lookup for the given key, making it "atomic" for many purposes. Patch by Filip Gruszczyński. +- Issue #14471: Fix a possible buffer overrun in the winreg module. + Library ------- diff --git a/PC/winreg.c b/PC/winreg.c --- a/PC/winreg.c +++ b/PC/winreg.c @@ -1110,7 +1110,7 @@ * nul. RegEnumKeyEx requires a 257 character buffer to * retrieve such a key name. */ wchar_t tmpbuf[257]; - DWORD len = sizeof(tmpbuf); /* includes NULL terminator */ + DWORD len = sizeof(tmpbuf)/sizeof(wchar_t); /* includes NULL terminator */ if (!PyArg_ParseTuple(args, "Oi:EnumKey", &obKey, &index)) return NULL; -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Mon Apr 2 17:41:34 2012 From: python-checkins at python.org (kristjan.jonsson) Date: Mon, 02 Apr 2012 17:41:34 +0200 Subject: [Python-checkins] =?utf8?q?cpython_=28merge_3=2E2_-=3E_default=29?= =?utf8?q?=3A_Merge_with_3=2E2_=28Issue_=2314471=29?= Message-ID: http://hg.python.org/cpython/rev/80d814d7b886 changeset: 76071:80d814d7b886 parent: 76069:b92e34c1e07c parent: 76070:b3639f6aaa2b user: Kristján Valur Jónsson date: Mon Apr 02 15:41:06 2012 +0000 summary: Merge with 3.2 (Issue #14471) files: Misc/NEWS | 2 ++ PC/winreg.c | 2 +- 2 files changed, 3 insertions(+), 1 deletions(-) diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -56,6 +56,8 @@ - Issue #14435: Remove dedicated block allocator from floatobject.c and rely on the PyObject_Malloc() api like all other objects. +- Issue #14471: Fix a possible buffer overrun in the winreg module. + Library ------- diff --git a/PC/winreg.c b/PC/winreg.c --- a/PC/winreg.c +++ b/PC/winreg.c @@ -1122,7 +1122,7 @@ * nul. RegEnumKeyEx requires a 257 character buffer to * retrieve such a key name.
*/ wchar_t tmpbuf[257]; - DWORD len = sizeof(tmpbuf); /* includes NULL terminator */ + DWORD len = sizeof(tmpbuf)/sizeof(wchar_t); /* includes NULL terminator */ if (!PyArg_ParseTuple(args, "Oi:EnumKey", &obKey, &index)) return NULL; -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Mon Apr 2 18:18:08 2012 From: python-checkins at python.org (brett.cannon) Date: Mon, 02 Apr 2012 18:18:08 +0200 Subject: [Python-checkins] =?utf8?q?cpython=3A_Guard_an_LLTRACE_variable_t?= =?utf8?q?o_silence_an_unused_variable_warning=2E?= Message-ID: http://hg.python.org/cpython/rev/bac033e488b4 changeset: 76072:bac033e488b4 user: Brett Cannon date: Mon Apr 02 12:17:59 2012 -0400 summary: Guard an LLTRACE variable to silence an unused variable warning. files: Python/ceval.c | 2 ++ 1 files changed, 2 insertions(+), 0 deletions(-) diff --git a/Python/ceval.c b/Python/ceval.c --- a/Python/ceval.c +++ b/Python/ceval.c @@ -822,7 +822,9 @@ PyObject *names; PyObject *consts; +#ifdef LLTRACE _Py_IDENTIFIER(__ltrace__); +#endif /* Computed GOTOs, or the-optimization-commonly-but-improperly-known-as-"threaded code" -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Mon Apr 2 19:11:58 2012 From: python-checkins at python.org (stefan.krah) Date: Mon, 02 Apr 2012 19:11:58 +0200 Subject: [Python-checkins] =?utf8?q?cpython=3A_Testing_the_implicit_initia?= =?utf8?q?lization_of_the_thread_local_context_on_first?= Message-ID: http://hg.python.org/cpython/rev/09cff57b5541 changeset: 76073:09cff57b5541 user: Stefan Krah date: Mon Apr 02 19:10:20 2012 +0200 summary: Testing the implicit initialization of the thread local context on first access fails (expectedly) if other modules have already used decimal. The only option is to remove the test. files: Lib/test/test_decimal.py | 70 ---------------------------- 1 files changed, 0 insertions(+), 70 deletions(-) diff --git a/Lib/test/test_decimal.py b/Lib/test/test_decimal.py --- a/Lib/test/test_decimal.py +++ b/Lib/test/test_decimal.py @@ -56,76 +56,6 @@ fractions = {C:cfractions, P:pfractions} sys.modules['decimal'] = orig_sys_decimal -############ RunFirst ############ -class RunFirst(unittest.TestCase): - - def setUp(self): - self.save_default = self.decimal.DefaultContext.copy() - - def tearDown(self): - DefaultContext = self.decimal.DefaultContext - - DefaultContext.prec = self.save_default.prec - DefaultContext.rounding = self.save_default.rounding - DefaultContext.Emax = self.save_default.Emax - DefaultContext.Emin = self.save_default.Emin - DefaultContext.capitals = self.save_default.capitals - DefaultContext.clamp = self.save_default.clamp - DefaultContext.flags = self.save_default.flags - DefaultContext.traps = self.save_default.traps - - self.decimal.setcontext(self.decimal.DefaultContext) - - def test_00default_context(self): - # The test depends on the fact that getcontext() is called - # for the first time. 
- DefaultContext = self.decimal.DefaultContext - ROUND_05UP = self.decimal.ROUND_05UP - Clamped = self.decimal.Clamped - InvalidOperation = self.decimal.InvalidOperation - - DefaultContext.prec = 5001 - DefaultContext.rounding = ROUND_05UP - DefaultContext.Emax = 10025 - DefaultContext.Emin = -10025 - DefaultContext.capitals = 0 - DefaultContext.clamp = 1 - DefaultContext.flags[InvalidOperation] = True - DefaultContext.clear_traps() - DefaultContext.traps[Clamped] = True - - # implicit initialization on first access - c = self.decimal.getcontext() - - self.assertEqual(c.prec, 5001) - self.assertEqual(c.rounding, ROUND_05UP) - self.assertEqual(c.Emax, 10025) - self.assertEqual(c.Emin, -10025) - self.assertEqual(c.capitals, 0) - self.assertEqual(c.clamp, 1) - for k in c.flags: - self.assertFalse(c.flags[k]) - for k in c.traps: - if k is Clamped: - self.assertTrue(c.traps[k]) - else: - self.assertFalse(c.traps[k]) - - # explicit initialization - self.decimal.setcontext(DefaultContext) - c = self.decimal.getcontext() - for k in c.flags: - self.assertFalse(c.flags[k]) - -class CRunFirst(RunFirst): - decimal = C -class PyRunFirst(RunFirst): - decimal = P -if C: - run_unittest(CRunFirst, PyRunFirst) -else: - run_unittest(PyRunFirst) -############ END RunFirst ############ # Useful Test Constant Signals = { -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Mon Apr 2 20:26:00 2012 From: python-checkins at python.org (benjamin.peterson) Date: Mon, 02 Apr 2012 20:26:00 +0200 Subject: [Python-checkins] =?utf8?q?cpython_=282=2E7=29=3A_remove_uneeded_?= =?utf8?q?line?= Message-ID: http://hg.python.org/cpython/rev/2b7aff01ca89 changeset: 76074:2b7aff01ca89 branch: 2.7 parent: 76067:d552016fc07c user: Benjamin Peterson date: Mon Apr 02 14:22:50 2012 -0400 summary: remove uneeded line files: Lib/test/test_thread.py | 1 - 1 files changed, 0 insertions(+), 1 deletions(-) diff --git a/Lib/test/test_thread.py b/Lib/test/test_thread.py --- a/Lib/test/test_thread.py +++ b/Lib/test/test_thread.py @@ -134,7 +134,6 @@ # See issue #14474 def task(): started.release() - sys.stderr = stderr raise SyntaxError def mywrite(self, *args): try: -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Mon Apr 2 20:26:01 2012 From: python-checkins at python.org (benjamin.peterson) Date: Mon, 02 Apr 2012 20:26:01 +0200 Subject: [Python-checkins] =?utf8?q?cpython_=283=2E2=29=3A_remove_uneeded_?= =?utf8?q?line?= Message-ID: http://hg.python.org/cpython/rev/98ab6e322c40 changeset: 76075:98ab6e322c40 branch: 3.2 parent: 76070:b3639f6aaa2b user: Benjamin Peterson date: Mon Apr 02 14:22:50 2012 -0400 summary: remove uneeded line files: Lib/test/test_thread.py | 1 - 1 files changed, 0 insertions(+), 1 deletions(-) diff --git a/Lib/test/test_thread.py b/Lib/test/test_thread.py --- a/Lib/test/test_thread.py +++ b/Lib/test/test_thread.py @@ -132,7 +132,6 @@ # See issue #14474 def task(): started.release() - sys.stderr = stderr raise SyntaxError def mywrite(self, *args): try: -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Mon Apr 2 20:26:03 2012 From: python-checkins at python.org (benjamin.peterson) Date: Mon, 02 Apr 2012 20:26:03 +0200 Subject: [Python-checkins] =?utf8?q?cpython_=28merge_3=2E2_-=3E_default=29?= =?utf8?q?=3A_merge_3=2E2?= Message-ID: http://hg.python.org/cpython/rev/e97940f701be changeset: 76076:e97940f701be parent: 76073:09cff57b5541 parent: 76075:98ab6e322c40 user: Benjamin Peterson date: Mon Apr 02 14:25:55 2012 -0400 
summary: merge 3.2 files: Lib/test/test_thread.py | 1 - 1 files changed, 0 insertions(+), 1 deletions(-) diff --git a/Lib/test/test_thread.py b/Lib/test/test_thread.py --- a/Lib/test/test_thread.py +++ b/Lib/test/test_thread.py @@ -132,7 +132,6 @@ # See issue #14474 def task(): started.release() - sys.stderr = stderr raise SyntaxError def mywrite(self, *args): try: -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Mon Apr 2 21:00:32 2012 From: python-checkins at python.org (stefan.krah) Date: Mon, 02 Apr 2012 21:00:32 +0200 Subject: [Python-checkins] =?utf8?q?cpython=3A_Improve_comments=2E?= Message-ID: http://hg.python.org/cpython/rev/5fe882b2e00f changeset: 76077:5fe882b2e00f parent: 76073:09cff57b5541 user: Stefan Krah date: Mon Apr 02 20:51:08 2012 +0200 summary: Improve comments. files: Modules/_decimal/_decimal.c | 10 +++++----- 1 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Modules/_decimal/_decimal.c b/Modules/_decimal/_decimal.c --- a/Modules/_decimal/_decimal.c +++ b/Modules/_decimal/_decimal.c @@ -1510,7 +1510,7 @@ #define CURRENT_CONTEXT_ADDR(ctx) \ ctx = CTX(current_context()) -/* Return current context, increment reference */ +/* Return a new reference to the current context */ static PyObject * PyDec_GetCurrentContext(void) { @@ -1614,7 +1614,7 @@ ctx = CTX(_c_t_x_o_b_j); \ } -/* Return current context, increment reference */ +/* Return a new reference to the current context */ static PyObject * PyDec_GetCurrentContext(void) { @@ -1759,7 +1759,7 @@ 0, /* tp_print */ (getattrfunc) 0, /* tp_getattr */ (setattrfunc) 0, /* tp_setattr */ - 0, /* tp_compare */ + 0, /* tp_reserved */ (reprfunc) 0, /* tp_repr */ 0, /* tp_as_number */ 0, /* tp_as_sequence */ @@ -2699,7 +2699,7 @@ /******************************************************************************/ -/* Implicit conversions to Decimal */ +/* Implicit conversions to Decimal */ /******************************************************************************/ /* Try to convert PyObject v to a new PyDecObject conv. If the conversion @@ -2796,7 +2796,7 @@ /******************************************************************************/ -/* Implicit conversions to Decimal for comparison */ +/* Implicit conversions to Decimal for comparison */ /******************************************************************************/ /* Convert rationals for comparison */ -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Mon Apr 2 21:00:33 2012 From: python-checkins at python.org (stefan.krah) Date: Mon, 02 Apr 2012 21:00:33 +0200 Subject: [Python-checkins] =?utf8?q?cpython_=28merge_default_-=3E_default?= =?utf8?b?KTogTWVyZ2Uu?= Message-ID: http://hg.python.org/cpython/rev/44c6997191d3 changeset: 76078:44c6997191d3 parent: 76077:5fe882b2e00f parent: 76076:e97940f701be user: Stefan Krah date: Mon Apr 02 20:59:15 2012 +0200 summary: Merge. 
files: Lib/test/test_thread.py | 1 - 1 files changed, 0 insertions(+), 1 deletions(-) diff --git a/Lib/test/test_thread.py b/Lib/test/test_thread.py --- a/Lib/test/test_thread.py +++ b/Lib/test/test_thread.py @@ -132,7 +132,6 @@ # See issue #14474 def task(): started.release() - sys.stderr = stderr raise SyntaxError def mywrite(self, *args): try: -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Mon Apr 2 21:48:06 2012 From: python-checkins at python.org (victor.stinner) Date: Mon, 02 Apr 2012 21:48:06 +0200 Subject: [Python-checkins] =?utf8?q?peps=3A_PEP_418=3A_time=2Eget=5Fclock?= =?utf8?q?=5Finfo=28=29_may_provide_more_information_=28accuracy=2C?= Message-ID: http://hg.python.org/peps/rev/aeff04d14ad8 changeset: 4186:aeff04d14ad8 user: Victor Stinner date: Mon Apr 02 21:48:03 2012 +0200 summary: PEP 418: time.get_clock_info() may provide more information (accuracy, is_adjusted) files: pep-0418.txt | 21 ++++++++++++++------- 1 files changed, 14 insertions(+), 7 deletions(-) diff --git a/pep-0418.txt b/pep-0418.txt --- a/pep-0418.txt +++ b/pep-0418.txt @@ -201,20 +201,27 @@ time.get_clock_info(name) ------------------------- -Get information on the specified time function as a dictionary. Only the following -names are accepted: +Get information on the specified clock. Supported clocks: * "clock": time.clock() * "highres": time.highres() * "monotonic": time.monotonic() * "time": time.time() -The following keys are always present: +Return a dictionary with the following keys: - * "function" (str): name of the underlying operating system function (ex: - "QueryPerformanceCounter()" or "clock_gettime(CLOCK_REALTIME)") - * "resolution" (float): resolution in seconds of the function - * "is_monotonic" (bool): True if the clock is monotonic + * Mandatory keys: + + * "function" (str): name of the underlying operating system function. + Examples: "QueryPerformanceCounter()", "clock_gettime(CLOCK_REALTIME)". + * "resolution" (float): resolution in seconds of the clock + * "is_monotonic" (bool): True if the clock cannot go backward + + * Optional keys: + + * "accuracy" (float): accuracy in seconds of the clock + * "is_adjusted" (bool): True if the clock can be adjusted (e.g. by a NTP + daemon) Hardware clocks -- Repository URL: http://hg.python.org/peps From python-checkins at python.org Mon Apr 2 21:52:40 2012 From: python-checkins at python.org (victor.stinner) Date: Mon, 02 Apr 2012 21:52:40 +0200 Subject: [Python-checkins] =?utf8?q?peps=3A_PEP_418=3A_Add_link_to_libpthr?= =?utf8?q?ead_for_Windows?= Message-ID: http://hg.python.org/peps/rev/13cf90c1c02b changeset: 4187:13cf90c1c02b user: Victor Stinner date: Mon Apr 02 21:52:32 2012 +0200 summary: PEP 418: Add link to libpthread for Windows files: pep-0418.txt | 3 +++ 1 files changed, 3 insertions(+), 0 deletions(-) diff --git a/pep-0418.txt b/pep-0418.txt --- a/pep-0418.txt +++ b/pep-0418.txt @@ -764,6 +764,9 @@ clock_gettime(CLOCK_MONOTONIC), mach_absolute_time() or gettimeofday(). "AbsoluteTime.monotonic?" method indicates if AbsoluteTime.now is monotonic or not. 
+* `libpthread + `_: POSIX thread library for Windows + (`clock.c `_) Time: -- Repository URL: http://hg.python.org/peps From python-checkins at python.org Tue Apr 3 00:18:54 2012 From: python-checkins at python.org (victor.stinner) Date: Tue, 03 Apr 2012 00:18:54 +0200 Subject: [Python-checkins] =?utf8?q?peps=3A_PEP_418=3A_Add_tables_of_clock?= =?utf8?q?_accuracy?= Message-ID: http://hg.python.org/peps/rev/8fbc2d2213b9 changeset: 4188:8fbc2d2213b9 user: Victor Stinner date: Tue Apr 03 00:18:51 2012 +0200 summary: PEP 418: Add tables of clock accuracy * Add a "NTP adjusted" section * Don't use CLOCK_MONOTONIC_RAW on Linux for time.monotonic() files: pep-0418.txt | 95 ++++++++++++++++++++++++++++++++++----- 1 files changed, 82 insertions(+), 13 deletions(-) diff --git a/pep-0418.txt b/pep-0418.txt --- a/pep-0418.txt +++ b/pep-0418.txt @@ -143,12 +143,9 @@ clk_id = monotonic.clocks[0] return time.clock_gettime(clk_id) except OSError: - # CLOCK_MONOTONIC_RAW requires a Linux kernel >= 2.6.28 del monotonic.clocks[0] return time.time() monotonic.clocks = [] - if hasattr(time, 'CLOCK_MONOTONIC_RAW'): - monotonic.clocks.append(time.CLOCK_MONOTONIC_RAW) if hasattr(time, 'CLOCK_HIGHRES'): monotonic.clocks.append(time.CLOCK_HIGHRES) monotonic.clocks.append(time.CLOCK_MONOTONIC) @@ -246,6 +243,32 @@ 32,768 Hz +NTP adjusted +============ + +NTP has diffent methods to adjust a clock: + + * "slewing": change the clock frequency to be slightly faster or slower + (which is done with adjtime()). Since the slew rate is limited to 0.5 ms/s, + each second of adjustment requires an amortization interval of 2000 s. Thus, + an adjustment of many seconds can take hours or days to amortize. + * "stepping": jump by a large amount in a single discrete step (which is done + with settimeofday()) + +By default, the time is slewed if the offset is less than 128 ms, or stepped +otherwise. + +Slewing is generally desirable (i.e. we should use CLOCK_MONOTONIC, not +CLOCK_MONOTONIC_RAW) if one wishes to measure "real" time (and not a time-like +object like CPU cycles). This is because the clock on the other end of the NTP +connection from you is probably better at keeping time: hopefully that thirty +five thousand dollars of Cesium timekeeping goodness is doing something better +than your PC's $3 quartz crystal, after all. + +Get more detail in the `documentation of the NTP daemon +`_. + + Operating system clocks ======================= @@ -258,7 +281,7 @@ CLOCK_MONOTONIC_RAW 1 ns (*) No Stopped gethrtime 1 ns (*) No Not stopped CLOCK_HIGHRES 1 ns (*) No ? -CLOCK_MONOTONIC 1 ns (*) Yes on Linux Stopped on Linux +CLOCK_MONOTONIC 1 ns (*) Slewed on Linux Stopped on Linux mach_absolute_time() 1 ns (*) No ? QueryPerformanceCounter() \- 0.3 ns - 5 ns No Accuracy issue GetTickCount[64]() 1 ms 1 ms - 15 ms No Include suspend time @@ -278,6 +301,25 @@ example, QueryPerformanceCounter() has a good accuracy but is known to not have a steady rate. 
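To make the proposed time.get_clock_info() concrete, here is a sketch written against the draft API of changeset 4186 above (a dictionary with "function", "resolution", "is_monotonic" and the optional "accuracy"/"is_adjusted" keys). It illustrates the proposal only: the function does not exist before Python 3.3, and the interface that eventually shipped differs (an object with attributes, and no "highres" clock)::

    import time

    for name in ("time", "monotonic", "highres", "clock"):
        try:
            info = time.get_clock_info(name)
        except ValueError:
            continue                    # clock name not defined on this platform
        print("%-9s %-35s resolution=%g is_monotonic=%s"
              % (name, info["function"], info["resolution"],
                 info["is_monotonic"]))
        if "accuracy" in info:          # optional key in the draft
            print("%-9s accuracy=%g" % ("", info["accuracy"]))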
+Examples of clock accuracy on x86_64: + +========================= ================ =============== +Name Operating system Accuracy +========================= ================ =============== +CLOCK_MONOTONIC_RAW Linux 3.2 1 ns +CLOCK_MONOTONIC Linux 3.2 1 ns +CLOCK_HIGHRES SunOS 5.11 2 ns +CLOCK_MONOTONIC SunOS 5.11 2 ns +QueryPerformanceCounter Windows Vista 10 ns +CLOCK_MONOTONIC FreeBSD 8.2 11 ns +CLOCK_MONOTONIC OpenBSD 5.0 10 ms +GetTickCount Windows Vista 15.6 ms +========================= ================ =============== + +For CLOCK_MONOTONIC and CLOCK_MONOTONIC_RAW, the accuracy of this table is the +result of clock_getres(). It looks like Linux does not implement +clock_getres() and always return 1 nanosecond. + mach_absolute_time ^^^^^^^^^^^^^^^^^^ @@ -325,14 +367,12 @@ * Mac OS X * Windows -CLOCK_MONOTONIC_RAW is specific to Linux. It is similar to -CLOCK_MONOTONIC, but provides access to a raw hardware-based time that -is not subject to NTP adjustments. CLOCK_MONOTONIC_RAW requires Linux -2.6.28 or later. +On Linux, NTP may adjust the CLOCK_MONOTONIC rate (slewed), but it cannot +jump backward. -On Linux, NTP may adjust the CLOCK_MONOTONIC rate, but it cannot jump -backward. If available, CLOCK_MONOTONIC_RAW should be used instead of -CLOCK_MONOTONIC to avoid the NTP adjustment. +CLOCK_MONOTONIC_RAW is specific to Linux. It is similar to CLOCK_MONOTONIC, but +provides access to a raw hardware-based time that is not subject to NTP +adjustments. CLOCK_MONOTONIC_RAW requires Linux 2.6.28 or later. CLOCK_MONOTONIC stops while the machine is suspended. @@ -504,8 +544,23 @@ ========================= =============== =============== (*) The accuracy of system clocks depends on the operating system and -the hardware clock. On Windows, the accuracy is in the range 1 ms - -15 ms. +the hardware clock. + +Examples of clock accuracy on x86_64: + +========================= ================ =============== +Name Operating system Accuracy +========================= ================ =============== +CLOCK_REALTIME Linux 3.2 1 ns +CLOCK_REALTIME FreeBSD 8.2 11 ns +CLOCK_REALTIME SunOS 5.11 10 ms +CLOCK_REALTIME OpenBSD 5.0 10 ms +GetSystemTimeAsFileTime Windows Vista 15.6 ms +========================= ================ =============== + +For CLOCK_REALTIME, the accuracy of this table is the result of clock_getres(). +It looks like Linux does not implement clock_getres() and always return 1 +nanosecond. Windows: GetSystemTimeAsFileTime @@ -573,6 +628,20 @@ clock_getres(CLOCK_REALTIME) is 1 nanosecond on Linux. * GetProcessTimes(): call GetSystemTimeAdjustment(). +Examples of clock accuracy on x86_64: + +========================= ================ =============== +Name Operating system Accuracy +========================= ================ =============== +clock() Linux 3.2 1 ms +clock() SunOS 5.11 1 ms +clock() FreeBSD 8.2 7.8 ms +clock() OpenBSD 5.0 10 ms +========================= ================ =============== + +The accuracy of clock() in this table is the result of 1 / CLOCKS_PER_SEC. 
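Most of the accuracy figures above are simply the result of clock_getres(); they can be cross-checked with a small probe that also measures the smallest step a clock actually delivers. A sketch using the clock_gettime()/clock_getres() bindings added to the time module in Python 3.3 (POSIX systems only; clock IDs missing on the platform are skipped)::

    import time

    def measured_step(clk_id, samples=100000):
        # Smallest non-zero difference between consecutive readings: a rough,
        # load-dependent lower bound on the clock's usable accuracy.
        best = None
        last = time.clock_gettime(clk_id)
        for _ in range(samples):
            now = time.clock_gettime(clk_id)
            if now != last:
                if best is None or now - last < best:
                    best = now - last
                last = now
        return best

    for name in ("CLOCK_REALTIME", "CLOCK_MONOTONIC", "CLOCK_MONOTONIC_RAW",
                 "CLOCK_HIGHRES", "CLOCK_PROCESS_CPUTIME_ID"):
        clk_id = getattr(time, name, None)
        if clk_id is not None:
            print(name, time.clock_getres(clk_id), measured_step(clk_id))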
+ + Thread ^^^^^^ -- Repository URL: http://hg.python.org/peps From python-checkins at python.org Tue Apr 3 00:31:22 2012 From: python-checkins at python.org (victor.stinner) Date: Tue, 03 Apr 2012 00:31:22 +0200 Subject: [Python-checkins] =?utf8?q?cpython=3A_Add_time=2ECLOCK=5FHIGHRES_?= =?utf8?q?constant=2C_needed_on_Solaris?= Message-ID: http://hg.python.org/cpython/rev/c5395ab7447a changeset: 76079:c5395ab7447a user: Victor Stinner date: Tue Apr 03 00:31:17 2012 +0200 summary: Add time.CLOCK_HIGHRES constant, needed on Solaris files: Doc/library/time.rst | 9 +++++++++ Modules/timemodule.c | 3 +++ 2 files changed, 12 insertions(+), 0 deletions(-) diff --git a/Doc/library/time.rst b/Doc/library/time.rst --- a/Doc/library/time.rst +++ b/Doc/library/time.rst @@ -159,6 +159,15 @@ .. versionadded:: 3.3 +.. data:: CLOCK_HIGHRES + + The Solaris OS has a CLOCK_HIGHRES timer that attempts to use an optimal + hardware source, and may give close to nanosecond resolution. CLOCK_HIGHRES + is the nonadjustable, high-resolution clock. + + .. versionadded:: 3.3 + + .. data:: CLOCK_MONOTONIC Clock that cannot be set and represents monotonic time since some diff --git a/Modules/timemodule.c b/Modules/timemodule.c --- a/Modules/timemodule.c +++ b/Modules/timemodule.c @@ -962,6 +962,9 @@ #ifdef CLOCK_MONOTONIC_RAW PyModule_AddIntMacro(m, CLOCK_MONOTONIC_RAW); #endif +#ifdef CLOCK_HIGHRES + PyModule_AddIntMacro(m, CLOCK_HIGHRES); +#endif #ifdef CLOCK_PROCESS_CPUTIME_ID PyModule_AddIntMacro(m, CLOCK_PROCESS_CPUTIME_ID); #endif -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Tue Apr 3 00:45:16 2012 From: python-checkins at python.org (victor.stinner) Date: Tue, 03 Apr 2012 00:45:16 +0200 Subject: [Python-checkins] =?utf8?q?cpython=3A_Expose_clock=5Fsettime=28?= =?utf8?q?=29_as_time=2Eclock=5Fsettime=28=29?= Message-ID: http://hg.python.org/cpython/rev/8ba72c0987dc changeset: 76080:8ba72c0987dc user: Victor Stinner date: Tue Apr 03 00:45:07 2012 +0200 summary: Expose clock_settime() as time.clock_settime() files: Doc/library/time.rst | 7 +++++++ Lib/test/test_time.py | 11 +++++++++++ Modules/timemodule.c | 30 ++++++++++++++++++++++++++++++ 3 files changed, 48 insertions(+), 0 deletions(-) diff --git a/Doc/library/time.rst b/Doc/library/time.rst --- a/Doc/library/time.rst +++ b/Doc/library/time.rst @@ -151,6 +151,13 @@ .. versionadded:: 3.3 +.. function:: clock_settime(clk_id, time) + + Set the time of the specified clock *clk_id*. + + .. versionadded:: 3.3 + + .. data:: CLOCK_REALTIME System-wide real-time clock. 
Setting this clock requires appropriate diff --git a/Lib/test/test_time.py b/Lib/test/test_time.py --- a/Lib/test/test_time.py +++ b/Lib/test/test_time.py @@ -47,6 +47,17 @@ self.assertGreater(res, 0.0) self.assertLessEqual(res, 1.0) + @unittest.skipUnless(hasattr(time, 'clock_settime'), + 'need time.clock_settime()') + def test_clock_settime(self): + t = time.clock_gettime(time.CLOCK_REALTIME) + try: + time.clock_settime(time.CLOCK_REALTIME, t) + except PermissionError: + pass + + self.assertRaises(OSError, time.clock_settime, time.CLOCK_MONOTONIC, 0) + def test_conversions(self): self.assertEqual(time.ctime(self.t), time.asctime(time.localtime(self.t))) diff --git a/Modules/timemodule.c b/Modules/timemodule.c --- a/Modules/timemodule.c +++ b/Modules/timemodule.c @@ -158,6 +158,33 @@ "clock_gettime(clk_id) -> floating point number\n\ \n\ Return the time of the specified clock clk_id."); + +static PyObject * +time_clock_settime(PyObject *self, PyObject *args) +{ + clockid_t clk_id; + PyObject *obj; + struct timespec tp; + int ret; + + if (!PyArg_ParseTuple(args, "iO:clock_settime", &clk_id, &obj)) + return NULL; + + if (_PyTime_ObjectToTimespec(obj, &tp.tv_sec, &tp.tv_nsec) == -1) + return NULL; + + ret = clock_settime((clockid_t)clk_id, &tp); + if (ret != 0) { + PyErr_SetFromErrno(PyExc_IOError); + return NULL; + } + Py_RETURN_NONE; +} + +PyDoc_STRVAR(clock_settime_doc, +"clock_settime(clk_id, time)\n\ +\n\ +Set the time of the specified clock clk_id."); #endif #ifdef HAVE_CLOCK_GETRES @@ -983,6 +1010,9 @@ #ifdef HAVE_CLOCK_GETTIME {"clock_gettime", time_clock_gettime, METH_VARARGS, clock_gettime_doc}, #endif +#ifdef HAVE_CLOCK_GETTIME + {"clock_settime", time_clock_settime, METH_VARARGS, clock_settime_doc}, +#endif #ifdef HAVE_CLOCK_GETRES {"clock_getres", time_clock_getres, METH_VARARGS, clock_getres_doc}, #endif -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Tue Apr 3 01:14:31 2012 From: python-checkins at python.org (victor.stinner) Date: Tue, 03 Apr 2012 01:14:31 +0200 Subject: [Python-checkins] =?utf8?q?peps=3A_PEP_418=3A_Add_the_accuracy_of?= =?utf8?q?_process_and_thread_time_clocks?= Message-ID: http://hg.python.org/peps/rev/c98fcd2b4631 changeset: 4189:c98fcd2b4631 user: Victor Stinner date: Tue Apr 03 01:14:28 2012 +0200 summary: PEP 418: Add the accuracy of process and thread time clocks files: pep-0418.txt | 146 ++++++++++++++++++++++++-------------- 1 files changed, 90 insertions(+), 56 deletions(-) diff --git a/pep-0418.txt b/pep-0418.txt --- a/pep-0418.txt +++ b/pep-0418.txt @@ -275,21 +275,18 @@ Monotonic clocks ---------------- -========================= =============== =============== ================ ==================== -Name Resolution Accuracy Adjusted by NTP? Action on suspend -========================= =============== =============== ================ ==================== -CLOCK_MONOTONIC_RAW 1 ns (*) No Stopped -gethrtime 1 ns (*) No Not stopped -CLOCK_HIGHRES 1 ns (*) No ? -CLOCK_MONOTONIC 1 ns (*) Slewed on Linux Stopped on Linux -mach_absolute_time() 1 ns (*) No ? -QueryPerformanceCounter() \- 0.3 ns - 5 ns No Accuracy issue -GetTickCount[64]() 1 ms 1 ms - 15 ms No Include suspend time -timeGetTime() 1 ms 1 ms - 15 ms No ? -========================= =============== =============== ================ ==================== - -(*) The accuracy of monotonic clocks depends on the operating system -and the hardware clock. +========================= =============== ================ ==================== +Name Resolution Adjusted by NTP? 
Action on suspend +========================= =============== ================ ==================== +CLOCK_MONOTONIC_RAW 1 ns No Stopped +gethrtime 1 ns No Not stopped +CLOCK_HIGHRES 1 ns No ? +CLOCK_MONOTONIC 1 ns Slewed on Linux Stopped on Linux +mach_absolute_time() 1 ns No ? +QueryPerformanceCounter() \- No Accuracy issue +GetTickCount[64]() 1 ms No Include suspend time +timeGetTime() 1 ms No ? +========================= =============== ================ ==================== The resolution is the smallest difference between two timestamps supported by the format used by the clock. For example, @@ -306,14 +303,14 @@ ========================= ================ =============== Name Operating system Accuracy ========================= ================ =============== -CLOCK_MONOTONIC_RAW Linux 3.2 1 ns -CLOCK_MONOTONIC Linux 3.2 1 ns -CLOCK_HIGHRES SunOS 5.11 2 ns -CLOCK_MONOTONIC SunOS 5.11 2 ns -QueryPerformanceCounter Windows Vista 10 ns -CLOCK_MONOTONIC FreeBSD 8.2 11 ns -CLOCK_MONOTONIC OpenBSD 5.0 10 ms -GetTickCount Windows Vista 15.6 ms +CLOCK_MONOTONIC_RAW Linux 3.2 1 ns +CLOCK_MONOTONIC Linux 3.2 1 ns +CLOCK_HIGHRES SunOS 5.11 2 ns +CLOCK_MONOTONIC SunOS 5.11 2 ns +QueryPerformanceCounter Windows Seven 10 ns +CLOCK_MONOTONIC FreeBSD 8.2 11 ns +CLOCK_MONOTONIC OpenBSD 5.0 10 ms +GetTickCount Windows Seven 15.6 ms ========================= ================ =============== For CLOCK_MONOTONIC and CLOCK_MONOTONIC_RAW, the accuracy of this table is the @@ -533,18 +530,15 @@ System time clocks ------------------ -========================= =============== =============== -Name Resolution Accuracy -========================= =============== =============== -CLOCK_REALTIME 1 ns (*) -GetSystemTimeAsFileTime 100 ns 1 ms - 15 ms -gettimeofday() 1 ?s (*) -ftime() 1 ms (*) -time() 1 sec 1 sec -========================= =============== =============== - -(*) The accuracy of system clocks depends on the operating system and -the hardware clock. +========================= =============== +Name Resolution +========================= =============== +CLOCK_REALTIME 1 ns +GetSystemTimeAsFileTime 100 ns +gettimeofday() 1 ?s +ftime() 1 ms +time() 1 sec +========================= =============== Examples of clock accuracy on x86_64: @@ -555,7 +549,7 @@ CLOCK_REALTIME FreeBSD 8.2 11 ns CLOCK_REALTIME SunOS 5.11 10 ms CLOCK_REALTIME OpenBSD 5.0 10 ms -GetSystemTimeAsFileTime Windows Vista 15.6 ms +GetSystemTimeAsFileTime Windows Seven 15.6 ms ========================= ================ =============== For CLOCK_REALTIME, the accuracy of this table is the result of clock_getres(). @@ -587,7 +581,7 @@ Resolution: -* clock_gettime(): clock_getres(CLOCK_REALTIME), 1 nanosecond on Linux +* clock_gettime(): clock_getres(CLOCK_REALTIME) * gettimeofday(): 1 microsecond * ftime(): 1 millisecond * time(): 1 second @@ -596,14 +590,41 @@ clock_settime(CLOCK_REALTIME). -Process and thread time ------------------------ +Process time +------------ -The process and thread time cannot be set. They are not monotonic: -the clocks stop while the process/thread is idle. +The process time cannot be set. It is not monotonic: the clocks stop while the +process is idle. 
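The defining property of a process CPU-time clock, that it stops while the process is idle, is easy to observe. A minimal sketch, assuming a POSIX system that exposes CLOCK_PROCESS_CPUTIME_ID through Python 3.3's time.clock_gettime() (on Windows, GetProcessTimes() plays the same role)::

    import time

    wall0 = time.time()
    cpu0 = time.clock_gettime(time.CLOCK_PROCESS_CPUTIME_ID)

    time.sleep(1.0)                            # idle: only the wall clock advances
    total = sum(i * i for i in range(10**6))   # now actually burn some CPU

    wall = time.time() - wall0
    cpu = time.clock_gettime(time.CLOCK_PROCESS_CPUTIME_ID) - cpu0
    print("wall: %.3f s" % wall)               # roughly 1 s plus the loop
    print("cpu:  %.3f s" % cpu)                # only the loop; sleep() is not counted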
-Process -^^^^^^^ +========================= =============== +Name Resolution +========================= =============== +GetProcessTimes() 100 ns +CLOCK_PROCESS_CPUTIME_ID 1 ns +clock() \- +========================= =============== + +Examples of clock accuracy on x86_64: + +========================= ================ =============== +Name Operating system Accuracy +========================= ================ =============== +CLOCK_PROCESS_CPUTIME_ID Linux 3.2 1 ns +clock() Linux 3.2 1 ?s +clock() SunOS 5.11 1 ?s +clock() FreeBSD 8.2 7.8 ms +clock() OpenBSD 5.0 10 ms +GetProcessTimes() Windows Seven 15.6 ms +========================= ================ =============== + +The accuracy of clock() in this table is the result of 1 / CLOCKS_PER_SEC. +For CLOCK_PROCESS_CPUTIME_ID, the accuracy of this table is the result of +clock_getres(). It looks like Linux does not implement clock_getres() and +always return 1 nanosecond. For GetProcessTimes(), the accuracy is read using +GetSystemTimeAdjustment(). + +Functions +^^^^^^^^^ * Windows: GetProcessTimes() * clock_gettime(CLOCK_PROCESS_CPUTIME_ID): High-resolution per-process @@ -622,28 +643,41 @@ Resolution: * clock() rate is CLOCKS_PER_SEC. It was called CLK_TCK in Microsoft - C before 6.0. On Linux 3, clock() has a resolution of 1 - microsecond. + C before 6.0. * The clock resolution can be read using clock_getres(). - clock_getres(CLOCK_REALTIME) is 1 nanosecond on Linux. * GetProcessTimes(): call GetSystemTimeAdjustment(). + +Thread time +----------- + +The thread time cannot be set. It is not monotonic: the clocks stop while the +thread is idle. + +========================= =============== +Name Resolution +========================= =============== +GetThreadTimes() 100 ns +CLOCK_THREAD_CPUTIME_ID 1 ns +========================= =============== + Examples of clock accuracy on x86_64: ========================= ================ =============== Name Operating system Accuracy ========================= ================ =============== -clock() Linux 3.2 1 ms -clock() SunOS 5.11 1 ms -clock() FreeBSD 8.2 7.8 ms -clock() OpenBSD 5.0 10 ms +CLOCK_THREAD_CPUTIME_ID Linux 3.2 1 ns +CLOCK_THREAD_CPUTIME_ID FreeBSD 8.2 1 ?s +GetThreadTimes() Windows Seven 15.6 ms ========================= ================ =============== -The accuracy of clock() in this table is the result of 1 / CLOCKS_PER_SEC. +For CLOCK_THREAD_CPUTIME_ID, the accuracy of this table is the result of +clock_getres(). It looks like Linux does not implement clock_getres() and +always return 1 nanosecond. For GetThreadTimes(), the accuracy is read using +GetSystemTimeAdjustment(). - -Thread -^^^^^^ +Functions +^^^^^^^^^ * Windows: GetThreadTimes() * clock_gettime(CLOCK_THREAD_CPUTIME_ID): Thread-specific CPU-time @@ -651,15 +685,14 @@ Resolution: -* CLOCK_THREAD_CPUTIME_ID: call clock_getres(). 1 nanosecond on - Linux. +* CLOCK_THREAD_CPUTIME_ID: call clock_getres(). * GetThreadTimes(): call GetSystemTimeAdjustment() See also pthread_getcpuclockid(). Windows: QueryUnbiasedInterruptTime -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +----------------------------------- Gets the current unbiased interrupt time from the biased interrupt time and the current sleep bias amount. 
This time is not affected by -- Repository URL: http://hg.python.org/peps From python-checkins at python.org Tue Apr 3 01:26:38 2012 From: python-checkins at python.org (victor.stinner) Date: Tue, 03 Apr 2012 01:26:38 +0200 Subject: [Python-checkins] =?utf8?q?peps=3A_PEP_418=3A_Fix_a_typo?= Message-ID: http://hg.python.org/peps/rev/3044ae06a38f changeset: 4190:3044ae06a38f user: Victor Stinner date: Tue Apr 03 01:26:35 2012 +0200 summary: PEP 418: Fix a typo files: pep-0418.txt | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/pep-0418.txt b/pep-0418.txt --- a/pep-0418.txt +++ b/pep-0418.txt @@ -87,7 +87,7 @@ adjusted by NTP. The reference point of the returned value is undefined so only the difference of consecutive calls is valid. -Use time.get_clock_info('monotonic')['monotonic'] to check if the clock +Use time.get_clock_info('monotonic')['is_monotonic'] to check if the clock monotonic or not. The elapsed time may or may not include time the system spends in -- Repository URL: http://hg.python.org/peps From python-checkins at python.org Tue Apr 3 01:42:46 2012 From: python-checkins at python.org (victor.stinner) Date: Tue, 03 Apr 2012 01:42:46 +0200 Subject: [Python-checkins] =?utf8?q?peps=3A_PEP_418=3A_Rename_time=2Emonot?= =?utf8?b?b25pYygpIHRvIHRpbWUuc3RlYWR5KCk=?= Message-ID: http://hg.python.org/peps/rev/9729fd8ba5b3 changeset: 4191:9729fd8ba5b3 user: Victor Stinner date: Tue Apr 03 01:42:42 2012 +0200 summary: PEP 418: Rename time.monotonic() to time.steady() files: pep-0418.txt | 98 ++++++++++++++++++++-------------------- 1 files changed, 49 insertions(+), 49 deletions(-) diff --git a/pep-0418.txt b/pep-0418.txt --- a/pep-0418.txt +++ b/pep-0418.txt @@ -1,5 +1,5 @@ PEP: 418 -Title: Add monotonic and high-resolution time functions +Title: Add steady and high-resolution time functions Version: $Revision$ Last-Modified: $Date$ Author: Victor Stinner @@ -13,7 +13,7 @@ Abstract ======== -Add time.monotonic(), time.highres(), time.get_clock_info(name) functions to +Add time.steady(), time.highres(), time.get_clock_info(name) functions to Python 3.3. @@ -26,7 +26,7 @@ a wall clock): use system clock, i.e. time.time() or datetime.datetime.now(). * Benchmark, profiling: time.highres(). -* Event scheduler, timeout: time.monotonic(). +* Event scheduler, timeout: time.steady(). Functions @@ -36,8 +36,7 @@ * time.time(): system clock, "wall clock". * time.highres(): clock with the best accuracy. -* time.monotonic(): monotonic clock, or system clock if no monotonic - clock is available +* time.steady(): steady clock, should be monotonic * time.get_clock_info(name): get information on the specified time function @@ -79,15 +78,14 @@ return _time.time() -time.monotonic() ----------------- +time.steady() +------------- -Monotonic clock, or system clock if the platform does not provide a monotonic -clock (e.g. on GNU/Hurd). Its rate is as steady as possible. Its rate may be -adjusted by NTP. The reference point of the returned value is undefined so -only the difference of consecutive calls is valid. +Steady clock. Use a monotonic clock, or falls back to the system clock. Its +rate may be adjusted by NTP. The reference point of the returned value is +undefined so only the difference of consecutive calls is valid. -Use time.get_clock_info('monotonic')['is_monotonic'] to check if the clock +Use time.get_clock_info('steady')['is_monotonic'] to check if the clock monotonic or not. 
The elapsed time may or may not include time the system spends in @@ -98,60 +96,60 @@ if os.name == 'nt': # GetTickCount64() requires Windows Vista, Server 2008 or later if hasattr(time, '_GetTickCount64'): - def monotonic(): + def steady(): return _time.GetTickCount64() else: - def monotonic(): + def steady(): ticks = _time.GetTickCount() - if ticks < monotonic.last: + if ticks < steady.last: # Integer overflow detected - monotonic.delta += 2**32 - monotonic.last = ticks - return ticks + monotonic.delta - monotonic.last = 0 - monotonic.delta = 0 + steady.delta += 2**32 + steady.last = ticks + return ticks + steady.delta + steady.last = 0 + steady.delta = 0 elif os.name == 'mac': - def monotonic(): - if monotonic.factor is None: + def steady(): + if steady.factor is None: factor = _time.mach_timebase_info() - monotonic.factor = timebase[0] / timebase[1] - return _time.mach_absolute_time() * monotonic.factor - monotonic.factor = None + steady.factor = timebase[0] / timebase[1] + return _time.mach_absolute_time() * steady.factor + steady.factor = None elif os.name.startswith('sunos'): - def monotonic(): - if monotonic.use_clock_highres: + def steady(): + if steady.use_clock_highres: try: time.clock_gettime(time.CLOCK_HIGHRES) except OSError: - monotonic.use_clock_highres = False - if monotonic.use_gethrtime: + steady.use_clock_highres = False + if steady.use_gethrtime: try: return time.gethrtime() except OSError: - monotonic.use_gethrtime = False + steady.use_gethrtime = False return time.time() - monotonic.use_clock_highres = (hasattr(time, 'clock_gettime') + steady.use_clock_highres = (hasattr(time, 'clock_gettime') and hasattr(time, 'CLOCK_HIGHRES')) - monotonic.use_gethrtime = True + steady.use_gethrtime = True elif hasattr(time, "clock_gettime"): - def monotonic(): - while monotonic.clocks: + def steady(): + while steady.clocks: try: - clk_id = monotonic.clocks[0] + clk_id = steady.clocks[0] return time.clock_gettime(clk_id) except OSError: - del monotonic.clocks[0] + del steady.clocks[0] return time.time() - monotonic.clocks = [] + steady.clocks = [] if hasattr(time, 'CLOCK_HIGHRES'): - monotonic.clocks.append(time.CLOCK_HIGHRES) - monotonic.clocks.append(time.CLOCK_MONOTONIC) + steady.clocks.append(time.CLOCK_HIGHRES) + steady.clocks.append(time.CLOCK_MONOTONIC) else: - def monotonic(): + def steady(): return time.time() On Windows, QueryPerformanceCounter() is not used even though it has a @@ -160,10 +158,10 @@ .. note:: - time.monotonic() detects GetTickCount() integer overflow (32 bits, + time.steady() detects GetTickCount() integer overflow (32 bits, roll-over after 49.7 days): it increases a delta by 2\ :sup:`32` each time than an overflow is detected. The delta is stored in the - process-local state and so the value of time.monotonic() may be + process-local state and so the value of time.steady() may be different in two Python processes. 
@@ -185,15 +183,15 @@ # hardware does not support a high-resolution performance # counter for example highres.use_performance_counter = False - if highres.use_monotonic: + if highres.use_steady: # Monotonic clock is preferred over system clock try: - return time.monotonic() + return time.steady() except OSError: - highres.use_monotonic = False + highres.use_steady = False return time.time() highres.use_performance_counter = (os.name == 'nt') - highres.use_monotonic = hasattr(time, 'monotonic') + highres.use_steady = hasattr(time, 'steady') time.get_clock_info(name) ------------------------- @@ -202,7 +200,7 @@ * "clock": time.clock() * "highres": time.highres() - * "monotonic": time.monotonic() + * "steady": time.steady() * "time": time.time() Return a dictionary with the following keys: @@ -766,10 +764,11 @@ * time.timer(): "it would be too easy to confuse with (or misspell as) time.time()" -time.monotonic(): +time.steady(): -* time.steady(): no OS provides a clock advancing at a steady rate, so - "steady" should be avoided. +* time.monotonic(): QueryPerformanceCounter() is monotonic but it is not used + by time.steady() because it is not steady, and it is surprising to have to + check for time.get_clock_info('monotonic')['is_monotonic']. * time.try_monotonic(): it is a clear and obvious solution for the use-case of "I prefer the monotonic clock, if it is available, otherwise I'll take my chances with a best-effect clock." -- Repository URL: http://hg.python.org/peps From python-checkins at python.org Tue Apr 3 01:45:51 2012 From: python-checkins at python.org (victor.stinner) Date: Tue, 03 Apr 2012 01:45:51 +0200 Subject: [Python-checkins] =?utf8?q?peps=3A_PEP_418=3A_Fix_time=2Esteady?= =?utf8?q?=28=29_pseudo-code?= Message-ID: http://hg.python.org/peps/rev/6048daf4d45c changeset: 4192:6048daf4d45c user: Victor Stinner date: Tue Apr 03 01:45:49 2012 +0200 summary: PEP 418: Fix time.steady() pseudo-code GetTickCount[64] returns a number of milliseconds, not a number of seconds. files: pep-0418.txt | 4 ++-- 1 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pep-0418.txt b/pep-0418.txt --- a/pep-0418.txt +++ b/pep-0418.txt @@ -97,7 +97,7 @@ # GetTickCount64() requires Windows Vista, Server 2008 or later if hasattr(time, '_GetTickCount64'): def steady(): - return _time.GetTickCount64() + return _time.GetTickCount64() * 1e-3 else: def steady(): ticks = _time.GetTickCount() @@ -105,7 +105,7 @@ # Integer overflow detected steady.delta += 2**32 steady.last = ticks - return ticks + steady.delta + return (ticks + steady.delta) * 1e-3 steady.last = 0 steady.delta = 0 -- Repository URL: http://hg.python.org/peps From python-checkins at python.org Tue Apr 3 02:34:27 2012 From: python-checkins at python.org (brett.cannon) Date: Tue, 03 Apr 2012 02:34:27 +0200 Subject: [Python-checkins] =?utf8?q?cpython=3A_If_a_module_injects_somethi?= =?utf8?q?ng_into_sys=2Emodules_as_a_side-effect_of?= Message-ID: http://hg.python.org/cpython/rev/a40cd5976215 changeset: 76081:a40cd5976215 parent: 76072:bac033e488b4 user: Brett Cannon date: Mon Apr 02 20:33:56 2012 -0400 summary: If a module injects something into sys.modules as a side-effect of importation, then respect that injection. Discovered thanks to Lib/xml/parsers/expat.py injecting xml.parsers.expat.errors and etree now importing that directly as a module. 
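To make the behaviour change concrete, here is a small self-contained sketch of the scenario this changeset addresses; the package name and finder below are invented for illustration, and the sketch uses the modern find_spec-based finder API rather than the loader API of this era::

    import importlib.abc
    import importlib.util
    import sys
    import types

    class InjectingFinder(importlib.abc.MetaPathFinder, importlib.abc.Loader):
        # Importing the parent package has the side effect of registering
        # an entry for a submodule directly in sys.modules.
        def find_spec(self, fullname, path=None, target=None):
            if fullname == "fakepkg":
                return importlib.util.spec_from_loader(fullname, self,
                                                       is_package=True)
            return None

        def create_module(self, spec):
            return None          # use the default module object

        def exec_module(self, module):
            sys.modules["fakepkg.sub"] = types.ModuleType("fakepkg.sub")

    sys.meta_path.insert(0, InjectingFinder())

    # With the fix, the injected entry is respected instead of raising
    # ImportError because "fakepkg" has no real submodule "sub".
    import fakepkg.sub
    print(sys.modules["fakepkg.sub"])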
files: Lib/importlib/_bootstrap.py | 3 ++ Lib/importlib/test/import_/test_packages.py | 13 ++++++++++ 2 files changed, 16 insertions(+), 0 deletions(-) diff --git a/Lib/importlib/_bootstrap.py b/Lib/importlib/_bootstrap.py --- a/Lib/importlib/_bootstrap.py +++ b/Lib/importlib/_bootstrap.py @@ -927,6 +927,9 @@ if parent: if parent not in sys.modules: import_(parent) + # Crazy side-effects! + if name in sys.modules: + return sys.modules[name] # Backwards-compatibility; be nicer to skip the dict lookup. parent_module = sys.modules[parent] try: diff --git a/Lib/importlib/test/import_/test_packages.py b/Lib/importlib/test/import_/test_packages.py --- a/Lib/importlib/test/import_/test_packages.py +++ b/Lib/importlib/test/import_/test_packages.py @@ -27,6 +27,19 @@ with self.assertRaises(ImportError): import_util.import_('sys.no_submodules_here') + def test_module_not_package_but_side_effects(self): + # If a module injects something into sys.modules as a side-effect, then + # pick up on that fact. + name = 'mod' + subname = name + '.b' + def module_injection(): + sys.modules[subname] = 'total bunk' + mock_modules = util.mock_modules('mod', + module_code={'mod': module_injection}) + with mock_modules as mock: + with util.import_state(meta_path=[mock]): + submodule = import_util.import_(subname) + def test_main(): from test.support import run_unittest -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Tue Apr 3 02:34:28 2012 From: python-checkins at python.org (brett.cannon) Date: Tue, 03 Apr 2012 02:34:28 +0200 Subject: [Python-checkins] =?utf8?q?cpython_=28merge_default_-=3E_default?= =?utf8?q?=29=3A_merge?= Message-ID: http://hg.python.org/cpython/rev/d0e4e3ef224e changeset: 76082:d0e4e3ef224e parent: 76081:a40cd5976215 parent: 76080:8ba72c0987dc user: Brett Cannon date: Mon Apr 02 20:34:20 2012 -0400 summary: merge files: Doc/library/time.rst | 16 +++++ Lib/test/test_decimal.py | 70 ------------------------- Lib/test/test_thread.py | 1 - Lib/test/test_time.py | 11 +++ Modules/_decimal/_decimal.c | 10 +- Modules/timemodule.c | 33 +++++++++++ 6 files changed, 65 insertions(+), 76 deletions(-) diff --git a/Doc/library/time.rst b/Doc/library/time.rst --- a/Doc/library/time.rst +++ b/Doc/library/time.rst @@ -151,6 +151,13 @@ .. versionadded:: 3.3 +.. function:: clock_settime(clk_id, time) + + Set the time of the specified clock *clk_id*. + + .. versionadded:: 3.3 + + .. data:: CLOCK_REALTIME System-wide real-time clock. Setting this clock requires appropriate @@ -159,6 +166,15 @@ .. versionadded:: 3.3 +.. data:: CLOCK_HIGHRES + + The Solaris OS has a CLOCK_HIGHRES timer that attempts to use an optimal + hardware source, and may give close to nanosecond resolution. CLOCK_HIGHRES + is the nonadjustable, high-resolution clock. + + .. versionadded:: 3.3 + + .. 
data:: CLOCK_MONOTONIC Clock that cannot be set and represents monotonic time since some diff --git a/Lib/test/test_decimal.py b/Lib/test/test_decimal.py --- a/Lib/test/test_decimal.py +++ b/Lib/test/test_decimal.py @@ -56,76 +56,6 @@ fractions = {C:cfractions, P:pfractions} sys.modules['decimal'] = orig_sys_decimal -############ RunFirst ############ -class RunFirst(unittest.TestCase): - - def setUp(self): - self.save_default = self.decimal.DefaultContext.copy() - - def tearDown(self): - DefaultContext = self.decimal.DefaultContext - - DefaultContext.prec = self.save_default.prec - DefaultContext.rounding = self.save_default.rounding - DefaultContext.Emax = self.save_default.Emax - DefaultContext.Emin = self.save_default.Emin - DefaultContext.capitals = self.save_default.capitals - DefaultContext.clamp = self.save_default.clamp - DefaultContext.flags = self.save_default.flags - DefaultContext.traps = self.save_default.traps - - self.decimal.setcontext(self.decimal.DefaultContext) - - def test_00default_context(self): - # The test depends on the fact that getcontext() is called - # for the first time. - DefaultContext = self.decimal.DefaultContext - ROUND_05UP = self.decimal.ROUND_05UP - Clamped = self.decimal.Clamped - InvalidOperation = self.decimal.InvalidOperation - - DefaultContext.prec = 5001 - DefaultContext.rounding = ROUND_05UP - DefaultContext.Emax = 10025 - DefaultContext.Emin = -10025 - DefaultContext.capitals = 0 - DefaultContext.clamp = 1 - DefaultContext.flags[InvalidOperation] = True - DefaultContext.clear_traps() - DefaultContext.traps[Clamped] = True - - # implicit initialization on first access - c = self.decimal.getcontext() - - self.assertEqual(c.prec, 5001) - self.assertEqual(c.rounding, ROUND_05UP) - self.assertEqual(c.Emax, 10025) - self.assertEqual(c.Emin, -10025) - self.assertEqual(c.capitals, 0) - self.assertEqual(c.clamp, 1) - for k in c.flags: - self.assertFalse(c.flags[k]) - for k in c.traps: - if k is Clamped: - self.assertTrue(c.traps[k]) - else: - self.assertFalse(c.traps[k]) - - # explicit initialization - self.decimal.setcontext(DefaultContext) - c = self.decimal.getcontext() - for k in c.flags: - self.assertFalse(c.flags[k]) - -class CRunFirst(RunFirst): - decimal = C -class PyRunFirst(RunFirst): - decimal = P -if C: - run_unittest(CRunFirst, PyRunFirst) -else: - run_unittest(PyRunFirst) -############ END RunFirst ############ # Useful Test Constant Signals = { diff --git a/Lib/test/test_thread.py b/Lib/test/test_thread.py --- a/Lib/test/test_thread.py +++ b/Lib/test/test_thread.py @@ -132,7 +132,6 @@ # See issue #14474 def task(): started.release() - sys.stderr = stderr raise SyntaxError def mywrite(self, *args): try: diff --git a/Lib/test/test_time.py b/Lib/test/test_time.py --- a/Lib/test/test_time.py +++ b/Lib/test/test_time.py @@ -47,6 +47,17 @@ self.assertGreater(res, 0.0) self.assertLessEqual(res, 1.0) + @unittest.skipUnless(hasattr(time, 'clock_settime'), + 'need time.clock_settime()') + def test_clock_settime(self): + t = time.clock_gettime(time.CLOCK_REALTIME) + try: + time.clock_settime(time.CLOCK_REALTIME, t) + except PermissionError: + pass + + self.assertRaises(OSError, time.clock_settime, time.CLOCK_MONOTONIC, 0) + def test_conversions(self): self.assertEqual(time.ctime(self.t), time.asctime(time.localtime(self.t))) diff --git a/Modules/_decimal/_decimal.c b/Modules/_decimal/_decimal.c --- a/Modules/_decimal/_decimal.c +++ b/Modules/_decimal/_decimal.c @@ -1510,7 +1510,7 @@ #define CURRENT_CONTEXT_ADDR(ctx) \ ctx = CTX(current_context()) 
-/* Return current context, increment reference */ +/* Return a new reference to the current context */ static PyObject * PyDec_GetCurrentContext(void) { @@ -1614,7 +1614,7 @@ ctx = CTX(_c_t_x_o_b_j); \ } -/* Return current context, increment reference */ +/* Return a new reference to the current context */ static PyObject * PyDec_GetCurrentContext(void) { @@ -1759,7 +1759,7 @@ 0, /* tp_print */ (getattrfunc) 0, /* tp_getattr */ (setattrfunc) 0, /* tp_setattr */ - 0, /* tp_compare */ + 0, /* tp_reserved */ (reprfunc) 0, /* tp_repr */ 0, /* tp_as_number */ 0, /* tp_as_sequence */ @@ -2699,7 +2699,7 @@ /******************************************************************************/ -/* Implicit conversions to Decimal */ +/* Implicit conversions to Decimal */ /******************************************************************************/ /* Try to convert PyObject v to a new PyDecObject conv. If the conversion @@ -2796,7 +2796,7 @@ /******************************************************************************/ -/* Implicit conversions to Decimal for comparison */ +/* Implicit conversions to Decimal for comparison */ /******************************************************************************/ /* Convert rationals for comparison */ diff --git a/Modules/timemodule.c b/Modules/timemodule.c --- a/Modules/timemodule.c +++ b/Modules/timemodule.c @@ -158,6 +158,33 @@ "clock_gettime(clk_id) -> floating point number\n\ \n\ Return the time of the specified clock clk_id."); + +static PyObject * +time_clock_settime(PyObject *self, PyObject *args) +{ + clockid_t clk_id; + PyObject *obj; + struct timespec tp; + int ret; + + if (!PyArg_ParseTuple(args, "iO:clock_settime", &clk_id, &obj)) + return NULL; + + if (_PyTime_ObjectToTimespec(obj, &tp.tv_sec, &tp.tv_nsec) == -1) + return NULL; + + ret = clock_settime((clockid_t)clk_id, &tp); + if (ret != 0) { + PyErr_SetFromErrno(PyExc_IOError); + return NULL; + } + Py_RETURN_NONE; +} + +PyDoc_STRVAR(clock_settime_doc, +"clock_settime(clk_id, time)\n\ +\n\ +Set the time of the specified clock clk_id."); #endif #ifdef HAVE_CLOCK_GETRES @@ -962,6 +989,9 @@ #ifdef CLOCK_MONOTONIC_RAW PyModule_AddIntMacro(m, CLOCK_MONOTONIC_RAW); #endif +#ifdef CLOCK_HIGHRES + PyModule_AddIntMacro(m, CLOCK_HIGHRES); +#endif #ifdef CLOCK_PROCESS_CPUTIME_ID PyModule_AddIntMacro(m, CLOCK_PROCESS_CPUTIME_ID); #endif @@ -980,6 +1010,9 @@ #ifdef HAVE_CLOCK_GETTIME {"clock_gettime", time_clock_gettime, METH_VARARGS, clock_gettime_doc}, #endif +#ifdef HAVE_CLOCK_GETTIME + {"clock_settime", time_clock_settime, METH_VARARGS, clock_settime_doc}, +#endif #ifdef HAVE_CLOCK_GETRES {"clock_getres", time_clock_getres, METH_VARARGS, clock_getres_doc}, #endif -- Repository URL: http://hg.python.org/cpython From solipsis at pitrou.net Tue Apr 3 05:34:22 2012 From: solipsis at pitrou.net (solipsis at pitrou.net) Date: Tue, 03 Apr 2012 05:34:22 +0200 Subject: [Python-checkins] Daily reference leaks (d0e4e3ef224e): sum=3 Message-ID: results for d0e4e3ef224e on branch "default" -------------------------------------------- test_thread leaked [1, 1, 1] references, sum=3 Command line was: ['./python', '-m', 'test.regrtest', '-uall', '-R', '3:3:/home/antoine/cpython/refleaks/reflog9pcCae', '-x'] From python-checkins at python.org Tue Apr 3 06:35:50 2012 From: python-checkins at python.org (benjamin.peterson) Date: Tue, 03 Apr 2012 06:35:50 +0200 Subject: [Python-checkins] =?utf8?q?cpython_=282=2E7=29=3A_fix_parse=5Fsyn?= =?utf8?q?tax=5Ferror_to_clean_up_its_resources?= Message-ID: 
http://hg.python.org/cpython/rev/013766e7a6eb changeset: 76083:013766e7a6eb branch: 2.7 parent: 76074:2b7aff01ca89 user: Benjamin Peterson date: Tue Apr 03 00:30:38 2012 -0400 summary: fix parse_syntax_error to clean up its resources files: Python/pythonrun.c | 56 ++++++++++++++++++++------------- 1 files changed, 34 insertions(+), 22 deletions(-) diff --git a/Python/pythonrun.c b/Python/pythonrun.c --- a/Python/pythonrun.c +++ b/Python/pythonrun.c @@ -989,55 +989,67 @@ return PyArg_ParseTuple(err, "O(ziiz)", message, filename, lineno, offset, text); + *message = NULL; + /* new style errors. `err' is an instance */ - - if (! (v = PyObject_GetAttrString(err, "msg"))) - goto finally; - *message = v; - - if (!(v = PyObject_GetAttrString(err, "filename"))) - goto finally; - if (v == Py_None) - *filename = NULL; - else if (! (*filename = PyString_AsString(v))) + *message = PyObject_GetAttrString(err, "msg"); + if (!*message) goto finally; - Py_DECREF(v); - if (!(v = PyObject_GetAttrString(err, "lineno"))) + v = PyObject_GetAttrString(err, "filename"); + if (!v) + goto finally; + if (v == Py_None) { + Py_DECREF(v); + *filename = NULL; + } + else { + *filename = PyString_AsString(v); + Py_DECREF(v); + if (!*filename) + goto finally; + } + + v = PyObject_GetAttrString(err, "lineno"); + if (!v) goto finally; hold = PyInt_AsLong(v); Py_DECREF(v); - v = NULL; if (hold < 0 && PyErr_Occurred()) goto finally; *lineno = (int)hold; - if (!(v = PyObject_GetAttrString(err, "offset"))) + v = PyObject_GetAttrString(err, "offset"); + if (!v) goto finally; if (v == Py_None) { *offset = -1; Py_DECREF(v); - v = NULL; } else { hold = PyInt_AsLong(v); Py_DECREF(v); - v = NULL; if (hold < 0 && PyErr_Occurred()) goto finally; *offset = (int)hold; } - if (!(v = PyObject_GetAttrString(err, "text"))) + v = PyObject_GetAttrString(err, "text"); + if (!v) goto finally; - if (v == Py_None) + if (v == Py_None) { + Py_DECREF(v); *text = NULL; - else if (! (*text = PyString_AsString(v))) - goto finally; - Py_DECREF(v); + } + else { + *text = PyString_AsString(v); + Py_DECREF(v); + if (!*text) + goto finally; + } return 1; finally: - Py_XDECREF(v); + Py_XDECREF(*message); return 0; } -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Tue Apr 3 06:35:52 2012 From: python-checkins at python.org (benjamin.peterson) Date: Tue, 03 Apr 2012 06:35:52 +0200 Subject: [Python-checkins] =?utf8?q?cpython_=283=2E2=29=3A_fix_parse=5Fsyn?= =?utf8?q?tax=5Ferror_to_clean_up_its_resources?= Message-ID: http://hg.python.org/cpython/rev/43606a4085b0 changeset: 76084:43606a4085b0 branch: 3.2 parent: 76075:98ab6e322c40 user: Benjamin Peterson date: Tue Apr 03 00:30:38 2012 -0400 summary: fix parse_syntax_error to clean up its resources files: Python/pythonrun.c | 57 ++++++++++++++++++++------------- 1 files changed, 34 insertions(+), 23 deletions(-) diff --git a/Python/pythonrun.c b/Python/pythonrun.c --- a/Python/pythonrun.c +++ b/Python/pythonrun.c @@ -1335,56 +1335,67 @@ return PyArg_ParseTuple(err, "O(ziiz)", message, filename, lineno, offset, text); + *message = NULL; + /* new style errors. `err' is an instance */ - - if (! (v = PyObject_GetAttrString(err, "msg"))) - goto finally; - *message = v; - - if (!(v = PyObject_GetAttrString(err, "filename"))) - goto finally; - if (v == Py_None) - *filename = NULL; - else if (! 
(*filename = _PyUnicode_AsString(v))) + *message = PyObject_GetAttrString(err, "msg"); + if (!*message) goto finally; - Py_DECREF(v); - if (!(v = PyObject_GetAttrString(err, "lineno"))) + v = PyObject_GetAttrString(err, "filename"); + if (!v) + goto finally; + if (v == Py_None) { + Py_DECREF(v); + *filename = NULL; + } + else { + *filename = _PyUnicode_AsString(v); + Py_DECREF(v); + if (!*filename) + goto finally; + } + + v = PyObject_GetAttrString(err, "lineno"); + if (!v) goto finally; hold = PyLong_AsLong(v); Py_DECREF(v); - v = NULL; if (hold < 0 && PyErr_Occurred()) goto finally; *lineno = (int)hold; - if (!(v = PyObject_GetAttrString(err, "offset"))) + v = PyObject_GetAttrString(err, "offset"); + if (!v) goto finally; if (v == Py_None) { *offset = -1; Py_DECREF(v); - v = NULL; } else { hold = PyLong_AsLong(v); Py_DECREF(v); - v = NULL; if (hold < 0 && PyErr_Occurred()) goto finally; *offset = (int)hold; } - if (!(v = PyObject_GetAttrString(err, "text"))) + v = PyObject_GetAttrString(err, "text"); + if (!v) goto finally; - if (v == Py_None) + if (v == Py_None) { + Py_DECREF(v); *text = NULL; - else if (!PyUnicode_Check(v) || - !(*text = _PyUnicode_AsString(v))) - goto finally; - Py_DECREF(v); + } + else { + *text = _PyUnicode_AsString(v); + Py_DECREF(v); + if (!*text) + goto finally; + } return 1; finally: - Py_XDECREF(v); + Py_XDECREF(*message); return 0; } -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Tue Apr 3 06:35:53 2012 From: python-checkins at python.org (benjamin.peterson) Date: Tue, 03 Apr 2012 06:35:53 +0200 Subject: [Python-checkins] =?utf8?q?cpython_=28merge_3=2E2_-=3E_default=29?= =?utf8?q?=3A_merge_3=2E2?= Message-ID: http://hg.python.org/cpython/rev/ff7587546a1d changeset: 76085:ff7587546a1d parent: 76076:e97940f701be parent: 76084:43606a4085b0 user: Benjamin Peterson date: Tue Apr 03 00:35:36 2012 -0400 summary: merge 3.2 files: Python/pythonrun.c | 57 ++++++++++++++++++++------------- 1 files changed, 34 insertions(+), 23 deletions(-) diff --git a/Python/pythonrun.c b/Python/pythonrun.c --- a/Python/pythonrun.c +++ b/Python/pythonrun.c @@ -1356,56 +1356,67 @@ _Py_IDENTIFIER(offset); _Py_IDENTIFIER(text); + *message = NULL; + /* new style errors. `err' is an instance */ - - if (! (v = _PyObject_GetAttrId(err, &PyId_msg))) - goto finally; - *message = v; - - if (!(v = _PyObject_GetAttrId(err, &PyId_filename))) - goto finally; - if (v == Py_None) - *filename = NULL; - else if (! 
(*filename = _PyUnicode_AsString(v))) + *message = _PyObject_GetAttrId(err, &PyId_msg); + if (!*message) goto finally; - Py_DECREF(v); - if (!(v = _PyObject_GetAttrId(err, &PyId_lineno))) + v = _PyObject_GetAttrId(err, &PyId_filename); + if (!v) + goto finally; + if (v == Py_None) { + Py_DECREF(v); + *filename = NULL; + } + else { + *filename = _PyUnicode_AsString(v); + Py_DECREF(v); + if (!*filename) + goto finally; + } + + v = _PyObject_GetAttrId(err, &PyId_lineno); + if (!v) goto finally; hold = PyLong_AsLong(v); Py_DECREF(v); - v = NULL; if (hold < 0 && PyErr_Occurred()) goto finally; *lineno = (int)hold; - if (!(v = _PyObject_GetAttrId(err, &PyId_offset))) + v = _PyObject_GetAttrId(err, &PyId_offset); + if (!v) goto finally; if (v == Py_None) { *offset = -1; Py_DECREF(v); - v = NULL; } else { hold = PyLong_AsLong(v); Py_DECREF(v); - v = NULL; if (hold < 0 && PyErr_Occurred()) goto finally; *offset = (int)hold; } - if (!(v = _PyObject_GetAttrId(err, &PyId_text))) + v = _PyObject_GetAttrId(err, &PyId_text); + if (!v) goto finally; - if (v == Py_None) + if (v == Py_None) { + Py_DECREF(v); *text = NULL; - else if (!PyUnicode_Check(v) || - !(*text = _PyUnicode_AsString(v))) - goto finally; - Py_DECREF(v); + } + else { + *text = _PyUnicode_AsString(v); + Py_DECREF(v); + if (!*text) + goto finally; + } return 1; finally: - Py_XDECREF(v); + Py_XDECREF(*message); return 0; } -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Tue Apr 3 06:35:55 2012 From: python-checkins at python.org (benjamin.peterson) Date: Tue, 03 Apr 2012 06:35:55 +0200 Subject: [Python-checkins] =?utf8?q?cpython_=28merge_default_-=3E_default?= =?utf8?q?=29=3A_merge_heads?= Message-ID: http://hg.python.org/cpython/rev/05cfde25ded1 changeset: 76086:05cfde25ded1 parent: 76085:ff7587546a1d parent: 76082:d0e4e3ef224e user: Benjamin Peterson date: Tue Apr 03 00:35:44 2012 -0400 summary: merge heads files: Doc/library/time.rst | 16 ++++ Lib/importlib/_bootstrap.py | 3 + Lib/importlib/test/import_/test_packages.py | 13 +++ Lib/test/test_time.py | 11 +++ Modules/_decimal/_decimal.c | 10 +- Modules/timemodule.c | 33 ++++++++++ 6 files changed, 81 insertions(+), 5 deletions(-) diff --git a/Doc/library/time.rst b/Doc/library/time.rst --- a/Doc/library/time.rst +++ b/Doc/library/time.rst @@ -151,6 +151,13 @@ .. versionadded:: 3.3 +.. function:: clock_settime(clk_id, time) + + Set the time of the specified clock *clk_id*. + + .. versionadded:: 3.3 + + .. data:: CLOCK_REALTIME System-wide real-time clock. Setting this clock requires appropriate @@ -159,6 +166,15 @@ .. versionadded:: 3.3 +.. data:: CLOCK_HIGHRES + + The Solaris OS has a CLOCK_HIGHRES timer that attempts to use an optimal + hardware source, and may give close to nanosecond resolution. CLOCK_HIGHRES + is the nonadjustable, high-resolution clock. + + .. versionadded:: 3.3 + + .. data:: CLOCK_MONOTONIC Clock that cannot be set and represents monotonic time since some diff --git a/Lib/importlib/_bootstrap.py b/Lib/importlib/_bootstrap.py --- a/Lib/importlib/_bootstrap.py +++ b/Lib/importlib/_bootstrap.py @@ -927,6 +927,9 @@ if parent: if parent not in sys.modules: import_(parent) + # Crazy side-effects! + if name in sys.modules: + return sys.modules[name] # Backwards-compatibility; be nicer to skip the dict lookup. 
parent_module = sys.modules[parent] try: diff --git a/Lib/importlib/test/import_/test_packages.py b/Lib/importlib/test/import_/test_packages.py --- a/Lib/importlib/test/import_/test_packages.py +++ b/Lib/importlib/test/import_/test_packages.py @@ -27,6 +27,19 @@ with self.assertRaises(ImportError): import_util.import_('sys.no_submodules_here') + def test_module_not_package_but_side_effects(self): + # If a module injects something into sys.modules as a side-effect, then + # pick up on that fact. + name = 'mod' + subname = name + '.b' + def module_injection(): + sys.modules[subname] = 'total bunk' + mock_modules = util.mock_modules('mod', + module_code={'mod': module_injection}) + with mock_modules as mock: + with util.import_state(meta_path=[mock]): + submodule = import_util.import_(subname) + def test_main(): from test.support import run_unittest diff --git a/Lib/test/test_time.py b/Lib/test/test_time.py --- a/Lib/test/test_time.py +++ b/Lib/test/test_time.py @@ -47,6 +47,17 @@ self.assertGreater(res, 0.0) self.assertLessEqual(res, 1.0) + @unittest.skipUnless(hasattr(time, 'clock_settime'), + 'need time.clock_settime()') + def test_clock_settime(self): + t = time.clock_gettime(time.CLOCK_REALTIME) + try: + time.clock_settime(time.CLOCK_REALTIME, t) + except PermissionError: + pass + + self.assertRaises(OSError, time.clock_settime, time.CLOCK_MONOTONIC, 0) + def test_conversions(self): self.assertEqual(time.ctime(self.t), time.asctime(time.localtime(self.t))) diff --git a/Modules/_decimal/_decimal.c b/Modules/_decimal/_decimal.c --- a/Modules/_decimal/_decimal.c +++ b/Modules/_decimal/_decimal.c @@ -1510,7 +1510,7 @@ #define CURRENT_CONTEXT_ADDR(ctx) \ ctx = CTX(current_context()) -/* Return current context, increment reference */ +/* Return a new reference to the current context */ static PyObject * PyDec_GetCurrentContext(void) { @@ -1614,7 +1614,7 @@ ctx = CTX(_c_t_x_o_b_j); \ } -/* Return current context, increment reference */ +/* Return a new reference to the current context */ static PyObject * PyDec_GetCurrentContext(void) { @@ -1759,7 +1759,7 @@ 0, /* tp_print */ (getattrfunc) 0, /* tp_getattr */ (setattrfunc) 0, /* tp_setattr */ - 0, /* tp_compare */ + 0, /* tp_reserved */ (reprfunc) 0, /* tp_repr */ 0, /* tp_as_number */ 0, /* tp_as_sequence */ @@ -2699,7 +2699,7 @@ /******************************************************************************/ -/* Implicit conversions to Decimal */ +/* Implicit conversions to Decimal */ /******************************************************************************/ /* Try to convert PyObject v to a new PyDecObject conv. 
If the conversion @@ -2796,7 +2796,7 @@ /******************************************************************************/ -/* Implicit conversions to Decimal for comparison */ +/* Implicit conversions to Decimal for comparison */ /******************************************************************************/ /* Convert rationals for comparison */ diff --git a/Modules/timemodule.c b/Modules/timemodule.c --- a/Modules/timemodule.c +++ b/Modules/timemodule.c @@ -158,6 +158,33 @@ "clock_gettime(clk_id) -> floating point number\n\ \n\ Return the time of the specified clock clk_id."); + +static PyObject * +time_clock_settime(PyObject *self, PyObject *args) +{ + clockid_t clk_id; + PyObject *obj; + struct timespec tp; + int ret; + + if (!PyArg_ParseTuple(args, "iO:clock_settime", &clk_id, &obj)) + return NULL; + + if (_PyTime_ObjectToTimespec(obj, &tp.tv_sec, &tp.tv_nsec) == -1) + return NULL; + + ret = clock_settime((clockid_t)clk_id, &tp); + if (ret != 0) { + PyErr_SetFromErrno(PyExc_IOError); + return NULL; + } + Py_RETURN_NONE; +} + +PyDoc_STRVAR(clock_settime_doc, +"clock_settime(clk_id, time)\n\ +\n\ +Set the time of the specified clock clk_id."); #endif #ifdef HAVE_CLOCK_GETRES @@ -962,6 +989,9 @@ #ifdef CLOCK_MONOTONIC_RAW PyModule_AddIntMacro(m, CLOCK_MONOTONIC_RAW); #endif +#ifdef CLOCK_HIGHRES + PyModule_AddIntMacro(m, CLOCK_HIGHRES); +#endif #ifdef CLOCK_PROCESS_CPUTIME_ID PyModule_AddIntMacro(m, CLOCK_PROCESS_CPUTIME_ID); #endif @@ -980,6 +1010,9 @@ #ifdef HAVE_CLOCK_GETTIME {"clock_gettime", time_clock_gettime, METH_VARARGS, clock_gettime_doc}, #endif +#ifdef HAVE_CLOCK_GETTIME + {"clock_settime", time_clock_settime, METH_VARARGS, clock_settime_doc}, +#endif #ifdef HAVE_CLOCK_GETRES {"clock_getres", time_clock_getres, METH_VARARGS, clock_getres_doc}, #endif -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Tue Apr 3 06:52:22 2012 From: python-checkins at python.org (benjamin.peterson) Date: Tue, 03 Apr 2012 06:52:22 +0200 Subject: [Python-checkins] =?utf8?q?cpython=3A_add_XXX?= Message-ID: http://hg.python.org/cpython/rev/737f710a72c9 changeset: 76087:737f710a72c9 user: Benjamin Peterson date: Tue Apr 03 00:52:18 2012 -0400 summary: add XXX files: Doc/whatsnew/3.3.rst | 2 ++ 1 files changed, 2 insertions(+), 0 deletions(-) diff --git a/Doc/whatsnew/3.3.rst b/Doc/whatsnew/3.3.rst --- a/Doc/whatsnew/3.3.rst +++ b/Doc/whatsnew/3.3.rst @@ -486,6 +486,8 @@ (:issue:`10516`) +.. XXX mention new error messages for passing wrong number of arguments to functions + New and Improved Modules ======================== -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Tue Apr 3 08:48:12 2012 From: python-checkins at python.org (andrew.svetlov) Date: Tue, 03 Apr 2012 08:48:12 +0200 Subject: [Python-checkins] =?utf8?q?cpython=3A_reformat_code_to_follow_PEP?= =?utf8?q?8?= Message-ID: http://hg.python.org/cpython/rev/db0063ce8a5c changeset: 76088:db0063ce8a5c user: Andrew Svetlov date: Tue Apr 03 09:39:47 2012 +0300 summary: reformat code to follow PEP8 files: Lib/tkinter/font.py | 16 ++++++++++------ 1 files changed, 10 insertions(+), 6 deletions(-) diff --git a/Lib/tkinter/font.py b/Lib/tkinter/font.py --- a/Lib/tkinter/font.py +++ b/Lib/tkinter/font.py @@ -10,19 +10,21 @@ import tkinter + # weight/slant NORMAL = "normal" ROMAN = "roman" BOLD = "bold" ITALIC = "italic" + def nametofont(name): """Given the name of a tk named font, returns a Font representation. 
""" return Font(name=name, exists=True) + class Font: - """Represents a named font. Constructor options are: @@ -63,7 +65,8 @@ options[args[i][1:]] = args[i+1] return options - def __init__(self, root=None, font=None, name=None, exists=False, **options): + def __init__(self, root=None, font=None, name=None, exists=False, + **options): if not root: root = tkinter._default_root if font: @@ -138,8 +141,7 @@ *self._set(options)) else: return self._mkdict( - self._split(self._call("font", "config", self.name)) - ) + self._split(self._call("font", "config", self.name))) configure = config @@ -155,8 +157,7 @@ if options: return int( - self._call("font", "metrics", self.name, self._get(options)) - ) + self._call("font", "metrics", self.name, self._get(options))) else: res = self._split(self._call("font", "metrics", self.name)) options = {} @@ -164,18 +165,21 @@ options[res[i][1:]] = int(res[i+1]) return options + def families(root=None): "Get font families (as a tuple)" if not root: root = tkinter._default_root return root.tk.splitlist(root.tk.call("font", "families")) + def names(root=None): "Get names of defined fonts (as a tuple)" if not root: root = tkinter._default_root return root.tk.splitlist(root.tk.call("font", "names")) + # -------------------------------------------------------------------- # test stuff -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Tue Apr 3 08:48:14 2012 From: python-checkins at python.org (andrew.svetlov) Date: Tue, 03 Apr 2012 08:48:14 +0200 Subject: [Python-checkins] =?utf8?q?cpython=3A_Issue_=23802310=3A_Generate?= =?utf8?q?_always_unique_tkinter_font_names_if_not_directly_passed?= Message-ID: http://hg.python.org/cpython/rev/a77e23135675 changeset: 76089:a77e23135675 user: Andrew Svetlov date: Tue Apr 03 09:48:07 2012 +0300 summary: Issue #802310: Generate always unique tkinter font names if not directly passed files: Lib/tkinter/font.py | 5 ++++- Misc/NEWS | 2 ++ 2 files changed, 6 insertions(+), 1 deletions(-) diff --git a/Lib/tkinter/font.py b/Lib/tkinter/font.py --- a/Lib/tkinter/font.py +++ b/Lib/tkinter/font.py @@ -8,6 +8,7 @@ __version__ = "0.9" +import itertools import tkinter @@ -46,6 +47,8 @@ """ + counter = itertools.count(1) + def _set(self, kw): options = [] for k, v in kw.items(): @@ -75,7 +78,7 @@ else: font = self._set(options) if not name: - name = "font" + str(id(self)) + name = "font" + str(next(self.counter)) self.name = name if exists: diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -19,6 +19,8 @@ Library ------- +- Issue #802310: Generate always unique tkinter font names if not directly passed. + - Issue #14151: Raise a ValueError, not a NameError, when trying to create a multiprocessing Client or Listener with an AF_PIPE type address under non-Windows platforms. Patch by Popa Claudiu. 
-- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Tue Apr 3 08:57:31 2012 From: python-checkins at python.org (georg.brandl) Date: Tue, 03 Apr 2012 08:57:31 +0200 Subject: [Python-checkins] =?utf8?q?devguide=3A_Closes_=2314479=3A_replace?= =?utf8?q?_transplant_advice_with_graft?= Message-ID: http://hg.python.org/devguide/rev/b1dfbaae4458 changeset: 500:b1dfbaae4458 user: Georg Brandl date: Tue Apr 03 08:57:38 2012 +0200 summary: Closes #14479: replace transplant advice with graft files: committing.rst | 35 +++++++++++++++-------------------- faq.rst | 2 +- 2 files changed, 16 insertions(+), 21 deletions(-) diff --git a/committing.rst b/committing.rst --- a/committing.rst +++ b/committing.rst @@ -308,30 +308,27 @@ '''''''''''''''''''''''''''''' Let's say you have committed your changes as changeset ``a7df1a869e4a`` -in the 3.2 branch and now want to port it to 2.7. This is simple. First -update your working copy to the 2.7 branch, then import the patch:: +in the 3.2 branch and now want to port it to 2.7. This is simple using +the "graft" command, which uses Mercurial's merge functionality to +cherry-pick:: + + hg update 2.7 + hg graft a7df1a869e4a + # Compile; run the test suite + +Graft always commits automatically, except in case of conflicts, when you +have to resolve them and run ``hg graft --continue`` afterwards. + +Another method is using "export" and "import": this has the advantage that +you can run the test suite before committing, but the disadvantage that +in case of conflicts, you will only get ``.rej`` files, not inline merge +markers. :: hg update 2.7 hg export a7df1a869e4a | hg import --no-commit - # Compile; run the test suite hg commit -You can also use the `transplant extension`_:: - - hg update 2.7 - hg transplant a7df1a869e4a - # Compile; run the test suite - -If you often get failures porting patches this way, you should consider -using the :ref:`mpatch ` utility. - - -.. warning:: - transplant always commits automatically. This breaks the - "run the test suite before committing" rule. We could advocate using - "hg qimport -r tip -P" afterwards but that would add another level of - complexity. - Using several working copies '''''''''''''''''''''''''''' @@ -408,8 +405,6 @@ someone else that haven't been merged yet. -.. _transplant extension: http://mercurial.selenic.com/wiki/TransplantExtension - .. seealso:: `Merging work `_, diff --git a/faq.rst b/faq.rst --- a/faq.rst +++ b/faq.rst @@ -758,7 +758,7 @@ Mercurial comes with many bundled extensions which can be explicitly enabled. You can get a list of them by typing ``hg help extensions``. Some of these extensions, such as ``color``, can prettify output; others, such as ``fetch`` -or ``transplant``, add new Mercurial commands. +or ``graphlog``, add new Mercurial commands. 
There are also many `configuration options`_ to tweak various aspects of the command line and other Mercurial behaviour; typing `man hgrc`_ displays -- Repository URL: http://hg.python.org/devguide From python-checkins at python.org Tue Apr 3 09:12:45 2012 From: python-checkins at python.org (georg.brandl) Date: Tue, 03 Apr 2012 09:12:45 +0200 Subject: [Python-checkins] =?utf8?q?devguide=3A_devguide=3A_fix_some_typos?= Message-ID: http://hg.python.org/devguide/rev/9cf7dc7aca86 changeset: 501:9cf7dc7aca86 user: Georg Brandl date: Tue Apr 03 09:12:53 2012 +0200 summary: devguide: fix some typos files: buildbots.rst | 2 +- committing.rst | 4 ++-- communication.rst | 4 ++-- compiler.rst | 2 +- documenting.rst | 6 +++--- faq.rst | 6 +++--- gdb.rst | 2 +- grammar.rst | 4 ++-- triaging.rst | 2 +- 9 files changed, 16 insertions(+), 16 deletions(-) diff --git a/buildbots.rst b/buildbots.rst --- a/buildbots.rst +++ b/buildbots.rst @@ -156,7 +156,7 @@ ------------------ While we try to make the test suite as reliable as possible, some tests do -not reach a perfect level of reproduceability. Some of them will sometimes +not reach a perfect level of reproducibility. Some of them will sometimes display spurious failures, depending on various conditions. Here are common offenders: diff --git a/committing.rst b/committing.rst --- a/committing.rst +++ b/committing.rst @@ -399,7 +399,7 @@ If you are coming from Subversion, you might be surprised by Mercurial :ref:`merges `. Despite its name, ``svnmerge`` is different from ``hg merge``: while ``svnmerge`` -allows to cherrypick individual revisions, ``hg merge`` can only merge whole +allows to cherry-pick individual revisions, ``hg merge`` can only merge whole lines of development in the repository's :abbr:`DAG (directed acyclic graph)`. Therefore, ``hg merge`` might force you to review outstanding changesets by someone else that haven't been merged yet. @@ -503,7 +503,7 @@ In this scheme, your work will probably consist of many commits (some of them merges). If you want to upload a patch for review somewhere, you need -a single agregate patch. This is where having a dedicated named branch +a single aggregate patch. This is where having a dedicated named branch ``mywork`` gets handy. First ensure that you have pulled *and merged* all changes from the main diff --git a/communication.rst b/communication.rst --- a/communication.rst +++ b/communication.rst @@ -48,8 +48,8 @@ A complete list of Python mailing lists can be found at http://mail.python.org. Most lists are also mirrored at http://news.gmane.org/ and can be read and -posted to in various ways, including via web browers, NNTP newsreaders, and -RSS feed readers. +posted to in various ways, including via web browsers, NNTP newsreaders, and +RSS feed readers. .. _issue tracker: http://bugs.python.org .. _new-bugs-announce: http://mail.python.org/mailman/listinfo/new-bugs-announce diff --git a/compiler.rst b/compiler.rst --- a/compiler.rst +++ b/compiler.rst @@ -186,7 +186,7 @@ of Python uses reference counting, there is extra support added to the arena to cleanup each PyObject that was allocated. These cases are very rare. However, if you've allocated a PyObject, you must tell -the arena about it by calling PyArena_AddPyObject(). +the arena about it by calling ``PyArena_AddPyObject()``. 
Parse Tree to AST diff --git a/documenting.rst b/documenting.rst --- a/documenting.rst +++ b/documenting.rst @@ -6,7 +6,7 @@ The Python language has a substantial body of documentation, much of it contributed by various authors. The markup used for the Python documentation is `reStructuredText`_, developed by the `docutils`_ project, amended by custom -directives and using a toolset named `Sphinx`_ to postprocess the HTML output. +directives and using a toolset named `Sphinx`_ to post-process the HTML output. This document describes the style guide for our documentation as well as the custom reStructuredText markup introduced by Sphinx to support Python @@ -108,7 +108,7 @@ Apple style guide recommends the use of title case in section titles. However, rules for which words should be capitalized in title case -vary greaty between publications. +vary greatly between publications. In Python documentation, use of sentence case in section titles is preferable, but consistency within a unit is more important than @@ -327,7 +327,7 @@ List markup is natural: just place an asterisk at the start of a paragraph and indent properly. The same goes for numbered lists; they can also be -autonumbered using a ``#`` sign:: +automatically numbered using a ``#`` sign:: * This is a bulleted list. * It has two items, the second diff --git a/faq.rst b/faq.rst --- a/faq.rst +++ b/faq.rst @@ -475,7 +475,7 @@ hg add PATH If ``PATH`` is a directory, Mercurial will recursively add any files in that -directory and its descendents. +directory and its descendants. If you want Mercurial to figure out by itself which files should be added and/or removed, just run:: @@ -534,7 +534,7 @@ .. note:: If you do not like the default text editor Mercurial uses for - entering commmit messages, you may specify a different editor, + entering commit messages, you may specify a different editor, either by changing the ``EDITOR`` environment variable or by setting a Mercurial-specific editor in your global ``.hgrc`` with the ``editor`` option in the ``[ui]`` section. @@ -562,7 +562,7 @@ hg status will list any pending changes in the working copy. These changes will get -commited to the local repository if you issue an ``hg commit`` without +committed to the local repository if you issue an ``hg commit`` without specifying any path. Some diff --git a/gdb.rst b/gdb.rst --- a/gdb.rst +++ b/gdb.rst @@ -131,7 +131,7 @@ the python source. ``py-up`` and ``py-down`` - The ``py-up`` and ``py-down`` commands are analagous to gdb's regular ``up`` + The ``py-up`` and ``py-down`` commands are analogous to gdb's regular ``up`` and ``down`` commands, but try to move at the level of CPython frames, rather than C frames. diff --git a/grammar.rst b/grammar.rst --- a/grammar.rst +++ b/grammar.rst @@ -39,7 +39,7 @@ * Parser/pgen needs to be rerun to regenerate Include/graminit.h and Python/graminit.c. (make should handle this for you.) -* Python/symbtable.c: This handles the symbol collection pass +* Python/symtable.c: This handles the symbol collection pass that happens immediately before the compilation pass. * Python/compile.c: You will need to create or modify the @@ -60,7 +60,7 @@ * Documentation must be written! -* After everything's been checked in, you're likely to see a new +* After everything has been checked in, you're likely to see a new change to Python/Python-ast.c. This is because this (generated) file contains the hg version of the source from which it was generated. 
There's no way to avoid this; you just diff --git a/triaging.rst b/triaging.rst --- a/triaging.rst +++ b/triaging.rst @@ -180,7 +180,7 @@ attention. Use the :ref:`experts` to know who wants to be added to the nosy list for issues targeting specific areas. -If you have Javascript enabled and permission to edit the nosy list, you can +If you have JavaScript enabled and permission to edit the nosy list, you can use the ``[+]`` button to add yourself to the nosy (remember to click on "Submit Changes" afterwards). Note that you are added to the nosy automatically when you submit a message. -- Repository URL: http://hg.python.org/devguide From python-checkins at python.org Tue Apr 3 09:16:40 2012 From: python-checkins at python.org (georg.brandl) Date: Tue, 03 Apr 2012 09:16:40 +0200 Subject: [Python-checkins] =?utf8?q?cpython=3A_Move_ChainMap_versionadded_?= =?utf8?q?to_be_less_ambiguous=2E?= Message-ID: http://hg.python.org/cpython/rev/555f5fe53341 changeset: 76090:555f5fe53341 user: Georg Brandl date: Tue Apr 03 09:16:46 2012 +0200 summary: Move ChainMap versionadded to be less ambiguous. files: Doc/library/collections.rst | 4 ++-- 1 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Doc/library/collections.rst b/Doc/library/collections.rst --- a/Doc/library/collections.rst +++ b/Doc/library/collections.rst @@ -41,6 +41,8 @@ :class:`ChainMap` objects ------------------------- +.. versionadded:: 3.3 + A :class:`ChainMap` class is provided for quickly linking a number of mappings so they can be treated as a single unit. It is often much faster than creating a new dictionary and running multiple :meth:`~dict.update` calls. @@ -91,8 +93,6 @@ The use-cases also parallel those for the builtin :func:`super` function. A reference to ``d.parents`` is equivalent to: ``ChainMap(*d.maps[1:])``. - .. versionadded:: 3.3 - Example of simulating Python's internal lookup chain:: import builtins -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Tue Apr 3 13:12:04 2012 From: python-checkins at python.org (kristjan.jonsson) Date: Tue, 03 Apr 2012 13:12:04 +0200 Subject: [Python-checkins] =?utf8?q?cpython=3A_Issue_=2314288=3A_Serializa?= =?utf8?q?tion_support_for_builtin_iterators=2E?= Message-ID: http://hg.python.org/cpython/rev/4ff234337e24 changeset: 76091:4ff234337e24 user: Kristj?n Valur J?nsson date: Tue Apr 03 10:49:41 2012 +0000 summary: Issue #14288: Serialization support for builtin iterators. 
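In practical terms, the feature allows a partly consumed built-in iterator to be pickled and resumed later, as the new tests below exercise. A minimal example (Python 3.3 or later)::

    import pickle

    it = iter([10, 20, 30, 40])
    next(it)                      # consume one element

    data = pickle.dumps(it)       # now possible for built-in iterators
    it2 = pickle.loads(data)
    print(list(it2))              # -> [20, 30, 40]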
files: Include/iterobject.h | 2 + Lib/test/seq_tests.py | 7 + Lib/test/test_array.py | 14 + Lib/test/test_builtin.py | 41 + Lib/test/test_bytes.py | 18 + Lib/test/test_deque.py | 13 + Lib/test/test_dict.py | 54 + Lib/test/test_enumerate.py | 30 +- Lib/test/test_iter.py | 43 +- Lib/test/test_itertools.py | 386 +++++++++- Lib/test/test_list.py | 28 + Lib/test/test_range.py | 24 +- Lib/test/test_set.py | 21 + Lib/test/test_tuple.py | 29 + Modules/_collectionsmodule.c | 93 ++- Modules/arraymodule.c | 30 +- Modules/itertoolsmodule.c | 890 +++++++++++++++++++++- Objects/bytearrayobject.c | 36 +- Objects/bytesobject.c | 34 + Objects/dictobject.c | 53 + Objects/enumobject.c | 50 +- Objects/iterobject.c | 60 +- Objects/listobject.c | 80 ++ Objects/rangeobject.c | 92 ++ Objects/setobject.c | 45 +- Objects/tupleobject.c | 31 + Objects/unicodeobject.c | 34 + Python/bltinmodule.c | 56 +- 28 files changed, 2190 insertions(+), 104 deletions(-) diff --git a/Include/iterobject.h b/Include/iterobject.h --- a/Include/iterobject.h +++ b/Include/iterobject.h @@ -18,6 +18,8 @@ PyAPI_FUNC(PyObject *) PyCallIter_New(PyObject *, PyObject *); +PyAPI_FUNC(PyObject *) _PyIter_GetBuiltin(const char *iter); + #ifdef __cplusplus } #endif diff --git a/Lib/test/seq_tests.py b/Lib/test/seq_tests.py --- a/Lib/test/seq_tests.py +++ b/Lib/test/seq_tests.py @@ -4,6 +4,7 @@ import unittest import sys +import pickle # Various iterables # This is used for checking the constructor (here and in test_deque.py) @@ -388,3 +389,9 @@ self.assertEqual(a.index(0, -4*sys.maxsize, 4*sys.maxsize), 2) self.assertRaises(ValueError, a.index, 0, 4*sys.maxsize,-4*sys.maxsize) self.assertRaises(ValueError, a.index, 2, 0, -10) + + def test_pickle(self): + lst = self.type2test([4, 5, 6, 7]) + lst2 = pickle.loads(pickle.dumps(lst)) + self.assertEqual(lst2, lst) + self.assertNotEqual(id(lst2), id(lst)) diff --git a/Lib/test/test_array.py b/Lib/test/test_array.py --- a/Lib/test/test_array.py +++ b/Lib/test/test_array.py @@ -285,6 +285,20 @@ self.assertEqual(a.x, b.x) self.assertEqual(type(a), type(b)) + def test_iterator_pickle(self): + data = array.array(self.typecode, self.example) + orgit = iter(data) + d = pickle.dumps(orgit) + it = pickle.loads(d) + self.assertEqual(type(orgit), type(it)) + self.assertEqual(list(it), list(data)) + + if len(data): + it = pickle.loads(d) + next(it) + d = pickle.dumps(it) + self.assertEqual(list(it), list(data)[1:]) + def test_insert(self): a = array.array(self.typecode, self.example) a.insert(0, self.example[0]) diff --git a/Lib/test/test_builtin.py b/Lib/test/test_builtin.py --- a/Lib/test/test_builtin.py +++ b/Lib/test/test_builtin.py @@ -14,6 +14,7 @@ import traceback from test.support import TESTFN, unlink, run_unittest, check_warnings from operator import neg +import pickle try: import pty, signal except ImportError: @@ -110,7 +111,30 @@ def __iter__(self): raise RuntimeError +def filter_char(arg): + return ord(arg) > ord("d") + +def map_char(arg): + return chr(ord(arg)+1) + class BuiltinTest(unittest.TestCase): + # Helper to check picklability + def check_iter_pickle(self, it, seq): + itorg = it + d = pickle.dumps(it) + it = pickle.loads(d) + self.assertEqual(type(itorg), type(it)) + self.assertEqual(list(it), seq) + + #test the iterator after dropping one from it + it = pickle.loads(d) + try: + next(it) + except StopIteration: + return + d = pickle.dumps(it) + it = pickle.loads(d) + self.assertEqual(list(it), seq[1:]) def test_import(self): __import__('sys') @@ -566,6 +590,11 @@ 
self.assertEqual(list(filter(lambda x: x>=3, (1, 2, 3, 4))), [3, 4]) self.assertRaises(TypeError, list, filter(42, (1, 2))) + def test_filter_pickle(self): + f1 = filter(filter_char, "abcdeabcde") + f2 = filter(filter_char, "abcdeabcde") + self.check_iter_pickle(f1, list(f2)) + def test_getattr(self): self.assertTrue(getattr(sys, 'stdout') is sys.stdout) self.assertRaises(TypeError, getattr, sys, 1) @@ -759,6 +788,11 @@ raise RuntimeError self.assertRaises(RuntimeError, list, map(badfunc, range(5))) + def test_map_pickle(self): + m1 = map(map_char, "Is this the real life?") + m2 = map(map_char, "Is this the real life?") + self.check_iter_pickle(m1, list(m2)) + def test_max(self): self.assertEqual(max('123123'), '3') self.assertEqual(max(1, 2, 3), 3) @@ -1300,6 +1334,13 @@ return i self.assertRaises(ValueError, list, zip(BadSeq(), BadSeq())) + def test_zip_pickle(self): + a = (1, 2, 3) + b = (4, 5, 6) + t = [(1, 4), (2, 5), (3, 6)] + z1 = zip(a, b) + self.check_iter_pickle(z1, t) + def test_format(self): # Test the basic machinery of the format() builtin. Don't test # the specifics of the various formatters diff --git a/Lib/test/test_bytes.py b/Lib/test/test_bytes.py --- a/Lib/test/test_bytes.py +++ b/Lib/test/test_bytes.py @@ -518,6 +518,24 @@ q = pickle.loads(ps) self.assertEqual(b, q) + def test_iterator_pickling(self): + for b in b"", b"a", b"abc", b"\xffab\x80", b"\0\0\377\0\0": + it = itorg = iter(self.type2test(b)) + data = list(self.type2test(b)) + d = pickle.dumps(it) + it = pickle.loads(d) + self.assertEqual(type(itorg), type(it)) + self.assertEqual(list(it), data) + + it = pickle.loads(d) + try: + next(it) + except StopIteration: + continue + d = pickle.dumps(it) + it = pickle.loads(d) + self.assertEqual(list(it), data[1:]) + def test_strip(self): b = self.type2test(b'mississippi') self.assertEqual(b.strip(b'i'), b'mississipp') diff --git a/Lib/test/test_deque.py b/Lib/test/test_deque.py --- a/Lib/test/test_deque.py +++ b/Lib/test/test_deque.py @@ -471,6 +471,19 @@ ## self.assertNotEqual(id(d), id(e)) ## self.assertEqual(id(e), id(e[-1])) + def test_iterator_pickle(self): + data = deque(range(200)) + it = itorg = iter(data) + d = pickle.dumps(it) + it = pickle.loads(d) + self.assertEqual(type(itorg), type(it)) + self.assertEqual(list(it), list(data)) + + it = pickle.loads(d) + next(it) + d = pickle.dumps(it) + self.assertEqual(list(it), list(data)[1:]) + def test_deepcopy(self): mut = [10] d = deque([mut]) diff --git a/Lib/test/test_dict.py b/Lib/test/test_dict.py --- a/Lib/test/test_dict.py +++ b/Lib/test/test_dict.py @@ -2,7 +2,9 @@ from test import support import collections, random, string +import collections.abc import gc, weakref +import pickle class DictTest(unittest.TestCase): @@ -803,6 +805,58 @@ pass self._tracked(MyDict()) + def test_iterator_pickling(self): + data = {1:"a", 2:"b", 3:"c"} + it = iter(data) + d = pickle.dumps(it) + it = pickle.loads(d) + self.assertEqual(sorted(it), sorted(data)) + + it = pickle.loads(d) + try: + drop = next(it) + except StopIteration: + return + d = pickle.dumps(it) + it = pickle.loads(d) + del data[drop] + self.assertEqual(sorted(it), sorted(data)) + + def test_itemiterator_pickling(self): + data = {1:"a", 2:"b", 3:"c"} + # dictviews aren't picklable, only their iterators + itorg = iter(data.items()) + d = pickle.dumps(itorg) + it = pickle.loads(d) + # note that the type of type of the unpickled iterator + # is not necessarily the same as the original. 
It is + # merely an object supporting the iterator protocol, yielding + # the same objects as the original one. + # self.assertEqual(type(itorg), type(it)) + self.assertTrue(isinstance(it, collections.abc.Iterator)) + self.assertEqual(dict(it), data) + + it = pickle.loads(d) + drop = next(it) + d = pickle.dumps(it) + it = pickle.loads(d) + del data[drop[0]] + self.assertEqual(dict(it), data) + + def test_valuesiterator_pickling(self): + data = {1:"a", 2:"b", 3:"c"} + # data.values() isn't picklable, only its iterator + it = iter(data.values()) + d = pickle.dumps(it) + it = pickle.loads(d) + self.assertEqual(sorted(list(it)), sorted(list(data.values()))) + + it = pickle.loads(d) + drop = next(it) + d = pickle.dumps(it) + it = pickle.loads(d) + values = list(it) + [drop] + self.assertEqual(sorted(values), sorted(list(data.values()))) from test import mapping_tests diff --git a/Lib/test/test_enumerate.py b/Lib/test/test_enumerate.py --- a/Lib/test/test_enumerate.py +++ b/Lib/test/test_enumerate.py @@ -1,5 +1,6 @@ import unittest import sys +import pickle from test import support @@ -61,7 +62,25 @@ def __iter__(self): return self -class EnumerateTestCase(unittest.TestCase): +class PickleTest: + # Helper to check picklability + def check_pickle(self, itorg, seq): + d = pickle.dumps(itorg) + it = pickle.loads(d) + self.assertEqual(type(itorg), type(it)) + self.assertEqual(list(it), seq) + + it = pickle.loads(d) + try: + next(it) + except StopIteration: + self.assertFalse(seq[1:]) + return + d = pickle.dumps(it) + it = pickle.loads(d) + self.assertEqual(list(it), seq[1:]) + +class EnumerateTestCase(unittest.TestCase, PickleTest): enum = enumerate seq, res = 'abc', [(0,'a'), (1,'b'), (2,'c')] @@ -73,6 +92,9 @@ self.assertEqual(list(self.enum(self.seq)), self.res) self.enum.__doc__ + def test_pickle(self): + self.check_pickle(self.enum(self.seq), self.res) + def test_getitemseqn(self): self.assertEqual(list(self.enum(G(self.seq))), self.res) e = self.enum(G('')) @@ -126,7 +148,7 @@ seq = range(10,20000,2) res = list(zip(range(20000), seq)) -class TestReversed(unittest.TestCase): +class TestReversed(unittest.TestCase, PickleTest): def test_simple(self): class A: @@ -212,6 +234,10 @@ ngi = NoGetItem() self.assertRaises(TypeError, reversed, ngi) + def test_pickle(self): + for data in 'abc', range(5), tuple(enumerate('abc')), range(1,17,5): + self.check_pickle(reversed(data), list(data)[::-1]) + class EnumerateStartTestCase(EnumerateTestCase): diff --git a/Lib/test/test_iter.py b/Lib/test/test_iter.py --- a/Lib/test/test_iter.py +++ b/Lib/test/test_iter.py @@ -2,6 +2,8 @@ import unittest from test.support import run_unittest, TESTFN, unlink, cpython_only +import pickle +import collections.abc # Test result of triple loop (too big to inline) TRIPLETS = [(0, 0, 0), (0, 0, 1), (0, 0, 2), @@ -28,6 +30,8 @@ raise StopIteration self.i = res + 1 return res + def __iter__(self): + return self class IteratingSequenceClass: def __init__(self, n): @@ -49,7 +53,9 @@ class TestCase(unittest.TestCase): # Helper to check that an iterator returns a given sequence - def check_iterator(self, it, seq): + def check_iterator(self, it, seq, pickle=True): + if pickle: + self.check_pickle(it, seq) res = [] while 1: try: @@ -60,12 +66,33 @@ self.assertEqual(res, seq) # Helper to check that a for loop generates a given sequence - def check_for_loop(self, expr, seq): + def check_for_loop(self, expr, seq, pickle=True): + if pickle: + self.check_pickle(iter(expr), seq) res = [] for val in expr: res.append(val) 
self.assertEqual(res, seq) + # Helper to check picklability + def check_pickle(self, itorg, seq): + d = pickle.dumps(itorg) + it = pickle.loads(d) + # Cannot assert type equality because dict iterators unpickle as list + # iterators. + # self.assertEqual(type(itorg), type(it)) + self.assertTrue(isinstance(it, collections.abc.Iterator)) + self.assertEqual(list(it), seq) + + it = pickle.loads(d) + try: + next(it) + except StopIteration: + return + d = pickle.dumps(it) + it = pickle.loads(d) + self.assertEqual(list(it), seq[1:]) + # Test basic use of iter() function def test_iter_basic(self): self.check_iterator(iter(range(10)), list(range(10))) @@ -138,7 +165,7 @@ if i > 100: raise IndexError # Emergency stop return i - self.check_iterator(iter(C(), 10), list(range(10))) + self.check_iterator(iter(C(), 10), list(range(10)), pickle=False) # Test two-argument iter() with function def test_iter_function(self): @@ -146,7 +173,7 @@ i = state[0] state[0] = i+1 return i - self.check_iterator(iter(spam, 10), list(range(10))) + self.check_iterator(iter(spam, 10), list(range(10)), pickle=False) # Test two-argument iter() with function that raises StopIteration def test_iter_function_stop(self): @@ -156,7 +183,7 @@ raise StopIteration state[0] = i+1 return i - self.check_iterator(iter(spam, 20), list(range(10))) + self.check_iterator(iter(spam, 20), list(range(10)), pickle=False) # Test exception propagation through function iterator def test_exception_function(self): @@ -198,7 +225,7 @@ if i == 10: raise StopIteration return SequenceClass.__getitem__(self, i) - self.check_for_loop(MySequenceClass(20), list(range(10))) + self.check_for_loop(MySequenceClass(20), list(range(10)), pickle=False) # Test a big range def test_iter_big_range(self): @@ -237,8 +264,8 @@ f.close() f = open(TESTFN, "r") try: - self.check_for_loop(f, ["0\n", "1\n", "2\n", "3\n", "4\n"]) - self.check_for_loop(f, []) + self.check_for_loop(f, ["0\n", "1\n", "2\n", "3\n", "4\n"], pickle=False) + self.check_for_loop(f, [], pickle=False) finally: f.close() try: diff --git a/Lib/test/test_itertools.py b/Lib/test/test_itertools.py --- a/Lib/test/test_itertools.py +++ b/Lib/test/test_itertools.py @@ -37,6 +37,13 @@ 'Test predicate' return x%2==1 +def tupleize(*args): + return args + +def irange(n): + for i in range(n): + yield i + class StopNow: 'Class emulating an empty iterable.' def __iter__(self): @@ -55,8 +62,59 @@ 'Factorial' return prod(range(1, n+1)) +# root level methods for pickling ability +def testR(r): + return r[0] + +def testR2(r): + return r[2] + +def underten(x): + return x<10 + class TestBasicOps(unittest.TestCase): + def pickletest(self, it, stop=4, take=1, compare=None): + """Test that an iterator is the same after pickling, also when part-consumed""" + def expand(it, i=0): + # Recursively expand iterables, within sensible bounds + if i > 10: + raise RuntimeError("infinite recursion encountered") + if isinstance(it, str): + return it + try: + l = list(islice(it, stop)) + except TypeError: + return it # can't expand it + return [expand(e, i+1) for e in l] + + # Test the initial copy against the original + dump = pickle.dumps(it) + i2 = pickle.loads(dump) + self.assertEqual(type(it), type(i2)) + a, b = expand(it), expand(i2) + self.assertEqual(a, b) + if compare: + c = expand(compare) + self.assertEqual(a, c) + + # Take from the copy, and create another copy and compare them. 
+ i3 = pickle.loads(dump) + took = 0 + try: + for i in range(take): + next(i3) + took += 1 + except StopIteration: + pass #in case there is less data than 'take' + dump = pickle.dumps(i3) + i4 = pickle.loads(dump) + a, b = expand(i3), expand(i4) + self.assertEqual(a, b) + if compare: + c = expand(compare[took:]) + self.assertEqual(a, c); + def test_accumulate(self): self.assertEqual(list(accumulate(range(10))), # one positional arg [0, 1, 3, 6, 10, 15, 21, 28, 36, 45]) @@ -83,6 +141,7 @@ [2, 16, 144, 720, 5040, 0, 0, 0, 0, 0]) with self.assertRaises(TypeError): list(accumulate(s, chr)) # unary-operation + self.pickletest(accumulate(range(10))) # test pickling def test_chain(self): @@ -106,14 +165,43 @@ self.assertEqual(take(4, chain.from_iterable(['abc', 'def'])), list('abcd')) self.assertRaises(TypeError, list, chain.from_iterable([2, 3])) + def test_chain_reducible(self): + operators = [copy.deepcopy, + lambda s: pickle.loads(pickle.dumps(s))] + for oper in operators: + it = chain('abc', 'def') + self.assertEqual(list(oper(it)), list('abcdef')) + self.assertEqual(next(it), 'a') + self.assertEqual(list(oper(it)), list('bcdef')) + + self.assertEqual(list(oper(chain(''))), []) + self.assertEqual(take(4, oper(chain('abc', 'def'))), list('abcd')) + self.assertRaises(TypeError, list, oper(chain(2, 3))) + self.pickletest(chain('abc', 'def'), compare=list('abcdef')) + def test_combinations(self): self.assertRaises(TypeError, combinations, 'abc') # missing r argument self.assertRaises(TypeError, combinations, 'abc', 2, 1) # too many arguments self.assertRaises(TypeError, combinations, None) # pool is not iterable self.assertRaises(ValueError, combinations, 'abc', -2) # r is negative - self.assertEqual(list(combinations('abc', 32)), []) # r > n - self.assertEqual(list(combinations(range(4), 3)), - [(0,1,2), (0,1,3), (0,2,3), (1,2,3)]) + + for op in (lambda a:a, lambda a:pickle.loads(pickle.dumps(a))): + self.assertEqual(list(op(combinations('abc', 32))), []) # r > n + + self.assertEqual(list(op(combinations('ABCD', 2))), + [('A','B'), ('A','C'), ('A','D'), ('B','C'), ('B','D'), ('C','D')]) + testIntermediate = combinations('ABCD', 2) + next(testIntermediate) + self.assertEqual(list(op(testIntermediate)), + [('A','C'), ('A','D'), ('B','C'), ('B','D'), ('C','D')]) + + self.assertEqual(list(op(combinations(range(4), 3))), + [(0,1,2), (0,1,3), (0,2,3), (1,2,3)]) + testIntermediate = combinations(range(4), 3) + next(testIntermediate) + self.assertEqual(list(op(testIntermediate)), + [(0,1,3), (0,2,3), (1,2,3)]) + def combinations1(iterable, r): 'Pure python version shown in the docs' @@ -168,6 +256,9 @@ self.assertEqual(result, list(combinations2(values, r))) # matches second pure python version self.assertEqual(result, list(combinations3(values, r))) # matches second pure python version + self.pickletest(combinations(values, r)) # test pickling + + # Test implementation detail: tuple re-use @support.impl_detail("tuple reuse is specific to CPython") def test_combinations_tuple_reuse(self): self.assertEqual(len(set(map(id, combinations('abcde', 3)))), 1) @@ -179,8 +270,15 @@ self.assertRaises(TypeError, cwr, 'abc', 2, 1) # too many arguments self.assertRaises(TypeError, cwr, None) # pool is not iterable self.assertRaises(ValueError, cwr, 'abc', -2) # r is negative - self.assertEqual(list(cwr('ABC', 2)), - [('A','A'), ('A','B'), ('A','C'), ('B','B'), ('B','C'), ('C','C')]) + + for op in (lambda a:a, lambda a:pickle.loads(pickle.dumps(a))): + self.assertEqual(list(op(cwr('ABC', 2))), + [('A','A'), 
('A','B'), ('A','C'), ('B','B'), ('B','C'), ('C','C')]) + testIntermediate = cwr('ABC', 2) + next(testIntermediate) + self.assertEqual(list(op(testIntermediate)), + [('A','B'), ('A','C'), ('B','B'), ('B','C'), ('C','C')]) + def cwr1(iterable, r): 'Pure python version shown in the docs' @@ -239,6 +337,10 @@ self.assertEqual(result, list(cwr1(values, r))) # matches first pure python version self.assertEqual(result, list(cwr2(values, r))) # matches second pure python version + self.pickletest(cwr(values,r)) # test pickling + + # Test implementation detail: tuple re-use + @support.impl_detail("tuple reuse is specific to CPython") def test_combinations_with_replacement_tuple_reuse(self): cwr = combinations_with_replacement @@ -305,6 +407,8 @@ self.assertEqual(result, list(permutations(values, None))) # test r as None self.assertEqual(result, list(permutations(values))) # test default r + self.pickletest(permutations(values, r)) # test pickling + @support.impl_detail("tuple resuse is CPython specific") def test_permutations_tuple_reuse(self): self.assertEqual(len(set(map(id, permutations('abcde', 3)))), 1) @@ -359,6 +463,24 @@ self.assertRaises(TypeError, compress, range(6)) # too few args self.assertRaises(TypeError, compress, range(6), None) # too many args + # check copy, deepcopy, pickle + for op in (lambda a:copy.copy(a), lambda a:copy.deepcopy(a), lambda a:pickle.loads(pickle.dumps(a))): + for data, selectors, result1, result2 in [ + ('ABCDEF', [1,0,1,0,1,1], 'ACEF', 'CEF'), + ('ABCDEF', [0,0,0,0,0,0], '', ''), + ('ABCDEF', [1,1,1,1,1,1], 'ABCDEF', 'BCDEF'), + ('ABCDEF', [1,0,1], 'AC', 'C'), + ('ABC', [0,1,1,1,1,1], 'BC', 'C'), + ]: + + self.assertEqual(list(op(compress(data=data, selectors=selectors))), list(result1)) + self.assertEqual(list(op(compress(data, selectors))), list(result1)) + testIntermediate = compress(data, selectors) + if result1: + next(testIntermediate) + self.assertEqual(list(op(testIntermediate)), list(result2)) + + def test_count(self): self.assertEqual(lzip('abc',count()), [('a', 0), ('b', 1), ('c', 2)]) self.assertEqual(lzip('abc',count(3)), [('a', 3), ('b', 4), ('c', 5)]) @@ -393,7 +515,7 @@ c = count(value) self.assertEqual(next(copy.copy(c)), value) self.assertEqual(next(copy.deepcopy(c)), value) - self.assertEqual(next(pickle.loads(pickle.dumps(c))), value) + self.pickletest(count(value)) #check proper internal error handling for large "step' sizes count(1, maxsize+5); sys.exc_info() @@ -440,6 +562,7 @@ else: r2 = ('count(%r, %r)' % (i, j)).replace('L', '') self.assertEqual(r1, r2) + self.pickletest(count(i, j)) def test_cycle(self): self.assertEqual(take(10, cycle('abc')), list('abcabcabca')) @@ -448,6 +571,18 @@ self.assertRaises(TypeError, cycle, 5) self.assertEqual(list(islice(cycle(gen3()),10)), [0,1,2,0,1,2,0,1,2,0]) + # check copy, deepcopy, pickle + c = cycle('abc') + self.assertEqual(next(c), 'a') + #simple copy currently not supported, because __reduce__ returns + #an internal iterator + #self.assertEqual(take(10, copy.copy(c)), list('bcabcabcab')) + self.assertEqual(take(10, copy.deepcopy(c)), list('bcabcabcab')) + self.assertEqual(take(10, pickle.loads(pickle.dumps(c))), list('bcabcabcab')) + next(c) + self.assertEqual(take(10, pickle.loads(pickle.dumps(c))), list('cabcabcabc')) + self.pickletest(cycle('abc')) + def test_groupby(self): # Check whether it accepts arguments correctly self.assertEqual([], list(groupby([]))) @@ -466,18 +601,37 @@ dup.append(elem) self.assertEqual(s, dup) + # Check normal pickled + dup = [] + for k, g in 
pickle.loads(pickle.dumps(groupby(s, testR))): + for elem in g: + self.assertEqual(k, elem[0]) + dup.append(elem) + self.assertEqual(s, dup) + # Check nested case dup = [] - for k, g in groupby(s, lambda r:r[0]): - for ik, ig in groupby(g, lambda r:r[2]): + for k, g in groupby(s, testR): + for ik, ig in groupby(g, testR2): for elem in ig: self.assertEqual(k, elem[0]) self.assertEqual(ik, elem[2]) dup.append(elem) self.assertEqual(s, dup) + # Check nested and pickled + dup = [] + for k, g in pickle.loads(pickle.dumps(groupby(s, testR))): + for ik, ig in pickle.loads(pickle.dumps(groupby(g, testR2))): + for elem in ig: + self.assertEqual(k, elem[0]) + self.assertEqual(ik, elem[2]) + dup.append(elem) + self.assertEqual(s, dup) + + # Check case where inner iterator is not used - keys = [k for k, g in groupby(s, lambda r:r[0])] + keys = [k for k, g in groupby(s, testR)] expectedkeys = set([r[0] for r in s]) self.assertEqual(set(keys), expectedkeys) self.assertEqual(len(keys), len(expectedkeys)) @@ -548,6 +702,20 @@ self.assertRaises(TypeError, filter, isEven, 3) self.assertRaises(TypeError, next, filter(range(6), range(6))) + # check copy, deepcopy, pickle + ans = [0,2,4] + + c = filter(isEven, range(6)) + self.assertEqual(list(copy.copy(c)), ans) + c = filter(isEven, range(6)) + self.assertEqual(list(copy.deepcopy(c)), ans) + c = filter(isEven, range(6)) + self.assertEqual(list(pickle.loads(pickle.dumps(c))), ans) + next(c) + self.assertEqual(list(pickle.loads(pickle.dumps(c))), ans[1:]) + c = filter(isEven, range(6)) + self.pickletest(c) + def test_filterfalse(self): self.assertEqual(list(filterfalse(isEven, range(6))), [1,3,5]) self.assertEqual(list(filterfalse(None, [0,1,0,2,0])), [0,0,0]) @@ -558,6 +726,7 @@ self.assertRaises(TypeError, filterfalse, lambda x:x, range(6), 7) self.assertRaises(TypeError, filterfalse, isEven, 3) self.assertRaises(TypeError, next, filterfalse(range(6), range(6))) + self.pickletest(filterfalse(isEven, range(6))) def test_zip(self): # XXX This is rather silly now that builtin zip() calls zip()... 
@@ -582,6 +751,23 @@ ids = list(map(id, list(zip('abc', 'def')))) self.assertEqual(len(dict.fromkeys(ids)), len(ids)) + # check copy, deepcopy, pickle + ans = [(x,y) for x, y in copy.copy(zip('abc',count()))] + self.assertEqual(ans, [('a', 0), ('b', 1), ('c', 2)]) + + ans = [(x,y) for x, y in copy.deepcopy(zip('abc',count()))] + self.assertEqual(ans, [('a', 0), ('b', 1), ('c', 2)]) + + ans = [(x,y) for x, y in pickle.loads(pickle.dumps(zip('abc',count())))] + self.assertEqual(ans, [('a', 0), ('b', 1), ('c', 2)]) + + testIntermediate = zip('abc',count()) + next(testIntermediate) + ans = [(x,y) for x, y in pickle.loads(pickle.dumps(testIntermediate))] + self.assertEqual(ans, [('b', 1), ('c', 2)]) + + self.pickletest(zip('abc', count())) + def test_ziplongest(self): for args in [ ['abc', range(6)], @@ -631,6 +817,12 @@ ids = list(map(id, list(zip_longest('abc', 'def')))) self.assertEqual(len(dict.fromkeys(ids)), len(ids)) + def test_zip_longest_pickling(self): + self.pickletest(zip_longest("abc", "def")) + self.pickletest(zip_longest("abc", "defgh")) + self.pickletest(zip_longest("abc", "defgh", fillvalue=1)) + self.pickletest(zip_longest("", "defgh")) + def test_bug_7244(self): class Repeater: @@ -734,6 +926,20 @@ self.assertEqual(len(set(map(id, product('abc', 'def')))), 1) self.assertNotEqual(len(set(map(id, list(product('abc', 'def'))))), 1) + def test_product_pickling(self): + # check copy, deepcopy, pickle + for args, result in [ + ([], [()]), # zero iterables + (['ab'], [('a',), ('b',)]), # one iterable + ([range(2), range(3)], [(0,0), (0,1), (0,2), (1,0), (1,1), (1,2)]), # two iterables + ([range(0), range(2), range(3)], []), # first iterable with zero length + ([range(2), range(0), range(3)], []), # middle iterable with zero length + ([range(2), range(3), range(0)], []), # last iterable with zero length + ]: + self.assertEqual(list(copy.copy(product(*args))), result) + self.assertEqual(list(copy.deepcopy(product(*args))), result) + self.pickletest(product(*args)) + def test_repeat(self): self.assertEqual(list(repeat(object='a', times=3)), ['a', 'a', 'a']) self.assertEqual(lzip(range(3),repeat('a')), @@ -752,11 +958,16 @@ list(r) self.assertEqual(repr(r), 'repeat((1+0j), 0)') + # check copy, deepcopy, pickle + c = repeat(object='a', times=10) + self.assertEqual(next(c), 'a') + self.assertEqual(take(2, copy.copy(c)), list('a' * 2)) + self.assertEqual(take(2, copy.deepcopy(c)), list('a' * 2)) + self.pickletest(repeat(object='a', times=10)) + def test_map(self): self.assertEqual(list(map(operator.pow, range(3), range(1,7))), [0**1, 1**2, 2**3]) - def tupleize(*args): - return args self.assertEqual(list(map(tupleize, 'abc', range(5))), [('a',0),('b',1),('c',2)]) self.assertEqual(list(map(tupleize, 'abc', count())), @@ -771,6 +982,18 @@ self.assertRaises(ValueError, next, map(errfunc, [4], [5])) self.assertRaises(TypeError, next, map(onearg, [4], [5])) + # check copy, deepcopy, pickle + ans = [('a',0),('b',1),('c',2)] + + c = map(tupleize, 'abc', count()) + self.assertEqual(list(copy.copy(c)), ans) + + c = map(tupleize, 'abc', count()) + self.assertEqual(list(copy.deepcopy(c)), ans) + + c = map(tupleize, 'abc', count()) + self.pickletest(c) + def test_starmap(self): self.assertEqual(list(starmap(operator.pow, zip(range(3), range(1,7)))), [0**1, 1**2, 2**3]) @@ -785,6 +1008,18 @@ self.assertRaises(ValueError, next, starmap(errfunc, [(4,5)])) self.assertRaises(TypeError, next, starmap(onearg, [(4,5)])) + # check copy, deepcopy, pickle + ans = [0**1, 1**2, 2**3] + + c = starmap(operator.pow, 
zip(range(3), range(1,7))) + self.assertEqual(list(copy.copy(c)), ans) + + c = starmap(operator.pow, zip(range(3), range(1,7))) + self.assertEqual(list(copy.deepcopy(c)), ans) + + c = starmap(operator.pow, zip(range(3), range(1,7))) + self.pickletest(c) + def test_islice(self): for args in [ # islice(args) should agree with range(args) (10, 20, 3), @@ -817,17 +1052,18 @@ self.assertEqual(list(it), list(range(3, 10))) # Test invalid arguments - self.assertRaises(TypeError, islice, range(10)) - self.assertRaises(TypeError, islice, range(10), 1, 2, 3, 4) - self.assertRaises(ValueError, islice, range(10), -5, 10, 1) - self.assertRaises(ValueError, islice, range(10), 1, -5, -1) - self.assertRaises(ValueError, islice, range(10), 1, 10, -1) - self.assertRaises(ValueError, islice, range(10), 1, 10, 0) - self.assertRaises(ValueError, islice, range(10), 'a') - self.assertRaises(ValueError, islice, range(10), 'a', 1) - self.assertRaises(ValueError, islice, range(10), 1, 'a') - self.assertRaises(ValueError, islice, range(10), 'a', 1, 1) - self.assertRaises(ValueError, islice, range(10), 1, 'a', 1) + ra = range(10) + self.assertRaises(TypeError, islice, ra) + self.assertRaises(TypeError, islice, ra, 1, 2, 3, 4) + self.assertRaises(ValueError, islice, ra, -5, 10, 1) + self.assertRaises(ValueError, islice, ra, 1, -5, -1) + self.assertRaises(ValueError, islice, ra, 1, 10, -1) + self.assertRaises(ValueError, islice, ra, 1, 10, 0) + self.assertRaises(ValueError, islice, ra, 'a') + self.assertRaises(ValueError, islice, ra, 'a', 1) + self.assertRaises(ValueError, islice, ra, 1, 'a') + self.assertRaises(ValueError, islice, ra, 'a', 1, 1) + self.assertRaises(ValueError, islice, ra, 1, 'a', 1) self.assertEqual(len(list(islice(count(), 1, 10, maxsize))), 1) # Issue #10323: Less islice in a predictable state @@ -835,9 +1071,22 @@ self.assertEqual(list(islice(c, 1, 3, 50)), [1]) self.assertEqual(next(c), 3) + # check copy, deepcopy, pickle + for args in [ # islice(args) should agree with range(args) + (10, 20, 3), + (10, 3, 20), + (10, 20), + (10, 3), + (20,) + ]: + self.assertEqual(list(copy.copy(islice(range(100), *args))), + list(range(*args))) + self.assertEqual(list(copy.deepcopy(islice(range(100), *args))), + list(range(*args))) + self.pickletest(islice(range(100), *args)) + def test_takewhile(self): data = [1, 3, 5, 20, 2, 4, 6, 8] - underten = lambda x: x<10 self.assertEqual(list(takewhile(underten, data)), [1, 3, 5]) self.assertEqual(list(takewhile(underten, [])), []) self.assertRaises(TypeError, takewhile) @@ -849,9 +1098,14 @@ self.assertEqual(list(t), [1, 1, 1]) self.assertRaises(StopIteration, next, t) + # check copy, deepcopy, pickle + self.assertEqual(list(copy.copy(takewhile(underten, data))), [1, 3, 5]) + self.assertEqual(list(copy.deepcopy(takewhile(underten, data))), + [1, 3, 5]) + self.pickletest(takewhile(underten, data)) + def test_dropwhile(self): data = [1, 3, 5, 20, 2, 4, 6, 8] - underten = lambda x: x<10 self.assertEqual(list(dropwhile(underten, data)), [20, 2, 4, 6, 8]) self.assertEqual(list(dropwhile(underten, [])), []) self.assertRaises(TypeError, dropwhile) @@ -860,11 +1114,14 @@ self.assertRaises(TypeError, next, dropwhile(10, [(4,5)])) self.assertRaises(ValueError, next, dropwhile(errfunc, [(4,5)])) + # check copy, deepcopy, pickle + self.assertEqual(list(copy.copy(dropwhile(underten, data))), [20, 2, 4, 6, 8]) + self.assertEqual(list(copy.deepcopy(dropwhile(underten, data))), + [20, 2, 4, 6, 8]) + self.pickletest(dropwhile(underten, data)) + def test_tee(self): n = 200 - def 
irange(n): - for i in range(n): - yield i a, b = tee([]) # test empty iterator self.assertEqual(list(a), []) @@ -949,6 +1206,67 @@ del a self.assertRaises(ReferenceError, getattr, p, '__class__') + ans = list('abc') + long_ans = list(range(10000)) + + # check copy + a, b = tee('abc') + self.assertEqual(list(copy.copy(a)), ans) + self.assertEqual(list(copy.copy(b)), ans) + a, b = tee(list(range(10000))) + self.assertEqual(list(copy.copy(a)), long_ans) + self.assertEqual(list(copy.copy(b)), long_ans) + + # check partially consumed copy + a, b = tee('abc') + take(2, a) + take(1, b) + self.assertEqual(list(copy.copy(a)), ans[2:]) + self.assertEqual(list(copy.copy(b)), ans[1:]) + self.assertEqual(list(a), ans[2:]) + self.assertEqual(list(b), ans[1:]) + a, b = tee(range(10000)) + take(100, a) + take(60, b) + self.assertEqual(list(copy.copy(a)), long_ans[100:]) + self.assertEqual(list(copy.copy(b)), long_ans[60:]) + self.assertEqual(list(a), long_ans[100:]) + self.assertEqual(list(b), long_ans[60:]) + + # check deepcopy + a, b = tee('abc') + self.assertEqual(list(copy.deepcopy(a)), ans) + self.assertEqual(list(copy.deepcopy(b)), ans) + self.assertEqual(list(a), ans) + self.assertEqual(list(b), ans) + a, b = tee(range(10000)) + self.assertEqual(list(copy.deepcopy(a)), long_ans) + self.assertEqual(list(copy.deepcopy(b)), long_ans) + self.assertEqual(list(a), long_ans) + self.assertEqual(list(b), long_ans) + + # check partially consumed deepcopy + a, b = tee('abc') + take(2, a) + take(1, b) + self.assertEqual(list(copy.deepcopy(a)), ans[2:]) + self.assertEqual(list(copy.deepcopy(b)), ans[1:]) + self.assertEqual(list(a), ans[2:]) + self.assertEqual(list(b), ans[1:]) + a, b = tee(range(10000)) + take(100, a) + take(60, b) + self.assertEqual(list(copy.deepcopy(a)), long_ans[100:]) + self.assertEqual(list(copy.deepcopy(b)), long_ans[60:]) + self.assertEqual(list(a), long_ans[100:]) + self.assertEqual(list(b), long_ans[60:]) + + # check pickle + self.pickletest(iter(tee('abc'))) + a, b = tee('abc') + self.pickletest(a, compare=ans) + self.pickletest(b, compare=ans) + def test_StopIteration(self): self.assertRaises(StopIteration, next, zip()) @@ -974,9 +1292,21 @@ class TestExamples(unittest.TestCase): - def test_accumlate(self): + def test_accumulate(self): self.assertEqual(list(accumulate([1,2,3,4,5])), [1, 3, 6, 10, 15]) + def test_accumulate_reducible(self): + # check copy, deepcopy, pickle + data = [1, 2, 3, 4, 5] + accumulated = [1, 3, 6, 10, 15] + it = accumulate(data) + + self.assertEqual(list(pickle.loads(pickle.dumps(it))), accumulated[:]) + self.assertEqual(next(it), 1) + self.assertEqual(list(pickle.loads(pickle.dumps(it))), accumulated[1:]) + self.assertEqual(list(copy.deepcopy(it)), accumulated[1:]) + self.assertEqual(list(copy.copy(it)), accumulated[1:]) + def test_chain(self): self.assertEqual(''.join(chain('ABC', 'DEF')), 'ABCDEF') diff --git a/Lib/test/test_list.py b/Lib/test/test_list.py --- a/Lib/test/test_list.py +++ b/Lib/test/test_list.py @@ -1,5 +1,6 @@ import sys from test import support, list_tests +import pickle class ListTest(list_tests.CommonTest): type2test = list @@ -69,6 +70,33 @@ check(10) # check our checking code check(1000000) + def test_iterator_pickle(self): + # Userlist iterators don't support pickling yet since + # they are based on generators. 
+ data = self.type2test([4, 5, 6, 7]) + it = itorg = iter(data) + d = pickle.dumps(it) + it = pickle.loads(d) + self.assertEqual(type(itorg), type(it)) + self.assertEqual(self.type2test(it), self.type2test(data)) + + it = pickle.loads(d) + next(it) + d = pickle.dumps(it) + self.assertEqual(self.type2test(it), self.type2test(data)[1:]) + + def test_reversed_pickle(self): + data = self.type2test([4, 5, 6, 7]) + it = itorg = reversed(data) + d = pickle.dumps(it) + it = pickle.loads(d) + self.assertEqual(type(itorg), type(it)) + self.assertEqual(self.type2test(it), self.type2test(reversed(data))) + + it = pickle.loads(d) + next(it) + d = pickle.dumps(it) + self.assertEqual(self.type2test(it), self.type2test(reversed(data))[1:]) def test_main(verbose=None): support.run_unittest(ListTest) diff --git a/Lib/test/test_range.py b/Lib/test/test_range.py --- a/Lib/test/test_range.py +++ b/Lib/test/test_range.py @@ -341,13 +341,35 @@ def test_pickling(self): testcases = [(13,), (0, 11), (-22, 10), (20, 3, -1), - (13, 21, 3), (-2, 2, 2)] + (13, 21, 3), (-2, 2, 2), (2**65, 2**65+2)] for proto in range(pickle.HIGHEST_PROTOCOL + 1): for t in testcases: r = range(*t) self.assertEqual(list(pickle.loads(pickle.dumps(r, proto))), list(r)) + def test_iterator_pickling(self): + testcases = [(13,), (0, 11), (-22, 10), (20, 3, -1), + (13, 21, 3), (-2, 2, 2), (2**65, 2**65+2)] + for proto in range(pickle.HIGHEST_PROTOCOL + 1): + for t in testcases: + it = itorg = iter(range(*t)) + data = list(range(*t)) + + d = pickle.dumps(it) + it = pickle.loads(d) + self.assertEqual(type(itorg), type(it)) + self.assertEqual(list(it), data) + + it = pickle.loads(d) + try: + next(it) + except StopIteration: + continue + d = pickle.dumps(it) + it = pickle.loads(d) + self.assertEqual(list(it), data[1:]) + def test_odd_bug(self): # This used to raise a "SystemError: NULL result without error" # because the range validation step was eating the exception diff --git a/Lib/test/test_set.py b/Lib/test/test_set.py --- a/Lib/test/test_set.py +++ b/Lib/test/test_set.py @@ -9,6 +9,7 @@ import sys import warnings import collections +import collections.abc class PassThru(Exception): pass @@ -234,6 +235,26 @@ dup = pickle.loads(p) self.assertEqual(self.s.x, dup.x) + def test_iterator_pickling(self): + itorg = iter(self.s) + data = self.thetype(self.s) + d = pickle.dumps(itorg) + it = pickle.loads(d) + # Set iterators unpickle as list iterators due to the + # undefined order of set items. + # self.assertEqual(type(itorg), type(it)) + self.assertTrue(isinstance(it, collections.abc.Iterator)) + self.assertEqual(self.thetype(it), data) + + it = pickle.loads(d) + try: + drop = next(it) + except StopIteration: + return + d = pickle.dumps(it) + it = pickle.loads(d) + self.assertEqual(self.thetype(it), data - self.thetype((drop,))) + def test_deepcopy(self): class Tracer: def __init__(self, value): diff --git a/Lib/test/test_tuple.py b/Lib/test/test_tuple.py --- a/Lib/test/test_tuple.py +++ b/Lib/test/test_tuple.py @@ -1,6 +1,7 @@ from test import support, seq_tests import gc +import pickle class TupleTest(seq_tests.CommonTest): type2test = tuple @@ -164,6 +165,34 @@ check(10) # check our checking code check(1000000) + def test_iterator_pickle(self): + # Userlist iterators don't support pickling yet since + # they are based on generators. 
+ data = self.type2test([4, 5, 6, 7]) + itorg = iter(data) + d = pickle.dumps(itorg) + it = pickle.loads(d) + self.assertEqual(type(itorg), type(it)) + self.assertEqual(self.type2test(it), self.type2test(data)) + + it = pickle.loads(d) + next(it) + d = pickle.dumps(it) + self.assertEqual(self.type2test(it), self.type2test(data)[1:]) + + def test_reversed_pickle(self): + data = self.type2test([4, 5, 6, 7]) + itorg = reversed(data) + d = pickle.dumps(itorg) + it = pickle.loads(d) + self.assertEqual(type(itorg), type(it)) + self.assertEqual(self.type2test(it), self.type2test(reversed(data))) + + it = pickle.loads(d) + next(it) + d = pickle.dumps(it) + self.assertEqual(self.type2test(it), self.type2test(reversed(data))[1:]) + def test_main(): support.run_unittest(TupleTest) diff --git a/Modules/_collectionsmodule.c b/Modules/_collectionsmodule.c --- a/Modules/_collectionsmodule.c +++ b/Modules/_collectionsmodule.c @@ -1122,6 +1122,35 @@ } static PyObject * +dequeiter_new(PyTypeObject *type, PyObject *args, PyObject *kwds) +{ + Py_ssize_t i, index=0; + PyObject *deque; + dequeiterobject *it; + if (!PyArg_ParseTuple(args, "O!|n", &deque_type, &deque, &index)) + return NULL; + assert(type == &dequeiter_type); + + it = (dequeiterobject*)deque_iter((dequeobject *)deque); + if (!it) + return NULL; + /* consume items from the queue */ + for(i=0; icounter) { + Py_DECREF(it); + return NULL; + } else + break; + } + } + return (PyObject*)it; +} + +static PyObject * dequeiter_len(dequeiterobject *it) { return PyLong_FromSsize_t(it->counter); @@ -1129,14 +1158,21 @@ PyDoc_STRVAR(length_hint_doc, "Private method returning an estimate of len(list(it))."); +static PyObject * +dequeiter_reduce(dequeiterobject *it) +{ + return Py_BuildValue("O(On)", Py_TYPE(it), it->deque, it->deque->len - it->counter); +} + static PyMethodDef dequeiter_methods[] = { {"__length_hint__", (PyCFunction)dequeiter_len, METH_NOARGS, length_hint_doc}, + {"__reduce__", (PyCFunction)dequeiter_reduce, METH_NOARGS, reduce_doc}, {NULL, NULL} /* sentinel */ }; static PyTypeObject dequeiter_type = { PyVarObject_HEAD_INIT(NULL, 0) - "deque_iterator", /* tp_name */ + "_collections._deque_iterator", /* tp_name */ sizeof(dequeiterobject), /* tp_basicsize */ 0, /* tp_itemsize */ /* methods */ @@ -1164,6 +1200,16 @@ PyObject_SelfIter, /* tp_iter */ (iternextfunc)dequeiter_next, /* tp_iternext */ dequeiter_methods, /* tp_methods */ + 0, /* tp_members */ + 0, /* tp_getset */ + 0, /* tp_base */ + 0, /* tp_dict */ + 0, /* tp_descr_get */ + 0, /* tp_descr_set */ + 0, /* tp_dictoffset */ + 0, /* tp_init */ + 0, /* tp_alloc */ + dequeiter_new, /* tp_new */ 0, }; @@ -1217,9 +1263,38 @@ return item; } +static PyObject * +dequereviter_new(PyTypeObject *type, PyObject *args, PyObject *kwds) +{ + Py_ssize_t i, index=0; + PyObject *deque; + dequeiterobject *it; + if (!PyArg_ParseTuple(args, "O!|n", &deque_type, &deque, &index)) + return NULL; + assert(type == &dequereviter_type); + + it = (dequeiterobject*)deque_reviter((dequeobject *)deque); + if (!it) + return NULL; + /* consume items from the queue */ + for(i=0; icounter) { + Py_DECREF(it); + return NULL; + } else + break; + } + } + return (PyObject*)it; +} + static PyTypeObject dequereviter_type = { PyVarObject_HEAD_INIT(NULL, 0) - "deque_reverse_iterator", /* tp_name */ + "_collections._deque_reverse_iterator", /* tp_name */ sizeof(dequeiterobject), /* tp_basicsize */ 0, /* tp_itemsize */ /* methods */ @@ -1247,6 +1322,16 @@ PyObject_SelfIter, /* tp_iter */ (iternextfunc)dequereviter_next, /* tp_iternext 
*/ dequeiter_methods, /* tp_methods */ + 0, /* tp_members */ + 0, /* tp_getset */ + 0, /* tp_base */ + 0, /* tp_dict */ + 0, /* tp_descr_get */ + 0, /* tp_descr_set */ + 0, /* tp_dictoffset */ + 0, /* tp_init */ + 0, /* tp_alloc */ + dequereviter_new, /* tp_new */ 0, }; @@ -1653,9 +1738,13 @@ if (PyType_Ready(&dequeiter_type) < 0) return NULL; + Py_INCREF(&dequeiter_type); + PyModule_AddObject(m, "_deque_iterator", (PyObject *)&dequeiter_type); if (PyType_Ready(&dequereviter_type) < 0) return NULL; + Py_INCREF(&dequereviter_type); + PyModule_AddObject(m, "_deque_reverse_iterator", (PyObject *)&dequereviter_type); return m; } diff --git a/Modules/arraymodule.c b/Modules/arraymodule.c --- a/Modules/arraymodule.c +++ b/Modules/arraymodule.c @@ -2753,6 +2753,34 @@ return 0; } +static PyObject * +arrayiter_reduce(arrayiterobject *it) +{ + return Py_BuildValue("N(O)n", _PyIter_GetBuiltin("iter"), + it->ao, it->index); +} + +static PyObject * +arrayiter_setstate(arrayiterobject *it, PyObject *state) +{ + Py_ssize_t index = PyLong_AsSsize_t(state); + if (index == -1 && PyErr_Occurred()) + return NULL; + if (index < 0) + index = 0; + it->index = index; + Py_RETURN_NONE; +} + +PyDoc_STRVAR(setstate_doc, "Set state information for unpickling."); +static PyMethodDef arrayiter_methods[] = { + {"__reduce__", (PyCFunction)arrayiter_reduce, METH_NOARGS, + reduce_doc}, + {"__setstate__", (PyCFunction)arrayiter_setstate, METH_O, + setstate_doc}, + {NULL, NULL} /* sentinel */ +}; + static PyTypeObject PyArrayIter_Type = { PyVarObject_HEAD_INIT(NULL, 0) "arrayiterator", /* tp_name */ @@ -2782,7 +2810,7 @@ 0, /* tp_weaklistoffset */ PyObject_SelfIter, /* tp_iter */ (iternextfunc)arrayiter_next, /* tp_iternext */ - 0, /* tp_methods */ + arrayiter_methods, /* tp_methods */ }; diff --git a/Modules/itertoolsmodule.c b/Modules/itertoolsmodule.c --- a/Modules/itertoolsmodule.c +++ b/Modules/itertoolsmodule.c @@ -134,6 +134,53 @@ return r; } +static PyObject * +groupby_reduce(groupbyobject *lz) +{ + /* reduce as a 'new' call with an optional 'setstate' if groupby + * has started + */ + PyObject *value; + if (lz->tgtkey && lz->currkey && lz->currvalue) + value = Py_BuildValue("O(OO)(OOO)", Py_TYPE(lz), + lz->it, lz->keyfunc, lz->currkey, lz->currvalue, lz->tgtkey); + else + value = Py_BuildValue("O(OO)", Py_TYPE(lz), + lz->it, lz->keyfunc); + + return value; +} + +PyDoc_STRVAR(reduce_doc, "Return state information for pickling."); + +static PyObject * +groupby_setstate(groupbyobject *lz, PyObject *state) +{ + PyObject *currkey, *currvalue, *tgtkey; + if (!PyArg_ParseTuple(state, "OOO", &currkey, &currvalue, &tgtkey)) + return NULL; + Py_CLEAR(lz->currkey); + lz->currkey = currkey; + Py_INCREF(lz->currkey); + Py_CLEAR(lz->currvalue); + lz->currvalue = currvalue; + Py_INCREF(lz->currvalue); + Py_CLEAR(lz->tgtkey); + lz->tgtkey = tgtkey; + Py_INCREF(lz->tgtkey); + Py_RETURN_NONE; +} + +PyDoc_STRVAR(setstate_doc, "Set state information for unpickling."); + +static PyMethodDef groupby_methods[] = { + {"__reduce__", (PyCFunction)groupby_reduce, METH_NOARGS, + reduce_doc}, + {"__setstate__", (PyCFunction)groupby_setstate, METH_O, + setstate_doc}, + {NULL, NULL} /* sentinel */ +}; + PyDoc_STRVAR(groupby_doc, "groupby(iterable[, keyfunc]) -> create an iterator which returns\n\ (key, sub-iterator) grouped by each value of key(value).\n"); @@ -168,7 +215,7 @@ 0, /* tp_weaklistoffset */ PyObject_SelfIter, /* tp_iter */ (iternextfunc)groupby_next, /* tp_iternext */ - 0, /* tp_methods */ + groupby_methods, /* tp_methods */ 0, /* 
tp_members */ 0, /* tp_getset */ 0, /* tp_base */ @@ -194,6 +241,17 @@ static PyTypeObject _grouper_type; static PyObject * +_grouper_new(PyTypeObject *type, PyObject *args, PyObject *kwds) +{ + PyObject *parent, *tgtkey; + + if (!PyArg_ParseTuple(args, "O!O", &groupby_type, &parent, &tgtkey)) + return NULL; + + return _grouper_create((groupbyobject*) parent, tgtkey); +} + +static PyObject * _grouper_create(groupbyobject *parent, PyObject *tgtkey) { _grouperobject *igo; @@ -269,6 +327,20 @@ return r; } +static PyObject * +_grouper_reduce(_grouperobject *lz) +{ + return Py_BuildValue("O(OO)", Py_TYPE(lz), + lz->parent, lz->tgtkey); +} + +static PyMethodDef _grouper_methods[] = { + {"__reduce__", (PyCFunction)_grouper_reduce, METH_NOARGS, + reduce_doc}, + {NULL, NULL} /* sentinel */ +}; + + static PyTypeObject _grouper_type = { PyVarObject_HEAD_INIT(NULL, 0) "itertools._grouper", /* tp_name */ @@ -298,7 +370,7 @@ 0, /* tp_weaklistoffset */ PyObject_SelfIter, /* tp_iter */ (iternextfunc)_grouper_next, /* tp_iternext */ - 0, /* tp_methods */ + _grouper_methods, /* tp_methods */ 0, /* tp_members */ 0, /* tp_getset */ 0, /* tp_base */ @@ -308,7 +380,7 @@ 0, /* tp_dictoffset */ 0, /* tp_init */ 0, /* tp_alloc */ - 0, /* tp_new */ + _grouper_new, /* tp_new */ PyObject_GC_Del, /* tp_free */ }; @@ -344,7 +416,7 @@ static PyTypeObject teedataobject_type; static PyObject * -teedataobject_new(PyObject *it) +teedataobject_newinternal(PyObject *it) { teedataobject *tdo; @@ -364,7 +436,7 @@ teedataobject_jumplink(teedataobject *tdo) { if (tdo->nextlink == NULL) - tdo->nextlink = teedataobject_new(tdo->it); + tdo->nextlink = teedataobject_newinternal(tdo->it); Py_XINCREF(tdo->nextlink); return tdo->nextlink; } @@ -420,11 +492,80 @@ PyObject_GC_Del(tdo); } +static PyObject * +teedataobject_reduce(teedataobject *tdo) +{ + int i; + /* create a temporary list of already iterated values */ + PyObject *values = PyList_New(tdo->numread); + if (!values) + return NULL; + for (i=0 ; inumread ; i++) { + Py_INCREF(tdo->values[i]); + PyList_SET_ITEM(values, i, tdo->values[i]); + } + return Py_BuildValue("O(ONO)", Py_TYPE(tdo), tdo->it, + values, + tdo->nextlink ? 
tdo->nextlink : Py_None); +} + +static PyTypeObject teedataobject_type; + +static PyObject * +teedataobject_new(PyTypeObject *type, PyObject *args, PyObject *kw) +{ + teedataobject *tdo; + PyObject *it, *values, *next; + Py_ssize_t i, len; + + assert(type == &teedataobject_type); + if (!PyArg_ParseTuple(args, "OO!O", &it, &PyList_Type, &values, &next)) + return NULL; + + tdo = (teedataobject *)teedataobject_newinternal(it); + if (!tdo) + return NULL; + + len = PyList_GET_SIZE(values); + if (len > LINKCELLS) + goto err; + for (i=0; ivalues[i] = PyList_GET_ITEM(values, i); + Py_INCREF(tdo->values[i]); + } + tdo->numread = len; + + if (len == LINKCELLS) { + if (next != Py_None) { + if (Py_TYPE(next) != &teedataobject_type) + goto err; + assert(tdo->nextlink == NULL); + Py_INCREF(next); + tdo->nextlink = next; + } + } else { + if (next != Py_None) + goto err; /* shouldn't have a next if we are not full */ + } + return (PyObject*)tdo; + +err: + Py_XDECREF(tdo); + PyErr_SetString(PyExc_ValueError, "Invalid arguments"); + return NULL; +} + +static PyMethodDef teedataobject_methods[] = { + {"__reduce__", (PyCFunction)teedataobject_reduce, METH_NOARGS, + reduce_doc}, + {NULL, NULL} /* sentinel */ +}; + PyDoc_STRVAR(teedataobject_doc, "Data container common to multiple tee objects."); static PyTypeObject teedataobject_type = { PyVarObject_HEAD_INIT(0, 0) /* Must fill in type value later */ - "itertools.tee_dataobject", /* tp_name */ + "itertools._tee_dataobject", /* tp_name */ sizeof(teedataobject), /* tp_basicsize */ 0, /* tp_itemsize */ /* methods */ @@ -451,7 +592,7 @@ 0, /* tp_weaklistoffset */ 0, /* tp_iter */ 0, /* tp_iternext */ - 0, /* tp_methods */ + teedataobject_methods, /* tp_methods */ 0, /* tp_members */ 0, /* tp_getset */ 0, /* tp_base */ @@ -461,7 +602,7 @@ 0, /* tp_dictoffset */ 0, /* tp_init */ 0, /* tp_alloc */ - 0, /* tp_new */ + teedataobject_new, /* tp_new */ PyObject_GC_Del, /* tp_free */ }; @@ -528,7 +669,7 @@ to = PyObject_GC_New(teeobject, &tee_type); if (to == NULL) goto done; - to->dataobj = (teedataobject *)teedataobject_new(it); + to->dataobj = (teedataobject *)teedataobject_newinternal(it); if (!to->dataobj) { PyObject_GC_Del(to); to = NULL; @@ -548,7 +689,7 @@ { PyObject *iterable; - if (!PyArg_UnpackTuple(args, "tee", 1, 1, &iterable)) + if (!PyArg_UnpackTuple(args, "_tee", 1, 1, &iterable)) return NULL; return tee_fromiterable(iterable); } @@ -570,17 +711,43 @@ PyObject_GC_Del(to); } +static PyObject * +tee_reduce(teeobject *to) +{ + return Py_BuildValue("O(())(Oi)", Py_TYPE(to), to->dataobj, to->index); +} + +static PyObject * +tee_setstate(teeobject *to, PyObject *state) +{ + teedataobject *tdo; + int index; + if (!PyArg_ParseTuple(state, "O!i", &teedataobject_type, &tdo, &index)) + return NULL; + if (index < 0 || index > LINKCELLS) { + PyErr_SetString(PyExc_ValueError, "Index out of range"); + return NULL; + } + Py_CLEAR(to->dataobj); + to->dataobj = tdo; + Py_INCREF(to->dataobj); + to->index = index; + Py_RETURN_NONE; +} + PyDoc_STRVAR(teeobject_doc, "Iterator wrapped to make it copyable"); static PyMethodDef tee_methods[] = { {"__copy__", (PyCFunction)tee_copy, METH_NOARGS, teecopy_doc}, + {"__reduce__", (PyCFunction)tee_reduce, METH_NOARGS, reduce_doc}, + {"__setstate__", (PyCFunction)tee_setstate, METH_O, setstate_doc}, {NULL, NULL} /* sentinel */ }; static PyTypeObject tee_type = { PyVarObject_HEAD_INIT(NULL, 0) - "itertools.tee", /* tp_name */ + "itertools._tee", /* tp_name */ sizeof(teeobject), /* tp_basicsize */ 0, /* tp_itemsize */ /* methods */ @@ 
-771,6 +938,38 @@ } } +static PyObject * +cycle_reduce(cycleobject *lz) +{ + /* Create a new cycle with the iterator tuple, then set + * the saved state on it. + */ + return Py_BuildValue("O(O)(Oi)", Py_TYPE(lz), + lz->it, lz->saved, lz->firstpass); + } + +static PyObject * +cycle_setstate(cycleobject *lz, PyObject *state) +{ + PyObject *saved=NULL; + int firstpass; + if (!PyArg_ParseTuple(state, "Oi", &saved, &firstpass)) + return NULL; + Py_CLEAR(lz->saved); + lz->saved = saved; + Py_XINCREF(lz->saved); + lz->firstpass = firstpass != 0; + Py_RETURN_NONE; +} + +static PyMethodDef cycle_methods[] = { + {"__reduce__", (PyCFunction)cycle_reduce, METH_NOARGS, + reduce_doc}, + {"__setstate__", (PyCFunction)cycle_setstate, METH_O, + setstate_doc}, + {NULL, NULL} /* sentinel */ +}; + PyDoc_STRVAR(cycle_doc, "cycle(iterable) --> cycle object\n\ \n\ @@ -807,7 +1006,7 @@ 0, /* tp_weaklistoffset */ PyObject_SelfIter, /* tp_iter */ (iternextfunc)cycle_next, /* tp_iternext */ - 0, /* tp_methods */ + cycle_methods, /* tp_methods */ 0, /* tp_members */ 0, /* tp_getset */ 0, /* tp_base */ @@ -913,6 +1112,31 @@ } } +static PyObject * +dropwhile_reduce(dropwhileobject *lz) +{ + return Py_BuildValue("O(OO)l", Py_TYPE(lz), + lz->func, lz->it, lz->start); +} + +static PyObject * +dropwhile_setstate(dropwhileobject *lz, PyObject *state) +{ + int start = PyObject_IsTrue(state); + if (start == -1) + return NULL; + lz->start = start; + Py_RETURN_NONE; +} + +static PyMethodDef dropwhile_methods[] = { + {"__reduce__", (PyCFunction)dropwhile_reduce, METH_NOARGS, + reduce_doc}, + {"__setstate__", (PyCFunction)dropwhile_setstate, METH_O, + setstate_doc}, + {NULL, NULL} /* sentinel */ +}; + PyDoc_STRVAR(dropwhile_doc, "dropwhile(predicate, iterable) --> dropwhile object\n\ \n\ @@ -949,7 +1173,7 @@ 0, /* tp_weaklistoffset */ PyObject_SelfIter, /* tp_iter */ (iternextfunc)dropwhile_next, /* tp_iternext */ - 0, /* tp_methods */ + dropwhile_methods, /* tp_methods */ 0, /* tp_members */ 0, /* tp_getset */ 0, /* tp_base */ @@ -1052,6 +1276,30 @@ return NULL; } +static PyObject * +takewhile_reduce(takewhileobject *lz) +{ + return Py_BuildValue("O(OO)l", Py_TYPE(lz), + lz->func, lz->it, lz->stop); +} + +static PyObject * +takewhile_reduce_setstate(takewhileobject *lz, PyObject *state) +{ + int stop = PyObject_IsTrue(state); + if (stop == -1) + return NULL; + lz->stop = stop; + Py_RETURN_NONE; +} + +static PyMethodDef takewhile_reduce_methods[] = { + {"__reduce__", (PyCFunction)takewhile_reduce, METH_NOARGS, + reduce_doc}, + {"__setstate__", (PyCFunction)takewhile_reduce_setstate, METH_O, + setstate_doc}, + {NULL, NULL} /* sentinel */ +}; PyDoc_STRVAR(takewhile_doc, "takewhile(predicate, iterable) --> takewhile object\n\ \n\ @@ -1088,7 +1336,7 @@ 0, /* tp_weaklistoffset */ PyObject_SelfIter, /* tp_iter */ (iternextfunc)takewhile_next, /* tp_iternext */ - 0, /* tp_methods */ + takewhile_reduce_methods, /* tp_methods */ 0, /* tp_members */ 0, /* tp_getset */ 0, /* tp_base */ @@ -1244,6 +1492,44 @@ return item; } +static PyObject * +islice_reduce(isliceobject *lz) +{ + /* When unpickled, generate a new object with the same bounds, + * then 'setstate' with the next and count + */ + PyObject *stop; + if (lz->stop == -1) { + stop = Py_None; + Py_INCREF(stop); + } else { + stop = PyLong_FromSsize_t(lz->stop); + if (stop == NULL) + return NULL; + } + return Py_BuildValue("O(OnNn)n", Py_TYPE(lz), + lz->it, lz->next, stop, lz->step, + lz->cnt); +} + +static PyObject * +islice_setstate(isliceobject *lz, PyObject *state) +{ + Py_ssize_t 
cnt = PyLong_AsSsize_t(state); + if (cnt == -1 && PyErr_Occurred()) + return NULL; + lz->cnt = cnt; + Py_RETURN_NONE; +} + +static PyMethodDef islice_methods[] = { + {"__reduce__", (PyCFunction)islice_reduce, METH_NOARGS, + reduce_doc}, + {"__setstate__", (PyCFunction)islice_setstate, METH_O, + setstate_doc}, + {NULL, NULL} /* sentinel */ +}; + PyDoc_STRVAR(islice_doc, "islice(iterable, [start,] stop [, step]) --> islice object\n\ \n\ @@ -1284,7 +1570,7 @@ 0, /* tp_weaklistoffset */ PyObject_SelfIter, /* tp_iter */ (iternextfunc)islice_next, /* tp_iternext */ - 0, /* tp_methods */ + islice_methods, /* tp_methods */ 0, /* tp_members */ 0, /* tp_getset */ 0, /* tp_base */ @@ -1379,6 +1665,19 @@ return result; } +static PyObject * +starmap_reduce(starmapobject *lz) +{ + /* Just pickle the iterator */ + return Py_BuildValue("O(OO)", Py_TYPE(lz), lz->func, lz->it); +} + +static PyMethodDef starmap_methods[] = { + {"__reduce__", (PyCFunction)starmap_reduce, METH_NOARGS, + reduce_doc}, + {NULL, NULL} /* sentinel */ +}; + PyDoc_STRVAR(starmap_doc, "starmap(function, sequence) --> starmap object\n\ \n\ @@ -1415,7 +1714,7 @@ 0, /* tp_weaklistoffset */ PyObject_SelfIter, /* tp_iter */ (iternextfunc)starmap_next, /* tp_iternext */ - 0, /* tp_methods */ + starmap_methods, /* tp_methods */ 0, /* tp_members */ 0, /* tp_getset */ 0, /* tp_base */ @@ -1534,6 +1833,41 @@ return chain_next(lz); /* recurse and use next active */ } +static PyObject * +chain_reduce(chainobject *lz) +{ + if (lz->source) { + /* we can't pickle function objects (itertools.from_iterable) so + * we must use setstate to replace the iterable. One day we + * will fix pickling of functions + */ + if (lz->active) { + return Py_BuildValue("O()(OO)", Py_TYPE(lz), lz->source, lz->active); + } else { + return Py_BuildValue("O()(O)", Py_TYPE(lz), lz->source); + } + } else { + return Py_BuildValue("O()", Py_TYPE(lz)); /* exhausted */ + } + return NULL; +} + +static PyObject * +chain_setstate(chainobject *lz, PyObject *state) +{ + PyObject *source, *active=NULL; + if (! 
PyArg_ParseTuple(state, "O|O", &source, &active)) + return NULL; + + Py_CLEAR(lz->source); + lz->source = source; + Py_INCREF(lz->source); + Py_CLEAR(lz->active); + lz->active = active; + Py_XINCREF(lz->active); + Py_RETURN_NONE; +} + PyDoc_STRVAR(chain_doc, "chain(*iterables) --> chain object\n\ \n\ @@ -1550,6 +1884,10 @@ static PyMethodDef chain_methods[] = { {"from_iterable", (PyCFunction) chain_new_from_iterable, METH_O | METH_CLASS, chain_from_iterable_doc}, + {"__reduce__", (PyCFunction)chain_reduce, METH_NOARGS, + reduce_doc}, + {"__setstate__", (PyCFunction)chain_setstate, METH_O, + setstate_doc}, {NULL, NULL} /* sentinel */ }; @@ -1790,6 +2128,83 @@ return NULL; } +static PyObject * +product_reduce(productobject *lz) +{ + if (lz->stopped) { + return Py_BuildValue("O(())", Py_TYPE(lz)); + } else if (lz->result == NULL) { + return Py_BuildValue("OO", Py_TYPE(lz), lz->pools); + } else { + PyObject *indices; + Py_ssize_t n, i; + + /* we must pickle the indices use them for setstate, and + * additionally indicate that the iterator has started + */ + n = PyTuple_GET_SIZE(lz->pools); + indices = PyTuple_New(n); + if (indices == NULL) + return NULL; + for (i=0; iindices[i]); + if (!index) { + Py_DECREF(indices); + return NULL; + } + PyTuple_SET_ITEM(indices, i, index); + } + return Py_BuildValue("OON", Py_TYPE(lz), lz->pools, indices); + } +} + +static PyObject * +product_setstate(productobject *lz, PyObject *state) +{ + PyObject *result; + Py_ssize_t n, i; + + n = PyTuple_GET_SIZE(lz->pools); + if (!PyTuple_Check(state) || PyTuple_GET_SIZE(state) != n) { + PyErr_SetString(PyExc_ValueError, "invalid arguments"); + return NULL; + } + for (i=0; i n-1) + index = n-1; + lz->indices[i] = index; + } + + result = PyTuple_New(n); + if (!result) + return NULL; + for (i=0; ipools, i); + PyObject *element = PyTuple_GET_ITEM(pool, lz->indices[i]); + Py_INCREF(element); + PyTuple_SET_ITEM(result, i, element); + } + Py_CLEAR(lz->result); + lz->result = result; + Py_RETURN_NONE; +} + +static PyMethodDef product_methods[] = { + {"__reduce__", (PyCFunction)product_reduce, METH_NOARGS, + reduce_doc}, + {"__setstate__", (PyCFunction)product_setstate, METH_O, + setstate_doc}, + {NULL, NULL} /* sentinel */ +}; + PyDoc_STRVAR(product_doc, "product(*iterables) --> product object\n\ \n\ @@ -1834,7 +2249,7 @@ 0, /* tp_weaklistoffset */ PyObject_SelfIter, /* tp_iter */ (iternextfunc)product_next, /* tp_iternext */ - 0, /* tp_methods */ + product_methods, /* tp_methods */ 0, /* tp_members */ 0, /* tp_getset */ 0, /* tp_base */ @@ -2021,6 +2436,86 @@ return NULL; } +static PyObject * +combinations_reduce(combinationsobject *lz) +{ + if (lz->result == NULL) { + return Py_BuildValue("O(On)", Py_TYPE(lz), lz->pool, lz->r); + } else if (lz->stopped) { + return Py_BuildValue("O(()n)", Py_TYPE(lz), lz->r); + } else { + PyObject *indices; + Py_ssize_t i; + + /* we must pickle the indices and use them for setstate */ + indices = PyTuple_New(lz->r); + if (!indices) + return NULL; + for (i=0; ir; i++) + { + PyObject* index = PyLong_FromSsize_t(lz->indices[i]); + if (!index) { + Py_DECREF(indices); + return NULL; + } + PyTuple_SET_ITEM(indices, i, index); + } + + return Py_BuildValue("O(On)N", Py_TYPE(lz), lz->pool, lz->r, indices); + } +} + +static PyObject * +combinations_setstate(combinationsobject *lz, PyObject *state) +{ + PyObject *result; + Py_ssize_t i; + Py_ssize_t n = PyTuple_GET_SIZE(lz->pool); + + if (!PyTuple_Check(state) || PyTuple_GET_SIZE(state) != lz->r) + { + PyErr_SetString(PyExc_ValueError, "invalid 
arguments"); + return NULL; + } + + for (i=0; ir; i++) + { + Py_ssize_t max; + PyObject* indexObject = PyTuple_GET_ITEM(state, i); + Py_ssize_t index = PyLong_AsSsize_t(indexObject); + if (index == -1 && PyErr_Occurred()) + return NULL; /* not an integer */ + max = i + n - lz->r; + /* clamp the index (beware of negative max) */ + if (index > max) + index = max; + if (index < 0) + index = 0; + lz->indices[i] = index; + } + + result = PyTuple_New(lz->r); + if (result == NULL) + return NULL; + for (i=0; ir; i++) { + PyObject *element = PyTuple_GET_ITEM(lz->pool, lz->indices[i]); + Py_INCREF(element); + PyTuple_SET_ITEM(result, i, element); + } + + Py_CLEAR(lz->result); + lz->result = result; + Py_RETURN_NONE; +} + +static PyMethodDef combinations_methods[] = { + {"__reduce__", (PyCFunction)combinations_reduce, METH_NOARGS, + reduce_doc}, + {"__setstate__", (PyCFunction)combinations_setstate, METH_O, + setstate_doc}, + {NULL, NULL} /* sentinel */ +}; + PyDoc_STRVAR(combinations_doc, "combinations(iterable, r) --> combinations object\n\ \n\ @@ -2029,11 +2524,11 @@ static PyTypeObject combinations_type = { PyVarObject_HEAD_INIT(NULL, 0) - "itertools.combinations", /* tp_name */ + "itertools.combinations", /* tp_name */ sizeof(combinationsobject), /* tp_basicsize */ 0, /* tp_itemsize */ /* methods */ - (destructor)combinations_dealloc, /* tp_dealloc */ + (destructor)combinations_dealloc, /* tp_dealloc */ 0, /* tp_print */ 0, /* tp_getattr */ 0, /* tp_setattr */ @@ -2050,14 +2545,14 @@ 0, /* tp_as_buffer */ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC | Py_TPFLAGS_BASETYPE, /* tp_flags */ - combinations_doc, /* tp_doc */ - (traverseproc)combinations_traverse, /* tp_traverse */ + combinations_doc, /* tp_doc */ + (traverseproc)combinations_traverse,/* tp_traverse */ 0, /* tp_clear */ 0, /* tp_richcompare */ 0, /* tp_weaklistoffset */ PyObject_SelfIter, /* tp_iter */ - (iternextfunc)combinations_next, /* tp_iternext */ - 0, /* tp_methods */ + (iternextfunc)combinations_next, /* tp_iternext */ + combinations_methods, /* tp_methods */ 0, /* tp_members */ 0, /* tp_getset */ 0, /* tp_base */ @@ -2067,7 +2562,7 @@ 0, /* tp_dictoffset */ 0, /* tp_init */ 0, /* tp_alloc */ - combinations_new, /* tp_new */ + combinations_new, /* tp_new */ PyObject_GC_Del, /* tp_free */ }; @@ -2266,6 +2761,82 @@ return NULL; } +static PyObject * +cwr_reduce(cwrobject *lz) +{ + if (lz->result == NULL) { + return Py_BuildValue("O(On)", Py_TYPE(lz), lz->pool, lz->r); + } else if (lz->stopped) { + return Py_BuildValue("O(()n)", Py_TYPE(lz), lz->r); + } else { + PyObject *indices; + Py_ssize_t i; + + /* we must pickle the indices and use them for setstate */ + indices = PyTuple_New(lz->r); + if (!indices) + return NULL; + for (i=0; ir; i++) + { + PyObject* index = PyLong_FromSsize_t(lz->indices[i]); + if (!index) { + Py_DECREF(indices); + return NULL; + } + PyTuple_SET_ITEM(indices, i, index); + } + + return Py_BuildValue("O(On)N", Py_TYPE(lz), lz->pool, lz->r, indices); + } +} + +static PyObject * +cwr_setstate(cwrobject *lz, PyObject *state) +{ + PyObject *result; + Py_ssize_t n, i; + + if (!PyTuple_Check(state) || PyTuple_GET_SIZE(state) != lz->r) + { + PyErr_SetString(PyExc_ValueError, "invalid arguments"); + return NULL; + } + + n = PyTuple_GET_SIZE(lz->pool); + for (i=0; ir; i++) + { + PyObject* indexObject = PyTuple_GET_ITEM(state, i); + Py_ssize_t index = PyLong_AsSsize_t(indexObject); + if (index < 0 && PyErr_Occurred()) + return NULL; /* not an integer */ + /* clamp the index */ + if (index < 0) + index = 0; + else if (index 
> n-1) + index = n-1; + lz->indices[i] = index; + } + result = PyTuple_New(lz->r); + if (result == NULL) + return NULL; + for (i=0; ir; i++) { + PyObject *element = PyTuple_GET_ITEM(lz->pool, lz->indices[i]); + Py_INCREF(element); + PyTuple_SET_ITEM(result, i, element); + } + Py_CLEAR(lz->result); + lz->result = result; + Py_RETURN_NONE; +} + +static PyMethodDef cwr_methods[] = { + {"__reduce__", (PyCFunction)cwr_reduce, METH_NOARGS, + reduce_doc}, + {"__setstate__", (PyCFunction)cwr_setstate, METH_O, + setstate_doc}, + {NULL, NULL} /* sentinel */ +}; + PyDoc_STRVAR(cwr_doc, "combinations_with_replacement(iterable, r) --> combinations_with_replacement object\n\ \n\ @@ -2275,11 +2846,11 @@ static PyTypeObject cwr_type = { PyVarObject_HEAD_INIT(NULL, 0) - "itertools.combinations_with_replacement", /* tp_name */ - sizeof(cwrobject), /* tp_basicsize */ + "itertools.combinations_with_replacement", /* tp_name */ + sizeof(cwrobject), /* tp_basicsize */ 0, /* tp_itemsize */ /* methods */ - (destructor)cwr_dealloc, /* tp_dealloc */ + (destructor)cwr_dealloc, /* tp_dealloc */ 0, /* tp_print */ 0, /* tp_getattr */ 0, /* tp_setattr */ @@ -2291,19 +2862,19 @@ 0, /* tp_hash */ 0, /* tp_call */ 0, /* tp_str */ - PyObject_GenericGetAttr, /* tp_getattro */ + PyObject_GenericGetAttr, /* tp_getattro */ 0, /* tp_setattro */ 0, /* tp_as_buffer */ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC | - Py_TPFLAGS_BASETYPE, /* tp_flags */ - cwr_doc, /* tp_doc */ - (traverseproc)cwr_traverse, /* tp_traverse */ + Py_TPFLAGS_BASETYPE, /* tp_flags */ + cwr_doc, /* tp_doc */ + (traverseproc)cwr_traverse, /* tp_traverse */ 0, /* tp_clear */ 0, /* tp_richcompare */ 0, /* tp_weaklistoffset */ - PyObject_SelfIter, /* tp_iter */ - (iternextfunc)cwr_next, /* tp_iternext */ - 0, /* tp_methods */ + PyObject_SelfIter, /* tp_iter */ + (iternextfunc)cwr_next, /* tp_iternext */ + cwr_methods, /* tp_methods */ 0, /* tp_members */ 0, /* tp_getset */ 0, /* tp_base */ @@ -2313,8 +2884,8 @@ 0, /* tp_dictoffset */ 0, /* tp_init */ 0, /* tp_alloc */ - cwr_new, /* tp_new */ - PyObject_GC_Del, /* tp_free */ + cwr_new, /* tp_new */ + PyObject_GC_Del, /* tp_free */ }; @@ -2538,6 +3109,115 @@ return NULL; } +static PyObject * +permutations_reduce(permutationsobject *po) +{ + if (po->result == NULL) { + return Py_BuildValue("O(On)", Py_TYPE(po), po->pool, po->r); + } else if (po->stopped) { + return Py_BuildValue("O(()n)", Py_TYPE(po), po->r); + } else { + PyObject *indices=NULL, *cycles=NULL; + Py_ssize_t n, i; + + /* we must pickle the indices and cycles and use them for setstate */ + n = PyTuple_GET_SIZE(po->pool); + indices = PyTuple_New(n); + if (indices == NULL) + goto err; + for (i=0; iindices[i]); + if (!index) + goto err; + PyTuple_SET_ITEM(indices, i, index); + } + + cycles = PyTuple_New(po->r); + if (cycles == NULL) + goto err; + for (i=0; ir; i++) + { + PyObject* index = PyLong_FromSsize_t(po->cycles[i]); + if (!index) + goto err; + PyTuple_SET_ITEM(cycles, i, index); + } + return Py_BuildValue("O(On)(NN)", Py_TYPE(po), + po->pool, po->r, + indices, cycles); + err: + Py_XDECREF(indices); + Py_XDECREF(cycles); + return NULL; + } +} + +static PyObject * +permutations_setstate(permutationsobject *po, PyObject *state) +{ + PyObject *indices, *cycles, *result; + Py_ssize_t n, i; + + if (!PyArg_ParseTuple(state, "O!O!", + &PyTuple_Type, &indices, + &PyTuple_Type, &cycles)) + return NULL; + + n = PyTuple_GET_SIZE(po->pool); + if (PyTuple_GET_SIZE(indices) != n || + PyTuple_GET_SIZE(cycles) != po->r) + { + PyErr_SetString(PyExc_ValueError, "invalid 
arguments"); + return NULL; + } + + for (i=0; i n-1) + index = n-1; + po->indices[i] = index; + } + + for (i=0; ir; i++) + { + PyObject* indexObject = PyTuple_GET_ITEM(cycles, i); + Py_ssize_t index = PyLong_AsSsize_t(indexObject); + if (index < 0 && PyErr_Occurred()) + return NULL; /* not an integer */ + if (index < 1) + index = 1; + else if (index > n-i) + index = n-i; + po->cycles[i] = index; + } + result = PyTuple_New(po->r); + if (result == NULL) + return NULL; + for (i=0; ir; i++) { + PyObject *element = PyTuple_GET_ITEM(po->pool, po->indices[i]); + Py_INCREF(element); + PyTuple_SET_ITEM(result, i, element); + } + Py_CLEAR(po->result); + po->result = result; + Py_RETURN_NONE; +} + +static PyMethodDef permuations_methods[] = { + {"__reduce__", (PyCFunction)permutations_reduce, METH_NOARGS, + reduce_doc}, + {"__setstate__", (PyCFunction)permutations_setstate, METH_O, + setstate_doc}, + {NULL, NULL} /* sentinel */ +}; + PyDoc_STRVAR(permutations_doc, "permutations(iterable[, r]) --> permutations object\n\ \n\ @@ -2574,7 +3254,7 @@ 0, /* tp_weaklistoffset */ PyObject_SelfIter, /* tp_iter */ (iternextfunc)permutations_next, /* tp_iternext */ - 0, /* tp_methods */ + permuations_methods, /* tp_methods */ 0, /* tp_members */ 0, /* tp_getset */ 0, /* tp_base */ @@ -2605,7 +3285,7 @@ static char *kwargs[] = {"iterable", "func", NULL}; PyObject *iterable; PyObject *it; - PyObject *binop = NULL; + PyObject *binop = Py_None; accumulateobject *lz; if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|O:accumulate", @@ -2624,8 +3304,10 @@ return NULL; } - Py_XINCREF(binop); - lz->binop = binop; + if (binop != Py_None) { + Py_XINCREF(binop); + lz->binop = binop; + } lz->total = NULL; lz->it = it; return (PyObject *)lz; @@ -2681,6 +3363,31 @@ return newtotal; } +static PyObject * +accumulate_reduce(accumulateobject *lz) +{ + return Py_BuildValue("O(OO)O", Py_TYPE(lz), + lz->it, lz->binop?lz->binop:Py_None, + lz->total?lz->total:Py_None); + } + +static PyObject * +accumulate_setstate(accumulateobject *lz, PyObject *state) +{ + Py_CLEAR(lz->total); + lz->total = state; + Py_INCREF(lz->total); + Py_RETURN_NONE; +} + +static PyMethodDef accumulate_methods[] = { + {"__reduce__", (PyCFunction)accumulate_reduce, METH_NOARGS, + reduce_doc}, + {"__setstate__", (PyCFunction)accumulate_setstate, METH_O, + setstate_doc}, + {NULL, NULL} /* sentinel */ +}; + PyDoc_STRVAR(accumulate_doc, "accumulate(iterable[, func]) --> accumulate object\n\ \n\ @@ -2716,7 +3423,7 @@ 0, /* tp_weaklistoffset */ PyObject_SelfIter, /* tp_iter */ (iternextfunc)accumulate_next, /* tp_iternext */ - 0, /* tp_methods */ + accumulate_methods, /* tp_methods */ 0, /* tp_members */ 0, /* tp_getset */ 0, /* tp_base */ @@ -2833,6 +3540,19 @@ } } +static PyObject * +compress_reduce(compressobject *lz) +{ + return Py_BuildValue("O(OO)", Py_TYPE(lz), + lz->data, lz->selectors); + } + +static PyMethodDef compress_methods[] = { + {"__reduce__", (PyCFunction)compress_reduce, METH_NOARGS, + reduce_doc}, + {NULL, NULL} /* sentinel */ +}; + PyDoc_STRVAR(compress_doc, "compress(data, selectors) --> iterator over selected data\n\ \n\ @@ -2870,7 +3590,7 @@ 0, /* tp_weaklistoffset */ PyObject_SelfIter, /* tp_iter */ (iternextfunc)compress_next, /* tp_iternext */ - 0, /* tp_methods */ + compress_methods, /* tp_methods */ 0, /* tp_members */ 0, /* tp_getset */ 0, /* tp_base */ @@ -2977,6 +3697,19 @@ } } +static PyObject * +filterfalse_reduce(filterfalseobject *lz) +{ + return Py_BuildValue("O(OO)", Py_TYPE(lz), + lz->func, lz->it); + } + +static PyMethodDef 
filterfalse_methods[] = { + {"__reduce__", (PyCFunction)filterfalse_reduce, METH_NOARGS, + reduce_doc}, + {NULL, NULL} /* sentinel */ +}; + PyDoc_STRVAR(filterfalse_doc, "filterfalse(function or None, sequence) --> filterfalse object\n\ \n\ @@ -3013,7 +3746,7 @@ 0, /* tp_weaklistoffset */ PyObject_SelfIter, /* tp_iter */ (iternextfunc)filterfalse_next, /* tp_iternext */ - 0, /* tp_methods */ + filterfalse_methods, /* tp_methods */ 0, /* tp_members */ 0, /* tp_getset */ 0, /* tp_base */ @@ -3207,11 +3940,9 @@ return Py_BuildValue("O(n)", Py_TYPE(lz), lz->cnt); } -PyDoc_STRVAR(count_reduce_doc, "Return state information for pickling."); - static PyMethodDef count_methods[] = { {"__reduce__", (PyCFunction)count_reduce, METH_NOARGS, - count_reduce_doc}, + reduce_doc}, {NULL, NULL} /* sentinel */ }; @@ -3352,8 +4083,21 @@ PyDoc_STRVAR(length_hint_doc, "Private method returning an estimate of len(list(it))."); +static PyObject * +repeat_reduce(repeatobject *ro) +{ + /* unpickle this so that a new repeat iterator is constructed with an + * object, then call __setstate__ on it to set cnt + */ + if (ro->cnt >= 0) + return Py_BuildValue("O(On)", Py_TYPE(ro), ro->element, ro->cnt); + else + return Py_BuildValue("O(O)", Py_TYPE(ro), ro->element); +} + static PyMethodDef repeat_methods[] = { {"__length_hint__", (PyCFunction)repeat_len, METH_NOARGS, length_hint_doc}, + {"__reduce__", (PyCFunction)repeat_reduce, METH_NOARGS, reduce_doc}, {NULL, NULL} /* sentinel */ }; @@ -3579,6 +4323,49 @@ return result; } +static PyObject * +zip_longest_reduce(ziplongestobject *lz) +{ + + /* Create a new tuple with empty sequences where appropriate to pickle. + * Then use setstate to set the fillvalue + */ + int i; + PyObject *args = PyTuple_New(PyTuple_GET_SIZE(lz->ittuple)); + if (args == NULL) + return NULL; + for (i=0; iittuple); i++) { + PyObject *elem = PyTuple_GET_ITEM(lz->ittuple, i); + if (elem == NULL) { + elem = PyTuple_New(0); + if (elem == NULL) { + Py_DECREF(args); + return NULL; + } + } else + Py_INCREF(elem); + PyTuple_SET_ITEM(args, i, elem); + } + return Py_BuildValue("ONO", Py_TYPE(lz), args, lz->fillvalue); +} + +static PyObject * +zip_longest_setstate(ziplongestobject *lz, PyObject *state) +{ + Py_CLEAR(lz->fillvalue); + lz->fillvalue = state; + Py_INCREF(lz->fillvalue); + Py_RETURN_NONE; +} + +static PyMethodDef zip_longest_methods[] = { + {"__reduce__", (PyCFunction)zip_longest_reduce, METH_NOARGS, + reduce_doc}, + {"__setstate__", (PyCFunction)zip_longest_setstate, METH_O, + setstate_doc}, + {NULL, NULL} /* sentinel */ +}; + PyDoc_STRVAR(zip_longest_doc, "zip_longest(iter1 [,iter2 [...]], [fillvalue=None]) --> zip_longest object\n\ \n\ @@ -3620,7 +4407,7 @@ 0, /* tp_weaklistoffset */ PyObject_SelfIter, /* tp_iter */ (iternextfunc)zip_longest_next, /* tp_iternext */ - 0, /* tp_methods */ + zip_longest_methods, /* tp_methods */ 0, /* tp_members */ 0, /* tp_getset */ 0, /* tp_base */ @@ -3708,6 +4495,9 @@ &product_type, &repeat_type, &groupby_type, + &_grouper_type, + &tee_type, + &teedataobject_type, NULL }; @@ -3725,11 +4515,5 @@ PyModule_AddObject(m, name+1, (PyObject *)typelist[i]); } - if (PyType_Ready(&teedataobject_type) < 0) - return NULL; - if (PyType_Ready(&tee_type) < 0) - return NULL; - if (PyType_Ready(&_grouper_type) < 0) - return NULL; return m; } diff --git a/Objects/bytearrayobject.c b/Objects/bytearrayobject.c --- a/Objects/bytearrayobject.c +++ b/Objects/bytearrayobject.c @@ -3003,7 +3003,7 @@ } static PyObject * -bytesarrayiter_length_hint(bytesiterobject *it) 
+bytearrayiter_length_hint(bytesiterobject *it) { Py_ssize_t len = 0; if (it->it_seq) @@ -3014,9 +3014,41 @@ PyDoc_STRVAR(length_hint_doc, "Private method returning an estimate of len(list(it))."); +static PyObject * +bytearrayiter_reduce(bytesiterobject *it) +{ + if (it->it_seq != NULL) { + return Py_BuildValue("N(O)n", _PyIter_GetBuiltin("iter"), + it->it_seq, it->it_index); + } else { + PyObject *u = PyUnicode_FromUnicode(NULL, 0); + if (u == NULL) + return NULL; + return Py_BuildValue("N(N)", _PyIter_GetBuiltin("iter"), u); + } +} + +static PyObject * +bytearrayiter_setstate(bytesiterobject *it, PyObject *state) +{ + Py_ssize_t index = PyLong_AsSsize_t(state); + if (index == -1 && PyErr_Occurred()) + return NULL; + if (index < 0) + index = 0; + it->it_index = index; + Py_RETURN_NONE; +} + +PyDoc_STRVAR(setstate_doc, "Set state information for unpickling."); + static PyMethodDef bytearrayiter_methods[] = { - {"__length_hint__", (PyCFunction)bytesarrayiter_length_hint, METH_NOARGS, + {"__length_hint__", (PyCFunction)bytearrayiter_length_hint, METH_NOARGS, length_hint_doc}, + {"__reduce__", (PyCFunction)bytearrayiter_reduce, METH_NOARGS, + reduce_doc}, + {"__setstate__", (PyCFunction)bytearrayiter_setstate, METH_O, + setstate_doc}, {NULL, NULL} /* sentinel */ }; diff --git a/Objects/bytesobject.c b/Objects/bytesobject.c --- a/Objects/bytesobject.c +++ b/Objects/bytesobject.c @@ -3074,9 +3074,43 @@ PyDoc_STRVAR(length_hint_doc, "Private method returning an estimate of len(list(it))."); +static PyObject * +striter_reduce(striterobject *it) +{ + if (it->it_seq != NULL) { + return Py_BuildValue("N(O)n", _PyIter_GetBuiltin("iter"), + it->it_seq, it->it_index); + } else { + PyObject *u = PyUnicode_FromUnicode(NULL, 0); + if (u == NULL) + return NULL; + return Py_BuildValue("N(N)", _PyIter_GetBuiltin("iter"), u); + } +} + +PyDoc_STRVAR(reduce_doc, "Return state information for pickling."); + +static PyObject * +striter_setstate(striterobject *it, PyObject *state) +{ + Py_ssize_t index = PyLong_AsSsize_t(state); + if (index == -1 && PyErr_Occurred()) + return NULL; + if (index < 0) + index = 0; + it->it_index = index; + Py_RETURN_NONE; +} + +PyDoc_STRVAR(setstate_doc, "Set state information for unpickling."); + static PyMethodDef striter_methods[] = { {"__length_hint__", (PyCFunction)striter_len, METH_NOARGS, length_hint_doc}, + {"__reduce__", (PyCFunction)striter_reduce, METH_NOARGS, + reduce_doc}, + {"__setstate__", (PyCFunction)striter_setstate, METH_O, + setstate_doc}, {NULL, NULL} /* sentinel */ }; diff --git a/Objects/dictobject.c b/Objects/dictobject.c --- a/Objects/dictobject.c +++ b/Objects/dictobject.c @@ -2323,9 +2323,16 @@ PyDoc_STRVAR(length_hint_doc, "Private method returning an estimate of len(list(it))."); +static PyObject * +dictiter_reduce(dictiterobject *di); + +PyDoc_STRVAR(reduce_doc, "Return state information for pickling."); + static PyMethodDef dictiter_methods[] = { {"__length_hint__", (PyCFunction)dictiter_len, METH_NOARGS, length_hint_doc}, + {"__reduce__", (PyCFunction)dictiter_reduce, METH_NOARGS, + reduce_doc}, {NULL, NULL} /* sentinel */ }; @@ -2560,6 +2567,52 @@ }; +static PyObject * +dictiter_reduce(dictiterobject *di) +{ + PyObject *list; + dictiterobject tmp; + + list = PyList_New(0); + if (!list) + return NULL; + + /* copy the itertor state */ + tmp = *di; + Py_XINCREF(tmp.di_dict); + + /* iterate the temporary into a list */ + for(;;) { + PyObject *element = 0; + if (Py_TYPE(di) == &PyDictIterItem_Type) + element = dictiter_iternextitem(&tmp); + else if 
(Py_TYPE(di) == &PyDictIterKey_Type) + element = dictiter_iternextkey(&tmp); + else if (Py_TYPE(di) == &PyDictIterValue_Type) + element = dictiter_iternextvalue(&tmp); + else + assert(0); + if (element) { + if (PyList_Append(list, element)) { + Py_DECREF(element); + Py_DECREF(list); + Py_XDECREF(tmp.di_dict); + return NULL; + } + Py_DECREF(element); + } else + break; + } + Py_XDECREF(tmp.di_dict); + /* check for error */ + if (tmp.di_dict != NULL) { + /* we have an error */ + Py_DECREF(list); + return NULL; + } + return Py_BuildValue("N(N)", _PyIter_GetBuiltin("iter"), list); +} + /***********************************************/ /* View objects for keys(), items(), values(). */ /***********************************************/ diff --git a/Objects/enumobject.c b/Objects/enumobject.c --- a/Objects/enumobject.c +++ b/Objects/enumobject.c @@ -158,6 +158,22 @@ return result; } +static PyObject * +enum_reduce(enumobject *en) +{ + if (en->en_longindex != NULL) + return Py_BuildValue("O(OO)", Py_TYPE(en), en->en_sit, en->en_longindex); + else + return Py_BuildValue("O(On)", Py_TYPE(en), en->en_sit, en->en_index); +} + +PyDoc_STRVAR(reduce_doc, "Return state information for pickling."); + +static PyMethodDef enum_methods[] = { + {"__reduce__", (PyCFunction)enum_reduce, METH_NOARGS, reduce_doc}, + {NULL, NULL} /* sentinel */ +}; + PyDoc_STRVAR(enum_doc, "enumerate(iterable[, start]) -> iterator for index, value of iterable\n" "\n" @@ -197,7 +213,7 @@ 0, /* tp_weaklistoffset */ PyObject_SelfIter, /* tp_iter */ (iternextfunc)enum_next, /* tp_iternext */ - 0, /* tp_methods */ + enum_methods, /* tp_methods */ 0, /* tp_members */ 0, /* tp_getset */ 0, /* tp_base */ @@ -319,8 +335,40 @@ PyDoc_STRVAR(length_hint_doc, "Private method returning an estimate of len(list(it))."); +static PyObject * +reversed_reduce(reversedobject *ro) +{ + if (ro->seq) + return Py_BuildValue("O(O)n", Py_TYPE(ro), ro->seq, ro->index); + else + return Py_BuildValue("O(())", Py_TYPE(ro)); +} + +static PyObject * +reversed_setstate(reversedobject *ro, PyObject *state) +{ + Py_ssize_t index = PyLong_AsSsize_t(state); + if (index == -1 && PyErr_Occurred()) + return NULL; + if (ro->seq != 0) { + Py_ssize_t n = PySequence_Size(ro->seq); + if (n < 0) + return NULL; + if (index < -1) + index = -1; + else if (index > n-1) + index = n-1; + ro->index = index; + } + Py_RETURN_NONE; +} + +PyDoc_STRVAR(setstate_doc, "Set state information for unpickling."); + static PyMethodDef reversediter_methods[] = { {"__length_hint__", (PyCFunction)reversed_len, METH_NOARGS, length_hint_doc}, + {"__reduce__", (PyCFunction)reversed_reduce, METH_NOARGS, reduce_doc}, + {"__setstate__", (PyCFunction)reversed_setstate, METH_O, setstate_doc}, {NULL, NULL} /* sentinel */ }; diff --git a/Objects/iterobject.c b/Objects/iterobject.c --- a/Objects/iterobject.c +++ b/Objects/iterobject.c @@ -2,6 +2,19 @@ #include "Python.h" +/* Convenience function to get builtins.iter or builtins.reversed */ +PyObject * +_PyIter_GetBuiltin(const char *iter) +{ + PyObject *mod, *attr; + mod = PyImport_ImportModule("builtins"); + if (mod == NULL) + return NULL; + attr = PyObject_GetAttrString(mod, iter); + Py_DECREF(mod); + return attr; +} + typedef struct { PyObject_HEAD long it_index; @@ -88,8 +101,38 @@ PyDoc_STRVAR(length_hint_doc, "Private method returning an estimate of len(list(it))."); +static PyObject * +iter_reduce(seqiterobject *it) +{ + if (it->it_seq != NULL) + return Py_BuildValue("N(O)n", _PyIter_GetBuiltin("iter"), + it->it_seq, it->it_index); + else + return 
Py_BuildValue("N(())", _PyIter_GetBuiltin("iter")); +} + +PyDoc_STRVAR(reduce_doc, "Return state information for pickling."); + +static PyObject * +iter_setstate(seqiterobject *it, PyObject *state) +{ + Py_ssize_t index = PyLong_AsSsize_t(state); + if (index == -1 && PyErr_Occurred()) + return NULL; + if (it->it_seq != NULL) { + if (index < 0) + index = 0; + it->it_index = index; + } + Py_RETURN_NONE; +} + +PyDoc_STRVAR(setstate_doc, "Set state information for unpickling."); + static PyMethodDef seqiter_methods[] = { {"__length_hint__", (PyCFunction)iter_len, METH_NOARGS, length_hint_doc}, + {"__reduce__", (PyCFunction)iter_reduce, METH_NOARGS, reduce_doc}, + {"__setstate__", (PyCFunction)iter_setstate, METH_O, setstate_doc}, {NULL, NULL} /* sentinel */ }; @@ -195,6 +238,21 @@ return NULL; } +static PyObject * +calliter_reduce(calliterobject *it) +{ + if (it->it_callable != NULL && it->it_sentinel != NULL) + return Py_BuildValue("N(OO)", _PyIter_GetBuiltin("iter"), + it->it_callable, it->it_sentinel); + else + return Py_BuildValue("N(())", _PyIter_GetBuiltin("iter")); +} + +static PyMethodDef calliter_methods[] = { + {"__reduce__", (PyCFunction)calliter_reduce, METH_NOARGS, reduce_doc}, + {NULL, NULL} /* sentinel */ +}; + PyTypeObject PyCallIter_Type = { PyVarObject_HEAD_INIT(&PyType_Type, 0) "callable_iterator", /* tp_name */ @@ -224,7 +282,7 @@ 0, /* tp_weaklistoffset */ PyObject_SelfIter, /* tp_iter */ (iternextfunc)calliter_iternext, /* tp_iternext */ - 0, /* tp_methods */ + calliter_methods, /* tp_methods */ }; diff --git a/Objects/listobject.c b/Objects/listobject.c --- a/Objects/listobject.c +++ b/Objects/listobject.c @@ -2660,11 +2660,18 @@ static int listiter_traverse(listiterobject *, visitproc, void *); static PyObject *listiter_next(listiterobject *); static PyObject *listiter_len(listiterobject *); +static PyObject *listiter_reduce_general(void *_it, int forward); +static PyObject *listiter_reduce(listiterobject *); +static PyObject *listiter_setstate(listiterobject *, PyObject *state); PyDoc_STRVAR(length_hint_doc, "Private method returning an estimate of len(list(it))."); +PyDoc_STRVAR(reduce_doc, "Return state information for pickling."); +PyDoc_STRVAR(setstate_doc, "Set state information for unpickling."); static PyMethodDef listiter_methods[] = { {"__length_hint__", (PyCFunction)listiter_len, METH_NOARGS, length_hint_doc}, + {"__reduce__", (PyCFunction)listiter_reduce, METH_NOARGS, reduce_doc}, + {"__setstate__", (PyCFunction)listiter_setstate, METH_O, setstate_doc}, {NULL, NULL} /* sentinel */ }; @@ -2771,6 +2778,27 @@ } return PyLong_FromLong(0); } + +static PyObject * +listiter_reduce(listiterobject *it) +{ + return listiter_reduce_general(it, 1); +} + +static PyObject * +listiter_setstate(listiterobject *it, PyObject *state) +{ + long index = PyLong_AsLong(state); + if (index == -1 && PyErr_Occurred()) + return NULL; + if (it->it_seq != NULL) { + if (index < 0) + index = 0; + it->it_index = index; + } + Py_RETURN_NONE; +} + /*********************** List Reverse Iterator **************************/ typedef struct { @@ -2784,9 +2812,13 @@ static int listreviter_traverse(listreviterobject *, visitproc, void *); static PyObject *listreviter_next(listreviterobject *); static PyObject *listreviter_len(listreviterobject *); +static PyObject *listreviter_reduce(listreviterobject *); +static PyObject *listreviter_setstate(listreviterobject *, PyObject *); static PyMethodDef listreviter_methods[] = { {"__length_hint__", (PyCFunction)listreviter_len, METH_NOARGS, 
length_hint_doc}, + {"__reduce__", (PyCFunction)listreviter_reduce, METH_NOARGS, reduce_doc}, + {"__setstate__", (PyCFunction)listreviter_setstate, METH_O, setstate_doc}, {NULL, NULL} /* sentinel */ }; @@ -2883,3 +2915,51 @@ len = 0; return PyLong_FromSsize_t(len); } + +static PyObject * +listreviter_reduce(listreviterobject *it) +{ + return listiter_reduce_general(it, 0); +} + +static PyObject * +listreviter_setstate(listreviterobject *it, PyObject *state) +{ + Py_ssize_t index = PyLong_AsSsize_t(state); + if (index == -1 && PyErr_Occurred()) + return NULL; + if (it->it_seq != NULL) { + if (index < -1) + index = -1; + else if (index > PyList_GET_SIZE(it->it_seq) - 1) + index = PyList_GET_SIZE(it->it_seq) - 1; + it->it_index = index; + } + Py_RETURN_NONE; +} + +/* common pickling support */ + +static PyObject * +listiter_reduce_general(void *_it, int forward) +{ + PyObject *list; + + /* the objects are not the same, index is of different types! */ + if (forward) { + listiterobject *it = (listiterobject *)_it; + if (it->it_seq) + return Py_BuildValue("N(O)l", _PyIter_GetBuiltin("iter"), + it->it_seq, it->it_index); + } else { + listreviterobject *it = (listreviterobject *)_it; + if (it->it_seq) + return Py_BuildValue("N(O)n", _PyIter_GetBuiltin("reversed"), + it->it_seq, it->it_index); + } + /* empty iterator, create an empty list */ + list = PyList_New(0); + if (list == NULL) + return NULL; + return Py_BuildValue("N(N)", _PyIter_GetBuiltin("iter"), list); +} diff --git a/Objects/rangeobject.c b/Objects/rangeobject.c --- a/Objects/rangeobject.c +++ b/Objects/rangeobject.c @@ -964,9 +964,59 @@ PyDoc_STRVAR(length_hint_doc, "Private method returning an estimate of len(list(it))."); +static PyObject * +rangeiter_reduce(rangeiterobject *r) +{ + PyObject *start=NULL, *stop=NULL, *step=NULL; + PyObject *range; + + /* create a range object for pickling */ + start = PyLong_FromLong(r->start); + if (start == NULL) + goto err; + stop = PyLong_FromLong(r->start + r->len * r->step); + if (stop == NULL) + goto err; + step = PyLong_FromLong(r->step); + if (step == NULL) + goto err; + range = (PyObject*)make_range_object(&PyRange_Type, + start, stop, step); + if (range == NULL) + goto err; + /* return the result */ + return Py_BuildValue("N(N)i", _PyIter_GetBuiltin("iter"), range, r->index); +err: + Py_XDECREF(start); + Py_XDECREF(stop); + Py_XDECREF(step); + return NULL; +} + +static PyObject * +rangeiter_setstate(rangeiterobject *r, PyObject *state) +{ + long index = PyLong_AsLong(state); + if (index == -1 && PyErr_Occurred()) + return NULL; + if (index < 0 || index >= r->len) { + PyErr_SetString(PyExc_ValueError, "index out of range"); + return NULL; + } + r->index = index; + Py_RETURN_NONE; +} + +PyDoc_STRVAR(reduce_doc, "Return state information for pickling."); +PyDoc_STRVAR(setstate_doc, "Set state information for unpickling."); + static PyMethodDef rangeiter_methods[] = { {"__length_hint__", (PyCFunction)rangeiter_len, METH_NOARGS, length_hint_doc}, + {"__reduce__", (PyCFunction)rangeiter_reduce, METH_NOARGS, + reduce_doc}, + {"__setstate__", (PyCFunction)rangeiter_setstate, METH_O, + setstate_doc}, {NULL, NULL} /* sentinel */ }; @@ -1095,9 +1145,51 @@ return PyNumber_Subtract(r->len, r->index); } +static PyObject * +longrangeiter_reduce(longrangeiterobject *r) +{ + PyObject *product, *stop=NULL; + PyObject *range; + + /* create a range object for pickling. 
Must calculate the "stop" value */ + product = PyNumber_Multiply(r->len, r->step); + if (product == NULL) + return NULL; + stop = PyNumber_Add(r->start, product); + Py_DECREF(product); + if (stop == NULL) + return NULL; + Py_INCREF(r->start); + Py_INCREF(r->step); + range = (PyObject*)make_range_object(&PyRange_Type, + r->start, stop, r->step); + if (range == NULL) { + Py_DECREF(r->start); + Py_DECREF(stop); + Py_DECREF(r->step); + return NULL; + } + + /* return the result */ + return Py_BuildValue("N(N)O", _PyIter_GetBuiltin("iter"), range, r->index); +} + +static PyObject * +longrangeiter_setstate(longrangeiterobject *r, PyObject *state) +{ + Py_CLEAR(r->index); + r->index = state; + Py_INCREF(r->index); + Py_RETURN_NONE; +} + static PyMethodDef longrangeiter_methods[] = { {"__length_hint__", (PyCFunction)longrangeiter_len, METH_NOARGS, length_hint_doc}, + {"__reduce__", (PyCFunction)longrangeiter_reduce, METH_NOARGS, + reduce_doc}, + {"__setstate__", (PyCFunction)longrangeiter_setstate, METH_O, + setstate_doc}, {NULL, NULL} /* sentinel */ }; diff --git a/Objects/setobject.c b/Objects/setobject.c --- a/Objects/setobject.c +++ b/Objects/setobject.c @@ -819,8 +819,51 @@ PyDoc_STRVAR(length_hint_doc, "Private method returning an estimate of len(list(it))."); +static PyObject *setiter_iternext(setiterobject *si); + +static PyObject * +setiter_reduce(setiterobject *si) +{ + PyObject *list; + setiterobject tmp; + + list = PyList_New(0); + if (!list) + return NULL; + + /* copy the itertor state */ + tmp = *si; + Py_XINCREF(tmp.si_set); + + /* iterate the temporary into a list */ + for(;;) { + PyObject *element = setiter_iternext(&tmp); + if (element) { + if (PyList_Append(list, element)) { + Py_DECREF(element); + Py_DECREF(list); + Py_XDECREF(tmp.si_set); + return NULL; + } + Py_DECREF(element); + } else + break; + } + Py_XDECREF(tmp.si_set); + /* check for error */ + if (tmp.si_set != NULL) { + /* we have an error */ + Py_DECREF(list); + return NULL; + } + return Py_BuildValue("N(N)", _PyIter_GetBuiltin("iter"), list); +} + +PyDoc_STRVAR(reduce_doc, "Return state information for pickling."); + static PyMethodDef setiter_methods[] = { {"__length_hint__", (PyCFunction)setiter_len, METH_NOARGS, length_hint_doc}, + {"__reduce__", (PyCFunction)setiter_reduce, METH_NOARGS, reduce_doc}, {NULL, NULL} /* sentinel */ }; @@ -1964,8 +2007,6 @@ return result; } -PyDoc_STRVAR(reduce_doc, "Return state information for pickling."); - static PyObject * set_sizeof(PySetObject *so) { diff --git a/Objects/tupleobject.c b/Objects/tupleobject.c --- a/Objects/tupleobject.c +++ b/Objects/tupleobject.c @@ -967,8 +967,39 @@ PyDoc_STRVAR(length_hint_doc, "Private method returning an estimate of len(list(it))."); +static PyObject * +tupleiter_reduce(tupleiterobject *it) +{ + if (it->it_seq) + return Py_BuildValue("N(O)l", _PyIter_GetBuiltin("iter"), + it->it_seq, it->it_index); + else + return Py_BuildValue("N(())", _PyIter_GetBuiltin("iter")); +} + +static PyObject * +tupleiter_setstate(tupleiterobject *it, PyObject *state) +{ + long index = PyLong_AsLong(state); + if (index == -1 && PyErr_Occurred()) + return NULL; + if (it->it_seq != NULL) { + if (index < 0) + index = 0; + else if (it->it_seq != NULL && index > PyTuple_GET_SIZE(it->it_seq)) + index = PyTuple_GET_SIZE(it->it_seq); + it->it_index = index; + } + Py_RETURN_NONE; +} + +PyDoc_STRVAR(reduce_doc, "Return state information for pickling."); +PyDoc_STRVAR(setstate_doc, "Set state information for unpickling."); + static PyMethodDef tupleiter_methods[] = { 
{"__length_hint__", (PyCFunction)tupleiter_len, METH_NOARGS, length_hint_doc}, + {"__reduce__", (PyCFunction)tupleiter_reduce, METH_NOARGS, reduce_doc}, + {"__setstate__", (PyCFunction)tupleiter_setstate, METH_O, setstate_doc}, {NULL, NULL} /* sentinel */ }; diff --git a/Objects/unicodeobject.c b/Objects/unicodeobject.c --- a/Objects/unicodeobject.c +++ b/Objects/unicodeobject.c @@ -14382,9 +14382,43 @@ PyDoc_STRVAR(length_hint_doc, "Private method returning an estimate of len(list(it))."); +static PyObject * +unicodeiter_reduce(unicodeiterobject *it) +{ + if (it->it_seq != NULL) { + return Py_BuildValue("N(O)n", _PyIter_GetBuiltin("iter"), + it->it_seq, it->it_index); + } else { + PyObject *u = PyUnicode_FromUnicode(NULL, 0); + if (u == NULL) + return NULL; + return Py_BuildValue("N(N)", _PyIter_GetBuiltin("iter"), u); + } +} + +PyDoc_STRVAR(reduce_doc, "Return state information for pickling."); + +static PyObject * +unicodeiter_setstate(unicodeiterobject *it, PyObject *state) +{ + Py_ssize_t index = PyLong_AsSsize_t(state); + if (index == -1 && PyErr_Occurred()) + return NULL; + if (index < 0) + index = 0; + it->it_index = index; + Py_RETURN_NONE; +} + +PyDoc_STRVAR(setstate_doc, "Set state information for unpickling."); + static PyMethodDef unicodeiter_methods[] = { {"__length_hint__", (PyCFunction)unicodeiter_len, METH_NOARGS, length_hint_doc}, + {"__reduce__", (PyCFunction)unicodeiter_reduce, METH_NOARGS, + reduce_doc}, + {"__setstate__", (PyCFunction)unicodeiter_setstate, METH_O, + setstate_doc}, {NULL, NULL} /* sentinel */ }; diff --git a/Python/bltinmodule.c b/Python/bltinmodule.c --- a/Python/bltinmodule.c +++ b/Python/bltinmodule.c @@ -438,6 +438,19 @@ } } +static PyObject * +filter_reduce(filterobject *lz) +{ + return Py_BuildValue("O(OO)", Py_TYPE(lz), lz->func, lz->it); +} + +PyDoc_STRVAR(reduce_doc, "Return state information for pickling."); + +static PyMethodDef filter_methods[] = { + {"__reduce__", (PyCFunction)filter_reduce, METH_NOARGS, reduce_doc}, + {NULL, NULL} /* sentinel */ +}; + PyDoc_STRVAR(filter_doc, "filter(function or None, iterable) --> filter object\n\ \n\ @@ -474,7 +487,7 @@ 0, /* tp_weaklistoffset */ PyObject_SelfIter, /* tp_iter */ (iternextfunc)filter_next, /* tp_iternext */ - 0, /* tp_methods */ + filter_methods, /* tp_methods */ 0, /* tp_members */ 0, /* tp_getset */ 0, /* tp_base */ @@ -1054,6 +1067,31 @@ return result; } +static PyObject * +map_reduce(mapobject *lz) +{ + Py_ssize_t numargs = PyTuple_GET_SIZE(lz->iters); + PyObject *args = PyTuple_New(numargs+1); + Py_ssize_t i; + if (args == NULL) + return NULL; + Py_INCREF(lz->func); + PyTuple_SET_ITEM(args, 0, lz->func); + for (i = 0; iiters, i); + Py_INCREF(it); + PyTuple_SET_ITEM(args, i+1, it); + } + + return Py_BuildValue("ON", Py_TYPE(lz), args); +} + +static PyMethodDef map_methods[] = { + {"__reduce__", (PyCFunction)map_reduce, METH_NOARGS, reduce_doc}, + {NULL, NULL} /* sentinel */ +}; + + PyDoc_STRVAR(map_doc, "map(func, *iterables) --> map object\n\ \n\ @@ -1090,7 +1128,7 @@ 0, /* tp_weaklistoffset */ PyObject_SelfIter, /* tp_iter */ (iternextfunc)map_next, /* tp_iternext */ - 0, /* tp_methods */ + map_methods, /* tp_methods */ 0, /* tp_members */ 0, /* tp_getset */ 0, /* tp_base */ @@ -2238,6 +2276,18 @@ return result; } +static PyObject * +zip_reduce(zipobject *lz) +{ + /* Just recreate the zip with the internal iterator tuple */ + return Py_BuildValue("OO", Py_TYPE(lz), lz->ittuple); +} + +static PyMethodDef zip_methods[] = { + {"__reduce__", (PyCFunction)zip_reduce, METH_NOARGS, 
reduce_doc}, + {NULL, NULL} /* sentinel */ +}; + PyDoc_STRVAR(zip_doc, "zip(iter1 [,iter2 [...]]) --> zip object\n\ \n\ @@ -2276,7 +2326,7 @@ 0, /* tp_weaklistoffset */ PyObject_SelfIter, /* tp_iter */ (iternextfunc)zip_next, /* tp_iternext */ - 0, /* tp_methods */ + zip_methods, /* tp_methods */ 0, /* tp_members */ 0, /* tp_getset */ 0, /* tp_base */ -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Tue Apr 3 13:12:05 2012 From: python-checkins at python.org (kristjan.jonsson) Date: Tue, 03 Apr 2012 13:12:05 +0200 Subject: [Python-checkins] =?utf8?q?cpython=3A_Issue_=2314288=3A_Modify_Mi?= =?utf8?q?sc/NEWS?= Message-ID: http://hg.python.org/cpython/rev/51c88d51aa4a changeset: 76092:51c88d51aa4a user: Kristj?n Valur J?nsson date: Tue Apr 03 10:59:26 2012 +0000 summary: Issue #14288: Modify Misc/NEWS files: Misc/NEWS | 2 ++ 1 files changed, 2 insertions(+), 0 deletions(-) diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -60,6 +60,8 @@ - Issue #14471: Fix a possible buffer overrun in the winreg module. +- Issue #14288: Allow the serialization of builtin iterators + Library ------- -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Tue Apr 3 14:00:18 2012 From: python-checkins at python.org (victor.stinner) Date: Tue, 03 Apr 2012 14:00:18 +0200 Subject: [Python-checkins] =?utf8?q?peps=3A_PEP_418=3A_Mention_CLOCK=5F*?= =?utf8?q?=5FCOARSE_and_Tools/pybench/systimes=2Epy?= Message-ID: http://hg.python.org/peps/rev/9a455a87ffac changeset: 4193:9a455a87ffac user: Victor Stinner date: Tue Apr 03 14:00:16 2012 +0200 summary: PEP 418: Mention CLOCK_*_COARSE and Tools/pybench/systimes.py files: pep-0418.txt | 16 ++++++++++++++++ 1 files changed, 16 insertions(+), 0 deletions(-) diff --git a/pep-0418.txt b/pep-0418.txt --- a/pep-0418.txt +++ b/pep-0418.txt @@ -383,6 +383,12 @@ clock_gettime() requires to link the program against the rt (real-time) library. +.. note:: + + Linux provides also CLOCK_MONOTONIC_COARSE since Linux 2.6.32 which has less + accurate than CLOCK_MONOTONIC but is faster. + + Windows: QueryPerformanceCounter ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -554,6 +560,11 @@ It looks like Linux does not implement clock_getres() and always return 1 nanosecond. +.. note:: + + Linux provides also CLOCK_REALTIME_COARSE since Linux 2.6.32 which has less + accurate than CLOCK_REALTIME but is faster. + Windows: GetSystemTimeAsFileTime ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -621,6 +632,11 @@ always return 1 nanosecond. For GetProcessTimes(), the accuracy is read using GetSystemTimeAdjustment(). +Python source code includes a portable library to get the process time: +`Tools/pybench/systimes.py +`_. + + Functions ^^^^^^^^^ -- Repository URL: http://hg.python.org/peps From python-checkins at python.org Tue Apr 3 14:47:31 2012 From: python-checkins at python.org (r.david.murray) Date: Tue, 03 Apr 2012 14:47:31 +0200 Subject: [Python-checkins] =?utf8?b?Y3B5dGhvbiAoMi43KTogIzE0NDgxOiBmaXgg?= =?utf8?q?formatting_of_example_in_subprocess_docs=2E?= Message-ID: http://hg.python.org/cpython/rev/2c1ce04ded55 changeset: 76093:2c1ce04ded55 branch: 2.7 parent: 76083:013766e7a6eb user: R David Murray date: Tue Apr 03 08:46:05 2012 -0400 summary: #14481: fix formatting of example in subprocess docs. 
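An aside on the Misc/NEWS entry above for issue #14288, "Allow the serialization of builtin iterators": the __reduce__/__setstate__ methods added in the large C diff earlier in this digest make pickle round-trips of plain iterators possible. A minimal sketch of the user-visible effect, assuming an interpreter that already contains the change (the work targets 3.3)::

    import itertools, pickle

    # A plain list iterator survives a pickle round-trip and resumes
    # where it left off.
    it = iter([1, 2, 3, 4])
    next(it)                               # consume one item
    copy = pickle.loads(pickle.dumps(it))
    assert list(copy) == [2, 3, 4]

    # itertools objects gain the same support via __reduce__/__setstate__.
    perms = itertools.permutations('abc', 2)
    next(perms)                            # yields ('a', 'b')
    resumed = pickle.loads(pickle.dumps(perms))
    assert next(resumed) == ('a', 'c')

The same pattern applies to the other iterators touched by the patch, for example enumerate(), reversed(), zip(), map() and filter() objects.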
files: Doc/library/subprocess.rst | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/Doc/library/subprocess.rst b/Doc/library/subprocess.rst --- a/Doc/library/subprocess.rst +++ b/Doc/library/subprocess.rst @@ -685,7 +685,7 @@ to receive a SIGPIPE if p2 exits before p1. Alternatively, for trusted input, the shell's own pipeline support may still -be used directly: +be used directly:: output=`dmesg | grep hda` # becomes -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Tue Apr 3 14:47:33 2012 From: python-checkins at python.org (r.david.murray) Date: Tue, 03 Apr 2012 14:47:33 +0200 Subject: [Python-checkins] =?utf8?b?Y3B5dGhvbiAoMy4yKTogIzE0NDgxOiBmaXgg?= =?utf8?q?formatting_of_example_in_subprocess_docs=2E?= Message-ID: http://hg.python.org/cpython/rev/e5f5652bfe91 changeset: 76094:e5f5652bfe91 branch: 3.2 parent: 76084:43606a4085b0 user: R David Murray date: Tue Apr 03 08:46:48 2012 -0400 summary: #14481: fix formatting of example in subprocess docs. files: Doc/library/subprocess.rst | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/Doc/library/subprocess.rst b/Doc/library/subprocess.rst --- a/Doc/library/subprocess.rst +++ b/Doc/library/subprocess.rst @@ -735,7 +735,7 @@ to receive a SIGPIPE if p2 exits before p1. Alternatively, for trusted input, the shell's own pipeline support may still -be used directly: +be used directly:: output=`dmesg | grep hda` # becomes -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Tue Apr 3 14:47:40 2012 From: python-checkins at python.org (r.david.murray) Date: Tue, 03 Apr 2012 14:47:40 +0200 Subject: [Python-checkins] =?utf8?q?cpython_=28merge_3=2E2_-=3E_default=29?= =?utf8?q?=3A_Merge_=2314481=3A_fix_formatting_of_example_in_subprocess_do?= =?utf8?b?Y3Mu?= Message-ID: http://hg.python.org/cpython/rev/9599f091faa6 changeset: 76095:9599f091faa6 parent: 76092:51c88d51aa4a parent: 76094:e5f5652bfe91 user: R David Murray date: Tue Apr 03 08:47:14 2012 -0400 summary: Merge #14481: fix formatting of example in subprocess docs. files: Doc/library/subprocess.rst | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/Doc/library/subprocess.rst b/Doc/library/subprocess.rst --- a/Doc/library/subprocess.rst +++ b/Doc/library/subprocess.rst @@ -804,7 +804,7 @@ to receive a SIGPIPE if p2 exits before p1. Alternatively, for trusted input, the shell's own pipeline support may still -be used directly: +be used directly:: output=`dmesg | grep hda` # becomes -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Tue Apr 3 15:41:25 2012 From: python-checkins at python.org (antoine.pitrou) Date: Tue, 03 Apr 2012 15:41:25 +0200 Subject: [Python-checkins] =?utf8?q?devguide=3A_Issue_=2314466=3A_remove_m?= =?utf8?q?q-based_workflow?= Message-ID: http://hg.python.org/devguide/rev/e1d4b6dc9702 changeset: 502:e1d4b6dc9702 user: Antoine Pitrou date: Tue Apr 03 15:36:14 2012 +0200 summary: Issue #14466: remove mq-based workflow files: patch.rst | 67 +++++++++++++----------------------------- 1 files changed, 21 insertions(+), 46 deletions(-) diff --git a/patch.rst b/patch.rst --- a/patch.rst +++ b/patch.rst @@ -10,57 +10,36 @@ Tool Usage '''''''''' -.. _mq-workflow: +.. _workflow: Mercurial allows for various workflows according to each person's or -project's preference. We present here a very simple solution based on mq_ -(*Mercurial Queues*). 
You are welcome to use any approach you like (including -a svn-like approach of simply using ``hg diff`` to create a patch based on -uncommitted changes in your working copy). Usage of mq_ is merely a -suggestion; it's a balance between being able to do everything needed -while allowing for more powerful usage if desired in the future. +project's preference. It is out of this guide's scope to present them all, +so we will stick to a basic workflow where you work on a patch in your +working copy without ever making any local commits. -First make sure that the extension has been turned on in your ``.hgrc`` or -``Mercurial.ini`` file:: +If you use this workflow, and your work adds or removes files to the +source tree, you will have to temporarily ``hg add`` or ``hg remove`` them, +respectively, before generating a patch. - [extensions] - mq = +To generate a patch, just invoke ``hg diff`` which will print out a +patch of the working copy's changes against the current revision:: -You can verify this is working properly by running ``hg help mq``. + hg diff > mywork.patch -Before you start modifying things in your working copy, type:: +If you want to undo your changes, you can revert them from the working copy:: - hg qnew mywork + hg revert -a -where ``mywork`` is a descriptive name for what you are going to work on. -This will create a patch in your patch queue. Whenever you have reached a point -that you want to save what you have done, run:: +You can later re-apply the changes if you want to continue working on the +patch:: - hg qrefresh + hg import --no-commit mywork.patch -This will update the patch to contain all of the changes you have made up to -this point. If you have added or removed any file, use ``hg add`` or ``hg -remove``, respectively, before running ``hg qrefresh``. - -Later on, we will explain :ref:`how to generate a patch `. - -If you want to delete your changes irrevocably (either because they were -committed, or they ended up uninteresting), use:: - - hg qpop mywork - hg qdelete mywork .. seealso:: - For more advanced usage of mq, read the `mq chapter - `_ - of `Mercurial: The Definitive Guide `_. - - Also, regardless of your workflow, refer to the :ref:`FAQ ` for + Refer to the :ref:`FAQ ` for :ref:`more information on using Mercurial `. -.. _issue tracker: http://bugs.python.org -.. _mq: http://mercurial.selenic.com/wiki/MqExtension - Preparation ''''''''''' @@ -123,19 +102,15 @@ ./python.exe Tools/scripts/patchcheck.py -Assuming you are using the :ref:`mq approach ` suggested earlier, -first check that all your local changes have been recorded (using -``hg qrefresh``), then type the following:: +Assuming you are using the :ref:`basic approach ` suggested earlier, +just type the following:: - hg qdiff > mywork.patch + hg diff > mywork.patch If you are using another approach, you probably need to find out the right invocation of ``hg diff`` for your purposes; see ``hg help diff`` and ``hg -help revisions``. Just please make sure that you -generate a **single, condensed** patch rather than a series of several changesets. - -Also, please make sure your patch is whitespace normalized. ``patchcheck`` -will check this for you. +help revisions``. Just please make sure that you generate a +**single, condensed** patch rather than a series of several changesets. 
Submitting -- Repository URL: http://hg.python.org/devguide From python-checkins at python.org Tue Apr 3 20:20:37 2012 From: python-checkins at python.org (antoine.pitrou) Date: Tue, 03 Apr 2012 20:20:37 +0200 Subject: [Python-checkins] =?utf8?b?Y3B5dGhvbiAoMy4yKTogSXNzdWUgIzE0NDgy?= =?utf8?q?=3A_Raise_a_ValueError=2C_not_a_NameError=2C_when_trying_to_crea?= =?utf8?q?te?= Message-ID: http://hg.python.org/cpython/rev/57c0867fbf30 changeset: 76096:57c0867fbf30 branch: 3.2 parent: 76094:e5f5652bfe91 user: Antoine Pitrou date: Tue Apr 03 20:12:23 2012 +0200 summary: Issue #14482: Raise a ValueError, not a NameError, when trying to create a multiprocessing Client or Listener with an AF_UNIX type address under Windows. Patch by Popa Claudiu. files: Lib/multiprocessing/connection.py | 4 ++++ Lib/test/test_multiprocessing.py | 6 ++++++ Misc/NEWS | 4 ++++ 3 files changed, 14 insertions(+), 0 deletions(-) diff --git a/Lib/multiprocessing/connection.py b/Lib/multiprocessing/connection.py --- a/Lib/multiprocessing/connection.py +++ b/Lib/multiprocessing/connection.py @@ -101,6 +101,10 @@ if sys.platform != 'win32' and family == 'AF_PIPE': raise ValueError('Family %s is not recognized.' % family) + if sys.platform == 'win32' and family == 'AF_UNIX': + # double check + if not hasattr(socket, family): + raise ValueError('Family %s is not recognized.' % family) def address_type(address): ''' diff --git a/Lib/test/test_multiprocessing.py b/Lib/test/test_multiprocessing.py --- a/Lib/test/test_multiprocessing.py +++ b/Lib/test/test_multiprocessing.py @@ -2331,6 +2331,12 @@ with self.assertRaises(ValueError): multiprocessing.connection.Listener(r'\\.\test') + @unittest.skipUnless(WIN32, "skipped on non-Windows platforms") + def test_invalid_family_win32(self): + with self.assertRaises(ValueError): + multiprocessing.connection.Listener('/var/test.pipe') + + testcases_other = [OtherTest, TestInvalidHandle, TestInitializers, TestStdinBadfiledescriptor, TestInvalidFamily] diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -39,6 +39,10 @@ Library ------- +- Issue #14482: Raise a ValueError, not a NameError, when trying to create + a multiprocessing Client or Listener with an AF_UNIX type address under + Windows. Patch by Popa Claudiu. + - Issue #14151: Raise a ValueError, not a NameError, when trying to create a multiprocessing Client or Listener with an AF_PIPE type address under non-Windows platforms. Patch by Popa Claudiu. -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Tue Apr 3 20:20:38 2012 From: python-checkins at python.org (antoine.pitrou) Date: Tue, 03 Apr 2012 20:20:38 +0200 Subject: [Python-checkins] =?utf8?q?cpython_=28merge_3=2E2_-=3E_default=29?= =?utf8?q?=3A_Issue_=2314482=3A_Raise_a_ValueError=2C_not_a_NameError=2C_w?= =?utf8?q?hen_trying_to_create?= Message-ID: http://hg.python.org/cpython/rev/ddc5adcedf29 changeset: 76097:ddc5adcedf29 parent: 76095:9599f091faa6 parent: 76096:57c0867fbf30 user: Antoine Pitrou date: Tue Apr 03 20:13:45 2012 +0200 summary: Issue #14482: Raise a ValueError, not a NameError, when trying to create a multiprocessing Client or Listener with an AF_UNIX type address under Windows. Patch by Popa Claudiu. 
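A minimal sketch of the behaviour summarized above, assuming a build that includes this change: on Windows, where socket.AF_UNIX does not exist, a path-style address is now rejected up front with a ValueError rather than surfacing as a NameError::

    from multiprocessing.connection import Listener

    try:
        # A filesystem-path address is interpreted as AF_UNIX,
        # which is unavailable on Windows.
        Listener('/var/test.pipe')
    except ValueError:
        print("AF_UNIX addresses are not supported on this platform")

This mirrors the test_invalid_family_win32 test added in the diff below.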
files: Lib/multiprocessing/connection.py | 4 ++++ Lib/test/test_multiprocessing.py | 4 ++++ Misc/NEWS | 4 ++++ 3 files changed, 12 insertions(+), 0 deletions(-) diff --git a/Lib/multiprocessing/connection.py b/Lib/multiprocessing/connection.py --- a/Lib/multiprocessing/connection.py +++ b/Lib/multiprocessing/connection.py @@ -111,6 +111,10 @@ if sys.platform != 'win32' and family == 'AF_PIPE': raise ValueError('Family %s is not recognized.' % family) + if sys.platform == 'win32' and family == 'AF_UNIX': + # double check + if not hasattr(socket, family): + raise ValueError('Family %s is not recognized.' % family) def address_type(address): ''' diff --git a/Lib/test/test_multiprocessing.py b/Lib/test/test_multiprocessing.py --- a/Lib/test/test_multiprocessing.py +++ b/Lib/test/test_multiprocessing.py @@ -2649,6 +2649,10 @@ with self.assertRaises(ValueError): multiprocessing.connection.Listener(r'\\.\test') + @unittest.skipUnless(WIN32, "skipped on non-Windows platforms") + def test_invalid_family_win32(self): + with self.assertRaises(ValueError): + multiprocessing.connection.Listener('/var/test.pipe') testcases_other = [OtherTest, TestInvalidHandle, TestInitializers, TestStdinBadfiledescriptor, TestWait, TestInvalidFamily] diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -19,6 +19,10 @@ Library ------- +- Issue #14482: Raise a ValueError, not a NameError, when trying to create + a multiprocessing Client or Listener with an AF_UNIX type address under + Windows. Patch by Popa Claudiu. + - Issue #802310: Generate always unique tkinter font names if not directly passed. - Issue #14151: Raise a ValueError, not a NameError, when trying to create -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Tue Apr 3 21:04:28 2012 From: python-checkins at python.org (eli.bendersky) Date: Tue, 03 Apr 2012 21:04:28 +0200 Subject: [Python-checkins] =?utf8?q?cpython=3A_Fixes_and_enhancements_to_?= =?utf8?q?=5Felementtree=3A?= Message-ID: http://hg.python.org/cpython/rev/14abfa27ff19 changeset: 76098:14abfa27ff19 user: Eli Bendersky date: Tue Apr 03 22:02:37 2012 +0300 summary: Fixes and enhancements to _elementtree: * Fixed refleak problems when GC collection is run (see messages in issue #14065) * Added weakref support to Element objects files: Lib/test/test_xml_etree.py | 35 ++++++++++++ Modules/_elementtree.c | 71 +++++++++++++++++-------- 2 files changed, 82 insertions(+), 24 deletions(-) diff --git a/Lib/test/test_xml_etree.py b/Lib/test/test_xml_etree.py --- a/Lib/test/test_xml_etree.py +++ b/Lib/test/test_xml_etree.py @@ -1859,6 +1859,41 @@ gc_collect() self.assertIsNone(wref()) + # A longer cycle: d->e->e2->d + e = ET.Element('joe') + d = Dummy() + d.dummyref = e + wref = weakref.ref(d) + e2 = ET.SubElement(e, 'foo', attr=d) + del d, e, e2 + gc_collect() + self.assertIsNone(wref()) + + # A cycle between Element objects as children of one another + # e1->e2->e3->e1 + e1 = ET.Element('e1') + e2 = ET.Element('e2') + e3 = ET.Element('e3') + e1.append(e2) + e2.append(e2) + e3.append(e1) + wref = weakref.ref(e1) + del e1, e2, e3 + gc_collect() + self.assertIsNone(wref()) + + def test_weakref(self): + flag = False + def wref_cb(w): + nonlocal flag + flag = True + e = ET.Element('e') + wref = weakref.ref(e, wref_cb) + self.assertEqual(wref().tag, 'e') + del e + self.assertEqual(flag, True) + self.assertEqual(wref(), None) + class ElementTreeTest(unittest.TestCase): def test_istype(self): diff --git a/Modules/_elementtree.c b/Modules/_elementtree.c --- 
a/Modules/_elementtree.c +++ b/Modules/_elementtree.c @@ -48,6 +48,7 @@ /* See http://www.python.org/psf/license for licensing details. */ #include "Python.h" +#include "structmember.h" #define VERSION "1.0.6" @@ -229,6 +230,8 @@ ElementObjectExtra* extra; + PyObject *weakreflist; /* For tp_weaklistoffset */ + } ElementObject; static PyTypeObject Element_Type; @@ -261,17 +264,24 @@ LOCAL(void) dealloc_extra(ElementObject* self) { + if (!self->extra) + return; + + /* Avoid DECREFs calling into this code again (cycles, etc.) + */ + ElementObjectExtra *myextra = self->extra; + self->extra = NULL; + + Py_DECREF(myextra->attrib); + int i; - - Py_DECREF(self->extra->attrib); - - for (i = 0; i < self->extra->length; i++) - Py_DECREF(self->extra->children[i]); - - if (self->extra->children != self->extra->_children) - PyObject_Free(self->extra->children); - - PyObject_Free(self->extra); + for (i = 0; i < myextra->length; i++) + Py_DECREF(myextra->children[i]); + + if (myextra->children != myextra->_children) + PyObject_Free(myextra->children); + + PyObject_Free(myextra); } /* Convenience internal function to create new Element objects with the given @@ -308,6 +318,8 @@ Py_INCREF(Py_None); self->tail = Py_None; + self->weakreflist = NULL; + ALLOC(sizeof(ElementObject), "create element"); PyObject_GC_Track(self); return (PyObject*) self; @@ -328,6 +340,7 @@ e->tail = Py_None; e->extra = NULL; + e->weakreflist = NULL; } return (PyObject *)e; } @@ -576,19 +589,28 @@ static int element_gc_clear(ElementObject *self) { - PyObject *text = JOIN_OBJ(self->text); - PyObject *tail = JOIN_OBJ(self->tail); Py_CLEAR(self->tag); - Py_CLEAR(text); - Py_CLEAR(tail); + + /* The following is like Py_CLEAR for self->text and self->tail, but + * written explicitily because the real pointers hide behind access + * macros. + */ + if (self->text) { + PyObject *tmp = JOIN_OBJ(self->text); + self->text = NULL; + Py_DECREF(tmp); + } + + if (self->tail) { + PyObject *tmp = JOIN_OBJ(self->tail); + self->tail = NULL; + Py_DECREF(tmp); + } /* After dropping all references from extra, it's no longer valid anyway, - ** so fully deallocate it (see also element_clearmethod) + * so fully deallocate it. 
*/ - if (self->extra) { - dealloc_extra(self); - self->extra = NULL; - } + dealloc_extra(self); return 0; } @@ -596,6 +618,10 @@ element_dealloc(ElementObject* self) { PyObject_GC_UnTrack(self); + + if (self->weakreflist != NULL) + PyObject_ClearWeakRefs((PyObject *) self); + /* element_gc_clear clears all references and deallocates extra */ element_gc_clear(self); @@ -626,10 +652,7 @@ if (!PyArg_ParseTuple(args, ":clear")) return NULL; - if (self->extra) { - dealloc_extra(self); - self->extra = NULL; - } + dealloc_extra(self); Py_INCREF(Py_None); Py_DECREF(JOIN_OBJ(self->text)); @@ -1693,7 +1716,7 @@ (traverseproc)element_gc_traverse, /* tp_traverse */ (inquiry)element_gc_clear, /* tp_clear */ 0, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ + offsetof(ElementObject, weakreflist), /* tp_weaklistoffset */ 0, /* tp_iter */ 0, /* tp_iternext */ element_methods, /* tp_methods */ -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Wed Apr 4 01:00:53 2012 From: python-checkins at python.org (victor.stinner) Date: Wed, 04 Apr 2012 01:00:53 +0200 Subject: [Python-checkins] =?utf8?q?peps=3A_PEP_418=3A_Add_3_alternatives?= Message-ID: http://hg.python.org/peps/rev/763be41b879b changeset: 4194:763be41b879b user: Victor Stinner date: Wed Apr 04 01:00:46 2012 +0200 summary: PEP 418: Add 3 alternatives files: pep-0418.txt | 41 ++++++++++++++++++++++++++++++++++++++++ 1 files changed, 41 insertions(+), 0 deletions(-) diff --git a/pep-0418.txt b/pep-0418.txt --- a/pep-0418.txt +++ b/pep-0418.txt @@ -792,6 +792,47 @@ a monotonic clock with an unspecified starting point +Only expose operating system clocks +----------------------------------- + +To not have to define high-level clocks, which is a difficult task, a simpler +approach is to only expose operating system clocks. time.clock_gettime() and +related clock identifiers were already added to Python 3.3 for example. + + +Don't fallback on system clock +------------------------------ + +time.monotonic() is always a monotonic clock and is only available if the +operating system provides a monotonic clock. + +time.highres() is only available if the operating system provides a clock with +a high resolution (e.g. at least a microsecond or better). + + +One function choosing the clock from a list of constrains +--------------------------------------------------------- + +time.get_clock(*flags) with the following flags: + + * time.MONOTONIC: clock cannot go backard + * time.STEADY: clock rate is steady and the clock is not adjusted + * time.HIGHRES: clock with the highest resolutions + +time.get_clock() returns None if the clock is found and so calls can be chained +using the or operator. 
Example:: + + func = time.get_clock(time.MONOTONIC) or time.get_clock(time.STEADY) or time.time() + +Example of flags of system clocks: + + * QueryPerformanceCounter: MONOTONIC | HIGHRES + * GetTickCount: MONOTONIC | STEADY + * CLOCK_MONOTONIC: MONOTONIC | STEADY (or only MONOTONIC on Linux) + * CLOCK_MONOTONIC_RAW: MONOTONIC | STEADY + * gettimeofday(): (none) + + One function with a flag: time.monotonic(fallback=True) ------------------------------------------------------- -- Repository URL: http://hg.python.org/peps From python-checkins at python.org Wed Apr 4 01:09:50 2012 From: python-checkins at python.org (victor.stinner) Date: Wed, 04 Apr 2012 01:09:50 +0200 Subject: [Python-checkins] =?utf8?q?peps=3A_PEP_418=3A_Add_definitions_of_?= =?utf8?q?=22monotonic=22_and_=22steady=22?= Message-ID: http://hg.python.org/peps/rev/dc0b671d6aa4 changeset: 4195:dc0b671d6aa4 user: Victor Stinner date: Wed Apr 04 01:07:55 2012 +0200 summary: PEP 418: Add definitions of "monotonic" and "steady" files: pep-0418.txt | 21 +++++++++++++++++++++ 1 files changed, 21 insertions(+), 0 deletions(-) diff --git a/pep-0418.txt b/pep-0418.txt --- a/pep-0418.txt +++ b/pep-0418.txt @@ -219,6 +219,27 @@ daemon) +Definitions +=========== + +Monotonic +--------- + +A monotonic clock cannot go backward. It may give the same value for two close +reads depending on the clock resolution. + +On Linux, CLOCK_MONOTONIC is a monotonic clock but its rate is adjusted by NTP. + +Steady +------ + +A steady clock is a clock with a stable rate. + +The C++ Timeout Specifiction uses the following definition: "Objects of class +steady_clock represent clocks for which values of time_point advance at a +steady rate relative to real time. That is, the clock may not be adjusted." + + Hardware clocks =============== -- Repository URL: http://hg.python.org/peps From python-checkins at python.org Wed Apr 4 01:09:51 2012 From: python-checkins at python.org (victor.stinner) Date: Wed, 04 Apr 2012 01:09:51 +0200 Subject: [Python-checkins] =?utf8?q?peps=3A_PEP_418=3A_Rename_time=2Ehighr?= =?utf8?b?ZXMoKSB0byB0aW1lLnBlcmZfY291bnRlcigp?= Message-ID: http://hg.python.org/peps/rev/ebb521404664 changeset: 4196:ebb521404664 user: Victor Stinner date: Wed Apr 04 01:09:35 2012 +0200 summary: PEP 418: Rename time.highres() to time.perf_counter() files: pep-0418.txt | 32 +++++++++++++++++--------------- 1 files changed, 17 insertions(+), 15 deletions(-) diff --git a/pep-0418.txt b/pep-0418.txt --- a/pep-0418.txt +++ b/pep-0418.txt @@ -13,7 +13,7 @@ Abstract ======== -Add time.steady(), time.highres(), time.get_clock_info(name) functions to +Add time.steady(), time.perf_counter(), time.get_clock_info(name) functions to Python 3.3. @@ -25,7 +25,7 @@ * Display the current time to a human (e.g. display a calendar or draw a wall clock): use system clock, i.e. time.time() or datetime.datetime.now(). -* Benchmark, profiling: time.highres(). +* Benchmark, profiling: time.perf_counter(). * Event scheduler, timeout: time.steady(). @@ -35,7 +35,7 @@ To fulfill the use cases, the functions' properties are: * time.time(): system clock, "wall clock". -* time.highres(): clock with the best accuracy. +* time.perf_counter(): clock with the best accuracy. * time.steady(): steady clock, should be monotonic * time.get_clock_info(name): get information on the specified time function @@ -165,8 +165,8 @@ different in two Python processes. -time.highres() --------------- +time.perf_counter() +------------------- Clock with the best available resolution. 
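To make the distinction concrete, here is a small sketch of the timeout use case these definitions serve; it assumes the time.monotonic() function proposed elsewhere in this PEP (any monotonic clock would do)::

    import time

    def wait_until(predicate, timeout, clock=time.monotonic):
        # A monotonic clock keeps the deadline meaningful even if the
        # system clock is stepped backward or forward while waiting.
        deadline = clock() + timeout
        while not predicate():
            if clock() >= deadline:
                return False
            time.sleep(0.01)
        return True

With time.time() as the clock, a backward step of the system clock while waiting would silently extend the timeout.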
@@ -174,24 +174,24 @@ Pseudo-code:: - def highres(): - if highres.use_performance_counter: + def perf_counter(): + if perf_counter.use_performance_counter: try: return _time.QueryPerformanceCounter() except OSError: # QueryPerformanceFrequency() may fail, if the installed # hardware does not support a high-resolution performance # counter for example - highres.use_performance_counter = False - if highres.use_steady: + perf_counter.use_performance_counter = False + if perf_counter.use_steady: # Monotonic clock is preferred over system clock try: return time.steady() except OSError: - highres.use_steady = False + perf_counter.use_steady = False return time.time() - highres.use_performance_counter = (os.name == 'nt') - highres.use_steady = hasattr(time, 'steady') + perf_counter.use_performance_counter = (os.name == 'nt') + perf_counter.use_steady = hasattr(time, 'steady') time.get_clock_info(name) ------------------------- @@ -199,7 +199,7 @@ Get information on the specified clock. Supported clocks: * "clock": time.clock() - * "highres": time.highres() + * "perf_counter": time.perf_counter() * "steady": time.steady() * "time": time.time() @@ -794,8 +794,9 @@ Other names for new functions ----------------------------- -time.highres(): +time.perf_counter(): +* time.highres() * time.hires(): "hires" can be read as "to hire" as in "he hires a car to go on holiday", rather than a "HIgh-RESolution clock". * time.timer(): "it would be too easy to confuse with (or misspell as) @@ -827,8 +828,8 @@ time.monotonic() is always a monotonic clock and is only available if the operating system provides a monotonic clock. -time.highres() is only available if the operating system provides a clock with -a high resolution (e.g. at least a microsecond or better). +time.perf_counter() is only available if the operating system provides a clock +with a high resolution (e.g. at least a microsecond or better). One function choosing the clock from a list of constrains -- Repository URL: http://hg.python.org/peps From python-checkins at python.org Wed Apr 4 01:50:04 2012 From: python-checkins at python.org (victor.stinner) Date: Wed, 04 Apr 2012 01:50:04 +0200 Subject: [Python-checkins] =?utf8?q?peps=3A_PEP_418=3A_Fix_typo_=28Specifi?= =?utf8?q?ction=29?= Message-ID: http://hg.python.org/peps/rev/060641887edf changeset: 4197:060641887edf user: Victor Stinner date: Wed Apr 04 01:49:55 2012 +0200 summary: PEP 418: Fix typo (Specifiction) files: pep-0418.txt | 10 ++++++---- 1 files changed, 6 insertions(+), 4 deletions(-) diff --git a/pep-0418.txt b/pep-0418.txt --- a/pep-0418.txt +++ b/pep-0418.txt @@ -235,9 +235,11 @@ A steady clock is a clock with a stable rate. -The C++ Timeout Specifiction uses the following definition: "Objects of class -steady_clock represent clocks for which values of time_point advance at a -steady rate relative to real time. That is, the clock may not be adjusted." +The `C++ Timeout Specification +`_ uses the +following definition: "Objects of class steady_clock represent clocks for which +values of time_point advance at a steady rate relative to real time. That is, +the clock may not be adjusted." 
Hardware clocks @@ -835,7 +837,7 @@ One function choosing the clock from a list of constrains --------------------------------------------------------- -time.get_clock(*flags) with the following flags: +``time.get_clock(*flags)`` with the following flags: * time.MONOTONIC: clock cannot go backard * time.STEADY: clock rate is steady and the clock is not adjusted -- Repository URL: http://hg.python.org/peps From python-checkins at python.org Wed Apr 4 01:57:47 2012 From: python-checkins at python.org (victor.stinner) Date: Wed, 04 Apr 2012 01:57:47 +0200 Subject: [Python-checkins] =?utf8?q?peps=3A_PEP_418=3A_Add_issues_of_the_h?= =?utf8?q?acked_monotonic_function?= Message-ID: http://hg.python.org/peps/rev/c568a59a3b83 changeset: 4198:c568a59a3b83 user: Victor Stinner date: Wed Apr 04 01:57:37 2012 +0200 summary: PEP 418: Add issues of the hacked monotonic function files: pep-0418.txt | 7 +++++++ 1 files changed, 7 insertions(+), 0 deletions(-) diff --git a/pep-0418.txt b/pep-0418.txt --- a/pep-0418.txt +++ b/pep-0418.txt @@ -900,6 +900,13 @@ `KB274323`_ contains a code example to workaround the bug (use GetTickCount() to detect QueryPerformanceCounter() leap). +Issues of a hacked monotonic function: + + * if the clock is accidentally set forward by an hour and then back + again, you wouldn't have a useful clock for an hour + * the cache is not shared between processes so different processes wouldn't + see the same clock value + Footnotes ========= -- Repository URL: http://hg.python.org/peps From python-checkins at python.org Wed Apr 4 02:58:04 2012 From: python-checkins at python.org (victor.stinner) Date: Wed, 04 Apr 2012 02:58:04 +0200 Subject: [Python-checkins] =?utf8?q?peps=3A_Rewrite_the_PEP_418?= Message-ID: http://hg.python.org/peps/rev/5f1708dfd267 changeset: 4199:5f1708dfd267 user: Victor Stinner date: Wed Apr 04 02:57:55 2012 +0200 summary: Rewrite the PEP 418 * Rename time.steady() to time.monotonic(), again * time.monotonic() does not fallback to the system clock anymore * Drop time.perf_counter() function files: pep-0418.txt | 209 ++++++++++++++++---------------------- 1 files changed, 86 insertions(+), 123 deletions(-) diff --git a/pep-0418.txt b/pep-0418.txt --- a/pep-0418.txt +++ b/pep-0418.txt @@ -1,5 +1,5 @@ PEP: 418 -Title: Add steady and high-resolution time functions +Title: Add a monotonic time functions Version: $Revision$ Last-Modified: $Date$ Author: Victor Stinner @@ -13,8 +13,7 @@ Abstract ======== -Add time.steady(), time.perf_counter(), time.get_clock_info(name) functions to -Python 3.3. +Add time.monotonic() and time.get_clock_info(name) functions to Python 3.3. Rationale @@ -25,8 +24,9 @@ * Display the current time to a human (e.g. display a calendar or draw a wall clock): use system clock, i.e. time.time() or datetime.datetime.now(). -* Benchmark, profiling: time.perf_counter(). -* Event scheduler, timeout: time.steady(). +* Event scheduler, timeout: time.monotonic(). +* Benchmark, profiling: time.clock() on Windows, time.monotonic(), + or fallback to time.time() Functions @@ -35,8 +35,7 @@ To fulfill the use cases, the functions' properties are: * time.time(): system clock, "wall clock". -* time.perf_counter(): clock with the best accuracy. -* time.steady(): steady clock, should be monotonic +* time.monotonic(): monotonic clock * time.get_clock_info(name): get information on the specified time function @@ -78,129 +77,75 @@ return _time.time() -time.steady() -------------- +time.monotonic() +---------------- -Steady clock. 
Use a monotonic clock, or falls back to the system clock. Its -rate may be adjusted by NTP. The reference point of the returned value is -undefined so only the difference of consecutive calls is valid. - -Use time.get_clock_info('steady')['is_monotonic'] to check if the clock -monotonic or not. +Monotonic clock, cannot go backward. Its rate may be adjusted by NTP. The +reference point of the returned value is undefined so only the difference of +consecutive calls is valid. The elapsed time may or may not include time the system spends in sleep or hibernation; this depends on the operating system. +Availability: Windows, Mac OS X, Unix. + Pseudo-code [#pseudo]_:: if os.name == 'nt': # GetTickCount64() requires Windows Vista, Server 2008 or later if hasattr(time, '_GetTickCount64'): - def steady(): + def monotonic(): return _time.GetTickCount64() * 1e-3 else: - def steady(): + def monotonic(): ticks = _time.GetTickCount() - if ticks < steady.last: + if ticks < monotonic.last: # Integer overflow detected - steady.delta += 2**32 - steady.last = ticks - return (ticks + steady.delta) * 1e-3 - steady.last = 0 - steady.delta = 0 + monotonic.delta += 2**32 + monotonic.last = ticks + return (ticks + monotonic.delta) * 1e-3 + monotonic.last = 0 + monotonic.delta = 0 elif os.name == 'mac': - def steady(): - if steady.factor is None: + def monotonic(): + if monotonic.factor is None: factor = _time.mach_timebase_info() - steady.factor = timebase[0] / timebase[1] - return _time.mach_absolute_time() * steady.factor - steady.factor = None + monotonic.factor = timebase[0] / timebase[1] + return _time.mach_absolute_time() * monotonic.factor + monotonic.factor = None - elif os.name.startswith('sunos'): - def steady(): - if steady.use_clock_highres: + elif hasattr(time, "clock_gettime"): + def monotonic(): + if monotonic.use_clock_highres: try: time.clock_gettime(time.CLOCK_HIGHRES) except OSError: - steady.use_clock_highres = False - if steady.use_gethrtime: - try: - return time.gethrtime() - except OSError: - steady.use_gethrtime = False - return time.time() - steady.use_clock_highres = (hasattr(time, 'clock_gettime') + monotonic.use_clock_highres = False + return time.clock_gettime(time.CLOCK_MONOTONIC) + monotonic.use_clock_highres = (hasattr(time, 'clock_gettime') and hasattr(time, 'CLOCK_HIGHRES')) - steady.use_gethrtime = True - elif hasattr(time, "clock_gettime"): - def steady(): - while steady.clocks: - try: - clk_id = steady.clocks[0] - return time.clock_gettime(clk_id) - except OSError: - del steady.clocks[0] - return time.time() - steady.clocks = [] - if hasattr(time, 'CLOCK_HIGHRES'): - steady.clocks.append(time.CLOCK_HIGHRES) - steady.clocks.append(time.CLOCK_MONOTONIC) - else: - def steady(): - return time.time() - -On Windows, QueryPerformanceCounter() is not used even though it has a -better resolution than GetTickCount(). It is not reliable and has too -many issues. +On Windows, QueryPerformanceCounter() is not used even though it has a better +accuracy than GetTickCount(). It is not reliable and has too many issues. .. note:: - time.steady() detects GetTickCount() integer overflow (32 bits, - roll-over after 49.7 days): it increases a delta by 2\ :sup:`32` - each time than an overflow is detected. The delta is stored in the - process-local state and so the value of time.steady() may be - different in two Python processes. 
+ time.monotonic() detects GetTickCount() integer overflow (32 bits, roll-over + after 49.7 days): it increases a delta by 2\ :sup:`32` each time than an + overflow is detected. The delta is stored in the process-local state and so + the value of time.monotonic() may be different in two Python processes + running for more than 49 days. -time.perf_counter() -------------------- - -Clock with the best available resolution. - -It is available on all platforms and cannot fail. - -Pseudo-code:: - - def perf_counter(): - if perf_counter.use_performance_counter: - try: - return _time.QueryPerformanceCounter() - except OSError: - # QueryPerformanceFrequency() may fail, if the installed - # hardware does not support a high-resolution performance - # counter for example - perf_counter.use_performance_counter = False - if perf_counter.use_steady: - # Monotonic clock is preferred over system clock - try: - return time.steady() - except OSError: - perf_counter.use_steady = False - return time.time() - perf_counter.use_performance_counter = (os.name == 'nt') - perf_counter.use_steady = hasattr(time, 'steady') - time.get_clock_info(name) ------------------------- Get information on the specified clock. Supported clocks: * "clock": time.clock() - * "perf_counter": time.perf_counter() - * "steady": time.steady() + * "monotonic": time.monotonic() * "time": time.time() Return a dictionary with the following keys: @@ -264,8 +209,8 @@ 32,768 Hz -NTP adjusted -============ +NTP adjustment +============== NTP has diffent methods to adjust a clock: @@ -655,10 +600,6 @@ always return 1 nanosecond. For GetProcessTimes(), the accuracy is read using GetSystemTimeAdjustment(). -Python source code includes a portable library to get the process time: -`Tools/pybench/systimes.py -`_. - Functions ^^^^^^^^^ @@ -793,27 +734,17 @@ Alternatives: API design ======================== -Other names for new functions ------------------------------ +Other names for time.monotonic() +-------------------------------- -time.perf_counter(): - -* time.highres() -* time.hires(): "hires" can be read as "to hire" as in "he hires a car - to go on holiday", rather than a "HIgh-RESolution clock". -* time.timer(): "it would be too easy to confuse with (or misspell as) - time.time()" - -time.steady(): - -* time.monotonic(): QueryPerformanceCounter() is monotonic but it is not used - by time.steady() because it is not steady, and it is surprising to have to - check for time.get_clock_info('monotonic')['is_monotonic']. -* time.try_monotonic(): it is a clear and obvious solution for the - use-case of "I prefer the monotonic clock, if it is available, - otherwise I'll take my chances with a best-effect clock." +* time.steady() * time.wallclock(): it is not the system time aka the "wall clock", but a monotonic clock with an unspecified starting point +* time.seconds() +* time.counter() + +The name "time.try_monotonic()" was also proposed when time.monotonic() was +falling back to the system clock when no monotonic clock was available. Only expose operating system clocks @@ -824,14 +755,18 @@ related clock identifiers were already added to Python 3.3 for example. -Don't fallback on system clock ------------------------------- +Fallback to system clock +------------------------ -time.monotonic() is always a monotonic clock and is only available if the -operating system provides a monotonic clock. +If no monotonic clock is available, time.monotonic() falls back to the system +clock. 
-time.perf_counter() is only available if the operating system provides a clock -with a high resolution (e.g. at least a microsecond or better). +Issues: + + * It is hard to define correctly such function in the documentation: is it + monotonic? is it steady? is it adjusted? + * Some user want to decide what to do when no monotonic clock is available: + use another clock, display an error, or do something else One function choosing the clock from a list of constrains @@ -846,7 +781,8 @@ time.get_clock() returns None if the clock is found and so calls can be chained using the or operator. Example:: - func = time.get_clock(time.MONOTONIC) or time.get_clock(time.STEADY) or time.time() + get_time = time.get_clock(time.MONOTONIC) or time.get_clock(time.STEADY) or time.time() + t = get_time() Example of flags of system clocks: @@ -854,7 +790,7 @@ * GetTickCount: MONOTONIC | STEADY * CLOCK_MONOTONIC: MONOTONIC | STEADY (or only MONOTONIC on Linux) * CLOCK_MONOTONIC_RAW: MONOTONIC | STEADY - * gettimeofday(): (none) + * gettimeofday(): (no flag) One function with a flag: time.monotonic(fallback=True) @@ -908,6 +844,29 @@ see the same clock value +Deferred API: time.perf_counter() +================================= + +Python does not provide a portable "performance counter" clock for benchmarking +or profiling. Each tool has to implement its own heuristic to decide which +clock is the best depending on the OS and on which counters are available. + +A previous version of the PEP proposed a time.perf_counter() function using +QueryPerformanceCounter() on Windows, time.monotonic(), or falls back to the +system time. This function was not well defined and the idea is deferred. + +Proposed names for such function: + + * time.hires() + * time.highres() + * time.perf_counter() + * time.timer() + +Python source code includes a portable library to get the process time: +`Tools/pybench/systimes.py +`_. + + Footnotes ========= -- Repository URL: http://hg.python.org/peps From python-checkins at python.org Wed Apr 4 03:48:02 2012 From: python-checkins at python.org (victor.stinner) Date: Wed, 04 Apr 2012 03:48:02 +0200 Subject: [Python-checkins] =?utf8?q?peps=3A_PEP_418=3A_Add_also_timeout=5F?= =?utf8?q?clock=28=29_alternative_name?= Message-ID: http://hg.python.org/peps/rev/d1bbe228a3af changeset: 4200:d1bbe228a3af user: Victor Stinner date: Wed Apr 04 03:47:55 2012 +0200 summary: PEP 418: Add also timeout_clock() alternative name files: pep-0418.txt | 5 +++-- 1 files changed, 3 insertions(+), 2 deletions(-) diff --git a/pep-0418.txt b/pep-0418.txt --- a/pep-0418.txt +++ b/pep-0418.txt @@ -737,11 +737,12 @@ Other names for time.monotonic() -------------------------------- +* time.counter() +* time.seconds() * time.steady() +* time.timeout_clock() * time.wallclock(): it is not the system time aka the "wall clock", but a monotonic clock with an unspecified starting point -* time.seconds() -* time.counter() The name "time.try_monotonic()" was also proposed when time.monotonic() was falling back to the system clock when no monotonic clock was available. 
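A rough sketch of what the "try monotonic, else fall back" behaviour debated
above means for application code; it is written against the proposed
time.monotonic() API and is only an illustration, and the assumption that an
unavailable monotonic clock shows up as AttributeError or OSError comes from
the PEP discussion, not from this patch::

    import time

    def pick_clock():
        """Prefer time.monotonic(), fall back to time.time() explicitly.

        Assumes time.monotonic() is either missing (AttributeError) or
        raises OSError when the operating system has no monotonic clock,
        the two failure modes discussed in PEP 418.
        """
        try:
            time.monotonic()              # probe the clock once
            return time.monotonic
        except (AttributeError, OSError):
            return time.time              # visible, deliberate fallback

    get_time = pick_clock()
    start = get_time()
    # ... timed work ...
    elapsed = get_time() - start

Keeping the fallback in user code, as above, sidesteps the documentation
problem listed earlier: time.monotonic() itself stays strictly monotonic and
the caller decides what "best effort" means.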
-- Repository URL: http://hg.python.org/peps From solipsis at pitrou.net Wed Apr 4 05:34:37 2012 From: solipsis at pitrou.net (solipsis at pitrou.net) Date: Wed, 04 Apr 2012 05:34:37 +0200 Subject: [Python-checkins] Daily reference leaks (14abfa27ff19): sum=0 Message-ID: results for 14abfa27ff19 on branch "default" -------------------------------------------- Command line was: ['./python', '-m', 'test.regrtest', '-uall', '-R', '3:3:/home/antoine/cpython/refleaks/reflogehBBEr', '-x'] From python-checkins at python.org Wed Apr 4 13:21:01 2012 From: python-checkins at python.org (matthias.klose) Date: Wed, 04 Apr 2012 13:21:01 +0200 Subject: [Python-checkins] =?utf8?q?cpython=3A_Followup_for_issue_=2314321?= =?utf8?q?=2C_remove_references_to_Parser/pgen=2Estamp?= Message-ID: http://hg.python.org/cpython/rev/4e306c1a3c92 changeset: 76099:4e306c1a3c92 user: Matthias Klose date: Wed Apr 04 13:20:55 2012 +0200 summary: Followup for issue #14321, remove references to Parser/pgen.stamp files: .bzrignore | 1 - .gitignore | 1 - .hgignore | 1 - Makefile.pre.in | 2 +- 4 files changed, 1 insertions(+), 4 deletions(-) diff --git a/.bzrignore b/.bzrignore --- a/.bzrignore +++ b/.bzrignore @@ -33,7 +33,6 @@ Modules/config.c Modules/ld_so_aix Parser/pgen -Parser/pgen.stamp Lib/test/data/* Lib/lib2to3/Grammar*.pickle Lib/lib2to3/PatternGrammar*.pickle diff --git a/.gitignore b/.gitignore --- a/.gitignore +++ b/.gitignore @@ -32,7 +32,6 @@ PCbuild/*.pdb PCbuild/Win32-temp-* Parser/pgen -Parser/pgen.stamp __pycache__ autom4te.cache build/ diff --git a/.hgignore b/.hgignore --- a/.hgignore +++ b/.hgignore @@ -32,7 +32,6 @@ Modules/config.c Modules/ld_so_aix$ Parser/pgen$ -Parser/pgen.stamp$ PCbuild/amd64/ ^core ^python-gdb.py diff --git a/Makefile.pre.in b/Makefile.pre.in --- a/Makefile.pre.in +++ b/Makefile.pre.in @@ -1351,7 +1351,7 @@ clobber: clean profile-removal -rm -f $(BUILDPYTHON) $(PGEN) $(LIBRARY) $(LDLIBRARY) $(DLLLIBRARY) \ - tags TAGS Parser/pgen.stamp \ + tags TAGS \ config.cache config.log pyconfig.h Modules/config.c -rm -rf build platform -rm -rf $(PYTHONFRAMEWORKDIR) -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Wed Apr 4 13:26:54 2012 From: python-checkins at python.org (victor.stinner) Date: Wed, 04 Apr 2012 13:26:54 +0200 Subject: [Python-checkins] =?utf8?q?peps=3A_PEP_418=3A_Add_Boost=2EChrono_?= =?utf8?q?library_in_links?= Message-ID: http://hg.python.org/peps/rev/8dc2457f0c5c changeset: 4201:8dc2457f0c5c user: Victor Stinner date: Wed Apr 04 13:26:35 2012 +0200 summary: PEP 418: Add Boost.Chrono library in links files: pep-0418.txt | 18 ++++++++++++++++++ 1 files changed, 18 insertions(+), 0 deletions(-) diff --git a/pep-0418.txt b/pep-0418.txt --- a/pep-0418.txt +++ b/pep-0418.txt @@ -916,6 +916,24 @@ * `libpthread `_: POSIX thread library for Windows (`clock.c `_) +* `Boost.Chrono + `_ uses: + + * system_clock: + + * mac = gettimeofday() + * posix = clock_gettime(CLOCK_REALTIME) + * win = GetSystemTimeAsFileTime() + + * steady_clock: + + * mac = mach_absolute_time() + * posix = clock_gettime(CLOCK_MONOTONIC) + * win = QueryPerformanceCounter() + + * high_resolution_clock: + + * steady_clock, if available system_clock, otherwise Time: -- Repository URL: http://hg.python.org/peps From python-checkins at python.org Wed Apr 4 14:19:10 2012 From: python-checkins at python.org (matthias.klose) Date: Wed, 04 Apr 2012 14:19:10 +0200 Subject: [Python-checkins] =?utf8?q?cpython=3A_-_Issue_=2314493=3A_Use_gvf?= 
=?utf8?q?s-open/xdg-open_in_Lib/webbrowser=2Epy=2E?= Message-ID: http://hg.python.org/cpython/rev/70c58903b52e changeset: 76100:70c58903b52e user: Matthias Klose date: Wed Apr 04 14:19:04 2012 +0200 summary: - Issue #14493: Use gvfs-open/xdg-open in Lib/webbrowser.py. files: Lib/webbrowser.py | 8 ++++++++ Misc/NEWS | 2 ++ 2 files changed, 10 insertions(+), 0 deletions(-) diff --git a/Lib/webbrowser.py b/Lib/webbrowser.py --- a/Lib/webbrowser.py +++ b/Lib/webbrowser.py @@ -448,6 +448,14 @@ def register_X_browsers(): + # use xdg-open if around + if _iscommand("xdg-open"): + register("xdg-open", None, BackgroundBrowser("xdg-open")) + + # The default GNOME3 browser + if "GNOME_DESKTOP_SESSION_ID" in os.environ and _iscommand("gvfs-open"): + register("gvfs-open", None, BackgroundBrowser("gvfs-open")) + # The default GNOME browser if "GNOME_DESKTOP_SESSION_ID" in os.environ and _iscommand("gnome-open"): register("gnome-open", None, BackgroundBrowser("gnome-open")) diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -29,6 +29,8 @@ a multiprocessing Client or Listener with an AF_PIPE type address under non-Windows platforms. Patch by Popa Claudiu. +- Issue #14493: Use gvfs-open/xdg-open in Lib/webbrowser.py. + What's New in Python 3.3.0 Alpha 2? =================================== -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Wed Apr 4 14:57:06 2012 From: python-checkins at python.org (eli.bendersky) Date: Wed, 04 Apr 2012 14:57:06 +0200 Subject: [Python-checkins] =?utf8?q?cpython=3A_Fix_Windows_compilation_err?= =?utf8?q?ors?= Message-ID: http://hg.python.org/cpython/rev/118729090dc5 changeset: 76101:118729090dc5 user: Eli Bendersky date: Wed Apr 04 15:55:07 2012 +0300 summary: Fix Windows compilation errors files: Modules/_elementtree.c | 6 ++++-- 1 files changed, 4 insertions(+), 2 deletions(-) diff --git a/Modules/_elementtree.c b/Modules/_elementtree.c --- a/Modules/_elementtree.c +++ b/Modules/_elementtree.c @@ -264,17 +264,19 @@ LOCAL(void) dealloc_extra(ElementObject* self) { + ElementObjectExtra *myextra; + int i; + if (!self->extra) return; /* Avoid DECREFs calling into this code again (cycles, etc.) */ - ElementObjectExtra *myextra = self->extra; + myextra = self->extra; self->extra = NULL; Py_DECREF(myextra->attrib); - int i; for (i = 0; i < myextra->length; i++) Py_DECREF(myextra->children[i]); -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Wed Apr 4 20:17:22 2012 From: python-checkins at python.org (georg.brandl) Date: Wed, 04 Apr 2012 20:17:22 +0200 Subject: [Python-checkins] =?utf8?q?cpython_=283=2E2=29=3A_Closes_=2314495?= =?utf8?q?=3A_fix_typo=2E?= Message-ID: http://hg.python.org/cpython/rev/45287f2799f5 changeset: 76102:45287f2799f5 branch: 3.2 parent: 76096:57c0867fbf30 user: Georg Brandl date: Wed Apr 04 20:17:06 2012 +0200 summary: Closes #14495: fix typo. 
files: Lib/tkinter/ttk.py | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/Lib/tkinter/ttk.py b/Lib/tkinter/ttk.py --- a/Lib/tkinter/ttk.py +++ b/Lib/tkinter/ttk.py @@ -1253,7 +1253,7 @@ def exists(self, item): - """Returns True if the specified item is present in the three, + """Returns True if the specified item is present in the tree, False otherwise.""" return bool(self.tk.call(self._w, "exists", item)) -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Wed Apr 4 20:17:23 2012 From: python-checkins at python.org (georg.brandl) Date: Wed, 04 Apr 2012 20:17:23 +0200 Subject: [Python-checkins] =?utf8?q?cpython_=28merge_3=2E2_-=3E_default=29?= =?utf8?q?=3A_merge_with_3=2E2?= Message-ID: http://hg.python.org/cpython/rev/68ef2fc47da6 changeset: 76103:68ef2fc47da6 parent: 76101:118729090dc5 parent: 76102:45287f2799f5 user: Georg Brandl date: Wed Apr 04 20:17:27 2012 +0200 summary: merge with 3.2 files: Lib/tkinter/ttk.py | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/Lib/tkinter/ttk.py b/Lib/tkinter/ttk.py --- a/Lib/tkinter/ttk.py +++ b/Lib/tkinter/ttk.py @@ -1253,7 +1253,7 @@ def exists(self, item): - """Returns True if the specified item is present in the three, + """Returns True if the specified item is present in the tree, False otherwise.""" return bool(self.tk.call(self._w, "exists", item)) -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Wed Apr 4 20:19:16 2012 From: python-checkins at python.org (georg.brandl) Date: Wed, 04 Apr 2012 20:19:16 +0200 Subject: [Python-checkins] =?utf8?q?cpython_=282=2E7=29=3A_Transplant_4528?= =?utf8?q?7f2799f5_from_default_branch=2E?= Message-ID: http://hg.python.org/cpython/rev/9b472b24e5c9 changeset: 76104:9b472b24e5c9 branch: 2.7 parent: 76093:2c1ce04ded55 user: Georg Brandl date: Wed Apr 04 20:19:09 2012 +0200 summary: Transplant 45287f2799f5 from default branch. files: Lib/lib-tk/ttk.py | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/Lib/lib-tk/ttk.py b/Lib/lib-tk/ttk.py --- a/Lib/lib-tk/ttk.py +++ b/Lib/lib-tk/ttk.py @@ -1253,7 +1253,7 @@ def exists(self, item): - """Returns True if the specified item is present in the three, + """Returns True if the specified item is present in the tree, False otherwise.""" return bool(self.tk.call(self._w, "exists", item)) -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Thu Apr 5 00:11:58 2012 From: python-checkins at python.org (antoine.pitrou) Date: Thu, 05 Apr 2012 00:11:58 +0200 Subject: [Python-checkins] =?utf8?q?cpython=3A_Rename_=5FPyIter=5FGetBuilt?= =?utf8?q?in_to_=5FPyObject=5FGetBuiltin=2C_and_do_not_include_it_in_the?= Message-ID: http://hg.python.org/cpython/rev/fb907ee4fdfa changeset: 76105:fb907ee4fdfa parent: 76103:68ef2fc47da6 user: Antoine Pitrou date: Thu Apr 05 00:04:20 2012 +0200 summary: Rename _PyIter_GetBuiltin to _PyObject_GetBuiltin, and do not include it in the stable ABI. 
files: Include/iterobject.h | 2 -- Include/object.h | 5 +++++ Modules/arraymodule.c | 2 +- Objects/bytearrayobject.c | 4 ++-- Objects/bytesobject.c | 4 ++-- Objects/dictobject.c | 2 +- Objects/iterobject.c | 21 ++++----------------- Objects/listobject.c | 6 +++--- Objects/object.c | 13 +++++++++++++ Objects/rangeobject.c | 4 ++-- Objects/setobject.c | 2 +- Objects/tupleobject.c | 4 ++-- Objects/unicodeobject.c | 4 ++-- 13 files changed, 38 insertions(+), 35 deletions(-) diff --git a/Include/iterobject.h b/Include/iterobject.h --- a/Include/iterobject.h +++ b/Include/iterobject.h @@ -18,8 +18,6 @@ PyAPI_FUNC(PyObject *) PyCallIter_New(PyObject *, PyObject *); -PyAPI_FUNC(PyObject *) _PyIter_GetBuiltin(const char *iter); - #ifdef __cplusplus } #endif diff --git a/Include/object.h b/Include/object.h --- a/Include/object.h +++ b/Include/object.h @@ -535,6 +535,11 @@ _PyObject_GenericSetAttrWithDict(PyObject *, PyObject *, PyObject *, PyObject *); +/* Helper to look up a builtin object */ +#ifndef Py_LIMITED_API +PyAPI_FUNC(PyObject *) +_PyObject_GetBuiltin(const char *name); +#endif /* PyObject_Dir(obj) acts like Python builtins.dir(obj), returning a list of strings. PyObject_Dir(NULL) is like builtins.dir(), diff --git a/Modules/arraymodule.c b/Modules/arraymodule.c --- a/Modules/arraymodule.c +++ b/Modules/arraymodule.c @@ -2756,7 +2756,7 @@ static PyObject * arrayiter_reduce(arrayiterobject *it) { - return Py_BuildValue("N(O)n", _PyIter_GetBuiltin("iter"), + return Py_BuildValue("N(O)n", _PyObject_GetBuiltin("iter"), it->ao, it->index); } diff --git a/Objects/bytearrayobject.c b/Objects/bytearrayobject.c --- a/Objects/bytearrayobject.c +++ b/Objects/bytearrayobject.c @@ -3018,13 +3018,13 @@ bytearrayiter_reduce(bytesiterobject *it) { if (it->it_seq != NULL) { - return Py_BuildValue("N(O)n", _PyIter_GetBuiltin("iter"), + return Py_BuildValue("N(O)n", _PyObject_GetBuiltin("iter"), it->it_seq, it->it_index); } else { PyObject *u = PyUnicode_FromUnicode(NULL, 0); if (u == NULL) return NULL; - return Py_BuildValue("N(N)", _PyIter_GetBuiltin("iter"), u); + return Py_BuildValue("N(N)", _PyObject_GetBuiltin("iter"), u); } } diff --git a/Objects/bytesobject.c b/Objects/bytesobject.c --- a/Objects/bytesobject.c +++ b/Objects/bytesobject.c @@ -3078,13 +3078,13 @@ striter_reduce(striterobject *it) { if (it->it_seq != NULL) { - return Py_BuildValue("N(O)n", _PyIter_GetBuiltin("iter"), + return Py_BuildValue("N(O)n", _PyObject_GetBuiltin("iter"), it->it_seq, it->it_index); } else { PyObject *u = PyUnicode_FromUnicode(NULL, 0); if (u == NULL) return NULL; - return Py_BuildValue("N(N)", _PyIter_GetBuiltin("iter"), u); + return Py_BuildValue("N(N)", _PyObject_GetBuiltin("iter"), u); } } diff --git a/Objects/dictobject.c b/Objects/dictobject.c --- a/Objects/dictobject.c +++ b/Objects/dictobject.c @@ -2610,7 +2610,7 @@ Py_DECREF(list); return NULL; } - return Py_BuildValue("N(N)", _PyIter_GetBuiltin("iter"), list); + return Py_BuildValue("N(N)", _PyObject_GetBuiltin("iter"), list); } /***********************************************/ diff --git a/Objects/iterobject.c b/Objects/iterobject.c --- a/Objects/iterobject.c +++ b/Objects/iterobject.c @@ -2,19 +2,6 @@ #include "Python.h" -/* Convenience function to get builtins.iter or builtins.reversed */ -PyObject * -_PyIter_GetBuiltin(const char *iter) -{ - PyObject *mod, *attr; - mod = PyImport_ImportModule("builtins"); - if (mod == NULL) - return NULL; - attr = PyObject_GetAttrString(mod, iter); - Py_DECREF(mod); - return attr; -} - typedef struct { PyObject_HEAD 
long it_index; @@ -105,10 +92,10 @@ iter_reduce(seqiterobject *it) { if (it->it_seq != NULL) - return Py_BuildValue("N(O)n", _PyIter_GetBuiltin("iter"), + return Py_BuildValue("N(O)n", _PyObject_GetBuiltin("iter"), it->it_seq, it->it_index); else - return Py_BuildValue("N(())", _PyIter_GetBuiltin("iter")); + return Py_BuildValue("N(())", _PyObject_GetBuiltin("iter")); } PyDoc_STRVAR(reduce_doc, "Return state information for pickling."); @@ -242,10 +229,10 @@ calliter_reduce(calliterobject *it) { if (it->it_callable != NULL && it->it_sentinel != NULL) - return Py_BuildValue("N(OO)", _PyIter_GetBuiltin("iter"), + return Py_BuildValue("N(OO)", _PyObject_GetBuiltin("iter"), it->it_callable, it->it_sentinel); else - return Py_BuildValue("N(())", _PyIter_GetBuiltin("iter")); + return Py_BuildValue("N(())", _PyObject_GetBuiltin("iter")); } static PyMethodDef calliter_methods[] = { diff --git a/Objects/listobject.c b/Objects/listobject.c --- a/Objects/listobject.c +++ b/Objects/listobject.c @@ -2949,17 +2949,17 @@ if (forward) { listiterobject *it = (listiterobject *)_it; if (it->it_seq) - return Py_BuildValue("N(O)l", _PyIter_GetBuiltin("iter"), + return Py_BuildValue("N(O)l", _PyObject_GetBuiltin("iter"), it->it_seq, it->it_index); } else { listreviterobject *it = (listreviterobject *)_it; if (it->it_seq) - return Py_BuildValue("N(O)n", _PyIter_GetBuiltin("reversed"), + return Py_BuildValue("N(O)n", _PyObject_GetBuiltin("reversed"), it->it_seq, it->it_index); } /* empty iterator, create an empty list */ list = PyList_New(0); if (list == NULL) return NULL; - return Py_BuildValue("N(N)", _PyIter_GetBuiltin("iter"), list); + return Py_BuildValue("N(N)", _PyObject_GetBuiltin("iter"), list); } diff --git a/Objects/object.c b/Objects/object.c --- a/Objects/object.c +++ b/Objects/object.c @@ -1026,6 +1026,19 @@ return obj; } +/* Convenience function to get a builtin from its name */ +PyObject * +_PyObject_GetBuiltin(const char *name) +{ + PyObject *mod, *attr; + mod = PyImport_ImportModule("builtins"); + if (mod == NULL) + return NULL; + attr = PyObject_GetAttrString(mod, name); + Py_DECREF(mod); + return attr; +} + /* Helper used when the __next__ method is removed from a type: tp_iternext is never NULL and can be safely called without checking on every iteration. 
diff --git a/Objects/rangeobject.c b/Objects/rangeobject.c --- a/Objects/rangeobject.c +++ b/Objects/rangeobject.c @@ -985,7 +985,7 @@ if (range == NULL) goto err; /* return the result */ - return Py_BuildValue("N(N)i", _PyIter_GetBuiltin("iter"), range, r->index); + return Py_BuildValue("N(N)i", _PyObject_GetBuiltin("iter"), range, r->index); err: Py_XDECREF(start); Py_XDECREF(stop); @@ -1171,7 +1171,7 @@ } /* return the result */ - return Py_BuildValue("N(N)O", _PyIter_GetBuiltin("iter"), range, r->index); + return Py_BuildValue("N(N)O", _PyObject_GetBuiltin("iter"), range, r->index); } static PyObject * diff --git a/Objects/setobject.c b/Objects/setobject.c --- a/Objects/setobject.c +++ b/Objects/setobject.c @@ -856,7 +856,7 @@ Py_DECREF(list); return NULL; } - return Py_BuildValue("N(N)", _PyIter_GetBuiltin("iter"), list); + return Py_BuildValue("N(N)", _PyObject_GetBuiltin("iter"), list); } PyDoc_STRVAR(reduce_doc, "Return state information for pickling."); diff --git a/Objects/tupleobject.c b/Objects/tupleobject.c --- a/Objects/tupleobject.c +++ b/Objects/tupleobject.c @@ -971,10 +971,10 @@ tupleiter_reduce(tupleiterobject *it) { if (it->it_seq) - return Py_BuildValue("N(O)l", _PyIter_GetBuiltin("iter"), + return Py_BuildValue("N(O)l", _PyObject_GetBuiltin("iter"), it->it_seq, it->it_index); else - return Py_BuildValue("N(())", _PyIter_GetBuiltin("iter")); + return Py_BuildValue("N(())", _PyObject_GetBuiltin("iter")); } static PyObject * diff --git a/Objects/unicodeobject.c b/Objects/unicodeobject.c --- a/Objects/unicodeobject.c +++ b/Objects/unicodeobject.c @@ -14386,13 +14386,13 @@ unicodeiter_reduce(unicodeiterobject *it) { if (it->it_seq != NULL) { - return Py_BuildValue("N(O)n", _PyIter_GetBuiltin("iter"), + return Py_BuildValue("N(O)n", _PyObject_GetBuiltin("iter"), it->it_seq, it->it_index); } else { PyObject *u = PyUnicode_FromUnicode(NULL, 0); if (u == NULL) return NULL; - return Py_BuildValue("N(N)", _PyIter_GetBuiltin("iter"), u); + return Py_BuildValue("N(N)", _PyObject_GetBuiltin("iter"), u); } } -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Thu Apr 5 00:42:56 2012 From: python-checkins at python.org (victor.stinner) Date: Thu, 05 Apr 2012 00:42:56 +0200 Subject: [Python-checkins] =?utf8?q?peps=3A_PEP_418=3A_Add_link_to_Monoclo?= =?utf8?q?ck_library_and_an_old_article_on_Windows_clocks?= Message-ID: http://hg.python.org/peps/rev/d0033fc0844f changeset: 4202:d0033fc0844f user: Victor Stinner date: Thu Apr 05 00:38:51 2012 +0200 summary: PEP 418: Add link to Monoclock library and an old article on Windows clocks Mention also another issue of QueryPerformanceCounter() files: pep-0418.txt | 8 ++++++++ 1 files changed, 8 insertions(+), 0 deletions(-) diff --git a/pep-0418.txt b/pep-0418.txt --- a/pep-0418.txt +++ b/pep-0418.txt @@ -411,6 +411,9 @@ each processor. The bug was fixed in Windows XP SP2. * Issues with processor with variable frequency: the frequency is changed depending on the workload to reduce memory consumption. +* Chromium don't use QueryPerformanceCounter() on Athlon X2 CPUs (model 15) + because "QueryPerformanceCounter is unreliable" (see base/time_win.cc in + Chromium source code) .. _KB896256: http://support.microsoft.com/?id=896256 .. 
_KB274323: http://support.microsoft.com/?id=274323 @@ -905,6 +908,9 @@ * `python-monotonic-time `_ (`github `_) +* `Monoclock.nano_count() + `_ uses clock_gettime(CLOCK_MONOTONIC) + and returns a number of nanoseconds * `monotonic_clock `_ * `Perl: Time::HiRes `_ exposes clock_gettime(CLOCK_MONOTONIC) @@ -961,6 +967,8 @@ `_: How the IBM High Resolution Time Stamp Facility accurately measures itty bits of time +* `Win32 Performance Measurement Options + `_ by Matthew Wilson, May 01, 2003 Copyright -- Repository URL: http://hg.python.org/peps From python-checkins at python.org Thu Apr 5 03:29:46 2012 From: python-checkins at python.org (r.david.murray) Date: Thu, 05 Apr 2012 03:29:46 +0200 Subject: [Python-checkins] =?utf8?b?Y3B5dGhvbiAoMy4yKTogIzE0NDkwLCAjMTQ0?= =?utf8?q?91=3A_add_=27sundry=27-style_import_tests_for_Tools/scripts=2E?= Message-ID: http://hg.python.org/cpython/rev/62dde5dd475e changeset: 76106:62dde5dd475e branch: 3.2 parent: 76102:45287f2799f5 user: R David Murray date: Wed Apr 04 21:28:14 2012 -0400 summary: #14490, #14491: add 'sundry'-style import tests for Tools/scripts. This patch changes a few of the scripts to have __name__=='__main__' clauses so that they are importable without running. Also fixes the syntax errors revealed by the tests. files: Lib/test/test_tools.py | 45 +++++++- Tools/scripts/abitype.py | 88 ++++++++------- Tools/scripts/find_recursionlimit.py | 24 ++-- Tools/scripts/findnocoding.py | 46 ++++---- Tools/scripts/fixcid.py | 2 +- Tools/scripts/md5sum.py | 2 +- Tools/scripts/parseentities.py | 3 +- 7 files changed, 128 insertions(+), 82 deletions(-) diff --git a/Lib/test/test_tools.py b/Lib/test/test_tools.py --- a/Lib/test/test_tools.py +++ b/Lib/test/test_tools.py @@ -5,6 +5,7 @@ """ import os +import sys import unittest import sysconfig from test import support @@ -17,10 +18,11 @@ srcdir = sysconfig.get_config_var('projectbase') basepath = os.path.join(os.getcwd(), srcdir, 'Tools') +scriptsdir = os.path.join(basepath, 'scripts') class ReindentTests(unittest.TestCase): - script = os.path.join(basepath, 'scripts', 'reindent.py') + script = os.path.join(scriptsdir, 'reindent.py') def test_noargs(self): assert_python_ok(self.script) @@ -31,8 +33,47 @@ self.assertGreater(err, b'') +class TestSundryScripts(unittest.TestCase): + # At least make sure the rest don't have syntax errors. When tests are + # added for a script it should be added to the whitelist below. + + # scripts that have independent tests. 
+ whitelist = ['reindent.py'] + # scripts that can't be imported without running + blacklist = ['make_ctype.py'] + # scripts that use windows-only modules + windows_only = ['win_add2path.py'] + # blacklisted for other reasons + other = ['analyze_dxp.py'] + + skiplist = blacklist + whitelist + windows_only + other + + def setUp(self): + cm = support.DirsOnSysPath(scriptsdir) + cm.__enter__() + self.addCleanup(cm.__exit__) + + def test_sundry(self): + for fn in os.listdir(scriptsdir): + if fn.endswith('.py') and fn not in self.skiplist: + __import__(fn[:-3]) + + @unittest.skipIf(sys.platform != "win32", "Windows-only test") + def test_sundry_windows(self): + for fn in self.windows_only: + __import__(fn[:-3]) + + def test_analyze_dxp_import(self): + if hasattr(sys, 'getdxp'): + import analyze_dxp + else: + with self.assertRaises(RuntimeError): + import analyze_dxp + + def test_main(): - support.run_unittest(ReindentTests) + support.run_unittest(*[obj for obj in globals().values() + if isinstance(obj, type)]) if __name__ == '__main__': diff --git a/Tools/scripts/abitype.py b/Tools/scripts/abitype.py --- a/Tools/scripts/abitype.py +++ b/Tools/scripts/abitype.py @@ -3,34 +3,6 @@ # Usage: abitype.py < old_code > new_code import re, sys -############ Simplistic C scanner ################################## -tokenizer = re.compile( - r"(?P#.*\n)" - r"|(?P/\*.*?\*/)" - r"|(?P[a-zA-Z_][a-zA-Z0-9_]*)" - r"|(?P[ \t\n]+)" - r"|(?P.)", - re.MULTILINE) - -tokens = [] -source = sys.stdin.read() -pos = 0 -while pos != len(source): - m = tokenizer.match(source, pos) - tokens.append([m.lastgroup, m.group()]) - pos += len(tokens[-1][1]) - if tokens[-1][0] == 'preproc': - # continuation lines are considered - # only in preprocess statements - while tokens[-1][1].endswith('\\\n'): - nl = source.find('\n', pos) - if nl == -1: - line = source[pos:] - else: - line = source[pos:nl+1] - tokens[-1][1] += line - pos += len(line) - ###### Replacement of PyTypeObject static instances ############## # classify each token, giving it a one-letter code: @@ -79,7 +51,7 @@ while tokens[pos][0] in ('ws', 'comment'): pos += 1 if tokens[pos][1] != 'PyVarObject_HEAD_INIT': - raise Exception, '%s has no PyVarObject_HEAD_INIT' % name + raise Exception('%s has no PyVarObject_HEAD_INIT' % name) while tokens[pos][1] != ')': pos += 1 pos += 1 @@ -183,18 +155,48 @@ return '\n'.join(res) -# Main loop: replace all static PyTypeObjects until -# there are none left. -while 1: - c = classify() - m = re.search('(SW)?TWIW?=W?{.*?};', c) - if not m: - break - start = m.start() - end = m.end() - name, fields = get_fields(start, m) - tokens[start:end] = [('',make_slots(name, fields))] +if __name__ == '__main__': -# Output result to stdout -for t, v in tokens: - sys.stdout.write(v) + ############ Simplistic C scanner ################################## + tokenizer = re.compile( + r"(?P#.*\n)" + r"|(?P/\*.*?\*/)" + r"|(?P[a-zA-Z_][a-zA-Z0-9_]*)" + r"|(?P[ \t\n]+)" + r"|(?P.)", + re.MULTILINE) + + tokens = [] + source = sys.stdin.read() + pos = 0 + while pos != len(source): + m = tokenizer.match(source, pos) + tokens.append([m.lastgroup, m.group()]) + pos += len(tokens[-1][1]) + if tokens[-1][0] == 'preproc': + # continuation lines are considered + # only in preprocess statements + while tokens[-1][1].endswith('\\\n'): + nl = source.find('\n', pos) + if nl == -1: + line = source[pos:] + else: + line = source[pos:nl+1] + tokens[-1][1] += line + pos += len(line) + + # Main loop: replace all static PyTypeObjects until + # there are none left. 
+ while 1: + c = classify() + m = re.search('(SW)?TWIW?=W?{.*?};', c) + if not m: + break + start = m.start() + end = m.end() + name, fields = get_fields(start, m) + tokens[start:end] = [('',make_slots(name, fields))] + + # Output result to stdout + for t, v in tokens: + sys.stdout.write(v) diff --git a/Tools/scripts/find_recursionlimit.py b/Tools/scripts/find_recursionlimit.py --- a/Tools/scripts/find_recursionlimit.py +++ b/Tools/scripts/find_recursionlimit.py @@ -106,14 +106,16 @@ else: print("Yikes!") -limit = 1000 -while 1: - check_limit(limit, "test_recurse") - check_limit(limit, "test_add") - check_limit(limit, "test_repr") - check_limit(limit, "test_init") - check_limit(limit, "test_getattr") - check_limit(limit, "test_getitem") - check_limit(limit, "test_cpickle") - print("Limit of %d is fine" % limit) - limit = limit + 100 +if __name__ == '__main__': + + limit = 1000 + while 1: + check_limit(limit, "test_recurse") + check_limit(limit, "test_add") + check_limit(limit, "test_repr") + check_limit(limit, "test_init") + check_limit(limit, "test_getattr") + check_limit(limit, "test_getitem") + check_limit(limit, "test_cpickle") + print("Limit of %d is fine" % limit) + limit = limit + 100 diff --git a/Tools/scripts/findnocoding.py b/Tools/scripts/findnocoding.py --- a/Tools/scripts/findnocoding.py +++ b/Tools/scripts/findnocoding.py @@ -76,29 +76,31 @@ -c: recognize Python source files trying to compile them -d: debug output""" % sys.argv[0] -try: - opts, args = getopt.getopt(sys.argv[1:], 'cd') -except getopt.error as msg: - print(msg, file=sys.stderr) - print(usage, file=sys.stderr) - sys.exit(1) +if __name__ == '__main__': -is_python = pysource.looks_like_python -debug = False + try: + opts, args = getopt.getopt(sys.argv[1:], 'cd') + except getopt.error as msg: + print(msg, file=sys.stderr) + print(usage, file=sys.stderr) + sys.exit(1) -for o, a in opts: - if o == '-c': - is_python = pysource.can_be_compiled - elif o == '-d': - debug = True + is_python = pysource.looks_like_python + debug = False -if not args: - print(usage, file=sys.stderr) - sys.exit(1) + for o, a in opts: + if o == '-c': + is_python = pysource.can_be_compiled + elif o == '-d': + debug = True -for fullpath in pysource.walk_python_files(args, is_python): - if debug: - print("Testing for coding: %s" % fullpath) - result = needs_declaration(fullpath) - if result: - print(fullpath) + if not args: + print(usage, file=sys.stderr) + sys.exit(1) + + for fullpath in pysource.walk_python_files(args, is_python): + if debug: + print("Testing for coding: %s" % fullpath) + result = needs_declaration(fullpath) + if result: + print(fullpath) diff --git a/Tools/scripts/fixcid.py b/Tools/scripts/fixcid.py --- a/Tools/scripts/fixcid.py +++ b/Tools/scripts/fixcid.py @@ -292,7 +292,7 @@ if not words: continue if len(words) == 3 and words[0] == 'struct': words[:2] = [words[0] + ' ' + words[1]] - elif len(words) <> 2: + elif len(words) != 2: err(substfile + '%s:%r: warning: bad line: %r' % (substfile, lineno, line)) continue if Reverse: diff --git a/Tools/scripts/md5sum.py b/Tools/scripts/md5sum.py --- a/Tools/scripts/md5sum.py +++ b/Tools/scripts/md5sum.py @@ -20,7 +20,7 @@ import sys import os import getopt -import md5 +from hashlib import md5 def sum(*files): sts = 0 diff --git a/Tools/scripts/parseentities.py b/Tools/scripts/parseentities.py --- a/Tools/scripts/parseentities.py +++ b/Tools/scripts/parseentities.py @@ -13,7 +13,6 @@ """ import re,sys -import TextTools entityRE = re.compile('') @@ -45,7 +44,7 @@ charcode = 
repr(charcode) else: charcode = repr(charcode) - comment = TextTools.collapse(comment) + comment = ' '.join(comment.split()) f.write(" '%s':\t%s, \t# %s\n" % (name,charcode,comment)) f.write('\n}\n') -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Thu Apr 5 03:29:47 2012 From: python-checkins at python.org (r.david.murray) Date: Thu, 05 Apr 2012 03:29:47 +0200 Subject: [Python-checkins] =?utf8?q?cpython_=28merge_3=2E2_-=3E_default=29?= =?utf8?q?=3A_Merge_=2314490=2C_=2314491=3A_add_=27sundry=27-style_import_?= =?utf8?q?tests_for_Tools/scripts=2E?= Message-ID: http://hg.python.org/cpython/rev/696cb524322a changeset: 76107:696cb524322a parent: 76105:fb907ee4fdfa parent: 76106:62dde5dd475e user: R David Murray date: Wed Apr 04 21:29:03 2012 -0400 summary: Merge #14490, #14491: add 'sundry'-style import tests for Tools/scripts. This patch changes a few of the scripts to have __name__=='__main__' clauses so that they are importable without running. Also fixes the syntax errors revealed by the tests. files: Lib/test/test_tools.py | 45 +++++++- Tools/scripts/abitype.py | 88 ++++++++------- Tools/scripts/find_recursionlimit.py | 24 ++-- Tools/scripts/findnocoding.py | 46 ++++---- Tools/scripts/fixcid.py | 2 +- Tools/scripts/md5sum.py | 2 +- Tools/scripts/parseentities.py | 3 +- 7 files changed, 128 insertions(+), 82 deletions(-) diff --git a/Lib/test/test_tools.py b/Lib/test/test_tools.py --- a/Lib/test/test_tools.py +++ b/Lib/test/test_tools.py @@ -5,6 +5,7 @@ """ import os +import sys import unittest import sysconfig from test import support @@ -17,10 +18,11 @@ srcdir = sysconfig.get_config_var('projectbase') basepath = os.path.join(os.getcwd(), srcdir, 'Tools') +scriptsdir = os.path.join(basepath, 'scripts') class ReindentTests(unittest.TestCase): - script = os.path.join(basepath, 'scripts', 'reindent.py') + script = os.path.join(scriptsdir, 'reindent.py') def test_noargs(self): assert_python_ok(self.script) @@ -31,8 +33,47 @@ self.assertGreater(err, b'') +class TestSundryScripts(unittest.TestCase): + # At least make sure the rest don't have syntax errors. When tests are + # added for a script it should be added to the whitelist below. + + # scripts that have independent tests. 
+ whitelist = ['reindent.py'] + # scripts that can't be imported without running + blacklist = ['make_ctype.py'] + # scripts that use windows-only modules + windows_only = ['win_add2path.py'] + # blacklisted for other reasons + other = ['analyze_dxp.py'] + + skiplist = blacklist + whitelist + windows_only + other + + def setUp(self): + cm = support.DirsOnSysPath(scriptsdir) + cm.__enter__() + self.addCleanup(cm.__exit__) + + def test_sundry(self): + for fn in os.listdir(scriptsdir): + if fn.endswith('.py') and fn not in self.skiplist: + __import__(fn[:-3]) + + @unittest.skipIf(sys.platform != "win32", "Windows-only test") + def test_sundry_windows(self): + for fn in self.windows_only: + __import__(fn[:-3]) + + def test_analyze_dxp_import(self): + if hasattr(sys, 'getdxp'): + import analyze_dxp + else: + with self.assertRaises(RuntimeError): + import analyze_dxp + + def test_main(): - support.run_unittest(ReindentTests) + support.run_unittest(*[obj for obj in globals().values() + if isinstance(obj, type)]) if __name__ == '__main__': diff --git a/Tools/scripts/abitype.py b/Tools/scripts/abitype.py --- a/Tools/scripts/abitype.py +++ b/Tools/scripts/abitype.py @@ -3,34 +3,6 @@ # Usage: abitype.py < old_code > new_code import re, sys -############ Simplistic C scanner ################################## -tokenizer = re.compile( - r"(?P#.*\n)" - r"|(?P/\*.*?\*/)" - r"|(?P[a-zA-Z_][a-zA-Z0-9_]*)" - r"|(?P[ \t\n]+)" - r"|(?P.)", - re.MULTILINE) - -tokens = [] -source = sys.stdin.read() -pos = 0 -while pos != len(source): - m = tokenizer.match(source, pos) - tokens.append([m.lastgroup, m.group()]) - pos += len(tokens[-1][1]) - if tokens[-1][0] == 'preproc': - # continuation lines are considered - # only in preprocess statements - while tokens[-1][1].endswith('\\\n'): - nl = source.find('\n', pos) - if nl == -1: - line = source[pos:] - else: - line = source[pos:nl+1] - tokens[-1][1] += line - pos += len(line) - ###### Replacement of PyTypeObject static instances ############## # classify each token, giving it a one-letter code: @@ -79,7 +51,7 @@ while tokens[pos][0] in ('ws', 'comment'): pos += 1 if tokens[pos][1] != 'PyVarObject_HEAD_INIT': - raise Exception, '%s has no PyVarObject_HEAD_INIT' % name + raise Exception('%s has no PyVarObject_HEAD_INIT' % name) while tokens[pos][1] != ')': pos += 1 pos += 1 @@ -183,18 +155,48 @@ return '\n'.join(res) -# Main loop: replace all static PyTypeObjects until -# there are none left. -while 1: - c = classify() - m = re.search('(SW)?TWIW?=W?{.*?};', c) - if not m: - break - start = m.start() - end = m.end() - name, fields = get_fields(start, m) - tokens[start:end] = [('',make_slots(name, fields))] +if __name__ == '__main__': -# Output result to stdout -for t, v in tokens: - sys.stdout.write(v) + ############ Simplistic C scanner ################################## + tokenizer = re.compile( + r"(?P#.*\n)" + r"|(?P/\*.*?\*/)" + r"|(?P[a-zA-Z_][a-zA-Z0-9_]*)" + r"|(?P[ \t\n]+)" + r"|(?P.)", + re.MULTILINE) + + tokens = [] + source = sys.stdin.read() + pos = 0 + while pos != len(source): + m = tokenizer.match(source, pos) + tokens.append([m.lastgroup, m.group()]) + pos += len(tokens[-1][1]) + if tokens[-1][0] == 'preproc': + # continuation lines are considered + # only in preprocess statements + while tokens[-1][1].endswith('\\\n'): + nl = source.find('\n', pos) + if nl == -1: + line = source[pos:] + else: + line = source[pos:nl+1] + tokens[-1][1] += line + pos += len(line) + + # Main loop: replace all static PyTypeObjects until + # there are none left. 
+ while 1: + c = classify() + m = re.search('(SW)?TWIW?=W?{.*?};', c) + if not m: + break + start = m.start() + end = m.end() + name, fields = get_fields(start, m) + tokens[start:end] = [('',make_slots(name, fields))] + + # Output result to stdout + for t, v in tokens: + sys.stdout.write(v) diff --git a/Tools/scripts/find_recursionlimit.py b/Tools/scripts/find_recursionlimit.py --- a/Tools/scripts/find_recursionlimit.py +++ b/Tools/scripts/find_recursionlimit.py @@ -106,14 +106,16 @@ else: print("Yikes!") -limit = 1000 -while 1: - check_limit(limit, "test_recurse") - check_limit(limit, "test_add") - check_limit(limit, "test_repr") - check_limit(limit, "test_init") - check_limit(limit, "test_getattr") - check_limit(limit, "test_getitem") - check_limit(limit, "test_cpickle") - print("Limit of %d is fine" % limit) - limit = limit + 100 +if __name__ == '__main__': + + limit = 1000 + while 1: + check_limit(limit, "test_recurse") + check_limit(limit, "test_add") + check_limit(limit, "test_repr") + check_limit(limit, "test_init") + check_limit(limit, "test_getattr") + check_limit(limit, "test_getitem") + check_limit(limit, "test_cpickle") + print("Limit of %d is fine" % limit) + limit = limit + 100 diff --git a/Tools/scripts/findnocoding.py b/Tools/scripts/findnocoding.py --- a/Tools/scripts/findnocoding.py +++ b/Tools/scripts/findnocoding.py @@ -76,29 +76,31 @@ -c: recognize Python source files trying to compile them -d: debug output""" % sys.argv[0] -try: - opts, args = getopt.getopt(sys.argv[1:], 'cd') -except getopt.error as msg: - print(msg, file=sys.stderr) - print(usage, file=sys.stderr) - sys.exit(1) +if __name__ == '__main__': -is_python = pysource.looks_like_python -debug = False + try: + opts, args = getopt.getopt(sys.argv[1:], 'cd') + except getopt.error as msg: + print(msg, file=sys.stderr) + print(usage, file=sys.stderr) + sys.exit(1) -for o, a in opts: - if o == '-c': - is_python = pysource.can_be_compiled - elif o == '-d': - debug = True + is_python = pysource.looks_like_python + debug = False -if not args: - print(usage, file=sys.stderr) - sys.exit(1) + for o, a in opts: + if o == '-c': + is_python = pysource.can_be_compiled + elif o == '-d': + debug = True -for fullpath in pysource.walk_python_files(args, is_python): - if debug: - print("Testing for coding: %s" % fullpath) - result = needs_declaration(fullpath) - if result: - print(fullpath) + if not args: + print(usage, file=sys.stderr) + sys.exit(1) + + for fullpath in pysource.walk_python_files(args, is_python): + if debug: + print("Testing for coding: %s" % fullpath) + result = needs_declaration(fullpath) + if result: + print(fullpath) diff --git a/Tools/scripts/fixcid.py b/Tools/scripts/fixcid.py --- a/Tools/scripts/fixcid.py +++ b/Tools/scripts/fixcid.py @@ -292,7 +292,7 @@ if not words: continue if len(words) == 3 and words[0] == 'struct': words[:2] = [words[0] + ' ' + words[1]] - elif len(words) <> 2: + elif len(words) != 2: err(substfile + '%s:%r: warning: bad line: %r' % (substfile, lineno, line)) continue if Reverse: diff --git a/Tools/scripts/md5sum.py b/Tools/scripts/md5sum.py --- a/Tools/scripts/md5sum.py +++ b/Tools/scripts/md5sum.py @@ -20,7 +20,7 @@ import sys import os import getopt -import md5 +from hashlib import md5 def sum(*files): sts = 0 diff --git a/Tools/scripts/parseentities.py b/Tools/scripts/parseentities.py --- a/Tools/scripts/parseentities.py +++ b/Tools/scripts/parseentities.py @@ -13,7 +13,6 @@ """ import re,sys -import TextTools entityRE = re.compile('') @@ -45,7 +44,7 @@ charcode = 
repr(charcode) else: charcode = repr(charcode) - comment = TextTools.collapse(comment) + comment = ' '.join(comment.split()) f.write(" '%s':\t%s, \t# %s\n" % (name,charcode,comment)) f.write('\n}\n') -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Thu Apr 5 04:38:43 2012 From: python-checkins at python.org (r.david.murray) Date: Thu, 05 Apr 2012 04:38:43 +0200 Subject: [Python-checkins] =?utf8?q?cpython_=283=2E2=29=3A_test=5Ftools_fi?= =?utf8?q?x=3A_don=27t_import_analyze=5Fdxp_if_no_=5Fthread_module?= Message-ID: http://hg.python.org/cpython/rev/058f9a9f97cd changeset: 76108:058f9a9f97cd branch: 3.2 parent: 76106:62dde5dd475e user: R David Murray date: Wed Apr 04 22:37:50 2012 -0400 summary: test_tools fix: don't import analyze_dxp if no _thread module files: Lib/test/test_tools.py | 1 + 1 files changed, 1 insertions(+), 0 deletions(-) diff --git a/Lib/test/test_tools.py b/Lib/test/test_tools.py --- a/Lib/test/test_tools.py +++ b/Lib/test/test_tools.py @@ -63,6 +63,7 @@ for fn in self.windows_only: __import__(fn[:-3]) + @unittest.skipIf(not support.threading, "test requires _thread module") def test_analyze_dxp_import(self): if hasattr(sys, 'getdxp'): import analyze_dxp -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Thu Apr 5 04:38:43 2012 From: python-checkins at python.org (r.david.murray) Date: Thu, 05 Apr 2012 04:38:43 +0200 Subject: [Python-checkins] =?utf8?q?cpython_=28merge_3=2E2_-=3E_default=29?= =?utf8?q?=3A_Merge=3A_test=5Ftools_fix=3A_don=27t_import_analyze=5Fdxp_if?= =?utf8?q?_no_=5Fthread_module?= Message-ID: http://hg.python.org/cpython/rev/0da8ea486026 changeset: 76109:0da8ea486026 parent: 76107:696cb524322a parent: 76108:058f9a9f97cd user: R David Murray date: Wed Apr 04 22:38:26 2012 -0400 summary: Merge: test_tools fix: don't import analyze_dxp if no _thread module files: Lib/test/test_tools.py | 1 + 1 files changed, 1 insertions(+), 0 deletions(-) diff --git a/Lib/test/test_tools.py b/Lib/test/test_tools.py --- a/Lib/test/test_tools.py +++ b/Lib/test/test_tools.py @@ -63,6 +63,7 @@ for fn in self.windows_only: __import__(fn[:-3]) + @unittest.skipIf(not support.threading, "test requires _thread module") def test_analyze_dxp_import(self): if hasattr(sys, 'getdxp'): import analyze_dxp -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Thu Apr 5 04:42:51 2012 From: python-checkins at python.org (eli.bendersky) Date: Thu, 05 Apr 2012 04:42:51 +0200 Subject: [Python-checkins] =?utf8?q?cpython=3A_Replace_bootstrap_imports_w?= =?utf8?q?ith_real_C_API_calls=2E?= Message-ID: http://hg.python.org/cpython/rev/1ddf58262706 changeset: 76110:1ddf58262706 user: Eli Bendersky date: Thu Apr 05 05:40:58 2012 +0300 summary: Replace bootstrap imports with real C API calls. 
files: Modules/_elementtree.c | 17 +++++++++-------- 1 files changed, 9 insertions(+), 8 deletions(-) diff --git a/Modules/_elementtree.c b/Modules/_elementtree.c --- a/Modules/_elementtree.c +++ b/Modules/_elementtree.c @@ -3034,8 +3034,7 @@ PyMODINIT_FUNC PyInit__elementtree(void) { - PyObject* m; - PyObject* g; + PyObject *m, *g, *temp; char* bootstrap; /* Initialize object types */ @@ -3067,10 +3066,6 @@ PyDict_SetItemString(g, "__builtins__", PyEval_GetBuiltins()); bootstrap = ( - - "from copy import deepcopy\n" - "from xml.etree import ElementPath\n" - "def iter(node, tag=None):\n" /* helper */ " if tag == '*':\n" " tag = None\n" @@ -3094,8 +3089,14 @@ if (!PyRun_String(bootstrap, Py_file_input, g, NULL)) return NULL; - elementpath_obj = PyDict_GetItemString(g, "ElementPath"); - elementtree_deepcopy_obj = PyDict_GetItemString(g, "deepcopy"); + if (!(temp = PyImport_ImportModule("copy"))) + return NULL; + elementtree_deepcopy_obj = PyObject_GetAttrString(temp, "deepcopy"); + Py_XDECREF(temp); + + if (!(elementpath_obj = PyImport_ImportModule("xml.etree.ElementPath"))) + return NULL; + elementtree_iter_obj = PyDict_GetItemString(g, "iter"); elementtree_itertext_obj = PyDict_GetItemString(g, "itertext"); -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Thu Apr 5 05:02:33 2012 From: python-checkins at python.org (eric.araujo) Date: Thu, 05 Apr 2012 05:02:33 +0200 Subject: [Python-checkins] =?utf8?q?cpython=3A_A_few_tweaks_to_whatsnew/3?= =?utf8?b?LjMgKGZpeGVzICMxNDM2Mik=?= Message-ID: http://hg.python.org/cpython/rev/deadd0823ab3 changeset: 76111:deadd0823ab3 user: ?ric Araujo date: Wed Apr 04 23:01:01 2012 -0400 summary: A few tweaks to whatsnew/3.3 (fixes #14362) files: Doc/whatsnew/3.3.rst | 49 ++++++++++++++++++++++++++----- Misc/NEWS | 4 +- 2 files changed, 43 insertions(+), 10 deletions(-) diff --git a/Doc/whatsnew/3.3.rst b/Doc/whatsnew/3.3.rst --- a/Doc/whatsnew/3.3.rst +++ b/Doc/whatsnew/3.3.rst @@ -574,6 +574,26 @@ The ``unicode_internal`` codec has been deprecated. + +collections +----------- + +Addition of a new :class:`~collections.ChainMap` class to allow treating a +number of mappings as a single unit. + +(Written by Raymond Hettinger for :issue:`11089`, made public in +:issue:`11297`) + +The abstract base classes have been moved in a new :mod:`collections.abc` +module, to better differentiate between the abstract and the concrete +collections classes. Aliases for ABCs are still present in the +:mod:`collections` module to preserve existing imports. + +(:issue:`11085`) + +.. XXX addition of __slots__ to ABCs not recorded here: internal detail + + crypt ----- @@ -867,11 +887,12 @@ --------- :mod:`distutils` has undergone additions and refactoring under a new name, -:mod:`packaging`, to allow developers to break backward compatibility. +:mod:`packaging`, to allow developers to make far-reaching changes without +being constrained by backward compatibility. :mod:`distutils` is still provided in the standard library, but users are encouraged to transition to :mod:`packaging`. For older versions of Python, a -backport compatible with 2.4+ and 3.1+ will be made available on PyPI under the -name :mod:`distutils2`. +backport compatible with Python 2.5 and newer and 3.2 is available on PyPI +under the name `distutils2 `_. .. 
TODO add examples and howto to the packaging docs and link to them @@ -1059,12 +1080,24 @@ (:issue:`1673007`) +webbrowser +---------- + +The :mod:`webbrowser` module supports more browsers: Google Chrome (named +:program:`chrome`, :program:`chromium`, :program:`chrome-browser` or +:program:`chromium-browser` depending on the version and operating system) as +well as the the generic launchers :program:`xdg-open` from the FreeDesktop.org +project and :program:`gvfs-open` which is the default URI handler for GNOME 3. + +(:issue:`13620` and :issue:`14493`) + + Optimizations ============= Major performance enhancements have been added: -* Thanks to the :pep:`393`, some operations on Unicode strings has been optimized: +* Thanks to :pep:`393`, some operations on Unicode strings have been optimized: * the memory footprint is divided by 2 to 4 depending on the text * encode an ASCII string to UTF-8 doesn't need to encode characters anymore, @@ -1083,7 +1116,7 @@ * :c:func:`PyMemoryView_FromMemory` -* The :pep:`393` added new Unicode types, macros and functions: +* :pep:`393` added new Unicode types, macros and functions: * High-level API: @@ -1126,7 +1159,7 @@ Deprecated Python modules, functions and methods ------------------------------------------------ -* The :mod:`distutils` modules has been deprecated. Use the new +* The :mod:`distutils` module has been deprecated. Use the new :mod:`packaging` module instead. * The ``unicode_internal`` codec has been deprecated because of the :pep:`393`, use UTF-8, UTF-16 (``utf-16-le`` or ``utf-16-be``), or UTF-32 @@ -1145,7 +1178,7 @@ Deprecated functions and types of the C API ------------------------------------------- -The :c:type:`Py_UNICODE` has been deprecated by the :pep:`393` and will be +The :c:type:`Py_UNICODE` has been deprecated by :pep:`393` and will be removed in Python 4. All functions using this type are deprecated: Unicode functions and methods using :c:type:`Py_UNICODE` and @@ -1247,7 +1280,7 @@ functions using this type are deprecated (but will stay available for at least five years). If you were using low-level Unicode APIs to construct and access unicode objects and you want to benefit of the - memory footprint reduction provided by the PEP 393, you have to convert + memory footprint reduction provided by PEP 393, you have to convert your code to the new :doc:`Unicode API <../c-api/unicode>`. However, if you only have been using high-level functions such as diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -29,7 +29,7 @@ a multiprocessing Client or Listener with an AF_PIPE type address under non-Windows platforms. Patch by Popa Claudiu. -- Issue #14493: Use gvfs-open/xdg-open in Lib/webbrowser.py. +- Issue #14493: Use gvfs-open or xdg-open in webbrowser. What's New in Python 3.3.0 Alpha 2? @@ -1007,7 +1007,7 @@ - Issue #11006: Don't issue low level warning in subprocess when pipe2() fails. -- Issue #13620: Support for Chrome browser in webbrowser.py Patch contributed +- Issue #13620: Support for Chrome browser in webbrowser. Patch contributed by Arnaud Calmettes. 
- Issue #11829: Fix code execution holes in inspect.getattr_static for -- Repository URL: http://hg.python.org/cpython From solipsis at pitrou.net Thu Apr 5 05:35:22 2012 From: solipsis at pitrou.net (solipsis at pitrou.net) Date: Thu, 05 Apr 2012 05:35:22 +0200 Subject: [Python-checkins] Daily reference leaks (696cb524322a): sum=4 Message-ID: results for 696cb524322a on branch "default" -------------------------------------------- test_support leaked [4, 0, 0] references, sum=4 Command line was: ['./python', '-m', 'test.regrtest', '-uall', '-R', '3:3:/home/antoine/cpython/refleaks/reflog7IwXDg', '-x'] From python-checkins at python.org Thu Apr 5 05:44:41 2012 From: python-checkins at python.org (eli.bendersky) Date: Thu, 05 Apr 2012 05:44:41 +0200 Subject: [Python-checkins] =?utf8?q?cpython=3A_Reformat_by_replacing_tabs_?= =?utf8?q?with_4-spaces=2E_Makes_the_code_more_PEP-7_compliant?= Message-ID: http://hg.python.org/cpython/rev/64b8fdb0bfa9 changeset: 76112:64b8fdb0bfa9 user: Eli Bendersky date: Thu Apr 05 06:42:48 2012 +0300 summary: Reformat by replacing tabs with 4-spaces. Makes the code more PEP-7 compliant and gets rid of some gross formatting on tab=4-space-configured editors. files: Include/methodobject.h | 16 ++++++++-------- 1 files changed, 8 insertions(+), 8 deletions(-) diff --git a/Include/methodobject.h b/Include/methodobject.h --- a/Include/methodobject.h +++ b/Include/methodobject.h @@ -17,7 +17,7 @@ typedef PyObject *(*PyCFunction)(PyObject *, PyObject *); typedef PyObject *(*PyCFunctionWithKeywords)(PyObject *, PyObject *, - PyObject *); + PyObject *); typedef PyObject *(*PyNoArgsFunction)(PyObject *); PyAPI_FUNC(PyCFunction) PyCFunction_GetFunction(PyObject *); @@ -33,22 +33,22 @@ (((PyCFunctionObject *)func) -> m_ml -> ml_flags & METH_STATIC ? \ NULL : ((PyCFunctionObject *)func) -> m_self) #define PyCFunction_GET_FLAGS(func) \ - (((PyCFunctionObject *)func) -> m_ml -> ml_flags) + (((PyCFunctionObject *)func) -> m_ml -> ml_flags) #endif PyAPI_FUNC(PyObject *) PyCFunction_Call(PyObject *, PyObject *, PyObject *); struct PyMethodDef { - const char *ml_name; /* The name of the built-in function/method */ - PyCFunction ml_meth; /* The C function that implements it */ - int ml_flags; /* Combination of METH_xxx flags, which mostly - describe the args expected by the C func */ - const char *ml_doc; /* The __doc__ attribute, or NULL */ + const char *ml_name; /* The name of the built-in function/method */ + PyCFunction ml_meth; /* The C function that implements it */ + int ml_flags; /* Combination of METH_xxx flags, which mostly + describe the args expected by the C func */ + const char *ml_doc; /* The __doc__ attribute, or NULL */ }; typedef struct PyMethodDef PyMethodDef; #define PyCFunction_New(ML, SELF) PyCFunction_NewEx((ML), (SELF), NULL) PyAPI_FUNC(PyObject *) PyCFunction_NewEx(PyMethodDef *, PyObject *, - PyObject *); + PyObject *); /* Flag passed to newmethodobject */ /* #define METH_OLDARGS 0x0000 -- unsupported now */ -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Thu Apr 5 09:04:32 2012 From: python-checkins at python.org (georg.brandl) Date: Thu, 05 Apr 2012 09:04:32 +0200 Subject: [Python-checkins] =?utf8?q?cpython_=282=2E7=29=3A_Closes_=2314489?= =?utf8?q?=3A_correct_link_target=2E?= Message-ID: http://hg.python.org/cpython/rev/4416efeb0163 changeset: 76113:4416efeb0163 branch: 2.7 parent: 76104:9b472b24e5c9 user: Georg Brandl date: Thu Apr 05 09:04:40 2012 +0200 summary: Closes #14489: correct link target. 
files: Doc/library/functions.rst | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/Doc/library/functions.rst b/Doc/library/functions.rst --- a/Doc/library/functions.rst +++ b/Doc/library/functions.rst @@ -19,7 +19,7 @@ :func:`bytearray` :func:`float` :func:`list` :func:`raw_input` :func:`unichr` :func:`callable` :func:`format` :func:`locals` :func:`reduce` :func:`unicode` :func:`chr` :func:`frozenset` :func:`long` :func:`reload` :func:`vars` -:func:`classmethod` :func:`getattr` :func:`map` :func:`repr` :func:`xrange` +:func:`classmethod` :func:`getattr` :func:`map` :func:`.repr` :func:`xrange` :func:`cmp` :func:`globals` :func:`max` :func:`reversed` :func:`zip` :func:`compile` :func:`hasattr` :func:`memoryview` :func:`round` :func:`__import__` :func:`complex` :func:`hash` :func:`min` :func:`set` :func:`apply` -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Thu Apr 5 09:53:00 2012 From: python-checkins at python.org (georg.brandl) Date: Thu, 05 Apr 2012 09:53:00 +0200 Subject: [Python-checkins] =?utf8?q?cpython=3A_Replace_ThreadError_by_Runt?= =?utf8?q?imeError=3A_the_former_is_now_an_obsolete_alias_of_the?= Message-ID: http://hg.python.org/cpython/rev/f3dda6b66204 changeset: 76114:f3dda6b66204 parent: 76112:64b8fdb0bfa9 user: Georg Brandl date: Thu Apr 05 09:53:04 2012 +0200 summary: Replace ThreadError by RuntimeError: the former is now an obsolete alias of the latter. files: Doc/library/_thread.rst | 2 +- Doc/library/threading.rst | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Doc/library/_thread.rst b/Doc/library/_thread.rst --- a/Doc/library/_thread.rst +++ b/Doc/library/_thread.rst @@ -94,7 +94,7 @@ *size* argument specifies the stack size to be used for subsequently created threads, and must be 0 (use platform or configured default) or a positive integer value of at least 32,768 (32kB). If changing the thread stack size is - unsupported, a :exc:`ThreadError` is raised. If the specified stack size is + unsupported, a :exc:`RuntimeError` is raised. If the specified stack size is invalid, a :exc:`ValueError` is raised and the stack size is unmodified. 32kB is currently the minimum supported stack size value to guarantee sufficient stack space for the interpreter itself. Note that some platforms may have diff --git a/Doc/library/threading.rst b/Doc/library/threading.rst --- a/Doc/library/threading.rst +++ b/Doc/library/threading.rst @@ -174,7 +174,7 @@ *size* argument specifies the stack size to be used for subsequently created threads, and must be 0 (use platform or configured default) or a positive integer value of at least 32,768 (32kB). If changing the thread stack size is - unsupported, a :exc:`ThreadError` is raised. If the specified stack size is + unsupported, a :exc:`RuntimeError` is raised. If the specified stack size is invalid, a :exc:`ValueError` is raised and the stack size is unmodified. 32kB is currently the minimum supported stack size value to guarantee sufficient stack space for the interpreter itself. 
Note that some platforms may have -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Thu Apr 5 11:41:48 2012 From: python-checkins at python.org (andrew.svetlov) Date: Thu, 05 Apr 2012 11:41:48 +0200 Subject: [Python-checkins] =?utf8?q?cpython=3A_Issue_=233033=3A_Add_displa?= =?utf8?q?yof_parameter_to_tkinter_font=2E?= Message-ID: http://hg.python.org/cpython/rev/774c2afa6665 changeset: 76115:774c2afa6665 user: Andrew Svetlov date: Thu Apr 05 12:41:20 2012 +0300 summary: Issue #3033: Add displayof parameter to tkinter font. Patch by Guilherme Polo. files: Lib/tkinter/font.py | 44 ++++++++++++++++++++------------ Misc/NEWS | 2 + 2 files changed, 29 insertions(+), 17 deletions(-) diff --git a/Lib/tkinter/font.py b/Lib/tkinter/font.py --- a/Lib/tkinter/font.py +++ b/Lib/tkinter/font.py @@ -2,9 +2,6 @@ # # written by Fredrik Lundh, February 1998 # -# FIXME: should add 'displayof' option where relevant (actual, families, -# measure, and metrics) -# __version__ = "0.9" @@ -124,14 +121,17 @@ "Return a distinct copy of the current font" return Font(self._root, **self.actual()) - def actual(self, option=None): + def actual(self, option=None, displayof=None): "Return actual font attributes" + args = () + if displayof: + args = ('-displayof', displayof) if option: - return self._call("font", "actual", self.name, "-"+option) + args = args + ('-' + option, ) + return self._call("font", "actual", self.name, *args) else: return self._mkdict( - self._split(self._call("font", "actual", self.name)) - ) + self._split(self._call("font", "actual", self.name, *args))) def cget(self, option): "Get font attribute" @@ -148,32 +148,42 @@ configure = config - def measure(self, text): + def measure(self, text, displayof=None): "Return text width" - return int(self._call("font", "measure", self.name, text)) + args = (text,) + if displayof: + args = ('-displayof', displayof, text) + return int(self._call("font", "measure", self.name, *args)) - def metrics(self, *options): + def metrics(self, *options, **kw): """Return font metrics. For best performance, create a dummy widget using this font before calling this method.""" - + args = () + displayof = kw.pop('displayof', None) + if displayof: + args = ('-displayof', displayof) if options: + args = args + self._get(options) return int( - self._call("font", "metrics", self.name, self._get(options))) + self._call("font", "metrics", self.name, *args)) else: - res = self._split(self._call("font", "metrics", self.name)) + res = self._split(self._call("font", "metrics", self.name, *args)) options = {} for i in range(0, len(res), 2): options[res[i][1:]] = int(res[i+1]) return options -def families(root=None): +def families(root=None, displayof=None): "Get font families (as a tuple)" if not root: root = tkinter._default_root - return root.tk.splitlist(root.tk.call("font", "families")) + args = () + if displayof: + args = ('-displayof', displayof) + return root.tk.splitlist(root.tk.call("font", "families", *args)) def names(root=None): @@ -205,10 +215,10 @@ print(f.measure("hello"), f.metrics("linespace")) - print(f.metrics()) + print(f.metrics(displayof=root)) f = Font(font=("Courier", 20, "bold")) - print(f.measure("hello"), f.metrics("linespace")) + print(f.measure("hello"), f.metrics("linespace", displayof=root)) w = tkinter.Label(root, text="Hello, world", font=f) w.pack() diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -19,6 +19,8 @@ Library ------- +- Issue #3033: Add displayof parameter to tkinter font. 
Patch by Guilherme Polo. + - Issue #14482: Raise a ValueError, not a NameError, when trying to create a multiprocessing Client or Listener with an AF_UNIX type address under Windows. Patch by Popa Claudiu. -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Thu Apr 5 13:44:33 2012 From: python-checkins at python.org (victor.stinner) Date: Thu, 05 Apr 2012 13:44:33 +0200 Subject: [Python-checkins] =?utf8?q?cpython=3A_Close_=2314249=3A_Use_an_un?= =?utf8?q?ion_instead_of_a_long_to_short_pointer_to_avoid_aliasing?= Message-ID: http://hg.python.org/cpython/rev/2c514c382a2a changeset: 76116:2c514c382a2a user: Victor Stinner date: Thu Apr 05 13:44:34 2012 +0200 summary: Close #14249: Use an union instead of a long to short pointer to avoid aliasing issue. Speed up UTF-16 by 20%. files: Objects/unicodeobject.c | 48 ++++++++++++++++------------ 1 files changed, 27 insertions(+), 21 deletions(-) diff --git a/Objects/unicodeobject.c b/Objects/unicodeobject.c --- a/Objects/unicodeobject.c +++ b/Objects/unicodeobject.c @@ -5496,33 +5496,39 @@ int kind = PyUnicode_KIND(unicode); void *data = PyUnicode_DATA(unicode); while (_q < aligned_end) { - unsigned long block = * (unsigned long *) _q; - unsigned short *pblock = (unsigned short*)█ + union { + unsigned long as_long; + unsigned short units[sizeof(long) / sizeof(short)]; + unsigned char bytes[sizeof(long)]; + } block, block_copy; Py_UCS4 maxch; + + block.as_long = *(unsigned long *) _q; if (native_ordering) { /* Can use buffer directly */ - if (block & FAST_CHAR_MASK) + if (block.as_long & FAST_CHAR_MASK) break; } else { /* Need to byte-swap */ - unsigned char *_p = (unsigned char*)pblock; - if (block & SWAPPED_FAST_CHAR_MASK) + block_copy = block; + + if (block.as_long & SWAPPED_FAST_CHAR_MASK) break; - _p[0] = _q[1]; - _p[1] = _q[0]; - _p[2] = _q[3]; - _p[3] = _q[2]; + block.bytes[0] = block_copy.bytes[1]; + block.bytes[1] = block_copy.bytes[0]; + block.bytes[2] = block_copy.bytes[3]; + block.bytes[3] = block_copy.bytes[2]; #if (SIZEOF_LONG == 8) - _p[4] = _q[5]; - _p[5] = _q[4]; - _p[6] = _q[7]; - _p[7] = _q[6]; -#endif - } - maxch = Py_MAX(pblock[0], pblock[1]); + block.bytes[4] = block_copy.bytes[5]; + block.bytes[5] = block_copy.bytes[4]; + block.bytes[6] = block_copy.bytes[7]; + block.bytes[7] = block_copy.bytes[6]; +#endif + } + maxch = Py_MAX(block.units[0], block.units[1]); #if SIZEOF_LONG == 8 - maxch = Py_MAX(maxch, Py_MAX(pblock[2], pblock[3])); + maxch = Py_MAX(maxch, Py_MAX(block.units[2], block.units[3])); #endif if (maxch > PyUnicode_MAX_CHAR_VALUE(unicode)) { if (unicode_widen(&unicode, maxch) < 0) @@ -5530,11 +5536,11 @@ kind = PyUnicode_KIND(unicode); data = PyUnicode_DATA(unicode); } - PyUnicode_WRITE(kind, data, outpos++, pblock[0]); - PyUnicode_WRITE(kind, data, outpos++, pblock[1]); + PyUnicode_WRITE(kind, data, outpos++, block.units[0]); + PyUnicode_WRITE(kind, data, outpos++, block.units[1]); #if SIZEOF_LONG == 8 - PyUnicode_WRITE(kind, data, outpos++, pblock[2]); - PyUnicode_WRITE(kind, data, outpos++, pblock[3]); + PyUnicode_WRITE(kind, data, outpos++, block.units[2]); + PyUnicode_WRITE(kind, data, outpos++, block.units[3]); #endif _q += SIZEOF_LONG; } -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Thu Apr 5 14:13:21 2012 From: python-checkins at python.org (antoine.pitrou) Date: Thu, 05 Apr 2012 14:13:21 +0200 Subject: [Python-checkins] =?utf8?b?Y3B5dGhvbiAoMi43KTogSXNzdWUgIzE0NTA1?= 
=?utf8?q?=3A_Fix_file_descriptor_leak_when_deallocating_file_objects_crea?= =?utf8?q?ted?= Message-ID: http://hg.python.org/cpython/rev/8258e5fa4a19 changeset: 76117:8258e5fa4a19 branch: 2.7 parent: 76113:4416efeb0163 user: Antoine Pitrou date: Thu Apr 05 14:07:52 2012 +0200 summary: Issue #14505: Fix file descriptor leak when deallocating file objects created with PyFile_FromString(). files: Misc/NEWS | 3 +++ Objects/fileobject.c | 3 ++- 2 files changed, 5 insertions(+), 1 deletions(-) diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -9,6 +9,9 @@ Core and Builtins ----------------- +- Issue #14505: Fix file descriptor leak when deallocating file objects + created with PyFile_FromString(). + - Issue #14474: Save and restore exception state in thread.start_new_thread() while writing error message if the thread leaves a unhandled exception. diff --git a/Objects/fileobject.c b/Objects/fileobject.c --- a/Objects/fileobject.c +++ b/Objects/fileobject.c @@ -493,9 +493,10 @@ PyObject * PyFile_FromString(char *name, char *mode) { + extern int fclose(FILE *); PyFileObject *f; - f = (PyFileObject *)PyFile_FromFile((FILE *)NULL, name, mode, NULL); + f = (PyFileObject *)PyFile_FromFile((FILE *)NULL, name, mode, fclose); if (f != NULL) { if (open_the_file(f, name, mode) == NULL) { Py_DECREF(f); -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Thu Apr 5 15:58:38 2012 From: python-checkins at python.org (andrew.svetlov) Date: Thu, 05 Apr 2012 15:58:38 +0200 Subject: [Python-checkins] =?utf8?b?Y3B5dGhvbiAoMy4yKTogSXNzdWUgIzE0NDk2?= =?utf8?q?=3A_Fix_wrong_name_in_idlelib/tabbedpages=2Epy=2E?= Message-ID: http://hg.python.org/cpython/rev/f2dfe0ca6c21 changeset: 76118:f2dfe0ca6c21 branch: 3.2 parent: 76108:058f9a9f97cd user: Andrew Svetlov date: Thu Apr 05 16:58:05 2012 +0300 summary: Issue #14496: Fix wrong name in idlelib/tabbedpages.py. Patch by Popa Claudiu. files: Lib/idlelib/tabbedpages.py | 4 ++-- Misc/NEWS | 3 +++ 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/Lib/idlelib/tabbedpages.py b/Lib/idlelib/tabbedpages.py --- a/Lib/idlelib/tabbedpages.py +++ b/Lib/idlelib/tabbedpages.py @@ -78,7 +78,7 @@ def remove_tab(self, tab_name): """Remove the tab named """ if not tab_name in self._tab_names: - raise KeyError("No such Tab: '%s" % page_name) + raise KeyError("No such Tab: '%s" % tab_name) self._tab_names.remove(tab_name) self._arrange_tabs() @@ -88,7 +88,7 @@ if tab_name == self._selected_tab: return if tab_name is not None and tab_name not in self._tabs: - raise KeyError("No such Tab: '%s" % page_name) + raise KeyError("No such Tab: '%s" % tab_name) # deselect the current selected tab if self._selected_tab is not None: diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -39,6 +39,9 @@ Library ------- +- Issue #14496: Fix wrong name in idlelib/tabbedpages.py. + Patch by Popa Claudiu. + - Issue #14482: Raise a ValueError, not a NameError, when trying to create a multiprocessing Client or Listener with an AF_UNIX type address under Windows. Patch by Popa Claudiu. 
-- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Thu Apr 5 16:04:38 2012 From: python-checkins at python.org (andrew.svetlov) Date: Thu, 05 Apr 2012 16:04:38 +0200 Subject: [Python-checkins] =?utf8?q?cpython_=28merge_3=2E2_-=3E_default=29?= =?utf8?q?=3A_Merge_from_3=2E2?= Message-ID: http://hg.python.org/cpython/rev/4be55684eefc changeset: 76119:4be55684eefc parent: 76116:2c514c382a2a parent: 76118:f2dfe0ca6c21 user: Andrew Svetlov date: Thu Apr 05 17:04:28 2012 +0300 summary: Merge from 3.2 files: Lib/idlelib/tabbedpages.py | 4 ++-- Misc/NEWS | 3 +++ 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/Lib/idlelib/tabbedpages.py b/Lib/idlelib/tabbedpages.py --- a/Lib/idlelib/tabbedpages.py +++ b/Lib/idlelib/tabbedpages.py @@ -78,7 +78,7 @@ def remove_tab(self, tab_name): """Remove the tab named """ if not tab_name in self._tab_names: - raise KeyError("No such Tab: '%s" % page_name) + raise KeyError("No such Tab: '%s" % tab_name) self._tab_names.remove(tab_name) self._arrange_tabs() @@ -88,7 +88,7 @@ if tab_name == self._selected_tab: return if tab_name is not None and tab_name not in self._tabs: - raise KeyError("No such Tab: '%s" % page_name) + raise KeyError("No such Tab: '%s" % tab_name) # deselect the current selected tab if self._selected_tab is not None: diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -19,6 +19,9 @@ Library ------- +- Issue #14496: Fix wrong name in idlelib/tabbedpages.py. + Patch by Popa Claudiu. + - Issue #3033: Add displayof parameter to tkinter font. Patch by Guilherme Polo. - Issue #14482: Raise a ValueError, not a NameError, when trying to create -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Thu Apr 5 16:27:26 2012 From: python-checkins at python.org (stefan.krah) Date: Thu, 05 Apr 2012 16:27:26 +0200 Subject: [Python-checkins] =?utf8?q?cpython=3A_Raise_InvalidOperation_if_e?= =?utf8?q?xponents_of_zeros_are_clamped_during_exact?= Message-ID: http://hg.python.org/cpython/rev/0f9d79998c79 changeset: 76120:0f9d79998c79 parent: 76116:2c514c382a2a user: Stefan Krah date: Thu Apr 05 15:21:58 2012 +0200 summary: Raise InvalidOperation if exponents of zeros are clamped during exact conversion in the Decimal constructor. Exact here refers to the representation and not to the value (clamping does not change the value). 
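In practice the change means that a Decimal literal whose exponent would have to be clamped to become representable is no longer accepted silently when exact conversion is requested. A minimal sketch of the new behaviour, mirroring the test_exact_conversion case added below; it assumes the C accelerator built from this changeset (the default in 3.3; the pure-Python constructor does not clamp and is unaffected):

    import sys
    from decimal import Decimal, InvalidOperation, localcontext

    with localcontext() as ctx:
        ctx.traps[InvalidOperation] = True   # trapped by default, set here for clarity
        try:
            # A zero whose exponent exceeds the representable range must be
            # clamped; the constructor now signals InvalidOperation instead.
            Decimal("0e%d" % sys.maxsize)
        except InvalidOperation:
            print("clamped zero exponent rejected during exact conversion")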
files: Lib/test/test_decimal.py | 24 ++++++++++++++++++ Modules/_decimal/_decimal.c | 4 +- Modules/_decimal/tests/deccheck.py | 1 + 3 files changed, 27 insertions(+), 2 deletions(-) diff --git a/Lib/test/test_decimal.py b/Lib/test/test_decimal.py --- a/Lib/test/test_decimal.py +++ b/Lib/test/test_decimal.py @@ -4953,6 +4953,30 @@ self.assertRaises(ValueError, get_fmt, 12345, invalid_dot, 'g') self.assertRaises(ValueError, get_fmt, 12345, invalid_sep, 'g') + def test_exact_conversion(self): + Decimal = C.Decimal + localcontext = C.localcontext + InvalidOperation = C.InvalidOperation + + with localcontext() as c: + + c.traps[InvalidOperation] = True + + # Clamped + x = "0e%d" % sys.maxsize + self.assertRaises(InvalidOperation, Decimal, x) + + x = "0e%d" % (-sys.maxsize-1) + self.assertRaises(InvalidOperation, Decimal, x) + + # Overflow + x = "1e%d" % sys.maxsize + self.assertRaises(InvalidOperation, Decimal, x) + + # Underflow + x = "1e%d" % (-sys.maxsize-1) + self.assertRaises(InvalidOperation, Decimal, x) + all_tests = [ CExplicitConstructionTest, PyExplicitConstructionTest, diff --git a/Modules/_decimal/_decimal.c b/Modules/_decimal/_decimal.c --- a/Modules/_decimal/_decimal.c +++ b/Modules/_decimal/_decimal.c @@ -1935,7 +1935,7 @@ mpd_maxcontext(&maxctx); mpd_qset_string(MPD(dec), s, &maxctx, &status); - if (status & (MPD_Inexact|MPD_Rounded)) { + if (status & (MPD_Inexact|MPD_Rounded|MPD_Clamped)) { /* we want exact results */ mpd_seterror(MPD(dec), MPD_Invalid_operation, &status); } @@ -2139,7 +2139,7 @@ return NULL; } - if (status & (MPD_Inexact|MPD_Rounded)) { + if (status & (MPD_Inexact|MPD_Rounded|MPD_Clamped)) { /* we want exact results */ mpd_seterror(MPD(dec), MPD_Invalid_operation, &status); } diff --git a/Modules/_decimal/tests/deccheck.py b/Modules/_decimal/tests/deccheck.py --- a/Modules/_decimal/tests/deccheck.py +++ b/Modules/_decimal/tests/deccheck.py @@ -302,6 +302,7 @@ dec = maxcontext.create_decimal(value) if maxcontext.flags[P.Inexact] or \ maxcontext.flags[P.Rounded] or \ + maxcontext.flags[P.Clamped] or \ maxcontext.flags[P.InvalidOperation]: return context.p._raise_error(P.InvalidOperation) if maxcontext.flags[P.FloatOperation]: -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Thu Apr 5 16:27:27 2012 From: python-checkins at python.org (stefan.krah) Date: Thu, 05 Apr 2012 16:27:27 +0200 Subject: [Python-checkins] =?utf8?q?cpython=3A_Allow_printing_a_leading_?= =?utf8?q?=27-=27_and_the_maximum_number_of_exponent_digits?= Message-ID: http://hg.python.org/cpython/rev/a99cd3b98425 changeset: 76121:a99cd3b98425 user: Stefan Krah date: Thu Apr 05 15:46:19 2012 +0200 summary: Allow printing a leading '-' and the maximum number of exponent digits rather than raising RuntimeError (allocated space is sufficient for the additional character). 
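In practice, tuple input with an extreme exponent now yields a well-defined result (or a regular signal) instead of the internal RuntimeError. A small sketch mirroring the test_from_tuple cases added below, assuming the C accelerator and the default context limits:

    import sys
    from decimal import Decimal, getcontext

    ctx = getcontext()
    # A signed zero with a huge exponent is clamped to the context limit
    # rather than failing while formatting the sign and exponent digits.
    print(ctx.create_decimal((1, (), sys.maxsize)))   # -0E+999999
    # The leading '-' of a signalling NaN built from a tuple is printed too.
    print(Decimal((1, (), "N")))                      # -sNaN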
files: Lib/test/test_decimal.py | 48 +++++++++++++++++++++++++ Modules/_decimal/_decimal.c | 4 +- 2 files changed, 50 insertions(+), 2 deletions(-) diff --git a/Lib/test/test_decimal.py b/Lib/test/test_decimal.py --- a/Lib/test/test_decimal.py +++ b/Lib/test/test_decimal.py @@ -4977,6 +4977,54 @@ x = "1e%d" % (-sys.maxsize-1) self.assertRaises(InvalidOperation, Decimal, x) + def test_from_tuple(self): + Decimal = C.Decimal + localcontext = C.localcontext + InvalidOperation = C.InvalidOperation + Overflow = C.Overflow + Underflow = C.Underflow + + with localcontext() as c: + + c.traps[InvalidOperation] = True + c.traps[Overflow] = True + c.traps[Underflow] = True + + # SSIZE_MAX + x = (1, (), sys.maxsize) + self.assertEqual(str(c.create_decimal(x)), '-0E+999999') + self.assertRaises(InvalidOperation, Decimal, x) + + x = (1, (0, 1, 2), sys.maxsize) + self.assertRaises(Overflow, c.create_decimal, x) + self.assertRaises(InvalidOperation, Decimal, x) + + # SSIZE_MIN + x = (1, (), -sys.maxsize-1) + self.assertEqual(str(c.create_decimal(x)), '-0E-1000026') + self.assertRaises(InvalidOperation, Decimal, x) + + x = (1, (0, 1, 2), -sys.maxsize-1) + self.assertRaises(Underflow, c.create_decimal, x) + self.assertRaises(InvalidOperation, Decimal, x) + + # OverflowError + x = (1, (), sys.maxsize+1) + self.assertRaises(OverflowError, c.create_decimal, x) + self.assertRaises(OverflowError, Decimal, x) + + x = (1, (), -sys.maxsize-2) + self.assertRaises(OverflowError, c.create_decimal, x) + self.assertRaises(OverflowError, Decimal, x) + + # Specials + x = (1, (), "N") + self.assertEqual(str(Decimal(x)), '-sNaN') + x = (1, (0,), "N") + self.assertEqual(str(Decimal(x)), '-sNaN') + x = (1, (0, 1), "N") + self.assertEqual(str(Decimal(x)), '-sNaN1') + all_tests = [ CExplicitConstructionTest, PyExplicitConstructionTest, diff --git a/Modules/_decimal/_decimal.c b/Modules/_decimal/_decimal.c --- a/Modules/_decimal/_decimal.c +++ b/Modules/_decimal/_decimal.c @@ -2435,8 +2435,8 @@ if (sign_special[1] == '\0') { /* not a special number */ *cp++ = 'E'; - n = snprintf(cp, MPD_EXPDIGITS+1, "%" PRI_mpd_ssize_t, exp); - if (n < 0 || n >= MPD_EXPDIGITS+1) { + n = snprintf(cp, MPD_EXPDIGITS+2, "%" PRI_mpd_ssize_t, exp); + if (n < 0 || n >= MPD_EXPDIGITS+2) { PyErr_SetString(PyExc_RuntimeError, "internal error in dec_sequence_as_str"); goto error; -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Thu Apr 5 16:27:27 2012 From: python-checkins at python.org (stefan.krah) Date: Thu, 05 Apr 2012 16:27:27 +0200 Subject: [Python-checkins] =?utf8?q?cpython=3A_Formatting=2E?= Message-ID: http://hg.python.org/cpython/rev/cd98afa9c4e9 changeset: 76122:cd98afa9c4e9 user: Stefan Krah date: Thu Apr 05 15:48:59 2012 +0200 summary: Formatting. 
files: Modules/_decimal/_decimal.c | 4 ++-- 1 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Modules/_decimal/_decimal.c b/Modules/_decimal/_decimal.c --- a/Modules/_decimal/_decimal.c +++ b/Modules/_decimal/_decimal.c @@ -2385,8 +2385,8 @@ } /* coefficient */ - digits = sequence_as_tuple(PyTuple_GET_ITEM(dectuple, 1), - PyExc_ValueError, "coefficient must be a tuple of digits"); + digits = sequence_as_tuple(PyTuple_GET_ITEM(dectuple, 1), PyExc_ValueError, + "coefficient must be a tuple of digits"); if (digits == NULL) { goto error; } -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Thu Apr 5 16:27:28 2012 From: python-checkins at python.org (stefan.krah) Date: Thu, 05 Apr 2012 16:27:28 +0200 Subject: [Python-checkins] =?utf8?q?cpython=3A_Reduce_array_size=2E?= Message-ID: http://hg.python.org/cpython/rev/b33af0fb7bc6 changeset: 76123:b33af0fb7bc6 user: Stefan Krah date: Thu Apr 05 16:07:22 2012 +0200 summary: Reduce array size. files: Modules/_decimal/_decimal.c | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/Modules/_decimal/_decimal.c b/Modules/_decimal/_decimal.c --- a/Modules/_decimal/_decimal.c +++ b/Modules/_decimal/_decimal.c @@ -4215,7 +4215,7 @@ mpd_uint_t p_data[1] = {2305843009213693951ULL}; mpd_t p = {MPD_POS|MPD_STATIC|MPD_CONST_DATA, 0, 19, 1, 1, p_data}; /* Inverse of 10 modulo p */ - mpd_uint_t inv10_p_data[2] = {2075258708292324556ULL}; + mpd_uint_t inv10_p_data[1] = {2075258708292324556ULL}; mpd_t inv10_p = {MPD_POS|MPD_STATIC|MPD_CONST_DATA, 0, 19, 1, 1, inv10_p_data}; #elif defined(CONFIG_32) && _PyHASH_BITS == 31 -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Thu Apr 5 16:27:29 2012 From: python-checkins at python.org (stefan.krah) Date: Thu, 05 Apr 2012 16:27:29 +0200 Subject: [Python-checkins] =?utf8?q?cpython=3A_Whitespace=2E?= Message-ID: http://hg.python.org/cpython/rev/0375d3fa499c changeset: 76124:0375d3fa499c user: Stefan Krah date: Thu Apr 05 16:15:25 2012 +0200 summary: Whitespace. files: Modules/_decimal/_decimal.c | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/Modules/_decimal/_decimal.c b/Modules/_decimal/_decimal.c --- a/Modules/_decimal/_decimal.c +++ b/Modules/_decimal/_decimal.c @@ -4934,7 +4934,7 @@ PyObject *result; CONVERT_OP_RAISE(&result, v, context); - return result; + return result; } static PyObject * -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Thu Apr 5 16:27:30 2012 From: python-checkins at python.org (stefan.krah) Date: Thu, 05 Apr 2012 16:27:30 +0200 Subject: [Python-checkins] =?utf8?q?cpython_=28merge_default_-=3E_default?= =?utf8?b?KTogTWVyZ2Uu?= Message-ID: http://hg.python.org/cpython/rev/c5caaa83ca60 changeset: 76125:c5caaa83ca60 parent: 76124:0375d3fa499c parent: 76119:4be55684eefc user: Stefan Krah date: Thu Apr 05 16:25:21 2012 +0200 summary: Merge. 
files: Lib/idlelib/tabbedpages.py | 4 ++-- Misc/NEWS | 3 +++ 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/Lib/idlelib/tabbedpages.py b/Lib/idlelib/tabbedpages.py --- a/Lib/idlelib/tabbedpages.py +++ b/Lib/idlelib/tabbedpages.py @@ -78,7 +78,7 @@ def remove_tab(self, tab_name): """Remove the tab named """ if not tab_name in self._tab_names: - raise KeyError("No such Tab: '%s" % page_name) + raise KeyError("No such Tab: '%s" % tab_name) self._tab_names.remove(tab_name) self._arrange_tabs() @@ -88,7 +88,7 @@ if tab_name == self._selected_tab: return if tab_name is not None and tab_name not in self._tabs: - raise KeyError("No such Tab: '%s" % page_name) + raise KeyError("No such Tab: '%s" % tab_name) # deselect the current selected tab if self._selected_tab is not None: diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -19,6 +19,9 @@ Library ------- +- Issue #14496: Fix wrong name in idlelib/tabbedpages.py. + Patch by Popa Claudiu. + - Issue #3033: Add displayof parameter to tkinter font. Patch by Guilherme Polo. - Issue #14482: Raise a ValueError, not a NameError, when trying to create -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Thu Apr 5 20:55:20 2012 From: python-checkins at python.org (andrew.svetlov) Date: Thu, 05 Apr 2012 20:55:20 +0200 Subject: [Python-checkins] =?utf8?q?cpython=3A_Issue_=238515=3A_Set_=5F=5F?= =?utf8?q?file=5F=5F_when_run_file_in_IDLE=2E?= Message-ID: http://hg.python.org/cpython/rev/2c276d0553ff changeset: 76126:2c276d0553ff user: Andrew Svetlov date: Thu Apr 05 21:54:39 2012 +0300 summary: Issue #8515: Set __file__ when run file in IDLE. files: Lib/idlelib/NEWS.txt | 3 +++ Lib/idlelib/ScriptBinding.py | 12 ++++++------ Misc/ACKS | 1 + Misc/NEWS | 3 +++ 4 files changed, 13 insertions(+), 6 deletions(-) diff --git a/Lib/idlelib/NEWS.txt b/Lib/idlelib/NEWS.txt --- a/Lib/idlelib/NEWS.txt +++ b/Lib/idlelib/NEWS.txt @@ -1,6 +1,9 @@ What's New in IDLE 3.3? ========================= +- Issue #8515: Set __file__ when run file in IDLE. + Initial patch by Bruce Frederiksen. + - IDLE can be launched as `python -m idlelib` - Issue #14409: IDLE now properly executes commands in the Shell window diff --git a/Lib/idlelib/ScriptBinding.py b/Lib/idlelib/ScriptBinding.py --- a/Lib/idlelib/ScriptBinding.py +++ b/Lib/idlelib/ScriptBinding.py @@ -150,16 +150,16 @@ dirname = os.path.dirname(filename) # XXX Too often this discards arguments the user just set... interp.runcommand("""if 1: - _filename = %r + __file__ = {filename!r} import sys as _sys from os.path import basename as _basename if (not _sys.argv or - _basename(_sys.argv[0]) != _basename(_filename)): - _sys.argv = [_filename] + _basename(_sys.argv[0]) != _basename(__file__)): + _sys.argv = [__file__] import os as _os - _os.chdir(%r) - del _filename, _sys, _basename, _os - \n""" % (filename, dirname)) + _os.chdir({dirname!r}) + del _sys, _basename, _os + \n""".format(filename=filename, dirname=dirname)) interp.prepend_syspath(filename) # XXX KBK 03Jul04 When run w/o subprocess, runtime warnings still # go to __stderr__. With subprocess, they go to the shell. diff --git a/Misc/ACKS b/Misc/ACKS --- a/Misc/ACKS +++ b/Misc/ACKS @@ -333,6 +333,7 @@ John Fouhy Andrew Francis Martin Franklin +Bruce Frederiksen Robin Friedrich Ivan Frohne Matthias Fuchs diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -19,6 +19,9 @@ Library ------- +- Issue #8515: Set __file__ when run file in IDLE. + Initial patch by Bruce Frederiksen. 
+ - Issue #14496: Fix wrong name in idlelib/tabbedpages.py. Patch by Popa Claudiu. -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Thu Apr 5 22:31:29 2012 From: python-checkins at python.org (raymond.hettinger) Date: Thu, 05 Apr 2012 22:31:29 +0200 Subject: [Python-checkins] =?utf8?q?cpython=3A_Clarify_that_the_purpose_of?= =?utf8?q?_computing_all_the_miscellaneous_collection_types_is?= Message-ID: http://hg.python.org/cpython/rev/67c26185636b changeset: 76127:67c26185636b user: Raymond Hettinger date: Thu Apr 05 13:31:12 2012 -0700 summary: Clarify that the purpose of computing all the miscellaneous collection types is to register them with the appropriate ABCs. files: Lib/collections/abc.py | 10 +++++++--- 1 files changed, 7 insertions(+), 3 deletions(-) diff --git a/Lib/collections/abc.py b/Lib/collections/abc.py --- a/Lib/collections/abc.py +++ b/Lib/collections/abc.py @@ -18,9 +18,13 @@ "ByteString", ] - -### collection related types which are not exposed through builtin ### -## iterators ## +# Private list of types that we want to register with the various ABCs +# so that they will pass tests like: +# it = iter(somebytearray) +# assert isinstance(it, Iterable) +# Note: in other implementations, these types many not be distinct +# and they make have their own implementation specific types that +# are not included on this list. bytes_iterator = type(iter(b'')) bytearray_iterator = type(iter(bytearray())) #callable_iterator = ??? -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Thu Apr 5 22:54:47 2012 From: python-checkins at python.org (victor.stinner) Date: Thu, 05 Apr 2012 22:54:47 +0200 Subject: [Python-checkins] =?utf8?q?cpython=3A_Close_=2314249=3A_Use_bit_s?= =?utf8?q?hifts_instead_of_an_union=2C_it=27s_more_efficient=2E?= Message-ID: http://hg.python.org/cpython/rev/489f252b1f8b changeset: 76128:489f252b1f8b user: Victor Stinner date: Thu Apr 05 22:54:49 2012 +0200 summary: Close #14249: Use bit shifts instead of an union, it's more efficient. Patch written by Serhiy Storchaka files: Misc/ACKS | 1 + Objects/unicodeobject.c | 63 ++++++++++++++++------------ 2 files changed, 36 insertions(+), 28 deletions(-) diff --git a/Misc/ACKS b/Misc/ACKS --- a/Misc/ACKS +++ b/Misc/ACKS @@ -977,6 +977,7 @@ Peter Stoehr Casper Stoel Michael Stone +Serhiy Storchaka Ken Stox Dan Stromberg Daniel Stutzbach diff --git a/Objects/unicodeobject.c b/Objects/unicodeobject.c --- a/Objects/unicodeobject.c +++ b/Objects/unicodeobject.c @@ -5393,9 +5393,11 @@ #if (SIZEOF_LONG == 8) # define FAST_CHAR_MASK 0x8000800080008000L # define SWAPPED_FAST_CHAR_MASK 0x0080008000800080L +# define STRIPPED_MASK 0x00FF00FF00FF00FFL #elif (SIZEOF_LONG == 4) # define FAST_CHAR_MASK 0x80008000L # define SWAPPED_FAST_CHAR_MASK 0x00800080L +# define STRIPPED_MASK 0x00FF00FFL #else # error C 'long' size should be either 4 or 8! 
#endif @@ -5496,39 +5498,31 @@ int kind = PyUnicode_KIND(unicode); void *data = PyUnicode_DATA(unicode); while (_q < aligned_end) { - union { - unsigned long as_long; - unsigned short units[sizeof(long) / sizeof(short)]; - unsigned char bytes[sizeof(long)]; - } block, block_copy; + unsigned long block = * (unsigned long *) _q; Py_UCS4 maxch; - - block.as_long = *(unsigned long *) _q; if (native_ordering) { /* Can use buffer directly */ - if (block.as_long & FAST_CHAR_MASK) + if (block & FAST_CHAR_MASK) break; } else { /* Need to byte-swap */ - block_copy = block; - - if (block.as_long & SWAPPED_FAST_CHAR_MASK) + if (block & SWAPPED_FAST_CHAR_MASK) break; - block.bytes[0] = block_copy.bytes[1]; - block.bytes[1] = block_copy.bytes[0]; - block.bytes[2] = block_copy.bytes[3]; - block.bytes[3] = block_copy.bytes[2]; -#if (SIZEOF_LONG == 8) - block.bytes[4] = block_copy.bytes[5]; - block.bytes[5] = block_copy.bytes[4]; - block.bytes[6] = block_copy.bytes[7]; - block.bytes[7] = block_copy.bytes[6]; -#endif - } - maxch = Py_MAX(block.units[0], block.units[1]); + block = ((block >> 8) & STRIPPED_MASK) | + ((block & STRIPPED_MASK) << 8); + } + maxch = (Py_UCS2)(block & 0xFFFF); #if SIZEOF_LONG == 8 - maxch = Py_MAX(maxch, Py_MAX(block.units[2], block.units[3])); + ch = (Py_UCS2)((block >> 16) & 0xFFFF); + maxch = Py_MAX(maxch, ch); + ch = (Py_UCS2)((block >> 32) & 0xFFFF); + maxch = Py_MAX(maxch, ch); + ch = (Py_UCS2)(block >> 48); + maxch = Py_MAX(maxch, ch); +#else + ch = (Py_UCS2)(block >> 16); + maxch = Py_MAX(maxch, ch); #endif if (maxch > PyUnicode_MAX_CHAR_VALUE(unicode)) { if (unicode_widen(&unicode, maxch) < 0) @@ -5536,11 +5530,24 @@ kind = PyUnicode_KIND(unicode); data = PyUnicode_DATA(unicode); } - PyUnicode_WRITE(kind, data, outpos++, block.units[0]); - PyUnicode_WRITE(kind, data, outpos++, block.units[1]); +#ifdef BYTEORDER_IS_LITTLE_ENDIAN + PyUnicode_WRITE(kind, data, outpos++, (Py_UCS2)(block & 0xFFFF)); #if SIZEOF_LONG == 8 - PyUnicode_WRITE(kind, data, outpos++, block.units[2]); - PyUnicode_WRITE(kind, data, outpos++, block.units[3]); + PyUnicode_WRITE(kind, data, outpos++, (Py_UCS2)((block >> 16) & 0xFFFF)); + PyUnicode_WRITE(kind, data, outpos++, (Py_UCS2)((block >> 32) & 0xFFFF)); + PyUnicode_WRITE(kind, data, outpos++, (Py_UCS2)((block >> 48))); +#else + PyUnicode_WRITE(kind, data, outpos++, (Py_UCS2)(block >> 16)); +#endif +#else +#if SIZEOF_LONG == 8 + PyUnicode_WRITE(kind, data, outpos++, (Py_UCS2)((block >> 48))); + PyUnicode_WRITE(kind, data, outpos++, (Py_UCS2)((block >> 32) & 0xFFFF)); + PyUnicode_WRITE(kind, data, outpos++, (Py_UCS2)((block >> 16) & 0xFFFF)); +#else + PyUnicode_WRITE(kind, data, outpos++, (Py_UCS2)(block >> 16)); +#endif + PyUnicode_WRITE(kind, data, outpos++, (Py_UCS2)(block & 0xFFFF)); #endif _q += SIZEOF_LONG; } -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Thu Apr 5 23:01:58 2012 From: python-checkins at python.org (sandro.tosi) Date: Thu, 05 Apr 2012 23:01:58 +0200 Subject: [Python-checkins] =?utf8?b?Y3B5dGhvbiAoMi43KTogSXNzdWUgIzE0NTAy?= =?utf8?q?=3A_release=28=29_and_unlocked_lock_generates_a_ThreadError?= Message-ID: http://hg.python.org/cpython/rev/efeca6ff2751 changeset: 76129:efeca6ff2751 branch: 2.7 parent: 76117:8258e5fa4a19 user: Sandro Tosi date: Thu Apr 05 22:51:00 2012 +0200 summary: Issue #14502: release() and unlocked lock generates a ThreadError files: Doc/library/threading.rst | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/Doc/library/threading.rst 
b/Doc/library/threading.rst --- a/Doc/library/threading.rst +++ b/Doc/library/threading.rst @@ -420,7 +420,7 @@ are blocked waiting for the lock to become unlocked, allow exactly one of them to proceed. - Do not call this method when the lock is unlocked. + When invoked on an unlocked lock, a :exc:`ThreadError` is raised. There is no return value. -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Thu Apr 5 23:01:58 2012 From: python-checkins at python.org (sandro.tosi) Date: Thu, 05 Apr 2012 23:01:58 +0200 Subject: [Python-checkins] =?utf8?b?Y3B5dGhvbiAoMy4yKTogSXNzdWUgIzE0NTAy?= =?utf8?q?=3A_release=28=29_and_unlocked_lock_generates_a_ThreadError?= Message-ID: http://hg.python.org/cpython/rev/acea9d95a6d8 changeset: 76130:acea9d95a6d8 branch: 3.2 parent: 76118:f2dfe0ca6c21 user: Sandro Tosi date: Thu Apr 05 22:51:54 2012 +0200 summary: Issue #14502: release() and unlocked lock generates a ThreadError files: Doc/library/threading.rst | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/Doc/library/threading.rst b/Doc/library/threading.rst --- a/Doc/library/threading.rst +++ b/Doc/library/threading.rst @@ -430,7 +430,7 @@ are blocked waiting for the lock to become unlocked, allow exactly one of them to proceed. - Do not call this method when the lock is unlocked. + When invoked on an unlocked lock, a :exc:`ThreadError` is raised. There is no return value. -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Thu Apr 5 23:01:59 2012 From: python-checkins at python.org (sandro.tosi) Date: Thu, 05 Apr 2012 23:01:59 +0200 Subject: [Python-checkins] =?utf8?q?cpython=3A_Issue_=2314502=3A_merge_wit?= =?utf8?q?h_3=2E2?= Message-ID: http://hg.python.org/cpython/rev/c10a0f93544e changeset: 76131:c10a0f93544e parent: 76127:67c26185636b user: Sandro Tosi date: Thu Apr 05 22:53:21 2012 +0200 summary: Issue #14502: merge with 3.2 files: Doc/library/threading.rst | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/Doc/library/threading.rst b/Doc/library/threading.rst --- a/Doc/library/threading.rst +++ b/Doc/library/threading.rst @@ -452,7 +452,7 @@ are blocked waiting for the lock to become unlocked, allow exactly one of them to proceed. - Do not call this method when the lock is unlocked. + When invoked on an unlocked lock, a :exc:`RuntimeError` is raised. There is no return value. 
-- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Thu Apr 5 23:02:00 2012 From: python-checkins at python.org (sandro.tosi) Date: Thu, 05 Apr 2012 23:02:00 +0200 Subject: [Python-checkins] =?utf8?q?cpython_=28merge_default_-=3E_default?= =?utf8?q?=29=3A_merge_heads?= Message-ID: http://hg.python.org/cpython/rev/ec3e0f1a3b8d changeset: 76132:ec3e0f1a3b8d parent: 76131:c10a0f93544e parent: 76128:489f252b1f8b user: Sandro Tosi date: Thu Apr 05 22:59:41 2012 +0200 summary: merge heads files: Misc/ACKS | 1 + Objects/unicodeobject.c | 63 ++++++++++++++++------------ 2 files changed, 36 insertions(+), 28 deletions(-) diff --git a/Misc/ACKS b/Misc/ACKS --- a/Misc/ACKS +++ b/Misc/ACKS @@ -977,6 +977,7 @@ Peter Stoehr Casper Stoel Michael Stone +Serhiy Storchaka Ken Stox Dan Stromberg Daniel Stutzbach diff --git a/Objects/unicodeobject.c b/Objects/unicodeobject.c --- a/Objects/unicodeobject.c +++ b/Objects/unicodeobject.c @@ -5393,9 +5393,11 @@ #if (SIZEOF_LONG == 8) # define FAST_CHAR_MASK 0x8000800080008000L # define SWAPPED_FAST_CHAR_MASK 0x0080008000800080L +# define STRIPPED_MASK 0x00FF00FF00FF00FFL #elif (SIZEOF_LONG == 4) # define FAST_CHAR_MASK 0x80008000L # define SWAPPED_FAST_CHAR_MASK 0x00800080L +# define STRIPPED_MASK 0x00FF00FFL #else # error C 'long' size should be either 4 or 8! #endif @@ -5496,39 +5498,31 @@ int kind = PyUnicode_KIND(unicode); void *data = PyUnicode_DATA(unicode); while (_q < aligned_end) { - union { - unsigned long as_long; - unsigned short units[sizeof(long) / sizeof(short)]; - unsigned char bytes[sizeof(long)]; - } block, block_copy; + unsigned long block = * (unsigned long *) _q; Py_UCS4 maxch; - - block.as_long = *(unsigned long *) _q; if (native_ordering) { /* Can use buffer directly */ - if (block.as_long & FAST_CHAR_MASK) + if (block & FAST_CHAR_MASK) break; } else { /* Need to byte-swap */ - block_copy = block; - - if (block.as_long & SWAPPED_FAST_CHAR_MASK) + if (block & SWAPPED_FAST_CHAR_MASK) break; - block.bytes[0] = block_copy.bytes[1]; - block.bytes[1] = block_copy.bytes[0]; - block.bytes[2] = block_copy.bytes[3]; - block.bytes[3] = block_copy.bytes[2]; -#if (SIZEOF_LONG == 8) - block.bytes[4] = block_copy.bytes[5]; - block.bytes[5] = block_copy.bytes[4]; - block.bytes[6] = block_copy.bytes[7]; - block.bytes[7] = block_copy.bytes[6]; -#endif - } - maxch = Py_MAX(block.units[0], block.units[1]); + block = ((block >> 8) & STRIPPED_MASK) | + ((block & STRIPPED_MASK) << 8); + } + maxch = (Py_UCS2)(block & 0xFFFF); #if SIZEOF_LONG == 8 - maxch = Py_MAX(maxch, Py_MAX(block.units[2], block.units[3])); + ch = (Py_UCS2)((block >> 16) & 0xFFFF); + maxch = Py_MAX(maxch, ch); + ch = (Py_UCS2)((block >> 32) & 0xFFFF); + maxch = Py_MAX(maxch, ch); + ch = (Py_UCS2)(block >> 48); + maxch = Py_MAX(maxch, ch); +#else + ch = (Py_UCS2)(block >> 16); + maxch = Py_MAX(maxch, ch); #endif if (maxch > PyUnicode_MAX_CHAR_VALUE(unicode)) { if (unicode_widen(&unicode, maxch) < 0) @@ -5536,11 +5530,24 @@ kind = PyUnicode_KIND(unicode); data = PyUnicode_DATA(unicode); } - PyUnicode_WRITE(kind, data, outpos++, block.units[0]); - PyUnicode_WRITE(kind, data, outpos++, block.units[1]); +#ifdef BYTEORDER_IS_LITTLE_ENDIAN + PyUnicode_WRITE(kind, data, outpos++, (Py_UCS2)(block & 0xFFFF)); #if SIZEOF_LONG == 8 - PyUnicode_WRITE(kind, data, outpos++, block.units[2]); - PyUnicode_WRITE(kind, data, outpos++, block.units[3]); + PyUnicode_WRITE(kind, data, outpos++, (Py_UCS2)((block >> 16) & 0xFFFF)); + PyUnicode_WRITE(kind, data, outpos++, (Py_UCS2)((block >> 
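The documented behaviour is easy to verify; note that in Python 3 threading.ThreadError is only kept as an obsolete alias of RuntimeError (see changeset f3dda6b66204 above), so catching either name works. A minimal illustration:

    import threading

    lock = threading.Lock()
    try:
        lock.release()              # the lock was never acquired
    except RuntimeError as exc:     # ThreadError is an alias of RuntimeError in 3.x
        print("releasing an unlocked lock:", exc)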
32) & 0xFFFF)); + PyUnicode_WRITE(kind, data, outpos++, (Py_UCS2)((block >> 48))); +#else + PyUnicode_WRITE(kind, data, outpos++, (Py_UCS2)(block >> 16)); +#endif +#else +#if SIZEOF_LONG == 8 + PyUnicode_WRITE(kind, data, outpos++, (Py_UCS2)((block >> 48))); + PyUnicode_WRITE(kind, data, outpos++, (Py_UCS2)((block >> 32) & 0xFFFF)); + PyUnicode_WRITE(kind, data, outpos++, (Py_UCS2)((block >> 16) & 0xFFFF)); +#else + PyUnicode_WRITE(kind, data, outpos++, (Py_UCS2)(block >> 16)); +#endif + PyUnicode_WRITE(kind, data, outpos++, (Py_UCS2)(block & 0xFFFF)); #endif _q += SIZEOF_LONG; } -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Fri Apr 6 04:57:17 2012 From: python-checkins at python.org (benjamin.peterson) Date: Fri, 06 Apr 2012 04:57:17 +0200 Subject: [Python-checkins] =?utf8?q?cpython_=28merge_3=2E2_-=3E_default=29?= =?utf8?q?=3A_merge_3=2E2?= Message-ID: http://hg.python.org/cpython/rev/9506e636bd9f changeset: 76133:9506e636bd9f parent: 76132:ec3e0f1a3b8d parent: 76130:acea9d95a6d8 user: Benjamin Peterson date: Thu Apr 05 22:57:10 2012 -0400 summary: merge 3.2 files: Doc/library/threading.rst | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/Doc/library/threading.rst b/Doc/library/threading.rst --- a/Doc/library/threading.rst +++ b/Doc/library/threading.rst @@ -452,7 +452,7 @@ are blocked waiting for the lock to become unlocked, allow exactly one of them to proceed. - When invoked on an unlocked lock, a :exc:`RuntimeError` is raised. + When invoked on an unlocked lock, a :exc:`ThreadError` is raised. There is no return value. -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Fri Apr 6 05:01:28 2012 From: python-checkins at python.org (r.david.murray) Date: Fri, 06 Apr 2012 05:01:28 +0200 Subject: [Python-checkins] =?utf8?b?Y3B5dGhvbiAoMy4yKTogIzE0NDkyOiBmaXgg?= =?utf8?q?some_bugs_in_Tools/scripts/pdeps=2Epy=2E?= Message-ID: http://hg.python.org/cpython/rev/e9b8115c5b25 changeset: 76134:e9b8115c5b25 branch: 3.2 parent: 76130:acea9d95a6d8 user: R David Murray date: Thu Apr 05 22:59:13 2012 -0400 summary: #14492: fix some bugs in Tools/scripts/pdeps.py. Initial patch by Popa Claudiu. files: Lib/test/test_tools.py | 27 +++++++++++++++++++++++++++ Tools/scripts/pdeps.py | 10 +++++----- 2 files changed, 32 insertions(+), 5 deletions(-) diff --git a/Lib/test/test_tools.py b/Lib/test/test_tools.py --- a/Lib/test/test_tools.py +++ b/Lib/test/test_tools.py @@ -6,8 +6,10 @@ import os import sys +import imp import unittest import sysconfig +import tempfile from test import support from test.script_helper import assert_python_ok @@ -72,6 +74,31 @@ import analyze_dxp +class PdepsTests(unittest.TestCase): + + @classmethod + def setUpClass(self): + path = os.path.join(scriptsdir, 'pdeps.py') + self.pdeps = imp.load_source('pdeps', path) + + @classmethod + def tearDownClass(self): + if 'pdeps' in sys.modules: + del sys.modules['pdeps'] + + def test_process_errors(self): + # Issue #14492: m_import.match(line) can be None. + with tempfile.TemporaryDirectory() as tmpdir: + fn = os.path.join(tmpdir, 'foo') + with open(fn, 'w') as stream: + stream.write("#!/this/will/fail") + self.pdeps.process(fn, {}) + + def test_inverse_attribute_error(self): + # Issue #14492: this used to fail with an AttributeError. 
+ self.pdeps.inverse({'a': []}) + + def test_main(): support.run_unittest(*[obj for obj in globals().values() if isinstance(obj, type)]) diff --git a/Tools/scripts/pdeps.py b/Tools/scripts/pdeps.py --- a/Tools/scripts/pdeps.py +++ b/Tools/scripts/pdeps.py @@ -76,10 +76,9 @@ nextline = fp.readline() if not nextline: break line = line[:-1] + nextline - if m_import.match(line) >= 0: - (a, b), (a1, b1) = m_import.regs[:2] - elif m_from.match(line) >= 0: - (a, b), (a1, b1) = m_from.regs[:2] + m_found = m_import.match(line) or m_from.match(line) + if m_found: + (a, b), (a1, b1) = m_found.regs[:2] else: continue words = line[a1:b1].split(',') # print '#', line, words @@ -87,6 +86,7 @@ word = word.strip() if word not in list: list.append(word) + fp.close() # Compute closure (this is in fact totally general) @@ -123,7 +123,7 @@ def inverse(table): inv = {} for key in table.keys(): - if not inv.has_key(key): + if key not in inv: inv[key] = [] for item in table[key]: store(inv, item, key) -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Fri Apr 6 05:01:29 2012 From: python-checkins at python.org (r.david.murray) Date: Fri, 06 Apr 2012 05:01:29 +0200 Subject: [Python-checkins] =?utf8?q?cpython_=28merge_3=2E2_-=3E_default=29?= =?utf8?q?=3A_Merge_=2314492=3A_fix_some_bugs_in_Tools/scripts/pdeps=2Epy?= =?utf8?q?=2E?= Message-ID: http://hg.python.org/cpython/rev/26a7cc129b3d changeset: 76135:26a7cc129b3d parent: 76133:9506e636bd9f parent: 76134:e9b8115c5b25 user: R David Murray date: Thu Apr 05 23:01:13 2012 -0400 summary: Merge #14492: fix some bugs in Tools/scripts/pdeps.py. Initial patch by Popa Claudiu. files: Lib/test/test_tools.py | 27 +++++++++++++++++++++++++++ Tools/scripts/pdeps.py | 10 +++++----- 2 files changed, 32 insertions(+), 5 deletions(-) diff --git a/Lib/test/test_tools.py b/Lib/test/test_tools.py --- a/Lib/test/test_tools.py +++ b/Lib/test/test_tools.py @@ -6,8 +6,10 @@ import os import sys +import imp import unittest import sysconfig +import tempfile from test import support from test.script_helper import assert_python_ok @@ -72,6 +74,31 @@ import analyze_dxp +class PdepsTests(unittest.TestCase): + + @classmethod + def setUpClass(self): + path = os.path.join(scriptsdir, 'pdeps.py') + self.pdeps = imp.load_source('pdeps', path) + + @classmethod + def tearDownClass(self): + if 'pdeps' in sys.modules: + del sys.modules['pdeps'] + + def test_process_errors(self): + # Issue #14492: m_import.match(line) can be None. + with tempfile.TemporaryDirectory() as tmpdir: + fn = os.path.join(tmpdir, 'foo') + with open(fn, 'w') as stream: + stream.write("#!/this/will/fail") + self.pdeps.process(fn, {}) + + def test_inverse_attribute_error(self): + # Issue #14492: this used to fail with an AttributeError. 
+ self.pdeps.inverse({'a': []}) + + def test_main(): support.run_unittest(*[obj for obj in globals().values() if isinstance(obj, type)]) diff --git a/Tools/scripts/pdeps.py b/Tools/scripts/pdeps.py --- a/Tools/scripts/pdeps.py +++ b/Tools/scripts/pdeps.py @@ -76,10 +76,9 @@ nextline = fp.readline() if not nextline: break line = line[:-1] + nextline - if m_import.match(line) >= 0: - (a, b), (a1, b1) = m_import.regs[:2] - elif m_from.match(line) >= 0: - (a, b), (a1, b1) = m_from.regs[:2] + m_found = m_import.match(line) or m_from.match(line) + if m_found: + (a, b), (a1, b1) = m_found.regs[:2] else: continue words = line[a1:b1].split(',') # print '#', line, words @@ -87,6 +86,7 @@ word = word.strip() if word not in list: list.append(word) + fp.close() # Compute closure (this is in fact totally general) @@ -123,7 +123,7 @@ def inverse(table): inv = {} for key in table.keys(): - if not inv.has_key(key): + if key not in inv: inv[key] = [] for item in table[key]: store(inv, item, key) -- Repository URL: http://hg.python.org/cpython From solipsis at pitrou.net Fri Apr 6 05:40:39 2012 From: solipsis at pitrou.net (solipsis at pitrou.net) Date: Fri, 06 Apr 2012 05:40:39 +0200 Subject: [Python-checkins] Daily reference leaks (ec3e0f1a3b8d): sum=0 Message-ID: results for ec3e0f1a3b8d on branch "default" -------------------------------------------- Command line was: ['./python', '-m', 'test.regrtest', '-uall', '-R', '3:3:/home/antoine/cpython/refleaks/reflogIoG1k1', '-x'] From python-checkins at python.org Fri Apr 6 11:15:48 2012 From: python-checkins at python.org (sandro.tosi) Date: Fri, 06 Apr 2012 11:15:48 +0200 Subject: [Python-checkins] =?utf8?q?cpython=3A_Issue_=2314502=3A_it=27s_Ru?= =?utf8?q?ntimeError_on_3=2E3?= Message-ID: http://hg.python.org/cpython/rev/068a614e9d97 changeset: 76136:068a614e9d97 user: Sandro Tosi date: Fri Apr 06 11:15:06 2012 +0200 summary: Issue #14502: it's RuntimeError on 3.3 files: Doc/library/threading.rst | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-) diff --git a/Doc/library/threading.rst b/Doc/library/threading.rst --- a/Doc/library/threading.rst +++ b/Doc/library/threading.rst @@ -452,7 +452,7 @@ are blocked waiting for the lock to become unlocked, allow exactly one of them to proceed. - When invoked on an unlocked lock, a :exc:`ThreadError` is raised. + When invoked on an unlocked lock, a :exc:`RuntimeError` is raised. There is no return value. -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Fri Apr 6 16:42:06 2012 From: python-checkins at python.org (kristjan.jonsson) Date: Fri, 06 Apr 2012 16:42:06 +0200 Subject: [Python-checkins] =?utf8?q?cpython=3A_Set_a_time_threshold_in_tes?= =?utf8?q?t=5Fasyncore=2Ecapture=5Fserver_so_that_tests_don=27t?= Message-ID: http://hg.python.org/cpython/rev/d8c5c0f7aa56 changeset: 76137:d8c5c0f7aa56 user: Kristj?n Valur J?nsson date: Fri Apr 06 14:37:45 2012 +0000 summary: Set a time threshold in test_asyncore.capture_server so that tests don't deadlock if the main thread fails before sending all the data. 
files: Lib/test/test_asyncore.py | 7 ++++--- 1 files changed, 4 insertions(+), 3 deletions(-) diff --git a/Lib/test/test_asyncore.py b/Lib/test/test_asyncore.py --- a/Lib/test/test_asyncore.py +++ b/Lib/test/test_asyncore.py @@ -74,15 +74,16 @@ pass else: n = 200 - while n > 0: - r, w, e = select.select([conn], [], []) + start = time.time() + while n > 0 and time.time() - start < 3.0: + r, w, e = select.select([conn], [], [], 0.1) if r: + n -= 1 data = conn.recv(10) # keep everything except for the newline terminator buf.write(data.replace(b'\n', b'')) if b'\n' in data: break - n -= 1 time.sleep(0.01) conn.close() -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Fri Apr 6 18:55:29 2012 From: python-checkins at python.org (brett.cannon) Date: Fri, 06 Apr 2012 18:55:29 +0200 Subject: [Python-checkins] =?utf8?q?cpython=3A_Fix_test=5Fsite_from_modify?= =?utf8?q?ing_sysconfig=2E=5FCONFIG=5FVARS=2E?= Message-ID: http://hg.python.org/cpython/rev/1a3eb3b8ba42 changeset: 76138:1a3eb3b8ba42 parent: 76082:d0e4e3ef224e user: Brett Cannon date: Wed Apr 04 17:31:16 2012 -0400 summary: Fix test_site from modifying sysconfig._CONFIG_VARS. files: Lib/test/test_site.py | 5 ++++- 1 files changed, 4 insertions(+), 1 deletions(-) diff --git a/Lib/test/test_site.py b/Lib/test/test_site.py --- a/Lib/test/test_site.py +++ b/Lib/test/test_site.py @@ -39,6 +39,7 @@ self.old_base = site.USER_BASE self.old_site = site.USER_SITE self.old_prefixes = site.PREFIXES + self.original_vars = sysconfig._CONFIG_VARS self.old_vars = copy(sysconfig._CONFIG_VARS) def tearDown(self): @@ -47,7 +48,9 @@ site.USER_BASE = self.old_base site.USER_SITE = self.old_site site.PREFIXES = self.old_prefixes - sysconfig._CONFIG_VARS = self.old_vars + sysconfig._CONFIG_VARS = self.original_vars + sysconfig._CONFIG_VARS.clear() + sysconfig._CONFIG_VARS.update(self.old_vars) def test_makepath(self): # Test makepath() have an absolute path for its first return value -- Repository URL: http://hg.python.org/cpython From python-checkins at python.org Fri Apr 6 18:55:30 2012 From: python-checkins at python.org (brett.cannon) Date: Fri, 06 Apr 2012 18:55:30 +0200 Subject: [Python-checkins] =?utf8?q?cpython_=28merge_default_-=3E_default?= =?utf8?q?=29=3A_merge?= Message-ID: http://hg.python.org/cpython/rev/fbbb14604e94 changeset: 76139:fbbb14604e94 parent: 76138:1a3eb3b8ba42 parent: 76137:d8c5c0f7aa56 user: Brett Cannon date: Fri Apr 06 12:54:57 2012 -0400 summary: merge files: .bzrignore | 1 - .gitignore | 1 - .hgignore | 1 - Doc/library/_thread.rst | 2 +- Doc/library/collections.rst | 4 +- Doc/library/subprocess.rst | 2 +- Doc/library/threading.rst | 4 +- Doc/whatsnew/3.3.rst | 51 +- Include/methodobject.h | 16 +- Include/object.h | 5 + Lib/collections/abc.py | 10 +- Lib/idlelib/NEWS.txt | 3 + Lib/idlelib/ScriptBinding.py | 12 +- Lib/idlelib/tabbedpages.py | 4 +- Lib/multiprocessing/connection.py | 4 + Lib/test/seq_tests.py | 7 + Lib/test/test_array.py | 14 + Lib/test/test_asyncore.py | 7 +- Lib/test/test_builtin.py | 41 + Lib/test/test_bytes.py | 18 + Lib/test/test_decimal.py | 72 + Lib/test/test_deque.py | 13 + Lib/test/test_dict.py | 54 + Lib/test/test_enumerate.py | 30 +- Lib/test/test_iter.py | 43 +- Lib/test/test_itertools.py | 386 ++++++- Lib/test/test_list.py | 28 + Lib/test/test_multiprocessing.py | 4 + Lib/test/test_range.py | 24 +- Lib/test/test_set.py | 21 + Lib/test/test_tools.py | 73 +- Lib/test/test_tuple.py | 29 + Lib/test/test_xml_etree.py | 35 + Lib/tkinter/font.py | 63 +- Lib/tkinter/ttk.py | 2 +- 
Lib/webbrowser.py | 8 + Makefile.pre.in | 2 +- Misc/ACKS | 2 + Misc/NEWS | 20 +- Modules/_collectionsmodule.c | 93 +- Modules/_decimal/_decimal.c | 16 +- Modules/_decimal/tests/deccheck.py | 1 + Modules/_elementtree.c | 88 +- Modules/arraymodule.c | 30 +- Modules/itertoolsmodule.c | 890 ++++++++++++++- Objects/bytearrayobject.c | 36 +- Objects/bytesobject.c | 34 + Objects/dictobject.c | 53 + Objects/enumobject.c | 50 +- Objects/iterobject.c | 47 +- Objects/listobject.c | 80 + Objects/object.c | 13 + Objects/rangeobject.c | 92 + Objects/setobject.c | 45 +- Objects/tupleobject.c | 31 + Objects/unicodeobject.c | 85 +- Python/bltinmodule.c | 56 +- Python/pythonrun.c | 57 +- Tools/scripts/abitype.py | 88 +- Tools/scripts/find_recursionlimit.py | 24 +- Tools/scripts/findnocoding.py | 46 +- Tools/scripts/fixcid.py | 2 +- Tools/scripts/md5sum.py | 2 +- Tools/scripts/parseentities.py | 3 +- Tools/scripts/pdeps.py | 10 +- 65 files changed, 2751 insertions(+), 337 deletions(-) diff --git a/.bzrignore b/.bzrignore --- a/.bzrignore +++ b/.bzrignore @@ -33,7 +33,6 @@ Modules/config.c Modules/ld_so_aix Parser/pgen -Parser/pgen.stamp Lib/test/data/* Lib/lib2to3/Grammar*.pickle Lib/lib2to3/PatternGrammar*.pickle diff --git a/.gitignore b/.gitignore --- a/.gitignore +++ b/.gitignore @@ -32,7 +32,6 @@ PCbuild/*.pdb PCbuild/Win32-temp-* Parser/pgen -Parser/pgen.stamp __pycache__ autom4te.cache build/ diff --git a/.hgignore b/.hgignore --- a/.hgignore +++ b/.hgignore @@ -32,7 +32,6 @@ Modules/config.c Modules/ld_so_aix$ Parser/pgen$ -Parser/pgen.stamp$ PCbuild/amd64/ ^core ^python-gdb.py diff --git a/Doc/library/_thread.rst b/Doc/library/_thread.rst --- a/Doc/library/_thread.rst +++ b/Doc/library/_thread.rst @@ -94,7 +94,7 @@ *size* argument specifies the stack size to be used for subsequently created threads, and must be 0 (use platform or configured default) or a positive integer value of at least 32,768 (32kB). If changing the thread stack size is - unsupported, a :exc:`ThreadError` is raised. If the specified stack size is + unsupported, a :exc:`RuntimeError` is raised. If the specified stack size is invalid, a :exc:`ValueError` is raised and the stack size is unmodified. 32kB is currently the minimum supported stack size value to guarantee sufficient stack space for the interpreter itself. Note that some platforms may have diff --git a/Doc/library/collections.rst b/Doc/library/collections.rst --- a/Doc/library/collections.rst +++ b/Doc/library/collections.rst @@ -41,6 +41,8 @@ :class:`ChainMap` objects ------------------------- +.. versionadded:: 3.3 + A :class:`ChainMap` class is provided for quickly linking a number of mappings so they can be treated as a single unit. It is often much faster than creating a new dictionary and running multiple :meth:`~dict.update` calls. @@ -91,8 +93,6 @@ The use-cases also parallel those for the builtin :func:`super` function. A reference to ``d.parents`` is equivalent to: ``ChainMap(*d.maps[1:])``. - .. versionadded:: 3.3 - Example of simulating Python's internal lookup chain:: import builtins diff --git a/Doc/library/subprocess.rst b/Doc/library/subprocess.rst --- a/Doc/library/subprocess.rst +++ b/Doc/library/subprocess.rst @@ -804,7 +804,7 @@ to receive a SIGPIPE if p2 exits before p1. 
Alternatively, for trusted input, the shell's own pipeline support may still -be used directly: +be used directly:: output=`dmesg | grep hda` # becomes diff --git a/Doc/library/threading.rst b/Doc/library/threading.rst --- a/Doc/library/threading.rst +++ b/Doc/library/threading.rst @@ -174,7 +174,7 @@ *size* argument specifies the stack size to be used for subsequently created threads, and must be 0 (use platform or configured default) or a positive integer value of at least 32,768 (32kB). If changing the thread stack size is - unsupported, a :exc:`ThreadError` is raised. If the specified stack size is + unsupported, a :exc:`RuntimeError` is raised. If the specified stack size is invalid, a :exc:`ValueError` is raised and the stack size is unmodified. 32kB is currently the minimum supported stack size value to guarantee sufficient stack space for the interpreter itself. Note that some platforms may have @@ -452,7 +452,7 @@ are blocked waiting for the lock to become unlocked, allow exactly one of them to proceed. - Do not call this method when the lock is unlocked. + When invoked on an unlocked lock, a :exc:`RuntimeError` is raised. There is no return value. diff --git a/Doc/whatsnew/3.3.rst b/Doc/whatsnew/3.3.rst --- a/Doc/whatsnew/3.3.rst +++ b/Doc/whatsnew/3.3.rst @@ -486,6 +486,8 @@ (:issue:`10516`) +.. XXX mention new error messages for passing wrong number of arguments to functions + New and Improved Modules ======================== @@ -572,6 +574,26 @@ The ``unicode_internal`` codec has been deprecated. + +collections +----------- + +Addition of a new :class:`~collections.ChainMap` class to allow treating a +number of mappings as a single unit. + +(Written by Raymond Hettinger for :issue:`11089`, made public in +:issue:`11297`) + +The abstract base classes have been moved in a new :mod:`collections.abc` +module, to better differentiate between the abstract and the concrete +collections classes. Aliases for ABCs are still present in the +:mod:`collections` module to preserve existing imports. + +(:issue:`11085`) + +.. XXX addition of __slots__ to ABCs not recorded here: internal detail + + crypt ----- @@ -865,11 +887,12 @@ --------- :mod:`distutils` has undergone additions and refactoring under a new name, -:mod:`packaging`, to allow developers to break backward compatibility. +:mod:`packaging`, to allow developers to make far-reaching changes without +being constrained by backward compatibility. :mod:`distutils` is still provided in the standard library, but users are encouraged to transition to :mod:`packaging`. For older versions of Python, a -backport compatible with 2.4+ and 3.1+ will be made available on PyPI under the -name :mod:`distutils2`. +backport compatible with Python 2.5 and newer and 3.2 is available on PyPI +under the name `distutils2 `_. .. TODO add examples and howto to the packaging docs and link to them @@ -1057,12 +1080,24 @@ (:issue:`1673007`) +webbrowser +---------- + +The :mod:`webbrowser` module supports more browsers: Google Chrome (named +:program:`chrome`, :program:`chromium`, :program:`chrome-browser` or +:program:`chromium-browser` depending on the version and operating system) as +well as the the generic launchers :program:`xdg-open` from the FreeDesktop.org +project and :program:`gvfs-open` which is the default URI handler for GNOME 3. 
+ +(:issue:`13620` and :issue:`14493`) + + Optimizations ============= Major performance enhancements have been added: -* Thanks to the :pep:`393`, some operations on Unicode strings has been optimized: +* Thanks to :pep:`393`, some operations on Unicode strings have been optimized: * the memory footprint is divided by 2 to 4 depending on the text * encode an ASCII string to UTF-8 doesn't need to encode characters anymore, @@ -1081,7 +1116,7 @@ * :c:func:`PyMemoryView_FromMemory` -* The :pep:`393` added new Unicode types, macros and functions: +* :pep:`393` added new Unicode types, macros and functions: * High-level API: @@ -1124,7 +1159,7 @@ Deprecated Python modules, functions and methods ------------------------------------------------ -* The :mod:`distutils` modules has been deprecated. Use the new +* The :mod:`distutils` module has been deprecated. Use the new :mod:`packaging` module instead. * The ``unicode_internal`` codec has been deprecated because of the :pep:`393`, use UTF-8, UTF-16 (``utf-16-le`` or ``utf-16-be``), or UTF-32 @@ -1143,7 +1178,7 @@ Deprecated functions and types of the C API ------------------------------------------- -The :c:type:`Py_UNICODE` has been deprecated by the :pep:`393` and will be +The :c:type:`Py_UNICODE` has been deprecated by :pep:`393` and will be removed in Python 4. All functions using this type are deprecated: Unicode functions and methods using :c:type:`Py_UNICODE` and @@ -1245,7 +1280,7 @@ functions using this type are deprecated (but will stay available for at least five years). If you were using low-level Unicode APIs to construct and access unicode objects and you want to benefit of the - memory footprint reduction provided by the PEP 393, you have to convert + memory footprint reduction provided by PEP 393, you have to convert your code to the new :doc:`Unicode API <../c-api/unicode>`. However, if you only have been using high-level functions such as diff --git a/Include/methodobject.h b/Include/methodobject.h --- a/Include/methodobject.h +++ b/Include/methodobject.h @@ -17,7 +17,7 @@ typedef PyObject *(*PyCFunction)(PyObject *, PyObject *); typedef PyObject *(*PyCFunctionWithKeywords)(PyObject *, PyObject *, - PyObject *); + PyObject *); typedef PyObject *(*PyNoArgsFunction)(PyObject *); PyAPI_FUNC(PyCFunction) PyCFunction_GetFunction(PyObject *); @@ -33,22 +33,22 @@ (((PyCFunctionObject *)func) -> m_ml -> ml_flags & METH_STATIC ? 
\ NULL : ((PyCFunctionObject *)func) -> m_self) #define PyCFunction_GET_FLAGS(func) \ - (((PyCFunctionObject *)func) -> m_ml -> ml_flags) + (((PyCFunctionObject *)func) -> m_ml -> ml_flags) #endif PyAPI_FUNC(PyObject *) PyCFunction_Call(PyObject *, PyObject *, PyObject *); struct PyMethodDef { - const char *ml_name; /* The name of the built-in function/method */ - PyCFunction ml_meth; /* The C function that implements it */ - int ml_flags; /* Combination of METH_xxx flags, which mostly - describe the args expected by the C func */ - const char *ml_doc; /* The __doc__ attribute, or NULL */ + const char *ml_name; /* The name of the built-in function/method */ + PyCFunction ml_meth; /* The C function that implements it */ + int ml_flags; /* Combination of METH_xxx flags, which mostly + describe the args expected by the C func */ + const char *ml_doc; /* The __doc__ attribute, or NULL */ }; typedef struct PyMethodDef PyMethodDef; #define PyCFunction_New(ML, SELF) PyCFunction_NewEx((ML), (SELF), NULL) PyAPI_FUNC(PyObject *) PyCFunction_NewEx(PyMethodDef *, PyObject *, - PyObject *); + PyObject *); /* Flag passed to newmethodobject */ /* #define METH_OLDARGS 0x0000 -- unsupported now */ diff --git a/Include/object.h b/Include/object.h --- a/Include/object.h +++ b/Include/object.h @@ -535,6 +535,11 @@ _PyObject_GenericSetAttrWithDict(PyObject *, PyObject *, PyObject *, PyObject *); +/* Helper to look up a builtin object */ +#ifndef Py_LIMITED_API +PyAPI_FUNC(PyObject *) +_PyObject_GetBuiltin(const char *name); +#endif /* PyObject_Dir(obj) acts like Python builtins.dir(obj), returning a list of strings. PyObject_Dir(NULL) is like builtins.dir(), diff --git a/Lib/collections/abc.py b/Lib/collections/abc.py --- a/Lib/collections/abc.py +++ b/Lib/collections/abc.py @@ -18,9 +18,13 @@ "ByteString", ] - -### collection related types which are not exposed through builtin ### -## iterators ## +# Private list of types that we want to register with the various ABCs +# so that they will pass tests like: +# it = iter(somebytearray) +# assert isinstance(it, Iterable) +# Note: in other implementations, these types many not be distinct +# and they make have their own implementation specific types that +# are not included on this list. bytes_iterator = type(iter(b'')) bytearray_iterator = type(iter(bytearray())) #callable_iterator = ??? diff --git a/Lib/idlelib/NEWS.txt b/Lib/idlelib/NEWS.txt --- a/Lib/idlelib/NEWS.txt +++ b/Lib/idlelib/NEWS.txt @@ -1,6 +1,9 @@ What's New in IDLE 3.3? ========================= +- Issue #8515: Set __file__ when run file in IDLE. + Initial patch by Bruce Frederiksen. + - IDLE can be launched as `python -m idlelib` - Issue #14409: IDLE now properly executes commands in the Shell window diff --git a/Lib/idlelib/ScriptBinding.py b/Lib/idlelib/ScriptBinding.py --- a/Lib/idlelib/ScriptBinding.py +++ b/Lib/idlelib/ScriptBinding.py @@ -150,16 +150,16 @@ dirname = os.path.dirname(filename) # XXX Too often this discards arguments the user just set... 
interp.runcommand("""if 1: - _filename = %r + __file__ = {filename!r} import sys as _sys from os.path import basename as _basename if (not _sys.argv or - _basename(_sys.argv[0]) != _basename(_filename)): - _sys.argv = [_filename] + _basename(_sys.argv[0]) != _basename(__file__)): + _sys.argv = [__file__] import os as _os - _os.chdir(%r) - del _filename, _sys, _basename, _os - \n""" % (filename, dirname)) + _os.chdir({dirname!r}) + del _sys, _basename, _os + \n""".format(filename=filename, dirname=dirname)) interp.prepend_syspath(filename) # XXX KBK 03Jul04 When run w/o subprocess, runtime warnings still # go to __stderr__. With subprocess, they go to the shell. diff --git a/Lib/idlelib/tabbedpages.py b/Lib/idlelib/tabbedpages.py --- a/Lib/idlelib/tabbedpages.py +++ b/Lib/idlelib/tabbedpages.py @@ -78,7 +78,7 @@ def remove_tab(self, tab_name): """Remove the tab named """ if not tab_name in self._tab_names: - raise KeyError("No such Tab: '%s" % page_name) + raise KeyError("No such Tab: '%s" % tab_name) self._tab_names.remove(tab_name) self._arrange_tabs() @@ -88,7 +88,7 @@ if tab_name == self._selected_tab: return if tab_name is not None and tab_name not in self._tabs: - raise KeyError("No such Tab: '%s" % page_name) + raise KeyError("No such Tab: '%s" % tab_name) # deselect the current selected tab if self._selected_tab is not None: diff --git a/Lib/multiprocessing/connection.py b/Lib/multiprocessing/connection.py --- a/Lib/multiprocessing/connection.py +++ b/Lib/multiprocessing/connection.py @@ -111,6 +111,10 @@ if sys.platform != 'win32' and family == 'AF_PIPE': raise ValueError('Family %s is not recognized.' % family) + if sys.platform == 'win32' and family == 'AF_UNIX': + # double check + if not hasattr(socket, family): + raise ValueError('Family %s is not recognized.' 
% family) def address_type(address): ''' diff --git a/Lib/test/seq_tests.py b/Lib/test/seq_tests.py --- a/Lib/test/seq_tests.py +++ b/Lib/test/seq_tests.py @@ -4,6 +4,7 @@ import unittest import sys +import pickle # Various iterables # This is used for checking the constructor (here and in test_deque.py) @@ -388,3 +389,9 @@ self.assertEqual(a.index(0, -4*sys.maxsize, 4*sys.maxsize), 2) self.assertRaises(ValueError, a.index, 0, 4*sys.maxsize,-4*sys.maxsize) self.assertRaises(ValueError, a.index, 2, 0, -10) + + def test_pickle(self): + lst = self.type2test([4, 5, 6, 7]) + lst2 = pickle.loads(pickle.dumps(lst)) + self.assertEqual(lst2, lst) + self.assertNotEqual(id(lst2), id(lst)) diff --git a/Lib/test/test_array.py b/Lib/test/test_array.py --- a/Lib/test/test_array.py +++ b/Lib/test/test_array.py @@ -285,6 +285,20 @@ self.assertEqual(a.x, b.x) self.assertEqual(type(a), type(b)) + def test_iterator_pickle(self): + data = array.array(self.typecode, self.example) + orgit = iter(data) + d = pickle.dumps(orgit) + it = pickle.loads(d) + self.assertEqual(type(orgit), type(it)) + self.assertEqual(list(it), list(data)) + + if len(data): + it = pickle.loads(d) + next(it) + d = pickle.dumps(it) + self.assertEqual(list(it), list(data)[1:]) + def test_insert(self): a = array.array(self.typecode, self.example) a.insert(0, self.example[0]) diff --git a/Lib/test/test_asyncore.py b/Lib/test/test_asyncore.py --- a/Lib/test/test_asyncore.py +++ b/Lib/test/test_asyncore.py @@ -74,15 +74,16 @@ pass else: n = 200 - while n > 0: - r, w, e = select.select([conn], [], []) + start = time.time() + while n > 0 and time.time() - start < 3.0: + r, w, e = select.select([conn], [], [], 0.1) if r: + n -= 1 data = conn.recv(10) # keep everything except for the newline terminator buf.write(data.replace(b'\n', b'')) if b'\n' in data: break - n -= 1 time.sleep(0.01) conn.close() diff --git a/Lib/test/test_builtin.py b/Lib/test/test_builtin.py --- a/Lib/test/test_builtin.py +++ b/Lib/test/test_builtin.py @@ -14,6 +14,7 @@ import traceback from test.support import TESTFN, unlink, run_unittest, check_warnings from operator import neg +import pickle try: import pty, signal except ImportError: @@ -110,7 +111,30 @@ def __iter__(self): raise RuntimeError +def filter_char(arg): + return ord(arg) > ord("d") + +def map_char(arg): + return chr(ord(arg)+1) + class BuiltinTest(unittest.TestCase): + # Helper to check picklability + def check_iter_pickle(self, it, seq): + itorg = it + d = pickle.dumps(it) + it = pickle.loads(d) + self.assertEqual(type(itorg), type(it)) + self.assertEqual(list(it), seq) + + #test the iterator after dropping one from it + it = pickle.loads(d) + try: + next(it) + except StopIteration: + return + d = pickle.dumps(it) + it = pickle.loads(d) + self.assertEqual(list(it), seq[1:]) def test_import(self): __import__('sys') @@ -566,6 +590,11 @@ self.assertEqual(list(filter(lambda x: x>=3, (1, 2, 3, 4))), [3, 4]) self.assertRaises(TypeError, list, filter(42, (1, 2))) + def test_filter_pickle(self): + f1 = filter(filter_char, "abcdeabcde") + f2 = filter(filter_char, "abcdeabcde") + self.check_iter_pickle(f1, list(f2)) + def test_getattr(self): self.assertTrue(getattr(sys, 'stdout') is sys.stdout) self.assertRaises(TypeError, getattr, sys, 1) @@ -759,6 +788,11 @@ raise RuntimeError self.assertRaises(RuntimeError, list, map(badfunc, range(5))) + def test_map_pickle(self): + m1 = map(map_char, "Is this the real life?") + m2 = map(map_char, "Is this the real life?") + self.check_iter_pickle(m1, list(m2)) + def test_max(self): 
self.assertEqual(max('123123'), '3') self.assertEqual(max(1, 2, 3), 3) @@ -1300,6 +1334,13 @@ return i self.assertRaises(ValueError, list, zip(BadSeq(), BadSeq())) + def test_zip_pickle(self): + a = (1, 2, 3) + b = (4, 5, 6) + t = [(1, 4), (2, 5), (3, 6)] + z1 = zip(a, b) + self.check_iter_pickle(z1, t) + def test_format(self): # Test the basic machinery of the format() builtin. Don't test # the specifics of the various formatters diff --git a/Lib/test/test_bytes.py b/Lib/test/test_bytes.py --- a/Lib/test/test_bytes.py +++ b/Lib/test/test_bytes.py @@ -518,6 +518,24 @@ q = pickle.loads(ps) self.assertEqual(b, q) + def test_iterator_pickling(self): + for b in b"", b"a", b"abc", b"\xffab\x80", b"\0\0\377\0\0": + it = itorg = iter(self.type2test(b)) + data = list(self.type2test(b)) + d = pickle.dumps(it) + it = pickle.loads(d) + self.assertEqual(type(itorg), type(it)) + self.assertEqual(list(it), data) + + it = pickle.loads(d) + try: + next(it) + except StopIteration: + continue + d = pickle.dumps(it) + it = pickle.loads(d) + self.assertEqual(list(it), data[1:]) + def test_strip(self): b = self.type2test(b'mississippi') self.assertEqual(b.strip(b'i'), b'mississipp') diff --git a/Lib/test/test_decimal.py b/Lib/test/test_decimal.py --- a/Lib/test/test_decimal.py +++ b/Lib/test/test_decimal.py @@ -4953,6 +4953,78 @@ self.assertRaises(ValueError, get_fmt, 12345, invalid_dot, 'g') self.assertRaises(ValueError, get_fmt, 12345, invalid_sep, 'g') + def test_exact_conversion(self): + Decimal = C.Decimal + localcontext = C.localcontext + InvalidOperation = C.InvalidOperation + + with localcontext() as c: + + c.traps[InvalidOperation] = True + + # Clamped + x = "0e%d" % sys.maxsize + self.assertRaises(InvalidOperation, Decimal, x) + + x = "0e%d" % (-sys.maxsize-1) + self.assertRaises(InvalidOperation, Decimal, x) + + # Overflow + x = "1e%d" % sys.maxsize + self.assertRaises(InvalidOperation, Decimal, x) + + # Underflow + x = "1e%d" % (-sys.maxsize-1) + self.assertRaises(InvalidOperation, Decimal, x) + + def test_from_tuple(self): + Decimal = C.Decimal + localcontext = C.localcontext + InvalidOperation = C.InvalidOperation + Overflow = C.Overflow + Underflow = C.Underflow + + with localcontext() as c: + + c.traps[InvalidOperation] = True + c.traps[Overflow] = True + c.traps[Underflow] = True + + # SSIZE_MAX + x = (1, (), sys.maxsize) + self.assertEqual(str(c.create_decimal(x)), '-0E+999999') + self.assertRaises(InvalidOperation, Decimal, x) + + x = (1, (0, 1, 2), sys.maxsize) + self.assertRaises(Overflow, c.create_decimal, x) + self.assertRaises(InvalidOperation, Decimal, x) + + # SSIZE_MIN + x = (1, (), -sys.maxsize-1) + self.assertEqual(str(c.create_decimal(x)), '-0E-1000026') + self.assertRaises(InvalidOperation, Decimal, x) + + x = (1, (0, 1, 2), -sys.maxsize-1) + self.assertRaises(Underflow, c.create_decimal, x) + self.assertRaises(InvalidOperation, Decimal, x) + + # OverflowError + x = (1, (), sys.maxsize+1) + self.assertRaises(OverflowError, c.create_decimal, x) + self.assertRaises(OverflowError, Decimal, x) + + x = (1, (), -sys.maxsize-2) + self.assertRaises(OverflowError, c.create_decimal, x) + self.assertRaises(OverflowError, Decimal, x) + + # Specials + x = (1, (), "N") + self.assertEqual(str(Decimal(x)), '-sNaN') + x = (1, (0,), "N") + self.assertEqual(str(Decimal(x)), '-sNaN') + x = (1, (0, 1), "N") + self.assertEqual(str(Decimal(x)), '-sNaN1') + all_tests = [ CExplicitConstructionTest, PyExplicitConstructionTest, diff --git a/Lib/test/test_deque.py b/Lib/test/test_deque.py --- 
a/Lib/test/test_deque.py +++ b/Lib/test/test_deque.py @@ -471,6 +471,19 @@ ## self.assertNotEqual(id(d), id(e)) ## self.assertEqual(id(e), id(e[-1])) + def test_iterator_pickle(self): + data = deque(range(200)) + it = itorg = iter(data) + d = pickle.dumps(it) + it = pickle.loads(d) + self.assertEqual(type(itorg), type(it)) + self.assertEqual(list(it), list(data)) + + it = pickle.loads(d) + next(it) + d = pickle.dumps(it) + self.assertEqual(list(it), list(data)[1:]) + def test_deepcopy(self): mut = [10] d = deque([mut]) diff --git a/Lib/test/test_dict.py b/Lib/test/test_dict.py --- a/Lib/test/test_dict.py +++ b/Lib/test/test_dict.py @@ -2,7 +2,9 @@ from test import support import collections, random, string +import collections.abc import gc, weakref +import pickle class DictTest(unittest.TestCase): @@ -803,6 +805,58 @@ pass self._tracked(MyDict()) + def test_iterator_pickling(self): + data = {1:"a", 2:"b", 3:"c"} + it = iter(data) + d = pickle.dumps(it) + it = pickle.loads(d) + self.assertEqual(sorted(it), sorted(data)) + + it = pickle.loads(d) + try: + drop = next(it) + except StopIteration: + return + d = pickle.dumps(it) + it = pickle.loads(d) + del data[drop] + self.assertEqual(sorted(it), sorted(data)) + + def test_itemiterator_pickling(self): + data = {1:"a", 2:"b", 3:"c"} + # dictviews aren't picklable, only their iterators + itorg = iter(data.items()) + d = pickle.dumps(itorg) + it = pickle.loads(d) + # note that the type of type of the unpickled iterator + # is not necessarily the same as the original. It is + # merely an object supporting the iterator protocol, yielding + # the same objects as the original one. + # self.assertEqual(type(itorg), type(it)) + self.assertTrue(isinstance(it, collections.abc.Iterator)) + self.assertEqual(dict(it), data) + + it = pickle.loads(d) + drop = next(it) + d = pickle.dumps(it) + it = pickle.loads(d) + del data[drop[0]] + self.assertEqual(dict(it), data) + + def test_valuesiterator_pickling(self): + data = {1:"a", 2:"b", 3:"c"} + # data.values() isn't picklable, only its iterator + it = iter(data.values()) + d = pickle.dumps(it) + it = pickle.loads(d) + self.assertEqual(sorted(list(it)), sorted(list(data.values()))) + + it = pickle.loads(d) + drop = next(it) + d = pickle.dumps(it) + it = pickle.loads(d) + values = list(it) + [drop] + self.assertEqual(sorted(values), sorted(list(data.values()))) from test import mapping_tests diff --git a/Lib/test/test_enumerate.py b/Lib/test/test_enumerate.py --- a/Lib/test/test_enumerate.py +++ b/Lib/test/test_enumerate.py @@ -1,5 +1,6 @@ import unittest import sys +import pickle from test import support @@ -61,7 +62,25 @@ def __iter__(self): return self -class EnumerateTestCase(unittest.TestCase): +class PickleTest: + # Helper to check picklability + def check_pickle(self, itorg, seq): + d = pickle.dumps(itorg) + it = pickle.loads(d) + self.assertEqual(type(itorg), type(it)) + self.assertEqual(list(it), seq) + + it = pickle.loads(d) + try: + next(it) + except StopIteration: + self.assertFalse(seq[1:]) + return + d = pickle.dumps(it) + it = pickle.loads(d) + self.assertEqual(list(it), seq[1:]) + +class EnumerateTestCase(unittest.TestCase, PickleTest): enum = enumerate seq, res = 'abc', [(0,'a'), (1,'b'), (2,'c')] @@ -73,6 +92,9 @@ self.assertEqual(list(self.enum(self.seq)), self.res) self.enum.__doc__ + def test_pickle(self): + self.check_pickle(self.enum(self.seq), self.res) + def test_getitemseqn(self): self.assertEqual(list(self.enum(G(self.seq))), self.res) e = self.enum(G('')) @@ -126,7 +148,7 @@ seq = 
range(10,20000,2) res = list(zip(range(20000), seq)) -class TestReversed(unittest.TestCase): +class TestReversed(unittest.TestCase, PickleTest): def test_simple(self): class A: @@ -212,6 +234,10 @@ ngi = NoGetItem() self.assertRaises(TypeError, reversed, ngi) + def test_pickle(self): + for data in 'abc', range(5), tuple(enumerate('abc')), range(1,17,5): + self.check_pickle(reversed(data), list(data)[::-1]) + class EnumerateStartTestCase(EnumerateTestCase): diff --git a/Lib/test/test_iter.py b/Lib/test/test_iter.py --- a/Lib/test/test_iter.py +++ b/Lib/test/test_iter.py @@ -2,6 +2,8 @@ import unittest from test.support import run_unittest, TESTFN, unlink, cpython_only +import pickle +import collections.abc # Test result of triple loop (too big to inline) TRIPLETS = [(0, 0, 0), (0, 0, 1), (0, 0, 2), @@ -28,6 +30,8 @@ raise StopIteration self.i = res + 1 return res + def __iter__(self): + return self class IteratingSequenceClass: def __init__(self, n): @@ -49,7 +53,9 @@ class TestCase(unittest.TestCase): # Helper to check that an iterator returns a given sequence - def check_iterator(self, it, seq): + def check_iterator(self, it, seq, pickle=True): + if pickle: + self.check_pickle(it, seq) res = [] while 1: try: @@ -60,12 +66,33 @@ self.assertEqual(res, seq) # Helper to check that a for loop generates a given sequence - def check_for_loop(self, expr, seq): + def check_for_loop(self, expr, seq, pickle=True): + if pickle: + self.check_pickle(iter(expr), seq) res = [] for val in expr: res.append(val) self.assertEqual(res, seq) + # Helper to check picklability + def check_pickle(self, itorg, seq): + d = pickle.dumps(itorg) + it = pickle.loads(d) + # Cannot assert type equality because dict iterators unpickle as list + # iterators. + # self.assertEqual(type(itorg), type(it)) + self.assertTrue(isinstance(it, collections.abc.Iterator)) + self.assertEqual(list(it), seq) + + it = pickle.loads(d) + try: + next(it) + except StopIteration: + return + d = pickle.dumps(it) + it = pickle.loads(d) + self.assertEqual(list(it), seq[1:]) + # Test basic use of iter() function def test_iter_basic(self): self.check_iterator(iter(range(10)), list(range(10))) @@ -138,7 +165,7 @@ if i > 100: raise IndexError # Emergency stop return i - self.check_iterator(iter(C(), 10), list(range(10))) + self.check_iterator(iter(C(), 10), list(range(10)), pickle=False) # Test two-argument iter() with function def test_iter_function(self): @@ -146,7 +173,7 @@ i = state[0] state[0] = i+1 return i - self.check_iterator(iter(spam, 10), list(range(10))) + self.check_iterator(iter(spam, 10), list(range(10)), pickle=False) # Test two-argument iter() with function that raises StopIteration def test_iter_function_stop(self): @@ -156,7 +183,7 @@ raise StopIteration state[0] = i+1 return i - self.check_iterator(iter(spam, 20), list(range(10))) + self.check_iterator(iter(spam, 20), list(range(10)), pickle=False) # Test exception propagation through function iterator def test_exception_function(self): @@ -198,7 +225,7 @@ if i == 10: raise StopIteration return SequenceClass.__getitem__(self, i) - self.check_for_loop(MySequenceClass(20), list(range(10))) + self.check_for_loop(MySequenceClass(20), list(range(10)), pickle=False) # Test a big range def test_iter_big_range(self): @@ -237,8 +264,8 @@ f.close() f = open(TESTFN, "r") try: - self.check_for_loop(f, ["0\n", "1\n", "2\n", "3\n", "4\n"]) - self.check_for_loop(f, []) + self.check_for_loop(f, ["0\n", "1\n", "2\n", "3\n", "4\n"], pickle=False) + self.check_for_loop(f, [], pickle=False) finally: 
f.close() try: diff --git a/Lib/test/test_itertools.py b/Lib/test/test_itertools.py --- a/Lib/test/test_itertools.py +++ b/Lib/test/test_itertools.py @@ -37,6 +37,13 @@ 'Test predicate' return x%2==1 +def tupleize(*args): + return args + +def irange(n): + for i in range(n): + yield i + class StopNow: 'Class emulating an empty iterable.' def __iter__(self): @@ -55,8 +62,59 @@ 'Factorial' return prod(range(1, n+1)) +# root level methods for pickling ability +def testR(r): + return r[0] + +def testR2(r): + return r[2] + +def underten(x): + return x<10 + class TestBasicOps(unittest.TestCase): + def pickletest(self, it, stop=4, take=1, compare=None): + """Test that an iterator is the same after pickling, also when part-consumed""" + def expand(it, i=0): + # Recursively expand iterables, within sensible bounds + if i > 10: + raise RuntimeError("infinite recursion encountered") + if isinstance(it, str): + return it + try: + l = list(islice(it, stop)) + except TypeError: + return it # can't expand it + return [expand(e, i+1) for e in l] + + # Test the initial copy against the original + dump = pickle.dumps(it) + i2 = pickle.loads(dump) + self.assertEqual(type(it), type(i2)) + a, b = expand(it), expand(i2) + self.assertEqual(a, b) + if compare: + c = expand(compare) + self.assertEqual(a, c) + + # Take from the copy, and create another copy and compare them. + i3 = pickle.loads(dump) + took = 0 + try: + for i in range(take): + next(i3) + took += 1 + except StopIteration: + pass #in case there is less data than 'take' + dump = pickle.dumps(i3) + i4 = pickle.loads(dump) + a, b = expand(i3), expand(i4) + self.assertEqual(a, b) + if compare: + c = expand(compare[took:]) + self.assertEqual(a, c); + def test_accumulate(self): self.assertEqual(list(accumulate(range(10))), # one positional arg [0, 1, 3, 6, 10, 15, 21, 28, 36, 45]) @@ -83,6 +141,7 @@ [2, 16, 144, 720, 5040, 0, 0, 0, 0, 0]) with self.assertRaises(TypeError): list(accumulate(s, chr)) # unary-operation + self.pickletest(accumulate(range(10))) # test pickling def test_chain(self): @@ -106,14 +165,43 @@ self.assertEqual(take(4, chain.from_iterable(['abc', 'def'])), list('abcd')) self.assertRaises(TypeError, list, chain.from_iterable([2, 3])) + def test_chain_reducible(self): + operators = [copy.deepcopy, + lambda s: pickle.loads(pickle.dumps(s))] + for oper in operators: + it = chain('abc', 'def') + self.assertEqual(list(oper(it)), list('abcdef')) + self.assertEqual(next(it), 'a') + self.assertEqual(list(oper(it)), list('bcdef')) + + self.assertEqual(list(oper(chain(''))), []) + self.assertEqual(take(4, oper(chain('abc', 'def'))), list('abcd')) + self.assertRaises(TypeError, list, oper(chain(2, 3))) + self.pickletest(chain('abc', 'def'), compare=list('abcdef')) + def test_combinations(self): self.assertRaises(TypeError, combinations, 'abc') # missing r argument self.assertRaises(TypeError, combinations, 'abc', 2, 1) # too many arguments self.assertRaises(TypeError, combinations, None) # pool is not iterable self.assertRaises(ValueError, combinations, 'abc', -2) # r is negative - self.assertEqual(list(combinations('abc', 32)), []) # r > n - self.assertEqual(list(combinations(range(4), 3)), - [(0,1,2), (0,1,3), (0,2,3), (1,2,3)]) + + for op in (lambda a:a, lambda a:pickle.loads(pickle.dumps(a))): + self.assertEqual(list(op(combinations('abc', 32))), []) # r > n + + self.assertEqual(list(op(combinations('ABCD', 2))), + [('A','B'), ('A','C'), ('A','D'), ('B','C'), ('B','D'), ('C','D')]) + testIntermediate = combinations('ABCD', 2) + 
next(testIntermediate) + self.assertEqual(list(op(testIntermediate)), + [('A','C'), ('A','D'), ('B','C'), ('B','D'), ('C','D')]) + + self.assertEqual(list(op(combinations(range(4), 3))), + [(0,1,2), (0,1,3), (0,2,3), (1,2,3)]) + testIntermediate = combinations(range(4), 3) + next(testIntermediate) + self.assertEqual(list(op(testIntermediate)), + [(0,1,3), (0,2,3), (1,2,3)]) + def combinations1(iterable, r): 'Pure python version shown in the docs' @@ -168,6 +256,9 @@ self.assertEqual(result, list(combinations2(values, r))) # matches second pure python version self.assertEqual(result, list(combinations3(values, r))) # matches second pure python version + self.pickletest(combinations(values, r)) # test pickling + + # Test implementation detail: tuple re-use @support.impl_detail("tuple reuse is specific to CPython") def test_combinations_tuple_reuse(self): self.assertEqual(len(set(map(id, combinations('abcde', 3)))), 1) @@ -179,8 +270,15 @@ self.assertRaises(TypeError, cwr, 'abc', 2, 1) # too many arguments self.assertRaises(TypeError, cwr, None) # pool is not iterable self.assertRaises(ValueError, cwr, 'abc', -2) # r is negative - self.assertEqual(list(cwr('ABC', 2)), - [('A','A'), ('A','B'), ('A','C'), ('B','B'), ('B','C'), ('C','C')]) + + for op in (lambda a:a, lambda a:pickle.loads(pickle.dumps(a))): + self.assertEqual(list(op(cwr('ABC', 2))), + [('A','A'), ('A','B'), ('A','C'), ('B','B'), ('B','C'), ('C','C')]) + testIntermediate = cwr('ABC', 2) + next(testIntermediate) + self.assertEqual(list(op(testIntermediate)), + [('A','B'), ('A','C'), ('B','B'), ('B','C'), ('C','C')]) + def cwr1(iterable, r): 'Pure python version shown in the docs' @@ -239,6 +337,10 @@ self.assertEqual(result, list(cwr1(values, r))) # matches first pure python version self.assertEqual(result, list(cwr2(values, r))) # matches second pure python version + self.pickletest(cwr(values,r)) # test pickling + + # Test implementation detail: tuple re-use + @support.impl_detail("tuple reuse is specific to CPython") def test_combinations_with_replacement_tuple_reuse(self): cwr = combinations_with_replacement @@ -305,6 +407,8 @@ self.assertEqual(result, list(permutations(values, None))) # test r as None self.assertEqual(result, list(permutations(values))) # test default r + self.pickletest(permutations(values, r)) # test pickling + @support.impl_detail("tuple resuse is CPython specific") def test_permutations_tuple_reuse(self): self.assertEqual(len(set(map(id, permutations('abcde', 3)))), 1) @@ -359,6 +463,24 @@ self.assertRaises(TypeError, compress, range(6)) # too few args self.assertRaises(TypeError, compress, range(6), None) # too many args + # check copy, deepcopy, pickle + for op in (lambda a:copy.copy(a), lambda a:copy.deepcopy(a), lambda a:pickle.loads(pickle.dumps(a))): + for data, selectors, result1, result2 in [ + ('ABCDEF', [1,0,1,0,1,1], 'ACEF', 'CEF'), + ('ABCDEF', [0,0,0,0,0,0], '', ''), + ('ABCDEF', [1,1,1,1,1,1], 'ABCDEF', 'BCDEF'), + ('ABCDEF', [1,0,1], 'AC', 'C'), + ('ABC', [0,1,1,1,1,1], 'BC', 'C'), + ]: + + self.assertEqual(list(op(compress(data=data, selectors=selectors))), list(result1)) + self.assertEqual(list(op(compress(data, selectors))), list(result1)) + testIntermediate = compress(data, selectors) + if result1: + next(testIntermediate) + self.assertEqual(list(op(testIntermediate)), list(result2)) + + def test_count(self): self.assertEqual(lzip('abc',count()), [('a', 0), ('b', 1), ('c', 2)]) self.assertEqual(lzip('abc',count(3)), [('a', 3), ('b', 4), ('c', 5)]) @@ -393,7 +515,7 @@ c = count(value) 
self.assertEqual(next(copy.copy(c)), value) self.assertEqual(next(copy.deepcopy(c)), value) - self.assertEqual(next(pickle.loads(pickle.dumps(c))), value) + self.pickletest(count(value)) #check proper internal error handling for large "step' sizes count(1, maxsize+5); sys.exc_info() @@ -440,6 +562,7 @@ else: r2 = ('count(%r, %r)' % (i, j)).replace('L', '') self.assertEqual(r1, r2) + self.pickletest(count(i, j)) def test_cycle(self): self.assertEqual(take(10, cycle('abc')), list('abcabcabca')) @@ -448,6 +571,18 @@ self.assertRaises(TypeError, cycle, 5) self.assertEqual(list(islice(cycle(gen3()),10)), [0,1,2,0,1,2,0,1,2,0]) + # check copy, deepcopy, pickle + c = cycle('abc') + self.assertEqual(next(c), 'a') + #simple copy currently not supported, because __reduce__ returns + #an internal iterator + #self.assertEqual(take(10, copy.copy(c)), list('bcabcabcab')) + self.assertEqual(take(10, copy.deepcopy(c)), list('bcabcabcab')) + self.assertEqual(take(10, pickle.loads(pickle.dumps(c))), list('bcabcabcab')) + next(c) + self.assertEqual(take(10, pickle.loads(pickle.dumps(c))), list('cabcabcabc')) + self.pickletest(cycle('abc')) + def test_groupby(self): # Check whether it accepts arguments correctly self.assertEqual([], list(groupby([]))) @@ -466,18 +601,37 @@ dup.append(elem) self.assertEqual(s, dup) + # Check normal pickled + dup = [] + for k, g in pickle.loads(pickle.dumps(groupby(s, testR))): + for elem in g: + self.assertEqual(k, elem[0]) + dup.append(elem) + self.assertEqual(s, dup) + # Check nested case dup = [] - for k, g in groupby(s, lambda r:r[0]): - for ik, ig in groupby(g, lambda r:r[2]): + for k, g in groupby(s, testR): + for ik, ig in groupby(g, testR2): for elem in ig: self.assertEqual(k, elem[0]) self.assertEqual(ik, elem[2]) dup.append(elem) self.assertEqual(s, dup) + # Check nested and pickled + dup = [] + for k, g in pickle.loads(pickle.dumps(groupby(s, testR))): + for ik, ig in pickle.loads(pickle.dumps(groupby(g, testR2))): + for elem in ig: + self.assertEqual(k, elem[0]) + self.assertEqual(ik, elem[2]) + dup.append(elem) + self.assertEqual(s, dup) + + # Check case where inner iterator is not used - keys = [k for k, g in groupby(s, lambda r:r[0])] + keys = [k for k, g in groupby(s, testR)] expectedkeys = set([r[0] for r in s]) self.assertEqual(set(keys), expectedkeys) self.assertEqual(len(keys), len(expectedkeys)) @@ -548,6 +702,20 @@ self.assertRaises(TypeError, filter, isEven, 3) self.assertRaises(TypeError, next, filter(range(6), range(6))) + # check copy, deepcopy, pickle + ans = [0,2,4] + + c = filter(isEven, range(6)) + self.assertEqual(list(copy.copy(c)), ans) + c = filter(isEven, range(6)) + self.assertEqual(list(copy.deepcopy(c)), ans) + c = filter(isEven, range(6)) + self.assertEqual(list(pickle.loads(pickle.dumps(c))), ans) + next(c) + self.assertEqual(list(pickle.loads(pickle.dumps(c))), ans[1:]) + c = filter(isEven, range(6)) + self.pickletest(c) + def test_filterfalse(self): self.assertEqual(list(filterfalse(isEven, range(6))), [1,3,5]) self.assertEqual(list(filterfalse(None, [0,1,0,2,0])), [0,0,0]) @@ -558,6 +726,7 @@ self.assertRaises(TypeError, filterfalse, lambda x:x, range(6), 7) self.assertRaises(TypeError, filterfalse, isEven, 3) self.assertRaises(TypeError, next, filterfalse(range(6), range(6))) + self.pickletest(filterfalse(isEven, range(6))) def test_zip(self): # XXX This is rather silly now that builtin zip() calls zip()... 
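The copy, deepcopy and pickle checks added above for count(), cycle(), groupby(), filter() and filterfalse() all exercise the same new capability: an itertools iterator can be serialized part-way through and the unpickled copy resumes from that point, which is what the pickletest() helper keeps verifying. A short hand-written illustration, with illustrative values, again assuming an interpreter that includes these patches:

    import copy
    import pickle
    from itertools import count, islice

    c = count(10)
    next(c)                                    # the original has advanced past 10
    clone = pickle.loads(pickle.dumps(c))      # snapshot of the current state
    twin = copy.deepcopy(c)                    # deepcopy goes through the same __reduce__
    assert list(islice(clone, 3)) == [11, 12, 13]
    assert list(islice(twin, 3)) == [11, 12, 13]
    assert next(c) == 11                       # the original is left undisturbed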
@@ -582,6 +751,23 @@ ids = list(map(id, list(zip('abc', 'def')))) self.assertEqual(len(dict.fromkeys(ids)), len(ids)) + # check copy, deepcopy, pickle + ans = [(x,y) for x, y in copy.copy(zip('abc',count()))] + self.assertEqual(ans, [('a', 0), ('b', 1), ('c', 2)]) + + ans = [(x,y) for x, y in copy.deepcopy(zip('abc',count()))] + self.assertEqual(ans, [('a', 0), ('b', 1), ('c', 2)]) + + ans = [(x,y) for x, y in pickle.loads(pickle.dumps(zip('abc',count())))] + self.assertEqual(ans, [('a', 0), ('b', 1), ('c', 2)]) + + testIntermediate = zip('abc',count()) + next(testIntermediate) + ans = [(x,y) for x, y in pickle.loads(pickle.dumps(testIntermediate))] + self.assertEqual(ans, [('b', 1), ('c', 2)]) + + self.pickletest(zip('abc', count())) + def test_ziplongest(self): for args in [ ['abc', range(6)], @@ -631,6 +817,12 @@ ids = list(map(id, list(zip_longest('abc', 'def')))) self.assertEqual(len(dict.fromkeys(ids)), len(ids)) + def test_zip_longest_pickling(self): + self.pickletest(zip_longest("abc", "def")) + self.pickletest(zip_longest("abc", "defgh")) + self.pickletest(zip_longest("abc", "defgh", fillvalue=1)) + self.pickletest(zip_longest("", "defgh")) + def test_bug_7244(self): class Repeater: @@ -734,6 +926,20 @@ self.assertEqual(len(set(map(id, product('abc', 'def')))), 1) self.assertNotEqual(len(set(map(id, list(product('abc', 'def'))))), 1) + def test_product_pickling(self): + # check copy, deepcopy, pickle + for args, result in [ + ([], [()]), # zero iterables + (['ab'], [('a',), ('b',)]), # one iterable + ([range(2), range(3)], [(0,0), (0,1), (0,2), (1,0), (1,1), (1,2)]), # two iterables + ([range(0), range(2), range(3)], []), # first iterable with zero length + ([range(2), range(0), range(3)], []), # middle iterable with zero length + ([range(2), range(3), range(0)], []), # last iterable with zero length + ]: + self.assertEqual(list(copy.copy(product(*args))), result) + self.assertEqual(list(copy.deepcopy(product(*args))), result) + self.pickletest(product(*args)) + def test_repeat(self): self.assertEqual(list(repeat(object='a', times=3)), ['a', 'a', 'a']) self.assertEqual(lzip(range(3),repeat('a')), @@ -752,11 +958,16 @@ list(r) self.assertEqual(repr(r), 'repeat((1+0j), 0)') + # check copy, deepcopy, pickle + c = repeat(object='a', times=10) + self.assertEqual(next(c), 'a') + self.assertEqual(take(2, copy.copy(c)), list('a' * 2)) + self.assertEqual(take(2, copy.deepcopy(c)), list('a' * 2)) + self.pickletest(repeat(object='a', times=10)) + def test_map(self): self.assertEqual(list(map(operator.pow, range(3), range(1,7))), [0**1, 1**2, 2**3]) - def tupleize(*args): - return args self.assertEqual(list(map(tupleize, 'abc', range(5))), [('a',0),('b',1),('c',2)]) self.assertEqual(list(map(tupleize, 'abc', count())), @@ -771,6 +982,18 @@ self.assertRaises(ValueError, next, map(errfunc, [4], [5])) self.assertRaises(TypeError, next, map(onearg, [4], [5])) + # check copy, deepcopy, pickle + ans = [('a',0),('b',1),('c',2)] + + c = map(tupleize, 'abc', count()) + self.assertEqual(list(copy.copy(c)), ans) + + c = map(tupleize, 'abc', count()) + self.assertEqual(list(copy.deepcopy(c)), ans) + + c = map(tupleize, 'abc', count()) + self.pickletest(c) + def test_starmap(self): self.assertEqual(list(starmap(operator.pow, zip(range(3), range(1,7)))), [0**1, 1**2, 2**3]) @@ -785,6 +1008,18 @@ self.assertRaises(ValueError, next, starmap(errfunc, [(4,5)])) self.assertRaises(TypeError, next, starmap(onearg, [(4,5)])) + # check copy, deepcopy, pickle + ans = [0**1, 1**2, 2**3] + + c = starmap(operator.pow, 
zip(range(3), range(1,7))) + self.assertEqual(list(copy.copy(c)), ans) + + c = starmap(operator.pow, zip(range(3), range(1,7))) + self.assertEqual(list(copy.deepcopy(c)), ans) + + c = starmap(operator.pow, zip(range(3), range(1,7))) + self.pickletest(c) + def test_islice(self): for args in [ # islice(args) should agree with range(args) (10, 20, 3), @@ -817,17 +1052,18 @@ self.assertEqual(list(it), list(range(3, 10))) # Test invalid arguments - self.assertRaises(TypeError, islice, range(10)) - self.assertRaises(TypeError, islice, range(10), 1, 2, 3, 4) - self.assertRaises(ValueError, islice, range(10), -5, 10, 1) - self.assertRaises(ValueError, islice, range(10), 1, -5, -1) - self.assertRaises(ValueError, islice, range(10), 1, 10, -1) - self.assertRaises(ValueError, islice, range(10), 1, 10, 0) - self.assertRaises(ValueError, islice, range(10), 'a') - self.assertRaises(ValueError, islice, range(10), 'a', 1) - self.assertRaises(ValueError, islice, range(10), 1, 'a') - self.assertRaises(ValueError, islice, range(10), 'a', 1, 1) - self.assertRaises(ValueError, islice, range(10), 1, 'a', 1) + ra = range(10) + self.assertRaises(TypeError, islice, ra) + self.assertRaises(TypeError, islice, ra, 1, 2, 3, 4) + self.assertRaises(ValueError, islice, ra, -5, 10, 1) + self.assertRaises(ValueError, islice, ra, 1, -5, -1) + self.assertRaises(ValueError, islice, ra, 1, 10, -1) + self.assertRaises(ValueError, islice, ra, 1, 10, 0) + self.assertRaises(ValueError, islice, ra, 'a') + self.assertRaises(ValueError, islice, ra, 'a', 1) + self.assertRaises(ValueError, islice, ra, 1, 'a') + self.assertRaises(ValueError, islice, ra, 'a', 1, 1) + self.assertRaises(ValueError, islice, ra, 1, 'a', 1) self.assertEqual(len(list(islice(count(), 1, 10, maxsize))), 1) # Issue #10323: Less islice in a predictable state @@ -835,9 +1071,22 @@ self.assertEqual(list(islice(c, 1, 3, 50)), [1]) self.assertEqual(next(c), 3) + # check copy, deepcopy, pickle + for args in [ # islice(args) should agree with range(args) + (10, 20, 3), + (10, 3, 20), + (10, 20), + (10, 3), + (20,) + ]: + self.assertEqual(list(copy.copy(islice(range(100), *args))), + list(range(*args))) + self.assertEqual(list(copy.deepcopy(islice(range(100), *args))), + list(range(*args))) + self.pickletest(islice(range(100), *args)) + def test_takewhile(self): data = [1, 3, 5, 20, 2, 4, 6, 8] - underten = lambda x: x<10 self.assertEqual(list(takewhile(underten, data)), [1, 3, 5]) self.assertEqual(list(takewhile(underten, [])), []) self.assertRaises(TypeError, takewhile) @@ -849,9 +1098,14 @@ self.assertEqual(list(t), [1, 1, 1]) self.assertRaises(StopIteration, next, t) + # check copy, deepcopy, pickle + self.assertEqual(list(copy.copy(takewhile(underten, data))), [1, 3, 5]) + self.assertEqual(list(copy.deepcopy(takewhile(underten, data))), + [1, 3, 5]) + self.pickletest(takewhile(underten, data)) + def test_dropwhile(self): data = [1, 3, 5, 20, 2, 4, 6, 8] - underten = lambda x: x<10 self.assertEqual(list(dropwhile(underten, data)), [20, 2, 4, 6, 8]) self.assertEqual(list(dropwhile(underten, [])), []) self.assertRaises(TypeError, dropwhile) @@ -860,11 +1114,14 @@ self.assertRaises(TypeError, next, dropwhile(10, [(4,5)])) self.assertRaises(ValueError, next, dropwhile(errfunc, [(4,5)])) + # check copy, deepcopy, pickle + self.assertEqual(list(copy.copy(dropwhile(underten, data))), [20, 2, 4, 6, 8]) + self.assertEqual(list(copy.deepcopy(dropwhile(underten, data))), + [20, 2, 4, 6, 8]) + self.pickletest(dropwhile(underten, data)) + def test_tee(self): n = 200 - def 
irange(n): - for i in range(n): - yield i a, b = tee([]) # test empty iterator self.assertEqual(list(a), []) @@ -949,6 +1206,67 @@ del a self.assertRaises(ReferenceError, getattr, p, '__class__') + ans = list('abc') + long_ans = list(range(10000)) + + # check copy + a, b = tee('abc') + self.assertEqual(list(copy.copy(a)), ans) + self.assertEqual(list(copy.copy(b)), ans) + a, b = tee(list(range(10000))) + self.assertEqual(list(copy.copy(a)), long_ans) + self.assertEqual(list(copy.copy(b)), long_ans) + + # check partially consumed copy + a, b = tee('abc') + take(2, a) + take(1, b) + self.assertEqual(list(copy.copy(a)), ans[2:]) + self.assertEqual(list(copy.copy(b)), ans[1:]) + self.assertEqual(list(a), ans[2:]) + self.assertEqual(list(b), ans[1:]) + a, b = tee(range(10000)) + take(100, a) + take(60, b) + self.assertEqual(list(copy.copy(a)), long_ans[100:]) + self.assertEqual(list(copy.copy(b)), long_ans[60:]) + self.assertEqual(list(a), long_ans[100:]) + self.assertEqual(list(b), long_ans[60:]) + + # check deepcopy + a, b = tee('abc') + self.assertEqual(list(copy.deepcopy(a)), ans) + self.assertEqual(list(copy.deepcopy(b)), ans) + self.assertEqual(list(a), ans) + self.assertEqual(list(b), ans) + a, b = tee(range(10000)) + self.assertEqual(list(copy.deepcopy(a)), long_ans) + self.assertEqual(list(copy.deepcopy(b)), long_ans) + self.assertEqual(list(a), long_ans) + self.assertEqual(list(b), long_ans) + + # check partially consumed deepcopy + a, b = tee('abc') + take(2, a) + take(1, b) + self.assertEqual(list(copy.deepcopy(a)), ans[2:]) + self.assertEqual(list(copy.deepcopy(b)), ans[1:]) + self.assertEqual(list(a), ans[2:]) + self.assertEqual(list(b), ans[1:]) + a, b = tee(range(10000)) + take(100, a) + take(60, b) + self.assertEqual(list(copy.deepcopy(a)), long_ans[100:]) + self.assertEqual(list(copy.deepcopy(b)), long_ans[60:]) + self.assertEqual(list(a), long_ans[100:]) + self.assertEqual(list(b), long_ans[60:]) + + # check pickle + self.pickletest(iter(tee('abc'))) + a, b = tee('abc') + self.pickletest(a, compare=ans) + self.pickletest(b, compare=ans) + def test_StopIteration(self): self.assertRaises(StopIteration, next, zip()) @@ -974,9 +1292,21 @@ class TestExamples(unittest.TestCase): - def test_accumlate(self): + def test_accumulate(self): self.assertEqual(list(accumulate([1,2,3,4,5])), [1, 3, 6, 10, 15]) + def test_accumulate_reducible(self): + # check copy, deepcopy, pickle + data = [1, 2, 3, 4, 5] + accumulated = [1, 3, 6, 10, 15] + it = accumulate(data) + + self.assertEqual(list(pickle.loads(pickle.dumps(it))), accumulated[:]) + self.assertEqual(next(it), 1) + self.assertEqual(list(pickle.loads(pickle.dumps(it))), accumulated[1:]) + self.assertEqual(list(copy.deepcopy(it)), accumulated[1:]) + self.assertEqual(list(copy.copy(it)), accumulated[1:]) + def test_chain(self): self.assertEqual(''.join(chain('ABC', 'DEF')), 'ABCDEF') diff --git a/Lib/test/test_list.py b/Lib/test/test_list.py --- a/Lib/test/test_list.py +++ b/Lib/test/test_list.py @@ -1,5 +1,6 @@ import sys from test import support, list_tests +import pickle class ListTest(list_tests.CommonTest): type2test = list @@ -69,6 +70,33 @@ check(10) # check our checking code check(1000000) + def test_iterator_pickle(self): + # Userlist iterators don't support pickling yet since + # they are based on generators. 
+ data = self.type2test([4, 5, 6, 7]) + it = itorg = iter(data) + d = pickle.dumps(it) + it = pickle.loads(d) + self.assertEqual(type(itorg), type(it)) + self.assertEqual(self.type2test(it), self.type2test(data)) + + it = pickle.loads(d) + next(it) + d = pickle.dumps(it) + self.assertEqual(self.type2test(it), self.type2test(data)[1:]) + + def test_reversed_pickle(self): + data = self.type2test([4, 5, 6, 7]) + it = itorg = reversed(data) + d = pickle.dumps(it) + it = pickle.loads(d) + self.assertEqual(type(itorg), type(it)) + self.assertEqual(self.type2test(it), self.type2test(reversed(data))) + + it = pickle.loads(d) + next(it) + d = pickle.dumps(it) + self.assertEqual(self.type2test(it), self.type2test(reversed(data))[1:]) def test_main(verbose=None): support.run_unittest(ListTest) diff --git a/Lib/test/test_multiprocessing.py b/Lib/test/test_multiprocessing.py --- a/Lib/test/test_multiprocessing.py +++ b/Lib/test/test_multiprocessing.py @@ -2649,6 +2649,10 @@ with self.assertRaises(ValueError): multiprocessing.connection.Listener(r'\\.\test') + @unittest.skipUnless(WIN32, "skipped on non-Windows platforms") + def test_invalid_family_win32(self): + with self.assertRaises(ValueError): + multiprocessing.connection.Listener('/var/test.pipe') testcases_other = [OtherTest, TestInvalidHandle, TestInitializers, TestStdinBadfiledescriptor, TestWait, TestInvalidFamily] diff --git a/Lib/test/test_range.py b/Lib/test/test_range.py --- a/Lib/test/test_range.py +++ b/Lib/test/test_range.py @@ -341,13 +341,35 @@ def test_pickling(self): testcases = [(13,), (0, 11), (-22, 10), (20, 3, -1), - (13, 21, 3), (-2, 2, 2)] + (13, 21, 3), (-2, 2, 2), (2**65, 2**65+2)] for proto in range(pickle.HIGHEST_PROTOCOL + 1): for t in testcases: r = range(*t) self.assertEqual(list(pickle.loads(pickle.dumps(r, proto))), list(r)) + def test_iterator_pickling(self): + testcases = [(13,), (0, 11), (-22, 10), (20, 3, -1), + (13, 21, 3), (-2, 2, 2), (2**65, 2**65+2)] + for proto in range(pickle.HIGHEST_PROTOCOL + 1): + for t in testcases: + it = itorg = iter(range(*t)) + data = list(range(*t)) + + d = pickle.dumps(it) + it = pickle.loads(d) + self.assertEqual(type(itorg), type(it)) + self.assertEqual(list(it), data) + + it = pickle.loads(d) + try: + next(it) + except StopIteration: + continue + d = pickle.dumps(it) + it = pickle.loads(d) + self.assertEqual(list(it), data[1:]) + def test_odd_bug(self): # This used to raise a "SystemError: NULL result without error" # because the range validation step was eating the exception diff --git a/Lib/test/test_set.py b/Lib/test/test_set.py --- a/Lib/test/test_set.py +++ b/Lib/test/test_set.py @@ -9,6 +9,7 @@ import sys import warnings import collections +import collections.abc class PassThru(Exception): pass @@ -234,6 +235,26 @@ dup = pickle.loads(p) self.assertEqual(self.s.x, dup.x) + def test_iterator_pickling(self): + itorg = iter(self.s) + data = self.thetype(self.s) + d = pickle.dumps(itorg) + it = pickle.loads(d) + # Set iterators unpickle as list iterators due to the + # undefined order of set items. 
+ # self.assertEqual(type(itorg), type(it)) + self.assertTrue(isinstance(it, collections.abc.Iterator)) + self.assertEqual(self.thetype(it), data) + + it = pickle.loads(d) + try: + drop = next(it) + except StopIteration: + return + d = pickle.dumps(it) + it = pickle.loads(d) + self.assertEqual(self.thetype(it), data - self.thetype((drop,))) + def test_deepcopy(self): class Tracer: def __init__(self, value): diff --git a/Lib/test/test_tools.py b/Lib/test/test_tools.py --- a/Lib/test/test_tools.py +++ b/Lib/test/test_tools.py @@ -5,8 +5,11 @@ """ import os +import sys +import imp import unittest import sysconfig +import tempfile from test import support from test.script_helper import assert_python_ok @@ -17,10 +20,11 @@ srcdir = sysconfig.get_config_var('projectbase') basepath = os.path.join(os.getcwd(), srcdir, 'Tools') +scriptsdir = os.path.join(basepath, 'scripts') class ReindentTests(unittest.TestCase): - script = os.path.join(basepath, 'scripts', 'reindent.py') + script = os.path.join(scriptsdir, 'reindent.py') def test_noargs(self): assert_python_ok(self.script) @@ -31,8 +35,73 @@ self.assertGreater(err, b'') +class TestSundryScripts(unittest.TestCase): + # At least make sure the rest don't have syntax errors. When tests are + # added for a script it should be added to the whitelist below. + + # scripts that have independent tests. + whitelist = ['reindent.py'] + # scripts that can't be imported without running + blacklist = ['make_ctype.py'] + # scripts that use windows-only modules + windows_only = ['win_add2path.py'] + # blacklisted for other reasons + other = ['analyze_dxp.py'] + + skiplist = blacklist + whitelist + windows_only + other + + def setUp(self): + cm = support.DirsOnSysPath(scriptsdir) + cm.__enter__() + self.addCleanup(cm.__exit__) + + def test_sundry(self): + for fn in os.listdir(scriptsdir): + if fn.endswith('.py') and fn not in self.skiplist: + __import__(fn[:-3]) + + @unittest.skipIf(sys.platform != "win32", "Windows-only test") + def test_sundry_windows(self): + for fn in self.windows_only: + __import__(fn[:-3]) + + @unittest.skipIf(not support.threading, "test requires _thread module") + def test_analyze_dxp_import(self): + if hasattr(sys, 'getdxp'): + import analyze_dxp + else: + with self.assertRaises(RuntimeError): + import analyze_dxp + + +class PdepsTests(unittest.TestCase): + + @classmethod + def setUpClass(self): + path = os.path.join(scriptsdir, 'pdeps.py') + self.pdeps = imp.load_source('pdeps', path) + + @classmethod + def tearDownClass(self): + if 'pdeps' in sys.modules: + del sys.modules['pdeps'] + + def test_process_errors(self): + # Issue #14492: m_import.match(line) can be None. + with tempfile.TemporaryDirectory() as tmpdir: + fn = os.path.join(tmpdir, 'foo') + with open(fn, 'w') as stream: + stream.write("#!/this/will/fail") + self.pdeps.process(fn, {}) + + def test_inverse_attribute_error(self): + # Issue #14492: this used to fail with an AttributeError. 
+ self.pdeps.inverse({'a': []}) + + def test_main(): - support.run_unittest(ReindentTests) + support.run_unittest(*[obj for obj in globals().values() + if isinstance(obj, type)]) if __name__ == '__main__': diff --git a/Lib/test/test_tuple.py b/Lib/test/test_tuple.py --- a/Lib/test/test_tuple.py +++ b/Lib/test/test_tuple.py @@ -1,6 +1,7 @@ from test import support, seq_tests import gc +import pickle class TupleTest(seq_tests.CommonTest): type2test = tuple @@ -164,6 +165,34 @@ check(10) # check our checking code check(1000000) + def test_iterator_pickle(self): + # Userlist iterators don't support pickling yet since + # they are based on generators. + data = self.type2test([4, 5, 6, 7]) + itorg = iter(data) + d = pickle.dumps(itorg) + it = pickle.loads(d) + self.assertEqual(type(itorg), type(it)) + self.assertEqual(self.type2test(it), self.type2test(data)) + + it = pickle.loads(d) + next(it) + d = pickle.dumps(it) + self.assertEqual(self.type2test(it), self.type2test(data)[1:]) + + def test_reversed_pickle(self): + data = self.type2test([4, 5, 6, 7]) + itorg = reversed(data) + d = pickle.dumps(itorg) + it = pickle.loads(d) + self.assertEqual(type(itorg), type(it)) + self.assertEqual(self.type2test(it), self.type2test(reversed(data))) + + it = pickle.loads(d) + next(it) + d = pickle.dumps(it) + self.assertEqual(self.type2test(it), self.type2test(reversed(data))[1:]) + def test_main(): support.run_unittest(TupleTest) diff --git a/Lib/test/test_xml_etree.py b/Lib/test/test_xml_etree.py --- a/Lib/test/test_xml_etree.py +++ b/Lib/test/test_xml_etree.py @@ -1859,6 +1859,41 @@ gc_collect() self.assertIsNone(wref()) + # A longer cycle: d->e->e2->d + e = ET.Element('joe') + d = Dummy() + d.dummyref = e + wref = weakref.ref(d) + e2 = ET.SubElement(e, 'foo', attr=d) + del d, e, e2 + gc_collect() + self.assertIsNone(wref()) + + # A cycle between Element objects as children of one another + # e1->e2->e3->e1 + e1 = ET.Element('e1') + e2 = ET.Element('e2') + e3 = ET.Element('e3') + e1.append(e2) + e2.append(e2) + e3.append(e1) + wref = weakref.ref(e1) + del e1, e2, e3 + gc_collect() + self.assertIsNone(wref()) + + def test_weakref(self): + flag = False + def wref_cb(w): + nonlocal flag + flag = True + e = ET.Element('e') + wref = weakref.ref(e, wref_cb) + self.assertEqual(wref().tag, 'e') + del e + self.assertEqual(flag, True) + self.assertEqual(wref(), None) + class ElementTreeTest(unittest.TestCase): def test_istype(self): diff --git a/Lib/tkinter/font.py b/Lib/tkinter/font.py --- a/Lib/tkinter/font.py +++ b/Lib/tkinter/font.py @@ -2,27 +2,27 @@ # # written by Fredrik Lundh, February 1998 # -# FIXME: should add 'displayof' option where relevant (actual, families, -# measure, and metrics) -# __version__ = "0.9" +import itertools import tkinter + # weight/slant NORMAL = "normal" ROMAN = "roman" BOLD = "bold" ITALIC = "italic" + def nametofont(name): """Given the name of a tk named font, returns a Font representation. """ return Font(name=name, exists=True) + class Font: - """Represents a named font. 
Constructor options are: @@ -44,6 +44,8 @@ """ + counter = itertools.count(1) + def _set(self, kw): options = [] for k, v in kw.items(): @@ -63,7 +65,8 @@ options[args[i][1:]] = args[i+1] return options - def __init__(self, root=None, font=None, name=None, exists=False, **options): + def __init__(self, root=None, font=None, name=None, exists=False, + **options): if not root: root = tkinter._default_root if font: @@ -72,7 +75,7 @@ else: font = self._set(options) if not name: - name = "font" + str(id(self)) + name = "font" + str(next(self.counter)) self.name = name if exists: @@ -118,14 +121,17 @@ "Return a distinct copy of the current font" return Font(self._root, **self.actual()) - def actual(self, option=None): + def actual(self, option=None, displayof=None): "Return actual font attributes" + args = () + if displayof: + args = ('-displayof', displayof) if option: - return self._call("font", "actual", self.name, "-"+option) + args = args + ('-' + option, ) + return self._call("font", "actual", self.name, *args) else: return self._mkdict( - self._split(self._call("font", "actual", self.name)) - ) + self._split(self._call("font", "actual", self.name, *args))) def cget(self, option): "Get font attribute" @@ -138,37 +144,47 @@ *self._set(options)) else: return self._mkdict( - self._split(self._call("font", "config", self.name)) - ) + self._split(self._call("font", "config", self.name))) configure = config - def measure(self, text): + def measure(self, text, displayof=None): "Return text width" - return int(self._call("font", "measure", self.name, text)) + args = (text,) + if displayof: + args = ('-displayof', displayof, text) + return int(self._call("font", "measure", self.name, *args)) - def metrics(self, *options): + def metrics(self, *options, **kw): """Return font metrics. 
For best performance, create a dummy widget using this font before calling this method.""" - + args = () + displayof = kw.pop('displayof', None) + if displayof: + args = ('-displayof', displayof) if options: + args = args + self._get(options) return int( - self._call("font", "metrics", self.name, self._get(options)) - ) + self._call("font", "metrics", self.name, *args)) else: - res = self._split(self._call("font", "metrics", self.name)) + res = self._split(self._call("font", "metrics", self.name, *args)) options = {} for i in range(0, len(res), 2): options[res[i][1:]] = int(res[i+1]) return options -def families(root=None): + +def families(root=None, displayof=None): "Get font families (as a tuple)" if not root: root = tkinter._default_root - return root.tk.splitlist(root.tk.call("font", "families")) + args = () + if displayof: + args = ('-displayof', displayof) + return root.tk.splitlist(root.tk.call("font", "families", *args)) + def names(root=None): "Get names of defined fonts (as a tuple)" @@ -176,6 +192,7 @@ root = tkinter._default_root return root.tk.splitlist(root.tk.call("font", "names")) + # -------------------------------------------------------------------- # test stuff @@ -198,10 +215,10 @@ print(f.measure("hello"), f.metrics("linespace")) - print(f.metrics()) + print(f.metrics(displayof=root)) f = Font(font=("Courier", 20, "bold")) - print(f.measure("hello"), f.metrics("linespace")) + print(f.measure("hello"), f.metrics("linespace", displayof=root)) w = tkinter.Label(root, text="Hello, world", font=f) w.pack() diff --git a/Lib/tkinter/ttk.py b/Lib/tkinter/ttk.py --- a/Lib/tkinter/ttk.py +++ b/Lib/tkinter/ttk.py @@ -1253,7 +1253,7 @@ def exists(self, item): - """Returns True if the specified item is present in the three, + """Returns True if the specified item is present in the tree, False otherwise.""" return bool(self.tk.call(self._w, "exists", item)) diff --git a/Lib/webbrowser.py b/Lib/webbrowser.py --- a/Lib/webbrowser.py +++ b/Lib/webbrowser.py @@ -448,6 +448,14 @@ def register_X_browsers(): + # use xdg-open if around + if _iscommand("xdg-open"): + register("xdg-open", None, BackgroundBrowser("xdg-open")) + + # The default GNOME3 browser + if "GNOME_DESKTOP_SESSION_ID" in os.environ and _iscommand("gvfs-open"): + register("gvfs-open", None, BackgroundBrowser("gvfs-open")) + # The default GNOME browser if "GNOME_DESKTOP_SESSION_ID" in os.environ and _iscommand("gnome-open"): register("gnome-open", None, BackgroundBrowser("gnome-open")) diff --git a/Makefile.pre.in b/Makefile.pre.in --- a/Makefile.pre.in +++ b/Makefile.pre.in @@ -1351,7 +1351,7 @@ clobber: clean profile-removal -rm -f $(BUILDPYTHON) $(PGEN) $(LIBRARY) $(LDLIBRARY) $(DLLLIBRARY) \ - tags TAGS Parser/pgen.stamp \ + tags TAGS \ config.cache config.log pyconfig.h Modules/config.c -rm -rf build platform -rm -rf $(PYTHONFRAMEWORKDIR) diff --git a/Misc/ACKS b/Misc/ACKS --- a/Misc/ACKS +++ b/Misc/ACKS @@ -333,6 +333,7 @@ John Fouhy Andrew Francis Martin Franklin +Bruce Frederiksen Robin Friedrich Ivan Frohne Matthias Fuchs @@ -976,6 +977,7 @@ Peter Stoehr Casper Stoel Michael Stone +Serhiy Storchaka Ken Stox Dan Stromberg Daniel Stutzbach diff --git a/Misc/NEWS b/Misc/NEWS --- a/Misc/NEWS +++ b/Misc/NEWS @@ -19,10 +19,26 @@ Library ------- +- Issue #8515: Set __file__ when run file in IDLE. + Initial patch by Bruce Frederiksen. + +- Issue #14496: Fix wrong name in idlelib/tabbedpages.py. + Patch by Popa Claudiu. + +- Issue #3033: Add displayof parameter to tkinter font. Patch by Guilherme Polo. 
+ +- Issue #14482: Raise a ValueError, not a NameError, when trying to create + a multiprocessing Client or Listener with an AF_UNIX type address under + Windows. Patch by Popa Claudiu. + +- Issue #802310: Generate always unique tkinter font names if not directly passed. + - Issue #14151: Raise a ValueError, not a NameError, when trying to create a multiprocessing Client or Listener with an AF_PIPE type address under non-Windows platforms. Patch by Popa Claudiu. +- Issue #14493: Use gvfs-open or xdg-open in webbrowser. + What's New in Python 3.3.0 Alpha 2? =================================== @@ -58,6 +74,8 @@ - Issue #14471: Fix a possible buffer overrun in the winreg module. +- Issue #14288: Allow the serialization of builtin iterators + Library ------- @@ -997,7 +1015,7 @@ - Issue #11006: Don't issue low level warning in subprocess when pipe2() fails. -- Issue #13620: Support for Chrome browser in webbrowser.py Patch contributed +- Issue #13620: Support for Chrome browser in webbrowser. Patch contributed by Arnaud Calmettes. - Issue #11829: Fix code execution holes in inspect.getattr_static for diff --git a/Modules/_collectionsmodule.c b/Modules/_collectionsmodule.c --- a/Modules/_collectionsmodule.c +++ b/Modules/_collectionsmodule.c @@ -1122,6 +1122,35 @@ } static PyObject * +dequeiter_new(PyTypeObject *type, PyObject *args, PyObject *kwds) +{ + Py_ssize_t i, index=0; + PyObject *deque; + dequeiterobject *it; + if (!PyArg_ParseTuple(args, "O!|n", &deque_type, &deque, &index)) + return NULL; + assert(type == &dequeiter_type); + + it = (dequeiterobject*)deque_iter((dequeobject *)deque); + if (!it) + return NULL; + /* consume items from the queue */ + for(i=0; icounter) { + Py_DECREF(it); + return NULL; + } else + break; + } + } + return (PyObject*)it; +} + +static PyObject * dequeiter_len(dequeiterobject *it) { return PyLong_FromSsize_t(it->counter); @@ -1129,14 +1158,21 @@ PyDoc_STRVAR(length_hint_doc, "Private method returning an estimate of len(list(it))."); +static PyObject * +dequeiter_reduce(dequeiterobject *it) +{ + return Py_BuildValue("O(On)", Py_TYPE(it), it->deque, it->deque->len - it->counter); +} + static PyMethodDef dequeiter_methods[] = { {"__length_hint__", (PyCFunction)dequeiter_len, METH_NOARGS, length_hint_doc}, + {"__reduce__", (PyCFunction)dequeiter_reduce, METH_NOARGS, reduce_doc}, {NULL, NULL} /* sentinel */ }; static PyTypeObject dequeiter_type = { PyVarObject_HEAD_INIT(NULL, 0) - "deque_iterator", /* tp_name */ + "_collections._deque_iterator", /* tp_name */ sizeof(dequeiterobject), /* tp_basicsize */ 0, /* tp_itemsize */ /* methods */ @@ -1164,6 +1200,16 @@ PyObject_SelfIter, /* tp_iter */ (iternextfunc)dequeiter_next, /* tp_iternext */ dequeiter_methods, /* tp_methods */ + 0, /* tp_members */ + 0, /* tp_getset */ + 0, /* tp_base */ + 0, /* tp_dict */ + 0, /* tp_descr_get */ + 0, /* tp_descr_set */ + 0, /* tp_dictoffset */ + 0, /* tp_init */ + 0, /* tp_alloc */ + dequeiter_new, /* tp_new */ 0, }; @@ -1217,9 +1263,38 @@ return item; } +static PyObject * +dequereviter_new(PyTypeObject *type, PyObject *args, PyObject *kwds) +{ + Py_ssize_t i, index=0; + PyObject *deque; + dequeiterobject *it; + if (!PyArg_ParseTuple(args, "O!|n", &deque_type, &deque, &index)) + return NULL; + assert(type == &dequereviter_type); + + it = (dequeiterobject*)deque_reviter((dequeobject *)deque); + if (!it) + return NULL; + /* consume items from the queue */ + for(i=0; icounter) { + Py_DECREF(it); + return NULL; + } else + break; + } + } + return (PyObject*)it; +} + static PyTypeObject 
dequereviter_type = { PyVarObject_HEAD_INIT(NULL, 0) - "deque_reverse_iterator", /* tp_name */ + "_collections._deque_reverse_iterator", /* tp_name */ sizeof(dequeiterobject), /* tp_basicsize */ 0, /* tp_itemsize */ /* methods */ @@ -1247,6 +1322,16 @@ PyObject_SelfIter, /* tp_iter */ (iternextfunc)dequereviter_next, /* tp_iternext */ dequeiter_methods, /* tp_methods */ + 0, /* tp_members */ + 0, /* tp_getset */ + 0, /* tp_base */ + 0, /* tp_dict */ + 0, /* tp_descr_get */ + 0, /* tp_descr_set */ + 0, /* tp_dictoffset */ + 0, /* tp_init */ + 0, /* tp_alloc */ + dequereviter_new, /* tp_new */ 0, }; @@ -1653,9 +1738,13 @@ if (PyType_Ready(&dequeiter_type) < 0) return NULL; + Py_INCREF(&dequeiter_type); + PyModule_AddObject(m, "_deque_iterator", (PyObject *)&dequeiter_type); if (PyType_Ready(&dequereviter_type) < 0) return NULL; + Py_INCREF(&dequereviter_type); + PyModule_AddObject(m, "_deque_reverse_iterator", (PyObject *)&dequereviter_type); return m; } diff --git a/Modules/_decimal/_decimal.c b/Modules/_decimal/_decimal.c --- a/Modules/_decimal/_decimal.c +++ b/Modules/_decimal/_decimal.c @@ -1935,7 +1935,7 @@ mpd_maxcontext(&maxctx); mpd_qset_string(MPD(dec), s, &maxctx, &status); - if (status & (MPD_Inexact|MPD_Rounded)) { + if (status & (MPD_Inexact|MPD_Rounded|MPD_Clamped)) { /* we want exact results */ mpd_seterror(MPD(dec), MPD_Invalid_operation, &status); } @@ -2139,7 +2139,7 @@ return NULL; } - if (status & (MPD_Inexact|MPD_Rounded)) { + if (status & (MPD_Inexact|MPD_Rounded|MPD_Clamped)) { /* we want exact results */ mpd_seterror(MPD(dec), MPD_Invalid_operation, &status); } @@ -2385,8 +2385,8 @@ } /* coefficient */ - digits = sequence_as_tuple(PyTuple_GET_ITEM(dectuple, 1), - PyExc_ValueError, "coefficient must be a tuple of digits"); + digits = sequence_as_tuple(PyTuple_GET_ITEM(dectuple, 1), PyExc_ValueError, + "coefficient must be a tuple of digits"); if (digits == NULL) { goto error; } @@ -2435,8 +2435,8 @@ if (sign_special[1] == '\0') { /* not a special number */ *cp++ = 'E'; - n = snprintf(cp, MPD_EXPDIGITS+1, "%" PRI_mpd_ssize_t, exp); - if (n < 0 || n >= MPD_EXPDIGITS+1) { + n = snprintf(cp, MPD_EXPDIGITS+2, "%" PRI_mpd_ssize_t, exp); + if (n < 0 || n >= MPD_EXPDIGITS+2) { PyErr_SetString(PyExc_RuntimeError, "internal error in dec_sequence_as_str"); goto error; @@ -4215,7 +4215,7 @@ mpd_uint_t p_data[1] = {2305843009213693951ULL}; mpd_t p = {MPD_POS|MPD_STATIC|MPD_CONST_DATA, 0, 19, 1, 1, p_data}; /* Inverse of 10 modulo p */ - mpd_uint_t inv10_p_data[2] = {2075258708292324556ULL}; + mpd_uint_t inv10_p_data[1] = {2075258708292324556ULL}; mpd_t inv10_p = {MPD_POS|MPD_STATIC|MPD_CONST_DATA, 0, 19, 1, 1, inv10_p_data}; #elif defined(CONFIG_32) && _PyHASH_BITS == 31 @@ -4934,7 +4934,7 @@ PyObject *result; CONVERT_OP_RAISE(&result, v, context); - return result; + return result; } static PyObject * diff --git a/Modules/_decimal/tests/deccheck.py b/Modules/_decimal/tests/deccheck.py --- a/Modules/_decimal/tests/deccheck.py +++ b/Modules/_decimal/tests/deccheck.py @@ -302,6 +302,7 @@ dec = maxcontext.create_decimal(value) if maxcontext.flags[P.Inexact] or \ maxcontext.flags[P.Rounded] or \ + maxcontext.flags[P.Clamped] or \ maxcontext.flags[P.InvalidOperation]: return context.p._raise_error(P.InvalidOperation) if maxcontext.flags[P.FloatOperation]: diff --git a/Modules/_elementtree.c b/Modules/_elementtree.c --- a/Modules/_elementtree.c +++ b/Modules/_elementtree.c @@ -48,6 +48,7 @@ /* See http://www.python.org/psf/license for licensing details. 
*/ #include "Python.h" +#include "structmember.h" #define VERSION "1.0.6" @@ -229,6 +230,8 @@ ElementObjectExtra* extra; + PyObject *weakreflist; /* For tp_weaklistoffset */ + } ElementObject; static PyTypeObject Element_Type; @@ -261,17 +264,26 @@ LOCAL(void) dealloc_extra(ElementObject* self) { + ElementObjectExtra *myextra; int i; - Py_DECREF(self->extra->attrib); - - for (i = 0; i < self->extra->length; i++) - Py_DECREF(self->extra->children[i]); - - if (self->extra->children != self->extra->_children) - PyObject_Free(self->extra->children); - - PyObject_Free(self->extra); + if (!self->extra) + return; + + /* Avoid DECREFs calling into this code again (cycles, etc.) + */ + myextra = self->extra; + self->extra = NULL; + + Py_DECREF(myextra->attrib); + + for (i = 0; i < myextra->length; i++) + Py_DECREF(myextra->children[i]); + + if (myextra->children != myextra->_children) + PyObject_Free(myextra->children); + + PyObject_Free(myextra); } /* Convenience internal function to create new Element objects with the given @@ -308,6 +320,8 @@ Py_INCREF(Py_None); self->tail = Py_None; + self->weakreflist = NULL; + ALLOC(sizeof(ElementObject), "create element"); PyObject_GC_Track(self); return (PyObject*) self; @@ -328,6 +342,7 @@ e->tail = Py_None; e->extra = NULL; + e->weakreflist = NULL; } return (PyObject *)e; } @@ -576,19 +591,28 @@ static int element_gc_clear(ElementObject *self) { - PyObject *text = JOIN_OBJ(self->text); - PyObject *tail = JOIN_OBJ(self->tail); Py_CLEAR(self->tag); - Py_CLEAR(text); - Py_CLEAR(tail); + + /* The following is like Py_CLEAR for self->text and self->tail, but + * written explicitily because the real pointers hide behind access + * macros. + */ + if (self->text) { + PyObject *tmp = JOIN_OBJ(self->text); + self->text = NULL; + Py_DECREF(tmp); + } + + if (self->tail) { + PyObject *tmp = JOIN_OBJ(self->tail); + self->tail = NULL; + Py_DECREF(tmp); + } /* After dropping all references from extra, it's no longer valid anyway, - ** so fully deallocate it (see also element_clearmethod) + * so fully deallocate it. 
*/ - if (self->extra) { - dealloc_extra(self); - self->extra = NULL; - } + dealloc_extra(self); return 0; } @@ -596,6 +620,10 @@ element_dealloc(ElementObject* self) { PyObject_GC_UnTrack(self); + + if (self->weakreflist != NULL) + PyObject_ClearWeakRefs((PyObject *) self); + /* element_gc_clear clears all references and deallocates extra */ element_gc_clear(self); @@ -626,10 +654,7 @@ if (!PyArg_ParseTuple(args, ":clear")) return NULL; - if (self->extra) { - dealloc_extra(self); - self->extra = NULL; - } + dealloc_extra(self); Py_INCREF(Py_None); Py_DECREF(JOIN_OBJ(self->text)); @@ -1693,7 +1718,7 @@ (traverseproc)element_gc_traverse, /* tp_traverse */ (inquiry)element_gc_clear, /* tp_clear */ 0, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ + offsetof(ElementObject, weakreflist), /* tp_weaklistoffset */ 0, /* tp_iter */ 0, /* tp_iternext */ element_methods, /* tp_methods */ @@ -3009,8 +3034,7 @@ PyMODINIT_FUNC PyInit__elementtree(void) { - PyObject* m; - PyObject* g; + PyObject *m, *g, *temp; char* bootstrap; /* Initialize object types */ @@ -3042,10 +3066,6 @@ PyDict_SetItemString(g, "__builtins__", PyEval_GetBuiltins()); bootstrap = ( - - "from copy import deepcopy\n" - "from xml.etree import ElementPath\n" - "def iter(node, tag=None):\n" /* helper */ " if tag == '*':\n" " tag = None\n" @@ -3069,8 +3089,14 @@ if (!PyRun_String(bootstrap, Py_file_input, g, NULL)) return NULL; - elementpath_obj = PyDict_GetItemString(g, "ElementPath"); - elementtree_deepcopy_obj = PyDict_GetItemString(g, "deepcopy"); + if (!(temp = PyImport_ImportModule("copy"))) + return NULL; + elementtree_deepcopy_obj = PyObject_GetAttrString(temp, "deepcopy"); + Py_XDECREF(temp); + + if (!(elementpath_obj = PyImport_ImportModule("xml.etree.ElementPath"))) + return NULL; + elementtree_iter_obj = PyDict_GetItemString(g, "iter"); elementtree_itertext_obj = PyDict_GetItemString(g, "itertext"); diff --git a/Modules/arraymodule.c b/Modules/arraymodule.c --- a/Modules/arraymodule.c +++ b/Modules/arraymodule.c @@ -2753,6 +2753,34 @@ return 0; } +static PyObject * +arrayiter_reduce(arrayiterobject *it) +{ + return Py_BuildValue("N(O)n", _PyObject_GetBuiltin("iter"), + it->ao, it->index); +} + +static PyObject * +arrayiter_setstate(arrayiterobject *it, PyObject *state) +{ + Py_ssize_t index = PyLong_AsSsize_t(state); + if (index == -1 && PyErr_Occurred()) + return NULL; + if (index < 0) + index = 0; + it->index = index; + Py_RETURN_NONE; +} + +PyDoc_STRVAR(setstate_doc, "Set state information for unpickling."); +static PyMethodDef arrayiter_methods[] = { + {"__reduce__", (PyCFunction)arrayiter_reduce, METH_NOARGS, + reduce_doc}, + {"__setstate__", (PyCFunction)arrayiter_setstate, METH_O, + setstate_doc}, + {NULL, NULL} /* sentinel */ +}; + static PyTypeObject PyArrayIter_Type = { PyVarObject_HEAD_INIT(NULL, 0) "arrayiterator", /* tp_name */ @@ -2782,7 +2810,7 @@ 0, /* tp_weaklistoffset */ PyObject_SelfIter, /* tp_iter */ (iternextfunc)arrayiter_next, /* tp_iternext */ - 0, /* tp_methods */ + arrayiter_methods, /* tp_methods */ }; diff --git a/Modules/itertoolsmodule.c b/Modules/itertoolsmodule.c --- a/Modules/itertoolsmodule.c +++ b/Modules/itertoolsmodule.c @@ -134,6 +134,53 @@ return r; } +static PyObject * +groupby_reduce(groupbyobject *lz) +{ + /* reduce as a 'new' call with an optional 'setstate' if groupby + * has started + */ + PyObject *value; + if (lz->tgtkey && lz->currkey && lz->currvalue) + value = Py_BuildValue("O(OO)(OOO)", Py_TYPE(lz), + lz->it, lz->keyfunc, lz->currkey, lz->currvalue, lz->tgtkey); + else + 
value = Py_BuildValue("O(OO)", Py_TYPE(lz), + lz->it, lz->keyfunc); + + return value; +} + +PyDoc_STRVAR(reduce_doc, "Return state information for pickling."); + +static PyObject * +groupby_setstate(groupbyobject *lz, PyObject *state) +{ + PyObject *currkey, *currvalue, *tgtkey; + if (!PyArg_ParseTuple(state, "OOO", &currkey, &currvalue, &tgtkey)) + return NULL; + Py_CLEAR(lz->currkey); + lz->currkey = currkey; + Py_INCREF(lz->currkey); + Py_CLEAR(lz->currvalue); + lz->currvalue = currvalue; + Py_INCREF(lz->currvalue); + Py_CLEAR(lz->tgtkey); + lz->tgtkey = tgtkey; + Py_INCREF(lz->tgtkey); + Py_RETURN_NONE; +} + +PyDoc_STRVAR(setstate_doc, "Set state information for unpickling."); + +static PyMethodDef groupby_methods[] = { + {"__reduce__", (PyCFunction)groupby_reduce, METH_NOARGS, + reduce_doc}, + {"__setstate__", (PyCFunction)groupby_setstate, METH_O, + setstate_doc}, + {NULL, NULL} /* sentinel */ +}; + PyDoc_STRVAR(groupby_doc, "groupby(iterable[, keyfunc]) -> create an iterator which returns\n\ (key, sub-iterator) grouped by each value of key(value).\n"); @@ -168,7 +215,7 @@ 0, /* tp_weaklistoffset */ PyObject_SelfIter, /* tp_iter */ (iternextfunc)groupby_next, /* tp_iternext */ - 0, /* tp_methods */ + groupby_methods, /* tp_methods */ 0, /* tp_members */ 0, /* tp_getset */ 0, /* tp_base */ @@ -194,6 +241,17 @@ static PyTypeObject _grouper_type; static PyObject * +_grouper_new(PyTypeObject *type, PyObject *args, PyObject *kwds) +{ + PyObject *parent, *tgtkey; + + if (!PyArg_ParseTuple(args, "O!O", &groupby_type, &parent, &tgtkey)) + return NULL; + + return _grouper_create((groupbyobject*) parent, tgtkey); +} + +static PyObject * _grouper_create(groupbyobject *parent, PyObject *tgtkey) { _grouperobject *igo; @@ -269,6 +327,20 @@ return r; } +static PyObject * +_grouper_reduce(_grouperobject *lz) +{ + return Py_BuildValue("O(OO)", Py_TYPE(lz), + lz->parent, lz->tgtkey); +} + +static PyMethodDef _grouper_methods[] = { + {"__reduce__", (PyCFunction)_grouper_reduce, METH_NOARGS, + reduce_doc}, + {NULL, NULL} /* sentinel */ +}; + + static PyTypeObject _grouper_type = { PyVarObject_HEAD_INIT(NULL, 0) "itertools._grouper", /* tp_name */ @@ -298,7 +370,7 @@ 0, /* tp_weaklistoffset */ PyObject_SelfIter, /* tp_iter */ (iternextfunc)_grouper_next, /* tp_iternext */ - 0, /* tp_methods */ + _grouper_methods, /* tp_methods */ 0, /* tp_members */ 0, /* tp_getset */ 0, /* tp_base */ @@ -308,7 +380,7 @@ 0, /* tp_dictoffset */ 0, /* tp_init */ 0, /* tp_alloc */ - 0, /* tp_new */ + _grouper_new, /* tp_new */ PyObject_GC_Del, /* tp_free */ }; @@ -344,7 +416,7 @@ static PyTypeObject teedataobject_type; static PyObject * -teedataobject_new(PyObject *it) +teedataobject_newinternal(PyObject *it) { teedataobject *tdo; @@ -364,7 +436,7 @@ teedataobject_jumplink(teedataobject *tdo) { if (tdo->nextlink == NULL) - tdo->nextlink = teedataobject_new(tdo->it); + tdo->nextlink = teedataobject_newinternal(tdo->it); Py_XINCREF(tdo->nextlink); return tdo->nextlink; } @@ -420,11 +492,80 @@ PyObject_GC_Del(tdo); } +static PyObject * +teedataobject_reduce(teedataobject *tdo) +{ + int i; + /* create a temporary list of already iterated values */ + PyObject *values = PyList_New(tdo->numread); + if (!values) + return NULL; + for (i=0 ; inumread ; i++) { + Py_INCREF(tdo->values[i]); + PyList_SET_ITEM(values, i, tdo->values[i]); + } + return Py_BuildValue("O(ONO)", Py_TYPE(tdo), tdo->it, + values, + tdo->nextlink ? 
tdo->nextlink : Py_None); +} + +static PyTypeObject teedataobject_type; + +static PyObject * +teedataobject_new(PyTypeObject *type, PyObject *args, PyObject *kw) +{ + teedataobject *tdo; + PyObject *it, *values, *next; + Py_ssize_t i, len; + + assert(type == &teedataobject_type); + if (!PyArg_ParseTuple(args, "OO!O", &it, &PyList_Type, &values, &next)) + return NULL; + + tdo = (teedataobject *)teedataobject_newinternal(it); + if (!tdo) + return NULL; + + len = PyList_GET_SIZE(values); + if (len > LINKCELLS) + goto err; + for (i=0; ivalues[i] = PyList_GET_ITEM(values, i); + Py_INCREF(tdo->values[i]); + } + tdo->numread = len; + + if (len == LINKCELLS) { + if (next != Py_None) { + if (Py_TYPE(next) != &teedataobject_type) + goto err; + assert(tdo->nextlink == NULL); + Py_INCREF(next); + tdo->nextlink = next; + } + } else { + if (next != Py_None) + goto err; /* shouldn't have a next if we are not full */ + } + return (PyObject*)tdo; + +err: + Py_XDECREF(tdo); + PyErr_SetString(PyExc_ValueError, "Invalid arguments"); + return NULL; +} + +static PyMethodDef teedataobject_methods[] = { + {"__reduce__", (PyCFunction)teedataobject_reduce, METH_NOARGS, + reduce_doc}, + {NULL, NULL} /* sentinel */ +}; + PyDoc_STRVAR(teedataobject_doc, "Data container common to multiple tee objects."); static PyTypeObject teedataobject_type = { PyVarObject_HEAD_INIT(0, 0) /* Must fill in type value later */ - "itertools.tee_dataobject", /* tp_name */ + "itertools._tee_dataobject", /* tp_name */ sizeof(teedataobject), /* tp_basicsize */ 0, /* tp_itemsize */ /* methods */ @@ -451,7 +592,7 @@ 0, /* tp_weaklistoffset */ 0, /* tp_iter */ 0, /* tp_iternext */ - 0, /* tp_methods */ + teedataobject_methods, /* tp_methods */ 0, /* tp_members */ 0, /* tp_getset */ 0, /* tp_base */ @@ -461,7 +602,7 @@ 0, /* tp_dictoffset */ 0, /* tp_init */ 0, /* tp_alloc */ - 0, /* tp_new */ + teedataobject_new, /* tp_new */ PyObject_GC_Del, /* tp_free */ }; @@ -528,7 +669,7 @@ to = PyObject_GC_New(teeobject, &tee_type); if (to == NULL) goto done; - to->dataobj = (teedataobject *)teedataobject_new(it); + to->dataobj = (teedataobject *)teedataobject_newinternal(it); if (!to->dataobj) { PyObject_GC_Del(to); to = NULL; @@ -548,7 +689,7 @@ { PyObject *iterable; - if (!PyArg_UnpackTuple(args, "tee", 1, 1, &iterable)) + if (!PyArg_UnpackTuple(args, "_tee", 1, 1, &iterable)) return NULL; return tee_fromiterable(iterable); } @@ -570,17 +711,43 @@ PyObject_GC_Del(to); } +static PyObject * +tee_reduce(teeobject *to) +{ + return Py_BuildValue("O(())(Oi)", Py_TYPE(to), to->dataobj, to->index); +} + +static PyObject * +tee_setstate(teeobject *to, PyObject *state) +{ + teedataobject *tdo; + int index; + if (!PyArg_ParseTuple(state, "O!i", &teedataobject_type, &tdo, &index)) + return NULL; + if (index < 0 || index > LINKCELLS) { + PyErr_SetString(PyExc_ValueError, "Index out of range"); + return NULL; + } + Py_CLEAR(to->dataobj); + to->dataobj = tdo; + Py_INCREF(to->dataobj); + to->index = index; + Py_RETURN_NONE; +} + PyDoc_STRVAR(teeobject_doc, "Iterator wrapped to make it copyable"); static PyMethodDef tee_methods[] = { {"__copy__", (PyCFunction)tee_copy, METH_NOARGS, teecopy_doc}, + {"__reduce__", (PyCFunction)tee_reduce, METH_NOARGS, reduce_doc}, + {"__setstate__", (PyCFunction)tee_setstate, METH_O, setstate_doc}, {NULL, NULL} /* sentinel */ }; static PyTypeObject tee_type = { PyVarObject_HEAD_INIT(NULL, 0) - "itertools.tee", /* tp_name */ + "itertools._tee", /* tp_name */ sizeof(teeobject), /* tp_basicsize */ 0, /* tp_itemsize */ /* methods */ @@ 
-771,6 +938,38 @@ } } +static PyObject * +cycle_reduce(cycleobject *lz) +{ + /* Create a new cycle with the iterator tuple, then set + * the saved state on it. + */ + return Py_BuildValue("O(O)(Oi)", Py_TYPE(lz), + lz->it, lz->saved, lz->firstpass); + } + +static PyObject * +cycle_setstate(cycleobject *lz, PyObject *state) +{ + PyObject *saved=NULL; + int firstpass; + if (!PyArg_ParseTuple(state, "Oi", &saved, &firstpass)) + return NULL; + Py_CLEAR(lz->saved); + lz->saved = saved; + Py_XINCREF(lz->saved); + lz->firstpass = firstpass != 0; + Py_RETURN_NONE; +} + +static PyMethodDef cycle_methods[] = { + {"__reduce__", (PyCFunction)cycle_reduce, METH_NOARGS, + reduce_doc}, + {"__setstate__", (PyCFunction)cycle_setstate, METH_O, + setstate_doc}, + {NULL, NULL} /* sentinel */ +}; + PyDoc_STRVAR(cycle_doc, "cycle(iterable) --> cycle object\n\ \n\ @@ -807,7 +1006,7 @@ 0, /* tp_weaklistoffset */ PyObject_SelfIter, /* tp_iter */ (iternextfunc)cycle_next, /* tp_iternext */ - 0, /* tp_methods */ + cycle_methods, /* tp_methods */ 0, /* tp_members */ 0, /* tp_getset */ 0, /* tp_base */ @@ -913,6 +1112,31 @@ } } +static PyObject * +dropwhile_reduce(dropwhileobject *lz) +{ + return Py_BuildValue("O(OO)l", Py_TYPE(lz), + lz->func, lz->it, lz->start); +} + +static PyObject * +dropwhile_setstate(dropwhileobject *lz, PyObject *state) +{ + int start = PyObject_IsTrue(state); + if (start == -1) + return NULL; + lz->start = start; + Py_RETURN_NONE; +} + +static PyMethodDef dropwhile_methods[] = { + {"__reduce__", (PyCFunction)dropwhile_reduce, METH_NOARGS, + reduce_doc}, + {"__setstate__", (PyCFunction)dropwhile_setstate, METH_O, + setstate_doc}, + {NULL, NULL} /* sentinel */ +}; + PyDoc_STRVAR(dropwhile_doc, "dropwhile(predicate, iterable) --> dropwhile object\n\ \n\ @@ -949,7 +1173,7 @@ 0, /* tp_weaklistoffset */ PyObject_SelfIter, /* tp_iter */ (iternextfunc)dropwhile_next, /* tp_iternext */ - 0, /* tp_methods */ + dropwhile_methods, /* tp_methods */ 0, /* tp_members */ 0, /* tp_getset */ 0, /* tp_base */ @@ -1052,6 +1276,30 @@ return NULL; } +static PyObject * +takewhile_reduce(takewhileobject *lz) +{ + return Py_BuildValue("O(OO)l", Py_TYPE(lz), + lz->func, lz->it, lz->stop); +} + +static PyObject * +takewhile_reduce_setstate(takewhileobject *lz, PyObject *state) +{ + int stop = PyObject_IsTrue(state); + if (stop == -1) + return NULL; + lz->stop = stop; + Py_RETURN_NONE; +} + +static PyMethodDef takewhile_reduce_methods[] = { + {"__reduce__", (PyCFunction)takewhile_reduce, METH_NOARGS, + reduce_doc}, + {"__setstate__", (PyCFunction)takewhile_reduce_setstate, METH_O, + setstate_doc}, + {NULL, NULL} /* sentinel */ +}; PyDoc_STRVAR(takewhile_doc, "takewhile(predicate, iterable) --> takewhile object\n\ \n\ @@ -1088,7 +1336,7 @@ 0, /* tp_weaklistoffset */ PyObject_SelfIter, /* tp_iter */ (iternextfunc)takewhile_next, /* tp_iternext */ - 0, /* tp_methods */ + takewhile_reduce_methods, /* tp_methods */ 0, /* tp_members */ 0, /* tp_getset */ 0, /* tp_base */ @@ -1244,6 +1492,44 @@ return item; } +static PyObject * +islice_reduce(isliceobject *lz) +{ + /* When unpickled, generate a new object with the same bounds, + * then 'setstate' with the next and count + */ + PyObject *stop; + if (lz->stop == -1) { + stop = Py_None; + Py_INCREF(stop); + } else { + stop = PyLong_FromSsize_t(lz->stop); + if (stop == NULL) + return NULL; + } + return Py_BuildValue("O(OnNn)n", Py_TYPE(lz), + lz->it, lz->next, stop, lz->step, + lz->cnt); +} + +static PyObject * +islice_setstate(isliceobject *lz, PyObject *state) +{ + Py_ssize_t 
cnt = PyLong_AsSsize_t(state); + if (cnt == -1 && PyErr_Occurred()) + return NULL; + lz->cnt = cnt; + Py_RETURN_NONE; +} + +static PyMethodDef islice_methods[] = { + {"__reduce__", (PyCFunction)islice_reduce, METH_NOARGS, + reduce_doc}, + {"__setstate__", (PyCFunction)islice_setstate, METH_O, + setstate_doc}, + {NULL, NULL} /* sentinel */ +}; + PyDoc_STRVAR(islice_doc, "islice(iterable, [start,] stop [, step]) --> islice object\n\ \n\ @@ -1284,7 +1570,7 @@ 0, /* tp_weaklistoffset */ PyObject_SelfIter, /* tp_iter */ (iternextfunc)islice_next, /* tp_iternext */ - 0, /* tp_methods */ + islice_methods, /* tp_methods */ 0, /* tp_members */ 0, /* tp_getset */ 0, /* tp_base */ @@ -1379,6 +1665,19 @@ return result; } +static PyObject * +starmap_reduce(starmapobject *lz) +{ + /* Just pickle the iterator */ + return Py_BuildValue("O(OO)", Py_TYPE(lz), lz->func, lz->it); +} + +static PyMethodDef starmap_methods[] = { + {"__reduce__", (PyCFunction)starmap_reduce, METH_NOARGS, + reduce_doc}, + {NULL, NULL} /* sentinel */ +}; + PyDoc_STRVAR(starmap_doc, "starmap(function, sequence) --> starmap object\n\ \n\ @@ -1415,7 +1714,7 @@ 0, /* tp_weaklistoffset */ PyObject_SelfIter, /* tp_iter */ (iternextfunc)starmap_next, /* tp_iternext */ - 0, /* tp_methods */ + starmap_methods, /* tp_methods */ 0, /* tp_members */ 0, /* tp_getset */ 0, /* tp_base */ @@ -1534,6 +1833,41 @@ return chain_next(lz); /* recurse and use next active */ } +static PyObject * +chain_reduce(chainobject *lz) +{ + if (lz->source) { + /* we can't pickle function objects (itertools.from_iterable) so + * we must use setstate to replace the iterable. One day we + * will fix pickling of functions + */ + if (lz->active) { + return Py_BuildValue("O()(OO)", Py_TYPE(lz), lz->source, lz->active); + } else { + return Py_BuildValue("O()(O)", Py_TYPE(lz), lz->source); + } + } else { + return Py_BuildValue("O()", Py_TYPE(lz)); /* exhausted */ + } + return NULL; +} + +static PyObject * +chain_setstate(chainobject *lz, PyObject *state) +{ + PyObject *source, *active=NULL; + if (! 
PyArg_ParseTuple(state, "O|O", &source, &active)) + return NULL; + + Py_CLEAR(lz->source); + lz->source = source; + Py_INCREF(lz->source); + Py_CLEAR(lz->active); + lz->active = active; + Py_XINCREF(lz->active); + Py_RETURN_NONE; +} + PyDoc_STRVAR(chain_doc, "chain(*iterables) --> chain object\n\ \n\ @@ -1550,6 +1884,10 @@ static PyMethodDef chain_methods[] = { {"from_iterable", (PyCFunction) chain_new_from_iterable, METH_O | METH_CLASS, chain_from_iterable_doc}, + {"__reduce__", (PyCFunction)chain_reduce, METH_NOARGS, + reduce_doc}, + {"__setstate__", (PyCFunction)chain_setstate, METH_O, + setstate_doc}, {NULL, NULL} /* sentinel */ }; @@ -1790,6 +2128,83 @@ return NULL; } +static PyObject * +product_reduce(productobject *lz) +{ + if (lz->stopped) { + return Py_BuildValue("O(())", Py_TYPE(lz)); + } else if (lz->result == NULL) { + return Py_BuildValue("OO", Py_TYPE(lz), lz->pools); + } else { + PyObject *indices; + Py_ssize_t n, i; + + /* we must pickle the indices use them for setstate, and + * additionally indicate that the iterator has started + */ + n = PyTuple_GET_SIZE(lz->pools); + indices = PyTuple_New(n); + if (indices == NULL) + return NULL; + for (i=0; iindices[i]); + if (!index) { + Py_DECREF(indices); + return NULL; + } + PyTuple_SET_ITEM(indices, i, index); + } + return Py_BuildValue("OON", Py_TYPE(lz), lz->pools, indices); + } +} + +static PyObject * +product_setstate(productobject *lz, PyObject *state) +{ + PyObject *result; + Py_ssize_t n, i; + + n = PyTuple_GET_SIZE(lz->pools); + if (!PyTuple_Check(state) || PyTuple_GET_SIZE(state) != n) { + PyErr_SetString(PyExc_ValueError, "invalid arguments"); + return NULL; + } + for (i=0; i n-1) + index = n-1; + lz->indices[i] = index; + } + + result = PyTuple_New(n); + if (!result) + return NULL; + for (i=0; ipools, i); + PyObject *element = PyTuple_GET_ITEM(pool, lz->indices[i]); + Py_INCREF(element); + PyTuple_SET_ITEM(result, i, element); + } + Py_CLEAR(lz->result); + lz->result = result; + Py_RETURN_NONE; +} + +static PyMethodDef product_methods[] = { + {"__reduce__", (PyCFunction)product_reduce, METH_NOARGS, + reduce_doc}, + {"__setstate__", (PyCFunction)product_setstate, METH_O, + setstate_doc}, + {NULL, NULL} /* sentinel */ +}; + PyDoc_STRVAR(product_doc, "product(*iterables) --> product object\n\ \n\ @@ -1834,7 +2249,7 @@ 0, /* tp_weaklistoffset */ PyObject_SelfIter, /* tp_iter */ (iternextfunc)product_next, /* tp_iternext */ - 0, /* tp_methods */ + product_methods, /* tp_methods */ 0, /* tp_members */ 0, /* tp_getset */ 0, /* tp_base */ @@ -2021,6 +2436,86 @@ return NULL; } +static PyObject * +combinations_reduce(combinationsobject *lz) +{ + if (lz->result == NULL) { + return Py_BuildValue("O(On)", Py_TYPE(lz), lz->pool, lz->r); + } else if (lz->stopped) { + return Py_BuildValue("O(()n)", Py_TYPE(lz), lz->r); + } else { + PyObject *indices; + Py_ssize_t i; + + /* we must pickle the indices and use them for setstate */ + indices = PyTuple_New(lz->r); + if (!indices) + return NULL; + for (i=0; ir; i++) + { + PyObject* index = PyLong_FromSsize_t(lz->indices[i]); + if (!index) { + Py_DECREF(indices); + return NULL; + } + PyTuple_SET_ITEM(indices, i, index); + } + + return Py_BuildValue("O(On)N", Py_TYPE(lz), lz->pool, lz->r, indices); + } +} + +static PyObject * +combinations_setstate(combinationsobject *lz, PyObject *state) +{ + PyObject *result; + Py_ssize_t i; + Py_ssize_t n = PyTuple_GET_SIZE(lz->pool); + + if (!PyTuple_Check(state) || PyTuple_GET_SIZE(state) != lz->r) + { + PyErr_SetString(PyExc_ValueError, "invalid 
arguments"); + return NULL; + } + + for (i=0; ir; i++) + { + Py_ssize_t max; + PyObject* indexObject = PyTuple_GET_ITEM(state, i); + Py_ssize_t index = PyLong_AsSsize_t(indexObject); + if (index == -1 && PyErr_Occurred()) + return NULL; /* not an integer */ + max = i + n - lz->r; + /* clamp the index (beware of negative max) */ + if (index > max) + index = max; + if (index < 0) + index = 0; + lz->indices[i] = index; + } + + result = PyTuple_New(lz->r); + if (result == NULL) + return NULL; + for (i=0; ir; i++) { + PyObject *element = PyTuple_GET_ITEM(lz->pool, lz->indices[i]); + Py_INCREF(element); + PyTuple_SET_ITEM(result, i, element); + } + + Py_CLEAR(lz->result); + lz->result = result; + Py_RETURN_NONE; +} + +static PyMethodDef combinations_methods[] = { + {"__reduce__", (PyCFunction)combinations_reduce, METH_NOARGS, + reduce_doc}, + {"__setstate__", (PyCFunction)combinations_setstate, METH_O, + setstate_doc}, + {NULL, NULL} /* sentinel */ +}; + PyDoc_STRVAR(combinations_doc, "combinations(iterable, r) --> combinations object\n\ \n\ @@ -2029,11 +2524,11 @@ static PyTypeObject combinations_type = { PyVarObject_HEAD_INIT(NULL, 0) - "itertools.combinations", /* tp_name */ + "itertools.combinations", /* tp_name */ sizeof(combinationsobject), /* tp_basicsize */ 0, /* tp_itemsize */ /* methods */ - (destructor)combinations_dealloc, /* tp_dealloc */ + (destructor)combinations_dealloc, /* tp_dealloc */ 0, /* tp_print */ 0, /* tp_getattr */ 0, /* tp_setattr */ @@ -2050,14 +2545,14 @@ 0, /* tp_as_buffer */ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC | Py_TPFLAGS_BASETYPE, /* tp_flags */ - combinations_doc, /* tp_doc */ - (traverseproc)combinations_traverse, /* tp_traverse */ + combinations_doc, /* tp_doc */ + (traverseproc)combinations_traverse,/* tp_traverse */ 0, /* tp_clear */ 0, /* tp_richcompare */ 0, /* tp_weaklistoffset */ PyObject_SelfIter, /* tp_iter */ - (iternextfunc)combinations_next, /* tp_iternext */ - 0, /* tp_methods */ + (iternextfunc)combinations_next, /* tp_iternext */ + combinations_methods, /* tp_methods */ 0, /* tp_members */ 0, /* tp_getset */ 0, /* tp_base */ @@ -2067,7 +2562,7 @@ 0, /* tp_dictoffset */ 0, /* tp_init */ 0, /* tp_alloc */ - combinations_new, /* tp_new */ + combinations_new, /* tp_new */ PyObject_GC_Del, /* tp_free */ }; @@ -2266,6 +2761,82 @@ return NULL; } +static PyObject * +cwr_reduce(cwrobject *lz) +{ + if (lz->result == NULL) { + return Py_BuildValue("O(On)", Py_TYPE(lz), lz->pool, lz->r); + } else if (lz->stopped) { + return Py_BuildValue("O(()n)", Py_TYPE(lz), lz->r); + } else { + PyObject *indices; + Py_ssize_t i; + + /* we must pickle the indices and use them for setstate */ + indices = PyTuple_New(lz->r); + if (!indices) + return NULL; + for (i=0; ir; i++) + { + PyObject* index = PyLong_FromSsize_t(lz->indices[i]); + if (!index) { + Py_DECREF(indices); + return NULL; + } + PyTuple_SET_ITEM(indices, i, index); + } + + return Py_BuildValue("O(On)N", Py_TYPE(lz), lz->pool, lz->r, indices); + } +} + +static PyObject * +cwr_setstate(cwrobject *lz, PyObject *state) +{ + PyObject *result; + Py_ssize_t n, i; + + if (!PyTuple_Check(state) || PyTuple_GET_SIZE(state) != lz->r) + { + PyErr_SetString(PyExc_ValueError, "invalid arguments"); + return NULL; + } + + n = PyTuple_GET_SIZE(lz->pool); + for (i=0; ir; i++) + { + PyObject* indexObject = PyTuple_GET_ITEM(state, i); + Py_ssize_t index = PyLong_AsSsize_t(indexObject); + if (index < 0 && PyErr_Occurred()) + return NULL; /* not an integer */ + /* clamp the index */ + if (index < 0) + index = 0; + else if (index 
> n-1) + index = n-1; + lz->indices[i] = index; + } + result = PyTuple_New(lz->r); + if (result == NULL) + return NULL; + for (i=0; ir; i++) { + PyObject *element = PyTuple_GET_ITEM(lz->pool, lz->indices[i]); + Py_INCREF(element); + PyTuple_SET_ITEM(result, i, element); + } + Py_CLEAR(lz->result); + lz->result = result; + Py_RETURN_NONE; +} + +static PyMethodDef cwr_methods[] = { + {"__reduce__", (PyCFunction)cwr_reduce, METH_NOARGS, + reduce_doc}, + {"__setstate__", (PyCFunction)cwr_setstate, METH_O, + setstate_doc}, + {NULL, NULL} /* sentinel */ +}; + PyDoc_STRVAR(cwr_doc, "combinations_with_replacement(iterable, r) --> combinations_with_replacement object\n\ \n\ @@ -2275,11 +2846,11 @@ static PyTypeObject cwr_type = { PyVarObject_HEAD_INIT(NULL, 0) - "itertools.combinations_with_replacement", /* tp_name */ - sizeof(cwrobject), /* tp_basicsize */ + "itertools.combinations_with_replacement", /* tp_name */ + sizeof(cwrobject), /* tp_basicsize */ 0, /* tp_itemsize */ /* methods */ - (destructor)cwr_dealloc, /* tp_dealloc */ + (destructor)cwr_dealloc, /* tp_dealloc */ 0, /* tp_print */ 0, /* tp_getattr */ 0, /* tp_setattr */ @@ -2291,19 +2862,19 @@ 0, /* tp_hash */ 0, /* tp_call */ 0, /* tp_str */ - PyObject_GenericGetAttr, /* tp_getattro */ + PyObject_GenericGetAttr, /* tp_getattro */ 0, /* tp_setattro */ 0, /* tp_as_buffer */ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC | - Py_TPFLAGS_BASETYPE, /* tp_flags */ - cwr_doc, /* tp_doc */ - (traverseproc)cwr_traverse, /* tp_traverse */ + Py_TPFLAGS_BASETYPE, /* tp_flags */ + cwr_doc, /* tp_doc */ + (traverseproc)cwr_traverse, /* tp_traverse */ 0, /* tp_clear */ 0, /* tp_richcompare */ 0, /* tp_weaklistoffset */ - PyObject_SelfIter, /* tp_iter */ - (iternextfunc)cwr_next, /* tp_iternext */ - 0, /* tp_methods */ + PyObject_SelfIter, /* tp_iter */ + (iternextfunc)cwr_next, /* tp_iternext */ + cwr_methods, /* tp_methods */ 0, /* tp_members */ 0, /* tp_getset */ 0, /* tp_base */ @@ -2313,8 +2884,8 @@ 0, /* tp_dictoffset */ 0, /* tp_init */ 0, /* tp_alloc */ - cwr_new, /* tp_new */ - PyObject_GC_Del, /* tp_free */ + cwr_new, /* tp_new */ + PyObject_GC_Del, /* tp_free */ }; @@ -2538,6 +3109,115 @@ return NULL; } +static PyObject * +permutations_reduce(permutationsobject *po) +{ + if (po->result == NULL) { + return Py_BuildValue("O(On)", Py_TYPE(po), po->pool, po->r); + } else if (po->stopped) { + return Py_BuildValue("O(()n)", Py_TYPE(po), po->r); + } else { + PyObject *indices=NULL, *cycles=NULL; + Py_ssize_t n, i; + + /* we must pickle the indices and cycles and use them for setstate */ + n = PyTuple_GET_SIZE(po->pool); + indices = PyTuple_New(n); + if (indices == NULL) + goto err; + for (i=0; iindices[i]); + if (!index) + goto err; + PyTuple_SET_ITEM(indices, i, index); + } + + cycles = PyTuple_New(po->r); + if (cycles == NULL) + goto err; + for (i=0; ir; i++) + { + PyObject* index = PyLong_FromSsize_t(po->cycles[i]); + if (!index) + goto err; + PyTuple_SET_ITEM(cycles, i, index); + } + return Py_BuildValue("O(On)(NN)", Py_TYPE(po), + po->pool, po->r, + indices, cycles); + err: + Py_XDECREF(indices); + Py_XDECREF(cycles); + return NULL; + } +} + +static PyObject * +permutations_setstate(permutationsobject *po, PyObject *state) +{ + PyObject *indices, *cycles, *result; + Py_ssize_t n, i; + + if (!PyArg_ParseTuple(state, "O!O!", + &PyTuple_Type, &indices, + &PyTuple_Type, &cycles)) + return NULL; + + n = PyTuple_GET_SIZE(po->pool); + if (PyTuple_GET_SIZE(indices) != n || + PyTuple_GET_SIZE(cycles) != po->r) + { + PyErr_SetString(PyExc_ValueError, "invalid 
arguments"); + return NULL; + } + + for (i=0; i n-1) + index = n-1; + po->indices[i] = index; + } + + for (i=0; ir; i++) + { + PyObject* indexObject = PyTuple_GET_ITEM(cycles, i); + Py_ssize_t index = PyLong_AsSsize_t(indexObject); + if (index < 0 && PyErr_Occurred()) + return NULL; /* not an integer */ + if (index < 1) + index = 1; + else if (index > n-i) + index = n-i; + po->cycles[i] = index; + } + result = PyTuple_New(po->r); + if (result == NULL) + return NULL; + for (i=0; ir; i++) { + PyObject *element = PyTuple_GET_ITEM(po->pool, po->indices[i]); + Py_INCREF(element); + PyTuple_SET_ITEM(result, i, element); + } + Py_CLEAR(po->result); + po->result = result; + Py_RETURN_NONE; +} + +static PyMethodDef permuations_methods[] = { + {"__reduce__", (PyCFunction)permutations_reduce, METH_NOARGS, + reduce_doc}, + {"__setstate__", (PyCFunction)permutations_setstate, METH_O, + setstate_doc}, + {NULL, NULL} /* sentinel */ +}; + PyDoc_STRVAR(permutations_doc, "permutations(iterable[, r]) --> permutations object\n\ \n\ @@ -2574,7 +3254,7 @@ 0, /* tp_weaklistoffset */ PyObject_SelfIter, /* tp_iter */ (iternextfunc)permutations_next, /* tp_iternext */ - 0, /* tp_methods */ + permuations_methods, /* tp_methods */ 0, /* tp_members */ 0, /* tp_getset */ 0, /* tp_base */ @@ -2605,7 +3285,7 @@ static char *kwargs[] = {"iterable", "func", NULL}; PyObject *iterable; PyObject *it; - PyObject *binop = NULL; + PyObject *binop = Py_None; accumulateobject *lz; if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|O:accumulate", @@ -2624,8 +3304,10 @@ return NULL; } - Py_XINCREF(binop); - lz->binop = binop; + if (binop != Py_None) { + Py_XINCREF(binop); + lz->binop = binop; + } lz->total = NULL; lz->it = it; return (PyObject *)lz; @@ -2681,6 +3363,31 @@ return newtotal; } +static PyObject * +accumulate_reduce(accumulateobject *lz) +{ + return Py_BuildValue("O(OO)O", Py_TYPE(lz), + lz->it, lz->binop?lz->binop:Py_None, + lz->total?lz->total:Py_None); + } + +static PyObject * +accumulate_setstate(accumulateobject *lz, PyObject *state) +{ + Py_CLEAR(lz->total); + lz->total = state; + Py_INCREF(lz->total); + Py_RETURN_NONE; +} + +static PyMethodDef accumulate_methods[] = { + {"__reduce__", (PyCFunction)accumulate_reduce, METH_NOARGS, + reduce_doc}, + {"__setstate__", (PyCFunction)accumulate_setstate, METH_O, + setstate_doc}, + {NULL, NULL} /* sentinel */ +}; + PyDoc_STRVAR(accumulate_doc, "accumulate(iterable[, func]) --> accumulate object\n\ \n\ @@ -2716,7 +3423,7 @@ 0, /* tp_weaklistoffset */ PyObject_SelfIter, /* tp_iter */ (iternextfunc)accumulate_next, /* tp_iternext */ - 0, /* tp_methods */ + accumulate_methods, /* tp_methods */ 0, /* tp_members */ 0, /* tp_getset */ 0, /* tp_base */ @@ -2833,6 +3540,19 @@ } } +static PyObject * +compress_reduce(compressobject *lz) +{ + return Py_BuildValue("O(OO)", Py_TYPE(lz), + lz->data, lz->selectors); + } + +static PyMethodDef compress_methods[] = { + {"__reduce__", (PyCFunction)compress_reduce, METH_NOARGS, + reduce_doc}, + {NULL, NULL} /* sentinel */ +}; + PyDoc_STRVAR(compress_doc, "compress(data, selectors) --> iterator over selected data\n\ \n\ @@ -2870,7 +3590,7 @@ 0, /* tp_weaklistoffset */ PyObject_SelfIter, /* tp_iter */ (iternextfunc)compress_next, /* tp_iternext */ - 0, /* tp_methods */ + compress_methods, /* tp_methods */ 0, /* tp_members */ 0, /* tp_getset */ 0, /* tp_base */ @@ -2977,6 +3697,19 @@ } } +static PyObject * +filterfalse_reduce(filterfalseobject *lz) +{ + return Py_BuildValue("O(OO)", Py_TYPE(lz), + lz->func, lz->it); + } + +static PyMethodDef 
filterfalse_methods[] = {
+    {"__reduce__", (PyCFunction)filterfalse_reduce, METH_NOARGS,
+     reduce_doc},
+    {NULL,              NULL}   /* sentinel */
+};
+
 PyDoc_STRVAR(filterfalse_doc,
 "filterfalse(function or None, sequence) --> filterfalse object\n\
 \n\
@@ -3013,7 +3746,7 @@
     0,                                  /* tp_weaklistoffset */
     PyObject_SelfIter,                  /* tp_iter */
     (iternextfunc)filterfalse_next,     /* tp_iternext */
-    0,                                  /* tp_methods */
+    filterfalse_methods,                /* tp_methods */
     0,                                  /* tp_members */
     0,                                  /* tp_getset */
     0,                                  /* tp_base */
@@ -3207,11 +3940,9 @@
     return Py_BuildValue("O(n)", Py_TYPE(lz), lz->cnt);
 }
 
-PyDoc_STRVAR(count_reduce_doc, "Return state information for pickling.");
-
 static PyMethodDef count_methods[] = {
     {"__reduce__", (PyCFunction)count_reduce, METH_NOARGS,
-     count_reduce_doc},
+     reduce_doc},
     {NULL,              NULL}   /* sentinel */
 };
 
@@ -3352,8 +4083,21 @@
 
 PyDoc_STRVAR(length_hint_doc, "Private method returning an estimate of len(list(it)).");
 
+static PyObject *
+repeat_reduce(repeatobject *ro)
+{
+    /* unpickle this so that a new repeat iterator is constructed with an
+     * object, then call __setstate__ on it to set cnt
+     */
+    if (ro->cnt >= 0)
+        return Py_BuildValue("O(On)", Py_TYPE(ro), ro->element, ro->cnt);
+    else
+        return Py_BuildValue("O(O)", Py_TYPE(ro), ro->element);
+}
+
 static PyMethodDef repeat_methods[] = {
     {"__length_hint__", (PyCFunction)repeat_len, METH_NOARGS, length_hint_doc},
+    {"__reduce__", (PyCFunction)repeat_reduce, METH_NOARGS, reduce_doc},
     {NULL,              NULL}           /* sentinel */
 };
 
@@ -3579,6 +4323,49 @@
     return result;
 }
 
+static PyObject *
+zip_longest_reduce(ziplongestobject *lz)
+{
+
+    /* Create a new tuple with empty sequences where appropriate to pickle.
+     * Then use setstate to set the fillvalue
+     */
+    int i;
+    PyObject *args = PyTuple_New(PyTuple_GET_SIZE(lz->ittuple));
+    if (args == NULL)
+        return NULL;
+    for (i=0; i<PyTuple_GET_SIZE(lz->ittuple); i++) {
+        PyObject *elem = PyTuple_GET_ITEM(lz->ittuple, i);
+        if (elem == NULL) {
+            elem = PyTuple_New(0);
+            if (elem == NULL) {
+                Py_DECREF(args);
+                return NULL;
+            }
+        } else
+            Py_INCREF(elem);
+        PyTuple_SET_ITEM(args, i, elem);
+    }
+    return Py_BuildValue("ONO", Py_TYPE(lz), args, lz->fillvalue);
+}
+
+static PyObject *
+zip_longest_setstate(ziplongestobject *lz, PyObject *state)
+{
+    Py_CLEAR(lz->fillvalue);
+    lz->fillvalue = state;
+    Py_INCREF(lz->fillvalue);
+    Py_RETURN_NONE;
+}
+
+static PyMethodDef zip_longest_methods[] = {
+    {"__reduce__", (PyCFunction)zip_longest_reduce, METH_NOARGS,
+     reduce_doc},
+    {"__setstate__", (PyCFunction)zip_longest_setstate, METH_O,
+     setstate_doc},
+    {NULL,              NULL}   /* sentinel */
+};
+
 PyDoc_STRVAR(zip_longest_doc,
 "zip_longest(iter1 [,iter2 [...]], [fillvalue=None]