[Python-checkins] r72214 - in python/branches/pep-0383: Doc/library/internet.rst Doc/library/io.rst Doc/library/ipaddr.rst Doc/library/json.rst Doc/library/stdtypes.rst Doc/library/test.rst Lib/_pyio.py Lib/io.py Lib/ipaddr.py Lib/json/__init__.py Lib/json/decoder.py Lib/json/encoder.py Lib/json/scanner.py Lib/json/tests/test_decode.py Lib/json/tests/test_dump.py Lib/json/tests/test_encode_basestring_ascii.py Lib/json/tests/test_fail.py Lib/json/tests/test_float.py Lib/json/tests/test_scanstring.py Lib/json/tests/test_unicode.py Lib/json/tool.py Lib/shutil.py Lib/test/formatfloat_testcases.txt Lib/test/string_tests.py Lib/test/support.py Lib/test/test_getopt.py Lib/test/test_gettext.py Lib/test/test_io.py Lib/test/test_ipaddr.py Lib/test/test_memoryio.py Lib/test/test_ntpath.py Lib/test/test_optparse.py Lib/test/test_posixpath.py Lib/test/test_shutil.py Lib/test/test_tcl.py Lib/test/test_tempfile.py Lib/test/test_types.py Lib/test/test_xmlrpc.py Lib/urllib/request.py Misc/NEWS Modules/_io/bufferedio.c Modules/_io/textio.c Modules/_json.c Modules/ld_so_aix Objects/longobject.c Objects/stringlib/formatter.h Objects/stringlib/string_format.h Objects/unicodeobject.c PC/msvcrtmodule.c Python/pystrtod.c

martin.v.loewis python-checkins at python.org
Sat May 2 21:20:59 CEST 2009


Author: martin.v.loewis
Date: Sat May  2 21:20:57 2009
New Revision: 72214

Log:
Merged revisions 72156,72160-72161,72165,72172,72175-72177,72179,72181,72185-72187,72192,72194-72196,72201,72203-72204,72207 via svnmerge from 
svn+ssh://pythondev@svn.python.org/python/branches/py3k

................
  r72156 | senthil.kumaran | 2009-05-01 08:00:23 +0200 (Fr, 01 Mai 2009) | 5 lines
  
  Fix for Issue1648102, based on the MSDN spec: If this parameter specifies the
  "<local>" macro as the only entry, this function bypasses any host name that
  does not contain a period.
................
  r72160 | georg.brandl | 2009-05-01 10:59:13 +0200 (Fr, 01 Mai 2009) | 9 lines
  
  Merged revisions 72159 via svnmerge from 
  svn+ssh://pythondev@svn.python.org/python/trunk
  
  ........
    r72159 | georg.brandl | 2009-05-01 10:51:37 +0200 (Fr, 01 Mai 2009) | 2 lines
    
    #5889: remove comma at the end of a list that some C compilers don't like.
  ........
................
  r72161 | mark.dickinson | 2009-05-01 13:42:00 +0200 (Fr, 01 Mai 2009) | 5 lines
  
  Issue #5859: Remove use of fixed-length buffers for float formatting
  in unicodeobject.c and the fallback version of PyOS_double_to_string.
  As a result, operations like '%.120e' % 12.34 no longer raise an
  exception.
................
  r72165 | mark.dickinson | 2009-05-01 17:37:04 +0200 (Fr, 01 Mai 2009) | 2 lines
  
  Issue #5859: Remove '%f' to '%g' formatting switch for large floats.
................
  r72172 | walter.doerwald | 2009-05-01 21:58:58 +0200 (Fr, 01 Mai 2009) | 12 lines
  
  Merged revisions 72167 via svnmerge from 
  svn+ssh://pythondev@svn.python.org/python/trunk
  
  ........
    r72167 | walter.doerwald | 2009-05-01 19:35:37 +0200 (Fr, 01 Mai 2009) | 5 lines
    
    Make test.test_support.EnvironmentVarGuard behave like a dictionary.
    
    All changes are mirrored to the underlying os.environ dict, but rolled back
    on exit from the with block.
  ........
................
  r72175 | benjamin.peterson | 2009-05-01 22:40:59 +0200 (Fr, 01 Mai 2009) | 1 line
  
  implement a detach() method for BufferedIOBase and TextIOBase #5883
................
  r72176 | benjamin.peterson | 2009-05-01 22:45:43 +0200 (Fr, 01 Mai 2009) | 1 line
  
  add myself
................
  r72177 | benjamin.peterson | 2009-05-01 22:48:14 +0200 (Fr, 01 Mai 2009) | 1 line
  
  versionadded
................
  r72179 | antoine.pitrou | 2009-05-01 23:09:44 +0200 (Fr, 01 Mai 2009) | 10 lines
  
  Merged revisions 72178 via svnmerge from 
  svn+ssh://pythondev@svn.python.org/python/trunk
  
  ........
    r72178 | antoine.pitrou | 2009-05-01 22:55:35 +0200 (ven., 01 mai 2009) | 4 lines
    
    Issue #3002: `shutil.copyfile()` and `shutil.copytree()` now raise an
    error when a named pipe is encountered, rather than blocking infinitely.
  ........
................
  r72181 | antoine.pitrou | 2009-05-01 23:18:27 +0200 (Fr, 01 Mai 2009) | 10 lines
  
  Merged revisions 72180 via svnmerge from 
  svn+ssh://pythondev@svn.python.org/python/trunk
  
  ........
    r72180 | antoine.pitrou | 2009-05-01 23:16:14 +0200 (ven., 01 mai 2009) | 4 lines
    
    Issue #5726: Make Modules/ld_so_aix return the actual exit code of the linker, rather than always exit successfully.
    Patch by Floris Bruynooghe.
  ........
................
  r72185 | benjamin.peterson | 2009-05-01 23:42:23 +0200 (Fr, 01 Mai 2009) | 1 line
  
  use C character code to simplify #5410
................
  r72186 | gregory.p.smith | 2009-05-02 00:13:48 +0200 (Sa, 02 Mai 2009) | 12 lines
  
  Merged revisions 72173 via svnmerge from 
  svn+ssh://pythondev@svn.python.org/python/trunk
  
  ........
    r72173 | gregory.p.smith | 2009-05-01 12:59:52 -0700 (Fri, 01 May 2009) | 5 lines
    
    Adds the ipaddr module to the standard library.  Issue #3959.
    Based off of subversion r69 from http://code.google.com/p/ipaddr-py/
    
    This code is 2to3 safe, I'll merge it into py3k later this afternoon.
  ........
................
  r72187 | gregory.p.smith | 2009-05-02 08:15:18 +0200 (Sa, 02 Mai 2009) | 13 lines
  
  Merged revisions 72183-72184 via svnmerge from 
  svn+ssh://pythondev@svn.python.org/python/trunk
  
  ........
    r72183 | georg.brandl | 2009-05-01 14:28:35 -0700 (Fri, 01 May 2009) | 2 lines
    
    Review ipaddr docs and add them in the TOC under "Internet protocols".
  ........
    r72184 | georg.brandl | 2009-05-01 14:30:25 -0700 (Fri, 01 May 2009) | 1 line
    
    Fix directive name.
  ........
................
  r72192 | eric.smith | 2009-05-02 14:15:39 +0200 (Sa, 02 Mai 2009) | 9 lines
  
  Merged revisions 72189 via svnmerge from 
  svn+ssh://pythondev@svn.python.org/python/trunk
  
  ........
    r72189 | eric.smith | 2009-05-02 05:58:09 -0400 (Sat, 02 May 2009) | 1 line
    
    Keep py3k and trunk code in sync.
  ........
................
  r72194 | benjamin.peterson | 2009-05-02 14:36:44 +0200 (Sa, 02 Mai 2009) | 6 lines
  
  port simplejson upgrade from the trunk #4136
  
  json also now works only with unicode strings
  
  Patch by Antoine Pitrou; updated by me
................
  r72195 | benjamin.peterson | 2009-05-02 17:28:37 +0200 (Sa, 02 Mai 2009) | 16 lines
  
  Blocked revisions 70443,70471,70702 via svnmerge
  
  ........
    r70443 | bob.ippolito | 2009-03-17 18:19:00 -0500 (Tue, 17 Mar 2009) | 1 line
    
    merge json library with simplejson 2.0.9 (issue 4136)
  ........
    r70471 | raymond.hettinger | 2009-03-19 14:19:03 -0500 (Thu, 19 Mar 2009) | 3 lines
    
    Issue 5381:  Add object_pairs_hook to the json module.
  ........
    r70702 | bob.ippolito | 2009-03-29 17:33:58 -0500 (Sun, 29 Mar 2009) | 1 line
    
    Issue 5381: fix regression in pure python code path, Issue 5584: fix a decoder bug for unicode float literals outside of a container
  ........
................
  r72196 | hirokazu.yamamoto | 2009-05-02 17:55:19 +0200 (Sa, 02 Mai 2009) | 1 line
  
  Fixed warning. (Should not use *const* as variable name)
................
  r72201 | benjamin.peterson | 2009-05-02 19:38:06 +0200 (Sa, 02 Mai 2009) | 12 lines
  
  Blocked revisions 72199-72200 via svnmerge
  
  ........
    r72199 | benjamin.peterson | 2009-05-02 12:33:01 -0500 (Sat, 02 May 2009) | 1 line
    
    remove py3k compat code
  ........
    r72200 | benjamin.peterson | 2009-05-02 12:35:39 -0500 (Sat, 02 May 2009) | 1 line
    
    revert unrelated change
  ........
................
  r72203 | mark.dickinson | 2009-05-02 19:57:52 +0200 (Sa, 02 Mai 2009) | 10 lines
  
  Merged revisions 72202 via svnmerge from 
  svn+ssh://pythondev@svn.python.org/python/trunk
  
  ........
    r72202 | mark.dickinson | 2009-05-02 18:55:01 +0100 (Sat, 02 May 2009) | 3 lines
    
    Remove unnecessary use of context for long getters.
    (Related to issue #5880).
  ........
................
  r72204 | benjamin.peterson | 2009-05-02 20:10:37 +0200 (Sa, 02 Mai 2009) | 1 line
  
  make py3k compat code explicitly on
................
  r72207 | gregory.p.smith | 2009-05-02 20:35:58 +0200 (Sa, 02 Mai 2009) | 5 lines
  
  ipaddr cleanup for python 3.x:
  * Get rid of __hex__.
  * Support bytearray as well as bytes.
  * Don't double test for integer input.
................


Added:
   python/branches/pep-0383/Doc/library/ipaddr.rst
      - copied unchanged from r72207, /python/branches/py3k/Doc/library/ipaddr.rst
   python/branches/pep-0383/Lib/ipaddr.py
      - copied unchanged from r72207, /python/branches/py3k/Lib/ipaddr.py
   python/branches/pep-0383/Lib/test/test_ipaddr.py
      - copied unchanged from r72207, /python/branches/py3k/Lib/test/test_ipaddr.py
Modified:
   python/branches/pep-0383/   (props changed)
   python/branches/pep-0383/Doc/library/internet.rst
   python/branches/pep-0383/Doc/library/io.rst
   python/branches/pep-0383/Doc/library/json.rst
   python/branches/pep-0383/Doc/library/stdtypes.rst
   python/branches/pep-0383/Doc/library/test.rst
   python/branches/pep-0383/Lib/_pyio.py
   python/branches/pep-0383/Lib/io.py
   python/branches/pep-0383/Lib/json/__init__.py
   python/branches/pep-0383/Lib/json/decoder.py
   python/branches/pep-0383/Lib/json/encoder.py
   python/branches/pep-0383/Lib/json/scanner.py
   python/branches/pep-0383/Lib/json/tests/test_decode.py
   python/branches/pep-0383/Lib/json/tests/test_dump.py
   python/branches/pep-0383/Lib/json/tests/test_encode_basestring_ascii.py
   python/branches/pep-0383/Lib/json/tests/test_fail.py
   python/branches/pep-0383/Lib/json/tests/test_float.py
   python/branches/pep-0383/Lib/json/tests/test_scanstring.py
   python/branches/pep-0383/Lib/json/tests/test_unicode.py
   python/branches/pep-0383/Lib/json/tool.py
   python/branches/pep-0383/Lib/shutil.py
   python/branches/pep-0383/Lib/test/formatfloat_testcases.txt
   python/branches/pep-0383/Lib/test/string_tests.py
   python/branches/pep-0383/Lib/test/support.py
   python/branches/pep-0383/Lib/test/test_getopt.py
   python/branches/pep-0383/Lib/test/test_gettext.py
   python/branches/pep-0383/Lib/test/test_io.py
   python/branches/pep-0383/Lib/test/test_memoryio.py
   python/branches/pep-0383/Lib/test/test_ntpath.py
   python/branches/pep-0383/Lib/test/test_optparse.py
   python/branches/pep-0383/Lib/test/test_posixpath.py
   python/branches/pep-0383/Lib/test/test_shutil.py
   python/branches/pep-0383/Lib/test/test_tcl.py
   python/branches/pep-0383/Lib/test/test_tempfile.py
   python/branches/pep-0383/Lib/test/test_types.py
   python/branches/pep-0383/Lib/test/test_xmlrpc.py
   python/branches/pep-0383/Lib/urllib/request.py
   python/branches/pep-0383/Misc/NEWS
   python/branches/pep-0383/Modules/_io/bufferedio.c
   python/branches/pep-0383/Modules/_io/textio.c
   python/branches/pep-0383/Modules/_json.c
   python/branches/pep-0383/Modules/ld_so_aix
   python/branches/pep-0383/Objects/longobject.c
   python/branches/pep-0383/Objects/stringlib/formatter.h
   python/branches/pep-0383/Objects/stringlib/string_format.h
   python/branches/pep-0383/Objects/unicodeobject.c
   python/branches/pep-0383/PC/msvcrtmodule.c
   python/branches/pep-0383/Python/pystrtod.c

Modified: python/branches/pep-0383/Doc/library/internet.rst
==============================================================================
--- python/branches/pep-0383/Doc/library/internet.rst	(original)
+++ python/branches/pep-0383/Doc/library/internet.rst	Sat May  2 21:20:57 2009
@@ -37,6 +37,7 @@
    smtpd.rst
    telnetlib.rst
    uuid.rst
+   ipaddr.rst
    socketserver.rst
    http.server.rst
    http.cookies.rst

Modified: python/branches/pep-0383/Doc/library/io.rst
==============================================================================
--- python/branches/pep-0383/Doc/library/io.rst	(original)
+++ python/branches/pep-0383/Doc/library/io.rst	Sat May  2 21:20:57 2009
@@ -8,6 +8,7 @@
 .. moduleauthor:: Mark Russell <mark.russell at zen.co.uk>
 .. moduleauthor:: Antoine Pitrou <solipsis at pitrou.net>
 .. moduleauthor:: Amaury Forgeot d'Arc <amauryfa at gmail.com>
+.. moduleauthor:: Benjamin Peterson <benjamin at python.org>
 .. sectionauthor:: Benjamin Peterson <benjamin at python.org>
 
 The :mod:`io` module provides the Python interfaces to stream handling.  The
@@ -361,6 +362,19 @@
    :class:`BufferedIOBase` provides or overrides these methods in addition to
    those from :class:`IOBase`:
 
+   .. method:: detach()
+
+      Separate the underlying raw stream from the buffer and return it.
+
+      After the raw stream has been detached, the buffer is in an unusable
+      state.
+
+      Some buffers, like :class:`BytesIO`, do not have the concept of a single
+      raw stream to return from this method.  They raise
+      :exc:`UnsupportedOperation`.
+
+      .. versionadded:: 3.1
+
    .. method:: read([n])
 
       Read and return up to *n* bytes.  If the argument is omitted, ``None``, or
@@ -547,7 +561,9 @@
 
    *max_buffer_size* is unused and deprecated.
 
-   :class:`BufferedRWPair` implements all of :class:`BufferedIOBase`\'s methods.
+   :class:`BufferedRWPair` implements all of :class:`BufferedIOBase`\'s methods
+   except for :meth:`~BufferedIOBase.detach`, which raises
+   :exc:`UnsupportedOperation`.
 
 
 .. class:: BufferedRandom(raw[, buffer_size[, max_buffer_size]])
@@ -588,6 +604,19 @@
       A string, a tuple of strings, or ``None``, indicating the newlines
       translated so far.
 
+   .. method:: detach()
+
+      Separate the underlying buffer from the :class:`TextIOBase` and return it.
+
+      After the underlying buffer has been detached, the :class:`TextIOBase` is
+      in an unusable state.
+
+      Some :class:`TextIOBase` implementations, like :class:`StringIO`, may not
+      have the concept of an underlying buffer and calling this method will
+      raise :exc:`UnsupportedOperation`.
+
+      .. versionadded:: 3.1
+
    .. method:: read(n)
 
       Read and return at most *n* characters from the stream as a single

Modified: python/branches/pep-0383/Doc/library/json.rst
==============================================================================
--- python/branches/pep-0383/Doc/library/json.rst	(original)
+++ python/branches/pep-0383/Doc/library/json.rst	Sat May  2 21:20:57 2009
@@ -112,7 +112,7 @@
 Basic Usage
 -----------
 
-.. function:: dump(obj, fp[, skipkeys[, ensure_ascii[, check_circular[, allow_nan[, cls[, indent[, separators[, encoding[, default[, **kw]]]]]]]]]])
+.. function:: dump(obj, fp[, skipkeys[, ensure_ascii[, check_circular[, allow_nan[, cls[, indent[, separators[, default[, **kw]]]]]]]]]])
 
    Serialize *obj* as a JSON formatted stream to *fp* (a ``.write()``-supporting
    file-like object).
@@ -122,11 +122,10 @@
    :class:`float`, :class:`bool`, ``None``) will be skipped instead of raising a
    :exc:`TypeError`.
 
-   If *ensure_ascii* is ``False`` (default: ``True``), then some chunks written
-   to *fp* may be :class:`unicode` instances, subject to normal Python
-   :class:`str` to :class:`unicode` coercion rules.  Unless ``fp.write()``
-   explicitly understands :class:`unicode` (as in :func:`codecs.getwriter`) this
-   is likely to cause an error.
+   The :mod:`json` module always produces :class:`str` objects, not
+   :class:`bytes` objects. Therefore, ``fp.write()`` must support :class:`str`
+   input.
+
 
    If *check_circular* is ``False`` (default: ``True``), then the circular
    reference check for container types will be skipped and a circular reference
@@ -146,8 +145,6 @@
    will be used instead of the default ``(', ', ': ')`` separators.  ``(',',
    ':')`` is the most compact JSON representation.
 
-   *encoding* is the character encoding for str instances, default is UTF-8.
-
    *default(obj)* is a function that should return a serializable version of
    *obj* or raise :exc:`TypeError`.  The default simply raises :exc:`TypeError`.
 
@@ -156,26 +153,17 @@
    *cls* kwarg.
 
 
-.. function:: dumps(obj[, skipkeys[, ensure_ascii[, check_circular[, allow_nan[, cls[, indent[, separators[, encoding[, default[, **kw]]]]]]]]]])
+.. function:: dumps(obj[, skipkeys[, ensure_ascii[, check_circular[, allow_nan[, cls[, indent[, separators[, default[, **kw]]]]]]]]]])
 
-   Serialize *obj* to a JSON formatted :class:`str`.
+   Serialize *obj* to a JSON formatted :class:`str`.  The arguments have the
+   same meaning as in :func:`dump`.
 
-   If *ensure_ascii* is ``False``, then the return value will be a
-   :class:`unicode` instance.  The other arguments have the same meaning as in
-   :func:`dump`.
 
-
-.. function:: load(fp[, encoding[, cls[, object_hook[, parse_float[, parse_int[, parse_constant[, object_pairs_hook[, **kw]]]]]]]])
+.. function:: load(fp[, cls[, object_hook[, parse_float[, parse_int[, parse_constant[, object_pairs_hook[, **kw]]]]]]]])
 
    Deserialize *fp* (a ``.read()``-supporting file-like object containing a JSON
    document) to a Python object.
 
-   If the contents of *fp* are encoded with an ASCII based encoding other than
-   UTF-8 (e.g. latin-1), then an appropriate *encoding* name must be specified.
-   Encodings that are not ASCII based (such as UCS-2) are not allowed, and
-   should be wrapped with ``codecs.getreader(encoding)(fp)``, or simply decoded
-   to a :class:`unicode` object and passed to :func:`loads`.
-
    *object_hook* is an optional function that will be called with the result of
    any object literal decode (a :class:`dict`).  The return value of
    *object_hook* will be used instead of the :class:`dict`.  This feature can be used
@@ -241,7 +229,7 @@
    +---------------+-------------------+
    | array         | list              |
    +---------------+-------------------+
-   | string        | unicode           |
+   | string        | str               |
    +---------------+-------------------+
    | number (int)  | int               |
    +---------------+-------------------+
@@ -257,13 +245,6 @@
    It also understands ``NaN``, ``Infinity``, and ``-Infinity`` as their
    corresponding ``float`` values, which is outside the JSON spec.
 
-   *encoding* determines the encoding used to interpret any :class:`str` objects
-   decoded by this instance (UTF-8 by default).  It has no effect when decoding
-   :class:`unicode` objects.
-
-   Note that currently only encodings that are a superset of ASCII work, strings
-   of other encodings should be passed in as :class:`unicode`.
-
    *object_hook*, if specified, will be called with the result of every JSON
    object decoded and its return value will be used in place of the given
    :class:`dict`.  This can be used to provide custom deserializations (e.g. to
@@ -298,20 +279,20 @@
 
    .. method:: decode(s)
 
-      Return the Python representation of *s* (a :class:`str` or
-      :class:`unicode` instance containing a JSON document)
+      Return the Python representation of *s* (a :class:`str` instance
+      containing a JSON document)
 
    .. method:: raw_decode(s)
 
-      Decode a JSON document from *s* (a :class:`str` or :class:`unicode`
-      beginning with a JSON document) and return a 2-tuple of the Python
-      representation and the index in *s* where the document ended.
+      Decode a JSON document from *s* (a :class:`str` beginning with a
+      JSON document) and return a 2-tuple of the Python representation
+      and the index in *s* where the document ended.
 
       This can be used to decode a JSON document from a string that may have
       extraneous data at the end.
 
 
-.. class:: JSONEncoder([skipkeys[, ensure_ascii[, check_circular[, allow_nan[, sort_keys[, indent[, separators[, encoding[, default]]]]]]]]])
+.. class:: JSONEncoder([skipkeys[, ensure_ascii[, check_circular[, allow_nan[, sort_keys[, indent[, separators[, default]]]]]]]])
 
    Extensible JSON encoder for Python data structures.
 
@@ -324,7 +305,7 @@
    +-------------------+---------------+
    | list, tuple       | array         |
    +-------------------+---------------+
-   | str, unicode      | string        |
+   | str               | string        |
    +-------------------+---------------+
    | int, float        | number        |
    +-------------------+---------------+
@@ -344,9 +325,9 @@
    attempt encoding of keys that are not str, int, float or None.  If
    *skipkeys* is ``True``, such items are simply skipped.
 
-   If *ensure_ascii* is ``True`` (the default), the output is guaranteed to be
-   :class:`str` objects with all incoming unicode characters escaped.  If
-   *ensure_ascii* is ``False``, the output will be a unicode object.
+   If *ensure_ascii* is ``True`` (the default), the output is guaranteed to
+   have all incoming non-ASCII characters escaped.  If *ensure_ascii* is
+   ``False``, these characters will be output as-is.
 
    If *check_circular* is ``True`` (the default), then lists, dicts, and custom
    encoded objects will be checked for circular references during encoding to
@@ -376,10 +357,6 @@
    otherwise be serialized.  It should return a JSON encodable version of the
    object or raise a :exc:`TypeError`.
 
-   If *encoding* is not ``None``, then all input strings will be transformed
-   into unicode using that encoding prior to JSON-encoding.  The default is
-   UTF-8.
-
 
    .. method:: default(o)
 

Modified: python/branches/pep-0383/Doc/library/stdtypes.rst
==============================================================================
--- python/branches/pep-0383/Doc/library/stdtypes.rst	(original)
+++ python/branches/pep-0383/Doc/library/stdtypes.rst	Sat May  2 21:20:57 2009
@@ -1321,9 +1321,9 @@
 
 .. XXX Examples?
 
-For safety reasons, floating point precisions are clipped to 50; ``%f``
-conversions for numbers whose absolute value is over 1e50 are replaced by ``%g``
-conversions. [#]_  All other errors raise exceptions.
+.. versionchanged:: 3.1
+   ``%f`` conversions for numbers whose absolute value is over 1e50 are no
+   longer replaced by ``%g`` conversions.
 
 .. index::
    module: string
@@ -2723,10 +2723,6 @@
 .. [#] To format only a tuple you should therefore provide a singleton tuple whose only
    element is the tuple to be formatted.
 
-.. [#] These numbers are fairly arbitrary.  They are intended to avoid printing endless
-   strings of meaningless digits without hampering correct use and without having
-   to know the exact precision of floating point values on a particular machine.
-
 .. [#] The advantage of leaving the newline on is that returning an empty string is
    then an unambiguous EOF indication.  It is also possible (in cases where it
    might matter, for example, if you want to make an exact copy of a file while

Modified: python/branches/pep-0383/Doc/library/test.rst
==============================================================================
--- python/branches/pep-0383/Doc/library/test.rst	(original)
+++ python/branches/pep-0383/Doc/library/test.rst	Sat May  2 21:20:57 2009
@@ -384,8 +384,13 @@
 .. class:: EnvironmentVarGuard()
 
    Class used to temporarily set or unset environment variables.  Instances can be
-   used as a context manager.
+   used as a context manager and have a complete dictionary interface for
+   querying/modifying the underlying ``os.environ``. After exit from the context
+   manager all changes to environment variables done through this instance will
+   be rolled back.
 
+   .. versionchanged:: 2.7
+      Added dictionary interface.
 
 .. method:: EnvironmentVarGuard.set(envvar, value)
 
@@ -396,6 +401,7 @@
 
    Temporarily unset the environment variable ``envvar``.
 
+
 .. class:: WarningsRecorder()
 
    Class used to record warnings for unit tests. See documentation of

Modified: python/branches/pep-0383/Lib/_pyio.py
==============================================================================
--- python/branches/pep-0383/Lib/_pyio.py	(original)
+++ python/branches/pep-0383/Lib/_pyio.py	Sat May  2 21:20:57 2009
@@ -642,6 +642,15 @@
         """
         self._unsupported("write")
 
+    def detach(self) -> None:
+        """
+        Separate the underlying raw stream from the buffer and return it.
+
+        After the raw stream has been detached, the buffer is in an unusable
+        state.
+        """
+        self._unsupported("detach")
+
 io.BufferedIOBase.register(BufferedIOBase)
 
 
@@ -689,13 +698,21 @@
         self.raw.flush()
 
     def close(self):
-        if not self.closed:
+        if not self.closed and self.raw is not None:
             try:
                 self.flush()
             except IOError:
                 pass  # If flush() fails, just give up
             self.raw.close()
 
+    def detach(self):
+        if self.raw is None:
+            raise ValueError("raw stream already detached")
+        self.flush()
+        raw = self.raw
+        self.raw = None
+        return raw
+
     ### Inquiries ###
 
     def seekable(self):
@@ -1236,6 +1253,15 @@
         """
         self._unsupported("readline")
 
+    def detach(self) -> None:
+        """
+        Separate the underlying buffer from the TextIOBase and return it.
+
+        After the underlying buffer has been detached, the TextIO is in an
+        unusable state.
+        """
+        self._unsupported("detach")
+
     @property
     def encoding(self):
         """Subclasses should override."""
@@ -1448,11 +1474,12 @@
         self._telling = self._seekable
 
     def close(self):
-        try:
-            self.flush()
-        except IOError:
-            pass  # If flush() fails, just give up
-        self.buffer.close()
+        if self.buffer is not None:
+            try:
+                self.flush()
+            except IOError:
+                pass  # If flush() fails, just give up
+            self.buffer.close()
 
     @property
     def closed(self):
@@ -1647,6 +1674,14 @@
         self.seek(pos)
         return self.buffer.truncate()
 
+    def detach(self):
+        if self.buffer is None:
+            raise ValueError("buffer is already detached")
+        self.flush()
+        buffer = self.buffer
+        self.buffer = None
+        return buffer
+
     def seek(self, cookie, whence=0):
         if self.closed:
             raise ValueError("tell on closed file")
@@ -1865,3 +1900,7 @@
     @property
     def encoding(self):
         return None
+
+    def detach(self):
+        # This doesn't make sense on StringIO.
+        self._unsupported("detach")

Modified: python/branches/pep-0383/Lib/io.py
==============================================================================
--- python/branches/pep-0383/Lib/io.py	(original)
+++ python/branches/pep-0383/Lib/io.py	Sat May  2 21:20:57 2009
@@ -47,7 +47,8 @@
               "Mike Verdone <mike.verdone at gmail.com>, "
               "Mark Russell <mark.russell at zen.co.uk>, "
               "Antoine Pitrou <solipsis at pitrou.net>, "
-              "Amaury Forgeot d'Arc <amauryfa at gmail.com>")
+              "Amaury Forgeot d'Arc <amauryfa at gmail.com>, "
+              "Benjamin Peterson <benjamin at python.org>")
 
 __all__ = ["BlockingIOError", "open", "IOBase", "RawIOBase", "FileIO",
            "BytesIO", "StringIO", "BufferedIOBase",

Modified: python/branches/pep-0383/Lib/json/__init__.py
==============================================================================
--- python/branches/pep-0383/Lib/json/__init__.py	(original)
+++ python/branches/pep-0383/Lib/json/__init__.py	Sat May  2 21:20:57 2009
@@ -1,11 +1,13 @@
-r"""A simple, fast, extensible JSON encoder and decoder
-
-JSON (JavaScript Object Notation) <http://json.org> is a subset of
+r"""JSON (JavaScript Object Notation) <http://json.org> is a subset of
 JavaScript syntax (ECMA-262 3rd edition) used as a lightweight data
 interchange format.
 
-json exposes an API familiar to uses of the standard library
-marshal and pickle modules.
+:mod:`json` exposes an API familiar to users of the standard library
+:mod:`marshal` and :mod:`pickle` modules. It is the externally maintained
+version of the :mod:`json` library contained in Python 2.6, but maintains
+compatibility with Python 2.4 and Python 2.5 and (currently) has
+significant performance advantages, even without using the optional C
+extension for speedups.
 
 Encoding basic Python object hierarchies::
 
@@ -32,23 +34,28 @@
     >>> json.dumps([1,2,3,{'4': 5, '6': 7}], separators=(',',':'))
     '[1,2,3,{"4":5,"6":7}]'
 
-Pretty printing (using repr() because of extraneous whitespace in the output)::
+Pretty printing::
 
     >>> import json
-    >>> print(repr(json.dumps({'4': 5, '6': 7}, sort_keys=True, indent=4)))
-    '{\n    "4": 5, \n    "6": 7\n}'
+    >>> s = json.dumps({'4': 5, '6': 7}, sort_keys=True, indent=4)
+    >>> print('\n'.join([l.rstrip() for l in  s.splitlines()]))
+    {
+        "4": 5,
+        "6": 7
+    }
 
 Decoding JSON::
 
     >>> import json
-    >>> json.loads('["foo", {"bar":["baz", null, 1.0, 2]}]')
-    ['foo', {'bar': ['baz', None, 1.0, 2]}]
-    >>> json.loads('"\\"foo\\bar"')
-    '"foo\x08ar'
+    >>> obj = ['foo', {'bar': ['baz', None, 1.0, 2]}]
+    >>> json.loads('["foo", {"bar":["baz", null, 1.0, 2]}]') == obj
+    True
+    >>> json.loads('"\\"foo\\bar"') == '"foo\x08ar'
+    True
     >>> from io import StringIO
     >>> io = StringIO('["streaming API"]')
-    >>> json.load(io)
-    ['streaming API']
+    >>> json.load(io)[0] == 'streaming API'
+    True
 
 Specializing JSON object decoding::
 
@@ -61,43 +68,36 @@
     >>> json.loads('{"__complex__": true, "real": 1, "imag": 2}',
     ...     object_hook=as_complex)
     (1+2j)
-    >>> import decimal
-    >>> json.loads('1.1', parse_float=decimal.Decimal)
-    Decimal('1.1')
+    >>> from decimal import Decimal
+    >>> json.loads('1.1', parse_float=Decimal) == Decimal('1.1')
+    True
 
-Extending JSONEncoder::
+Specializing JSON object encoding::
 
     >>> import json
-    >>> class ComplexEncoder(json.JSONEncoder):
-    ...     def default(self, obj):
-    ...         if isinstance(obj, complex):
-    ...             return [obj.real, obj.imag]
-    ...         return json.JSONEncoder.default(self, obj)
+    >>> def encode_complex(obj):
+    ...     if isinstance(obj, complex):
+    ...         return [obj.real, obj.imag]
+    ...     raise TypeError(repr(o) + " is not JSON serializable")
     ...
-    >>> dumps(2 + 1j, cls=ComplexEncoder)
+    >>> json.dumps(2 + 1j, default=encode_complex)
+    '[2.0, 1.0]'
+    >>> json.JSONEncoder(default=encode_complex).encode(2 + 1j)
     '[2.0, 1.0]'
-    >>> ComplexEncoder().encode(2 + 1j)
+    >>> ''.join(json.JSONEncoder(default=encode_complex).iterencode(2 + 1j))
     '[2.0, 1.0]'
-    >>> list(ComplexEncoder().iterencode(2 + 1j))
-    ['[', '2.0', ', ', '1.0', ']']
 
 
-Using json.tool from the shell to validate and
-pretty-print::
+Using json.tool from the shell to validate and pretty-print::
 
-    $ echo '{"json":"obj"}' | python -mjson.tool
+    $ echo '{"json":"obj"}' | python -m json.tool
     {
         "json": "obj"
     }
-    $ echo '{ 1.2:3.4}' | python -mjson.tool
+    $ echo '{ 1.2:3.4}' | python -m json.tool
     Expecting property name: line 1 column 2 (char 2)
-
-Note that the JSON produced by this module's default settings
-is a subset of YAML, so it may be used as a serializer for that as well.
-
 """
-
-__version__ = '1.9'
+__version__ = '2.0.9'
 __all__ = [
     'dump', 'dumps', 'load', 'loads',
     'JSONDecoder', 'JSONEncoder',
@@ -115,45 +115,43 @@
     allow_nan=True,
     indent=None,
     separators=None,
-    encoding='utf-8',
     default=None,
 )
 
 def dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True,
         allow_nan=True, cls=None, indent=None, separators=None,
-        encoding='utf-8', default=None, **kw):
+        default=None, **kw):
     """Serialize ``obj`` as a JSON formatted stream to ``fp`` (a
     ``.write()``-supporting file-like object).
 
-    If ``skipkeys`` is ``True`` then ``dict`` keys that are not basic types
-    (``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
-    will be skipped instead of raising a ``TypeError``.
+    If ``skipkeys`` is true then ``dict`` keys that are not basic types
+    (``str``, ``unicode``, ``int``, ``float``, ``bool``, ``None``) will be
+    skipped instead of raising a ``TypeError``.
 
-    If ``ensure_ascii`` is ``False``, then the some chunks written to ``fp``
+    If ``ensure_ascii`` is false, then some of the chunks written to ``fp``
     may be ``unicode`` instances, subject to normal Python ``str`` to
     ``unicode`` coercion rules. Unless ``fp.write()`` explicitly
     understands ``unicode`` (as in ``codecs.getwriter()``) this is likely
     to cause an error.
 
-    If ``check_circular`` is ``False``, then the circular reference check
+    If ``check_circular`` is false, then the circular reference check
     for container types will be skipped and a circular reference will
     result in an ``OverflowError`` (or worse).
 
-    If ``allow_nan`` is ``False``, then it will be a ``ValueError`` to
+    If ``allow_nan`` is false, then it will be a ``ValueError`` to
     serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``)
     in strict compliance of the JSON specification, instead of using the
     JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
 
-    If ``indent`` is a non-negative integer, then JSON array elements and object
-    members will be pretty-printed with that indent level. An indent level
-    of 0 will only insert newlines. ``None`` is the most compact representation.
+    If ``indent`` is a non-negative integer, then JSON array elements and
+    object members will be pretty-printed with that indent level. An indent
+    level of 0 will only insert newlines. ``None`` is the most compact
+    representation.
 
     If ``separators`` is an ``(item_separator, dict_separator)`` tuple
     then it will be used instead of the default ``(', ', ': ')`` separators.
     ``(',', ':')`` is the most compact JSON representation.
 
-    ``encoding`` is the character encoding for str instances, default is UTF-8.
-
     ``default(obj)`` is a function that should return a serializable version
     of obj or raise TypeError. The default simply raises TypeError.
 
@@ -163,17 +161,17 @@
 
     """
     # cached encoder
-    if (skipkeys is False and ensure_ascii is True and
-        check_circular is True and allow_nan is True and
+    if (not skipkeys and ensure_ascii and
+        check_circular and allow_nan and
         cls is None and indent is None and separators is None and
-        encoding == 'utf-8' and default is None and not kw):
+        default is None and not kw):
         iterable = _default_encoder.iterencode(obj)
     else:
         if cls is None:
             cls = JSONEncoder
         iterable = cls(skipkeys=skipkeys, ensure_ascii=ensure_ascii,
             check_circular=check_circular, allow_nan=allow_nan, indent=indent,
-            separators=separators, encoding=encoding,
+            separators=separators,
             default=default, **kw).iterencode(obj)
     # could accelerate with writelines in some versions of Python, at
     # a debuggability cost
@@ -183,22 +181,22 @@
 
 def dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True,
         allow_nan=True, cls=None, indent=None, separators=None,
-        encoding='utf-8', default=None, **kw):
+        default=None, **kw):
     """Serialize ``obj`` to a JSON formatted ``str``.
 
-    If ``skipkeys`` is ``True`` then ``dict`` keys that are not basic types
-    (``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
-    will be skipped instead of raising a ``TypeError``.
+    If ``skipkeys`` is true then ``dict`` keys that are not basic types
+    (``str``, ``unicode``, ``int``, ``float``, ``bool``, ``None``) will be
+    skipped instead of raising a ``TypeError``.
 
-    If ``ensure_ascii`` is ``False``, then the return value will be a
+    If ``ensure_ascii`` is false, then the return value will be a
     ``unicode`` instance subject to normal Python ``str`` to ``unicode``
     coercion rules instead of being escaped to an ASCII ``str``.
 
-    If ``check_circular`` is ``False``, then the circular reference check
+    If ``check_circular`` is false, then the circular reference check
     for container types will be skipped and a circular reference will
     result in an ``OverflowError`` (or worse).
 
-    If ``allow_nan`` is ``False``, then it will be a ``ValueError`` to
+    If ``allow_nan`` is false, then it will be a ``ValueError`` to
     serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) in
     strict compliance of the JSON specification, instead of using the
     JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
@@ -212,8 +210,6 @@
     then it will be used instead of the default ``(', ', ': ')`` separators.
     ``(',', ':')`` is the most compact JSON representation.
 
-    ``encoding`` is the character encoding for str instances, default is UTF-8.
-
     ``default(obj)`` is a function that should return a serializable version
     of obj or raise TypeError. The default simply raises TypeError.
 
@@ -223,35 +219,27 @@
 
     """
     # cached encoder
-    if (skipkeys is False and ensure_ascii is True and
-        check_circular is True and allow_nan is True and
+    if (not skipkeys and ensure_ascii and
+        check_circular and allow_nan and
         cls is None and indent is None and separators is None and
-        encoding == 'utf-8' and default is None and not kw):
+        default is None and not kw):
         return _default_encoder.encode(obj)
     if cls is None:
         cls = JSONEncoder
     return cls(
         skipkeys=skipkeys, ensure_ascii=ensure_ascii,
         check_circular=check_circular, allow_nan=allow_nan, indent=indent,
-        separators=separators, encoding=encoding, default=default,
+        separators=separators, default=default,
         **kw).encode(obj)
 
 
-_default_decoder = JSONDecoder(encoding=None, object_hook=None,
-                               object_pairs_hook=None)
+_default_decoder = JSONDecoder(object_hook=None, object_pairs_hook=None)
 
 
-def load(fp, encoding=None, cls=None, object_hook=None, parse_float=None,
+def load(fp, cls=None, object_hook=None, parse_float=None,
         parse_int=None, parse_constant=None, object_pairs_hook=None, **kw):
-    """Deserialize ``fp`` (a ``.read()``-supporting file-like object
-    containing a JSON document) to a Python object.
-
-    If the contents of ``fp`` is encoded with an ASCII based encoding other
-    than utf-8 (e.g. latin-1), then an appropriate ``encoding`` name must
-    be specified. Encodings that are not ASCII based (such as UCS-2) are
-    not allowed, and should be wrapped with
-    ``codecs.getreader(fp)(encoding)``, or simply decoded to a ``unicode``
-    object and passed to ``loads()``
+    """Deserialize ``fp`` (a ``.read()``-supporting file-like object containing
+    a JSON document) to a Python object.
 
     ``object_hook`` is an optional function that will be called with the
     result of any object literal decode (a ``dict``). The return value of
@@ -263,21 +251,16 @@
 
     """
     return loads(fp.read(),
-        encoding=encoding, cls=cls, object_hook=object_hook,
+        cls=cls, object_hook=object_hook,
         parse_float=parse_float, parse_int=parse_int,
         parse_constant=parse_constant, object_pairs_hook=object_pairs_hook, **kw)
 
 
 def loads(s, encoding=None, cls=None, object_hook=None, parse_float=None,
         parse_int=None, parse_constant=None, object_pairs_hook=None, **kw):
-    """Deserialize ``s`` (a ``str`` or ``unicode`` instance containing a JSON
+    """Deserialize ``s`` (a ``str`` instance containing a JSON
     document) to a Python object.
 
-    If ``s`` is a ``str`` instance and is encoded with an ASCII based encoding
-    other than utf-8 (e.g. latin-1) then an appropriate ``encoding`` name
-    must be specified. Encodings that are not ASCII based (such as UCS-2)
-    are not allowed and should be decoded to ``unicode`` first.
-
     ``object_hook`` is an optional function that will be called with the
     result of any object literal decode (a ``dict``). The return value of
     ``object_hook`` will be used instead of the ``dict``. This feature
@@ -302,7 +285,7 @@
     kwarg.
 
     """
-    if (cls is None and encoding is None and object_hook is None and
+    if (cls is None and object_hook is None and
             parse_int is None and parse_float is None and
             parse_constant is None and object_pairs_hook is None and not kw):
         return _default_decoder.decode(s)
@@ -318,4 +301,4 @@
         kw['parse_int'] = parse_int
     if parse_constant is not None:
         kw['parse_constant'] = parse_constant
-    return cls(encoding=encoding, **kw).decode(s)
+    return cls(**kw).decode(s)

Modified: python/branches/pep-0383/Lib/json/decoder.py
==============================================================================
--- python/branches/pep-0383/Lib/json/decoder.py	(original)
+++ python/branches/pep-0383/Lib/json/decoder.py	Sat May  2 21:20:57 2009
@@ -1,10 +1,11 @@
 """Implementation of JSONDecoder
 """
-
+import binascii
 import re
 import sys
+import struct
 
-from json.scanner import Scanner, pattern
+from json.scanner import make_scanner
 try:
     from _json import scanstring as c_scanstring
 except ImportError:
@@ -14,7 +15,14 @@
 
 FLAGS = re.VERBOSE | re.MULTILINE | re.DOTALL
 
-NaN, PosInf, NegInf = float('nan'), float('inf'), float('-inf')
+def _floatconstants():
+    _BYTES = binascii.unhexlify(b'7FF80000000000007FF0000000000000')
+    if sys.byteorder != 'big':
+        _BYTES = _BYTES[:8][::-1] + _BYTES[8:][::-1]
+    nan, inf = struct.unpack('dd', _BYTES)
+    return nan, inf, -inf
+
+NaN, PosInf, NegInf = _floatconstants()
 
 
 def linecol(doc, pos):
@@ -31,61 +39,43 @@
 
 
 def errmsg(msg, doc, pos, end=None):
+    # Note that this function is called from _json
     lineno, colno = linecol(doc, pos)
     if end is None:
         fmt = '{0}: line {1} column {2} (char {3})'
         return fmt.format(msg, lineno, colno, pos)
+        #fmt = '%s: line %d column %d (char %d)'
+        #return fmt % (msg, lineno, colno, pos)
     endlineno, endcolno = linecol(doc, end)
     fmt = '{0}: line {1} column {2} - line {3} column {4} (char {5} - {6})'
     return fmt.format(msg, lineno, colno, endlineno, endcolno, pos, end)
+    #fmt = '%s: line %d column %d - line %d column %d (char %d - %d)'
+    #return fmt % (msg, lineno, colno, endlineno, endcolno, pos, end)
 
 
 _CONSTANTS = {
     '-Infinity': NegInf,
     'Infinity': PosInf,
     'NaN': NaN,
-    'true': True,
-    'false': False,
-    'null': None,
 }
 
 
-def JSONConstant(match, context, c=_CONSTANTS):
-    s = match.group(0)
-    fn = getattr(context, 'parse_constant', None)
-    if fn is None:
-        rval = c[s]
-    else:
-        rval = fn(s)
-    return rval, None
-pattern('(-?Infinity|NaN|true|false|null)')(JSONConstant)
-
-
-def JSONNumber(match, context):
-    match = JSONNumber.regex.match(match.string, *match.span())
-    integer, frac, exp = match.groups()
-    if frac or exp:
-        fn = getattr(context, 'parse_float', None) or float
-        res = fn(integer + (frac or '') + (exp or ''))
-    else:
-        fn = getattr(context, 'parse_int', None) or int
-        res = fn(integer)
-    return res, None
-pattern(r'(-?(?:0|[1-9][0-9]*))(\.[0-9]+)?([eE][-+]?[0-9]+)?')(JSONNumber)
-
-
 STRINGCHUNK = re.compile(r'(.*?)(["\\\x00-\x1f])', FLAGS)
 BACKSLASH = {
     '"': '"', '\\': '\\', '/': '/',
     'b': '\b', 'f': '\f', 'n': '\n', 'r': '\r', 't': '\t',
 }
 
-DEFAULT_ENCODING = "utf-8"
-
+def py_scanstring(s, end, strict=True,
+        _b=BACKSLASH, _m=STRINGCHUNK.match):
+    """Scan the string s for a JSON string. End is the index of the
+    character in s after the quote that started the JSON string.
+    Unescapes all valid JSON string escape sequences and raises ValueError
+    on attempt to decode an invalid string. If strict is False then literal
+    control characters are allowed in the string.
 
-def py_scanstring(s, end, encoding=None, strict=True, _b=BACKSLASH, _m=STRINGCHUNK.match):
-    if encoding is None:
-        encoding = DEFAULT_ENCODING
+    Returns a tuple of the decoded string and the index of the character in s
+    after the end quote."""
     chunks = []
     _append = chunks.append
     begin = end - 1
@@ -96,14 +86,16 @@
                 errmsg("Unterminated string starting at", s, begin))
         end = chunk.end()
         content, terminator = chunk.groups()
+        # Content contains zero or more unescaped string characters
         if content:
-            if not isinstance(content, str):
-                content = str(content, encoding)
             _append(content)
+        # Terminator is the end of string, a literal control character,
+        # or a backslash denoting that an escape sequence follows
         if terminator == '"':
             break
         elif terminator != '\\':
             if strict:
+                #msg = "Invalid control character %r at" % (terminator,)
                 msg = "Invalid control character {0!r} at".format(terminator)
                 raise ValueError(errmsg(msg, s, end))
             else:
@@ -114,9 +106,10 @@
         except IndexError:
             raise ValueError(
                 errmsg("Unterminated string starting at", s, begin))
+        # If not a unicode escape sequence, must be in the lookup table
         if esc != 'u':
             try:
-                m = _b[esc]
+                char = _b[esc]
             except KeyError:
                 msg = "Invalid \\escape: {0!r}".format(esc)
                 raise ValueError(errmsg(msg, s, end))
@@ -124,131 +117,138 @@
         else:
             esc = s[end + 1:end + 5]
             next_end = end + 5
-            msg = "Invalid \\uXXXX escape"
-            try:
-                if len(esc) != 4:
-                    raise ValueError
-                uni = int(esc, 16)
-                if 0xd800 <= uni <= 0xdbff and sys.maxunicode > 65535:
-                    msg = "Invalid \\uXXXX\\uXXXX surrogate pair"
-                    if not s[end + 5:end + 7] == '\\u':
-                        raise ValueError
-                    esc2 = s[end + 7:end + 11]
-                    if len(esc2) != 4:
-                        raise ValueError
-                    uni2 = int(esc2, 16)
-                    uni = 0x10000 + (((uni - 0xd800) << 10) | (uni2 - 0xdc00))
-                    next_end += 6
-                m = chr(uni)
-            except ValueError:
+            if len(esc) != 4:
+                msg = "Invalid \\uXXXX escape"
                 raise ValueError(errmsg(msg, s, end))
+            uni = int(esc, 16)
+            # Check for surrogate pair on UCS-4 systems
+            if 0xd800 <= uni <= 0xdbff and sys.maxunicode > 65535:
+                msg = "Invalid \\uXXXX\\uXXXX surrogate pair"
+                if not s[end + 5:end + 7] == '\\u':
+                    raise ValueError(errmsg(msg, s, end))
+                esc2 = s[end + 7:end + 11]
+                if len(esc2) != 4:
+                    raise ValueError(errmsg(msg, s, end))
+                uni2 = int(esc2, 16)
+                uni = 0x10000 + (((uni - 0xd800) << 10) | (uni2 - 0xdc00))
+                next_end += 6
+            char = chr(uni)
+
             end = next_end
-        _append(m)
+        _append(char)
     return ''.join(chunks), end
 
 
-# Use speedup
-if c_scanstring is not None:
-    scanstring = c_scanstring
-else:
-    scanstring = py_scanstring
-
-def JSONString(match, context):
-    encoding = getattr(context, 'encoding', None)
-    strict = getattr(context, 'strict', True)
-    return scanstring(match.string, match.end(), encoding, strict)
-pattern(r'"')(JSONString)
+# Use speedup if available
+scanstring = c_scanstring or py_scanstring
 
+WHITESPACE = re.compile(r'[ \t\n\r]*', FLAGS)
+WHITESPACE_STR = ' \t\n\r'
 
-WHITESPACE = re.compile(r'\s*', FLAGS)
 
-
-def JSONObject(match, context, _w=WHITESPACE.match):
+def JSONObject(s_and_end, strict, scan_once, object_hook, object_pairs_hook,
+        _w=WHITESPACE.match, _ws=WHITESPACE_STR):
+    s, end = s_and_end
     pairs = []
     pairs_append = pairs.append
-    s = match.string
-    end = _w(s, match.end()).end()
+    # Use a slice to prevent IndexError from being raised, the following
+    # check will raise a more specific ValueError if the string is empty
     nextchar = s[end:end + 1]
-    # Trivial empty object
-    if nextchar == '}':
-        return pairs, end + 1
+    # Normally we expect nextchar == '"'
     if nextchar != '"':
-        raise ValueError(errmsg("Expecting property name", s, end))
+        if nextchar in _ws:
+            end = _w(s, end).end()
+            nextchar = s[end:end + 1]
+        # Trivial empty object
+        if nextchar == '}':
+            return pairs, end + 1
+        elif nextchar != '"':
+            raise ValueError(errmsg("Expecting property name", s, end))
     end += 1
-    encoding = getattr(context, 'encoding', None)
-    strict = getattr(context, 'strict', True)
-    iterscan = JSONScanner.iterscan
     while True:
-        key, end = scanstring(s, end, encoding, strict)
-        end = _w(s, end).end()
+        key, end = scanstring(s, end, strict)
+        # To skip some function call overhead we optimize the fast paths where
+        # the JSON key separator is ": " or just ":".
         if s[end:end + 1] != ':':
-            raise ValueError(errmsg("Expecting : delimiter", s, end))
-        end = _w(s, end + 1).end()
+            end = _w(s, end).end()
+            if s[end:end + 1] != ':':
+                raise ValueError(errmsg("Expecting : delimiter", s, end))
+        end += 1
+
         try:
-            value, end = next(iterscan(s, idx=end, context=context))
+            if s[end] in _ws:
+                end += 1
+                if s[end] in _ws:
+                    end = _w(s, end + 1).end()
+        except IndexError:
+            pass
+
+        try:
+            value, end = scan_once(s, end)
         except StopIteration:
             raise ValueError(errmsg("Expecting object", s, end))
         pairs_append((key, value))
-        end = _w(s, end).end()
-        nextchar = s[end:end + 1]
+        try:
+            nextchar = s[end]
+            if nextchar in _ws:
+                end = _w(s, end + 1).end()
+                nextchar = s[end]
+        except IndexError:
+            nextchar = ''
         end += 1
+
         if nextchar == '}':
             break
-        if nextchar != ',':
+        elif nextchar != ',':
             raise ValueError(errmsg("Expecting , delimiter", s, end - 1))
         end = _w(s, end).end()
         nextchar = s[end:end + 1]
         end += 1
         if nextchar != '"':
             raise ValueError(errmsg("Expecting property name", s, end - 1))
-    object_pairs_hook = getattr(context, 'object_pairs_hook', None)
     if object_pairs_hook is not None:
         result = object_pairs_hook(pairs)
         return result, end
     pairs = dict(pairs)
-    object_hook = getattr(context, 'object_hook', None)
     if object_hook is not None:
         pairs = object_hook(pairs)
     return pairs, end
-pattern(r'{')(JSONObject)
-
 
-def JSONArray(match, context, _w=WHITESPACE.match):
+def JSONArray(s_and_end, scan_once, _w=WHITESPACE.match, _ws=WHITESPACE_STR):
+    s, end = s_and_end
     values = []
-    s = match.string
-    end = _w(s, match.end()).end()
-    # Look-ahead for trivial empty array
     nextchar = s[end:end + 1]
+    if nextchar in _ws:
+        end = _w(s, end + 1).end()
+        nextchar = s[end:end + 1]
+    # Look-ahead for trivial empty array
     if nextchar == ']':
         return values, end + 1
-    iterscan = JSONScanner.iterscan
+    _append = values.append
     while True:
         try:
-            value, end = next(iterscan(s, idx=end, context=context))
+            value, end = scan_once(s, end)
         except StopIteration:
             raise ValueError(errmsg("Expecting object", s, end))
-        values.append(value)
-        end = _w(s, end).end()
+        _append(value)
         nextchar = s[end:end + 1]
+        if nextchar in _ws:
+            end = _w(s, end + 1).end()
+            nextchar = s[end:end + 1]
         end += 1
         if nextchar == ']':
             break
-        if nextchar != ',':
+        elif nextchar != ',':
             raise ValueError(errmsg("Expecting , delimiter", s, end))
-        end = _w(s, end).end()
-    return values, end
-pattern(r'\[')(JSONArray)
-
-
-ANYTHING = [
-    JSONObject,
-    JSONArray,
-    JSONString,
-    JSONConstant,
-    JSONNumber,
-]
+        try:
+            if s[end] in _ws:
+                end += 1
+                if s[end] in _ws:
+                    end = _w(s, end + 1).end()
+        except IndexError:
+            pass
 
-JSONScanner = Scanner(ANYTHING)
+    return values, end
 
 
 class JSONDecoder(object):
@@ -278,23 +278,14 @@
 
     It also understands ``NaN``, ``Infinity``, and ``-Infinity`` as
     their corresponding ``float`` values, which is outside the JSON spec.
-    """
 
-    _scanner = Scanner(ANYTHING)
-    __all__ = ['__init__', 'decode', 'raw_decode']
+    """
 
-    def __init__(self, encoding=None, object_hook=None, parse_float=None,
+    def __init__(self, object_hook=None, parse_float=None,
             parse_int=None, parse_constant=None, strict=True,
             object_pairs_hook=None):
-        """``encoding`` determines the encoding used to interpret any ``str``
-        objects decoded by this instance (utf-8 by default).  It has no
-        effect when decoding ``unicode`` objects.
-
-        Note that currently only encodings that are a superset of ASCII work,
-        strings of other encodings should be passed in as ``unicode``.
-
-        ``object_hook``, if specified, will be called with the result of
-        every JSON object decoded and its return value will be used in
+        """``object_hook``, if specified, will be called with the result
+        of every JSON object decoded and its return value will be used in
         place of the given ``dict``.  This can be used to provide custom
         deserializations (e.g. to support JSON-RPC class hinting).
 
@@ -309,22 +300,25 @@
         for JSON integers (e.g. float).
 
         ``parse_constant``, if specified, will be called with one of the
-        following strings: -Infinity, Infinity, NaN, null, true, false.
+        following strings: -Infinity, Infinity, NaN.
         This can be used to raise an exception if invalid JSON numbers
         are encountered.
 
         """
-        self.encoding = encoding
         self.object_hook = object_hook
-        self.object_pairs_hook = object_pairs_hook
-        self.parse_float = parse_float
-        self.parse_int = parse_int
-        self.parse_constant = parse_constant
+        self.parse_float = parse_float or float
+        self.parse_int = parse_int or int
+        self.parse_constant = parse_constant or _CONSTANTS.__getitem__
         self.strict = strict
+        self.object_pairs_hook = object_pairs_hook
+        self.parse_object = JSONObject
+        self.parse_array = JSONArray
+        self.parse_string = scanstring
+        self.scan_once = make_scanner(self)
+
 
     def decode(self, s, _w=WHITESPACE.match):
-        """
-        Return the Python representation of ``s`` (a ``str`` or ``unicode``
+        """Return the Python representation of ``s`` (a ``str`` or ``unicode``
         instance containing a JSON document)
 
         """
@@ -334,18 +328,17 @@
             raise ValueError(errmsg("Extra data", s, end, len(s)))
         return obj
 
-    def raw_decode(self, s, **kw):
-        """Decode a JSON document from ``s`` (a ``str`` or ``unicode`` beginning
-        with a JSON document) and return a 2-tuple of the Python
+    def raw_decode(self, s, idx=0):
+        """Decode a JSON document from ``s`` (a ``str`` or ``unicode``
+        beginning with a JSON document) and return a 2-tuple of the Python
         representation and the index in ``s`` where the document ended.
 
         This can be used to decode a JSON document from a string that may
         have extraneous data at the end.
 
         """
-        kw.setdefault('context', self)
         try:
-            obj, end = next(self._scanner.iterscan(s, **kw))
+            obj, end = self.scan_once(s, idx)
         except StopIteration:
             raise ValueError("No JSON object could be decoded")
         return obj, end

Modified: python/branches/pep-0383/Lib/json/encoder.py
==============================================================================
--- python/branches/pep-0383/Lib/json/encoder.py	(original)
+++ python/branches/pep-0383/Lib/json/encoder.py	Sat May  2 21:20:57 2009
@@ -1,19 +1,19 @@
 """Implementation of JSONEncoder
 """
-
 import re
-import math
 
 try:
     from _json import encode_basestring_ascii as c_encode_basestring_ascii
 except ImportError:
     c_encode_basestring_ascii = None
-
-__all__ = ['JSONEncoder']
+try:
+    from _json import make_encoder as c_make_encoder
+except ImportError:
+    c_make_encoder = None
 
 ESCAPE = re.compile(r'[\x00-\x1f\\"\b\f\n\r\t]')
 ESCAPE_ASCII = re.compile(r'([\\"]|[^\ -~])')
-HAS_UTF8 = re.compile(r'[\x80-\xff]')
+HAS_UTF8 = re.compile(b'[\x80-\xff]')
 ESCAPE_DCT = {
     '\\': '\\\\',
     '"': '\\"',
@@ -25,30 +25,12 @@
 }
 for i in range(0x20):
     ESCAPE_DCT.setdefault(chr(i), '\\u{0:04x}'.format(i))
+    #ESCAPE_DCT.setdefault(chr(i), '\\u%04x' % (i,))
 
+# Assume this produces an infinity on all machines (probably not guaranteed)
+INFINITY = float('1e66666')
 FLOAT_REPR = repr
 
-def floatstr(o, allow_nan=True):
-    # Check for specials.  Note that this type of test is processor- and/or
-    # platform-specific, so do tests which don't depend on the internals.
-
-    if math.isnan(o):
-        text = 'NaN'
-    elif math.isinf(o):
-        if math.copysign(1., o) == 1.:
-            text = 'Infinity'
-        else:
-            text = '-Infinity'
-    else:
-        return FLOAT_REPR(o)
-
-    if not allow_nan:
-        msg = "Out of range float values are not JSON compliant: " + repr(o)
-        raise ValueError(msg)
-
-    return text
-
-
 def encode_basestring(s):
     """Return a JSON representation of a Python string
 
@@ -59,8 +41,9 @@
 
 
 def py_encode_basestring_ascii(s):
-    if isinstance(s, bytes): # and HAS_UTF8.search(s) is not None:
-        s = s.decode('utf-8')
+    """Return an ASCII-only JSON representation of a Python string
+
+    """
     def replace(match):
         s = match.group(0)
         try:
@@ -69,20 +52,18 @@
             n = ord(s)
             if n < 0x10000:
                 return '\\u{0:04x}'.format(n)
+                #return '\\u%04x' % (n,)
             else:
                 # surrogate pair
                 n -= 0x10000
                 s1 = 0xd800 | ((n >> 10) & 0x3ff)
                 s2 = 0xdc00 | (n & 0x3ff)
                 return '\\u{0:04x}\\u{1:04x}'.format(s1, s2)
-    return '"' + (ESCAPE_ASCII.sub(replace, s)) + '"'
+    return '"' + ESCAPE_ASCII.sub(replace, s) + '"'
 
 
-if c_encode_basestring_ascii is not None:
-    encode_basestring_ascii = c_encode_basestring_ascii
-else:
-    encode_basestring_ascii = py_encode_basestring_ascii
-
+encode_basestring_ascii = (
+    c_encode_basestring_ascii or py_encode_basestring_ascii)
 
 class JSONEncoder(object):
     """Extensible JSON <http://json.org> encoder for Python data structures.
@@ -113,33 +94,32 @@
     implementation (to raise ``TypeError``).
 
     """
-    __all__ = ['__init__', 'default', 'encode', 'iterencode']
     item_separator = ', '
     key_separator = ': '
     def __init__(self, skipkeys=False, ensure_ascii=True,
             check_circular=True, allow_nan=True, sort_keys=False,
-            indent=None, separators=None, encoding='utf-8', default=None):
+            indent=None, separators=None, default=None):
         """Constructor for JSONEncoder, with sensible defaults.
 
-        If skipkeys is False, then it is a TypeError to attempt
+        If skipkeys is false, then it is a TypeError to attempt
         encoding of keys that are not str, int, long, float or None.  If
         skipkeys is True, such items are simply skipped.
 
-        If ensure_ascii is True, the output is guaranteed to be str
+        If ensure_ascii is true, the output is guaranteed to be str
         objects with all incoming unicode characters escaped.  If
         ensure_ascii is false, the output will be unicode object.
 
-        If check_circular is True, then lists, dicts, and custom encoded
+        If check_circular is true, then lists, dicts, and custom encoded
         objects will be checked for circular references during encoding to
         prevent an infinite recursion (which would cause an OverflowError).
         Otherwise, no such check takes place.
 
-        If allow_nan is True, then NaN, Infinity, and -Infinity will be
+        If allow_nan is true, then NaN, Infinity, and -Infinity will be
         encoded as such.  This behavior is not JSON specification compliant,
         but is consistent with most JavaScript based encoders and decoders.
         Otherwise, it will be a ValueError to encode such floats.
 
-        If sort_keys is True, then the output of dictionaries will be
+        If sort_keys is true, then the output of dictionaries will be
         sorted by key; this is useful for regression tests to ensure
         that JSON serializations can be compared on a day-to-day basis.
 
@@ -156,28 +136,130 @@
         that can't otherwise be serialized.  It should return a JSON encodable
         version of the object or raise a ``TypeError``.
 
-        If encoding is not None, then all input strings will be
-        transformed into unicode using that encoding prior to JSON-encoding.
-        The default is UTF-8.
-
         """
+
         self.skipkeys = skipkeys
         self.ensure_ascii = ensure_ascii
         self.check_circular = check_circular
         self.allow_nan = allow_nan
         self.sort_keys = sort_keys
         self.indent = indent
-        self.current_indent_level = 0
         if separators is not None:
             self.item_separator, self.key_separator = separators
         if default is not None:
             self.default = default
-        self.encoding = encoding
 
-    def _newline_indent(self):
-        return '\n' + (' ' * (self.indent * self.current_indent_level))
+    def default(self, o):
+        """Implement this method in a subclass such that it returns
+        a serializable object for ``o``, or calls the base implementation
+        (to raise a ``TypeError``).
+
+        For example, to support arbitrary iterators, you could
+        implement default like this::
+
+            def default(self, o):
+                try:
+                    iterable = iter(o)
+                except TypeError:
+                    pass
+                else:
+                    return list(iterable)
+                return JSONEncoder.default(self, o)
+
+        """
+        raise TypeError(repr(o) + " is not JSON serializable")
+
+    def encode(self, o):
+        """Return a JSON string representation of a Python data structure.
+
+        >>> JSONEncoder().encode({"foo": ["bar", "baz"]})
+        '{"foo": ["bar", "baz"]}'
+
+        """
+        # This is for extremely simple cases and benchmarks.
+        if isinstance(o, str):
+            if self.ensure_ascii:
+                return encode_basestring_ascii(o)
+            else:
+                return encode_basestring(o)
+        # This doesn't pass the iterator directly to ''.join() because the
+        # exceptions aren't as detailed.  The list call should be roughly
+        # equivalent to the PySequence_Fast that ''.join() would do.
+        chunks = self.iterencode(o, _one_shot=True)
+        if not isinstance(chunks, (list, tuple)):
+            chunks = list(chunks)
+        return ''.join(chunks)
+
+    def iterencode(self, o, _one_shot=False):
+        """Encode the given object and yield each string
+        representation as available.
+
+        For example::
+
+            for chunk in JSONEncoder().iterencode(bigobject):
+                mysocket.write(chunk)
+
+        """
+        if self.check_circular:
+            markers = {}
+        else:
+            markers = None
+        if self.ensure_ascii:
+            _encoder = encode_basestring_ascii
+        else:
+            _encoder = encode_basestring
+
+        def floatstr(o, allow_nan=self.allow_nan,
+                _repr=FLOAT_REPR, _inf=INFINITY, _neginf=-INFINITY):
+            # Check for specials.  Note that this type of test is processor
+            # and/or platform-specific, so do tests which don't depend on the
+            # internals.
+
+            if o != o:
+                text = 'NaN'
+            elif o == _inf:
+                text = 'Infinity'
+            elif o == _neginf:
+                text = '-Infinity'
+            else:
+                return _repr(o)
 
-    def _iterencode_list(self, lst, markers=None):
+            if not allow_nan:
+                raise ValueError(
+                    "Out of range float values are not JSON compliant: " +
+                    repr(o))
+
+            return text
+
+
+        if (_one_shot and c_make_encoder is not None
+                and not self.indent and not self.sort_keys):
+            _iterencode = c_make_encoder(
+                markers, self.default, _encoder, self.indent,
+                self.key_separator, self.item_separator, self.sort_keys,
+                self.skipkeys, self.allow_nan)
+        else:
+            _iterencode = _make_iterencode(
+                markers, self.default, _encoder, self.indent, floatstr,
+                self.key_separator, self.item_separator, self.sort_keys,
+                self.skipkeys, _one_shot)
+        return _iterencode(o, 0)
+
+def _make_iterencode(markers, _default, _encoder, _indent, _floatstr,
+        _key_separator, _item_separator, _sort_keys, _skipkeys, _one_shot,
+        ## HACK: hand-optimized bytecode; turn globals into locals
+        ValueError=ValueError,
+        dict=dict,
+        float=float,
+        id=id,
+        int=int,
+        isinstance=isinstance,
+        list=list,
+        str=str,
+        tuple=tuple,
+    ):
+
+    def _iterencode_list(lst, _current_indent_level):
         if not lst:
             yield '[]'
             return
@@ -186,31 +268,51 @@
             if markerid in markers:
                 raise ValueError("Circular reference detected")
             markers[markerid] = lst
-        yield '['
-        if self.indent is not None:
-            self.current_indent_level += 1
-            newline_indent = self._newline_indent()
-            separator = self.item_separator + newline_indent
-            yield newline_indent
+        buf = '['
+        if _indent is not None:
+            _current_indent_level += 1
+            newline_indent = '\n' + (' ' * (_indent * _current_indent_level))
+            separator = _item_separator + newline_indent
+            buf += newline_indent
         else:
             newline_indent = None
-            separator = self.item_separator
+            separator = _item_separator
         first = True
         for value in lst:
             if first:
                 first = False
             else:
-                yield separator
-            for chunk in self._iterencode(value, markers):
-                yield chunk
+                buf = separator
+            if isinstance(value, str):
+                yield buf + _encoder(value)
+            elif value is None:
+                yield buf + 'null'
+            elif value is True:
+                yield buf + 'true'
+            elif value is False:
+                yield buf + 'false'
+            elif isinstance(value, int):
+                yield buf + str(value)
+            elif isinstance(value, float):
+                yield buf + _floatstr(value)
+            else:
+                yield buf
+                if isinstance(value, (list, tuple)):
+                    chunks = _iterencode_list(value, _current_indent_level)
+                elif isinstance(value, dict):
+                    chunks = _iterencode_dict(value, _current_indent_level)
+                else:
+                    chunks = _iterencode(value, _current_indent_level)
+                for chunk in chunks:
+                    yield chunk
         if newline_indent is not None:
-            self.current_indent_level -= 1
-            yield self._newline_indent()
+            _current_indent_level -= 1
+            yield '\n' + (' ' * (_indent * _current_indent_level))
         yield ']'
         if markers is not None:
             del markers[markerid]
 
-    def _iterencode_dict(self, dct, markers=None):
+    def _iterencode_dict(dct, _current_indent_level):
         if not dct:
             yield '{}'
             return
@@ -220,78 +322,75 @@
                 raise ValueError("Circular reference detected")
             markers[markerid] = dct
         yield '{'
-        key_separator = self.key_separator
-        if self.indent is not None:
-            self.current_indent_level += 1
-            newline_indent = self._newline_indent()
-            item_separator = self.item_separator + newline_indent
+        if _indent is not None:
+            _current_indent_level += 1
+            newline_indent = '\n' + (' ' * (_indent * _current_indent_level))
+            item_separator = _item_separator + newline_indent
             yield newline_indent
         else:
             newline_indent = None
-            item_separator = self.item_separator
+            item_separator = _item_separator
         first = True
-        if self.ensure_ascii:
-            encoder = encode_basestring_ascii
-        else:
-            encoder = encode_basestring
-        allow_nan = self.allow_nan
-        if self.sort_keys:
-            keys = list(dct.keys())
-            keys.sort()
-            items = [(k, dct[k]) for k in keys]
+        if _sort_keys:
+            items = sorted(dct.items(), key=lambda kv: kv[0])
         else:
-            items = iter(dct.items())
-        _encoding = self.encoding
-        _do_decode = (_encoding is not None
-            and not (_encoding == 'utf-8'))
+            items = dct.items()
         for key, value in items:
             if isinstance(key, str):
-                if _do_decode:
-                    key = key.decode(_encoding)
-            elif isinstance(key, str):
                 pass
             # JavaScript is weakly typed for these, so it makes sense to
             # also allow them.  Many encoders seem to do something like this.
             elif isinstance(key, float):
-                key = floatstr(key, allow_nan)
-            elif isinstance(key, (int, int)):
-                key = str(key)
+                key = _floatstr(key)
             elif key is True:
                 key = 'true'
             elif key is False:
                 key = 'false'
             elif key is None:
                 key = 'null'
-            elif self.skipkeys:
+            elif isinstance(key, int):
+                key = str(key)
+            elif _skipkeys:
                 continue
             else:
-                raise TypeError("key {0!r} is not a string".format(key))
+                raise TypeError("key " + repr(key) + " is not a string")
             if first:
                 first = False
             else:
                 yield item_separator
-            yield encoder(key)
-            yield key_separator
-            for chunk in self._iterencode(value, markers):
-                yield chunk
+            yield _encoder(key)
+            yield _key_separator
+            if isinstance(value, str):
+                yield _encoder(value)
+            elif value is None:
+                yield 'null'
+            elif value is True:
+                yield 'true'
+            elif value is False:
+                yield 'false'
+            elif isinstance(value, int):
+                yield str(value)
+            elif isinstance(value, float):
+                yield _floatstr(value)
+            else:
+                if isinstance(value, (list, tuple)):
+                    chunks = _iterencode_list(value, _current_indent_level)
+                elif isinstance(value, dict):
+                    chunks = _iterencode_dict(value, _current_indent_level)
+                else:
+                    chunks = _iterencode(value, _current_indent_level)
+                for chunk in chunks:
+                    yield chunk
         if newline_indent is not None:
-            self.current_indent_level -= 1
-            yield self._newline_indent()
+            _current_indent_level -= 1
+            yield '\n' + (' ' * (_indent * _current_indent_level))
         yield '}'
         if markers is not None:
             del markers[markerid]
 
-    def _iterencode(self, o, markers=None):
+    def _iterencode(o, _current_indent_level):
         if isinstance(o, str):
-            if self.ensure_ascii:
-                encoder = encode_basestring_ascii
-            else:
-                encoder = encode_basestring
-            _encoding = self.encoding
-            if (_encoding is not None and isinstance(o, str)
-                    and not (_encoding == 'utf-8')):
-                o = o.decode(_encoding)
-            yield encoder(o)
+            yield _encoder(o)
         elif o is None:
             yield 'null'
         elif o is True:
@@ -301,12 +400,12 @@
         elif isinstance(o, (int, int)):
             yield str(o)
         elif isinstance(o, float):
-            yield floatstr(o, self.allow_nan)
+            yield _floatstr(o)
         elif isinstance(o, (list, tuple)):
-            for chunk in self._iterencode_list(o, markers):
+            for chunk in _iterencode_list(o, _current_indent_level):
                 yield chunk
         elif isinstance(o, dict):
-            for chunk in self._iterencode_dict(o, markers):
+            for chunk in _iterencode_dict(o, _current_indent_level):
                 yield chunk
         else:
             if markers is not None:
@@ -314,71 +413,9 @@
                 if markerid in markers:
                     raise ValueError("Circular reference detected")
                 markers[markerid] = o
-            for chunk in self._iterencode_default(o, markers):
+            o = _default(o)
+            for chunk in _iterencode(o, _current_indent_level):
                 yield chunk
             if markers is not None:
                 del markers[markerid]
-
-    def _iterencode_default(self, o, markers=None):
-        newobj = self.default(o)
-        return self._iterencode(newobj, markers)
-
-    def default(self, o):
-        """Implement this method in a subclass such that it returns a serializable
-        object for ``o``, or calls the base implementation (to raise a
-        ``TypeError``).
-
-        For example, to support arbitrary iterators, you could implement
-        default like this::
-
-            def default(self, o):
-                try:
-                    iterable = iter(o)
-                except TypeError:
-                    pass
-                else:
-                    return list(iterable)
-                return JSONEncoder.default(self, o)
-
-        """
-        raise TypeError(repr(o) + " is not JSON serializable")
-
-    def encode(self, o):
-        """Return a JSON string representation of a Python data structure.
-
-        >>> JSONEncoder().encode({"foo": ["bar", "baz"]})
-        '{"foo": ["bar", "baz"]}'
-
-        """
-        # This is for extremely simple cases and benchmarks.
-        if isinstance(o, (str, bytes)):
-            if isinstance(o, bytes):
-                _encoding = self.encoding
-                if (_encoding is not None
-                        and not (_encoding == 'utf-8')):
-                    o = o.decode(_encoding)
-            if self.ensure_ascii:
-                return encode_basestring_ascii(o)
-            else:
-                return encode_basestring(o)
-        # This doesn't pass the iterator directly to ''.join() because the
-        # exceptions aren't as detailed.  The list call should be roughly
-        # equivalent to the PySequence_Fast that ''.join() would do.
-        chunks = list(self.iterencode(o))
-        return ''.join(chunks)
-
-    def iterencode(self, o):
-        """Encode the given object and yield each string representation as
-        available.
-
-        For example::
-
-            for chunk in JSONEncoder().iterencode(bigobject):
-                mysocket.write(chunk)
-
-        """
-        if self.check_circular:
-            markers = {}
-        else:
-            markers = None
-        return self._iterencode(o, markers)
+    return _iterencode

Modified: python/branches/pep-0383/Lib/json/scanner.py
==============================================================================
--- python/branches/pep-0383/Lib/json/scanner.py	(original)
+++ python/branches/pep-0383/Lib/json/scanner.py	Sat May  2 21:20:57 2009
@@ -1,69 +1,65 @@
-"""Iterator based sre token scanner
-
+"""JSON token scanner
 """
-
 import re
-import sre_parse
-import sre_compile
-import sre_constants
-
-from re import VERBOSE, MULTILINE, DOTALL
-from sre_constants import BRANCH, SUBPATTERN
-
-__all__ = ['Scanner', 'pattern']
-
-FLAGS = (VERBOSE | MULTILINE | DOTALL)
-
-class Scanner(object):
-    def __init__(self, lexicon, flags=FLAGS):
-        self.actions = [None]
-        # Combine phrases into a compound pattern
-        s = sre_parse.Pattern()
-        s.flags = flags
-        p = []
-        for idx, token in enumerate(lexicon):
-            phrase = token.pattern
-            try:
-                subpattern = sre_parse.SubPattern(s,
-                    [(SUBPATTERN, (idx + 1, sre_parse.parse(phrase, flags)))])
-            except sre_constants.error:
-                raise
-            p.append(subpattern)
-            self.actions.append(token)
-
-        s.groups = len(p) + 1 # NOTE(guido): Added to make SRE validation work
-        p = sre_parse.SubPattern(s, [(BRANCH, (None, p))])
-        self.scanner = sre_compile.compile(p)
-
-    def iterscan(self, string, idx=0, context=None):
-        """Yield match, end_idx for each match
-
-        """
-        match = self.scanner.scanner(string, idx).match
-        actions = self.actions
-        lastend = idx
-        end = len(string)
-        while True:
-            m = match()
-            if m is None:
-                break
-            matchbegin, matchend = m.span()
-            if lastend == matchend:
-                break
-            action = actions[m.lastindex]
-            if action is not None:
-                rval, next_pos = action(m, context)
-                if next_pos is not None and next_pos != matchend:
-                    # "fast forward" the scanner
-                    matchend = next_pos
-                    match = self.scanner.scanner(string, matchend).match
-                yield rval, matchend
-            lastend = matchend
-
-
-def pattern(pattern, flags=FLAGS):
-    def decorator(fn):
-        fn.pattern = pattern
-        fn.regex = re.compile(pattern, flags)
-        return fn
-    return decorator
+try:
+    from _json import make_scanner as c_make_scanner
+except ImportError:
+    c_make_scanner = None
+
+__all__ = ['make_scanner']
+
+NUMBER_RE = re.compile(
+    r'(-?(?:0|[1-9]\d*))(\.\d+)?([eE][-+]?\d+)?',
+    (re.VERBOSE | re.MULTILINE | re.DOTALL))
+
+def py_make_scanner(context):
+    parse_object = context.parse_object
+    parse_array = context.parse_array
+    parse_string = context.parse_string
+    match_number = NUMBER_RE.match
+    strict = context.strict
+    parse_float = context.parse_float
+    parse_int = context.parse_int
+    parse_constant = context.parse_constant
+    object_hook = context.object_hook
+
+    def _scan_once(string, idx):
+        try:
+            nextchar = string[idx]
+        except IndexError:
+            raise StopIteration
+
+        if nextchar == '"':
+            return parse_string(string, idx + 1, strict)
+        elif nextchar == '{':
+            return parse_object((string, idx + 1), strict,
+                _scan_once, object_hook, object_pairs_hook)
+        elif nextchar == '[':
+            return parse_array((string, idx + 1), _scan_once)
+        elif nextchar == 'n' and string[idx:idx + 4] == 'null':
+            return None, idx + 4
+        elif nextchar == 't' and string[idx:idx + 4] == 'true':
+            return True, idx + 4
+        elif nextchar == 'f' and string[idx:idx + 5] == 'false':
+            return False, idx + 5
+
+        m = match_number(string, idx)
+        if m is not None:
+            integer, frac, exp = m.groups()
+            if frac or exp:
+                res = parse_float(integer + (frac or '') + (exp or ''))
+            else:
+                res = parse_int(integer)
+            return res, m.end()
+        elif nextchar == 'N' and string[idx:idx + 3] == 'NaN':
+            return parse_constant('NaN'), idx + 3
+        elif nextchar == 'I' and string[idx:idx + 8] == 'Infinity':
+            return parse_constant('Infinity'), idx + 8
+        elif nextchar == '-' and string[idx:idx + 9] == '-Infinity':
+            return parse_constant('-Infinity'), idx + 9
+        else:
+            raise StopIteration
+
+    return _scan_once
+
+make_scanner = c_make_scanner or py_make_scanner

Modified: python/branches/pep-0383/Lib/json/tests/test_decode.py
==============================================================================
--- python/branches/pep-0383/Lib/json/tests/test_decode.py	(original)
+++ python/branches/pep-0383/Lib/json/tests/test_decode.py	Sat May  2 21:20:57 2009
@@ -32,3 +32,10 @@
                                     object_pairs_hook = OrderedDict,
                                     object_hook = lambda x: None),
                          OrderedDict(p))
+
+    def test_decoder_optimizations(self):
+        # Several optimizations were made that skip over calls to
+        # the whitespace regex, so this test is designed to try and
+        # exercise the uncommon cases. The array cases are already covered.
+        rval = json.loads('{   "key"    :    "value"    ,  "k":"v"    }')
+        self.assertEquals(rval, {"key":"value", "k":"v"})

Modified: python/branches/pep-0383/Lib/json/tests/test_dump.py
==============================================================================
--- python/branches/pep-0383/Lib/json/tests/test_dump.py	(original)
+++ python/branches/pep-0383/Lib/json/tests/test_dump.py	Sat May  2 21:20:57 2009
@@ -11,3 +11,11 @@
 
     def test_dumps(self):
         self.assertEquals(json.dumps({}), '{}')
+
+    def test_encode_truefalse(self):
+        self.assertEquals(json.dumps(
+                 {True: False, False: True}, sort_keys=True),
+                 '{"false": true, "true": false}')
+        self.assertEquals(json.dumps(
+                {2: 3.0, 4.0: 5, False: 1, 6: True}, sort_keys=True),
+                '{"false": 1, "2": 3.0, "4.0": 5, "6": true}')

Modified: python/branches/pep-0383/Lib/json/tests/test_encode_basestring_ascii.py
==============================================================================
--- python/branches/pep-0383/Lib/json/tests/test_encode_basestring_ascii.py	(original)
+++ python/branches/pep-0383/Lib/json/tests/test_encode_basestring_ascii.py	Sat May  2 21:20:57 2009
@@ -3,22 +3,20 @@
 import json.encoder
 
 CASES = [
-    ('/\\"\ucafe\ubabe\uab98\ufcde\ubcda\uef4a\x08\x0c\n\r\t`1~!@#$%^&*()_+-=[]{}|;:\',./<>?', b'"/\\\\\\"\\ucafe\\ubabe\\uab98\\ufcde\\ubcda\\uef4a\\b\\f\\n\\r\\t`1~!@#$%^&*()_+-=[]{}|;:\',./<>?"'),
-    ('\u0123\u4567\u89ab\ucdef\uabcd\uef4a', b'"\\u0123\\u4567\\u89ab\\ucdef\\uabcd\\uef4a"'),
-    ('controls', b'"controls"'),
-    ('\x08\x0c\n\r\t', b'"\\b\\f\\n\\r\\t"'),
-    ('{"object with 1 member":["array with 1 element"]}', b'"{\\"object with 1 member\\":[\\"array with 1 element\\"]}"'),
-    (' s p a c e d ', b'" s p a c e d "'),
-    ('\U0001d120', b'"\\ud834\\udd20"'),
-    ('\u03b1\u03a9', b'"\\u03b1\\u03a9"'),
-    (b'\xce\xb1\xce\xa9', b'"\\u03b1\\u03a9"'),
-    ('\u03b1\u03a9', b'"\\u03b1\\u03a9"'),
-    (b'\xce\xb1\xce\xa9', b'"\\u03b1\\u03a9"'),
-    ('\u03b1\u03a9', b'"\\u03b1\\u03a9"'),
-    ('\u03b1\u03a9', b'"\\u03b1\\u03a9"'),
-    ("`1~!@#$%^&*()_+-={':[,]}|;.</>?", b'"`1~!@#$%^&*()_+-={\':[,]}|;.</>?"'),
-    ('\x08\x0c\n\r\t', b'"\\b\\f\\n\\r\\t"'),
-    ('\u0123\u4567\u89ab\ucdef\uabcd\uef4a', b'"\\u0123\\u4567\\u89ab\\ucdef\\uabcd\\uef4a"'),
+    ('/\\"\ucafe\ubabe\uab98\ufcde\ubcda\uef4a\x08\x0c\n\r\t`1~!@#$%^&*()_+-=[]{}|;:\',./<>?', '"/\\\\\\"\\ucafe\\ubabe\\uab98\\ufcde\\ubcda\\uef4a\\b\\f\\n\\r\\t`1~!@#$%^&*()_+-=[]{}|;:\',./<>?"'),
+    ('\u0123\u4567\u89ab\ucdef\uabcd\uef4a', '"\\u0123\\u4567\\u89ab\\ucdef\\uabcd\\uef4a"'),
+    ('controls', '"controls"'),
+    ('\x08\x0c\n\r\t', '"\\b\\f\\n\\r\\t"'),
+    ('{"object with 1 member":["array with 1 element"]}', '"{\\"object with 1 member\\":[\\"array with 1 element\\"]}"'),
+    (' s p a c e d ', '" s p a c e d "'),
+    ('\U0001d120', '"\\ud834\\udd20"'),
+    ('\u03b1\u03a9', '"\\u03b1\\u03a9"'),
+    ('\u03b1\u03a9', '"\\u03b1\\u03a9"'),
+    ('\u03b1\u03a9', '"\\u03b1\\u03a9"'),
+    ('\u03b1\u03a9', '"\\u03b1\\u03a9"'),
+    ("`1~!@#$%^&*()_+-={':[,]}|;.</>?", '"`1~!@#$%^&*()_+-={\':[,]}|;.</>?"'),
+    ('\x08\x0c\n\r\t', '"\\b\\f\\n\\r\\t"'),
+    ('\u0123\u4567\u89ab\ucdef\uabcd\uef4a', '"\\u0123\\u4567\\u89ab\\ucdef\\uabcd\\uef4a"'),
 ]
 
 class TestEncodeBaseStringAscii(TestCase):
@@ -26,12 +24,14 @@
         self._test_encode_basestring_ascii(json.encoder.py_encode_basestring_ascii)
 
     def test_c_encode_basestring_ascii(self):
-        if json.encoder.c_encode_basestring_ascii is not None:
-            self._test_encode_basestring_ascii(json.encoder.c_encode_basestring_ascii)
+        if not json.encoder.c_encode_basestring_ascii:
+            return
+        self._test_encode_basestring_ascii(json.encoder.c_encode_basestring_ascii)
 
     def _test_encode_basestring_ascii(self, encode_basestring_ascii):
         fname = encode_basestring_ascii.__name__
         for input_string, expect in CASES:
             result = encode_basestring_ascii(input_string)
-            result = result.encode("ascii")
-            self.assertEquals(result, expect)
+            self.assertEquals(result, expect,
+                '{0!r} != {1!r} for {2}({3!r})'.format(
+                    result, expect, fname, input_string))

Modified: python/branches/pep-0383/Lib/json/tests/test_fail.py
==============================================================================
--- python/branches/pep-0383/Lib/json/tests/test_fail.py	(original)
+++ python/branches/pep-0383/Lib/json/tests/test_fail.py	Sat May  2 21:20:57 2009
@@ -73,4 +73,4 @@
             except ValueError:
                 pass
             else:
-                self.fail("Expected failure for fail%d.json: %r" % (idx, doc))
+                self.fail("Expected failure for fail{0}.json: {1!r}".format(idx, doc))

Modified: python/branches/pep-0383/Lib/json/tests/test_float.py
==============================================================================
--- python/branches/pep-0383/Lib/json/tests/test_float.py	(original)
+++ python/branches/pep-0383/Lib/json/tests/test_float.py	Sat May  2 21:20:57 2009
@@ -5,5 +5,11 @@
 
 class TestFloat(TestCase):
     def test_floats(self):
-        for num in [1617161771.7650001, math.pi, math.pi**100, math.pi**-100]:
+        for num in [1617161771.7650001, math.pi, math.pi**100, math.pi**-100, 3.1]:
             self.assertEquals(float(json.dumps(num)), num)
+            self.assertEquals(json.loads(json.dumps(num)), num)
+
+    def test_ints(self):
+        for num in [1, 1<<32, 1<<64]:
+            self.assertEquals(json.dumps(num), str(num))
+            self.assertEquals(int(json.dumps(num)), num)

Modified: python/branches/pep-0383/Lib/json/tests/test_scanstring.py
==============================================================================
--- python/branches/pep-0383/Lib/json/tests/test_scanstring.py	(original)
+++ python/branches/pep-0383/Lib/json/tests/test_scanstring.py	Sat May  2 21:20:57 2009
@@ -15,96 +15,90 @@
 
     def _test_scanstring(self, scanstring):
         self.assertEquals(
-            scanstring('"z\\ud834\\udd20x"', 1, None, True),
+            scanstring('"z\\ud834\\udd20x"', 1, True),
             ('z\U0001d120x', 16))
 
         if sys.maxunicode == 65535:
             self.assertEquals(
-                scanstring('"z\U0001d120x"', 1, None, True),
+                scanstring('"z\U0001d120x"', 1, True),
                 ('z\U0001d120x', 6))
         else:
             self.assertEquals(
-                scanstring('"z\U0001d120x"', 1, None, True),
+                scanstring('"z\U0001d120x"', 1, True),
                 ('z\U0001d120x', 5))
 
         self.assertEquals(
-            scanstring('"\\u007b"', 1, None, True),
+            scanstring('"\\u007b"', 1, True),
             ('{', 8))
 
         self.assertEquals(
-            scanstring('"A JSON payload should be an object or array, not a string."', 1, None, True),
+            scanstring('"A JSON payload should be an object or array, not a string."', 1, True),
             ('A JSON payload should be an object or array, not a string.', 60))
 
         self.assertEquals(
-            scanstring('["Unclosed array"', 2, None, True),
+            scanstring('["Unclosed array"', 2, True),
             ('Unclosed array', 17))
 
         self.assertEquals(
-            scanstring('["extra comma",]', 2, None, True),
+            scanstring('["extra comma",]', 2, True),
             ('extra comma', 14))
 
         self.assertEquals(
-            scanstring('["double extra comma",,]', 2, None, True),
+            scanstring('["double extra comma",,]', 2, True),
             ('double extra comma', 21))
 
         self.assertEquals(
-            scanstring('["Comma after the close"],', 2, None, True),
+            scanstring('["Comma after the close"],', 2, True),
             ('Comma after the close', 24))
 
         self.assertEquals(
-            scanstring('["Extra close"]]', 2, None, True),
+            scanstring('["Extra close"]]', 2, True),
             ('Extra close', 14))
 
         self.assertEquals(
-            scanstring('{"Extra comma": true,}', 2, None, True),
+            scanstring('{"Extra comma": true,}', 2, True),
             ('Extra comma', 14))
 
         self.assertEquals(
-            scanstring('{"Extra value after close": true} "misplaced quoted value"', 2, None, True),
+            scanstring('{"Extra value after close": true} "misplaced quoted value"', 2, True),
             ('Extra value after close', 26))
 
         self.assertEquals(
-            scanstring('{"Illegal expression": 1 + 2}', 2, None, True),
+            scanstring('{"Illegal expression": 1 + 2}', 2, True),
             ('Illegal expression', 21))
 
         self.assertEquals(
-            scanstring('{"Illegal invocation": alert()}', 2, None, True),
+            scanstring('{"Illegal invocation": alert()}', 2, True),
             ('Illegal invocation', 21))
 
         self.assertEquals(
-            scanstring('{"Numbers cannot have leading zeroes": 013}', 2, None, True),
+            scanstring('{"Numbers cannot have leading zeroes": 013}', 2, True),
             ('Numbers cannot have leading zeroes', 37))
 
         self.assertEquals(
-            scanstring('{"Numbers cannot be hex": 0x14}', 2, None, True),
+            scanstring('{"Numbers cannot be hex": 0x14}', 2, True),
             ('Numbers cannot be hex', 24))
 
         self.assertEquals(
-            scanstring('[[[[[[[[[[[[[[[[[[[["Too deep"]]]]]]]]]]]]]]]]]]]]', 21, None, True),
+            scanstring('[[[[[[[[[[[[[[[[[[[["Too deep"]]]]]]]]]]]]]]]]]]]]', 21, True),
             ('Too deep', 30))
 
         self.assertEquals(
-            scanstring('{"Missing colon" null}', 2, None, True),
+            scanstring('{"Missing colon" null}', 2, True),
             ('Missing colon', 16))
 
         self.assertEquals(
-            scanstring('{"Double colon":: null}', 2, None, True),
+            scanstring('{"Double colon":: null}', 2, True),
             ('Double colon', 15))
 
         self.assertEquals(
-            scanstring('{"Comma instead of colon", null}', 2, None, True),
+            scanstring('{"Comma instead of colon", null}', 2, True),
             ('Comma instead of colon', 25))
 
         self.assertEquals(
-            scanstring('["Colon instead of comma": false]', 2, None, True),
+            scanstring('["Colon instead of comma": false]', 2, True),
             ('Colon instead of comma', 25))
 
         self.assertEquals(
-            scanstring('["Bad value", truth]', 2, None, True),
+            scanstring('["Bad value", truth]', 2, True),
             ('Bad value', 12))
-
-    def test_issue3623(self):
-        self.assertRaises(ValueError, json.decoder.scanstring, b"xxx", 1,
-                          "xxx")
-        self.assertRaises(UnicodeDecodeError,
-                          json.encoder.encode_basestring_ascii, b"xx\xff")

Modified: python/branches/pep-0383/Lib/json/tests/test_unicode.py
==============================================================================
--- python/branches/pep-0383/Lib/json/tests/test_unicode.py	(original)
+++ python/branches/pep-0383/Lib/json/tests/test_unicode.py	Sat May  2 21:20:57 2009
@@ -4,20 +4,8 @@
 from collections import OrderedDict
 
 class TestUnicode(TestCase):
-    def test_encoding1(self):
-        encoder = json.JSONEncoder(encoding='utf-8')
-        u = '\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
-        s = u.encode('utf-8')
-        ju = encoder.encode(u)
-        js = encoder.encode(s)
-        self.assertEquals(ju, js)
-
-    def test_encoding2(self):
-        u = '\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
-        s = u.encode('utf-8')
-        ju = json.dumps(u, encoding='utf-8')
-        js = json.dumps(s, encoding='utf-8')
-        self.assertEquals(ju, js)
+    # test_encoding1 and test_encoding2 from 2.x are irrelevant (only str
+    # is supported as input, not bytes).
 
     def test_encoding3(self):
         u = '\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
@@ -52,8 +40,22 @@
     def test_unicode_decode(self):
         for i in range(0, 0xd7ff):
             u = chr(i)
-            js = '"\\u{0:04x}"'.format(i)
-            self.assertEquals(json.loads(js), u)
+            s = '"\\u{0:04x}"'.format(i)
+            self.assertEquals(json.loads(s), u)
+
+    def test_unicode_preservation(self):
+        self.assertEquals(type(json.loads('""')), str)
+        self.assertEquals(type(json.loads('"a"')), str)
+        self.assertEquals(type(json.loads('["a"]')[0]), str)
+
+    def test_bytes_encode(self):
+        self.assertRaises(TypeError, json.dumps, b"hi")
+        self.assertRaises(TypeError, json.dumps, [b"hi"])
+
+    def test_bytes_decode(self):
+        self.assertRaises(TypeError, json.loads, b'"hi"')
+        self.assertRaises(TypeError, json.loads, b'["hi"]')
+
 
     def test_object_pairs_hook_with_unicode(self):
         s = '{"xkd":1, "kcw":2, "art":3, "hxm":4, "qrt":5, "pad":6, "hoy":7}'

Modified: python/branches/pep-0383/Lib/json/tool.py
==============================================================================
--- python/branches/pep-0383/Lib/json/tool.py	(original)
+++ python/branches/pep-0383/Lib/json/tool.py	Sat May  2 21:20:57 2009
@@ -2,11 +2,11 @@
 
 Usage::
 
-    $ echo '{"json":"obj"}' | python -mjson.tool
+    $ echo '{"json":"obj"}' | python -m json.tool
     {
         "json": "obj"
     }
-    $ echo '{ 1.2:3.4}' | python -mjson.tool
+    $ echo '{ 1.2:3.4}' | python -m json.tool
     Expecting property name: line 1 column 2 (char 2)
 
 """
@@ -24,7 +24,7 @@
         infile = open(sys.argv[1], 'rb')
         outfile = open(sys.argv[2], 'wb')
     else:
-        raise SystemExit("{0} [infile [outfile]]".format(sys.argv[0]))
+        raise SystemExit(sys.argv[0] + " [infile [outfile]]")
     try:
         obj = json.load(infile)
     except ValueError as e:

Modified: python/branches/pep-0383/Lib/shutil.py
==============================================================================
--- python/branches/pep-0383/Lib/shutil.py	(original)
+++ python/branches/pep-0383/Lib/shutil.py	Sat May  2 21:20:57 2009
@@ -11,11 +11,15 @@
 import fnmatch
 
 __all__ = ["copyfileobj","copyfile","copymode","copystat","copy","copy2",
-           "copytree","move","rmtree","Error"]
+           "copytree","move","rmtree","Error", "SpecialFileError"]
 
 class Error(EnvironmentError):
     pass
 
+class SpecialFileError(EnvironmentError):
+    """Raised when trying to do a kind of operation (e.g. copying) which is
+    not supported on a special file (e.g. a named pipe)"""
+
 try:
     WindowsError
 except NameError:
@@ -48,6 +52,15 @@
 
     fsrc = None
     fdst = None
+    for fn in [src, dst]:
+        try:
+            st = os.stat(fn)
+        except OSError:
+            # File most likely does not exist
+            pass
+        # XXX What about other special files? (sockets, devices...)
+        if stat.S_ISFIFO(st.st_mode):
+            raise SpecialFileError("`%s` is a named pipe" % fn)
     try:
         fsrc = open(src, 'rb')
         fdst = open(dst, 'wb')
@@ -157,14 +170,14 @@
             elif os.path.isdir(srcname):
                 copytree(srcname, dstname, symlinks, ignore)
             else:
+                # Will raise a SpecialFileError for unsupported file types
                 copy2(srcname, dstname)
-            # XXX What about devices, sockets etc.?
-        except (IOError, os.error) as why:
-            errors.append((srcname, dstname, str(why)))
         # catch the Error from the recursive copytree so that we can
         # continue with other files
         except Error as err:
             errors.extend(err.args[0])
+        except EnvironmentError as why:
+            errors.append((srcname, dstname, str(why)))
     try:
         copystat(src, dst)
     except OSError as why:

Modified: python/branches/pep-0383/Lib/test/formatfloat_testcases.txt
==============================================================================
--- python/branches/pep-0383/Lib/test/formatfloat_testcases.txt	(original)
+++ python/branches/pep-0383/Lib/test/formatfloat_testcases.txt	Sat May  2 21:20:57 2009
@@ -22,8 +22,8 @@
 %.0f 123.456 -> 123
 %.0f 1234.56 -> 1235
 %.0f 1e49 -> 9999999999999999464902769475481793196872414789632
--- %.0f 1e50 -> 100000000000000007629769841091887003294964970946560
 %.0f 9.9999999999999987e+49 -> 99999999999999986860582406952576489172979654066176
+%.0f 1e50 -> 100000000000000007629769841091887003294964970946560
 
 -- precision 1
 %.1f 0.0001 -> 0.0

Modified: python/branches/pep-0383/Lib/test/string_tests.py
==============================================================================
--- python/branches/pep-0383/Lib/test/string_tests.py	(original)
+++ python/branches/pep-0383/Lib/test/string_tests.py	Sat May  2 21:20:57 2009
@@ -1105,14 +1105,7 @@
             value = 0.01
             for x in range(60):
                 value = value * 3.141592655 / 3.0 * 10.0
-                # The formatfloat() code in stringobject.c and
-                # unicodeobject.c uses a 120 byte buffer and switches from
-                # 'f' formatting to 'g' at precision 50, so we expect
-                # OverflowErrors for the ranges x < 50 and prec >= 67.
-                if x < 50 and prec >= 67:
-                    self.checkraises(OverflowError, format, "__mod__", value)
-                else:
-                    self.checkcall(format, "__mod__", value)
+                self.checkcall(format, "__mod__", value)
 
     def test_inplace_rewrites(self):
         # Check that strings don't copy and modify cached single-character strings

Modified: python/branches/pep-0383/Lib/test/support.py
==============================================================================
--- python/branches/pep-0383/Lib/test/support.py	(original)
+++ python/branches/pep-0383/Lib/test/support.py	Sat May  2 21:20:57 2009
@@ -13,6 +13,7 @@
 import warnings
 import unittest
 import importlib
+import collections
 
 __all__ = ["Error", "TestFailed", "ResourceDenied", "import_module",
            "verbose", "use_resources", "max_memuse", "record_original_stdout",
@@ -510,26 +511,45 @@
         sys.modules.update(self.original_modules)
 
 
-class EnvironmentVarGuard(object):
+class EnvironmentVarGuard(collections.MutableMapping):
 
     """Class to help protect the environment variable properly.  Can be used as
     a context manager."""
 
     def __init__(self):
+        self._environ = os.environ
         self._changed = {}
 
-    def set(self, envvar, value):
+    def __getitem__(self, envvar):
+        return self._environ[envvar]
+
+    def __setitem__(self, envvar, value):
         # Remember the initial value on the first access
         if envvar not in self._changed:
-            self._changed[envvar] = os.environ.get(envvar)
-        os.environ[envvar] = value
+            self._changed[envvar] = self._environ.get(envvar)
+        self._environ[envvar] = value
 
-    def unset(self, envvar):
+    def __delitem__(self, envvar):
         # Remember the initial value on the first access
         if envvar not in self._changed:
-            self._changed[envvar] = os.environ.get(envvar)
-        if envvar in os.environ:
-            del os.environ[envvar]
+            self._changed[envvar] = self._environ.get(envvar)
+        if envvar in self._environ:
+            del self._environ[envvar]
+
+    def keys(self):
+        return self._environ.keys()
+
+    def __iter__(self):
+        return iter(self._environ)
+
+    def __len__(self):
+        return len(self._environ)
+
+    def set(self, envvar, value):
+        self[envvar] = value
+
+    def unset(self, envvar):
+        del self[envvar]
 
     def __enter__(self):
         return self
@@ -537,10 +557,11 @@
     def __exit__(self, *ignore_exc):
         for (k, v) in self._changed.items():
             if v is None:
-                if k in os.environ:
-                    del os.environ[k]
+                if k in self._environ:
+                    del self._environ[k]
             else:
-                os.environ[k] = v
+                self._environ[k] = v
+
 
 class TransientResource(object):
 

Modified: python/branches/pep-0383/Lib/test/test_getopt.py
==============================================================================
--- python/branches/pep-0383/Lib/test/test_getopt.py	(original)
+++ python/branches/pep-0383/Lib/test/test_getopt.py	Sat May  2 21:20:57 2009
@@ -1,7 +1,7 @@
 # test_getopt.py
 # David Goodger <dgoodger at bigfoot.com> 2000-08-19
 
-from test.support import verbose, run_doctest, run_unittest
+from test.support import verbose, run_doctest, run_unittest, EnvironmentVarGuard
 import unittest
 
 import getopt
@@ -11,15 +11,13 @@
 
 class GetoptTests(unittest.TestCase):
     def setUp(self):
-        self.old_posixly_correct = os.environ.get("POSIXLY_CORRECT", sentinel)
-        if self.old_posixly_correct is not sentinel:
-            del os.environ["POSIXLY_CORRECT"]
+        self.env = EnvironmentVarGuard()
+        if "POSIXLY_CORRECT" in self.env:
+            del self.env["POSIXLY_CORRECT"]
 
     def tearDown(self):
-        if self.old_posixly_correct is sentinel:
-            os.environ.pop("POSIXLY_CORRECT", None)
-        else:
-            os.environ["POSIXLY_CORRECT"] = self.old_posixly_correct
+        self.env.__exit__()
+        del self.env
 
     def assertError(self, *args, **kwargs):
         self.assertRaises(getopt.GetoptError, *args, **kwargs)
@@ -135,7 +133,7 @@
         self.assertEqual(args, ['arg1', '-b', '1', '--alpha', '--beta=2'])
 
         # Posix style via POSIXLY_CORRECT
-        os.environ["POSIXLY_CORRECT"] = "1"
+        self.env["POSIXLY_CORRECT"] = "1"
         opts, args = getopt.gnu_getopt(cmdline, 'ab:', ['alpha', 'beta='])
         self.assertEqual(opts, [('-a', '')])
         self.assertEqual(args, ['arg1', '-b', '1', '--alpha', '--beta=2'])

Modified: python/branches/pep-0383/Lib/test/test_gettext.py
==============================================================================
--- python/branches/pep-0383/Lib/test/test_gettext.py	(original)
+++ python/branches/pep-0383/Lib/test/test_gettext.py	Sat May  2 21:20:57 2009
@@ -58,10 +58,6 @@
 MOFILE = os.path.join(LOCALEDIR, 'gettext.mo')
 UMOFILE = os.path.join(LOCALEDIR, 'ugettext.mo')
 MMOFILE = os.path.join(LOCALEDIR, 'metadata.mo')
-try:
-    LANG = os.environ['LANGUAGE']
-except:
-    LANG = 'en'
 
 
 class GettextBaseTest(unittest.TestCase):
@@ -77,10 +73,12 @@
         fp = open(MMOFILE, 'wb')
         fp.write(base64.decodestring(MMO_DATA))
         fp.close()
-        os.environ['LANGUAGE'] = 'xx'
+        self.env = support.EnvironmentVarGuard()
+        self.env['LANGUAGE'] = 'xx'
 
     def tearDown(self):
-        os.environ['LANGUAGE'] = LANG
+        self.env.__exit__()
+        del self.env
         shutil.rmtree(os.path.split(LOCALEDIR)[0])
 
 

Modified: python/branches/pep-0383/Lib/test/test_io.py
==============================================================================
--- python/branches/pep-0383/Lib/test/test_io.py	(original)
+++ python/branches/pep-0383/Lib/test/test_io.py	Sat May  2 21:20:57 2009
@@ -526,6 +526,12 @@
 class CommonBufferedTests:
     # Tests common to BufferedReader, BufferedWriter and BufferedRandom
 
+    def test_detach(self):
+        raw = self.MockRawIO()
+        buf = self.tp(raw)
+        self.assertIs(buf.detach(), raw)
+        self.assertRaises(ValueError, buf.detach)
+
     def test_fileno(self):
         rawio = self.MockRawIO()
         bufio = self.tp(rawio)
@@ -811,6 +817,14 @@
         bufio.flush()
         self.assertEquals(b"".join(rawio._write_stack), b"abcghi")
 
+    def test_detach_flush(self):
+        raw = self.MockRawIO()
+        buf = self.tp(raw)
+        buf.write(b"howdy!")
+        self.assertFalse(raw._write_stack)
+        buf.detach()
+        self.assertEqual(raw._write_stack, [b"howdy!"])
+
     def test_write(self):
         # Write to the buffered IO but don't overflow the buffer.
         writer = self.MockRawIO()
@@ -1052,6 +1066,10 @@
         pair = self.tp(self.MockRawIO(), self.MockRawIO())
         self.assertFalse(pair.closed)
 
+    def test_detach(self):
+        pair = self.tp(self.MockRawIO(), self.MockRawIO())
+        self.assertRaises(self.UnsupportedOperation, pair.detach)
+
     def test_constructor_max_buffer_size_deprecation(self):
         with support.check_warnings() as w:
             warnings.simplefilter("always", DeprecationWarning)
@@ -1480,6 +1498,19 @@
         self.assertRaises(TypeError, t.__init__, b, newline=42)
         self.assertRaises(ValueError, t.__init__, b, newline='xyzzy')
 
+    def test_detach(self):
+        r = self.BytesIO()
+        b = self.BufferedWriter(r)
+        t = self.TextIOWrapper(b)
+        self.assertIs(t.detach(), b)
+
+        t = self.TextIOWrapper(b, encoding="ascii")
+        t.write("howdy")
+        self.assertFalse(r.getvalue())
+        t.detach()
+        self.assertEqual(r.getvalue(), b"howdy")
+        self.assertRaises(ValueError, t.detach)
+
     def test_repr(self):
         raw = self.BytesIO("hello".encode("utf-8"))
         b = self.BufferedReader(raw)

Modified: python/branches/pep-0383/Lib/test/test_memoryio.py
==============================================================================
--- python/branches/pep-0383/Lib/test/test_memoryio.py	(original)
+++ python/branches/pep-0383/Lib/test/test_memoryio.py	Sat May  2 21:20:57 2009
@@ -57,6 +57,10 @@
 
 class MemoryTestMixin:
 
+    def test_detach(self):
+        buf = self.ioclass()
+        self.assertRaises(self.UnsupportedOperation, buf.detach)
+
     def write_ops(self, f, t):
         self.assertEqual(f.write(t("blah.")), 5)
         self.assertEqual(f.seek(0), 0)
@@ -336,6 +340,9 @@
 
 
 class PyBytesIOTest(MemoryTestMixin, MemorySeekTestMixin, unittest.TestCase):
+
+    UnsupportedOperation = pyio.UnsupportedOperation
+
     @staticmethod
     def buftype(s):
         return s.encode("ascii")
@@ -413,6 +420,7 @@
 class PyStringIOTest(MemoryTestMixin, MemorySeekTestMixin, unittest.TestCase):
     buftype = str
     ioclass = pyio.StringIO
+    UnsupportedOperation = pyio.UnsupportedOperation
     EOF = ""
 
     # TextIO-specific behaviour.
@@ -518,9 +526,11 @@
 
 class CBytesIOTest(PyBytesIOTest):
     ioclass = io.BytesIO
+    UnsupportedOperation = io.UnsupportedOperation
 
 class CStringIOTest(PyStringIOTest):
     ioclass = io.StringIO
+    UnsupportedOperation = io.UnsupportedOperation
 
     # XXX: For the Python version of io.StringIO, this is highly
     # dependent on the encoding used for the underlying buffer.

Modified: python/branches/pep-0383/Lib/test/test_ntpath.py
==============================================================================
--- python/branches/pep-0383/Lib/test/test_ntpath.py	(original)
+++ python/branches/pep-0383/Lib/test/test_ntpath.py	Sat May  2 21:20:57 2009
@@ -141,12 +141,11 @@
         tester("ntpath.normpath('//machine/share//a/b')", r'\\machine\share\a\b')
 
     def test_expandvars(self):
-        oldenv = os.environ.copy()
-        try:
-            os.environ.clear()
-            os.environ["foo"] = "bar"
-            os.environ["{foo"] = "baz1"
-            os.environ["{foo}"] = "baz2"
+        with support.EnvironmentVarGuard() as env:
+            env.clear()
+            env["foo"] = "bar"
+            env["{foo"] = "baz1"
+            env["{foo}"] = "baz2"
             tester('ntpath.expandvars("foo")', "foo")
             tester('ntpath.expandvars("$foo bar")', "bar bar")
             tester('ntpath.expandvars("${foo}bar")', "barbar")
@@ -166,9 +165,6 @@
             tester('ntpath.expandvars("%?bar%")', "%?bar%")
             tester('ntpath.expandvars("%foo%%bar")', "bar%bar")
             tester('ntpath.expandvars("\'%foo%\'%bar")', "\'%foo%\'%bar")
-        finally:
-            os.environ.clear()
-            os.environ.update(oldenv)
 
     def test_abspath(self):
         # ntpath.abspath() can only be used on a system with the "nt" module

Modified: python/branches/pep-0383/Lib/test/test_optparse.py
==============================================================================
--- python/branches/pep-0383/Lib/test/test_optparse.py	(original)
+++ python/branches/pep-0383/Lib/test/test_optparse.py	Sat May  2 21:20:57 2009
@@ -1449,7 +1449,7 @@
         # screws things up for other tests when it's part of the Python
         # test suite.
         with support.EnvironmentVarGuard() as env:
-            env.set('COLUMNS', str(columns))
+            env['COLUMNS'] = str(columns)
             return InterceptingOptionParser(option_list=options)
 
     def assertHelpEquals(self, expected_output):
@@ -1474,7 +1474,7 @@
 
     def test_help_title_formatter(self):
         with support.EnvironmentVarGuard() as env:
-            env.set("COLUMNS", "80")
+            env["COLUMNS"] = "80"
             self.parser.formatter = TitledHelpFormatter()
             self.assertHelpEquals(_expected_help_title_formatter)
 

Modified: python/branches/pep-0383/Lib/test/test_posixpath.py
==============================================================================
--- python/branches/pep-0383/Lib/test/test_posixpath.py	(original)
+++ python/branches/pep-0383/Lib/test/test_posixpath.py	Sat May  2 21:20:57 2009
@@ -420,18 +420,17 @@
             self.assert_(isinstance(posixpath.expanduser(b"~foo/"), bytes))
 
             with support.EnvironmentVarGuard() as env:
-                env.set('HOME', '/')
+                env['HOME'] = '/'
                 self.assertEqual(posixpath.expanduser("~"), "/")
 
         self.assertRaises(TypeError, posixpath.expanduser)
 
     def test_expandvars(self):
-        oldenv = os.environ.copy()
-        try:
-            os.environ.clear()
-            os.environ["foo"] = "bar"
-            os.environ["{foo"] = "baz1"
-            os.environ["{foo}"] = "baz2"
+        with support.EnvironmentVarGuard() as env:
+            env.clear()
+            env["foo"] = "bar"
+            env["{foo"] = "baz1"
+            env["{foo}"] = "baz2"
             self.assertEqual(posixpath.expandvars("foo"), "foo")
             self.assertEqual(posixpath.expandvars("$foo bar"), "bar bar")
             self.assertEqual(posixpath.expandvars("${foo}bar"), "barbar")
@@ -457,11 +456,7 @@
             self.assertEqual(posixpath.expandvars(b"${{foo}}"), b"baz1}")
             self.assertEqual(posixpath.expandvars(b"$foo$foo"), b"barbar")
             self.assertEqual(posixpath.expandvars(b"$bar$bar"), b"$bar$bar")
-        finally:
-            os.environ.clear()
-            os.environ.update(oldenv)
-
-        self.assertRaises(TypeError, posixpath.expandvars)
+            self.assertRaises(TypeError, posixpath.expandvars)
 
     def test_normpath(self):
         self.assertEqual(posixpath.normpath(""), ".")

Modified: python/branches/pep-0383/Lib/test/test_shutil.py
==============================================================================
--- python/branches/pep-0383/Lib/test/test_shutil.py	(original)
+++ python/branches/pep-0383/Lib/test/test_shutil.py	Sat May  2 21:20:57 2009
@@ -9,6 +9,7 @@
 import os.path
 from test import support
 from test.support import TESTFN
+TESTFN2 = TESTFN + "2"
 
 class TestShutil(unittest.TestCase):
     def test_rmtree_errors(self):
@@ -226,6 +227,38 @@
             finally:
                 shutil.rmtree(TESTFN, ignore_errors=True)
 
+    if hasattr(os, "mkfifo"):
+        # Issue #3002: copyfile and copytree block indefinitely on named pipes
+        def test_copyfile_named_pipe(self):
+            os.mkfifo(TESTFN)
+            try:
+                self.assertRaises(shutil.SpecialFileError,
+                                  shutil.copyfile, TESTFN, TESTFN2)
+                self.assertRaises(shutil.SpecialFileError,
+                                  shutil.copyfile, __file__, TESTFN)
+            finally:
+                os.remove(TESTFN)
+
+        def test_copytree_named_pipe(self):
+            os.mkdir(TESTFN)
+            try:
+                subdir = os.path.join(TESTFN, "subdir")
+                os.mkdir(subdir)
+                pipe = os.path.join(subdir, "mypipe")
+                os.mkfifo(pipe)
+                try:
+                    shutil.copytree(TESTFN, TESTFN2)
+                except shutil.Error as e:
+                    errors = e.args[0]
+                    self.assertEqual(len(errors), 1)
+                    src, dst, error_msg = errors[0]
+                    self.assertEqual("`%s` is a named pipe" % pipe, error_msg)
+                else:
+                    self.fail("shutil.Error should have been raised")
+            finally:
+                shutil.rmtree(TESTFN, ignore_errors=True)
+                shutil.rmtree(TESTFN2, ignore_errors=True)
+
 
 class TestMove(unittest.TestCase):
 

Modified: python/branches/pep-0383/Lib/test/test_tcl.py
==============================================================================
--- python/branches/pep-0383/Lib/test/test_tcl.py	(original)
+++ python/branches/pep-0383/Lib/test/test_tcl.py	Sat May  2 21:20:57 2009
@@ -144,23 +144,20 @@
         import sys
         if sys.platform.startswith(('win', 'darwin', 'cygwin')):
             return  # no failure possible on windows?
-        if 'DISPLAY' in os.environ:
-            old_display = os.environ['DISPLAY']
-            del os.environ['DISPLAY']
-            # on some platforms, deleting environment variables
-            # doesn't actually carry through to the process level
-            # because they don't support unsetenv
-            # If that's the case, abort.
-            display = os.popen('echo $DISPLAY').read().strip()
-            if display:
-                return
-        try:
+        with support.EnvironmentVarGuard() as env:
+            if 'DISPLAY' in os.environ:
+                del env['DISPLAY']
+                # on some platforms, deleting environment variables
+                # doesn't actually carry through to the process level
+                # because they don't support unsetenv
+                # If that's the case, abort.
+                display = os.popen('echo $DISPLAY').read().strip()
+                if display:
+                    return
+
             tcl = Tcl()
             self.assertRaises(TclError, tcl.winfo_geometry)
             self.assertRaises(TclError, tcl.loadtk)
-        finally:
-            if old_display is not None:
-                os.environ['DISPLAY'] = old_display
 
 def test_main():
     support.run_unittest(TclTest, TkinterTest)

Modified: python/branches/pep-0383/Lib/test/test_tempfile.py
==============================================================================
--- python/branches/pep-0383/Lib/test/test_tempfile.py	(original)
+++ python/branches/pep-0383/Lib/test/test_tempfile.py	Sat May  2 21:20:57 2009
@@ -153,7 +153,7 @@
             for envname in 'TMPDIR', 'TEMP', 'TMP':
                 dirname = os.getenv(envname)
                 if not dirname:
-                    env.set(envname, os.path.abspath(envname))
+                    env[envname] = os.path.abspath(envname)
 
             cand = tempfile._candidate_tempdir_list()
 

Modified: python/branches/pep-0383/Lib/test/test_types.py
==============================================================================
--- python/branches/pep-0383/Lib/test/test_types.py	(original)
+++ python/branches/pep-0383/Lib/test/test_types.py	Sat May  2 21:20:57 2009
@@ -538,10 +538,25 @@
         test(-1.0, ' f', '-1.000000')
         test( 1.0, '+f', '+1.000000')
         test(-1.0, '+f', '-1.000000')
-        test(1.1234e90, 'f', '1.1234e+90')
-        test(1.1234e90, 'F', '1.1234e+90')
-        test(1.1234e200, 'f', '1.1234e+200')
-        test(1.1234e200, 'F', '1.1234e+200')
+
+        # Python versions <= 3.0 switched from 'f' to 'g' formatting for
+        # values larger than 1e50.  No longer.
+        f = 1.1234e90
+        for fmt in 'f', 'F':
+            # don't do a direct equality check, since on some
+            # platforms only the first few digits of dtoa
+            # will be reliable
+            result = f.__format__(fmt)
+            self.assertEqual(len(result), 98)
+            self.assertEqual(result[-7], '.')
+            self.assert_(result[:12] in ('112340000000', '112339999999'))
+        f = 1.1234e200
+        for fmt in 'f', 'F':
+            result = f.__format__(fmt)
+            self.assertEqual(len(result), 208)
+            self.assertEqual(result[-7], '.')
+            self.assert_(result[:12] in ('112340000000', '112339999999'))
+
 
         test( 1.0, 'e', '1.000000e+00')
         test(-1.0, 'e', '-1.000000e+00')

Modified: python/branches/pep-0383/Lib/test/test_xmlrpc.py
==============================================================================
--- python/branches/pep-0383/Lib/test/test_xmlrpc.py	(original)
+++ python/branches/pep-0383/Lib/test/test_xmlrpc.py	Sat May  2 21:20:57 2009
@@ -572,7 +572,7 @@
 
     def test_cgi_get(self):
         with support.EnvironmentVarGuard() as env:
-            env.set('REQUEST_METHOD', 'GET')
+            env['REQUEST_METHOD'] = 'GET'
             # if the method is GET and no request_text is given, it runs handle_get
             # get sysout output
             tmp = sys.stdout
@@ -613,7 +613,7 @@
         sys.stdout = open(support.TESTFN, "w")
 
         with support.EnvironmentVarGuard() as env:
-            env.set('CONTENT_LENGTH', str(len(data)))
+            env['CONTENT_LENGTH'] = str(len(data))
             self.cgi.handle_request()
 
         sys.stdin.close()

Modified: python/branches/pep-0383/Lib/urllib/request.py
==============================================================================
--- python/branches/pep-0383/Lib/urllib/request.py	(original)
+++ python/branches/pep-0383/Lib/urllib/request.py	Sat May  2 21:20:57 2009
@@ -2244,18 +2244,11 @@
         # '<local>' string by the localhost entry and the corresponding
         # canonical entry.
         proxyOverride = proxyOverride.split(';')
-        i = 0
-        while i < len(proxyOverride):
-            if proxyOverride[i] == '<local>':
-                proxyOverride[i:i+1] = ['localhost',
-                                        '127.0.0.1',
-                                        socket.gethostname(),
-                                        socket.gethostbyname(
-                                            socket.gethostname())]
-            i += 1
-        # print proxyOverride
         # now check if we match one of the registry values.
         for test in proxyOverride:
+            if test == '<local>':
+                if '.' not in rawHost:
+                    return 1
             test = test.replace(".", r"\.")     # mask dots
             test = test.replace("*", r".*")     # change glob sequence
             test = test.replace("?", r".")      # change glob char

Modified: python/branches/pep-0383/Misc/NEWS
==============================================================================
--- python/branches/pep-0383/Misc/NEWS	(original)
+++ python/branches/pep-0383/Misc/NEWS	Sat May  2 21:20:57 2009
@@ -12,6 +12,15 @@
 Core and Builtins
 -----------------
 
+- Issue #5883: In the io module, the BufferedIOBase and TextIOBase ABCs have
+  received a new method, detach().  detach() disconnects the underlying stream
+  from the buffer or text IO and returns it.
+
+- Issue #5859: Remove switch from '%f' to '%g'-style formatting for
+  floats with absolute value over 1e50.  Also remove length
+  restrictions for float formatting: '%.67f' % 12.34 and '%.120e' %
+  12.34 no longer raise an exception.
+
 - Issue #1588: Add complex.__format__. For example, 
   format(complex(1, 2./3), '.5') now produces a sensible result.
 
@@ -98,6 +107,14 @@
 Library
 -------
 
+- The json module now works exclusively with str and not bytes.
+
+- Issue #3959: The ipaddr module has been added to the standard library.
+  Contributed by Google.
+
+- Issue #3002: ``shutil.copyfile()`` and ``shutil.copytree()`` now raise an
+  error when a named pipe is encountered, rather than blocking infinitely.
+
 - Issue #5857: tokenize.tokenize() now returns named tuples.
 
 - Issue #4305: ctypes should now build again on mipsel-linux-gnu
@@ -880,6 +897,9 @@
 Build
 -----
 
+- Issue #5726: Make Modules/ld_so_aix return the actual exit code of the
+  linker, rather than always exit successfully. Patch by Floris Bruynooghe.
+
 - Issue #4587: Add configure option --with-dbmliborder=db1:db2:... to specify
   the order that backends for the dbm extension are checked. 
 

Modified: python/branches/pep-0383/Modules/_io/bufferedio.c
==============================================================================
--- python/branches/pep-0383/Modules/_io/bufferedio.c	(original)
+++ python/branches/pep-0383/Modules/_io/bufferedio.c	Sat May  2 21:20:57 2009
@@ -73,6 +73,18 @@
     return NULL;
 }
 
+PyDoc_STRVAR(BufferedIOBase_detach_doc,
+    "Disconnect this buffer from its underlying raw stream and return it.\n"
+    "\n"
+    "After the raw stream has been detached, the buffer is in an unusable\n"
+    "state.\n");
+
+static PyObject *
+BufferedIOBase_detach(PyObject *self)
+{
+    return BufferedIOBase_unsupported("detach");
+}
+
 PyDoc_STRVAR(BufferedIOBase_read_doc,
     "Read and return up to n bytes.\n"
     "\n"
@@ -127,6 +139,7 @@
 
 
 static PyMethodDef BufferedIOBase_methods[] = {
+    {"detach", (PyCFunction)BufferedIOBase_detach, METH_NOARGS, BufferedIOBase_detach_doc},
     {"read", BufferedIOBase_read, METH_VARARGS, BufferedIOBase_read_doc},
     {"read1", BufferedIOBase_read1, METH_VARARGS, BufferedIOBase_read1_doc},
     {"readinto", BufferedIOBase_readinto, METH_VARARGS, NULL},
@@ -181,6 +194,7 @@
 
     PyObject *raw;
     int ok;    /* Initialized? */
+    int detached;
     int readable;
     int writable;
     
@@ -260,15 +274,25 @@
 
 #define CHECK_INITIALIZED(self) \
     if (self->ok <= 0) { \
-        PyErr_SetString(PyExc_ValueError, \
-            "I/O operation on uninitialized object"); \
+        if (self->detached) { \
+            PyErr_SetString(PyExc_ValueError, \
+                 "raw stream has been detached"); \
+        } else { \
+            PyErr_SetString(PyExc_ValueError, \
+                "I/O operation on uninitialized object"); \
+        } \
         return NULL; \
     }
 
 #define CHECK_INITIALIZED_INT(self) \
     if (self->ok <= 0) { \
-        PyErr_SetString(PyExc_ValueError, \
-            "I/O operation on uninitialized object"); \
+        if (self->detached) { \
+            PyErr_SetString(PyExc_ValueError, \
+                 "raw stream has been detached"); \
+        } else { \
+            PyErr_SetString(PyExc_ValueError, \
+                "I/O operation on uninitialized object"); \
+        } \
         return -1; \
     }
 
@@ -430,6 +454,24 @@
     return res;
 }
 
+/* detach */
+
+static PyObject *
+BufferedIOMixin_detach(BufferedObject *self, PyObject *args)
+{
+    PyObject *raw, *res;
+    CHECK_INITIALIZED(self)
+    res = PyObject_CallMethodObjArgs((PyObject *)self, _PyIO_str_flush, NULL);
+    if (res == NULL)
+        return NULL;
+    Py_DECREF(res);
+    raw = self->raw;
+    self->raw = NULL;
+    self->detached = 1;
+    self->ok = 0;
+    return raw;
+}
+
 /* Inquiries */
 
 static PyObject *
@@ -1101,6 +1143,7 @@
     PyObject *raw;
 
     self->ok = 0;
+    self->detached = 0;
 
     if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|n:BufferedReader", kwlist,
                                      &raw, &buffer_size)) {
@@ -1387,6 +1430,7 @@
 
 static PyMethodDef BufferedReader_methods[] = {
     /* BufferedIOMixin methods */
+    {"detach", (PyCFunction)BufferedIOMixin_detach, METH_NOARGS},
     {"flush", (PyCFunction)BufferedIOMixin_flush, METH_NOARGS},
     {"close", (PyCFunction)BufferedIOMixin_close, METH_NOARGS},
     {"seekable", (PyCFunction)BufferedIOMixin_seekable, METH_NOARGS},
@@ -1499,6 +1543,7 @@
     PyObject *raw;
 
     self->ok = 0;
+    self->detached = 0;
 
     if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|nn:BufferedReader", kwlist,
                                      &raw, &buffer_size, &max_buffer_size)) {
@@ -1745,6 +1790,7 @@
 static PyMethodDef BufferedWriter_methods[] = {
     /* BufferedIOMixin methods */
     {"close", (PyCFunction)BufferedIOMixin_close, METH_NOARGS},
+    {"detach", (PyCFunction)BufferedIOMixin_detach, METH_NOARGS},
     {"seekable", (PyCFunction)BufferedIOMixin_seekable, METH_NOARGS},
     {"readable", (PyCFunction)BufferedIOMixin_readable, METH_NOARGS},
     {"writable", (PyCFunction)BufferedIOMixin_writable, METH_NOARGS},
@@ -2089,6 +2135,7 @@
     PyObject *raw;
 
     self->ok = 0;
+    self->detached = 0;
 
     if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|nn:BufferedReader", kwlist,
                                      &raw, &buffer_size, &max_buffer_size)) {
@@ -2128,6 +2175,7 @@
 static PyMethodDef BufferedRandom_methods[] = {
     /* BufferedIOMixin methods */
     {"close", (PyCFunction)BufferedIOMixin_close, METH_NOARGS},
+    {"detach", (PyCFunction)BufferedIOMixin_detach, METH_NOARGS},
     {"seekable", (PyCFunction)BufferedIOMixin_seekable, METH_NOARGS},
     {"readable", (PyCFunction)BufferedIOMixin_readable, METH_NOARGS},
     {"writable", (PyCFunction)BufferedIOMixin_writable, METH_NOARGS},

Modified: python/branches/pep-0383/Modules/_io/textio.c
==============================================================================
--- python/branches/pep-0383/Modules/_io/textio.c	(original)
+++ python/branches/pep-0383/Modules/_io/textio.c	Sat May  2 21:20:57 2009
@@ -28,6 +28,19 @@
     return NULL;
 }
 
+PyDoc_STRVAR(TextIOBase_detach_doc,
+    "Separate the underlying buffer from the TextIOBase and return it.\n"
+    "\n"
+    "After the underlying buffer has been detached, the TextIO is in an\n"
+    "unusable state.\n"
+    );
+
+static PyObject *
+TextIOBase_detach(PyObject *self)
+{
+    return _unsupported("detach");
+}
+
 PyDoc_STRVAR(TextIOBase_read_doc,
     "Read at most n characters from stream.\n"
     "\n"
@@ -93,6 +106,7 @@
 
 
 static PyMethodDef TextIOBase_methods[] = {
+    {"detach", (PyCFunction)TextIOBase_detach, METH_NOARGS, TextIOBase_detach_doc},
     {"read", TextIOBase_read, METH_VARARGS, TextIOBase_read_doc},
     {"readline", TextIOBase_readline, METH_VARARGS, TextIOBase_readline_doc},
     {"write", TextIOBase_write, METH_VARARGS, TextIOBase_write_doc},
@@ -616,6 +630,7 @@
 {
     PyObject_HEAD
     int ok; /* initialized? */
+    int detached;
     Py_ssize_t chunk_size;
     PyObject *buffer;
     PyObject *encoding;
@@ -759,6 +774,7 @@
     int r;
 
     self->ok = 0;
+    self->detached = 0;
     if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|zzzi:fileio",
                                      kwlist, &buffer, &encoding, &errors,
                                      &newline, &line_buffering))
@@ -1059,19 +1075,45 @@
 
 #define CHECK_INITIALIZED(self) \
     if (self->ok <= 0) { \
-        PyErr_SetString(PyExc_ValueError, \
-            "I/O operation on uninitialized object"); \
+        if (self->detached) { \
+            PyErr_SetString(PyExc_ValueError, \
+                 "underlying buffer has been detached"); \
+        } else {                                   \
+            PyErr_SetString(PyExc_ValueError, \
+                "I/O operation on uninitialized object"); \
+        } \
         return NULL; \
     }
 
 #define CHECK_INITIALIZED_INT(self) \
     if (self->ok <= 0) { \
-        PyErr_SetString(PyExc_ValueError, \
-            "I/O operation on uninitialized object"); \
+        if (self->detached) { \
+            PyErr_SetString(PyExc_ValueError, \
+                 "underlying buffer has been detached"); \
+        } else {                                   \
+            PyErr_SetString(PyExc_ValueError, \
+                "I/O operation on uninitialized object"); \
+        } \
         return -1; \
     }
 
 
+static PyObject *
+TextIOWrapper_detach(PyTextIOWrapperObject *self)
+{
+    PyObject *buffer, *res;
+    CHECK_INITIALIZED(self);
+    res = PyObject_CallMethodObjArgs((PyObject *)self, _PyIO_str_flush, NULL);
+    if (res == NULL)
+        return NULL;
+    Py_DECREF(res);
+    buffer = self->buffer;
+    self->buffer = NULL;
+    self->detached = 1;
+    self->ok = 0;
+    return buffer;
+}
+
 Py_LOCAL_INLINE(const Py_UNICODE *)
 findchar(const Py_UNICODE *s, Py_ssize_t size, Py_UNICODE ch)
 {
@@ -2341,6 +2383,7 @@
 }
 
 static PyMethodDef TextIOWrapper_methods[] = {
+    {"detach", (PyCFunction)TextIOWrapper_detach, METH_NOARGS},
     {"write", (PyCFunction)TextIOWrapper_write, METH_VARARGS},
     {"read", (PyCFunction)TextIOWrapper_read, METH_VARARGS},
     {"readline", (PyCFunction)TextIOWrapper_readline, METH_VARARGS},

Modified: python/branches/pep-0383/Modules/_json.c
==============================================================================
--- python/branches/pep-0383/Modules/_json.c	(original)
+++ python/branches/pep-0383/Modules/_json.c	Sat May  2 21:20:57 2009
@@ -1,23 +1,160 @@
 #include "Python.h"
+#include "structmember.h"
+#if PY_VERSION_HEX < 0x02060000 && !defined(Py_TYPE)
+#define Py_TYPE(ob)     (((PyObject*)(ob))->ob_type)
+#endif
+#if PY_VERSION_HEX < 0x02050000 && !defined(PY_SSIZE_T_MIN)
+typedef int Py_ssize_t;
+#define PY_SSIZE_T_MAX INT_MAX
+#define PY_SSIZE_T_MIN INT_MIN
+#define PyInt_FromSsize_t PyInt_FromLong
+#define PyInt_AsSsize_t PyInt_AsLong
+#endif
+#ifndef Py_IS_FINITE
+#define Py_IS_FINITE(X) (!Py_IS_INFINITY(X) && !Py_IS_NAN(X))
+#endif
+
+#ifdef __GNUC__
+#define UNUSED __attribute__((__unused__))
+#else
+#define UNUSED
+#endif
+
+#define PyScanner_Check(op) PyObject_TypeCheck(op, &PyScannerType)
+#define PyScanner_CheckExact(op) (Py_TYPE(op) == &PyScannerType)
+#define PyEncoder_Check(op) PyObject_TypeCheck(op, &PyEncoderType)
+#define PyEncoder_CheckExact(op) (Py_TYPE(op) == &PyEncoderType)
+
+static PyTypeObject PyScannerType;
+static PyTypeObject PyEncoderType;
+
+typedef struct _PyScannerObject {
+    PyObject_HEAD
+    PyObject *strict;
+    PyObject *object_hook;
+    PyObject *object_pairs_hook;
+    PyObject *parse_float;
+    PyObject *parse_int;
+    PyObject *parse_constant;
+} PyScannerObject;
+
+static PyMemberDef scanner_members[] = {
+    {"strict", T_OBJECT, offsetof(PyScannerObject, strict), READONLY, "strict"},
+    {"object_hook", T_OBJECT, offsetof(PyScannerObject, object_hook), READONLY, "object_hook"},
+    {"object_pairs_hook", T_OBJECT, offsetof(PyScannerObject, object_pairs_hook), READONLY},
+    {"parse_float", T_OBJECT, offsetof(PyScannerObject, parse_float), READONLY, "parse_float"},
+    {"parse_int", T_OBJECT, offsetof(PyScannerObject, parse_int), READONLY, "parse_int"},
+    {"parse_constant", T_OBJECT, offsetof(PyScannerObject, parse_constant), READONLY, "parse_constant"},
+    {NULL}
+};
+
+typedef struct _PyEncoderObject {
+    PyObject_HEAD
+    PyObject *markers;
+    PyObject *defaultfn;
+    PyObject *encoder;
+    PyObject *indent;
+    PyObject *key_separator;
+    PyObject *item_separator;
+    PyObject *sort_keys;
+    PyObject *skipkeys;
+    int fast_encode;
+    int allow_nan;
+} PyEncoderObject;
+
+static PyMemberDef encoder_members[] = {
+    {"markers", T_OBJECT, offsetof(PyEncoderObject, markers), READONLY, "markers"},
+    {"default", T_OBJECT, offsetof(PyEncoderObject, defaultfn), READONLY, "default"},
+    {"encoder", T_OBJECT, offsetof(PyEncoderObject, encoder), READONLY, "encoder"},
+    {"indent", T_OBJECT, offsetof(PyEncoderObject, indent), READONLY, "indent"},
+    {"key_separator", T_OBJECT, offsetof(PyEncoderObject, key_separator), READONLY, "key_separator"},
+    {"item_separator", T_OBJECT, offsetof(PyEncoderObject, item_separator), READONLY, "item_separator"},
+    {"sort_keys", T_OBJECT, offsetof(PyEncoderObject, sort_keys), READONLY, "sort_keys"},
+    {"skipkeys", T_OBJECT, offsetof(PyEncoderObject, skipkeys), READONLY, "skipkeys"},
+    {NULL}
+};
+
+static PyObject *
+ascii_escape_unicode(PyObject *pystr);
+static PyObject *
+py_encode_basestring_ascii(PyObject* self UNUSED, PyObject *pystr);
+void init_json(void);
+static PyObject *
+scan_once_unicode(PyScannerObject *s, PyObject *pystr, Py_ssize_t idx, Py_ssize_t *next_idx_ptr);
+static PyObject *
+_build_rval_index_tuple(PyObject *rval, Py_ssize_t idx);
+static PyObject *
+scanner_new(PyTypeObject *type, PyObject *args, PyObject *kwds);
+static int
+scanner_init(PyObject *self, PyObject *args, PyObject *kwds);
+static void
+scanner_dealloc(PyObject *self);
+static int
+scanner_clear(PyObject *self);
+static PyObject *
+encoder_new(PyTypeObject *type, PyObject *args, PyObject *kwds);
+static int
+encoder_init(PyObject *self, PyObject *args, PyObject *kwds);
+static void
+encoder_dealloc(PyObject *self);
+static int
+encoder_clear(PyObject *self);
+static int
+encoder_listencode_list(PyEncoderObject *s, PyObject *rval, PyObject *seq, Py_ssize_t indent_level);
+static int
+encoder_listencode_obj(PyEncoderObject *s, PyObject *rval, PyObject *obj, Py_ssize_t indent_level);
+static int
+encoder_listencode_dict(PyEncoderObject *s, PyObject *rval, PyObject *dct, Py_ssize_t indent_level);
+static PyObject *
+_encoded_const(PyObject *obj);
+static void
+raise_errmsg(char *msg, PyObject *s, Py_ssize_t end);
+static PyObject *
+encoder_encode_string(PyEncoderObject *s, PyObject *obj);
+static int
+_convertPyInt_AsSsize_t(PyObject *o, Py_ssize_t *size_ptr);
+static PyObject *
+_convertPyInt_FromSsize_t(Py_ssize_t *size_ptr);
+static PyObject *
+encoder_encode_float(PyEncoderObject *s, PyObject *obj);
 
-#define DEFAULT_ENCODING "utf-8"
 #define S_CHAR(c) (c >= ' ' && c <= '~' && c != '\\' && c != '"')
-#define MIN_EXPANSION 6
+#define IS_WHITESPACE(c) (((c) == ' ') || ((c) == '\t') || ((c) == '\n') || ((c) == '\r'))
 
+#define MIN_EXPANSION 6
 #ifdef Py_UNICODE_WIDE
 #define MAX_EXPANSION (2 * MIN_EXPANSION)
 #else
 #define MAX_EXPANSION MIN_EXPANSION
 #endif
 
+static int
+_convertPyInt_AsSsize_t(PyObject *o, Py_ssize_t *size_ptr)
+{
+    /* PyObject to Py_ssize_t converter */
+    *size_ptr = PyLong_AsSsize_t(o);
+    if (*size_ptr == -1 && PyErr_Occurred())
+        return 0;
+    return 1;
+}
+
+static PyObject *
+_convertPyInt_FromSsize_t(Py_ssize_t *size_ptr)
+{
+    /* Py_ssize_t to PyObject converter */
+    return PyLong_FromSsize_t(*size_ptr);
+}
+
 static Py_ssize_t
-ascii_escape_char(Py_UNICODE c, char *output, Py_ssize_t chars)
+ascii_escape_unichar(Py_UNICODE c, Py_UNICODE *output, Py_ssize_t chars)
 {
-    Py_UNICODE x;
+    /* Escape unicode code point c to ASCII escape sequences
+    in char *output. output must have at least 12 bytes unused to
+    accommodate an escaped surrogate pair "\uXXXX\uXXXX" */
     output[chars++] = '\\';
     switch (c) {
-        case '\\': output[chars++] = (char)c; break;
-        case '"': output[chars++] = (char)c; break;
+        case '\\': output[chars++] = c; break;
+        case '"': output[chars++] = c; break;
         case '\b': output[chars++] = 'b'; break;
         case '\f': output[chars++] = 'f'; break;
         case '\n': output[chars++] = 'n'; break;
@@ -30,27 +167,19 @@
                 Py_UNICODE v = c - 0x10000;
                 c = 0xd800 | ((v >> 10) & 0x3ff);
                 output[chars++] = 'u';
-                x = (c & 0xf000) >> 12;
-                output[chars++] = (x < 10) ? '0' + x : 'a' + (x - 10);
-                x = (c & 0x0f00) >> 8;
-                output[chars++] = (x < 10) ? '0' + x : 'a' + (x - 10);
-                x = (c & 0x00f0) >> 4;
-                output[chars++] = (x < 10) ? '0' + x : 'a' + (x - 10);
-                x = (c & 0x000f);
-                output[chars++] = (x < 10) ? '0' + x : 'a' + (x - 10);
+                output[chars++] = "0123456789abcdef"[(c >> 12) & 0xf];
+                output[chars++] = "0123456789abcdef"[(c >>  8) & 0xf];
+                output[chars++] = "0123456789abcdef"[(c >>  4) & 0xf];
+                output[chars++] = "0123456789abcdef"[(c      ) & 0xf];
                 c = 0xdc00 | (v & 0x3ff);
                 output[chars++] = '\\';
             }
 #endif
             output[chars++] = 'u';
-            x = (c & 0xf000) >> 12;
-            output[chars++] = (x < 10) ? '0' + x : 'a' + (x - 10);
-            x = (c & 0x0f00) >> 8;
-            output[chars++] = (x < 10) ? '0' + x : 'a' + (x - 10);
-            x = (c & 0x00f0) >> 4;
-            output[chars++] = (x < 10) ? '0' + x : 'a' + (x - 10);
-            x = (c & 0x000f);
-            output[chars++] = (x < 10) ? '0' + x : 'a' + (x - 10);
+            output[chars++] = "0123456789abcdef"[(c >> 12) & 0xf];
+            output[chars++] = "0123456789abcdef"[(c >>  8) & 0xf];
+            output[chars++] = "0123456789abcdef"[(c >>  4) & 0xf];
+            output[chars++] = "0123456789abcdef"[(c      ) & 0xf];
     }
     return chars;
 }
@@ -58,118 +187,66 @@
 static PyObject *
 ascii_escape_unicode(PyObject *pystr)
 {
+    /* Take a PyUnicode pystr and return a new ASCII-only escaped PyUnicode */
     Py_ssize_t i;
     Py_ssize_t input_chars;
     Py_ssize_t output_size;
+    Py_ssize_t max_output_size;
     Py_ssize_t chars;
     PyObject *rval;
-    char *output;
+    Py_UNICODE *output;
     Py_UNICODE *input_unicode;
 
     input_chars = PyUnicode_GET_SIZE(pystr);
     input_unicode = PyUnicode_AS_UNICODE(pystr);
+
     /* One char input can be up to 6 chars output, estimate 4 of these */
     output_size = 2 + (MIN_EXPANSION * 4) + input_chars;
-    rval = PyBytes_FromStringAndSize(NULL, output_size);
+    max_output_size = 2 + (input_chars * MAX_EXPANSION);
+    rval = PyUnicode_FromStringAndSize(NULL, output_size);
     if (rval == NULL) {
         return NULL;
     }
-    output = PyBytes_AS_STRING(rval);
+    output = PyUnicode_AS_UNICODE(rval);
     chars = 0;
     output[chars++] = '"';
     for (i = 0; i < input_chars; i++) {
         Py_UNICODE c = input_unicode[i];
         if (S_CHAR(c)) {
-            output[chars++] = (char)c;
+            output[chars++] = c;
         }
-	else {
-            chars = ascii_escape_char(c, output, chars);
+        else {
+            chars = ascii_escape_unichar(c, output, chars);
         }
         if (output_size - chars < (1 + MAX_EXPANSION)) {
             /* There's more than four, so let's resize by a lot */
-            output_size *= 2;
+            Py_ssize_t new_output_size = output_size * 2;
             /* This is an upper bound */
-            if (output_size > 2 + (input_chars * MAX_EXPANSION)) {
-                output_size = 2 + (input_chars * MAX_EXPANSION);
-            }
-            if (_PyBytes_Resize(&rval, output_size) == -1) {
-                return NULL;
-            }
-            output = PyBytes_AS_STRING(rval);
-        }
-    }
-    output[chars++] = '"';
-    if (_PyBytes_Resize(&rval, chars) == -1) {
-        return NULL;
-    }
-    return rval;
-}
-
-static PyObject *
-ascii_escape_str(PyObject *pystr)
-{
-    Py_ssize_t i;
-    Py_ssize_t input_chars;
-    Py_ssize_t output_size;
-    Py_ssize_t chars;
-    PyObject *rval;
-    char *output;
-    char *input_str;
-
-    input_chars = PyBytes_GET_SIZE(pystr);
-    input_str = PyBytes_AS_STRING(pystr);
-    /* One char input can be up to 6 chars output, estimate 4 of these */
-    output_size = 2 + (MIN_EXPANSION * 4) + input_chars;
-    rval = PyBytes_FromStringAndSize(NULL, output_size);
-    if (rval == NULL) {
-        return NULL;
-    }
-    output = PyBytes_AS_STRING(rval);
-    chars = 0;
-    output[chars++] = '"';
-    for (i = 0; i < input_chars; i++) {
-        Py_UNICODE c = (Py_UNICODE)input_str[i];
-        if (S_CHAR(c)) {
-            output[chars++] = (char)c;
-        }
-	else if (c > 0x7F) {
-            /* We hit a non-ASCII character, bail to unicode mode */
-            PyObject *uni;
-            Py_DECREF(rval);
-            uni = PyUnicode_DecodeUTF8(input_str, input_chars, "strict");
-            if (uni == NULL) {
-                return NULL;
-            }
-            rval = ascii_escape_unicode(uni);
-            Py_DECREF(uni);
-            return rval;
-        }
-	else {
-            chars = ascii_escape_char(c, output, chars);
-        }
-        /* An ASCII char can't possibly expand to a surrogate! */
-        if (output_size - chars < (1 + MIN_EXPANSION)) {
-            /* There's more than four, so let's resize by a lot */
-            output_size *= 2;
-            if (output_size > 2 + (input_chars * MIN_EXPANSION)) {
-                output_size = 2 + (input_chars * MIN_EXPANSION);
+            if (new_output_size > max_output_size) {
+                new_output_size = max_output_size;
             }
-            if (_PyBytes_Resize(&rval, output_size) == -1) {
-                return NULL;
+            /* Make sure that the output size changed before resizing */
+            if (new_output_size != output_size) {
+                output_size = new_output_size;
+                if (PyUnicode_Resize(&rval, output_size) == -1) {
+                    return NULL;
+                }
+                output = PyUnicode_AS_UNICODE(rval);
             }
-            output = PyBytes_AS_STRING(rval);
         }
     }
     output[chars++] = '"';
-    if (_PyBytes_Resize(&rval, chars) == -1) {
+    if (PyUnicode_Resize(&rval, chars) == -1) {
         return NULL;
     }
     return rval;
 }
 
-void
+static void
 raise_errmsg(char *msg, PyObject *s, Py_ssize_t end)
 {
+    /* Use the Python function json.decoder.errmsg to raise a nice
+    looking ValueError exception */
     static PyObject *errmsg_fn = NULL;
     PyObject *pymsg;
     if (errmsg_fn == NULL) {
@@ -177,63 +254,73 @@
         if (decoder == NULL)
             return;
         errmsg_fn = PyObject_GetAttrString(decoder, "errmsg");
+        Py_DECREF(decoder);
         if (errmsg_fn == NULL)
             return;
-        Py_DECREF(decoder);
     }
-    pymsg = PyObject_CallFunction(errmsg_fn, "(zOn)", msg, s, end);
+    pymsg = PyObject_CallFunction(errmsg_fn, "(zOO&)", msg, s, _convertPyInt_FromSsize_t, &end);
     if (pymsg) {
         PyErr_SetObject(PyExc_ValueError, pymsg);
         Py_DECREF(pymsg);
     }
-/*
-
-def linecol(doc, pos):
-    lineno = doc.count('\n', 0, pos) + 1
-    if lineno == 1:
-        colno = pos
-    else:
-        colno = pos - doc.rindex('\n', 0, pos)
-    return lineno, colno
-
-def errmsg(msg, doc, pos, end=None):
-    lineno, colno = linecol(doc, pos)
-    if end is None:
-        return '%s: line %d column %d (char %d)' % (msg, lineno, colno, pos)
-    endlineno, endcolno = linecol(doc, end)
-    return '%s: line %d column %d - line %d column %d (char %d - %d)' % (
-        msg, lineno, colno, endlineno, endcolno, pos, end)
-
-*/
 }
 
 static PyObject *
 join_list_unicode(PyObject *lst)
 {
-    static PyObject *ustr = NULL;
-    static PyObject *joinstr = NULL;
-    if (ustr == NULL) {
-        Py_UNICODE c = 0;
-        ustr = PyUnicode_FromUnicode(&c, 0);
+    /* return u''.join(lst) */
+    static PyObject *sep = NULL;
+    if (sep == NULL) {
+        sep = PyUnicode_FromStringAndSize("", 0);
+        if (sep == NULL)
+            return NULL;
+    }
+    return PyUnicode_Join(sep, lst);
+}
+
+static PyObject *
+_build_rval_index_tuple(PyObject *rval, Py_ssize_t idx) {
+    /* return (rval, idx) tuple, stealing reference to rval */
+    PyObject *tpl;
+    PyObject *pyidx;
+    /*
+    steal a reference to rval, returns (rval, idx)
+    */
+    if (rval == NULL) {
+        return NULL;
     }
-    if (joinstr == NULL) {
-        joinstr = PyUnicode_InternFromString("join");
+    pyidx = PyLong_FromSsize_t(idx);
+    if (pyidx == NULL) {
+        Py_DECREF(rval);
+        return NULL;
     }
-    if (joinstr == NULL || ustr == NULL) {
+    tpl = PyTuple_New(2);
+    if (tpl == NULL) {
+        Py_DECREF(pyidx);
+        Py_DECREF(rval);
         return NULL;
     }
-    return PyObject_CallMethodObjArgs(ustr, joinstr, lst, NULL);
+    PyTuple_SET_ITEM(tpl, 0, rval);
+    PyTuple_SET_ITEM(tpl, 1, pyidx);
+    return tpl;
 }
 
 static PyObject *
-scanstring_str(PyObject *pystr, Py_ssize_t end, char *encoding, int strict)
+scanstring_unicode(PyObject *pystr, Py_ssize_t end, int strict, Py_ssize_t *next_end_ptr)
 {
+    /* Read the JSON string from PyUnicode pystr.
+    end is the index of the first character after the quote.
+    if strict is zero then literal control characters are allowed
+    *next_end_ptr is a return-by-reference index of the character
+        after the end quote
+
+    Return value is a new PyUnicode
+    */
     PyObject *rval;
-    Py_ssize_t len = PyBytes_GET_SIZE(pystr);
+    Py_ssize_t len = PyUnicode_GET_SIZE(pystr);
     Py_ssize_t begin = end - 1;
     Py_ssize_t next = begin;
-    char *buf = PyBytes_AS_STRING(pystr);
-    Py_buffer info;
+    const Py_UNICODE *buf = PyUnicode_AS_UNICODE(pystr);
     PyObject *chunks = PyList_New(0);
     if (chunks == NULL) {
         goto bail;
@@ -262,16 +349,7 @@
         }
         /* Pick up this chunk if it's not zero length */
         if (next != end) {
-            PyObject *strchunk;
-            if (PyBuffer_FillInfo(&info, NULL, &buf[end], next - end, 1, 0) < 0) {
-                goto bail;
-            }
-            strchunk = PyMemoryView_FromBuffer(&info);
-            if (strchunk == NULL) {
-                goto bail;
-            }
-            chunk = PyUnicode_FromEncodedObject(strchunk, encoding, NULL);
-            Py_DECREF(strchunk);
+            chunk = PyUnicode_FromUnicode(&buf[end], next - end);
             if (chunk == NULL) {
                 goto bail;
             }
@@ -320,18 +398,18 @@
             }
             /* Decode 4 hex digits */
             for (; next < end; next++) {
-                Py_ssize_t shl = (end - next - 1) << 2;
                 Py_UNICODE digit = buf[next];
+                c <<= 4;
                 switch (digit) {
                     case '0': case '1': case '2': case '3': case '4':
                     case '5': case '6': case '7': case '8': case '9':
-                        c |= (digit - '0') << shl; break;
+                        c |= (digit - '0'); break;
                     case 'a': case 'b': case 'c': case 'd': case 'e':
                     case 'f':
-                        c |= (digit - 'a' + 10) << shl; break;
+                        c |= (digit - 'a' + 10); break;
                     case 'A': case 'B': case 'C': case 'D': case 'E':
                     case 'F':
-                        c |= (digit - 'A' + 10) << shl; break;
+                        c |= (digit - 'A' + 10); break;
                     default:
                         raise_errmsg("Invalid \\uXXXX escape", pystr, end - 5);
                         goto bail;
@@ -339,38 +417,46 @@
             }
 #ifdef Py_UNICODE_WIDE
             /* Surrogate pair */
-            if (c >= 0xd800 && c <= 0xdbff) {
+            if ((c & 0xfc00) == 0xd800) {
                 Py_UNICODE c2 = 0;
                 if (end + 6 >= len) {
-                    raise_errmsg("Invalid \\uXXXX\\uXXXX surrogate pair", pystr,
-                        end - 5);
+                    raise_errmsg("Unpaired high surrogate", pystr, end - 5);
+                    goto bail;
                 }
                 if (buf[next++] != '\\' || buf[next++] != 'u') {
-                    raise_errmsg("Invalid \\uXXXX\\uXXXX surrogate pair", pystr,
-                        end - 5);
+                    raise_errmsg("Unpaired high surrogate", pystr, end - 5);
+                    goto bail;
                 }
                 end += 6;
                 /* Decode 4 hex digits */
                 for (; next < end; next++) {
-                    Py_ssize_t shl = (end - next - 1) << 2;
                     Py_UNICODE digit = buf[next];
+                    c2 <<= 4;
                     switch (digit) {
                         case '0': case '1': case '2': case '3': case '4':
                         case '5': case '6': case '7': case '8': case '9':
-                            c2 |= (digit - '0') << shl; break;
+                            c2 |= (digit - '0'); break;
                         case 'a': case 'b': case 'c': case 'd': case 'e':
                         case 'f':
-                            c2 |= (digit - 'a' + 10) << shl; break;
+                            c2 |= (digit - 'a' + 10); break;
                         case 'A': case 'B': case 'C': case 'D': case 'E':
                         case 'F':
-                            c2 |= (digit - 'A' + 10) << shl; break;
+                            c2 |= (digit - 'A' + 10); break;
                         default:
                             raise_errmsg("Invalid \\uXXXX escape", pystr, end - 5);
                             goto bail;
                     }
                 }
+                if ((c2 & 0xfc00) != 0xdc00) {
+                    raise_errmsg("Unpaired high surrogate", pystr, end - 5);
+                    goto bail;
+                }
                 c = 0x10000 + (((c - 0xd800) << 10) | (c2 - 0xdc00));
             }
+            else if ((c & 0xfc00) == 0xdc00) {
+                raise_errmsg("Unpaired low surrogate", pystr, end - 5);
+                goto bail;
+            }
 #endif
         }
         chunk = PyUnicode_FromUnicode(&c, 1);
@@ -388,237 +474,1176 @@
     if (rval == NULL) {
         goto bail;
     }
-    Py_CLEAR(chunks);
-    return Py_BuildValue("(Nn)", rval, end);
+    Py_DECREF(chunks);
+    *next_end_ptr = end;
+    return rval;
 bail:
+    *next_end_ptr = -1;
     Py_XDECREF(chunks);
     return NULL;
 }
 
+PyDoc_STRVAR(pydoc_scanstring,
+    "scanstring(basestring, end, strict=True) -> (bytes, end)\n"
+    "\n"
+    "Scan the string s for a JSON string. End is the index of the\n"
+    "character in s after the quote that started the JSON string.\n"
+    "Unescapes all valid JSON string escape sequences and raises ValueError\n"
+    "on attempt to decode an invalid string. If strict is False then literal\n"
+    "control characters are allowed in the string.\n"
+    "\n"
+    "Returns a tuple of the decoded string and the index of the character in s\n"
+    "after the end quote."
+);
 
 static PyObject *
-scanstring_unicode(PyObject *pystr, Py_ssize_t end, int strict)
+py_scanstring(PyObject* self UNUSED, PyObject *args)
 {
+    PyObject *pystr;
     PyObject *rval;
-    Py_ssize_t len = PyUnicode_GET_SIZE(pystr);
-    Py_ssize_t begin = end - 1;
-    Py_ssize_t next = begin;
-    const Py_UNICODE *buf = PyUnicode_AS_UNICODE(pystr);
-    PyObject *chunks = PyList_New(0);
-    if (chunks == NULL) {
-        goto bail;
+    Py_ssize_t end;
+    Py_ssize_t next_end = -1;
+    int strict = 1;
+    if (!PyArg_ParseTuple(args, "OO&|i:scanstring", &pystr, _convertPyInt_AsSsize_t, &end, &strict)) {
+        return NULL;
     }
-    if (end < 0 || len <= end) {
-        PyErr_SetString(PyExc_ValueError, "end is out of bounds");
-        goto bail;
+    if (PyUnicode_Check(pystr)) {
+        rval = scanstring_unicode(pystr, end, strict, &next_end);
     }
-    while (1) {
-        /* Find the end of the string or the next escape */
-        Py_UNICODE c = 0;
-        PyObject *chunk = NULL;
-        for (next = end; next < len; next++) {
-            c = buf[next];
-            if (c == '"' || c == '\\') {
-                break;
-            }
-            else if (strict && c <= 0x1f) {
-                raise_errmsg("Invalid control character at", pystr, next);
+    else {
+        PyErr_Format(PyExc_TypeError, 
+                     "first argument must be a string or bytes, not %.80s",
+                     Py_TYPE(pystr)->tp_name);
+        return NULL;
+    }
+    return _build_rval_index_tuple(rval, next_end);
+}
+
+PyDoc_STRVAR(pydoc_encode_basestring_ascii,
+    "encode_basestring_ascii(basestring) -> bytes\n"
+    "\n"
+    "Return an ASCII-only JSON representation of a Python string"
+);
+
+static PyObject *
+py_encode_basestring_ascii(PyObject* self UNUSED, PyObject *pystr)
+{
+    PyObject *rval;
+    /* Return an ASCII-only JSON representation of a Python string */
+    /* METH_O */
+    if (PyUnicode_Check(pystr)) {
+        rval = ascii_escape_unicode(pystr);
+    }
+    else {
+        PyErr_Format(PyExc_TypeError,
+                     "first argument must be a string, not %.80s",
+                     Py_TYPE(pystr)->tp_name);
+        return NULL;
+    }
+    return rval;
+}
+
+static void
+scanner_dealloc(PyObject *self)
+{
+    /* Deallocate scanner object */
+    scanner_clear(self);
+    Py_TYPE(self)->tp_free(self);
+}
+
+static int
+scanner_traverse(PyObject *self, visitproc visit, void *arg)
+{
+    PyScannerObject *s;
+    assert(PyScanner_Check(self));
+    s = (PyScannerObject *)self;
+    Py_VISIT(s->strict);
+    Py_VISIT(s->object_hook);
+    Py_VISIT(s->object_pairs_hook);
+    Py_VISIT(s->parse_float);
+    Py_VISIT(s->parse_int);
+    Py_VISIT(s->parse_constant);
+    return 0;
+}
+
+static int
+scanner_clear(PyObject *self)
+{
+    PyScannerObject *s;
+    assert(PyScanner_Check(self));
+    s = (PyScannerObject *)self;
+    Py_CLEAR(s->strict);
+    Py_CLEAR(s->object_hook);
+    Py_CLEAR(s->object_pairs_hook);
+    Py_CLEAR(s->parse_float);
+    Py_CLEAR(s->parse_int);
+    Py_CLEAR(s->parse_constant);
+    return 0;
+}
+
+static PyObject *
+_parse_object_unicode(PyScannerObject *s, PyObject *pystr, Py_ssize_t idx, Py_ssize_t *next_idx_ptr) {
+    /* Read a JSON object from PyUnicode pystr.
+    idx is the index of the first character after the opening curly brace.
+    *next_idx_ptr is a return-by-reference index to the first character after
+        the closing curly brace.
+
+    Returns a new PyObject (usually a dict, but object_hook can change that)
+    */
+    Py_UNICODE *str = PyUnicode_AS_UNICODE(pystr);
+    Py_ssize_t end_idx = PyUnicode_GET_SIZE(pystr) - 1;
+    PyObject *val = NULL;
+    PyObject *rval = PyList_New(0);
+    PyObject *key = NULL;
+    int strict = PyObject_IsTrue(s->strict);
+    Py_ssize_t next_idx;
+    if (rval == NULL)
+        return NULL;
+
+    /* skip whitespace after { */
+    while (idx <= end_idx && IS_WHITESPACE(str[idx])) idx++;
+
+    /* only loop if the object is non-empty */
+    if (idx <= end_idx && str[idx] != '}') {
+        while (idx <= end_idx) {
+            /* read key */
+            if (str[idx] != '"') {
+                raise_errmsg("Expecting property name", pystr, idx);
                 goto bail;
             }
-        }
-        if (!(c == '"' || c == '\\')) {
-            raise_errmsg("Unterminated string starting at", pystr, begin);
-            goto bail;
-        }
-        /* Pick up this chunk if it's not zero length */
-        if (next != end) {
-            chunk = PyUnicode_FromUnicode(&buf[end], next - end);
-            if (chunk == NULL) {
+            key = scanstring_unicode(pystr, idx + 1, strict, &next_idx);
+            if (key == NULL)
+                goto bail;
+            idx = next_idx;
+
+            /* skip whitespace between key and : delimiter, read :, skip whitespace */
+            while (idx <= end_idx && IS_WHITESPACE(str[idx])) idx++;
+            if (idx > end_idx || str[idx] != ':') {
+                raise_errmsg("Expecting : delimiter", pystr, idx);
                 goto bail;
             }
-            if (PyList_Append(chunks, chunk)) {
-                Py_DECREF(chunk);
+            idx++;
+            while (idx <= end_idx && IS_WHITESPACE(str[idx])) idx++;
+
+            /* read any JSON term */
+            val = scan_once_unicode(s, pystr, idx, &next_idx);
+            if (val == NULL)
                 goto bail;
+
+            {
+                PyObject *tuple = PyTuple_Pack(2, key, val);
+                if (tuple == NULL)
+                    goto bail;
+                if (PyList_Append(rval, tuple) == -1) {
+                    Py_DECREF(tuple);
+                    goto bail;
+                }
+                Py_DECREF(tuple);
             }
-            Py_DECREF(chunk);
-        }
-        next++;
-        if (c == '"') {
-            end = next;
-            break;
-        }
-        if (next == len) {
-            raise_errmsg("Unterminated string starting at", pystr, begin);
-            goto bail;
-        }
-        c = buf[next];
-        if (c != 'u') {
-            /* Non-unicode backslash escapes */
-            end = next + 1;
-            switch (c) {
-                case '"': break;
-                case '\\': break;
-                case '/': break;
-                case 'b': c = '\b'; break;
-                case 'f': c = '\f'; break;
-                case 'n': c = '\n'; break;
-                case 'r': c = '\r'; break;
-                case 't': c = '\t'; break;
-                default: c = 0;
+
+            Py_CLEAR(key);
+            Py_CLEAR(val);
+            idx = next_idx;
+
+            /* skip whitespace before } or , */
+            while (idx <= end_idx && IS_WHITESPACE(str[idx])) idx++;
+
+            /* bail if the object is closed or we didn't get the , delimiter */
+            if (idx > end_idx) break;
+            if (str[idx] == '}') {
+                break;
             }
-            if (c == 0) {
-                raise_errmsg("Invalid \\escape", pystr, end - 2);
+            else if (str[idx] != ',') {
+                raise_errmsg("Expecting , delimiter", pystr, idx);
                 goto bail;
             }
+            idx++;
+
+            /* skip whitespace after , delimiter */
+            while (idx <= end_idx && IS_WHITESPACE(str[idx])) idx++;
         }
-        else {
-            c = 0;
-            next++;
-            end = next + 4;
-            if (end >= len) {
-                raise_errmsg("Invalid \\uXXXX escape", pystr, next - 1);
+    }
+
+    /* verify that idx < end_idx, str[idx] should be '}' */
+    if (idx > end_idx || str[idx] != '}') {
+        raise_errmsg("Expecting object", pystr, end_idx);
+        goto bail;
+    }
+
+    *next_idx_ptr = idx + 1;
+
+    if (s->object_pairs_hook != Py_None) {
+        val = PyObject_CallFunctionObjArgs(s->object_pairs_hook, rval, NULL);
+        if (val == NULL)
+            goto bail;
+        Py_DECREF(rval);
+        return val;
+    }
+
+    val = PyDict_New();
+    if (val == NULL)
+        goto bail;
+    if (PyDict_MergeFromSeq2(val, rval, 1) == -1)
+        goto bail;
+    Py_DECREF(rval);
+    rval = val;
+
+    /* if object_hook is not None: rval = object_hook(rval) */
+    if (s->object_hook != Py_None) {
+        val = PyObject_CallFunctionObjArgs(s->object_hook, rval, NULL);
+        if (val == NULL)
+            goto bail;
+        Py_DECREF(rval);
+        rval = val;
+        val = NULL;
+    }
+    return rval;
+bail:
+    Py_XDECREF(key);
+    Py_XDECREF(val);
+    Py_DECREF(rval);
+    return NULL;
+}
+
+static PyObject *
+_parse_array_unicode(PyScannerObject *s, PyObject *pystr, Py_ssize_t idx, Py_ssize_t *next_idx_ptr) {
+    /* Read a JSON array from PyUnicode pystr.
+    idx is the index of the first character after the opening bracket.
+    *next_idx_ptr is a return-by-reference index to the first character after
+        the closing bracket.
+
+    Returns a new PyList
+    */
+    Py_UNICODE *str = PyUnicode_AS_UNICODE(pystr);
+    Py_ssize_t end_idx = PyUnicode_GET_SIZE(pystr) - 1;
+    PyObject *val = NULL;
+    PyObject *rval = PyList_New(0);
+    Py_ssize_t next_idx;
+    if (rval == NULL)
+        return NULL;
+
+    /* skip whitespace after [ */
+    while (idx <= end_idx && IS_WHITESPACE(str[idx])) idx++;
+
+    /* only loop if the array is non-empty */
+    if (idx <= end_idx && str[idx] != ']') {
+        while (idx <= end_idx) {
+
+            /* read any JSON term  */
+            val = scan_once_unicode(s, pystr, idx, &next_idx);
+            if (val == NULL)
                 goto bail;
+
+            if (PyList_Append(rval, val) == -1)
+                goto bail;
+
+            Py_CLEAR(val);
+            idx = next_idx;
+
+            /* skip whitespace between term and , */
+            while (idx <= end_idx && IS_WHITESPACE(str[idx])) idx++;
+
+            /* bail if the array is closed or we didn't get the , delimiter */
+            if (idx > end_idx) break;
+            if (str[idx] == ']') {
+                break;
             }
-            /* Decode 4 hex digits */
-            for (; next < end; next++) {
-                Py_ssize_t shl = (end - next - 1) << 2;
-                Py_UNICODE digit = buf[next];
-                switch (digit) {
-                    case '0': case '1': case '2': case '3': case '4':
-                    case '5': case '6': case '7': case '8': case '9':
-                        c |= (digit - '0') << shl; break;
-                    case 'a': case 'b': case 'c': case 'd': case 'e':
-                    case 'f':
-                        c |= (digit - 'a' + 10) << shl; break;
-                    case 'A': case 'B': case 'C': case 'D': case 'E':
-                    case 'F':
-                        c |= (digit - 'A' + 10) << shl; break;
-                    default:
-                        raise_errmsg("Invalid \\uXXXX escape", pystr, end - 5);
-                        goto bail;
-                }
-            }
-#ifdef Py_UNICODE_WIDE
-            /* Surrogate pair */
-            if (c >= 0xd800 && c <= 0xdbff) {
-                Py_UNICODE c2 = 0;
-                if (end + 6 >= len) {
-                    raise_errmsg("Invalid \\uXXXX\\uXXXX surrogate pair", pystr,
-                        end - 5);
-                }
-                if (buf[next++] != '\\' || buf[next++] != 'u') {
-                    raise_errmsg("Invalid \\uXXXX\\uXXXX surrogate pair", pystr,
-                        end - 5);
-                }
-                end += 6;
-                /* Decode 4 hex digits */
-                for (; next < end; next++) {
-                    Py_ssize_t shl = (end - next - 1) << 2;
-                    Py_UNICODE digit = buf[next];
-                    switch (digit) {
-                        case '0': case '1': case '2': case '3': case '4':
-                        case '5': case '6': case '7': case '8': case '9':
-                            c2 |= (digit - '0') << shl; break;
-                        case 'a': case 'b': case 'c': case 'd': case 'e':
-                        case 'f':
-                            c2 |= (digit - 'a' + 10) << shl; break;
-                        case 'A': case 'B': case 'C': case 'D': case 'E':
-                        case 'F':
-                            c2 |= (digit - 'A' + 10) << shl; break;
-                        default:
-                            raise_errmsg("Invalid \\uXXXX escape", pystr, end - 5);
-                            goto bail;
-                    }
-                }
-                c = 0x10000 + (((c - 0xd800) << 10) | (c2 - 0xdc00));
+            else if (str[idx] != ',') {
+                raise_errmsg("Expecting , delimiter", pystr, idx);
+                goto bail;
             }
-#endif
-        }
-        chunk = PyUnicode_FromUnicode(&c, 1);
-        if (chunk == NULL) {
-            goto bail;
-        }
-        if (PyList_Append(chunks, chunk)) {
-            Py_DECREF(chunk);
-            goto bail;
+            idx++;
+
+            /* skip whitespace after , */
+            while (idx <= end_idx && IS_WHITESPACE(str[idx])) idx++;
         }
-        Py_DECREF(chunk);
     }
 
-    rval = join_list_unicode(chunks);
-    if (rval == NULL) {
+    /* verify that idx < end_idx, str[idx] should be ']' */
+    if (idx > end_idx || str[idx] != ']') {
+        raise_errmsg("Expecting object", pystr, end_idx);
         goto bail;
     }
-    Py_CLEAR(chunks);
-    return Py_BuildValue("(Nn)", rval, end);
+    *next_idx_ptr = idx + 1;
+    return rval;
 bail:
-    Py_XDECREF(chunks);
+    Py_XDECREF(val);
+    Py_DECREF(rval);
     return NULL;
 }
 
-PyDoc_STRVAR(pydoc_scanstring,
-"scanstring(str_or_bytes, end, encoding) -> (bytes, end)\n");
+static PyObject *
+_parse_constant(PyScannerObject *s, char *constant, Py_ssize_t idx, Py_ssize_t *next_idx_ptr) {
+    /* Handle a JSON constant that was matched while scanning.
+    constant is the constant string that was found
+        ("NaN", "Infinity", "-Infinity").
+    idx is the index of the first character of the constant
+    *next_idx_ptr is a return-by-reference index to the first character after
+        the constant.
+
+    Returns the result of parse_constant
+    */
+    PyObject *cstr;
+    PyObject *rval;
+    /* constant is "NaN", "Infinity", or "-Infinity" */
+    cstr = PyUnicode_InternFromString(constant);
+    if (cstr == NULL)
+        return NULL;
+
+    /* rval = parse_constant(constant) */
+    rval = PyObject_CallFunctionObjArgs(s->parse_constant, cstr, NULL);
+    idx += PyUnicode_GET_SIZE(cstr);
+    Py_DECREF(cstr);
+    *next_idx_ptr = idx;
+    return rval;
+}
 
 static PyObject *
-py_scanstring(PyObject* self, PyObject *args)
-{
-    PyObject *pystr;
-    Py_ssize_t end;
-    char *encoding = NULL;
-    int strict = 0;
-    if (!PyArg_ParseTuple(args, "On|zi:scanstring", &pystr, &end, &encoding, &strict)) {
+_match_number_unicode(PyScannerObject *s, PyObject *pystr, Py_ssize_t start, Py_ssize_t *next_idx_ptr) {
+    /* Read a JSON number from PyUnicode pystr.
+    start is the index of the first character of the number
+    *next_idx_ptr is a return-by-reference index to the first character after
+        the number.
+
+    Returns a new PyObject representation of that number:
+        PyInt, PyLong, or PyFloat.
+        May return other types if parse_int or parse_float are set
+    */
+    Py_UNICODE *str = PyUnicode_AS_UNICODE(pystr);
+    Py_ssize_t end_idx = PyUnicode_GET_SIZE(pystr) - 1;
+    Py_ssize_t idx = start;
+    int is_float = 0;
+    PyObject *rval;
+    PyObject *numstr;
+
+    /* read a sign if it's there, make sure it's not the end of the string */
+    if (str[idx] == '-') {
+        idx++;
+        if (idx > end_idx) {
+            PyErr_SetNone(PyExc_StopIteration);
+            return NULL;
+        }
+    }
+
+    /* read as many integer digits as we find as long as it doesn't start with 0 */
+    if (str[idx] >= '1' && str[idx] <= '9') {
+        idx++;
+        while (idx <= end_idx && str[idx] >= '0' && str[idx] <= '9') idx++;
+    }
+    /* if it starts with 0 we only expect one integer digit */
+    else if (str[idx] == '0') {
+        idx++;
+    }
+    /* no integer digits, error */
+    else {
+        PyErr_SetNone(PyExc_StopIteration);
         return NULL;
     }
-    if (encoding == NULL) {
-        encoding = DEFAULT_ENCODING;
+
+    /* if the next char is '.' followed by a digit then read all float digits */
+    if (idx < end_idx && str[idx] == '.' && str[idx + 1] >= '0' && str[idx + 1] <= '9') {
+        is_float = 1;
+        idx += 2;
+        while (idx <= end_idx && str[idx] >= '0' && str[idx] <= '9') idx++;
+    }
+
+    /* if the next char is 'e' or 'E' then maybe read the exponent (or backtrack) */
+    if (idx < end_idx && (str[idx] == 'e' || str[idx] == 'E')) {
+        Py_ssize_t e_start = idx;
+        idx++;
+
+        /* read an exponent sign if present */
+        if (idx < end_idx && (str[idx] == '-' || str[idx] == '+')) idx++;
+
+        /* read all digits */
+        while (idx <= end_idx && str[idx] >= '0' && str[idx] <= '9') idx++;
+
+        /* if we got a digit, then parse as float. if not, backtrack */
+        if (str[idx - 1] >= '0' && str[idx - 1] <= '9') {
+            is_float = 1;
+        }
+        else {
+            idx = e_start;
+        }
     }
-    if (PyBytes_Check(pystr)) {
-        return scanstring_str(pystr, end, encoding, strict);
+
+    /* copy the section we determined to be a number */
+    numstr = PyUnicode_FromUnicode(&str[start], idx - start);
+    if (numstr == NULL)
+        return NULL;
+    if (is_float) {
+        /* parse as a float using a fast path if available, otherwise call user defined method */
+        if (s->parse_float != (PyObject *)&PyFloat_Type) {
+            rval = PyObject_CallFunctionObjArgs(s->parse_float, numstr, NULL);
+        }
+        else {
+            rval = PyFloat_FromString(numstr);
+        }
+    }
+    else {
+        /* no fast path for unicode -> int, just call */
+        rval = PyObject_CallFunctionObjArgs(s->parse_int, numstr, NULL);
+    }
+    Py_DECREF(numstr);
+    *next_idx_ptr = idx;
+    return rval;
+}
+
+static PyObject *
+scan_once_unicode(PyScannerObject *s, PyObject *pystr, Py_ssize_t idx, Py_ssize_t *next_idx_ptr)
+{
+    /* Read one JSON term (of any kind) from PyUnicode pystr.
+    idx is the index of the first character of the term
+    *next_idx_ptr is a return-by-reference index to the first character after
+        the number.
+
+    Returns a new PyObject representation of the term.
+    */
+    Py_UNICODE *str = PyUnicode_AS_UNICODE(pystr);
+    Py_ssize_t length = PyUnicode_GET_SIZE(pystr);
+    if (idx >= length) {
+        PyErr_SetNone(PyExc_StopIteration);
+        return NULL;
     }
-    else if (PyUnicode_Check(pystr)) {
-        return scanstring_unicode(pystr, end, strict);
+    switch (str[idx]) {
+        case '"':
+            /* string */
+            return scanstring_unicode(pystr, idx + 1,
+                PyObject_IsTrue(s->strict),
+                next_idx_ptr);
+        case '{':
+            /* object */
+            return _parse_object_unicode(s, pystr, idx + 1, next_idx_ptr);
+        case '[':
+            /* array */
+            return _parse_array_unicode(s, pystr, idx + 1, next_idx_ptr);
+        case 'n':
+            /* null */
+            if ((idx + 3 < length) && str[idx + 1] == 'u' && str[idx + 2] == 'l' && str[idx + 3] == 'l') {
+                Py_INCREF(Py_None);
+                *next_idx_ptr = idx + 4;
+                return Py_None;
+            }
+            break;
+        case 't':
+            /* true */
+            if ((idx + 3 < length) && str[idx + 1] == 'r' && str[idx + 2] == 'u' && str[idx + 3] == 'e') {
+                Py_INCREF(Py_True);
+                *next_idx_ptr = idx + 4;
+                return Py_True;
+            }
+            break;
+        case 'f':
+            /* false */
+            if ((idx + 4 < length) && str[idx + 1] == 'a' && str[idx + 2] == 'l' && str[idx + 3] == 's' && str[idx + 4] == 'e') {
+                Py_INCREF(Py_False);
+                *next_idx_ptr = idx + 5;
+                return Py_False;
+            }
+            break;
+        case 'N':
+            /* NaN */
+            if ((idx + 2 < length) && str[idx + 1] == 'a' && str[idx + 2] == 'N') {
+                return _parse_constant(s, "NaN", idx, next_idx_ptr);
+            }
+            break;
+        case 'I':
+            /* Infinity */
+            if ((idx + 7 < length) && str[idx + 1] == 'n' && str[idx + 2] == 'f' && str[idx + 3] == 'i' && str[idx + 4] == 'n' && str[idx + 5] == 'i' && str[idx + 6] == 't' && str[idx + 7] == 'y') {
+                return _parse_constant(s, "Infinity", idx, next_idx_ptr);
+            }
+            break;
+        case '-':
+            /* -Infinity */
+            if ((idx + 8 < length) && str[idx + 1] == 'I' && str[idx + 2] == 'n' && str[idx + 3] == 'f' && str[idx + 4] == 'i' && str[idx + 5] == 'n' && str[idx + 6] == 'i' && str[idx + 7] == 't' && str[idx + 8] == 'y') {
+                return _parse_constant(s, "-Infinity", idx, next_idx_ptr);
+            }
+            break;
+    }
+    /* Didn't find a string, object, array, or named constant. Look for a number. */
+    return _match_number_unicode(s, pystr, idx, next_idx_ptr);
+}
+
+static PyObject *
+scanner_call(PyObject *self, PyObject *args, PyObject *kwds)
+{
+    /* Python callable interface to scan_once_{str,unicode} */
+    PyObject *pystr;
+    PyObject *rval;
+    Py_ssize_t idx;
+    Py_ssize_t next_idx = -1;
+    static char *kwlist[] = {"string", "idx", NULL};
+    PyScannerObject *s;
+    assert(PyScanner_Check(self));
+    s = (PyScannerObject *)self;
+    if (!PyArg_ParseTupleAndKeywords(args, kwds, "OO&:scan_once", kwlist, &pystr, _convertPyInt_AsSsize_t, &idx))
+        return NULL;
+
+    if (PyUnicode_Check(pystr)) {
+        rval = scan_once_unicode(s, pystr, idx, &next_idx);
     }
     else {
-        PyErr_Format(PyExc_TypeError, 
-                     "first argument must be a string or bytes, not %.80s",
-                     Py_TYPE(pystr)->tp_name);
+        PyErr_Format(PyExc_TypeError,
+                 "first argument must be a string, not %.80s",
+                 Py_TYPE(pystr)->tp_name);
         return NULL;
     }
+    return _build_rval_index_tuple(rval, next_idx);
 }
 
-PyDoc_STRVAR(pydoc_encode_basestring_ascii,
-"encode_basestring_ascii(str_or_bytes) -> bytes\n");
+static PyObject *
+scanner_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
+{
+    PyScannerObject *s;
+    s = (PyScannerObject *)type->tp_alloc(type, 0);
+    if (s != NULL) {
+        s->strict = NULL;
+        s->object_hook = NULL;
+        s->object_pairs_hook = NULL;
+        s->parse_float = NULL;
+        s->parse_int = NULL;
+        s->parse_constant = NULL;
+    }
+    return (PyObject *)s;
+}
+
+static int
+scanner_init(PyObject *self, PyObject *args, PyObject *kwds)
+{
+    /* Initialize Scanner object */
+    PyObject *ctx;
+    static char *kwlist[] = {"context", NULL};
+    PyScannerObject *s;
+
+    assert(PyScanner_Check(self));
+    s = (PyScannerObject *)self;
+
+    if (!PyArg_ParseTupleAndKeywords(args, kwds, "O:make_scanner", kwlist, &ctx))
+        return -1;
+
+    /* All of these will fail "gracefully" so we don't need to verify them */
+    s->strict = PyObject_GetAttrString(ctx, "strict");
+    if (s->strict == NULL)
+        goto bail;
+    s->object_hook = PyObject_GetAttrString(ctx, "object_hook");
+    if (s->object_hook == NULL)
+        goto bail;
+    s->object_pairs_hook = PyObject_GetAttrString(ctx, "object_pairs_hook");
+    if (s->object_pairs_hook == NULL)
+        goto bail;
+    s->parse_float = PyObject_GetAttrString(ctx, "parse_float");
+    if (s->parse_float == NULL)
+        goto bail;
+    s->parse_int = PyObject_GetAttrString(ctx, "parse_int");
+    if (s->parse_int == NULL)
+        goto bail;
+    s->parse_constant = PyObject_GetAttrString(ctx, "parse_constant");
+    if (s->parse_constant == NULL)
+        goto bail;
+
+    return 0;
+
+bail:
+    Py_CLEAR(s->strict);
+    Py_CLEAR(s->object_hook);
+    Py_CLEAR(s->object_pairs_hook);
+    Py_CLEAR(s->parse_float);
+    Py_CLEAR(s->parse_int);
+    Py_CLEAR(s->parse_constant);
+    return -1;
+}
+
+PyDoc_STRVAR(scanner_doc, "JSON scanner object");
+
+static
+PyTypeObject PyScannerType = {
+    PyVarObject_HEAD_INIT(NULL, 0)
+    "_json.Scanner",       /* tp_name */
+    sizeof(PyScannerObject), /* tp_basicsize */
+    0,                    /* tp_itemsize */
+    scanner_dealloc, /* tp_dealloc */
+    0,                    /* tp_print */
+    0,                    /* tp_getattr */
+    0,                    /* tp_setattr */
+    0,                    /* tp_compare */
+    0,                    /* tp_repr */
+    0,                    /* tp_as_number */
+    0,                    /* tp_as_sequence */
+    0,                    /* tp_as_mapping */
+    0,                    /* tp_hash */
+    scanner_call,         /* tp_call */
+    0,                    /* tp_str */
+    0,/* PyObject_GenericGetAttr, */                    /* tp_getattro */
+    0,/* PyObject_GenericSetAttr, */                    /* tp_setattro */
+    0,                    /* tp_as_buffer */
+    Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC,   /* tp_flags */
+    scanner_doc,          /* tp_doc */
+    scanner_traverse,                    /* tp_traverse */
+    scanner_clear,                    /* tp_clear */
+    0,                    /* tp_richcompare */
+    0,                    /* tp_weaklistoffset */
+    0,                    /* tp_iter */
+    0,                    /* tp_iternext */
+    0,                    /* tp_methods */
+    scanner_members,                    /* tp_members */
+    0,                    /* tp_getset */
+    0,                    /* tp_base */
+    0,                    /* tp_dict */
+    0,                    /* tp_descr_get */
+    0,                    /* tp_descr_set */
+    0,                    /* tp_dictoffset */
+    scanner_init,                    /* tp_init */
+    0,/* PyType_GenericAlloc, */        /* tp_alloc */
+    scanner_new,          /* tp_new */
+    0,/* PyObject_GC_Del, */              /* tp_free */
+};
 
 static PyObject *
-py_encode_basestring_ascii(PyObject* self, PyObject *pystr)
+encoder_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
+{
+    PyEncoderObject *s;
+    s = (PyEncoderObject *)type->tp_alloc(type, 0);
+    if (s != NULL) {
+        s->markers = NULL;
+        s->defaultfn = NULL;
+        s->encoder = NULL;
+        s->indent = NULL;
+        s->key_separator = NULL;
+        s->item_separator = NULL;
+        s->sort_keys = NULL;
+        s->skipkeys = NULL;
+    }
+    return (PyObject *)s;
+}
+
+static int
+encoder_init(PyObject *self, PyObject *args, PyObject *kwds)
 {
+    /* initialize Encoder object */
+    static char *kwlist[] = {"markers", "default", "encoder", "indent", "key_separator", "item_separator", "sort_keys", "skipkeys", "allow_nan", NULL};
+
+    PyEncoderObject *s;
+    PyObject *allow_nan;
+
+    assert(PyEncoder_Check(self));
+    s = (PyEncoderObject *)self;
+
+    if (!PyArg_ParseTupleAndKeywords(args, kwds, "OOOOOOOOO:make_encoder", kwlist,
+        &s->markers, &s->defaultfn, &s->encoder, &s->indent, &s->key_separator, &s->item_separator, &s->sort_keys, &s->skipkeys, &allow_nan))
+        return -1;
+
+    Py_INCREF(s->markers);
+    Py_INCREF(s->defaultfn);
+    Py_INCREF(s->encoder);
+    Py_INCREF(s->indent);
+    Py_INCREF(s->key_separator);
+    Py_INCREF(s->item_separator);
+    Py_INCREF(s->sort_keys);
+    Py_INCREF(s->skipkeys);
+    s->fast_encode = (PyCFunction_Check(s->encoder) && PyCFunction_GetFunction(s->encoder) == (PyCFunction)py_encode_basestring_ascii);
+    s->allow_nan = PyObject_IsTrue(allow_nan);
+    return 0;
+}
+
+static PyObject *
+encoder_call(PyObject *self, PyObject *args, PyObject *kwds)
+{
+    /* Python callable interface to encode_listencode_obj */
+    static char *kwlist[] = {"obj", "_current_indent_level", NULL};
+    PyObject *obj;
     PyObject *rval;
-    /* METH_O */
-    if (PyBytes_Check(pystr)) {
-        rval = ascii_escape_str(pystr);
+    Py_ssize_t indent_level;
+    PyEncoderObject *s;
+    assert(PyEncoder_Check(self));
+    s = (PyEncoderObject *)self;
+    if (!PyArg_ParseTupleAndKeywords(args, kwds, "OO&:_iterencode", kwlist,
+        &obj, _convertPyInt_AsSsize_t, &indent_level))
+        return NULL;
+    rval = PyList_New(0);
+    if (rval == NULL)
+        return NULL;
+    if (encoder_listencode_obj(s, rval, obj, indent_level)) {
+        Py_DECREF(rval);
+        return NULL;
     }
-    else if (PyUnicode_Check(pystr)) {
-        rval = ascii_escape_unicode(pystr);
+    return rval;
+}
+
+static PyObject *
+_encoded_const(PyObject *obj)
+{
+    /* Return the JSON string representation of None, True, False */
+    if (obj == Py_None) {
+        static PyObject *s_null = NULL;
+        if (s_null == NULL) {
+            s_null = PyUnicode_InternFromString("null");
+        }
+        Py_INCREF(s_null);
+        return s_null;
+    }
+    else if (obj == Py_True) {
+        static PyObject *s_true = NULL;
+        if (s_true == NULL) {
+            s_true = PyUnicode_InternFromString("true");
+        }
+        Py_INCREF(s_true);
+        return s_true;
+    }
+    else if (obj == Py_False) {
+        static PyObject *s_false = NULL;
+        if (s_false == NULL) {
+            s_false = PyUnicode_InternFromString("false");
+        }
+        Py_INCREF(s_false);
+        return s_false;
     }
     else {
-        PyErr_Format(PyExc_TypeError, 
-                     "first argument must be a string or unicode, not %.80s",
-                     Py_TYPE(pystr)->tp_name);
+        PyErr_SetString(PyExc_ValueError, "not a const");
         return NULL;
     }
-    if (rval != NULL && PyBytes_Check(rval)) {
-        PyObject *urval = PyUnicode_DecodeASCII(PyBytes_AS_STRING(rval), PyBytes_GET_SIZE(rval), NULL);
-        Py_DECREF(rval);
-        return urval;
+}
+
+static PyObject *
+encoder_encode_float(PyEncoderObject *s, PyObject *obj)
+{
+    /* Return the JSON representation of a PyFloat */
+    double i = PyFloat_AS_DOUBLE(obj);
+    if (!Py_IS_FINITE(i)) {
+        if (!s->allow_nan) {
+            PyErr_SetString(PyExc_ValueError, "Out of range float values are not JSON compliant");
+            return NULL;
+        }
+        if (i > 0) {
+            return PyUnicode_FromString("Infinity");
+        }
+        else if (i < 0) {
+            return PyUnicode_FromString("-Infinity");
+        }
+        else {
+            return PyUnicode_FromString("NaN");
+        }
     }
+    /* Use a better float format here? */
+    return PyObject_Repr(obj);
+}
+
+static PyObject *
+encoder_encode_string(PyEncoderObject *s, PyObject *obj)
+{
+    /* Return the JSON representation of a string as a new reference,
+       or NULL with an exception set.  NOTE(review): fast_encode
+       presumably means s->encoder is the default C ASCII encoder --
+       set in encoder_init, which is not visible here; confirm. */
+    if (s->fast_encode)
+        return py_encode_basestring_ascii(NULL, obj);
+    else
+        return PyObject_CallFunctionObjArgs(s->encoder, obj, NULL);
+}
+
+static int
+_steal_list_append(PyObject *lst, PyObject *stolen)
+{
+    /* Append stolen to lst, consuming ("stealing") the caller's
+       reference to stolen whether or not the append succeeds.
+       Returns 0 on success, -1 with an exception set on failure,
+       matching PyList_Append's convention. */
+    int rval = PyList_Append(lst, stolen);
+    Py_DECREF(stolen);
     return rval;
 }
 
-static PyMethodDef json_methods[] = {
-    {"encode_basestring_ascii", (PyCFunction)py_encode_basestring_ascii,
-     METH_O, pydoc_encode_basestring_ascii},
-    {"scanstring", (PyCFunction)py_scanstring, METH_VARARGS,
-     pydoc_scanstring},
+static int
+encoder_listencode_obj(PyEncoderObject *s, PyObject *rval, PyObject *obj, Py_ssize_t indent_level)
+{
+    /* Encode Python object obj to a JSON term; rval is a PyList of
+       unicode fragments that the caller joins.  Returns 0 on success,
+       -1 with an exception set on failure. */
+    PyObject *newobj;
+    int rv;
+
+    if (obj == Py_None || obj == Py_True || obj == Py_False) {
+        PyObject *cstr = _encoded_const(obj);
+        if (cstr == NULL)
+            return -1;
+        return _steal_list_append(rval, cstr);
+    }
+    else if (PyUnicode_Check(obj))
+    {
+        PyObject *encoded = encoder_encode_string(s, obj);
+        if (encoded == NULL)
+            return -1;
+        return _steal_list_append(rval, encoded);
+    }
+    else if (PyLong_Check(obj)) {
+        PyObject *encoded = PyObject_Str(obj);
+        if (encoded == NULL)
+            return -1;
+        return _steal_list_append(rval, encoded);
+    }
+    else if (PyFloat_Check(obj)) {
+        PyObject *encoded = encoder_encode_float(s, obj);
+        if (encoded == NULL)
+            return -1;
+        return _steal_list_append(rval, encoded);
+    }
+    else if (PyList_Check(obj) || PyTuple_Check(obj)) {
+        return encoder_listencode_list(s, rval, obj, indent_level);
+    }
+    else if (PyDict_Check(obj)) {
+        return encoder_listencode_dict(s, rval, obj, indent_level);
+    }
+    else {
+        /* Unknown type: record obj in s->markers (keyed by address)
+           for cycle detection, then retry with whatever the
+           user-supplied default() hook returns for it. */
+        PyObject *ident = NULL;
+        if (s->markers != Py_None) {
+            int has_key;
+            ident = PyLong_FromVoidPtr(obj);
+            if (ident == NULL)
+                return -1;
+            has_key = PyDict_Contains(s->markers, ident);
+            if (has_key) {
+                /* has_key == -1: error, exception already set;
+                   has_key == 1: obj already being encoded (cycle). */
+                if (has_key != -1)
+                    PyErr_SetString(PyExc_ValueError, "Circular reference detected");
+                Py_DECREF(ident);
+                return -1;
+            }
+            if (PyDict_SetItem(s->markers, ident, obj)) {
+                Py_DECREF(ident);
+                return -1;
+            }
+        }
+        newobj = PyObject_CallFunctionObjArgs(s->defaultfn, obj, NULL);
+        if (newobj == NULL) {
+            Py_XDECREF(ident);
+            return -1;
+        }
+        rv = encoder_listencode_obj(s, rval, newobj, indent_level);
+        Py_DECREF(newobj);
+        if (rv) {
+            Py_XDECREF(ident);
+            return -1;
+        }
+        if (ident != NULL) {
+            /* Encoding of obj finished: remove its cycle marker. */
+            if (PyDict_DelItem(s->markers, ident)) {
+                Py_XDECREF(ident);
+                return -1;
+            }
+            Py_XDECREF(ident);
+        }
+        return rv;
+    }
+}
+
+static int
+encoder_listencode_dict(PyEncoderObject *s, PyObject *rval, PyObject *dct, Py_ssize_t indent_level)
+{
+    /* Encode Python dict dct to a JSON object; rval is a PyList of
+       unicode fragments.  Returns 0 on success, -1 with an exception
+       set on failure. */
+    /* Interned punctuation fragments, created once and cached for the
+       lifetime of the process. */
+    static PyObject *open_dict = NULL;
+    static PyObject *close_dict = NULL;
+    static PyObject *empty_dict = NULL;
+    PyObject *kstr = NULL;
+    PyObject *ident = NULL;
+    PyObject *key, *value;
+    Py_ssize_t pos;
+    int skipkeys;
+    Py_ssize_t idx;
+
+    if (open_dict == NULL || close_dict == NULL || empty_dict == NULL) {
+        open_dict = PyUnicode_InternFromString("{");
+        close_dict = PyUnicode_InternFromString("}");
+        empty_dict = PyUnicode_InternFromString("{}");
+        if (open_dict == NULL || close_dict == NULL || empty_dict == NULL)
+            return -1;
+    }
+    if (PyDict_Size(dct) == 0)
+        return PyList_Append(rval, empty_dict);
+
+    if (s->markers != Py_None) {
+        /* Cycle detection: remember dct (keyed by address) while its
+           contents are being encoded. */
+        int has_key;
+        ident = PyLong_FromVoidPtr(dct);
+        if (ident == NULL)
+            goto bail;
+        has_key = PyDict_Contains(s->markers, ident);
+        if (has_key) {
+            /* -1 == error (exception already set), 1 == cycle. */
+            if (has_key != -1)
+                PyErr_SetString(PyExc_ValueError, "Circular reference detected");
+            goto bail;
+        }
+        if (PyDict_SetItem(s->markers, ident, dct)) {
+            goto bail;
+        }
+    }
+
+    if (PyList_Append(rval, open_dict))
+        goto bail;
+
+    if (s->indent != Py_None) {
+        /* TODO: DOES NOT RUN -- indent support is not implemented
+           here; only the commented-out Python logic below sketches
+           the intended behavior. */
+        indent_level += 1;
+        /*
+            newline_indent = '\n' + (' ' * (_indent * _current_indent_level))
+            separator = _item_separator + newline_indent
+            buf += newline_indent
+        */
+    }
+
+    /* TODO: C speedup not implemented for sort_keys */
+
+    pos = 0;
+    /* NOTE(review): PyObject_IsTrue can return -1 on error, which is
+       not checked here -- confirm s->skipkeys is always a plain bool
+       by the time it is stored on the encoder. */
+    skipkeys = PyObject_IsTrue(s->skipkeys);
+    idx = 0;
+    while (PyDict_Next(dct, &pos, &key, &value)) {
+        PyObject *encoded;
+
+        /* Coerce the key to a JSON string: str passes through;
+           float/int/bool/None are converted; anything else is either
+           skipped (skipkeys) or raises ValueError. */
+        if (PyUnicode_Check(key)) {
+            Py_INCREF(key);
+            kstr = key;
+        }
+        else if (PyFloat_Check(key)) {
+            kstr = encoder_encode_float(s, key);
+            if (kstr == NULL)
+                goto bail;
+        }
+        else if (PyLong_Check(key)) {
+            kstr = PyObject_Str(key);
+            if (kstr == NULL)
+                goto bail;
+        }
+        else if (key == Py_True || key == Py_False || key == Py_None) {
+            kstr = _encoded_const(key);
+            if (kstr == NULL)
+                goto bail;
+        }
+        else if (skipkeys) {
+            continue;
+        }
+        else {
+            /* TODO: include repr of key */
+            PyErr_SetString(PyExc_ValueError, "keys must be a string");
+            goto bail;
+        }
+
+        if (idx) {
+            /* Not the first emitted pair: prepend the item separator.
+               idx counts emitted pairs, not dict positions, so
+               skipped keys do not produce stray separators. */
+            if (PyList_Append(rval, s->item_separator))
+                goto bail;
+        }
+
+        encoded = encoder_encode_string(s, kstr);
+        Py_CLEAR(kstr);
+        if (encoded == NULL)
+            goto bail;
+        if (PyList_Append(rval, encoded)) {
+            Py_DECREF(encoded);
+            goto bail;
+        }
+        Py_DECREF(encoded);
+        if (PyList_Append(rval, s->key_separator))
+            goto bail;
+        if (encoder_listencode_obj(s, rval, value, indent_level))
+            goto bail;
+        idx += 1;
+    }
+    if (ident != NULL) {
+        /* Encoding of dct finished: remove its cycle marker. */
+        if (PyDict_DelItem(s->markers, ident))
+            goto bail;
+        Py_CLEAR(ident);
+    }
+    if (s->indent != Py_None) {
+        /* TODO: DOES NOT RUN */
+        indent_level -= 1;
+        /*
+            yield '\n' + (' ' * (_indent * _current_indent_level))
+        */
+    }
+    if (PyList_Append(rval, close_dict))
+        goto bail;
+    return 0;
+
+bail:
+    Py_XDECREF(kstr);
+    Py_XDECREF(ident);
+    return -1;
+}
+
+
+static int
+encoder_listencode_list(PyEncoderObject *s, PyObject *rval, PyObject *seq, Py_ssize_t indent_level)
+{
+    /* Encode Python list/tuple seq to a JSON array; rval is a PyList
+       of unicode fragments.  Returns 0 on success, -1 with an
+       exception set on failure. */
+    /* Interned punctuation fragments, created once and cached for the
+       lifetime of the process. */
+    static PyObject *open_array = NULL;
+    static PyObject *close_array = NULL;
+    static PyObject *empty_array = NULL;
+    PyObject *ident = NULL;
+    PyObject *s_fast = NULL;
+    Py_ssize_t num_items;
+    PyObject **seq_items;
+    Py_ssize_t i;
+
+    if (open_array == NULL || close_array == NULL || empty_array == NULL) {
+        open_array = PyUnicode_InternFromString("[");
+        close_array = PyUnicode_InternFromString("]");
+        empty_array = PyUnicode_InternFromString("[]");
+        if (open_array == NULL || close_array == NULL || empty_array == NULL)
+            return -1;
+    }
+    /* NOTE: redundant, ident is already initialized to NULL above. */
+    ident = NULL;
+    s_fast = PySequence_Fast(seq, "_iterencode_list needs a sequence");
+    if (s_fast == NULL)
+        return -1;
+    num_items = PySequence_Fast_GET_SIZE(s_fast);
+    if (num_items == 0) {
+        Py_DECREF(s_fast);
+        return PyList_Append(rval, empty_array);
+    }
+
+    if (s->markers != Py_None) {
+        /* Cycle detection: remember seq (keyed by address) while its
+           items are being encoded. */
+        int has_key;
+        ident = PyLong_FromVoidPtr(seq);
+        if (ident == NULL)
+            goto bail;
+        has_key = PyDict_Contains(s->markers, ident);
+        if (has_key) {
+            /* -1 == error (exception already set), 1 == cycle. */
+            if (has_key != -1)
+                PyErr_SetString(PyExc_ValueError, "Circular reference detected");
+            goto bail;
+        }
+        if (PyDict_SetItem(s->markers, ident, seq)) {
+            goto bail;
+        }
+    }
+
+    /* Borrowed item array; valid while we hold the s_fast reference. */
+    seq_items = PySequence_Fast_ITEMS(s_fast);
+    if (PyList_Append(rval, open_array))
+        goto bail;
+    if (s->indent != Py_None) {
+        /* TODO: DOES NOT RUN -- indent support is not implemented
+           here; the commented-out Python logic sketches the intent. */
+        indent_level += 1;
+        /*
+            newline_indent = '\n' + (' ' * (_indent * _current_indent_level))
+            separator = _item_separator + newline_indent
+            buf += newline_indent
+        */
+    }
+    for (i = 0; i < num_items; i++) {
+        PyObject *obj = seq_items[i];
+        if (i) {
+            /* Not the first item: prepend the item separator. */
+            if (PyList_Append(rval, s->item_separator))
+                goto bail;
+        }
+        if (encoder_listencode_obj(s, rval, obj, indent_level))
+            goto bail;
+    }
+    if (ident != NULL) {
+        /* Encoding of seq finished: remove its cycle marker. */
+        if (PyDict_DelItem(s->markers, ident))
+            goto bail;
+        Py_CLEAR(ident);
+    }
+    if (s->indent != Py_None) {
+        /* TODO: DOES NOT RUN */
+        indent_level -= 1;
+        /*
+            yield '\n' + (' ' * (_indent * _current_indent_level))
+        */
+    }
+    if (PyList_Append(rval, close_array))
+        goto bail;
+    Py_DECREF(s_fast);
+    return 0;
+
+bail:
+    Py_XDECREF(ident);
+    Py_DECREF(s_fast);
+    return -1;
+}
+
+static void
+encoder_dealloc(PyObject *self)
+{
+    /* Deallocate Encoder: drop all held references, then free the
+       object.  NOTE(review): the type sets Py_TPFLAGS_HAVE_GC, so
+       PyObject_GC_UnTrack(self) should probably be called before
+       clearing -- confirm against the GC protocol docs. */
+    encoder_clear(self);
+    Py_TYPE(self)->tp_free(self);
+}
+
+static int
+encoder_traverse(PyObject *self, visitproc visit, void *arg)
+{
+    /* GC tp_traverse: visit every PyObject* member so the cycle
+       collector can discover reference cycles through the encoder. */
+    PyEncoderObject *s;
+    assert(PyEncoder_Check(self));
+    s = (PyEncoderObject *)self;
+    Py_VISIT(s->markers);
+    Py_VISIT(s->defaultfn);
+    Py_VISIT(s->encoder);
+    Py_VISIT(s->indent);
+    Py_VISIT(s->key_separator);
+    Py_VISIT(s->item_separator);
+    Py_VISIT(s->sort_keys);
+    Py_VISIT(s->skipkeys);
+    return 0;
+}
+
+static int
+encoder_clear(PyObject *self)
+{
+    /* GC tp_clear: drop every reference held by the encoder (also
+       called from encoder_dealloc).  Py_CLEAR sets each member to
+       NULL, so a second call is a harmless no-op. */
+    PyEncoderObject *s;
+    assert(PyEncoder_Check(self));
+    s = (PyEncoderObject *)self;
+    Py_CLEAR(s->markers);
+    Py_CLEAR(s->defaultfn);
+    Py_CLEAR(s->encoder);
+    Py_CLEAR(s->indent);
+    Py_CLEAR(s->key_separator);
+    Py_CLEAR(s->item_separator);
+    Py_CLEAR(s->sort_keys);
+    Py_CLEAR(s->skipkeys);
+    return 0;
+}
+
+PyDoc_STRVAR(encoder_doc, "_iterencode(obj, _current_indent_level) -> iterable");
+
+/* Type object for the C-accelerated encoder, exposed to Python as
+   _json.make_encoder.  Instances are callable (encoder_call) and
+   participate in cyclic GC via encoder_traverse/encoder_clear. */
+static
+PyTypeObject PyEncoderType = {
+    PyVarObject_HEAD_INIT(NULL, 0)
+    "_json.Encoder",       /* tp_name */
+    sizeof(PyEncoderObject), /* tp_basicsize */
+    0,                    /* tp_itemsize */
+    encoder_dealloc, /* tp_dealloc */
+    0,                    /* tp_print */
+    0,                    /* tp_getattr */
+    0,                    /* tp_setattr */
+    0,                    /* tp_compare */
+    0,                    /* tp_repr */
+    0,                    /* tp_as_number */
+    0,                    /* tp_as_sequence */
+    0,                    /* tp_as_mapping */
+    0,                    /* tp_hash */
+    encoder_call,         /* tp_call */
+    0,                    /* tp_str */
+    0,                    /* tp_getattro */
+    0,                    /* tp_setattro */
+    0,                    /* tp_as_buffer */
+    Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC,   /* tp_flags */
+    encoder_doc,          /* tp_doc */
+    encoder_traverse,     /* tp_traverse */
+    encoder_clear,        /* tp_clear */
+    0,                    /* tp_richcompare */
+    0,                    /* tp_weaklistoffset */
+    0,                    /* tp_iter */
+    0,                    /* tp_iternext */
+    0,                    /* tp_methods */
+    encoder_members,      /* tp_members */
+    0,                    /* tp_getset */
+    0,                    /* tp_base */
+    0,                    /* tp_dict */
+    0,                    /* tp_descr_get */
+    0,                    /* tp_descr_set */
+    0,                    /* tp_dictoffset */
+    encoder_init,         /* tp_init */
+    0,                    /* tp_alloc */
+    encoder_new,          /* tp_new */
+    0,                    /* tp_free */
+};
+
+/* Module-level functions exported by _json (the type factories are
+   added separately in PyInit__json). */
+static PyMethodDef speedups_methods[] = {
+    {"encode_basestring_ascii",
+        (PyCFunction)py_encode_basestring_ascii,
+        METH_O,
+        pydoc_encode_basestring_ascii},
+    {"scanstring",
+        (PyCFunction)py_scanstring,
+        METH_VARARGS,
+        pydoc_scanstring},
     {NULL, NULL, 0, NULL}
 };
 
@@ -630,7 +1655,7 @@
 	"_json",
 	module_doc,
 	-1,
-	json_methods,
+	speedups_methods,
 	NULL,
 	NULL,
 	NULL,
@@ -640,5 +1665,27 @@
 PyObject*
 PyInit__json(void)
 {
-	return PyModule_Create(&jsonmodule);
+    /* Module init: create the module, ready the Scanner and Encoder
+       types, and expose them as the make_scanner/make_encoder
+       factories used by Lib/json.  Returns NULL with an exception
+       set on failure. */
+    PyObject *m = PyModule_Create(&jsonmodule);
+    if (!m)
+        return NULL;
+    PyScannerType.tp_new = PyType_GenericNew;
+    if (PyType_Ready(&PyScannerType) < 0)
+        goto fail;
+    PyEncoderType.tp_new = PyType_GenericNew;
+    if (PyType_Ready(&PyEncoderType) < 0)
+        goto fail;
+    /* PyModule_AddObject steals a reference on success, so INCREF the
+       static type first and undo it if the add fails. */
+    Py_INCREF((PyObject*)&PyScannerType);
+    if (PyModule_AddObject(m, "make_scanner", (PyObject*)&PyScannerType) < 0) {
+        Py_DECREF((PyObject*)&PyScannerType);
+        goto fail;
+    }
+    Py_INCREF((PyObject*)&PyEncoderType);
+    if (PyModule_AddObject(m, "make_encoder", (PyObject*)&PyEncoderType) < 0) {
+        Py_DECREF((PyObject*)&PyEncoderType);
+        goto fail;
+    }
+    return m;
+  fail:
+    /* Error path: release the half-initialized module. */
+    Py_DECREF(m);
+    return NULL;
 }

Modified: python/branches/pep-0383/Modules/ld_so_aix
==============================================================================
--- python/branches/pep-0383/Modules/ld_so_aix	(original)
+++ python/branches/pep-0383/Modules/ld_so_aix	Sat May  2 21:20:57 2009
@@ -181,7 +181,10 @@
 # Perform the link.
 #echo $CC $CCOPT $CCARGS
 $CC $CCOPT $CCARGS
+retval=$?
 
 # Delete the module's export list file.
 # Comment this line if you need it.
 rm -f $expfile
+
+exit $retval

Modified: python/branches/pep-0383/Objects/longobject.c
==============================================================================
--- python/branches/pep-0383/Objects/longobject.c	(original)
+++ python/branches/pep-0383/Objects/longobject.c	Sat May  2 21:20:57 2009
@@ -3832,8 +3832,13 @@
 }
 
 static PyObject *
-long_getN(PyLongObject *v, void *context) {
-	return PyLong_FromLong((Py_intptr_t)context);
+long_get0(PyLongObject *v, void *context) {
+	return PyLong_FromLong(0L);
+}
+
+static PyObject *
+long_get1(PyLongObject *v, void *context) {
+	return PyLong_FromLong(1L);
 }
 
 static PyObject *
@@ -4091,22 +4096,22 @@
 };
 
 static PyGetSetDef long_getset[] = {
-    {"real", 
+    {"real",
      (getter)long_long, (setter)NULL,
      "the real part of a complex number",
      NULL},
-    {"imag", 
-     (getter)long_getN, (setter)NULL,
+    {"imag",
+     (getter)long_get0, (setter)NULL,
      "the imaginary part of a complex number",
-     (void*)0},
-    {"numerator", 
+     NULL},
+    {"numerator",
      (getter)long_long, (setter)NULL,
      "the numerator of a rational number in lowest terms",
      NULL},
-    {"denominator", 
-     (getter)long_getN, (setter)NULL,
+    {"denominator",
+     (getter)long_get1, (setter)NULL,
      "the denominator of a rational number in lowest terms",
-     (void*)1},
+     NULL},
     {NULL}  /* Sentinel */
 };
 

Modified: python/branches/pep-0383/Objects/stringlib/formatter.h
==============================================================================
--- python/branches/pep-0383/Objects/stringlib/formatter.h	(original)
+++ python/branches/pep-0383/Objects/stringlib/formatter.h	Sat May  2 21:20:57 2009
@@ -934,8 +934,12 @@
 
     if (precision < 0)
         precision = 6;
+
+#if PY_VERSION_HEX < 0x03010000
+    /* 3.1 no longer converts large 'f' to 'g'. */
     if ((type == 'f' || type == 'F') && fabs(val) >= 1e50)
         type = 'g';
+#endif
 
     /* Cast "type", because if we're in unicode we need to pass a
        8-bit char. This is safe, because we've restricted what "type"

Modified: python/branches/pep-0383/Objects/stringlib/string_format.h
==============================================================================
--- python/branches/pep-0383/Objects/stringlib/string_format.h	(original)
+++ python/branches/pep-0383/Objects/stringlib/string_format.h	Sat May  2 21:20:57 2009
@@ -34,7 +34,7 @@
 typedef enum {
     ANS_INIT,
     ANS_AUTO,
-    ANS_MANUAL,
+    ANS_MANUAL
 } AutoNumberState;   /* Keep track if we're auto-numbering fields */
 
 /* Keeps track of our auto-numbering state, and which number field we're on */

Modified: python/branches/pep-0383/Objects/unicodeobject.c
==============================================================================
--- python/branches/pep-0383/Objects/unicodeobject.c	(original)
+++ python/branches/pep-0383/Objects/unicodeobject.c	Sat May  2 21:20:57 2009
@@ -8900,73 +8900,27 @@
     return NULL;
 }
 
-static void
-strtounicode(Py_UNICODE *buffer, const char *charbuffer, Py_ssize_t len)
-{
-    register Py_ssize_t i;
-    for (i = len - 1; i >= 0; i--)
-        buffer[i] = (Py_UNICODE) charbuffer[i];
-}
+/* Returns a new reference to a PyUnicode object, or NULL on failure. */
 
-static int
-formatfloat(Py_UNICODE *buf,
-            size_t buflen,
-            int flags,
-            int prec,
-            int type,
-            PyObject *v)
-{
-    /* eric.smith: To minimize disturbances in PyUnicode_Format (the
-       only caller of this routine), I'm going to keep the existing
-       API to this function. That means that we'll allocate memory and
-       then copy back into the supplied buffer. But that's better than
-       all of the changes that would be required in PyUnicode_Format
-       because it does lots of memory management tricks. */
-
-    char* p = NULL;
-    int result = -1;
+static PyObject *
+formatfloat(PyObject *v, int flags, int prec, int type)
+{
+    char *p;
+    PyObject *result;
     double x;
-    Py_ssize_t len;
 
     x = PyFloat_AsDouble(v);
     if (x == -1.0 && PyErr_Occurred())
-        goto done;
+        return NULL;
+
     if (prec < 0)
         prec = 6;
 
-    /* make sure that the decimal representation of precision really does
-       need at most 10 digits: platforms with sizeof(int) == 8 exist! */
-    if (prec > 0x7fffffffL) {
-        PyErr_SetString(PyExc_OverflowError,
-                        "outrageously large precision "
-                        "for formatted float");
-        goto done;
-    }
-
-    if (type == 'f' && fabs(x) >= 1e50)
-        type = 'g';
-
-    if (((type == 'g' || type == 'G') &&
-         buflen <= (size_t)10 + (size_t)prec) ||
-        ((type == 'f' || type == 'F') &&
-         buflen <= (size_t)53 + (size_t)prec)) {
-        PyErr_SetString(PyExc_OverflowError,
-                        "formatted float is too long (precision too large?)");
-        goto done;
-    }
-
     p = PyOS_double_to_string(x, type, prec,
                               (flags & F_ALT) ? Py_DTSF_ALT : 0, NULL);
-    len = strlen(p);
-    if (len+1 >= buflen) {
-        /* Caller supplied buffer is not large enough. */
-        PyErr_NoMemory();
-        goto done;
-    }
-    strtounicode(buf, p, len);
-    result = Py_SAFE_DOWNCAST(len, Py_ssize_t, int);
-
-done:
+    if (p == NULL)
+        return NULL;
+    result = PyUnicode_FromStringAndSize(p, strlen(p));
     PyMem_Free(p);
     return result;
 }
@@ -9048,14 +9002,9 @@
 }
 
 /* fmt%(v1,v2,...) is roughly equivalent to sprintf(fmt, v1, v2, ...)
-
-   FORMATBUFLEN is the length of the buffer in which the floats, ints, &
-   chars are formatted. XXX This is a magic number. Each formatting
-   routine does bounds checking to ensure no overflow, but a better
-   solution may be to malloc a buffer of appropriate size for each
-   format. For now, the current solution is sufficient.
+   FORMATBUFLEN is the length of the buffer in which chars are formatted.
 */
-#define FORMATBUFLEN (size_t)120
+#define FORMATBUFLEN (size_t)10
 
 PyObject *PyUnicode_Format(PyObject *format,
                            PyObject *args)
@@ -9120,7 +9069,7 @@
             Py_UNICODE *pbuf;
             Py_UNICODE sign;
             Py_ssize_t len;
-            Py_UNICODE formatbuf[FORMATBUFLEN]; /* For format{float,int,char}() */
+            Py_UNICODE formatbuf[FORMATBUFLEN]; /* For formatchar() */
 
             fmt++;
             if (*fmt == '(') {
@@ -9365,11 +9314,11 @@
             case 'F':
             case 'g':
             case 'G':
-                pbuf = formatbuf;
-                len = formatfloat(pbuf, sizeof(formatbuf)/sizeof(Py_UNICODE),
-                                  flags, prec, c, v);
-                if (len < 0)
+                temp = formatfloat(v, flags, prec, c);
+                if (!temp)
                     goto onError;
+                pbuf = PyUnicode_AS_UNICODE(temp);
+                len = PyUnicode_GET_SIZE(temp);
                 sign = 1;
                 if (flags & F_ZERO)
                     fill = '0';

Modified: python/branches/pep-0383/PC/msvcrtmodule.c
==============================================================================
--- python/branches/pep-0383/PC/msvcrtmodule.c	(original)
+++ python/branches/pep-0383/PC/msvcrtmodule.c	Sat May  2 21:20:57 2009
@@ -220,18 +220,12 @@
 static PyObject *
 msvcrt_putwch(PyObject *self, PyObject *args)
 {
-	Py_UNICODE *ch;
-	int size;
+	int ch;
 
-	if (!PyArg_ParseTuple(args, "u#:putwch", &ch, &size))
+	if (!PyArg_ParseTuple(args, "C:putwch", &ch))
 		return NULL;
 
-	if (size == 0) {
-		PyErr_SetString(PyExc_ValueError,
-			"Expected unicode string of length 1");
-		return NULL;
-	}
-	_putwch(*ch);
+	_putwch(ch);
 	Py_RETURN_NONE;
 
 }
@@ -255,12 +249,12 @@
 static PyObject *
 msvcrt_ungetwch(PyObject *self, PyObject *args)
 {
-	Py_UNICODE ch;
+	int ch;
 
-	if (!PyArg_ParseTuple(args, "u:ungetwch", &ch))
+	if (!PyArg_ParseTuple(args, "C:ungetwch", &ch))
 		return NULL;
 
-	if (_ungetch(ch) == EOF)
+	if (_ungetwch(ch) == WEOF)
 		return PyErr_SetFromErrno(PyExc_IOError);
 	Py_INCREF(Py_None);
 	return Py_None;

Modified: python/branches/pep-0383/Python/pystrtod.c
==============================================================================
--- python/branches/pep-0383/Python/pystrtod.c	(original)
+++ python/branches/pep-0383/Python/pystrtod.c	Sat May  2 21:20:57 2009
@@ -620,12 +620,10 @@
                                          int flags,
                                          int *type)
 {
-	char buf[128];
 	char format[32];
-	Py_ssize_t len;
-	char *result;
-	char *p;
-	int t;
+	Py_ssize_t bufsize;
+	char *buf;
+	int t, exp;
 	int upper = 0;
 
 	/* Validate format_code, and map upper and lower case */
@@ -669,6 +667,61 @@
 		return NULL;
 	}
 
+	/* Here's a quick-and-dirty calculation to figure out how big a buffer
+	   we need.  In general, for a finite float we need:
+
+	     1 byte for each digit of the decimal significand, and
+
+	     1 for a possible sign
+	     1 for a possible decimal point
+	     2 for a possible [eE][+-]
+	     1 for each digit of the exponent;  if we allow 19 digits
+	       total then we're safe up to exponents of 2**63.
+	     1 for the trailing nul byte
+
+	   This gives a total of 24 + the number of digits in the significand,
+	   and the number of digits in the significand is:
+
+	     for 'g' format: at most precision, except possibly
+	       when precision == 0, when it's 1.
+	     for 'e' format: precision+1
+	     for 'f' format: precision digits after the point, at least 1
+	       before.  To figure out how many digits appear before the point
+	       we have to examine the size of the number.  If fabs(val) < 1.0
+	       then there will be only one digit before the point.  If
+	       fabs(val) >= 1.0, then there are at most
+
+	         1+floor(log10(ceiling(fabs(val))))
+
+	       digits before the point (where the 'ceiling' allows for the
+	       possibility that the rounding rounds the integer part of val
+	       up).  A safe upper bound for the above quantity is
+	       1+floor(exp/3), where exp is the unique integer such that 0.5
+	       <= fabs(val)/2**exp < 1.0.  This exp can be obtained from
+	       frexp.
+
+	   So we allow room for precision+1 digits for all formats, plus an
+	   extra floor(exp/3) digits for 'f' format.
+
+	*/
+
+	if (Py_IS_NAN(val) || Py_IS_INFINITY(val))
+		/* 3 for 'inf'/'nan', 1 for sign, 1 for '\0' */
+		bufsize = 5;
+	else {
+		bufsize = 25 + precision;
+		if (format_code == 'f' && fabs(val) >= 1.0) {
+			frexp(val, &exp);
+			bufsize += exp/3;
+		}
+	}
+
+	buf = PyMem_Malloc(bufsize);
+	if (buf == NULL) {
+		PyErr_NoMemory();
+		return NULL;
+	}
+
 	/* Handle nan and inf. */
 	if (Py_IS_NAN(val)) {
 		strcpy(buf, "nan");
@@ -687,38 +740,29 @@
 		PyOS_snprintf(format, sizeof(format), "%%%s.%i%c",
 			      (flags & Py_DTSF_ALT ? "#" : ""), precision,
 			      format_code);
-		_PyOS_ascii_formatd(buf, sizeof(buf), format, val, precision);
+		_PyOS_ascii_formatd(buf, bufsize, format, val, precision);
 	}
 
-	len = strlen(buf);
-
-	/* Add 1 for the trailing 0 byte.
-	   Add 1 because we might need to make room for the sign.
-	   */
-	result = PyMem_Malloc(len + 2);
-	if (result == NULL) {
-		PyErr_NoMemory();
-		return NULL;
-	}
-	p = result;
-
 	/* Add sign when requested.  It's convenient (esp. when formatting
 	 complex numbers) to include a sign even for inf and nan. */
-	if (flags & Py_DTSF_SIGN && buf[0] != '-')
-		*p++ = '+';
-
-	strcpy(p, buf);
-
+	if (flags & Py_DTSF_SIGN && buf[0] != '-') {
+		size_t len = strlen(buf);
+		/* the bufsize calculations above should ensure that we've got
+		   space to add a sign */
+		assert((size_t)bufsize >= len+2);
+		memmove(buf+1, buf, len+1);
+		buf[0] = '+';
+	}
 	if (upper) {
 		/* Convert to upper case. */
 		char *p1;
-		for (p1 = p; *p1; p1++)
+		for (p1 = buf; *p1; p1++)
 			*p1 = Py_TOUPPER(*p1);
 	}
 
 	if (type)
 		*type = t;
-	return result;
+	return buf;
 }
 
 #else


More information about the Python-checkins mailing list