[Python-checkins] python/dist/src/Lib hashlib.py, NONE, 1.2.2.2 md5.py, NONE, 1.1.2.2 sha.py, NONE, 1.1.2.2 BaseHTTPServer.py, 1.22.2.2, 1.22.2.3 ConfigParser.py, 1.44.2.2, 1.44.2.3 Cookie.py, 1.13.2.2, 1.13.2.3 DocXMLRPCServer.py, 1.2.4.2, 1.2.4.3 SimpleXMLRPCServer.py, 1.2.12.2, 1.2.12.3 SocketServer.py, 1.31.2.2, 1.31.2.3 UserDict.py, 1.17.2.2, 1.17.2.3 UserString.py, 1.13.2.2, 1.13.2.3 _LWPCookieJar.py, 1.2.4.1, 1.2.4.2 _MozillaCookieJar.py, 1.3.4.1, 1.3.4.2 _strptime.py, 1.15.4.2, 1.15.4.3 asynchat.py, 1.19.2.2, 1.19.2.3 asyncore.py, 1.32.2.2, 1.32.2.3 base64.py, 1.13.16.1, 1.13.16.2 calendar.py, 1.28.2.2, 1.28.2.3 cgi.py, 1.74.2.2, 1.74.2.3 cgitb.py, 1.5.2.2, 1.5.2.3 codecs.py, 1.26.2.2, 1.26.2.3 cookielib.py, 1.4.4.1, 1.4.4.2 copy.py, 1.28.2.2, 1.28.2.3 csv.py, 1.12.2.1, 1.12.2.2 decimal.py, 1.32.2.1, 1.32.2.2 difflib.py, 1.10.2.2, 1.10.2.3 doctest.py, 1.24.2.2, 1.24.2.3 dumbdbm.py, 1.19.2.2, 1.19.2.3 ftplib.py, 1.72.2.1, 1.72.2.2 glob.py, 1.10.20.1, 1.10.20.2 gzip.py, 1.34.2.2, 1.34.2.3 hmac.py, 1.7.2.1, 1.7.2.2 httplib.py, 1.54.2.2, 1.54.2.3 imaplib.py, 1.54.2.2, 1.54.2.3 imghdr.py, 1.11, 1.11.26.1 inspect.py, 1.36.2.2, 1.36.2.3 locale.py, 1.20.2.2, 1.20.2.3 macpath.py, 1.39.2.2, 1.39.2.3 markupbase.py, 1.6.2.2, 1.6.2.3 mhlib.py, 1.34.2.2, 1.34.2.3 mimetypes.py, 1.22.2.2, 1.22.2.3 nntplib.py, 1.30.2.2, 1.30.2.3 ntpath.py, 1.49.2.2, 1.49.2.3 optparse.py, 1.4.4.2, 1.4.4.3 os.py, 1.58.2.3, 1.58.2.4 os2emxpath.py, 1.6.2.2, 1.6.2.3 pdb.py, 1.53.2.2, 1.53.2.3 pickletools.py, 1.26.6.2, 1.26.6.3 popen2.py, 1.25.2.1, 1.25.2.2 poplib.py, 1.21.2.1, 1.21.2.2 posixfile.py, 1.24.10.1, 1.24.10.2 posixpath.py, 1.51.2.2, 1.51.2.3 profile.py, 1.47.2.2, 1.47.2.3 py_compile.py, 1.21.2.1, 1.21.2.2 pydoc.py, 1.65.2.2, 1.65.2.3 random.py, 1.34.2.2, 1.34.2.3 reconvert.py, 1.6.16.1, 1.6.16.2 rfc822.py, 1.72.2.2, 1.72.2.3 sets.py, 1.43.4.2, 1.43.4.3 shutil.py, 1.22.2.2, 1.22.2.3 smtplib.py, 1.58.2.2, 1.58.2.3 socket.py, 1.21.2.2, 1.21.2.3 sre.py, 1.44.10.2, 1.44.10.3 
sre_compile.py, 1.43.2.2, 1.43.2.3 sre_parse.py, 1.55.2.2, 1.55.2.3 subprocess.py, 1.13.2.1, 1.13.2.2 symbol.py, 1.14.12.2, 1.14.12.3 tarfile.py, 1.8.4.2, 1.8.4.3 telnetlib.py, 1.19.2.2, 1.19.2.3 tempfile.py, 1.39.2.2, 1.39.2.3 textwrap.py, 1.12.2.2, 1.12.2.3 threading.py, 1.24.2.2, 1.24.2.3 tokenize.py, 1.32.2.2, 1.32.2.3 unittest.py, 1.16.2.2, 1.16.2.3 urllib.py, 1.148.2.2, 1.148.2.3 urllib2.py, 1.31.2.2, 1.31.2.3 urlparse.py, 1.32.2.2, 1.32.2.3 warnings.py, 1.16.2.2, 1.16.2.3 weakref.py, 1.17.2.2, 1.17.2.3 webbrowser.py, 1.32.2.2, 1.32.2.3 whichdb.py, 1.12.10.2, 1.12.10.3 xdrlib.py, 1.14.2.2, 1.14.2.3 xmlrpclib.py, 1.20.2.2, 1.20.2.3 zipfile.py, 1.24.2.2, 1.24.2.3 profile.doc, 1.2.32.1, NONE

jhylton@users.sourceforge.net jhylton at users.sourceforge.net
Sun Oct 16 07:24:40 CEST 2005


Update of /cvsroot/python/python/dist/src/Lib
In directory sc8-pr-cvs1.sourceforge.net:/tmp/cvs-serv27718/Lib

Modified Files:
      Tag: ast-branch
	BaseHTTPServer.py ConfigParser.py Cookie.py DocXMLRPCServer.py 
	SimpleXMLRPCServer.py SocketServer.py UserDict.py 
	UserString.py _LWPCookieJar.py _MozillaCookieJar.py 
	_strptime.py asynchat.py asyncore.py base64.py calendar.py 
	cgi.py cgitb.py codecs.py cookielib.py copy.py csv.py 
	decimal.py difflib.py doctest.py dumbdbm.py ftplib.py glob.py 
	gzip.py hmac.py httplib.py imaplib.py imghdr.py inspect.py 
	locale.py macpath.py markupbase.py mhlib.py mimetypes.py 
	nntplib.py ntpath.py optparse.py os.py os2emxpath.py pdb.py 
	pickletools.py popen2.py poplib.py posixfile.py posixpath.py 
	profile.py py_compile.py pydoc.py random.py reconvert.py 
	rfc822.py sets.py shutil.py smtplib.py socket.py sre.py 
	sre_compile.py sre_parse.py subprocess.py symbol.py tarfile.py 
	telnetlib.py tempfile.py textwrap.py threading.py tokenize.py 
	unittest.py urllib.py urllib2.py urlparse.py warnings.py 
	weakref.py webbrowser.py whichdb.py xdrlib.py xmlrpclib.py 
	zipfile.py 
Added Files:
      Tag: ast-branch
	hashlib.py md5.py sha.py 
Removed Files:
      Tag: ast-branch
	profile.doc 
Log Message:
Merge head to branch (for the last time)


--- NEW FILE: hashlib.py ---
# $Id: hashlib.py,v 1.2.2.2 2005/10/16 05:23:59 jhylton Exp $
#
#  Copyright (C) 2005   Gregory P. Smith (greg at electricrain.com)
#  Licensed to PSF under a Contributor Agreement.
#

# The module docstring is assigned to __doc__ explicitly (rather than as a
# bare leading string literal) because this file opens with license comments.
__doc__ = """hashlib module - A common interface to many hash functions.

new(name, string='') - returns a new hash object implementing the
                       given hash function; initializing the hash
                       using the given string data.

Named constructor functions are also available, these are much faster
than using new():

md5(), sha1(), sha224(), sha256(), sha384(), and sha512()

More algorithms may be available on your platform but the above are
guaranteed to exist.

Choose your hash function wisely.  Some have known weaknesses.
sha384 and sha512 will be slow on 32 bit platforms.
"""


def __get_builtin_constructor(name):
    """Return the builtin (non-OpenSSL) constructor for the named hash.

    Accepts upper- and lowercase spellings of the guaranteed algorithms:
    md5, sha1, sha224, sha256, sha384, sha512.

    Raises ValueError if no builtin implementation exists for *name*.
    """
    if name in ('SHA1', 'sha1'):
        import _sha
        return _sha.new
    elif name in ('MD5', 'md5'):
        import _md5
        return _md5.new
    elif name in ('SHA256', 'sha256', 'SHA224', 'sha224'):
        import _sha256
        # Strip the 'SHA'/'sha' prefix, leaving the bit size.
        bs = name[3:]
        if bs == '256':
            return _sha256.sha256
        elif bs == '224':
            return _sha256.sha224
    elif name in ('SHA512', 'sha512', 'SHA384', 'sha384'):
        import _sha512
        bs = name[3:]
        if bs == '512':
            return _sha512.sha512
        elif bs == '384':
            return _sha512.sha384

    # Parenthesized raise is valid in both Python 2 and 3; include the
    # rejected name so callers can tell which algorithm was unsupported.
    raise ValueError('unsupported hash type ' + name)


def __py_new(name, string=''):
    """new(name, string='') - Return a new hashing object using the named
    algorithm; optionally initialized with a string.

    Fallback used when the _hashlib (OpenSSL) module is unavailable.
    """
    constructor = __get_builtin_constructor(name)
    return constructor(string)


def __hash_new(name, string=''):
    """new(name, string='') - Return a new hashing object using the named
    algorithm; optionally initialized with a string.
    """
    try:
        hasher = _hashlib.new(name, string)
    except ValueError:
        # OpenSSL may not know this algorithm (e.g. versions before 0.9.8
        # lack SHA224/256 and SHA384/512); fall back to the builtin
        # implementations so those names still work.
        hasher = __get_builtin_constructor(name)(string)
    return hasher


try:
    import _hashlib
    # _hashlib (OpenSSL) is available: route new() through the C wrapper.
    new = __hash_new

    # Publish a module-level constructor for every algorithm that
    # _hashlib exposes as an 'openssl_<name>' attribute.
    for _openssl_name in [n for n in dir(_hashlib) if n.startswith('openssl_')]:
        _func_name = _openssl_name[len('openssl_'):]
        try:
            # Probe each constructor once: the linked OpenSSL version may
            # list an algorithm it cannot actually instantiate.
            _constructor = getattr(_hashlib, _openssl_name)
            _constructor()
            # OpenSSL supports it -- bind the C constructor directly (very fast).
            globals()[_func_name] = _constructor
        except ValueError:
            try:
                # OpenSSL refused; bind our builtin implementation instead (fast).
                globals()[_func_name] = __get_builtin_constructor(_func_name)
            except ValueError:
                # No builtin implementation either -- don't export the name.
                pass
    # Remove the loop temporaries from the module namespace.
    del _constructor
    del _openssl_name
    del _func_name

except ImportError:
    # No _hashlib OpenSSL module: new() goes through the legacy
    # builtin-module wrapper instead.
    new = __py_new

    # Bind the builtin C constructors for the guaranteed algorithms.
    md5 = __get_builtin_constructor('md5')
    sha1 = __get_builtin_constructor('sha1')
    sha224 = __get_builtin_constructor('sha224')
    sha256 = __get_builtin_constructor('sha256')
    sha384 = __get_builtin_constructor('sha384')
    sha512 = __get_builtin_constructor('sha512')

--- NEW FILE: md5.py ---
# $Id: md5.py,v 1.1.2.2 2005/10/16 05:23:59 jhylton Exp $
#
#  Copyright (C) 2005   Gregory P. Smith (greg at electricrain.com)
#  Licensed to PSF under a Contributor Agreement.

import hashlib

# Legacy md5-module interface: expose hashlib's MD5 constructor under
# both the historical names.
md5 = hashlib.md5
new = md5

blocksize = 1        # legacy value (wrong in any useful sense)
digest_size = 16     # MD5 produces a 16-byte (128-bit) digest

--- NEW FILE: sha.py ---
# $Id: sha.py,v 1.1.2.2 2005/10/16 05:23:59 jhylton Exp $
#
#  Copyright (C) 2005   Gregory P. Smith (greg at electricrain.com)
#  Licensed to PSF under a Contributor Agreement.

import hashlib

# Legacy sha-module interface: expose hashlib's SHA-1 constructor under
# both the historical names.
sha = hashlib.sha1
new = sha

blocksize = 1        # legacy value (wrong in any useful sense)
digest_size = 20     # SHA-1 produces a 20-byte (160-bit) digest
digestsize = 20      # the old sha module exported both spellings

Index: BaseHTTPServer.py
===================================================================
RCS file: /cvsroot/python/python/dist/src/Lib/BaseHTTPServer.py,v
retrieving revision 1.22.2.2
retrieving revision 1.22.2.3
diff -u -d -r1.22.2.2 -r1.22.2.3
--- BaseHTTPServer.py	7 Jan 2005 06:57:43 -0000	1.22.2.2
+++ BaseHTTPServer.py	16 Oct 2005 05:23:59 -0000	1.22.2.3
@@ -89,6 +89,8 @@
 </body>
 """
 
+def _quote_html(html):
+    return html.replace("&", "&amp;").replace("<", "&lt;").replace(">", "&gt;")
 
 class HTTPServer(SocketServer.TCPServer):
 
@@ -336,8 +338,9 @@
             message = short
         explain = long
         self.log_error("code %d, message %s", code, message)
+        # using _quote_html to prevent Cross Site Scripting attacks (see bug #1100201)
         content = (self.error_message_format %
-                   {'code': code, 'message': message, 'explain': explain})
+                   {'code': code, 'message': _quote_html(message), 'explain': explain})
         self.send_response(code, message)
         self.send_header("Content-Type", "text/html")
         self.send_header('Connection', 'close')

Index: ConfigParser.py
===================================================================
RCS file: /cvsroot/python/python/dist/src/Lib/ConfigParser.py,v
retrieving revision 1.44.2.2
retrieving revision 1.44.2.3
diff -u -d -r1.44.2.2 -r1.44.2.3
--- ConfigParser.py	7 Jan 2005 06:57:59 -0000	1.44.2.2
+++ ConfigParser.py	16 Oct 2005 05:23:59 -0000	1.44.2.3
@@ -28,7 +28,7 @@
         create the parser and specify a dictionary of intrinsic defaults.  The
         keys must be strings, the values must be appropriate for %()s string
         interpolation.  Note that `__name__' is always an intrinsic default;
-        it's value is the section's name.
+        its value is the section's name.
 
     sections()
         return all the configuration section names, sans DEFAULT

Index: Cookie.py
===================================================================
RCS file: /cvsroot/python/python/dist/src/Lib/Cookie.py,v
retrieving revision 1.13.2.2
retrieving revision 1.13.2.3
diff -u -d -r1.13.2.2 -r1.13.2.3
--- Cookie.py	7 Jan 2005 06:58:00 -0000	1.13.2.2
+++ Cookie.py	16 Oct 2005 05:23:59 -0000	1.13.2.3
@@ -69,9 +69,8 @@
    >>> C = Cookie.SmartCookie()
    >>> C["fig"] = "newton"
    >>> C["sugar"] = "wafer"
-   >>> print C
-   Set-Cookie: fig=newton;
-   Set-Cookie: sugar=wafer;
+   >>> C.output()
+   'Set-Cookie: fig=newton\r\nSet-Cookie: sugar=wafer'
 
 Notice that the printable representation of a Cookie is the
 appropriate format for a Set-Cookie: header.  This is the
@@ -82,9 +81,9 @@
    >>> C["rocky"] = "road"
    >>> C["rocky"]["path"] = "/cookie"
    >>> print C.output(header="Cookie:")
-   Cookie: rocky=road; Path=/cookie;
+   Cookie: rocky=road; Path=/cookie
    >>> print C.output(attrs=[], header="Cookie:")
-   Cookie: rocky=road;
+   Cookie: rocky=road
 
 The load() method of a Cookie extracts cookies from a string.  In a
 CGI script, you would use this method to extract the cookies from the
@@ -92,9 +91,8 @@
 
    >>> C = Cookie.SmartCookie()
    >>> C.load("chips=ahoy; vienna=finger")
-   >>> print C
-   Set-Cookie: chips=ahoy;
-   Set-Cookie: vienna=finger;
+   >>> C.output()
+   'Set-Cookie: chips=ahoy\r\nSet-Cookie: vienna=finger'
 
 The load() method is darn-tootin smart about identifying cookies
 within a string.  Escaped quotation marks, nested semicolons, and other
@@ -103,7 +101,7 @@
    >>> C = Cookie.SmartCookie()
    >>> C.load('keebler="E=everybody; L=\\"Loves\\"; fudge=\\012;";')
    >>> print C
-   Set-Cookie: keebler="E=everybody; L=\"Loves\"; fudge=\012;";
+   Set-Cookie: keebler="E=everybody; L=\"Loves\"; fudge=\012;"
 
 Each element of the Cookie also supports all of the RFC 2109
 Cookie attributes.  Here's an example which sets the Path
@@ -113,7 +111,7 @@
    >>> C["oreo"] = "doublestuff"
    >>> C["oreo"]["path"] = "/"
    >>> print C
-   Set-Cookie: oreo=doublestuff; Path=/;
+   Set-Cookie: oreo=doublestuff; Path=/
 
 Each dictionary element has a 'value' attribute, which gives you
 back the value associated with the key.
@@ -144,9 +142,8 @@
    '7'
    >>> C["string"].value
    'seven'
-   >>> print C
-   Set-Cookie: number=7;
-   Set-Cookie: string=seven;
+   >>> C.output()
+   'Set-Cookie: number=7\r\nSet-Cookie: string=seven'
 
 
 SerialCookie
@@ -165,9 +162,8 @@
    7
    >>> C["string"].value
    'seven'
-   >>> print C
-   Set-Cookie: number="I7\012.";
-   Set-Cookie: string="S'seven'\012p1\012.";
+   >>> C.output()
+   'Set-Cookie: number="I7\\012."\r\nSet-Cookie: string="S\'seven\'\\012p1\\012."'
 
 Be warned, however, if SerialCookie cannot de-serialize a value (because
 it isn't a valid pickle'd object), IT WILL RAISE AN EXCEPTION.
@@ -190,9 +186,8 @@
    7
    >>> C["string"].value
    'seven'
-   >>> print C
-   Set-Cookie: number="I7\012.";
-   Set-Cookie: string=seven;
+   >>> C.output()
+   'Set-Cookie: number="I7\\012."\r\nSet-Cookie: string=seven'
 
 
 Backwards Compatibility
@@ -228,6 +223,7 @@
            "SmartCookie","Cookie"]
 
 _nulljoin = ''.join
+_semispacejoin = '; '.join
 _spacejoin = ' '.join
 
 #
@@ -470,9 +466,9 @@
     def js_output(self, attrs=None):
         # Print javascript
         return """
-        <SCRIPT LANGUAGE="JavaScript">
+        <script type="text/javascript">
         <!-- begin hiding
-        document.cookie = \"%s\"
+        document.cookie = \"%s\";
         // end hiding -->
         </script>
         """ % ( self.OutputString(attrs), )
@@ -485,7 +481,7 @@
         RA = result.append
 
         # First, the key=value pair
-        RA("%s=%s;" % (self.key, self.coded_value))
+        RA("%s=%s" % (self.key, self.coded_value))
 
         # Now add any defined attributes
         if attrs is None:
@@ -496,16 +492,16 @@
             if V == "": continue
             if K not in attrs: continue
             if K == "expires" and type(V) == type(1):
-                RA("%s=%s;" % (self._reserved[K], _getdate(V)))
+                RA("%s=%s" % (self._reserved[K], _getdate(V)))
             elif K == "max-age" and type(V) == type(1):
-                RA("%s=%d;" % (self._reserved[K], V))
+                RA("%s=%d" % (self._reserved[K], V))
             elif K == "secure":
-                RA("%s;" % self._reserved[K])
+                RA(str(self._reserved[K]))
             else:
-                RA("%s=%s;" % (self._reserved[K], V))
+                RA("%s=%s" % (self._reserved[K], V))
 
         # Return the result
-        return _spacejoin(result)
+        return _semispacejoin(result)
     # end OutputString
 # end Morsel class
 
@@ -581,7 +577,7 @@
         self.__set(key, rval, cval)
     # end __setitem__
 
-    def output(self, attrs=None, header="Set-Cookie:", sep="\n"):
+    def output(self, attrs=None, header="Set-Cookie:", sep="\015\012"):
         """Return a string suitable for HTTP."""
         result = []
         items = self.items()

Index: DocXMLRPCServer.py
===================================================================
RCS file: /cvsroot/python/python/dist/src/Lib/DocXMLRPCServer.py,v
retrieving revision 1.2.4.2
retrieving revision 1.2.4.3
diff -u -d -r1.2.4.2 -r1.2.4.3
--- DocXMLRPCServer.py	7 Jan 2005 06:58:00 -0000	1.2.4.2
+++ DocXMLRPCServer.py	16 Oct 2005 05:23:59 -0000	1.2.4.3
@@ -12,7 +12,6 @@
 
 import pydoc
 import inspect
-import types
 import re
 import sys
 
@@ -92,7 +91,7 @@
         else:
             argspec = '(...)'
 
-        if isinstance(object, types.TupleType):
+        if isinstance(object, tuple):
             argspec = object[0] or argspec
             docstring = object[1] or ""
         else:

Index: SimpleXMLRPCServer.py
===================================================================
RCS file: /cvsroot/python/python/dist/src/Lib/SimpleXMLRPCServer.py,v
retrieving revision 1.2.12.2
retrieving revision 1.2.12.3
diff -u -d -r1.2.12.2 -r1.2.12.3
--- SimpleXMLRPCServer.py	7 Jan 2005 06:58:00 -0000	1.2.12.2
+++ SimpleXMLRPCServer.py	16 Oct 2005 05:23:59 -0000	1.2.12.3
@@ -106,14 +106,22 @@
 import sys
 import os
 
-def resolve_dotted_attribute(obj, attr):
+def resolve_dotted_attribute(obj, attr, allow_dotted_names=True):
     """resolve_dotted_attribute(a, 'b.c.d') => a.b.c.d
 
     Resolves a dotted attribute name to an object.  Raises
     an AttributeError if any attribute in the chain starts with a '_'.
+
+    If the optional allow_dotted_names argument is false, dots are not
+    supported and this function operates similar to getattr(obj, attr).
     """
 
-    for i in attr.split('.'):
+    if allow_dotted_names:
+        attrs = attr.split('.')
+    else:
+        attrs = [attr]
+
+    for i in attrs:
         if i.startswith('_'):
             raise AttributeError(
                 'attempt to access private attribute "%s"' % i
@@ -155,14 +163,14 @@
         self.funcs = {}
         self.instance = None
 
-    def register_instance(self, instance):
+    def register_instance(self, instance, allow_dotted_names=False):
         """Registers an instance to respond to XML-RPC requests.
 
         Only one instance can be installed at a time.
 
         If the registered instance has a _dispatch method then that
         method will be called with the name of the XML-RPC method and
-        it's parameters as a tuple
+        its parameters as a tuple
         e.g. instance._dispatch('add',(2,3))
 
         If the registered instance does not have a _dispatch method
@@ -173,9 +181,23 @@
 
         If a registered function matches a XML-RPC request, then it
         will be called instead of the registered instance.
+
+        If the optional allow_dotted_names argument is true and the
+        instance does not have a _dispatch method, method names
+        containing dots are supported and resolved, as long as none of
+        the name segments start with an '_'.
+
+            *** SECURITY WARNING: ***
+
+            Enabling the allow_dotted_names options allows intruders
+            to access your module's global variables and may allow
+            intruders to execute arbitrary code on your machine.  Only
+            use this option on a secure, closed network.
+
         """
 
         self.instance = instance
+        self.allow_dotted_names = allow_dotted_names
 
     def register_function(self, function, name = None):
         """Registers a function to respond to XML-RPC requests.
@@ -294,7 +316,8 @@
                 try:
                     method = resolve_dotted_attribute(
                                 self.instance,
-                                method_name
+                                method_name,
+                                self.allow_dotted_names
                                 )
                 except AttributeError:
                     pass
@@ -348,7 +371,7 @@
 
         If the registered instance has a _dispatch method then that
         method will be called with the name of the XML-RPC method and
-        it's parameters as a tuple
+        its parameters as a tuple
         e.g. instance._dispatch('add',(2,3))
 
         If the registered instance does not have a _dispatch method
@@ -373,7 +396,8 @@
                     try:
                         func = resolve_dotted_attribute(
                             self.instance,
-                            method
+                            method,
+                            self.allow_dotted_names
                             )
                     except AttributeError:
                         pass

Index: SocketServer.py
===================================================================
RCS file: /cvsroot/python/python/dist/src/Lib/SocketServer.py,v
retrieving revision 1.31.2.2
retrieving revision 1.31.2.3
diff -u -d -r1.31.2.2 -r1.31.2.3
--- SocketServer.py	7 Jan 2005 06:58:00 -0000	1.31.2.2
+++ SocketServer.py	16 Oct 2005 05:23:59 -0000	1.31.2.3
@@ -50,7 +50,7 @@
 unix server classes.
 
 Forking and threading versions of each type of server can be created
-using the ForkingServer and ThreadingServer mix-in classes.  For
+using the ForkingMixIn and ThreadingMixIn mix-in classes.  For
 instance, a threading UDP server class is created as follows:
 
         class ThreadingUDPServer(ThreadingMixIn, UDPServer): pass
@@ -65,8 +65,8 @@
 with your request handler class.
 
 The request handler class must be different for datagram or stream
-services.  This can be hidden by using the mix-in request handler
-classes StreamRequestHandler or DatagramRequestHandler.
+services.  This can be hidden by using the request handler
+subclasses StreamRequestHandler or DatagramRequestHandler.
 
 Of course, you still have to use your head!
 

Index: UserDict.py
===================================================================
RCS file: /cvsroot/python/python/dist/src/Lib/UserDict.py,v
retrieving revision 1.17.2.2
retrieving revision 1.17.2.3
diff -u -d -r1.17.2.2 -r1.17.2.3
--- UserDict.py	7 Jan 2005 06:58:00 -0000	1.17.2.2
+++ UserDict.py	16 Oct 2005 05:23:59 -0000	1.17.2.3
@@ -63,12 +63,12 @@
         return self.data.popitem()
     def __contains__(self, key):
         return key in self.data
+    @classmethod
     def fromkeys(cls, iterable, value=None):
         d = cls()
         for key in iterable:
             d[key] = value
         return d
-    fromkeys = classmethod(fromkeys)
 
 class IterableUserDict(UserDict):
     def __iter__(self):

Index: UserString.py
===================================================================
RCS file: /cvsroot/python/python/dist/src/Lib/UserString.py,v
retrieving revision 1.13.2.2
retrieving revision 1.13.2.3
diff -u -d -r1.13.2.2 -r1.13.2.3
--- UserString.py	7 Jan 2005 06:58:00 -0000	1.13.2.2
+++ UserString.py	16 Oct 2005 05:23:59 -0000	1.13.2.3
@@ -146,9 +146,13 @@
     def __hash__(self):
         raise TypeError, "unhashable type (it is mutable)"
     def __setitem__(self, index, sub):
+        if index < 0:
+            index += len(self.data)
         if index < 0 or index >= len(self.data): raise IndexError
         self.data = self.data[:index] + sub + self.data[index+1:]
     def __delitem__(self, index):
+        if index < 0:
+            index += len(self.data)
         if index < 0 or index >= len(self.data): raise IndexError
         self.data = self.data[:index] + self.data[index+1:]
     def __setslice__(self, start, end, sub):

Index: _LWPCookieJar.py
===================================================================
RCS file: /cvsroot/python/python/dist/src/Lib/_LWPCookieJar.py,v
retrieving revision 1.2.4.1
retrieving revision 1.2.4.2
diff -u -d -r1.2.4.1 -r1.2.4.2
--- _LWPCookieJar.py	7 Jan 2005 06:58:00 -0000	1.2.4.1
+++ _LWPCookieJar.py	16 Oct 2005 05:23:59 -0000	1.2.4.2
@@ -115,13 +115,6 @@
 
                 for data in split_header_words([line]):
                     name, value = data[0]
-                    # name and value are an exception here, since a plain "foo"
-                    # (with no "=", unlike "bar=foo") means a cookie with no
-                    # name and value "foo".  With all other cookie-attributes,
-                    # the situation is reversed: "foo" means an attribute named
-                    # "foo" with no value!
-                    if value is None:
-                        name, value = value, name
                     standard = {}
                     rest = {}
                     for k in boolean_attrs:

Index: _MozillaCookieJar.py
===================================================================
RCS file: /cvsroot/python/python/dist/src/Lib/_MozillaCookieJar.py,v
retrieving revision 1.3.4.1
retrieving revision 1.3.4.2
diff -u -d -r1.3.4.1 -r1.3.4.2
--- _MozillaCookieJar.py	7 Jan 2005 06:58:00 -0000	1.3.4.1
+++ _MozillaCookieJar.py	16 Oct 2005 05:23:59 -0000	1.3.4.2
@@ -73,6 +73,9 @@
                 secure = (secure == "TRUE")
                 domain_specified = (domain_specified == "TRUE")
                 if name == "":
+                    # cookies.txt regards 'Set-Cookie: foo' as a cookie
+                    # with no name, whereas cookielib regards it as a
+                    # cookie with no value.
                     name = value
                     value = None
 

Index: _strptime.py
===================================================================
RCS file: /cvsroot/python/python/dist/src/Lib/_strptime.py,v
retrieving revision 1.15.4.2
retrieving revision 1.15.4.3
diff -u -d -r1.15.4.2 -r1.15.4.3
--- _strptime.py	7 Jan 2005 06:58:00 -0000	1.15.4.2
+++ _strptime.py	16 Oct 2005 05:23:59 -0000	1.15.4.3
@@ -147,11 +147,14 @@
                 # strings (e.g., MacOS 9 having timezone as ('','')).
                 if old:
                     current_format = current_format.replace(old, new)
+            # If %W is used, then Sunday, 2005-01-03 will fall on week 0 since
+            # 2005-01-03 occurs before the first Monday of the year.  Otherwise
+            # %U is used.
             time_tuple = time.struct_time((1999,1,3,1,1,1,6,3,0))
-            if time.strftime(directive, time_tuple).find('00'):
-                U_W = '%U'
-            else:
+            if '00' in time.strftime(directive, time_tuple):
                 U_W = '%W'
+            else:
+                U_W = '%U'
             date_time[offset] = current_format.replace('11', U_W)
         self.LC_date_time = date_time[0]
         self.LC_date = date_time[1]
@@ -272,13 +275,14 @@
 
 def strptime(data_string, format="%a %b %d %H:%M:%S %Y"):
     """Return a time struct based on the input string and the format string."""
-    global _TimeRE_cache
+    global _TimeRE_cache, _regex_cache
     _cache_lock.acquire()
     try:
         time_re = _TimeRE_cache
         locale_time = time_re.locale_time
         if _getlang() != locale_time.lang:
             _TimeRE_cache = TimeRE()
+            _regex_cache = {}
         if len(_regex_cache) > _CACHE_MAX_SIZE:
             _regex_cache.clear()
         format_regex = _regex_cache.get(format)

Index: asynchat.py
===================================================================
RCS file: /cvsroot/python/python/dist/src/Lib/asynchat.py,v
retrieving revision 1.19.2.2
retrieving revision 1.19.2.3
diff -u -d -r1.19.2.2 -r1.19.2.3
--- asynchat.py	7 Jan 2005 06:58:00 -0000	1.19.2.2
+++ asynchat.py	16 Oct 2005 05:23:59 -0000	1.19.2.3
@@ -101,11 +101,11 @@
         while self.ac_in_buffer:
             lb = len(self.ac_in_buffer)
             terminator = self.get_terminator()
-            if terminator is None or terminator == '':
+            if not terminator:
                 # no terminator, collect it all
                 self.collect_incoming_data (self.ac_in_buffer)
                 self.ac_in_buffer = ''
-            elif isinstance(terminator, int):
+            elif isinstance(terminator, int) or isinstance(terminator, long):
                 # numeric terminator
                 n = terminator
                 if lb < n:

Index: asyncore.py
===================================================================
RCS file: /cvsroot/python/python/dist/src/Lib/asyncore.py,v
retrieving revision 1.32.2.2
retrieving revision 1.32.2.3
diff -u -d -r1.32.2.2 -r1.32.2.3
--- asyncore.py	7 Jan 2005 06:58:00 -0000	1.32.2.2
+++ asyncore.py	16 Oct 2005 05:23:59 -0000	1.32.2.3
@@ -46,7 +46,6 @@
 sophisticated high-performance network servers and clients a snap.
 """
 
-import exceptions
 import select
 import socket
 import sys
@@ -61,7 +60,7 @@
 except NameError:
     socket_map = {}
 
-class ExitNow(exceptions.Exception):
+class ExitNow(Exception):
     pass
 
 def read(obj):

Index: base64.py
===================================================================
RCS file: /cvsroot/python/python/dist/src/Lib/base64.py,v
retrieving revision 1.13.16.1
retrieving revision 1.13.16.2
diff -u -d -r1.13.16.1 -r1.13.16.2
--- base64.py	7 Jan 2005 06:58:00 -0000	1.13.16.1
+++ base64.py	16 Oct 2005 05:23:59 -0000	1.13.16.2
@@ -221,12 +221,14 @@
         acc += _b32rev[c] << shift
         shift -= 5
         if shift < 0:
-            parts.append(binascii.unhexlify(hex(acc)[2:-1]))
+            parts.append(binascii.unhexlify('%010x' % acc))
             acc = 0
             shift = 35
     # Process the last, partial quanta
-    last = binascii.unhexlify(hex(acc)[2:-1])
-    if padchars == 1:
+    last = binascii.unhexlify('%010x' % acc)
+    if padchars == 0:
+        last = ''                       # No characters
+    elif padchars == 1:
         last = last[:-1]
     elif padchars == 3:
         last = last[:-2]
@@ -234,7 +236,7 @@
         last = last[:-3]
     elif padchars == 6:
         last = last[:-4]
-    elif padchars <> 0:
+    else:
         raise TypeError('Incorrect padding')
     parts.append(last)
     return EMPTYSTRING.join(parts)

Index: calendar.py
===================================================================
RCS file: /cvsroot/python/python/dist/src/Lib/calendar.py,v
retrieving revision 1.28.2.2
retrieving revision 1.28.2.3
diff -u -d -r1.28.2.2 -r1.28.2.3
--- calendar.py	7 Jan 2005 06:58:01 -0000	1.28.2.2
+++ calendar.py	16 Oct 2005 05:23:59 -0000	1.28.2.3
@@ -10,7 +10,8 @@
 __all__ = ["error","setfirstweekday","firstweekday","isleap",
            "leapdays","weekday","monthrange","monthcalendar",
            "prmonth","month","prcal","calendar","timegm",
-           "month_name", "month_abbr", "day_name", "day_abbr"]
+           "month_name", "month_abbr", "day_name", "day_abbr",
+           "weekheader"]
 
 # Exception raised for bad input (with string parameter for details)
 error = ValueError

Index: cgi.py
===================================================================
RCS file: /cvsroot/python/python/dist/src/Lib/cgi.py,v
retrieving revision 1.74.2.2
retrieving revision 1.74.2.3
diff -u -d -r1.74.2.2 -r1.74.2.3
--- cgi.py	7 Jan 2005 06:58:01 -0000	1.74.2.2
+++ cgi.py	16 Oct 2005 05:23:59 -0000	1.74.2.3
@@ -237,7 +237,7 @@
 
     Arguments:
     fp   : input file
-    pdict: dictionary containing other parameters of conten-type header
+    pdict: dictionary containing other parameters of content-type header
 
     Returns a dictionary just like parse_qs(): keys are the field names, each
     value is a list of values for that field.  This is easy to use but not
@@ -1039,7 +1039,9 @@
 # =========
 
 def escape(s, quote=None):
-    """Replace special characters '&', '<' and '>' by SGML entities."""
+    '''Replace special characters "&", "<" and ">" to HTML-safe sequences.
+    If the optional flag quote is true, the quotation mark character (")
+    is also translated.'''
     s = s.replace("&", "&amp;") # Must be done first!
     s = s.replace("<", "&lt;")
     s = s.replace(">", "&gt;")

Index: cgitb.py
===================================================================
RCS file: /cvsroot/python/python/dist/src/Lib/cgitb.py,v
retrieving revision 1.5.2.2
retrieving revision 1.5.2.3
diff -u -d -r1.5.2.2 -r1.5.2.3
--- cgitb.py	7 Jan 2005 06:58:01 -0000	1.5.2.2
+++ cgitb.py	16 Oct 2005 05:23:59 -0000	1.5.2.3
@@ -22,6 +22,7 @@
 """
 
 __author__ = 'Ka-Ping Yee'
+
 __version__ = '$Revision$'
 
 import sys
@@ -112,8 +113,11 @@
     frames = []
     records = inspect.getinnerframes(etb, context)
     for frame, file, lnum, func, lines, index in records:
-        file = file and os.path.abspath(file) or '?'
-        link = '<a href="file://%s">%s</a>' % (file, pydoc.html.escape(file))
+        if file:
+            file = os.path.abspath(file)
+            link = '<a href="file://%s">%s</a>' % (file, pydoc.html.escape(file))
+        else:
+            file = link = '?'
         args, varargs, varkw, locals = inspect.getargvalues(frame)
         call = ''
         if func != '?':
@@ -146,7 +150,7 @@
             if name in done: continue
             done[name] = 1
             if value is not __UNDEF__:
-                if where in ['global', 'builtin']:
+                if where in ('global', 'builtin'):
                     name = ('<em>%s</em> ' % where) + strong(name)
                 elif where == 'local':
                     name = strong(name)

Index: codecs.py
===================================================================
RCS file: /cvsroot/python/python/dist/src/Lib/codecs.py,v
retrieving revision 1.26.2.2
retrieving revision 1.26.2.3
diff -u -d -r1.26.2.2 -r1.26.2.3
--- codecs.py	7 Jan 2005 06:58:01 -0000	1.26.2.2
+++ codecs.py	16 Oct 2005 05:23:59 -0000	1.26.2.3
@@ -229,13 +229,15 @@
         self.stream = stream
         self.errors = errors
         self.bytebuffer = ""
-        self.charbuffer = u""
-        self.atcr = False
+        # For str->str decoding this will stay a str
+        # For str->unicode decoding the first read will promote it to unicode
+        self.charbuffer = ""
+        self.linebuffer = None
 
     def decode(self, input, errors='strict'):
         raise NotImplementedError
 
-    def read(self, size=-1, chars=-1):
+    def read(self, size=-1, chars=-1, firstline=False):
 
         """ Decodes data from the stream self.stream and returns the
             resulting object.
@@ -252,12 +254,22 @@
             is intended to prevent having to decode huge files in one
             step.
 
+            If firstline is true, and a UnicodeDecodeError happens
+            after the first line terminator in the input, only the first line
+            will be returned, the rest of the input will be kept until the
+            next call to read().
+
             The method should use a greedy read strategy meaning that
             it should read as much data as is allowed within the
             definition of the encoding and the given size, e.g.  if
             optional encoding endings or state markers are available
             on the stream, these should be read too.
         """
+        # If we have lines cached, first merge them back into characters
+        if self.linebuffer:
+            self.charbuffer = "".join(self.linebuffer)
+            self.linebuffer = None
+            
         # read until we get the required number of characters (if available)
         while True:
             # can the request can be satisfied from the character buffer?
@@ -274,7 +286,16 @@
                 newdata = self.stream.read(size)
             # decode bytes (those remaining from the last call included)
             data = self.bytebuffer + newdata
-            newchars, decodedbytes = self.decode(data, self.errors)
+            try:
+                newchars, decodedbytes = self.decode(data, self.errors)
+            except UnicodeDecodeError, exc:
+                if firstline:
+                    newchars, decodedbytes = self.decode(data[:exc.start], self.errors)
+                    lines = newchars.splitlines(True)
+                    if len(lines)<=1:
+                        raise
+                else:
+                    raise
             # keep undecoded bytes until the next call
             self.bytebuffer = data[decodedbytes:]
             # put new characters in the character buffer
@@ -285,7 +306,7 @@
         if chars < 0:
             # Return everything we've got
             result = self.charbuffer
-            self.charbuffer = u""
+            self.charbuffer = ""
         else:
             # Return the first chars characters
             result = self.charbuffer[:chars]
@@ -301,30 +322,63 @@
             read() method.
 
         """
+        # If we have lines cached from an earlier read, return
+        # them unconditionally
+        if self.linebuffer:
+            line = self.linebuffer[0]
+            del self.linebuffer[0]
+            if len(self.linebuffer) == 1:
+                # revert to charbuffer mode; we might need more data
+                # next time
+                self.charbuffer = self.linebuffer[0]
+                self.linebuffer = None
+            if not keepends:
+                line = line.splitlines(False)[0]
+            return line
+            
         readsize = size or 72
-        line = u""
+        line = ""
         # If size is given, we call read() only once
         while True:
-            data = self.read(readsize)
-            if self.atcr and data.startswith(u"\n"):
-                data = data[1:]
+            data = self.read(readsize, firstline=True)
             if data:
-                self.atcr = data.endswith(u"\r")
+                # If we're at a "\r" read one extra character (which might
+                # be a "\n") to get a proper line ending. If the stream is
+                # temporarily exhausted we return the wrong line ending.
+                if data.endswith("\r"):
+                    data += self.read(size=1, chars=1)
+
             line += data
             lines = line.splitlines(True)
             if lines:
+                if len(lines) > 1:
+                    # More than one line result; the first line is a full line
+                    # to return
+                    line = lines[0]
+                    del lines[0]
+                    if len(lines) > 1:
+                        # cache the remaining lines
+                        lines[-1] += self.charbuffer
+                        self.linebuffer = lines
+                        self.charbuffer = None
+                    else:
+                        # only one remaining line, put it back into charbuffer
+                        self.charbuffer = lines[0] + self.charbuffer
+                    if not keepends:
+                        line = line.splitlines(False)[0]
+                    break
                 line0withend = lines[0]
                 line0withoutend = lines[0].splitlines(False)[0]
                 if line0withend != line0withoutend: # We really have a line end
                     # Put the rest back together and keep it until the next call
-                    self.charbuffer = u"".join(lines[1:]) + self.charbuffer
+                    self.charbuffer = "".join(lines[1:]) + self.charbuffer
                     if keepends:
                         line = line0withend
                     else:
                         line = line0withoutend
-                break
+                    break
             # we didn't get anything or this was our only try
-            elif not data or size is not None:
+            if not data or size is not None:
                 if line and not keepends:
                     line = line.splitlines(False)[0]
                 break
@@ -356,7 +410,17 @@
             from decoding errors.
 
         """
-        pass
+        self.bytebuffer = ""
+        self.charbuffer = u""
+        self.linebuffer = None
+
+    def seek(self, offset, whence=0):
+        """ Set the input stream's current position.
+
+            Resets the codec buffers used for keeping state.
+        """
+        self.reset()
+        self.stream.seek(offset, whence)
 
     def next(self):
 
@@ -529,7 +593,9 @@
     def next(self):
 
         """ Return the next decoded line from the input stream."""
-        return self.reader.next()
+        data = self.reader.next()
+        data, bytesencoded = self.encode(data, self.errors)
+        return data
 
     def __iter__(self):
         return self
@@ -566,7 +632,7 @@
 
         Note: The wrapped version will only accept the object format
         defined by the codecs, i.e. Unicode objects for most builtin
-        codecs. Output is also codec dependent and will usually by
+        codecs. Output is also codec dependent and will usually be
         Unicode as well.
 
         Files are always opened in binary mode, even if no binary mode
@@ -720,11 +786,19 @@
 
 ### error handlers
 
-strict_errors = lookup_error("strict")
-ignore_errors = lookup_error("ignore")
-replace_errors = lookup_error("replace")
-xmlcharrefreplace_errors = lookup_error("xmlcharrefreplace")
-backslashreplace_errors = lookup_error("backslashreplace")
+try:
+    strict_errors = lookup_error("strict")
+    ignore_errors = lookup_error("ignore")
+    replace_errors = lookup_error("replace")
+    xmlcharrefreplace_errors = lookup_error("xmlcharrefreplace")
+    backslashreplace_errors = lookup_error("backslashreplace")
+except LookupError:
+    # In --disable-unicode builds, these error handlers are missing
+    strict_errors = None
+    ignore_errors = None
+    replace_errors = None
+    xmlcharrefreplace_errors = None
+    backslashreplace_errors = None
 
 # Tell modulefinder that using codecs probably needs the encodings
 # package

Index: cookielib.py
===================================================================
RCS file: /cvsroot/python/python/dist/src/Lib/cookielib.py,v
retrieving revision 1.4.4.1
retrieving revision 1.4.4.2
diff -u -d -r1.4.4.1 -r1.4.4.2
--- cookielib.py	7 Jan 2005 06:58:01 -0000	1.4.4.1
+++ cookielib.py	16 Oct 2005 05:23:59 -0000	1.4.4.2
@@ -26,7 +26,6 @@
 """
 
 import sys, re, urlparse, copy, time, urllib, logging
-from types import StringTypes
 try:
     import threading as _threading
 except ImportError:
@@ -359,7 +358,7 @@
     [[('Basic', None), ('realm', '"foobar"')]]
 
     """
-    assert type(header_values) not in StringTypes
+    assert not isinstance(header_values, basestring)
     result = []
     for text in header_values:
         orig_text = text
@@ -448,19 +447,15 @@
     for ns_header in ns_headers:
         pairs = []
         version_set = False
-        for param in re.split(r";\s*", ns_header):
+        for ii, param in enumerate(re.split(r";\s*", ns_header)):
             param = param.rstrip()
             if param == "": continue
             if "=" not in param:
-                if param.lower() in known_attrs:
-                    k, v = param, None
-                else:
-                    # cookie with missing value
-                    k, v = param, None
+                k, v = param, None
             else:
                 k, v = re.split(r"\s*=\s*", param, 1)
                 k = k.lstrip()
-            if k is not None:
+            if ii != 0:
                 lc = k.lower()
                 if lc in known_attrs:
                     k = lc
@@ -783,12 +778,12 @@
 
     def __repr__(self):
         args = []
-        for name in ["version", "name", "value",
+        for name in ("version", "name", "value",
                      "port", "port_specified",
                      "domain", "domain_specified", "domain_initial_dot",
                      "path", "path_specified",
                      "secure", "expires", "discard", "comment", "comment_url",
-                     ]:
+                     ):
             attr = getattr(self, name)
             args.append("%s=%s" % (name, repr(attr)))
         args.append("rest=%s" % repr(self._rest))
@@ -981,9 +976,9 @@
                 if j == 0:  # domain like .foo.bar
                     tld = domain[i+1:]
                     sld = domain[j+1:i]
-                    if (sld.lower() in [
+                    if (sld.lower() in (
                         "co", "ac",
-                        "com", "edu", "org", "net", "gov", "mil", "int"] and
+                        "com", "edu", "org", "net", "gov", "mil", "int") and
                         len(tld) == 2):
                         # domain like .co.uk
                         debug("   country-code second level domain %s", domain)
@@ -1134,11 +1129,10 @@
         # having to load lots of MSIE cookie files unless necessary.
         req_host, erhn = eff_request_host(request)
         if not req_host.startswith("."):
-            dotted_req_host = "."+req_host
+            req_host = "."+req_host
         if not erhn.startswith("."):
-            dotted_erhn = "."+erhn
-        if not (dotted_req_host.endswith(domain) or
-                dotted_erhn.endswith(domain)):
+            erhn = "."+erhn
+        if not (req_host.endswith(domain) or erhn.endswith(domain)):
             #debug("   request domain %s does not match cookie domain %s",
             #      req_host, domain)
             return False
@@ -1416,7 +1410,7 @@
                     v = self._now + v
                 if (k in value_attrs) or (k in boolean_attrs):
                     if (v is None and
-                        k not in ["port", "comment", "commenturl"]):
+                        k not in ("port", "comment", "commenturl")):
                         debug("   missing value for %s attribute" % k)
                         bad_cookie = True
                         break

Index: copy.py
===================================================================
RCS file: /cvsroot/python/python/dist/src/Lib/copy.py,v
retrieving revision 1.28.2.2
retrieving revision 1.28.2.3
diff -u -d -r1.28.2.2 -r1.28.2.3
--- copy.py	7 Jan 2005 06:58:01 -0000	1.28.2.2
+++ copy.py	16 Oct 2005 05:23:59 -0000	1.28.2.3
@@ -14,7 +14,7 @@
 class instances).
 
 - A shallow copy constructs a new compound object and then (to the
-  extent possible) inserts *the same objects* into in that the
+  extent possible) inserts *the same objects* into it that the
   original contains.
 
 - A deep copy constructs a new compound object and then, recursively,
@@ -99,7 +99,7 @@
 
 def _copy_immutable(x):
     return x
-for t in (types.NoneType, int, long, float, bool, str, tuple,
+for t in (type(None), int, long, float, bool, str, tuple,
           frozenset, type, xrange, types.ClassType,
           types.BuiltinFunctionType):
     d[t] = _copy_immutable
@@ -195,26 +195,26 @@
 
 def _deepcopy_atomic(x, memo):
     return x
-d[types.NoneType] = _deepcopy_atomic
-d[types.IntType] = _deepcopy_atomic
-d[types.LongType] = _deepcopy_atomic
-d[types.FloatType] = _deepcopy_atomic
-d[types.BooleanType] = _deepcopy_atomic
+d[type(None)] = _deepcopy_atomic
+d[int] = _deepcopy_atomic
+d[long] = _deepcopy_atomic
+d[float] = _deepcopy_atomic
+d[bool] = _deepcopy_atomic
 try:
-    d[types.ComplexType] = _deepcopy_atomic
-except AttributeError:
+    d[complex] = _deepcopy_atomic
+except NameError:
     pass
-d[types.StringType] = _deepcopy_atomic
+d[str] = _deepcopy_atomic
 try:
-    d[types.UnicodeType] = _deepcopy_atomic
-except AttributeError:
+    d[unicode] = _deepcopy_atomic
+except NameError:
     pass
 try:
     d[types.CodeType] = _deepcopy_atomic
 except AttributeError:
     pass
-d[types.TypeType] = _deepcopy_atomic
-d[types.XRangeType] = _deepcopy_atomic
+d[type] = _deepcopy_atomic
+d[xrange] = _deepcopy_atomic
 d[types.ClassType] = _deepcopy_atomic
 d[types.BuiltinFunctionType] = _deepcopy_atomic
 
@@ -224,7 +224,7 @@
     for a in x:
         y.append(deepcopy(a, memo))
     return y
-d[types.ListType] = _deepcopy_list
+d[list] = _deepcopy_list
 
 def _deepcopy_tuple(x, memo):
     y = []
@@ -243,7 +243,7 @@
         y = x
     memo[d] = y
     return y
-d[types.TupleType] = _deepcopy_tuple
+d[tuple] = _deepcopy_tuple
 
 def _deepcopy_dict(x, memo):
     y = {}
@@ -251,7 +251,7 @@
     for key, value in x.iteritems():
         y[deepcopy(key, memo)] = deepcopy(value, memo)
     return y
-d[types.DictionaryType] = _deepcopy_dict
+d[dict] = _deepcopy_dict
 if PyStringMap is not None:
     d[PyStringMap] = _deepcopy_dict
 

Index: csv.py
===================================================================
RCS file: /cvsroot/python/python/dist/src/Lib/csv.py,v
retrieving revision 1.12.2.1
retrieving revision 1.12.2.2
diff -u -d -r1.12.2.1 -r1.12.2.2
--- csv.py	7 Jan 2005 06:58:02 -0000	1.12.2.1
+++ csv.py	16 Oct 2005 05:23:59 -0000	1.12.2.2
@@ -6,8 +6,10 @@
 import re
 from _csv import Error, __version__, writer, reader, register_dialect, \
                  unregister_dialect, get_dialect, list_dialects, \
+                 field_size_limit, \
                  QUOTE_MINIMAL, QUOTE_ALL, QUOTE_NONNUMERIC, QUOTE_NONE, \
                  __doc__
+from _csv import Dialect as _Dialect
 
 try:
     from cStringIO import StringIO
@@ -41,48 +43,14 @@
     def __init__(self):
         if self.__class__ != Dialect:
             self._valid = True
-        errors = self._validate()
-        if errors != []:
-            raise Error, "Dialect did not validate: %s" % ", ".join(errors)
+        self._validate()
 
     def _validate(self):
-        errors = []
-        if not self._valid:
-            errors.append("can't directly instantiate Dialect class")
-
-        if self.delimiter is None:
-            errors.append("delimiter character not set")
-        elif (not isinstance(self.delimiter, str) or
-              len(self.delimiter) > 1):
-            errors.append("delimiter must be one-character string")
-
-        if self.quotechar is None:
-            if self.quoting != QUOTE_NONE:
-                errors.append("quotechar not set")
-        elif (not isinstance(self.quotechar, str) or
-              len(self.quotechar) > 1):
-            errors.append("quotechar must be one-character string")
-
-        if self.lineterminator is None:
-            errors.append("lineterminator not set")
-        elif not isinstance(self.lineterminator, str):
-            errors.append("lineterminator must be a string")
-
-        if self.doublequote not in (True, False):
-            errors.append("doublequote parameter must be True or False")
-
-        if self.skipinitialspace not in (True, False):
-            errors.append("skipinitialspace parameter must be True or False")
-
-        if self.quoting is None:
-            errors.append("quoting parameter not set")
-
-        if self.quoting is QUOTE_NONE:
-            if (not isinstance(self.escapechar, (unicode, str)) or
-                len(self.escapechar) > 1):
-                errors.append("escapechar must be a one-character string or unicode object")
-
-        return errors
+        try:
+            _Dialect(self)
+        except TypeError, e:
+            # We do this for compatibility with py2.3
+            raise Error(str(e))
 
 class excel(Dialect):
     """Describe the usual properties of Excel-generated CSV files."""

Index: decimal.py
===================================================================
RCS file: /cvsroot/python/python/dist/src/Lib/decimal.py,v
retrieving revision 1.32.2.1
retrieving revision 1.32.2.2
diff -u -d -r1.32.2.1 -r1.32.2.2
--- decimal.py	7 Jan 2005 06:58:02 -0000	1.32.2.1
+++ decimal.py	16 Oct 2005 05:23:59 -0000	1.32.2.2
@@ -134,7 +134,7 @@
     'setcontext', 'getcontext'
 ]
 
-import copy
+import copy as _copy
 
 #Rounding
 ROUND_DOWN = 'ROUND_DOWN'
@@ -515,7 +515,7 @@
         if isinstance(value, (list,tuple)):
             if len(value) != 3:
                 raise ValueError, 'Invalid arguments'
-            if value[0] not in [0,1]:
+            if value[0] not in (0,1):
                 raise ValueError, 'Invalid sign'
             for digit in value[1]:
                 if not isinstance(digit, (int,long)) or digit < 0:
@@ -645,6 +645,8 @@
 
     def __cmp__(self, other, context=None):
         other = _convert_other(other)
+        if other is NotImplemented:
+            return other
 
         if self._is_special or other._is_special:
             ans = self._check_nans(other, context)
@@ -696,12 +698,12 @@
 
     def __eq__(self, other):
         if not isinstance(other, (Decimal, int, long)):
-            return False
+            return NotImplemented
         return self.__cmp__(other) == 0
 
     def __ne__(self, other):
         if not isinstance(other, (Decimal, int, long)):
-            return True
+            return NotImplemented
         return self.__cmp__(other) != 0
 
     def compare(self, other, context=None):
@@ -714,6 +716,8 @@
         Like __cmp__, but returns Decimal instances.
         """
         other = _convert_other(other)
+        if other is NotImplemented:
+            return other
 
         #compare(NaN, NaN) = NaN
         if (self._is_special or other and other._is_special):
@@ -728,6 +732,10 @@
         # Decimal integers must hash the same as the ints
         # Non-integer decimals are normalized and hashed as strings
         # Normalization assures that hast(100E-1) == hash(10)
+        if self._is_special:
+            if self._isnan():
+                raise TypeError('Cannot hash a NaN value.')
+            return hash(str(self))
         i = int(self)
         if self == Decimal(i):
             return hash(i)
@@ -752,18 +760,19 @@
         Captures all of the information in the underlying representation.
         """
 
-        if self._isnan():
-            minus = '-'*self._sign
-            if self._int == (0,):
-                info = ''
-            else:
-                info = ''.join(map(str, self._int))
-            if self._isnan() == 2:
-                return minus + 'sNaN' + info
-            return minus + 'NaN' + info
-        if self._isinfinity():
-            minus = '-'*self._sign
-            return minus + 'Infinity'
+        if self._is_special:
+            if self._isnan():
+                minus = '-'*self._sign
+                if self._int == (0,):
+                    info = ''
+                else:
+                    info = ''.join(map(str, self._int))
+                if self._isnan() == 2:
+                    return minus + 'sNaN' + info
+                return minus + 'NaN' + info
+            if self._isinfinity():
+                minus = '-'*self._sign
+                return minus + 'Infinity'
 
         if context is None:
             context = getcontext()
@@ -915,6 +924,8 @@
         -INF + INF (or the reverse) cause InvalidOperation errors.
         """
         other = _convert_other(other)
+        if other is NotImplemented:
+            return other
 
         if context is None:
             context = getcontext()
@@ -1002,6 +1013,8 @@
     def __sub__(self, other, context=None):
         """Return self + (-other)"""
         other = _convert_other(other)
+        if other is NotImplemented:
+            return other
 
         if self._is_special or other._is_special:
             ans = self._check_nans(other, context=context)
@@ -1019,6 +1032,8 @@
     def __rsub__(self, other, context=None):
         """Return other + (-self)"""
         other = _convert_other(other)
+        if other is NotImplemented:
+            return other
 
         tmp = Decimal(self)
         tmp._sign = 1 - tmp._sign
@@ -1064,6 +1079,8 @@
         (+-) INF * 0 (or its reverse) raise InvalidOperation.
         """
         other = _convert_other(other)
+        if other is NotImplemented:
+            return other
 
         if context is None:
             context = getcontext()
@@ -1136,6 +1153,10 @@
         computing the other value are not raised.
         """
         other = _convert_other(other)
+        if other is NotImplemented:
+            if divmod in (0, 1):
+                return NotImplemented
+            return (NotImplemented, NotImplemented)
 
         if context is None:
             context = getcontext()
@@ -1288,6 +1309,8 @@
     def __rdiv__(self, other, context=None):
         """Swaps self/other and returns __div__."""
         other = _convert_other(other)
+        if other is NotImplemented:
+            return other
         return other.__div__(self, context=context)
     __rtruediv__ = __rdiv__
 
@@ -1300,6 +1323,8 @@
     def __rdivmod__(self, other, context=None):
         """Swaps self/other and returns __divmod__."""
         other = _convert_other(other)
+        if other is NotImplemented:
+            return other
         return other.__divmod__(self, context=context)
 
     def __mod__(self, other, context=None):
@@ -1307,6 +1332,8 @@
         self % other
         """
         other = _convert_other(other)
+        if other is NotImplemented:
+            return other
 
         if self._is_special or other._is_special:
             ans = self._check_nans(other, context)
@@ -1321,6 +1348,8 @@
     def __rmod__(self, other, context=None):
         """Swaps self/other and returns __mod__."""
         other = _convert_other(other)
+        if other is NotImplemented:
+            return other
         return other.__mod__(self, context=context)
 
     def remainder_near(self, other, context=None):
@@ -1328,6 +1357,8 @@
         Remainder nearest to 0-  abs(remainder-near) <= other/2
         """
         other = _convert_other(other)
+        if other is NotImplemented:
+            return other
 
         if self._is_special or other._is_special:
             ans = self._check_nans(other, context)
@@ -1407,6 +1438,8 @@
     def __rfloordiv__(self, other, context=None):
         """Swaps self/other and returns __floordiv__."""
         other = _convert_other(other)
+        if other is NotImplemented:
+            return other
         return other.__floordiv__(self, context=context)
 
     def __float__(self):
@@ -1414,7 +1447,7 @@
         return float(str(self))
 
     def __int__(self):
-        """Converts self to a int, truncating if necessary."""
+        """Converts self to an int, truncating if necessary."""
         if self._is_special:
             if self._isnan():
                 context = getcontext()
@@ -1657,6 +1690,8 @@
         If modulo is None (default), don't take it mod modulo.
         """
         n = _convert_other(n)
+        if n is NotImplemented:
+            return n
 
         if context is None:
             context = getcontext()
@@ -1743,6 +1778,8 @@
     def __rpow__(self, other, context=None):
         """Swaps self/other and returns __pow__."""
         other = _convert_other(other)
+        if other is NotImplemented:
+            return other
         return other.__pow__(self, context=context)
 
     def normalize(self, context=None):
@@ -1997,6 +2034,8 @@
         NaN (and signals if one is sNaN).  Also rounds.
         """
         other = _convert_other(other)
+        if other is NotImplemented:
+            return other
 
         if self._is_special or other._is_special:
             # if one operand is a quiet NaN and the other is number, then the
@@ -2044,6 +2083,8 @@
         NaN (and signals if one is sNaN).  Also rounds.
         """
         other = _convert_other(other)
+        if other is NotImplemented:
+            return other
 
         if self._is_special or other._is_special:
             # if one operand is a quiet NaN and the other is number, then the
@@ -2170,7 +2211,7 @@
             del s
         for name, val in locals().items():
             if val is None:
-                setattr(self, name, copy.copy(getattr(DefaultContext, name)))
+                setattr(self, name, _copy.copy(getattr(DefaultContext, name)))
             else:
                 setattr(self, name, val)
         del self.self
@@ -2714,7 +2755,7 @@
         return a.sqrt(context=self)
 
     def subtract(self, a, b):
-        """Return the sum of the two operands.
+        """Return the difference between the two operands.
 
         >>> ExtendedContext.subtract(Decimal('1.3'), Decimal('1.07'))
         Decimal("0.23")
@@ -2870,8 +2911,7 @@
         return other
     if isinstance(other, (int, long)):
         return Decimal(other)
-
-    raise TypeError, "You can interact Decimal only with int, long or Decimal data types."
+    return NotImplemented
 
 _infinity_map = {
     'inf' : 1,

Index: difflib.py
===================================================================
RCS file: /cvsroot/python/python/dist/src/Lib/difflib.py,v
retrieving revision 1.10.2.2
retrieving revision 1.10.2.3
diff -u -d -r1.10.2.2 -r1.10.2.3
--- difflib.py	7 Jan 2005 06:58:02 -0000	1.10.2.2
+++ difflib.py	16 Oct 2005 05:23:59 -0000	1.10.2.3
@@ -1371,7 +1371,7 @@
                 text = ' '
             # insert marks that won't be noticed by an xml/html escaper.
             text = '\0' + format_key + text + '\1'
-        # Return line of text, first allow user's line formatter to do it's
+        # Return line of text, first allow user's line formatter to do its
         # thing (such as adding the line number) then replace the special
         # marks with what the user's change markup.
         return (num_lines[side],text)
@@ -1472,7 +1472,7 @@
         """Yields from/to lines of text with a change indication.
 
         This function is an iterator.  It itself pulls lines from the line
-        iterator.  It's difference from that iterator is that this function
+        iterator.  Its difference from that iterator is that this function
         always yields a pair of from/to text lines (with the change
         indication).  If necessary it will collect single from/to lines
         until it has a matching pair from/to pair to yield.

Index: doctest.py
===================================================================
RCS file: /cvsroot/python/python/dist/src/Lib/doctest.py,v
retrieving revision 1.24.2.2
retrieving revision 1.24.2.3
diff -u -d -r1.24.2.2 -r1.24.2.3
--- doctest.py	7 Jan 2005 06:58:03 -0000	1.24.2.2
+++ doctest.py	16 Oct 2005 05:23:59 -0000	1.24.2.3
@@ -2071,24 +2071,24 @@
     The old flag is returned so that a runner could restore the old
     value if it wished to:
 
-      >>> old = _unittest_reportflags
-      >>> set_unittest_reportflags(REPORT_NDIFF |
+      >>> import doctest
+      >>> old = doctest._unittest_reportflags
+      >>> doctest.set_unittest_reportflags(REPORT_NDIFF |
       ...                          REPORT_ONLY_FIRST_FAILURE) == old
       True
 
-      >>> import doctest
       >>> doctest._unittest_reportflags == (REPORT_NDIFF |
       ...                                   REPORT_ONLY_FIRST_FAILURE)
       True
 
     Only reporting flags can be set:
 
-      >>> set_unittest_reportflags(ELLIPSIS)
+      >>> doctest.set_unittest_reportflags(ELLIPSIS)
       Traceback (most recent call last):
       ...
       ValueError: ('Only reporting flags allowed', 8)
 
-      >>> set_unittest_reportflags(old) == (REPORT_NDIFF |
+      >>> doctest.set_unittest_reportflags(old) == (REPORT_NDIFF |
       ...                                   REPORT_ONLY_FIRST_FAILURE)
       True
     """
@@ -2476,6 +2476,7 @@
           blah
        #
        #     Ho hum
+       <BLANKLINE>
        """
     output = []
     for piece in DocTestParser().parse(s):
@@ -2498,7 +2499,8 @@
     while output and output[0] == '#':
         output.pop(0)
     # Combine the output, and return it.
-    return '\n'.join(output)
+    # Add a courtesy newline to prevent exec from choking (see bug #1172785)
+    return '\n'.join(output) + '\n'
 
 def testsource(module, name):
     """Extract the test sources from a doctest docstring as a script.

Index: dumbdbm.py
===================================================================
RCS file: /cvsroot/python/python/dist/src/Lib/dumbdbm.py,v
retrieving revision 1.19.2.2
retrieving revision 1.19.2.3
diff -u -d -r1.19.2.2 -r1.19.2.3
--- dumbdbm.py	7 Jan 2005 06:58:03 -0000	1.19.2.2
+++ dumbdbm.py	16 Oct 2005 05:23:59 -0000	1.19.2.3
@@ -81,6 +81,7 @@
             pass
         else:
             for line in f:
+                line = line.rstrip()
                 key, pos_and_siz_pair = eval(line)
                 self._index[key] = pos_and_siz_pair
             f.close()

Index: ftplib.py
===================================================================
RCS file: /cvsroot/python/python/dist/src/Lib/ftplib.py,v
retrieving revision 1.72.2.1
retrieving revision 1.72.2.2
diff -u -d -r1.72.2.1 -r1.72.2.2
--- ftplib.py	7 Jan 2005 06:58:03 -0000	1.72.2.1
+++ ftplib.py	16 Oct 2005 05:23:59 -0000	1.72.2.2
@@ -208,13 +208,13 @@
         if self.debugging: print '*resp*', self.sanitize(resp)
         self.lastresp = resp[:3]
         c = resp[:1]
+        if c in ('1', '2', '3'):
+            return resp
         if c == '4':
             raise error_temp, resp
         if c == '5':
             raise error_perm, resp
-        if c not in '123':
-            raise error_proto, resp
-        return resp
+        raise error_proto, resp
 
     def voidresp(self):
         """Expect a response beginning with '2'."""
@@ -582,17 +582,17 @@
     Raises error_proto if it does not contain '(|||port|)'
     Return ('host.addr.as.numbers', port#) tuple.'''
 
-    if resp[:3] <> '229':
+    if resp[:3] != '229':
         raise error_reply, resp
     left = resp.find('(')
     if left < 0: raise error_proto, resp
     right = resp.find(')', left + 1)
     if right < 0:
         raise error_proto, resp # should contain '(|||port|)'
-    if resp[left + 1] <> resp[right - 1]:
+    if resp[left + 1] != resp[right - 1]:
         raise error_proto, resp
     parts = resp[left + 1:right].split(resp[left+1])
-    if len(parts) <> 5:
+    if len(parts) != 5:
         raise error_proto, resp
     host = peer[0]
     port = int(parts[3])
@@ -755,7 +755,16 @@
 
 def test():
     '''Test program.
-    Usage: ftp [-d] [-r[file]] host [-l[dir]] [-d[dir]] [-p] [file] ...'''
+    Usage: ftp [-d] [-r[file]] host [-l[dir]] [-d[dir]] [-p] [file] ...
+
+    -d dir
+    -l list
+    -p password
+    '''
+
+    if len(sys.argv) < 2:
+        print test.__doc__
+        sys.exit(0)
 
     debugging = 0
     rcfile = None

Index: glob.py
===================================================================
RCS file: /cvsroot/python/python/dist/src/Lib/glob.py,v
retrieving revision 1.10.20.1
retrieving revision 1.10.20.2
diff -u -d -r1.10.20.1 -r1.10.20.2
--- glob.py	7 Jan 2005 06:58:04 -0000	1.10.20.1
+++ glob.py	16 Oct 2005 05:23:59 -0000	1.10.20.2
@@ -4,7 +4,7 @@
 import fnmatch
 import re
 
-__all__ = ["glob"]
+__all__ = ["glob", "iglob"]
 
 def glob(pathname):
     """Return a list of paths matching a pathname pattern.
@@ -12,35 +12,42 @@
     The pattern may contain simple shell-style wildcards a la fnmatch.
 
     """
+    return list(iglob(pathname))
+
+def iglob(pathname):
+    """Return an iterator which yields paths matching a pathname pattern.
+
+    The pattern may contain simple shell-style wildcards a la fnmatch.
+
+    """
     if not has_magic(pathname):
         if os.path.lexists(pathname):
-            return [pathname]
-        else:
-            return []
+            yield pathname
+        return
     dirname, basename = os.path.split(pathname)
     if not dirname:
-        return glob1(os.curdir, basename)
-    elif has_magic(dirname):
-        list = glob(dirname)
+        for name in glob1(os.curdir, basename):
+            yield name
+        return
+    if has_magic(dirname):
+        dirs = iglob(dirname)
     else:
-        list = [dirname]
-    if not has_magic(basename):
-        result = []
-        for dirname in list:
-            if basename or os.path.isdir(dirname):
-                name = os.path.join(dirname, basename)
-                if os.path.lexists(name):
-                    result.append(name)
+        dirs = [dirname]
+    if has_magic(basename):
+        glob_in_dir = glob1
     else:
-        result = []
-        for dirname in list:
-            sublist = glob1(dirname, basename)
-            for name in sublist:
-                result.append(os.path.join(dirname, name))
-    return result
+        glob_in_dir = glob0
+    for dirname in dirs:
+        for name in glob_in_dir(dirname, basename):
+            yield os.path.join(dirname, name)
+
+# These 2 helper functions non-recursively glob inside a literal directory.
+# They return a list of basenames. `glob1` accepts a pattern while `glob0`
+# takes a literal basename (so it only has to check for its existence).
 
 def glob1(dirname, pattern):
-    if not dirname: dirname = os.curdir
+    if not dirname:
+        dirname = os.curdir
     try:
         names = os.listdir(dirname)
     except os.error:
@@ -49,6 +56,17 @@
         names=filter(lambda x: x[0]!='.',names)
     return fnmatch.filter(names,pattern)
 
+def glob0(dirname, basename):
+    if basename == '':
+        # `os.path.split()` returns an empty basename for paths ending with a
+        # directory separator.  'q*x/' should match only directories.
+        if os.path.isdir(dirname):
+            return [basename]
+    else:
+        if os.path.lexists(os.path.join(dirname, basename)):
+            return [basename]
+    return []
+
 
 magic_check = re.compile('[*?[]')
 

Index: gzip.py
===================================================================
RCS file: /cvsroot/python/python/dist/src/Lib/gzip.py,v
retrieving revision 1.34.2.2
retrieving revision 1.34.2.3
diff -u -d -r1.34.2.2 -r1.34.2.3
--- gzip.py	7 Jan 2005 06:58:04 -0000	1.34.2.2
+++ gzip.py	16 Oct 2005 05:23:59 -0000	1.34.2.3
@@ -55,6 +55,7 @@
     """
 
     myfileobj = None
+    max_read_chunk = 10 * 1024 * 1024   # 10Mb
 
     def __init__(self, filename=None, mode=None,
                  compresslevel=9, fileobj=None):
@@ -215,14 +216,14 @@
             try:
                 while True:
                     self._read(readsize)
-                    readsize = readsize * 2
+                    readsize = min(self.max_read_chunk, readsize * 2)
             except EOFError:
                 size = self.extrasize
         else:               # just get some more of it
             try:
                 while size > self.extrasize:
                     self._read(readsize)
-                    readsize = readsize * 2
+                    readsize = min(self.max_read_chunk, readsize * 2)
             except EOFError:
                 if size > self.extrasize:
                     size = self.extrasize
@@ -331,7 +332,10 @@
             return
         self.close()
 
-    def flush(self):
+    def flush(self,zlib_mode=zlib.Z_SYNC_FLUSH):
+        if self.mode == WRITE:
+            # Ensure the compressor's buffer is flushed
+            self.fileobj.write(self.compress.flush(zlib_mode))
         self.fileobj.flush()
 
     def fileno(self):

Index: hmac.py
===================================================================
RCS file: /cvsroot/python/python/dist/src/Lib/hmac.py,v
retrieving revision 1.7.2.1
retrieving revision 1.7.2.2
diff -u -d -r1.7.2.1 -r1.7.2.2
--- hmac.py	7 Jan 2005 06:58:04 -0000	1.7.2.1
+++ hmac.py	16 Oct 2005 05:23:59 -0000	1.7.2.2
@@ -28,27 +28,33 @@
 
         key:       key for the keyed hash object.
         msg:       Initial input for the hash, if provided.
-        digestmod: A module supporting PEP 247. Defaults to the md5 module.
+        digestmod: A module supporting PEP 247.  *OR*
+                   A hashlib constructor returning a new hash object.
+                   Defaults to hashlib.md5.
         """
 
         if key is _secret_backdoor_key: # cheap
             return
 
         if digestmod is None:
-            import md5
-            digestmod = md5
+            import hashlib
+            digestmod = hashlib.md5
 
-        self.digestmod = digestmod
-        self.outer = digestmod.new()
-        self.inner = digestmod.new()
-        self.digest_size = digestmod.digest_size
+        if callable(digestmod):
+            self.digest_cons = digestmod
+        else:
+            self.digest_cons = lambda d='': digestmod.new(d)
+
+        self.outer = self.digest_cons()
+        self.inner = self.digest_cons()
+        self.digest_size = self.inner.digest_size
 
         blocksize = 64
         ipad = "\x36" * blocksize
         opad = "\x5C" * blocksize
 
         if len(key) > blocksize:
-            key = digestmod.new(key).digest()
+            key = self.digest_cons(key).digest()
 
         key = key + chr(0) * (blocksize - len(key))
         self.outer.update(_strxor(key, opad))
@@ -70,7 +76,7 @@
         An update to this copy won't affect the original object.
         """
         other = HMAC(_secret_backdoor_key)
-        other.digestmod = self.digestmod
+        other.digest_cons = self.digest_cons
         other.digest_size = self.digest_size
         other.inner = self.inner.copy()
         other.outer = self.outer.copy()

Index: httplib.py
===================================================================
RCS file: /cvsroot/python/python/dist/src/Lib/httplib.py,v
retrieving revision 1.54.2.2
retrieving revision 1.54.2.3
diff -u -d -r1.54.2.2 -r1.54.2.3
--- httplib.py	7 Jan 2005 06:58:04 -0000	1.54.2.2
+++ httplib.py	16 Oct 2005 05:23:59 -0000	1.54.2.3
@@ -153,6 +153,9 @@
 INSUFFICIENT_STORAGE = 507
 NOT_EXTENDED = 510
 
+# maximal amount of data to read at one time in _safe_read
+MAXAMOUNT = 1048576
+
 class HTTPMessage(mimetools.Message):
 
     def addheader(self, key, value):
@@ -353,6 +356,7 @@
             raise UnknownProtocol(version)
 
         if self.version == 9:
+            self.length = None
             self.chunked = 0
             self.will_close = 1
             self.msg = HTTPMessage(StringIO())
@@ -540,14 +544,14 @@
         reading. If the bytes are truly not available (due to EOF), then the
         IncompleteRead exception can be used to detect the problem.
         """
-        s = ''
+        s = []
         while amt > 0:
-            chunk = self.fp.read(amt)
+            chunk = self.fp.read(min(amt, MAXAMOUNT))
             if not chunk:
                 raise IncompleteRead(s)
-            s += chunk
+            s.append(chunk)
             amt -= len(chunk)
-        return s
+        return ''.join(s)
 
     def getheader(self, name, default=None):
         if self.msg is None:

Index: imaplib.py
===================================================================
RCS file: /cvsroot/python/python/dist/src/Lib/imaplib.py,v
retrieving revision 1.54.2.2
retrieving revision 1.54.2.3
diff -u -d -r1.54.2.2 -r1.54.2.3
--- imaplib.py	7 Jan 2005 06:58:04 -0000	1.54.2.2
+++ imaplib.py	16 Oct 2005 05:23:59 -0000	1.54.2.3
@@ -18,8 +18,9 @@
 # IMAP4_SSL contributed by Tino Lange <Tino.Lange at isg.de> March 2002.
 # GET/SETQUOTA contributed by Andreas Zeidler <az at kreativkombinat.de> June 2002.
 # PROXYAUTH contributed by Rick Holbert <holbert.13 at osu.edu> November 2002.
+# GET/SETANNOTATION contributed by Tomas Lindroos <skitta at abo.fi> June 2005.
 
-__version__ = "2.55"
+__version__ = "2.58"
 
 import binascii, os, random, re, socket, sys, time
 
@@ -51,6 +52,7 @@
         'EXPUNGE':      ('SELECTED',),
         'FETCH':        ('SELECTED',),
         'GETACL':       ('AUTH', 'SELECTED'),
+        'GETANNOTATION':('AUTH', 'SELECTED'),
         'GETQUOTA':     ('AUTH', 'SELECTED'),
         'GETQUOTAROOT': ('AUTH', 'SELECTED'),
         'MYRIGHTS':     ('AUTH', 'SELECTED'),
@@ -66,6 +68,7 @@
         'SEARCH':       ('SELECTED',),
         'SELECT':       ('AUTH', 'SELECTED'),
         'SETACL':       ('AUTH', 'SELECTED'),
+        'SETANNOTATION':('AUTH', 'SELECTED'),
         'SETQUOTA':     ('AUTH', 'SELECTED'),
         'SORT':         ('SELECTED',),
         'STATUS':       ('AUTH', 'SELECTED'),
@@ -81,7 +84,7 @@
 Continuation = re.compile(r'\+( (?P<data>.*))?')
 Flags = re.compile(r'.*FLAGS \((?P<flags>[^\)]*)\)')
 InternalDate = re.compile(r'.*INTERNALDATE "'
-        r'(?P<day>[ 123][0-9])-(?P<mon>[A-Z][a-z][a-z])-(?P<year>[0-9][0-9][0-9][0-9])'
+        r'(?P<day>[ 0123][0-9])-(?P<mon>[A-Z][a-z][a-z])-(?P<year>[0-9][0-9][0-9][0-9])'
         r' (?P<hour>[0-9][0-9]):(?P<min>[0-9][0-9]):(?P<sec>[0-9][0-9])'
         r' (?P<zonen>[-+])(?P<zoneh>[0-9][0-9])(?P<zonem>[0-9][0-9])'
         r'"')
@@ -133,10 +136,10 @@
             the command re-tried.
     "readonly" exceptions imply the command should be re-tried.
 
-    Note: to use this module, you must read the RFCs pertaining
-    to the IMAP4 protocol, as the semantics of the arguments to
-    each IMAP4 command are left to the invoker, not to mention
-    the results.
+    Note: to use this module, you must read the RFCs pertaining to the
+    IMAP4 protocol, as the semantics of the arguments to each IMAP4
+    command are left to the invoker, not to mention the results. Also,
+    most IMAP servers implement a sub-set of the commands available here.
     """
 
     class error(Exception): pass    # Logical errors - debug required
@@ -152,7 +155,7 @@
         self.tagged_commands = {}       # Tagged commands awaiting response
         self.untagged_responses = {}    # {typ: [data, ...], ...}
         self.continuation_response = '' # Last continuation response
-        self.is_readonly = None         # READ-ONLY desired state
+        self.is_readonly = False        # READ-ONLY desired state
         self.tagnum = 0
 
         # Open socket to server.
@@ -162,7 +165,7 @@
         # Create unique tag for this session,
         # and compile tagged response matcher.
 
-        self.tagpre = Int2AP(random.randint(0, 31999))
+        self.tagpre = Int2AP(random.randint(4096, 65535))
         self.tagre = re.compile(r'(?P<tag>'
                         + self.tagpre
                         + r'\d+) (?P<type>[A-Z]+) (?P<data>.*)')
@@ -186,11 +189,10 @@
         else:
             raise self.error(self.welcome)
 
-        cap = 'CAPABILITY'
-        self._simple_command(cap)
-        if not cap in self.untagged_responses:
+        typ, dat = self.capability()
+        if dat == [None]:
             raise self.error('no CAPABILITY response from server')
-        self.capabilities = tuple(self.untagged_responses[cap][-1].upper().split())
+        self.capabilities = tuple(dat[-1].upper().split())
 
         if __debug__:
             if self.debug >= 3:
@@ -345,6 +347,15 @@
         return typ, dat
 
 
+    def capability(self):
+        """(typ, [data]) = <instance>.capability()
+        Fetch capabilities list from server."""
+
+        name = 'CAPABILITY'
+        typ, dat = self._simple_command(name)
+        return self._untagged_response(typ, dat, name)
+
+
     def check(self):
         """Checkpoint mailbox on server.
 
@@ -436,6 +447,14 @@
         return self._untagged_response(typ, dat, 'ACL')
 
 
+    def getannotation(self, mailbox, entry, attribute):
+        """(typ, [data]) = <instance>.getannotation(mailbox, entry, attribute)
+        Retrieve ANNOTATIONs."""
+
+        typ, dat = self._simple_command('GETANNOTATION', mailbox, entry, attribute)
+        return self._untagged_response(typ, dat, 'ANNOTATION')
+
+
     def getquota(self, root):
         """Get the quota root's resource usage and limits.
 
@@ -603,12 +622,12 @@
         return self._untagged_response(typ, dat, name)
 
 
-    def select(self, mailbox='INBOX', readonly=None):
+    def select(self, mailbox='INBOX', readonly=False):
         """Select a mailbox.
 
         Flush all untagged responses.
 
-        (typ, [data]) = <instance>.select(mailbox='INBOX', readonly=None)
+        (typ, [data]) = <instance>.select(mailbox='INBOX', readonly=False)
 
         'data' is count of messages in mailbox ('EXISTS' response).
 
@@ -617,7 +636,7 @@
         """
         self.untagged_responses = {}    # Flush old responses.
         self.is_readonly = readonly
-        if readonly is not None:
+        if readonly:
             name = 'EXAMINE'
         else:
             name = 'SELECT'
@@ -643,6 +662,14 @@
         return self._simple_command('SETACL', mailbox, who, what)
 
 
+    def setannotation(self, *args):
+        """(typ, [data]) = <instance>.setannotation(mailbox[, entry, attribute]+)
+        Set ANNOTATIONs."""
+
+        typ, dat = self._simple_command('SETANNOTATION', *args)
+        return self._untagged_response(typ, dat, 'ANNOTATION')
+
+
     def setquota(self, root, limits):
         """Set the quota root's resource limits.
 

Index: imghdr.py
===================================================================
RCS file: /cvsroot/python/python/dist/src/Lib/imghdr.py,v
retrieving revision 1.11
retrieving revision 1.11.26.1
diff -u -d -r1.11 -r1.11.26.1
--- imghdr.py	24 Jan 2001 06:27:27 -0000	1.11
+++ imghdr.py	16 Oct 2005 05:23:59 -0000	1.11.26.1
@@ -101,6 +101,13 @@
 
 tests.append(test_jpeg)
 
+def test_exif(h, f):
+    """JPEG data in Exif format"""
+    if h[6:10] == 'Exif':
+        return 'jpeg'
+
+tests.append(test_exif)
+
 def test_bmp(h, f):
     if h[:2] == 'BM':
         return 'bmp'

Index: inspect.py
===================================================================
RCS file: /cvsroot/python/python/dist/src/Lib/inspect.py,v
retrieving revision 1.36.2.2
retrieving revision 1.36.2.3
diff -u -d -r1.36.2.2 -r1.36.2.3
--- inspect.py	7 Jan 2005 06:58:05 -0000	1.36.2.2
+++ inspect.py	16 Oct 2005 05:23:59 -0000	1.36.2.3
@@ -29,6 +29,7 @@
 __date__ = '1 Jan 2001'
 
 import sys, os, types, string, re, dis, imp, tokenize, linecache
+from operator import attrgetter
 
 # ----------------------------------------------------------- type-checking
 def ismodule(object):
@@ -346,7 +347,7 @@
 def getsourcefile(object):
     """Return the Python source file an object was defined in, if it exists."""
     filename = getfile(object)
-    if string.lower(filename[-4:]) in ['.pyc', '.pyo']:
+    if string.lower(filename[-4:]) in ('.pyc', '.pyo'):
         filename = filename[:-4] + '.py'
     for suffix, mode, kind in imp.get_suffixes():
         if 'b' in mode and string.lower(filename[-len(suffix):]) == suffix:
@@ -453,7 +454,7 @@
         # Look for a comment block at the top of the file.
         start = 0
         if lines and lines[0][:2] == '#!': start = 1
-        while start < len(lines) and string.strip(lines[start]) in ['', '#']:
+        while start < len(lines) and string.strip(lines[start]) in ('', '#'):
             start = start + 1
         if start < len(lines) and lines[start][:1] == '#':
             comments = []
@@ -484,42 +485,30 @@
                 comments[-1:] = []
             return string.join(comments, '')
 
-class ListReader:
-    """Provide a readline() method to return lines from a list of strings."""
-    def __init__(self, lines):
-        self.lines = lines
-        self.index = 0
-
-    def readline(self):
-        i = self.index
-        if i < len(self.lines):
-            self.index = i + 1
-            return self.lines[i]
-        else: return ''
-
 class EndOfBlock(Exception): pass
 
 class BlockFinder:
     """Provide a tokeneater() method to detect the end of a code block."""
     def __init__(self):
         self.indent = 0
+        self.islambda = False
         self.started = False
         self.passline = False
-        self.last = 0
+        self.last = 1
 
     def tokeneater(self, type, token, (srow, scol), (erow, ecol), line):
         if not self.started:
+            # look for the first "def", "class" or "lambda"
             if token in ("def", "class", "lambda"):
-                lastcolon = line.rfind(":")
-                if lastcolon:
-                    oneline = re.search(r"\w", line[lastcolon:])
-                    if oneline and line[-2:] != "\\\n":
-                        raise EndOfBlock, srow
+                if token == "lambda":
+                    self.islambda = True
                 self.started = True
-            self.passline = True
+            self.passline = True    # skip to the end of the line
         elif type == tokenize.NEWLINE:
-            self.passline = False
+            self.passline = False   # stop skipping when a NEWLINE is seen
             self.last = srow
+            if self.islambda:       # lambdas always end at the first NEWLINE
+                raise EndOfBlock
         elif self.passline:
             pass
         elif type == tokenize.INDENT:
@@ -527,19 +516,24 @@
             self.passline = True
         elif type == tokenize.DEDENT:
             self.indent = self.indent - 1
-            if self.indent == 0:
-                raise EndOfBlock, self.last
-        elif type == tokenize.NAME and scol == 0:
-            raise EndOfBlock, self.last
+            # the end of matching indent/dedent pairs ends a block
+            # (note that this only works for "def"/"class" blocks,
+            #  not e.g. for "if: else:" or "try: finally:" blocks)
+            if self.indent <= 0:
+                raise EndOfBlock
+        elif self.indent == 0 and type not in (tokenize.COMMENT, tokenize.NL):
+            # any other token on the same indentation level ends the previous
+            # block as well, except the pseudo-tokens COMMENT and NL.
+            raise EndOfBlock
 
 def getblock(lines):
     """Extract the block of code at the top of the given list of lines."""
+    blockfinder = BlockFinder()
     try:
-        tokenize.tokenize(ListReader(lines).readline, BlockFinder().tokeneater)
-    except EndOfBlock, eob:
-        return lines[:eob.args[0]]
-    # Fooling the indent/dedent logic implies a one-line definition
-    return lines[:1]
+        tokenize.tokenize(iter(lines).next, blockfinder.tokeneater)
+    except (EndOfBlock, IndentationError):
+        pass
+    return lines[:blockfinder.last]
 
 def getsourcelines(object):
     """Return a list of source lines and starting line number for an object.
@@ -567,7 +561,7 @@
 def walktree(classes, children, parent):
     """Recursive helper function for getclasstree()."""
     results = []
-    classes.sort(key=lambda c: (c.__module__, c.__name__))
+    classes.sort(key=attrgetter('__module__', '__name__'))
     for c in classes:
         results.append((c, c.__bases__))
         if c in children:
@@ -621,7 +615,7 @@
 
     # The following acrobatics are for anonymous (tuple) arguments.
     for i in range(nargs):
-        if args[i][:1] in ['', '.']:
+        if args[i][:1] in ('', '.'):
             stack, remain, count = [], [], []
             while step < len(code):
                 op = ord(code[step])
@@ -630,7 +624,7 @@
                     opname = dis.opname[op]
                     value = ord(code[step]) + ord(code[step+1])*256
                     step = step + 2
-                    if opname in ['UNPACK_TUPLE', 'UNPACK_SEQUENCE']:
+                    if opname in ('UNPACK_TUPLE', 'UNPACK_SEQUENCE'):
                         remain.append(value)
                         count.append(value)
                     elif opname == 'STORE_FAST':
@@ -696,7 +690,7 @@
 
 def strseq(object, convert, join=joinseq):
     """Recursively walk a sequence, stringifying each element."""
-    if type(object) in [types.ListType, types.TupleType]:
+    if type(object) in (list, tuple):
         return join(map(lambda o, c=convert, j=join: strseq(o, c, j), object))
     else:
         return convert(object)

Index: locale.py
===================================================================
RCS file: /cvsroot/python/python/dist/src/Lib/locale.py,v
retrieving revision 1.20.2.2
retrieving revision 1.20.2.3
diff -u -d -r1.20.2.2 -r1.20.2.3
--- locale.py	7 Jan 2005 06:58:05 -0000	1.20.2.2
+++ locale.py	16 Oct 2005 05:23:59 -0000	1.20.2.3
@@ -306,7 +306,7 @@
     else:
         return language + '.' + encoding
 
-def getdefaultlocale(envvars=('LANGUAGE', 'LC_ALL', 'LC_CTYPE', 'LANG')):
+def getdefaultlocale(envvars=('LC_ALL', 'LC_CTYPE', 'LANG', 'LANGUAGE')):
 
     """ Tries to determine the default locale settings and returns
         them as tuple (language code, encoding).
@@ -351,6 +351,8 @@
     for variable in envvars:
         localename = lookup(variable,None)
         if localename:
+            if variable == 'LANGUAGE':
+                localename = localename.split(':')[0]
             break
     else:
         localename = 'C'

Index: macpath.py
===================================================================
RCS file: /cvsroot/python/python/dist/src/Lib/macpath.py,v
retrieving revision 1.39.2.2
retrieving revision 1.39.2.3
diff -u -d -r1.39.2.2 -r1.39.2.3
--- macpath.py	7 Jan 2005 06:58:05 -0000	1.39.2.2
+++ macpath.py	16 Oct 2005 05:23:59 -0000	1.39.2.3
@@ -5,7 +5,7 @@
 
 __all__ = ["normcase","isabs","join","splitdrive","split","splitext",
            "basename","dirname","commonprefix","getsize","getmtime",
-           "getatime","getctime", "islink","exists","isdir","isfile",
+           "getatime","getctime", "islink","exists","lexists","isdir","isfile",
            "walk","expanduser","expandvars","normpath","abspath",
            "curdir","pardir","sep","pathsep","defpath","altsep","extsep",
            "devnull","realpath","supports_unicode_filenames"]
@@ -175,14 +175,14 @@
 def commonprefix(m):
     "Given a list of pathnames, returns the longest common leading component"
     if not m: return ''
-    prefix = m[0]
-    for item in m:
-        for i in range(len(prefix)):
-            if prefix[:i+1] != item[:i+1]:
-                prefix = prefix[:i]
-                if i == 0: return ''
-                break
-    return prefix
+    s1 = min(m)
+    s2 = max(m)
+    n = min(len(s1), len(s2))
+    for i in xrange(n):
+        if s1[i] != s2[i]:
+            return s1[:i]
+    return s1[:n]
+
 
 def expandvars(path):
     """Dummy to retain interface-compatibility with other operating systems."""

Index: markupbase.py
===================================================================
RCS file: /cvsroot/python/python/dist/src/Lib/markupbase.py,v
retrieving revision 1.6.2.2
retrieving revision 1.6.2.3
diff -u -d -r1.6.2.2 -r1.6.2.3
--- markupbase.py	7 Jan 2005 06:58:05 -0000	1.6.2.2
+++ markupbase.py	16 Oct 2005 05:23:59 -0000	1.6.2.3
@@ -1,4 +1,10 @@
-"""Shared support for scanning document type declarations in HTML and XHTML."""
+"""Shared support for scanning document type declarations in HTML and XHTML.
+
+This module is used as a foundation for the HTMLParser and sgmllib
+modules (indirectly, for htmllib as well).  It has no documented
+public API and should not be used directly.
+
+"""
 
 import re
 

Index: mhlib.py
===================================================================
RCS file: /cvsroot/python/python/dist/src/Lib/mhlib.py,v
retrieving revision 1.34.2.2
retrieving revision 1.34.2.3
diff -u -d -r1.34.2.2 -r1.34.2.3
--- mhlib.py	7 Jan 2005 06:58:05 -0000	1.34.2.2
+++ mhlib.py	16 Oct 2005 05:23:59 -0000	1.34.2.3
@@ -982,11 +982,11 @@
     context = mh.getcontext()
     f = mh.openfolder(context)
     do('f.getcurrent()')
-    for seq in ['first', 'last', 'cur', '.', 'prev', 'next',
+    for seq in ('first', 'last', 'cur', '.', 'prev', 'next',
                 'first:3', 'last:3', 'cur:3', 'cur:-3',
                 'prev:3', 'next:3',
                 '1:3', '1:-3', '100:3', '100:-3', '10000:3', '10000:-3',
-                'all']:
+                'all'):
         try:
             do('f.parsesequence(%r)' % (seq,))
         except Error, msg:

Index: mimetypes.py
===================================================================
RCS file: /cvsroot/python/python/dist/src/Lib/mimetypes.py,v
retrieving revision 1.22.2.2
retrieving revision 1.22.2.3
diff -u -d -r1.22.2.2 -r1.22.2.3
--- mimetypes.py	7 Jan 2005 06:58:05 -0000	1.22.2.2
+++ mimetypes.py	16 Oct 2005 05:23:59 -0000	1.22.2.3
@@ -443,12 +443,14 @@
     '.vcf'    : 'text/x-vcard',
     '.wav'    : 'audio/x-wav',
     '.wiz'    : 'application/msword',
+    '.wsdl'   : 'application/xml',
     '.xbm'    : 'image/x-xbitmap',
     '.xlb'    : 'application/vnd.ms-excel',
     # Duplicates :(
     '.xls'    : 'application/excel',
     '.xls'    : 'application/vnd.ms-excel',
     '.xml'    : 'text/xml',
+    '.xpdl'   : 'application/xml',
     '.xpm'    : 'image/x-xpixmap',
     '.xsl'    : 'application/xml',
     '.xwd'    : 'image/x-xwindowdump',

Index: nntplib.py
===================================================================
RCS file: /cvsroot/python/python/dist/src/Lib/nntplib.py,v
retrieving revision 1.30.2.2
retrieving revision 1.30.2.3
diff -u -d -r1.30.2.2 -r1.30.2.3
--- nntplib.py	7 Jan 2005 06:58:06 -0000	1.30.2.2
+++ nntplib.py	16 Oct 2005 05:23:59 -0000	1.30.2.3
@@ -281,7 +281,7 @@
         - time: string 'hhmmss' indicating the time
         Return:
         - resp: server response if successful
-        - list: list of article ids"""
+        - list: list of message ids"""
 
         cmd = 'NEWNEWS ' + group + ' ' + date + ' ' + time
         return self.longcmd(cmd, file)
@@ -391,7 +391,7 @@
         Returns:
         - resp: server response if successful
         - nr:   the article number
-        - id:   the article id"""
+        - id:   the message id"""
 
         return self.statcmd('STAT ' + id)
 

Index: ntpath.py
===================================================================
RCS file: /cvsroot/python/python/dist/src/Lib/ntpath.py,v
retrieving revision 1.49.2.2
retrieving revision 1.49.2.3
diff -u -d -r1.49.2.2 -r1.49.2.3
--- ntpath.py	7 Jan 2005 06:58:06 -0000	1.49.2.2
+++ ntpath.py	16 Oct 2005 05:23:59 -0000	1.49.2.3
@@ -11,10 +11,10 @@
 
 __all__ = ["normcase","isabs","join","splitdrive","split","splitext",
            "basename","dirname","commonprefix","getsize","getmtime",
-           "getatime","getctime", "islink","exists","isdir","isfile","ismount",
-           "walk","expanduser","expandvars","normpath","abspath","splitunc",
-           "curdir","pardir","sep","pathsep","defpath","altsep","extsep",
-           "devnull","realpath","supports_unicode_filenames"]
+           "getatime","getctime", "islink","exists","lexists","isdir","isfile",
+           "ismount","walk","expanduser","expandvars","normpath","abspath",
+           "splitunc","curdir","pardir","sep","pathsep","defpath","altsep",
+           "extsep","devnull","realpath","supports_unicode_filenames"]
 
 # strings representing various path-related bits and pieces
 curdir = '.'
@@ -212,14 +212,13 @@
 def commonprefix(m):
     "Given a list of pathnames, returns the longest common leading component"
     if not m: return ''
-    prefix = m[0]
-    for item in m:
-        for i in range(len(prefix)):
-            if prefix[:i+1] != item[:i+1]:
-                prefix = prefix[:i]
-                if i == 0: return ''
-                break
-    return prefix
+    s1 = min(m)
+    s2 = max(m)
+    n = min(len(s1), len(s2))
+    for i in xrange(n):
+        if s1[i] != s2[i]:
+            return s1[:i]
+    return s1[:n]
 
 
 # Get size, mtime, atime of files.

Index: optparse.py
===================================================================
RCS file: /cvsroot/python/python/dist/src/Lib/optparse.py,v
retrieving revision 1.4.4.2
retrieving revision 1.4.4.3
diff -u -d -r1.4.4.2 -r1.4.4.3
--- optparse.py	7 Jan 2005 06:58:06 -0000	1.4.4.2
+++ optparse.py	16 Oct 2005 05:23:59 -0000	1.4.4.3
@@ -67,7 +67,6 @@
 """
 
 import sys, os
-import types
 import textwrap
 try:
     from gettext import gettext as _
@@ -590,7 +589,7 @@
             if self.choices is None:
                 raise OptionError(
                     "must supply a list of choices for type 'choice'", self)
-            elif type(self.choices) not in (types.TupleType, types.ListType):
+            elif type(self.choices) not in (tuple, list):
                 raise OptionError(
                     "choices must be a list of strings ('%s' supplied)"
                     % str(type(self.choices)).split("'")[1], self)
@@ -634,12 +633,12 @@
                 raise OptionError(
                     "callback not callable: %r" % self.callback, self)
             if (self.callback_args is not None and
-                type(self.callback_args) is not types.TupleType):
+                type(self.callback_args) is not tuple):
                 raise OptionError(
                     "callback_args, if supplied, must be a tuple: not %r"
                     % self.callback_args, self)
             if (self.callback_kwargs is not None and
-                type(self.callback_kwargs) is not types.DictType):
+                type(self.callback_kwargs) is not dict):
                 raise OptionError(
                     "callback_kwargs, if supplied, must be a dict: not %r"
                     % self.callback_kwargs, self)
@@ -927,7 +926,7 @@
         """add_option(Option)
            add_option(opt_str, ..., kwarg=val, ...)
         """
-        if type(args[0]) is types.StringType:
+        if type(args[0]) is str:
             option = self.option_class(*args, **kwargs)
         elif len(args) == 1 and not kwargs:
             option = args[0]
@@ -1213,7 +1212,7 @@
 
     def add_option_group(self, *args, **kwargs):
         # XXX lots of overlap with OptionContainer.add_option()
-        if type(args[0]) is types.StringType:
+        if type(args[0]) is str:
             group = OptionGroup(self, *args, **kwargs)
         elif len(args) == 1 and not kwargs:
             group = args[0]

Index: os.py
===================================================================
RCS file: /cvsroot/python/python/dist/src/Lib/os.py,v
retrieving revision 1.58.2.3
retrieving revision 1.58.2.4
diff -u -d -r1.58.2.3 -r1.58.2.4
--- os.py	7 Jan 2005 06:58:07 -0000	1.58.2.3
+++ os.py	16 Oct 2005 05:23:59 -0000	1.58.2.4
@@ -29,7 +29,8 @@
 
 # Note:  more names are added to __all__ later.
 __all__ = ["altsep", "curdir", "pardir", "sep", "pathsep", "linesep",
-           "defpath", "name", "path", "devnull"]
+           "defpath", "name", "path", "devnull",
+           "SEEK_SET", "SEEK_CUR", "SEEK_END"]
 
 def _get_exports_list(module):
     try:
@@ -135,6 +136,12 @@
 
 del _names
 
+# Python uses fixed values for the SEEK_ constants; they are mapped
+# to native constants if necessary in posixmodule.c
+SEEK_SET = 0
+SEEK_CUR = 1
+SEEK_END = 2
+
 #'
 
 # Super directory utilities.
@@ -435,6 +442,22 @@
                 return key.upper() in self.data
             def get(self, key, failobj=None):
                 return self.data.get(key.upper(), failobj)
+            def update(self, dict=None, **kwargs):
+                if dict:
+                    try:
+                        keys = dict.keys()
+                    except AttributeError:
+                        # List of (key, value)
+                        for k, v in dict:
+                            self[k] = v
+                    else:
+                        # got keys
+                        # cannot use items(), since mappings
+                        # may not have them.
+                        for k in keys:
+                            self[k] = dict[k]
+                if kwargs:
+                    self.update(kwargs)
             def copy(self):
                 return dict(self)
 
@@ -446,6 +469,22 @@
             def __setitem__(self, key, item):
                 putenv(key, item)
                 self.data[key] = item
+            def update(self,  dict=None, **kwargs):
+                if dict:
+                    try:
+                        keys = dict.keys()
+                    except AttributeError:
+                        # List of (key, value)
+                        for k, v in dict:
+                            self[k] = v
+                    else:
+                        # got keys
+                        # cannot use items(), since mappings
+                        # may not have them.
+                        for k in keys:
+                            self[k] = dict[k]
+                if kwargs:
+                    self.update(kwargs)
             try:
                 unsetenv
             except NameError:
@@ -676,22 +715,18 @@
     pass
 
 if not _exists("urandom"):
-    _urandomfd = None
     def urandom(n):
         """urandom(n) -> str
 
         Return a string of n random bytes suitable for cryptographic use.
 
         """
-        global _urandomfd
-        if _urandomfd is None:
-            try:
-                _urandomfd = open("/dev/urandom", O_RDONLY)
-            except:
-                _urandomfd = NotImplementedError
-        if _urandomfd is NotImplementedError:
+        try:
+            _urandomfd = open("/dev/urandom", O_RDONLY)
+        except:
             raise NotImplementedError("/dev/urandom (or equivalent) not found")
         bytes = ""
         while len(bytes) < n:
             bytes += read(_urandomfd, n - len(bytes))
+        close(_urandomfd)
         return bytes

Index: os2emxpath.py
===================================================================
RCS file: /cvsroot/python/python/dist/src/Lib/os2emxpath.py,v
retrieving revision 1.6.2.2
retrieving revision 1.6.2.3
diff -u -d -r1.6.2.2 -r1.6.2.3
--- os2emxpath.py	7 Jan 2005 06:58:07 -0000	1.6.2.2
+++ os2emxpath.py	16 Oct 2005 05:23:59 -0000	1.6.2.3
@@ -10,10 +10,10 @@
 
 __all__ = ["normcase","isabs","join","splitdrive","split","splitext",
            "basename","dirname","commonprefix","getsize","getmtime",
-           "getatime","getctime", "islink","exists","isdir","isfile","ismount",
-           "walk","expanduser","expandvars","normpath","abspath","splitunc",
-           "curdir","pardir","sep","pathsep","defpath","altsep","extsep",
-           "devnull","realpath","supports_unicode_filenames"]
+           "getatime","getctime", "islink","exists","lexists","isdir","isfile",
+           "ismount","walk","expanduser","expandvars","normpath","abspath",
+           "splitunc","curdir","pardir","sep","pathsep","defpath","altsep",
+           "extsep","devnull","realpath","supports_unicode_filenames"]
 
 # strings representing various path-related bits and pieces
 curdir = '.'
@@ -173,14 +173,13 @@
 def commonprefix(m):
     "Given a list of pathnames, returns the longest common leading component"
     if not m: return ''
-    prefix = m[0]
-    for item in m:
-        for i in range(len(prefix)):
-            if prefix[:i+1] != item[:i+1]:
-                prefix = prefix[:i]
-                if i == 0: return ''
-                break
-    return prefix
+    s1 = min(m)
+    s2 = max(m)
+    n = min(len(s1), len(s2))
+    for i in xrange(n):
+        if s1[i] != s2[i]:
+            return s1[:i]
+    return s1[:n]
 
 
 # Get size, mtime, atime of files.

Index: pdb.py
===================================================================
RCS file: /cvsroot/python/python/dist/src/Lib/pdb.py,v
retrieving revision 1.53.2.2
retrieving revision 1.53.2.3
diff -u -d -r1.53.2.2 -r1.53.2.3
--- pdb.py	7 Jan 2005 06:58:07 -0000	1.53.2.2
+++ pdb.py	16 Oct 2005 05:23:59 -0000	1.53.2.3
@@ -450,11 +450,14 @@
             return
         numberlist = arg.split()
         for i in numberlist:
+            if not (0 <= i < len(bdb.Breakpoint.bpbynumber)):
+                print 'No breakpoint numbered', i
+                continue
             err = self.clear_bpbynumber(i)
             if err:
                 print '***', err
             else:
-                print 'Deleted breakpoint %s ' % (i,)
+                print 'Deleted breakpoint', i
     do_cl = do_clear # 'c' is already an abbreviation for 'continue'
 
     def do_where(self, arg):

Index: pickletools.py
===================================================================
RCS file: /cvsroot/python/python/dist/src/Lib/pickletools.py,v
retrieving revision 1.26.6.2
retrieving revision 1.26.6.3
diff -u -d -r1.26.6.2 -r1.26.6.3
--- pickletools.py	7 Jan 2005 06:58:07 -0000	1.26.6.2
+++ pickletools.py	16 Oct 2005 05:23:59 -0000	1.26.6.3
@@ -1996,6 +1996,11 @@
     if stack:
         raise ValueError("stack not empty after STOP: %r" % stack)
 
+# For use in the doctest, simply as an example of a class to pickle.
+class _Example:
+    def __init__(self, value):
+        self.value = value
+
 _dis_test = r"""
 >>> import pickle
 >>> x = [1, 2, (3, 4), {'abc': u"def"}]
@@ -2060,27 +2065,27 @@
    18: .    STOP
 highest protocol among opcodes = 0
 
->>> x = [pickle.PicklingError()] * 2
+>>> from pickletools import _Example
+>>> x = [_Example(42)] * 2
 >>> dis(pickle.dumps(x, 0))
     0: (    MARK
     1: l        LIST       (MARK at 0)
     2: p    PUT        0
     5: (    MARK
-    6: i        INST       'pickle PicklingError' (MARK at 5)
+    6: i        INST       'pickletools _Example' (MARK at 5)
    28: p    PUT        1
    31: (    MARK
    32: d        DICT       (MARK at 31)
    33: p    PUT        2
-   36: S    STRING     'args'
-   44: p    PUT        3
-   47: (    MARK
-   48: t        TUPLE      (MARK at 47)
-   49: s    SETITEM
-   50: b    BUILD
-   51: a    APPEND
-   52: g    GET        1
-   55: a    APPEND
-   56: .    STOP
+   36: S    STRING     'value'
+   45: p    PUT        3
+   48: I    INT        42
+   52: s    SETITEM
+   53: b    BUILD
+   54: a    APPEND
+   55: g    GET        1
+   58: a    APPEND
+   59: .    STOP
 highest protocol among opcodes = 0
 
 >>> dis(pickle.dumps(x, 1))
@@ -2088,20 +2093,20 @@
     1: q    BINPUT     0
     3: (    MARK
     4: (        MARK
-    5: c            GLOBAL     'pickle PicklingError'
+    5: c            GLOBAL     'pickletools _Example'
    27: q            BINPUT     1
    29: o            OBJ        (MARK at 4)
    30: q        BINPUT     2
    32: }        EMPTY_DICT
    33: q        BINPUT     3
-   35: U        SHORT_BINSTRING 'args'
-   41: q        BINPUT     4
-   43: )        EMPTY_TUPLE
-   44: s        SETITEM
-   45: b        BUILD
-   46: h        BINGET     2
-   48: e        APPENDS    (MARK at 3)
-   49: .    STOP
+   35: U        SHORT_BINSTRING 'value'
+   42: q        BINPUT     4
+   44: K        BININT1    42
+   46: s        SETITEM
+   47: b        BUILD
+   48: h        BINGET     2
+   50: e        APPENDS    (MARK at 3)
+   51: .    STOP
 highest protocol among opcodes = 1
 
 Try "the canonical" recursive-object test.

Index: popen2.py
===================================================================
RCS file: /cvsroot/python/python/dist/src/Lib/popen2.py,v
retrieving revision 1.25.2.1
retrieving revision 1.25.2.2
diff -u -d -r1.25.2.1 -r1.25.2.2
--- popen2.py	7 Jan 2005 06:58:07 -0000	1.25.2.1
+++ popen2.py	16 Oct 2005 05:23:59 -0000	1.25.2.2
@@ -213,7 +213,7 @@
         raise ValueError("wrote %r read %r" % (teststr, got))
     got = e.read()
     if got:
-        raise ValueError("unexected %r on stderr" % (got,))
+        raise ValueError("unexpected %r on stderr" % (got,))
     for inst in _active[:]:
         inst.wait()
     if _active:

Index: poplib.py
===================================================================
RCS file: /cvsroot/python/python/dist/src/Lib/poplib.py,v
retrieving revision 1.21.2.1
retrieving revision 1.21.2.2
diff -u -d -r1.21.2.1 -r1.21.2.2
--- poplib.py	7 Jan 2005 06:58:07 -0000	1.21.2.1
+++ poplib.py	16 Oct 2005 05:23:59 -0000	1.21.2.2
@@ -219,7 +219,7 @@
         """Request listing, return result.
 
         Result without a message number argument is in form
-        ['response', ['mesg_num octets', ...]].
+        ['response', ['mesg_num octets', ...], octets].
 
         Result when a message number argument is given is a
         single response: the "scan listing" for that message.

Index: posixfile.py
===================================================================
RCS file: /cvsroot/python/python/dist/src/Lib/posixfile.py,v
retrieving revision 1.24.10.1
retrieving revision 1.24.10.2
diff -u -d -r1.24.10.1 -r1.24.10.2
--- posixfile.py	7 Jan 2005 06:58:07 -0000	1.24.10.1
+++ posixfile.py	16 Oct 2005 05:23:59 -0000	1.24.10.2
@@ -179,10 +179,11 @@
         if sys.platform in ('netbsd1',
                             'openbsd2',
                             'freebsd2', 'freebsd3', 'freebsd4', 'freebsd5',
-                            'freebsd6', 'bsdos2', 'bsdos3', 'bsdos4'):
+                            'freebsd6', 'freebsd7',
+                            'bsdos2', 'bsdos3', 'bsdos4'):
             flock = struct.pack('lxxxxlxxxxlhh', \
                   l_start, l_len, os.getpid(), l_type, l_whence)
-        elif sys.platform in ['aix3', 'aix4']:
+        elif sys.platform in ('aix3', 'aix4'):
             flock = struct.pack('hhlllii', \
                   l_type, l_whence, l_start, l_len, 0, 0, 0)
         else:
@@ -198,7 +199,7 @@
                                 'bsdos2', 'bsdos3', 'bsdos4'):
                 l_start, l_len, l_pid, l_type, l_whence = \
                     struct.unpack('lxxxxlxxxxlhh', flock)
-            elif sys.platform in ['aix3', 'aix4']:
+            elif sys.platform in ('aix3', 'aix4'):
                 l_type, l_whence, l_start, l_len, l_sysid, l_pid, l_vfs = \
                     struct.unpack('hhlllii', flock)
             elif sys.platform == "linux2":

Index: posixpath.py
===================================================================
RCS file: /cvsroot/python/python/dist/src/Lib/posixpath.py,v
retrieving revision 1.51.2.2
retrieving revision 1.51.2.3
diff -u -d -r1.51.2.2 -r1.51.2.3
--- posixpath.py	7 Jan 2005 06:58:07 -0000	1.51.2.2
+++ posixpath.py	16 Oct 2005 05:23:59 -0000	1.51.2.3
@@ -15,8 +15,8 @@
 
 __all__ = ["normcase","isabs","join","splitdrive","split","splitext",
            "basename","dirname","commonprefix","getsize","getmtime",
-           "getatime","getctime","islink","exists","isdir","isfile","ismount",
-           "walk","expanduser","expandvars","normpath","abspath",
+           "getatime","getctime","islink","exists","lexists","isdir","isfile",
+           "ismount","walk","expanduser","expandvars","normpath","abspath",
            "samefile","sameopenfile","samestat",
            "curdir","pardir","sep","pathsep","defpath","altsep","extsep",
            "devnull","realpath","supports_unicode_filenames"]
@@ -414,7 +414,7 @@
     if isabs(filename):
         bits = ['/'] + filename.split('/')[1:]
     else:
-        bits = filename.split('/')
+        bits = [''] + filename.split('/')
 
     for i in range(2, len(bits)+1):
         component = join(*bits[0:i])

Index: profile.py
===================================================================
RCS file: /cvsroot/python/python/dist/src/Lib/profile.py,v
retrieving revision 1.47.2.2
retrieving revision 1.47.2.3
diff -u -d -r1.47.2.2 -r1.47.2.3
--- profile.py	7 Jan 2005 06:58:08 -0000	1.47.2.2
+++ profile.py	16 Oct 2005 05:23:59 -0000	1.47.2.3
@@ -4,8 +4,6 @@
 #
 # Based on prior profile module by Sjoerd Mullender...
 #   which was hacked somewhat by: Guido van Rossum
-#
-# See profile.doc for more information
 
 """Class for profiling Python code."""
 
@@ -94,18 +92,10 @@
     else:
         return prof.print_stats()
 
-# print help
+# Backwards compatibility.
 def help():
-    for dirname in sys.path:
-        fullname = os.path.join(dirname, 'profile.doc')
-        if os.path.exists(fullname):
-            sts = os.system('${PAGER-more} ' + fullname)
-            if sts: print '*** Pager exit status:', sts
-            break
-    else:
-        print 'Sorry, can\'t find the help file "profile.doc"',
-        print 'along the Python search path.'
-
+    print "Documentation for the profile module can be found "
+    print "in the Python Library Reference, section 'The Python Profiler'."
 
 if os.name == "mac":
     import MacOS
@@ -117,6 +107,20 @@
         t = timer()
         return t[0] + t[1]
 
+# Using getrusage(3) is better than clock(3) if available:
+# on some systems (e.g. FreeBSD), getrusage has a higher resolution
+# Furthermore, on a POSIX system, returns microseconds, which
+# wrap around after 36min.
+_has_res = 0
+try:
+    import resource
+    resgetrusage = lambda: resource.getrusage(resource.RUSAGE_SELF)
+    def _get_time_resource(timer=resgetrusage):
+        t = timer()
+        return t[0] + t[1]
+    _has_res = 1
+except ImportError:
+    pass
 
 class Profile:
     """Profiler class.
@@ -169,8 +173,12 @@
             bias = self.bias
         self.bias = bias     # Materialize in local dict for lookup speed.
 
-        if timer is None:
-            if os.name == 'mac':
+        if not timer:
+            if _has_res:
+                self.timer = resgetrusage
+                self.dispatcher = self.trace_dispatch
+                self.get_time = _get_time_resource
+            elif os.name == 'mac':
                 self.timer = MacOS.GetTicks
                 self.dispatcher = self.trace_dispatch_mac
                 self.get_time = _get_time_mac
@@ -360,7 +368,7 @@
         "exception": trace_dispatch_exception,
         "return": trace_dispatch_return,
         "c_call": trace_dispatch_c_call,
-        "c_exception": trace_dispatch_exception,
+        "c_exception": trace_dispatch_return,  # the C function returned
         "c_return": trace_dispatch_return,
         }
 
@@ -583,26 +591,19 @@
 def Stats(*args):
     print 'Report generating functions are in the "pstats" module\a'
 
-
-# When invoked as main program, invoke the profiler on a script
-if __name__ == '__main__':
+def main():
     usage = "profile.py [-o output_file_path] [-s sort] scriptfile [arg] ..."
-    if not sys.argv[1:]:
-        print "Usage: ", usage
-        sys.exit(2)
-
-    class ProfileParser(OptionParser):
-        def __init__(self, usage):
-            OptionParser.__init__(self)
-            self.usage = usage
-
-    parser = ProfileParser(usage)
+    parser = OptionParser(usage=usage)
     parser.allow_interspersed_args = False
     parser.add_option('-o', '--outfile', dest="outfile",
         help="Save stats to <outfile>", default=None)
     parser.add_option('-s', '--sort', dest="sort",
         help="Sort order when printing to stdout, based on pstats.Stats class", default=-1)
 
+    if not sys.argv[1:]:
+        parser.print_usage()
+        sys.exit(2)
+
     (options, args) = parser.parse_args()
     sys.argv[:] = args
 
@@ -610,4 +611,9 @@
         sys.path.insert(0, os.path.dirname(sys.argv[0]))
         run('execfile(%r)' % (sys.argv[0],), options.outfile, options.sort)
     else:
-        print "Usage: ", usage
+        parser.print_usage()
+    return parser
+
+# When invoked as main program, invoke the profiler on a script
+if __name__ == '__main__':
+    main()

Index: py_compile.py
===================================================================
RCS file: /cvsroot/python/python/dist/src/Lib/py_compile.py,v
retrieving revision 1.21.2.1
retrieving revision 1.21.2.2
diff -u -d -r1.21.2.1 -r1.21.2.2
--- py_compile.py	28 Apr 2003 17:32:10 -0000	1.21.2.1
+++ py_compile.py	16 Oct 2005 05:23:59 -0000	1.21.2.2
@@ -128,7 +128,7 @@
         if doraise:
             raise py_exc
         else:
-            sys.stderr.write(py_exc.msg)
+            sys.stderr.write(py_exc.msg + '\n')
             return
     if cfile is None:
         cfile = file + (__debug__ and 'c' or 'o')

Index: pydoc.py
===================================================================
RCS file: /cvsroot/python/python/dist/src/Lib/pydoc.py,v
retrieving revision 1.65.2.2
retrieving revision 1.65.2.3
diff -u -d -r1.65.2.2 -r1.65.2.3
--- pydoc.py	7 Jan 2005 06:58:08 -0000	1.65.2.2
+++ pydoc.py	16 Oct 2005 05:23:59 -0000	1.65.2.3
@@ -36,6 +36,7 @@
 
 __author__ = "Ka-Ping Yee <ping at lfw.org>"
 __date__ = "26 February 2001"
+
 __version__ = "$Revision$"
 __credits__ = """Guido van Rossum, for an excellent programming language.
 Tommy Burnette, the original creator of manpy.
@@ -153,8 +154,8 @@
 def visiblename(name, all=None):
     """Decide whether to show documentation on a variable."""
     # Certain special names are redundant.
-    if name in ['__builtins__', '__doc__', '__file__', '__path__',
-                '__module__', '__name__']: return 0
+    if name in ('__builtins__', '__doc__', '__file__', '__path__',
+                '__module__', '__name__', '__slots__'): return 0
     # Private names are hidden, but special names are displayed.
     if name.startswith('__') and name.endswith('__'): return 1
     if all is not None:
@@ -163,12 +164,20 @@
     else:
         return not name.startswith('_')
 
+def classify_class_attrs(object):
+    """Wrap inspect.classify_class_attrs, with fixup for data descriptors."""
+    def fixup((name, kind, cls, value)):
+        if inspect.isdatadescriptor(value):
+            kind = 'data descriptor'
+        return name, kind, cls, value
+    return map(fixup, inspect.classify_class_attrs(object))
+
 # ----------------------------------------------------- module manipulation
 
 def ispackage(path):
     """Guess whether a path refers to a package directory."""
     if os.path.isdir(path):
-        for ext in ['.py', '.pyc', '.pyo']:
+        for ext in ('.py', '.pyc', '.pyo'):
             if os.path.isfile(os.path.join(path, '__init__' + ext)):
                 return True
     return False
@@ -718,13 +727,13 @@
                     push('\n')
             return attrs
 
-        def spillproperties(msg, attrs, predicate):
+        def spilldescriptors(msg, attrs, predicate):
             ok, attrs = _split_list(attrs, predicate)
             if ok:
                 hr.maybe()
                 push(msg)
                 for name, kind, homecls, value in ok:
-                    push(self._docproperty(name, value, mod))
+                    push(self._docdescriptor(name, value, mod))
             return attrs
 
         def spilldata(msg, attrs, predicate):
@@ -749,7 +758,7 @@
             return attrs
 
         attrs = filter(lambda (name, kind, cls, value): visiblename(name),
-                       inspect.classify_class_attrs(object))
+                       classify_class_attrs(object))
         mdict = {}
         for key, kind, homecls, value in attrs:
             mdict[key] = anchor = '#' + name + '-' + key
@@ -788,8 +797,8 @@
                           lambda t: t[1] == 'class method')
             attrs = spill('Static methods %s' % tag, attrs,
                           lambda t: t[1] == 'static method')
-            attrs = spillproperties('Properties %s' % tag, attrs,
-                                    lambda t: t[1] == 'property')
+            attrs = spilldescriptors('Data descriptors %s' % tag, attrs,
+                                     lambda t: t[1] == 'data descriptor')
             attrs = spilldata('Data and other attributes %s' % tag, attrs,
                               lambda t: t[1] == 'data')
             assert attrs == []
@@ -871,29 +880,22 @@
             doc = doc and '<dd><tt>%s</tt></dd>' % doc
             return '<dl><dt>%s</dt>%s</dl>\n' % (decl, doc)
 
-    def _docproperty(self, name, value, mod):
+    def _docdescriptor(self, name, value, mod):
         results = []
         push = results.append
 
         if name:
             push('<dl><dt><strong>%s</strong></dt>\n' % name)
         if value.__doc__ is not None:
-            doc = self.markup(value.__doc__, self.preformat)
+            doc = self.markup(getdoc(value), self.preformat)
             push('<dd><tt>%s</tt></dd>\n' % doc)
-        for attr, tag in [('fget', '<em>get</em>'),
-                          ('fset', '<em>set</em>'),
-                          ('fdel', '<em>delete</em>')]:
-            func = getattr(value, attr)
-            if func is not None:
-                base = self.document(func, tag, mod)
-                push('<dd>%s</dd>\n' % base)
         push('</dl>\n')
 
         return ''.join(results)
 
     def docproperty(self, object, name=None, mod=None, cl=None):
         """Produce html documentation for a property."""
-        return self._docproperty(name, object, mod)
+        return self._docdescriptor(name, object, mod)
 
     def docother(self, object, name=None, mod=None, *ignored):
         """Produce HTML documentation for a data object."""
@@ -1078,7 +1080,7 @@
         if data:
             contents = []
             for key, value in data:
-                contents.append(self.docother(value, key, name, 70))
+                contents.append(self.docother(value, key, name, maxlen=70))
             result = result + self.section('DATA', join(contents, '\n'))
 
         if hasattr(object, '__version__'):
@@ -1143,13 +1145,13 @@
                                        name, mod, object))
             return attrs
 
-        def spillproperties(msg, attrs, predicate):
+        def spilldescriptors(msg, attrs, predicate):
             ok, attrs = _split_list(attrs, predicate)
             if ok:
                 hr.maybe()
                 push(msg)
                 for name, kind, homecls, value in ok:
-                    push(self._docproperty(name, value, mod))
+                    push(self._docdescriptor(name, value, mod))
             return attrs
 
         def spilldata(msg, attrs, predicate):
@@ -1159,15 +1161,15 @@
                 push(msg)
                 for name, kind, homecls, value in ok:
                     if callable(value) or inspect.isdatadescriptor(value):
-                        doc = getattr(value, "__doc__", None)
+                        doc = getdoc(value)
                     else:
                         doc = None
                     push(self.docother(getattr(object, name),
-                                       name, mod, 70, doc) + '\n')
+                                       name, mod, maxlen=70, doc=doc) + '\n')
             return attrs
 
         attrs = filter(lambda (name, kind, cls, value): visiblename(name),
-                       inspect.classify_class_attrs(object))
+                       classify_class_attrs(object))
         while attrs:
             if mro:
                 thisclass = mro.popleft()
@@ -1195,8 +1197,8 @@
                           lambda t: t[1] == 'class method')
             attrs = spill("Static methods %s:\n" % tag, attrs,
                           lambda t: t[1] == 'static method')
-            attrs = spillproperties("Properties %s:\n" % tag, attrs,
-                                    lambda t: t[1] == 'property')
+            attrs = spilldescriptors("Data descriptors %s:\n" % tag, attrs,
+                                     lambda t: t[1] == 'data descriptor')
             attrs = spilldata("Data and other attributes %s:\n" % tag, attrs,
                               lambda t: t[1] == 'data')
             assert attrs == []
@@ -1254,35 +1256,24 @@
             doc = getdoc(object) or ''
             return decl + '\n' + (doc and rstrip(self.indent(doc)) + '\n')
 
-    def _docproperty(self, name, value, mod):
+    def _docdescriptor(self, name, value, mod):
         results = []
         push = results.append
 
         if name:
-            push(name)
-        need_blank_after_doc = 0
+            push(self.bold(name))
+            push('\n')
         doc = getdoc(value) or ''
         if doc:
             push(self.indent(doc))
-            need_blank_after_doc = 1
-        for attr, tag in [('fget', '<get>'),
-                          ('fset', '<set>'),
-                          ('fdel', '<delete>')]:
-            func = getattr(value, attr)
-            if func is not None:
-                if need_blank_after_doc:
-                    push('')
-                    need_blank_after_doc = 0
-                base = self.document(func, tag, mod)
-                push(self.indent(base))
-
-        return '\n'.join(results)
+            push('\n')
+        return ''.join(results)
 
     def docproperty(self, object, name=None, mod=None, cl=None):
         """Produce text documentation for a property."""
-        return self._docproperty(name, object, mod)
+        return self._docdescriptor(name, object, mod)
 
-    def docother(self, object, name=None, mod=None, maxlen=None, doc=None):
+    def docother(self, object, name=None, mod=None, parent=None, maxlen=None, doc=None):
         """Produce text documentation for a data object."""
         repr = self.repr(object)
         if maxlen:
@@ -1308,12 +1299,12 @@
         return plainpager
     if not sys.stdin.isatty() or not sys.stdout.isatty():
         return plainpager
-    if os.environ.get('TERM') in ['dumb', 'emacs']:
+    if os.environ.get('TERM') in ('dumb', 'emacs'):
         return plainpager
     if 'PAGER' in os.environ:
         if sys.platform == 'win32': # pipes completely broken in Windows
             return lambda text: tempfilepager(plain(text), os.environ['PAGER'])
-        elif os.environ.get('TERM') in ['dumb', 'emacs']:
+        elif os.environ.get('TERM') in ('dumb', 'emacs'):
             return lambda text: pipepager(plain(text), os.environ['PAGER'])
         else:
             return lambda text: pipepager(text, os.environ['PAGER'])
@@ -1379,14 +1370,14 @@
             sys.stdout.flush()
             c = getchar()
 
-            if c in ['q', 'Q']:
+            if c in ('q', 'Q'):
                 sys.stdout.write('\r          \r')
                 break
-            elif c in ['\r', '\n']:
+            elif c in ('\r', '\n'):
                 sys.stdout.write('\r          \r' + lines[r] + '\n')
                 r = r + 1
                 continue
-            if c in ['b', 'B', '\x1b']:
+            if c in ('b', 'B', '\x1b'):
                 r = r - inc - inc
                 if r < 0: r = 0
             sys.stdout.write('\n' + join(lines[r:r+inc], '\n') + '\n')
@@ -1464,6 +1455,14 @@
             desc += ' in ' + name[:name.rfind('.')]
         elif module and module is not object:
             desc += ' in module ' + module.__name__
+        if not (inspect.ismodule(object) or
+                inspect.isclass(object) or
+                inspect.isroutine(object) or
+                isinstance(object, property)):
+            # If the passed object is a piece of data or an instance,
+            # document its available methods instead of its value.
+            object = type(object)
+            desc += ' object'
         pager(title % desc + '\n\n' + text.document(object, name))
     except (ImportError, ErrorDuringImport), value:
         print value
@@ -1656,7 +1655,7 @@
             except (KeyboardInterrupt, EOFError):
                 break
             request = strip(replace(request, '"', '', "'", ''))
-            if lower(request) in ['q', 'quit']: break
+            if lower(request) in ('q', 'quit'): break
             self.help(request)
 
     def getline(self, prompt):

Index: random.py
===================================================================
RCS file: /cvsroot/python/python/dist/src/Lib/random.py,v
retrieving revision 1.34.2.2
retrieving revision 1.34.2.3
diff -u -d -r1.34.2.2 -r1.34.2.3
--- random.py	7 Jan 2005 06:58:08 -0000	1.34.2.2
+++ random.py	16 Oct 2005 05:23:59 -0000	1.34.2.3
@@ -41,7 +41,7 @@
 
 from warnings import warn as _warn
 from types import MethodType as _MethodType, BuiltinMethodType as _BuiltinMethodType
-from math import log as _log, exp as _exp, pi as _pi, e as _e
+from math import log as _log, exp as _exp, pi as _pi, e as _e, ceil as _ceil
 from math import sqrt as _sqrt, acos as _acos, cos as _cos, sin as _sin
 from os import urandom as _urandom
 from binascii import hexlify as _hexlify
@@ -286,15 +286,14 @@
         """
 
         # Sampling without replacement entails tracking either potential
-        # selections (the pool) in a list or previous selections in a
-        # dictionary.
+        # selections (the pool) in a list or previous selections in a set.
 
         # When the number of selections is small compared to the
         # population, then tracking selections is efficient, requiring
-        # only a small dictionary and an occasional reselection.  For
+        # only a small set and an occasional reselection.  For
         # a larger number of selections, the pool tracking method is
         # preferred since the list takes less space than the
-        # dictionary and it doesn't suffer from frequent reselections.
+        # set and it doesn't suffer from frequent reselections.
 
         n = len(population)
         if not 0 <= k <= n:
@@ -302,7 +301,10 @@
         random = self.random
         _int = int
         result = [None] * k
-        if n < 6 * k:     # if n len list takes less space than a k len dict
+        setsize = 21        # size of a small set minus size of an empty list
+        if k > 5:
+            setsize += 4 ** _ceil(_log(k * 3, 4)) # table size for big sets
+        if n <= setsize:    # is an n-length list smaller than a k-length set
             pool = list(population)
             for i in xrange(k):         # invariant:  non-selected at [0,n-i)
                 j = _int(random() * (n-i))
@@ -311,14 +313,16 @@
         else:
             try:
                 n > 0 and (population[0], population[n//2], population[n-1])
-            except (TypeError, KeyError):   # handle sets and dictionaries
+            except (TypeError, KeyError):   # handle non-sequence iterables
                 population = tuple(population)
-            selected = {}
+            selected = set()
+            selected_add = selected.add
             for i in xrange(k):
                 j = _int(random() * n)
                 while j in selected:
                     j = _int(random() * n)
-                result[i] = selected[j] = population[j]
+                selected_add(j)
+                result[i] = population[j]
         return result
 
 ## -------------------- real-valued distributions  -------------------
@@ -345,7 +349,7 @@
         # Math Software, 3, (1977), pp257-260.
 
         random = self.random
-        while True:
+        while 1:
             u1 = random()
             u2 = 1.0 - random()
             z = NV_MAGICCONST*(u1-0.5)/u2
@@ -415,7 +419,7 @@
         b = (a - _sqrt(2.0 * a))/(2.0 * kappa)
         r = (1.0 + b * b)/(2.0 * b)
 
-        while True:
+        while 1:
             u1 = random()
 
             z = _cos(_pi * u1)
@@ -424,7 +428,7 @@
 
             u2 = random()
 
-            if not (u2 >= c * (2.0 - c) and u2 > c * _exp(1.0 - c)):
+            if u2 < c * (2.0 - c) or u2 <= c * _exp(1.0 - c):
                 break
 
         u3 = random()
@@ -462,7 +466,7 @@
             bbb = alpha - LOG4
             ccc = alpha + ainv
 
-            while True:
+            while 1:
                 u1 = random()
                 if not 1e-7 < u1 < .9999999:
                     continue
@@ -485,18 +489,19 @@
 
             # Uses ALGORITHM GS of Statistical Computing - Kennedy & Gentle
 
-            while True:
+            while 1:
                 u = random()
                 b = (_e + alpha)/_e
                 p = b*u
                 if p <= 1.0:
-                    x = pow(p, 1.0/alpha)
+                    x = p ** (1.0/alpha)
                 else:
-                    # p > 1
                     x = -_log((b-p)/alpha)
                 u1 = random()
-                if not (((p <= 1.0) and (u1 > _exp(-x))) or
-                          ((p > 1)  and  (u1 > pow(x, alpha - 1.0)))):
+                if p > 1.0:
+                    if u1 <= x ** (alpha - 1.0):
+                        break
+                elif u1 <= _exp(-x):
                     break
             return x * beta
 

Index: reconvert.py
===================================================================
RCS file: /cvsroot/python/python/dist/src/Lib/reconvert.py,v
retrieving revision 1.6.16.1
retrieving revision 1.6.16.2
diff -u -d -r1.6.16.1 -r1.6.16.2
--- reconvert.py	7 Jan 2005 06:58:08 -0000	1.6.16.1
+++ reconvert.py	16 Oct 2005 05:23:59 -0000	1.6.16.2
@@ -166,7 +166,7 @@
         if q in s and altq not in s:
             q = altq
     else:
-        assert quote in ('"', "'")
+        assert quote in ('"', "'", '"""', "'''")
         q = quote
     res = q
     for c in s:

Index: rfc822.py
===================================================================
RCS file: /cvsroot/python/python/dist/src/Lib/rfc822.py,v
retrieving revision 1.72.2.2
retrieving revision 1.72.2.3
diff -u -d -r1.72.2.2 -r1.72.2.3
--- rfc822.py	7 Jan 2005 06:58:09 -0000	1.72.2.2
+++ rfc822.py	16 Oct 2005 05:23:59 -0000	1.72.2.3
@@ -90,8 +90,6 @@
                 fp.tell()
             except (AttributeError, IOError):
                 seekable = 0
-            else:
-                seekable = 1
         self.fp = fp
         self.seekable = seekable
         self.startofheaders = None
@@ -134,7 +132,7 @@
         """
         self.dict = {}
         self.unixfrom = ''
-        self.headers = list = []
+        self.headers = lst = []
         self.status = ''
         headerseen = ""
         firstline = 1
@@ -161,7 +159,7 @@
             firstline = 0
             if headerseen and line[0] in ' \t':
                 # It's a continuation line.
-                list.append(line)
+                lst.append(line)
                 x = (self.dict[headerseen] + "\n " + line.strip())
                 self.dict[headerseen] = x.strip()
                 continue
@@ -174,7 +172,7 @@
             headerseen = self.isheader(line)
             if headerseen:
                 # It's a legal header line, save it.
-                list.append(line)
+                lst.append(line)
                 self.dict[headerseen] = line[len(headerseen)+1:].strip()
                 continue
             else:
@@ -202,8 +200,7 @@
         i = line.find(':')
         if i > 0:
             return line[:i].lower()
-        else:
-            return None
+        return None
 
     def islast(self, line):
         """Determine whether a line is a legal end of RFC 2822 headers.
@@ -235,7 +232,7 @@
         """
         name = name.lower() + ':'
         n = len(name)
-        list = []
+        lst = []
         hit = 0
         for line in self.headers:
             if line[:n].lower() == name:
@@ -243,8 +240,8 @@
             elif not line[:1].isspace():
                 hit = 0
             if hit:
-                list.append(line)
-        return list
+                lst.append(line)
+        return lst
 
     def getfirstmatchingheader(self, name):
         """Get the first header line matching name.
@@ -254,7 +251,7 @@
         """
         name = name.lower() + ':'
         n = len(name)
-        list = []
+        lst = []
         hit = 0
         for line in self.headers:
             if hit:
@@ -263,8 +260,8 @@
             elif line[:n].lower() == name:
                 hit = 1
             if hit:
-                list.append(line)
-        return list
+                lst.append(line)
+        return lst
 
     def getrawheader(self, name):
         """A higher-level interface to getfirstmatchingheader().
@@ -275,11 +272,11 @@
         occur.
         """
 
-        list = self.getfirstmatchingheader(name)
-        if not list:
+        lst = self.getfirstmatchingheader(name)
+        if not lst:
             return None
-        list[0] = list[0][len(name) + 1:]
-        return ''.join(list)
+        lst[0] = lst[0][len(name) + 1:]
+        return ''.join(lst)
 
     def getheader(self, name, default=None):
         """Get the header value for a name.
@@ -288,10 +285,7 @@
         header value for a given header name, or None if it doesn't exist.
         This uses the dictionary version which finds the *last* such header.
         """
-        try:
-            return self.dict[name.lower()]
-        except KeyError:
-            return default
+        return self.dict.get(name.lower(), default)
     get = getheader
 
     def getheaders(self, name):
@@ -399,8 +393,7 @@
         del self[name] # Won't fail if it doesn't exist
         self.dict[name.lower()] = value
         text = name + ": " + value
-        lines = text.split("\n")
-        for line in lines:
+        for line in text.split("\n"):
             self.headers.append(line + "\n")
 
     def __delitem__(self, name):
@@ -411,7 +404,7 @@
         del self.dict[name]
         name = name + ':'
         n = len(name)
-        list = []
+        lst = []
         hit = 0
         for i in range(len(self.headers)):
             line = self.headers[i]
@@ -420,8 +413,8 @@
             elif not line[:1].isspace():
                 hit = 0
             if hit:
-                list.append(i)
-        for i in reversed(list):
+                lst.append(i)
+        for i in reversed(lst):
             del self.headers[i]
 
     def setdefault(self, name, default=""):
@@ -430,8 +423,7 @@
             return self.dict[lowername]
         else:
             text = name + ": " + default
-            lines = text.split("\n")
-            for line in lines:
+            for line in text.split("\n"):
                 self.headers.append(line + "\n")
             self.dict[lowername] = default
             return default
@@ -473,29 +465,28 @@
 # XXX The inverses of the parse functions may also be useful.
 
 
-def unquote(str):
+def unquote(s):
     """Remove quotes from a string."""
-    if len(str) > 1:
-        if str.startswith('"') and str.endswith('"'):
-            return str[1:-1].replace('\\\\', '\\').replace('\\"', '"')
-        if str.startswith('<') and str.endswith('>'):
-            return str[1:-1]
-    return str
+    if len(s) > 1:
+        if s.startswith('"') and s.endswith('"'):
+            return s[1:-1].replace('\\\\', '\\').replace('\\"', '"')
+        if s.startswith('<') and s.endswith('>'):
+            return s[1:-1]
+    return s
 
 
-def quote(str):
+def quote(s):
     """Add quotes around a string."""
-    return str.replace('\\', '\\\\').replace('"', '\\"')
+    return s.replace('\\', '\\\\').replace('"', '\\"')
 
 
 def parseaddr(address):
     """Parse an address into a (realname, mailaddr) tuple."""
     a = AddressList(address)
-    list = a.addresslist
-    if not list:
+    lst = a.addresslist
+    if not lst:
         return (None, None)
-    else:
-        return list[0]
+    return lst[0]
 
 
 class AddrlistClass:
@@ -543,12 +534,10 @@
         Returns a list containing all of the addresses.
         """
         result = []
-        while 1:
+        ad = self.getaddress()
+        while ad:
+            result += ad
             ad = self.getaddress()
-            if ad:
-                result += ad
-            else:
-                break
         return result
 
     def getaddress(self):
@@ -581,11 +570,11 @@
             returnlist = []
 
             fieldlen = len(self.field)
-            self.pos = self.pos + 1
+            self.pos += 1
             while self.pos < len(self.field):
                 self.gotonext()
                 if self.pos < fieldlen and self.field[self.pos] == ';':
-                    self.pos = self.pos + 1
+                    self.pos += 1
                     break
                 returnlist = returnlist + self.getaddress()
 
@@ -602,11 +591,11 @@
             if plist:
                 returnlist = [(' '.join(self.commentlist), plist[0])]
             elif self.field[self.pos] in self.specials:
-                self.pos = self.pos + 1
+                self.pos += 1
 
         self.gotonext()
         if self.pos < len(self.field) and self.field[self.pos] == ',':
-            self.pos = self.pos + 1
+            self.pos += 1
         return returnlist
 
     def getrouteaddr(self):
@@ -618,7 +607,7 @@
             return
 
         expectroute = 0
-        self.pos = self.pos + 1
+        self.pos += 1
         self.gotonext()
         adlist = ""
         while self.pos < len(self.field):
@@ -626,16 +615,16 @@
                 self.getdomain()
                 expectroute = 0
             elif self.field[self.pos] == '>':
-                self.pos = self.pos + 1
+                self.pos += 1
                 break
             elif self.field[self.pos] == '@':
-                self.pos = self.pos + 1
+                self.pos += 1
                 expectroute = 1
             elif self.field[self.pos] == ':':
-                self.pos = self.pos + 1
+                self.pos += 1
             else:
                 adlist = self.getaddrspec()
-                self.pos = self.pos + 1
+                self.pos += 1
                 break
             self.gotonext()
 
@@ -649,7 +638,7 @@
         while self.pos < len(self.field):
             if self.field[self.pos] == '.':
                 aslist.append('.')
-                self.pos = self.pos + 1
+                self.pos += 1
             elif self.field[self.pos] == '"':
                 aslist.append('"%s"' % self.getquote())
             elif self.field[self.pos] in self.atomends:
@@ -661,7 +650,7 @@
             return ''.join(aslist)
 
         aslist.append('@')
-        self.pos = self.pos + 1
+        self.pos += 1
         self.gotonext()
         return ''.join(aslist) + self.getdomain()
 
@@ -670,13 +659,13 @@
         sdlist = []
         while self.pos < len(self.field):
             if self.field[self.pos] in self.LWS:
-                self.pos = self.pos + 1
+                self.pos += 1
             elif self.field[self.pos] == '(':
                 self.commentlist.append(self.getcomment())
             elif self.field[self.pos] == '[':
                 sdlist.append(self.getdomainliteral())
             elif self.field[self.pos] == '.':
-                self.pos = self.pos + 1
+                self.pos += 1
                 sdlist.append('.')
             elif self.field[self.pos] in self.atomends:
                 break
@@ -701,13 +690,13 @@
 
         slist = ['']
         quote = 0
-        self.pos = self.pos + 1
+        self.pos += 1
         while self.pos < len(self.field):
             if quote == 1:
                 slist.append(self.field[self.pos])
                 quote = 0
             elif self.field[self.pos] in endchars:
-                self.pos = self.pos + 1
+                self.pos += 1
                 break
             elif allowcomments and self.field[self.pos] == '(':
                 slist.append(self.getcomment())
@@ -715,7 +704,7 @@
                 quote = 1
             else:
                 slist.append(self.field[self.pos])
-            self.pos = self.pos + 1
+            self.pos += 1
 
         return ''.join(slist)
 
@@ -746,7 +735,7 @@
             if self.field[self.pos] in atomends:
                 break
             else: atomlist.append(self.field[self.pos])
-            self.pos = self.pos + 1
+            self.pos += 1
 
         return ''.join(atomlist)
 
@@ -761,7 +750,7 @@
 
         while self.pos < len(self.field):
             if self.field[self.pos] in self.LWS:
-                self.pos = self.pos + 1
+                self.pos += 1
             elif self.field[self.pos] == '"':
                 plist.append(self.getquote())
             elif self.field[self.pos] == '(':
@@ -930,16 +919,15 @@
         else:
             tzsign = 1
         tzoffset = tzsign * ( (tzoffset//100)*3600 + (tzoffset % 100)*60)
-    tuple = (yy, mm, dd, thh, tmm, tss, 0, 1, 0, tzoffset)
-    return tuple
+    return (yy, mm, dd, thh, tmm, tss, 0, 1, 0, tzoffset)
 
 
 def parsedate(data):
     """Convert a time string to a time tuple."""
     t = parsedate_tz(data)
-    if type(t) == type( () ):
-        return t[:9]
-    else: return t
+    if t is None:
+        return t
+    return t[:9]
 
 
 def mktime_tz(data):
@@ -965,10 +953,10 @@
         timeval = time.time()
     timeval = time.gmtime(timeval)
     return "%s, %02d %s %04d %02d:%02d:%02d GMT" % (
-            ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"][timeval[6]],
+            ("Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun")[timeval[6]],
             timeval[2],
-            ["Jan", "Feb", "Mar", "Apr", "May", "Jun",
-             "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"][timeval[1]-1],
+            ("Jan", "Feb", "Mar", "Apr", "May", "Jun",
+             "Jul", "Aug", "Sep", "Oct", "Nov", "Dec")[timeval[1]-1],
                                 timeval[0], timeval[3], timeval[4], timeval[5])
 
 
@@ -1002,7 +990,7 @@
     m.rewindbody()
     n = 0
     while f.readline():
-        n = n + 1
+        n += 1
     print 'Lines:', n
     print '-'*70
     print 'len =', len(m)

Index: sets.py
===================================================================
RCS file: /cvsroot/python/python/dist/src/Lib/sets.py,v
retrieving revision 1.43.4.2
retrieving revision 1.43.4.3
diff -u -d -r1.43.4.2 -r1.43.4.3
--- sets.py	7 Jan 2005 06:58:09 -0000	1.43.4.2
+++ sets.py	16 Oct 2005 05:23:59 -0000	1.43.4.3
@@ -480,6 +480,8 @@
         value = True
         if not isinstance(other, BaseSet):
             other = Set(other)
+        if self is other:
+            self.clear()
         for elt in other:
             if elt in data:
                 del data[elt]
@@ -497,6 +499,8 @@
         data = self._data
         if not isinstance(other, BaseSet):
             other = Set(other)
+        if self is other:
+            self.clear()
         for elt in ifilter(data.has_key, other):
             del data[elt]
 

Index: shutil.py
===================================================================
RCS file: /cvsroot/python/python/dist/src/Lib/shutil.py,v
retrieving revision 1.22.2.2
retrieving revision 1.22.2.3
diff -u -d -r1.22.2.2 -r1.22.2.3
--- shutil.py	7 Jan 2005 06:58:10 -0000	1.22.2.2
+++ shutil.py	16 Oct 2005 05:23:59 -0000	1.22.2.3
@@ -7,13 +7,12 @@
 import os
 import sys
 import stat
-import exceptions
 from os.path import abspath
 
 __all__ = ["copyfileobj","copyfile","copymode","copystat","copy","copy2",
            "copytree","move","rmtree","Error"]
 
-class Error(exceptions.EnvironmentError):
+class Error(EnvironmentError):
     pass
 
 def copyfileobj(fsrc, fdst, length=16*1024):
@@ -108,7 +107,7 @@
 
     """
     names = os.listdir(src)
-    os.mkdir(dst)
+    os.makedirs(dst)
     errors = []
     for name in names:
         srcname = os.path.join(src, name)
@@ -124,6 +123,11 @@
             # XXX What about devices, sockets etc.?
         except (IOError, os.error), why:
             errors.append((srcname, dstname, why))
+        # catch the Error from the recursive copytree so that we can
+        # continue with other files
+        except Error, err:
+            errors.extend(err.args[0])
+    copystat(src, dst)
     if errors:
         raise Error, errors
 

Index: smtplib.py
===================================================================
RCS file: /cvsroot/python/python/dist/src/Lib/smtplib.py,v
retrieving revision 1.58.2.2
retrieving revision 1.58.2.3
diff -u -d -r1.58.2.2 -r1.58.2.3
--- smtplib.py	7 Jan 2005 06:58:10 -0000	1.58.2.2
+++ smtplib.py	16 Oct 2005 05:23:59 -0000	1.58.2.3
@@ -43,7 +43,7 @@
 
 import socket
 import re
-import rfc822
+import email.Utils
 import base64
 import hmac
 from email.base64MIME import encode as encode_base64
@@ -171,7 +171,7 @@
     """
     m = (None, None)
     try:
-        m=rfc822.parseaddr(addr)[1]
+        m = email.Utils.parseaddr(addr)[1]
     except AttributeError:
         pass
     if m == (None, None): # Indicates parse failure or AttributeError
@@ -290,10 +290,10 @@
             af, socktype, proto, canonname, sa = res
             try:
                 self.sock = socket.socket(af, socktype, proto)
-                if self.debuglevel > 0: print>>stderr, 'connect:', (host, port)
+                if self.debuglevel > 0: print>>stderr, 'connect:', sa
                 self.sock.connect(sa)
             except socket.error, msg:
-                if self.debuglevel > 0: print>>stderr, 'connect fail:', (host, port)
+                if self.debuglevel > 0: print>>stderr, 'connect fail:', msg
                 if self.sock:
                     self.sock.close()
                 self.sock = None
@@ -439,7 +439,7 @@
         """SMTP 'help' command.
         Returns help text from server."""
         self.putcmd("help", args)
-        return self.getreply()
+        return self.getreply()[1]
 
     def rset(self):
         """SMTP 'rset' command -- resets session."""
@@ -578,7 +578,7 @@
             (code, resp) = self.docmd(encode_base64(password, eol=""))
         elif authmethod is None:
             raise SMTPException("No suitable authentication method found.")
-        if code not in [235, 503]:
+        if code not in (235, 503):
             # 235 == 'Authentication successful'
             # 503 == 'Error: already authenticated'
             raise SMTPAuthenticationError(code, resp)

Index: socket.py
===================================================================
RCS file: /cvsroot/python/python/dist/src/Lib/socket.py,v
retrieving revision 1.21.2.2
retrieving revision 1.21.2.3
diff -u -d -r1.21.2.2 -r1.21.2.3
--- socket.py	7 Jan 2005 06:58:10 -0000	1.21.2.2
+++ socket.py	16 Oct 2005 05:23:59 -0000	1.21.2.3
@@ -102,7 +102,7 @@
 
     First the hostname returned by gethostbyaddr() is checked, then
     possibly existing aliases. In case no FQDN is available, hostname
-    is returned.
+    from gethostname() is returned.
     """
     name = name.strip()
     if not name or name == '0.0.0.0':

Index: sre.py
===================================================================
RCS file: /cvsroot/python/python/dist/src/Lib/sre.py,v
retrieving revision 1.44.10.2
retrieving revision 1.44.10.3
diff -u -d -r1.44.10.2 -r1.44.10.3
--- sre.py	7 Jan 2005 06:58:10 -0000	1.44.10.2
+++ sre.py	16 Oct 2005 05:23:59 -0000	1.44.10.3
@@ -188,12 +188,18 @@
     "Compile a template pattern, returning a pattern object"
     return _compile(pattern, flags|T)
 
+_alphanum = {}
+for c in 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ01234567890':
+    _alphanum[c] = 1
+del c
+
 def escape(pattern):
     "Escape all non-alphanumeric characters in pattern."
     s = list(pattern)
+    alphanum = _alphanum
     for i in range(len(pattern)):
         c = pattern[i]
-        if not ("a" <= c <= "z" or "A" <= c <= "Z" or "0" <= c <= "9"):
+        if c not in alphanum:
             if c == "\000":
                 s[i] = "\\000"
             else:

Index: sre_compile.py
===================================================================
RCS file: /cvsroot/python/python/dist/src/Lib/sre_compile.py,v
retrieving revision 1.43.2.2
retrieving revision 1.43.2.3
diff -u -d -r1.43.2.2 -r1.43.2.3
--- sre_compile.py	7 Jan 2005 06:58:10 -0000	1.43.2.2
+++ sre_compile.py	16 Oct 2005 05:23:59 -0000	1.43.2.3
@@ -24,14 +24,25 @@
 def _identityfunction(x):
     return x
 
+def set(seq):
+    s = {}
+    for elem in seq:
+        s[elem] = 1
+    return s
+
+_LITERAL_CODES = set([LITERAL, NOT_LITERAL])
+_REPEATING_CODES = set([REPEAT, MIN_REPEAT, MAX_REPEAT])
+_SUCCESS_CODES = set([SUCCESS, FAILURE])
+_ASSERT_CODES = set([ASSERT, ASSERT_NOT])
+
 def _compile(code, pattern, flags):
     # internal: compile a (sub)pattern
     emit = code.append
     _len = len
-    LITERAL_CODES = {LITERAL:1, NOT_LITERAL:1}
-    REPEATING_CODES = {REPEAT:1, MIN_REPEAT:1, MAX_REPEAT:1}
-    SUCCESS_CODES = {SUCCESS:1, FAILURE:1}
-    ASSERT_CODES = {ASSERT:1, ASSERT_NOT:1}
+    LITERAL_CODES = _LITERAL_CODES
+    REPEATING_CODES = _REPEATING_CODES
+    SUCCESS_CODES = _SUCCESS_CODES
+    ASSERT_CODES = _ASSERT_CODES
     for op, av in pattern:
         if op in LITERAL_CODES:
             if flags & SRE_FLAG_IGNORECASE:
@@ -156,7 +167,7 @@
             emit(av-1)
         elif op is GROUPREF_EXISTS:
             emit(OPCODES[op])
-            emit((av[0]-1)*2)
+            emit(av[0]-1)
             skipyes = _len(code); emit(0)
             _compile(code, av[1], flags)
             if av[2]:

Index: sre_parse.py
===================================================================
RCS file: /cvsroot/python/python/dist/src/Lib/sre_parse.py,v
retrieving revision 1.55.2.2
retrieving revision 1.55.2.3
diff -u -d -r1.55.2.2 -r1.55.2.3
--- sre_parse.py	7 Jan 2005 06:58:10 -0000	1.55.2.2
+++ sre_parse.py	16 Oct 2005 05:23:59 -0000	1.55.2.3
@@ -16,15 +16,21 @@
 
 from sre_constants import *
 
+def set(seq):
+    s = {}
+    for elem in seq:
+        s[elem] = 1
+    return s
+
 SPECIAL_CHARS = ".\\[{()*+?^$|"
 REPEAT_CHARS = "*+?{"
 
-DIGITS = tuple("0123456789")
+DIGITS = set("0123456789")
 
-OCTDIGITS = tuple("01234567")
-HEXDIGITS = tuple("0123456789abcdefABCDEF")
+OCTDIGITS = set("01234567")
+HEXDIGITS = set("0123456789abcdefABCDEF")
 
-WHITESPACE = tuple(" \t\n\r\v\f")
+WHITESPACE = set(" \t\n\r\v\f")
 
 ESCAPES = {
     r"\a": (LITERAL, ord("\a")),
@@ -371,6 +377,11 @@
     subpattern.append((GROUPREF_EXISTS, (condgroup, item_yes, item_no)))
     return subpattern
 
+_PATTERNENDERS = set("|)")
+_ASSERTCHARS = set("=!<")
+_LOOKBEHINDASSERTCHARS = set("=!")
+_REPEATCODES = set([MIN_REPEAT, MAX_REPEAT])
+
 def _parse(source, state):
     # parse a simple pattern
     subpattern = SubPattern(state)
@@ -380,10 +391,10 @@
     sourceget = source.get
     sourcematch = source.match
     _len = len
-    PATTERNENDERS = ("|", ")")
-    ASSERTCHARS = ("=", "!", "<")
-    LOOKBEHINDASSERTCHARS = ("=", "!")
-    REPEATCODES = (MIN_REPEAT, MAX_REPEAT)
+    PATTERNENDERS = _PATTERNENDERS
+    ASSERTCHARS = _ASSERTCHARS
+    LOOKBEHINDASSERTCHARS = _LOOKBEHINDASSERTCHARS
+    REPEATCODES = _REPEATCODES
 
     while 1:
 
@@ -474,6 +485,9 @@
             elif this == "+":
                 min, max = 1, MAXREPEAT
             elif this == "{":
+                if source.next == "}":
+                    subpatternappend((LITERAL, ord(this)))
+                    continue
                 here = source.tell()
                 min, max = 0, MAXREPEAT
                 lo = hi = ""

Index: subprocess.py
===================================================================
RCS file: /cvsroot/python/python/dist/src/Lib/subprocess.py,v
retrieving revision 1.13.2.1
retrieving revision 1.13.2.2
diff -u -d -r1.13.2.1 -r1.13.2.2
--- subprocess.py	7 Jan 2005 06:58:10 -0000	1.13.2.1
+++ subprocess.py	16 Oct 2005 05:23:59 -0000	1.13.2.2
@@ -2,28 +2,12 @@
 #
 # For more information about this module, see PEP 324.
 #
-# Copyright (c) 2003-2004 by Peter Astrand <astrand at lysator.liu.se>
-#
-# By obtaining, using, and/or copying this software and/or its
-# associated documentation, you agree that you have read, understood,
-# and will comply with the following terms and conditions:
+# This module should remain compatible with Python 2.2, see PEP 291.
 #
-# Permission to use, copy, modify, and distribute this software and
-# its associated documentation for any purpose and without fee is
-# hereby granted, provided that the above copyright notice appears in
-# all copies, and that both that copyright notice and this permission
-# notice appear in supporting documentation, and that the name of the
-# author not be used in advertising or publicity pertaining to
-# distribution of the software without specific, written prior
-# permission.
+# Copyright (c) 2003-2005 by Peter Astrand <astrand at lysator.liu.se>
 #
-# THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
-# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS.
-# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, INDIRECT OR
-# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
-# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
-# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
-# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+# Licensed to PSF under a Contributor Agreement.
+# See http://www.python.org/2.4/license for licensing details.
 
 r"""subprocess - Subprocesses with accessible I/O streams
 
@@ -528,6 +512,7 @@
             result.extend(bs_buf)
 
         if needquote:
+            result.extend(bs_buf)
             result.append('"')
 
     return ''.join(result)
@@ -615,6 +600,33 @@
         data = data.replace("\r", "\n")
         return data
 
+    def communicate(self, input=None):
+        """Interact with process: Send data to stdin.  Read data from
+        stdout and stderr, until end-of-file is reached.  Wait for
+        process to terminate.  The optional input argument should be a
+        string to be sent to the child process, or None, if no data
+        should be sent to the child.
+
+        communicate() returns a tuple (stdout, stderr)."""
+
+        # Optimization: If we are only using one pipe, or no pipe at
+        # all, using select() or threads is unnecessary.
+        if [self.stdin, self.stdout, self.stderr].count(None) >= 2:
+            stdout = None
+            stderr = None
+            if self.stdin:
+                if input:
+                    self.stdin.write(input)
+                self.stdin.close()
+            elif self.stdout:
+                stdout = self.stdout.read()
+            elif self.stderr:
+                stderr = self.stderr.read()
+            self.wait()
+            return (stdout, stderr)
+
+        return self._communicate(input)
+
 
     if mswindows:
         #
@@ -624,42 +636,42 @@
             """Construct and return tupel with IO objects:
             p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite
             """
-            if stdin == None and stdout == None and stderr == None:
+            if stdin is None and stdout is None and stderr is None:
                 return (None, None, None, None, None, None)
 
             p2cread, p2cwrite = None, None
             c2pread, c2pwrite = None, None
             errread, errwrite = None, None
 
-            if stdin == None:
+            if stdin is None:
                 p2cread = GetStdHandle(STD_INPUT_HANDLE)
             elif stdin == PIPE:
                 p2cread, p2cwrite = CreatePipe(None, 0)
                 # Detach and turn into fd
                 p2cwrite = p2cwrite.Detach()
                 p2cwrite = msvcrt.open_osfhandle(p2cwrite, 0)
-            elif type(stdin) == types.IntType:
+            elif isinstance(stdin, int):
                 p2cread = msvcrt.get_osfhandle(stdin)
             else:
                 # Assuming file-like object
                 p2cread = msvcrt.get_osfhandle(stdin.fileno())
             p2cread = self._make_inheritable(p2cread)
 
-            if stdout == None:
+            if stdout is None:
                 c2pwrite = GetStdHandle(STD_OUTPUT_HANDLE)
             elif stdout == PIPE:
                 c2pread, c2pwrite = CreatePipe(None, 0)
                 # Detach and turn into fd
                 c2pread = c2pread.Detach()
                 c2pread = msvcrt.open_osfhandle(c2pread, 0)
-            elif type(stdout) == types.IntType:
+            elif isinstance(stdout, int):
                 c2pwrite = msvcrt.get_osfhandle(stdout)
             else:
                 # Assuming file-like object
                 c2pwrite = msvcrt.get_osfhandle(stdout.fileno())
             c2pwrite = self._make_inheritable(c2pwrite)
 
-            if stderr == None:
+            if stderr is None:
                 errwrite = GetStdHandle(STD_ERROR_HANDLE)
             elif stderr == PIPE:
                 errread, errwrite = CreatePipe(None, 0)
@@ -668,7 +680,7 @@
                 errread = msvcrt.open_osfhandle(errread, 0)
             elif stderr == STDOUT:
                 errwrite = c2pwrite
-            elif type(stderr) == types.IntType:
+            elif isinstance(stderr, int):
                 errwrite = msvcrt.get_osfhandle(stderr)
             else:
                 # Assuming file-like object
@@ -716,7 +728,7 @@
 
             # Process startup details
             default_startupinfo = STARTUPINFO()
-            if startupinfo == None:
+            if startupinfo is None:
                 startupinfo = default_startupinfo
             if not None in (p2cread, c2pwrite, errwrite):
                 startupinfo.dwFlags |= STARTF_USESTDHANDLES
@@ -775,18 +787,18 @@
             # output pipe are maintained in this process or else the
             # pipe will not close when the child process exits and the
             # ReadFile will hang.
-            if p2cread != None:
+            if p2cread is not None:
                 p2cread.Close()
-            if c2pwrite != None:
+            if c2pwrite is not None:
                 c2pwrite.Close()
-            if errwrite != None:
+            if errwrite is not None:
                 errwrite.Close()
 
 
         def poll(self):
             """Check if child process has terminated.  Returns returncode
             attribute."""
-            if self.returncode == None:
+            if self.returncode is None:
                 if WaitForSingleObject(self._handle, 0) == WAIT_OBJECT_0:
                     self.returncode = GetExitCodeProcess(self._handle)
                     _active.remove(self)
@@ -796,7 +808,7 @@
         def wait(self):
             """Wait for child process to terminate.  Returns returncode
             attribute."""
-            if self.returncode == None:
+            if self.returncode is None:
                 obj = WaitForSingleObject(self._handle, INFINITE)
                 self.returncode = GetExitCodeProcess(self._handle)
                 _active.remove(self)
@@ -807,14 +819,7 @@
             buffer.append(fh.read())
 
 
-        def communicate(self, input=None):
-            """Interact with process: Send data to stdin.  Read data from
-            stdout and stderr, until end-of-file is reached.  Wait for
-            process to terminate.  The optional input argument should be a
-            string to be sent to the child process, or None, if no data
-            should be sent to the child.
-
-            communicate() returns a tuple (stdout, stderr)."""
+        def _communicate(self, input):
             stdout = None # Return
             stderr = None # Return
 
@@ -832,7 +837,7 @@
                 stderr_thread.start()
 
             if self.stdin:
-                if input != None:
+                if input is not None:
                     self.stdin.write(input)
                 self.stdin.close()
 
@@ -842,9 +847,9 @@
                 stderr_thread.join()
 
             # All data exchanged.  Translate lists into strings.
-            if stdout != None:
+            if stdout is not None:
                 stdout = stdout[0]
-            if stderr != None:
+            if stderr is not None:
                 stderr = stderr[0]
 
             # Translate newlines, if requested.  We cannot let the file
@@ -872,33 +877,33 @@
             c2pread, c2pwrite = None, None
             errread, errwrite = None, None
 
-            if stdin == None:
+            if stdin is None:
                 pass
             elif stdin == PIPE:
                 p2cread, p2cwrite = os.pipe()
-            elif type(stdin) == types.IntType:
+            elif isinstance(stdin, int):
                 p2cread = stdin
             else:
                 # Assuming file-like object
                 p2cread = stdin.fileno()
 
-            if stdout == None:
+            if stdout is None:
                 pass
             elif stdout == PIPE:
                 c2pread, c2pwrite = os.pipe()
-            elif type(stdout) == types.IntType:
+            elif isinstance(stdout, int):
                 c2pwrite = stdout
             else:
                 # Assuming file-like object
                 c2pwrite = stdout.fileno()
 
-            if stderr == None:
+            if stderr is None:
                 pass
             elif stderr == PIPE:
                 errread, errwrite = os.pipe()
             elif stderr == STDOUT:
                 errwrite = c2pwrite
-            elif type(stderr) == types.IntType:
+            elif isinstance(stderr, int):
                 errwrite = stderr
             else:
                 # Assuming file-like object
@@ -943,7 +948,7 @@
             if shell:
                 args = ["/bin/sh", "-c"] + args
 
-            if executable == None:
+            if executable is None:
                 executable = args[0]
 
             # For transferring possible exec failure from child to parent
@@ -986,13 +991,13 @@
                     if close_fds:
                         self._close_fds(but=errpipe_write)
 
-                    if cwd != None:
+                    if cwd is not None:
                         os.chdir(cwd)
 
                     if preexec_fn:
                         apply(preexec_fn)
 
-                    if env == None:
+                    if env is None:
                         os.execvp(executable, args)
                     else:
                         os.execvpe(executable, args, env)
@@ -1043,7 +1048,7 @@
         def poll(self):
             """Check if child process has terminated.  Returns returncode
             attribute."""
-            if self.returncode == None:
+            if self.returncode is None:
                 try:
                     pid, sts = os.waitpid(self.pid, os.WNOHANG)
                     if pid == self.pid:
@@ -1056,20 +1061,13 @@
         def wait(self):
             """Wait for child process to terminate.  Returns returncode
             attribute."""
-            if self.returncode == None:
+            if self.returncode is None:
                 pid, sts = os.waitpid(self.pid, 0)
                 self._handle_exitstatus(sts)
             return self.returncode
 
 
-        def communicate(self, input=None):
-            """Interact with process: Send data to stdin.  Read data from
-            stdout and stderr, until end-of-file is reached.  Wait for
-            process to terminate.  The optional input argument should be a
-            string to be sent to the child process, or None, if no data
-            should be sent to the child.
-
-            communicate() returns a tuple (stdout, stderr)."""
+        def _communicate(self, input):
             read_set = []
             write_set = []
             stdout = None # Return
@@ -1118,9 +1116,9 @@
                     stderr.append(data)
 
             # All data exchanged.  Translate lists into strings.
-            if stdout != None:
+            if stdout is not None:
                 stdout = ''.join(stdout)
-            if stderr != None:
+            if stderr is not None:
                 stderr = ''.join(stderr)
 
             # Translate newlines, if requested.  We cannot let the file

Index: symbol.py
===================================================================
RCS file: /cvsroot/python/python/dist/src/Lib/symbol.py,v
retrieving revision 1.14.12.2
retrieving revision 1.14.12.3
diff -u -d -r1.14.12.2 -r1.14.12.3
--- symbol.py	7 Jan 2005 06:58:10 -0000	1.14.12.2
+++ symbol.py	16 Oct 2005 05:23:59 -0000	1.14.12.3
@@ -88,6 +88,7 @@
 gen_if = 331
 testlist1 = 332
 encoding_decl = 333
+yield_expr = 334
 #--end constants--
 
 sym_name = {}

Index: tarfile.py
===================================================================
RCS file: /cvsroot/python/python/dist/src/Lib/tarfile.py,v
retrieving revision 1.8.4.2
retrieving revision 1.8.4.3
diff -u -d -r1.8.4.2 -r1.8.4.3
--- tarfile.py	7 Jan 2005 06:58:10 -0000	1.8.4.2
+++ tarfile.py	16 Oct 2005 05:23:59 -0000	1.8.4.3
@@ -274,7 +274,7 @@
        _Stream is intended to be used only internally.
     """
 
-    def __init__(self, name, mode, type, fileobj, bufsize):
+    def __init__(self, name, mode, comptype, fileobj, bufsize):
         """Construct a _Stream object.
         """
         self._extfileobj = True
@@ -282,16 +282,22 @@
             fileobj = _LowLevelFile(name, mode)
             self._extfileobj = False
 
-        self.name    = name or ""
-        self.mode    = mode
-        self.type    = type
-        self.fileobj = fileobj
-        self.bufsize = bufsize
-        self.buf     = ""
-        self.pos     = 0L
-        self.closed  = False
+        if comptype == '*':
+            # Enable transparent compression detection for the
+            # stream interface
+            fileobj = _StreamProxy(fileobj)
+            comptype = fileobj.getcomptype()
 
-        if type == "gz":
+        self.name     = name or ""
+        self.mode     = mode
+        self.comptype = comptype
+        self.fileobj  = fileobj
+        self.bufsize  = bufsize
+        self.buf      = ""
+        self.pos      = 0L
+        self.closed   = False
+
+        if comptype == "gz":
             try:
                 import zlib
             except ImportError:
@@ -303,7 +309,7 @@
             else:
                 self._init_write_gz()
 
-        if type == "bz2":
+        if comptype == "bz2":
             try:
                 import bz2
             except ImportError:
@@ -315,7 +321,7 @@
                 self.cmp = bz2.BZ2Compressor()
 
     def __del__(self):
-        if not self.closed:
+        if hasattr(self, "closed") and not self.closed:
             self.close()
 
     def _init_write_gz(self):
@@ -334,10 +340,10 @@
     def write(self, s):
         """Write string s to the stream.
         """
-        if self.type == "gz":
+        if self.comptype == "gz":
             self.crc = self.zlib.crc32(s, self.crc)
         self.pos += len(s)
-        if self.type != "tar":
+        if self.comptype != "tar":
             s = self.cmp.compress(s)
         self.__write(s)
 
@@ -357,12 +363,16 @@
         if self.closed:
             return
 
-        if self.mode == "w" and self.type != "tar":
+        if self.mode == "w" and self.comptype != "tar":
             self.buf += self.cmp.flush()
+
         if self.mode == "w" and self.buf:
+            blocks, remainder = divmod(len(self.buf), self.bufsize)
+            if remainder > 0:
+                self.buf += NUL * (self.bufsize - remainder)
             self.fileobj.write(self.buf)
             self.buf = ""
-            if self.type == "gz":
+            if self.comptype == "gz":
                 self.fileobj.write(struct.pack("<l", self.crc))
                 self.fileobj.write(struct.pack("<L", self.pos & 0xffffFFFFL))
 
@@ -441,7 +451,7 @@
     def _read(self, size):
         """Return size bytes from the stream.
         """
-        if self.type == "tar":
+        if self.comptype == "tar":
             return self.__read(size)
 
         c = len(self.dbuf)
@@ -474,6 +484,30 @@
         return t[:size]
 # class _Stream
 
+class _StreamProxy(object):
+    """Small proxy class that enables transparent compression
+       detection for the Stream interface (mode 'r|*').
+    """
+
+    def __init__(self, fileobj):
+        self.fileobj = fileobj
+        self.buf = self.fileobj.read(BLOCKSIZE)
+
+    def read(self, size):
+        self.read = self.fileobj.read
+        return self.buf
+
+    def getcomptype(self):
+        if self.buf.startswith("\037\213\010"):
+            return "gz"
+        if self.buf.startswith("BZh91"):
+            return "bz2"
+        return "tar"
+
+    def close(self):
+        self.fileobj.close()
+# class StreamProxy
+
 #------------------------
 # Extraction file object
 #------------------------
@@ -616,6 +650,22 @@
         """Close the file object.
         """
         self.closed = True
+
+    def __iter__(self):
+        """Get an iterator over the file object.
+        """
+        if self.closed:
+            raise ValueError("I/O operation on closed file")
+        return self
+
+    def next(self):
+        """Get the next item from the file iterator.
+        """
+        result = self.readline()
+        if not result:
+            raise StopIteration
+        return result
+
 #class ExFileObject
 
 #------------------
@@ -656,6 +706,7 @@
     def __repr__(self):
         return "<%s %r at %#x>" % (self.__class__.__name__,self.name,id(self))
 
+    @classmethod
     def frombuf(cls, buf):
         """Construct a TarInfo object from a 512 byte string buffer.
         """
@@ -699,8 +750,6 @@
             tarinfo.name += "/"
         return tarinfo
 
-    frombuf = classmethod(frombuf)
-
     def tobuf(self):
         """Return a tar header block as a 512 byte string.
         """
@@ -858,12 +907,13 @@
     # the super-constructor. A sub-constructor is registered and made available
     # by adding it to the mapping in OPEN_METH.
 
+    @classmethod
     def open(cls, name=None, mode="r", fileobj=None, bufsize=20*512):
         """Open a tar archive for reading, writing or appending. Return
            an appropriate TarFile class.
 
            mode:
-           'r'          open for reading with transparent compression
+           'r' or 'r:*' open for reading with transparent compression
            'r:'         open for reading exclusively uncompressed
            'r:gz'       open for reading with gzip compression
            'r:bz2'      open for reading with bzip2 compression
@@ -871,6 +921,8 @@
            'w' or 'w:'  open for writing without compression
            'w:gz'       open for writing with gzip compression
            'w:bz2'      open for writing with bzip2 compression
+
+           'r|*'        open a stream of tar blocks with transparent compression
            'r|'         open an uncompressed stream of tar blocks for reading
            'r|gz'       open a gzip compressed stream of tar blocks
            'r|bz2'      open a bzip2 compressed stream of tar blocks
@@ -882,7 +934,17 @@
         if not name and not fileobj:
             raise ValueError, "nothing to open"
 
-        if ":" in mode:
+        if mode in ("r", "r:*"):
+            # Find out which *open() is appropriate for opening the file.
+            for comptype in cls.OPEN_METH:
+                func = getattr(cls, cls.OPEN_METH[comptype])
+                try:
+                    return func(name, "r", fileobj)
+                except (ReadError, CompressionError):
+                    continue
+            raise ReadError, "file could not be opened successfully"
+
+        elif ":" in mode:
             filemode, comptype = mode.split(":", 1)
             filemode = filemode or "r"
             comptype = comptype or "tar"
@@ -908,23 +970,12 @@
             t._extfileobj = False
             return t
 
-        elif mode == "r":
-            # Find out which *open() is appropriate for opening the file.
-            for comptype in cls.OPEN_METH:
-                func = getattr(cls, cls.OPEN_METH[comptype])
-                try:
-                    return func(name, "r", fileobj)
-                except (ReadError, CompressionError):
-                    continue
-            raise ReadError, "file could not be opened successfully"
-
         elif mode in "aw":
             return cls.taropen(name, mode, fileobj)
 
         raise ValueError, "undiscernible mode"
 
-    open = classmethod(open)
-
+    @classmethod
     def taropen(cls, name, mode="r", fileobj=None):
         """Open uncompressed tar archive name for reading or writing.
         """
@@ -932,8 +983,7 @@
             raise ValueError, "mode must be 'r', 'a' or 'w'"
         return cls(name, mode, fileobj)
 
-    taropen = classmethod(taropen)
-
+    @classmethod
     def gzopen(cls, name, mode="r", fileobj=None, compresslevel=9):
         """Open gzip compressed tar archive name for reading or writing.
            Appending is not allowed.
@@ -970,8 +1020,7 @@
         t._extfileobj = False
         return t
 
-    gzopen = classmethod(gzopen)
-
+    @classmethod
     def bz2open(cls, name, mode="r", fileobj=None, compresslevel=9):
         """Open bzip2 compressed tar archive name for reading or writing.
            Appending is not allowed.
@@ -1002,8 +1051,6 @@
         t._extfileobj = False
         return t
 
-    bz2open = classmethod(bz2open)
-
     # All *open() methods are registered here.
     OPEN_METH = {
         "tar": "taropen",   # uncompressed tar
@@ -1132,17 +1179,16 @@
 
         # Fill the TarInfo object with all
         # information we can get.
-        tarinfo.name  = arcname
-        tarinfo.mode  = stmd
-        tarinfo.uid   = statres.st_uid
-        tarinfo.gid   = statres.st_gid
-        if stat.S_ISDIR(stmd):
-            # For a directory, the size must be 0
-            tarinfo.size  = 0
-        else:
+        tarinfo.name = arcname
+        tarinfo.mode = stmd
+        tarinfo.uid = statres.st_uid
+        tarinfo.gid = statres.st_gid
+        if stat.S_ISREG(stmd):
             tarinfo.size = statres.st_size
+        else:
+            tarinfo.size = 0L
         tarinfo.mtime = statres.st_mtime
-        tarinfo.type  = type
+        tarinfo.type = type
         tarinfo.linkname = linkname
         if pwd:
             try:
@@ -1233,16 +1279,15 @@
             self.addfile(tarinfo, f)
             f.close()
 
-        if tarinfo.type in (LNKTYPE, SYMTYPE, FIFOTYPE, CHRTYPE, BLKTYPE):
-            tarinfo.size = 0L
-            self.addfile(tarinfo)
-
-        if tarinfo.isdir():
+        elif tarinfo.isdir():
             self.addfile(tarinfo)
             if recursive:
                 for f in os.listdir(name):
                     self.add(os.path.join(name, f), os.path.join(arcname, f))
 
+        else:
+            self.addfile(tarinfo)
+
     def addfile(self, tarinfo, fileobj=None):
         """Add the TarInfo object `tarinfo' to the archive. If `fileobj' is
            given, tarinfo.size bytes are read from it and added to the archive.
@@ -1310,6 +1355,47 @@
 
         self.members.append(tarinfo)
 
+    def extractall(self, path=".", members=None):
+        """Extract all members from the archive to the current working
+           directory and set owner, modification time and permissions on
+           directories afterwards. `path' specifies a different directory
+           to extract to. `members' is optional and must be a subset of the
+           list returned by getmembers().
+        """
+        directories = []
+
+        if members is None:
+            members = self
+
+        for tarinfo in members:
+            if tarinfo.isdir():
+                # Extract directory with a safe mode, so that
+                # all files below can be extracted as well.
+                try:
+                    os.makedirs(os.path.join(path, tarinfo.name), 0777)
+                except EnvironmentError:
+                    pass
+                directories.append(tarinfo)
+            else:
+                self.extract(tarinfo, path)
+
+        # Reverse sort directories.
+        directories.sort(lambda a, b: cmp(a.name, b.name))
+        directories.reverse()
+
+        # Set correct owner, mtime and filemode on directories.
+        for tarinfo in directories:
+            path = os.path.join(path, tarinfo.name)
+            try:
+                self.chown(tarinfo, path)
+                self.utime(tarinfo, path)
+                self.chmod(tarinfo, path)
+            except ExtractError, e:
+                if self.errorlevel > 1:
+                    raise
+                else:
+                    self._dbg(1, "tarfile: %s" % e)
+
     def extract(self, member, path=""):
         """Extract a member from the archive to the current working directory,
            using its full name. Its file information is extracted as accurately
@@ -1374,7 +1460,7 @@
                 # stream of tar blocks.
                 raise StreamError, "cannot extract (sym)link as file object"
             else:
-                # A (sym)link's file object is it's target's file object.
+                # A (sym)link's file object is its target's file object.
                 return self.extractfile(self._getmember(tarinfo.linkname,
                                                         tarinfo))
         else:
@@ -1840,6 +1926,7 @@
         """Construct a TarIter object.
         """
         self.tarfile = tarfile
+        self.index = 0
     def __iter__(self):
         """Return iterator object.
         """
@@ -1848,10 +1935,20 @@
         """Return the next item using TarFile's next() method.
            When all members have been read, set TarFile as _loaded.
         """
-        tarinfo = self.tarfile.next()
-        if not tarinfo:
-            self.tarfile._loaded = True
-            raise StopIteration
+        # Fix for SF #1100429: Under rare circumstances it can
+        # happen that getmembers() is called during iteration,
+        # which will cause TarIter to stop prematurely.
+        if not self.tarfile._loaded:
+            tarinfo = self.tarfile.next()
+            if not tarinfo:
+                self.tarfile._loaded = True
+                raise StopIteration
+        else:
+            try:
+                tarinfo = self.tarfile.members[self.index]
+            except IndexError:
+                raise StopIteration
+        self.index += 1
         return tarinfo
 
 # Helper classes for sparse file support
@@ -1915,8 +2012,7 @@
             raise ValueError, "unknown compression constant"
         if mode[0:1] == "r":
             members = self.tarfile.getmembers()
-            for i in xrange(len(members)):
-                m = members[i]
+            for m in members:
                 m.filename = m.name
                 m.file_size = m.size
                 m.date_time = time.gmtime(m.mtime)[:6]

Index: telnetlib.py
===================================================================
RCS file: /cvsroot/python/python/dist/src/Lib/telnetlib.py,v
retrieving revision 1.19.2.2
retrieving revision 1.19.2.3
diff -u -d -r1.19.2.2 -r1.19.2.3
--- telnetlib.py	7 Jan 2005 06:58:11 -0000	1.19.2.2
+++ telnetlib.py	16 Oct 2005 05:23:59 -0000	1.19.2.3
@@ -1,4 +1,4 @@
-"""TELNET client class.
+r"""TELNET client class.
 
 Based on RFC 854: TELNET Protocol Specification, by J. Postel and
 J. Reynolds

Index: tempfile.py
===================================================================
RCS file: /cvsroot/python/python/dist/src/Lib/tempfile.py,v
retrieving revision 1.39.2.2
retrieving revision 1.39.2.3
diff -u -d -r1.39.2.2 -r1.39.2.3
--- tempfile.py	7 Jan 2005 06:58:11 -0000	1.39.2.2
+++ tempfile.py	16 Oct 2005 05:23:59 -0000	1.39.2.3
@@ -414,9 +414,9 @@
     'bufsize' -- the buffer size argument to os.fdopen (default -1).
     The file is created as mkstemp() would do it.
 
-    Returns a file object; the name of the file is accessible as
-    file.name.  The file will be automatically deleted when it is
-    closed.
+    Returns an object with a file-like interface; the name of the file
+    is accessible as file.name.  The file will be automatically deleted
+    when it is closed.
     """
 
     if dir is None:
@@ -451,8 +451,8 @@
         'bufsize' -- the buffer size argument to os.fdopen (default -1).
         The file is created as mkstemp() would do it.
 
-        Returns a file object.  The file has no name, and will cease to
-        exist when it is closed.
+        Returns an object with a file-like interface.  The file has no
+        name, and will cease to exist when it is closed.
         """
 
         if dir is None:

Index: textwrap.py
===================================================================
RCS file: /cvsroot/python/python/dist/src/Lib/textwrap.py,v
retrieving revision 1.12.2.2
retrieving revision 1.12.2.3
diff -u -d -r1.12.2.2 -r1.12.2.3
--- textwrap.py	7 Jan 2005 06:58:11 -0000	1.12.2.2
+++ textwrap.py	16 Oct 2005 05:23:59 -0000	1.12.2.3
@@ -78,9 +78,10 @@
     # splits into
     #   Hello/ /there/ /--/ /you/ /goof-/ball,/ /use/ /the/ /-b/ /option!
     # (after stripping out empty strings).
-    wordsep_re = re.compile(r'(\s+|'                  # any whitespace
-                            r'[^\s\w]*\w{2,}-(?=\w{2,})|' # hyphenated words
-                            r'(?<=[\w\!\"\'\&\.\,\?])-{2,}(?=\w))')   # em-dash
+    wordsep_re = re.compile(
+        r'(\s+|'                                  # any whitespace
+        r'[^\s\w]*\w+[a-zA-Z]-(?=\w+[a-zA-Z])|'   # hyphenated words
+        r'(?<=[\w\!\"\'\&\.\,\?])-{2,}(?=\w))')   # em-dash
 
     # XXX this is not locale- or charset-aware -- string.lowercase
     # is US-ASCII only (and therefore English-only)
@@ -160,7 +161,7 @@
             else:
                 i += 1
 
-    def _handle_long_word(self, chunks, cur_line, cur_len, width):
+    def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
         """_handle_long_word(chunks : [string],
                              cur_line : [string],
                              cur_len : int, width : int)
@@ -173,14 +174,14 @@
         # If we're allowed to break long words, then do so: put as much
         # of the next chunk onto the current line as will fit.
         if self.break_long_words:
-            cur_line.append(chunks[0][0:space_left])
-            chunks[0] = chunks[0][space_left:]
+            cur_line.append(reversed_chunks[-1][:space_left])
+            reversed_chunks[-1] = reversed_chunks[-1][space_left:]
 
         # Otherwise, we have to preserve the long word intact.  Only add
         # it to the current line if there's nothing already there --
         # that minimizes how much we violate the width constraint.
         elif not cur_line:
-            cur_line.append(chunks.pop(0))
+            cur_line.append(reversed_chunks.pop())
 
         # If we're not allowed to break long words, and there's already
         # text on the current line, do nothing.  Next time through the
@@ -205,6 +206,10 @@
         if self.width <= 0:
             raise ValueError("invalid width %r (must be > 0)" % self.width)
 
+        # Arrange in reverse order so items can be efficiently popped
+        # from a stack of chunks.
+        chunks.reverse()
+
         while chunks:
 
             # Start the list of chunks that will make up the current line.
@@ -223,15 +228,15 @@
 
             # First chunk on line is whitespace -- drop it, unless this
             # is the very beginning of the text (ie. no lines started yet).
-            if chunks[0].strip() == '' and lines:
-                del chunks[0]
+            if chunks[-1].strip() == '' and lines:
+                del chunks[-1]
 
             while chunks:
-                l = len(chunks[0])
+                l = len(chunks[-1])
 
                 # Can at least squeeze this chunk onto the current line.
                 if cur_len + l <= width:
-                    cur_line.append(chunks.pop(0))
+                    cur_line.append(chunks.pop())
                     cur_len += l
 
                 # Nope, this line is full.
@@ -240,7 +245,7 @@
 
             # The current line is full, and the next chunk is too big to
             # fit on *any* line (not just this one).
-            if chunks and len(chunks[0]) > width:
+            if chunks and len(chunks[-1]) > width:
                 self._handle_long_word(chunks, cur_line, cur_len, width)
 
             # If the last chunk on this line is all whitespace, drop it.

Index: threading.py
===================================================================
RCS file: /cvsroot/python/python/dist/src/Lib/threading.py,v
retrieving revision 1.24.2.2
retrieving revision 1.24.2.3
diff -u -d -r1.24.2.2 -r1.24.2.3
--- threading.py	7 Jan 2005 06:58:11 -0000	1.24.2.2
+++ threading.py	16 Oct 2005 05:23:59 -0000	1.24.2.3
@@ -102,7 +102,7 @@
             self.__owner = me
             self.__count = 1
             if __debug__:
-                self._note("%s.acquire(%s): initial succes", self, blocking)
+                self._note("%s.acquire(%s): initial success", self, blocking)
         else:
             if __debug__:
                 self._note("%s.acquire(%s): failure", self, blocking)
@@ -358,7 +358,7 @@
 
 # Active thread administration
 _active_limbo_lock = _allocate_lock()
-_active = {}
+_active = {}    # maps thread id to Thread object
 _limbo = {}
 
 
@@ -374,9 +374,11 @@
     __exc_info = _sys.exc_info
 
     def __init__(self, group=None, target=None, name=None,
-                 args=(), kwargs={}, verbose=None):
+                 args=(), kwargs=None, verbose=None):
         assert group is None, "group argument must be None for now"
         _Verbose.__init__(self, verbose)
+        if kwargs is None:
+            kwargs = {}
         self.__target = target
         self.__name = str(name or _newname())
         self.__args = args
@@ -643,8 +645,9 @@
 
 
 # Dummy thread class to represent threads not started here.
-# These aren't garbage collected when they die,
-# nor can they be waited for.
+# These aren't garbage collected when they die, nor can they be waited for.
+# If they invoke anything in threading.py that calls currentThread(), they
+# leave an entry in the _active dict forever after.
 # Their purpose is to return *something* from currentThread().
 # They are marked as daemon threads so we won't wait for them
 # when we exit (conform previous semantics).
@@ -653,6 +656,12 @@
 
     def __init__(self):
         Thread.__init__(self, name=_newname("Dummy-%d"))
+
+        # Thread.__block consumes an OS-level locking primitive, which
+        # can never be used by a _DummyThread.  Since a _DummyThread
+        # instance is immortal, that's bad, so release this resource.
+        del self._Thread__block
+
         self._Thread__started = True
         _active_limbo_lock.acquire()
         _active[_get_ident()] = self

Index: tokenize.py
===================================================================
RCS file: /cvsroot/python/python/dist/src/Lib/tokenize.py,v
retrieving revision 1.32.2.2
retrieving revision 1.32.2.3
diff -u -d -r1.32.2.2 -r1.32.2.3
--- tokenize.py	7 Jan 2005 06:58:11 -0000	1.32.2.2
+++ tokenize.py	16 Oct 2005 05:23:59 -0000	1.32.2.3
@@ -31,7 +31,7 @@
 
 import token
 __all__ = [x for x in dir(token) if x[0] != '_'] + ["COMMENT", "tokenize",
-           "generate_tokens", "NL"]
+           "generate_tokens", "NL", "untokenize"]
 del x
 del token
 
@@ -159,12 +159,55 @@
     for token_info in generate_tokens(readline):
         tokeneater(*token_info)
 
+
+def untokenize(iterable):
+    """Transform tokens back into Python source code.
+
+    Each element returned by the iterable must be a token sequence
+    with at least two elements, a token number and token value.
+
+    Round-trip invariant:
+        # Output text will tokenize back to the input
+        t1 = [tok[:2] for tok in generate_tokens(f.readline)]
+        newcode = untokenize(t1)
+        readline = iter(newcode.splitlines(1)).next
+        t2 = [tok[:2] for tok in generate_tokens(readline)]
+        assert t1 == t2
+    """
+
+    startline = False
+    indents = []
+    toks = []
+    toks_append = toks.append
+    for tok in iterable:
+        toknum, tokval = tok[:2]
+
+        if toknum == NAME:
+            tokval += ' '
+
+        if toknum == INDENT:
+            indents.append(tokval)
+            continue
+        elif toknum == DEDENT:
+            indents.pop()
+            continue
+        elif toknum in (NEWLINE, COMMENT, NL):
+            startline = True
+        elif startline and indents:
+            toks_append(indents[-1])
+            startline = False
+        toks_append(tokval)
+    return ''.join(toks)
+
+
 def generate_tokens(readline):
     """
     The generate_tokens() generator requires one argment, readline, which
     must be a callable object which provides the same interface as the
     readline() method of built-in file objects. Each call to the function
-    should return one line of input as a string.
+    should return one line of input as a string.  Alternatively, readline
+    can be a callable function terminating with StopIteration:
+        readline = open(myfile).next    # Example of alternate readline
 
     The generator produces 5-tuples with these members: the token type; the
     token string; a 2-tuple (srow, scol) of ints specifying the row and
@@ -180,7 +223,10 @@
     indents = [0]
 
     while 1:                                   # loop over lines in stream
-        line = readline()
+        try:
+            line = readline()
+        except StopIteration:
+            line = ''
         lnum = lnum + 1
         pos, max = 0, len(line)
 
@@ -225,6 +271,9 @@
                 indents.append(column)
                 yield (INDENT, line[:pos], (lnum, 0), (lnum, pos), line)
             while column < indents[-1]:
+                if column not in indents:
+                    raise IndentationError(
+                        "unindent does not match any outer indentation level")
                 indents = indents[:-1]
                 yield (DEDENT, '', (lnum, pos), (lnum, pos), line)
 

Index: unittest.py
===================================================================
RCS file: /cvsroot/python/python/dist/src/Lib/unittest.py,v
retrieving revision 1.16.2.2
retrieving revision 1.16.2.3
diff -u -d -r1.16.2.2 -r1.16.2.3
--- unittest.py	7 Jan 2005 06:58:11 -0000	1.16.2.2
+++ unittest.py	16 Oct 2005 05:23:59 -0000	1.16.2.3
@@ -71,7 +71,7 @@
     False, True = 0, 1
     def isinstance(obj, clsinfo):
         import __builtin__
-        if type(clsinfo) in (types.TupleType, types.ListType):
+        if type(clsinfo) in (tuple, list):
             for cls in clsinfo:
                 if cls is type: cls = types.ClassType
                 if __builtin__.isinstance(obj, cls):

Index: urllib.py
===================================================================
RCS file: /cvsroot/python/python/dist/src/Lib/urllib.py,v
retrieving revision 1.148.2.2
retrieving revision 1.148.2.3
diff -u -d -r1.148.2.2 -r1.148.2.3
--- urllib.py	7 Jan 2005 06:58:11 -0000	1.148.2.2
+++ urllib.py	16 Oct 2005 05:23:59 -0000	1.148.2.3
@@ -86,6 +86,11 @@
     if _urlopener:
         _urlopener.cleanup()
 
+# exception raised when downloaded size does not match content-length
+class ContentTooShortError(IOError):
+    def __init__(self, message, content):
+        IOError.__init__(self, message)
+        self.content = content
 
 ftpcache = {}
 class URLopener:
@@ -228,24 +233,31 @@
             self.tempcache[url] = result
         bs = 1024*8
         size = -1
-        blocknum = 1
+        read = 0
+        blocknum = 0
         if reporthook:
             if "content-length" in headers:
                 size = int(headers["Content-Length"])
-            reporthook(0, bs, size)
-        block = fp.read(bs)
-        if reporthook:
-            reporthook(1, bs, size)
-        while block:
-            tfp.write(block)
+            reporthook(blocknum, bs, size)
+        while 1:
             block = fp.read(bs)
-            blocknum = blocknum + 1
+            if block == "":
+                break
+            read += len(block)
+            tfp.write(block)
+            blocknum += 1
             if reporthook:
                 reporthook(blocknum, bs, size)
         fp.close()
         tfp.close()
         del fp
         del tfp
+
+        # raise exception if actual size does not match content-length header
+        if size >= 0 and read < size:
+            raise ContentTooShortError("retrieval incomplete: got only %i out "
+                                       "of %i bytes" % (read, size), result)
+
         return result
 
     # Each method named open_<type> knows how to open that type of URL
@@ -1037,23 +1049,18 @@
         return selector[1], selector[2:]
     return None, selector
 
+_hextochr = dict(('%02x' % i, chr(i)) for i in range(256))
+_hextochr.update(('%02X' % i, chr(i)) for i in range(256))
+
 def unquote(s):
     """unquote('abc%20def') -> 'abc def'."""
-    mychr = chr
-    myatoi = int
-    list = s.split('%')
-    res = [list[0]]
-    myappend = res.append
-    del list[0]
-    for item in list:
-        if item[1:2]:
-            try:
-                myappend(mychr(myatoi(item[:2], 16))
-                     + item[2:])
-            except ValueError:
-                myappend('%' + item)
-        else:
-            myappend('%' + item)
+    res = s.split('%')
+    for i in xrange(1, len(res)):
+        item = res[i]
+        try:
+            res[i] = _hextochr[item[:2]] + item[2:]
+        except KeyError:
+            res[i] = '%' + item
     return "".join(res)
 
 def unquote_plus(s):
@@ -1064,22 +1071,7 @@
 always_safe = ('ABCDEFGHIJKLMNOPQRSTUVWXYZ'
                'abcdefghijklmnopqrstuvwxyz'
                '0123456789' '_.-')
-
-_fast_safe_test = always_safe + '/'
-_fast_safe = None
-
-def _fast_quote(s):
-    global _fast_safe
-    if _fast_safe is None:
-        _fast_safe = {}
-        for c in _fast_safe_test:
-            _fast_safe[c] = c
-    res = list(s)
-    for i in range(len(res)):
-        c = res[i]
-        if not c in _fast_safe:
-            res[i] = '%%%02X' % ord(c)
-    return ''.join(res)
+_safemaps = {}
 
 def quote(s, safe = '/'):
     """quote('abc def') -> 'abc%20def'
@@ -1102,25 +1094,25 @@
     called on a path where the existing slash characters are used as
     reserved characters.
     """
-    safe = always_safe + safe
-    if _fast_safe_test == safe:
-        return _fast_quote(s)
-    res = list(s)
-    for i in range(len(res)):
-        c = res[i]
-        if c not in safe:
-            res[i] = '%%%02X' % ord(c)
+    cachekey = (safe, always_safe)
+    try:
+        safe_map = _safemaps[cachekey]
+    except KeyError:
+        safe += always_safe
+        safe_map = {}
+        for i in range(256):
+            c = chr(i)
+            safe_map[c] = (c in safe) and c or ('%%%02X' % i)
+        _safemaps[cachekey] = safe_map
+    res = map(safe_map.__getitem__, s)
     return ''.join(res)
 
 def quote_plus(s, safe = ''):
     """Quote the query fragment of a URL; replacing ' ' with '+'"""
     if ' ' in s:
-        l = s.split(' ')
-        for i in range(len(l)):
-            l[i] = quote(l[i], safe)
-        return '+'.join(l)
-    else:
-        return quote(s, safe)
+        s = quote(s, safe + ' ')
+        return s.replace(' ', '+')
+    return quote(s, safe)
 
 def urlencode(query,doseq=0):
     """Encode a sequence of two-element tuples or dictionary into a URL query string.

Index: urllib2.py
===================================================================
RCS file: /cvsroot/python/python/dist/src/Lib/urllib2.py,v
retrieving revision 1.31.2.2
retrieving revision 1.31.2.3
diff -u -d -r1.31.2.2 -r1.31.2.3
--- urllib2.py	7 Jan 2005 06:58:11 -0000	1.31.2.2
+++ urllib2.py	16 Oct 2005 05:23:59 -0000	1.31.2.3
@@ -277,8 +277,8 @@
 
 class OpenerDirector:
     def __init__(self):
-        server_version = "Python-urllib/%s" % __version__
-        self.addheaders = [('User-agent', server_version)]
+        client_version = "Python-urllib/%s" % __version__
+        self.addheaders = [('User-agent', client_version)]
         # manage the individual handlers
         self.handlers = []
         self.handle_open = {}
@@ -304,10 +304,13 @@
                 self.handle_error[protocol] = lookup
             elif condition == "open":
                 kind = protocol
-                lookup = getattr(self, "handle_"+condition)
-            elif condition in ["response", "request"]:
+                lookup = self.handle_open
+            elif condition == "response":
                 kind = protocol
-                lookup = getattr(self, "process_"+condition)
+                lookup = self.process_response
+            elif condition == "request":
+                kind = protocol
+                lookup = self.process_request
             else:
                 continue
 
@@ -381,7 +384,7 @@
                                 'unknown_open', req)
 
     def error(self, proto, *args):
-        if proto in ['http', 'https']:
+        if proto in ('http', 'https'):
             # XXX http[s] protocols are special-cased
             dict = self.handle_error['http'] # https is not different than http
             proto = args[2]  # YUCK!
@@ -582,7 +585,7 @@
             if ':' in user_pass:
                 user, password = user_pass.split(':', 1)
                 user_pass = base64.encodestring('%s:%s' % (unquote(user),
-                                                           unquote(password)))
+                                                unquote(password))).strip()
                 req.add_header('Proxy-authorization', 'Basic ' + user_pass)
         host = unquote(host)
         req.set_proxy(host, type)
@@ -859,7 +862,7 @@
             entdig = None
 
         A1 = "%s:%s:%s" % (user, realm, pw)
-        A2 = "%s:%s" % (req.has_data() and 'POST' or 'GET',
+        A2 = "%s:%s" % (req.get_method(),
                         # XXX selector: what about proxies and full urls
                         req.get_selector())
         if qop == 'auth':
@@ -1069,46 +1072,43 @@
 
     In particular, parse comma-separated lists where the elements of
     the list may include quoted-strings.  A quoted-string could
-    contain a comma.
+    contain a comma.  A non-quoted string could have quotes in the
+    middle.  Neither commas nor quotes count if they are escaped.
+    Only double-quotes count, not single-quotes.
     """
-    # XXX this function could probably use more testing
+    res = []
+    part = ''
 
-    list = []
-    end = len(s)
-    i = 0
-    inquote = 0
-    start = 0
-    while i < end:
-        cur = s[i:]
-        c = cur.find(',')
-        q = cur.find('"')
-        if c == -1:
-            list.append(s[start:])
-            break
-        if q == -1:
-            if inquote:
-                raise ValueError, "unbalanced quotes"
-            else:
-                list.append(s[start:i+c])
-                i = i + c + 1
+    escape = quote = False
+    for cur in s:
+        if escape:
+            part += cur
+            escape = False
+            continue
+        if quote:
+            if cur == '\\':
+                escape = True
                 continue
-        if inquote:
-            if q < c:
-                list.append(s[start:i+c])
-                i = i + c + 1
-                start = i
-                inquote = 0
-            else:
-                i = i + q
-        else:
-            if c < q:
-                list.append(s[start:i+c])
-                i = i + c + 1
-                start = i
-            else:
-                inquote = 1
-                i = i + q + 1
-    return map(lambda x: x.strip(), list)
+            elif cur == '"':
+                quote = False
+            part += cur
+            continue
+
+        if cur == ',':
+            res.append(part)
+            part = ''
+            continue
+
+        if cur == '"':
+            quote = True
+
+        part += cur
+
+    # append last part
+    if part:
+        res.append(part)
+
+    return [part.strip() for part in res]
 
 class FileHandler(BaseHandler):
     # Use local file or FTP depending on form of URL
@@ -1290,3 +1290,52 @@
             if inspect.isclass(ph):
                 ph = ph()
             opener.add_handler(ph)
+
+# Mapping status codes to official W3C names
+httpresponses = {
+    100: 'Continue',
+    101: 'Switching Protocols',
+
+    200: 'OK',
+    201: 'Created',
+    202: 'Accepted',
+    203: 'Non-Authoritative Information',
+    204: 'No Content',
+    205: 'Reset Content',
+    206: 'Partial Content',
+
+    300: 'Multiple Choices',
+    301: 'Moved Permanently',
+    302: 'Found',
+    303: 'See Other',
+    304: 'Not Modified',
+    305: 'Use Proxy',
+    306: '(Unused)',
+    307: 'Temporary Redirect',
+
+    400: 'Bad Request',
+    401: 'Unauthorized',
+    402: 'Payment Required',
+    403: 'Forbidden',
+    404: 'Not Found',
+    405: 'Method Not Allowed',
+    406: 'Not Acceptable',
+    407: 'Proxy Authentication Required',
+    408: 'Request Timeout',
+    409: 'Conflict',
+    410: 'Gone',
+    411: 'Length Required',
+    412: 'Precondition Failed',
+    413: 'Request Entity Too Large',
+    414: 'Request-URI Too Long',
+    415: 'Unsupported Media Type',
+    416: 'Requested Range Not Satisfiable',
+    417: 'Expectation Failed',
+
+    500: 'Internal Server Error',
+    501: 'Not Implemented',
+    502: 'Bad Gateway',
+    503: 'Service Unavailable',
+    504: 'Gateway Timeout',
+    505: 'HTTP Version Not Supported',
+}

Index: urlparse.py
===================================================================
RCS file: /cvsroot/python/python/dist/src/Lib/urlparse.py,v
retrieving revision 1.32.2.2
retrieving revision 1.32.2.3
diff -u -d -r1.32.2.2 -r1.32.2.3
--- urlparse.py	7 Jan 2005 06:58:11 -0000	1.32.2.2
+++ urlparse.py	16 Oct 2005 05:23:59 -0000	1.32.2.3
@@ -13,7 +13,8 @@
                                'prospero', 'rtsp', 'rtspu', '']
 uses_netloc = ['ftp', 'http', 'gopher', 'nntp', 'telnet',
                              'imap', 'wais', 'file', 'mms', 'https', 'shttp',
-                             'snews', 'prospero', 'rtsp', 'rtspu', 'rsync', '']
+                             'snews', 'prospero', 'rtsp', 'rtspu', 'rsync', '',
+                             'svn', 'svn+ssh']
 non_hierarchical = ['gopher', 'hdl', 'mailto', 'news',
                                   'telnet', 'wais', 'imap', 'snews', 'sip']
 uses_params = ['ftp', 'hdl', 'prospero', 'http', 'imap',
@@ -63,6 +64,15 @@
         i = url.find(';')
     return url[:i], url[i+1:]
 
+def _splitnetloc(url, start=0):
+    for c in '/?#': # the order is important!
+        delim = url.find(c, start)
+        if delim >= 0:
+            break
+    else:
+        delim = len(url)
+    return url[start:delim], url[delim:]
+
 def urlsplit(url, scheme='', allow_fragments=1):
     """Parse a URL into 5 components:
     <scheme>://<netloc>/<path>?<query>#<fragment>
@@ -82,13 +92,7 @@
             scheme = url[:i].lower()
             url = url[i+1:]
             if url[:2] == '//':
-                i = url.find('/', 2)
-                if i < 0:
-                    i = url.find('#')
-                    if i < 0:
-                        i = len(url)
-                netloc = url[2:i]
-                url = url[i:]
+                netloc, url = _splitnetloc(url, 2)
             if allow_fragments and '#' in url:
                 url, fragment = url.split('#', 1)
             if '?' in url:
@@ -101,12 +105,8 @@
                 break
         else:
             scheme, url = url[:i].lower(), url[i+1:]
-    if scheme in uses_netloc:
-        if url[:2] == '//':
-            i = url.find('/', 2)
-            if i < 0:
-                i = len(url)
-            netloc, url = url[2:i], url[i:]
+    if scheme in uses_netloc and url[:2] == '//':
+        netloc, url = _splitnetloc(url, 2)
     if allow_fragments and scheme in uses_fragment and '#' in url:
         url, fragment = url.split('#', 1)
     if scheme in uses_query and '?' in url:

Index: warnings.py
===================================================================
RCS file: /cvsroot/python/python/dist/src/Lib/warnings.py,v
retrieving revision 1.16.2.2
retrieving revision 1.16.2.3
diff -u -d -r1.16.2.2 -r1.16.2.3
--- warnings.py	7 Jan 2005 06:58:11 -0000	1.16.2.2
+++ warnings.py	16 Oct 2005 05:23:59 -0000	1.16.2.3
@@ -50,7 +50,11 @@
             filename = filename[:-1]
     else:
         if module == "__main__":
-            filename = sys.argv[0]
+            try:
+                filename = sys.argv[0]
+            except AttributeError:
+                # embedded interpreters don't have sys.argv, see bug #839151
+                filename = '__main__'
         if not filename:
             filename = module
     registry = globals.setdefault("__warningregistry__", {})
@@ -216,7 +220,7 @@
     if not action:
         return "default"
     if action == "all": return "always" # Alias
-    for a in ['default', 'always', 'ignore', 'module', 'once', 'error']:
+    for a in ('default', 'always', 'ignore', 'module', 'once', 'error'):
         if a.startswith(action):
             return a
     raise _OptionError("invalid action: %r" % (action,))

Index: weakref.py
===================================================================
RCS file: /cvsroot/python/python/dist/src/Lib/weakref.py,v
retrieving revision 1.17.2.2
retrieving revision 1.17.2.3
diff -u -d -r1.17.2.2 -r1.17.2.3
--- weakref.py	7 Jan 2005 06:58:11 -0000	1.17.2.2
+++ weakref.py	16 Oct 2005 05:23:59 -0000	1.17.2.3
@@ -43,12 +43,12 @@
     # way in).
 
     def __init__(self, *args, **kw):
-        UserDict.UserDict.__init__(self, *args, **kw)
         def remove(wr, selfref=ref(self)):
             self = selfref()
             if self is not None:
                 del self.data[wr.key]
         self._remove = remove
+        UserDict.UserDict.__init__(self, *args, **kw)
 
     def __getitem__(self, key):
         o = self.data[key]()

Index: webbrowser.py
===================================================================
RCS file: /cvsroot/python/python/dist/src/Lib/webbrowser.py,v
retrieving revision 1.32.2.2
retrieving revision 1.32.2.3
diff -u -d -r1.32.2.2 -r1.32.2.3
--- webbrowser.py	7 Jan 2005 06:58:11 -0000	1.32.2.2
+++ webbrowser.py	16 Oct 2005 05:23:59 -0000	1.32.2.3
@@ -1,9 +1,11 @@
+#! /usr/bin/env python
 """Interfaces for launching and remotely controlling Web browsers."""
 
 import os
 import sys
+import stat
 
-__all__ = ["Error", "open", "get", "register"]
+__all__ = ["Error", "open", "open_new", "open_new_tab", "get", "register"]
 
 class Error(Exception):
     pass
@@ -11,9 +13,13 @@
 _browsers = {}          # Dictionary of available browser controllers
 _tryorder = []          # Preference order of available browsers
 
-def register(name, klass, instance=None):
+def register(name, klass, instance=None, update_tryorder=1):
     """Register a browser connector and, optionally, connection."""
     _browsers[name.lower()] = [klass, instance]
+    if update_tryorder > 0:
+        _tryorder.append(name)
+    elif update_tryorder < 0:
+        _tryorder.insert(0, name)
 
 def get(using=None):
     """Return a browser launcher instance appropriate for the environment."""
@@ -26,27 +32,36 @@
             # User gave us a command line, don't mess with it.
             return GenericBrowser(browser)
         else:
-            # User gave us a browser name.
+            # User gave us a browser name or path.
             try:
                 command = _browsers[browser.lower()]
             except KeyError:
                 command = _synthesize(browser)
-            if command[1] is None:
-                return command[0]()
-            else:
+            if command[1] is not None:
                 return command[1]
+            elif command[0] is not None:
+                return command[0]()
     raise Error("could not locate runnable browser")
 
 # Please note: the following definition hides a builtin function.
+# It is recommended one does "import webbrowser" and uses webbrowser.open(url)
+# instead of "from webbrowser import *".
 
 def open(url, new=0, autoraise=1):
-    get().open(url, new, autoraise)
+    for name in _tryorder:
+        browser = get(name)
+        if browser.open(url, new, autoraise):
+            return True
+    return False
 
 def open_new(url):
-    get().open(url, 1)
+    return open(url, 1)
 
+def open_new_tab(url):
+    return open(url, 2)
 
-def _synthesize(browser):
+
+def _synthesize(browser, update_tryorder=1):
     """Attempt to synthesize a controller base on existing controllers.
 
     This is useful to create a controller when a user specifies a path to
@@ -58,9 +73,10 @@
     executable for the requested browser, return [None, None].
 
     """
-    if not os.path.exists(browser):
+    cmd = browser.split()[0]
+    if not _iscommand(cmd):
         return [None, None]
-    name = os.path.basename(browser)
+    name = os.path.basename(cmd)
     try:
         command = _browsers[name.lower()]
     except KeyError:
@@ -72,27 +88,67 @@
         controller = copy.copy(controller)
         controller.name = browser
         controller.basename = os.path.basename(browser)
-        register(browser, None, controller)
+        register(browser, None, controller, update_tryorder)
         return [None, controller]
     return [None, None]
 
 
+if sys.platform[:3] == "win":
+    def _isexecutable(cmd):
+        cmd = cmd.lower()
+        if os.path.isfile(cmd) and (cmd.endswith(".exe") or 
+                                    cmd.endswith(".bat")):
+            return True
+        for ext in ".exe", ".bat":
+            if os.path.isfile(cmd + ext):
+                return True
+        return False
+else:
+    def _isexecutable(cmd):
+        if os.path.isfile(cmd):
+            mode = os.stat(cmd)[stat.ST_MODE]
+            if mode & stat.S_IXUSR or mode & stat.S_IXGRP or mode & stat.S_IXOTH:
+                return True
+        return False
+
 def _iscommand(cmd):
-    """Return True if cmd can be found on the executable search path."""
+    """Return True if cmd is executable or can be found on the executable
+    search path."""
+    if _isexecutable(cmd):
+        return True
     path = os.environ.get("PATH")
     if not path:
         return False
     for d in path.split(os.pathsep):
         exe = os.path.join(d, cmd)
-        if os.path.isfile(exe):
+        if _isexecutable(exe):
             return True
     return False
 
 
-PROCESS_CREATION_DELAY = 4
+# General parent classes
 
+class BaseBrowser(object):
+    """Parent class for all browsers."""
+
+    def __init__(self, name=""):
+        self.name = name
+        self.basename = name
+    
+    def open(self, url, new=0, autoraise=1):
+        raise NotImplementedError
+
+    def open_new(self, url):
+        return self.open(url, 1)
+
+    def open_new_tab(self, url):
+        return self.open(url, 2)
+
+
+class GenericBrowser(BaseBrowser):
+    """Class for all browsers started with a command
+       and without remote functionality."""
 
-class GenericBrowser:
     def __init__(self, cmd):
         self.name, self.args = cmd.split(None, 1)
         self.basename = os.path.basename(self.name)
@@ -100,104 +156,136 @@
     def open(self, url, new=0, autoraise=1):
         assert "'" not in url
         command = "%s %s" % (self.name, self.args)
-        os.system(command % url)
+        rc = os.system(command % url)
+        return not rc
 
-    def open_new(self, url):
-        self.open(url)
 
+class UnixBrowser(BaseBrowser):
+    """Parent class for all Unix browsers with remote functionality."""
 
-class Netscape:
-    "Launcher class for Netscape browsers."
-    def __init__(self, name):
-        self.name = name
-        self.basename = os.path.basename(name)
+    raise_opts = None
 
-    def _remote(self, action, autoraise):
-        raise_opt = ("-noraise", "-raise")[autoraise]
-        cmd = "%s %s -remote '%s' >/dev/null 2>&1" % (self.name,
-                                                      raise_opt,
-                                                      action)
+    remote_cmd = ''
+    remote_action = None
+    remote_action_newwin = None
+    remote_action_newtab = None
+    remote_background = False
+
+    def _remote(self, url, action, autoraise):
+        autoraise = int(bool(autoraise)) # always 0/1
+        raise_opt = self.raise_opts and self.raise_opts[autoraise] or ''
+        cmd = "%s %s %s '%s' >/dev/null 2>&1" % (self.name, raise_opt,
+                                                 self.remote_cmd, action)
+        if self.remote_background:
+            cmd += ' &'
         rc = os.system(cmd)
         if rc:
-            import time
-            os.system("%s &" % self.name)
-            time.sleep(PROCESS_CREATION_DELAY)
-            rc = os.system(cmd)
+            # bad return status, try again with simpler command
+            rc = os.system("%s %s" % (self.name, url))
         return not rc
 
     def open(self, url, new=0, autoraise=1):
-        if new:
-            self._remote("openURL(%s, new-window)"%url, autoraise)
+        assert "'" not in url
+        if new == 0:
+            action = self.remote_action
+        elif new == 1:
+            action = self.remote_action_newwin
+        elif new == 2:
+            if self.remote_action_newtab is None:
+                action = self.remote_action_newwin
+            else:
+                action = self.remote_action_newtab
         else:
-            self._remote("openURL(%s)" % url, autoraise)
+            raise Error("Bad 'new' parameter to open(); expected 0, 1, or 2, got %s" % new)
+        return self._remote(url, action % url, autoraise)
 
-    def open_new(self, url):
-        self.open(url, 1)
 
+class Mozilla(UnixBrowser):
+    """Launcher class for Mozilla/Netscape browsers."""
 
-class Galeon:
-    """Launcher class for Galeon browsers."""
-    def __init__(self, name):
-        self.name = name
-        self.basename = os.path.basename(name)
+    raise_opts = ("-noraise", "-raise")
 
-    def _remote(self, action, autoraise):
-        raise_opt = ("--noraise", "")[autoraise]
-        cmd = "%s %s %s >/dev/null 2>&1" % (self.name, raise_opt, action)
-        rc = os.system(cmd)
-        if rc:
-            import time
-            os.system("%s >/dev/null 2>&1 &" % self.name)
-            time.sleep(PROCESS_CREATION_DELAY)
-            rc = os.system(cmd)
-        return not rc
+    remote_cmd = '-remote'
+    remote_action = "openURL(%s)"
+    remote_action_newwin = "openURL(%s,new-window)"
+    remote_action_newtab = "openURL(%s,new-tab)"
 
-    def open(self, url, new=0, autoraise=1):
-        if new:
-            self._remote("-w '%s'" % url, autoraise)
-        else:
-            self._remote("-n '%s'" % url, autoraise)
+Netscape = Mozilla
 
-    def open_new(self, url):
-        self.open(url, 1)
 
+class Galeon(UnixBrowser):
+    """Launcher class for Galeon/Epiphany browsers."""
 
-class Konqueror:
+    raise_opts = ("-noraise", "")
+    remote_action = "-n '%s'"
+    remote_action_newwin = "-w '%s'"
+
+    remote_background = True
+
+
+class Konqueror(BaseBrowser):
     """Controller for the KDE File Manager (kfm, or Konqueror).
 
     See http://developer.kde.org/documentation/other/kfmclient.html
     for more information on the Konqueror remote-control interface.
 
     """
-    def __init__(self):
-        if _iscommand("konqueror"):
-            self.name = self.basename = "konqueror"
-        else:
-            self.name = self.basename = "kfm"
 
-    def _remote(self, action):
+    def _remote(self, url, action):
+        # kfmclient is the new KDE way of opening URLs.
         cmd = "kfmclient %s >/dev/null 2>&1" % action
         rc = os.system(cmd)
+        # Fall back to other variants.
         if rc:
-            import time
-            if self.basename == "konqueror":
-                os.system(self.name + " --silent &")
-            else:
-                os.system(self.name + " -d &")
-            time.sleep(PROCESS_CREATION_DELAY)
-            rc = os.system(cmd)
+            if _iscommand("konqueror"):
+                rc = os.system(self.name + " --silent '%s' &" % url)
+            elif _iscommand("kfm"):
+                rc = os.system(self.name + " -d '%s'" % url)
         return not rc
 
-    def open(self, url, new=1, autoraise=1):
+    def open(self, url, new=0, autoraise=1):
         # XXX Currently I know no way to prevent KFM from
         # opening a new win.
         assert "'" not in url
-        self._remote("openURL '%s'" % url)
+        if new == 2:
+            action = "newTab '%s'" % url
+        else:
+            action = "openURL '%s'" % url
+        ok = self._remote(url, action)
+        return ok
 
-    open_new = open
 
+class Opera(UnixBrowser):
+    "Launcher class for Opera browser."
 
-class Grail:
+    raise_opts = ("", "-raise")
+
+    remote_cmd = '-remote'
+    remote_action = "openURL(%s)"
+    remote_action_newwin = "openURL(%s,new-window)"
+    remote_action_newtab = "openURL(%s,new-page)"
+
+
+class Elinks(UnixBrowser):
+    "Launcher class for Elinks browsers."
+
+    remote_cmd = '-remote'
+    remote_action = "openURL(%s)"
+    remote_action_newwin = "openURL(%s,new-window)"
+    remote_action_newtab = "openURL(%s,new-tab)"
+
+    def _remote(self, url, action, autoraise):
+        # elinks doesn't like its stdout to be redirected -
+        # it uses redirected stdout as a signal to do -dump
+        cmd = "%s %s '%s' 2>/dev/null" % (self.name,
+                                          self.remote_cmd, action)
+        rc = os.system(cmd)
+        if rc:
+            rc = os.system("%s %s" % (self.name, url))
+        return not rc
+
+
+class Grail(BaseBrowser):
     # There should be a way to maintain a connection to Grail, but the
     # Grail remote control protocol doesn't really allow that at this
     # point.  It probably never will!
@@ -237,93 +325,101 @@
 
     def open(self, url, new=0, autoraise=1):
         if new:
-            self._remote("LOADNEW " + url)
+            ok = self._remote("LOADNEW " + url)
         else:
-            self._remote("LOAD " + url)
-
-    def open_new(self, url):
-        self.open(url, 1)
-
-
-class WindowsDefault:
-    def open(self, url, new=0, autoraise=1):
-        os.startfile(url)
+            ok = self._remote("LOAD " + url)
+        return ok
 
-    def open_new(self, url):
-        self.open(url)
 
 #
 # Platform support for Unix
 #
 
-# This is the right test because all these Unix browsers require either
-# a console terminal of an X display to run.  Note that we cannot split
-# the TERM and DISPLAY cases, because we might be running Python from inside
-# an xterm.
-if os.environ.get("TERM") or os.environ.get("DISPLAY"):
-    _tryorder = ["links", "lynx", "w3m"]
-
-    # Easy cases first -- register console browsers if we have them.
-    if os.environ.get("TERM"):
-        # The Links browser <http://artax.karlin.mff.cuni.cz/~mikulas/links/>
-        if _iscommand("links"):
-            register("links", None, GenericBrowser("links '%s'"))
-        # The Lynx browser <http://lynx.browser.org/>
-        if _iscommand("lynx"):
-            register("lynx", None, GenericBrowser("lynx '%s'"))
-        # The w3m browser <http://ei5nazha.yz.yamagata-u.ac.jp/~aito/w3m/eng/>
-        if _iscommand("w3m"):
-            register("w3m", None, GenericBrowser("w3m '%s'"))
+# These are the right tests because all these Unix browsers require either
+# a console terminal or an X display to run.
 
-    # X browsers have more in the way of options
-    if os.environ.get("DISPLAY"):
-        _tryorder = ["galeon", "skipstone",
-                     "mozilla-firefox", "mozilla-firebird", "mozilla", "netscape",
-                     "kfm", "grail"] + _tryorder
+def register_X_browsers():
+    # First, the Mozilla/Netscape browsers
+    for browser in ("mozilla-firefox", "firefox",
+                    "mozilla-firebird", "firebird",
+                    "mozilla", "netscape"):
+        if _iscommand(browser):
+            register(browser, None, Mozilla(browser))
 
-        # First, the Netscape series
-        for browser in ("mozilla-firefox", "mozilla-firebird",
-                        "mozilla", "netscape"):
-            if _iscommand(browser):
-                register(browser, None, Netscape(browser))
+    # The default Gnome browser
+    if _iscommand("gconftool-2"):
+        # get the web browser string from gconftool
+        gc = 'gconftool-2 -g /desktop/gnome/url-handlers/http/command'
+        out = os.popen(gc)
+        commd = out.read().strip()
+        retncode = out.close()
 
-        # Next, Mosaic -- old but still in use.
-        if _iscommand("mosaic"):
-            register("mosaic", None, GenericBrowser(
-                "mosaic '%s' >/dev/null &"))
+        # if successful, register it
+        if retncode == None and len(commd) != 0:
+            register("gnome", None, GenericBrowser(
+                commd + " '%s' >/dev/null &"))
 
-        # Gnome's Galeon
-        if _iscommand("galeon"):
-            register("galeon", None, Galeon("galeon"))
+    # Konqueror/kfm, the KDE browser.
+    if _iscommand("kfm"):
+        register("kfm", Konqueror, Konqueror("kfm"))
+    elif _iscommand("konqueror"):
+        register("konqueror", Konqueror, Konqueror("konqueror"))
 
-        # Skipstone, another Gtk/Mozilla based browser
-        if _iscommand("skipstone"):
-            register("skipstone", None, GenericBrowser(
-                "skipstone '%s' >/dev/null &"))
+    # Gnome's Galeon and Epiphany
+    for browser in ("galeon", "epiphany"):
+        if _iscommand(browser):
+            register(browser, None, Galeon(browser))
 
-        # Konqueror/kfm, the KDE browser.
-        if _iscommand("kfm") or _iscommand("konqueror"):
-            register("kfm", Konqueror, Konqueror())
+    # Skipstone, another Gtk/Mozilla based browser
+    if _iscommand("skipstone"):
+        register("skipstone", None, GenericBrowser("skipstone '%s' &"))
 
-        # Grail, the Python browser.
-        if _iscommand("grail"):
-            register("grail", Grail, None)
+    # Opera, quite popular
+    if _iscommand("opera"):
+        register("opera", None, Opera("opera"))
 
+    # Next, Mosaic -- old but still in use.
+    if _iscommand("mosaic"):
+        register("mosaic", None, GenericBrowser("mosaic '%s' &"))
 
-class InternetConfig:
-    def open(self, url, new=0, autoraise=1):
-        ic.launchurl(url)
+    # Grail, the Python browser. Does anybody still use it?
+    if _iscommand("grail"):
+        register("grail", Grail, None)
 
-    def open_new(self, url):
-        self.open(url)
+# Prefer X browsers if present
+if os.environ.get("DISPLAY"):
+    register_X_browsers()
 
+# Also try console browsers
+if os.environ.get("TERM"):
+    # The Links/elinks browsers <http://artax.karlin.mff.cuni.cz/~mikulas/links/>
+    if _iscommand("links"):
+        register("links", None, GenericBrowser("links '%s'"))
+    if _iscommand("elinks"):
+        register("elinks", None, Elinks("elinks"))
+    # The Lynx browser <http://lynx.isc.org/>, <http://lynx.browser.org/>
+    if _iscommand("lynx"):
+        register("lynx", None, GenericBrowser("lynx '%s'"))
+    # The w3m browser <http://w3m.sourceforge.net/>
+    if _iscommand("w3m"):
+        register("w3m", None, GenericBrowser("w3m '%s'"))
 
 #
 # Platform support for Windows
 #
 
 if sys.platform[:3] == "win":
-    _tryorder = ["netscape", "windows-default"]
+    class WindowsDefault(BaseBrowser):
+        def open(self, url, new=0, autoraise=1):
+            os.startfile(url)
+            return True # Oh, my...
+
+    _tryorder = []
+    _browsers = {}
+    # Prefer mozilla/netscape/opera if present
+    for browser in ("firefox", "firebird", "mozilla", "netscape", "opera"):
+        if _iscommand(browser):
+            register(browser, None, GenericBrowser(browser + ' %s'))
     register("windows-default", WindowsDefault)
 
 #
@@ -335,36 +431,112 @@
 except ImportError:
     pass
 else:
-    # internet-config is the only supported controller on MacOS,
-    # so don't mess with the default!
-    _tryorder = ["internet-config"]
-    register("internet-config", InternetConfig)
+    class InternetConfig(BaseBrowser):
+        def open(self, url, new=0, autoraise=1):
+            ic.launchurl(url)
+            return True # Any way to get status?
+
+    register("internet-config", InternetConfig, update_tryorder=-1)
+
+if sys.platform == 'darwin':
+    # Adapted from patch submitted to SourceForge by Steven J. Burr
+    class MacOSX(BaseBrowser):
+        """Launcher class for Aqua browsers on Mac OS X
+
+        Optionally specify a browser name on instantiation.  Note that this
+        will not work for Aqua browsers if the user has moved the application
+        package after installation.
+
+        If no browser is specified, the default browser, as specified in the
+        Internet System Preferences panel, will be used.
+        """
+        def __init__(self, name):
+            self.name = name
+
+        def open(self, url, new=0, autoraise=1):
+            assert "'" not in url
+            # new must be 0 or 1
+            new = int(bool(new))
+            if self.name == "default":
+                # User called open, open_new or get without a browser parameter
+                script = _safequote('open location "%s"', url) # opens in default browser
+            else:
+                # User called get and chose a browser
+                if self.name == "OmniWeb":
+                    toWindow = ""
+                else:
+                    # Include toWindow parameter of OpenURL command for browsers
+                    # that support it.  0 == new window; -1 == existing
+                    toWindow = "toWindow %d" % (new - 1)
+                cmd = _safequote('OpenURL "%s"', url)
+                script = '''tell application "%s"
+                                activate
+                                %s %s
+                            end tell''' % (self.name, cmd, toWindow)
+            # Open pipe to AppleScript through osascript command
+            osapipe = os.popen("osascript", "w")
+            if osapipe is None:
+                return False
+            # Write script to osascript's stdin
+            osapipe.write(script)
+            rc = osapipe.close()
+            return not rc
+
+    # Don't clear _tryorder or _browsers since OS X can use above Unix support
+    # (but we prefer using the OS X specific stuff)
+    register("MacOSX", None, MacOSX('default'), -1)
+
 
 #
 # Platform support for OS/2
 #
 
-if sys.platform[:3] == "os2" and _iscommand("netscape.exe"):
-    _tryorder = ["os2netscape"]
+if sys.platform[:3] == "os2" and _iscommand("netscape"):
+    _tryorder = []
+    _browsers = {}
     register("os2netscape", None,
-             GenericBrowser("start netscape.exe %s"))
+             GenericBrowser("start netscape %s"), -1)
+
 
 # OK, now that we know what the default preference orders for each
 # platform are, allow user to override them with the BROWSER variable.
-#
 if "BROWSER" in os.environ:
-    # It's the user's responsibility to register handlers for any unknown
-    # browser referenced by this value, before calling open().
-    _tryorder = os.environ["BROWSER"].split(os.pathsep)
+    _userchoices = os.environ["BROWSER"].split(os.pathsep)
+    _userchoices.reverse()
 
-for cmd in _tryorder:
-    if not cmd.lower() in _browsers:
-        if _iscommand(cmd.lower()):
-            register(cmd.lower(), None, GenericBrowser(
-                "%s '%%s'" % cmd.lower()))
-cmd = None # to make del work if _tryorder was empty
-del cmd
+    # Treat choices in same way as if passed into get() but do register
+    # and prepend to _tryorder
+    for cmdline in _userchoices:
+        if cmdline != '':
+            _synthesize(cmdline, -1)
+    cmdline = None # to make del work if _userchoices was empty
+    del cmdline
+    del _userchoices
 
-_tryorder = filter(lambda x: x.lower() in _browsers
-                   or x.find("%s") > -1, _tryorder)
 # what to do if _tryorder is now empty?
+
+
+def main():
+    import getopt
+    usage = """Usage: %s [-n | -t] url
+    -n: open new window
+    -t: open new tab""" % sys.argv[0]
+    try:
+        opts, args = getopt.getopt(sys.argv[1:], 'ntd')
+    except getopt.error, msg:
+        print >>sys.stderr, msg
+        print >>sys.stderr, usage
+        sys.exit(1)
+    new_win = 0
+    for o, a in opts:
+        if o == '-n': new_win = 1
+        elif o == '-t': new_win = 2
+    if len(args) <> 1:
+        print >>sys.stderr, usage
+        sys.exit(1)
+
+    url = args[0]
+    open(url, new_win)
+
+if __name__ == "__main__":
+    main()

Index: whichdb.py
===================================================================
RCS file: /cvsroot/python/python/dist/src/Lib/whichdb.py,v
retrieving revision 1.12.10.2
retrieving revision 1.12.10.3
diff -u -d -r1.12.10.2 -r1.12.10.3
--- whichdb.py	7 Jan 2005 06:58:11 -0000	1.12.10.2
+++ whichdb.py	16 Oct 2005 05:23:59 -0000	1.12.10.3
@@ -62,7 +62,7 @@
             return "dumbdbm"
         f = open(filename + os.extsep + "dir", "rb")
         try:
-            if f.read(1) in ["'", '"']:
+            if f.read(1) in ("'", '"'):
                 return "dumbdbm"
         finally:
             f.close()

Index: xdrlib.py
===================================================================
RCS file: /cvsroot/python/python/dist/src/Lib/xdrlib.py,v
retrieving revision 1.14.2.2
retrieving revision 1.14.2.3
diff -u -d -r1.14.2.2 -r1.14.2.3
--- xdrlib.py	7 Jan 2005 06:58:11 -0000	1.14.2.2
+++ xdrlib.py	16 Oct 2005 05:23:59 -0000	1.14.2.3
@@ -79,8 +79,8 @@
     def pack_fstring(self, n, s):
         if n < 0:
             raise ValueError, 'fstring size must be nonnegative'
-        n = ((n+3)/4)*4
         data = s[:n]
+        n = ((n+3)/4)*4
         data = data + (n - len(data)) * '\0'
         self.__buf.write(data)
 
@@ -157,7 +157,9 @@
         return struct.unpack('>l', data)[0]
 
     unpack_enum = unpack_int
-    unpack_bool = unpack_int
+
+    def unpack_bool(self):
+        return bool(self.unpack_int())
 
     def unpack_uhyper(self):
         hi = self.unpack_uint()
@@ -232,8 +234,8 @@
     p = Packer()
     packtest = [
         (p.pack_uint,    (9,)),
-        (p.pack_bool,    (None,)),
-        (p.pack_bool,    ('hello',)),
+        (p.pack_bool,    (True,)),
+        (p.pack_bool,    (False,)),
         (p.pack_uhyper,  (45L,)),
         (p.pack_float,   (1.9,)),
         (p.pack_double,  (1.9,)),
@@ -257,8 +259,8 @@
     up = Unpacker(data)
     unpacktest = [
         (up.unpack_uint,   (), lambda x: x == 9),
-        (up.unpack_bool,   (), lambda x: not x),
-        (up.unpack_bool,   (), lambda x: x),
+        (up.unpack_bool,   (), lambda x: x is True),
+        (up.unpack_bool,   (), lambda x: x is False),
         (up.unpack_uhyper, (), lambda x: x == 45L),
         (up.unpack_float,  (), lambda x: 1.89 < x < 1.91),
         (up.unpack_double, (), lambda x: 1.89 < x < 1.91),

Index: xmlrpclib.py
===================================================================
RCS file: /cvsroot/python/python/dist/src/Lib/xmlrpclib.py,v
retrieving revision 1.20.2.2
retrieving revision 1.20.2.3
diff -u -d -r1.20.2.2 -r1.20.2.3
--- xmlrpclib.py	7 Jan 2005 06:58:12 -0000	1.20.2.2
+++ xmlrpclib.py	16 Oct 2005 05:23:59 -0000	1.20.2.3
@@ -149,6 +149,11 @@
     unicode = None # unicode support not available
 
 try:
+    import datetime
+except ImportError:
+    datetime = None
+
+try:
     _bool_is_builtin = False.__class__.__name__ == "bool"
 except NameError:
     _bool_is_builtin = 0
@@ -168,7 +173,7 @@
     def _stringify(string):
         # convert to 7-bit ascii if possible
         try:
-            return str(string)
+            return string.encode("ascii")
         except UnicodeError:
             return string
 else:
@@ -349,6 +354,16 @@
 
     def __init__(self, value=0):
         if not isinstance(value, StringType):
+            if datetime and isinstance(value, datetime.datetime):
+                self.value = value.strftime("%Y%m%dT%H:%M:%S")
+                return
+            if datetime and isinstance(value, datetime.date):
+                self.value = value.strftime("%Y%m%dT%H:%M:%S")
+                return
+            if datetime and isinstance(value, datetime.time):
+                today = datetime.datetime.now().strftime("%Y%m%d")
+                self.value = value.strftime(today+"T%H:%M:%S")
+                return
             if not isinstance(value, (TupleType, time.struct_time)):
                 if value == 0:
                     value = time.time()
@@ -386,6 +401,10 @@
     value.decode(data)
     return value
 
+def _datetime_type(data):
+    t = time.strptime(data, "%Y%m%dT%H:%M:%S")
+    return datetime.datetime(*tuple(t)[:6])
+
 ##
 # Wrapper for binary data.  This can be used to transport any kind
 # of binary data over XML-RPC, using BASE64 encoding.
@@ -699,6 +718,26 @@
         del self.memo[i]
     dispatch[DictType] = dump_struct
 
+    if datetime:
+        def dump_datetime(self, value, write):
+            write("<value><dateTime.iso8601>")
+            write(value.strftime("%Y%m%dT%H:%M:%S"))
+            write("</dateTime.iso8601></value>\n")
+        dispatch[datetime.datetime] = dump_datetime
+
+        def dump_date(self, value, write):
+            write("<value><dateTime.iso8601>")
+            write(value.strftime("%Y%m%dT00:00:00"))
+            write("</dateTime.iso8601></value>\n")
+        dispatch[datetime.date] = dump_date
+
+        def dump_time(self, value, write):
+            write("<value><dateTime.iso8601>")
+            write(datetime.datetime.now().date().strftime("%Y%m%dT"))
+            write(value.strftime("%H:%M:%S"))
+            write("</dateTime.iso8601></value>\n")
+        dispatch[datetime.time] = dump_time
+
     def dump_instance(self, value, write):
         # check for special wrappers
         if value.__class__ in WRAPPERS:
@@ -727,7 +766,7 @@
     # and again, if you don't understand what's going on in here,
     # that's perfectly ok.
 
-    def __init__(self):
+    def __init__(self, use_datetime=0):
         self._type = None
         self._stack = []
         self._marks = []
@@ -735,6 +774,9 @@
         self._methodname = None
         self._encoding = "utf-8"
         self.append = self._stack.append
+        self._use_datetime = use_datetime
+        if use_datetime and not datetime:
+            raise ValueError, "the datetime module is not available"
 
     def close(self):
         # return response tuple and target method
@@ -852,6 +894,8 @@
     def end_dateTime(self, data):
         value = DateTime()
         value.decode(data)
+        if self._use_datetime:
+            value = _datetime_type(data)
         self.append(value)
     dispatch["dateTime.iso8601"] = end_dateTime
 
@@ -953,17 +997,23 @@
 #
 # return A (parser, unmarshaller) tuple.
 
-def getparser():
+def getparser(use_datetime=0):
     """getparser() -> parser, unmarshaller
 
     Create an instance of the fastest available parser, and attach it
     to an unmarshalling object.  Return both objects.
     """
+    if use_datetime and not datetime:
+        raise ValueError, "the datetime module is not available"
     if FastParser and FastUnmarshaller:
-        target = FastUnmarshaller(True, False, _binary, _datetime, Fault)
+        if use_datetime:
+            mkdatetime = _datetime_type
+        else:
+            mkdatetime = _datetime
+        target = FastUnmarshaller(True, False, _binary, mkdatetime, Fault)
         parser = FastParser(target)
     else:
-        target = Unmarshaller()
+        target = Unmarshaller(use_datetime=use_datetime)
         if FastParser:
             parser = FastParser(target)
         elif SgmlopParser:
@@ -1066,7 +1116,7 @@
 #     (None if not present).
 # @see Fault
 
-def loads(data):
+def loads(data, use_datetime=0):
     """data -> unmarshalled data, method name
 
     Convert an XML-RPC packet to unmarshalled data plus a method
@@ -1075,7 +1125,7 @@
     If the XML-RPC packet represents a fault condition, this function
     raises a Fault exception.
     """
-    p, u = getparser()
+    p, u = getparser(use_datetime=use_datetime)
     p.feed(data)
     p.close()
     return u.close(), u.getmethodname()
@@ -1107,6 +1157,9 @@
     # client identifier (may be overridden)
     user_agent = "xmlrpclib.py/%s (by www.pythonware.com)" % __version__
 
+    def __init__(self, use_datetime=0):
+        self._use_datetime = use_datetime
+
     ##
     # Send a complete request, and parse the response.
     #
@@ -1153,7 +1206,7 @@
 
     def getparser(self):
         # get parser and unmarshaller
-        return getparser()
+        return getparser(use_datetime=self._use_datetime)
 
     ##
     # Get authorization info from host parameter
@@ -1347,7 +1400,7 @@
     """
 
     def __init__(self, uri, transport=None, encoding=None, verbose=0,
-                 allow_none=0):
+                 allow_none=0, use_datetime=0):
         # establish a "logical" server connection
 
         # get the url
@@ -1361,9 +1414,9 @@
 
         if transport is None:
             if type == "https":
-                transport = SafeTransport()
+                transport = SafeTransport(use_datetime=use_datetime)
             else:
-                transport = Transport()
+                transport = Transport(use_datetime=use_datetime)
         self.__transport = transport
 
         self.__encoding = encoding

Index: zipfile.py
===================================================================
RCS file: /cvsroot/python/python/dist/src/Lib/zipfile.py,v
retrieving revision 1.24.2.2
retrieving revision 1.24.2.3
diff -u -d -r1.24.2.2 -r1.24.2.3
--- zipfile.py	7 Jan 2005 06:58:13 -0000	1.24.2.2
+++ zipfile.py	16 Oct 2005 05:23:59 -0000	1.24.2.3
@@ -193,7 +193,7 @@
         self.NameToInfo = {}    # Find file info given name
         self.filelist = []      # List of ZipInfo instances for archive
         self.compression = compression  # Method of compression
-        self.mode = key = mode[0].replace('b', '')
+        self.mode = key = mode.replace('b', '')[0]
 
         # Check if we were passed a file-like object
         if isinstance(file, basestring):

--- profile.doc DELETED ---



More information about the Python-checkins mailing list