[pypy-commit] pypy vendor/stdlib: add the 3.2.3 version of the stdlib in lib-python/3.2

RonnyPfannschmidt noreply at buildbot.pypy.org
Sat Apr 14 11:26:33 CEST 2012


Author: Ronny Pfannschmidt <Ronny.Pfannschmidt at gmx.de>
Branch: vendor/stdlib
Changeset: r54357:359343b9ac0e
Date: 2012-04-14 11:24 +0200
http://bitbucket.org/pypy/pypy/changeset/359343b9ac0e/

Log:	add the 3.2.3 version of the stdlib in lib-python/3.2

diff too long, truncating to 10000 out of 558051 lines

diff --git a/lib-python/3.2/__future__.py b/lib-python/3.2/__future__.py
new file mode 100644
--- /dev/null
+++ b/lib-python/3.2/__future__.py
@@ -0,0 +1,134 @@
+"""Record of phased-in incompatible language changes.
+
+Each line is of the form:
+
+    FeatureName = "_Feature(" OptionalRelease "," MandatoryRelease ","
+                              CompilerFlag ")"
+
+where, normally, OptionalRelease < MandatoryRelease, and both are 5-tuples
+of the same form as sys.version_info:
+
+    (PY_MAJOR_VERSION, # the 2 in 2.1.0a3; an int
+     PY_MINOR_VERSION, # the 1; an int
+     PY_MICRO_VERSION, # the 0; an int
+     PY_RELEASE_LEVEL, # "alpha", "beta", "candidate" or "final"; string
+     PY_RELEASE_SERIAL # the 3; an int
+    )
+
+OptionalRelease records the first release in which
+
+    from __future__ import FeatureName
+
+was accepted.
+
+In the case of MandatoryReleases that have not yet occurred,
+MandatoryRelease predicts the release in which the feature will become part
+of the language.
+
+Else MandatoryRelease records when the feature became part of the language;
+in releases at or after that, modules no longer need
+
+    from __future__ import FeatureName
+
+to use the feature in question, but may continue to use such imports.
+
+MandatoryRelease may also be None, meaning that a planned feature got
+dropped.
+
+Instances of class _Feature have two corresponding methods,
+.getOptionalRelease() and .getMandatoryRelease().
+
+CompilerFlag is the (bitfield) flag that should be passed in the fourth
+argument to the builtin function compile() to enable the feature in
+dynamically compiled code.  This flag is stored in the .compiler_flag
+attribute on _Feature instances.  These values must match the appropriate
+#defines of CO_xxx flags in Include/compile.h.
+
+No feature line is ever to be deleted from this file.
+"""
+
+all_feature_names = [
+    "nested_scopes",
+    "generators",
+    "division",
+    "absolute_import",
+    "with_statement",
+    "print_function",
+    "unicode_literals",
+    "barry_as_FLUFL",
+]
+
+__all__ = ["all_feature_names"] + all_feature_names
+
+# The CO_xxx symbols are defined here under the same names used by
+# compile.h, so that an editor search will find them here.  However,
+# they're not exported in __all__, because they don't really belong to
+# this module.
+CO_NESTED            = 0x0010   # nested_scopes
+CO_GENERATOR_ALLOWED = 0        # generators (obsolete, was 0x1000)
+CO_FUTURE_DIVISION   = 0x2000   # division
+CO_FUTURE_ABSOLUTE_IMPORT = 0x4000 # perform absolute imports by default
+CO_FUTURE_WITH_STATEMENT  = 0x8000   # with statement
+CO_FUTURE_PRINT_FUNCTION  = 0x10000   # print function
+CO_FUTURE_UNICODE_LITERALS = 0x20000 # unicode string literals
+CO_FUTURE_BARRY_AS_BDFL = 0x40000
+
+class _Feature:
+    def __init__(self, optionalRelease, mandatoryRelease, compiler_flag):
+        self.optional = optionalRelease
+        self.mandatory = mandatoryRelease
+        self.compiler_flag = compiler_flag
+
+    def getOptionalRelease(self):
+        """Return first release in which this feature was recognized.
+
+        This is a 5-tuple, of the same form as sys.version_info.
+        """
+
+        return self.optional
+
+    def getMandatoryRelease(self):
+        """Return release in which this feature will become mandatory.
+
+        This is a 5-tuple, of the same form as sys.version_info, or, if
+        the feature was dropped, is None.
+        """
+
+        return self.mandatory
+
+    def __repr__(self):
+        return "_Feature" + repr((self.optional,
+                                  self.mandatory,
+                                  self.compiler_flag))
+
+nested_scopes = _Feature((2, 1, 0, "beta",  1),
+                         (2, 2, 0, "alpha", 0),
+                         CO_NESTED)
+
+generators = _Feature((2, 2, 0, "alpha", 1),
+                      (2, 3, 0, "final", 0),
+                      CO_GENERATOR_ALLOWED)
+
+division = _Feature((2, 2, 0, "alpha", 2),
+                    (3, 0, 0, "alpha", 0),
+                    CO_FUTURE_DIVISION)
+
+absolute_import = _Feature((2, 5, 0, "alpha", 1),
+                           (2, 7, 0, "alpha", 0),
+                           CO_FUTURE_ABSOLUTE_IMPORT)
+
+with_statement = _Feature((2, 5, 0, "alpha", 1),
+                          (2, 6, 0, "alpha", 0),
+                          CO_FUTURE_WITH_STATEMENT)
+
+print_function = _Feature((2, 6, 0, "alpha", 2),
+                          (3, 0, 0, "alpha", 0),
+                          CO_FUTURE_PRINT_FUNCTION)
+
+unicode_literals = _Feature((2, 6, 0, "alpha", 2),
+                            (3, 0, 0, "alpha", 0),
+                            CO_FUTURE_UNICODE_LITERALS)
+
+barry_as_FLUFL = _Feature((3, 1, 0, "alpha", 2),
+                         (3, 9, 0, "alpha", 0),
+                         CO_FUTURE_BARRY_AS_BDFL)
diff --git a/lib-python/3.2/__phello__.foo.py b/lib-python/3.2/__phello__.foo.py
new file mode 100644
--- /dev/null
+++ b/lib-python/3.2/__phello__.foo.py
@@ -0,0 +1,1 @@
+# This file exists as a helper for the test.test_frozen module.
diff --git a/lib-python/3.2/_abcoll.py b/lib-python/3.2/_abcoll.py
new file mode 100644
--- /dev/null
+++ b/lib-python/3.2/_abcoll.py
@@ -0,0 +1,623 @@
+# Copyright 2007 Google, Inc. All Rights Reserved.
+# Licensed to PSF under a Contributor Agreement.
+
+"""Abstract Base Classes (ABCs) for collections, according to PEP 3119.
+
+DON'T USE THIS MODULE DIRECTLY!  The classes here should be imported
+via collections; they are defined here only to alleviate certain
+bootstrapping issues.  Unit tests are in test_collections.
+"""
+
+from abc import ABCMeta, abstractmethod
+import sys
+
+__all__ = ["Hashable", "Iterable", "Iterator",
+           "Sized", "Container", "Callable",
+           "Set", "MutableSet",
+           "Mapping", "MutableMapping",
+           "MappingView", "KeysView", "ItemsView", "ValuesView",
+           "Sequence", "MutableSequence",
+           "ByteString",
+           ]
+
+
+### collection related types which are not exposed through builtin ###
+## iterators ##
+bytes_iterator = type(iter(b''))
+bytearray_iterator = type(iter(bytearray()))
+#callable_iterator = ???
+dict_keyiterator = type(iter({}.keys()))
+dict_valueiterator = type(iter({}.values()))
+dict_itemiterator = type(iter({}.items()))
+list_iterator = type(iter([]))
+list_reverseiterator = type(iter(reversed([])))
+range_iterator = type(iter(range(0)))
+set_iterator = type(iter(set()))
+str_iterator = type(iter(""))
+tuple_iterator = type(iter(()))
+zip_iterator = type(iter(zip()))
+## views ##
+dict_keys = type({}.keys())
+dict_values = type({}.values())
+dict_items = type({}.items())
+## misc ##
+dict_proxy = type(type.__dict__)
+
+
+### ONE-TRICK PONIES ###
+
+class Hashable(metaclass=ABCMeta):
+
+    @abstractmethod
+    def __hash__(self):
+        return 0
+
+    @classmethod
+    def __subclasshook__(cls, C):
+        if cls is Hashable:
+            for B in C.__mro__:
+                if "__hash__" in B.__dict__:
+                    if B.__dict__["__hash__"]:
+                        return True
+                    break
+        return NotImplemented
+
+
+class Iterable(metaclass=ABCMeta):
+
+    @abstractmethod
+    def __iter__(self):
+        while False:
+            yield None
+
+    @classmethod
+    def __subclasshook__(cls, C):
+        if cls is Iterable:
+            if any("__iter__" in B.__dict__ for B in C.__mro__):
+                return True
+        return NotImplemented
+
+
+class Iterator(Iterable):
+
+    @abstractmethod
+    def __next__(self):
+        raise StopIteration
+
+    def __iter__(self):
+        return self
+
+    @classmethod
+    def __subclasshook__(cls, C):
+        if cls is Iterator:
+            if (any("__next__" in B.__dict__ for B in C.__mro__) and
+                any("__iter__" in B.__dict__ for B in C.__mro__)):
+                return True
+        return NotImplemented
+
+Iterator.register(bytes_iterator)
+Iterator.register(bytearray_iterator)
+#Iterator.register(callable_iterator)
+Iterator.register(dict_keyiterator)
+Iterator.register(dict_valueiterator)
+Iterator.register(dict_itemiterator)
+Iterator.register(list_iterator)
+Iterator.register(list_reverseiterator)
+Iterator.register(range_iterator)
+Iterator.register(set_iterator)
+Iterator.register(str_iterator)
+Iterator.register(tuple_iterator)
+Iterator.register(zip_iterator)
+
+class Sized(metaclass=ABCMeta):
+
+    @abstractmethod
+    def __len__(self):
+        return 0
+
+    @classmethod
+    def __subclasshook__(cls, C):
+        if cls is Sized:
+            if any("__len__" in B.__dict__ for B in C.__mro__):
+                return True
+        return NotImplemented
+
+
+class Container(metaclass=ABCMeta):
+
+    @abstractmethod
+    def __contains__(self, x):
+        return False
+
+    @classmethod
+    def __subclasshook__(cls, C):
+        if cls is Container:
+            if any("__contains__" in B.__dict__ for B in C.__mro__):
+                return True
+        return NotImplemented
+
+
+class Callable(metaclass=ABCMeta):
+
+    @abstractmethod
+    def __call__(self, *args, **kwds):
+        return False
+
+    @classmethod
+    def __subclasshook__(cls, C):
+        if cls is Callable:
+            if any("__call__" in B.__dict__ for B in C.__mro__):
+                return True
+        return NotImplemented
+
+
+### SETS ###
+
+
+class Set(Sized, Iterable, Container):
+
+    """A set is a finite, iterable container.
+
+    This class provides concrete generic implementations of all
+    methods except for __contains__, __iter__ and __len__.
+
+    To override the comparisons (presumably for speed, as the
+    semantics are fixed), all you have to do is redefine __le__ and
+    then the other operations will automatically follow suit.
+    """
+
+    def __le__(self, other):
+        if not isinstance(other, Set):
+            return NotImplemented
+        if len(self) > len(other):
+            return False
+        for elem in self:
+            if elem not in other:
+                return False
+        return True
+
+    def __lt__(self, other):
+        if not isinstance(other, Set):
+            return NotImplemented
+        return len(self) < len(other) and self.__le__(other)
+
+    def __gt__(self, other):
+        if not isinstance(other, Set):
+            return NotImplemented
+        return other < self
+
+    def __ge__(self, other):
+        if not isinstance(other, Set):
+            return NotImplemented
+        return other <= self
+
+    def __eq__(self, other):
+        if not isinstance(other, Set):
+            return NotImplemented
+        return len(self) == len(other) and self.__le__(other)
+
+    def __ne__(self, other):
+        return not (self == other)
+
+    @classmethod
+    def _from_iterable(cls, it):
+        '''Construct an instance of the class from any iterable input.
+
+        Must override this method if the class constructor signature
+        does not accept an iterable for an input.
+        '''
+        return cls(it)
+
+    def __and__(self, other):
+        if not isinstance(other, Iterable):
+            return NotImplemented
+        return self._from_iterable(value for value in other if value in self)
+
+    def isdisjoint(self, other):
+        for value in other:
+            if value in self:
+                return False
+        return True
+
+    def __or__(self, other):
+        if not isinstance(other, Iterable):
+            return NotImplemented
+        chain = (e for s in (self, other) for e in s)
+        return self._from_iterable(chain)
+
+    def __sub__(self, other):
+        if not isinstance(other, Set):
+            if not isinstance(other, Iterable):
+                return NotImplemented
+            other = self._from_iterable(other)
+        return self._from_iterable(value for value in self
+                                   if value not in other)
+
+    def __xor__(self, other):
+        if not isinstance(other, Set):
+            if not isinstance(other, Iterable):
+                return NotImplemented
+            other = self._from_iterable(other)
+        return (self - other) | (other - self)
+
+    def _hash(self):
+        """Compute the hash value of a set.
+
+        Note that we don't define __hash__: not all sets are hashable.
+        But if you define a hashable set type, its __hash__ should
+        call this function.
+
+        This must be compatible with __eq__.
+
+        All sets ought to compare equal if they contain the same
+        elements, regardless of how they are implemented, and
+        regardless of the order of the elements; so there's not much
+        freedom for __eq__ or __hash__.  We match the algorithm used
+        by the built-in frozenset type.
+        """
+        MAX = sys.maxsize
+        MASK = 2 * MAX + 1
+        n = len(self)
+        h = 1927868237 * (n + 1)
+        h &= MASK
+        for x in self:
+            hx = hash(x)
+            h ^= (hx ^ (hx << 16) ^ 89869747)  * 3644798167
+            h &= MASK
+        h = h * 69069 + 907133923
+        h &= MASK
+        if h > MAX:
+            h -= MASK + 1
+        if h == -1:
+            h = 590923713
+        return h
+
+Set.register(frozenset)
+
+
+class MutableSet(Set):
+
+    @abstractmethod
+    def add(self, value):
+        """Add an element."""
+        raise NotImplementedError
+
+    @abstractmethod
+    def discard(self, value):
+        """Remove an element.  Do not raise an exception if absent."""
+        raise NotImplementedError
+
+    def remove(self, value):
+        """Remove an element. If not a member, raise a KeyError."""
+        if value not in self:
+            raise KeyError(value)
+        self.discard(value)
+
+    def pop(self):
+        """Return the popped value.  Raise KeyError if empty."""
+        it = iter(self)
+        try:
+            value = next(it)
+        except StopIteration:
+            raise KeyError
+        self.discard(value)
+        return value
+
+    def clear(self):
+        """This is slow (creates N new iterators!) but effective."""
+        try:
+            while True:
+                self.pop()
+        except KeyError:
+            pass
+
+    def __ior__(self, it):
+        for value in it:
+            self.add(value)
+        return self
+
+    def __iand__(self, it):
+        for value in (self - it):
+            self.discard(value)
+        return self
+
+    def __ixor__(self, it):
+        if it is self:
+            self.clear()
+        else:
+            if not isinstance(it, Set):
+                it = self._from_iterable(it)
+            for value in it:
+                if value in self:
+                    self.discard(value)
+                else:
+                    self.add(value)
+        return self
+
+    def __isub__(self, it):
+        if it is self:
+            self.clear()
+        else:
+            for value in it:
+                self.discard(value)
+        return self
+
+MutableSet.register(set)
+
+
+### MAPPINGS ###
+
+
+class Mapping(Sized, Iterable, Container):
+
+    @abstractmethod
+    def __getitem__(self, key):
+        raise KeyError
+
+    def get(self, key, default=None):
+        try:
+            return self[key]
+        except KeyError:
+            return default
+
+    def __contains__(self, key):
+        try:
+            self[key]
+        except KeyError:
+            return False
+        else:
+            return True
+
+    def keys(self):
+        return KeysView(self)
+
+    def items(self):
+        return ItemsView(self)
+
+    def values(self):
+        return ValuesView(self)
+
+    def __eq__(self, other):
+        if not isinstance(other, Mapping):
+            return NotImplemented
+        return dict(self.items()) == dict(other.items())
+
+    def __ne__(self, other):
+        return not (self == other)
+
+
+class MappingView(Sized):
+
+    def __init__(self, mapping):
+        self._mapping = mapping
+
+    def __len__(self):
+        return len(self._mapping)
+
+    def __repr__(self):
+        return '{0.__class__.__name__}({0._mapping!r})'.format(self)
+
+
+class KeysView(MappingView, Set):
+
+    @classmethod
+    def _from_iterable(cls, it):
+        return set(it)
+
+    def __contains__(self, key):
+        return key in self._mapping
+
+    def __iter__(self):
+        for key in self._mapping:
+            yield key
+
+KeysView.register(dict_keys)
+
+
+class ItemsView(MappingView, Set):
+
+    @classmethod
+    def _from_iterable(cls, it):
+        return set(it)
+
+    def __contains__(self, item):
+        key, value = item
+        try:
+            v = self._mapping[key]
+        except KeyError:
+            return False
+        else:
+            return v == value
+
+    def __iter__(self):
+        for key in self._mapping:
+            yield (key, self._mapping[key])
+
+ItemsView.register(dict_items)
+
+
+class ValuesView(MappingView):
+
+    def __contains__(self, value):
+        for key in self._mapping:
+            if value == self._mapping[key]:
+                return True
+        return False
+
+    def __iter__(self):
+        for key in self._mapping:
+            yield self._mapping[key]
+
+ValuesView.register(dict_values)
+
+
+class MutableMapping(Mapping):
+
+    @abstractmethod
+    def __setitem__(self, key, value):
+        raise KeyError
+
+    @abstractmethod
+    def __delitem__(self, key):
+        raise KeyError
+
+    __marker = object()
+
+    def pop(self, key, default=__marker):
+        try:
+            value = self[key]
+        except KeyError:
+            if default is self.__marker:
+                raise
+            return default
+        else:
+            del self[key]
+            return value
+
+    def popitem(self):
+        try:
+            key = next(iter(self))
+        except StopIteration:
+            raise KeyError
+        value = self[key]
+        del self[key]
+        return key, value
+
+    def clear(self):
+        try:
+            while True:
+                self.popitem()
+        except KeyError:
+            pass
+
+    def update(*args, **kwds):
+        if len(args) > 2:
+            raise TypeError("update() takes at most 2 positional "
+                            "arguments ({} given)".format(len(args)))
+        elif not args:
+            raise TypeError("update() takes at least 1 argument (0 given)")
+        self = args[0]
+        other = args[1] if len(args) >= 2 else ()
+
+        if isinstance(other, Mapping):
+            for key in other:
+                self[key] = other[key]
+        elif hasattr(other, "keys"):
+            for key in other.keys():
+                self[key] = other[key]
+        else:
+            for key, value in other:
+                self[key] = value
+        for key, value in kwds.items():
+            self[key] = value
+
+    def setdefault(self, key, default=None):
+        try:
+            return self[key]
+        except KeyError:
+            self[key] = default
+        return default
+
+MutableMapping.register(dict)
+
+
+### SEQUENCES ###
+
+
+class Sequence(Sized, Iterable, Container):
+
+    """All the operations on a read-only sequence.
+
+    Concrete subclasses must override __new__ or __init__,
+    __getitem__, and __len__.
+    """
+
+    @abstractmethod
+    def __getitem__(self, index):
+        raise IndexError
+
+    def __iter__(self):
+        i = 0
+        try:
+            while True:
+                v = self[i]
+                yield v
+                i += 1
+        except IndexError:
+            return
+
+    def __contains__(self, value):
+        for v in self:
+            if v == value:
+                return True
+        return False
+
+    def __reversed__(self):
+        for i in reversed(range(len(self))):
+            yield self[i]
+
+    def index(self, value):
+        for i, v in enumerate(self):
+            if v == value:
+                return i
+        raise ValueError
+
+    def count(self, value):
+        return sum(1 for v in self if v == value)
+
+Sequence.register(tuple)
+Sequence.register(str)
+Sequence.register(range)
+
+
+class ByteString(Sequence):
+
+    """This unifies bytes and bytearray.
+
+    XXX Should add all their methods.
+    """
+
+ByteString.register(bytes)
+ByteString.register(bytearray)
+
+
+class MutableSequence(Sequence):
+
+    @abstractmethod
+    def __setitem__(self, index, value):
+        raise IndexError
+
+    @abstractmethod
+    def __delitem__(self, index):
+        raise IndexError
+
+    @abstractmethod
+    def insert(self, index, value):
+        raise IndexError
+
+    def append(self, value):
+        self.insert(len(self), value)
+
+    def reverse(self):
+        n = len(self)
+        for i in range(n//2):
+            self[i], self[n-i-1] = self[n-i-1], self[i]
+
+    def extend(self, values):
+        for v in values:
+            self.append(v)
+
+    def pop(self, index=-1):
+        v = self[index]
+        del self[index]
+        return v
+
+    def remove(self, value):
+        del self[self.index(value)]
+
+    def __iadd__(self, values):
+        self.extend(values)
+        return self
+
+MutableSequence.register(list)
+MutableSequence.register(bytearray)  # Multiply inheriting, see ByteString
diff --git a/lib-python/3.2/_compat_pickle.py b/lib-python/3.2/_compat_pickle.py
new file mode 100644
--- /dev/null
+++ b/lib-python/3.2/_compat_pickle.py
@@ -0,0 +1,81 @@
+# This module is used to map the old Python 2 names to the new names used in
+# Python 3 for the pickle module.  This is needed to make pickle streams
+# generated with Python 2 loadable by Python 3.
+
+# This is a copy of lib2to3.fixes.fix_imports.MAPPING.  We cannot import
+# lib2to3 and use the mapping defined there, because lib2to3 uses pickle;
+# importing it here could therefore cause pickle to be imported recursively.
+IMPORT_MAPPING = {
+    'StringIO':  'io',
+    'cStringIO': 'io',
+    'cPickle': 'pickle',
+    '__builtin__' : 'builtins',
+    'copy_reg': 'copyreg',
+    'Queue': 'queue',
+    'SocketServer': 'socketserver',
+    'ConfigParser': 'configparser',
+    'repr': 'reprlib',
+    'FileDialog': 'tkinter.filedialog',
+    'tkFileDialog': 'tkinter.filedialog',
+    'SimpleDialog': 'tkinter.simpledialog',
+    'tkSimpleDialog': 'tkinter.simpledialog',
+    'tkColorChooser': 'tkinter.colorchooser',
+    'tkCommonDialog': 'tkinter.commondialog',
+    'Dialog': 'tkinter.dialog',
+    'Tkdnd': 'tkinter.dnd',
+    'tkFont': 'tkinter.font',
+    'tkMessageBox': 'tkinter.messagebox',
+    'ScrolledText': 'tkinter.scrolledtext',
+    'Tkconstants': 'tkinter.constants',
+    'Tix': 'tkinter.tix',
+    'ttk': 'tkinter.ttk',
+    'Tkinter': 'tkinter',
+    'markupbase': '_markupbase',
+    '_winreg': 'winreg',
+    'thread': '_thread',
+    'dummy_thread': '_dummy_thread',
+    'dbhash': 'dbm.bsd',
+    'dumbdbm': 'dbm.dumb',
+    'dbm': 'dbm.ndbm',
+    'gdbm': 'dbm.gnu',
+    'xmlrpclib': 'xmlrpc.client',
+    'DocXMLRPCServer': 'xmlrpc.server',
+    'SimpleXMLRPCServer': 'xmlrpc.server',
+    'httplib': 'http.client',
+    'htmlentitydefs' : 'html.entities',
+    'HTMLParser' : 'html.parser',
+    'Cookie': 'http.cookies',
+    'cookielib': 'http.cookiejar',
+    'BaseHTTPServer': 'http.server',
+    'SimpleHTTPServer': 'http.server',
+    'CGIHTTPServer': 'http.server',
+    'test.test_support': 'test.support',
+    'commands': 'subprocess',
+    'UserString' : 'collections',
+    'UserList' : 'collections',
+    'urlparse' : 'urllib.parse',
+    'robotparser' : 'urllib.robotparser',
+    'whichdb': 'dbm',
+    'anydbm': 'dbm'
+}
+
+
+# This contains rename rules that are easy to handle.  We ignore the more
+# complex stuff (e.g. mapping the names in the urllib and types modules).
+# These rules should be run before import names are fixed.
+NAME_MAPPING = {
+    ('__builtin__', 'xrange'):     ('builtins', 'range'),
+    ('__builtin__', 'reduce'):     ('functools', 'reduce'),
+    ('__builtin__', 'intern'):     ('sys', 'intern'),
+    ('__builtin__', 'unichr'):     ('builtins', 'chr'),
+    ('__builtin__', 'basestring'): ('builtins', 'str'),
+    ('__builtin__', 'long'):       ('builtins', 'int'),
+    ('itertools', 'izip'):         ('builtins', 'zip'),
+    ('itertools', 'imap'):         ('builtins', 'map'),
+    ('itertools', 'ifilter'):      ('builtins', 'filter'),
+    ('itertools', 'ifilterfalse'): ('itertools', 'filterfalse'),
+}
+
+# Same, but for 3.x to 2.x
+REVERSE_IMPORT_MAPPING = dict((v, k) for (k, v) in IMPORT_MAPPING.items())
+REVERSE_NAME_MAPPING = dict((v, k) for (k, v) in NAME_MAPPING.items())
diff --git a/lib-python/3.2/_dummy_thread.py b/lib-python/3.2/_dummy_thread.py
new file mode 100644
--- /dev/null
+++ b/lib-python/3.2/_dummy_thread.py
@@ -0,0 +1,155 @@
+"""Drop-in replacement for the thread module.
+
+Meant to be used as a brain-dead substitute so that threaded code does
+not need to be rewritten when the _thread module is not present.
+
+Suggested usage is::
+
+    try:
+        import _thread
+    except ImportError:
+        import _dummy_thread as _thread
+
+"""
+# Exports only things specified by thread documentation;
+# skipping obsolete synonyms allocate(), start_new(), exit_thread().
+__all__ = ['error', 'start_new_thread', 'exit', 'get_ident', 'allocate_lock',
+           'interrupt_main', 'LockType']
+
+# A dummy value
+TIMEOUT_MAX = 2**31
+
+# NOTE: this module can be imported early in the extension building process,
+# and so top level imports of other modules should be avoided.  Instead, all
+# imports are done when needed on a function-by-function basis.  Since threads
+# are disabled, the import lock should not be an issue anyway (??).
+
+class error(Exception):
+    """Dummy implementation of _thread.error."""
+
+    def __init__(self, *args):
+        self.args = args
+
+def start_new_thread(function, args, kwargs={}):
+    """Dummy implementation of _thread.start_new_thread().
+
+    Compatibility is maintained by making sure that ``args`` is a
+    tuple and ``kwargs`` is a dictionary.  If an exception is raised
+    and it is SystemExit (which can be done by _thread.exit()) it is
+    caught and nothing is done; all other exceptions are printed out
+    by using traceback.print_exc().
+
+    If the executed function calls interrupt_main, KeyboardInterrupt will be
+    raised when the function returns.
+
+    """
+    if type(args) != type(tuple()):
+        raise TypeError("2nd arg must be a tuple")
+    if type(kwargs) != type(dict()):
+        raise TypeError("3rd arg must be a dict")
+    global _main
+    _main = False
+    try:
+        function(*args, **kwargs)
+    except SystemExit:
+        pass
+    except:
+        import traceback
+        traceback.print_exc()
+    _main = True
+    global _interrupt
+    if _interrupt:
+        _interrupt = False
+        raise KeyboardInterrupt
+
+def exit():
+    """Dummy implementation of _thread.exit()."""
+    raise SystemExit
+
+def get_ident():
+    """Dummy implementation of _thread.get_ident().
+
+    Since this module should only be used when the _thread module is not
+    available, it is safe to assume that the current process is the
+    only thread.  Thus a constant can be safely returned.
+    """
+    return -1
+
+def allocate_lock():
+    """Dummy implementation of _thread.allocate_lock()."""
+    return LockType()
+
+def stack_size(size=None):
+    """Dummy implementation of _thread.stack_size()."""
+    if size is not None:
+        raise error("setting thread stack size not supported")
+    return 0
+
+class LockType(object):
+    """Class implementing dummy implementation of _thread.LockType.
+
+    Compatibility is maintained by maintaining self.locked_status
+    which is a boolean that stores the state of the lock.  Pickling of
+    the lock, though, should not be done since if the _thread module is
+    then used with an unpickled ``lock()`` from here problems could
+    occur from this class not having atomic methods.
+
+    """
+
+    def __init__(self):
+        self.locked_status = False
+
+    def acquire(self, waitflag=None, timeout=-1):
+        """Dummy implementation of acquire().
+
+        For blocking calls, self.locked_status is automatically set to
+        True and returned appropriately based on value of
+        ``waitflag``.  If it is non-blocking, then the value is
+        actually checked and not set if it is already acquired.  This
+        is all done so that threading.Condition's assert statements
+        aren't triggered and throw a little fit.
+
+        """
+        if waitflag is None or waitflag:
+            self.locked_status = True
+            return True
+        else:
+            if not self.locked_status:
+                self.locked_status = True
+                return True
+            else:
+                if timeout > 0:
+                    import time
+                    time.sleep(timeout)
+                return False
+
+    __enter__ = acquire
+
+    def __exit__(self, typ, val, tb):
+        self.release()
+
+    def release(self):
+        """Release the dummy lock."""
+        # XXX Perhaps shouldn't actually bother to test?  Could lead
+        #     to problems for complex, threaded code.
+        if not self.locked_status:
+            raise error
+        self.locked_status = False
+        return True
+
+    def locked(self):
+        return self.locked_status
+
+# Used to signal that interrupt_main was called in a "thread"
+_interrupt = False
+# True when not executing in a "thread"
+_main = True
+
+def interrupt_main():
+    """Set _interrupt flag to True to have start_new_thread raise
+    KeyboardInterrupt upon exiting."""
+    if _main:
+        raise KeyboardInterrupt
+    else:
+        global _interrupt
+        _interrupt = True
diff --git a/lib-python/3.2/_markupbase.py b/lib-python/3.2/_markupbase.py
new file mode 100644
--- /dev/null
+++ b/lib-python/3.2/_markupbase.py
@@ -0,0 +1,395 @@
+"""Shared support for scanning document type declarations in HTML and XHTML.
+
+This module is used as a foundation for the html.parser module.  It has no
+documented public API and should not be used directly.
+
+"""
+
+import re
+
+_declname_match = re.compile(r'[a-zA-Z][-_.a-zA-Z0-9]*\s*').match
+_declstringlit_match = re.compile(r'(\'[^\']*\'|"[^"]*")\s*').match
+_commentclose = re.compile(r'--\s*>')
+_markedsectionclose = re.compile(r']\s*]\s*>')
+
+# An analysis of the MS-Word extensions is available at
+# http://www.planetpublish.com/xmlarena/xap/Thursday/WordtoXML.pdf
+
+_msmarkedsectionclose = re.compile(r']\s*>')
+
+del re
+
+
+class ParserBase:
+    """Parser base class which provides some common support methods used
+    by the SGML/HTML and XHTML parsers."""
+
+    def __init__(self):
+        if self.__class__ is ParserBase:
+            raise RuntimeError(
+                "_markupbase.ParserBase must be subclassed")
+
+    def error(self, message):
+        raise NotImplementedError(
+            "subclasses of ParserBase must override error()")
+
+    def reset(self):
+        self.lineno = 1
+        self.offset = 0
+
+    def getpos(self):
+        """Return current line number and offset."""
+        return self.lineno, self.offset
+
+    # Internal -- update line number and offset.  This should be
+    # called for each piece of data exactly once, in order -- in other
+    # words the concatenation of all the input strings to this
+    # function should be exactly the entire input.
+    def updatepos(self, i, j):
+        if i >= j:
+            return j
+        rawdata = self.rawdata
+        nlines = rawdata.count("\n", i, j)
+        if nlines:
+            self.lineno = self.lineno + nlines
+            pos = rawdata.rindex("\n", i, j) # Should not fail
+            self.offset = j-(pos+1)
+        else:
+            self.offset = self.offset + j-i
+        return j
+
+    _decl_otherchars = ''
+
+    # Internal -- parse declaration (for use by subclasses).
+    def parse_declaration(self, i):
+        # This is some sort of declaration; in "HTML as
+        # deployed," this should only be the document type
+        # declaration ("<!DOCTYPE html...>").
+        # ISO 8879:1986, however, has more complex
+        # declaration syntax for elements in <!...>, including:
+        # --comment--
+        # [marked section]
+        # name in the following list: ENTITY, DOCTYPE, ELEMENT,
+        # ATTLIST, NOTATION, SHORTREF, USEMAP,
+        # LINKTYPE, LINK, IDLINK, USELINK, SYSTEM
+        rawdata = self.rawdata
+        j = i + 2
+        assert rawdata[i:j] == "<!", "unexpected call to parse_declaration"
+        if rawdata[j:j+1] == ">":
+            # the empty comment <!>
+            return j + 1
+        if rawdata[j:j+1] in ("-", ""):
+            # Start of comment followed by buffer boundary,
+            # or just a buffer boundary.
+            return -1
+        # A simple, practical version could look like: ((name|stringlit) S*) + '>'
+        n = len(rawdata)
+        if rawdata[j:j+2] == '--': #comment
+            # Locate --.*-- as the body of the comment
+            return self.parse_comment(i)
+        elif rawdata[j] == '[': #marked section
+            # Locate [statusWord [...arbitrary SGML...]] as the body of the marked section
+            # Where statusWord is one of TEMP, CDATA, IGNORE, INCLUDE, RCDATA
+            # Note that this is extended by Microsoft Office "Save as Web" function
+            # to include [if...] and [endif].
+            return self.parse_marked_section(i)
+        else: #all other declaration elements
+            decltype, j = self._scan_name(j, i)
+        if j < 0:
+            return j
+        if decltype == "doctype":
+            self._decl_otherchars = ''
+        while j < n:
+            c = rawdata[j]
+            if c == ">":
+                # end of declaration syntax
+                data = rawdata[i+2:j]
+                if decltype == "doctype":
+                    self.handle_decl(data)
+                else:
+                    # According to the HTML5 specs sections "8.2.4.44 Bogus
+                    # comment state" and "8.2.4.45 Markup declaration open
+                    # state", a comment token should be emitted.
+                    # Calling unknown_decl provides more flexibility though.
+                    self.unknown_decl(data)
+                return j + 1
+            if c in "\"'":
+                m = _declstringlit_match(rawdata, j)
+                if not m:
+                    return -1 # incomplete
+                j = m.end()
+            elif c in "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ":
+                name, j = self._scan_name(j, i)
+            elif c in self._decl_otherchars:
+                j = j + 1
+            elif c == "[":
+                # this could be handled in a separate doctype parser
+                if decltype == "doctype":
+                    j = self._parse_doctype_subset(j + 1, i)
+                elif decltype in {"attlist", "linktype", "link", "element"}:
+                    # must tolerate []'d groups in a content model in an element declaration
+                    # also in data attribute specifications of attlist declaration
+                    # also link type declaration subsets in linktype declarations
+                    # also link attribute specification lists in link declarations
+                    self.error("unsupported '[' char in %s declaration" % decltype)
+                else:
+                    self.error("unexpected '[' char in declaration")
+            else:
+                self.error(
+                    "unexpected %r char in declaration" % rawdata[j])
+            if j < 0:
+                return j
+        return -1 # incomplete
+
+    # Internal -- parse a marked section
+    # Override this to handle MS-word extension syntax <![if word]>content<![endif]>
+    def parse_marked_section(self, i, report=1):
+        rawdata = self.rawdata
+        assert rawdata[i:i+3] == '<![', "unexpected call to parse_marked_section()"
+        sectName, j = self._scan_name(i+3, i)
+        if j < 0:
+            return j
+        if sectName in {"temp", "cdata", "ignore", "include", "rcdata"}:
+            # look for standard ]]> ending
+            match = _markedsectionclose.search(rawdata, i+3)
+        elif sectName in {"if", "else", "endif"}:
+            # look for MS Office ]> ending
+            match = _msmarkedsectionclose.search(rawdata, i+3)
+        else:
+            self.error('unknown status keyword %r in marked section' % rawdata[i+3:j])
+        if not match:
+            return -1
+        if report:
+            j = match.start(0)
+            self.unknown_decl(rawdata[i+3: j])
+        return match.end(0)
+
+    # Internal -- parse comment, return length or -1 if not terminated
+    def parse_comment(self, i, report=1):
+        rawdata = self.rawdata
+        if rawdata[i:i+4] != '<!--':
+            self.error('unexpected call to parse_comment()')
+        match = _commentclose.search(rawdata, i+4)
+        if not match:
+            return -1
+        if report:
+            j = match.start(0)
+            self.handle_comment(rawdata[i+4: j])
+        return match.end(0)
+
+    # Internal -- scan past the internal subset in a <!DOCTYPE declaration,
+    # returning the index just past any whitespace following the trailing ']'.
+    def _parse_doctype_subset(self, i, declstartpos):
+        rawdata = self.rawdata
+        n = len(rawdata)
+        j = i
+        while j < n:
+            c = rawdata[j]
+            if c == "<":
+                s = rawdata[j:j+2]
+                if s == "<":
+                    # end of buffer; incomplete
+                    return -1
+                if s != "<!":
+                    self.updatepos(declstartpos, j + 1)
+                    self.error("unexpected char in internal subset (in %r)" % s)
+                if (j + 2) == n:
+                    # end of buffer; incomplete
+                    return -1
+                if (j + 4) > n:
+                    # end of buffer; incomplete
+                    return -1
+                if rawdata[j:j+4] == "<!--":
+                    j = self.parse_comment(j, report=0)
+                    if j < 0:
+                        return j
+                    continue
+                name, j = self._scan_name(j + 2, declstartpos)
+                if j == -1:
+                    return -1
+                if name not in {"attlist", "element", "entity", "notation"}:
+                    self.updatepos(declstartpos, j + 2)
+                    self.error(
+                        "unknown declaration %r in internal subset" % name)
+                # handle the individual names
+                meth = getattr(self, "_parse_doctype_" + name)
+                j = meth(j, declstartpos)
+                if j < 0:
+                    return j
+            elif c == "%":
+                # parameter entity reference
+                if (j + 1) == n:
+                    # end of buffer; incomplete
+                    return -1
+                s, j = self._scan_name(j + 1, declstartpos)
+                if j < 0:
+                    return j
+                if rawdata[j] == ";":
+                    j = j + 1
+            elif c == "]":
+                j = j + 1
+                while j < n and rawdata[j].isspace():
+                    j = j + 1
+                if j < n:
+                    if rawdata[j] == ">":
+                        return j
+                    self.updatepos(declstartpos, j)
+                    self.error("unexpected char after internal subset")
+                else:
+                    return -1
+            elif c.isspace():
+                j = j + 1
+            else:
+                self.updatepos(declstartpos, j)
+                self.error("unexpected char %r in internal subset" % c)
+        # end of buffer reached
+        return -1
+
+    # Internal -- scan past <!ELEMENT declarations
+    def _parse_doctype_element(self, i, declstartpos):
+        name, j = self._scan_name(i, declstartpos)
+        if j == -1:
+            return -1
+        # content model follows; just skip until '>'
+        rawdata = self.rawdata
+        if '>' in rawdata[j:]:
+            return rawdata.find(">", j) + 1
+        return -1
+
+    # Internal -- scan past <!ATTLIST declarations
+    def _parse_doctype_attlist(self, i, declstartpos):
+        rawdata = self.rawdata
+        name, j = self._scan_name(i, declstartpos)
+        c = rawdata[j:j+1]
+        if c == "":
+            return -1
+        if c == ">":
+            return j + 1
+        while 1:
+            # scan a series of attribute descriptions; simplified:
+            #   name type [value] [#constraint]
+            name, j = self._scan_name(j, declstartpos)
+            if j < 0:
+                return j
+            c = rawdata[j:j+1]
+            if c == "":
+                return -1
+            if c == "(":
+                # an enumerated type; look for ')'
+                if ")" in rawdata[j:]:
+                    j = rawdata.find(")", j) + 1
+                else:
+                    return -1
+                while rawdata[j:j+1].isspace():
+                    j = j + 1
+                if not rawdata[j:]:
+                    # end of buffer, incomplete
+                    return -1
+            else:
+                name, j = self._scan_name(j, declstartpos)
+            c = rawdata[j:j+1]
+            if not c:
+                return -1
+            if c in "'\"":
+                m = _declstringlit_match(rawdata, j)
+                if m:
+                    j = m.end()
+                else:
+                    return -1
+                c = rawdata[j:j+1]
+                if not c:
+                    return -1
+            if c == "#":
+                if rawdata[j:] == "#":
+                    # end of buffer
+                    return -1
+                name, j = self._scan_name(j + 1, declstartpos)
+                if j < 0:
+                    return j
+                c = rawdata[j:j+1]
+                if not c:
+                    return -1
+            if c == '>':
+                # all done
+                return j + 1
+
+    # Internal -- scan past <!NOTATION declarations
+    def _parse_doctype_notation(self, i, declstartpos):
+        name, j = self._scan_name(i, declstartpos)
+        if j < 0:
+            return j
+        rawdata = self.rawdata
+        while 1:
+            c = rawdata[j:j+1]
+            if not c:
+                # end of buffer; incomplete
+                return -1
+            if c == '>':
+                return j + 1
+            if c in "'\"":
+                m = _declstringlit_match(rawdata, j)
+                if not m:
+                    return -1
+                j = m.end()
+            else:
+                name, j = self._scan_name(j, declstartpos)
+                if j < 0:
+                    return j
+
+    # Internal -- scan past <!ENTITY declarations
+    def _parse_doctype_entity(self, i, declstartpos):
+        rawdata = self.rawdata
+        if rawdata[i:i+1] == "%":
+            j = i + 1
+            while 1:
+                c = rawdata[j:j+1]
+                if not c:
+                    return -1
+                if c.isspace():
+                    j = j + 1
+                else:
+                    break
+        else:
+            j = i
+        name, j = self._scan_name(j, declstartpos)
+        if j < 0:
+            return j
+        while 1:
+            c = self.rawdata[j:j+1]
+            if not c:
+                return -1
+            if c in "'\"":
+                m = _declstringlit_match(rawdata, j)
+                if m:
+                    j = m.end()
+                else:
+                    return -1    # incomplete
+            elif c == ">":
+                return j + 1
+            else:
+                name, j = self._scan_name(j, declstartpos)
+                if j < 0:
+                    return j
+
+    # Internal -- scan a name token; return the new position and the token,
+    # or (None, -1) if we've reached the end of the buffer.
+    def _scan_name(self, i, declstartpos):
+        rawdata = self.rawdata
+        n = len(rawdata)
+        if i == n:
+            return None, -1
+        m = _declname_match(rawdata, i)
+        if m:
+            s = m.group()
+            name = s.strip()
+            if (i + len(s)) == n:
+                return None, -1  # end of buffer
+            return name.lower(), m.end()
+        else:
+            self.updatepos(declstartpos, i)
+            self.error("expected name token at %r"
+                       % rawdata[declstartpos:declstartpos+20])
+
+    # To be overridden -- handlers for unknown objects
+    def unknown_decl(self, data):
+        pass
diff --git a/lib-python/3.2/_pyio.py b/lib-python/3.2/_pyio.py
new file mode 100644
--- /dev/null
+++ b/lib-python/3.2/_pyio.py
@@ -0,0 +1,2033 @@
+"""
+Python implementation of the io module.
+"""
+
+import os
+import abc
+import codecs
+import warnings
+import errno
+# Import _thread instead of threading to reduce startup cost
+try:
+    from _thread import allocate_lock as Lock
+except ImportError:
+    from _dummy_thread import allocate_lock as Lock
+
+import io
+from io import (__all__, SEEK_SET, SEEK_CUR, SEEK_END)
+from errno import EINTR
+
+# open() uses st_blksize whenever it can
+DEFAULT_BUFFER_SIZE = 8 * 1024  # bytes
+
+# NOTE: Base classes defined here are registered with the "official" ABCs
+# defined in io.py. We don't use real inheritance though, because we don't
+# want to inherit the C implementations.
+
+
+class BlockingIOError(IOError):
+
+    """Exception raised when I/O would block on a non-blocking I/O stream."""
+
+    def __init__(self, errno, strerror, characters_written=0):
+        super().__init__(errno, strerror)
+        if not isinstance(characters_written, int):
+            raise TypeError("characters_written must be a integer")
+        self.characters_written = characters_written
+
+
+def open(file, mode="r", buffering=-1, encoding=None, errors=None,
+         newline=None, closefd=True):
+
+    r"""Open file and return a stream.  Raise IOError upon failure.
+
+    file is either a text or byte string giving the name (and the path
+    if the file isn't in the current working directory) of the file to
+    be opened or an integer file descriptor of the file to be
+    wrapped. (If a file descriptor is given, it is closed when the
+    returned I/O object is closed, unless closefd is set to False.)
+
+    mode is an optional string that specifies the mode in which the file
+    is opened. It defaults to 'r' which means open for reading in text
+    mode.  Other common values are 'w' for writing (truncating the file if
+    it already exists), and 'a' for appending (which on some Unix systems,
+    means that all writes append to the end of the file regardless of the
+    current seek position). In text mode, if encoding is not specified the
+    encoding used is platform dependent. (For reading and writing raw
+    bytes use binary mode and leave encoding unspecified.) The available
+    modes are:
+
+    ========= ===============================================================
+    Character Meaning
+    --------- ---------------------------------------------------------------
+    'r'       open for reading (default)
+    'w'       open for writing, truncating the file first
+    'a'       open for writing, appending to the end of the file if it exists
+    'b'       binary mode
+    't'       text mode (default)
+    '+'       open a disk file for updating (reading and writing)
+    'U'       universal newline mode (for backwards compatibility; unneeded
+              for new code)
+    ========= ===============================================================
+
+    The default mode is 'rt' (open for reading text). For binary random
+    access, the mode 'w+b' opens and truncates the file to 0 bytes, while
+    'r+b' opens the file without truncation.
+
+    Python distinguishes between files opened in binary and text modes,
+    even when the underlying operating system doesn't. Files opened in
+    binary mode (appending 'b' to the mode argument) return contents as
+    bytes objects without any decoding. In text mode (the default, or when
+    't' is appended to the mode argument), the contents of the file are
+    returned as strings, the bytes having been first decoded using a
+    platform-dependent encoding or using the specified encoding if given.
+
+    buffering is an optional integer used to set the buffering policy.
+    Pass 0 to switch buffering off (only allowed in binary mode), 1 to select
+    line buffering (only usable in text mode), and an integer > 1 to indicate
+    the size of a fixed-size chunk buffer.  When no buffering argument is
+    given, the default buffering policy works as follows:
+
+    * Binary files are buffered in fixed-size chunks; the size of the buffer
+      is chosen using a heuristic trying to determine the underlying device's
+      "block size" and falling back on `io.DEFAULT_BUFFER_SIZE`.
+      On many systems, the buffer will typically be 4096 or 8192 bytes long.
+
+    * "Interactive" text files (files for which isatty() returns True)
+      use line buffering.  Other text files use the policy described above
+      for binary files.
+
+    encoding is the str name of the encoding used to decode or encode the
+    file. This should only be used in text mode. The default encoding is
+    platform dependent, but any encoding supported by Python can be
+    passed.  See the codecs module for the list of supported encodings.
+
+    errors is an optional string that specifies how encoding errors are to
+    be handled---this argument should not be used in binary mode. Pass
+    'strict' to raise a ValueError exception if there is an encoding error
+    (the default of None has the same effect), or pass 'ignore' to ignore
+    errors. (Note that ignoring encoding errors can lead to data loss.)
+    See the documentation for codecs.register for a list of the permitted
+    encoding error strings.
+
+    newline is a string controlling how universal newlines works (it only
+    applies to text mode). It can be None, '', '\n', '\r', and '\r\n'.  It works
+    as follows:
+
+    * On input, if newline is None, universal newlines mode is
+      enabled. Lines in the input can end in '\n', '\r', or '\r\n', and
+      these are translated into '\n' before being returned to the
+      caller. If it is '', universal newline mode is enabled, but line
+      endings are returned to the caller untranslated. If it has any of
+      the other legal values, input lines are only terminated by the given
+      string, and the line ending is returned to the caller untranslated.
+
+    * On output, if newline is None, any '\n' characters written are
+      translated to the system default line separator, os.linesep. If
+      newline is '', no translation takes place. If newline is any of the
+      other legal values, any '\n' characters written are translated to
+      the given string.
+
+    closefd is a bool. If closefd is False, the underlying file descriptor will
+    be kept open when the file is closed. This does not work when a file name is
+    given and must be True in that case.
+
+    open() returns a file object whose type depends on the mode, and
+    through which the standard file operations such as reading and writing
+    are performed. When open() is used to open a file in a text mode ('w',
+    'r', 'wt', 'rt', etc.), it returns a TextIOWrapper. When used to open
+    a file in a binary mode, the returned class varies: in read binary
+    mode, it returns a BufferedReader; in write binary and append binary
+    modes, it returns a BufferedWriter, and in read/write mode, it returns
+    a BufferedRandom.
+
+    It is also possible to use a string or bytearray as a file for both
+    reading and writing. For strings StringIO can be used like a file
+    opened in a text mode, and for bytes a BytesIO can be used like a file
+    opened in a binary mode.
+    """
+    if not isinstance(file, (str, bytes, int)):
+        raise TypeError("invalid file: %r" % file)
+    if not isinstance(mode, str):
+        raise TypeError("invalid mode: %r" % mode)
+    if not isinstance(buffering, int):
+        raise TypeError("invalid buffering: %r" % buffering)
+    if encoding is not None and not isinstance(encoding, str):
+        raise TypeError("invalid encoding: %r" % encoding)
+    if errors is not None and not isinstance(errors, str):
+        raise TypeError("invalid errors: %r" % errors)
+    modes = set(mode)
+    if modes - set("arwb+tU") or len(mode) > len(modes):
+        raise ValueError("invalid mode: %r" % mode)
+    reading = "r" in modes
+    writing = "w" in modes
+    appending = "a" in modes
+    updating = "+" in modes
+    text = "t" in modes
+    binary = "b" in modes
+    if "U" in modes:
+        if writing or appending:
+            raise ValueError("can't use U and writing mode at once")
+        reading = True
+    if text and binary:
+        raise ValueError("can't have text and binary mode at once")
+    if reading + writing + appending > 1:
+        raise ValueError("can't have read/write/append mode at once")
+    if not (reading or writing or appending):
+        raise ValueError("must have exactly one of read/write/append mode")
+    if binary and encoding is not None:
+        raise ValueError("binary mode doesn't take an encoding argument")
+    if binary and errors is not None:
+        raise ValueError("binary mode doesn't take an errors argument")
+    if binary and newline is not None:
+        raise ValueError("binary mode doesn't take a newline argument")
+    raw = FileIO(file,
+                 (reading and "r" or "") +
+                 (writing and "w" or "") +
+                 (appending and "a" or "") +
+                 (updating and "+" or ""),
+                 closefd)
+    line_buffering = False
+    if buffering == 1 or buffering < 0 and raw.isatty():
+        buffering = -1
+        line_buffering = True
+    if buffering < 0:
+        buffering = DEFAULT_BUFFER_SIZE
+        try:
+            bs = os.fstat(raw.fileno()).st_blksize
+        except (os.error, AttributeError):
+            pass
+        else:
+            if bs > 1:
+                buffering = bs
+    if buffering < 0:
+        raise ValueError("invalid buffering size")
+    if buffering == 0:
+        if binary:
+            return raw
+        raise ValueError("can't have unbuffered text I/O")
+    if updating:
+        buffer = BufferedRandom(raw, buffering)
+    elif writing or appending:
+        buffer = BufferedWriter(raw, buffering)
+    elif reading:
+        buffer = BufferedReader(raw, buffering)
+    else:
+        raise ValueError("unknown mode: %r" % mode)
+    if binary:
+        return buffer
+    text = TextIOWrapper(buffer, encoding, errors, newline, line_buffering)
+    text.mode = mode
+    return text
+
+
+class DocDescriptor:
+    """Helper for builtins.open.__doc__
+    """
+    def __get__(self, obj, typ):
+        return (
+            "open(file, mode='r', buffering=-1, encoding=None, "
+                 "errors=None, newline=None, closefd=True)\n\n" +
+            open.__doc__)
+
+class OpenWrapper:
+    """Wrapper for builtins.open
+
+    Trick so that open won't become a bound method when stored
+    as a class variable (as dbm.dumb does).
+
+    See initstdio() in Python/pythonrun.c.
+    """
+    __doc__ = DocDescriptor()
+
+    def __new__(cls, *args, **kwargs):
+        return open(*args, **kwargs)
+
+
+# In normal operation, both `UnsupportedOperation`s should be bound to the
+# same object.
+try:
+    UnsupportedOperation = io.UnsupportedOperation
+except AttributeError:
+    class UnsupportedOperation(ValueError, IOError):
+        pass
+
+
+class IOBase(metaclass=abc.ABCMeta):
+
+    """The abstract base class for all I/O classes, acting on streams of
+    bytes. There is no public constructor.
+
+    This class provides dummy implementations for many methods that
+    derived classes can override selectively; the default implementations
+    represent a file that cannot be read, written or seeked.
+
+    Even though IOBase does not declare read, readinto, or write because
+    their signatures will vary, implementations and clients should
+    consider those methods part of the interface. Also, implementations
+    may raise UnsupportedOperation when operations they do not support are
+    called.
+
+    The basic type used for binary data read from or written to a file is
+    bytes. bytearrays are accepted too, and in some cases (such as
+    readinto) needed. Text I/O classes work with str data.
+
+    Note that calling any method (even inquiries) on a closed stream is
+    undefined. Implementations may raise IOError in this case.
+
+    IOBase (and its subclasses) support the iterator protocol, meaning
+    that an IOBase object can be iterated over yielding the lines in a
+    stream.
+
+    IOBase also supports the :keyword:`with` statement. In this example,
+    fp is closed after the suite of the with statement is complete:
+
+    with open('spam.txt', 'w') as fp:
+        fp.write('Spam and eggs!')
+    """
+
+    ### Internal ###
+
+    def _unsupported(self, name):
+        """Internal: raise an IOError exception for unsupported operations."""
+        raise UnsupportedOperation("%s.%s() not supported" %
+                                   (self.__class__.__name__, name))
+
+    ### Positioning ###
+
+    def seek(self, pos, whence=0):
+        """Change stream position.
+
+        Change the stream position to the given byte offset.  The offset is
+        interpreted relative to the position indicated by whence.  Values
+        for whence are ints:
+
+        * 0 -- start of stream (the default); offset should be zero or positive
+        * 1 -- current stream position; offset may be negative
+        * 2 -- end of stream; offset is usually negative
+
+        Return an int indicating the new absolute position.
+        """
+        self._unsupported("seek")
+
+    def tell(self):
+        """Return an int indicating the current stream position."""
+        return self.seek(0, 1)
+
+    def truncate(self, pos=None):
+        """Truncate file to size bytes.
+
+        Size defaults to the current IO position as reported by tell().  Return
+        the new size.
+        """
+        self._unsupported("truncate")
+
+    ### Flush and close ###
+
+    def flush(self):
+        """Flush write buffers, if applicable.
+
+        This is not implemented for read-only and non-blocking streams.
+        """
+        self._checkClosed()
+        # XXX Should this return the number of bytes written???
+
+    __closed = False
+
+    def close(self):
+        """Flush and close the IO object.
+
+        This method has no effect if the file is already closed.
+        """
+        if not self.__closed:
+            self.flush()
+            self.__closed = True
+
+    def __del__(self):
+        """Destructor.  Calls close()."""
+        # The try/except block is in case this is called at program
+        # exit time, when it's possible that globals have already been
+        # deleted, and then the close() call might fail.  Since
+        # there's nothing we can do about such failures and they annoy
+        # the end users, we suppress the traceback.
+        try:
+            self.close()
+        except:
+            pass
+
+    ### Inquiries ###
+
+    def seekable(self):
+        """Return a bool indicating whether object supports random access.
+
+        If False, seek(), tell() and truncate() will raise UnsupportedOperation.
+        This method may need to do a test seek().
+        """
+        return False
+
+    def _checkSeekable(self, msg=None):
+        """Internal: raise UnsupportedOperation if file is not seekable
+        """
+        if not self.seekable():
+            raise UnsupportedOperation("File or stream is not seekable."
+                                       if msg is None else msg)
+
+    def readable(self):
+        """Return a bool indicating whether object was opened for reading.
+
+        If False, read() will raise UnsupportedOperation.
+        """
+        return False
+
+    def _checkReadable(self, msg=None):
+        """Internal: raise UnsupportedOperation if file is not readable
+        """
+        if not self.readable():
+            raise UnsupportedOperation("File or stream is not readable."
+                                       if msg is None else msg)
+
+    def writable(self):
+        """Return a bool indicating whether object was opened for writing.
+
+        If False, write() and truncate() will raise UnsupportedOperation.
+        """
+        return False
+
+    def _checkWritable(self, msg=None):
+        """Internal: raise UnsupportedOperation if file is not writable
+        """
+        if not self.writable():
+            raise UnsupportedOperation("File or stream is not writable."
+                                       if msg is None else msg)
+
+    @property
+    def closed(self):
+        """closed: bool.  True iff the file has been closed.
+
+        For backwards compatibility, this is a property, not a predicate.
+        """
+        return self.__closed
+
+    def _checkClosed(self, msg=None):
+        """Internal: raise an ValueError if file is closed
+        """
+        if self.closed:
+            raise ValueError("I/O operation on closed file."
+                             if msg is None else msg)
+
+    ### Context manager ###
+
+    def __enter__(self):  # That's a forward reference
+        """Context management protocol.  Returns self (an instance of IOBase)."""
+        self._checkClosed()
+        return self
+
+    def __exit__(self, *args):
+        """Context management protocol.  Calls close()"""
+        self.close()
+
+    ### Lower-level APIs ###
+
+    # XXX Should these be present even if unimplemented?
+
+    def fileno(self):
+        """Returns underlying file descriptor (an int) if one exists.
+
+        An IOError is raised if the IO object does not use a file descriptor.
+        """
+        self._unsupported("fileno")
+
+    def isatty(self):
+        """Return a bool indicating whether this is an 'interactive' stream.
+
+        Return False if it can't be determined.
+        """
+        self._checkClosed()
+        return False
+
+    ### Readline[s] and writelines ###
+
+    def readline(self, limit=-1):
+        r"""Read and return a line of bytes from the stream.
+
+        If limit is specified, at most limit bytes will be read.
+        Limit should be an int.
+
+        The line terminator is always b'\n' for binary files; for text
+        files, the newlines argument to open can be used to select the line
+        terminator(s) recognized.
+        """
+        # For backwards compatibility, a (slowish) readline().
+        if hasattr(self, "peek"):
+            def nreadahead():
+                readahead = self.peek(1)
+                if not readahead:
+                    return 1
+                n = (readahead.find(b"\n") + 1) or len(readahead)
+                if limit >= 0:
+                    n = min(n, limit)
+                return n
+        else:
+            def nreadahead():
+                return 1
+        if limit is None:
+            limit = -1
+        elif not isinstance(limit, int):
+            raise TypeError("limit must be an integer")
+        res = bytearray()
+        while limit < 0 or len(res) < limit:
+            b = self.read(nreadahead())
+            if not b:
+                break
+            res += b
+            if res.endswith(b"\n"):
+                break
+        return bytes(res)
+
+    def __iter__(self):
+        self._checkClosed()
+        return self
+
+    def __next__(self):
+        line = self.readline()
+        if not line:
+            raise StopIteration
+        return line
+
+    def readlines(self, hint=None):
+        """Return a list of lines from the stream.
+
+        hint can be specified to control the number of lines read: no more
+        lines will be read if the total size (in bytes/characters) of all
+        lines so far exceeds hint.
+        """
+        if hint is None or hint <= 0:
+            return list(self)
+        n = 0
+        lines = []
+        for line in self:
+            lines.append(line)
+            n += len(line)
+            if n >= hint:
+                break
+        return lines
+
+    def writelines(self, lines):
+        self._checkClosed()
+        for line in lines:
+            self.write(line)
+
+io.IOBase.register(IOBase)
+
+
+class RawIOBase(IOBase):
+
+    """Base class for raw binary I/O."""
+
+    # The read() method is implemented by calling readinto(); derived
+    # classes that want to support read() only need to implement
+    # readinto() as a primitive operation.  In general, readinto() can be
+    # more efficient than read().
+
+    # (It would be tempting to also provide an implementation of
+    # readinto() in terms of read(), in case the latter is a more suitable
+    # primitive operation, but that would lead to nasty recursion in case
+    # a subclass doesn't implement either.)
+
+    def read(self, n=-1):
+        """Read and return up to n bytes, where n is an int.
+
+        Returns an empty bytes object on EOF, or None if the object is
+        set not to block and has no data to read.
+        """
+        if n is None:
+            n = -1
+        if n < 0:
+            return self.readall()
+        b = bytearray(n.__index__())
+        n = self.readinto(b)
+        if n is None:
+            return None
+        del b[n:]
+        return bytes(b)
+
+    def readall(self):
+        """Read until EOF, using multiple read() call."""
+        res = bytearray()
+        while True:
+            data = self.read(DEFAULT_BUFFER_SIZE)
+            if not data:
+                break
+            res += data
+        if res:
+            return bytes(res)
+        else:
+            # b'' or None
+            return data
+
+    def readinto(self, b):
+        """Read up to len(b) bytes into bytearray b.
+
+        Returns an int representing the number of bytes read (0 for EOF), or
+        None if the object is set not to block and has no data to read.
+        """
+        self._unsupported("readinto")
+
+    def write(self, b):
+        """Write the given buffer to the IO stream.
+
+        Returns the number of bytes written, which may be less than len(b).
+        """
+        self._unsupported("write")
+
+io.RawIOBase.register(RawIOBase)
+from _io import FileIO
+RawIOBase.register(FileIO)
+
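+# Illustrative sketch (an editorial addition, not part of the original
+# module; the names below are hypothetical): as the comments in RawIOBase
+# explain, a subclass only needs readinto() as its primitive -- read() and
+# readall() are then derived from it.
+def _example_minimal_raw_stream():
+    import io
+    class ByteSource(io.RawIOBase):
+        def __init__(self, data):
+            self._data = bytearray(data)
+        def readable(self):
+            return True
+        def readinto(self, b):
+            n = min(len(b), len(self._data))
+            b[:n] = self._data[:n]
+            del self._data[:n]
+            return n
+    src = ByteSource(b"spam and eggs")
+    assert src.read(4) == b"spam"        # synthesized from readinto()
+    assert src.readall() == b" and eggs"
+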
+
+class BufferedIOBase(IOBase):
+
+    """Base class for buffered IO objects.
+
+    The main difference with RawIOBase is that the read() method
+    supports omitting the size argument, and does not have a default
+    implementation that defers to readinto().
+
+    In addition, read(), readinto() and write() may raise
+    BlockingIOError if the underlying raw stream is in non-blocking
+    mode and not ready; unlike their raw counterparts, they will never
+    return None.
+
+    A typical implementation should not inherit from a RawIOBase
+    implementation, but wrap one.
+    """
+
+    def read(self, n=None):
+        """Read and return up to n bytes, where n is an int.
+
+        If the argument is omitted, None, or negative, reads and
+        returns all data until EOF.
+
+        If the argument is positive, and the underlying raw stream is
+        not 'interactive', multiple raw reads may be issued to satisfy
+        the byte count (unless EOF is reached first).  But for
+        interactive raw streams (XXX and for pipes?), at most one raw
+        read will be issued, and a short result does not imply that
+        EOF is imminent.
+
+        Returns an empty bytes array on EOF.
+
+        Raises BlockingIOError if the underlying raw stream has no
+        data at the moment.
+        """
+        self._unsupported("read")
+
+    def read1(self, n=None):
+        """Read up to n bytes with at most one read() system call,
+        where n is an int.
+        """
+        self._unsupported("read1")
+
+    def readinto(self, b):
+        """Read up to len(b) bytes into bytearray b.
+
+        Like read(), this may issue multiple reads to the underlying raw
+        stream, unless the latter is 'interactive'.
+
+        Returns an int representing the number of bytes read (0 for EOF).
+
+        Raises BlockingIOError if the underlying raw stream has no
+        data at the moment.
+        """
+        # XXX This ought to work with anything that supports the buffer API
+        data = self.read(len(b))
+        n = len(data)
+        try:
+            b[:n] = data
+        except TypeError as err:
+            import array
+            if not isinstance(b, array.array):
+                raise err
+            b[:n] = array.array('b', data)
+        return n
+
+    def write(self, b):
+        """Write the given bytes buffer to the IO stream.
+
+        Return the number of bytes written, which is never less than
+        len(b).
+
+        Raises BlockingIOError if the buffer is full and the
+        underlying raw stream cannot accept more data at the moment.
+        """
+        self._unsupported("write")
+
+    def detach(self):
+        """
+        Separate the underlying raw stream from the buffer and return it.
+
+        After the raw stream has been detached, the buffer is in an unusable
+        state.
+        """
+        self._unsupported("detach")
+
+io.BufferedIOBase.register(BufferedIOBase)
+
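+# Illustrative sketch (an editorial addition, not part of the original
+# module): the default readinto() above is built on read() -- it fetches
+# len(b) bytes and copies them into the caller's buffer.  Concrete classes
+# such as BytesIO provide the same behaviour:
+def _example_buffered_readinto():
+    import io
+    buf = bytearray(5)
+    n = io.BytesIO(b"spam and eggs").readinto(buf)
+    assert n == 5 and bytes(buf) == b"spam "
+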
+
+class _BufferedIOMixin(BufferedIOBase):
+
+    """A mixin implementation of BufferedIOBase with an underlying raw stream.
+
+    This passes most requests on to the underlying raw stream.  It
+    does *not* provide implementations of read(), readinto() or
+    write().
+    """
+
+    def __init__(self, raw):
+        self._raw = raw
+
+    ### Positioning ###
+
+    def seek(self, pos, whence=0):
+        new_position = self.raw.seek(pos, whence)
+        if new_position < 0:
+            raise IOError("seek() returned an invalid position")
+        return new_position
+
+    def tell(self):
+        pos = self.raw.tell()
+        if pos < 0:
+            raise IOError("tell() returned an invalid position")
+        return pos
+
+    def truncate(self, pos=None):
+        # Flush the stream.  We're mixing buffered I/O with lower-level I/O,
+        # and a flush may be necessary to synch both views of the current
+        # file state.
+        self.flush()
+
+        if pos is None:
+            pos = self.tell()
+        # XXX: Should seek() be used, instead of passing the position
+        # XXX  directly to truncate?
+        return self.raw.truncate(pos)
+
+    ### Flush and close ###
+
+    def flush(self):
+        if self.closed:
+            raise ValueError("flush of closed file")
+        self.raw.flush()
+
+    def close(self):
+        if self.raw is not None and not self.closed:
+            try:
+                # may raise BlockingIOError or BrokenPipeError etc
+                self.flush()
+            finally:
+                self.raw.close()
+
+    def detach(self):
+        if self.raw is None:
+            raise ValueError("raw stream already detached")
+        self.flush()
+        raw = self._raw
+        self._raw = None
+        return raw
+
+    ### Inquiries ###
+
+    def seekable(self):
+        return self.raw.seekable()
+
+    def readable(self):
+        return self.raw.readable()
+
+    def writable(self):
+        return self.raw.writable()
+
+    @property
+    def raw(self):
+        return self._raw
+
+    @property
+    def closed(self):
+        return self.raw.closed
+
+    @property
+    def name(self):
+        return self.raw.name
+
+    @property
+    def mode(self):
+        return self.raw.mode
+
+    def __getstate__(self):
+        raise TypeError("can not serialize a '{0}' object"
+                        .format(self.__class__.__name__))
+
+    def __repr__(self):
+        clsname = self.__class__.__name__
+        try:
+            name = self.name
+        except AttributeError:
+            return "<_pyio.{0}>".format(clsname)
+        else:
+            return "<_pyio.{0} name={1!r}>".format(clsname, name)
+
+    ### Lower-level APIs ###
+
+    def fileno(self):
+        return self.raw.fileno()
+
+    def isatty(self):
+        return self.raw.isatty()
+
+
+class BytesIO(BufferedIOBase):
+
+    """Buffered I/O implementation using an in-memory bytes buffer."""
+
+    def __init__(self, initial_bytes=None):
+        buf = bytearray()
+        if initial_bytes is not None:
+            buf += initial_bytes
+        self._buffer = buf
+        self._pos = 0
+
+    def __getstate__(self):
+        if self.closed:
+            raise ValueError("__getstate__ on closed file")
+        return self.__dict__.copy()
+
+    def getvalue(self):
+        """Return the bytes value (contents) of the buffer
+        """
+        if self.closed:
+            raise ValueError("getvalue on closed file")
+        return bytes(self._buffer)
+
+    def getbuffer(self):
+        """Return a readable and writable view of the buffer.
+        """
+        return memoryview(self._buffer)
+
+    def read(self, n=None):
+        if self.closed:
+            raise ValueError("read from closed file")
+        if n is None:
+            n = -1
+        if n < 0:
+            n = len(self._buffer)
+        if len(self._buffer) <= self._pos:
+            return b""
+        newpos = min(len(self._buffer), self._pos + n)
+        b = self._buffer[self._pos : newpos]
+        self._pos = newpos
+        return bytes(b)
+
+    def read1(self, n):
+        """This is the same as read.
+        """
+        return self.read(n)
+
+    def write(self, b):
+        if self.closed:
+            raise ValueError("write to closed file")
+        if isinstance(b, str):
+            raise TypeError("can't write str to binary stream")
+        n = len(b)
+        if n == 0:
+            return 0
+        pos = self._pos
+        if pos > len(self._buffer):
+            # Inserts null bytes between the current end of the file
+            # and the new write position.
+            padding = b'\x00' * (pos - len(self._buffer))
+            self._buffer += padding
+        self._buffer[pos:pos + n] = b
+        self._pos += n
+        return n
+
+    def seek(self, pos, whence=0):
+        if self.closed:
+            raise ValueError("seek on closed file")
+        try:
+            pos.__index__
+        except AttributeError as err:
+            raise TypeError("an integer is required") from err
+        if whence == 0:
+            if pos < 0:
+                raise ValueError("negative seek position %r" % (pos,))
+            self._pos = pos
+        elif whence == 1:
+            self._pos = max(0, self._pos + pos)
+        elif whence == 2:
+            self._pos = max(0, len(self._buffer) + pos)
+        else:
+            raise ValueError("invalid whence value")
+        return self._pos
+
+    def tell(self):
+        if self.closed:
+            raise ValueError("tell on closed file")
+        return self._pos
+
+    def truncate(self, pos=None):
+        if self.closed:
+            raise ValueError("truncate on closed file")
+        if pos is None:
+            pos = self._pos
+        else:
+            try:
+                pos.__index__
+            except AttributeError as err:
+                raise TypeError("an integer is required") from err
+            if pos < 0:
+                raise ValueError("negative truncate position %r" % (pos,))
+        del self._buffer[pos:]
+        return pos
+
+    def readable(self):
+        return True
+
+    def writable(self):
+        return True
+
+    def seekable(self):
+        return True
+
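+# Illustrative sketch (an editorial addition, not part of the original
+# module): write() pads with null bytes when the position has been seek()ed
+# past the current end of the buffer, as the comment in write() notes.
+def _example_bytesio_padding():
+    import io
+    b = io.BytesIO(b"ab")
+    b.seek(5)
+    b.write(b"cd")
+    assert b.getvalue() == b"ab\x00\x00\x00cd"
+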
+
+class BufferedReader(_BufferedIOMixin):
+
+    """BufferedReader(raw[, buffer_size])
+
+    A buffer for a readable, sequential RawIOBase object.
+
+    The constructor creates a BufferedReader for the given readable raw
+    stream and buffer_size. If buffer_size is omitted, DEFAULT_BUFFER_SIZE
+    is used.
+    """
+
+    def __init__(self, raw, buffer_size=DEFAULT_BUFFER_SIZE):
+        """Create a new buffered reader using the given readable raw IO object.
+        """
+        if not raw.readable():
+            raise IOError('"raw" argument must be readable.')
+
+        _BufferedIOMixin.__init__(self, raw)
+        if buffer_size <= 0:
+            raise ValueError("invalid buffer size")
+        self.buffer_size = buffer_size
+        self._reset_read_buf()
+        self._read_lock = Lock()
+
+    def _reset_read_buf(self):
+        self._read_buf = b""
+        self._read_pos = 0
+
+    def read(self, n=None):
+        """Read n bytes.
+
+        Returns exactly n bytes of data unless the underlying raw IO
+        stream reaches EOF or if the call would block in non-blocking
+        mode. If n is negative, read until EOF or until read() would
+        block.
+        """
+        if n is not None and n < -1:
+            raise ValueError("invalid number of bytes to read")
+        with self._read_lock:
+            return self._read_unlocked(n)
+
+    def _read_unlocked(self, n=None):
+        nodata_val = b""
+        empty_values = (b"", None)
+        buf = self._read_buf
+        pos = self._read_pos
+
+        # Special case for when the number of bytes to read is unspecified.
+        if n is None or n == -1:
+            self._reset_read_buf()
+            chunks = [buf[pos:]]  # Strip the consumed bytes.
+            current_size = 0
+            while True:
+                # Read until EOF or until read() would block.
+                try:
+                    chunk = self.raw.read()
+                except IOError as e:
+                    if e.errno != EINTR:
+                        raise
+                    continue
+                if chunk in empty_values:
+                    nodata_val = chunk
+                    break
+                current_size += len(chunk)
+                chunks.append(chunk)
+            return b"".join(chunks) or nodata_val
+
+        # The number of bytes to read is specified, return at most n bytes.
+        avail = len(buf) - pos  # Length of the available buffered data.
+        if n <= avail:
+            # Fast path: the data to read is fully buffered.
+            self._read_pos += n
+            return buf[pos:pos+n]
+        # Slow path: read from the stream until enough bytes are read,
+        # or until an EOF occurs or until read() would block.
+        chunks = [buf[pos:]]
+        wanted = max(self.buffer_size, n)
+        while avail < n:
+            try:
+                chunk = self.raw.read(wanted)
+            except IOError as e:
+                if e.errno != EINTR:
+                    raise
+                continue
+            if chunk in empty_values:
+                nodata_val = chunk
+                break
+            avail += len(chunk)
+            chunks.append(chunk)
+        # n is more than avail only when an EOF occurred or when
+        # read() would have blocked.
+        n = min(n, avail)
+        out = b"".join(chunks)
+        self._read_buf = out[n:]  # Save the extra data in the buffer.
+        self._read_pos = 0
+        return out[:n] if out else nodata_val
+
+    def peek(self, n=0):
+        """Returns buffered bytes without advancing the position.
+
+        The argument indicates a desired minimal number of bytes; we
+        do at most one raw read to satisfy it.  We never return more
+        than self.buffer_size.
+        """
+        with self._read_lock:
+            return self._peek_unlocked(n)
+
+    def _peek_unlocked(self, n=0):
+        want = min(n, self.buffer_size)
+        have = len(self._read_buf) - self._read_pos
+        if have < want or have <= 0:
+            to_read = self.buffer_size - have
+            while True:
+                try:
+                    current = self.raw.read(to_read)
+                except IOError as e:
+                    if e.errno != EINTR:
+                        raise
+                    continue
+                break
+            if current:
+                self._read_buf = self._read_buf[self._read_pos:] + current
+                self._read_pos = 0
+        return self._read_buf[self._read_pos:]
+
+    def read1(self, n):
+        """Reads up to n bytes, with at most one read() system call."""
+        # Returns up to n bytes.  If at least one byte is buffered, we
+        # only return buffered bytes.  Otherwise, we do one raw read.
+        if n < 0:
+            raise ValueError("number of bytes to read must be positive")
+        if n == 0:
+            return b""
+        with self._read_lock:
+            self._peek_unlocked(1)
+            return self._read_unlocked(
+                min(n, len(self._read_buf) - self._read_pos))
+
+    def tell(self):
+        return _BufferedIOMixin.tell(self) - len(self._read_buf) + self._read_pos
+
+    def seek(self, pos, whence=0):
+        if not (0 <= whence <= 2):
+            raise ValueError("invalid whence value")
+        with self._read_lock:
+            if whence == 1:
+                pos -= len(self._read_buf) - self._read_pos
+            pos = _BufferedIOMixin.seek(self, pos, whence)
+            self._reset_read_buf()
+            return pos
+
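+# Illustrative sketch (an editorial addition, not part of the original
+# module): peek() returns buffered bytes without advancing the position,
+# while read1() consumes at most one raw read's worth of data.
+def _example_peek_and_read1():
+    import io
+    r = io.BufferedReader(io.BytesIO(b"spam and eggs"))
+    assert r.peek(4)[:4] == b"spam"   # position unchanged
+    assert r.read1(4) == b"spam"      # served from the buffer
+    assert r.read() == b" and eggs"
+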
+class BufferedWriter(_BufferedIOMixin):
+
+    """A buffer for a writeable sequential RawIO object.
+
+    The constructor creates a BufferedWriter for the given writeable raw
+    stream. If the buffer_size is not given, it defaults to
+    DEFAULT_BUFFER_SIZE.
+    """
+
+    _warning_stack_offset = 2
+
+    def __init__(self, raw,
+                 buffer_size=DEFAULT_BUFFER_SIZE, max_buffer_size=None):
+        if not raw.writable():
+            raise IOError('"raw" argument must be writable.')
+
+        _BufferedIOMixin.__init__(self, raw)
+        if buffer_size <= 0:
+            raise ValueError("invalid buffer size")
+        if max_buffer_size is not None:
+            warnings.warn("max_buffer_size is deprecated", DeprecationWarning,
+                          self._warning_stack_offset)
+        self.buffer_size = buffer_size
+        self._write_buf = bytearray()
+        self._write_lock = Lock()
+
+    def write(self, b):
+        if self.closed:
+            raise ValueError("write to closed file")
+        if isinstance(b, str):
+            raise TypeError("can't write str to binary stream")
+        with self._write_lock:
+            # XXX we can implement some more tricks to try and avoid
+            # partial writes
+            if len(self._write_buf) > self.buffer_size:
+                # We're full, so let's pre-flush the buffer.  (This may
+                # raise BlockingIOError with characters_written == 0.)
+                self._flush_unlocked()
+            before = len(self._write_buf)
+            self._write_buf.extend(b)
+            written = len(self._write_buf) - before
+            if len(self._write_buf) > self.buffer_size:
+                try:
+                    self._flush_unlocked()
+                except BlockingIOError as e:
+                    if len(self._write_buf) > self.buffer_size:
+                        # We've hit the buffer_size. We have to accept a partial
+                        # write and cut back our buffer.
+                        overage = len(self._write_buf) - self.buffer_size
+                        written -= overage
+                        self._write_buf = self._write_buf[:self.buffer_size]
+                        raise BlockingIOError(e.errno, e.strerror, written)
+            return written
+
+    def truncate(self, pos=None):
+        with self._write_lock:
+            self._flush_unlocked()
+            if pos is None:
+                pos = self.raw.tell()
+            return self.raw.truncate(pos)
+
+    def flush(self):
+        with self._write_lock:
+            self._flush_unlocked()
+
+    def _flush_unlocked(self):
+        if self.closed:
+            raise ValueError("flush of closed file")
+        while self._write_buf:
+            try:
+                n = self.raw.write(self._write_buf)
+            except BlockingIOError:
+                raise RuntimeError("self.raw should implement RawIOBase: it "
+                                   "should not raise BlockingIOError")
+            except IOError as e:
+                if e.errno != EINTR:
+                    raise
+                continue
+            if n is None:
+                raise BlockingIOError(
+                    errno.EAGAIN,
+                    "write could not complete without blocking", 0)
+            if n > len(self._write_buf) or n < 0:
+                raise IOError("write() returned incorrect number of bytes")
+            del self._write_buf[:n]
+
+    def tell(self):
+        return _BufferedIOMixin.tell(self) + len(self._write_buf)
+
+    def seek(self, pos, whence=0):
+        if not (0 <= whence <= 2):
+            raise ValueError("invalid whence")
+        with self._write_lock:
+            self._flush_unlocked()
+            return _BufferedIOMixin.seek(self, pos, whence)
+
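+# Illustrative sketch (an editorial addition, not part of the original
+# module): small writes accumulate in the internal buffer and only reach
+# the raw stream once the buffer fills or flush() is called.
+def _example_buffered_writer():
+    import io
+    raw = io.BytesIO()
+    w = io.BufferedWriter(raw, buffer_size=1024)
+    w.write(b"spam")
+    assert raw.getvalue() == b""       # still held in the write buffer
+    w.flush()
+    assert raw.getvalue() == b"spam"   # now pushed to the raw stream
+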
+
+class BufferedRWPair(BufferedIOBase):
+
+    """A buffered reader and writer object together.
+
+    A buffered reader object and buffered writer object put together to
+    form a sequential IO object that can read and write. This is typically
+    used with a socket or two-way pipe.
+
+    reader and writer are RawIOBase objects that are readable and
+    writeable respectively. If the buffer_size is omitted it defaults to
+    DEFAULT_BUFFER_SIZE.
+    """
+
+    # XXX The usefulness of this (compared to having two separate IO
+    # objects) is questionable.
+
+    def __init__(self, reader, writer,
+                 buffer_size=DEFAULT_BUFFER_SIZE, max_buffer_size=None):
+        """Constructor.
+
+        The arguments are two RawIO instances.
+        """
+        if max_buffer_size is not None:
+            warnings.warn("max_buffer_size is deprecated", DeprecationWarning, 2)
+
+        if not reader.readable():
+            raise IOError('"reader" argument must be readable.')
+
+        if not writer.writable():
+            raise IOError('"writer" argument must be writable.')
+
+        self.reader = BufferedReader(reader, buffer_size)
+        self.writer = BufferedWriter(writer, buffer_size)
+
+    def read(self, n=None):
+        if n is None:
+            n = -1
+        return self.reader.read(n)
+
+    def readinto(self, b):
+        return self.reader.readinto(b)
+
+    def write(self, b):
+        return self.writer.write(b)
+
+    def peek(self, n=0):
+        return self.reader.peek(n)
+
+    def read1(self, n):
+        return self.reader.read1(n)
+
+    def readable(self):
+        return self.reader.readable()
+
+    def writable(self):
+        return self.writer.writable()
+
+    def flush(self):
+        return self.writer.flush()
+
+    def close(self):
+        self.writer.close()
+        self.reader.close()
+
+    def isatty(self):
+        return self.reader.isatty() or self.writer.isatty()
+
+    @property
+    def closed(self):
+        return self.writer.closed
+
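+# Illustrative sketch (an editorial addition, not part of the original
+# module): a BufferedRWPair glues two one-way raw streams into a single
+# read/write object, e.g. the two ends of a pipe wrapped in FileIO objects.
+def _example_rw_pair():
+    import io, os
+    rfd, wfd = os.pipe()
+    pair = io.BufferedRWPair(io.FileIO(rfd, "r"), io.FileIO(wfd, "w"))
+    pair.write(b"spam\n")
+    pair.flush()                        # push the data into the pipe
+    assert pair.readline() == b"spam\n"
+    pair.close()
+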
+
+class BufferedRandom(BufferedWriter, BufferedReader):
+
+    """A buffered interface to random access streams.
+
+    The constructor creates a reader and writer for a seekable stream,
+    raw, given in the first argument. If the buffer_size is omitted it
+    defaults to DEFAULT_BUFFER_SIZE.
+    """
+
+    _warning_stack_offset = 3
+
+    def __init__(self, raw,
+                 buffer_size=DEFAULT_BUFFER_SIZE, max_buffer_size=None):
+        raw._checkSeekable()
+        BufferedReader.__init__(self, raw, buffer_size)
+        BufferedWriter.__init__(self, raw, buffer_size, max_buffer_size)
+
+    def seek(self, pos, whence=0):
+        if not (0 <= whence <= 2):
+            raise ValueError("invalid whence")
+        self.flush()
+        if self._read_buf:
+            # Undo read ahead.
+            with self._read_lock:
+                self.raw.seek(self._read_pos - len(self._read_buf), 1)
+        # First do the raw seek, then empty the read buffer, so that
+        # if the raw seek fails, we don't lose buffered data forever.
+        pos = self.raw.seek(pos, whence)
+        with self._read_lock:
+            self._reset_read_buf()
+        if pos < 0:
+            raise IOError("seek() returned invalid position")
+        return pos
+
+    def tell(self):
+        if self._write_buf:
+            return BufferedWriter.tell(self)
+        else:
+            return BufferedReader.tell(self)
+
+    def truncate(self, pos=None):
+        if pos is None:
+            pos = self.tell()
+        # Use seek to flush the read buffer.
+        return BufferedWriter.truncate(self, pos)
+
+    def read(self, n=None):
+        if n is None:
+            n = -1
+        self.flush()
+        return BufferedReader.read(self, n)
+
+    def readinto(self, b):
+        self.flush()
+        return BufferedReader.readinto(self, b)
+
+    def peek(self, n=0):
+        self.flush()
+        return BufferedReader.peek(self, n)
+
+    def read1(self, n):
+        self.flush()
+        return BufferedReader.read1(self, n)
+
+    def write(self, b):
+        if self._read_buf:
+            # Undo readahead
+            with self._read_lock:
+                self.raw.seek(self._read_pos - len(self._read_buf), 1)
+                self._reset_read_buf()
+        return BufferedWriter.write(self, b)
+
+
+class TextIOBase(IOBase):
+
+    """Base class for text I/O.
+
+    This class provides a character and line based interface to stream
+    I/O. There is no readinto method because Python's character strings
+    are immutable. There is no public constructor.
+    """
+
+    def read(self, n=-1):
+        """Read at most n characters from stream, where n is an int.
+
+        Read from underlying buffer until we have n characters or we hit EOF.
+        If n is negative or omitted, read until EOF.
+
+        Returns a string.
+        """
+        self._unsupported("read")
+
+    def write(self, s):
+        """Write string s to stream and returning an int."""
+        self._unsupported("write")
+
+    def truncate(self, pos=None):
+        """Truncate size to pos, where pos is an int."""
+        self._unsupported("truncate")
+
+    def readline(self):
+        """Read until newline or EOF.
+
+        Returns an empty string if EOF is hit immediately.
+        """
+        self._unsupported("readline")
+
+    def detach(self):
+        """
+        Separate the underlying buffer from the TextIOBase and return it.
+
+        After the underlying buffer has been detached, the TextIO is in an
+        unusable state.
+        """
+        self._unsupported("detach")
+
+    @property
+    def encoding(self):
+        """Subclasses should override."""
+        return None
+
+    @property
+    def newlines(self):
+        """Line endings translated so far.
+
+        Only line endings translated during reading are considered.
+
+        Subclasses should override.
+        """
+        return None
+
+    @property
+    def errors(self):
+        """Error setting of the decoder or encoder.
+
+        Subclasses should override."""
+        return None
+
+io.TextIOBase.register(TextIOBase)
+
+
+class IncrementalNewlineDecoder(codecs.IncrementalDecoder):
+    r"""Codec used when reading a file in universal newlines mode.  It wraps
+    another incremental decoder, translating \r\n and \r into \n.  It also
+    records the types of newlines encountered.  When used with
+    translate=False, it ensures that the newline sequence is returned in
+    one piece.
+    """
+    def __init__(self, decoder, translate, errors='strict'):
+        codecs.IncrementalDecoder.__init__(self, errors=errors)
+        self.translate = translate
+        self.decoder = decoder
+        self.seennl = 0
+        self.pendingcr = False
+
+    def decode(self, input, final=False):
+        # decode input (with the eventual \r from a previous pass)
+        if self.decoder is None:
+            output = input
+        else:
+            output = self.decoder.decode(input, final=final)
+        if self.pendingcr and (output or final):
+            output = "\r" + output
+            self.pendingcr = False
+
+        # retain last \r even when not translating data:
+        # then readline() is sure to get \r\n in one pass
+        if output.endswith("\r") and not final:
+            output = output[:-1]
+            self.pendingcr = True
+
+        # Record which newlines are read
+        crlf = output.count('\r\n')
+        cr = output.count('\r') - crlf
+        lf = output.count('\n') - crlf
+        self.seennl |= (lf and self._LF) | (cr and self._CR) \
+                    | (crlf and self._CRLF)
+
+        if self.translate:
+            if crlf:
+                output = output.replace("\r\n", "\n")
+            if cr:
+                output = output.replace("\r", "\n")
+
+        return output
+
+    def getstate(self):
+        if self.decoder is None:
+            buf = b""
+            flag = 0
+        else:
+            buf, flag = self.decoder.getstate()
+        flag <<= 1
+        if self.pendingcr:
+            flag |= 1
+        return buf, flag
+
+    def setstate(self, state):
+        buf, flag = state
+        self.pendingcr = bool(flag & 1)
+        if self.decoder is not None:
+            self.decoder.setstate((buf, flag >> 1))
+
+    def reset(self):
+        self.seennl = 0
+        self.pendingcr = False
+        if self.decoder is not None:
+            self.decoder.reset()
+
+    _LF = 1
+    _CR = 2
+    _CRLF = 4
+
+    @property
+    def newlines(self):
+        return (None,
+                "\n",
+                "\r",
+                ("\r", "\n"),
+                "\r\n",
+                ("\n", "\r\n"),
+                ("\r", "\r\n"),
+                ("\r", "\n", "\r\n")
+               )[self.seennl]
+
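+# Illustrative sketch (an editorial addition, not part of the original
+# module): the pendingcr logic above guarantees that a "\r\n" pair split
+# across two decode() calls is still reported as a single newline.
+def _example_newline_decoder():
+    dec = IncrementalNewlineDecoder(decoder=None, translate=True)
+    out = dec.decode("spam\r")            # trailing "\r" is held back
+    out += dec.decode("\neggs", final=True)
+    assert out == "spam\neggs"
+    assert dec.newlines == "\r\n"         # seen as one line ending
+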
+
+class TextIOWrapper(TextIOBase):
+
+    r"""Character and line based layer over a BufferedIOBase object, buffer.
+
+    encoding gives the name of the encoding that the stream will be
+    decoded or encoded with. It defaults to locale.getpreferredencoding.
+
+    errors determines the strictness of encoding and decoding (see the
+    codecs.register) and defaults to "strict".
+
+    newline can be None, '', '\n', '\r', or '\r\n'.  It controls the
+    handling of line endings. If it is None, universal newlines is
+    enabled.  With this enabled, on input, the line endings '\n', '\r',
+    or '\r\n' are translated to '\n' before being returned to the
+    caller. Conversely, on output, '\n' is translated to the system
+    default line separator, os.linesep. If newline is any other of its
+    legal values, that newline becomes the newline when the file is read
+    and it is returned untranslated. On output, '\n' is converted to the
+    newline.
+
+    If line_buffering is True, a call to flush is implied when a call to
+    write contains a newline character.
+    """
+
+    _CHUNK_SIZE = 2048
+
+    def __init__(self, buffer, encoding=None, errors=None, newline=None,
+                 line_buffering=False, write_through=False):
+        if newline is not None and not isinstance(newline, str):
+            raise TypeError("illegal newline type: %r" % (type(newline),))
+        if newline not in (None, "", "\n", "\r", "\r\n"):
+            raise ValueError("illegal newline value: %r" % (newline,))
+        if encoding is None:
+            try:
+                encoding = os.device_encoding(buffer.fileno())
+            except (AttributeError, UnsupportedOperation):
+                pass
+            if encoding is None:
+                try:
+                    import locale
+                except ImportError:
+                    # Importing locale may fail if Python is being built
+                    encoding = "ascii"
+                else:
+                    encoding = locale.getpreferredencoding()
+
+        if not isinstance(encoding, str):
+            raise ValueError("invalid encoding: %r" % encoding)
+
+        if errors is None:
+            errors = "strict"
+        else:
+            if not isinstance(errors, str):
+                raise ValueError("invalid errors: %r" % errors)
+
+        self._buffer = buffer
+        self._line_buffering = line_buffering
+        self._encoding = encoding
+        self._errors = errors
+        self._readuniversal = not newline
+        self._readtranslate = newline is None
+        self._readnl = newline
+        self._writetranslate = newline != ''
+        self._writenl = newline or os.linesep
+        self._encoder = None
+        self._decoder = None
+        self._decoded_chars = ''  # buffer for text returned from decoder
+        self._decoded_chars_used = 0  # offset into _decoded_chars for read()
+        self._snapshot = None  # info for reconstructing decoder state
+        self._seekable = self._telling = self.buffer.seekable()
+        self._has_read1 = hasattr(self.buffer, 'read1')
+
+        if self._seekable and self.writable():
+            position = self.buffer.tell()
+            if position != 0:
+                try:
+                    self._get_encoder().setstate(0)
+                except LookupError:
+                    # Sometimes the encoder doesn't exist
+                    pass
+
+    # self._snapshot is either None, or a tuple (dec_flags, next_input)
+    # where dec_flags is the second (integer) item of the decoder state
+    # and next_input is the chunk of input bytes that comes next after the
+    # snapshot point.  We use this to reconstruct decoder states in tell().
+
+    # Naming convention:
+    #   - "bytes_..." for integer variables that count input bytes
+    #   - "chars_..." for integer variables that count decoded characters
+
+    def __repr__(self):
+        result = "<_pyio.TextIOWrapper"
+        try:
+            name = self.name
+        except AttributeError:
+            pass
+        else:
+            result += " name={0!r}".format(name)
+        try:
+            mode = self.mode
+        except AttributeError:
+            pass
+        else:
+            result += " mode={0!r}".format(mode)
+        return result + " encoding={0!r}>".format(self.encoding)
+
+    @property
+    def encoding(self):
+        return self._encoding
+
+    @property
+    def errors(self):
+        return self._errors
+
+    @property
+    def line_buffering(self):
+        return self._line_buffering
+
+    @property
+    def buffer(self):
+        return self._buffer
+
+    def seekable(self):
+        return self._seekable
+
+    def readable(self):
+        return self.buffer.readable()
+
+    def writable(self):
+        return self.buffer.writable()
+
+    def flush(self):
+        self.buffer.flush()
+        self._telling = self._seekable
+
+    def close(self):
+        if self.buffer is not None and not self.closed:
+            self.flush()
+            self.buffer.close()
+
+    @property
+    def closed(self):
+        return self.buffer.closed
+
+    @property
+    def name(self):
+        return self.buffer.name
+
+    def fileno(self):
+        return self.buffer.fileno()
+
+    def isatty(self):
+        return self.buffer.isatty()
+
+    def write(self, s):
+        'Write data, where s is a str'
+        if self.closed:
+            raise ValueError("write to closed file")
+        if not isinstance(s, str):
+            raise TypeError("can't write %s to text stream" %
+                            s.__class__.__name__)
+        length = len(s)
+        haslf = (self._writetranslate or self._line_buffering) and "\n" in s
+        if haslf and self._writetranslate and self._writenl != "\n":
+            s = s.replace("\n", self._writenl)
+        encoder = self._encoder or self._get_encoder()
+        # XXX What if we were just reading?
+        b = encoder.encode(s)
+        self.buffer.write(b)
+        if self._line_buffering and (haslf or "\r" in s):
+            self.flush()
+        self._snapshot = None
+        if self._decoder:
+            self._decoder.reset()
+        return length
+
+    def _get_encoder(self):
+        make_encoder = codecs.getincrementalencoder(self._encoding)
+        self._encoder = make_encoder(self._errors)
+        return self._encoder
+
+    def _get_decoder(self):
+        make_decoder = codecs.getincrementaldecoder(self._encoding)
+        decoder = make_decoder(self._errors)
+        if self._readuniversal:
+            decoder = IncrementalNewlineDecoder(decoder, self._readtranslate)
+        self._decoder = decoder
+        return decoder
+
+    # The following three methods implement an ADT for _decoded_chars.
+    # Text returned from the decoder is buffered here until the client
+    # requests it by calling our read() or readline() method.
+    def _set_decoded_chars(self, chars):
+        """Set the _decoded_chars buffer."""
+        self._decoded_chars = chars
+        self._decoded_chars_used = 0
+
+    def _get_decoded_chars(self, n=None):
+        """Advance into the _decoded_chars buffer."""
+        offset = self._decoded_chars_used
+        if n is None:
+            chars = self._decoded_chars[offset:]
+        else:
+            chars = self._decoded_chars[offset:offset + n]
+        self._decoded_chars_used += len(chars)
+        return chars
+
+    def _rewind_decoded_chars(self, n):
+        """Rewind the _decoded_chars buffer."""
+        if self._decoded_chars_used < n:
+            raise AssertionError("rewind decoded_chars out of bounds")
+        self._decoded_chars_used -= n
+
+    def _read_chunk(self):
+        """
+        Read and decode the next chunk of data from the BufferedReader.
+        """
+
+        # The return value is True unless EOF was reached.  The decoded
+        # string is placed in self._decoded_chars (replacing its previous
+        # value).  The entire input chunk is sent to the decoder, though
+        # some of it may remain buffered in the decoder, yet to be
+        # converted.
+
+        if self._decoder is None:
+            raise ValueError("no decoder")
+
+        if self._telling:
+            # To prepare for tell(), we need to snapshot a point in the
+            # file where the decoder's input buffer is empty.
+
+            dec_buffer, dec_flags = self._decoder.getstate()
+            # Given this, we know there was a valid snapshot point
+            # len(dec_buffer) bytes ago with decoder state (b'', dec_flags).
+
+        # Read a chunk, decode it, and put the result in self._decoded_chars.
+        if self._has_read1:
+            input_chunk = self.buffer.read1(self._CHUNK_SIZE)
+        else:
+            input_chunk = self.buffer.read(self._CHUNK_SIZE)
+        eof = not input_chunk
+        self._set_decoded_chars(self._decoder.decode(input_chunk, eof))
+
+        if self._telling:
+            # At the snapshot point, len(dec_buffer) bytes before the read,
+            # the next input to be decoded is dec_buffer + input_chunk.
+            self._snapshot = (dec_flags, dec_buffer + input_chunk)
+
+        return not eof
+
+    def _pack_cookie(self, position, dec_flags=0,
+                           bytes_to_feed=0, need_eof=0, chars_to_skip=0):
+        # The meaning of a tell() cookie is: seek to position, set the
+        # decoder flags to dec_flags, read bytes_to_feed bytes, feed them
+        # into the decoder with need_eof as the EOF flag, then skip
+        # chars_to_skip characters of the decoded result.  For most simple
+        # decoders, tell() will often just give a byte offset in the file.
+        return (position | (dec_flags<<64) | (bytes_to_feed<<128) |
+               (chars_to_skip<<192) | bool(need_eof)<<256)
+
+    def _unpack_cookie(self, bigint):
+        rest, position = divmod(bigint, 1<<64)
+        rest, dec_flags = divmod(rest, 1<<64)
+        rest, bytes_to_feed = divmod(rest, 1<<64)
+        need_eof, chars_to_skip = divmod(rest, 1<<64)
+        return position, dec_flags, bytes_to_feed, need_eof, chars_to_skip
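+    # Editorial worked example: a cookie for position=10, dec_flags=1,
+    # bytes_to_feed=2, need_eof=0, chars_to_skip=3 is the integer
+    # 10 | (1 << 64) | (2 << 128) | (3 << 192); _unpack_cookie() peels the
+    # fields back off with successive divmod(..., 1 << 64) calls and
+    # recovers (10, 1, 2, 0, 3).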
+
+    def tell(self):
+        if not self._seekable:
+            raise UnsupportedOperation("underlying stream is not seekable")
+        if not self._telling:
+            raise IOError("telling position disabled by next() call")
+        self.flush()
+        position = self.buffer.tell()
+        decoder = self._decoder
+        if decoder is None or self._snapshot is None:
+            if self._decoded_chars:
+                # This should never happen.
+                raise AssertionError("pending decoded text")
+            return position
+
+        # Skip backward to the snapshot point (see _read_chunk).
+        dec_flags, next_input = self._snapshot
+        position -= len(next_input)
+
+        # How many decoded characters have been used up since the snapshot?
+        chars_to_skip = self._decoded_chars_used
+        if chars_to_skip == 0:
+            # We haven't moved from the snapshot point.
+            return self._pack_cookie(position, dec_flags)
+
+        # Starting from the snapshot position, we will walk the decoder
+        # forward until it gives us enough decoded characters.
+        saved_state = decoder.getstate()
+        try:
+            # Note our initial start point.
+            decoder.setstate((b'', dec_flags))
+            start_pos = position
+            start_flags, bytes_fed, chars_decoded = dec_flags, 0, 0
+            need_eof = 0
+
+            # Feed the decoder one byte at a time.  As we go, note the
+            # nearest "safe start point" before the current location
+            # (a point where the decoder has nothing buffered, so seek()
+            # can safely start from there and advance to this location).
+            next_byte = bytearray(1)
+            for next_byte[0] in next_input:
+                bytes_fed += 1
+                chars_decoded += len(decoder.decode(next_byte))
+                dec_buffer, dec_flags = decoder.getstate()
+                if not dec_buffer and chars_decoded <= chars_to_skip:
+                    # Decoder buffer is empty, so this is a safe start point.
+                    start_pos += bytes_fed
+                    chars_to_skip -= chars_decoded
+                    start_flags, bytes_fed, chars_decoded = dec_flags, 0, 0
+                if chars_decoded >= chars_to_skip:
+                    break
+            else:
+                # We didn't get enough decoded data; signal EOF to get more.
+                chars_decoded += len(decoder.decode(b'', final=True))
+                need_eof = 1
+                if chars_decoded < chars_to_skip:
+                    raise IOError("can't reconstruct logical file position")
+
+            # The returned cookie corresponds to the last safe start point.
+            return self._pack_cookie(
+                start_pos, start_flags, bytes_fed, need_eof, chars_to_skip)
+        finally:
+            decoder.setstate(saved_state)
+
+    def truncate(self, pos=None):
+        self.flush()
+        if pos is None:
+            pos = self.tell()
+        return self.buffer.truncate(pos)
+
+    def detach(self):
+        if self.buffer is None:
+            raise ValueError("buffer is already detached")
+        self.flush()
+        buffer = self._buffer
+        self._buffer = None
+        return buffer
+
+    def seek(self, cookie, whence=0):
+        if self.closed:
+            raise ValueError("tell on closed file")
+        if not self._seekable:
+            raise UnsupportedOperation("underlying stream is not seekable")
+        if whence == 1: # seek relative to current position
+            if cookie != 0:
+                raise UnsupportedOperation("can't do nonzero cur-relative seeks")
+            # Seeking to the current position should attempt to
+            # sync the underlying buffer with the current position.
+            whence = 0
+            cookie = self.tell()
+        if whence == 2: # seek relative to end of file
+            if cookie != 0:
+                raise UnsupportedOperation("can't do nonzero end-relative seeks")
+            self.flush()
+            position = self.buffer.seek(0, 2)
+            self._set_decoded_chars('')
+            self._snapshot = None
+            if self._decoder:
+                self._decoder.reset()
+            return position
+        if whence != 0:
+            raise ValueError("invalid whence (%r, should be 0, 1 or 2)" %
+                             (whence,))
+        if cookie < 0:
+            raise ValueError("negative seek position %r" % (cookie,))
+        self.flush()
+
+        # The strategy of seek() is to go back to the safe start point
+        # and replay the effect of read(chars_to_skip) from there.
+        start_pos, dec_flags, bytes_to_feed, need_eof, chars_to_skip = \
+            self._unpack_cookie(cookie)
+
+        # Seek back to the safe start point.
+        self.buffer.seek(start_pos)
+        self._set_decoded_chars('')
+        self._snapshot = None
+
+        # Restore the decoder to its state from the safe start point.
+        if cookie == 0 and self._decoder:
+            self._decoder.reset()
+        elif self._decoder or dec_flags or chars_to_skip:
+            self._decoder = self._decoder or self._get_decoder()
+            self._decoder.setstate((b'', dec_flags))
+            self._snapshot = (dec_flags, b'')
+
+        if chars_to_skip:
+            # Just like _read_chunk, feed the decoder and save a snapshot.
+            input_chunk = self.buffer.read(bytes_to_feed)
+            self._set_decoded_chars(
+                self._decoder.decode(input_chunk, need_eof))
+            self._snapshot = (dec_flags, input_chunk)
+
+            # Skip chars_to_skip of the decoded characters.
+            if len(self._decoded_chars) < chars_to_skip:
+                raise IOError("can't restore logical file position")
+            self._decoded_chars_used = chars_to_skip
+
+        # Finally, reset the encoder (merely useful for proper BOM handling)
+        try:
+            encoder = self._encoder or self._get_encoder()
+        except LookupError:
+            # Sometimes the encoder doesn't exist
+            pass
+        else:
+            if cookie != 0:
+                encoder.setstate(0)
+            else:
+                encoder.reset()
+        return cookie
+
+    def read(self, n=None):
+        self._checkReadable()
+        if n is None:
+            n = -1
+        decoder = self._decoder or self._get_decoder()
+        try:
+            n.__index__
+        except AttributeError as err:
+            raise TypeError("an integer is required") from err
+        if n < 0:
+            # Read everything.
+            result = (self._get_decoded_chars() +
+                      decoder.decode(self.buffer.read(), final=True))
+            self._set_decoded_chars('')
+            self._snapshot = None
+            return result
+        else:
+            # Keep reading chunks until we have n characters to return.
+            eof = False
+            result = self._get_decoded_chars(n)
+            while len(result) < n and not eof:
+                eof = not self._read_chunk()
+                result += self._get_decoded_chars(n - len(result))
+            return result
+
+    def __next__(self):
+        self._telling = False
+        line = self.readline()
+        if not line:
+            self._snapshot = None
+            self._telling = self._seekable
+            raise StopIteration
+        return line
+
+    def readline(self, limit=None):
+        if self.closed:
+            raise ValueError("read from closed file")
+        if limit is None:
+            limit = -1
+        elif not isinstance(limit, int):
+            raise TypeError("limit must be an integer")
+
+        # Grab all the decoded text (we will rewind any extra bits later).
+        line = self._get_decoded_chars()
+
+        start = 0
+        # Make the decoder if it doesn't already exist.
+        if not self._decoder:
+            self._get_decoder()
+
+        pos = endpos = None
+        while True:
+            if self._readtranslate:
+                # Newlines are already translated, only search for \n
+                pos = line.find('\n', start)
+                if pos >= 0:
+                    endpos = pos + 1
+                    break
+                else:
+                    start = len(line)
+
+            elif self._readuniversal:
+                # Universal newline search. Find any of \r, \r\n, \n
+                # The decoder ensures that \r\n is not split in two pieces
+
+                # In C we'd look for these in parallel of course.
+                nlpos = line.find("\n", start)
+                crpos = line.find("\r", start)
+                if crpos == -1:
+                    if nlpos == -1:
+                        # Nothing found
+                        start = len(line)
+                    else:
+                        # Found \n
+                        endpos = nlpos + 1
+                        break
+                elif nlpos == -1:
+                    # Found lone \r
+                    endpos = crpos + 1
+                    break
+                elif nlpos < crpos:
+                    # Found \n
+                    endpos = nlpos + 1
+                    break
+                elif nlpos == crpos + 1:
+                    # Found \r\n
+                    endpos = crpos + 2
+                    break
+                else:
+                    # Found \r
+                    endpos = crpos + 1
+                    break
+            else:
+                # non-universal
+                pos = line.find(self._readnl)
+                if pos >= 0:
+                    endpos = pos + len(self._readnl)
+                    break
+
+            if limit >= 0 and len(line) >= limit:
+                endpos = limit  # reached length limit
+                break
+
+            # No line ending seen yet - get more data
+            while self._read_chunk():
+                if self._decoded_chars:
+                    break
+            if self._decoded_chars:
+                line += self._get_decoded_chars()
+            else:
+                # end of file
+                self._set_decoded_chars('')
+                self._snapshot = None
+                return line
+
+        if limit >= 0 and endpos > limit:
+            endpos = limit  # don't exceed limit
+
+        # Rewind _decoded_chars to just after the line ending we found.
+        self._rewind_decoded_chars(len(line) - endpos)
+        return line[:endpos]
+
+    @property
+    def newlines(self):
+        return self._decoder.newlines if self._decoder else None
+
+
+class StringIO(TextIOWrapper):
+    """Text I/O implementation using an in-memory buffer.
+
+    The initial_value argument sets the initial value of the object.  The
+    newline argument is like the one of TextIOWrapper's constructor.
+    """
+
+    def __init__(self, initial_value="", newline="\n"):
+        super(StringIO, self).__init__(BytesIO(),
+                                       encoding="utf-8",
+                                       errors="strict",
+                                       newline=newline)
+        # Issue #5645: make universal newlines semantics the same as in the
+        # C version, even under Windows.
+        if newline is None:
+            self._writetranslate = False
+        if initial_value is not None:
+            if not isinstance(initial_value, str):
+                raise TypeError("initial_value must be str or None, not {0}"
+                                .format(type(initial_value).__name__))
+            self.write(initial_value)
+            self.seek(0)
+
+    def getvalue(self):
+        self.flush()
+        return self.buffer.getvalue().decode(self._encoding, self._errors)
+
+    def __repr__(self):
+        # TextIOWrapper tells the encoding in its repr. In StringIO,
+        # that's an implementation detail.
+        return object.__repr__(self)
+
+    @property
+    def errors(self):
+        return None
+
+    @property
+    def encoding(self):
+        return None
+
+    def detach(self):
+        # This doesn't make sense on StringIO.
+        self._unsupported("detach")
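+
+
+# A minimal usage sketch of StringIO (editor's illustration, not part of
+# the original module):
+#
+#   >>> s = StringIO("hello\nworld\n")
+#   >>> s.readline()
+#   'hello\n'
+#   >>> s.read()
+#   'world\n'
+#   >>> s.getvalue()      # the full buffer, regardless of position
+#   'hello\nworld\n'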
diff --git a/lib-python/3.2/_strptime.py b/lib-python/3.2/_strptime.py
new file mode 100644
--- /dev/null
+++ b/lib-python/3.2/_strptime.py
@@ -0,0 +1,499 @@
+"""Strptime-related classes and functions.
+
+CLASSES:
+    LocaleTime -- Discovers and stores locale-specific time information
+    TimeRE -- Creates regexes for pattern matching a string of text containing
+                time information
+
+FUNCTIONS:
+    _getlang -- Figure out what language is being used for the locale
+    strptime -- Calculates the time struct represented by the passed-in string
+
+"""
+import time
+import locale
+import calendar
+from re import compile as re_compile
+from re import IGNORECASE, ASCII
+from re import escape as re_escape
+from datetime import (date as datetime_date,
+                      timedelta as datetime_timedelta,
+                      timezone as datetime_timezone)
+try:
+    from _thread import allocate_lock as _thread_allocate_lock
+except ImportError:
+    from _dummy_thread import allocate_lock as _thread_allocate_lock
+
+__all__ = []
+
+def _getlang():
+    # Figure out what the current language is set to.
+    return locale.getlocale(locale.LC_TIME)
+
+class LocaleTime(object):
+    """Stores and handles locale-specific information related to time.
+
+    ATTRIBUTES:
+        f_weekday -- full weekday names (7-item list)
+        a_weekday -- abbreviated weekday names (7-item list)
+        f_month -- full month names (13-item list; dummy value in [0], which
+                    is added by code)
+        a_month -- abbreviated month names (13-item list, dummy value in
+                    [0], which is added by code)
+        am_pm -- AM/PM representation (2-item list)
+        LC_date_time -- format string for date/time representation (string)
+        LC_date -- format string for date representation (string)
+        LC_time -- format string for time representation (string)
+        timezone -- daylight- and non-daylight-savings timezone representation
+                    (2-item list of sets)
+        lang -- Language used by instance (2-item tuple)
+    """
+
+    def __init__(self):
+        """Set all attributes.
+
+        Order of methods called matters for dependency reasons.
+
+        The locale language is recorded at the outset and then checked again
+        before exiting.  This makes sure that the attributes were not set with
+        a mix of information from more than one locale, which would most
+        likely happen when one thread calls a locale-dependent function while
+        another thread changes the locale mid-call.  Proper coding would use
+        locks to prevent the locale from changing while locale-dependent code
+        is running; the check here is done in case someone does not think of
+        doing that.
+
+        The only other possible issue is if someone changed the timezone and
+        did not call time.tzset().  That is an issue for the programmer,
+        though, since changing the timezone is pointless without that call.
+
+        """
+        self.lang = _getlang()
+        self.__calc_weekday()
+        self.__calc_month()
+        self.__calc_am_pm()
+        self.__calc_timezone()
+        self.__calc_date_time()
+        if _getlang() != self.lang:
+            raise ValueError("locale changed during initialization")
+
+    def __pad(self, seq, front):
+        # Add '' to seq, either at the front (if front is true) or the back.
+        seq = list(seq)
+        if front:
+            seq.insert(0, '')
+        else:
+            seq.append('')
+        return seq
+
+    def __calc_weekday(self):
+        # Set self.a_weekday and self.f_weekday using the calendar
+        # module.
+        a_weekday = [calendar.day_abbr[i].lower() for i in range(7)]
+        f_weekday = [calendar.day_name[i].lower() for i in range(7)]
+        self.a_weekday = a_weekday
+        self.f_weekday = f_weekday
+
+    def __calc_month(self):
+        # Set self.f_month and self.a_month using the calendar module.
+        a_month = [calendar.month_abbr[i].lower() for i in range(13)]
+        f_month = [calendar.month_name[i].lower() for i in range(13)]
+        self.a_month = a_month
+        self.f_month = f_month
+
+    def __calc_am_pm(self):
+        # Set self.am_pm by using time.strftime().
+
+        # The magic date (1999,3,17,hour,44,55,2,76,0) is not really that
+        # magical; it just happens to be used everywhere else a static date
+        # is needed.
+        am_pm = []
+        for hour in (1, 22):
+            time_tuple = time.struct_time((1999,3,17,hour,44,55,2,76,0))
+            am_pm.append(time.strftime("%p", time_tuple).lower())
+        self.am_pm = am_pm
+
+    def __calc_date_time(self):
+        # Set self.date_time, self.date, & self.time by using
+        # time.strftime().
+
+        # Use (1999,3,17,22,44,55,2,76,0) as the magic date because the
+        # number of overloaded values is minimized.  The order in which
+        # values are searched for within the format string is very important;
+        # it eliminates possible ambiguity about what something represents.
+        time_tuple = time.struct_time((1999,3,17,22,44,55,2,76,0))
+        date_time = [None, None, None]
+        date_time[0] = time.strftime("%c", time_tuple).lower()
+        date_time[1] = time.strftime("%x", time_tuple).lower()
+        date_time[2] = time.strftime("%X", time_tuple).lower()
+        replacement_pairs = [('%', '%%'), (self.f_weekday[2], '%A'),
+                    (self.f_month[3], '%B'), (self.a_weekday[2], '%a'),
+                    (self.a_month[3], '%b'), (self.am_pm[1], '%p'),
+                    ('1999', '%Y'), ('99', '%y'), ('22', '%H'),
+                    ('44', '%M'), ('55', '%S'), ('76', '%j'),
+                    ('17', '%d'), ('03', '%m'), ('3', '%m'),
+                    # '3' needed for when no leading zero.
+                    ('2', '%w'), ('10', '%I')]
+        replacement_pairs.extend([(tz, "%Z") for tz_values in self.timezone
+                                                for tz in tz_values])
+        for offset,directive in ((0,'%c'), (1,'%x'), (2,'%X')):
+            current_format = date_time[offset]
+            for old, new in replacement_pairs:
+                # Must deal with possible lack of locale info
+                # manifesting itself as the empty string (e.g., Swedish's
+                # lack of AM/PM info) or a platform returning a tuple of empty
+                # strings (e.g., MacOS 9 having timezone as ('','')).
+                if old:
+                    current_format = current_format.replace(old, new)
+            # If %W is used, then Sunday, 2005-01-03 will fall on week 0 since
+            # 2005-01-03 occurs before the first Monday of the year.  Otherwise
+            # %U is used.
+            time_tuple = time.struct_time((1999,1,3,1,1,1,6,3,0))
+            if '00' in time.strftime(directive, time_tuple):
+                U_W = '%W'
+            else:
+                U_W = '%U'
+            date_time[offset] = current_format.replace('11', U_W)
+        self.LC_date_time = date_time[0]
+        self.LC_date = date_time[1]
+        self.LC_time = date_time[2]
+
+    def __calc_timezone(self):
+        # Set self.timezone by using time.tzname.
+        # Do not worry about the possibility of time.tzname[0] ==
+        # time.tzname[1] while time.daylight is set; handle that in strptime.
+        try:
+            time.tzset()
+        except AttributeError:
+            pass
+        no_saving = frozenset(["utc", "gmt", time.tzname[0].lower()])
+        if time.daylight:
+            has_saving = frozenset([time.tzname[1].lower()])
+        else:
+            has_saving = frozenset()
+        self.timezone = (no_saving, has_saving)
+
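+# A minimal usage sketch of LocaleTime (editor's illustration; the output
+# is locale-dependent and shown here for the C locale):
+#
+#   >>> lt = LocaleTime()
+#   >>> lt.a_weekday[0]
+#   'mon'
+#   >>> lt.am_pm
+#   ['am', 'pm']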
+
+class TimeRE(dict):
+    """Handle conversion from format directives to regexes."""
+
+    def __init__(self, locale_time=None):
+        """Create keys/values.
+
+        Order of execution is important for dependency reasons.
+
+        """
+        if locale_time:
+            self.locale_time = locale_time
+        else:
+            self.locale_time = LocaleTime()
+        base = super()
+        base.__init__({
+            # The " \d" part of the regex is to make %c from ANSI C work
+            'd': r"(?P<d>3[0-1]|[1-2]\d|0[1-9]|[1-9]| [1-9])",
+            'f': r"(?P<f>[0-9]{1,6})",
+            'H': r"(?P<H>2[0-3]|[0-1]\d|\d)",
+            'I': r"(?P<I>1[0-2]|0[1-9]|[1-9])",
+            'j': r"(?P<j>36[0-6]|3[0-5]\d|[1-2]\d\d|0[1-9]\d|00[1-9]|[1-9]\d|0[1-9]|[1-9])",
+            'm': r"(?P<m>1[0-2]|0[1-9]|[1-9])",
+            'M': r"(?P<M>[0-5]\d|\d)",
+            'S': r"(?P<S>6[0-1]|[0-5]\d|\d)",
+            'U': r"(?P<U>5[0-3]|[0-4]\d|\d)",
+            'w': r"(?P<w>[0-6])",
+            # W is set below by using 'U'
+            'y': r"(?P<y>\d\d)",
+            #XXX: Does 'Y' need to worry about having less or more than
+            #     4 digits?
+            'Y': r"(?P<Y>\d\d\d\d)",
+            'z': r"(?P<z>[+-]\d\d[0-5]\d)",
+            'A': self.__seqToRE(self.locale_time.f_weekday, 'A'),
+            'a': self.__seqToRE(self.locale_time.a_weekday, 'a'),
+            'B': self.__seqToRE(self.locale_time.f_month[1:], 'B'),
+            'b': self.__seqToRE(self.locale_time.a_month[1:], 'b'),
+            'p': self.__seqToRE(self.locale_time.am_pm, 'p'),
+            'Z': self.__seqToRE((tz for tz_names in self.locale_time.timezone
+                                        for tz in tz_names),
+                                'Z'),
+            '%': '%'})
+        base.__setitem__('W', base.__getitem__('U').replace('U', 'W'))
+        base.__setitem__('c', self.pattern(self.locale_time.LC_date_time))
+        base.__setitem__('x', self.pattern(self.locale_time.LC_date))
+        base.__setitem__('X', self.pattern(self.locale_time.LC_time))
+
+    def __seqToRE(self, to_convert, directive):
+        """Convert a list to a regex string for matching a directive.
+
+        Want possible matching values to be from longest to shortest.  This
+        prevents the possibility of a match occurring for a value that is
+        also a substring of a larger value that should have matched (e.g.,
+        'abc' matching when 'abcdef' should have been the match).
+
+        """
+        to_convert = sorted(to_convert, key=len, reverse=True)
+        for value in to_convert:
+            if value != '':
+                break
+        else:
+            return ''
+        regex = '|'.join(re_escape(stuff) for stuff in to_convert)
+        regex = '(?P<%s>%s' % (directive, regex)
+        return '%s)' % regex
+
+    def pattern(self, format):
+        """Return regex pattern for the format string.
+
+        Need to make sure that any characters that might be interpreted as
+        regex syntax are escaped.
+
+        """
+        processed_format = ''
+        # The sub() call escapes all characters that might be misconstrued
+        # as regex syntax.  Cannot use re.escape since we have to deal with
+        # format directives (%m, etc.).
+        regex_chars = re_compile(r"([\\.^$*+?\(\){}\[\]|])")
+        format = regex_chars.sub(r"\\\1", format)
+        whitespace_replacement = re_compile(r'\s+')
+        format = whitespace_replacement.sub(r'\\s+', format)
+        while '%' in format:
+            directive_index = format.index('%')+1
+            processed_format = "%s%s%s" % (processed_format,
+                                           format[:directive_index-1],
+                                           self[format[directive_index]])
+            format = format[directive_index+1:]
+        return "%s%s" % (processed_format, format)
+
+    def compile(self, format):
+        """Return a compiled re object for the format string."""
+        return re_compile(self.pattern(format), IGNORECASE)
+
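+# Illustrative note (editor's sketch, not in the original source):
+# TimeRE().pattern('%Y-%m-%d') expands each directive from the table above,
+# yielding
+#   (?P<Y>\d\d\d\d)-(?P<m>1[0-2]|0[1-9]|[1-9])-
+#   (?P<d>3[0-1]|[1-2]\d|0[1-9]|[1-9]| [1-9])
+# (shown here wrapped across two lines).
+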
+_cache_lock = _thread_allocate_lock()
+# DO NOT modify _TimeRE_cache or _regex_cache without acquiring the cache lock
+# first!
+_TimeRE_cache = TimeRE()
+_CACHE_MAX_SIZE = 5 # Max number of regexes stored in _regex_cache
+_regex_cache = {}
+
+def _calc_julian_from_U_or_W(year, week_of_year, day_of_week, week_starts_Mon):
+    """Calculate the Julian day based on the year, week of the year, and day of
+    the week, with week_starts_Mon indicating whether the week of the year
+    assumes the week starts on Sunday (False) or Monday (True)."""
+    first_weekday = datetime_date(year, 1, 1).weekday()
+    # If we are dealing with the %U directive (week starts on Sunday), it's
+    # easier to just shift the view to Sunday being the first day of the
+    # week.
+    if not week_starts_Mon:
+        first_weekday = (first_weekday + 1) % 7
+        day_of_week = (day_of_week + 1) % 7
+    # Need to watch out for a week 0 (when the first day of the year is not
+    # the same as that specified by %U or %W).
+    week_0_length = (7 - first_weekday) % 7
+    if week_of_year == 0:
+        return 1 + day_of_week - first_weekday
+    else:
+        days_to_week = week_0_length + (7 * (week_of_year - 1))
+        return 1 + days_to_week + day_of_week
+
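+# Worked example (editor's illustration): in 1999, Jan 1 falls on a Friday,
+# so first_weekday == 4 and week_0_length == 3.  For %W week 11 and a
+# Wednesday (day_of_week == 2): 1 + (3 + 7*10) + 2 == 76, i.e. March 17th,
+# the magic date used by LocaleTime above.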
+
+def _strptime(data_string, format="%a %b %d %H:%M:%S %Y"):
+    """Return a 2-tuple consisting of a time struct and an int containing
+    the number of microseconds based on the input string and the
+    format string."""
+
+    for index, arg in enumerate([data_string, format]):
+        if not isinstance(arg, str):
+            msg = "strptime() argument {} must be str, not {}"
+            raise TypeError(msg.format(index, type(arg)))
+
+    global _TimeRE_cache, _regex_cache
+    with _cache_lock:
+
+        if _getlang() != _TimeRE_cache.locale_time.lang:
+            _TimeRE_cache = TimeRE()
+            _regex_cache.clear()
+        if len(_regex_cache) > _CACHE_MAX_SIZE:
+            _regex_cache.clear()
+        locale_time = _TimeRE_cache.locale_time
+        format_regex = _regex_cache.get(format)
+        if not format_regex:
+            try:
+                format_regex = _TimeRE_cache.compile(format)
+            # KeyError raised when a bad format is found; the directive can
+            # come back as "\\", in which case it was a stray % with a space
+            # after it
+            except KeyError as err:
+                bad_directive = err.args[0]
+                if bad_directive == "\\":
+                    bad_directive = "%"
+                del err
+                raise ValueError("'%s' is a bad directive in format '%s'" %
+                                    (bad_directive, format))
+            # IndexError only occurs when the format string is "%"
+            except IndexError:
+                raise ValueError("stray %% in format '%s'" % format)
+            _regex_cache[format] = format_regex
+    found = format_regex.match(data_string)
+    if not found:
+        raise ValueError("time data %r does not match format %r" %
+                         (data_string, format))
+    if len(data_string) != found.end():
+        raise ValueError("unconverted data remains: %s" %
+                          data_string[found.end():])
+
+    year = 1900
+    month = day = 1
+    hour = minute = second = fraction = 0
+    tz = -1
+    tzoffset = None
+    # Default to -1 to signify that the values are not known; not critical
+    # to have, though
+    week_of_year = -1
+    week_of_year_start = -1
+    # weekday and julian defaulted to -1 so as to signal need to calculate
+    # values
+    weekday = julian = -1
+    found_dict = found.groupdict()
+    for group_key in found_dict.keys():
+        # Directives not explicitly handled below:
+        #   c, x, X
+        #      handled by being composed out of other directives
+        #   U, W
+        #      worthless without day of the week
+        if group_key == 'y':
+            year = int(found_dict['y'])
+            # The Open Group specification for strptime() states that a %y
+            # value in the range [00, 68] is in the century 2000, while
+            # [69, 99] is in the century 1900.
+            if year <= 68:
+                year += 2000
+            else:
+                year += 1900
+        elif group_key == 'Y':
+            year = int(found_dict['Y'])
+        elif group_key == 'm':
+            month = int(found_dict['m'])
+        elif group_key == 'B':
+            month = locale_time.f_month.index(found_dict['B'].lower())
+        elif group_key == 'b':
+            month = locale_time.a_month.index(found_dict['b'].lower())
+        elif group_key == 'd':
+            day = int(found_dict['d'])
+        elif group_key == 'H':
+            hour = int(found_dict['H'])
+        elif group_key == 'I':
+            hour = int(found_dict['I'])
+            ampm = found_dict.get('p', '').lower()
+            # If there was no AM/PM indicator, we'll treat this like AM
+            if ampm in ('', locale_time.am_pm[0]):
+                # We're in AM so the hour is correct unless we're
+                # looking at 12 midnight.
+                # 12 midnight == 12 AM == hour 0
+                if hour == 12:
+                    hour = 0
+            elif ampm == locale_time.am_pm[1]:
+                # We're in PM so we need to add 12 to the hour unless
+                # we're looking at 12 noon.
+                # 12 noon == 12 PM == hour 12
+                if hour != 12:
+                    hour += 12
+        elif group_key == 'M':
+            minute = int(found_dict['M'])
+        elif group_key == 'S':
+            second = int(found_dict['S'])
+        elif group_key == 'f':
+            s = found_dict['f']
+            # Pad to always return microseconds.
+            s += "0" * (6 - len(s))
+            fraction = int(s)
+        elif group_key == 'A':
+            weekday = locale_time.f_weekday.index(found_dict['A'].lower())
+        elif group_key == 'a':
+            weekday = locale_time.a_weekday.index(found_dict['a'].lower())
+        elif group_key == 'w':
+            weekday = int(found_dict['w'])
+            if weekday == 0:
+                weekday = 6
+            else:
+                weekday -= 1
+        elif group_key == 'j':
+            julian = int(found_dict['j'])
+        elif group_key in ('U', 'W'):
+            week_of_year = int(found_dict[group_key])
+            if group_key == 'U':
+                # U starts week on Sunday.
+                week_of_year_start = 6
+            else:
+                # W starts week on Monday.
+                week_of_year_start = 0
+        elif group_key == 'z':
+            z = found_dict['z']
+            tzoffset = int(z[1:3]) * 60 + int(z[3:5])
+            if z.startswith("-"):
+                tzoffset = -tzoffset
+        elif group_key == 'Z':
+            # Since -1 is the default value, we only need to worry about
+            # setting tz if it can be something other than -1.
+            found_zone = found_dict['Z'].lower()
+            for value, tz_values in enumerate(locale_time.timezone):
+                if found_zone in tz_values:
+                    # Deal with bad locale setup where timezone names are the
+                    # same and yet time.daylight is true; too ambiguous to
+                    # be able to tell what timezone has daylight savings
+                    if (time.tzname[0] == time.tzname[1] and
+                       time.daylight and found_zone not in ("utc", "gmt")):
+                        break
+                    else:
+                        tz = value
+                        break
+    # If we know the week of the year and what day of that week, we can figure
+    # out the Julian day of the year.
+    if julian == -1 and week_of_year != -1 and weekday != -1:
+        week_starts_Mon = week_of_year_start == 0
+        julian = _calc_julian_from_U_or_W(year, week_of_year, weekday,
+                                            week_starts_Mon)
+    # Cannot pre-calculate datetime_date() since the date can change in the
+    # Julian calculation and thus could have a different value for the
+    # day-of-the-week calculation.
+    if julian == -1:
+        # Need to add 1 to result since first day of the year is 1, not 0.
+        julian = datetime_date(year, month, day).toordinal() - \
+                  datetime_date(year, 1, 1).toordinal() + 1
+    else:  # Assume that if they bothered to include Julian day it will
+           # be accurate.
+        datetime_result = datetime_date.fromordinal((julian - 1) + datetime_date(year, 1, 1).toordinal())
+        year = datetime_result.year
+        month = datetime_result.month
+        day = datetime_result.day
+    if weekday == -1:
+        weekday = datetime_date(year, month, day).weekday()
+    # Add timezone info
+    tzname = found_dict.get("Z")
+    if tzoffset is not None:
+        gmtoff = tzoffset * 60
+    else:
+        gmtoff = None
+
+    return (year, month, day,
+            hour, minute, second,
+            weekday, julian, tz, gmtoff, tzname), fraction
+
+def _strptime_time(data_string, format="%a %b %d %H:%M:%S %Y"):
+    """Return a time struct based on the input string and the
+    format string."""
+    tt = _strptime(data_string, format)[0]
+    return time.struct_time(tt[:9])
+
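+# Doctest-style sketch (editor's illustration; assumes an English/C LC_TIME
+# locale, since %a and %b are matched against the locale's names):
+#
+#   >>> _strptime_time("Wed Mar 17 22:44:55 1999")[:6]
+#   (1999, 3, 17, 22, 44, 55)
+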
+def _strptime_datetime(cls, data_string, format="%a %b %d %H:%M:%S %Y"):
+    """Return a class cls instance based on the input string and the
+    format string."""
+    tt, fraction = _strptime(data_string, format)
+    gmtoff, tzname = tt[-2:]
+    args = tt[:6] + (fraction,)
+    if gmtoff is not None:
+        tzdelta = datetime_timedelta(seconds=gmtoff)
+        if tzname:
+            tz = datetime_timezone(tzdelta, tzname)
+        else:
+            tz = datetime_timezone(tzdelta)
+        args += (tz,)
+
+    return cls(*args)
diff --git a/lib-python/3.2/_threading_local.py b/lib-python/3.2/_threading_local.py
new file mode 100644
--- /dev/null
+++ b/lib-python/3.2/_threading_local.py
@@ -0,0 +1,246 @@
+"""Thread-local objects.
+
+(Note that this module provides a Python version of the threading.local
+ class.  Depending on the version of Python you're using, there may be a
+ faster one available.  You should always import the `local` class from
+ `threading`.)
+
+Thread-local objects support the management of thread-local data.
+If you have data that you want to be local to a thread, simply create
+a thread-local object and use its attributes:
+
+  >>> mydata = local()
+  >>> mydata.number = 42
+  >>> mydata.number
+  42
+
+You can also access the local-object's dictionary:
+
+  >>> mydata.__dict__
+  {'number': 42}
+  >>> mydata.__dict__.setdefault('widgets', [])
+  []
+  >>> mydata.widgets
+  []
+
+What's important about thread-local objects is that their data are
+local to a thread. If we access the data in a different thread:
+
+  >>> log = []
+  >>> def f():
+  ...     items = sorted(mydata.__dict__.items())
+  ...     log.append(items)
+  ...     mydata.number = 11
+  ...     log.append(mydata.number)
+
+  >>> import threading
+  >>> thread = threading.Thread(target=f)
+  >>> thread.start()
+  >>> thread.join()
+  >>> log
+  [[], 11]
+
+we get different data.  Furthermore, changes made in the other thread
+don't affect data seen in this thread:
+
+  >>> mydata.number
+  42
+
+Of course, values you get from a local object, including a __dict__
+attribute, are for whatever thread was current at the time the
+attribute was read.  For that reason, you generally don't want to save
+these values across threads, as they apply only to the thread they
+came from.
+
+You can create custom local objects by subclassing the local class:
+
+  >>> class MyLocal(local):
+  ...     number = 2
+  ...     initialized = False
+  ...     def __init__(self, **kw):
+  ...         if self.initialized:
+  ...             raise SystemError('__init__ called too many times')
+  ...         self.initialized = True
+  ...         self.__dict__.update(kw)
+  ...     def squared(self):
+  ...         return self.number ** 2
+
+This can be useful to support default values, methods and
+initialization.  Note that if you define an __init__ method, it will be
+called each time the local object is used in a separate thread.  This
+is necessary to initialize each thread's dictionary.
+
+Now if we create a local object:
+
+  >>> mydata = MyLocal(color='red')
+
+Now we have a default number:
+
+  >>> mydata.number
+  2
+
+an initial color:
+
+  >>> mydata.color
+  'red'
+  >>> del mydata.color
+
+And a method that operates on the data:
+
+  >>> mydata.squared()
+  4
+
+As before, we can access the data in a separate thread:
+
+  >>> log = []
+  >>> thread = threading.Thread(target=f)
+  >>> thread.start()
+  >>> thread.join()
+  >>> log
+  [[('color', 'red'), ('initialized', True)], 11]
+
+without affecting this thread's data:
+
+  >>> mydata.number
+  2
+  >>> mydata.color
+  Traceback (most recent call last):
+  ...
+  AttributeError: 'MyLocal' object has no attribute 'color'
+
+Note that subclasses can define slots, but they are not thread
+local. They are shared across threads:
+
+  >>> class MyLocal(local):
+  ...     __slots__ = 'number'
+
+  >>> mydata = MyLocal()
+  >>> mydata.number = 42
+  >>> mydata.color = 'red'
+
+So, the separate thread:
+
+  >>> thread = threading.Thread(target=f)
+  >>> thread.start()
+  >>> thread.join()
+
+affects what we see:
+
+  >>> mydata.number
+  11
+
+>>> del mydata
+"""
+
+from weakref import ref
+from contextlib import contextmanager
+
+__all__ = ["local"]
+
+# We need to use objects from the threading module, but the threading
+# module may also want to use our `local` class, if support for locals
+# isn't compiled into the `thread` module.  This creates potential problems
+# with circular imports.  For that reason, we don't import `threading`
+# until the bottom of this file (a hack sufficient to worm around the
+# potential problems).  Note that all platforms on CPython do have support
+# for locals in the `thread` module, and there is no circular import problem
+# then, so problems introduced by fiddling the order of imports here won't
+# manifest.
+
+class _localimpl:
+    """A class managing thread-local dicts"""
+    __slots__ = 'key', 'dicts', 'localargs', 'locallock', '__weakref__'
+
+    def __init__(self):
+        # The key used in the Thread objects' attribute dicts.
+        # We keep it a string for speed but make it unlikely to clash with
+        # a "real" attribute.
+        self.key = '_threading_local._localimpl.' + str(id(self))
+        # { id(Thread) -> (ref(Thread), thread-local dict) }
+        self.dicts = {}
+
+    def get_dict(self):
+        """Return the dict for the current thread. Raises KeyError if none
+        defined."""
+        thread = current_thread()
+        return self.dicts[id(thread)][1]
+
+    def create_dict(self):
+        """Create a new dict for the current thread, and return it."""
+        localdict = {}
+        key = self.key
+        thread = current_thread()
+        idt = id(thread)
+        def local_deleted(_, key=key):
+            # When the localimpl is deleted, remove the thread attribute.
+            thread = wrthread()
+            if thread is not None:
+                del thread.__dict__[key]
+        def thread_deleted(_, idt=idt):
+            # When the thread is deleted, remove the local dict.
+            # Note that this is suboptimal if the thread object gets
+            # caught in a reference loop. We would like to be called
+            # as soon as the OS-level thread ends instead.
+            local = wrlocal()
+            if local is not None:
+                local.dicts.pop(idt)
+        wrlocal = ref(self, local_deleted)
+        wrthread = ref(thread, thread_deleted)
+        thread.__dict__[key] = wrlocal
+        self.dicts[idt] = wrthread, localdict
+        return localdict
+
+
+@contextmanager
+def _patch(self):
+    impl = object.__getattribute__(self, '_local__impl')
+    try:
+        dct = impl.get_dict()
+    except KeyError:
+        dct = impl.create_dict()
+        args, kw = impl.localargs
+        self.__init__(*args, **kw)
+    with impl.locallock:
+        object.__setattr__(self, '__dict__', dct)
+        yield
+
+
+class local:
+    __slots__ = '_local__impl', '__dict__'
+
+    def __new__(cls, *args, **kw):
+        if (args or kw) and (cls.__init__ is object.__init__):
+            raise TypeError("Initialization arguments are not supported")
+        self = object.__new__(cls)
+        impl = _localimpl()
+        impl.localargs = (args, kw)
+        impl.locallock = RLock()
+        object.__setattr__(self, '_local__impl', impl)
+        # We need to create the thread dict in anticipation of
+        # __init__ being called, to make sure we don't call it
+        # again ourselves.
+        impl.create_dict()
+        return self
+
+    def __getattribute__(self, name):
+        with _patch(self):
+            return object.__getattribute__(self, name)
+
+    def __setattr__(self, name, value):
+        if name == '__dict__':
+            raise AttributeError(
+                "%r object attribute '__dict__' is read-only"
+                % self.__class__.__name__)
+        with _patch(self):
+            return object.__setattr__(self, name, value)
+
+    def __delattr__(self, name):
+        if name == '__dict__':
+            raise AttributeError(
+                "%r object attribute '__dict__' is read-only"
+                % self.__class__.__name__)
+        with _patch(self):
+            return object.__delattr__(self, name)
+
+
+from threading import current_thread, RLock
diff --git a/lib-python/3.2/_weakrefset.py b/lib-python/3.2/_weakrefset.py
new file mode 100644
--- /dev/null
+++ b/lib-python/3.2/_weakrefset.py
@@ -0,0 +1,214 @@
+# Access WeakSet through the weakref module.
+# This code is separated out because it is needed
+# by abc.py to load everything else at startup.
+
+from _weakref import ref
+
+__all__ = ['WeakSet']
+
+
+class _IterationGuard:
+    # This context manager registers itself in the current iterators of the
+    # weak container, so as to delay all removals until the context manager
+    # exits.
+    # This technique should be relatively thread-safe (since sets are).
+
+    def __init__(self, weakcontainer):
+        # Don't create cycles
+        self.weakcontainer = ref(weakcontainer)
+
+    def __enter__(self):
+        w = self.weakcontainer()
+        if w is not None:
+            w._iterating.add(self)
+        return self
+
+    def __exit__(self, e, t, b):
+        w = self.weakcontainer()
+        if w is not None:
+            s = w._iterating
+            s.remove(self)
+            if not s:
+                w._commit_removals()
+
+
+class WeakSet:
+    def __init__(self, data=None):
+        self.data = set()
+        def _remove(item, selfref=ref(self)):
+            self = selfref()
+            if self is not None:
+                if self._iterating:
+                    self._pending_removals.append(item)
+                else:
+                    self.data.discard(item)
+        self._remove = _remove
+        # A list of keys to be removed
+        self._pending_removals = []
+        self._iterating = set()
+        if data is not None:
+            self.update(data)
+
+    def _commit_removals(self):
+        l = self._pending_removals
+        discard = self.data.discard
+        while l:
+            discard(l.pop())
+
+    def __iter__(self):
+        with _IterationGuard(self):
+            for itemref in self.data:
+                item = itemref()
+                if item is not None:
+                    yield item
+
+    def __len__(self):
+        return sum(x() is not None for x in self.data)
+
+    def __contains__(self, item):
+        try:
+            wr = ref(item)
+        except TypeError:
+            return False
+        return wr in self.data
+
+    def __reduce__(self):
+        return (self.__class__, (list(self),),
+                getattr(self, '__dict__', None))
+
+    def add(self, item):
+        if self._pending_removals:
+            self._commit_removals()
+        self.data.add(ref(item, self._remove))
+
+    def clear(self):
+        if self._pending_removals:
+            self._commit_removals()
+        self.data.clear()
+
+    def copy(self):
+        return self.__class__(self)
+
+    def pop(self):
+        if self._pending_removals:
+            self._commit_removals()
+        while True:
+            try:
+                itemref = self.data.pop()
+            except KeyError:
+                raise KeyError('pop from empty WeakSet')
+            item = itemref()
+            if item is not None:
+                return item
+
+    def remove(self, item):
+        if self._pending_removals:
+            self._commit_removals()
+        self.data.remove(ref(item))
+
+    def discard(self, item):
+        if self._pending_removals:
+            self._commit_removals()
+        self.data.discard(ref(item))
+
+    def update(self, other):
+        if self._pending_removals:
+            self._commit_removals()
+        if isinstance(other, self.__class__):
+            self.data.update(other.data)
+        else:
+            for element in other:
+                self.add(element)
+
+    def __ior__(self, other):
+        self.update(other)
+        return self
+
+    # Helper functions for simple delegating methods.
+    def _apply(self, other, method):
+        if not isinstance(other, self.__class__):
+            other = self.__class__(other)
+        newdata = method(other.data)
+        newset = self.__class__()
+        newset.data = newdata
+        return newset
+
+    def difference(self, other):
+        return self._apply(other, self.data.difference)
+    __sub__ = difference
+
+    def difference_update(self, other):
+        if self._pending_removals:
+            self._commit_removals()
+        if self is other:
+            self.data.clear()
+        else:
+            self.data.difference_update(ref(item) for item in other)
+
+    def __isub__(self, other):
+        if self._pending_removals:
+            self._commit_removals()
+        if self is other:
+            self.data.clear()
+        else:
+            self.data.difference_update(ref(item) for item in other)
+        return self
+
+    def intersection(self, other):
+        return self._apply(other, self.data.intersection)
+    __and__ = intersection
+
+    def intersection_update(self, other):
+        if self._pending_removals:
+            self._commit_removals()
+        self.data.intersection_update(ref(item) for item in other)
+
+    def __iand__(self, other):
+        if self._pending_removals:
+            self._commit_removals()
+        self.data.intersection_update(ref(item) for item in other)
+        return self
+
+    def issubset(self, other):
+        return self.data.issubset(ref(item) for item in other)
+    __le__ = issubset
+
+    def __lt__(self, other):
+        return self.data < set(ref(item) for item in other)
+
+    def issuperset(self, other):
+        return self.data.issuperset(ref(item) for item in other)
+    __ge__ = issuperset
+
+    def __gt__(self, other):
+        return self.data > set(ref(item) for item in other)
+
+    def __eq__(self, other):
+        if not isinstance(other, self.__class__):
+            return NotImplemented
+        return self.data == set(ref(item) for item in other)
+
+    def symmetric_difference(self, other):
+        return self._apply(other, self.data.symmetric_difference)
+    __xor__ = symmetric_difference
+
+    def symmetric_difference_update(self, other):
+        if self._pending_removals:
+            self._commit_removals()
+        if self is other:
+            self.data.clear()
+        else:
+            self.data.symmetric_difference_update(ref(item) for item in other)
+
+    def __ixor__(self, other):
+        if self._pending_removals:
+            self._commit_removals()
+        if self is other:
+            self.data.clear()
+        else:
+            self.data.symmetric_difference_update(ref(item) for item in other)
+        return self
+
+    def union(self, other):
+        return self._apply(other, self.data.union)
+    __or__ = union
+
+    def isdisjoint(self, other):
+        return len(self.intersection(other)) == 0
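+
+
+# A minimal usage sketch (editor's illustration, not part of the module;
+# the immediate collection below assumes CPython-style reference counting):
+#
+#   >>> class Obj: pass
+#   >>> o = Obj()
+#   >>> ws = WeakSet([o])
+#   >>> len(ws)
+#   1
+#   >>> del o            # the weak reference dies with its referent
+#   >>> len(ws)
+#   0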
diff --git a/lib-python/3.2/abc.py b/lib-python/3.2/abc.py
new file mode 100644
--- /dev/null
+++ b/lib-python/3.2/abc.py
@@ -0,0 +1,211 @@
+# Copyright 2007 Google, Inc. All Rights Reserved.
+# Licensed to PSF under a Contributor Agreement.
+
+"""Abstract Base Classes (ABCs) according to PEP 3119."""
+
+from _weakrefset import WeakSet
+
+def abstractmethod(funcobj):
+    """A decorator indicating abstract methods.
+
+    Requires that the metaclass is ABCMeta or derived from it.  A
+    class that has a metaclass derived from ABCMeta cannot be
+    instantiated unless all of its abstract methods are overridden.
+    The abstract methods can be called using any of the normal
+    'super' call mechanisms.
+
+    Usage:
+
+        class C(metaclass=ABCMeta):
+            @abstractmethod
+            def my_abstract_method(self, ...):
+                ...
+    """
+    funcobj.__isabstractmethod__ = True
+    return funcobj
+
+
+class abstractclassmethod(classmethod):
+    """A decorator indicating abstract classmethods.
+
+    Similar to abstractmethod.
+
+    Usage:
+
+        class C(metaclass=ABCMeta):
+            @abstractclassmethod
+            def my_abstract_classmethod(cls, ...):
+                ...
+    """
+
+    __isabstractmethod__ = True
+
+    def __init__(self, callable):
+        callable.__isabstractmethod__ = True
+        super().__init__(callable)
+
+
+class abstractstaticmethod(staticmethod):
+    """A decorator indicating abstract staticmethods.
+
+    Similar to abstractmethod.
+
+    Usage:
+
+        class C(metaclass=ABCMeta):
+            @abstractstaticmethod
+            def my_abstract_staticmethod(...):
+                ...
+    """
+
+    __isabstractmethod__ = True
+
+    def __init__(self, callable):
+        callable.__isabstractmethod__ = True
+        super().__init__(callable)
+
+
+class abstractproperty(property):
+    """A decorator indicating abstract properties.
+
+    Requires that the metaclass is ABCMeta or derived from it.  A
+    class that has a metaclass derived from ABCMeta cannot be
+    instantiated unless all of its abstract properties are overridden.
+    The abstract properties can be called using any of the normal
+    'super' call mechanisms.
+
+    Usage:
+
+        class C(metaclass=ABCMeta):
+            @abstractproperty
+            def my_abstract_property(self):
+                ...
+
+    This defines a read-only property; you can also define a read-write
+    abstract property using the 'long' form of property declaration:
+
+        class C(metaclass=ABCMeta):
+            def getx(self): ...
+            def setx(self, value): ...
+            x = abstractproperty(getx, setx)
+    """
+    __isabstractmethod__ = True
+
+
+class ABCMeta(type):
+
+    """Metaclass for defining Abstract Base Classes (ABCs).
+
+    Use this metaclass to create an ABC.  An ABC can be subclassed
+    directly, and then acts as a mix-in class.  You can also register
+    unrelated concrete classes (even built-in classes) and unrelated
+    ABCs as 'virtual subclasses' -- these and their descendants will
+    be considered subclasses of the registering ABC by the built-in
+    issubclass() function, but the registering ABC won't show up in
+    their MRO (Method Resolution Order) nor will method
+    implementations defined by the registering ABC be callable (not
+    even via super()).
+
+    """
+
+    # A global counter that is incremented each time a class is
+    # registered as a virtual subclass of anything.  It forces the
+    # negative cache to be cleared before its next use.
+    _abc_invalidation_counter = 0
+
+    def __new__(mcls, name, bases, namespace):
+        cls = super().__new__(mcls, name, bases, namespace)
+        # Compute set of abstract method names
+        abstracts = {name
+                     for name, value in namespace.items()
+                     if getattr(value, "__isabstractmethod__", False)}
+        for base in bases:
+            for name in getattr(base, "__abstractmethods__", set()):
+                value = getattr(cls, name, None)
+                if getattr(value, "__isabstractmethod__", False):
+                    abstracts.add(name)
+        cls.__abstractmethods__ = frozenset(abstracts)
+        # Set up inheritance registry
+        cls._abc_registry = WeakSet()
+        cls._abc_cache = WeakSet()
+        cls._abc_negative_cache = WeakSet()
+        cls._abc_negative_cache_version = ABCMeta._abc_invalidation_counter
+        return cls
+
+    def register(cls, subclass):
+        """Register a virtual subclass of an ABC."""
+        if not isinstance(subclass, type):
+            raise TypeError("Can only register classes")
+        if issubclass(subclass, cls):
+            return  # Already a subclass
+        # Subtle: test for cycles *after* testing for "already a subclass";
+        # this means we allow X.register(X) and interpret it as a no-op.
+        if issubclass(cls, subclass):
+            # This would create a cycle, which is bad for the algorithm below
+            raise RuntimeError("Refusing to create an inheritance cycle")
+        cls._abc_registry.add(subclass)
+        ABCMeta._abc_invalidation_counter += 1  # Invalidate negative cache
+
+    def _dump_registry(cls, file=None):
+        """Debug helper to print the ABC registry."""
+        print("Class: %s.%s" % (cls.__module__, cls.__name__), file=file)
+        print("Inv.counter: %s" % ABCMeta._abc_invalidation_counter, file=file)
+        for name in sorted(cls.__dict__.keys()):
+            if name.startswith("_abc_"):
+                value = getattr(cls, name)
+                print("%s: %r" % (name, value), file=file)
+
+    def __instancecheck__(cls, instance):
+        """Override for isinstance(instance, cls)."""
+        # Inline the cache checking
+        subclass = instance.__class__
+        if subclass in cls._abc_cache:
+            return True
+        subtype = type(instance)
+        if subtype is subclass:
+            if (cls._abc_negative_cache_version ==
+                ABCMeta._abc_invalidation_counter and
+                subclass in cls._abc_negative_cache):
+                return False
+            # Fall back to the subclass check.
+            return cls.__subclasscheck__(subclass)
+        return any(cls.__subclasscheck__(c) for c in {subclass, subtype})
+
+    def __subclasscheck__(cls, subclass):
+        """Override for issubclass(subclass, cls)."""
+        # Check cache
+        if subclass in cls._abc_cache:
+            return True
+        # Check negative cache; may have to invalidate
+        if cls._abc_negative_cache_version < ABCMeta._abc_invalidation_counter:
+            # Invalidate the negative cache
+            cls._abc_negative_cache = WeakSet()
+            cls._abc_negative_cache_version = ABCMeta._abc_invalidation_counter
+        elif subclass in cls._abc_negative_cache:
+            return False
+        # Check the subclass hook
+        ok = cls.__subclasshook__(subclass)
+        if ok is not NotImplemented:
+            assert isinstance(ok, bool)
+            if ok:
+                cls._abc_cache.add(subclass)
+            else:
+                cls._abc_negative_cache.add(subclass)
+            return ok
+        # Check if it's a direct subclass
+        if cls in getattr(subclass, '__mro__', ()):
+            cls._abc_cache.add(subclass)
+            return True
+        # Check if it's a subclass of a registered class (recursive)
+        for rcls in cls._abc_registry:
+            if issubclass(subclass, rcls):
+                cls._abc_cache.add(subclass)
+                return True
+        # Check if it's a subclass of a subclass (recursive)
+        for scls in cls.__subclasses__():
+            if issubclass(subclass, scls):
+                cls._abc_cache.add(subclass)
+                return True
+        # No dice; update negative cache
+        cls._abc_negative_cache.add(subclass)
+        return False
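+
+
+# A minimal usage sketch of virtual subclassing (editor's illustration,
+# not part of the original module):
+#
+#   >>> class Sized(metaclass=ABCMeta):
+#   ...     @abstractmethod
+#   ...     def __len__(self): ...
+#   >>> Sized.register(tuple)
+#   >>> issubclass(tuple, Sized)
+#   True
+#   >>> isinstance((), Sized)
+#   True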
diff --git a/lib-python/3.2/aifc.py b/lib-python/3.2/aifc.py
new file mode 100644
--- /dev/null
+++ b/lib-python/3.2/aifc.py
@@ -0,0 +1,891 @@
+"""Stuff to parse AIFF-C and AIFF files.
+
+Unless explicitly stated otherwise, the description below is true
+both for AIFF-C files and AIFF files.
+
+An AIFF-C file has the following structure.
+
+  +-----------------+
+  | FORM            |
+  +-----------------+
+  | <size>          |
+  +----+------------+
+  |    | AIFC       |
+  |    +------------+
+  |    | <chunks>   |
+  |    |    .       |
+  |    |    .       |
+  |    |    .       |
+  +----+------------+
+
+An AIFF file has the string "AIFF" instead of "AIFC".
+
+A chunk consists of an identifier (4 bytes) followed by a size (4 bytes,
+big endian order), followed by the data.  The size field does not include
+the size of the 8 byte header.
+
+The following chunk types are recognized.
+
+  FVER
+      <version number of AIFF-C defining document> (AIFF-C only).
+  MARK
+      <# of markers> (2 bytes)
+      list of markers:
+          <marker ID> (2 bytes, must be > 0)
+          <position> (4 bytes)
+          <marker name> ("pstring")
+  COMM
+      <# of channels> (2 bytes)
+      <# of sound frames> (4 bytes)
+      <size of the samples> (2 bytes)
+      <sampling frequency> (10 bytes, IEEE 80-bit extended
+          floating point)
+      in AIFF-C files only:
+      <compression type> (4 bytes)
+      <human-readable version of compression type> ("pstring")
+  SSND
+      <offset> (4 bytes, not used by this program)
+      <blocksize> (4 bytes, not used by this program)
+      <sound data>
+
+A pstring consists of 1 byte length, a string of characters, and 0 or 1
+byte pad to make the total length even.
+
+Usage.
+
+Reading AIFF files:
+  f = aifc.open(file, 'r')
+where file is either the name of a file or an open file pointer.
+The open file pointer must have methods read(), seek(), and close().
+In some types of audio files, if the setpos() method is not used,
+the seek() method is not necessary.
+
+This returns an instance of a class with the following public methods:
+  getnchannels()  -- returns number of audio channels (1 for
+             mono, 2 for stereo)
+  getsampwidth()  -- returns sample width in bytes
+  getframerate()  -- returns sampling frequency
+  getnframes()    -- returns number of audio frames
+  getcomptype()   -- returns compression type ('NONE' for AIFF files)
+  getcompname()   -- returns human-readable version of
+             compression type ('not compressed' for AIFF files)
+  getparams() -- returns a tuple consisting of all of the
+             above in the above order
+  getmarkers()    -- get the list of marks in the audio file or None
+             if there are no marks
+  getmark(id) -- get mark with the specified id (raises an error
+             if the mark does not exist)
+  readframes(n)   -- returns at most n frames of audio
+  rewind()    -- rewind to the beginning of the audio stream
+  setpos(pos) -- seek to the specified position
+  tell()      -- return the current position
+  close()     -- close the instance (make it unusable)
+The position returned by tell(), the position given to setpos() and
+the position of marks are all compatible and have nothing to do with
+the actual position in the file.
+The close() method is called automatically when the class instance
+is destroyed.
+
+Writing AIFF files:
+  f = aifc.open(file, 'w')
+where file is either the name of a file or an open file pointer.
+The open file pointer must have methods write(), tell(), seek(), and
+close().
+
+This returns an instance of a class with the following public methods:
+  aiff()      -- create an AIFF file (AIFF-C default)
+  aifc()      -- create an AIFF-C file
+  setnchannels(n) -- set the number of channels
+  setsampwidth(n) -- set the sample width
+  setframerate(n) -- set the frame rate
+  setnframes(n)   -- set the number of frames
+  setcomptype(type, name)
+          -- set the compression type and the
+             human-readable compression type
+  setparams(tuple)
+          -- set all parameters at once
+  setmark(id, pos, name)
+          -- add specified mark to the list of marks
+  tell()      -- return current position in output file (useful
+             in combination with setmark())
+  writeframesraw(data)
+          -- write audio frames without pathing up the
+             file header
+  writeframes(data)
+          -- write audio frames and patch up the file header
+  close()     -- patch up the file header and close the
+             output file
+You should set the parameters before the first writeframesraw or
+writeframes.  The total number of frames does not need to be set,
+but when it is set to the correct value, the header does not have to
+be patched up.
+It is best to first set all parameters (including the compression
+type) and then write audio frames using writeframesraw.
+When all frames have been written, either call writeframes(b'') or
+close() to patch up the sizes in the header.
+Marks can be added anytime.  If there are any marks, you must call
+close() after all frames have been written.
+The close() method is called automatically when the class instance
+is destroyed.
+
+When a file is opened with the extension '.aiff', an AIFF file is
+written, otherwise an AIFF-C file is written.  This default can be
+changed by calling aiff() or aifc() before the first writeframes or
+writeframesraw.
+"""
+
+import struct
+import builtins
+
+__all__ = ["Error", "open", "openfp"]
+
+class Error(Exception):
+    pass
+
+_AIFC_version = 0xA2805140     # Version 1 of AIFF-C
+
+def _read_long(file):
+    try:
+        return struct.unpack('>l', file.read(4))[0]
+    except struct.error:
+        raise EOFError
+
+def _read_ulong(file):
+    try:
+        return struct.unpack('>L', file.read(4))[0]
+    except struct.error:
+        raise EOFError
+
+def _read_short(file):
+    try:
+        return struct.unpack('>h', file.read(2))[0]
+    except struct.error:
+        raise EOFError
+
+def _read_ushort(file):
+    try:
+        return struct.unpack('>H', file.read(2))[0]
+    except struct.error:
+        raise EOFError
+
+def _read_string(file):
+    length = ord(file.read(1))
+    if length == 0:
+        data = b''
+    else:
+        data = file.read(length)
+    if length & 1 == 0:
+        dummy = file.read(1)
+    return data
+
+_HUGE_VAL = 1.79769313486231e+308 # See <limits.h>
+
+def _read_float(f): # 10 bytes
+    expon = _read_short(f) # 2 bytes
+    sign = 1
+    if expon < 0:
+        sign = -1
+        expon = expon + 0x8000
+    himant = _read_ulong(f) # 4 bytes
+    lomant = _read_ulong(f) # 4 bytes
+    if expon == himant == lomant == 0:
+        f = 0.0
+    elif expon == 0x7FFF:
+        f = _HUGE_VAL
+    else:
+        expon = expon - 16383
+        f = (himant * 0x100000000 + lomant) * pow(2.0, expon - 63)
+    return sign * f
+
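+# Worked example (editor's illustration): the common sample rate 44100.0
+# is stored as the ten bytes 40 0E AC 44 00 00 00 00 00 00 -- exponent
+# field 0x400E (16398, i.e. unbiased 15) and high mantissa 0xAC440000, so
+# (0xAC440000 * 0x100000000 + 0) * 2.0 ** (15 - 63) == 44100.0.
+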
+def _write_short(f, x):
+    f.write(struct.pack('>h', x))
+
+def _write_ushort(f, x):
+    f.write(struct.pack('>H', x))
+
+def _write_long(f, x):
+    f.write(struct.pack('>l', x))
+
+def _write_ulong(f, x):
+    f.write(struct.pack('>L', x))
+
+def _write_string(f, s):
+    if len(s) > 255:
+        raise ValueError("string exceeds maximum pstring length")
+    f.write(struct.pack('B', len(s)))
+    f.write(s)
+    if len(s) & 1 == 0:
+        f.write(b'\x00')
+
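+# Worked example of the pstring padding rule (editor's illustration):
+# _write_string(f, b'ab') emits 02 61 62 00 (the pad byte keeps the total
+# length even), while _write_string(f, b'abc') emits 03 61 62 63 with no
+# pad, since 1 + 3 is already even.
+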
+def _write_float(f, x):
+    import math
+    if x < 0:
+        sign = 0x8000
+        x = x * -1
+    else:
+        sign = 0
+    if x == 0:
+        expon = 0
+        himant = 0
+        lomant = 0
+    else:
+        fmant, expon = math.frexp(x)
+        if expon > 16384 or fmant >= 1 or fmant != fmant: # Infinity or NaN
+            expon = sign|0x7FFF
+            himant = 0
+            lomant = 0
+        else:                   # Finite
+            expon = expon + 16382
+            if expon < 0:           # denormalized
+                fmant = math.ldexp(fmant, expon)
+                expon = 0
+            expon = expon | sign
+            fmant = math.ldexp(fmant, 32)
+            fsmant = math.floor(fmant)
+            himant = int(fsmant)
+            fmant = math.ldexp(fmant - fsmant, 32)
+            fsmant = math.floor(fmant)
+            lomant = int(fsmant)
+    _write_ushort(f, expon)
+    _write_ulong(f, himant)
+    _write_ulong(f, lomant)
+
+from chunk import Chunk
+
+class Aifc_read:
+    # Variables used in this class:
+    #
+    # These variables are available to the user through appropriate
+    # methods of this class:
+    # _file -- the open file with methods read(), close(), and seek()
+    #       set through the __init__() method
+    # _nchannels -- the number of audio channels
+    #       available through the getnchannels() method
+    # _nframes -- the number of audio frames
+    #       available through the getnframes() method
+    # _sampwidth -- the number of bytes per audio sample
+    #       available through the getsampwidth() method
+    # _framerate -- the sampling frequency
+    #       available through the getframerate() method
+    # _comptype -- the AIFF-C compression type ('NONE' if AIFF)
+    #       available through the getcomptype() method
+    # _compname -- the human-readable AIFF-C compression type
+    #       available through the getcompname() method
+    # _markers -- the marks in the audio file
+    #       available through the getmarkers() and getmark()
+    #       methods
+    # _soundpos -- the position in the audio stream
+    #       available through the tell() method, set through the
+    #       setpos() method
+    #
+    # These variables are used internally only:
+    # _version -- the AIFF-C version number
+    # _decomp -- the decompressor from builtin module cl
+    # _comm_chunk_read -- 1 iff the COMM chunk has been read
+    # _aifc -- 1 iff reading an AIFF-C file
+    # _ssnd_seek_needed -- 1 iff positioned correctly in audio
+    #       file for readframes()
+    # _ssnd_chunk -- instantiation of a chunk class for the SSND chunk
+    # _framesize -- size of one frame in the file
+
+    def initfp(self, file):
+        self._version = 0
+        self._convert = None
+        self._markers = []
+        self._soundpos = 0
+        self._file = file
+        chunk = Chunk(file)
+        if chunk.getname() != b'FORM':
+            raise Error('file does not start with FORM id')
+        formdata = chunk.read(4)
+        if formdata == b'AIFF':
+            self._aifc = 0
+        elif formdata == b'AIFC':
+            self._aifc = 1
+        else:
+            raise Error('not an AIFF or AIFF-C file')
+        self._comm_chunk_read = 0
+        while 1:
+            self._ssnd_seek_needed = 1
+            try:
+                chunk = Chunk(self._file)
+            except EOFError:
+                break
+            chunkname = chunk.getname()
+            if chunkname == b'COMM':
+                self._read_comm_chunk(chunk)
+                self._comm_chunk_read = 1
+            elif chunkname == b'SSND':
+                self._ssnd_chunk = chunk
+                dummy = chunk.read(8)
+                self._ssnd_seek_needed = 0
+            elif chunkname == b'FVER':
+                self._version = _read_ulong(chunk)
+            elif chunkname == b'MARK':
+                self._readmark(chunk)
+            chunk.skip()
+        if not self._comm_chunk_read or not self._ssnd_chunk:
+            raise Error('COMM chunk and/or SSND chunk missing')
+
+    def __init__(self, f):
+        if isinstance(f, str):
+            f = builtins.open(f, 'rb')
+        # else, assume it is an open file object already
+        self.initfp(f)
+
+    #
+    # User visible methods.
+    #
+    def getfp(self):
+        return self._file
+
+    def rewind(self):
+        self._ssnd_seek_needed = 1
+        self._soundpos = 0
+
+    def close(self):
+        self._file.close()
+
+    def tell(self):
+        return self._soundpos
+
+    def getnchannels(self):
+        return self._nchannels
+
+    def getnframes(self):
+        return self._nframes
+
+    def getsampwidth(self):
+        return self._sampwidth
+
+    def getframerate(self):
+        return self._framerate
+
+    def getcomptype(self):
+        return self._comptype
+
+    def getcompname(self):
+        return self._compname
+
+##  def getversion(self):
+##      return self._version
+
+    def getparams(self):
+        return self.getnchannels(), self.getsampwidth(), \
+              self.getframerate(), self.getnframes(), \
+              self.getcomptype(), self.getcompname()
+
+    def getmarkers(self):
+        if len(self._markers) == 0:
+            return None
+        return self._markers
+
+    def getmark(self, id):
+        for marker in self._markers:
+            if id == marker[0]:
+                return marker
+        raise Error('marker {0!r} does not exist'.format(id))
+
+    def setpos(self, pos):
+        if pos < 0 or pos > self._nframes:
+            raise Error('position not in range')
+        self._soundpos = pos
+        self._ssnd_seek_needed = 1
+
+    def readframes(self, nframes):
+        if self._ssnd_seek_needed:
+            self._ssnd_chunk.seek(0)
+            dummy = self._ssnd_chunk.read(8)
+            pos = self._soundpos * self._framesize
+            if pos:
+                self._ssnd_chunk.seek(pos + 8)
+            self._ssnd_seek_needed = 0
+        if nframes == 0:
+            return b''
+        data = self._ssnd_chunk.read(nframes * self._framesize)
+        if self._convert and data:
+            data = self._convert(data)
+        self._soundpos = self._soundpos + len(data) // (self._nchannels
+                                                        * self._sampwidth)
+        return data
+
+    #
+    # Internal methods.
+    #
+
+    def _alaw2lin(self, data):
+        import audioop
+        return audioop.alaw2lin(data, 2)
+
+    def _ulaw2lin(self, data):
+        import audioop
+        return audioop.ulaw2lin(data, 2)
+
+    def _adpcm2lin(self, data):
+        import audioop
+        if not hasattr(self, '_adpcmstate'):
+            # first time
+            self._adpcmstate = None
+        data, self._adpcmstate = audioop.adpcm2lin(data, 2, self._adpcmstate)
+        return data
+
+    def _read_comm_chunk(self, chunk):
+        self._nchannels = _read_short(chunk)
+        self._nframes = _read_long(chunk)
+        self._sampwidth = (_read_short(chunk) + 7) // 8
+        self._framerate = int(_read_float(chunk))
+        self._framesize = self._nchannels * self._sampwidth
+        if self._aifc:
+            #DEBUG: SGI's soundeditor produces a bad size :-(
+            kludge = 0
+            if chunk.chunksize == 18:
+                kludge = 1
+                print('Warning: bad COMM chunk size')
+                chunk.chunksize = 23
+            #DEBUG end
+            self._comptype = chunk.read(4)
+            #DEBUG start
+            if kludge:
+                length = ord(chunk.file.read(1))
+                if length & 1 == 0:
+                    length = length + 1
+                chunk.chunksize = chunk.chunksize + length
+                chunk.file.seek(-1, 1)
+            #DEBUG end
+            self._compname = _read_string(chunk)
+            if self._comptype != b'NONE':
+                if self._comptype == b'G722':
+                    self._convert = self._adpcm2lin
+                    self._framesize = self._framesize // 4
+                elif self._comptype in (b'ulaw', b'ULAW'):
+                    self._convert = self._ulaw2lin
+                    self._framesize = self._framesize // 2
+                elif self._comptype in (b'alaw', b'ALAW'):
+                    self._convert = self._alaw2lin
+                    self._framesize = self._framesize // 2
+                else:
+                    raise Error('unsupported compression type')
+        else:
+            self._comptype = b'NONE'
+            self._compname = b'not compressed'
+
+    def _readmark(self, chunk):
+        nmarkers = _read_short(chunk)
+        # Some files appear to contain invalid counts.
+        # Cope with this by testing for EOF.
+        try:
+            for i in range(nmarkers):
+                id = _read_short(chunk)
+                pos = _read_long(chunk)
+                name = _read_string(chunk)
+                if pos or name:
+                    # some files appear to have
+                    # dummy markers consisting of
+                    # a position 0 and name ''
+                    self._markers.append((id, pos, name))
+        except EOFError:
+            print('Warning: MARK chunk contains only', end=' ')
+            print(len(self._markers), end=' ')
+            if len(self._markers) == 1: print('marker', end=' ')
+            else: print('markers', end=' ')
+            print('instead of', nmarkers)
+
+class Aifc_write:
+    # Variables used in this class:
+    #
+    # These variables are user settable through appropriate methods
+    # of this class:
+    # _file -- the open file with methods write(), close(), tell(), seek()
+    #       set through the __init__() method
+    # _comptype -- the AIFF-C compression type ('NONE' in AIFF)
+    #       set through the setcomptype() or setparams() method
+    # _compname -- the human-readable AIFF-C compression type
+    #       set through the setcomptype() or setparams() method
+    # _nchannels -- the number of audio channels
+    #       set through the setnchannels() or setparams() method
+    # _sampwidth -- the number of bytes per audio sample
+    #       set through the setsampwidth() or setparams() method
+    # _framerate -- the sampling frequency
+    #       set through the setframerate() or setparams() method
+    # _nframes -- the number of audio frames written to the header
+    #       set through the setnframes() or setparams() method
+    # _aifc -- whether we're writing an AIFF-C file or an AIFF file
+    #       set through the aifc() method, reset through the
+    #       aiff() method
+    #
+    # These variables are used internally only:
+    # _version -- the AIFF-C version number
+    # _comp -- the compressor from builtin module cl
+    # _nframeswritten -- the number of audio frames actually written
+    # _datalength -- the size of the audio samples written to the header
+    # _datawritten -- the size of the audio samples actually written
+
+    def __init__(self, f):
+        if isinstance(f, str):
+            filename = f
+            f = builtins.open(f, 'wb')
+        else:
+            # else, assume it is an open file object already
+            filename = '???'
+        self.initfp(f)
+        if filename[-5:] == '.aiff':
+            self._aifc = 0
+        else:
+            self._aifc = 1
+
+    def initfp(self, file):
+        self._file = file
+        self._version = _AIFC_version
+        self._comptype = b'NONE'
+        self._compname = b'not compressed'
+        self._convert = None
+        self._nchannels = 0
+        self._sampwidth = 0
+        self._framerate = 0
+        self._nframes = 0
+        self._nframeswritten = 0
+        self._datawritten = 0
+        self._datalength = 0
+        self._markers = []
+        self._marklength = 0
+        self._aifc = 1      # AIFF-C is default
+
+    def __del__(self):
+        self.close()
+
+    #
+    # User visible methods.
+    #
+    def aiff(self):
+        if self._nframeswritten:
+            raise Error('cannot change parameters after starting to write')
+        self._aifc = 0
+
+    def aifc(self):
+        if self._nframeswritten:
+            raise Error('cannot change parameters after starting to write')
+        self._aifc = 1
+
+    def setnchannels(self, nchannels):
+        if self._nframeswritten:
+            raise Error('cannot change parameters after starting to write')
+        if nchannels < 1:
+            raise Error('bad # of channels')
+        self._nchannels = nchannels
+
+    def getnchannels(self):
+        if not self._nchannels:
+            raise Error('number of channels not set')
+        return self._nchannels
+
+    def setsampwidth(self, sampwidth):
+        if self._nframeswritten:
+            raise Error('cannot change parameters after starting to write')
+        if sampwidth < 1 or sampwidth > 4:
+            raise Error('bad sample width')
+        self._sampwidth = sampwidth
+
+    def getsampwidth(self):
+        if not self._sampwidth:
+            raise Error('sample width not set')
+        return self._sampwidth
+
+    def setframerate(self, framerate):
+        if self._nframeswritten:
+            raise Error('cannot change parameters after starting to write')
+        if framerate <= 0:
+            raise Error('bad frame rate')
+        self._framerate = framerate
+
+    def getframerate(self):
+        if not self._framerate:
+            raise Error('frame rate not set')
+        return self._framerate
+
+    def setnframes(self, nframes):
+        if self._nframeswritten:
+            raise Error('cannot change parameters after starting to write')
+        self._nframes = nframes
+
+    def getnframes(self):
+        return self._nframeswritten
+
+    def setcomptype(self, comptype, compname):
+        if self._nframeswritten:
+            raise Error('cannot change parameters after starting to write')
+        if comptype not in (b'NONE', b'ulaw', b'ULAW',
+                            b'alaw', b'ALAW', b'G722'):
+            raise Error('unsupported compression type')
+        self._comptype = comptype
+        self._compname = compname
+
+    def getcomptype(self):
+        return self._comptype
+
+    def getcompname(self):
+        return self._compname
+
+##  def setversion(self, version):
+##      if self._nframeswritten:
+##          raise Error, 'cannot change parameters after starting to write'
+##      self._version = version
+
+    def setparams(self, params):
+        nchannels, sampwidth, framerate, nframes, comptype, compname = params
+        if self._nframeswritten:
+            raise Error('cannot change parameters after starting to write')
+        if comptype not in (b'NONE', b'ulaw', b'ULAW',
+                            b'alaw', b'ALAW', b'G722'):
+            raise Error('unsupported compression type')
+        self.setnchannels(nchannels)
+        self.setsampwidth(sampwidth)
+        self.setframerate(framerate)
+        self.setnframes(nframes)
+        self.setcomptype(comptype, compname)
+
+    def getparams(self):
+        if not self._nchannels or not self._sampwidth or not self._framerate:
+            raise Error('not all parameters set')
+        return self._nchannels, self._sampwidth, self._framerate, \
+              self._nframes, self._comptype, self._compname
+
+    def setmark(self, id, pos, name):
+        if id <= 0:
+            raise Error('marker ID must be > 0')
+        if pos < 0:
+            raise Error('marker position must be >= 0')
+        if not isinstance(name, bytes):
+            raise Error('marker name must be bytes')
+        for i in range(len(self._markers)):
+            if id == self._markers[i][0]:
+                self._markers[i] = id, pos, name
+                return
+        self._markers.append((id, pos, name))
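+
+    # For example, w.setmark(1, 0, b'start') adds marker 1 at frame 0;
+    # calling w.setmark(1, 100, b'start') again moves it (a minimal
+    # sketch, where 'w' stands for an Aifc_write instance).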
+
+    def getmark(self, id):
+        for marker in self._markers:
+            if id == marker[0]:
+                return marker
+        raise Error('marker {0!r} does not exist'.format(id))
+
+    def getmarkers(self):
+        if len(self._markers) == 0:
+            return None
+        return self._markers
+
+    def tell(self):
+        return self._nframeswritten
+
+    def writeframesraw(self, data):
+        self._ensure_header_written(len(data))
+        nframes = len(data) // (self._sampwidth * self._nchannels)
+        if self._convert:
+            data = self._convert(data)
+        self._file.write(data)
+        self._nframeswritten = self._nframeswritten + nframes
+        self._datawritten = self._datawritten + len(data)
+
+    def writeframes(self, data):
+        self.writeframesraw(data)
+        if self._nframeswritten != self._nframes or \
+              self._datalength != self._datawritten:
+            self._patchheader()
+
+    def close(self):
+        if self._file:
+            self._ensure_header_written(0)
+            if self._datawritten & 1:
+                # quick pad to even size
+                self._file.write(b'\x00')
+                self._datawritten = self._datawritten + 1
+            self._writemarkers()
+            if self._nframeswritten != self._nframes or \
+                  self._datalength != self._datawritten or \
+                  self._marklength:
+                self._patchheader()
+            # Prevent ref cycles
+            self._convert = None
+            self._file.close()
+            self._file = None
+
+    #
+    # Internal methods.
+    #
+
+    def _lin2alaw(self, data):
+        import audioop
+        return audioop.lin2alaw(data, 2)
+
+    def _lin2ulaw(self, data):
+        import audioop
+        return audioop.lin2ulaw(data, 2)
+
+    def _lin2adpcm(self, data):
+        import audioop
+        if not hasattr(self, '_adpcmstate'):
+            self._adpcmstate = None
+        data, self._adpcmstate = audioop.lin2adpcm(data, 2, self._adpcmstate)
+        return data
+
+    def _ensure_header_written(self, datasize):
+        if not self._nframeswritten:
+            if self._comptype in (b'ULAW', b'ulaw', b'ALAW', b'alaw', b'G722'):
+                if not self._sampwidth:
+                    self._sampwidth = 2
+                if self._sampwidth != 2:
+                    raise Error('sample width must be 2 when compressing '
+                                'with ulaw/ULAW, alaw/ALAW or G7.22 (ADPCM)')
+            if not self._nchannels:
+                raise Error('# channels not specified')
+            if not self._sampwidth:
+                raise Error('sample width not specified')
+            if not self._framerate:
+                raise Error('sampling rate not specified')
+            self._write_header(datasize)
+
+    def _init_compression(self):
+        if self._comptype == b'G722':
+            self._convert = self._lin2adpcm
+        elif self._comptype in (b'ulaw', b'ULAW'):
+            self._convert = self._lin2ulaw
+        elif self._comptype in (b'alaw', b'ALAW'):
+            self._convert = self._lin2alaw
+
+    def _write_header(self, initlength):
+        if self._aifc and self._comptype != b'NONE':
+            self._init_compression()
+        self._file.write(b'FORM')
+        if not self._nframes:
+            self._nframes = initlength // (self._nchannels * self._sampwidth)
+        self._datalength = self._nframes * self._nchannels * self._sampwidth
+        if self._datalength & 1:
+            self._datalength = self._datalength + 1
+        if self._aifc:
+            if self._comptype in (b'ulaw', b'ULAW', b'alaw', b'ALAW'):
+                self._datalength = self._datalength // 2
+                if self._datalength & 1:
+                    self._datalength = self._datalength + 1
+            elif self._comptype == b'G722':
+                self._datalength = (self._datalength + 3) // 4
+                if self._datalength & 1:
+                    self._datalength = self._datalength + 1
+        self._form_length_pos = self._file.tell()
+        commlength = self._write_form_length(self._datalength)
+        if self._aifc:
+            self._file.write(b'AIFC')
+            self._file.write(b'FVER')
+            _write_ulong(self._file, 4)
+            _write_ulong(self._file, self._version)
+        else:
+            self._file.write(b'AIFF')
+        self._file.write(b'COMM')
+        _write_ulong(self._file, commlength)
+        _write_short(self._file, self._nchannels)
+        self._nframes_pos = self._file.tell()
+        _write_ulong(self._file, self._nframes)
+        _write_short(self._file, self._sampwidth * 8)
+        _write_float(self._file, self._framerate)
+        if self._aifc:
+            self._file.write(self._comptype)
+            _write_string(self._file, self._compname)
+        self._file.write(b'SSND')
+        self._ssnd_length_pos = self._file.tell()
+        _write_ulong(self._file, self._datalength + 8)
+        _write_ulong(self._file, 0)
+        _write_ulong(self._file, 0)
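+
+    # The chunk layout produced above is
+    #   FORM <size> AIFC FVER <version> COMM <params> SSND <data ...>
+    # (with 'AIFF' and no FVER chunk for plain AIFF files);
+    # _patchheader() later seeks back to the recorded positions to fix
+    # up the sizes once the real frame counts are known.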
+
+    def _write_form_length(self, datalength):
+        if self._aifc:
+            commlength = 18 + 5 + len(self._compname)
+            if commlength & 1:
+                commlength = commlength + 1
+            verslength = 12
+        else:
+            commlength = 18
+            verslength = 0
+        _write_ulong(self._file, 4 + verslength + self._marklength + \
+                     8 + commlength + 16 + datalength)
+        return commlength
+
+    def _patchheader(self):
+        curpos = self._file.tell()
+        if self._datawritten & 1:
+            datalength = self._datawritten + 1
+            self._file.write(b'\x00')
+        else:
+            datalength = self._datawritten
+        if datalength == self._datalength and \
+              self._nframes == self._nframeswritten and \
+              self._marklength == 0:
+            self._file.seek(curpos, 0)
+            return
+        self._file.seek(self._form_length_pos, 0)
+        dummy = self._write_form_length(datalength)
+        self._file.seek(self._nframes_pos, 0)
+        _write_ulong(self._file, self._nframeswritten)
+        self._file.seek(self._ssnd_length_pos, 0)
+        _write_ulong(self._file, datalength + 8)
+        self._file.seek(curpos, 0)
+        self._nframes = self._nframeswritten
+        self._datalength = datalength
+
+    def _writemarkers(self):
+        if len(self._markers) == 0:
+            return
+        self._file.write(b'MARK')
+        length = 2
+        for marker in self._markers:
+            id, pos, name = marker
+            length = length + len(name) + 1 + 6
+            if len(name) & 1 == 0:
+                length = length + 1
+        _write_ulong(self._file, length)
+        self._marklength = length + 8
+        _write_short(self._file, len(self._markers))
+        for marker in self._markers:
+            id, pos, name = marker
+            _write_short(self._file, id)
+            _write_ulong(self._file, pos)
+            _write_string(self._file, name)
+
+def open(f, mode=None):
+    if mode is None:
+        if hasattr(f, 'mode'):
+            mode = f.mode
+        else:
+            mode = 'rb'
+    if mode in ('r', 'rb'):
+        return Aifc_read(f)
+    elif mode in ('w', 'wb'):
+        return Aifc_write(f)
+    else:
+        raise Error("mode must be 'r', 'rb', 'w', or 'wb'")
+
+openfp = open # B/W compatibility
+
+if __name__ == '__main__':
+    import sys
+    if not sys.argv[1:]:
+        sys.argv.append('/usr/demos/data/audio/bach.aiff')
+    fn = sys.argv[1]
+    f = open(fn, 'r')
+    print("Reading", fn)
+    print("nchannels =", f.getnchannels())
+    print("nframes   =", f.getnframes())
+    print("sampwidth =", f.getsampwidth())
+    print("framerate =", f.getframerate())
+    print("comptype  =", f.getcomptype())
+    print("compname  =", f.getcompname())
+    if sys.argv[2:]:
+        gn = sys.argv[2]
+        print("Writing", gn)
+        g = open(gn, 'w')
+        g.setparams(f.getparams())
+        while 1:
+            data = f.readframes(1024)
+            if not data:
+                break
+            g.writeframes(data)
+        g.close()
+        f.close()
+        print("Done.")
diff --git a/lib-python/3.2/antigravity.py b/lib-python/3.2/antigravity.py
new file mode 100644
--- /dev/null
+++ b/lib-python/3.2/antigravity.py
@@ -0,0 +1,17 @@
+
+import webbrowser
+import hashlib
+
+webbrowser.open("http://xkcd.com/353/")
+
+def geohash(latitude, longitude, datedow):
+    '''Compute geohash() using the Munroe algorithm.
+
+    >>> geohash(37.421542, -122.085589, b'2005-05-26-10458.68')
+    37.857713 -122.544543
+
+    '''
+    # http://xkcd.com/426/
+    h = hashlib.md5(datedow).hexdigest()
+    p, q = [('%f' % float.fromhex('0.' + x)) for x in (h[:16], h[16:32])]
+    print('%d%s %d%s' % (latitude, p[1:], longitude, q[1:]))
diff --git a/lib-python/3.2/argparse.py b/lib-python/3.2/argparse.py
new file mode 100644
--- /dev/null
+++ b/lib-python/3.2/argparse.py
@@ -0,0 +1,2362 @@
+# Author: Steven J. Bethard <steven.bethard at gmail.com>.
+
+"""Command-line parsing library
+
+This module is an optparse-inspired command-line parsing library that:
+
+    - handles both optional and positional arguments
+    - produces highly informative usage messages
+    - supports parsers that dispatch to sub-parsers
+
+The following is a simple usage example that sums integers from the
+command-line and writes the result to a file::
+
+    parser = argparse.ArgumentParser(
+        description='sum the integers at the command line')
+    parser.add_argument(
+        'integers', metavar='int', nargs='+', type=int,
+        help='an integer to be summed')
+    parser.add_argument(
+        '--log', default=sys.stdout, type=argparse.FileType('w'),
+        help='the file where the sum should be written')
+    args = parser.parse_args()
+    args.log.write('%s' % sum(args.integers))
+    args.log.close()
+
+The module contains the following public classes:
+
+    - ArgumentParser -- The main entry point for command-line parsing. As the
+        example above shows, the add_argument() method is used to populate
+        the parser with actions for optional and positional arguments. Then
+        the parse_args() method is invoked to convert the args at the
+        command-line into an object with attributes.
+
+    - ArgumentError -- The exception raised by ArgumentParser objects when
+        there are errors with the parser's actions. Errors raised while
+        parsing the command-line are caught by ArgumentParser and emitted
+        as command-line messages.
+
+    - FileType -- A factory for defining types of files to be created. As the
+        example above shows, instances of FileType are typically passed as
+        the type= argument of add_argument() calls.
+
+    - Action -- The base class for parser actions. Typically actions are
+        selected by passing strings like 'store_true' or 'append_const' to
+        the action= argument of add_argument(). However, for greater
+        customization of ArgumentParser actions, subclasses of Action may
+        be defined and passed as the action= argument.
+
+    - HelpFormatter, RawDescriptionHelpFormatter, RawTextHelpFormatter,
+        ArgumentDefaultsHelpFormatter -- Formatter classes which
+        may be passed as the formatter_class= argument to the
+        ArgumentParser constructor. HelpFormatter is the default,
+        RawDescriptionHelpFormatter and RawTextHelpFormatter tell the parser
+        not to change the formatting for help text, and
+        ArgumentDefaultsHelpFormatter adds information about argument defaults
+        to the help.
+
+All other classes in this module are considered implementation details.
+(Also note that HelpFormatter and RawDescriptionHelpFormatter are only
+considered public as object names -- the API of the formatter objects is
+still considered an implementation detail.)
+"""
+
+__version__ = '1.1'
+__all__ = [
+    'ArgumentParser',
+    'ArgumentError',
+    'ArgumentTypeError',
+    'FileType',
+    'HelpFormatter',
+    'ArgumentDefaultsHelpFormatter',
+    'RawDescriptionHelpFormatter',
+    'RawTextHelpFormatter',
+    'Namespace',
+    'Action',
+    'ONE_OR_MORE',
+    'OPTIONAL',
+    'PARSER',
+    'REMAINDER',
+    'SUPPRESS',
+    'ZERO_OR_MORE',
+]
+
+
+import collections as _collections
+import copy as _copy
+import os as _os
+import re as _re
+import sys as _sys
+import textwrap as _textwrap
+
+from gettext import gettext as _, ngettext
+
+
+SUPPRESS = '==SUPPRESS=='
+
+OPTIONAL = '?'
+ZERO_OR_MORE = '*'
+ONE_OR_MORE = '+'
+PARSER = 'A...'
+REMAINDER = '...'
+_UNRECOGNIZED_ARGS_ATTR = '_unrecognized_args'
+
+# =============================
+# Utility functions and classes
+# =============================
+
+class _AttributeHolder(object):
+    """Abstract base class that provides __repr__.
+
+    The __repr__ method returns a string in the format::
+        ClassName(attr=name, attr=name, ...)
+    The attributes are determined by the _get_kwargs() and _get_args()
+    methods, which by default inspect the instance __dict__.
+    """
+
+    def __repr__(self):
+        type_name = type(self).__name__
+        arg_strings = []
+        for arg in self._get_args():
+            arg_strings.append(repr(arg))
+        for name, value in self._get_kwargs():
+            arg_strings.append('%s=%r' % (name, value))
+        return '%s(%s)' % (type_name, ', '.join(arg_strings))
+
+    def _get_kwargs(self):
+        return sorted(self.__dict__.items())
+
+    def _get_args(self):
+        return []
+
+
+def _ensure_value(namespace, name, value):
+    if getattr(namespace, name, None) is None:
+        setattr(namespace, name, value)
+    return getattr(namespace, name)
+
+
+# ===============
+# Formatting Help
+# ===============
+
+class HelpFormatter(object):
+    """Formatter for generating usage messages and argument help strings.
+
+    Only the name of this class is considered a public API. All the methods
+    provided by the class are considered an implementation detail.
+    """
+
+    def __init__(self,
+                 prog,
+                 indent_increment=2,
+                 max_help_position=24,
+                 width=None):
+
+        # default setting for width
+        if width is None:
+            try:
+                width = int(_os.environ['COLUMNS'])
+            except (KeyError, ValueError):
+                width = 80
+            width -= 2
+
+        self._prog = prog
+        self._indent_increment = indent_increment
+        self._max_help_position = max_help_position
+        self._width = width
+
+        self._current_indent = 0
+        self._level = 0
+        self._action_max_length = 0
+
+        self._root_section = self._Section(self, None)
+        self._current_section = self._root_section
+
+        self._whitespace_matcher = _re.compile(r'\s+')
+        self._long_break_matcher = _re.compile(r'\n\n\n+')
+
+    # ===============================
+    # Section and indentation methods
+    # ===============================
+    def _indent(self):
+        self._current_indent += self._indent_increment
+        self._level += 1
+
+    def _dedent(self):
+        self._current_indent -= self._indent_increment
+        assert self._current_indent >= 0, 'Indent decreased below 0.'
+        self._level -= 1
+
+    class _Section(object):
+
+        def __init__(self, formatter, parent, heading=None):
+            self.formatter = formatter
+            self.parent = parent
+            self.heading = heading
+            self.items = []
+
+        def format_help(self):
+            # format the indented section
+            if self.parent is not None:
+                self.formatter._indent()
+            join = self.formatter._join_parts
+            item_help = join([func(*args) for func, args in self.items])
+            if self.parent is not None:
+                self.formatter._dedent()
+
+            # return nothing if the section was empty
+            if not item_help:
+                return ''
+
+            # add the heading if the section was non-empty
+            if self.heading is not SUPPRESS and self.heading is not None:
+                current_indent = self.formatter._current_indent
+                heading = '%*s%s:\n' % (current_indent, '', self.heading)
+            else:
+                heading = ''
+
+            # join the section-initial newline, the heading and the help
+            return join(['\n', heading, item_help, '\n'])
+
+    def _add_item(self, func, args):
+        self._current_section.items.append((func, args))
+
+    # ========================
+    # Message building methods
+    # ========================
+    def start_section(self, heading):
+        self._indent()
+        section = self._Section(self, self._current_section, heading)
+        self._add_item(section.format_help, [])
+        self._current_section = section
+
+    def end_section(self):
+        self._current_section = self._current_section.parent
+        self._dedent()
+
+    def add_text(self, text):
+        if text is not SUPPRESS and text is not None:
+            self._add_item(self._format_text, [text])
+
+    def add_usage(self, usage, actions, groups, prefix=None):
+        if usage is not SUPPRESS:
+            args = usage, actions, groups, prefix
+            self._add_item(self._format_usage, args)
+
+    def add_argument(self, action):
+        if action.help is not SUPPRESS:
+
+            # find all invocations
+            get_invocation = self._format_action_invocation
+            invocations = [get_invocation(action)]
+            for subaction in self._iter_indented_subactions(action):
+                invocations.append(get_invocation(subaction))
+
+            # update the maximum item length
+            invocation_length = max([len(s) for s in invocations])
+            action_length = invocation_length + self._current_indent
+            self._action_max_length = max(self._action_max_length,
+                                          action_length)
+
+            # add the item to the list
+            self._add_item(self._format_action, [action])
+
+    def add_arguments(self, actions):
+        for action in actions:
+            self.add_argument(action)
+
+    # =======================
+    # Help-formatting methods
+    # =======================
+    def format_help(self):
+        help = self._root_section.format_help()
+        if help:
+            help = self._long_break_matcher.sub('\n\n', help)
+            help = help.strip('\n') + '\n'
+        return help
+
+    def _join_parts(self, part_strings):
+        return ''.join([part
+                        for part in part_strings
+                        if part and part is not SUPPRESS])
+
+    def _format_usage(self, usage, actions, groups, prefix):
+        if prefix is None:
+            prefix = _('usage: ')
+
+        # if usage is specified, use that
+        if usage is not None:
+            usage = usage % dict(prog=self._prog)
+
+        # if no optionals or positionals are available, usage is just prog
+        elif usage is None and not actions:
+            usage = '%(prog)s' % dict(prog=self._prog)
+
+        # if optionals and positionals are available, calculate usage
+        elif usage is None:
+            prog = '%(prog)s' % dict(prog=self._prog)
+
+            # split optionals from positionals
+            optionals = []
+            positionals = []
+            for action in actions:
+                if action.option_strings:
+                    optionals.append(action)
+                else:
+                    positionals.append(action)
+
+            # build full usage string
+            format = self._format_actions_usage
+            action_usage = format(optionals + positionals, groups)
+            usage = ' '.join([s for s in [prog, action_usage] if s])
+
+            # wrap the usage parts if it's too long
+            text_width = self._width - self._current_indent
+            if len(prefix) + len(usage) > text_width:
+
+                # break usage into wrappable parts
+                part_regexp = r'\(.*?\)+|\[.*?\]+|\S+'
+                opt_usage = format(optionals, groups)
+                pos_usage = format(positionals, groups)
+                opt_parts = _re.findall(part_regexp, opt_usage)
+                pos_parts = _re.findall(part_regexp, pos_usage)
+                assert ' '.join(opt_parts) == opt_usage
+                assert ' '.join(pos_parts) == pos_usage
+
+                # helper for wrapping lines
+                def get_lines(parts, indent, prefix=None):
+                    lines = []
+                    line = []
+                    if prefix is not None:
+                        line_len = len(prefix) - 1
+                    else:
+                        line_len = len(indent) - 1
+                    for part in parts:
+                        if line_len + 1 + len(part) > text_width:
+                            lines.append(indent + ' '.join(line))
+                            line = []
+                            line_len = len(indent) - 1
+                        line.append(part)
+                        line_len += len(part) + 1
+                    if line:
+                        lines.append(indent + ' '.join(line))
+                    if prefix is not None:
+                        lines[0] = lines[0][len(indent):]
+                    return lines
+
+                # if prog is short, follow it with optionals or positionals
+                if len(prefix) + len(prog) <= 0.75 * text_width:
+                    indent = ' ' * (len(prefix) + len(prog) + 1)
+                    if opt_parts:
+                        lines = get_lines([prog] + opt_parts, indent, prefix)
+                        lines.extend(get_lines(pos_parts, indent))
+                    elif pos_parts:
+                        lines = get_lines([prog] + pos_parts, indent, prefix)
+                    else:
+                        lines = [prog]
+
+                # if prog is long, put it on its own line
+                else:
+                    indent = ' ' * len(prefix)
+                    parts = opt_parts + pos_parts
+                    lines = get_lines(parts, indent)
+                    if len(lines) > 1:
+                        lines = []
+                        lines.extend(get_lines(opt_parts, indent))
+                        lines.extend(get_lines(pos_parts, indent))
+                    lines = [prog] + lines
+
+                # join lines into usage
+                usage = '\n'.join(lines)
+
+        # prefix with 'usage:'
+        return '%s%s\n\n' % (prefix, usage)
+
+    def _format_actions_usage(self, actions, groups):
+        # find group indices and identify actions in groups
+        group_actions = set()
+        inserts = {}
+        for group in groups:
+            try:
+                start = actions.index(group._group_actions[0])
+            except ValueError:
+                continue
+            else:
+                end = start + len(group._group_actions)
+                if actions[start:end] == group._group_actions:
+                    for action in group._group_actions:
+                        group_actions.add(action)
+                    if not group.required:
+                        if start in inserts:
+                            inserts[start] += ' ['
+                        else:
+                            inserts[start] = '['
+                        inserts[end] = ']'
+                    else:
+                        if start in inserts:
+                            inserts[start] += ' ('
+                        else:
+                            inserts[start] = '('
+                        inserts[end] = ')'
+                    for i in range(start + 1, end):
+                        inserts[i] = '|'
+
+        # collect all actions format strings
+        parts = []
+        for i, action in enumerate(actions):
+
+            # suppressed arguments are marked with None
+            # remove | separators for suppressed arguments
+            if action.help is SUPPRESS:
+                parts.append(None)
+                if inserts.get(i) == '|':
+                    inserts.pop(i)
+                elif inserts.get(i + 1) == '|':
+                    inserts.pop(i + 1)
+
+            # produce all arg strings
+            elif not action.option_strings:
+                part = self._format_args(action, action.dest)
+
+                # if it's in a group, strip the outer []
+                if action in group_actions:
+                    if part[0] == '[' and part[-1] == ']':
+                        part = part[1:-1]
+
+                # add the action string to the list
+                parts.append(part)
+
+            # produce the first way to invoke the option in brackets
+            else:
+                option_string = action.option_strings[0]
+
+                # if the Optional doesn't take a value, format is:
+                #    -s or --long
+                if action.nargs == 0:
+                    part = '%s' % option_string
+
+                # if the Optional takes a value, format is:
+                #    -s ARGS or --long ARGS
+                else:
+                    default = action.dest.upper()
+                    args_string = self._format_args(action, default)
+                    part = '%s %s' % (option_string, args_string)
+
+                # make it look optional if it's not required or in a group
+                if not action.required and action not in group_actions:
+                    part = '[%s]' % part
+
+                # add the action string to the list
+                parts.append(part)
+
+        # insert things at the necessary indices
+        for i in sorted(inserts, reverse=True):
+            parts[i:i] = [inserts[i]]
+
+        # join all the action items with spaces
+        text = ' '.join([item for item in parts if item is not None])
+
+        # clean up separators for mutually exclusive groups
+        open = r'[\[(]'
+        close = r'[\])]'
+        text = _re.sub(r'(%s) ' % open, r'\1', text)
+        text = _re.sub(r' (%s)' % close, r'\1', text)
+        text = _re.sub(r'%s *%s' % (open, close), r'', text)
+        text = _re.sub(r'\(([^|]*)\)', r'\1', text)
+        text = text.strip()
+
+        # return the text
+        return text
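+
+    # For example, a mutually exclusive group of two store_true options
+    # -a and -b is rendered as '[-a | -b]' if optional and '(-a | -b)'
+    # if required (an illustrative case, not computed here).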
+
+    def _format_text(self, text):
+        if '%(prog)' in text:
+            text = text % dict(prog=self._prog)
+        text_width = self._width - self._current_indent
+        indent = ' ' * self._current_indent
+        return self._fill_text(text, text_width, indent) + '\n\n'
+
+    def _format_action(self, action):
+        # determine the required width and the entry label
+        help_position = min(self._action_max_length + 2,
+                            self._max_help_position)
+        help_width = self._width - help_position
+        action_width = help_position - self._current_indent - 2
+        action_header = self._format_action_invocation(action)
+
+        # no help; start on same line and add a final newline
+        if not action.help:
+            tup = self._current_indent, '', action_header
+            action_header = '%*s%s\n' % tup
+
+        # short action name; start on the same line and pad two spaces
+        elif len(action_header) <= action_width:
+            tup = self._current_indent, '', action_width, action_header
+            action_header = '%*s%-*s  ' % tup
+            indent_first = 0
+
+        # long action name; start on the next line
+        else:
+            tup = self._current_indent, '', action_header
+            action_header = '%*s%s\n' % tup
+            indent_first = help_position
+
+        # collect the pieces of the action help
+        parts = [action_header]
+
+        # if there was help for the action, add lines of help text
+        if action.help:
+            help_text = self._expand_help(action)
+            help_lines = self._split_lines(help_text, help_width)
+            parts.append('%*s%s\n' % (indent_first, '', help_lines[0]))
+            for line in help_lines[1:]:
+                parts.append('%*s%s\n' % (help_position, '', line))
+
+        # or add a newline if the description doesn't end with one
+        elif not action_header.endswith('\n'):
+            parts.append('\n')
+
+        # if there are any sub-actions, add their help as well
+        for subaction in self._iter_indented_subactions(action):
+            parts.append(self._format_action(subaction))
+
+        # return a single string
+        return self._join_parts(parts)
+
+    def _format_action_invocation(self, action):
+        if not action.option_strings:
+            metavar, = self._metavar_formatter(action, action.dest)(1)
+            return metavar
+
+        else:
+            parts = []
+
+            # if the Optional doesn't take a value, format is:
+            #    -s, --long
+            if action.nargs == 0:
+                parts.extend(action.option_strings)
+
+            # if the Optional takes a value, format is:
+            #    -s ARGS, --long ARGS
+            else:
+                default = action.dest.upper()
+                args_string = self._format_args(action, default)
+                for option_string in action.option_strings:
+                    parts.append('%s %s' % (option_string, args_string))
+
+            return ', '.join(parts)
+
+    def _metavar_formatter(self, action, default_metavar):
+        if action.metavar is not None:
+            result = action.metavar
+        elif action.choices is not None:
+            choice_strs = [str(choice) for choice in action.choices]
+            result = '{%s}' % ','.join(choice_strs)
+        else:
+            result = default_metavar
+
+        def format(tuple_size):
+            if isinstance(result, tuple):
+                return result
+            else:
+                return (result, ) * tuple_size
+        return format
+
+    def _format_args(self, action, default_metavar):
+        get_metavar = self._metavar_formatter(action, default_metavar)
+        if action.nargs is None:
+            result = '%s' % get_metavar(1)
+        elif action.nargs == OPTIONAL:
+            result = '[%s]' % get_metavar(1)
+        elif action.nargs == ZERO_OR_MORE:
+            result = '[%s [%s ...]]' % get_metavar(2)
+        elif action.nargs == ONE_OR_MORE:
+            result = '%s [%s ...]' % get_metavar(2)
+        elif action.nargs == REMAINDER:
+            result = '...'
+        elif action.nargs == PARSER:
+            result = '%s ...' % get_metavar(1)
+        else:
+            formats = ['%s' for _ in range(action.nargs)]
+            result = ' '.join(formats) % get_metavar(action.nargs)
+        return result
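+
+    # For example, nargs='+' with metavar N is rendered as 'N [N ...]'
+    # and nargs=2 as 'N N' (illustrative readings of the cases above).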
+
+    def _expand_help(self, action):
+        params = dict(vars(action), prog=self._prog)
+        for name in list(params):
+            if params[name] is SUPPRESS:
+                del params[name]
+        for name in list(params):
+            if hasattr(params[name], '__name__'):
+                params[name] = params[name].__name__
+        if params.get('choices') is not None:
+            choices_str = ', '.join([str(c) for c in params['choices']])
+            params['choices'] = choices_str
+        return self._get_help_string(action) % params
+
+    def _iter_indented_subactions(self, action):
+        try:
+            get_subactions = action._get_subactions
+        except AttributeError:
+            pass
+        else:
+            self._indent()
+            for subaction in get_subactions():
+                yield subaction
+            self._dedent()
+
+    def _split_lines(self, text, width):
+        text = self._whitespace_matcher.sub(' ', text).strip()
+        return _textwrap.wrap(text, width)
+
+    def _fill_text(self, text, width, indent):
+        text = self._whitespace_matcher.sub(' ', text).strip()
+        return _textwrap.fill(text, width, initial_indent=indent,
+                                           subsequent_indent=indent)
+
+    def _get_help_string(self, action):
+        return action.help
+
+
+class RawDescriptionHelpFormatter(HelpFormatter):
+    """Help message formatter which retains any formatting in descriptions.
+
+    Only the name of this class is considered a public API. All the methods
+    provided by the class are considered an implementation detail.
+    """
+
+    def _fill_text(self, text, width, indent):
+        return ''.join([indent + line for line in text.splitlines(True)])
+
+
+class RawTextHelpFormatter(RawDescriptionHelpFormatter):
+    """Help message formatter which retains formatting of all help text.
+
+    Only the name of this class is considered a public API. All the methods
+    provided by the class are considered an implementation detail.
+    """
+
+    def _split_lines(self, text, width):
+        return text.splitlines()
+
+
+class ArgumentDefaultsHelpFormatter(HelpFormatter):
+    """Help message formatter which adds default values to argument help.
+
+    Only the name of this class is considered a public API. All the methods
+    provided by the class are considered an implementation detail.
+    """
+
+    def _get_help_string(self, action):
+        help = action.help
+        if '%(default)' not in action.help:
+            if action.default is not SUPPRESS:
+                defaulting_nargs = [OPTIONAL, ZERO_OR_MORE]
+                if action.option_strings or action.nargs in defaulting_nargs:
+                    help += ' (default: %(default)s)'
+        return help
+
+
+# =====================
+# Options and Arguments
+# =====================
+
+def _get_action_name(argument):
+    if argument is None:
+        return None
+    elif argument.option_strings:
+        return '/'.join(argument.option_strings)
+    elif argument.metavar not in (None, SUPPRESS):
+        return argument.metavar
+    elif argument.dest not in (None, SUPPRESS):
+        return argument.dest
+    else:
+        return None
+
+
+class ArgumentError(Exception):
+    """An error from creating or using an argument (optional or positional).
+
+    The string value of this exception is the message, augmented with
+    information about the argument that caused it.
+    """
+
+    def __init__(self, argument, message):
+        self.argument_name = _get_action_name(argument)
+        self.message = message
+
+    def __str__(self):
+        if self.argument_name is None:
+            format = '%(message)s'
+        else:
+            format = 'argument %(argument_name)s: %(message)s'
+        return format % dict(message=self.message,
+                             argument_name=self.argument_name)
+
+
+class ArgumentTypeError(Exception):
+    """An error from trying to convert a command line string to a type."""
+    pass
+
+
+# ==============
+# Action classes
+# ==============
+
+class Action(_AttributeHolder):
+    """Information about how to convert command line strings to Python objects.
+
+    Action objects are used by an ArgumentParser to represent the information
+    needed to parse a single argument from one or more strings from the
+    command line. The keyword arguments to the Action constructor are also
+    all attributes of Action instances.
+
+    Keyword Arguments:
+
+        - option_strings -- A list of command-line option strings which
+            should be associated with this action.
+
+        - dest -- The name of the attribute to hold the created object(s)
+
+        - nargs -- The number of command-line arguments that should be
+            consumed. By default, one argument will be consumed and a single
+            value will be produced.  Other values include:
+                - N (an integer) consumes N arguments (and produces a list)
+                - '?' consumes zero or one arguments
+                - '*' consumes zero or more arguments (and produces a list)
+                - '+' consumes one or more arguments (and produces a list)
+            Note that the difference between the default and nargs=1 is that
+            with the default, a single value will be produced, while with
+            nargs=1, a list containing a single value will be produced.
+
+        - const -- The value to be produced if the option is specified and the
+            option uses an action that takes no values.
+
+        - default -- The value to be produced if the option is not specified.
+
+        - type -- The type which the command-line arguments should be converted
+            to, should be one of 'string', 'int', 'float', 'complex' or a
+            callable object that accepts a single string argument. If None,
+            'string' is assumed.
+
+        - choices -- A container of values that should be allowed. If not None,
+            after a command-line argument has been converted to the appropriate
+            type, an exception will be raised if it is not a member of this
+            collection.
+
+        - required -- True if the action must always be specified at the
+            command line. This is only meaningful for optional command-line
+            arguments.
+
+        - help -- The help string describing the argument.
+
+        - metavar -- The name to be used for the option's argument with the
+            help string. If None, the 'dest' value will be used as the name.
+    """
+
+    def __init__(self,
+                 option_strings,
+                 dest,
+                 nargs=None,
+                 const=None,
+                 default=None,
+                 type=None,
+                 choices=None,
+                 required=False,
+                 help=None,
+                 metavar=None):
+        self.option_strings = option_strings
+        self.dest = dest
+        self.nargs = nargs
+        self.const = const
+        self.default = default
+        self.type = type
+        self.choices = choices
+        self.required = required
+        self.help = help
+        self.metavar = metavar
+
+    def _get_kwargs(self):
+        names = [
+            'option_strings',
+            'dest',
+            'nargs',
+            'const',
+            'default',
+            'type',
+            'choices',
+            'help',
+            'metavar',
+        ]
+        return [(name, getattr(self, name)) for name in names]
+
+    def __call__(self, parser, namespace, values, option_string=None):
+        raise NotImplementedError(_('.__call__() not defined'))
+
+
+class _StoreAction(Action):
+
+    def __init__(self,
+                 option_strings,
+                 dest,
+                 nargs=None,
+                 const=None,
+                 default=None,
+                 type=None,
+                 choices=None,
+                 required=False,
+                 help=None,
+                 metavar=None):
+        if nargs == 0:
+            raise ValueError('nargs for store actions must be > 0; if you '
+                             'have nothing to store, actions such as store '
+                             'true or store const may be more appropriate')
+        if const is not None and nargs != OPTIONAL:
+            raise ValueError('nargs must be %r to supply const' % OPTIONAL)
+        super(_StoreAction, self).__init__(
+            option_strings=option_strings,
+            dest=dest,
+            nargs=nargs,
+            const=const,
+            default=default,
+            type=type,
+            choices=choices,
+            required=required,
+            help=help,
+            metavar=metavar)
+
+    def __call__(self, parser, namespace, values, option_string=None):
+        setattr(namespace, self.dest, values)
+
+
+class _StoreConstAction(Action):
+
+    def __init__(self,
+                 option_strings,
+                 dest,
+                 const,
+                 default=None,
+                 required=False,
+                 help=None,
+                 metavar=None):
+        super(_StoreConstAction, self).__init__(
+            option_strings=option_strings,
+            dest=dest,
+            nargs=0,
+            const=const,
+            default=default,
+            required=required,
+            help=help)
+
+    def __call__(self, parser, namespace, values, option_string=None):
+        setattr(namespace, self.dest, self.const)
+
+
+class _StoreTrueAction(_StoreConstAction):
+
+    def __init__(self,
+                 option_strings,
+                 dest,
+                 default=False,
+                 required=False,
+                 help=None):
+        super(_StoreTrueAction, self).__init__(
+            option_strings=option_strings,
+            dest=dest,
+            const=True,
+            default=default,
+            required=required,
+            help=help)
+
+
+class _StoreFalseAction(_StoreConstAction):
+
+    def __init__(self,
+                 option_strings,
+                 dest,
+                 default=True,
+                 required=False,
+                 help=None):
+        super(_StoreFalseAction, self).__init__(
+            option_strings=option_strings,
+            dest=dest,
+            const=False,
+            default=default,
+            required=required,
+            help=help)
+
+
+class _AppendAction(Action):
+
+    def __init__(self,
+                 option_strings,
+                 dest,
+                 nargs=None,
+                 const=None,
+                 default=None,
+                 type=None,
+                 choices=None,
+                 required=False,
+                 help=None,
+                 metavar=None):
+        if nargs == 0:
+            raise ValueError('nargs for append actions must be > 0; if arg '
+                             'strings are not supplying the value to append, '
+                             'the append const action may be more appropriate')
+        if const is not None and nargs != OPTIONAL:
+            raise ValueError('nargs must be %r to supply const' % OPTIONAL)
+        super(_AppendAction, self).__init__(
+            option_strings=option_strings,
+            dest=dest,
+            nargs=nargs,
+            const=const,
+            default=default,
+            type=type,
+            choices=choices,
+            required=required,
+            help=help,
+            metavar=metavar)
+
+    def __call__(self, parser, namespace, values, option_string=None):
+        items = _copy.copy(_ensure_value(namespace, self.dest, []))
+        items.append(values)
+        setattr(namespace, self.dest, items)
+
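[Note that _AppendAction copies the current list before appending, so a shared default list is never mutated in place. A small illustrative sketch:

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('--tag', action='append', default=[])
    print(parser.parse_args(['--tag', 'a', '--tag', 'b']).tag)   # ['a', 'b']
]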
+
+class _AppendConstAction(Action):
+
+    def __init__(self,
+                 option_strings,
+                 dest,
+                 const,
+                 default=None,
+                 required=False,
+                 help=None,
+                 metavar=None):
+        super(_AppendConstAction, self).__init__(
+            option_strings=option_strings,
+            dest=dest,
+            nargs=0,
+            const=const,
+            default=default,
+            required=required,
+            help=help,
+            metavar=metavar)
+
+    def __call__(self, parser, namespace, values, option_string=None):
+        items = _copy.copy(_ensure_value(namespace, self.dest, []))
+        items.append(self.const)
+        setattr(namespace, self.dest, items)
+
+
+class _CountAction(Action):
+
+    def __init__(self,
+                 option_strings,
+                 dest,
+                 default=None,
+                 required=False,
+                 help=None):
+        super(_CountAction, self).__init__(
+            option_strings=option_strings,
+            dest=dest,
+            nargs=0,
+            default=default,
+            required=required,
+            help=help)
+
+    def __call__(self, parser, namespace, values, option_string=None):
+        new_count = _ensure_value(namespace, self.dest, 0) + 1
+        setattr(namespace, self.dest, new_count)
+
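[_CountAction consumes no argument strings; each occurrence bumps the destination by one, starting from the default. Sketch:

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('-v', action='count', default=0)
    print(parser.parse_args(['-vvv']).v)   # 3

'-vvv' works because consume_optional() below peels concatenated single-dash options off the tail of the option string.]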
+
+class _HelpAction(Action):
+
+    def __init__(self,
+                 option_strings,
+                 dest=SUPPRESS,
+                 default=SUPPRESS,
+                 help=None):
+        super(_HelpAction, self).__init__(
+            option_strings=option_strings,
+            dest=dest,
+            default=default,
+            nargs=0,
+            help=help)
+
+    def __call__(self, parser, namespace, values, option_string=None):
+        parser.print_help()
+        parser.exit()
+
+
+class _VersionAction(Action):
+
+    def __init__(self,
+                 option_strings,
+                 version=None,
+                 dest=SUPPRESS,
+                 default=SUPPRESS,
+                 help="show program's version number and exit"):
+        super(_VersionAction, self).__init__(
+            option_strings=option_strings,
+            dest=dest,
+            default=default,
+            nargs=0,
+            help=help)
+        self.version = version
+
+    def __call__(self, parser, namespace, values, option_string=None):
+        version = self.version
+        if version is None:
+            version = parser.version
+        formatter = parser._get_formatter()
+        formatter.add_text(version)
+        parser.exit(message=formatter.format_help())
+
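[_VersionAction runs self.version through the parser's help formatter, so %(prog)s is expanded. Illustrative sketch:

    import argparse

    parser = argparse.ArgumentParser(prog='demo')
    parser.add_argument('--version', action='version', version='%(prog)s 1.0')
    parser.parse_args(['--version'])   # prints 'demo 1.0' and exits via parser.exit()
]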
+
+class _SubParsersAction(Action):
+
+    class _ChoicesPseudoAction(Action):
+
+        def __init__(self, name, aliases, help):
+            metavar = dest = name
+            if aliases:
+                metavar += ' (%s)' % ', '.join(aliases)
+            sup = super(_SubParsersAction._ChoicesPseudoAction, self)
+            sup.__init__(option_strings=[], dest=dest, help=help,
+                         metavar=metavar)
+
+    def __init__(self,
+                 option_strings,
+                 prog,
+                 parser_class,
+                 dest=SUPPRESS,
+                 help=None,
+                 metavar=None):
+
+        self._prog_prefix = prog
+        self._parser_class = parser_class
+        self._name_parser_map = _collections.OrderedDict()
+        self._choices_actions = []
+
+        super(_SubParsersAction, self).__init__(
+            option_strings=option_strings,
+            dest=dest,
+            nargs=PARSER,
+            choices=self._name_parser_map,
+            help=help,
+            metavar=metavar)
+
+    def add_parser(self, name, **kwargs):
+        # set prog from the existing prefix
+        if kwargs.get('prog') is None:
+            kwargs['prog'] = '%s %s' % (self._prog_prefix, name)
+
+        aliases = kwargs.pop('aliases', ())
+
+        # create a pseudo-action to hold the choice help
+        if 'help' in kwargs:
+            help = kwargs.pop('help')
+            choice_action = self._ChoicesPseudoAction(name, aliases, help)
+            self._choices_actions.append(choice_action)
+
+        # create the parser and add it to the map
+        parser = self._parser_class(**kwargs)
+        self._name_parser_map[name] = parser
+
+        # make parser available under aliases also
+        for alias in aliases:
+            self._name_parser_map[alias] = parser
+
+        return parser
+
+    def _get_subactions(self):
+        return self._choices_actions
+
+    def __call__(self, parser, namespace, values, option_string=None):
+        parser_name = values[0]
+        arg_strings = values[1:]
+
+        # set the parser name if requested
+        if self.dest is not SUPPRESS:
+            setattr(namespace, self.dest, parser_name)
+
+        # select the parser
+        try:
+            parser = self._name_parser_map[parser_name]
+        except KeyError:
+            args = {'parser_name': parser_name,
+                    'choices': ', '.join(self._name_parser_map)}
+            msg = _('unknown parser %(parser_name)r (choices: %(choices)s)') % args
+            raise ArgumentError(self, msg)
+
+        # parse all the remaining options into the namespace
+        # store any unrecognized options on the object, so that the top
+        # level parser can decide what to do with them
+        namespace, arg_strings = parser.parse_known_args(arg_strings, namespace)
+        if arg_strings:
+            vars(namespace).setdefault(_UNRECOGNIZED_ARGS_ATTR, [])
+            getattr(namespace, _UNRECOGNIZED_ARGS_ATTR).extend(arg_strings)
+
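[How the subparser machinery above fits together, including the aliases support new in 3.2 (names are illustrative):

    import argparse

    parser = argparse.ArgumentParser(prog='vcs')
    subparsers = parser.add_subparsers(dest='command')
    co = subparsers.add_parser('checkout', aliases=['co'], help='check out a branch')
    co.add_argument('branch')

    args = parser.parse_args(['co', 'default'])
    print(args.command, args.branch)   # co default

Aliases simply insert extra keys into _name_parser_map that point at the same parser object.]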
+
+# ==============
+# Type classes
+# ==============
+
+class FileType(object):
+    """Factory for creating file object types
+
+    Instances of FileType are typically passed as type= arguments to the
+    ArgumentParser add_argument() method.
+
+    Keyword Arguments:
+        - mode -- A string indicating how the file is to be opened. Accepts the
+            same values as the builtin open() function.
+        - bufsize -- The file's desired buffer size. Accepts the same values as
+            the builtin open() function.
+    """
+
+    def __init__(self, mode='r', bufsize=-1):
+        self._mode = mode
+        self._bufsize = bufsize
+
+    def __call__(self, string):
+        # the special argument "-" means sys.std{in,out}
+        if string == '-':
+            if 'r' in self._mode:
+                return _sys.stdin
+            elif 'w' in self._mode:
+                return _sys.stdout
+            else:
+                msg = _('argument "-" with mode %r') % self._mode
+                raise ValueError(msg)
+
+        # all other arguments are used as file names
+        try:
+            return open(string, self._mode, self._bufsize)
+        except IOError as e:
+            message = _("can't open '%s': %s")
+            raise ArgumentTypeError(message % (string, e))
+
+    def __repr__(self):
+        args = self._mode, self._bufsize
+        args_str = ', '.join(repr(arg) for arg in args if arg != -1)
+        return '%s(%s)' % (type(self).__name__, args_str)
+
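[Illustrative FileType sketch, showing the special-cased '-' argument:

    import argparse, sys

    parser = argparse.ArgumentParser()
    parser.add_argument('infile', type=argparse.FileType('r'))
    args = parser.parse_args(['-'])
    print(args.infile is sys.stdin)   # True; '-' maps to stdin for 'r' modes
]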
+# ===========================
+# Optional and Positional Parsing
+# ===========================
+
+class Namespace(_AttributeHolder):
+    """Simple object for storing attributes.
+
+    Implements equality by attribute names and values, and provides a simple
+    string representation.
+    """
+
+    def __init__(self, **kwargs):
+        for name in kwargs:
+            setattr(self, name, kwargs[name])
+
+    def __eq__(self, other):
+        return vars(self) == vars(other)
+
+    def __ne__(self, other):
+        return not (self == other)
+
+    def __contains__(self, key):
+        return key in self.__dict__
+
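[Namespace inherits the pretty repr from _AttributeHolder and compares by attribute dict, so construction order is irrelevant. Sketch:

    import argparse

    ns = argparse.Namespace(x=1, y=2)
    print(ns == argparse.Namespace(y=2, x=1))   # True -- compared via vars()
    print('x' in ns)                            # True, thanks to __contains__
]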
+
+class _ActionsContainer(object):
+
+    def __init__(self,
+                 description,
+                 prefix_chars,
+                 argument_default,
+                 conflict_handler):
+        super(_ActionsContainer, self).__init__()
+
+        self.description = description
+        self.argument_default = argument_default
+        self.prefix_chars = prefix_chars
+        self.conflict_handler = conflict_handler
+
+        # set up registries
+        self._registries = {}
+
+        # register actions
+        self.register('action', None, _StoreAction)
+        self.register('action', 'store', _StoreAction)
+        self.register('action', 'store_const', _StoreConstAction)
+        self.register('action', 'store_true', _StoreTrueAction)
+        self.register('action', 'store_false', _StoreFalseAction)
+        self.register('action', 'append', _AppendAction)
+        self.register('action', 'append_const', _AppendConstAction)
+        self.register('action', 'count', _CountAction)
+        self.register('action', 'help', _HelpAction)
+        self.register('action', 'version', _VersionAction)
+        self.register('action', 'parsers', _SubParsersAction)
+
+        # raise an exception if the conflict handler is invalid
+        self._get_handler()
+
+        # action storage
+        self._actions = []
+        self._option_string_actions = {}
+
+        # groups
+        self._action_groups = []
+        self._mutually_exclusive_groups = []
+
+        # defaults storage
+        self._defaults = {}
+
+        # determines whether an "option" looks like a negative number
+        self._negative_number_matcher = _re.compile(r'^-\d+$|^-\d*\.\d+$')
+
+        # whether or not there are any optionals that look like negative
+        # numbers -- uses a list so it can be shared and edited
+        self._has_negative_number_optionals = []
+
+    # ====================
+    # Registration methods
+    # ====================
+    def register(self, registry_name, value, object):
+        registry = self._registries.setdefault(registry_name, {})
+        registry[value] = object
+
+    def _registry_get(self, registry_name, value, default=None):
+        return self._registries[registry_name].get(value, default)
+
+    # ==================================
+    # Namespace default accessor methods
+    # ==================================
+    def set_defaults(self, **kwargs):
+        self._defaults.update(kwargs)
+
+        # if these defaults match any existing arguments, replace
+        # the previous default on the object with the new one
+        for action in self._actions:
+            if action.dest in kwargs:
+                action.default = kwargs[action.dest]
+
+    def get_default(self, dest):
+        for action in self._actions:
+            if action.dest == dest and action.default is not None:
+                return action.default
+        return self._defaults.get(dest, None)
+
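[set_defaults() both records the value and back-patches any already-added action, so get_default() and parsing agree. Illustrative sketch:

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('--level')
    parser.set_defaults(level='info')
    print(parser.get_default('level'))    # 'info'
    print(parser.parse_args([]).level)    # 'info'
]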
+
+    # =======================
+    # Adding argument actions
+    # =======================
+    def add_argument(self, *args, **kwargs):
+        """
+        add_argument(dest, ..., name=value, ...)
+        add_argument(option_string, option_string, ..., name=value, ...)
+        """
+
+        # if no positional args are supplied or only one is supplied and
+        # it doesn't look like an option string, parse a positional
+        # argument
+        chars = self.prefix_chars
+        if not args or len(args) == 1 and args[0][0] not in chars:
+            if args and 'dest' in kwargs:
+                raise ValueError('dest supplied twice for positional argument')
+            kwargs = self._get_positional_kwargs(*args, **kwargs)
+
+        # otherwise, we're adding an optional argument
+        else:
+            kwargs = self._get_optional_kwargs(*args, **kwargs)
+
+        # if no default was supplied, use the parser-level default
+        if 'default' not in kwargs:
+            dest = kwargs['dest']
+            if dest in self._defaults:
+                kwargs['default'] = self._defaults[dest]
+            elif self.argument_default is not None:
+                kwargs['default'] = self.argument_default
+
+        # create the action object, and add it to the parser
+        action_class = self._pop_action_class(kwargs)
+        if not callable(action_class):
+            raise ValueError('unknown action "%s"' % (action_class,))
+        action = action_class(**kwargs)
+
+        # raise an error if the action type is not callable
+        type_func = self._registry_get('type', action.type, action.type)
+        if not callable(type_func):
+            raise ValueError('%r is not callable' % (type_func,))
+
+        # raise an error if the metavar does not match the type
+        if hasattr(self, "_get_formatter"):
+            try:
+                self._get_formatter()._format_args(action, None)
+            except TypeError:
+                raise ValueError("length of metavar tuple does not match nargs")
+
+        return self._add_action(action)
+
+    def add_argument_group(self, *args, **kwargs):
+        group = _ArgumentGroup(self, *args, **kwargs)
+        self._action_groups.append(group)
+        return group
+
+    def add_mutually_exclusive_group(self, **kwargs):
+        group = _MutuallyExclusiveGroup(self, **kwargs)
+        self._mutually_exclusive_groups.append(group)
+        return group
+
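[A mutually exclusive group rejects combinations at parse time; the actual check lives in _parse_known_args() further down. Sketch (option names illustrative):

    import argparse

    parser = argparse.ArgumentParser(prog='demo')
    group = parser.add_mutually_exclusive_group()
    group.add_argument('--quiet', action='store_true')
    group.add_argument('--verbose', action='store_true')
    parser.parse_args(['--quiet', '--verbose'])
    # demo: error: argument --verbose: not allowed with argument --quiet
]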
+    def _add_action(self, action):
+        # resolve any conflicts
+        self._check_conflict(action)
+
+        # add to actions list
+        self._actions.append(action)
+        action.container = self
+
+        # index the action by any option strings it has
+        for option_string in action.option_strings:
+            self._option_string_actions[option_string] = action
+
+        # set the flag if any option strings look like negative numbers
+        for option_string in action.option_strings:
+            if self._negative_number_matcher.match(option_string):
+                if not self._has_negative_number_optionals:
+                    self._has_negative_number_optionals.append(True)
+
+        # return the created action
+        return action
+
+    def _remove_action(self, action):
+        self._actions.remove(action)
+
+    def _add_container_actions(self, container):
+        # collect groups by titles
+        title_group_map = {}
+        for group in self._action_groups:
+            if group.title in title_group_map:
+                msg = _('cannot merge actions - two groups are named %r')
+                raise ValueError(msg % (group.title))
+            title_group_map[group.title] = group
+
+        # map each action to its group
+        group_map = {}
+        for group in container._action_groups:
+
+            # if a group with the title exists, use that, otherwise
+            # create a new group matching the container's group
+            if group.title not in title_group_map:
+                title_group_map[group.title] = self.add_argument_group(
+                    title=group.title,
+                    description=group.description,
+                    conflict_handler=group.conflict_handler)
+
+            # map the actions to their new group
+            for action in group._group_actions:
+                group_map[action] = title_group_map[group.title]
+
+        # add container's mutually exclusive groups
+        # NOTE: if add_mutually_exclusive_group ever gains title= and
+        # description= then this code will need to be expanded as above
+        for group in container._mutually_exclusive_groups:
+            mutex_group = self.add_mutually_exclusive_group(
+                required=group.required)
+
+            # map the actions to their new mutex group
+            for action in group._group_actions:
+                group_map[action] = mutex_group
+
+        # add all actions to this container or their group
+        for action in container._actions:
+            group_map.get(action, self)._add_action(action)
+
+    def _get_positional_kwargs(self, dest, **kwargs):
+        # make sure required is not specified
+        if 'required' in kwargs:
+            msg = _("'required' is an invalid argument for positionals")
+            raise TypeError(msg)
+
+        # mark positional arguments as required if at least one is
+        # always required
+        if kwargs.get('nargs') not in [OPTIONAL, ZERO_OR_MORE]:
+            kwargs['required'] = True
+        if kwargs.get('nargs') == ZERO_OR_MORE and 'default' not in kwargs:
+            kwargs['required'] = True
+
+        # return the keyword arguments with no option strings
+        return dict(kwargs, dest=dest, option_strings=[])
+
+    def _get_optional_kwargs(self, *args, **kwargs):
+        # determine short and long option strings
+        option_strings = []
+        long_option_strings = []
+        for option_string in args:
+            # error on strings that don't start with an appropriate prefix
+            if not option_string[0] in self.prefix_chars:
+                args = {'option': option_string,
+                        'prefix_chars': self.prefix_chars}
+                msg = _('invalid option string %(option)r: '
+                        'must start with a character %(prefix_chars)r')
+                raise ValueError(msg % args)
+
+            # strings starting with two prefix characters are long options
+            option_strings.append(option_string)
+            if option_string[0] in self.prefix_chars:
+                if len(option_string) > 1:
+                    if option_string[1] in self.prefix_chars:
+                        long_option_strings.append(option_string)
+
+        # infer destination, '--foo-bar' -> 'foo_bar' and '-x' -> 'x'
+        dest = kwargs.pop('dest', None)
+        if dest is None:
+            if long_option_strings:
+                dest_option_string = long_option_strings[0]
+            else:
+                dest_option_string = option_strings[0]
+            dest = dest_option_string.lstrip(self.prefix_chars)
+            if not dest:
+                msg = _('dest= is required for options like %r')
+                raise ValueError(msg % option_string)
+            dest = dest.replace('-', '_')
+
+        # return the updated keyword arguments
+        return dict(kwargs, dest=dest, option_strings=option_strings)
+
+    def _pop_action_class(self, kwargs, default=None):
+        action = kwargs.pop('action', default)
+        return self._registry_get('action', action, action)
+
+    def _get_handler(self):
+        # determine function from conflict handler string
+        handler_func_name = '_handle_conflict_%s' % self.conflict_handler
+        try:
+            return getattr(self, handler_func_name)
+        except AttributeError:
+            msg = _('invalid conflict_resolution value: %r')
+            raise ValueError(msg % self.conflict_handler)
+
+    def _check_conflict(self, action):
+
+        # find all options that conflict with this option
+        confl_optionals = []
+        for option_string in action.option_strings:
+            if option_string in self._option_string_actions:
+                confl_optional = self._option_string_actions[option_string]
+                confl_optionals.append((option_string, confl_optional))
+
+        # resolve any conflicts
+        if confl_optionals:
+            conflict_handler = self._get_handler()
+            conflict_handler(action, confl_optionals)
+
+    def _handle_conflict_error(self, action, conflicting_actions):
+        message = ngettext('conflicting option string: %s',
+                           'conflicting option strings: %s',
+                           len(conflicting_actions))
+        conflict_string = ', '.join([option_string
+                                     for option_string, action
+                                     in conflicting_actions])
+        raise ArgumentError(action, message % conflict_string)
+
+    def _handle_conflict_resolve(self, action, conflicting_actions):
+
+        # remove all conflicting options
+        for option_string, action in conflicting_actions:
+
+            # remove the conflicting option
+            action.option_strings.remove(option_string)
+            self._option_string_actions.pop(option_string, None)
+
+            # if the option now has no option string, remove it from the
+            # container holding it
+            if not action.option_strings:
+                action.container._remove_action(action)
+
+
+class _ArgumentGroup(_ActionsContainer):
+
+    def __init__(self, container, title=None, description=None, **kwargs):
+        # add any missing keyword arguments by checking the container
+        update = kwargs.setdefault
+        update('conflict_handler', container.conflict_handler)
+        update('prefix_chars', container.prefix_chars)
+        update('argument_default', container.argument_default)
+        super_init = super(_ArgumentGroup, self).__init__
+        super_init(description=description, **kwargs)
+
+        # group attributes
+        self.title = title
+        self._group_actions = []
+
+        # share most attributes with the container
+        self._registries = container._registries
+        self._actions = container._actions
+        self._option_string_actions = container._option_string_actions
+        self._defaults = container._defaults
+        self._has_negative_number_optionals = \
+            container._has_negative_number_optionals
+        self._mutually_exclusive_groups = container._mutually_exclusive_groups
+
+    def _add_action(self, action):
+        action = super(_ArgumentGroup, self)._add_action(action)
+        self._group_actions.append(action)
+        return action
+
+    def _remove_action(self, action):
+        super(_ArgumentGroup, self)._remove_action(action)
+        self._group_actions.remove(action)
+
+
+class _MutuallyExclusiveGroup(_ArgumentGroup):
+
+    def __init__(self, container, required=False):
+        super(_MutuallyExclusiveGroup, self).__init__(container)
+        self.required = required
+        self._container = container
+
+    def _add_action(self, action):
+        if action.required:
+            msg = _('mutually exclusive arguments must be optional')
+            raise ValueError(msg)
+        action = self._container._add_action(action)
+        self._group_actions.append(action)
+        return action
+
+    def _remove_action(self, action):
+        self._container._remove_action(action)
+        self._group_actions.remove(action)
+
+
+class ArgumentParser(_AttributeHolder, _ActionsContainer):
+    """Object for parsing command line strings into Python objects.
+
+    Keyword Arguments:
+        - prog -- The name of the program (default: sys.argv[0])
+        - usage -- A usage message (default: auto-generated from arguments)
+        - description -- A description of what the program does
+        - epilog -- Text following the argument descriptions
+        - parents -- Parsers whose arguments should be copied into this one
+        - formatter_class -- HelpFormatter class for printing help messages
+        - prefix_chars -- Characters that prefix optional arguments
+        - fromfile_prefix_chars -- Characters that prefix files containing
+            additional arguments
+        - argument_default -- The default value for all arguments
+        - conflict_handler -- String indicating how to handle conflicts
+        - add_help -- Add a -h/--help option
+    """
+
+    def __init__(self,
+                 prog=None,
+                 usage=None,
+                 description=None,
+                 epilog=None,
+                 version=None,
+                 parents=[],
+                 formatter_class=HelpFormatter,
+                 prefix_chars='-',
+                 fromfile_prefix_chars=None,
+                 argument_default=None,
+                 conflict_handler='error',
+                 add_help=True):
+
+        if version is not None:
+            import warnings
+            warnings.warn(
+                """The "version" argument to ArgumentParser is deprecated. """
+                """Please use """
+                """"add_argument(..., action='version', version="N", ...)" """
+                """instead""", DeprecationWarning)
+
+        superinit = super(ArgumentParser, self).__init__
+        superinit(description=description,
+                  prefix_chars=prefix_chars,
+                  argument_default=argument_default,
+                  conflict_handler=conflict_handler)
+
+        # default setting for prog
+        if prog is None:
+            prog = _os.path.basename(_sys.argv[0])
+
+        self.prog = prog
+        self.usage = usage
+        self.epilog = epilog
+        self.version = version
+        self.formatter_class = formatter_class
+        self.fromfile_prefix_chars = fromfile_prefix_chars
+        self.add_help = add_help
+
+        add_group = self.add_argument_group
+        self._positionals = add_group(_('positional arguments'))
+        self._optionals = add_group(_('optional arguments'))
+        self._subparsers = None
+
+        # register types
+        def identity(string):
+            return string
+        self.register('type', None, identity)
+
+        # add help and version arguments if necessary
+        # (using explicit default to override global argument_default)
+        default_prefix = '-' if '-' in prefix_chars else prefix_chars[0]
+        if self.add_help:
+            self.add_argument(
+                default_prefix+'h', default_prefix*2+'help',
+                action='help', default=SUPPRESS,
+                help=_('show this help message and exit'))
+        if self.version:
+            self.add_argument(
+                default_prefix+'v', default_prefix*2+'version',
+                action='version', default=SUPPRESS,
+                version=self.version,
+                help=_("show program's version number and exit"))
+
+        # add parent arguments and defaults
+        for parent in parents:
+            self._add_container_actions(parent)
+            try:
+                defaults = parent._defaults
+            except AttributeError:
+                pass
+            else:
+                self._defaults.update(defaults)
+
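[The parents= hook copies each parent's actions and defaults into the child; the parent conventionally disables its own help option so it doesn't conflict with the child's -h. Sketch:

    import argparse

    base = argparse.ArgumentParser(add_help=False)
    base.add_argument('--config')

    child = argparse.ArgumentParser(parents=[base])
    print(child.parse_args(['--config', 'c.ini']).config)   # c.ini
]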
+    # =======================
+    # Pretty __repr__ methods
+    # =======================
+    def _get_kwargs(self):
+        names = [
+            'prog',
+            'usage',
+            'description',
+            'version',
+            'formatter_class',
+            'conflict_handler',
+            'add_help',
+        ]
+        return [(name, getattr(self, name)) for name in names]
+
+    # ==================================
+    # Optional/Positional adding methods
+    # ==================================
+    def add_subparsers(self, **kwargs):
+        if self._subparsers is not None:
+            self.error(_('cannot have multiple subparser arguments'))
+
+        # add the parser class to the arguments if it's not present
+        kwargs.setdefault('parser_class', type(self))
+
+        if 'title' in kwargs or 'description' in kwargs:
+            title = _(kwargs.pop('title', 'subcommands'))
+            description = _(kwargs.pop('description', None))
+            self._subparsers = self.add_argument_group(title, description)
+        else:
+            self._subparsers = self._positionals
+
+        # prog defaults to the usage message of this parser, skipping
+        # optional arguments and with no "usage:" prefix
+        if kwargs.get('prog') is None:
+            formatter = self._get_formatter()
+            positionals = self._get_positional_actions()
+            groups = self._mutually_exclusive_groups
+            formatter.add_usage(self.usage, positionals, groups, '')
+            kwargs['prog'] = formatter.format_help().strip()
+
+        # create the parsers action and add it to the positionals list
+        parsers_class = self._pop_action_class(kwargs, 'parsers')
+        action = parsers_class(option_strings=[], **kwargs)
+        self._subparsers._add_action(action)
+
+        # return the created parsers action
+        return action
+
+    def _add_action(self, action):
+        if action.option_strings:
+            self._optionals._add_action(action)
+        else:
+            self._positionals._add_action(action)
+        return action
+
+    def _get_optional_actions(self):
+        return [action
+                for action in self._actions
+                if action.option_strings]
+
+    def _get_positional_actions(self):
+        return [action
+                for action in self._actions
+                if not action.option_strings]
+
+    # =====================================
+    # Command line argument parsing methods
+    # =====================================
+    def parse_args(self, args=None, namespace=None):
+        args, argv = self.parse_known_args(args, namespace)
+        if argv:
+            msg = _('unrecognized arguments: %s')
+            self.error(msg % ' '.join(argv))
+        return args
+
+    def parse_known_args(self, args=None, namespace=None):
+        # args default to the system args
+        if args is None:
+            args = _sys.argv[1:]
+
+        # default Namespace built from parser defaults
+        if namespace is None:
+            namespace = Namespace()
+
+        # add any action defaults that aren't present
+        for action in self._actions:
+            if action.dest is not SUPPRESS:
+                if not hasattr(namespace, action.dest):
+                    if action.default is not SUPPRESS:
+                        default = action.default
+                        if isinstance(action.default, str):
+                            default = self._get_value(action, default)
+                        setattr(namespace, action.dest, default)
+
+        # add any parser defaults that aren't present
+        for dest in self._defaults:
+            if not hasattr(namespace, dest):
+                setattr(namespace, dest, self._defaults[dest])
+
+        # parse the arguments and exit if there are any errors
+        try:
+            namespace, args = self._parse_known_args(args, namespace)
+            if hasattr(namespace, _UNRECOGNIZED_ARGS_ATTR):
+                args.extend(getattr(namespace, _UNRECOGNIZED_ARGS_ATTR))
+                delattr(namespace, _UNRECOGNIZED_ARGS_ATTR)
+            return namespace, args
+        except ArgumentError:
+            err = _sys.exc_info()[1]
+            self.error(str(err))
+
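[parse_known_args() returns leftovers instead of erroring, which is exactly what _SubParsersAction relies on above. Sketch:

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('--known')
    args, extras = parser.parse_known_args(['--known', '1', '--other', '2'])
    print(args.known, extras)   # 1 ['--other', '2']
]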
+    def _parse_known_args(self, arg_strings, namespace):
+        # replace arg strings that are file references
+        if self.fromfile_prefix_chars is not None:
+            arg_strings = self._read_args_from_files(arg_strings)
+
+        # map all mutually exclusive arguments to the other arguments
+        # they can't occur with
+        action_conflicts = {}
+        for mutex_group in self._mutually_exclusive_groups:
+            group_actions = mutex_group._group_actions
+            for i, mutex_action in enumerate(mutex_group._group_actions):
+                conflicts = action_conflicts.setdefault(mutex_action, [])
+                conflicts.extend(group_actions[:i])
+                conflicts.extend(group_actions[i + 1:])
+
+        # find all option indices, and determine the arg_string_pattern
+        # which has an 'O' if there is an option at an index,
+        # an 'A' if there is an argument, or a '-' if there is a '--'
+        option_string_indices = {}
+        arg_string_pattern_parts = []
+        arg_strings_iter = iter(arg_strings)
+        for i, arg_string in enumerate(arg_strings_iter):
+
+            # all args after -- are non-options
+            if arg_string == '--':
+                arg_string_pattern_parts.append('-')
+                for arg_string in arg_strings_iter:
+                    arg_string_pattern_parts.append('A')
+
+            # otherwise, add the arg to the arg strings
+            # and note the index if it was an option
+            else:
+                option_tuple = self._parse_optional(arg_string)
+                if option_tuple is None:
+                    pattern = 'A'
+                else:
+                    option_string_indices[i] = option_tuple
+                    pattern = 'O'
+                arg_string_pattern_parts.append(pattern)
+
+        # join the pieces together to form the pattern
+        arg_strings_pattern = ''.join(arg_string_pattern_parts)
+
+        # converts arg strings to the appropriate values and then takes the action
+        seen_actions = set()
+        seen_non_default_actions = set()
+
+        def take_action(action, argument_strings, option_string=None):
+            seen_actions.add(action)
+            argument_values = self._get_values(action, argument_strings)
+
+            # error if this argument is not allowed with other previously
+            # seen arguments, assuming that actions that use the default
+            # value don't really count as "present"
+            if argument_values is not action.default:
+                seen_non_default_actions.add(action)
+                for conflict_action in action_conflicts.get(action, []):
+                    if conflict_action in seen_non_default_actions:
+                        msg = _('not allowed with argument %s')
+                        action_name = _get_action_name(conflict_action)
+                        raise ArgumentError(action, msg % action_name)
+
+            # take the action if we didn't receive a SUPPRESS value
+            # (e.g. from a default)
+            if argument_values is not SUPPRESS:
+                action(self, namespace, argument_values, option_string)
+
+        # function to convert arg_strings into an optional action
+        def consume_optional(start_index):
+
+            # get the optional identified at this index
+            option_tuple = option_string_indices[start_index]
+            action, option_string, explicit_arg = option_tuple
+
+            # identify additional optionals in the same arg string
+            # (e.g. -xyz is the same as -x -y -z if no args are required)
+            match_argument = self._match_argument
+            action_tuples = []
+            while True:
+
+                # if we found no optional action, skip it
+                if action is None:
+                    extras.append(arg_strings[start_index])
+                    return start_index + 1
+
+                # if there is an explicit argument, try to match the
+                # optional's string arguments to only this
+                if explicit_arg is not None:
+                    arg_count = match_argument(action, 'A')
+
+                    # if the action is a single-dash option and takes no
+                    # arguments, try to parse more single-dash options out
+                    # of the tail of the option string
+                    chars = self.prefix_chars
+                    if arg_count == 0 and option_string[1] not in chars:
+                        action_tuples.append((action, [], option_string))
+                        char = option_string[0]
+                        option_string = char + explicit_arg[0]
+                        new_explicit_arg = explicit_arg[1:] or None
+                        optionals_map = self._option_string_actions
+                        if option_string in optionals_map:
+                            action = optionals_map[option_string]
+                            explicit_arg = new_explicit_arg
+                        else:
+                            msg = _('ignored explicit argument %r')
+                            raise ArgumentError(action, msg % explicit_arg)
+
+                    # if the action expects exactly one argument, we've
+                    # successfully matched the option; exit the loop
+                    elif arg_count == 1:
+                        stop = start_index + 1
+                        args = [explicit_arg]
+                        action_tuples.append((action, args, option_string))
+                        break
+
+                    # error if a double-dash option did not use the
+                    # explicit argument
+                    else:
+                        msg = _('ignored explicit argument %r')
+                        raise ArgumentError(action, msg % explicit_arg)
+
+                # if there is no explicit argument, try to match the
+                # optional's string arguments with the following strings
+                # if successful, exit the loop
+                else:
+                    start = start_index + 1
+                    selected_patterns = arg_strings_pattern[start:]
+                    arg_count = match_argument(action, selected_patterns)
+                    stop = start + arg_count
+                    args = arg_strings[start:stop]
+                    action_tuples.append((action, args, option_string))
+                    break
+
+            # add the Optional to the list and return the index at which
+            # the Optional's string args stopped
+            assert action_tuples
+            for action, args, option_string in action_tuples:
+                take_action(action, args, option_string)
+            return stop
+
+        # the list of Positionals left to be parsed; this is modified
+        # by consume_positionals()
+        positionals = self._get_positional_actions()
+
+        # function to convert arg_strings into positional actions
+        def consume_positionals(start_index):
+            # match as many Positionals as possible
+            match_partial = self._match_arguments_partial
+            selected_pattern = arg_strings_pattern[start_index:]
+            arg_counts = match_partial(positionals, selected_pattern)
+
+            # slice off the appropriate arg strings for each Positional
+            # and add the Positional and its args to the list
+            for action, arg_count in zip(positionals, arg_counts):
+                args = arg_strings[start_index: start_index + arg_count]
+                start_index += arg_count
+                take_action(action, args)
+
+            # slice off the Positionals that we just parsed and return the
+            # index at which the Positionals' string args stopped
+            positionals[:] = positionals[len(arg_counts):]
+            return start_index
+
+        # consume Positionals and Optionals alternately, until we have
+        # passed the last option string
+        extras = []
+        start_index = 0
+        if option_string_indices:
+            max_option_string_index = max(option_string_indices)
+        else:
+            max_option_string_index = -1
+        while start_index <= max_option_string_index:
+
+            # consume any Positionals preceding the next option
+            next_option_string_index = min([
+                index
+                for index in option_string_indices
+                if index >= start_index])
+            if start_index != next_option_string_index:
+                positionals_end_index = consume_positionals(start_index)
+
+                # only try to parse the next optional if we didn't consume
+                # the option string during the positionals parsing
+                if positionals_end_index > start_index:
+                    start_index = positionals_end_index
+                    continue
+                else:
+                    start_index = positionals_end_index
+
+            # if we consumed all the positionals we could and we're not
+            # at the index of an option string, there were extra arguments
+            if start_index not in option_string_indices:
+                strings = arg_strings[start_index:next_option_string_index]
+                extras.extend(strings)
+                start_index = next_option_string_index
+
+            # consume the next optional and any arguments for it
+            start_index = consume_optional(start_index)
+
+        # consume any positionals following the last Optional
+        stop_index = consume_positionals(start_index)
+
+        # if we didn't consume all the argument strings, there were extras
+        extras.extend(arg_strings[stop_index:])
+
+        # if we didn't use all the Positional objects, there were too few
+        # arg strings supplied.
+        if positionals:
+            self.error(_('too few arguments'))
+
+        # make sure all required actions were present
+        for action in self._actions:
+            if action.required:
+                if action not in seen_actions:
+                    name = _get_action_name(action)
+                    self.error(_('argument %s is required') % name)
+
+        # make sure all required groups had one option present
+        for group in self._mutually_exclusive_groups:
+            if group.required:
+                for action in group._group_actions:
+                    if action in seen_non_default_actions:
+                        break
+
+                # if no actions were used, report the error
+                else:
+                    names = [_get_action_name(action)
+                             for action in group._group_actions
+                             if action.help is not SUPPRESS]
+                    msg = _('one of the arguments %s is required')
+                    self.error(msg % ' '.join(names))
+
+        # return the updated namespace and the extra arguments
+        return namespace, extras
+
+    def _read_args_from_files(self, arg_strings):
+        # expand arguments referencing files
+        new_arg_strings = []
+        for arg_string in arg_strings:
+
+            # for regular arguments, just add them back into the list
+            if arg_string[0] not in self.fromfile_prefix_chars:
+                new_arg_strings.append(arg_string)
+
+            # replace arguments referencing files with the file content
+            else:
+                try:
+                    args_file = open(arg_string[1:])
+                    try:
+                        arg_strings = []
+                        for arg_line in args_file.read().splitlines():
+                            for arg in self.convert_arg_line_to_args(arg_line):
+                                arg_strings.append(arg)
+                        arg_strings = self._read_args_from_files(arg_strings)
+                        new_arg_strings.extend(arg_strings)
+                    finally:
+                        args_file.close()
+                except IOError:
+                    err = _sys.exc_info()[1]
+                    self.error(str(err))
+
+        # return the modified argument list
+        return new_arg_strings
+
+    def convert_arg_line_to_args(self, arg_line):
+        return [arg_line]
+
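[convert_arg_line_to_args() is the intended override point for args files; by default one line is one argument. A hypothetical subclass (name invented for illustration) that splits on whitespace instead:

    import argparse

    class WordSplittingParser(argparse.ArgumentParser):
        # hypothetical subclass: treat each whitespace-separated token in
        # an @args-file as a separate argument, instead of one per line
        def convert_arg_line_to_args(self, arg_line):
            return arg_line.split()

    parser = WordSplittingParser(fromfile_prefix_chars='@')
]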
+    def _match_argument(self, action, arg_strings_pattern):
+        # match the pattern for this action to the arg strings
+        nargs_pattern = self._get_nargs_pattern(action)
+        match = _re.match(nargs_pattern, arg_strings_pattern)
+
+        # raise an exception if we weren't able to find a match
+        if match is None:
+            nargs_errors = {
+                None: _('expected one argument'),
+                OPTIONAL: _('expected at most one argument'),
+                ONE_OR_MORE: _('expected at least one argument'),
+            }
+            default = ngettext('expected %s argument',
+                               'expected %s arguments',
+                               action.nargs) % action.nargs
+            msg = nargs_errors.get(action.nargs, default)
+            raise ArgumentError(action, msg)
+
+        # return the number of arguments matched
+        return len(match.group(1))
+
+    def _match_arguments_partial(self, actions, arg_strings_pattern):
+        # progressively shorten the actions list by slicing off the
+        # final actions until we find a match
+        result = []
+        for i in range(len(actions), 0, -1):
+            actions_slice = actions[:i]
+            pattern = ''.join([self._get_nargs_pattern(action)
+                               for action in actions_slice])
+            match = _re.match(pattern, arg_strings_pattern)
+            if match is not None:
+                result.extend([len(string) for string in match.groups()])
+                break
+
+        # return the list of arg string counts
+        return result
+
+    def _parse_optional(self, arg_string):
+        # if it's an empty string, it was meant to be a positional
+        if not arg_string:
+            return None
+
+        # if it doesn't start with a prefix, it was meant to be positional
+        if not arg_string[0] in self.prefix_chars:
+            return None
+
+        # if the option string is present in the parser, return the action
+        if arg_string in self._option_string_actions:
+            action = self._option_string_actions[arg_string]
+            return action, arg_string, None
+
+        # if it's just a single character, it was meant to be positional
+        if len(arg_string) == 1:
+            return None
+
+        # if the option string before the "=" is present, return the action
+        if '=' in arg_string:
+            option_string, explicit_arg = arg_string.split('=', 1)
+            if option_string in self._option_string_actions:
+                action = self._option_string_actions[option_string]
+                return action, option_string, explicit_arg
+
+        # search through all possible prefixes of the option string
+        # and all actions in the parser for possible interpretations
+        option_tuples = self._get_option_tuples(arg_string)
+
+        # if multiple actions match, the option string was ambiguous
+        if len(option_tuples) > 1:
+            options = ', '.join([option_string
+                for action, option_string, explicit_arg in option_tuples])
+            args = {'option': arg_string, 'matches': options}
+            msg = _('ambiguous option: %(option)s could match %(matches)s')
+            self.error(msg % args)
+
+        # if exactly one action matched, this segmentation is good,
+        # so return the parsed action
+        elif len(option_tuples) == 1:
+            option_tuple, = option_tuples
+            return option_tuple
+
+        # if it was not found as an option, but it looks like a negative
+        # number, it was meant to be positional
+        # unless there are negative-number-like options
+        if self._negative_number_matcher.match(arg_string):
+            if not self._has_negative_number_optionals:
+                return None
+
+        # if it contains a space, it was meant to be a positional
+        if ' ' in arg_string:
+            return None
+
+        # it was meant to be an optional but there is no such option
+        # in this parser (though it might be a valid option in a subparser)
+        return None, arg_string, None
+
+    def _get_option_tuples(self, option_string):
+        result = []
+
+        # option strings starting with two prefix characters are only
+        # split at the '='
+        chars = self.prefix_chars
+        if option_string[0] in chars and option_string[1] in chars:
+            if '=' in option_string:
+                option_prefix, explicit_arg = option_string.split('=', 1)
+            else:
+                option_prefix = option_string
+                explicit_arg = None
+            for option_string in self._option_string_actions:
+                if option_string.startswith(option_prefix):
+                    action = self._option_string_actions[option_string]
+                    tup = action, option_string, explicit_arg
+                    result.append(tup)
+
+        # single character options can be concatenated with their arguments
+        # but multiple character options always have to have their argument
+        # separate
+        elif option_string[0] in chars and option_string[1] not in chars:
+            option_prefix = option_string
+            explicit_arg = None
+            short_option_prefix = option_string[:2]
+            short_explicit_arg = option_string[2:]
+
+            for option_string in self._option_string_actions:
+                if option_string == short_option_prefix:
+                    action = self._option_string_actions[option_string]
+                    tup = action, option_string, short_explicit_arg
+                    result.append(tup)
+                elif option_string.startswith(option_prefix):
+                    action = self._option_string_actions[option_string]
+                    tup = action, option_string, explicit_arg
+                    result.append(tup)
+
+        # shouldn't ever get here
+        else:
+            self.error(_('unexpected option string: %s') % option_string)
+
+        # return the collected option tuples
+        return result
+
+    def _get_nargs_pattern(self, action):
+        # in all examples below, we have to allow for '--' args
+        # which are represented as '-' in the pattern
+        nargs = action.nargs
+
+        # the default (None) is assumed to be a single argument
+        if nargs is None:
+            nargs_pattern = '(-*A-*)'
+
+        # allow zero or one arguments
+        elif nargs == OPTIONAL:
+            nargs_pattern = '(-*A?-*)'
+
+        # allow zero or more arguments
+        elif nargs == ZERO_OR_MORE:
+            nargs_pattern = '(-*[A-]*)'
+
+        # allow one or more arguments
+        elif nargs == ONE_OR_MORE:
+            nargs_pattern = '(-*A[A-]*)'
+
+        # allow any number of options or arguments
+        elif nargs == REMAINDER:
+            nargs_pattern = '([-AO]*)'
+
+        # allow one argument followed by any number of options or arguments
+        elif nargs == PARSER:
+            nargs_pattern = '(-*A[-AO]*)'
+
+        # all others should be integers
+        else:
+            nargs_pattern = '(-*%s-*)' % '-*'.join('A' * nargs)
+
+        # if this is an optional action, -- is not allowed
+        if action.option_strings:
+            nargs_pattern = nargs_pattern.replace('-*', '')
+            nargs_pattern = nargs_pattern.replace('-', '')
+
+        # return the pattern
+        return nargs_pattern
+
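[The nargs machinery reduces matching to a regular expression over a string of 'A' (argument), 'O' (option) and '-' ('--') markers; e.g. nargs='+' on a positional becomes '(-*A[A-]*)'. A hand check of that pattern:

    import re

    # arg strings like ['a', 'b', '--opt', 'c'] yield the pattern string 'AAOA';
    # the ONE_OR_MORE pattern greedily grabs 'AA' and stops at the option
    print(re.match('(-*A[A-]*)', 'AAOA').group(1))   # 'AA'
]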
+    # ========================
+    # Value conversion methods
+    # ========================
+    def _get_values(self, action, arg_strings):
+        # for everything but PARSER args, strip out '--'
+        if action.nargs not in [PARSER, REMAINDER]:
+            arg_strings = [s for s in arg_strings if s != '--']
+
+        # optional argument produces a default when not present
+        if not arg_strings and action.nargs == OPTIONAL:
+            if action.option_strings:
+                value = action.const
+            else:
+                value = action.default
+            if isinstance(value, str):
+                value = self._get_value(action, value)
+                self._check_value(action, value)
+
+        # when nargs='*' on a positional, if there were no command-line
+        # args, use the default if it is anything other than None
+        elif (not arg_strings and action.nargs == ZERO_OR_MORE and
+              not action.option_strings):
+            if action.default is not None:
+                value = action.default
+            else:
+                value = arg_strings
+            self._check_value(action, value)
+
+        # single argument or optional argument produces a single value
+        elif len(arg_strings) == 1 and action.nargs in [None, OPTIONAL]:
+            arg_string, = arg_strings
+            value = self._get_value(action, arg_string)
+            self._check_value(action, value)
+
+        # REMAINDER arguments convert all values, checking none
+        elif action.nargs == REMAINDER:
+            value = [self._get_value(action, v) for v in arg_strings]
+
+        # PARSER arguments convert all values, but check only the first
+        elif action.nargs == PARSER:
+            value = [self._get_value(action, v) for v in arg_strings]
+            self._check_value(action, value[0])
+
+        # all other types of nargs produce a list
+        else:
+            value = [self._get_value(action, v) for v in arg_strings]
+            for v in value:
+                self._check_value(action, v)
+
+        # return the converted value
+        return value
+
+    def _get_value(self, action, arg_string):
+        type_func = self._registry_get('type', action.type, action.type)
+        if not callable(type_func):
+            msg = _('%r is not callable')
+            raise ArgumentError(action, msg % type_func)
+
+        # convert the value to the appropriate type
+        try:
+            result = type_func(arg_string)
+
+        # ArgumentTypeErrors indicate errors
+        except ArgumentTypeError:
+            name = getattr(action.type, '__name__', repr(action.type))
+            msg = str(_sys.exc_info()[1])
+            raise ArgumentError(action, msg)
+
+        # TypeErrors or ValueErrors also indicate errors
+        except (TypeError, ValueError):
+            name = getattr(action.type, '__name__', repr(action.type))
+            args = {'type': name, 'value': arg_string}
+            msg = _('invalid %(type)s value: %(value)r')
+            raise ArgumentError(action, msg % args)
+
+        # return the converted value
+        return result
+
+    def _check_value(self, action, value):
+        # converted value must be one of the choices (if specified)
+        if action.choices is not None and value not in action.choices:
+            args = {'value': value,
+                    'choices': ', '.join(map(repr, action.choices))}
+            msg = _('invalid choice: %(value)r (choose from %(choices)s)')
+            raise ArgumentError(action, msg % args)
+
+    # =======================
+    # Help-formatting methods
+    # =======================
+    def format_usage(self):
+        formatter = self._get_formatter()
+        formatter.add_usage(self.usage, self._actions,
+                            self._mutually_exclusive_groups)
+        return formatter.format_help()
+
+    def format_help(self):
+        formatter = self._get_formatter()
+
+        # usage
+        formatter.add_usage(self.usage, self._actions,
+                            self._mutually_exclusive_groups)
+
+        # description
+        formatter.add_text(self.description)
+
+        # positionals, optionals and user-defined groups
+        for action_group in self._action_groups:
+            formatter.start_section(action_group.title)
+            formatter.add_text(action_group.description)
+            formatter.add_arguments(action_group._group_actions)
+            formatter.end_section()
+
+        # epilog
+        formatter.add_text(self.epilog)
+
+        # determine help from format above
+        return formatter.format_help()
+
+    def format_version(self):
+        import warnings
+        warnings.warn(
+            'The format_version method is deprecated -- the "version" '
+            'argument to ArgumentParser is no longer supported.',
+            DeprecationWarning)
+        formatter = self._get_formatter()
+        formatter.add_text(self.version)
+        return formatter.format_help()
+
+    def _get_formatter(self):
+        return self.formatter_class(prog=self.prog)
+
+    # =====================
+    # Help-printing methods
+    # =====================
+    def print_usage(self, file=None):
+        if file is None:
+            file = _sys.stdout
+        self._print_message(self.format_usage(), file)
+
+    def print_help(self, file=None):
+        if file is None:
+            file = _sys.stdout
+        self._print_message(self.format_help(), file)
+
+    def print_version(self, file=None):
+        import warnings
+        warnings.warn(
+            'The print_version method is deprecated -- the "version" '
+            'argument to ArgumentParser is no longer supported.',
+            DeprecationWarning)
+        self._print_message(self.format_version(), file)
+
+    def _print_message(self, message, file=None):
+        if message:
+            if file is None:
+                file = _sys.stderr
+            file.write(message)
+
+    # ===============
+    # Exiting methods
+    # ===============
+    def exit(self, status=0, message=None):
+        if message:
+            self._print_message(message, _sys.stderr)
+        _sys.exit(status)
+
+    def error(self, message):
+        """error(message: string)
+
+        Prints a usage message incorporating the message to stderr and
+        exits.
+
+        If you override this in a subclass, it should not return -- it
+        should either exit or raise an exception.
+        """
+        self.print_usage(_sys.stderr)
+        args = {'prog': self.prog, 'message': message}
+        self.exit(2, _('%(prog)s: error: %(message)s\n') % args)
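+
+# Editor's note: an illustrative sketch, not part of the module, showing the
+# two extension points documented above: a custom type callable that raises
+# ArgumentTypeError, and an error() override that raises instead of exiting.
+# The names port() and QuietParser are hypothetical.
+#
+#     import argparse
+#
+#     def port(string):
+#         value = int(string)   # a ValueError here is reported as "invalid port value"
+#         if not 0 < value < 65536:
+#             raise argparse.ArgumentTypeError('%r is out of range' % string)
+#         return value
+#
+#     class QuietParser(argparse.ArgumentParser):
+#         def error(self, message):
+#             # per the docstring above, this must not return
+#             raise ValueError(message)
+#
+#     parser = QuietParser(prog='serve')
+#     parser.add_argument('--port', type=port, default=8080)
+#     args = parser.parse_args(['--port', '6000'])   # args.port == 6000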
diff --git a/lib-python/3.2/ast.py b/lib-python/3.2/ast.py
new file mode 100644
--- /dev/null
+++ b/lib-python/3.2/ast.py
@@ -0,0 +1,318 @@
+"""
+    ast
+    ~~~
+
+    The `ast` module helps Python applications to process trees of the Python
+    abstract syntax grammar.  The abstract syntax itself might change with
+    each Python release; this module helps to find out programmatically what
+    the current grammar looks like and allows modifications of it.
+
+    An abstract syntax tree can be generated by passing `ast.PyCF_ONLY_AST` as
+    a flag to the `compile()` builtin function or by using the `parse()`
+    function from this module.  The result will be a tree of objects whose
+    classes all inherit from `ast.AST`.
+
+    A modified abstract syntax tree can be compiled into a Python code object
+    using the built-in `compile()` function.
+
+    Additionally various helper functions are provided that make working with
+    the trees simpler.  The main intention of the helper functions and this
+    module in general is to provide an easy-to-use interface for libraries
+    that work tightly with the Python syntax (template engines, for example).
+
+
+    :copyright: Copyright 2008 by Armin Ronacher.
+    :license: Python License.
+"""
+from _ast import *
+from _ast import __version__
+
+
+def parse(source, filename='<unknown>', mode='exec'):
+    """
+    Parse the source into an AST node.
+    Equivalent to compile(source, filename, mode, PyCF_ONLY_AST).
+    """
+    return compile(source, filename, mode, PyCF_ONLY_AST)
+
+
+def literal_eval(node_or_string):
+    """
+    Safely evaluate an expression node or a string containing a Python
+    expression.  The string or node provided may only consist of the following
+    Python literal structures: strings, bytes, numbers, tuples, lists, dicts,
+    sets, booleans, and None.
+    """
+    _safe_names = {'None': None, 'True': True, 'False': False}
+    if isinstance(node_or_string, str):
+        node_or_string = parse(node_or_string, mode='eval')
+    if isinstance(node_or_string, Expression):
+        node_or_string = node_or_string.body
+    def _convert(node):
+        if isinstance(node, (Str, Bytes)):
+            return node.s
+        elif isinstance(node, Num):
+            return node.n
+        elif isinstance(node, Tuple):
+            return tuple(map(_convert, node.elts))
+        elif isinstance(node, List):
+            return list(map(_convert, node.elts))
+        elif isinstance(node, Set):
+            return set(map(_convert, node.elts))
+        elif isinstance(node, Dict):
+            return dict((_convert(k), _convert(v)) for k, v
+                        in zip(node.keys, node.values))
+        elif isinstance(node, Name):
+            if node.id in _safe_names:
+                return _safe_names[node.id]
+        elif isinstance(node, UnaryOp) and \
+             isinstance(node.op, (UAdd, USub)) and \
+             isinstance(node.operand, (Num, UnaryOp, BinOp)):
+            operand = _convert(node.operand)
+            if isinstance(node.op, UAdd):
+                return + operand
+            else:
+                return - operand
+        elif isinstance(node, BinOp) and \
+             isinstance(node.op, (Add, Sub)) and \
+             isinstance(node.right, (Num, UnaryOp, BinOp)) and \
+             isinstance(node.left, (Num, UnaryOp, BinOp)):
+            left = _convert(node.left)
+            right = _convert(node.right)
+            if isinstance(node.op, Add):
+                return left + right
+            else:
+                return left - right
+        raise ValueError('malformed node or string: ' + repr(node))
+    return _convert(node_or_string)
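+
+# Editor's note: a doctest-style usage sketch, not part of the module.  Only
+# literal syntax survives _convert(); anything else raises ValueError.
+#
+#     >>> literal_eval('[1, -2, (True, None)]')
+#     [1, -2, (True, None)]
+#     >>> literal_eval('2 ** 8')
+#     Traceback (most recent call last):
+#       ...
+#     ValueError: malformed node or string: <_ast.BinOp object at 0x...>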
+
+
+def dump(node, annotate_fields=True, include_attributes=False):
+    """
+    Return a formatted dump of the tree in *node*.  This is mainly useful for
+    debugging purposes.  The returned string will show the names and the values
+    for fields.  This makes the code impossible to evaluate, so if evaluation is
+    wanted *annotate_fields* must be set to False.  Attributes such as line
+    numbers and column offsets are not dumped by default.  If this is wanted,
+    *include_attributes* can be set to True.
+    """
+    def _format(node):
+        if isinstance(node, AST):
+            fields = [(a, _format(b)) for a, b in iter_fields(node)]
+            rv = '%s(%s' % (node.__class__.__name__, ', '.join(
+                ('%s=%s' % field for field in fields)
+                if annotate_fields else
+                (b for a, b in fields)
+            ))
+            if include_attributes and node._attributes:
+                rv += ', ' if fields else ' '
+                rv += ', '.join('%s=%s' % (a, _format(getattr(node, a)))
+                                for a in node._attributes)
+            return rv + ')'
+        elif isinstance(node, list):
+            return '[%s]' % ', '.join(_format(x) for x in node)
+        return repr(node)
+    if not isinstance(node, AST):
+        raise TypeError('expected AST, got %r' % node.__class__.__name__)
+    return _format(node)
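+
+# Editor's note: a doctest-style sketch, not part of the module.
+#
+#     >>> dump(parse('x = 1'))
+#     "Module(body=[Assign(targets=[Name(id='x', ctx=Store())], value=Num(n=1))])"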
+
+
+def copy_location(new_node, old_node):
+    """
+    Copy source location (`lineno` and `col_offset` attributes) from
+    *old_node* to *new_node* if possible, and return *new_node*.
+    """
+    for attr in 'lineno', 'col_offset':
+        if attr in old_node._attributes and attr in new_node._attributes \
+           and hasattr(old_node, attr):
+            setattr(new_node, attr, getattr(old_node, attr))
+    return new_node
+
+
+def fix_missing_locations(node):
+    """
+    When you compile a node tree with compile(), the compiler expects lineno and
+    col_offset attributes for every node that supports them.  This is rather
+    tedious to fill in for generated nodes, so this helper adds these attributes
+    where they are not already set, by copying them from the parent node.  It
+    works recursively starting at *node*.
+    """
+    def _fix(node, lineno, col_offset):
+        if 'lineno' in node._attributes:
+            if not hasattr(node, 'lineno'):
+                node.lineno = lineno
+            else:
+                lineno = node.lineno
+        if 'col_offset' in node._attributes:
+            if not hasattr(node, 'col_offset'):
+                node.col_offset = col_offset
+            else:
+                col_offset = node.col_offset
+        for child in iter_child_nodes(node):
+            _fix(child, lineno, col_offset)
+    _fix(node, 1, 0)
+    return node
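+
+# Editor's note: a usage sketch, not part of the module.  Hand-built nodes
+# carry no location information, so compile() would reject them without this
+# fixup.
+#
+#     node = Expression(body=Num(n=42))
+#     code = compile(fix_missing_locations(node), '<built>', 'eval')
+#     assert eval(code) == 42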
+
+
+def increment_lineno(node, n=1):
+    """
+    Increment the line number of each node in the tree starting at *node* by *n*.
+    This is useful to "move code" to a different location in a file.
+    """
+    for child in walk(node):
+        if 'lineno' in child._attributes:
+            child.lineno = getattr(child, 'lineno', 0) + n
+    return node
+
+
+def iter_fields(node):
+    """
+    Yield a tuple of ``(fieldname, value)`` for each field in ``node._fields``
+    that is present on *node*.
+    """
+    for field in node._fields:
+        try:
+            yield field, getattr(node, field)
+        except AttributeError:
+            pass
+
+
+def iter_child_nodes(node):
+    """
+    Yield all direct child nodes of *node*, that is, all fields that are nodes
+    and all items of fields that are lists of nodes.
+    """
+    for name, field in iter_fields(node):
+        if isinstance(field, AST):
+            yield field
+        elif isinstance(field, list):
+            for item in field:
+                if isinstance(item, AST):
+                    yield item
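+
+# Editor's note: a doctest-style sketch, not part of the module.
+#
+#     >>> [name for name, value in iter_fields(parse('x', mode='eval').body)]
+#     ['id', 'ctx']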
+
+
+def get_docstring(node, clean=True):
+    """
+    Return the docstring for the given node or None if no docstring can
+    be found.  If the node provided cannot have docstrings, a TypeError
+    will be raised.
+    """
+    if not isinstance(node, (FunctionDef, ClassDef, Module)):
+        raise TypeError("%r can't have docstrings" % node.__class__.__name__)
+    if node.body and isinstance(node.body[0], Expr) and \
+       isinstance(node.body[0].value, Str):
+        if clean:
+            import inspect
+            return inspect.cleandoc(node.body[0].value.s)
+        return node.body[0].value.s
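+
+# Editor's note: a doctest-style sketch, not part of the module.
+#
+#     >>> mod = parse('def f():\n    """Say hi."""')
+#     >>> get_docstring(mod.body[0])
+#     'Say hi.'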
+
+
+def walk(node):
+    """
+    Recursively yield all descendant nodes in the tree starting at *node*
+    (including *node* itself), in no specified order.  This is useful if you
+    only want to modify nodes in place and don't care about the context.
+    """
+    from collections import deque
+    todo = deque([node])
+    while todo:
+        node = todo.popleft()
+        todo.extend(iter_child_nodes(node))
+        yield node
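+
+# Editor's note: a doctest-style sketch, not part of the module.  Note that
+# context nodes such as Load and Store are children too.
+#
+#     >>> sorted(n.__class__.__name__ for n in walk(parse('x = y + 1')))
+#     ['Add', 'Assign', 'BinOp', 'Load', 'Module', 'Name', 'Name', 'Num', 'Store']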
+
+
+class NodeVisitor(object):
+    """
+    A node visitor base class that walks the abstract syntax tree and calls a
+    visitor function for every node found.  This function may return a value
+    which is forwarded by the `visit` method.
+
+    This class is meant to be subclassed, with the subclass adding visitor
+    methods.
+
+    By default the visitor functions for the nodes are ``'visit_'`` +
+    class name of the node.  So a `TryFinally` node visit function would
+    be `visit_TryFinally`.  This behavior can be changed by overriding
+    the `visit` method.  If no visitor function exists for a node,
+    the `generic_visit` visitor is used instead.
+
+    Don't use the `NodeVisitor` if you want to apply changes to nodes during
+    traversal.  For that a special visitor exists (`NodeTransformer`) that
+    allows modifications.
+    """
+
+    def visit(self, node):
+        """Visit a node."""
+        method = 'visit_' + node.__class__.__name__
+        visitor = getattr(self, method, self.generic_visit)
+        return visitor(node)
+
+    def generic_visit(self, node):
+        """Called if no explicit visitor function exists for a node."""
+        for field, value in iter_fields(node):
+            if isinstance(value, list):
+                for item in value:
+                    if isinstance(item, AST):
+                        self.visit(item)
+            elif isinstance(value, AST):
+                self.visit(value)
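+
+# Editor's note: an illustrative subclass, not part of the module; the name
+# NameLister is hypothetical.  It follows the visit_<ClassName> convention
+# described in the class docstring.
+#
+#     class NameLister(NodeVisitor):
+#         def visit_Name(self, node):
+#             print(node.id)
+#             self.generic_visit(node)
+#
+#     NameLister().visit(parse('x = y + 1'))   # prints x, then y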
+
+
+class NodeTransformer(NodeVisitor):
+    """
+    A :class:`NodeVisitor` subclass that walks the abstract syntax tree and
+    allows modification of nodes.
+
+    The `NodeTransformer` will walk the AST and use the return value of the
+    visitor methods to replace or remove the old node.  If the return value of
+    the visitor method is ``None``, the node will be removed from its location,
+    otherwise it is replaced with the return value.  The return value may be the
+    original node in which case no replacement takes place.
+
+    Here is an example transformer that rewrites all occurrences of name lookups
+    (``foo``) to ``data['foo']``::
+
+       class RewriteName(NodeTransformer):
+
+           def visit_Name(self, node):
+               return copy_location(Subscript(
+                   value=Name(id='data', ctx=Load()),
+                   slice=Index(value=Str(s=node.id)),
+                   ctx=node.ctx
+               ), node)
+
+    Keep in mind that if the node you're operating on has child nodes you must
+    either transform the child nodes yourself or call the :meth:`generic_visit`
+    method for the node first.
+
+    For nodes that were part of a collection of statements (that applies to all
+    statement nodes), the visitor may also return a list of nodes rather than
+    just a single node.
+
+    Usually you use the transformer like this::
+
+       node = YourTransformer().visit(node)
+    """
+
+    def generic_visit(self, node):
+        for field, old_value in iter_fields(node):
+            old_value = getattr(node, field, None)
+            if isinstance(old_value, list):
+                new_values = []
+                for value in old_value:
+                    if isinstance(value, AST):
+                        value = self.visit(value)
+                        if value is None:
+                            continue
+                        elif not isinstance(value, AST):
+                            new_values.extend(value)
+                            continue
+                    new_values.append(value)
+                old_value[:] = new_values
+            elif isinstance(old_value, AST):
+                new_node = self.visit(old_value)
+                if new_node is None:
+                    delattr(node, field)
+                else:
+                    setattr(node, field, new_node)
+        return node
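+
+# Editor's note: a usage sketch, not part of the module, combining the
+# RewriteName example from the class docstring with the helpers above.
+#
+#     tree = RewriteName().visit(parse('foo', mode='eval'))
+#     code = compile(fix_missing_locations(tree), '<ast>', 'eval')
+#     print(eval(code, {'data': {'foo': 42}}))   # 42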
diff --git a/lib-python/3.2/asynchat.py b/lib-python/3.2/asynchat.py
new file mode 100644
--- /dev/null
+++ b/lib-python/3.2/asynchat.py
@@ -0,0 +1,336 @@
+# -*- Mode: Python; tab-width: 4 -*-
+#       Id: asynchat.py,v 2.26 2000/09/07 22:29:26 rushing Exp
+#       Author: Sam Rushing <rushing at nightmare.com>
+
+# ======================================================================
+# Copyright 1996 by Sam Rushing
+#
+#                         All Rights Reserved
+#
+# Permission to use, copy, modify, and distribute this software and
+# its documentation for any purpose and without fee is hereby
+# granted, provided that the above copyright notice appear in all
+# copies and that both that copyright notice and this permission
+# notice appear in supporting documentation, and that the name of Sam
+# Rushing not be used in advertising or publicity pertaining to
+# distribution of the software without specific, written prior
+# permission.
+#
+# SAM RUSHING DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN
+# NO EVENT SHALL SAM RUSHING BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
+# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+# ======================================================================
+
+r"""A class supporting chat-style (command/response) protocols.
+
+This class adds support for 'chat' style protocols - where one side
+sends a 'command', and the other sends a response (examples would be
+the common internet protocols - smtp, nntp, ftp, etc.).
+
+The handle_read() method looks at the input stream for the current
+'terminator' (usually '\r\n' for single-line responses, '\r\n.\r\n'
+for multi-line output), calling self.found_terminator() on its
+receipt.
+
+For example:
+Say you build an async nntp client using this class.  At the start
+of the connection, you'll have self.terminator set to '\r\n', in
+order to process the single-line greeting.  Just before issuing a
+'LIST' command you'll set it to '\r\n.\r\n'.  The output of the LIST
+command will be accumulated (using your own 'collect_incoming_data'
+method) up to the terminator, and then control will be returned to
+you - by calling your self.found_terminator() method.
+"""
+import socket
+import asyncore
+from collections import deque
+
+def buffer(obj, start=None, stop=None):
+    # if memoryview objects gain slicing semantics,
+    # this function will change for the better
+    # memoryview used for the TypeError
+    memoryview(obj)
+    if start is None:
+        start = 0
+    if stop is None:
+        stop = len(obj)
+    return obj[start:stop]
+
+class async_chat (asyncore.dispatcher):
+    """This is an abstract class.  You must derive from this class, and add
+    the two methods collect_incoming_data() and found_terminator()"""
+
+    # these are overridable defaults
+
+    ac_in_buffer_size       = 4096
+    ac_out_buffer_size      = 4096
+
+    # we don't want to enable the use of encoding by default, because that is a
+    # sign of an application bug that we don't want to pass silently
+
+    use_encoding            = 0
+    encoding                = 'latin1'
+
+    def __init__ (self, sock=None, map=None):
+        # for string terminator matching
+        self.ac_in_buffer = b''
+
+        # we use a list here rather than cStringIO for a few reasons...
+        # del lst[:] is faster than sio.truncate(0)
+        # lst = [] is faster than sio.truncate(0)
+        # cStringIO will be gaining unicode support in py3k, which
+        # will negatively affect the performance of bytes compared to
+        # a ''.join() equivalent
+        self.incoming = []
+
+        # we toss the use of the "simple producer" and replace it with
+        # a pure deque, which the original fifo was a wrapping of
+        self.producer_fifo = deque()
+        asyncore.dispatcher.__init__ (self, sock, map)
+
+    def collect_incoming_data(self, data):
+        raise NotImplementedError("must be implemented in subclass")
+
+    def _collect_incoming_data(self, data):
+        self.incoming.append(data)
+
+    def _get_data(self):
+        d = b''.join(self.incoming)
+        del self.incoming[:]
+        return d
+
+    def found_terminator(self):
+        raise NotImplementedError("must be implemented in subclass")
+
+    def set_terminator (self, term):
+        "Set the input delimiter.  Can be a fixed string of any length, an integer, or None"
+        if isinstance(term, str) and self.use_encoding:
+            term = bytes(term, self.encoding)
+        self.terminator = term
+
+    def get_terminator (self):
+        return self.terminator
+
+    # grab some more data from the socket,
+    # throw it to the collector method,
+    # check for the terminator,
+    # if found, transition to the next state.
+
+    def handle_read (self):
+
+        try:
+            data = self.recv (self.ac_in_buffer_size)
+        except socket.error:
+            self.handle_error()
+            return
+
+        if isinstance(data, str) and self.use_encoding:
+            data = bytes(data, self.encoding)
+        self.ac_in_buffer = self.ac_in_buffer + data
+
+        # Continue to search for self.terminator in self.ac_in_buffer,
+        # while calling self.collect_incoming_data.  The while loop
+        # is necessary because we might read several data+terminator
+        # combos with a single recv(4096).
+
+        while self.ac_in_buffer:
+            lb = len(self.ac_in_buffer)
+            terminator = self.get_terminator()
+            if not terminator:
+                # no terminator, collect it all
+                self.collect_incoming_data (self.ac_in_buffer)
+                self.ac_in_buffer = b''
+            elif isinstance(terminator, int):
+                # numeric terminator
+                n = terminator
+                if lb < n:
+                    self.collect_incoming_data (self.ac_in_buffer)
+                    self.ac_in_buffer = b''
+                    self.terminator = self.terminator - lb
+                else:
+                    self.collect_incoming_data (self.ac_in_buffer[:n])
+                    self.ac_in_buffer = self.ac_in_buffer[n:]
+                    self.terminator = 0
+                    self.found_terminator()
+            else:
+                # 3 cases:
+                # 1) end of buffer matches terminator exactly:
+                #    collect data, transition
+                # 2) end of buffer matches some prefix:
+                #    collect data to the prefix
+                # 3) end of buffer does not match any prefix:
+                #    collect data
+                terminator_len = len(terminator)
+                index = self.ac_in_buffer.find(terminator)
+                if index != -1:
+                    # we found the terminator
+                    if index > 0:
+                        # don't bother reporting the empty string (source of subtle bugs)
+                        self.collect_incoming_data (self.ac_in_buffer[:index])
+                    self.ac_in_buffer = self.ac_in_buffer[index+terminator_len:]
+                    # This does the Right Thing if the terminator is changed here.
+                    self.found_terminator()
+                else:
+                    # check for a prefix of the terminator
+                    index = find_prefix_at_end (self.ac_in_buffer, terminator)
+                    if index:
+                        if index != lb:
+                            # we found a prefix, collect up to the prefix
+                            self.collect_incoming_data (self.ac_in_buffer[:-index])
+                            self.ac_in_buffer = self.ac_in_buffer[-index:]
+                        break
+                    else:
+                        # no prefix, collect it all
+                        self.collect_incoming_data (self.ac_in_buffer)
+                        self.ac_in_buffer = b''
+
+    def handle_write (self):
+        self.initiate_send()
+
+    def handle_close (self):
+        self.close()
+
+    def push (self, data):
+        sabs = self.ac_out_buffer_size
+        if len(data) > sabs:
+            for i in range(0, len(data), sabs):
+                self.producer_fifo.append(data[i:i+sabs])
+        else:
+            self.producer_fifo.append(data)
+        self.initiate_send()
+
+    def push_with_producer (self, producer):
+        self.producer_fifo.append(producer)
+        self.initiate_send()
+
+    def readable (self):
+        "predicate for inclusion in the readable for select()"
+        # cannot use the old predicate, it violates the claim of the
+        # set_terminator method.
+
+        # return (len(self.ac_in_buffer) <= self.ac_in_buffer_size)
+        return 1
+
+    def writable (self):
+        "predicate for inclusion in the writable for select()"
+        return self.producer_fifo or (not self.connected)
+
+    def close_when_done (self):
+        "automatically close this channel once the outgoing queue is empty"
+        self.producer_fifo.append(None)
+
+    def initiate_send(self):
+        while self.producer_fifo and self.connected:
+            first = self.producer_fifo[0]
+            # handle empty string/buffer or None entry
+            if not first:
+                del self.producer_fifo[0]
+                if first is None:
+                    self.handle_close()
+                    return
+
+            # handle classic producer behavior
+            obs = self.ac_out_buffer_size
+            try:
+                data = buffer(first, 0, obs)
+            except TypeError:
+                data = first.more()
+                if data:
+                    self.producer_fifo.appendleft(data)
+                else:
+                    del self.producer_fifo[0]
+                continue
+
+            if isinstance(data, str) and self.use_encoding:
+                data = bytes(data, self.encoding)
+
+            # send the data
+            try:
+                num_sent = self.send(data)
+            except socket.error:
+                self.handle_error()
+                return
+
+            if num_sent:
+                if num_sent < len(data) or obs < len(first):
+                    self.producer_fifo[0] = first[num_sent:]
+                else:
+                    del self.producer_fifo[0]
+            # we tried to send some actual data
+            return
+
+    def discard_buffers (self):
+        # Emergencies only!
+        self.ac_in_buffer = b''
+        del self.incoming[:]
+        self.producer_fifo.clear()
+
+class simple_producer:
+
+    def __init__ (self, data, buffer_size=512):
+        self.data = data
+        self.buffer_size = buffer_size
+
+    def more (self):
+        if len (self.data) > self.buffer_size:
+            result = self.data[:self.buffer_size]
+            self.data = self.data[self.buffer_size:]
+            return result
+        else:
+            result = self.data
+            self.data = b''
+            return result
+
+class fifo:
+    def __init__ (self, list=None):
+        if not list:
+            self.list = deque()
+        else:
+            self.list = deque(list)
+
+    def __len__ (self):
+        return len(self.list)
+
+    def is_empty (self):
+        return not self.list
+
+    def first (self):
+        return self.list[0]
+
+    def push (self, data):
+        self.list.append(data)
+
+    def pop (self):
+        if self.list:
+            return (1, self.list.popleft())
+        else:
+            return (0, None)
+
+# Given 'haystack', see if any prefix of 'needle' is at its end.  This
+# assumes an exact match has already been checked.  Return the number of
+# characters matched.
+# for example:
+# f_p_a_e ("qwerty\r", "\r\n") => 1
+# f_p_a_e ("qwertydkjf", "\r\n") => 0
+# f_p_a_e ("qwerty\r\n", "\r\n") => <undefined>
+
+# this could maybe be made faster with a computed regex?
+# [answer: no; circa Python-2.0, Jan 2001]
+# new python:   28961/s
+# old python:   18307/s
+# re:        12820/s
+# regex:     14035/s
+
+def find_prefix_at_end (haystack, needle):
+    l = len(needle) - 1
+    while l and not haystack.endswith(needle[:l]):
+        l -= 1
+    return l
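+
+# Editor's note: a minimal illustrative subclass, not part of the module,
+# showing the two required methods; the name line_reader is hypothetical.
+# It collects CRLF-terminated lines as described in the module docstring,
+# reusing the _collect_incoming_data/_get_data helpers defined above.
+#
+#     class line_reader(async_chat):
+#         def __init__(self, sock=None):
+#             async_chat.__init__(self, sock)
+#             self.set_terminator(b'\r\n')
+#
+#         def collect_incoming_data(self, data):
+#             self._collect_incoming_data(data)
+#
+#         def found_terminator(self):
+#             print('got line:', self._get_data())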
diff --git a/lib-python/3.2/asyncore.py b/lib-python/3.2/asyncore.py
new file mode 100644
--- /dev/null
+++ b/lib-python/3.2/asyncore.py
@@ -0,0 +1,663 @@
+# -*- Mode: Python -*-
+#   Id: asyncore.py,v 2.51 2000/09/07 22:29:26 rushing Exp
+#   Author: Sam Rushing <rushing at nightmare.com>
+
+# ======================================================================
+# Copyright 1996 by Sam Rushing
+#
+#                         All Rights Reserved
+#
+# Permission to use, copy, modify, and distribute this software and
+# its documentation for any purpose and without fee is hereby
+# granted, provided that the above copyright notice appear in all
+# copies and that both that copyright notice and this permission
+# notice appear in supporting documentation, and that the name of Sam
+# Rushing not be used in advertising or publicity pertaining to
+# distribution of the software without specific, written prior
+# permission.
+#
+# SAM RUSHING DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN
+# NO EVENT SHALL SAM RUSHING BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
+# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+# ======================================================================
+
+"""Basic infrastructure for asynchronous socket service clients and servers.
+
+There are only two ways to have a program on a single processor do "more
+than one thing at a time".  Multi-threaded programming is the simplest and
+most popular way to do it, but there is another very different technique,
+that lets you have nearly all the advantages of multi-threading, without
+actually using multiple threads. It's really only practical if your program
+is largely I/O bound. If your program is CPU bound, then pre-emptively
+scheduled threads are probably what you really need. Network servers are
+rarely CPU-bound, however.
+
+If your operating system supports the select() system call in its I/O
+library (and nearly all do), then you can use it to juggle multiple
+communication channels at once; doing other work while your I/O is taking
+place in the "background."  Although this strategy can seem strange and
+complex, especially at first, it is in many ways easier to understand and
+control than multi-threaded programming. The module documented here solves
+many of the difficult problems for you, making the task of building
+sophisticated high-performance network servers and clients a snap.
+"""
+
+import select
+import socket
+import sys
+import time
+import warnings
+
+import os
+from errno import EALREADY, EINPROGRESS, EWOULDBLOCK, ECONNRESET, EINVAL, \
+     ENOTCONN, ESHUTDOWN, EINTR, EISCONN, EBADF, ECONNABORTED, EPIPE, EAGAIN, \
+     errorcode
+
+_DISCONNECTED = frozenset((ECONNRESET, ENOTCONN, ESHUTDOWN, ECONNABORTED, EPIPE,
+                           EBADF))
+
+try:
+    socket_map
+except NameError:
+    socket_map = {}
+
+def _strerror(err):
+    try:
+        return os.strerror(err)
+    except (ValueError, OverflowError, NameError):
+        if err in errorcode:
+            return errorcode[err]
+        return "Unknown error %s" %err
+
+class ExitNow(Exception):
+    pass
+
+_reraised_exceptions = (ExitNow, KeyboardInterrupt, SystemExit)
+
+def read(obj):
+    try:
+        obj.handle_read_event()
+    except _reraised_exceptions:
+        raise
+    except:
+        obj.handle_error()
+
+def write(obj):
+    try:
+        obj.handle_write_event()
+    except _reraised_exceptions:
+        raise
+    except:
+        obj.handle_error()
+
+def _exception(obj):
+    try:
+        obj.handle_expt_event()
+    except _reraised_exceptions:
+        raise
+    except:
+        obj.handle_error()
+
+def readwrite(obj, flags):
+    try:
+        if flags & select.POLLIN:
+            obj.handle_read_event()
+        if flags & select.POLLOUT:
+            obj.handle_write_event()
+        if flags & select.POLLPRI:
+            obj.handle_expt_event()
+        if flags & (select.POLLHUP | select.POLLERR | select.POLLNVAL):
+            obj.handle_close()
+    except socket.error as e:
+        if e.args[0] not in _DISCONNECTED:
+            obj.handle_error()
+        else:
+            obj.handle_close()
+    except _reraised_exceptions:
+        raise
+    except:
+        obj.handle_error()
+
+def poll(timeout=0.0, map=None):
+    if map is None:
+        map = socket_map
+    if map:
+        r = []; w = []; e = []
+        for fd, obj in list(map.items()):
+            is_r = obj.readable()
+            is_w = obj.writable()
+            if is_r:
+                r.append(fd)
+            # accepting sockets should not be writable
+            if is_w and not obj.accepting:
+                w.append(fd)
+            if is_r or is_w:
+                e.append(fd)
+        if [] == r == w == e:
+            time.sleep(timeout)
+            return
+
+        try:
+            r, w, e = select.select(r, w, e, timeout)
+        except select.error as err:
+            if err.args[0] != EINTR:
+                raise
+            else:
+                return
+
+        for fd in r:
+            obj = map.get(fd)
+            if obj is None:
+                continue
+            read(obj)
+
+        for fd in w:
+            obj = map.get(fd)
+            if obj is None:
+                continue
+            write(obj)
+
+        for fd in e:
+            obj = map.get(fd)
+            if obj is None:
+                continue
+            _exception(obj)
+
+def poll2(timeout=0.0, map=None):
+    # Use the poll() support added to the select module in Python 2.0
+    if map is None:
+        map = socket_map
+    if timeout is not None:
+        # timeout is in milliseconds
+        timeout = int(timeout*1000)
+    pollster = select.poll()
+    if map:
+        for fd, obj in list(map.items()):
+            flags = 0
+            if obj.readable():
+                flags |= select.POLLIN | select.POLLPRI
+            # accepting sockets should not be writable
+            if obj.writable() and not obj.accepting:
+                flags |= select.POLLOUT
+            if flags:
+                # Only check for exceptions if object was either readable
+                # or writable.
+                flags |= select.POLLERR | select.POLLHUP | select.POLLNVAL
+                pollster.register(fd, flags)
+        try:
+            r = pollster.poll(timeout)
+        except select.error as err:
+            if err.args[0] != EINTR:
+                raise
+            r = []
+        for fd, flags in r:
+            obj = map.get(fd)
+            if obj is None:
+                continue
+            readwrite(obj, flags)
+
+poll3 = poll2                           # Alias for backward compatibility
+
+def loop(timeout=30.0, use_poll=False, map=None, count=None):
+    if map is None:
+        map = socket_map
+
+    if use_poll and hasattr(select, 'poll'):
+        poll_fun = poll2
+    else:
+        poll_fun = poll
+
+    if count is None:
+        while map:
+            poll_fun(timeout, map)
+
+    else:
+        while map and count > 0:
+            poll_fun(timeout, map)
+            count = count - 1
+
+class dispatcher:
+
+    debug = False
+    connected = False
+    accepting = False
+    closing = False
+    addr = None
+    ignore_log_types = frozenset(['warning'])
+
+    def __init__(self, sock=None, map=None):
+        if map is None:
+            self._map = socket_map
+        else:
+            self._map = map
+
+        self._fileno = None
+
+        if sock:
+            # Set to nonblocking just to make sure for cases where we
+            # get a socket from a blocking source.
+            sock.setblocking(0)
+            self.set_socket(sock, map)
+            self.connected = True
+            # The constructor no longer requires that the socket
+            # passed be connected.
+            try:
+                self.addr = sock.getpeername()
+            except socket.error as err:
+                if err.args[0] == ENOTCONN:
+                    # To handle the case where we got an unconnected
+                    # socket.
+                    self.connected = False
+                else:
+                    # The socket is broken in some unknown way, alert
+                    # the user and remove it from the map (to prevent
+                    # polling of broken sockets).
+                    self.del_channel(map)
+                    raise
+        else:
+            self.socket = None
+
+    def __repr__(self):
+        status = [self.__class__.__module__+"."+self.__class__.__name__]
+        if self.accepting and self.addr:
+            status.append('listening')
+        elif self.connected:
+            status.append('connected')
+        if self.addr is not None:
+            try:
+                status.append('%s:%d' % self.addr)
+            except TypeError:
+                status.append(repr(self.addr))
+        return '<%s at %#x>' % (' '.join(status), id(self))
+
+    __str__ = __repr__
+
+    def add_channel(self, map=None):
+        #self.log_info('adding channel %s' % self)
+        if map is None:
+            map = self._map
+        map[self._fileno] = self
+
+    def del_channel(self, map=None):
+        fd = self._fileno
+        if map is None:
+            map = self._map
+        if fd in map:
+            #self.log_info('closing channel %d:%s' % (fd, self))
+            del map[fd]
+        self._fileno = None
+
+    def create_socket(self, family, type):
+        self.family_and_type = family, type
+        sock = socket.socket(family, type)
+        sock.setblocking(0)
+        self.set_socket(sock)
+
+    def set_socket(self, sock, map=None):
+        self.socket = sock
+        self._fileno = sock.fileno()
+        self.add_channel(map)
+
+    def set_reuse_addr(self):
+        # try to re-use a server port if possible
+        try:
+            self.socket.setsockopt(
+                socket.SOL_SOCKET, socket.SO_REUSEADDR,
+                self.socket.getsockopt(socket.SOL_SOCKET,
+                                       socket.SO_REUSEADDR) | 1
+                )
+        except socket.error:
+            pass
+
+    # ==================================================
+    # predicates for select()
+    # these are used as filters for the lists of sockets
+    # to pass to select().
+    # ==================================================
+
+    def readable(self):
+        return True
+
+    def writable(self):
+        return True
+
+    # ==================================================
+    # socket object methods.
+    # ==================================================
+
+    def listen(self, num):
+        self.accepting = True
+        if os.name == 'nt' and num > 5:
+            num = 5
+        return self.socket.listen(num)
+
+    def bind(self, addr):
+        self.addr = addr
+        return self.socket.bind(addr)
+
+    def connect(self, address):
+        self.connected = False
+        err = self.socket.connect_ex(address)
+        if err in (EINPROGRESS, EALREADY, EWOULDBLOCK) or \
+                (err == EINVAL and os.name in ('nt', 'ce')):
+            return
+        if err in (0, EISCONN):
+            self.addr = address
+            self.handle_connect_event()
+        else:
+            raise socket.error(err, errorcode[err])
+
+    def accept(self):
+        # XXX can return either an address pair or None
+        try:
+            conn, addr = self.socket.accept()
+        except TypeError:
+            return None
+        except socket.error as why:
+            if why.args[0] in (EWOULDBLOCK, ECONNABORTED, EAGAIN):
+                return None
+            else:
+                raise
+        else:
+            return conn, addr
+
+    def send(self, data):
+        try:
+            result = self.socket.send(data)
+            return result
+        except socket.error as why:
+            if why.args[0] == EWOULDBLOCK:
+                return 0
+            elif why.args[0] in _DISCONNECTED:
+                self.handle_close()
+                return 0
+            else:
+                raise
+
+    def recv(self, buffer_size):
+        try:
+            data = self.socket.recv(buffer_size)
+            if not data:
+                # a closed connection is indicated by signaling
+                # a read condition, and having recv() return 0.
+                self.handle_close()
+                return b''
+            else:
+                return data
+        except socket.error as why:
+            # winsock sometimes throws ENOTCONN
+            if why.args[0] in _DISCONNECTED:
+                self.handle_close()
+                return b''
+            else:
+                raise
+
+    def close(self):
+        self.connected = False
+        self.accepting = False
+        self.del_channel()
+        try:
+            self.socket.close()
+        except socket.error as why:
+            if why.args[0] not in (ENOTCONN, EBADF):
+                raise
+
+    # cheap inheritance, used to pass all other attribute
+    # references to the underlying socket object.
+    def __getattr__(self, attr):
+        try:
+            retattr = getattr(self.socket, attr)
+        except AttributeError:
+            raise AttributeError("%s instance has no attribute '%s'"
+                                 %(self.__class__.__name__, attr))
+        else:
+            msg = "%(me)s.%(attr)s is deprecated; use %(me)s.socket.%(attr)s " \
+                  "instead" % {'me' : self.__class__.__name__, 'attr' : attr}
+            warnings.warn(msg, DeprecationWarning, stacklevel=2)
+            return retattr
+
+    # log and log_info may be overridden to provide more sophisticated
+    # logging and warning methods. In general, log is for 'hit' logging
+    # and 'log_info' is for informational, warning and error logging.
+
+    def log(self, message):
+        sys.stderr.write('log: %s\n' % str(message))
+
+    def log_info(self, message, type='info'):
+        if type not in self.ignore_log_types:
+            print('%s: %s' % (type, message))
+
+    def handle_read_event(self):
+        if self.accepting:
+            # accepting sockets are never connected, they "spawn" new
+            # sockets that are connected
+            self.handle_accept()
+        elif not self.connected:
+            self.handle_connect_event()
+            self.handle_read()
+        else:
+            self.handle_read()
+
+    def handle_connect_event(self):
+        err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
+        if err != 0:
+            raise socket.error(err, _strerror(err))
+        self.handle_connect()
+        self.connected = True
+
+    def handle_write_event(self):
+        if self.accepting:
+            # Accepting sockets shouldn't get a write event.
+            # We will pretend it didn't happen.
+            return
+
+        if not self.connected:
+            #check for errors
+            err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
+            if err != 0:
+                raise socket.error(err, _strerror(err))
+
+            self.handle_connect_event()
+        self.handle_write()
+
+    def handle_expt_event(self):
+        # handle_expt_event() is called if there might be an error on the
+        # socket, or if there is OOB data
+        # check for the error condition first
+        err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
+        if err != 0:
+            # we can get here when select.select() says that there is an
+            # exceptional condition on the socket
+            # since there is an error, we'll go ahead and close the socket
+            # like we would in a subclassed handle_read() that received no
+            # data
+            self.handle_close()
+        else:
+            self.handle_expt()
+
+    def handle_error(self):
+        nil, t, v, tbinfo = compact_traceback()
+
+        # sometimes a user repr method will crash.
+        try:
+            self_repr = repr(self)
+        except:
+            self_repr = '<__repr__(self) failed for object at %0x>' % id(self)
+
+        self.log_info(
+            'uncaptured python exception, closing channel %s (%s:%s %s)' % (
+                self_repr,
+                t,
+                v,
+                tbinfo
+                ),
+            'error'
+            )
+        self.handle_close()
+
+    def handle_expt(self):
+        self.log_info('unhandled incoming priority event', 'warning')
+
+    def handle_read(self):
+        self.log_info('unhandled read event', 'warning')
+
+    def handle_write(self):
+        self.log_info('unhandled write event', 'warning')
+
+    def handle_connect(self):
+        self.log_info('unhandled connect event', 'warning')
+
+    def handle_accept(self):
+        pair = self.accept()
+        if pair is not None:
+            self.handle_accepted(*pair)
+
+    def handle_accepted(self, sock, addr):
+        sock.close()
+        self.log_info('unhandled accepted event', 'warning')
+
+    def handle_close(self):
+        self.log_info('unhandled close event', 'warning')
+        self.close()
+
+# ---------------------------------------------------------------------------
+# adds simple buffered output capability, useful for simple clients.
+# [for more sophisticated usage use asynchat.async_chat]
+# ---------------------------------------------------------------------------
+
+class dispatcher_with_send(dispatcher):
+
+    def __init__(self, sock=None, map=None):
+        dispatcher.__init__(self, sock, map)
+        self.out_buffer = b''
+
+    def initiate_send(self):
+        num_sent = dispatcher.send(self, self.out_buffer[:512])
+        self.out_buffer = self.out_buffer[num_sent:]
+
+    def handle_write(self):
+        self.initiate_send()
+
+    def writable(self):
+        return (not self.connected) or len(self.out_buffer)
+
+    def send(self, data):
+        if self.debug:
+            self.log_info('sending %s' % repr(data))
+        self.out_buffer = self.out_buffer + data
+        self.initiate_send()
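+
+# Editor's note: an illustrative sketch, not part of the module -- the classic
+# echo server built from dispatcher and dispatcher_with_send; echo_handler and
+# echo_server are hypothetical names.
+#
+#     class echo_handler(dispatcher_with_send):
+#         def handle_read(self):
+#             data = self.recv(8192)
+#             if data:
+#                 self.send(data)
+#
+#     class echo_server(dispatcher):
+#         def __init__(self, host, port):
+#             dispatcher.__init__(self)
+#             self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
+#             self.set_reuse_addr()
+#             self.bind((host, port))
+#             self.listen(5)
+#
+#         def handle_accepted(self, sock, addr):
+#             echo_handler(sock)
+#
+#     server = echo_server('localhost', 8080)
+#     loop()   # runs until all channels are closed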
+
+# ---------------------------------------------------------------------------
+# used for debugging.
+# ---------------------------------------------------------------------------
+
+def compact_traceback():
+    t, v, tb = sys.exc_info()
+    tbinfo = []
+    if not tb: # Must have a traceback
+        raise AssertionError("traceback does not exist")
+    while tb:
+        tbinfo.append((
+            tb.tb_frame.f_code.co_filename,
+            tb.tb_frame.f_code.co_name,
+            str(tb.tb_lineno)
+            ))
+        tb = tb.tb_next
+
+    # just to be safe
+    del tb
+
+    file, function, line = tbinfo[-1]
+    info = ' '.join(['[%s|%s|%s]' % x for x in tbinfo])
+    return (file, function, line), t, v, info
+
+def close_all(map=None, ignore_all=False):
+    if map is None:
+        map = socket_map
+    for x in list(map.values()):
+        try:
+            x.close()
+        except OSError as e:
+            if e.args[0] == EBADF:
+                pass
+            elif not ignore_all:
+                raise
+        except _reraised_exceptions:
+            raise
+        except:
+            if not ignore_all:
+                raise
+    map.clear()
+
+# Asynchronous File I/O:
+#
+# After a little research (reading man pages on various unixen, and
+# digging through the linux kernel), I've determined that select()
+# isn't meant for doing asynchronous file i/o.
+# Heartening, though - reading linux/mm/filemap.c shows that linux
+# supports asynchronous read-ahead.  So _MOST_ of the time, the data
+# will be sitting in memory for us already when we go to read it.
+#
+# What other OS's (besides NT) support async file i/o?  [VMS?]
+#
+# Regardless, this is useful for pipes, and stdin/stdout...
+
+if os.name == 'posix':
+    import fcntl
+
+    class file_wrapper:
+        # Here we override just enough to make a file
+        # look like a socket for the purposes of asyncore.
+        # The passed fd is automatically os.dup()'d
+
+        def __init__(self, fd):
+            self.fd = os.dup(fd)
+
+        def recv(self, *args):
+            return os.read(self.fd, *args)
+
+        def send(self, *args):
+            return os.write(self.fd, *args)
+
+        def getsockopt(self, level, optname, buflen=None):
+            if (level == socket.SOL_SOCKET and
+                optname == socket.SO_ERROR and
+                not buflen):
+                return 0
+            raise NotImplementedError("Only asyncore specific behaviour "
+                                      "implemented.")
+
+        read = recv
+        write = send
+
+        def close(self):
+            os.close(self.fd)
+
+        def fileno(self):
+            return self.fd
+
+    class file_dispatcher(dispatcher):
+
+        def __init__(self, fd, map=None):
+            dispatcher.__init__(self, None, map)
+            self.connected = True
+            try:
+                fd = fd.fileno()
+            except AttributeError:
+                pass
+            self.set_file(fd)
+            # set it to non-blocking mode
+            flags = fcntl.fcntl(fd, fcntl.F_GETFL, 0)
+            flags = flags | os.O_NONBLOCK
+            fcntl.fcntl(fd, fcntl.F_SETFL, flags)
+
+        def set_file(self, fd):
+            self.socket = file_wrapper(fd)
+            self._fileno = self.socket.fileno()
+            self.add_channel()
diff --git a/lib-python/3.2/base64.py b/lib-python/3.2/base64.py
new file mode 100755
--- /dev/null
+++ b/lib-python/3.2/base64.py
@@ -0,0 +1,408 @@
+#! /usr/bin/env python3
+
+"""RFC 3548: Base16, Base32, Base64 Data Encodings"""
+
+# Modified 04-Oct-1995 by Jack Jansen to use binascii module
+# Modified 30-Dec-2003 by Barry Warsaw to add full RFC 3548 support
+# Modified 22-May-2007 by Guido van Rossum to use bytes everywhere
+
+import re
+import struct
+import binascii
+
+
+__all__ = [
+    # Legacy interface exports traditional RFC 1521 Base64 encodings
+    'encode', 'decode', 'encodebytes', 'decodebytes',
+    # Generalized interface for other encodings
+    'b64encode', 'b64decode', 'b32encode', 'b32decode',
+    'b16encode', 'b16decode',
+    # Standard Base64 encoding
+    'standard_b64encode', 'standard_b64decode',
+    # Some common Base64 alternatives.  As referenced by RFC 3458, see thread
+    # starting at:
+    #
+    # http://zgp.org/pipermail/p2p-hackers/2001-September/000316.html
+    'urlsafe_b64encode', 'urlsafe_b64decode',
+    ]
+
+
+bytes_types = (bytes, bytearray)  # Types acceptable as binary data
+
+
+def _translate(s, altchars):
+    if not isinstance(s, bytes_types):
+        raise TypeError("expected bytes, not %s" % s.__class__.__name__)
+    translation = bytearray(range(256))
+    for k, v in altchars.items():
+        translation[ord(k)] = v[0]
+    return s.translate(translation)
+
+
+
+# Base64 encoding/decoding uses binascii
+
+def b64encode(s, altchars=None):
+    """Encode a byte string using Base64.
+
+    s is the byte string to encode.  Optional altchars must be a byte
+    string of length 2 which specifies an alternative alphabet for the
+    '+' and '/' characters.  This allows an application to
+    e.g. generate url or filesystem safe Base64 strings.
+
+    The encoded byte string is returned.
+    """
+    if not isinstance(s, bytes_types):
+        raise TypeError("expected bytes, not %s" % s.__class__.__name__)
+    # Strip off the trailing newline
+    encoded = binascii.b2a_base64(s)[:-1]
+    if altchars is not None:
+        if not isinstance(altchars, bytes_types):
+            raise TypeError("expected bytes, not %s"
+                            % altchars.__class__.__name__)
+        assert len(altchars) == 2, repr(altchars)
+        return _translate(encoded, {'+': altchars[0:1], '/': altchars[1:2]})
+    return encoded
+
+
+def b64decode(s, altchars=None, validate=False):
+    """Decode a Base64 encoded byte string.
+
+    s is the byte string to decode.  Optional altchars must be a
+    string of length 2 which specifies the alternative alphabet used
+    instead of the '+' and '/' characters.
+
+    The decoded string is returned.  A binascii.Error is raised if s is
+    incorrectly padded.
+
+    If validate is False (the default), non-base64-alphabet characters are
+    discarded prior to the padding check.  If validate is True,
+    non-base64-alphabet characters in the input result in a binascii.Error.
+    """
+    if not isinstance(s, bytes_types):
+        raise TypeError("expected bytes, not %s" % s.__class__.__name__)
+    if altchars is not None:
+        if not isinstance(altchars, bytes_types):
+            raise TypeError("expected bytes, not %s"
+                            % altchars.__class__.__name__)
+        assert len(altchars) == 2, repr(altchars)
+        s = _translate(s, {chr(altchars[0]): b'+', chr(altchars[1]): b'/'})
+    if validate and not re.match(b'^[A-Za-z0-9+/]*={0,2}$', s):
+        raise binascii.Error('Non-base64 digit found')
+    return binascii.a2b_base64(s)
+
+
+def standard_b64encode(s):
+    """Encode a byte string using the standard Base64 alphabet.
+
+    s is the byte string to encode.  The encoded byte string is returned.
+    """
+    return b64encode(s)
+
+def standard_b64decode(s):
+    """Decode a byte string encoded with the standard Base64 alphabet.
+
+    s is the byte string to decode.  The decoded byte string is
+    returned.  binascii.Error is raised if the input is incorrectly
+    padded or if there are non-alphabet characters present in the
+    input.
+    """
+    return b64decode(s)
+
+def urlsafe_b64encode(s):
+    """Encode a byte string using a url-safe Base64 alphabet.
+
+    s is the byte string to encode.  The encoded byte string is
+    returned.  The alphabet uses '-' instead of '+' and '_' instead of
+    '/'.
+    """
+    return b64encode(s, b'-_')
+
+def urlsafe_b64decode(s):
+    """Decode a byte string encoded with the standard Base64 alphabet.
+
+    s is the byte string to decode.  The decoded byte string is
+    returned.  binascii.Error is raised if the input is incorrectly
+    padded or if there are non-alphabet characters present in the
+    input.
+
+    The alphabet uses '-' instead of '+' and '_' instead of '/'.
+    """
+    return b64decode(s, b'-_')
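+
+# Editor's note: a doctest-style sketch, not part of the module.
+#
+#     >>> b64encode(b'data to encode')
+#     b'ZGF0YSB0byBlbmNvZGU='
+#     >>> b64decode(b'ZGF0YSB0byBlbmNvZGU=')
+#     b'data to encode'
+#     >>> urlsafe_b64encode(b'\xfb\xff')
+#     b'-_8='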
+
+
+
+# Base32 encoding/decoding must be done in Python
+_b32alphabet = {
+    0: b'A',  9: b'J', 18: b'S', 27: b'3',
+    1: b'B', 10: b'K', 19: b'T', 28: b'4',
+    2: b'C', 11: b'L', 20: b'U', 29: b'5',
+    3: b'D', 12: b'M', 21: b'V', 30: b'6',
+    4: b'E', 13: b'N', 22: b'W', 31: b'7',
+    5: b'F', 14: b'O', 23: b'X',
+    6: b'G', 15: b'P', 24: b'Y',
+    7: b'H', 16: b'Q', 25: b'Z',
+    8: b'I', 17: b'R', 26: b'2',
+    }
+
+_b32tab = [v[0] for k, v in sorted(_b32alphabet.items())]
+_b32rev = dict([(v[0], k) for k, v in _b32alphabet.items()])
+
+
+def b32encode(s):
+    """Encode a byte string using Base32.
+
+    s is the byte string to encode.  The encoded byte string is returned.
+    """
+    if not isinstance(s, bytes_types):
+        raise TypeError("expected bytes, not %s" % s.__class__.__name__)
+    quanta, leftover = divmod(len(s), 5)
+    # Pad the last quantum with zero bits if necessary
+    if leftover:
+        s = s + bytes(5 - leftover)  # Don't use += !
+        quanta += 1
+    encoded = bytes()
+    for i in range(quanta):
+        # c1 and c2 are 16 bits wide, c3 is 8 bits wide.  The intent of this
+        # code is to process the 40 bits in units of 5 bits.  So we take the 1
+        # leftover bit of c1 and tack it onto c2.  Then we take the 2 leftover
+        # bits of c2 and tack them onto c3.  The shifts and masks are intended
+        # to give us values of exactly 5 bits in width.
+        c1, c2, c3 = struct.unpack('!HHB', s[i*5:(i+1)*5])
+        c2 += (c1 & 1) << 16 # 17 bits wide
+        c3 += (c2 & 3) << 8  # 10 bits wide
+        encoded += bytes([_b32tab[c1 >> 11],         # bits 1 - 5
+                          _b32tab[(c1 >> 6) & 0x1f], # bits 6 - 10
+                          _b32tab[(c1 >> 1) & 0x1f], # bits 11 - 15
+                          _b32tab[c2 >> 12],         # bits 16 - 20 (1 - 5)
+                          _b32tab[(c2 >> 7) & 0x1f], # bits 21 - 25 (6 - 10)
+                          _b32tab[(c2 >> 2) & 0x1f], # bits 26 - 30 (11 - 15)
+                          _b32tab[c3 >> 5],          # bits 31 - 35 (1 - 5)
+                          _b32tab[c3 & 0x1f],        # bits 36 - 40 (1 - 5)
+                          ])
+    # Adjust for any leftover partial quanta
+    if leftover == 1:
+        return encoded[:-6] + b'======'
+    elif leftover == 2:
+        return encoded[:-4] + b'===='
+    elif leftover == 3:
+        return encoded[:-3] + b'==='
+    elif leftover == 4:
+        return encoded[:-1] + b'='
+    return encoded
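+
+# Editor's note: a doctest-style sketch, not part of the module.  Two input
+# bytes fill only 16 bits of a 40-bit quantum, so four pad characters result,
+# as in the leftover handling above.
+#
+#     >>> b32encode(b'hi')
+#     b'NBUQ===='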
+
+
+def b32decode(s, casefold=False, map01=None):
+    """Decode a Base32 encoded byte string.
+
+    s is the byte string to decode.  Optional casefold is a flag
+    specifying whether a lowercase alphabet is acceptable as input.
+    For security purposes, the default is False.
+
+    RFC 3548 allows for optional mapping of the digit 0 (zero) to the
+    letter O (oh), and for optional mapping of the digit 1 (one) to
+    either the letter I (eye) or letter L (el).  The optional argument
+    map01 when not None, specifies which letter the digit 1 should be
+    mapped to (when map01 is not None, the digit 0 is always mapped to
+    the letter O).  For security purposes the default is None, so that
+    0 and 1 are not allowed in the input.
+
+    The decoded byte string is returned.  binascii.Error is raised if
+    the input is incorrectly padded or if there are non-alphabet
+    characters present in the input.
+    """
+    if not isinstance(s, bytes_types):
+        raise TypeError("expected bytes, not %s" % s.__class__.__name__)
+    quanta, leftover = divmod(len(s), 8)
+    if leftover:
+        raise binascii.Error('Incorrect padding')
+    # Handle section 2.4 zero and one mapping.  The flag map01 will be either
+    # False, or the character to map the digit 1 (one) to.  It should be
+    # either L (el) or I (eye).
+    if map01 is not None:
+        if not isinstance(map01, bytes_types):
+            raise TypeError("expected bytes, not %s" % map01.__class__.__name__)
+        assert len(map01) == 1, repr(map01)
+        s = _translate(s, {b'0': b'O', b'1': map01})
+    if casefold:
+        s = s.upper()
+    # Strip off pad characters from the right.  We need to count the pad
+    # characters because this will tell us how many null bytes to remove from
+    # the end of the decoded string.
+    padchars = 0
+    mo = re.search(b'(?P<pad>[=]*)$', s)
+    if mo:
+        padchars = len(mo.group('pad'))
+        if padchars > 0:
+            s = s[:-padchars]
+    # Now decode the full quanta
+    parts = []
+    acc = 0
+    shift = 35
+    for c in s:
+        val = _b32rev.get(c)
+        if val is None:
+            # Match the docstring: non-alphabet input is a binascii.Error.
+            raise binascii.Error('Non-base32 digit found')
+        acc += val << shift
+        shift -= 5
+        if shift < 0:
+            parts.append(binascii.unhexlify(bytes('%010x' % acc, "ascii")))
+            acc = 0
+            shift = 35
+    # Process the last, partial quantum
+    last = binascii.unhexlify(bytes('%010x' % acc, "ascii"))
+    if padchars == 0:
+        last = b''                      # No characters
+    elif padchars == 1:
+        last = last[:-1]
+    elif padchars == 3:
+        last = last[:-2]
+    elif padchars == 4:
+        last = last[:-3]
+    elif padchars == 6:
+        last = last[:-4]
+    else:
+        raise binascii.Error('Incorrect padding')
+    parts.append(last)
+    return b''.join(parts)
+
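+# Illustrative usage sketch (not part of the module) for the casefold
+# and map01 options described above; note that b'b' encodes to
+# b'MI======':
+#
+#     >>> b32decode(b'mi======', casefold=True)
+#     b'b'
+#     >>> b32decode(b'M1======', map01=b'I')    # digit 1 read as 'I'
+#     b'b'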
+
+
+# RFC 3548, Base 16 Alphabet specifies uppercase, but hexlify() returns
+# lowercase.  The RFC also recommends against accepting input case
+# insensitively.
+def b16encode(s):
+    """Encode a byte string using Base16.
+
+    s is the byte string to encode.  The encoded byte string is returned.
+    """
+    if not isinstance(s, bytes_types):
+        raise TypeError("expected bytes, not %s" % s.__class__.__name__)
+    return binascii.hexlify(s).upper()
+
+
+def b16decode(s, casefold=False):
+    """Decode a Base16 encoded byte string.
+
+    s is the byte string to decode.  Optional casefold is a flag
+    specifying whether a lowercase alphabet is acceptable as input.
+    For security purposes, the default is False.
+
+    The decoded byte string is returned.  binascii.Error is raised if
+    s is incorrectly padded or if there are non-alphabet characters
+    present in the string.
+    """
+    if not isinstance(s, bytes_types):
+        raise TypeError("expected bytes, not %s" % s.__class__.__name__)
+    if casefold:
+        s = s.upper()
+    if re.search(b'[^0-9A-F]', s):
+        raise binascii.Error('Non-base16 digit found')
+    return binascii.unhexlify(s)
+
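+# Illustrative usage sketch (not part of the module): Base16 is plain
+# uppercase hex, and lowercase input is only accepted with casefold=True:
+#
+#     >>> b16encode(b'\x01\xab')
+#     b'01AB'
+#     >>> b16decode(b'01ab', casefold=True)
+#     b'\x01\xab'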
+
+
+# Legacy interface.  This code could be cleaned up since I don't believe
+# binascii has any line length limitations.  It just doesn't seem worth it
+# though.  The files should be opened in binary mode.
+
+MAXLINESIZE = 76 # Excluding the CRLF
+MAXBINSIZE = (MAXLINESIZE//4)*3
+
+def encode(input, output):
+    """Encode a file; input and output are binary files."""
+    while True:
+        s = input.read(MAXBINSIZE)
+        if not s:
+            break
+        while len(s) < MAXBINSIZE:
+            ns = input.read(MAXBINSIZE-len(s))
+            if not ns:
+                break
+            s += ns
+        line = binascii.b2a_base64(s)
+        output.write(line)
+
+
+def decode(input, output):
+    """Decode a file; input and output are binary files."""
+    while True:
+        line = input.readline()
+        if not line:
+            break
+        s = binascii.a2b_base64(line)
+        output.write(s)
+
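+# Illustrative usage sketch (not part of the module): the legacy file
+# interface works on any binary file objects, e.g. io.BytesIO:
+#
+#     >>> import io
+#     >>> out = io.BytesIO()
+#     >>> encode(io.BytesIO(b'hello'), out)
+#     >>> out.getvalue()
+#     b'aGVsbG8=\n'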
+
+def encodebytes(s):
+    """Encode a bytestring into a bytestring containing multiple lines
+    of base-64 data."""
+    if not isinstance(s, bytes_types):
+        raise TypeError("expected bytes, not %s" % s.__class__.__name__)
+    pieces = []
+    for i in range(0, len(s), MAXBINSIZE):
+        chunk = s[i : i + MAXBINSIZE]
+        pieces.append(binascii.b2a_base64(chunk))
+    return b"".join(pieces)
+
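+# Illustrative usage sketch (not part of the module): input longer than
+# MAXBINSIZE bytes is split across multiple 76-character output lines:
+#
+#     >>> encodebytes(b'a' * 60).count(b'\n')
+#     2
+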
+def encodestring(s):
+    """Legacy alias of encodebytes()."""
+    import warnings
+    warnings.warn("encodestring() is a deprecated alias, use encodebytes()",
+                  DeprecationWarning, 2)
+    return encodebytes(s)
+
+
+def decodebytes(s):
+    """Decode a bytestring of base-64 data into a bytestring."""
+    if not isinstance(s, bytes_types):
+        raise TypeError("expected bytes, not %s" % s.__class__.__name__)
+    return binascii.a2b_base64(s)
+
+def decodestring(s):
+    """Legacy alias of decodebytes()."""
+    import warnings
+    warnings.warn("decodestring() is a deprecated alias, use decodebytes()",
+                  DeprecationWarning, 2)
+    return decodebytes(s)
+
+
+# Usable as a script...
+def main():
+    """Small main program"""
+    import sys, getopt
+    try:
+        opts, args = getopt.getopt(sys.argv[1:], 'deut')
+    except getopt.error as msg:
+        sys.stdout = sys.stderr
+        print(msg)
+        print("""usage: %s [-d|-e|-u|-t] [file|-]
+        -d, -u: decode
+        -e: encode (default)
+        -t: encode and decode string 'Aladdin:open sesame'"""%sys.argv[0])
+        sys.exit(2)
+    func = encode
+    for o, a in opts:
+        if o == '-e': func = encode
+        if o == '-d': func = decode
+        if o == '-u': func = decode
+        if o == '-t': test(); return
+    if args and args[0] != '-':
+        with open(args[0], 'rb') as f:
+            func(f, sys.stdout.buffer)
+    else:
+        func(sys.stdin.buffer, sys.stdout.buffer)
+
+
+def test():
+    s0 = b"Aladdin:open sesame"
+    print(repr(s0))
+    s1 = encodebytes(s0)
+    print(repr(s1))
+    s2 = decodebytes(s1)
+    print(repr(s2))
+    assert s0 == s2
+
+
+if __name__ == '__main__':
+    main()
diff --git a/lib-python/3.2/bdb.py b/lib-python/3.2/bdb.py
new file mode 100644
--- /dev/null
+++ b/lib-python/3.2/bdb.py
@@ -0,0 +1,634 @@
+"""Debugger basics"""
+
+import fnmatch
+import sys
+import os
+
+__all__ = ["BdbQuit", "Bdb", "Breakpoint"]
+
+class BdbQuit(Exception):
+    """Exception to give up completely."""
+
+
+class Bdb:
+    """Generic Python debugger base class.
+
+    This class takes care of details of the trace facility;
+    a derived class should implement user interaction.
+    The standard debugger class (pdb.Pdb) is an example.
+    """
+
+    def __init__(self, skip=None):
+        self.skip = set(skip) if skip else None
+        self.breaks = {}
+        self.fncache = {}
+
+    def canonic(self, filename):
+        # Pseudo-filenames such as "<string>" are returned unchanged;
+        # real paths are cached in absolute, normcased form.
+        if filename == "<" + filename[1:-1] + ">":
+            return filename
+        canonic = self.fncache.get(filename)
+        if not canonic:
+            canonic = os.path.abspath(filename)
+            canonic = os.path.normcase(canonic)
+            self.fncache[filename] = canonic
+        return canonic
+
+    def reset(self):
+        import linecache
+        linecache.checkcache()
+        self.botframe = None
+        self._set_stopinfo(None, None)
+
+    def trace_dispatch(self, frame, event, arg):
+        if self.quitting:
+            return # None
+        if event == 'line':
+            return self.dispatch_line(frame)
+        if event == 'call':
+            return self.dispatch_call(frame, arg)
+        if event == 'return':
+            return self.dispatch_return(frame, arg)
+        if event == 'exception':
+            return self.dispatch_exception(frame, arg)
+        if event == 'c_call':
+            return self.trace_dispatch
+        if event == 'c_exception':
+            return self.trace_dispatch
+        if event == 'c_return':
+            return self.trace_dispatch
+        print('bdb.Bdb.dispatch: unknown debugging event:', repr(event))
+        return self.trace_dispatch
+
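+    # Hypothetical wiring sketch (not part of bdb): the interpreter
+    # calls trace_dispatch once per event; returning trace_dispatch
+    # keeps the frame traced, returning None stops tracing it:
+    #
+    #     import sys
+    #     dbg = Bdb()
+    #     dbg.reset()                        # initialize stop state
+    #     sys.settrace(dbg.trace_dispatch)   # events now flow here
+    #     sys.settrace(None)                 # detach when done
+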
+    def dispatch_line(self, frame):
+        if self.stop_here(frame) or self.break_here(frame):
+            self.user_line(frame)
+            if self.quitting: raise BdbQuit
+        return self.trace_dispatch
+
+    def dispatch_call(self, frame, arg):
+        # XXX 'arg' is no longer used
+        if self.botframe is None:
+            # First call of dispatch since reset()
+            self.botframe = frame.f_back # (CT) Note that this may also be None!
+            return self.trace_dispatch
+        if not (self.stop_here(frame) or self.break_anywhere(frame)):
+            # No need to trace this function
+            return # None
+        self.user_call(frame, arg)
+        if self.quitting: raise BdbQuit
+        return self.trace_dispatch
+
+    def dispatch_return(self, frame, arg):
+        if self.stop_here(frame) or frame == self.returnframe:
+            self.user_return(frame, arg)
+            if self.quitting: raise BdbQuit
+        return self.trace_dispatch
+
+    def dispatch_exception(self, frame, arg):
+        if self.stop_here(frame):
+            self.user_exception(frame, arg)
+            if self.quitting: raise BdbQuit
+        return self.trace_dispatch
+
+    # Normally derived classes don't override the following
+    # methods, but they may if they want to redefine the
+    # definition of stopping and breakpoints.
+
+    def is_skipped_module(self, module_name):
+        for pattern in self.skip:
+            if fnmatch.fnmatch(module_name, pattern):
+                return True
+        return False
+
+    def stop_here(self, frame):
+        # (CT) stopframe may now also be None, see dispatch_call.
+        # (CT) the former test for None is therefore removed from here.
+        if self.skip and \
+               self.is_skipped_module(frame.f_globals.get('__name__')):
+            return False
+        if frame is self.stopframe:
+            if self.stoplineno == -1:
+                return False
+            return frame.f_lineno >= self.stoplineno
+        while frame is not None and frame is not self.stopframe:
+            if frame is self.botframe:
+                return True
+            frame = frame.f_back
+        return False
+
+    def break_here(self, frame):
+        filename = self.canonic(frame.f_code.co_filename)
+        if filename not in self.breaks:
+            return False
+        lineno = frame.f_lineno
+        if lineno not in self.breaks[filename]:
+            # The line itself has no breakpoint, but maybe the line is the
+            # first line of a function with breakpoint set by function name.
+            lineno = frame.f_code.co_firstlineno
+            if lineno not in self.breaks[filename]:
+                return False
+
+        # flag says ok to delete temp. bp
+        (bp, flag) = effective(filename, lineno, frame)
+        if bp:
+            self.currentbp = bp.number
+            if (flag and bp.temporary):
+                self.do_clear(str(bp.number))
+            return True
+        else:
+            return False
+
+    def do_clear(self, arg):
+        raise NotImplementedError("subclass of bdb must implement do_clear()")
+
+    def break_anywhere(self, frame):
+        return self.canonic(frame.f_code.co_filename) in self.breaks
+
+    # Derived classes should override the user_* methods
+    # to gain control.
+
+    def user_call(self, frame, argument_list):
+        """This method is called when there is the remote possibility
+        that we ever need to stop in this function."""
+        pass
+
+    def user_line(self, frame):
+        """This method is called when we stop or break at this line."""
+        pass
+
+    def user_return(self, frame, return_value):
+        """This method is called when a return trap is set here."""
+        pass
+
+    def user_exception(self, frame, exc_info):
+        """This method is called if an exception occurs,
+        but only if we are to stop at or just below this level."""
+        pass
+
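+    # A minimal subclassing sketch (illustrative only, not part of
+    # bdb): overriding user_line() traces every executed line.
+    # Bdb.run() is defined further down in this module.
+    #
+    #     class EchoDebugger(Bdb):
+    #         def user_line(self, frame):
+    #             print('line', frame.f_lineno, 'in', frame.f_code.co_name)
+    #             self.set_step()            # keep single-stepping
+    #
+    #     EchoDebugger().run('x = 1 + 2')
+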
+    def _set_stopinfo(self, stopframe, returnframe, stoplineno=0):
+        self.stopframe = stopframe
+        self.returnframe = returnframe
+        self.quitting = False
+        # stoplineno >= 0 means: stop at line >= the stoplineno
+        # stoplineno == -1 means: don't stop at all
+        self.stoplineno = stoplineno
+
+    # Derived classes and clients can call the following methods
+    # to affect the stepping state.
+
+    def set_until(self, frame, lineno=None):
+        """Stop when the line with the line no greater than the current one is
+        reached or when returning from current frame"""
+        # the name "until" is borrowed from gdb
+        if lineno is None:
+            lineno = frame.f_lineno + 1
+        self._set_stopinfo(frame, frame, lineno)
+
+    def set_step(self):
+        """Stop after one line of code."""
+        self._set_stopinfo(None, None)
+
+    def set_next(self, frame):
+        """Stop on the next line in or below the given frame."""
+        self._set_stopinfo(frame, None)
+
+    def set_return(self, frame):
+        """Stop when returning from the given frame."""
+        self._set_stopinfo(frame.f_back, frame)
+
+    def set_trace(self, frame=None):
+        """Start debugging from `frame`.
+
+        If frame is not specified, debugging starts from caller's frame.
+        """
+        if frame is None:
+            frame = sys._getframe().f_back
+        self.reset()
+        while frame:
+            frame.f_trace = self.trace_dispatch
+            self.botframe = frame
+            frame = frame.f_back
+        self.set_step()
+        sys.settrace(self.trace_dispatch)
+
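+    # Hypothetical usage sketch (EchoDebugger as sketched above): with
+    # no argument, debugging starts at the caller's own frame:
+    #
+    #     EchoDebugger().set_trace()
+    #     x = 1 + 2       # user_line fires for this and later lines
+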
+    def set_continue(self):
+        # Don't stop except at breakpoints or when finished
+        self._set_stopinfo(self.botframe, None, -1)
+        if not self.breaks:
+            # no breakpoints; run without debugger overhead
+            sys.settrace(None)
+            frame = sys._getframe().f_back
+            while frame and frame is not self.botframe:
+                del frame.f_trace
+                frame = frame.f_back
+
+    def set_quit(self):
+        self.stopframe = self.botframe
+        self.returnframe = None
+        self.quitting = True
+        sys.settrace(None)
+
+    # Derived classes and clients can call the following methods
+    # to manipulate breakpoints.  These methods return an
+    # error message if something went wrong, None if all is well.
+    # Call self.get_*break*() to see the breakpoints, or better,
+    # for bp in Breakpoint.bpbynumber: if bp: bp.bpprint().
+
+    def set_break(self, filename, lineno, temporary=False, cond=None,
+                  funcname=None):
+        filename = self.canonic(filename)
+        import linecache # Import as late as possible
+        line = linecache.getline(filename, lineno)
+        if not line:
+            return 'Line %s:%d does not exist' % (filename, lineno)
+        linenos = self.breaks.setdefault(filename, [])
+        if lineno not in linenos:
+            linenos.append(lineno)
+        bp = Breakpoint(filename, lineno, temporary, cond, funcname)
+
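+    # Hypothetical usage sketch (`dbg` is any Bdb subclass instance):
+    # set_break() returns an error string on failure, None on success:
+    #
+    #     err = dbg.set_break('spam.py', 3)
+    #     if err:
+    #         print(err)
+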
+    def _prune_breaks(self, filename, lineno):
+        if (filename, lineno) not in Breakpoint.bplist:
+            self.breaks[filename].remove(lineno)
+        if not self.breaks[filename]:
+            del self.breaks[filename]
+
+    def clear_break(self, filename, lineno):
+        filename = self.canonic(filename)
+        if filename not in self.breaks:
+            return 'There are no breakpoints in %s' % filename
+        if lineno not in self.breaks[filename]:
+            return 'There is no breakpoint at %s:%d' % (filename, lineno)
+        # If there's only one bp in the list for that file,line
+        # pair, then remove the breaks entry
+        for bp in Breakpoint.bplist[filename, lineno][:]:
+            bp.deleteMe()
+        self._prune_breaks(filename, lineno)
+
+    def clear_bpbynumber(self, arg):
+        try:
+            bp = self.get_bpbynumber(arg)
+        except ValueError as err:
+            return str(err)
+        bp.deleteMe()
+        self._prune_breaks(bp.file, bp.line)
+
+    def clear_all_file_breaks(self, filename):
+        filename = self.canonic(filename)
+        if filename not in self.breaks:
+            return 'There are no breakpoints in %s' % filename
+        for line in self.breaks[filename]:
+            blist = Breakpoint.bplist[filename, line]
+            for bp in blist:
+                bp.deleteMe()
+        del self.breaks[filename]
+
+    def clear_all_breaks(self):
+        if not self.breaks:
+            return 'There are no breakpoints'
+        for bp in Breakpoint.bpbynumber:
+            if bp:
+                bp.deleteMe()
+        self.breaks = {}
+
+    def get_bpbynumber(self, arg):
+        if not arg:
+            raise ValueError('Breakpoint number expected')
+        try:
+            number = int(arg)
+        except ValueError:
+            raise ValueError('Non-numeric breakpoint number %s' % arg)
+        try:
+            bp = Breakpoint.bpbynumber[number]
+        except IndexError:
+            raise ValueError('Breakpoint number %d out of range' % number)
+        if bp is None:
+            raise ValueError('Breakpoint %d already deleted' % number)
+        return bp
+
+    def get_break(self, filename, lineno):
+        filename = self.canonic(filename)
+        return filename in self.breaks and \
+            lineno in self.breaks[filename]
+
+    def get_breaks(self, filename, lineno):
+        filename = self.canonic(filename)
+        return filename in self.breaks and \
+            lineno in self.breaks[filename] and \
+            Breakpoint.bplist[filename, lineno] or []

